
Commit 8a0516e

Mel Gorman authored and torvalds committed
mm: convert p[te|md]_numa users to p[te|md]_protnone_numa
Convert existing users of pte_numa and friends to the new helper. Note
that the kernel is broken after this patch is applied until the other
page table modifiers are also altered. This patch layout is to make
review easier.

Signed-off-by: Mel Gorman <[email protected]>
Acked-by: Linus Torvalds <[email protected]>
Acked-by: Aneesh Kumar <[email protected]>
Acked-by: Benjamin Herrenschmidt <[email protected]>
Tested-by: Sasha Levin <[email protected]>
Cc: Dave Jones <[email protected]>
Cc: Hugh Dickins <[email protected]>
Cc: Ingo Molnar <[email protected]>
Cc: Kirill Shutemov <[email protected]>
Cc: Paul Mackerras <[email protected]>
Cc: Rik van Riel <[email protected]>
Cc: Sasha Levin <[email protected]>
Signed-off-by: Andrew Morton <[email protected]>
Signed-off-by: Linus Torvalds <[email protected]>
1 parent e7bb4b6 commit 8a0516e

11 files changed, 40 insertions(+), 57 deletions(-)
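For readers skimming the diff, the sketch below illustrates the kind of helper the call sites are being converted to. It is an assumption for illustration only: the real pte_protnone()/pmd_protnone() definitions are introduced by the companion patches in this series, and the version shown follows the x86 encoding, where a NUMA hinting entry is indistinguishable from a PROT_NONE mapping (_PAGE_PROTNONE set, _PAGE_PRESENT clear).

/*
 * Illustrative sketch only -- the real helpers land in the arch and
 * asm-generic headers in later patches of this series.
 */
#ifdef CONFIG_NUMA_BALANCING
static inline int pte_protnone(pte_t pte)
{
	/* A NUMA hinting pte looks exactly like a PROT_NONE pte. */
	return (pte_flags(pte) & (_PAGE_PROTNONE | _PAGE_PRESENT))
		== _PAGE_PROTNONE;
}

static inline int pmd_protnone(pmd_t pmd)
{
	return (pmd_flags(pmd) & (_PAGE_PROTNONE | _PAGE_PRESENT))
		== _PAGE_PROTNONE;
}
#else
/* Without NUMA balancing, nothing is ever treated as a hinting fault. */
static inline int pte_protnone(pte_t pte) { return 0; }
static inline int pmd_protnone(pmd_t pmd) { return 0; }
#endif

With helpers of this shape, every "is this a NUMA hinting entry?" test in the diff below becomes a plain protection check rather than a test of a dedicated _PAGE_NUMA bit.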

arch/powerpc/kvm/book3s_hv_rm_mmu.c

+1 −1

@@ -212,7 +212,7 @@ long kvmppc_do_h_enter(struct kvm *kvm, unsigned long flags,
 	/* Look up the Linux PTE for the backing page */
 	pte_size = psize;
 	pte = lookup_linux_pte_and_update(pgdir, hva, writing, &pte_size);
-	if (pte_present(pte) && !pte_numa(pte)) {
+	if (pte_present(pte) && !pte_protnone(pte)) {
 		if (writing && !pte_write(pte))
 			/* make the actual HPTE be read-only */
 			ptel = hpte_make_readonly(ptel);

arch/powerpc/mm/fault.c

+0 −5

@@ -398,8 +398,6 @@ int __kprobes do_page_fault(struct pt_regs *regs, unsigned long address,
 		 * processors use the same I/D cache coherency mechanism
 		 * as embedded.
 		 */
-		if (error_code & DSISR_PROTFAULT)
-			goto bad_area;
 #endif /* CONFIG_PPC_STD_MMU */

 	/*
@@ -423,9 +421,6 @@ int __kprobes do_page_fault(struct pt_regs *regs, unsigned long address,
 		flags |= FAULT_FLAG_WRITE;
 	/* a read */
 	} else {
-		/* protection fault */
-		if (error_code & 0x08000000)
-			goto bad_area;
 		if (!(vma->vm_flags & (VM_READ | VM_EXEC | VM_WRITE)))
 			goto bad_area;
 	}

arch/powerpc/mm/pgtable.c

+8 −3

@@ -172,9 +172,14 @@ static pte_t set_access_flags_filter(pte_t pte, struct vm_area_struct *vma,
 void set_pte_at(struct mm_struct *mm, unsigned long addr, pte_t *ptep,
 		pte_t pte)
 {
-#ifdef CONFIG_DEBUG_VM
-	WARN_ON(pte_val(*ptep) & _PAGE_PRESENT);
-#endif
+	/*
+	 * When handling numa faults, we already have the pte marked
+	 * _PAGE_PRESENT, but we can be sure that it is not in hpte.
+	 * Hence we can use set_pte_at for them.
+	 */
+	VM_WARN_ON((pte_val(*ptep) & (_PAGE_PRESENT | _PAGE_USER)) ==
+		(_PAGE_PRESENT | _PAGE_USER));
+
 	/* Note: mm->context.id might not yet have been assigned as
 	 * this context might not have been activated yet when this
 	 * is called.

arch/powerpc/mm/pgtable_64.c

+2 −1

@@ -718,7 +718,8 @@ void set_pmd_at(struct mm_struct *mm, unsigned long addr,
 		pmd_t *pmdp, pmd_t pmd)
 {
 #ifdef CONFIG_DEBUG_VM
-	WARN_ON(pmd_val(*pmdp) & _PAGE_PRESENT);
+	WARN_ON((pmd_val(*pmdp) & (_PAGE_PRESENT | _PAGE_USER)) ==
+		(_PAGE_PRESENT | _PAGE_USER));
 	assert_spin_locked(&mm->page_table_lock);
 	WARN_ON(!pmd_trans_huge(pmd));
 #endif
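Both powerpc hunks above loosen the DEBUG_VM check for the same reason: ppc64 has no spare PTE bit to spend on PROT_NONE, so a NUMA hinting entry keeps _PAGE_PRESENT and instead drops _PAGE_USER. Overwriting such an entry via set_pte_at()/set_pmd_at() is therefore legitimate, and only an entry that is both present and user-accessible remains unexpected. A matching helper might look like the sketch below; this is an assumption for illustration, the actual ppc64 definition arrives in a later patch of the series.

/*
 * Sketch only, mirroring the (_PAGE_PRESENT | _PAGE_USER) test used by
 * the warnings above: a protnone/NUMA hinting entry is still present
 * to the kernel but no longer accessible from user space.
 */
static inline int pte_protnone(pte_t pte)
{
	return (pte_val(pte) & (_PAGE_PRESENT | _PAGE_USER)) ==
		_PAGE_PRESENT;
}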

arch/x86/mm/gup.c

+2 −2

@@ -84,7 +84,7 @@ static noinline int gup_pte_range(pmd_t pmd, unsigned long addr,
 		struct page *page;

 		/* Similar to the PMD case, NUMA hinting must take slow path */
-		if (pte_numa(pte)) {
+		if (pte_protnone(pte)) {
 			pte_unmap(ptep);
 			return 0;
 		}
@@ -178,7 +178,7 @@ static int gup_pmd_range(pud_t pud, unsigned long addr, unsigned long end,
 			 * slowpath for accounting purposes and so that they
 			 * can be serialised against THP migration.
 			 */
-			if (pmd_numa(pmd))
+			if (pmd_protnone(pmd))
 				return 0;
 			if (!gup_huge_pmd(pmd, addr, next, write, pages, nr))
 				return 0;

include/uapi/linux/mempolicy.h

+1 −1

@@ -67,7 +67,7 @@ enum mpol_rebind_step {
 #define MPOL_F_LOCAL (1 << 1) /* preferred local allocation */
 #define MPOL_F_REBINDING (1 << 2) /* identify policies in rebinding */
 #define MPOL_F_MOF (1 << 3) /* this policy wants migrate on fault */
-#define MPOL_F_MORON (1 << 4) /* Migrate On pte_numa Reference On Node */
+#define MPOL_F_MORON (1 << 4) /* Migrate On protnone Reference On Node */


 #endif /* _UAPI_LINUX_MEMPOLICY_H */

mm/gup.c

+5 −5

@@ -64,7 +64,7 @@ static struct page *follow_page_pte(struct vm_area_struct *vma,
 		migration_entry_wait(mm, pmd, address);
 		goto retry;
 	}
-	if ((flags & FOLL_NUMA) && pte_numa(pte))
+	if ((flags & FOLL_NUMA) && pte_protnone(pte))
 		goto no_page;
 	if ((flags & FOLL_WRITE) && !pte_write(pte)) {
 		pte_unmap_unlock(ptep, ptl);
@@ -184,7 +184,7 @@ struct page *follow_page_mask(struct vm_area_struct *vma,
 			return page;
 		return no_page_table(vma, flags);
 	}
-	if ((flags & FOLL_NUMA) && pmd_numa(*pmd))
+	if ((flags & FOLL_NUMA) && pmd_protnone(*pmd))
 		return no_page_table(vma, flags);
 	if (pmd_trans_huge(*pmd)) {
 		if (flags & FOLL_SPLIT) {
@@ -906,10 +906,10 @@ static int gup_pte_range(pmd_t pmd, unsigned long addr, unsigned long end,

 		/*
 		 * Similar to the PMD case below, NUMA hinting must take slow
-		 * path
+		 * path using the pte_protnone check.
 		 */
 		if (!pte_present(pte) || pte_special(pte) ||
-		    pte_numa(pte) || (write && !pte_write(pte)))
+		    pte_protnone(pte) || (write && !pte_write(pte)))
 			goto pte_unmap;

 		VM_BUG_ON(!pfn_valid(pte_pfn(pte)));
@@ -1104,7 +1104,7 @@ static int gup_pmd_range(pud_t pud, unsigned long addr, unsigned long end,
 			 * slowpath for accounting purposes and so that they
 			 * can be serialised against THP migration.
 			 */
-			if (pmd_numa(pmd))
+			if (pmd_protnone(pmd))
 				return 0;

 			if (!gup_huge_pmd(pmd, pmdp, addr, next, write,

mm/huge_memory.c

+8 −8

@@ -1211,7 +1211,7 @@ struct page *follow_trans_huge_pmd(struct vm_area_struct *vma,
 		return ERR_PTR(-EFAULT);

 	/* Full NUMA hinting faults to serialise migration in fault paths */
-	if ((flags & FOLL_NUMA) && pmd_numa(*pmd))
+	if ((flags & FOLL_NUMA) && pmd_protnone(*pmd))
 		goto out;

 	page = pmd_page(*pmd);
@@ -1342,7 +1342,7 @@ int do_huge_pmd_numa_page(struct mm_struct *mm, struct vm_area_struct *vma,

 	/*
 	 * Migrate the THP to the requested node, returns with page unlocked
-	 * and pmd_numa cleared.
+	 * and access rights restored.
 	 */
 	spin_unlock(ptl);
 	migrated = migrate_misplaced_transhuge_page(mm, vma,
@@ -1357,7 +1357,7 @@ int do_huge_pmd_numa_page(struct mm_struct *mm, struct vm_area_struct *vma,
 	BUG_ON(!PageLocked(page));
 	pmd = pmd_mknonnuma(pmd);
 	set_pmd_at(mm, haddr, pmdp, pmd);
-	VM_BUG_ON(pmd_numa(*pmdp));
+	VM_BUG_ON(pmd_protnone(*pmdp));
 	update_mmu_cache_pmd(vma, addr, pmdp);
 	unlock_page(page);
 out_unlock:
@@ -1483,7 +1483,7 @@ int change_huge_pmd(struct vm_area_struct *vma, pmd_t *pmd,
 		ret = 1;
 		if (!prot_numa) {
 			entry = pmdp_get_and_clear_notify(mm, addr, pmd);
-			if (pmd_numa(entry))
+			if (pmd_protnone(entry))
 				entry = pmd_mknonnuma(entry);
 			entry = pmd_modify(entry, newprot);
 			ret = HPAGE_PMD_NR;
@@ -1499,7 +1499,7 @@ int change_huge_pmd(struct vm_area_struct *vma, pmd_t *pmd,
 			 * local vs remote hits on the zero page.
 			 */
 			if (!is_huge_zero_page(page) &&
-			    !pmd_numa(*pmd)) {
+			    !pmd_protnone(*pmd)) {
 				pmdp_set_numa(mm, addr, pmd);
 				ret = HPAGE_PMD_NR;
 			}
@@ -1767,9 +1767,9 @@ static int __split_huge_page_map(struct page *page,
 		pte_t *pte, entry;
 		BUG_ON(PageCompound(page+i));
 		/*
-		 * Note that pmd_numa is not transferred deliberately
-		 * to avoid any possibility that pte_numa leaks to
-		 * a PROT_NONE VMA by accident.
+		 * Note that NUMA hinting access restrictions are not
+		 * transferred to avoid any possibility of altering
+		 * permissions across VMAs.
 		 */
 		entry = mk_pte(page + i, vma->vm_page_prot);
 		entry = maybe_mkwrite(pte_mkdirty(entry), vma);

mm/memory.c

+2 −2

@@ -3124,7 +3124,7 @@ static int handle_pte_fault(struct mm_struct *mm,
 					pte, pmd, flags, entry);
 	}

-	if (pte_numa(entry))
+	if (pte_protnone(entry))
 		return do_numa_page(mm, vma, address, entry, pte, pmd);

 	ptl = pte_lockptr(mm, pmd);
@@ -3202,7 +3202,7 @@ static int __handle_mm_fault(struct mm_struct *mm, struct vm_area_struct *vma,
 			if (pmd_trans_splitting(orig_pmd))
 				return 0;

-			if (pmd_numa(orig_pmd))
+			if (pmd_protnone(orig_pmd))
 				return do_huge_pmd_numa_page(mm, vma, address,
 							     orig_pmd, pmd);

mm/mprotect.c

+10 −28

@@ -75,36 +75,18 @@ static unsigned long change_pte_range(struct vm_area_struct *vma, pmd_t *pmd,
 		oldpte = *pte;
 		if (pte_present(oldpte)) {
 			pte_t ptent;
-			bool updated = false;

-			if (!prot_numa) {
-				ptent = ptep_modify_prot_start(mm, addr, pte);
-				if (pte_numa(ptent))
-					ptent = pte_mknonnuma(ptent);
-				ptent = pte_modify(ptent, newprot);
-				/*
-				 * Avoid taking write faults for pages we
-				 * know to be dirty.
-				 */
-				if (dirty_accountable && pte_dirty(ptent) &&
-				    (pte_soft_dirty(ptent) ||
-				     !(vma->vm_flags & VM_SOFTDIRTY)))
-					ptent = pte_mkwrite(ptent);
-				ptep_modify_prot_commit(mm, addr, pte, ptent);
-				updated = true;
-			} else {
-				struct page *page;
-
-				page = vm_normal_page(vma, addr, oldpte);
-				if (page && !PageKsm(page)) {
-					if (!pte_numa(oldpte)) {
-						ptep_set_numa(mm, addr, pte);
-						updated = true;
-					}
-				}
+			ptent = ptep_modify_prot_start(mm, addr, pte);
+			ptent = pte_modify(ptent, newprot);
+
+			/* Avoid taking write faults for known dirty pages */
+			if (dirty_accountable && pte_dirty(ptent) &&
+			    (pte_soft_dirty(ptent) ||
+			     !(vma->vm_flags & VM_SOFTDIRTY))) {
+				ptent = pte_mkwrite(ptent);
 			}
-			if (updated)
-				pages++;
+			ptep_modify_prot_commit(mm, addr, pte, ptent);
+			pages++;
 		} else if (IS_ENABLED(CONFIG_MIGRATION)) {
 			swp_entry_t entry = pte_to_swp_entry(oldpte);

mm/pgtable-generic.c

+1 −1

@@ -193,7 +193,7 @@ void pmdp_invalidate(struct vm_area_struct *vma, unsigned long address,
 		     pmd_t *pmdp)
 {
 	pmd_t entry = *pmdp;
-	if (pmd_numa(entry))
+	if (pmd_protnone(entry))
 		entry = pmd_mknonnuma(entry);
 	set_pmd_at(vma->vm_mm, address, pmdp, pmd_mknotpresent(entry));
 	flush_tlb_range(vma, address, address + HPAGE_PMD_SIZE);
