Skip to content

Commit 84c3fc4

Browse files
torvalds
authored and committed
mm: thp: check pmd migration entry in common path
When THP migration is being used, memory management code needs to handle pmd migration entries properly. This patch uses !pmd_present() or is_swap_pmd() (depending on whether pmd_none() needs separate code or not) to check pmd migration entries at the places where a pmd entry is present. Since pmd-related code uses split_huge_page(), split_huge_pmd(), pmd_trans_huge(), pmd_trans_unstable(), or pmd_none_or_trans_huge_or_clear_bad(), this patch: 1. adds pmd migration entry split code in split_huge_pmd(), 2. takes care of pmd migration entries whenever pmd_trans_huge() is present, 3. makes pmd_none_or_trans_huge_or_clear_bad() pmd migration entry aware. Since split_huge_page() uses split_huge_pmd() and pmd_trans_unstable() is equivalent to pmd_none_or_trans_huge_or_clear_bad(), we do not change them. Until this commit, a pmd entry should be: 1. pointing to a pte page, 2. is_swap_pmd(), 3. pmd_trans_huge(), 4. pmd_devmap(), or 5. pmd_none(). Signed-off-by: Zi Yan <[email protected]> Cc: Kirill A. Shutemov <[email protected]> Cc: "H. Peter Anvin" <[email protected]> Cc: Anshuman Khandual <[email protected]> Cc: Dave Hansen <[email protected]> Cc: David Nellans <[email protected]> Cc: Ingo Molnar <[email protected]> Cc: Mel Gorman <[email protected]> Cc: Minchan Kim <[email protected]> Cc: Naoya Horiguchi <[email protected]> Cc: Thomas Gleixner <[email protected]> Cc: Vlastimil Babka <[email protected]> Cc: Andrea Arcangeli <[email protected]> Cc: Michal Hocko <[email protected]> Signed-off-by: Andrew Morton <[email protected]> Signed-off-by: Linus Torvalds <[email protected]>
1 parent 616b837 commit 84c3fc4

File tree

9 files changed

+147
-27
lines changed

9 files changed

+147
-27
lines changed

fs/proc/task_mmu.c

Lines changed: 21 additions & 11 deletions
Original file line numberDiff line numberDiff line change
@@ -608,7 +608,8 @@ static int smaps_pte_range(pmd_t *pmd, unsigned long addr, unsigned long end,
608608

609609
ptl = pmd_trans_huge_lock(pmd, vma);
610610
if (ptl) {
611-
smaps_pmd_entry(pmd, addr, walk);
611+
if (pmd_present(*pmd))
612+
smaps_pmd_entry(pmd, addr, walk);
612613
spin_unlock(ptl);
613614
return 0;
614615
}
@@ -1012,6 +1013,9 @@ static int clear_refs_pte_range(pmd_t *pmd, unsigned long addr,
10121013
goto out;
10131014
}
10141015

1016+
if (!pmd_present(*pmd))
1017+
goto out;
1018+
10151019
page = pmd_page(*pmd);
10161020

10171021
/* Clear accessed and referenced bits. */
@@ -1293,27 +1297,33 @@ static int pagemap_pmd_range(pmd_t *pmdp, unsigned long addr, unsigned long end,
12931297
if (ptl) {
12941298
u64 flags = 0, frame = 0;
12951299
pmd_t pmd = *pmdp;
1300+
struct page *page = NULL;
12961301

12971302
if ((vma->vm_flags & VM_SOFTDIRTY) || pmd_soft_dirty(pmd))
12981303
flags |= PM_SOFT_DIRTY;
12991304

1300-
/*
1301-
* Currently pmd for thp is always present because thp
1302-
* can not be swapped-out, migrated, or HWPOISONed
1303-
* (split in such cases instead.)
1304-
* This if-check is just to prepare for future implementation.
1305-
*/
13061305
if (pmd_present(pmd)) {
1307-
struct page *page = pmd_page(pmd);
1308-
1309-
if (page_mapcount(page) == 1)
1310-
flags |= PM_MMAP_EXCLUSIVE;
1306+
page = pmd_page(pmd);
13111307

13121308
flags |= PM_PRESENT;
13131309
if (pm->show_pfn)
13141310
frame = pmd_pfn(pmd) +
13151311
((addr & ~PMD_MASK) >> PAGE_SHIFT);
13161312
}
1313+
#ifdef CONFIG_ARCH_ENABLE_THP_MIGRATION
1314+
else if (is_swap_pmd(pmd)) {
1315+
swp_entry_t entry = pmd_to_swp_entry(pmd);
1316+
1317+
frame = swp_type(entry) |
1318+
(swp_offset(entry) << MAX_SWAPFILES_SHIFT);
1319+
flags |= PM_SWAP;
1320+
VM_BUG_ON(!is_pmd_migration_entry(pmd));
1321+
page = migration_entry_to_page(entry);
1322+
}
1323+
#endif
1324+
1325+
if (page && page_mapcount(page) == 1)
1326+
flags |= PM_MMAP_EXCLUSIVE;
13171327

13181328
for (; addr != end; addr += PAGE_SIZE) {
13191329
pagemap_entry_t pme = make_pme(frame, flags);

include/asm-generic/pgtable.h

Lines changed: 17 additions & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -846,7 +846,23 @@ static inline int pmd_none_or_trans_huge_or_clear_bad(pmd_t *pmd)
846846
#ifdef CONFIG_TRANSPARENT_HUGEPAGE
847847
barrier();
848848
#endif
849-
if (pmd_none(pmdval) || pmd_trans_huge(pmdval))
849+
/*
850+
* !pmd_present() checks for pmd migration entries
851+
*
852+
* The complete check uses is_pmd_migration_entry() in linux/swapops.h
853+
* But using that requires moving current function and pmd_trans_unstable()
854+
* to linux/swapops.h to resolve dependency, which is too much code move.
855+
*
856+
* !pmd_present() is equivalent to is_pmd_migration_entry() currently,
857+
* because !pmd_present() pages can only be under migration not swapped
858+
* out.
859+
*
860+
* pmd_none() is preserved for future condition checks on pmd migration
861+
* entries and not confusing with this function name, although it is
862+
* redundant with !pmd_present().
863+
*/
864+
if (pmd_none(pmdval) || pmd_trans_huge(pmdval) ||
865+
(IS_ENABLED(CONFIG_ARCH_ENABLE_THP_MIGRATION) && !pmd_present(pmdval)))
850866
return 1;
851867
if (unlikely(pmd_bad(pmdval))) {
852868
pmd_clear_bad(pmd);

include/linux/huge_mm.h

Lines changed: 12 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -147,7 +147,7 @@ void __split_huge_pmd(struct vm_area_struct *vma, pmd_t *pmd,
147147
#define split_huge_pmd(__vma, __pmd, __address) \
148148
do { \
149149
pmd_t *____pmd = (__pmd); \
150-
if (pmd_trans_huge(*____pmd) \
150+
if (is_swap_pmd(*____pmd) || pmd_trans_huge(*____pmd) \
151151
|| pmd_devmap(*____pmd)) \
152152
__split_huge_pmd(__vma, __pmd, __address, \
153153
false, NULL); \
@@ -178,12 +178,18 @@ extern spinlock_t *__pmd_trans_huge_lock(pmd_t *pmd,
178178
struct vm_area_struct *vma);
179179
extern spinlock_t *__pud_trans_huge_lock(pud_t *pud,
180180
struct vm_area_struct *vma);
181+
182+
static inline int is_swap_pmd(pmd_t pmd)
183+
{
184+
return !pmd_none(pmd) && !pmd_present(pmd);
185+
}
186+
181187
/* mmap_sem must be held on entry */
182188
static inline spinlock_t *pmd_trans_huge_lock(pmd_t *pmd,
183189
struct vm_area_struct *vma)
184190
{
185191
VM_BUG_ON_VMA(!rwsem_is_locked(&vma->vm_mm->mmap_sem), vma);
186-
if (pmd_trans_huge(*pmd) || pmd_devmap(*pmd))
192+
if (is_swap_pmd(*pmd) || pmd_trans_huge(*pmd) || pmd_devmap(*pmd))
187193
return __pmd_trans_huge_lock(pmd, vma);
188194
else
189195
return NULL;
@@ -299,6 +305,10 @@ static inline void vma_adjust_trans_huge(struct vm_area_struct *vma,
299305
long adjust_next)
300306
{
301307
}
308+
static inline int is_swap_pmd(pmd_t pmd)
309+
{
310+
return 0;
311+
}
302312
static inline spinlock_t *pmd_trans_huge_lock(pmd_t *pmd,
303313
struct vm_area_struct *vma)
304314
{

mm/gup.c

Lines changed: 20 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -234,6 +234,16 @@ static struct page *follow_pmd_mask(struct vm_area_struct *vma,
234234
return page;
235235
return no_page_table(vma, flags);
236236
}
237+
retry:
238+
if (!pmd_present(*pmd)) {
239+
if (likely(!(flags & FOLL_MIGRATION)))
240+
return no_page_table(vma, flags);
241+
VM_BUG_ON(thp_migration_supported() &&
242+
!is_pmd_migration_entry(*pmd));
243+
if (is_pmd_migration_entry(*pmd))
244+
pmd_migration_entry_wait(mm, pmd);
245+
goto retry;
246+
}
237247
if (pmd_devmap(*pmd)) {
238248
ptl = pmd_lock(mm, pmd);
239249
page = follow_devmap_pmd(vma, address, pmd, flags);
@@ -247,7 +257,15 @@ static struct page *follow_pmd_mask(struct vm_area_struct *vma,
247257
if ((flags & FOLL_NUMA) && pmd_protnone(*pmd))
248258
return no_page_table(vma, flags);
249259

260+
retry_locked:
250261
ptl = pmd_lock(mm, pmd);
262+
if (unlikely(!pmd_present(*pmd))) {
263+
spin_unlock(ptl);
264+
if (likely(!(flags & FOLL_MIGRATION)))
265+
return no_page_table(vma, flags);
266+
pmd_migration_entry_wait(mm, pmd);
267+
goto retry_locked;
268+
}
251269
if (unlikely(!pmd_trans_huge(*pmd))) {
252270
spin_unlock(ptl);
253271
return follow_page_pte(vma, address, pmd, flags);
@@ -424,7 +442,7 @@ static int get_gate_page(struct mm_struct *mm, unsigned long address,
424442
pud = pud_offset(p4d, address);
425443
BUG_ON(pud_none(*pud));
426444
pmd = pmd_offset(pud, address);
427-
if (pmd_none(*pmd))
445+
if (!pmd_present(*pmd))
428446
return -EFAULT;
429447
VM_BUG_ON(pmd_trans_huge(*pmd));
430448
pte = pte_offset_map(pmd, address);
@@ -1534,7 +1552,7 @@ static int gup_pmd_range(pud_t pud, unsigned long addr, unsigned long end,
15341552
pmd_t pmd = READ_ONCE(*pmdp);
15351553

15361554
next = pmd_addr_end(addr, end);
1537-
if (pmd_none(pmd))
1555+
if (!pmd_present(pmd))
15381556
return 0;
15391557

15401558
if (unlikely(pmd_trans_huge(pmd) || pmd_huge(pmd))) {

mm/huge_memory.c

Lines changed: 59 additions & 6 deletions
Original file line numberDiff line numberDiff line change
@@ -928,6 +928,23 @@ int copy_huge_pmd(struct mm_struct *dst_mm, struct mm_struct *src_mm,
928928

929929
ret = -EAGAIN;
930930
pmd = *src_pmd;
931+
932+
#ifdef CONFIG_ARCH_ENABLE_THP_MIGRATION
933+
if (unlikely(is_swap_pmd(pmd))) {
934+
swp_entry_t entry = pmd_to_swp_entry(pmd);
935+
936+
VM_BUG_ON(!is_pmd_migration_entry(pmd));
937+
if (is_write_migration_entry(entry)) {
938+
make_migration_entry_read(&entry);
939+
pmd = swp_entry_to_pmd(entry);
940+
set_pmd_at(src_mm, addr, src_pmd, pmd);
941+
}
942+
set_pmd_at(dst_mm, addr, dst_pmd, pmd);
943+
ret = 0;
944+
goto out_unlock;
945+
}
946+
#endif
947+
931948
if (unlikely(!pmd_trans_huge(pmd))) {
932949
pte_free(dst_mm, pgtable);
933950
goto out_unlock;
@@ -1599,6 +1616,12 @@ bool madvise_free_huge_pmd(struct mmu_gather *tlb, struct vm_area_struct *vma,
15991616
if (is_huge_zero_pmd(orig_pmd))
16001617
goto out;
16011618

1619+
if (unlikely(!pmd_present(orig_pmd))) {
1620+
VM_BUG_ON(thp_migration_supported() &&
1621+
!is_pmd_migration_entry(orig_pmd));
1622+
goto out;
1623+
}
1624+
16021625
page = pmd_page(orig_pmd);
16031626
/*
16041627
* If other processes are mapping this page, we couldn't discard
@@ -1810,6 +1833,25 @@ int change_huge_pmd(struct vm_area_struct *vma, pmd_t *pmd,
18101833
preserve_write = prot_numa && pmd_write(*pmd);
18111834
ret = 1;
18121835

1836+
#ifdef CONFIG_ARCH_ENABLE_THP_MIGRATION
1837+
if (is_swap_pmd(*pmd)) {
1838+
swp_entry_t entry = pmd_to_swp_entry(*pmd);
1839+
1840+
VM_BUG_ON(!is_pmd_migration_entry(*pmd));
1841+
if (is_write_migration_entry(entry)) {
1842+
pmd_t newpmd;
1843+
/*
1844+
* A protection check is difficult so
1845+
* just be safe and disable write
1846+
*/
1847+
make_migration_entry_read(&entry);
1848+
newpmd = swp_entry_to_pmd(entry);
1849+
set_pmd_at(mm, addr, pmd, newpmd);
1850+
}
1851+
goto unlock;
1852+
}
1853+
#endif
1854+
18131855
/*
18141856
* Avoid trapping faults against the zero page. The read-only
18151857
* data is likely to be read-cached on the local CPU and
@@ -1875,7 +1917,8 @@ spinlock_t *__pmd_trans_huge_lock(pmd_t *pmd, struct vm_area_struct *vma)
18751917
{
18761918
spinlock_t *ptl;
18771919
ptl = pmd_lock(vma->vm_mm, pmd);
1878-
if (likely(pmd_trans_huge(*pmd) || pmd_devmap(*pmd)))
1920+
if (likely(is_swap_pmd(*pmd) || pmd_trans_huge(*pmd) ||
1921+
pmd_devmap(*pmd)))
18791922
return ptl;
18801923
spin_unlock(ptl);
18811924
return NULL;
@@ -1993,14 +2036,15 @@ static void __split_huge_pmd_locked(struct vm_area_struct *vma, pmd_t *pmd,
19932036
struct page *page;
19942037
pgtable_t pgtable;
19952038
pmd_t _pmd;
1996-
bool young, write, dirty, soft_dirty;
2039+
bool young, write, dirty, soft_dirty, pmd_migration = false;
19972040
unsigned long addr;
19982041
int i;
19992042

20002043
VM_BUG_ON(haddr & ~HPAGE_PMD_MASK);
20012044
VM_BUG_ON_VMA(vma->vm_start > haddr, vma);
20022045
VM_BUG_ON_VMA(vma->vm_end < haddr + HPAGE_PMD_SIZE, vma);
2003-
VM_BUG_ON(!pmd_trans_huge(*pmd) && !pmd_devmap(*pmd));
2046+
VM_BUG_ON(!is_pmd_migration_entry(*pmd) && !pmd_trans_huge(*pmd)
2047+
&& !pmd_devmap(*pmd));
20042048

20052049
count_vm_event(THP_SPLIT_PMD);
20062050

@@ -2025,7 +2069,16 @@ static void __split_huge_pmd_locked(struct vm_area_struct *vma, pmd_t *pmd,
20252069
return __split_huge_zero_page_pmd(vma, haddr, pmd);
20262070
}
20272071

2028-
page = pmd_page(*pmd);
2072+
#ifdef CONFIG_ARCH_ENABLE_THP_MIGRATION
2073+
pmd_migration = is_pmd_migration_entry(*pmd);
2074+
if (pmd_migration) {
2075+
swp_entry_t entry;
2076+
2077+
entry = pmd_to_swp_entry(*pmd);
2078+
page = pfn_to_page(swp_offset(entry));
2079+
} else
2080+
#endif
2081+
page = pmd_page(*pmd);
20292082
VM_BUG_ON_PAGE(!page_count(page), page);
20302083
page_ref_add(page, HPAGE_PMD_NR - 1);
20312084
write = pmd_write(*pmd);
@@ -2044,7 +2097,7 @@ static void __split_huge_pmd_locked(struct vm_area_struct *vma, pmd_t *pmd,
20442097
* transferred to avoid any possibility of altering
20452098
* permissions across VMAs.
20462099
*/
2047-
if (freeze) {
2100+
if (freeze || pmd_migration) {
20482101
swp_entry_t swp_entry;
20492102
swp_entry = make_migration_entry(page + i, write);
20502103
entry = swp_entry_to_pte(swp_entry);
@@ -2143,7 +2196,7 @@ void __split_huge_pmd(struct vm_area_struct *vma, pmd_t *pmd,
21432196
page = pmd_page(*pmd);
21442197
if (PageMlocked(page))
21452198
clear_page_mlock(page);
2146-
} else if (!pmd_devmap(*pmd))
2199+
} else if (!(pmd_devmap(*pmd) || is_pmd_migration_entry(*pmd)))
21472200
goto out;
21482201
__split_huge_pmd_locked(vma, pmd, haddr, freeze);
21492202
out:

mm/memcontrol.c

Lines changed: 5 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -4664,6 +4664,11 @@ static enum mc_target_type get_mctgt_type_thp(struct vm_area_struct *vma,
46644664
struct page *page = NULL;
46654665
enum mc_target_type ret = MC_TARGET_NONE;
46664666

4667+
if (unlikely(is_swap_pmd(pmd))) {
4668+
VM_BUG_ON(thp_migration_supported() &&
4669+
!is_pmd_migration_entry(pmd));
4670+
return ret;
4671+
}
46674672
page = pmd_page(pmd);
46684673
VM_BUG_ON_PAGE(!page || !PageHead(page), page);
46694674
if (!(mc.flags & MOVE_ANON))

mm/memory.c

Lines changed: 10 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -1065,7 +1065,8 @@ static inline int copy_pmd_range(struct mm_struct *dst_mm, struct mm_struct *src
10651065
src_pmd = pmd_offset(src_pud, addr);
10661066
do {
10671067
next = pmd_addr_end(addr, end);
1068-
if (pmd_trans_huge(*src_pmd) || pmd_devmap(*src_pmd)) {
1068+
if (is_swap_pmd(*src_pmd) || pmd_trans_huge(*src_pmd)
1069+
|| pmd_devmap(*src_pmd)) {
10691070
int err;
10701071
VM_BUG_ON_VMA(next-addr != HPAGE_PMD_SIZE, vma);
10711072
err = copy_huge_pmd(dst_mm, src_mm,
@@ -1326,7 +1327,7 @@ static inline unsigned long zap_pmd_range(struct mmu_gather *tlb,
13261327
pmd = pmd_offset(pud, addr);
13271328
do {
13281329
next = pmd_addr_end(addr, end);
1329-
if (pmd_trans_huge(*pmd) || pmd_devmap(*pmd)) {
1330+
if (is_swap_pmd(*pmd) || pmd_trans_huge(*pmd) || pmd_devmap(*pmd)) {
13301331
if (next - addr != HPAGE_PMD_SIZE) {
13311332
VM_BUG_ON_VMA(vma_is_anonymous(vma) &&
13321333
!rwsem_is_locked(&tlb->mm->mmap_sem), vma);
@@ -3911,6 +3912,13 @@ static int __handle_mm_fault(struct vm_area_struct *vma, unsigned long address,
39113912
pmd_t orig_pmd = *vmf.pmd;
39123913

39133914
barrier();
3915+
if (unlikely(is_swap_pmd(orig_pmd))) {
3916+
VM_BUG_ON(thp_migration_supported() &&
3917+
!is_pmd_migration_entry(orig_pmd));
3918+
if (is_pmd_migration_entry(orig_pmd))
3919+
pmd_migration_entry_wait(mm, vmf.pmd);
3920+
return 0;
3921+
}
39143922
if (pmd_trans_huge(orig_pmd) || pmd_devmap(orig_pmd)) {
39153923
if (pmd_protnone(orig_pmd) && vma_is_accessible(vma))
39163924
return do_huge_pmd_numa_page(&vmf, orig_pmd);

mm/mprotect.c

Lines changed: 2 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -149,7 +149,7 @@ static inline unsigned long change_pmd_range(struct vm_area_struct *vma,
149149
unsigned long this_pages;
150150

151151
next = pmd_addr_end(addr, end);
152-
if (!pmd_trans_huge(*pmd) && !pmd_devmap(*pmd)
152+
if (!is_swap_pmd(*pmd) && !pmd_trans_huge(*pmd) && !pmd_devmap(*pmd)
153153
&& pmd_none_or_clear_bad(pmd))
154154
continue;
155155

@@ -159,7 +159,7 @@ static inline unsigned long change_pmd_range(struct vm_area_struct *vma,
159159
mmu_notifier_invalidate_range_start(mm, mni_start, end);
160160
}
161161

162-
if (pmd_trans_huge(*pmd) || pmd_devmap(*pmd)) {
162+
if (is_swap_pmd(*pmd) || pmd_trans_huge(*pmd) || pmd_devmap(*pmd)) {
163163
if (next - addr != HPAGE_PMD_SIZE) {
164164
__split_huge_pmd(vma, pmd, addr, false, NULL);
165165
} else {

mm/mremap.c

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -223,7 +223,7 @@ unsigned long move_page_tables(struct vm_area_struct *vma,
223223
new_pmd = alloc_new_pmd(vma->vm_mm, vma, new_addr);
224224
if (!new_pmd)
225225
break;
226-
if (pmd_trans_huge(*old_pmd)) {
226+
if (is_swap_pmd(*old_pmd) || pmd_trans_huge(*old_pmd)) {
227227
if (extent == HPAGE_PMD_SIZE) {
228228
bool moved;
229229
/* See comment in move_ptes() */

0 commit comments

Comments
 (0)