
Commit e59a47b

soleen authored and torvalds committed
mm/khugepaged: unify collapse pmd clear, flush and free
Unify the code that flushes and clears the pmd entry and frees the PTE table level into a new function, collapse_and_free_pmd(). This cleanup is useful as in the next patch we will add another call to this function to iterate through the PTEs prior to freeing the level for the page table check.

Link: https://lkml.kernel.org/r/[email protected]
Signed-off-by: Pasha Tatashin <[email protected]>
Acked-by: David Rientjes <[email protected]>
Cc: Aneesh Kumar K.V <[email protected]>
Cc: Anshuman Khandual <[email protected]>
Cc: Dave Hansen <[email protected]>
Cc: Greg Thelen <[email protected]>
Cc: H. Peter Anvin <[email protected]>
Cc: Hugh Dickins <[email protected]>
Cc: Ingo Molnar <[email protected]>
Cc: Jiri Slaby <[email protected]>
Cc: Mike Rapoport <[email protected]>
Cc: Muchun Song <[email protected]>
Cc: Paul Turner <[email protected]>
Cc: Wei Xu <[email protected]>
Cc: Will Deacon <[email protected]>
Cc: Zi Yan <[email protected]>
Signed-off-by: Andrew Morton <[email protected]>
Signed-off-by: Linus Torvalds <[email protected]>
1 parent: 64d8b9e
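For quick reading, here is the new helper as it lands with this patch, reconstructed from the diff below; the explanatory comments are added here for context and are not part of the patch itself:

static void collapse_and_free_pmd(struct mm_struct *mm, struct vm_area_struct *vma,
				  unsigned long addr, pmd_t *pmdp)
{
	spinlock_t *ptl;
	pmd_t pmd;

	/* Clear and TLB-flush the pmd entry under the pmd spinlock. */
	ptl = pmd_lock(vma->vm_mm, pmdp);
	pmd = pmdp_collapse_flush(vma, addr, pmdp);
	spin_unlock(ptl);
	/* The PTE level is now unreachable: drop its accounting and free it. */
	mm_dec_nr_ptes(mm);
	pte_free(mm, pmd_pgtable(pmd));
}

Both call sites in the diff, collapse_pte_mapped_thp() and retract_page_tables(), now reduce to a single call to this helper under their existing locking.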

File tree

1 file changed: +18 −16 lines

mm/khugepaged.c

@@ -1416,6 +1416,19 @@ static int khugepaged_add_pte_mapped_thp(struct mm_struct *mm,
 	return 0;
 }
 
+static void collapse_and_free_pmd(struct mm_struct *mm, struct vm_area_struct *vma,
+				  unsigned long addr, pmd_t *pmdp)
+{
+	spinlock_t *ptl;
+	pmd_t pmd;
+
+	ptl = pmd_lock(vma->vm_mm, pmdp);
+	pmd = pmdp_collapse_flush(vma, addr, pmdp);
+	spin_unlock(ptl);
+	mm_dec_nr_ptes(mm);
+	pte_free(mm, pmd_pgtable(pmd));
+}
+
 /**
  * collapse_pte_mapped_thp - Try to collapse a pte-mapped THP for mm at
  * address haddr.
@@ -1433,7 +1446,7 @@ void collapse_pte_mapped_thp(struct mm_struct *mm, unsigned long addr)
 	struct vm_area_struct *vma = find_vma(mm, haddr);
 	struct page *hpage;
 	pte_t *start_pte, *pte;
-	pmd_t *pmd, _pmd;
+	pmd_t *pmd;
 	spinlock_t *ptl;
 	int count = 0;
 	int i;
@@ -1509,12 +1522,7 @@ void collapse_pte_mapped_thp(struct mm_struct *mm, unsigned long addr)
 	}
 
 	/* step 4: collapse pmd */
-	ptl = pmd_lock(vma->vm_mm, pmd);
-	_pmd = pmdp_collapse_flush(vma, haddr, pmd);
-	spin_unlock(ptl);
-	mm_dec_nr_ptes(mm);
-	pte_free(mm, pmd_pgtable(_pmd));
-
+	collapse_and_free_pmd(mm, vma, haddr, pmd);
 drop_hpage:
 	unlock_page(hpage);
 	put_page(hpage);
@@ -1552,7 +1560,7 @@ static void retract_page_tables(struct address_space *mapping, pgoff_t pgoff)
 	struct vm_area_struct *vma;
 	struct mm_struct *mm;
 	unsigned long addr;
-	pmd_t *pmd, _pmd;
+	pmd_t *pmd;
 
 	i_mmap_lock_write(mapping);
 	vma_interval_tree_foreach(vma, &mapping->i_mmap, pgoff, pgoff) {
@@ -1591,14 +1599,8 @@ static void retract_page_tables(struct address_space *mapping, pgoff_t pgoff)
 		 * reverse order. Trylock is a way to avoid deadlock.
 		 */
 		if (mmap_write_trylock(mm)) {
-			if (!khugepaged_test_exit(mm)) {
-				spinlock_t *ptl = pmd_lock(mm, pmd);
-				/* assume page table is clear */
-				_pmd = pmdp_collapse_flush(vma, addr, pmd);
-				spin_unlock(ptl);
-				mm_dec_nr_ptes(mm);
-				pte_free(mm, pmd_pgtable(_pmd));
-			}
+			if (!khugepaged_test_exit(mm))
+				collapse_and_free_pmd(mm, vma, addr, pmd);
 			mmap_write_unlock(mm);
 		} else {
 			/* Try again later */
