Commit 8749cfe

Vladimir Davydov authored and torvalds committed
mm: add page_check_address_transhuge() helper
page_referenced_one() and page_idle_clear_pte_refs_one() duplicate the code
for looking up pte of a (possibly transhuge) page. Move this code to a new
helper function, page_check_address_transhuge(), and make the above mentioned
functions use it.

This is just a cleanup, no functional changes are intended.

Signed-off-by: Vladimir Davydov <[email protected]>
Reviewed-by: Kirill A. Shutemov <[email protected]>
Signed-off-by: Andrew Morton <[email protected]>
Signed-off-by: Linus Torvalds <[email protected]>
1 parent d965432 commit 8749cfe

3 files changed, 99 insertions(+), 98 deletions(-)


include/linux/rmap.h

Lines changed: 19 additions & 0 deletions
@@ -215,6 +215,25 @@ static inline pte_t *page_check_address(struct page *page, struct mm_struct *mm,
 	return ptep;
 }
 
+/*
+ * Used by idle page tracking to check if a page was referenced via page
+ * tables.
+ */
+#ifdef CONFIG_TRANSPARENT_HUGEPAGE
+bool page_check_address_transhuge(struct page *page, struct mm_struct *mm,
+				  unsigned long address, pmd_t **pmdp,
+				  pte_t **ptep, spinlock_t **ptlp);
+#else
+static inline bool page_check_address_transhuge(struct page *page,
+	struct mm_struct *mm, unsigned long address,
+	pmd_t **pmdp, pte_t **ptep, spinlock_t **ptlp)
+{
+	*ptep = page_check_address(page, mm, address, ptlp, 0);
+	*pmdp = NULL;
+	return !!*ptep;
+}
+#endif
+
 /*
  * Used by swapoff to help locate where page is expected in vma.
  */
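
For context (not part of this commit): the contract documented above is that on success the helper returns true with *ptlp held and, for a PTE-mapped page, the pte mapped in *ptep, while for a PMD-mapped transparent huge page *ptep is set to NULL. A minimal sketch of a caller following that contract is shown below; it mirrors the pattern the patch introduces in mm/page_idle.c and mm/rmap.c, and the function name check_page_young() is made up purely for illustration.

	/*
	 * Illustrative sketch only: a hypothetical caller of the new helper,
	 * assuming the usual kernel headers for rmap and mmu notifiers.
	 */
	#include <linux/mm.h>
	#include <linux/rmap.h>
	#include <linux/mmu_notifier.h>

	static bool check_page_young(struct page *page, struct vm_area_struct *vma,
				     unsigned long address)
	{
		pmd_t *pmd;
		pte_t *pte;
		spinlock_t *ptl;
		bool young = false;

		/* On success the pte (if any) is mapped and ptl is held. */
		if (!page_check_address_transhuge(page, vma->vm_mm, address,
						  &pmd, &pte, &ptl))
			return false;

		if (pte) {
			/* PTE-mapped page (or a subpage of a THP). */
			young = ptep_clear_young_notify(vma, address, pte);
			pte_unmap(pte);
		} else if (IS_ENABLED(CONFIG_TRANSPARENT_HUGEPAGE)) {
			/* PMD-mapped THP: the helper set pte to NULL. */
			young = pmdp_clear_young_notify(vma, address, pmd);
		}

		spin_unlock(ptl);
		return young;
	}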

mm/page_idle.c

Lines changed: 9 additions & 54 deletions
@@ -55,71 +55,26 @@ static int page_idle_clear_pte_refs_one(struct page *page,
 					unsigned long addr, void *arg)
 {
 	struct mm_struct *mm = vma->vm_mm;
-	spinlock_t *ptl;
-	pgd_t *pgd;
-	pud_t *pud;
 	pmd_t *pmd;
 	pte_t *pte;
+	spinlock_t *ptl;
 	bool referenced = false;
 
-	pgd = pgd_offset(mm, addr);
-	if (!pgd_present(*pgd))
-		return SWAP_AGAIN;
-	pud = pud_offset(pgd, addr);
-	if (!pud_present(*pud))
+	if (!page_check_address_transhuge(page, mm, addr, &pmd, &pte, &ptl))
 		return SWAP_AGAIN;
-	pmd = pmd_offset(pud, addr);
-
-	if (pmd_trans_huge(*pmd)) {
-		ptl = pmd_lock(mm, pmd);
-		if (!pmd_present(*pmd))
-			goto unlock_pmd;
-		if (unlikely(!pmd_trans_huge(*pmd))) {
-			spin_unlock(ptl);
-			goto map_pte;
-		}
-
-		if (pmd_page(*pmd) != page)
-			goto unlock_pmd;
 
+	if (pte) {
+		referenced = ptep_clear_young_notify(vma, addr, pte);
+		pte_unmap(pte);
+	} else if (IS_ENABLED(CONFIG_TRANSPARENT_HUGEPAGE)) {
 		referenced = pmdp_clear_young_notify(vma, addr, pmd);
-		spin_unlock(ptl);
-		goto found;
-unlock_pmd:
-		spin_unlock(ptl);
-		return SWAP_AGAIN;
 	} else {
-		pmd_t pmde = *pmd;
-
-		barrier();
-		if (!pmd_present(pmde) || pmd_trans_huge(pmde))
-			return SWAP_AGAIN;
-
-	}
-map_pte:
-	pte = pte_offset_map(pmd, addr);
-	if (!pte_present(*pte)) {
-		pte_unmap(pte);
-		return SWAP_AGAIN;
+		/* unexpected pmd-mapped page? */
+		WARN_ON_ONCE(1);
 	}
 
-	ptl = pte_lockptr(mm, pmd);
-	spin_lock(ptl);
-
-	if (!pte_present(*pte)) {
-		pte_unmap_unlock(pte, ptl);
-		return SWAP_AGAIN;
-	}
-
-	/* THP can be referenced by any subpage */
-	if (pte_pfn(*pte) - page_to_pfn(page) >= hpage_nr_pages(page)) {
-		pte_unmap_unlock(pte, ptl);
-		return SWAP_AGAIN;
-	}
+	spin_unlock(ptl);
 
-	referenced = ptep_clear_young_notify(vma, addr, pte);
-	pte_unmap_unlock(pte, ptl);
-found:
 	if (referenced) {
 		clear_page_idle(page);
 		/*

mm/rmap.c

Lines changed: 71 additions & 44 deletions
@@ -798,48 +798,44 @@ int page_mapped_in_vma(struct page *page, struct vm_area_struct *vma)
 	return 1;
 }
 
-struct page_referenced_arg {
-	int mapcount;
-	int referenced;
-	unsigned long vm_flags;
-	struct mem_cgroup *memcg;
-};
+#ifdef CONFIG_TRANSPARENT_HUGEPAGE
 /*
- * arg: page_referenced_arg will be passed
+ * Check that @page is mapped at @address into @mm. In contrast to
+ * page_check_address(), this function can handle transparent huge pages.
+ *
+ * On success returns true with pte mapped and locked. For PMD-mapped
+ * transparent huge pages *@ptep is set to NULL.
  */
-static int page_referenced_one(struct page *page, struct vm_area_struct *vma,
-			unsigned long address, void *arg)
+bool page_check_address_transhuge(struct page *page, struct mm_struct *mm,
+				  unsigned long address, pmd_t **pmdp,
+				  pte_t **ptep, spinlock_t **ptlp)
 {
-	struct mm_struct *mm = vma->vm_mm;
-	spinlock_t *ptl;
-	int referenced = 0;
-	struct page_referenced_arg *pra = arg;
 	pgd_t *pgd;
 	pud_t *pud;
 	pmd_t *pmd;
 	pte_t *pte;
+	spinlock_t *ptl;
 
 	if (unlikely(PageHuge(page))) {
 		/* when pud is not present, pte will be NULL */
 		pte = huge_pte_offset(mm, address);
 		if (!pte)
-			return SWAP_AGAIN;
+			return false;
 
 		ptl = huge_pte_lockptr(page_hstate(page), mm, pte);
+		pmd = NULL;
 		goto check_pte;
 	}
 
 	pgd = pgd_offset(mm, address);
 	if (!pgd_present(*pgd))
-		return SWAP_AGAIN;
+		return false;
 	pud = pud_offset(pgd, address);
 	if (!pud_present(*pud))
-		return SWAP_AGAIN;
+		return false;
 	pmd = pmd_offset(pud, address);
 
 	if (pmd_trans_huge(*pmd)) {
-		int ret = SWAP_AGAIN;
-
 		ptl = pmd_lock(mm, pmd);
 		if (!pmd_present(*pmd))
 			goto unlock_pmd;
@@ -851,31 +847,23 @@ static int page_referenced_one(struct page *page, struct vm_area_struct *vma,
 		if (pmd_page(*pmd) != page)
 			goto unlock_pmd;
 
-		if (vma->vm_flags & VM_LOCKED) {
-			pra->vm_flags |= VM_LOCKED;
-			ret = SWAP_FAIL; /* To break the loop */
-			goto unlock_pmd;
-		}
-
-		if (pmdp_clear_flush_young_notify(vma, address, pmd))
-			referenced++;
-		spin_unlock(ptl);
+		pte = NULL;
 		goto found;
 unlock_pmd:
 		spin_unlock(ptl);
-		return ret;
+		return false;
 	} else {
 		pmd_t pmde = *pmd;
 
 		barrier();
 		if (!pmd_present(pmde) || pmd_trans_huge(pmde))
-			return SWAP_AGAIN;
+			return false;
 	}
 map_pte:
 	pte = pte_offset_map(pmd, address);
 	if (!pte_present(*pte)) {
 		pte_unmap(pte);
-		return SWAP_AGAIN;
+		return false;
 	}
 
 	ptl = pte_lockptr(mm, pmd);
@@ -884,35 +872,74 @@ static int page_referenced_one(struct page *page, struct vm_area_struct *vma,
 
 	if (!pte_present(*pte)) {
 		pte_unmap_unlock(pte, ptl);
-		return SWAP_AGAIN;
+		return false;
 	}
 
 	/* THP can be referenced by any subpage */
 	if (pte_pfn(*pte) - page_to_pfn(page) >= hpage_nr_pages(page)) {
 		pte_unmap_unlock(pte, ptl);
-		return SWAP_AGAIN;
+		return false;
 	}
+found:
+	*ptep = pte;
+	*pmdp = pmd;
+	*ptlp = ptl;
+	return true;
+}
+#endif /* CONFIG_TRANSPARENT_HUGEPAGE */
+
+struct page_referenced_arg {
+	int mapcount;
+	int referenced;
+	unsigned long vm_flags;
+	struct mem_cgroup *memcg;
+};
+/*
+ * arg: page_referenced_arg will be passed
+ */
+static int page_referenced_one(struct page *page, struct vm_area_struct *vma,
+			unsigned long address, void *arg)
+{
+	struct mm_struct *mm = vma->vm_mm;
+	struct page_referenced_arg *pra = arg;
+	pmd_t *pmd;
+	pte_t *pte;
+	spinlock_t *ptl;
+	int referenced = 0;
+
+	if (!page_check_address_transhuge(page, mm, address, &pmd, &pte, &ptl))
+		return SWAP_AGAIN;
 
 	if (vma->vm_flags & VM_LOCKED) {
-		pte_unmap_unlock(pte, ptl);
+		if (pte)
+			pte_unmap(pte);
+		spin_unlock(ptl);
 		pra->vm_flags |= VM_LOCKED;
 		return SWAP_FAIL; /* To break the loop */
 	}
 
-	if (ptep_clear_flush_young_notify(vma, address, pte)) {
-		/*
-		 * Don't treat a reference through a sequentially read
-		 * mapping as such. If the page has been used in
-		 * another mapping, we will catch it; if this other
-		 * mapping is already gone, the unmap path will have
-		 * set PG_referenced or activated the page.
-		 */
-		if (likely(!(vma->vm_flags & VM_SEQ_READ)))
+	if (pte) {
+		if (ptep_clear_flush_young_notify(vma, address, pte)) {
+			/*
+			 * Don't treat a reference through a sequentially read
+			 * mapping as such. If the page has been used in
+			 * another mapping, we will catch it; if this other
+			 * mapping is already gone, the unmap path will have
+			 * set PG_referenced or activated the page.
+			 */
+			if (likely(!(vma->vm_flags & VM_SEQ_READ)))
+				referenced++;
+		}
+		pte_unmap(pte);
+	} else if (IS_ENABLED(CONFIG_TRANSPARENT_HUGEPAGE)) {
+		if (pmdp_clear_flush_young_notify(vma, address, pmd))
 			referenced++;
+	} else {
+		/* unexpected pmd-mapped page? */
+		WARN_ON_ONCE(1);
 	}
-	pte_unmap_unlock(pte, ptl);
+	spin_unlock(ptl);
 
-found:
 	if (referenced)
 		clear_page_idle(page);
 	if (test_and_clear_page_young(page))
