@@ -798,48 +798,44 @@ int page_mapped_in_vma(struct page *page, struct vm_area_struct *vma)
 	return 1;
 }
 
-struct page_referenced_arg {
-	int mapcount;
-	int referenced;
-	unsigned long vm_flags;
-	struct mem_cgroup *memcg;
-};
+#ifdef CONFIG_TRANSPARENT_HUGEPAGE
 /*
- * arg: page_referenced_arg will be passed
+ * Check that @page is mapped at @address into @mm. In contrast to
+ * page_check_address(), this function can handle transparent huge pages.
+ *
+ * On success returns true with pte mapped and locked. For PMD-mapped
+ * transparent huge pages *@ptep is set to NULL.
  */
-static int page_referenced_one(struct page *page, struct vm_area_struct *vma,
-			unsigned long address, void *arg)
+bool page_check_address_transhuge(struct page *page, struct mm_struct *mm,
+				  unsigned long address, pmd_t **pmdp,
+				  pte_t **ptep, spinlock_t **ptlp)
 {
-	struct mm_struct *mm = vma->vm_mm;
-	spinlock_t *ptl;
-	int referenced = 0;
-	struct page_referenced_arg *pra = arg;
 	pgd_t *pgd;
 	pud_t *pud;
 	pmd_t *pmd;
 	pte_t *pte;
+	spinlock_t *ptl;
 
 	if (unlikely(PageHuge(page))) {
 		/* when pud is not present, pte will be NULL */
 		pte = huge_pte_offset(mm, address);
 		if (!pte)
-			return SWAP_AGAIN;
+			return false;
 
 		ptl = huge_pte_lockptr(page_hstate(page), mm, pte);
+		pmd = NULL;
 		goto check_pte;
 	}
 
 	pgd = pgd_offset(mm, address);
 	if (!pgd_present(*pgd))
-		return SWAP_AGAIN;
+		return false;
 	pud = pud_offset(pgd, address);
 	if (!pud_present(*pud))
-		return SWAP_AGAIN;
+		return false;
 	pmd = pmd_offset(pud, address);
 
 	if (pmd_trans_huge(*pmd)) {
-		int ret = SWAP_AGAIN;
-
 		ptl = pmd_lock(mm, pmd);
 		if (!pmd_present(*pmd))
 			goto unlock_pmd;
@@ -851,31 +847,23 @@ static int page_referenced_one(struct page *page, struct vm_area_struct *vma,
 		if (pmd_page(*pmd) != page)
 			goto unlock_pmd;
 
-		if (vma->vm_flags & VM_LOCKED) {
-			pra->vm_flags |= VM_LOCKED;
-			ret = SWAP_FAIL; /* To break the loop */
-			goto unlock_pmd;
-		}
-
-		if (pmdp_clear_flush_young_notify(vma, address, pmd))
-			referenced++;
-		spin_unlock(ptl);
+		pte = NULL;
 		goto found;
 unlock_pmd:
 		spin_unlock(ptl);
-		return ret;
+		return false;
 	} else {
 		pmd_t pmde = *pmd;
 
 		barrier();
 		if (!pmd_present(pmde) || pmd_trans_huge(pmde))
-			return SWAP_AGAIN;
+			return false;
 	}
 map_pte:
 	pte = pte_offset_map(pmd, address);
 	if (!pte_present(*pte)) {
 		pte_unmap(pte);
-		return SWAP_AGAIN;
+		return false;
 	}
 
 	ptl = pte_lockptr(mm, pmd);
@@ -884,35 +872,74 @@ static int page_referenced_one(struct page *page, struct vm_area_struct *vma,
 
 	if (!pte_present(*pte)) {
 		pte_unmap_unlock(pte, ptl);
-		return SWAP_AGAIN;
+		return false;
 	}
 
 	/* THP can be referenced by any subpage */
 	if (pte_pfn(*pte) - page_to_pfn(page) >= hpage_nr_pages(page)) {
 		pte_unmap_unlock(pte, ptl);
-		return SWAP_AGAIN;
+		return false;
 	}
+found:
+	*ptep = pte;
+	*pmdp = pmd;
+	*ptlp = ptl;
+	return true;
+}
+#endif /* CONFIG_TRANSPARENT_HUGEPAGE */
+
+struct page_referenced_arg {
+	int mapcount;
+	int referenced;
+	unsigned long vm_flags;
+	struct mem_cgroup *memcg;
+};
+/*
+ * arg: page_referenced_arg will be passed
+ */
+static int page_referenced_one(struct page *page, struct vm_area_struct *vma,
+			unsigned long address, void *arg)
+{
+	struct mm_struct *mm = vma->vm_mm;
+	struct page_referenced_arg *pra = arg;
+	pmd_t *pmd;
+	pte_t *pte;
+	spinlock_t *ptl;
+	int referenced = 0;
+
+	if (!page_check_address_transhuge(page, mm, address, &pmd, &pte, &ptl))
+		return SWAP_AGAIN;
 
 	if (vma->vm_flags & VM_LOCKED) {
-		pte_unmap_unlock(pte, ptl);
+		if (pte)
+			pte_unmap(pte);
+		spin_unlock(ptl);
 		pra->vm_flags |= VM_LOCKED;
 		return SWAP_FAIL; /* To break the loop */
 	}
 
-	if (ptep_clear_flush_young_notify(vma, address, pte)) {
-		/*
-		 * Don't treat a reference through a sequentially read
-		 * mapping as such. If the page has been used in
-		 * another mapping, we will catch it; if this other
-		 * mapping is already gone, the unmap path will have
-		 * set PG_referenced or activated the page.
-		 */
-		if (likely(!(vma->vm_flags & VM_SEQ_READ)))
+	if (pte) {
+		if (ptep_clear_flush_young_notify(vma, address, pte)) {
+			/*
+			 * Don't treat a reference through a sequentially read
+			 * mapping as such. If the page has been used in
+			 * another mapping, we will catch it; if this other
+			 * mapping is already gone, the unmap path will have
+			 * set PG_referenced or activated the page.
+			 */
+			if (likely(!(vma->vm_flags & VM_SEQ_READ)))
+				referenced++;
+		}
+		pte_unmap(pte);
+	} else if (IS_ENABLED(CONFIG_TRANSPARENT_HUGEPAGE)) {
+		if (pmdp_clear_flush_young_notify(vma, address, pmd))
 			referenced++;
+	} else {
+		/* unexpected pmd-mapped page? */
+		WARN_ON_ONCE(1);
 	}
-	pte_unmap_unlock(pte, ptl);
+	spin_unlock(ptl);
 
-found:
 	if (referenced)
 		clear_page_idle(page);
 	if (test_and_clear_page_young(page))
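For readers unfamiliar with the new helper, the sketch below shows the caller pattern this patch establishes in page_referenced_one(): page_check_address_transhuge() returns with the page table lock held, a PMD-mapped THP is signalled by *ptep == NULL, and the caller unmaps the pte (if any) before dropping the lock with spin_unlock(). The function example_page_young() is purely illustrative and not part of the patch.

/*
 * Illustrative only (not part of the patch): a hypothetical caller that
 * mirrors the locking discipline page_referenced_one adopts above.
 */
static bool example_page_young(struct page *page, struct vm_area_struct *vma,
			       unsigned long address)
{
	struct mm_struct *mm = vma->vm_mm;
	pmd_t *pmd;
	pte_t *pte;
	spinlock_t *ptl;
	bool young = false;

	/* On success the page table lock is held and pte/pmd are set up. */
	if (!page_check_address_transhuge(page, mm, address, &pmd, &pte, &ptl))
		return false;

	if (pte) {
		/* PTE-mapped page (including hugetlb, where pmd is NULL). */
		young = ptep_clear_flush_young_notify(vma, address, pte);
		pte_unmap(pte);
	} else if (IS_ENABLED(CONFIG_TRANSPARENT_HUGEPAGE)) {
		/* PMD-mapped transparent huge page: *ptep was set to NULL. */
		young = pmdp_clear_flush_young_notify(vma, address, pmd);
	}
	spin_unlock(ptl);	/* drop the lock taken by the helper */

	return young;
}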