Skip to content

Commit 802a3a9

Browse files
shli authored and torvalds committed
mm: reclaim MADV_FREE pages
When memory pressure is high, we free MADV_FREE pages. If the pages are not dirty in pte, the pages could be freed immediately. Otherwise we can't reclaim them. We put the pages back to anonymous LRU list (by setting SwapBacked flag) and the pages will be reclaimed in normal swapout way. We use normal page reclaim policy. Since MADV_FREE pages are put into inactive file list, such pages and inactive file pages are reclaimed according to their age. This is expected, because we don't want to reclaim too many MADV_FREE pages before used once pages. Based on Minchan's original patch [[email protected]: clean up lazyfree page handling] Link: http://lkml.kernel.org/r/20170303025237.GB3503@bbox Link: http://lkml.kernel.org/r/14b8eb1d3f6bf6cc492833f183ac8c304e560484.1487965799.git.shli@fb.com Signed-off-by: Shaohua Li <[email protected]> Signed-off-by: Minchan Kim <[email protected]> Acked-by: Minchan Kim <[email protected]> Acked-by: Michal Hocko <[email protected]> Acked-by: Johannes Weiner <[email protected]> Acked-by: Hillf Danton <[email protected]> Cc: Hugh Dickins <[email protected]> Cc: Rik van Riel <[email protected]> Cc: Mel Gorman <[email protected]> Signed-off-by: Andrew Morton <[email protected]> Signed-off-by: Linus Torvalds <[email protected]>
1 parent f7ad2a6 commit 802a3a9

File tree

5 files changed

+46
-39
lines changed

5 files changed

+46
-39
lines changed

include/linux/rmap.h

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -298,6 +298,6 @@ static inline int page_mkclean(struct page *page)
298298
#define SWAP_AGAIN 1
299299
#define SWAP_FAIL 2
300300
#define SWAP_MLOCK 3
301-
#define SWAP_LZFREE 4
301+
#define SWAP_DIRTY 4
302302

303303
#endif /* _LINUX_RMAP_H */

mm/huge_memory.c

Lines changed: 2 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -1572,6 +1572,8 @@ bool madvise_free_huge_pmd(struct mmu_gather *tlb, struct vm_area_struct *vma,
15721572
set_pmd_at(mm, addr, pmd, orig_pmd);
15731573
tlb_remove_pmd_tlb_entry(tlb, pmd, addr);
15741574
}
1575+
1576+
mark_page_lazyfree(page);
15751577
ret = true;
15761578
out:
15771579
spin_unlock(ptl);

mm/madvise.c

Lines changed: 1 addition & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -413,6 +413,7 @@ static int madvise_free_pte_range(pmd_t *pmd, unsigned long addr,
413413
set_pte_at(mm, addr, pte, ptent);
414414
tlb_remove_tlb_entry(tlb, pte, addr);
415415
}
416+
mark_page_lazyfree(page);
416417
}
417418
out:
418419
if (nr_swap) {

mm/rmap.c

Lines changed: 20 additions & 26 deletions
Original file line numberDiff line numberDiff line change
@@ -1288,11 +1288,6 @@ void page_remove_rmap(struct page *page, bool compound)
12881288
*/
12891289
}
12901290

1291-
struct rmap_private {
1292-
enum ttu_flags flags;
1293-
int lazyfreed;
1294-
};
1295-
12961291
/*
12971292
* @arg: enum ttu_flags will be passed to this argument
12981293
*/
@@ -1308,8 +1303,7 @@ static int try_to_unmap_one(struct page *page, struct vm_area_struct *vma,
13081303
pte_t pteval;
13091304
struct page *subpage;
13101305
int ret = SWAP_AGAIN;
1311-
struct rmap_private *rp = arg;
1312-
enum ttu_flags flags = rp->flags;
1306+
enum ttu_flags flags = (enum ttu_flags)arg;
13131307

13141308
/* munlock has nothing to gain from examining un-locked vmas */
13151309
if ((flags & TTU_MUNLOCK) && !(vma->vm_flags & VM_LOCKED))
@@ -1427,11 +1421,21 @@ static int try_to_unmap_one(struct page *page, struct vm_area_struct *vma,
14271421
VM_BUG_ON_PAGE(!PageSwapCache(page) && PageSwapBacked(page),
14281422
page);
14291423

1430-
if (!PageDirty(page)) {
1431-
/* It's a freeable page by MADV_FREE */
1432-
dec_mm_counter(mm, MM_ANONPAGES);
1433-
rp->lazyfreed++;
1434-
goto discard;
1424+
/* MADV_FREE page check */
1425+
if (!PageSwapBacked(page)) {
1426+
if (!PageDirty(page)) {
1427+
dec_mm_counter(mm, MM_ANONPAGES);
1428+
goto discard;
1429+
}
1430+
1431+
/*
1432+
* If the page was redirtied, it cannot be
1433+
* discarded. Remap the page to page table.
1434+
*/
1435+
set_pte_at(mm, address, pvmw.pte, pteval);
1436+
ret = SWAP_DIRTY;
1437+
page_vma_mapped_walk_done(&pvmw);
1438+
break;
14351439
}
14361440

14371441
if (swap_duplicate(entry) < 0) {
@@ -1499,18 +1503,15 @@ static int page_mapcount_is_zero(struct page *page)
14991503
* SWAP_AGAIN - we missed a mapping, try again later
15001504
* SWAP_FAIL - the page is unswappable
15011505
* SWAP_MLOCK - page is mlocked.
1506+
* SWAP_DIRTY - page is dirty MADV_FREE page
15021507
*/
15031508
int try_to_unmap(struct page *page, enum ttu_flags flags)
15041509
{
15051510
int ret;
1506-
struct rmap_private rp = {
1507-
.flags = flags,
1508-
.lazyfreed = 0,
1509-
};
15101511

15111512
struct rmap_walk_control rwc = {
15121513
.rmap_one = try_to_unmap_one,
1513-
.arg = &rp,
1514+
.arg = (void *)flags,
15141515
.done = page_mapcount_is_zero,
15151516
.anon_lock = page_lock_anon_vma_read,
15161517
};
@@ -1531,11 +1532,8 @@ int try_to_unmap(struct page *page, enum ttu_flags flags)
15311532
else
15321533
ret = rmap_walk(page, &rwc);
15331534

1534-
if (ret != SWAP_MLOCK && !page_mapcount(page)) {
1535+
if (ret != SWAP_MLOCK && !page_mapcount(page))
15351536
ret = SWAP_SUCCESS;
1536-
if (rp.lazyfreed && !PageDirty(page))
1537-
ret = SWAP_LZFREE;
1538-
}
15391537
return ret;
15401538
}
15411539

@@ -1562,14 +1560,10 @@ static int page_not_mapped(struct page *page)
15621560
int try_to_munlock(struct page *page)
15631561
{
15641562
int ret;
1565-
struct rmap_private rp = {
1566-
.flags = TTU_MUNLOCK,
1567-
.lazyfreed = 0,
1568-
};
15691563

15701564
struct rmap_walk_control rwc = {
15711565
.rmap_one = try_to_unmap_one,
1572-
.arg = &rp,
1566+
.arg = (void *)TTU_MUNLOCK,
15731567
.done = page_not_mapped,
15741568
.anon_lock = page_lock_anon_vma_read,
15751569

mm/vmscan.c

Lines changed: 22 additions & 12 deletions
Original file line numberDiff line numberDiff line change
@@ -906,7 +906,8 @@ static void page_check_dirty_writeback(struct page *page,
906906
* Anonymous pages are not handled by flushers and must be written
907907
* from reclaim context. Do not stall reclaim based on them
908908
*/
909-
if (!page_is_file_cache(page)) {
909+
if (!page_is_file_cache(page) ||
910+
(PageAnon(page) && !PageSwapBacked(page))) {
910911
*dirty = false;
911912
*writeback = false;
912913
return;
@@ -987,7 +988,8 @@ static unsigned long shrink_page_list(struct list_head *page_list,
987988
goto keep_locked;
988989

989990
/* Double the slab pressure for mapped and swapcache pages */
990-
if (page_mapped(page) || PageSwapCache(page))
991+
if ((page_mapped(page) || PageSwapCache(page)) &&
992+
!(PageAnon(page) && !PageSwapBacked(page)))
991993
sc->nr_scanned++;
992994

993995
may_enter_fs = (sc->gfp_mask & __GFP_FS) ||
@@ -1113,8 +1115,10 @@ static unsigned long shrink_page_list(struct list_head *page_list,
11131115
/*
11141116
* Anonymous process memory has backing store?
11151117
* Try to allocate it some swap space here.
1118+
* Lazyfree page could be freed directly
11161119
*/
1117-
if (PageAnon(page) && !PageSwapCache(page)) {
1120+
if (PageAnon(page) && PageSwapBacked(page) &&
1121+
!PageSwapCache(page)) {
11181122
if (!(sc->gfp_mask & __GFP_IO))
11191123
goto keep_locked;
11201124
if (!add_to_swap(page, page_list))
@@ -1135,18 +1139,19 @@ static unsigned long shrink_page_list(struct list_head *page_list,
11351139
* The page is mapped into the page tables of one or more
11361140
* processes. Try to unmap it here.
11371141
*/
1138-
if (page_mapped(page) && mapping) {
1142+
if (page_mapped(page)) {
11391143
switch (ret = try_to_unmap(page,
11401144
ttu_flags | TTU_BATCH_FLUSH)) {
1145+
case SWAP_DIRTY:
1146+
SetPageSwapBacked(page);
1147+
/* fall through */
11411148
case SWAP_FAIL:
11421149
nr_unmap_fail++;
11431150
goto activate_locked;
11441151
case SWAP_AGAIN:
11451152
goto keep_locked;
11461153
case SWAP_MLOCK:
11471154
goto cull_mlocked;
1148-
case SWAP_LZFREE:
1149-
goto lazyfree;
11501155
case SWAP_SUCCESS:
11511156
; /* try to free the page below */
11521157
}
@@ -1258,10 +1263,18 @@ static unsigned long shrink_page_list(struct list_head *page_list,
12581263
}
12591264
}
12601265

1261-
lazyfree:
1262-
if (!mapping || !__remove_mapping(mapping, page, true))
1263-
goto keep_locked;
1266+
if (PageAnon(page) && !PageSwapBacked(page)) {
1267+
/* follow __remove_mapping for reference */
1268+
if (!page_ref_freeze(page, 1))
1269+
goto keep_locked;
1270+
if (PageDirty(page)) {
1271+
page_ref_unfreeze(page, 1);
1272+
goto keep_locked;
1273+
}
12641274

1275+
count_vm_event(PGLAZYFREED);
1276+
} else if (!mapping || !__remove_mapping(mapping, page, true))
1277+
goto keep_locked;
12651278
/*
12661279
* At this point, we have no other references and there is
12671280
* no way to pick any more up (removed from LRU, removed
@@ -1271,9 +1284,6 @@ static unsigned long shrink_page_list(struct list_head *page_list,
12711284
*/
12721285
__ClearPageLocked(page);
12731286
free_it:
1274-
if (ret == SWAP_LZFREE)
1275-
count_vm_event(PGLAZYFREED);
1276-
12771287
nr_reclaimed++;
12781288

12791289
/*

0 commit comments

Comments
 (0)