Skip to content

Commit aff6224

Browse files
Rik van Riel authored and torvalds (Linus Torvalds) committed
vmscan: only defer compaction for failed order and higher
Currently a failed order-9 (transparent hugepage) compaction can lead to memory compaction being temporarily disabled for a memory zone. Even if we only need compaction for an order 2 allocation, eg. for jumbo frames networking. The fix is relatively straightforward: keep track of the highest order at which compaction is succeeding, and only defer compaction for orders at which compaction is failing. Signed-off-by: Rik van Riel <[email protected]> Cc: Andrea Arcangeli <[email protected]> Acked-by: Mel Gorman <[email protected]> Cc: Johannes Weiner <[email protected]> Cc: Minchan Kim <[email protected]> Cc: KOSAKI Motohiro <[email protected]> Cc: Hillf Danton <[email protected]> Signed-off-by: Andrew Morton <[email protected]> Signed-off-by: Linus Torvalds <[email protected]>
1 parent 7be62de commit aff6224

File tree

5 files changed

+27
-8
lines changed

5 files changed

+27
-8
lines changed

include/linux/compaction.h

Lines changed: 10 additions & 4 deletions
Original file line numberDiff line numberDiff line change
@@ -34,20 +34,26 @@ extern unsigned long compaction_suitable(struct zone *zone, int order);
3434
* allocation success. 1 << compact_defer_limit compactions are skipped up
3535
* to a limit of 1 << COMPACT_MAX_DEFER_SHIFT
3636
*/
37-
static inline void defer_compaction(struct zone *zone)
37+
static inline void defer_compaction(struct zone *zone, int order)
3838
{
3939
zone->compact_considered = 0;
4040
zone->compact_defer_shift++;
4141

42+
if (order < zone->compact_order_failed)
43+
zone->compact_order_failed = order;
44+
4245
if (zone->compact_defer_shift > COMPACT_MAX_DEFER_SHIFT)
4346
zone->compact_defer_shift = COMPACT_MAX_DEFER_SHIFT;
4447
}
4548

4649
/* Returns true if compaction should be skipped this time */
47-
static inline bool compaction_deferred(struct zone *zone)
50+
static inline bool compaction_deferred(struct zone *zone, int order)
4851
{
4952
unsigned long defer_limit = 1UL << zone->compact_defer_shift;
5053

54+
if (order < zone->compact_order_failed)
55+
return false;
56+
5157
/* Avoid possible overflow */
5258
if (++zone->compact_considered > defer_limit)
5359
zone->compact_considered = defer_limit;
@@ -73,11 +79,11 @@ static inline unsigned long compaction_suitable(struct zone *zone, int order)
7379
return COMPACT_SKIPPED;
7480
}
7581

76-
static inline void defer_compaction(struct zone *zone)
82+
static inline void defer_compaction(struct zone *zone, int order)
7783
{
7884
}
7985

80-
static inline bool compaction_deferred(struct zone *zone)
86+
static inline bool compaction_deferred(struct zone *zone, int order)
8187
{
8288
return 1;
8389
}

include/linux/mmzone.h

Lines changed: 1 addition & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -365,6 +365,7 @@ struct zone {
365365
*/
366366
unsigned int compact_considered;
367367
unsigned int compact_defer_shift;
368+
int compact_order_failed;
368369
#endif
369370

370371
ZONE_PADDING(_pad1_)

mm/compaction.c

Lines changed: 11 additions & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -695,9 +695,19 @@ static int __compact_pgdat(pg_data_t *pgdat, struct compact_control *cc)
695695
INIT_LIST_HEAD(&cc->freepages);
696696
INIT_LIST_HEAD(&cc->migratepages);
697697

698-
if (cc->order < 0 || !compaction_deferred(zone))
698+
if (cc->order < 0 || !compaction_deferred(zone, cc->order))
699699
compact_zone(zone, cc);
700700

701+
if (cc->order > 0) {
702+
int ok = zone_watermark_ok(zone, cc->order,
703+
low_wmark_pages(zone), 0, 0);
704+
if (ok && cc->order > zone->compact_order_failed)
705+
zone->compact_order_failed = cc->order + 1;
706+
/* Currently async compaction is never deferred. */
707+
else if (!ok && cc->sync)
708+
defer_compaction(zone, cc->order);
709+
}
710+
701711
VM_BUG_ON(!list_empty(&cc->freepages));
702712
VM_BUG_ON(!list_empty(&cc->migratepages));
703713
}

mm/page_alloc.c

Lines changed: 4 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -1990,7 +1990,7 @@ __alloc_pages_direct_compact(gfp_t gfp_mask, unsigned int order,
19901990
if (!order)
19911991
return NULL;
19921992

1993-
if (compaction_deferred(preferred_zone)) {
1993+
if (compaction_deferred(preferred_zone, order)) {
19941994
*deferred_compaction = true;
19951995
return NULL;
19961996
}
@@ -2012,6 +2012,8 @@ __alloc_pages_direct_compact(gfp_t gfp_mask, unsigned int order,
20122012
if (page) {
20132013
preferred_zone->compact_considered = 0;
20142014
preferred_zone->compact_defer_shift = 0;
2015+
if (order >= preferred_zone->compact_order_failed)
2016+
preferred_zone->compact_order_failed = order + 1;
20152017
count_vm_event(COMPACTSUCCESS);
20162018
return page;
20172019
}
@@ -2028,7 +2030,7 @@ __alloc_pages_direct_compact(gfp_t gfp_mask, unsigned int order,
20282030
* defer if the failure was a sync compaction failure.
20292031
*/
20302032
if (sync_migration)
2031-
defer_compaction(preferred_zone);
2033+
defer_compaction(preferred_zone, order);
20322034

20332035
cond_resched();
20342036
}

mm/vmscan.c

Lines changed: 1 addition & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -2198,7 +2198,7 @@ static inline bool compaction_ready(struct zone *zone, struct scan_control *sc)
21982198
* If compaction is deferred, reclaim up to a point where
21992199
* compaction will have a chance of success when re-enabled
22002200
*/
2201-
if (compaction_deferred(zone))
2201+
if (compaction_deferred(zone, sc->order))
22022202
return watermark_ok;
22032203

22042204
/* If compaction is not ready to start, keep reclaiming */

0 commit comments

Comments
 (0)