Skip to content

Commit e30825f

Browse files
JoonsooKim authored and torvalds committed
mm/debug-pagealloc: prepare boottime configurable on/off
Until now, debug-pagealloc needs extra flags in struct page, so we need to recompile whole source code when we decide to use it. This is really painful, because it takes some time to recompile and sometimes rebuild is not possible due to third party module depending on struct page. So, we can't use this good feature in many cases. Now, we have the page extension feature that allows us to insert extra flags to outside of struct page. This gets rid of third party module issue mentioned above. And, this allows us to determine if we need extra memory for this page extension in boottime. With these properties, we can avoid using debug-pagealloc in boottime with low computational overhead in the kernel built with CONFIG_DEBUG_PAGEALLOC. This will help our development process greatly. This patch is the preparation step to achieve the above goal. debug-pagealloc originally uses extra field of struct page, but, after this patch, it will use field of struct page_ext. Because memory for page_ext is allocated later than initialization of page allocator in CONFIG_SPARSEMEM, we should disable debug-pagealloc feature temporarily until initialization of page_ext. This patch implements this. Signed-off-by: Joonsoo Kim <[email protected]> Cc: Mel Gorman <[email protected]> Cc: Johannes Weiner <[email protected]> Cc: Minchan Kim <[email protected]> Cc: Dave Hansen <[email protected]> Cc: Michal Nazarewicz <[email protected]> Cc: Jungsoo Son <[email protected]> Cc: Ingo Molnar <[email protected]> Cc: Joonsoo Kim <[email protected]> Signed-off-by: Andrew Morton <[email protected]> Signed-off-by: Linus Torvalds <[email protected]>
1 parent eefa864 commit e30825f

File tree

8 files changed

+106
-44
lines changed

8 files changed

+106
-44
lines changed

include/linux/mm.h

Lines changed: 18 additions & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -19,6 +19,7 @@
1919
#include <linux/bit_spinlock.h>
2020
#include <linux/shrinker.h>
2121
#include <linux/resource.h>
22+
#include <linux/page_ext.h>
2223

2324
struct mempolicy;
2425
struct anon_vma;
@@ -2155,20 +2156,36 @@ extern void copy_user_huge_page(struct page *dst, struct page *src,
21552156
unsigned int pages_per_huge_page);
21562157
#endif /* CONFIG_TRANSPARENT_HUGEPAGE || CONFIG_HUGETLBFS */
21572158

2159+
extern struct page_ext_operations debug_guardpage_ops;
2160+
extern struct page_ext_operations page_poisoning_ops;
2161+
21582162
#ifdef CONFIG_DEBUG_PAGEALLOC
21592163
extern unsigned int _debug_guardpage_minorder;
2164+
extern bool _debug_guardpage_enabled;
21602165

21612166
static inline unsigned int debug_guardpage_minorder(void)
21622167
{
21632168
return _debug_guardpage_minorder;
21642169
}
21652170

2171+
static inline bool debug_guardpage_enabled(void)
2172+
{
2173+
return _debug_guardpage_enabled;
2174+
}
2175+
21662176
static inline bool page_is_guard(struct page *page)
21672177
{
2168-
return test_bit(PAGE_DEBUG_FLAG_GUARD, &page->debug_flags);
2178+
struct page_ext *page_ext;
2179+
2180+
if (!debug_guardpage_enabled())
2181+
return false;
2182+
2183+
page_ext = lookup_page_ext(page);
2184+
return test_bit(PAGE_EXT_DEBUG_GUARD, &page_ext->flags);
21692185
}
21702186
#else
21712187
static inline unsigned int debug_guardpage_minorder(void) { return 0; }
2188+
static inline bool debug_guardpage_enabled(void) { return false; }
21722189
static inline bool page_is_guard(struct page *page) { return false; }
21732190
#endif /* CONFIG_DEBUG_PAGEALLOC */
21742191

include/linux/mm_types.h

Lines changed: 0 additions & 4 deletions
Original file line numberDiff line numberDiff line change
@@ -10,7 +10,6 @@
1010
#include <linux/rwsem.h>
1111
#include <linux/completion.h>
1212
#include <linux/cpumask.h>
13-
#include <linux/page-debug-flags.h>
1413
#include <linux/uprobes.h>
1514
#include <linux/page-flags-layout.h>
1615
#include <asm/page.h>
@@ -186,9 +185,6 @@ struct page {
186185
void *virtual; /* Kernel virtual address (NULL if
187186
not kmapped, ie. highmem) */
188187
#endif /* WANT_PAGE_VIRTUAL */
189-
#ifdef CONFIG_WANT_PAGE_DEBUG_FLAGS
190-
unsigned long debug_flags; /* Use atomic bitops on this */
191-
#endif
192188

193189
#ifdef CONFIG_KMEMCHECK
194190
/*

include/linux/page-debug-flags.h

Lines changed: 0 additions & 32 deletions
This file was deleted.

include/linux/page_ext.h

Lines changed: 15 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -9,6 +9,21 @@ struct page_ext_operations {
99

1010
#ifdef CONFIG_PAGE_EXTENSION
1111

12+
/*
13+
* page_ext->flags bits:
14+
*
15+
* PAGE_EXT_DEBUG_POISON is set for poisoned pages. This is used to
16+
* implement generic debug pagealloc feature. The pages are filled with
17+
* poison patterns and set this flag after free_pages(). The poisoned
18+
* pages are verified whether the patterns are not corrupted and clear
19+
* the flag before alloc_pages().
20+
*/
21+
22+
enum page_ext_flags {
23+
PAGE_EXT_DEBUG_POISON, /* Page is poisoned */
24+
PAGE_EXT_DEBUG_GUARD,
25+
};
26+
1227
/*
1328
* Page Extension can be considered as an extended mem_map.
1429
* A page_ext page is associated with every page descriptor. The

mm/Kconfig.debug

Lines changed: 1 addition & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -12,6 +12,7 @@ config DEBUG_PAGEALLOC
1212
depends on DEBUG_KERNEL
1313
depends on !HIBERNATION || ARCH_SUPPORTS_DEBUG_PAGEALLOC && !PPC && !SPARC
1414
depends on !KMEMCHECK
15+
select PAGE_EXTENSION
1516
select PAGE_POISONING if !ARCH_SUPPORTS_DEBUG_PAGEALLOC
1617
select PAGE_GUARD if ARCH_SUPPORTS_DEBUG_PAGEALLOC
1718
---help---

mm/debug-pagealloc.c

Lines changed: 33 additions & 4 deletions
Original file line numberDiff line numberDiff line change
@@ -2,23 +2,49 @@
22
#include <linux/string.h>
33
#include <linux/mm.h>
44
#include <linux/highmem.h>
5-
#include <linux/page-debug-flags.h>
5+
#include <linux/page_ext.h>
66
#include <linux/poison.h>
77
#include <linux/ratelimit.h>
88

9+
static bool page_poisoning_enabled __read_mostly;
10+
11+
static bool need_page_poisoning(void)
12+
{
13+
return true;
14+
}
15+
16+
static void init_page_poisoning(void)
17+
{
18+
page_poisoning_enabled = true;
19+
}
20+
21+
struct page_ext_operations page_poisoning_ops = {
22+
.need = need_page_poisoning,
23+
.init = init_page_poisoning,
24+
};
25+
926
static inline void set_page_poison(struct page *page)
1027
{
11-
__set_bit(PAGE_DEBUG_FLAG_POISON, &page->debug_flags);
28+
struct page_ext *page_ext;
29+
30+
page_ext = lookup_page_ext(page);
31+
__set_bit(PAGE_EXT_DEBUG_POISON, &page_ext->flags);
1232
}
1333

1434
static inline void clear_page_poison(struct page *page)
1535
{
16-
__clear_bit(PAGE_DEBUG_FLAG_POISON, &page->debug_flags);
36+
struct page_ext *page_ext;
37+
38+
page_ext = lookup_page_ext(page);
39+
__clear_bit(PAGE_EXT_DEBUG_POISON, &page_ext->flags);
1740
}
1841

1942
static inline bool page_poison(struct page *page)
2043
{
21-
return test_bit(PAGE_DEBUG_FLAG_POISON, &page->debug_flags);
44+
struct page_ext *page_ext;
45+
46+
page_ext = lookup_page_ext(page);
47+
return test_bit(PAGE_EXT_DEBUG_POISON, &page_ext->flags);
2248
}
2349

2450
static void poison_page(struct page *page)
@@ -95,6 +121,9 @@ static void unpoison_pages(struct page *page, int n)
95121

96122
void kernel_map_pages(struct page *page, int numpages, int enable)
97123
{
124+
if (!page_poisoning_enabled)
125+
return;
126+
98127
if (enable)
99128
unpoison_pages(page, numpages);
100129
else

mm/page_alloc.c

Lines changed: 35 additions & 3 deletions
Original file line numberDiff line numberDiff line change
@@ -56,7 +56,7 @@
5656
#include <linux/prefetch.h>
5757
#include <linux/mm_inline.h>
5858
#include <linux/migrate.h>
59-
#include <linux/page-debug-flags.h>
59+
#include <linux/page_ext.h>
6060
#include <linux/hugetlb.h>
6161
#include <linux/sched/rt.h>
6262

@@ -425,6 +425,22 @@ static inline void prep_zero_page(struct page *page, unsigned int order,
425425

426426
#ifdef CONFIG_DEBUG_PAGEALLOC
427427
unsigned int _debug_guardpage_minorder;
428+
bool _debug_guardpage_enabled __read_mostly;
429+
430+
static bool need_debug_guardpage(void)
431+
{
432+
return true;
433+
}
434+
435+
static void init_debug_guardpage(void)
436+
{
437+
_debug_guardpage_enabled = true;
438+
}
439+
440+
struct page_ext_operations debug_guardpage_ops = {
441+
.need = need_debug_guardpage,
442+
.init = init_debug_guardpage,
443+
};
428444

429445
static int __init debug_guardpage_minorder_setup(char *buf)
430446
{
@@ -443,7 +459,14 @@ __setup("debug_guardpage_minorder=", debug_guardpage_minorder_setup);
443459
static inline void set_page_guard(struct zone *zone, struct page *page,
444460
unsigned int order, int migratetype)
445461
{
446-
__set_bit(PAGE_DEBUG_FLAG_GUARD, &page->debug_flags);
462+
struct page_ext *page_ext;
463+
464+
if (!debug_guardpage_enabled())
465+
return;
466+
467+
page_ext = lookup_page_ext(page);
468+
__set_bit(PAGE_EXT_DEBUG_GUARD, &page_ext->flags);
469+
447470
INIT_LIST_HEAD(&page->lru);
448471
set_page_private(page, order);
449472
/* Guard pages are not available for any usage */
@@ -453,12 +476,20 @@ static inline void set_page_guard(struct zone *zone, struct page *page,
453476
static inline void clear_page_guard(struct zone *zone, struct page *page,
454477
unsigned int order, int migratetype)
455478
{
456-
__clear_bit(PAGE_DEBUG_FLAG_GUARD, &page->debug_flags);
479+
struct page_ext *page_ext;
480+
481+
if (!debug_guardpage_enabled())
482+
return;
483+
484+
page_ext = lookup_page_ext(page);
485+
__clear_bit(PAGE_EXT_DEBUG_GUARD, &page_ext->flags);
486+
457487
set_page_private(page, 0);
458488
if (!is_migrate_isolate(migratetype))
459489
__mod_zone_freepage_state(zone, (1 << order), migratetype);
460490
}
461491
#else
492+
struct page_ext_operations debug_guardpage_ops = { NULL, };
462493
static inline void set_page_guard(struct zone *zone, struct page *page,
463494
unsigned int order, int migratetype) {}
464495
static inline void clear_page_guard(struct zone *zone, struct page *page,
@@ -869,6 +900,7 @@ static inline void expand(struct zone *zone, struct page *page,
869900
VM_BUG_ON_PAGE(bad_range(zone, &page[size]), &page[size]);
870901

871902
if (IS_ENABLED(CONFIG_DEBUG_PAGEALLOC) &&
903+
debug_guardpage_enabled() &&
872904
high < debug_guardpage_minorder()) {
873905
/*
874906
* Mark as guard pages (or page), that will allow to

mm/page_ext.c

Lines changed: 4 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -51,6 +51,10 @@
5151
*/
5252

5353
static struct page_ext_operations *page_ext_ops[] = {
54+
&debug_guardpage_ops,
55+
#ifdef CONFIG_PAGE_POISONING
56+
&page_poisoning_ops,
57+
#endif
5458
};
5559

5660
static unsigned long total_usage;

0 commit comments

Comments
 (0)