[PATCH v4] mm/debug-pagealloc: make debug-pagealloc boottime configurable

From: Joonsoo Kim
Date: Wed Nov 05 2014 - 02:05:14 EST


Now that everything is prepared to avoid using debug-pagealloc at boot
time, disable it by default and introduce a new kernel parameter to
enable it at boot time. This is a debugging feature, so having it
disabled by default should be more useful.
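
For illustration, here is a hypothetical caller (setup_foo_debug() and
its bookkeeping are made up; debug_pagealloc_enabled() is the helper
introduced in include/linux/mm.h below). Code that pays a cost only
for debug-pagealloc can now key off the runtime state instead of the
compile-time option alone:

	static void setup_foo_debug(void)
	{
		/* Booted without debug_pagealloc=on: skip the debug-only work */
		if (!debug_pagealloc_enabled())
			return;

		/* ... set up debug-only bookkeeping here ... */
	}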

The only non-intuitive part is the change to the guard page functions.
Because guard pages are effective only when debug-pagealloc is enabled,
turning them on/off together with debug-pagealloc is the reasonable
thing to do.
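
In outline (a condensed sketch of the gating added in the
mm/page_alloc.c hunk below, not new code): if debug-pagealloc was not
enabled at boot, need_debug_guardpage() reports false and
init_debug_guardpage() leaves _debug_guardpage_enabled false, so guard
pages are never set or cleared:

	static bool need_debug_guardpage(void)
	{
		/* Guard pages are useful only under debug-pagealloc */
		return debug_pagealloc_enabled();
	}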

v2: make debug-pagealloc boot-time configurable for page poisoning, too
v4: disable debug-pagealloc by default

Signed-off-by: Joonsoo Kim <iamjoonsoo.kim@xxxxxxx>
---
Documentation/kernel-parameters.txt | 9 +++++++++
arch/powerpc/mm/hash_utils_64.c | 2 +-
arch/powerpc/mm/pgtable_32.c | 2 +-
arch/s390/mm/pageattr.c | 2 +-
arch/sparc/mm/init_64.c | 2 +-
arch/x86/mm/pageattr.c | 2 +-
include/linux/mm.h | 17 ++++++++++++++++-
mm/debug-pagealloc.c | 8 +++++++-
mm/page_alloc.c | 20 ++++++++++++++++++++
9 files changed, 57 insertions(+), 7 deletions(-)
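
Note: to turn the feature on, pass the new parameter on the kernel
command line; booting without it leaves debug-pagealloc disabled even
on a CONFIG_DEBUG_PAGEALLOC=y kernel:

	debug_pagealloc=on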

diff --git a/Documentation/kernel-parameters.txt b/Documentation/kernel-parameters.txt
index 3c5a178..e35bfcc 100644
--- a/Documentation/kernel-parameters.txt
+++ b/Documentation/kernel-parameters.txt
@@ -829,6 +829,15 @@ bytes respectively. Such letter suffixes can also be entirely omitted.
CONFIG_DEBUG_PAGEALLOC, hence this option will not help
tracking down these problems.

+ debug_pagealloc=
+ [KNL] When CONFIG_DEBUG_PAGEALLOC is set, this
+ parameter enables the feature at boot time. By
+ default, it is disabled. If it is not enabled at boot
+ time, we avoid allocating a huge chunk of memory for
+ debug-pagealloc and the system will behave mostly the
+ same as a kernel built without CONFIG_DEBUG_PAGEALLOC.
+ on: enable the feature
+
debugpat [X86] Enable PAT debugging

decnet.addr= [HW,NET]
diff --git a/arch/powerpc/mm/hash_utils_64.c b/arch/powerpc/mm/hash_utils_64.c
index d5339a3..57b9c23 100644
--- a/arch/powerpc/mm/hash_utils_64.c
+++ b/arch/powerpc/mm/hash_utils_64.c
@@ -1432,7 +1432,7 @@ static void kernel_unmap_linear_page(unsigned long vaddr, unsigned long lmi)
mmu_kernel_ssize, 0);
}

-void kernel_map_pages(struct page *page, int numpages, int enable)
+void __kernel_map_pages(struct page *page, int numpages, int enable)
{
unsigned long flags, vaddr, lmi;
int i;
diff --git a/arch/powerpc/mm/pgtable_32.c b/arch/powerpc/mm/pgtable_32.c
index cf11342..b98aac6 100644
--- a/arch/powerpc/mm/pgtable_32.c
+++ b/arch/powerpc/mm/pgtable_32.c
@@ -430,7 +430,7 @@ static int change_page_attr(struct page *page, int numpages, pgprot_t prot)
}


-void kernel_map_pages(struct page *page, int numpages, int enable)
+void __kernel_map_pages(struct page *page, int numpages, int enable)
{
if (PageHighMem(page))
return;
diff --git a/arch/s390/mm/pageattr.c b/arch/s390/mm/pageattr.c
index 3fef3b2..426c9d4 100644
--- a/arch/s390/mm/pageattr.c
+++ b/arch/s390/mm/pageattr.c
@@ -120,7 +120,7 @@ static void ipte_range(pte_t *pte, unsigned long address, int nr)
}
}

-void kernel_map_pages(struct page *page, int numpages, int enable)
+void __kernel_map_pages(struct page *page, int numpages, int enable)
{
unsigned long address;
int nr, i, j;
diff --git a/arch/sparc/mm/init_64.c b/arch/sparc/mm/init_64.c
index 2d91c62..3ea267c 100644
--- a/arch/sparc/mm/init_64.c
+++ b/arch/sparc/mm/init_64.c
@@ -1621,7 +1621,7 @@ static void __init kernel_physical_mapping_init(void)
}

#ifdef CONFIG_DEBUG_PAGEALLOC
-void kernel_map_pages(struct page *page, int numpages, int enable)
+void __kernel_map_pages(struct page *page, int numpages, int enable)
{
unsigned long phys_start = page_to_pfn(page) << PAGE_SHIFT;
unsigned long phys_end = phys_start + (numpages * PAGE_SIZE);
diff --git a/arch/x86/mm/pageattr.c b/arch/x86/mm/pageattr.c
index 36de293..4d304e1 100644
--- a/arch/x86/mm/pageattr.c
+++ b/arch/x86/mm/pageattr.c
@@ -1801,7 +1801,7 @@ static int __set_pages_np(struct page *page, int numpages)
return __change_page_attr_set_clr(&cpa, 0);
}

-void kernel_map_pages(struct page *page, int numpages, int enable)
+void __kernel_map_pages(struct page *page, int numpages, int enable)
{
if (PageHighMem(page))
return;
diff --git a/include/linux/mm.h b/include/linux/mm.h
index 5a8d4d4..5dc11e7 100644
--- a/include/linux/mm.h
+++ b/include/linux/mm.h
@@ -2055,7 +2055,22 @@ static inline void vm_stat_account(struct mm_struct *mm,
#endif /* CONFIG_PROC_FS */

#ifdef CONFIG_DEBUG_PAGEALLOC
-extern void kernel_map_pages(struct page *page, int numpages, int enable);
+extern bool _debug_pagealloc_enabled;
+extern void __kernel_map_pages(struct page *page, int numpages, int enable);
+
+static inline bool debug_pagealloc_enabled(void)
+{
+ return _debug_pagealloc_enabled;
+}
+
+static inline void
+kernel_map_pages(struct page *page, int numpages, int enable)
+{
+ if (!debug_pagealloc_enabled())
+ return;
+
+ __kernel_map_pages(page, numpages, enable);
+}
#ifdef CONFIG_HIBERNATION
extern bool kernel_page_present(struct page *page);
#endif /* CONFIG_HIBERNATION */
diff --git a/mm/debug-pagealloc.c b/mm/debug-pagealloc.c
index 0072f2c..5bf5906 100644
--- a/mm/debug-pagealloc.c
+++ b/mm/debug-pagealloc.c
@@ -10,11 +10,17 @@ static bool page_poisoning_enabled __read_mostly;

static bool need_page_poisoning(void)
{
+ if (!debug_pagealloc_enabled())
+ return false;
+
return true;
}

static void init_page_poisoning(void)
{
+ if (!debug_pagealloc_enabled())
+ return;
+
page_poisoning_enabled = true;
}

@@ -119,7 +125,7 @@ static void unpoison_pages(struct page *page, int n)
unpoison_page(page + i);
}

-void kernel_map_pages(struct page *page, int numpages, int enable)
+void __kernel_map_pages(struct page *page, int numpages, int enable)
{
if (!page_poisoning_enabled)
return;
diff --git a/mm/page_alloc.c b/mm/page_alloc.c
index 7534733..10310ad 100644
--- a/mm/page_alloc.c
+++ b/mm/page_alloc.c
@@ -426,15 +426,35 @@ static inline void prep_zero_page(struct page *page, unsigned int order,

#ifdef CONFIG_DEBUG_PAGEALLOC
unsigned int _debug_guardpage_minorder;
+bool _debug_pagealloc_enabled __read_mostly;
bool _debug_guardpage_enabled __read_mostly;

+static int __init early_debug_pagealloc(char *buf)
+{
+ if (!buf)
+ return -EINVAL;
+
+ if (strcmp(buf, "on") == 0)
+ _debug_pagealloc_enabled = true;
+
+ return 0;
+}
+early_param("debug_pagealloc", early_debug_pagealloc);
+
static bool need_debug_guardpage(void)
{
+ /* If we don't use debug_pagealloc, we don't need guard pages */
+ if (!debug_pagealloc_enabled())
+ return false;
+
return true;
}

static void init_debug_guardpage(void)
{
+ if (!debug_pagealloc_enabled())
+ return;
+
_debug_guardpage_enabled = true;
}

--
1.7.9.5
