x86: query dynamic DEBUG_PAGEALLOC setting
author     Christian Borntraeger <borntraeger@de.ibm.com>
           Tue, 15 Mar 2016 21:57:33 +0000 (14:57 -0700)
committer  Linus Torvalds <torvalds@linux-foundation.org>
           Tue, 15 Mar 2016 23:55:16 +0000 (16:55 -0700)
We can use debug_pagealloc_enabled() to check whether the identity
mapping can use 2MB pages.  We can also report the state in the
dump_stack output.
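
For context, the change replaces compile-time #ifdef CONFIG_DEBUG_PAGEALLOC
guards with a runtime query.  The following standalone sketch only models
that pattern; CONFIG_DEBUG_PAGEALLOC_DEMO, demo_debug_pagealloc_enabled()
and the "off" argument are illustrative stand-ins, not kernel symbols:

#include <stdbool.h>
#include <stdio.h>
#include <string.h>

#ifdef CONFIG_DEBUG_PAGEALLOC_DEMO
static bool debug_pagealloc_on = true;	/* built in, may be toggled at boot */
#else
static bool debug_pagealloc_on;		/* compiled out: always false */
#endif

/* Runtime query, analogous in spirit to debug_pagealloc_enabled(). */
static bool demo_debug_pagealloc_enabled(void)
{
	return debug_pagealloc_on;
}

int main(int argc, char **argv)
{
	/* Stand-in for a "debug_pagealloc=off" boot parameter. */
	if (argc > 1 && strcmp(argv[1], "off") == 0)
		debug_pagealloc_on = false;

	/* Old style: decided once, at build time. */
#ifdef CONFIG_DEBUG_PAGEALLOC_DEMO
	printf("compile-time check: DEBUG_PAGEALLOC\n");
#endif

	/* New style: decided from the actual runtime setting. */
	if (demo_debug_pagealloc_enabled())
		printf("runtime check: DEBUG_PAGEALLOC\n");

	return 0;
}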

The patch does not touch the 1GB-page code, which already ignored
CONFIG_DEBUG_PAGEALLOC.  Do we need to fence this as well?
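
For illustration only, this standalone sketch models how the same runtime
guard could also fence the 1GB path.  The names (cpu_has_pse, cpu_has_gbpages,
direct_gbpages, PG_LEVEL_*) are simplified stand-ins for the kernel symbols,
and the 1GB check is a hypothetical answer to the question above, not part of
this patch:

#include <stdbool.h>
#include <stdio.h>

/* Simplified stand-ins for the kernel's page-table level constants. */
enum { PG_LEVEL_4K, PG_LEVEL_2M, PG_LEVEL_1G };

/* Pretend CPU/boot state; in the kernel this comes from CPUID and cmdline. */
static bool cpu_has_pse     = true;	/* 2MB pages supported */
static bool cpu_has_gbpages = true;	/* 1GB pages supported */
static bool direct_gbpages  = true;	/* GB mappings requested */
static bool debug_pagealloc = true;	/* stand-in for debug_pagealloc_enabled() */

static unsigned int page_size_mask;

static void probe_page_size_mask_sketch(void)
{
	/*
	 * Mirrors the patched 2MB check: only use large pages when
	 * page-alloc debugging is off, so cpa() never has to split
	 * large pages into small ones in interrupt context.
	 */
	if (cpu_has_pse && !debug_pagealloc)
		page_size_mask |= 1 << PG_LEVEL_2M;

	/*
	 * Hypothetical 1GB fencing; the kernel code this patch leaves
	 * alone does NOT carry the !debug_pagealloc condition.
	 */
	if (direct_gbpages && cpu_has_gbpages && !debug_pagealloc)
		page_size_mask |= 1 << PG_LEVEL_1G;
}

int main(void)
{
	probe_page_size_mask_sketch();
	printf("2M pages: %s, 1G pages: %s\n",
	       (page_size_mask & (1 << PG_LEVEL_2M)) ? "yes" : "no",
	       (page_size_mask & (1 << PG_LEVEL_1G)) ? "yes" : "no");
	return 0;
}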

Signed-off-by: Christian Borntraeger <borntraeger@de.ibm.com>
Reviewed-by: Thomas Gleixner <tglx@linutronix.de>
Acked-by: David Rientjes <rientjes@google.com>
Cc: Laura Abbott <labbott@fedoraproject.org>
Cc: Joonsoo Kim <iamjoonsoo.kim@lge.com>
Cc: Heiko Carstens <heiko.carstens@de.ibm.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
arch/x86/kernel/dumpstack.c
arch/x86/mm/init.c
arch/x86/mm/pageattr.c

diff --git a/arch/x86/kernel/dumpstack.c b/arch/x86/kernel/dumpstack.c
index 9c30acfadae24757cca11f00513d089e488da78d..32e5699eadfee52043a3028e2040a00ea2cc3375 100644
--- a/arch/x86/kernel/dumpstack.c
+++ b/arch/x86/kernel/dumpstack.c
@@ -265,9 +265,8 @@ int __die(const char *str, struct pt_regs *regs, long err)
 #ifdef CONFIG_SMP
        printk("SMP ");
 #endif
-#ifdef CONFIG_DEBUG_PAGEALLOC
-       printk("DEBUG_PAGEALLOC ");
-#endif
+       if (debug_pagealloc_enabled())
+               printk("DEBUG_PAGEALLOC ");
 #ifdef CONFIG_KASAN
        printk("KASAN");
 #endif
diff --git a/arch/x86/mm/init.c b/arch/x86/mm/init.c
index 493f54172b4a5c90b1596708f1d5cb2a0f15156c..39823fd91396942f61de855598a7d5b8c77bedf8 100644
--- a/arch/x86/mm/init.c
+++ b/arch/x86/mm/init.c
@@ -150,13 +150,14 @@ static int page_size_mask;
 
 static void __init probe_page_size_mask(void)
 {
-#if !defined(CONFIG_DEBUG_PAGEALLOC) && !defined(CONFIG_KMEMCHECK)
+#if !defined(CONFIG_KMEMCHECK)
        /*
-        * For CONFIG_DEBUG_PAGEALLOC, identity mapping will use small pages.
+        * For CONFIG_KMEMCHECK or pagealloc debugging, identity mapping will
+        * use small pages.
         * This will simplify cpa(), which otherwise needs to support splitting
         * large pages into small in interrupt context, etc.
         */
-       if (cpu_has_pse)
+       if (cpu_has_pse && !debug_pagealloc_enabled())
                page_size_mask |= 1 << PG_LEVEL_2M;
 #endif
 
diff --git a/arch/x86/mm/pageattr.c b/arch/x86/mm/pageattr.c
index 1c37e650acacff422460b1d9b705a867e01a09a5..e64a4703f8b12ef3b602dc8131ac52d8c6c7f8e1 100644
--- a/arch/x86/mm/pageattr.c
+++ b/arch/x86/mm/pageattr.c
@@ -106,12 +106,6 @@ static inline unsigned long highmap_end_pfn(void)
 
 #endif
 
-#ifdef CONFIG_DEBUG_PAGEALLOC
-# define debug_pagealloc 1
-#else
-# define debug_pagealloc 0
-#endif
-
 static inline int
 within(unsigned long addr, unsigned long start, unsigned long end)
 {
@@ -714,10 +708,10 @@ static int split_large_page(struct cpa_data *cpa, pte_t *kpte,
 {
        struct page *base;
 
-       if (!debug_pagealloc)
+       if (!debug_pagealloc_enabled())
                spin_unlock(&cpa_lock);
        base = alloc_pages(GFP_KERNEL | __GFP_NOTRACK, 0);
-       if (!debug_pagealloc)
+       if (!debug_pagealloc_enabled())
                spin_lock(&cpa_lock);
        if (!base)
                return -ENOMEM;
@@ -1337,10 +1331,10 @@ static int __change_page_attr_set_clr(struct cpa_data *cpa, int checkalias)
                if (cpa->flags & (CPA_ARRAY | CPA_PAGES_ARRAY))
                        cpa->numpages = 1;
 
-               if (!debug_pagealloc)
+               if (!debug_pagealloc_enabled())
                        spin_lock(&cpa_lock);
                ret = __change_page_attr(cpa, checkalias);
-               if (!debug_pagealloc)
+               if (!debug_pagealloc_enabled())
                        spin_unlock(&cpa_lock);
                if (ret)
                        return ret;