mm: defer kmemleak object creation of module_alloc()
authorKefeng Wang <wangkefeng.wang@huawei.com>
Fri, 14 Jan 2022 22:04:11 +0000 (14:04 -0800)
committerLinus Torvalds <torvalds@linux-foundation.org>
Sat, 15 Jan 2022 14:30:25 +0000 (16:30 +0200)
Yongqiang reports a kmemleak panic on module insmod/rmmod with KASAN
enabled (without KASAN_VMALLOC) on x86 [1].

When the module area allocates memory, its kmemleak object is created
successfully, but the KASAN shadow memory for the module allocation is
not yet ready, so when kmemleak scans the module's pointers it panics
because the KASAN check finds no shadow memory.

  module_alloc
    __vmalloc_node_range
      kmemleak_vmalloc
                                kmemleak_scan
                                  update_checksum
    kasan_module_alloc
      kmemleak_ignore

Note, there is no problem if KASAN_VMALLOC is enabled, because the
module area's entire shadow memory is preallocated.  Thus, the bug only
exists on architectures which support dynamic allocation of the module
area per module load; for now, only x86/arm64/s390 are involved.

Add a VM_DEFER_KMEMLEAK flag and defer the kmemleak registration of the
vmalloc'ed object in module_alloc() to fix this issue.

[1] https://lore.kernel.org/all/6d41e2b9-4692-5ec4-b1cd-cbe29ae89739@huawei.com/

[wangkefeng.wang@huawei.com: fix build]
Link: https://lkml.kernel.org/r/20211125080307.27225-1-wangkefeng.wang@huawei.com
[akpm@linux-foundation.org: simplify ifdefs, per Andrey]
Link: https://lkml.kernel.org/r/CA+fCnZcnwJHUQq34VuRxpdoY6_XbJCDJ-jopksS5Eia4PijPzw@mail.gmail.com
Link: https://lkml.kernel.org/r/20211124142034.192078-1-wangkefeng.wang@huawei.com
Fixes: 793213a82de4 ("s390/kasan: dynamic shadow mem allocation for modules")
Fixes: 39d114ddc682 ("arm64: add KASAN support")
Fixes: bebf56a1b176 ("kasan: enable instrumentation of global variables")
Signed-off-by: Kefeng Wang <wangkefeng.wang@huawei.com>
Reported-by: Yongqiang Liu <liuyongqiang13@huawei.com>
Cc: Andrey Konovalov <andreyknvl@gmail.com>
Cc: Andrey Ryabinin <ryabinin.a.a@gmail.com>
Cc: Dmitry Vyukov <dvyukov@google.com>
Cc: Catalin Marinas <catalin.marinas@arm.com>
Cc: Will Deacon <will@kernel.org>
Cc: Heiko Carstens <hca@linux.ibm.com>
Cc: Vasily Gorbik <gor@linux.ibm.com>
Cc: Christian Borntraeger <borntraeger@linux.ibm.com>
Cc: Alexander Gordeev <agordeev@linux.ibm.com>
Cc: Thomas Gleixner <tglx@linutronix.de>
Cc: Ingo Molnar <mingo@redhat.com>
Cc: Borislav Petkov <bp@alien8.de>
Cc: Dave Hansen <dave.hansen@linux.intel.com>
Cc: Alexander Potapenko <glider@google.com>
Cc: Kefeng Wang <wangkefeng.wang@huawei.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
arch/arm64/kernel/module.c
arch/s390/kernel/module.c
arch/x86/kernel/module.c
include/linux/kasan.h
include/linux/vmalloc.h
mm/kasan/shadow.c
mm/vmalloc.c

index b5ec010c481f37c87c5d977a870a955ca86377af..309a27553c8759fc133cac344472eaf1e0dfbac2 100644 (file)
@@ -36,7 +36,7 @@ void *module_alloc(unsigned long size)
                module_alloc_end = MODULES_END;
 
        p = __vmalloc_node_range(size, MODULE_ALIGN, module_alloc_base,
-                               module_alloc_end, gfp_mask, PAGE_KERNEL, 0,
+                               module_alloc_end, gfp_mask, PAGE_KERNEL, VM_DEFER_KMEMLEAK,
                                NUMA_NO_NODE, __builtin_return_address(0));
 
        if (!p && IS_ENABLED(CONFIG_ARM64_MODULE_PLTS) &&
@@ -58,7 +58,7 @@ void *module_alloc(unsigned long size)
                                PAGE_KERNEL, 0, NUMA_NO_NODE,
                                __builtin_return_address(0));
 
-       if (p && (kasan_module_alloc(p, size) < 0)) {
+       if (p && (kasan_module_alloc(p, size, gfp_mask) < 0)) {
                vfree(p);
                return NULL;
        }
index b01ba460b7cad8e3e8164df891bb94ee241d72f0..d52d85367bf73013230e576f17253f8c96dad6ec 100644 (file)
 
 void *module_alloc(unsigned long size)
 {
+       gfp_t gfp_mask = GFP_KERNEL;
        void *p;
 
        if (PAGE_ALIGN(size) > MODULES_LEN)
                return NULL;
        p = __vmalloc_node_range(size, MODULE_ALIGN, MODULES_VADDR, MODULES_END,
-                                GFP_KERNEL, PAGE_KERNEL_EXEC, 0, NUMA_NO_NODE,
+                                gfp_mask, PAGE_KERNEL_EXEC, VM_DEFER_KMEMLEAK, NUMA_NO_NODE,
                                 __builtin_return_address(0));
-       if (p && (kasan_module_alloc(p, size) < 0)) {
+       if (p && (kasan_module_alloc(p, size, gfp_mask) < 0)) {
                vfree(p);
                return NULL;
        }
index 169fb6f4cd2eeef3f097a57be11a97995cd0acb5..95fa745e310a5fdc97d8fdf2b803d2067c99472e 100644 (file)
@@ -67,6 +67,7 @@ static unsigned long int get_module_load_offset(void)
 
 void *module_alloc(unsigned long size)
 {
+       gfp_t gfp_mask = GFP_KERNEL;
        void *p;
 
        if (PAGE_ALIGN(size) > MODULES_LEN)
@@ -74,10 +75,10 @@ void *module_alloc(unsigned long size)
 
        p = __vmalloc_node_range(size, MODULE_ALIGN,
                                    MODULES_VADDR + get_module_load_offset(),
-                                   MODULES_END, GFP_KERNEL,
-                                   PAGE_KERNEL, 0, NUMA_NO_NODE,
+                                   MODULES_END, gfp_mask,
+                                   PAGE_KERNEL, VM_DEFER_KMEMLEAK, NUMA_NO_NODE,
                                    __builtin_return_address(0));
-       if (p && (kasan_module_alloc(p, size) < 0)) {
+       if (p && (kasan_module_alloc(p, size, gfp_mask) < 0)) {
                vfree(p);
                return NULL;
        }
index d8783b68266957a5ddde5f0d2fb28bbcec355a2e..89c99e5e67de57235b7aebd54a4ab8824d4b9ec7 100644 (file)
@@ -474,12 +474,12 @@ static inline void kasan_populate_early_vm_area_shadow(void *start,
  * allocations with real shadow memory. With KASAN vmalloc, the special
  * case is unnecessary, as the work is handled in the generic case.
  */
-int kasan_module_alloc(void *addr, size_t size);
+int kasan_module_alloc(void *addr, size_t size, gfp_t gfp_mask);
 void kasan_free_shadow(const struct vm_struct *vm);
 
 #else /* (CONFIG_KASAN_GENERIC || CONFIG_KASAN_SW_TAGS) && !CONFIG_KASAN_VMALLOC */
 
-static inline int kasan_module_alloc(void *addr, size_t size) { return 0; }
+static inline int kasan_module_alloc(void *addr, size_t size, gfp_t gfp_mask) { return 0; }
 static inline void kasan_free_shadow(const struct vm_struct *vm) {}
 
 #endif /* (CONFIG_KASAN_GENERIC || CONFIG_KASAN_SW_TAGS) && !CONFIG_KASAN_VMALLOC */
index 6e022cc712e611635cd12720eb49ac90cf7f5d00..880227b9f04405c8e8427fdbace2f0efb94704f8 100644 (file)
@@ -28,6 +28,13 @@ struct notifier_block;               /* in notifier.h */
 #define VM_MAP_PUT_PAGES       0x00000200      /* put pages and free array in vfree */
 #define VM_NO_HUGE_VMAP                0x00000400      /* force PAGE_SIZE pte mapping */
 
+#if (defined(CONFIG_KASAN_GENERIC) || defined(CONFIG_KASAN_SW_TAGS)) && \
+       !defined(CONFIG_KASAN_VMALLOC)
+#define VM_DEFER_KMEMLEAK      0x00000800      /* defer kmemleak object creation */
+#else
+#define VM_DEFER_KMEMLEAK      0
+#endif
+
 /*
  * VM_KASAN is used slightly differently depending on CONFIG_KASAN_VMALLOC.
  *
index 4a4929b29a237f7fd3617817491d173ed7d205b0..94136f84b449772222be4cda45d9b44aeae6880d 100644 (file)
@@ -498,7 +498,7 @@ void kasan_release_vmalloc(unsigned long start, unsigned long end,
 
 #else /* CONFIG_KASAN_VMALLOC */
 
-int kasan_module_alloc(void *addr, size_t size)
+int kasan_module_alloc(void *addr, size_t size, gfp_t gfp_mask)
 {
        void *ret;
        size_t scaled_size;
@@ -520,9 +520,14 @@ int kasan_module_alloc(void *addr, size_t size)
                        __builtin_return_address(0));
 
        if (ret) {
+               struct vm_struct *vm = find_vm_area(addr);
                __memset(ret, KASAN_SHADOW_INIT, shadow_size);
-               find_vm_area(addr)->flags |= VM_KASAN;
+               vm->flags |= VM_KASAN;
                kmemleak_ignore(ret);
+
+               if (vm->flags & VM_DEFER_KMEMLEAK)
+                       kmemleak_vmalloc(vm, size, gfp_mask);
+
                return 0;
        }
 
index d2a00ad4e1dd155eb474c797e060f6b85f3d37d4..bf3c2fe8f5285a5aae3cca14bb0d8a6c813d2885 100644 (file)
@@ -3074,7 +3074,8 @@ again:
        clear_vm_uninitialized_flag(area);
 
        size = PAGE_ALIGN(size);
-       kmemleak_vmalloc(area, size, gfp_mask);
+       if (!(vm_flags & VM_DEFER_KMEMLEAK))
+               kmemleak_vmalloc(area, size, gfp_mask);
 
        return addr;