mm/slub: avoid recursive loop with kmemleak
Author: Kees Cook <keescook@chromium.org>
Thu, 25 Apr 2024 20:55:23 +0000 (13:55 -0700)
Committer: Andrew Morton <akpm@linux-foundation.org>
Fri, 26 Apr 2024 03:55:59 +0000 (20:55 -0700)
The system will immediately fill up the stack and crash when both
CONFIG_DEBUG_KMEMLEAK and CONFIG_MEM_ALLOC_PROFILING are enabled.  Avoid
allocation tagging of kmemleak caches, otherwise recursive allocation
tracking occurs.

Link: https://lkml.kernel.org/r/20240425205516.work.220-kees@kernel.org
Fixes: 279bb991b4d9 ("mm/slab: add allocation accounting into slab allocation and free paths")
Signed-off-by: Kees Cook <keescook@chromium.org>
Cc: Catalin Marinas <catalin.marinas@arm.com>
Cc: Christoph Lameter <cl@linux.com>
Cc: David Rientjes <rientjes@google.com>
Cc: Hyeonggon Yoo <42.hyeyoo@gmail.com>
Cc: Joonsoo Kim <iamjoonsoo.kim@lge.com>
Cc: Kent Overstreet <kent.overstreet@linux.dev>
Cc: Pekka Enberg <penberg@kernel.org>
Cc: Roman Gushchin <roman.gushchin@linux.dev>
Cc: Suren Baghdasaryan <surenb@google.com>
Cc: Vlastimil Babka <vbabka@suse.cz>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
mm/kmemleak.c
mm/slub.c

index 6a540c2b27c52457856cbb862e522cef4e857eab..d2037073ed9197df09a17b05f7dbc8eefe0f103f 100644 (file)
@@ -463,7 +463,7 @@ static struct kmemleak_object *mem_pool_alloc(gfp_t gfp)
 
        /* try the slab allocator first */
        if (object_cache) {
-               object = kmem_cache_alloc(object_cache, gfp_kmemleak_mask(gfp));
+               object = kmem_cache_alloc_noprof(object_cache, gfp_kmemleak_mask(gfp));
                if (object)
                        return object;
        }
@@ -947,7 +947,7 @@ static void add_scan_area(unsigned long ptr, size_t size, gfp_t gfp)
        untagged_objp = (unsigned long)kasan_reset_tag((void *)object->pointer);
 
        if (scan_area_cache)
-               area = kmem_cache_alloc(scan_area_cache, gfp_kmemleak_mask(gfp));
+               area = kmem_cache_alloc_noprof(scan_area_cache, gfp_kmemleak_mask(gfp));
 
        raw_spin_lock_irqsave(&object->lock, flags);
        if (!area) {
index be047279c9e9740ea0551c4b3befb59ec8921ed4..6437746be03d8e76bae9898752d8fa29fba552a6 100644 (file)
--- a/mm/slub.c
+++ b/mm/slub.c
@@ -2018,7 +2018,7 @@ prepare_slab_obj_exts_hook(struct kmem_cache *s, gfp_t flags, void *p)
        if (!p)
                return NULL;
 
-       if (s->flags & SLAB_NO_OBJ_EXT)
+       if (s->flags & (SLAB_NO_OBJ_EXT | SLAB_NOLEAKTRACE))
                return NULL;
 
        if (flags & __GFP_NO_OBJ_EXT)