X-Git-Url: https://git.kernel.dk/?a=blobdiff_plain;f=mm%2Fslub.c;h=e3629cd7aff1640854ffa30b50ac012f8c78ff68;hb=b29940c1abd7a4c3abeb926df0a5ec84d6902d47;hp=8da34a8af53d58754ff4a394f26f6f252cbafbe0;hpb=cafa017dc5df8abc38308fedae3f1bd7ff67802d;p=linux-2.6-block.git

diff --git a/mm/slub.c b/mm/slub.c
index 8da34a8af53d..e3629cd7aff1 100644
--- a/mm/slub.c
+++ b/mm/slub.c
@@ -1276,16 +1276,54 @@ out:
 
 __setup("slub_debug", setup_slub_debug);
 
+/*
+ * kmem_cache_flags - apply debugging options to the cache
+ * @object_size:	the size of an object without meta data
+ * @flags:		flags to set
+ * @name:		name of the cache
+ * @ctor:		constructor function
+ *
+ * Debug option(s) are applied to @flags. In addition to the debug
+ * option(s), if a slab name (or multiple) is specified i.e.
+ * slub_debug=<Debug-Options>,<slab name1>,<slab name2> ...
+ * then only the select slabs will receive the debug option(s).
+ */
 slab_flags_t kmem_cache_flags(unsigned int object_size,
 	slab_flags_t flags, const char *name,
 	void (*ctor)(void *))
 {
-	/*
-	 * Enable debugging if selected on the kernel commandline.
-	 */
-	if (slub_debug && (!slub_debug_slabs || (name &&
-		!strncmp(slub_debug_slabs, name, strlen(slub_debug_slabs)))))
-		flags |= slub_debug;
+	char *iter;
+	size_t len;
+
+	/* If slub_debug = 0, it folds into the if conditional. */
+	if (!slub_debug_slabs)
+		return flags | slub_debug;
+
+	len = strlen(name);
+	iter = slub_debug_slabs;
+	while (*iter) {
+		char *end, *glob;
+		size_t cmplen;
+
+		end = strchr(iter, ',');
+		if (!end)
+			end = iter + strlen(iter);
+
+		glob = strnchr(iter, end - iter, '*');
+		if (glob)
+			cmplen = glob - iter;
+		else
+			cmplen = max_t(size_t, len, (end - iter));
+
+		if (!strncmp(name, iter, cmplen)) {
+			flags |= slub_debug;
+			break;
+		}
+
+		if (!*end)
+			break;
+		iter = end + 1;
+	}
 
 	return flags;
 }
@@ -3621,9 +3659,7 @@ static void list_slab_objects(struct kmem_cache *s, struct page *page,
 #ifdef CONFIG_SLUB_DEBUG
 	void *addr = page_address(page);
 	void *p;
-	unsigned long *map = kcalloc(BITS_TO_LONGS(page->objects),
-				     sizeof(long),
-				     GFP_ATOMIC);
+	unsigned long *map = bitmap_zalloc(page->objects, GFP_ATOMIC);
 	if (!map)
 		return;
 	slab_err(s, page, text, s->name);
@@ -3638,7 +3674,7 @@ static void list_slab_objects(struct kmem_cache *s, struct page *page,
 		}
 	}
 	slab_unlock(page);
-	kfree(map);
+	bitmap_free(map);
 #endif
 }
 
@@ -4411,10 +4447,8 @@ static long validate_slab_cache(struct kmem_cache *s)
 {
 	int node;
 	unsigned long count = 0;
-	unsigned long *map = kmalloc_array(BITS_TO_LONGS(oo_objects(s->max)),
-					   sizeof(unsigned long),
-					   GFP_KERNEL);
 	struct kmem_cache_node *n;
+	unsigned long *map = bitmap_alloc(oo_objects(s->max), GFP_KERNEL);
 
 	if (!map)
 		return -ENOMEM;
@@ -4422,7 +4456,7 @@ static long validate_slab_cache(struct kmem_cache *s)
 	flush_all(s);
 	for_each_kmem_cache_node(s, node, n)
 		count += validate_slab_node(s, n, map);
-	kfree(map);
+	bitmap_free(map);
 	return count;
 }
 /*
@@ -4573,14 +4607,12 @@ static int list_locations(struct kmem_cache *s, char *buf,
 	unsigned long i;
 	struct loc_track t = { 0, 0, NULL };
 	int node;
-	unsigned long *map = kmalloc_array(BITS_TO_LONGS(oo_objects(s->max)),
-					   sizeof(unsigned long),
-					   GFP_KERNEL);
 	struct kmem_cache_node *n;
+	unsigned long *map = bitmap_alloc(oo_objects(s->max), GFP_KERNEL);
 
 	if (!map || !alloc_loc_track(&t, PAGE_SIZE / sizeof(struct location),
 				     GFP_KERNEL)) {
-		kfree(map);
+		bitmap_free(map);
 		return sprintf(buf, "Out of memory\n");
 	}
 	/* Push back cpu slabs */
@@ -4646,7 +4678,7 @@ static int list_locations(struct kmem_cache *s, char *buf,
 	}
 
 	free_loc_track(&t);
-	kfree(map);
+	bitmap_free(map);
 	if (!t.count)
 		len += sprintf(buf, "No data\n");
 	return len;
@@ -4657,6 +4689,7 @@ static int list_locations(struct kmem_cache *s, char *buf,
 static void __init resiliency_test(void)
 {
 	u8 *p;
+	int type = KMALLOC_NORMAL;
 
 	BUILD_BUG_ON(KMALLOC_MIN_SIZE > 16 || KMALLOC_SHIFT_HIGH < 10);
 
@@ -4669,7 +4702,7 @@ static void __init resiliency_test(void)
 	pr_err("\n1. kmalloc-16: Clobber Redzone/next pointer 0x12->0x%p\n\n",
 	       p + 16);
 
-	validate_slab_cache(kmalloc_caches[4]);
+	validate_slab_cache(kmalloc_caches[type][4]);
 
 	/* Hmmm... The next two are dangerous */
 	p = kzalloc(32, GFP_KERNEL);
@@ -4678,33 +4711,33 @@ static void __init resiliency_test(void)
 	       p);
 	pr_err("If allocated object is overwritten then not detectable\n\n");
-	validate_slab_cache(kmalloc_caches[5]);
+	validate_slab_cache(kmalloc_caches[type][5]);
 
 	p = kzalloc(64, GFP_KERNEL);
 	p += 64 + (get_cycles() & 0xff) * sizeof(void *);
 	*p = 0x56;
 	pr_err("\n3. kmalloc-64: corrupting random byte 0x56->0x%p\n",
 	       p);
 	pr_err("If allocated object is overwritten then not detectable\n\n");
-	validate_slab_cache(kmalloc_caches[6]);
+	validate_slab_cache(kmalloc_caches[type][6]);
 
 	pr_err("\nB. Corruption after free\n");
 	p = kzalloc(128, GFP_KERNEL);
 	kfree(p);
 	*p = 0x78;
 	pr_err("1. kmalloc-128: Clobber first word 0x78->0x%p\n\n", p);
-	validate_slab_cache(kmalloc_caches[7]);
+	validate_slab_cache(kmalloc_caches[type][7]);
 
 	p = kzalloc(256, GFP_KERNEL);
 	kfree(p);
 	p[50] = 0x9a;
 	pr_err("\n2. kmalloc-256: Clobber 50th byte 0x9a->0x%p\n\n", p);
-	validate_slab_cache(kmalloc_caches[8]);
+	validate_slab_cache(kmalloc_caches[type][8]);
 
 	p = kzalloc(512, GFP_KERNEL);
 	kfree(p);
 	p[512] = 0xab;
 	pr_err("\n3. kmalloc-512: Clobber redzone 0xab->0x%p\n\n", p);
-	validate_slab_cache(kmalloc_caches[9]);
+	validate_slab_cache(kmalloc_caches[type][9]);
 }
 #else
 #ifdef CONFIG_SYSFS
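
Editorial sketch, not part of the patch above: the first hunk teaches
kmem_cache_flags() to match cache names against the comma-separated list in
slub_debug=<Debug-Options>,<slab name1>,<slab name2>, where a trailing '*'
in an entry acts as a prefix glob. Below is a minimal userspace rendering
of that matching loop, for illustration only; strnchr() and max_t() are
kernel helpers, so local stand-ins are used here.

#include <stdio.h>
#include <string.h>

/* Local stand-in for the kernel's strnchr(): find c within the first
 * count bytes of s. */
static const char *my_strnchr(const char *s, size_t count, int c)
{
	for (; count-- && *s != '\0'; s++)
		if (*s == (char)c)
			return s;
	return NULL;
}

/* Stand-in for the kernel's max_t(size_t, a, b). */
#define MAX_SZ(a, b) ((a) > (b) ? (a) : (b))

/* Return 1 if name matches one entry of the comma-separated list,
 * mirroring the loop in the new kmem_cache_flags(). */
static int slab_name_matches(const char *list, const char *name)
{
	size_t len = strlen(name);
	const char *iter = list;

	while (*iter) {
		const char *end, *glob;
		size_t cmplen;

		end = strchr(iter, ',');
		if (!end)
			end = iter + strlen(iter);

		/* A '*' truncates the significant part of the entry. */
		glob = my_strnchr(iter, (size_t)(end - iter), '*');
		if (glob)
			cmplen = (size_t)(glob - iter);
		else
			cmplen = MAX_SZ(len, (size_t)(end - iter));

		if (!strncmp(name, iter, cmplen))
			return 1;

		if (!*end)
			break;
		iter = end + 1;
	}
	return 0;
}

int main(void)
{
	const char *list = "dentry,kmalloc-*";

	printf("%d\n", slab_name_matches(list, "kmalloc-128")); /* 1 */
	printf("%d\n", slab_name_matches(list, "dentry"));      /* 1 */
	printf("%d\n", slab_name_matches(list, "inode_cache")); /* 0 */
	return 0;
}

Taking cmplen as the larger of the two lengths for non-glob entries forces
a whole-string comparison, so "kmalloc-1" cannot accidentally match
"kmalloc-16" by prefix.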
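Editorial sketch, not part of the patch above: the bitmap_zalloc() /
bitmap_alloc() / bitmap_free() hunks are a mechanical conversion.
bitmap_zalloc(nbits, flags) allocates the same zeroed BITS_TO_LONGS(nbits)
unsigned longs that the removed kcalloc() call computed by hand,
bitmap_alloc() is the non-zeroing variant matching kmalloc_array(), and
bitmap_free() pairs with both where kfree() was used. A userspace sketch of
the size arithmetic, with BITS_TO_LONGS() re-derived locally:

#include <limits.h>
#include <stdio.h>
#include <stdlib.h>

/* Userspace rendering of the kernel's BITS_TO_LONGS(): round a bit
 * count up to a whole number of unsigned longs. */
#define BITS_PER_LONG (CHAR_BIT * sizeof(unsigned long))
#define BITS_TO_LONGS(nbits) (((nbits) + BITS_PER_LONG - 1) / BITS_PER_LONG)

int main(void)
{
	size_t nbits = 150;	/* e.g. page->objects */

	/* What the old code allocated explicitly... */
	unsigned long *map = calloc(BITS_TO_LONGS(nbits),
				    sizeof(unsigned long));
	if (!map)
		return 1;

	/* ...is what bitmap_zalloc(nbits, GFP_ATOMIC) hands back in the
	 * kernel: enough zeroed longs to hold nbits bits. */
	printf("%zu bits -> %zu longs (%zu bytes)\n", nbits,
	       BITS_TO_LONGS(nbits),
	       BITS_TO_LONGS(nbits) * sizeof(unsigned long));

	free(map);	/* kernel: bitmap_free(map) */
	return 0;
}

Besides stating intent, the conversion also removes a nominal element-type
mismatch: one removed call sized elements as sizeof(long) although the map
is used as unsigned long *, which bitmap_zalloc() avoids by construction.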