mm/slab: do not change cache size if debug pagealloc isn't possible
index 9b56685fb79b3b3f0446af6dab72734817d39ece..21aad9d518a76337eb890e1cccd50dd2d9dd9adc 100644
--- a/mm/slab.c
+++ b/mm/slab.c
@@ -2206,10 +2206,17 @@ __kmem_cache_create (struct kmem_cache *cachep, unsigned long flags)
         */
        if (debug_pagealloc_enabled() && (flags & SLAB_POISON) &&
                !slab_early_init && size >= kmalloc_size(INDEX_NODE) &&
-               size >= 256 && cachep->object_size > cache_line_size() &&
-               size < PAGE_SIZE) {
-               cachep->obj_offset += PAGE_SIZE - size;
-               size = PAGE_SIZE;
+               size >= 256 && cachep->object_size > cache_line_size()) {
+               if (size < PAGE_SIZE || size % PAGE_SIZE == 0) {
+                       size_t tmp_size = ALIGN(size, PAGE_SIZE);
+
+                       if (set_off_slab_cache(cachep, tmp_size, flags)) {
+                               flags |= CFLGS_OFF_SLAB;
+                               cachep->obj_offset += tmp_size - size;
+                               size = tmp_size;
+                               goto done;
+                       }
+               }
        }
 #endif
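
For readers skimming the hunk, the key arithmetic is ALIGN(size, PAGE_SIZE) plus the obj_offset adjustment: the object size is padded to a whole number of pages, but only for sizes the new guard accepts (below one page, or already an exact page multiple), so the padding never strands a partial page. Below is a minimal userspace sketch, not kernel code: the PAGE_SIZE value, the re-derived ALIGN() macro, and the example sizes are all assumptions standing in for the kernel's definitions. It only shows which sizes pass the guard and how far obj_offset would move.

        /* Userspace sketch of the size rounding in this hunk.
         * PAGE_SIZE and ALIGN() are stand-ins for the kernel's own
         * definitions; the sizes below are arbitrary examples.
         */
        #include <stdio.h>
        #include <stddef.h>

        #define PAGE_SIZE 4096UL
        /* Round x up to a multiple of a; valid for power-of-two a,
         * same trick as the kernel's ALIGN(). */
        #define ALIGN(x, a) (((x) + ((a) - 1)) & ~((a) - 1))

        int main(void)
        {
                size_t sizes[] = { 300, 3000, 4096, 5000, 8192 };

                for (size_t i = 0; i < sizeof(sizes) / sizeof(sizes[0]); i++) {
                        size_t size = sizes[i];
                        /* Mirrors the patch's guard: only sizes below one
                         * page or exact page multiples are candidates. */
                        int candidate = size < PAGE_SIZE || size % PAGE_SIZE == 0;
                        size_t tmp_size = ALIGN(size, PAGE_SIZE);

                        printf("size=%5zu candidate=%d aligned=%5zu obj_offset+=%zu\n",
                               size, candidate, tmp_size, tmp_size - size);
                }
                return 0;
        }

Note the ordering in the hunk itself: CFLGS_OFF_SLAB is set and size is grown only after set_off_slab_cache() accepts the enlarged size, so a cache that cannot go off-slab keeps its original geometry, which is what the subject line promises.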