slab: remove /proc/slab_allocators
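
For context, the interface removed here was a plain seq_file under /proc, registered by the CONFIG_DEBUG_SLAB_LEAK code that the hunks below delete. A minimal, hypothetical userspace sketch of dumping it (this reader is not part of the patch, and it only works on kernels built with CONFIG_DEBUG_SLAB_LEAK):

	#include <stdio.h>
	#include <stdlib.h>

	int main(void)
	{
		FILE *f = fopen("/proc/slab_allocators", "r");
		char line[256];

		if (!f) {
			perror("/proc/slab_allocators");
			return EXIT_FAILURE;
		}
		/*
		 * Each line was "<cache>: <count> <symbol>+<off>/<size> [module]",
		 * as produced by leaks_show()/show_symbol() in the removed code.
		 */
		while (fgets(line, sizeof(line), f))
			fputs(line, stdout);
		fclose(f);
		return EXIT_SUCCESS;
	}
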
diff --git a/mm/slab.c b/mm/slab.c
index 284ab737faee01f1aa3d3178fa123ff08f8f27bf..f7117ad9b3a34ddf3ce6cc12689eef6ebd155763 100644
--- a/mm/slab.c
+++ b/mm/slab.c
@@ -362,29 +362,6 @@ static void **dbg_userword(struct kmem_cache *cachep, void *objp)
 
 #endif
 
-#ifdef CONFIG_DEBUG_SLAB_LEAK
-
-static inline bool is_store_user_clean(struct kmem_cache *cachep)
-{
-       return atomic_read(&cachep->store_user_clean) == 1;
-}
-
-static inline void set_store_user_clean(struct kmem_cache *cachep)
-{
-       atomic_set(&cachep->store_user_clean, 1);
-}
-
-static inline void set_store_user_dirty(struct kmem_cache *cachep)
-{
-       if (is_store_user_clean(cachep))
-               atomic_set(&cachep->store_user_clean, 0);
-}
-
-#else
-static inline void set_store_user_dirty(struct kmem_cache *cachep) {}
-
-#endif
-
 /*
  * Do not go above this order unless 0 objects fit into the slab or
  * overridden on the command line.
@@ -990,10 +967,8 @@ static void cpuup_canceled(long cpu)
 
                /* cpu is dead; no one can alloc from it. */
                nc = per_cpu_ptr(cachep->cpu_cache, cpu);
-               if (nc) {
-                       free_block(cachep, nc->entry, nc->avail, node, &list);
-                       nc->avail = 0;
-               }
+               free_block(cachep, nc->entry, nc->avail, node, &list);
+               nc->avail = 0;
 
                if (!cpumask_empty(mask)) {
                        spin_unlock_irq(&n->list_lock);
@@ -1674,8 +1649,8 @@ static void slabs_destroy(struct kmem_cache *cachep, struct list_head *list)
 {
        struct page *page, *n;
 
-       list_for_each_entry_safe(page, n, list, lru) {
-               list_del(&page->lru);
+       list_for_each_entry_safe(page, n, list, slab_list) {
+               list_del(&page->slab_list);
                slab_destroy(cachep, page);
        }
 }
@@ -2231,8 +2206,8 @@ static int drain_freelist(struct kmem_cache *cache,
                        goto out;
                }
 
-               page = list_entry(p, struct page, lru);
-               list_del(&page->lru);
+               page = list_entry(p, struct page, slab_list);
+               list_del(&page->slab_list);
                n->free_slabs--;
                n->total_slabs--;
                /*
@@ -2554,11 +2529,6 @@ static void *slab_get_obj(struct kmem_cache *cachep, struct page *page)
        objp = index_to_obj(cachep, page, get_free_obj(page, page->active));
        page->active++;
 
-#if DEBUG
-       if (cachep->flags & SLAB_STORE_USER)
-               set_store_user_dirty(cachep);
-#endif
-
        return objp;
 }
 
@@ -2691,13 +2661,13 @@ static void cache_grow_end(struct kmem_cache *cachep, struct page *page)
        if (!page)
                return;
 
-       INIT_LIST_HEAD(&page->lru);
+       INIT_LIST_HEAD(&page->slab_list);
        n = get_node(cachep, page_to_nid(page));
 
        spin_lock(&n->list_lock);
        n->total_slabs++;
        if (!page->active) {
-               list_add_tail(&page->lru, &(n->slabs_free));
+               list_add_tail(&page->slab_list, &n->slabs_free);
                n->free_slabs++;
        } else
                fixup_slab_list(cachep, n, page, &list);
@@ -2764,10 +2734,8 @@ static void *cache_free_debugcheck(struct kmem_cache *cachep, void *objp,
                *dbg_redzone1(cachep, objp) = RED_INACTIVE;
                *dbg_redzone2(cachep, objp) = RED_INACTIVE;
        }
-       if (cachep->flags & SLAB_STORE_USER) {
-               set_store_user_dirty(cachep);
+       if (cachep->flags & SLAB_STORE_USER)
                *dbg_userword(cachep, objp) = (void *)caller;
-       }
 
        objnr = obj_to_index(cachep, page, objp);
 
@@ -2806,9 +2774,9 @@ static inline void fixup_slab_list(struct kmem_cache *cachep,
                                void **list)
 {
        /* move slabp to correct slabp list: */
-       list_del(&page->lru);
+       list_del(&page->slab_list);
        if (page->active == cachep->num) {
-               list_add(&page->lru, &n->slabs_full);
+               list_add(&page->slab_list, &n->slabs_full);
                if (OBJFREELIST_SLAB(cachep)) {
 #if DEBUG
                        /* Poisoning will be done without holding the lock */
@@ -2822,7 +2790,7 @@ static inline void fixup_slab_list(struct kmem_cache *cachep,
                        page->freelist = NULL;
                }
        } else
-               list_add(&page->lru, &n->slabs_partial);
+               list_add(&page->slab_list, &n->slabs_partial);
 }
 
 /* Try to find non-pfmemalloc slab if needed */
@@ -2845,20 +2813,20 @@ static noinline struct page *get_valid_first_slab(struct kmem_cache_node *n,
        }
 
        /* Move pfmemalloc slab to the end of list to speed up next search */
-       list_del(&page->lru);
+       list_del(&page->slab_list);
        if (!page->active) {
-               list_add_tail(&page->lru, &n->slabs_free);
+               list_add_tail(&page->slab_list, &n->slabs_free);
                n->free_slabs++;
        } else
-               list_add_tail(&page->lru, &n->slabs_partial);
+               list_add_tail(&page->slab_list, &n->slabs_partial);
 
-       list_for_each_entry(page, &n->slabs_partial, lru) {
+       list_for_each_entry(page, &n->slabs_partial, slab_list) {
                if (!PageSlabPfmemalloc(page))
                        return page;
        }
 
        n->free_touched = 1;
-       list_for_each_entry(page, &n->slabs_free, lru) {
+       list_for_each_entry(page, &n->slabs_free, slab_list) {
                if (!PageSlabPfmemalloc(page)) {
                        n->free_slabs--;
                        return page;
@@ -2873,11 +2841,12 @@ static struct page *get_first_slab(struct kmem_cache_node *n, bool pfmemalloc)
        struct page *page;
 
        assert_spin_locked(&n->list_lock);
-       page = list_first_entry_or_null(&n->slabs_partial, struct page, lru);
+       page = list_first_entry_or_null(&n->slabs_partial, struct page,
+                                       slab_list);
        if (!page) {
                n->free_touched = 1;
                page = list_first_entry_or_null(&n->slabs_free, struct page,
-                                               lru);
+                                               slab_list);
                if (page)
                        n->free_slabs--;
        }
@@ -3378,29 +3347,29 @@ static void free_block(struct kmem_cache *cachep, void **objpp,
                objp = objpp[i];
 
                page = virt_to_head_page(objp);
-               list_del(&page->lru);
+               list_del(&page->slab_list);
                check_spinlock_acquired_node(cachep, node);
                slab_put_obj(cachep, page, objp);
                STATS_DEC_ACTIVE(cachep);
 
                /* fixup slab chains */
                if (page->active == 0) {
-                       list_add(&page->lru, &n->slabs_free);
+                       list_add(&page->slab_list, &n->slabs_free);
                        n->free_slabs++;
                } else {
                        /* Unconditionally move a slab to the end of the
                         * partial list on free - maximum time for the
                         * other objects to be freed, too.
                         */
-                       list_add_tail(&page->lru, &n->slabs_partial);
+                       list_add_tail(&page->slab_list, &n->slabs_partial);
                }
        }
 
        while (n->free_objects > n->free_limit && !list_empty(&n->slabs_free)) {
                n->free_objects -= cachep->num;
 
-               page = list_last_entry(&n->slabs_free, struct page, lru);
-               list_move(&page->lru, list);
+               page = list_last_entry(&n->slabs_free, struct page, slab_list);
+               list_move(&page->slab_list, list);
                n->free_slabs--;
                n->total_slabs--;
        }
@@ -3438,7 +3407,7 @@ free_done:
                int i = 0;
                struct page *page;
 
-               list_for_each_entry(page, &n->slabs_free, lru) {
+               list_for_each_entry(page, &n->slabs_free, slab_list) {
                        BUG_ON(page->active);
 
                        i++;
@@ -4185,196 +4154,6 @@ ssize_t slabinfo_write(struct file *file, const char __user *buffer,
        return res;
 }
 
-#ifdef CONFIG_DEBUG_SLAB_LEAK
-
-static inline int add_caller(unsigned long *n, unsigned long v)
-{
-       unsigned long *p;
-       int l;
-       if (!v)
-               return 1;
-       l = n[1];
-       p = n + 2;
-       while (l) {
-               int i = l/2;
-               unsigned long *q = p + 2 * i;
-               if (*q == v) {
-                       q[1]++;
-                       return 1;
-               }
-               if (*q > v) {
-                       l = i;
-               } else {
-                       p = q + 2;
-                       l -= i + 1;
-               }
-       }
-       if (++n[1] == n[0])
-               return 0;
-       memmove(p + 2, p, n[1] * 2 * sizeof(unsigned long) - ((void *)p - (void *)n));
-       p[0] = v;
-       p[1] = 1;
-       return 1;
-}
-
-static void handle_slab(unsigned long *n, struct kmem_cache *c,
-                                               struct page *page)
-{
-       void *p;
-       int i, j;
-       unsigned long v;
-
-       if (n[0] == n[1])
-               return;
-       for (i = 0, p = page->s_mem; i < c->num; i++, p += c->size) {
-               bool active = true;
-
-               for (j = page->active; j < c->num; j++) {
-                       if (get_free_obj(page, j) == i) {
-                               active = false;
-                               break;
-                       }
-               }
-
-               if (!active)
-                       continue;
-
-               /*
-                * probe_kernel_read() is used for DEBUG_PAGEALLOC. page table
-                * mapping is established when actual object allocation and
-                * we could mistakenly access the unmapped object in the cpu
-                * cache.
-                */
-               if (probe_kernel_read(&v, dbg_userword(c, p), sizeof(v)))
-                       continue;
-
-               if (!add_caller(n, v))
-                       return;
-       }
-}
-
-static void show_symbol(struct seq_file *m, unsigned long address)
-{
-#ifdef CONFIG_KALLSYMS
-       unsigned long offset, size;
-       char modname[MODULE_NAME_LEN], name[KSYM_NAME_LEN];
-
-       if (lookup_symbol_attrs(address, &size, &offset, modname, name) == 0) {
-               seq_printf(m, "%s+%#lx/%#lx", name, offset, size);
-               if (modname[0])
-                       seq_printf(m, " [%s]", modname);
-               return;
-       }
-#endif
-       seq_printf(m, "%px", (void *)address);
-}
-
-static int leaks_show(struct seq_file *m, void *p)
-{
-       struct kmem_cache *cachep = list_entry(p, struct kmem_cache,
-                                              root_caches_node);
-       struct page *page;
-       struct kmem_cache_node *n;
-       const char *name;
-       unsigned long *x = m->private;
-       int node;
-       int i;
-
-       if (!(cachep->flags & SLAB_STORE_USER))
-               return 0;
-       if (!(cachep->flags & SLAB_RED_ZONE))
-               return 0;
-
-       /*
-        * Set store_user_clean and start to grab stored user information
-        * for all objects on this cache. If some alloc/free requests comes
-        * during the processing, information would be wrong so restart
-        * whole processing.
-        */
-       do {
-               set_store_user_clean(cachep);
-               drain_cpu_caches(cachep);
-
-               x[1] = 0;
-
-               for_each_kmem_cache_node(cachep, node, n) {
-
-                       check_irq_on();
-                       spin_lock_irq(&n->list_lock);
-
-                       list_for_each_entry(page, &n->slabs_full, lru)
-                               handle_slab(x, cachep, page);
-                       list_for_each_entry(page, &n->slabs_partial, lru)
-                               handle_slab(x, cachep, page);
-                       spin_unlock_irq(&n->list_lock);
-               }
-       } while (!is_store_user_clean(cachep));
-
-       name = cachep->name;
-       if (x[0] == x[1]) {
-               /* Increase the buffer size */
-               mutex_unlock(&slab_mutex);
-               m->private = kcalloc(x[0] * 4, sizeof(unsigned long),
-                                    GFP_KERNEL);
-               if (!m->private) {
-                       /* Too bad, we are really out */
-                       m->private = x;
-                       mutex_lock(&slab_mutex);
-                       return -ENOMEM;
-               }
-               *(unsigned long *)m->private = x[0] * 2;
-               kfree(x);
-               mutex_lock(&slab_mutex);
-               /* Now make sure this entry will be retried */
-               m->count = m->size;
-               return 0;
-       }
-       for (i = 0; i < x[1]; i++) {
-               seq_printf(m, "%s: %lu ", name, x[2*i+3]);
-               show_symbol(m, x[2*i+2]);
-               seq_putc(m, '\n');
-       }
-
-       return 0;
-}
-
-static const struct seq_operations slabstats_op = {
-       .start = slab_start,
-       .next = slab_next,
-       .stop = slab_stop,
-       .show = leaks_show,
-};
-
-static int slabstats_open(struct inode *inode, struct file *file)
-{
-       unsigned long *n;
-
-       n = __seq_open_private(file, &slabstats_op, PAGE_SIZE);
-       if (!n)
-               return -ENOMEM;
-
-       *n = PAGE_SIZE / (2 * sizeof(unsigned long));
-
-       return 0;
-}
-
-static const struct file_operations proc_slabstats_operations = {
-       .open           = slabstats_open,
-       .read           = seq_read,
-       .llseek         = seq_lseek,
-       .release        = seq_release_private,
-};
-#endif
-
-static int __init slab_proc_init(void)
-{
-#ifdef CONFIG_DEBUG_SLAB_LEAK
-       proc_create("slab_allocators", 0, NULL, &proc_slabstats_operations);
-#endif
-       return 0;
-}
-module_init(slab_proc_init);
-
 #ifdef CONFIG_HARDENED_USERCOPY
 /*
  * Rejects incorrectly sized objects and objects that are to be copied