mm/slub.c
/*
 * SLUB: A slab allocator that limits cache line use instead of queuing
 * objects in per cpu and per node lists.
 *
 * The allocator synchronizes using per slab locks and only
 * uses a centralized lock to manage a pool of partial slabs.
 *
 * (C) 2007 SGI, Christoph Lameter
 */

#include <linux/mm.h>
#include <linux/module.h>
#include <linux/bit_spinlock.h>
#include <linux/interrupt.h>
#include <linux/bitops.h>
#include <linux/slab.h>
#include <linux/proc_fs.h>
#include <linux/seq_file.h>
#include <linux/cpu.h>
#include <linux/cpuset.h>
#include <linux/mempolicy.h>
#include <linux/ctype.h>
#include <linux/debugobjects.h>
#include <linux/kallsyms.h>
#include <linux/memory.h>
#include <linux/math64.h>

/*
 * Lock order:
 *   1. slab_lock(page)
 *   2. slab->list_lock
 *
 * The slab_lock protects operations on the objects of a particular
 * slab and its metadata in the page struct. If the slab lock
 * has been taken then no allocations nor frees can be performed
 * on the objects in the slab nor can the slab be added to or removed
 * from the partial or full lists since this would mean modifying
 * the page struct of the slab.
 *
 * The list_lock protects the partial and full list on each node and
 * the partial slab counter. If taken then no new slabs may be added or
 * removed from the lists nor can the number of partial slabs be modified.
 * (Note that the total number of slabs is an atomic value that may be
 * modified without taking the list lock).
 *
 * The list_lock is a centralized lock and thus we avoid taking it as
 * much as possible. As long as SLUB does not have to handle partial
 * slabs, operations can continue without any centralized lock. E.g.
 * allocating a long series of objects that fill up slabs does not require
 * the list lock.
 *
 * The lock order is sometimes inverted when we are trying to get a slab
 * off a list. We take the list_lock and then look for a page on the list
 * to use. While we do that objects in the slabs may be freed. We can
 * only operate on the slab if we have also taken the slab_lock. So we use
 * a slab_trylock() on the slab. If trylock was successful then no frees
 * can occur anymore and we can use the slab for allocations etc. If the
 * slab_trylock() does not succeed then frees are in progress in the slab and
 * we must stay away from it for a while since we may cause a bouncing
 * cacheline if we try to acquire the lock. So go onto the next slab.
 * If all pages are busy then we may allocate a new slab instead of reusing
 * a partial slab. A new slab has no one operating on it and thus there is
 * no danger of cacheline contention. (A sketch of this pattern follows
 * this comment.)
 *
 * Interrupts are disabled during allocation and deallocation in order to
 * make the slab allocator safe to use in the context of an irq. In addition
 * interrupts are disabled to ensure that the processor does not change
 * while handling per_cpu slabs, due to kernel preemption.
 *
 * SLUB assigns one slab for allocation to each processor.
 * Allocations only occur from these slabs called cpu slabs.
 *
 * Slabs with free elements are kept on a partial list and during regular
 * operations no list for full slabs is used. If an object in a full slab is
 * freed then the slab will show up again on the partial lists.
 * We track full slabs for debugging purposes though because otherwise we
 * cannot scan all objects.
 *
 * Slabs are freed when they become empty. Teardown and setup is
 * minimal so we rely on the page allocator's per cpu caches for
 * fast frees and allocs.
 *
 * Overloading of page flags that are otherwise used for LRU management.
 *
 * PageActive           The slab is frozen and exempt from list processing.
 *                      This means that the slab is dedicated to a purpose
 *                      such as satisfying allocations for a specific
 *                      processor. Objects may be freed in the slab while
 *                      it is frozen but slab_free will then skip the usual
 *                      list operations. It is up to the processor holding
 *                      the slab to integrate the slab into the slab lists
 *                      when the slab is no longer needed.
 *
 *                      One use of this flag is to mark slabs that are
 *                      used for allocations. Then such a slab becomes a cpu
 *                      slab. The cpu slab may be equipped with an additional
 *                      freelist that allows lockless access to
 *                      free objects in addition to the regular freelist
 *                      that requires the slab lock.
 *
 * PageError            Slab requires special handling due to debug
 *                      options set. This moves slab handling out of
 *                      the fast path and disables lockless freelists.
 */

#ifdef CONFIG_SLUB_DEBUG
#define SLABDEBUG 1
#else
#define SLABDEBUG 0
#endif

/*
 * Issues still to be resolved:
 *
 * - Support PAGE_ALLOC_DEBUG. Should be easy to do.
 *
 * - Variable sizing of the per node arrays
 */

/* Enable to test recovery from slab corruption on boot */
#undef SLUB_RESILIENCY_TEST

/*
 * Minimum number of partial slabs. These will be left on the partial
 * lists even if they are empty. kmem_cache_shrink may reclaim them.
 */
#define MIN_PARTIAL 5

/*
 * Maximum number of desirable partial slabs.
 * The existence of more partial slabs makes kmem_cache_shrink
 * sort the partial list by the number of objects in them.
 */
#define MAX_PARTIAL 10

#define DEBUG_DEFAULT_FLAGS (SLAB_DEBUG_FREE | SLAB_RED_ZONE | \
                                SLAB_POISON | SLAB_STORE_USER)

/*
 * Set of flags that will prevent slab merging
 */
#define SLUB_NEVER_MERGE (SLAB_RED_ZONE | SLAB_POISON | SLAB_STORE_USER | \
                SLAB_TRACE | SLAB_DESTROY_BY_RCU)

#define SLUB_MERGE_SAME (SLAB_DEBUG_FREE | SLAB_RECLAIM_ACCOUNT | \
                SLAB_CACHE_DMA)

#ifndef ARCH_KMALLOC_MINALIGN
#define ARCH_KMALLOC_MINALIGN __alignof__(unsigned long long)
#endif

#ifndef ARCH_SLAB_MINALIGN
#define ARCH_SLAB_MINALIGN __alignof__(unsigned long long)
#endif

/* Internal SLUB flags */
#define __OBJECT_POISON         0x80000000 /* Poison object */
#define __SYSFS_ADD_DEFERRED    0x40000000 /* Not yet visible via sysfs */

static int kmem_size = sizeof(struct kmem_cache);

#ifdef CONFIG_SMP
static struct notifier_block slab_notifier;
#endif

static enum {
        DOWN,           /* No slab functionality available */
        PARTIAL,        /* kmem_cache_open() works but kmalloc does not */
        UP,             /* Everything works but does not show up in sysfs */
        SYSFS           /* Sysfs up */
} slab_state = DOWN;

/* A list of all slab caches on the system */
static DECLARE_RWSEM(slub_lock);
static LIST_HEAD(slab_caches);

/*
 * Tracking user of a slab.
 */
struct track {
        void *addr;             /* Called from address */
        int cpu;                /* Was running on cpu */
        int pid;                /* Pid context */
        unsigned long when;     /* When did the operation occur */
};

enum track_item { TRACK_ALLOC, TRACK_FREE };

#ifdef CONFIG_SLUB_DEBUG
static int sysfs_slab_add(struct kmem_cache *);
static int sysfs_slab_alias(struct kmem_cache *, const char *);
static void sysfs_slab_remove(struct kmem_cache *);

#else
static inline int sysfs_slab_add(struct kmem_cache *s) { return 0; }
static inline int sysfs_slab_alias(struct kmem_cache *s, const char *p)
                                                        { return 0; }
static inline void sysfs_slab_remove(struct kmem_cache *s)
{
        kfree(s);
}

#endif

static inline void stat(struct kmem_cache_cpu *c, enum stat_item si)
{
#ifdef CONFIG_SLUB_STATS
        c->stat[si]++;
#endif
}

/********************************************************************
 *                      Core slab cache functions
 *******************************************************************/

int slab_is_available(void)
{
        return slab_state >= UP;
}

static inline struct kmem_cache_node *get_node(struct kmem_cache *s, int node)
{
#ifdef CONFIG_NUMA
        return s->node[node];
#else
        return &s->local_node;
#endif
}

static inline struct kmem_cache_cpu *get_cpu_slab(struct kmem_cache *s, int cpu)
{
#ifdef CONFIG_SMP
        return s->cpu_slab[cpu];
#else
        return &s->cpu_slab;
#endif
}

/* Verify that a pointer has an address that is valid within a slab page */
static inline int check_valid_pointer(struct kmem_cache *s,
                                struct page *page, const void *object)
{
        void *base;

        if (!object)
                return 1;

        base = page_address(page);
        if (object < base || object >= base + page->objects * s->size ||
                (object - base) % s->size) {
                return 0;
        }

        return 1;
}
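
/*
 * Worked example (illustrative numbers): with s->size == 64 the only
 * valid object addresses are base, base + 64, base + 128, ... up to
 * base + (page->objects - 1) * 64, where base == page_address(page).
 * A pointer such as base + 96 fails the modulo test above and is
 * rejected as corrupt.
 */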

/*
 * Slow version of get and set free pointer.
 *
 * This version requires touching the cache lines of kmem_cache which
 * we try to avoid in the fast alloc and free paths. There we obtain
 * the offset from the page struct.
 */
static inline void *get_freepointer(struct kmem_cache *s, void *object)
{
        return *(void **)(object + s->offset);
}

static inline void set_freepointer(struct kmem_cache *s, void *object, void *fp)
{
        *(void **)(object + s->offset) = fp;
}
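
/*
 * The freelist thus forms a singly linked chain threaded through the
 * free objects themselves, e.g. (illustrative layout, s->offset == 0):
 *
 *      page->freelist -> [obj A] -> [obj C] -> [obj F] -> NULL
 *
 * Each free object stores the pointer to the next free object at
 * offset s->offset inside itself. Allocated objects are simply not on
 * the chain and carry no allocator metadata in this area.
 */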

/* Loop over all objects in a slab */
#define for_each_object(__p, __s, __addr, __objects) \
        for (__p = (__addr); __p < (__addr) + (__objects) * (__s)->size;\
                        __p += (__s)->size)

/* Scan freelist */
#define for_each_free_object(__p, __s, __free) \
        for (__p = (__free); __p; __p = get_freepointer((__s), __p))

/* Determine object index from a given position */
static inline int slab_index(void *p, struct kmem_cache *s, void *addr)
{
        return (p - addr) / s->size;
}

static inline struct kmem_cache_order_objects oo_make(int order,
                                                unsigned long size)
{
        struct kmem_cache_order_objects x = {
                (order << 16) + (PAGE_SIZE << order) / size
        };

        return x;
}

static inline int oo_order(struct kmem_cache_order_objects x)
{
        return x.x >> 16;
}

static inline int oo_objects(struct kmem_cache_order_objects x)
{
        return x.x & ((1 << 16) - 1);
}
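
/*
 * Worked example (illustrative values): for order 1 and size 256 on a
 * 4KB page, oo_make(1, 256) packs
 *
 *      x.x = (1 << 16) + (8192 / 256) = 0x10020
 *
 * so that oo_order() recovers 1 and oo_objects() recovers 32. Order
 * and object count thus travel together in a single word.
 */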

#ifdef CONFIG_SLUB_DEBUG
/*
 * Debug settings:
 */
#ifdef CONFIG_SLUB_DEBUG_ON
static int slub_debug = DEBUG_DEFAULT_FLAGS;
#else
static int slub_debug;
#endif

static char *slub_debug_slabs;

/*
 * Object debugging
 */
static void print_section(char *text, u8 *addr, unsigned int length)
{
        int i, offset;
        int newline = 1;
        char ascii[17];

        ascii[16] = 0;

        for (i = 0; i < length; i++) {
                if (newline) {
                        printk(KERN_ERR "%8s 0x%p: ", text, addr + i);
                        newline = 0;
                }
                printk(KERN_CONT " %02x", addr[i]);
                offset = i % 16;
                ascii[offset] = isgraph(addr[i]) ? addr[i] : '.';
                if (offset == 15) {
                        printk(KERN_CONT " %s\n", ascii);
                        newline = 1;
                }
        }
        if (!newline) {
                i %= 16;
                while (i < 16) {
                        printk(KERN_CONT "   ");
                        ascii[i] = ' ';
                        i++;
                }
                printk(KERN_CONT " %s\n", ascii);
        }
}
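
/*
 * The resulting output looks roughly like this (hypothetical poisoned
 * object, address abbreviated): one line per 16 bytes, hex dump on the
 * left, printable ASCII on the right with '.' standing in for
 * non-graphic characters:
 *
 *   Object 0xffff88000001: 6b 6b 6b 6b 6b 6b 6b 6b 6b 6b 6b 6b 6b 6b 6b a5 kkkkkkkkkkkkkkk.
 */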

static struct track *get_track(struct kmem_cache *s, void *object,
        enum track_item alloc)
{
        struct track *p;

        if (s->offset)
                p = object + s->offset + sizeof(void *);
        else
                p = object + s->inuse;

        return p + alloc;
}

static void set_track(struct kmem_cache *s, void *object,
                                enum track_item alloc, void *addr)
{
        struct track *p;

        if (s->offset)
                p = object + s->offset + sizeof(void *);
        else
                p = object + s->inuse;

        p += alloc;
        if (addr) {
                p->addr = addr;
                p->cpu = smp_processor_id();
                p->pid = current->pid;
                p->when = jiffies;
        } else
                memset(p, 0, sizeof(struct track));
}

static void init_tracking(struct kmem_cache *s, void *object)
{
        if (!(s->flags & SLAB_STORE_USER))
                return;

        set_track(s, object, TRACK_FREE, NULL);
        set_track(s, object, TRACK_ALLOC, NULL);
}

static void print_track(const char *s, struct track *t)
{
        if (!t->addr)
                return;

        printk(KERN_ERR "INFO: %s in %pS age=%lu cpu=%u pid=%d\n",
                s, t->addr, jiffies - t->when, t->cpu, t->pid);
}

static void print_tracking(struct kmem_cache *s, void *object)
{
        if (!(s->flags & SLAB_STORE_USER))
                return;

        print_track("Allocated", get_track(s, object, TRACK_ALLOC));
        print_track("Freed", get_track(s, object, TRACK_FREE));
}

static void print_page_info(struct page *page)
{
        printk(KERN_ERR "INFO: Slab 0x%p objects=%u used=%u fp=0x%p flags=0x%04lx\n",
                page, page->objects, page->inuse, page->freelist, page->flags);
}

static void slab_bug(struct kmem_cache *s, char *fmt, ...)
{
        va_list args;
        char buf[100];

        va_start(args, fmt);
        vsnprintf(buf, sizeof(buf), fmt, args);
        va_end(args);
        printk(KERN_ERR "========================================"
                        "=====================================\n");
        printk(KERN_ERR "BUG %s: %s\n", s->name, buf);
        printk(KERN_ERR "----------------------------------------"
                        "-------------------------------------\n\n");
}

static void slab_fix(struct kmem_cache *s, char *fmt, ...)
{
        va_list args;
        char buf[100];

        va_start(args, fmt);
        vsnprintf(buf, sizeof(buf), fmt, args);
        va_end(args);
        printk(KERN_ERR "FIX %s: %s\n", s->name, buf);
}

static void print_trailer(struct kmem_cache *s, struct page *page, u8 *p)
{
        unsigned int off;       /* Offset of last byte */
        u8 *addr = page_address(page);

        print_tracking(s, p);

        print_page_info(page);

        printk(KERN_ERR "INFO: Object 0x%p @offset=%tu fp=0x%p\n\n",
                        p, p - addr, get_freepointer(s, p));

        if (p > addr + 16)
                print_section("Bytes b4", p - 16, 16);

        print_section("Object", p, min_t(unsigned long, s->objsize, PAGE_SIZE));

        if (s->flags & SLAB_RED_ZONE)
                print_section("Redzone", p + s->objsize,
                        s->inuse - s->objsize);

        if (s->offset)
                off = s->offset + sizeof(void *);
        else
                off = s->inuse;

        if (s->flags & SLAB_STORE_USER)
                off += 2 * sizeof(struct track);

        if (off != s->size)
                /* Beginning of the filler is the free pointer */
                print_section("Padding", p + off, s->size - off);

        dump_stack();
}

static void object_err(struct kmem_cache *s, struct page *page,
                        u8 *object, char *reason)
{
        slab_bug(s, "%s", reason);
        print_trailer(s, page, object);
}

static void slab_err(struct kmem_cache *s, struct page *page, char *fmt, ...)
{
        va_list args;
        char buf[100];

        va_start(args, fmt);
        vsnprintf(buf, sizeof(buf), fmt, args);
        va_end(args);
        slab_bug(s, "%s", buf);
        print_page_info(page);
        dump_stack();
}

static void init_object(struct kmem_cache *s, void *object, int active)
{
        u8 *p = object;

        if (s->flags & __OBJECT_POISON) {
                memset(p, POISON_FREE, s->objsize - 1);
                p[s->objsize - 1] = POISON_END;
        }

        if (s->flags & SLAB_RED_ZONE)
                memset(p + s->objsize,
                        active ? SLUB_RED_ACTIVE : SLUB_RED_INACTIVE,
                        s->inuse - s->objsize);
}

static u8 *check_bytes(u8 *start, unsigned int value, unsigned int bytes)
{
        while (bytes) {
                if (*start != (u8)value)
                        return start;
                start++;
                bytes--;
        }
        return NULL;
}

static void restore_bytes(struct kmem_cache *s, char *message, u8 data,
                                                void *from, void *to)
{
        slab_fix(s, "Restoring 0x%p-0x%p=0x%x\n", from, to - 1, data);
        memset(from, data, to - from);
}

static int check_bytes_and_report(struct kmem_cache *s, struct page *page,
                        u8 *object, char *what,
                        u8 *start, unsigned int value, unsigned int bytes)
{
        u8 *fault;
        u8 *end;

        fault = check_bytes(start, value, bytes);
        if (!fault)
                return 1;

        end = start + bytes;
        while (end > fault && end[-1] == value)
                end--;

        slab_bug(s, "%s overwritten", what);
        printk(KERN_ERR "INFO: 0x%p-0x%p. First byte 0x%x instead of 0x%x\n",
                                        fault, end - 1, fault[0], value);
        print_trailer(s, page, object);

        restore_bytes(s, what, value, fault, end);
        return 0;
}

/*
 * Object layout:
 *
 * object address
 *      Bytes of the object to be managed.
 *      If the freepointer may overlay the object then the free
 *      pointer is the first word of the object.
 *
 *      Poisoning uses 0x6b (POISON_FREE) and the last byte is
 *      0xa5 (POISON_END)
 *
 * object + s->objsize
 *      Padding to reach word boundary. This is also used for Redzoning.
 *      Padding is extended by another word if Redzoning is enabled and
 *      objsize == inuse.
 *
 *      We fill with 0xbb (RED_INACTIVE) for inactive objects and with
 *      0xcc (RED_ACTIVE) for objects in use.
 *
 * object + s->inuse
 *      Meta data starts here.
 *
 *      A. Free pointer (if we cannot overwrite object on free)
 *      B. Tracking data for SLAB_STORE_USER
 *      C. Padding to reach required alignment boundary or at minimum
 *              one word if debugging is on to be able to detect writes
 *              before the word boundary.
 *
 *      Padding is done using 0x5a (POISON_INUSE)
 *
 * object + s->size
 *      Nothing is used beyond s->size.
 *
 * If slabcaches are merged then the objsize and inuse boundaries are mostly
 * ignored. And therefore no slab options that rely on these boundaries
 * may be used with merged slabcaches.
 */
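
/*
 * Worked example (illustrative only; the exact numbers are computed by
 * calculate_sizes()): on 64 bit, a fully debugged cache with
 * objsize == 24, poisoning, red zoning and SLAB_STORE_USER might be
 * laid out as
 *
 *       0..23  object proper (6b poison, last byte a5, while free)
 *      24..31  red zone (bb inactive / cc active)
 *      32..39  free pointer (s->offset == 32)
 *      40..87  two struct track entries (alloc, then free)
 *      88..95  padding (5a) up to s->size
 *
 * which only illustrates the ordering described above.
 */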

static int check_pad_bytes(struct kmem_cache *s, struct page *page, u8 *p)
{
        unsigned long off = s->inuse;   /* The end of info */

        if (s->offset)
                /* Freepointer is placed after the object. */
                off += sizeof(void *);

        if (s->flags & SLAB_STORE_USER)
                /* We also have user information there */
                off += 2 * sizeof(struct track);

        if (s->size == off)
                return 1;

        return check_bytes_and_report(s, page, p, "Object padding",
                                p + off, POISON_INUSE, s->size - off);
}

/* Check the pad bytes at the end of a slab page */
static int slab_pad_check(struct kmem_cache *s, struct page *page)
{
        u8 *start;
        u8 *fault;
        u8 *end;
        int length;
        int remainder;

        if (!(s->flags & SLAB_POISON))
                return 1;

        start = page_address(page);
        length = (PAGE_SIZE << compound_order(page));
        end = start + length;
        remainder = length % s->size;
        if (!remainder)
                return 1;

        fault = check_bytes(end - remainder, POISON_INUSE, remainder);
        if (!fault)
                return 1;
        while (end > fault && end[-1] == POISON_INUSE)
                end--;

        slab_err(s, page, "Padding overwritten. 0x%p-0x%p", fault, end - 1);
        print_section("Padding", end - remainder, remainder);

        restore_bytes(s, "slab padding", POISON_INUSE, start, end);
        return 0;
}

static int check_object(struct kmem_cache *s, struct page *page,
                                        void *object, int active)
{
        u8 *p = object;
        u8 *endobject = object + s->objsize;

        if (s->flags & SLAB_RED_ZONE) {
                unsigned int red =
                        active ? SLUB_RED_ACTIVE : SLUB_RED_INACTIVE;

                if (!check_bytes_and_report(s, page, object, "Redzone",
                        endobject, red, s->inuse - s->objsize))
                        return 0;
        } else {
                if ((s->flags & SLAB_POISON) && s->objsize < s->inuse) {
                        check_bytes_and_report(s, page, p, "Alignment padding",
                                endobject, POISON_INUSE, s->inuse - s->objsize);
                }
        }

        if (s->flags & SLAB_POISON) {
                if (!active && (s->flags & __OBJECT_POISON) &&
                        (!check_bytes_and_report(s, page, p, "Poison", p,
                                        POISON_FREE, s->objsize - 1) ||
                         !check_bytes_and_report(s, page, p, "Poison",
                                p + s->objsize - 1, POISON_END, 1)))
                        return 0;
                /*
                 * check_pad_bytes cleans up on its own.
                 */
                check_pad_bytes(s, page, p);
        }

        if (!s->offset && active)
                /*
                 * Object and freepointer overlap. Cannot check
                 * freepointer while object is allocated.
                 */
                return 1;

        /* Check free pointer validity */
        if (!check_valid_pointer(s, page, get_freepointer(s, p))) {
                object_err(s, page, p, "Freepointer corrupt");
                /*
                 * No choice but to zap it and thus lose the remainder
                 * of the free objects in this slab. May cause
                 * another error because the object count is now wrong.
                 */
                set_freepointer(s, p, NULL);
                return 0;
        }
        return 1;
}

static int check_slab(struct kmem_cache *s, struct page *page)
{
        int maxobj;

        VM_BUG_ON(!irqs_disabled());

        if (!PageSlab(page)) {
                slab_err(s, page, "Not a valid slab page");
                return 0;
        }

        maxobj = (PAGE_SIZE << compound_order(page)) / s->size;
        if (page->objects > maxobj) {
                slab_err(s, page, "objects %u > max %u",
                        page->objects, maxobj);
                return 0;
        }
        if (page->inuse > page->objects) {
                slab_err(s, page, "inuse %u > max %u",
                        page->inuse, page->objects);
                return 0;
        }
        /* Slab_pad_check fixes things up after itself */
        slab_pad_check(s, page);
        return 1;
}

/*
 * Determine if a certain object on a page is on the freelist. Must hold the
 * slab lock to guarantee that the chains are in a consistent state.
 */
static int on_freelist(struct kmem_cache *s, struct page *page, void *search)
{
        int nr = 0;
        void *fp = page->freelist;
        void *object = NULL;
        unsigned long max_objects;

        while (fp && nr <= page->objects) {
                if (fp == search)
                        return 1;
                if (!check_valid_pointer(s, page, fp)) {
                        if (object) {
                                object_err(s, page, object,
                                        "Freechain corrupt");
                                set_freepointer(s, object, NULL);
                                break;
                        } else {
                                slab_err(s, page, "Freepointer corrupt");
                                page->freelist = NULL;
                                page->inuse = page->objects;
                                slab_fix(s, "Freelist cleared");
                                return 0;
                        }
                        break;
                }
                object = fp;
                fp = get_freepointer(s, object);
                nr++;
        }

        max_objects = (PAGE_SIZE << compound_order(page)) / s->size;
        if (max_objects > 65535)
                max_objects = 65535;

        if (page->objects != max_objects) {
                slab_err(s, page, "Wrong number of objects. Found %d but "
                        "should be %d", page->objects, max_objects);
                page->objects = max_objects;
                slab_fix(s, "Number of objects adjusted.");
        }
        if (page->inuse != page->objects - nr) {
                slab_err(s, page, "Wrong object count. Counter is %d but "
                        "counted were %d", page->inuse, page->objects - nr);
                page->inuse = page->objects - nr;
                slab_fix(s, "Object count adjusted.");
        }
        return search == NULL;
}

static void trace(struct kmem_cache *s, struct page *page, void *object,
                                                                int alloc)
{
        if (s->flags & SLAB_TRACE) {
                printk(KERN_INFO "TRACE %s %s 0x%p inuse=%d fp=0x%p\n",
                        s->name,
                        alloc ? "alloc" : "free",
                        object, page->inuse,
                        page->freelist);

                if (!alloc)
                        print_section("Object", (void *)object, s->objsize);

                dump_stack();
        }
}

/*
 * Tracking of fully allocated slabs for debugging purposes.
 */
static void add_full(struct kmem_cache_node *n, struct page *page)
{
        spin_lock(&n->list_lock);
        list_add(&page->lru, &n->full);
        spin_unlock(&n->list_lock);
}

static void remove_full(struct kmem_cache *s, struct page *page)
{
        struct kmem_cache_node *n;

        if (!(s->flags & SLAB_STORE_USER))
                return;

        n = get_node(s, page_to_nid(page));

        spin_lock(&n->list_lock);
        list_del(&page->lru);
        spin_unlock(&n->list_lock);
}

/* Tracking of the number of slabs for debugging purposes */
static inline unsigned long slabs_node(struct kmem_cache *s, int node)
{
        struct kmem_cache_node *n = get_node(s, node);

        return atomic_long_read(&n->nr_slabs);
}

static inline void inc_slabs_node(struct kmem_cache *s, int node, int objects)
{
        struct kmem_cache_node *n = get_node(s, node);

        /*
         * May be called early in order to allocate a slab for the
         * kmem_cache_node structure. Solve the chicken-egg
         * dilemma by deferring the increment of the count during
         * bootstrap (see early_kmem_cache_node_alloc).
         */
        if (!NUMA_BUILD || n) {
                atomic_long_inc(&n->nr_slabs);
                atomic_long_add(objects, &n->total_objects);
        }
}
static inline void dec_slabs_node(struct kmem_cache *s, int node, int objects)
{
        struct kmem_cache_node *n = get_node(s, node);

        atomic_long_dec(&n->nr_slabs);
        atomic_long_sub(objects, &n->total_objects);
}

/* Object debug checks for alloc/free paths */
static void setup_object_debug(struct kmem_cache *s, struct page *page,
                                                                void *object)
{
        if (!(s->flags & (SLAB_STORE_USER|SLAB_RED_ZONE|__OBJECT_POISON)))
                return;

        init_object(s, object, 0);
        init_tracking(s, object);
}

static int alloc_debug_processing(struct kmem_cache *s, struct page *page,
                                                void *object, void *addr)
{
        if (!check_slab(s, page))
                goto bad;

        if (!on_freelist(s, page, object)) {
                object_err(s, page, object, "Object already allocated");
                goto bad;
        }

        if (!check_valid_pointer(s, page, object)) {
                object_err(s, page, object, "Freelist Pointer check fails");
                goto bad;
        }

        if (!check_object(s, page, object, 0))
                goto bad;

        /* Success. Perform special debug activities for allocs */
        if (s->flags & SLAB_STORE_USER)
                set_track(s, object, TRACK_ALLOC, addr);
        trace(s, page, object, 1);
        init_object(s, object, 1);
        return 1;

bad:
        if (PageSlab(page)) {
                /*
                 * If this is a slab page then lets do the best we can
                 * to avoid issues in the future. Marking all objects
                 * as used avoids touching the remaining objects.
                 */
                slab_fix(s, "Marking all objects used");
                page->inuse = page->objects;
                page->freelist = NULL;
        }
        return 0;
}

static int free_debug_processing(struct kmem_cache *s, struct page *page,
                                                void *object, void *addr)
{
        if (!check_slab(s, page))
                goto fail;

        if (!check_valid_pointer(s, page, object)) {
                slab_err(s, page, "Invalid object pointer 0x%p", object);
                goto fail;
        }

        if (on_freelist(s, page, object)) {
                object_err(s, page, object, "Object already free");
                goto fail;
        }

        if (!check_object(s, page, object, 1))
                return 0;

        if (unlikely(s != page->slab)) {
                if (!PageSlab(page)) {
                        slab_err(s, page, "Attempt to free object(0x%p) "
                                "outside of slab", object);
                } else if (!page->slab) {
                        printk(KERN_ERR
                                "SLUB <none>: no slab for object 0x%p.\n",
                                                object);
                        dump_stack();
                } else
                        object_err(s, page, object,
                                        "page slab pointer corrupt.");
                goto fail;
        }

        /* Special debug activities for freeing objects */
        if (!PageSlubFrozen(page) && !page->freelist)
                remove_full(s, page);
        if (s->flags & SLAB_STORE_USER)
                set_track(s, object, TRACK_FREE, addr);
        trace(s, page, object, 0);
        init_object(s, object, 0);
        return 1;

fail:
        slab_fix(s, "Object at 0x%p not freed", object);
        return 0;
}

static int __init setup_slub_debug(char *str)
{
        slub_debug = DEBUG_DEFAULT_FLAGS;
        if (*str++ != '=' || !*str)
                /*
                 * No options specified. Switch on full debugging.
                 */
                goto out;

        if (*str == ',')
                /*
                 * No options but restriction on slabs. This means full
                 * debugging for slabs matching a pattern.
                 */
                goto check_slabs;

        slub_debug = 0;
        if (*str == '-')
                /*
                 * Switch off all debugging measures.
                 */
                goto out;

        /*
         * Determine which debug features should be switched on
         */
        for (; *str && *str != ','; str++) {
                switch (tolower(*str)) {
                case 'f':
                        slub_debug |= SLAB_DEBUG_FREE;
                        break;
                case 'z':
                        slub_debug |= SLAB_RED_ZONE;
                        break;
                case 'p':
                        slub_debug |= SLAB_POISON;
                        break;
                case 'u':
                        slub_debug |= SLAB_STORE_USER;
                        break;
                case 't':
                        slub_debug |= SLAB_TRACE;
                        break;
                default:
                        printk(KERN_ERR "slub_debug option '%c' "
                                "unknown. skipped\n", *str);
                }
        }

check_slabs:
        if (*str == ',')
                slub_debug_slabs = str + 1;
out:
        return 1;
}

__setup("slub_debug", setup_slub_debug);
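
/*
 * Example command lines for the boot parameter parsed above
 * (illustrative):
 *
 *      slub_debug              enable all debug options for all slabs
 *      slub_debug=FZ           sanity checks (F) and red zoning (Z)
 *                              for all slabs
 *      slub_debug=,dentry      enable all debug options only for slabs
 *                              whose name starts with "dentry"
 *      slub_debug=-            switch all debugging off
 */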

static unsigned long kmem_cache_flags(unsigned long objsize,
        unsigned long flags, const char *name,
        void (*ctor)(void *))
{
        /*
         * Enable debugging if selected on the kernel commandline.
         */
        if (slub_debug && (!slub_debug_slabs ||
            strncmp(slub_debug_slabs, name, strlen(slub_debug_slabs)) == 0))
                flags |= slub_debug;

        return flags;
}
#else
static inline void setup_object_debug(struct kmem_cache *s,
                        struct page *page, void *object) {}

static inline int alloc_debug_processing(struct kmem_cache *s,
        struct page *page, void *object, void *addr) { return 0; }

static inline int free_debug_processing(struct kmem_cache *s,
        struct page *page, void *object, void *addr) { return 0; }

static inline int slab_pad_check(struct kmem_cache *s, struct page *page)
                        { return 1; }
static inline int check_object(struct kmem_cache *s, struct page *page,
                        void *object, int active) { return 1; }
static inline void add_full(struct kmem_cache_node *n, struct page *page) {}
static inline unsigned long kmem_cache_flags(unsigned long objsize,
        unsigned long flags, const char *name,
        void (*ctor)(void *))
{
        return flags;
}
#define slub_debug 0

static inline unsigned long slabs_node(struct kmem_cache *s, int node)
                        { return 0; }
static inline void inc_slabs_node(struct kmem_cache *s, int node,
                                                        int objects) {}
static inline void dec_slabs_node(struct kmem_cache *s, int node,
                                                        int objects) {}
#endif

/*
 * Slab allocation and freeing
 */
static inline struct page *alloc_slab_page(gfp_t flags, int node,
                                        struct kmem_cache_order_objects oo)
{
        int order = oo_order(oo);

        if (node == -1)
                return alloc_pages(flags, order);
        else
                return alloc_pages_node(node, flags, order);
}

static struct page *allocate_slab(struct kmem_cache *s, gfp_t flags, int node)
{
        struct page *page;
        struct kmem_cache_order_objects oo = s->oo;

        flags |= s->allocflags;

        page = alloc_slab_page(flags | __GFP_NOWARN | __GFP_NORETRY, node,
                                                                        oo);
        if (unlikely(!page)) {
                oo = s->min;
                /*
                 * Allocation may have failed due to fragmentation.
                 * Try a lower order alloc if possible
                 */
                page = alloc_slab_page(flags, node, oo);
                if (!page)
                        return NULL;

                stat(get_cpu_slab(s, raw_smp_processor_id()), ORDER_FALLBACK);
        }
        page->objects = oo_objects(oo);
        mod_zone_page_state(page_zone(page),
                (s->flags & SLAB_RECLAIM_ACCOUNT) ?
                NR_SLAB_RECLAIMABLE : NR_SLAB_UNRECLAIMABLE,
                1 << oo_order(oo));

        return page;
}

static void setup_object(struct kmem_cache *s, struct page *page,
                                void *object)
{
        setup_object_debug(s, page, object);
        if (unlikely(s->ctor))
                s->ctor(object);
}

static struct page *new_slab(struct kmem_cache *s, gfp_t flags, int node)
{
        struct page *page;
        void *start;
        void *last;
        void *p;

        BUG_ON(flags & GFP_SLAB_BUG_MASK);

        page = allocate_slab(s,
                flags & (GFP_RECLAIM_MASK | GFP_CONSTRAINT_MASK), node);
        if (!page)
                goto out;

        inc_slabs_node(s, page_to_nid(page), page->objects);
        page->slab = s;
        page->flags |= 1 << PG_slab;
        if (s->flags & (SLAB_DEBUG_FREE | SLAB_RED_ZONE | SLAB_POISON |
                        SLAB_STORE_USER | SLAB_TRACE))
                __SetPageSlubDebug(page);

        start = page_address(page);

        if (unlikely(s->flags & SLAB_POISON))
                memset(start, POISON_INUSE, PAGE_SIZE << compound_order(page));

        last = start;
        for_each_object(p, s, start, page->objects) {
                setup_object(s, page, last);
                set_freepointer(s, last, p);
                last = p;
        }
        setup_object(s, page, last);
        set_freepointer(s, last, NULL);

        page->freelist = start;
        page->inuse = 0;
out:
        return page;
}

static void __free_slab(struct kmem_cache *s, struct page *page)
{
        int order = compound_order(page);
        int pages = 1 << order;

        if (unlikely(SLABDEBUG && PageSlubDebug(page))) {
                void *p;

                slab_pad_check(s, page);
                for_each_object(p, s, page_address(page),
                                                page->objects)
                        check_object(s, page, p, 0);
                __ClearPageSlubDebug(page);
        }

        mod_zone_page_state(page_zone(page),
                (s->flags & SLAB_RECLAIM_ACCOUNT) ?
                NR_SLAB_RECLAIMABLE : NR_SLAB_UNRECLAIMABLE,
                -pages);

        __ClearPageSlab(page);
        reset_page_mapcount(page);
        __free_pages(page, order);
}

static void rcu_free_slab(struct rcu_head *h)
{
        struct page *page;

        page = container_of((struct list_head *)h, struct page, lru);
        __free_slab(page->slab, page);
}

static void free_slab(struct kmem_cache *s, struct page *page)
{
        if (unlikely(s->flags & SLAB_DESTROY_BY_RCU)) {
                /*
                 * RCU free overloads the RCU head over the LRU
                 */
                struct rcu_head *head = (void *)&page->lru;

                call_rcu(head, rcu_free_slab);
        } else
                __free_slab(s, page);
}

static void discard_slab(struct kmem_cache *s, struct page *page)
{
        dec_slabs_node(s, page_to_nid(page), page->objects);
        free_slab(s, page);
}

/*
 * Per slab locking using the pagelock
 */
static __always_inline void slab_lock(struct page *page)
{
        bit_spin_lock(PG_locked, &page->flags);
}

static __always_inline void slab_unlock(struct page *page)
{
        __bit_spin_unlock(PG_locked, &page->flags);
}

static __always_inline int slab_trylock(struct page *page)
{
        return bit_spin_trylock(PG_locked, &page->flags);
}

/*
 * Management of partially allocated slabs
 */
static void add_partial(struct kmem_cache_node *n,
                                struct page *page, int tail)
{
        spin_lock(&n->list_lock);
        n->nr_partial++;
        if (tail)
                list_add_tail(&page->lru, &n->partial);
        else
                list_add(&page->lru, &n->partial);
        spin_unlock(&n->list_lock);
}

static void remove_partial(struct kmem_cache *s, struct page *page)
{
        struct kmem_cache_node *n = get_node(s, page_to_nid(page));

        spin_lock(&n->list_lock);
        list_del(&page->lru);
        n->nr_partial--;
        spin_unlock(&n->list_lock);
}

/*
 * Lock slab and remove from the partial list.
 *
 * Must hold list_lock.
 */
static inline int lock_and_freeze_slab(struct kmem_cache_node *n,
                                                        struct page *page)
{
        if (slab_trylock(page)) {
                list_del(&page->lru);
                n->nr_partial--;
                __SetPageSlubFrozen(page);
                return 1;
        }
        return 0;
}

/*
 * Try to allocate a partial slab from a specific node.
 */
static struct page *get_partial_node(struct kmem_cache_node *n)
{
        struct page *page;

        /*
         * Racy check. If we mistakenly see no partial slabs then we
         * just allocate an empty slab. If we mistakenly try to get a
         * partial slab and there is none available then get_partial()
         * will return NULL.
         */
        if (!n || !n->nr_partial)
                return NULL;

        spin_lock(&n->list_lock);
        list_for_each_entry(page, &n->partial, lru)
                if (lock_and_freeze_slab(n, page))
                        goto out;
        page = NULL;
out:
        spin_unlock(&n->list_lock);
        return page;
}

/*
 * Get a page from somewhere. Search in increasing NUMA distances.
 */
static struct page *get_any_partial(struct kmem_cache *s, gfp_t flags)
{
#ifdef CONFIG_NUMA
        struct zonelist *zonelist;
        struct zoneref *z;
        struct zone *zone;
        enum zone_type high_zoneidx = gfp_zone(flags);
        struct page *page;

        /*
         * The defrag ratio allows a configuration of the tradeoffs between
         * inter node defragmentation and node local allocations. A lower
         * defrag_ratio increases the tendency to do local allocations
         * instead of attempting to obtain partial slabs from other nodes.
         *
         * If the defrag_ratio is set to 0 then kmalloc() always
         * returns node local objects. If the ratio is higher then kmalloc()
         * may return off node objects because partial slabs are obtained
         * from other nodes and filled up.
         *
         * If /sys/kernel/slab/xx/defrag_ratio is set to 100 (which makes
         * defrag_ratio = 1000) then every (well almost) allocation will
         * first attempt to defrag slab caches on other nodes. This means
         * scanning over all nodes to look for partial slabs which may be
         * expensive if we do it every time we are trying to find a slab
         * with available objects.
         */
        if (!s->remote_node_defrag_ratio ||
                        get_cycles() % 1024 > s->remote_node_defrag_ratio)
                return NULL;

        zonelist = node_zonelist(slab_node(current->mempolicy), flags);
        for_each_zone_zonelist(zone, z, zonelist, high_zoneidx) {
                struct kmem_cache_node *n;

                n = get_node(s, zone_to_nid(zone));

                if (n && cpuset_zone_allowed_hardwall(zone, flags) &&
                                n->nr_partial > n->min_partial) {
                        page = get_partial_node(n);
                        if (page)
                                return page;
                }
        }
#endif
        return NULL;
}

/*
 * Get a partial page, lock it and return it.
 */
static struct page *get_partial(struct kmem_cache *s, gfp_t flags, int node)
{
        struct page *page;
        int searchnode = (node == -1) ? numa_node_id() : node;

        page = get_partial_node(get_node(s, searchnode));
        if (page || (flags & __GFP_THISNODE))
                return page;

        return get_any_partial(s, flags);
}

/*
 * Move a page back to the lists.
 *
 * Must be called with the slab lock held.
 *
 * On exit the slab lock will have been dropped.
 */
static void unfreeze_slab(struct kmem_cache *s, struct page *page, int tail)
{
        struct kmem_cache_node *n = get_node(s, page_to_nid(page));
        struct kmem_cache_cpu *c = get_cpu_slab(s, smp_processor_id());

        __ClearPageSlubFrozen(page);
        if (page->inuse) {

                if (page->freelist) {
                        add_partial(n, page, tail);
                        stat(c, tail ? DEACTIVATE_TO_TAIL : DEACTIVATE_TO_HEAD);
                } else {
                        stat(c, DEACTIVATE_FULL);
                        if (SLABDEBUG && PageSlubDebug(page) &&
                                        (s->flags & SLAB_STORE_USER))
                                add_full(n, page);
                }
                slab_unlock(page);
        } else {
                stat(c, DEACTIVATE_EMPTY);
                if (n->nr_partial < n->min_partial) {
                        /*
                         * Adding an empty slab to the partial slabs in order
                         * to avoid page allocator overhead. This slab needs
                         * to come after the other slabs with objects in them
                         * so that the others get filled first. That way the
                         * size of the partial list stays small.
                         *
                         * kmem_cache_shrink can reclaim any empty slabs from
                         * the partial list.
                         */
                        add_partial(n, page, 1);
                        slab_unlock(page);
                } else {
                        slab_unlock(page);
                        stat(get_cpu_slab(s, raw_smp_processor_id()), FREE_SLAB);
                        discard_slab(s, page);
                }
        }
}

/*
 * Remove the cpu slab
 */
static void deactivate_slab(struct kmem_cache *s, struct kmem_cache_cpu *c)
{
        struct page *page = c->page;
        int tail = 1;

        if (page->freelist)
                stat(c, DEACTIVATE_REMOTE_FREES);
        /*
         * Merge cpu freelist into slab freelist. Typically we get here
         * because both freelists are empty. So this is unlikely
         * to occur.
         */
        while (unlikely(c->freelist)) {
                void **object;

                tail = 0;       /* Hot objects. Put the slab first */

                /* Retrieve object from cpu_freelist */
                object = c->freelist;
                c->freelist = c->freelist[c->offset];

                /* And put onto the regular freelist */
                object[c->offset] = page->freelist;
                page->freelist = object;
                page->inuse--;
        }
        c->page = NULL;
        unfreeze_slab(s, page, tail);
}

static inline void flush_slab(struct kmem_cache *s, struct kmem_cache_cpu *c)
{
        stat(c, CPUSLAB_FLUSH);
        slab_lock(c->page);
        deactivate_slab(s, c);
}

/*
 * Flush cpu slab.
 *
 * Called from IPI handler with interrupts disabled.
 */
static inline void __flush_cpu_slab(struct kmem_cache *s, int cpu)
{
        struct kmem_cache_cpu *c = get_cpu_slab(s, cpu);

        if (likely(c && c->page))
                flush_slab(s, c);
}

static void flush_cpu_slab(void *d)
{
        struct kmem_cache *s = d;

        __flush_cpu_slab(s, smp_processor_id());
}

static void flush_all(struct kmem_cache *s)
{
        on_each_cpu(flush_cpu_slab, s, 1);
}

/*
 * Check if the objects in a per cpu structure fit numa
 * locality expectations.
 */
static inline int node_match(struct kmem_cache_cpu *c, int node)
{
#ifdef CONFIG_NUMA
        if (node != -1 && c->node != node)
                return 0;
#endif
        return 1;
}

/*
 * Slow path. The lockless freelist is empty or we need to perform
 * debugging duties.
 *
 * Interrupts are disabled.
 *
 * Processing is still very fast if new objects have been freed to the
 * regular freelist. In that case we simply take over the regular freelist
 * as the lockless freelist and zap the regular freelist.
 *
 * If that is not working then we fall back to the partial lists. We take the
 * first element of the freelist as the object to allocate now and move the
 * rest of the freelist to the lockless freelist.
 *
 * And if we were unable to get a new slab from the partial slab lists then
 * we need to allocate a new slab. This is the slowest path since it involves
 * a call to the page allocator and the setup of a new slab.
 */
static void *__slab_alloc(struct kmem_cache *s,
                gfp_t gfpflags, int node, void *addr, struct kmem_cache_cpu *c)
{
        void **object;
        struct page *new;

        /* We handle __GFP_ZERO in the caller */
        gfpflags &= ~__GFP_ZERO;

        if (!c->page)
                goto new_slab;

        slab_lock(c->page);
        if (unlikely(!node_match(c, node)))
                goto another_slab;

        stat(c, ALLOC_REFILL);

load_freelist:
        object = c->page->freelist;
        if (unlikely(!object))
                goto another_slab;
        if (unlikely(SLABDEBUG && PageSlubDebug(c->page)))
                goto debug;

        c->freelist = object[c->offset];
        c->page->inuse = c->page->objects;
        c->page->freelist = NULL;
        c->node = page_to_nid(c->page);
unlock_out:
        slab_unlock(c->page);
        stat(c, ALLOC_SLOWPATH);
        return object;

another_slab:
        deactivate_slab(s, c);

new_slab:
        new = get_partial(s, gfpflags, node);
        if (new) {
                c->page = new;
                stat(c, ALLOC_FROM_PARTIAL);
                goto load_freelist;
        }

        if (gfpflags & __GFP_WAIT)
                local_irq_enable();

        new = new_slab(s, gfpflags, node);

        if (gfpflags & __GFP_WAIT)
                local_irq_disable();

        if (new) {
                c = get_cpu_slab(s, smp_processor_id());
                stat(c, ALLOC_SLAB);
                if (c->page)
                        flush_slab(s, c);
                slab_lock(new);
                __SetPageSlubFrozen(new);
                c->page = new;
                goto load_freelist;
        }
        return NULL;
debug:
        if (!alloc_debug_processing(s, c->page, object, addr))
                goto another_slab;

        c->page->inuse++;
        c->page->freelist = object[c->offset];
        c->node = -1;
        goto unlock_out;
}

/*
 * Inlined fastpath so that allocation functions (kmalloc, kmem_cache_alloc)
 * have the fastpath folded into their functions. So no function call
 * overhead for requests that can be satisfied on the fastpath.
 *
 * The fastpath works by first checking if the lockless freelist can be used.
 * If not then __slab_alloc is called for slow processing.
 *
 * Otherwise we can simply pick the next object from the lockless free list.
 */
static __always_inline void *slab_alloc(struct kmem_cache *s,
                gfp_t gfpflags, int node, void *addr)
{
        void **object;
        struct kmem_cache_cpu *c;
        unsigned long flags;
        unsigned int objsize;

        local_irq_save(flags);
        c = get_cpu_slab(s, smp_processor_id());
        objsize = c->objsize;
        if (unlikely(!c->freelist || !node_match(c, node)))
                object = __slab_alloc(s, gfpflags, node, addr, c);
        else {
                object = c->freelist;
                c->freelist = object[c->offset];
                stat(c, ALLOC_FASTPATH);
        }
        local_irq_restore(flags);

        if (unlikely((gfpflags & __GFP_ZERO) && object))
                memset(object, 0, objsize);

        return object;
}

void *kmem_cache_alloc(struct kmem_cache *s, gfp_t gfpflags)
{
        return slab_alloc(s, gfpflags, -1, __builtin_return_address(0));
}
EXPORT_SYMBOL(kmem_cache_alloc);

#ifdef CONFIG_NUMA
void *kmem_cache_alloc_node(struct kmem_cache *s, gfp_t gfpflags, int node)
{
        return slab_alloc(s, gfpflags, node, __builtin_return_address(0));
}
EXPORT_SYMBOL(kmem_cache_alloc_node);
#endif

/*
 * Slow path handling. This may still be called frequently since objects
 * have a longer lifetime than the cpu slabs in most processing loads.
 *
 * So we still attempt to reduce cache line usage. Just take the slab
 * lock and free the item. If there is no additional partial page
 * handling required then we can return immediately.
 */
static void __slab_free(struct kmem_cache *s, struct page *page,
                                void *x, void *addr, unsigned int offset)
{
        void *prior;
        void **object = (void *)x;
        struct kmem_cache_cpu *c;

        c = get_cpu_slab(s, raw_smp_processor_id());
        stat(c, FREE_SLOWPATH);
        slab_lock(page);

        if (unlikely(SLABDEBUG && PageSlubDebug(page)))
                goto debug;

checks_ok:
        prior = object[offset] = page->freelist;
        page->freelist = object;
        page->inuse--;

        if (unlikely(PageSlubFrozen(page))) {
                stat(c, FREE_FROZEN);
                goto out_unlock;
        }

        if (unlikely(!page->inuse))
                goto slab_empty;

        /*
         * Objects left in the slab. If it was not on the partial list before
         * then add it.
         */
        if (unlikely(!prior)) {
                add_partial(get_node(s, page_to_nid(page)), page, 1);
                stat(c, FREE_ADD_PARTIAL);
        }

out_unlock:
        slab_unlock(page);
        return;

slab_empty:
        if (prior) {
                /*
                 * Slab still on the partial list.
                 */
                remove_partial(s, page);
                stat(c, FREE_REMOVE_PARTIAL);
        }
        slab_unlock(page);
        stat(c, FREE_SLAB);
        discard_slab(s, page);
        return;

debug:
        if (!free_debug_processing(s, page, x, addr))
                goto out_unlock;
        goto checks_ok;
}

/*
 * Fastpath with forced inlining to produce a kfree and kmem_cache_free that
 * can perform fastpath freeing without additional function calls.
 *
 * The fastpath is only possible if we are freeing to the current cpu slab
 * of this processor. This is typically the case if we have just allocated
 * the item before.
 *
 * If fastpath is not possible then fall back to __slab_free where we deal
 * with all sorts of special processing.
 */
static __always_inline void slab_free(struct kmem_cache *s,
                        struct page *page, void *x, void *addr)
{
        void **object = (void *)x;
        struct kmem_cache_cpu *c;
        unsigned long flags;

        local_irq_save(flags);
        c = get_cpu_slab(s, smp_processor_id());
        debug_check_no_locks_freed(object, c->objsize);
        if (!(s->flags & SLAB_DEBUG_OBJECTS))
                debug_check_no_obj_freed(object, s->objsize);
        if (likely(page == c->page && c->node >= 0)) {
                object[c->offset] = c->freelist;
                c->freelist = object;
                stat(c, FREE_FASTPATH);
        } else
                __slab_free(s, page, x, addr, c->offset);

        local_irq_restore(flags);
}

void kmem_cache_free(struct kmem_cache *s, void *x)
{
        struct page *page;

        page = virt_to_head_page(x);

        slab_free(s, page, x, __builtin_return_address(0));
}
EXPORT_SYMBOL(kmem_cache_free);
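
/*
 * Typical caller pattern (illustrative sketch, not from this file;
 * "my_cache" and struct my_obj are hypothetical):
 *
 *      struct kmem_cache *s = kmem_cache_create("my_cache",
 *                              sizeof(struct my_obj), 0, 0, NULL);
 *      struct my_obj *p = kmem_cache_alloc(s, GFP_KERNEL);
 *      ...
 *      kmem_cache_free(s, p);
 *
 * The alloc normally takes the lockless fastpath above; the free takes
 * the fastpath only if p still belongs to this cpu's active slab.
 */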

/* Figure out on which slab page the object resides */
static struct page *get_object_page(const void *x)
{
        struct page *page = virt_to_head_page(x);

        if (!PageSlab(page))
                return NULL;

        return page;
}

/*
 * Object placement in a slab is made very easy because we always start at
 * offset 0. If we tune the size of the object to the alignment then we can
 * get the required alignment by putting one properly sized object after
 * another.
 *
 * Notice that the allocation order determines the sizes of the per cpu
 * caches. Each processor always has one slab available for allocations.
 * Increasing the allocation order reduces the number of times that slabs
 * must be moved on and off the partial lists and is therefore a factor in
 * locking overhead.
 */

/*
 * Minimum / Maximum order of slab pages. This influences locking overhead
 * and slab fragmentation. A higher order reduces the number of partial slabs
 * and increases the number of allocations possible without having to
 * take the list_lock.
 */
static int slub_min_order;
static int slub_max_order = PAGE_ALLOC_COSTLY_ORDER;
static int slub_min_objects;

/*
 * Merge control. If this is set then no merging of slab caches will occur.
 * (Could be removed. This was introduced to pacify the merge skeptics.)
 */
static int slub_nomerge;
1777
81819f0f
CL
1778/*
 1779 * Calculate the order of allocation given a slab object size.
1780 *
672bba3a
CL
1781 * The order of allocation has significant impact on performance and other
1782 * system components. Generally order 0 allocations should be preferred since
1783 * order 0 does not cause fragmentation in the page allocator. Larger objects
 1784 * can be problematic to put into order 0 slabs because there may be too much
c124f5b5 1785 * unused space left. We go to a higher order if more than 1/16th of the slab
672bba3a
CL
1786 * would be wasted.
1787 *
1788 * In order to reach satisfactory performance we must ensure that a minimum
1789 * number of objects is in one slab. Otherwise we may generate too much
1790 * activity on the partial lists which requires taking the list_lock. This is
1791 * less a concern for large slabs though which are rarely used.
81819f0f 1792 *
672bba3a
CL
 1793 * slub_max_order specifies the order at which we stop considering the
1794 * number of objects in a slab as critical. If we reach slub_max_order then
1795 * we try to keep the page order as low as possible. So we accept more waste
1796 * of space in favor of a small page order.
81819f0f 1797 *
672bba3a
CL
1798 * Higher order allocations also allow the placement of more objects in a
1799 * slab and thereby reduce object handling overhead. If the user has
 1800 * requested a higher minimum order then we start with that one instead of
1801 * the smallest order which will fit the object.
81819f0f 1802 */
5e6d444e
CL
1803static inline int slab_order(int size, int min_objects,
1804 int max_order, int fract_leftover)
81819f0f
CL
1805{
1806 int order;
1807 int rem;
6300ea75 1808 int min_order = slub_min_order;
81819f0f 1809
39b26464
CL
1810 if ((PAGE_SIZE << min_order) / size > 65535)
1811 return get_order(size * 65535) - 1;
1812
6300ea75 1813 for (order = max(min_order,
5e6d444e
CL
1814 fls(min_objects * size - 1) - PAGE_SHIFT);
1815 order <= max_order; order++) {
81819f0f 1816
5e6d444e 1817 unsigned long slab_size = PAGE_SIZE << order;
81819f0f 1818
5e6d444e 1819 if (slab_size < min_objects * size)
81819f0f
CL
1820 continue;
1821
1822 rem = slab_size % size;
1823
5e6d444e 1824 if (rem <= slab_size / fract_leftover)
81819f0f
CL
1825 break;
1826
1827 }
672bba3a 1828
81819f0f
CL
1829 return order;
1830}
1831
5e6d444e
CL
1832static inline int calculate_order(int size)
1833{
1834 int order;
1835 int min_objects;
1836 int fraction;
1837
1838 /*
1839 * Attempt to find best configuration for a slab. This
1840 * works by first attempting to generate a layout with
1841 * the best configuration and backing off gradually.
1842 *
1843 * First we reduce the acceptable waste in a slab. Then
1844 * we reduce the minimum objects required in a slab.
1845 */
1846 min_objects = slub_min_objects;
9b2cd506
CL
1847 if (!min_objects)
1848 min_objects = 4 * (fls(nr_cpu_ids) + 1);
5e6d444e 1849 while (min_objects > 1) {
c124f5b5 1850 fraction = 16;
5e6d444e
CL
1851 while (fraction >= 4) {
1852 order = slab_order(size, min_objects,
1853 slub_max_order, fraction);
1854 if (order <= slub_max_order)
1855 return order;
1856 fraction /= 2;
1857 }
1858 min_objects /= 2;
1859 }
1860
1861 /*
1862 * We were unable to place multiple objects in a slab. Now
 1863 * let's see if we can place a single object there.
1864 */
1865 order = slab_order(size, 1, slub_max_order, 1);
1866 if (order <= slub_max_order)
1867 return order;
1868
1869 /*
1870 * Doh this slab cannot be placed using slub_max_order.
1871 */
1872 order = slab_order(size, 1, MAX_ORDER, 1);
1873 if (order <= MAX_ORDER)
1874 return order;
1875 return -ENOSYS;
1876}
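/*
 * Worked example (illustrative; assumes PAGE_SIZE 4096, slub_min_order 0,
 * slub_max_order 3 (PAGE_ALLOC_COSTLY_ORDER) and nr_cpu_ids 16, so
 * min_objects = 4 * (fls(16) + 1) = 24): for size = 256 the first
 * candidate with fraction = 16 is
 * order = fls(24 * 256 - 1) - PAGE_SHIFT = 13 - 12 = 1. An order 1 slab
 * has 8192 bytes, covers the 24 * 256 = 6144 byte minimum and wastes
 * 8192 % 256 = 0 bytes, so order 1 is returned immediately.
 */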
1877
81819f0f 1878/*
672bba3a 1879 * Figure out what the alignment of the objects will be.
81819f0f
CL
1880 */
1881static unsigned long calculate_alignment(unsigned long flags,
1882 unsigned long align, unsigned long size)
1883{
1884 /*
6446faa2
CL
1885 * If the user wants hardware cache aligned objects then follow that
1886 * suggestion if the object is sufficiently large.
81819f0f 1887 *
6446faa2
CL
1888 * The hardware cache alignment cannot override the specified
 1889 * alignment though. If that is greater, use it.
81819f0f 1890 */
b6210386
NP
1891 if (flags & SLAB_HWCACHE_ALIGN) {
1892 unsigned long ralign = cache_line_size();
1893 while (size <= ralign / 2)
1894 ralign /= 2;
1895 align = max(align, ralign);
1896 }
81819f0f
CL
1897
1898 if (align < ARCH_SLAB_MINALIGN)
b6210386 1899 align = ARCH_SLAB_MINALIGN;
81819f0f
CL
1900
1901 return ALIGN(align, sizeof(void *));
1902}
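/*
 * Worked example (illustrative; assumes a 64 byte cache line and
 * SLAB_HWCACHE_ALIGN): for a 20 byte object the loop halves ralign from
 * 64 to 32 (20 <= 32) and then stops (20 > 16), so the object is only
 * aligned to 32 bytes. A 40 byte object fails the very first test
 * (40 > 32) and keeps the full 64 byte cache line alignment.
 */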
1903
dfb4f096
CL
1904static void init_kmem_cache_cpu(struct kmem_cache *s,
1905 struct kmem_cache_cpu *c)
1906{
1907 c->page = NULL;
a973e9dd 1908 c->freelist = NULL;
dfb4f096 1909 c->node = 0;
42a9fdbb
CL
1910 c->offset = s->offset / sizeof(void *);
1911 c->objsize = s->objsize;
62f75532
PE
1912#ifdef CONFIG_SLUB_STATS
1913 memset(c->stat, 0, NR_SLUB_STAT_ITEMS * sizeof(unsigned));
1914#endif
dfb4f096
CL
1915}
1916
5595cffc
PE
1917static void
1918init_kmem_cache_node(struct kmem_cache_node *n, struct kmem_cache *s)
81819f0f
CL
1919{
1920 n->nr_partial = 0;
5595cffc
PE
1921
1922 /*
1923 * The larger the object size is, the more pages we want on the partial
1924 * list to avoid pounding the page allocator excessively.
1925 */
1926 n->min_partial = ilog2(s->size);
1927 if (n->min_partial < MIN_PARTIAL)
1928 n->min_partial = MIN_PARTIAL;
1929 else if (n->min_partial > MAX_PARTIAL)
1930 n->min_partial = MAX_PARTIAL;
1931
81819f0f
CL
1932 spin_lock_init(&n->list_lock);
1933 INIT_LIST_HEAD(&n->partial);
8ab1372f 1934#ifdef CONFIG_SLUB_DEBUG
0f389ec6 1935 atomic_long_set(&n->nr_slabs, 0);
02b71b70 1936 atomic_long_set(&n->total_objects, 0);
643b1138 1937 INIT_LIST_HEAD(&n->full);
8ab1372f 1938#endif
81819f0f
CL
1939}
1940
4c93c355
CL
1941#ifdef CONFIG_SMP
1942/*
1943 * Per cpu array for per cpu structures.
1944 *
1945 * The per cpu array places all kmem_cache_cpu structures from one processor
1946 * close together meaning that it becomes possible that multiple per cpu
1947 * structures are contained in one cacheline. This may be particularly
1948 * beneficial for the kmalloc caches.
1949 *
1950 * A desktop system typically has around 60-80 slabs. With 100 here we are
1951 * likely able to get per cpu structures for all caches from the array defined
1952 * here. We must be able to cover all kmalloc caches during bootstrap.
1953 *
1954 * If the per cpu array is exhausted then fall back to kmalloc
1955 * of individual cachelines. No sharing is possible then.
1956 */
1957#define NR_KMEM_CACHE_CPU 100
1958
1959static DEFINE_PER_CPU(struct kmem_cache_cpu,
1960 kmem_cache_cpu)[NR_KMEM_CACHE_CPU];
1961
1962static DEFINE_PER_CPU(struct kmem_cache_cpu *, kmem_cache_cpu_free);
1963static cpumask_t kmem_cach_cpu_free_init_once = CPU_MASK_NONE;
1964
1965static struct kmem_cache_cpu *alloc_kmem_cache_cpu(struct kmem_cache *s,
1966 int cpu, gfp_t flags)
1967{
1968 struct kmem_cache_cpu *c = per_cpu(kmem_cache_cpu_free, cpu);
1969
1970 if (c)
1971 per_cpu(kmem_cache_cpu_free, cpu) =
1972 (void *)c->freelist;
1973 else {
1974 /* Table overflow: So allocate ourselves */
1975 c = kmalloc_node(
1976 ALIGN(sizeof(struct kmem_cache_cpu), cache_line_size()),
1977 flags, cpu_to_node(cpu));
1978 if (!c)
1979 return NULL;
1980 }
1981
1982 init_kmem_cache_cpu(s, c);
1983 return c;
1984}
1985
1986static void free_kmem_cache_cpu(struct kmem_cache_cpu *c, int cpu)
1987{
1988 if (c < per_cpu(kmem_cache_cpu, cpu) ||
1989 c > per_cpu(kmem_cache_cpu, cpu) + NR_KMEM_CACHE_CPU) {
1990 kfree(c);
1991 return;
1992 }
1993 c->freelist = (void *)per_cpu(kmem_cache_cpu_free, cpu);
1994 per_cpu(kmem_cache_cpu_free, cpu) = c;
1995}
1996
1997static void free_kmem_cache_cpus(struct kmem_cache *s)
1998{
1999 int cpu;
2000
2001 for_each_online_cpu(cpu) {
2002 struct kmem_cache_cpu *c = get_cpu_slab(s, cpu);
2003
2004 if (c) {
2005 s->cpu_slab[cpu] = NULL;
2006 free_kmem_cache_cpu(c, cpu);
2007 }
2008 }
2009}
2010
2011static int alloc_kmem_cache_cpus(struct kmem_cache *s, gfp_t flags)
2012{
2013 int cpu;
2014
2015 for_each_online_cpu(cpu) {
2016 struct kmem_cache_cpu *c = get_cpu_slab(s, cpu);
2017
2018 if (c)
2019 continue;
2020
2021 c = alloc_kmem_cache_cpu(s, cpu, flags);
2022 if (!c) {
2023 free_kmem_cache_cpus(s);
2024 return 0;
2025 }
2026 s->cpu_slab[cpu] = c;
2027 }
2028 return 1;
2029}
2030
2031/*
2032 * Initialize the per cpu array.
2033 */
2034static void init_alloc_cpu_cpu(int cpu)
2035{
2036 int i;
2037
2038 if (cpu_isset(cpu, kmem_cach_cpu_free_init_once))
2039 return;
2040
2041 for (i = NR_KMEM_CACHE_CPU - 1; i >= 0; i--)
2042 free_kmem_cache_cpu(&per_cpu(kmem_cache_cpu, cpu)[i], cpu);
2043
2044 cpu_set(cpu, kmem_cach_cpu_free_init_once);
2045}
2046
2047static void __init init_alloc_cpu(void)
2048{
2049 int cpu;
2050
2051 for_each_online_cpu(cpu)
2052 init_alloc_cpu_cpu(cpu);
 2053}
2054
2055#else
2056static inline void free_kmem_cache_cpus(struct kmem_cache *s) {}
2057static inline void init_alloc_cpu(void) {}
2058
2059static inline int alloc_kmem_cache_cpus(struct kmem_cache *s, gfp_t flags)
2060{
2061 init_kmem_cache_cpu(s, &s->cpu_slab);
2062 return 1;
2063}
2064#endif
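/*
 * Editor's note, not part of the original file: the SMP pool above needs
 * no extra memory for bookkeeping because the freelist field of every
 * unused kmem_cache_cpu doubles as the link of a per cpu singly linked
 * list. Popping one, as at the start of alloc_kmem_cache_cpu(), is just:
 *
 *	struct kmem_cache_cpu *c = per_cpu(kmem_cache_cpu_free, cpu);
 *	if (c)
 *		per_cpu(kmem_cache_cpu_free, cpu) = (void *)c->freelist;
 */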
2065
81819f0f
CL
2066#ifdef CONFIG_NUMA
2067/*
 2068 * No kmalloc_node yet, so do it by hand. We know that this is the first
2069 * slab on the node for this slabcache. There are no concurrent accesses
2070 * possible.
2071 *
2072 * Note that this function only works on the kmalloc_node_cache
4c93c355
CL
2073 * when allocating for the kmalloc_node_cache. This is used for bootstrapping
2074 * memory on a fresh node that has no slab structures yet.
81819f0f 2075 */
1cd7daa5
AB
2076static struct kmem_cache_node *early_kmem_cache_node_alloc(gfp_t gfpflags,
2077 int node)
81819f0f
CL
2078{
2079 struct page *page;
2080 struct kmem_cache_node *n;
ba84c73c 2081 unsigned long flags;
81819f0f
CL
2082
2083 BUG_ON(kmalloc_caches->size < sizeof(struct kmem_cache_node));
2084
a2f92ee7 2085 page = new_slab(kmalloc_caches, gfpflags, node);
81819f0f
CL
2086
2087 BUG_ON(!page);
a2f92ee7
CL
2088 if (page_to_nid(page) != node) {
2089 printk(KERN_ERR "SLUB: Unable to allocate memory from "
2090 "node %d\n", node);
2091 printk(KERN_ERR "SLUB: Allocating a useless per node structure "
2092 "in order to be able to continue\n");
2093 }
2094
81819f0f
CL
2095 n = page->freelist;
2096 BUG_ON(!n);
2097 page->freelist = get_freepointer(kmalloc_caches, n);
2098 page->inuse++;
2099 kmalloc_caches->node[node] = n;
8ab1372f 2100#ifdef CONFIG_SLUB_DEBUG
d45f39cb
CL
2101 init_object(kmalloc_caches, n, 1);
2102 init_tracking(kmalloc_caches, n);
8ab1372f 2103#endif
5595cffc 2104 init_kmem_cache_node(n, kmalloc_caches);
205ab99d 2105 inc_slabs_node(kmalloc_caches, node, page->objects);
6446faa2 2106
ba84c73c 2107 /*
2108 * lockdep requires consistent irq usage for each lock
2109 * so even though there cannot be a race this early in
2110 * the boot sequence, we still disable irqs.
2111 */
2112 local_irq_save(flags);
7c2e132c 2113 add_partial(n, page, 0);
ba84c73c 2114 local_irq_restore(flags);
81819f0f
CL
2115 return n;
2116}
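/*
 * Editor's note, not part of the original file: since the kmalloc_node
 * cache cannot allocate from itself yet, the function above carves the
 * kmem_cache_node out of the fresh slab by hand. It pops the first
 * object off page->freelist and bumps page->inuse, exactly what a
 * regular allocation would do under the slab lock.
 */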
2117
2118static void free_kmem_cache_nodes(struct kmem_cache *s)
2119{
2120 int node;
2121
f64dc58c 2122 for_each_node_state(node, N_NORMAL_MEMORY) {
81819f0f
CL
2123 struct kmem_cache_node *n = s->node[node];
2124 if (n && n != &s->local_node)
2125 kmem_cache_free(kmalloc_caches, n);
2126 s->node[node] = NULL;
2127 }
2128}
2129
2130static int init_kmem_cache_nodes(struct kmem_cache *s, gfp_t gfpflags)
2131{
2132 int node;
2133 int local_node;
2134
2135 if (slab_state >= UP)
2136 local_node = page_to_nid(virt_to_page(s));
2137 else
2138 local_node = 0;
2139
f64dc58c 2140 for_each_node_state(node, N_NORMAL_MEMORY) {
81819f0f
CL
2141 struct kmem_cache_node *n;
2142
2143 if (local_node == node)
2144 n = &s->local_node;
2145 else {
2146 if (slab_state == DOWN) {
2147 n = early_kmem_cache_node_alloc(gfpflags,
2148 node);
2149 continue;
2150 }
2151 n = kmem_cache_alloc_node(kmalloc_caches,
2152 gfpflags, node);
2153
2154 if (!n) {
2155 free_kmem_cache_nodes(s);
2156 return 0;
2157 }
2158
2159 }
2160 s->node[node] = n;
5595cffc 2161 init_kmem_cache_node(n, s);
81819f0f
CL
2162 }
2163 return 1;
2164}
2165#else
2166static void free_kmem_cache_nodes(struct kmem_cache *s)
2167{
2168}
2169
2170static int init_kmem_cache_nodes(struct kmem_cache *s, gfp_t gfpflags)
2171{
5595cffc 2172 init_kmem_cache_node(&s->local_node, s);
81819f0f
CL
2173 return 1;
2174}
2175#endif
2176
2177/*
2178 * calculate_sizes() determines the order and the distribution of data within
2179 * a slab object.
2180 */
06b285dc 2181static int calculate_sizes(struct kmem_cache *s, int forced_order)
81819f0f
CL
2182{
2183 unsigned long flags = s->flags;
2184 unsigned long size = s->objsize;
2185 unsigned long align = s->align;
834f3d11 2186 int order;
81819f0f 2187
d8b42bf5
CL
2188 /*
2189 * Round up object size to the next word boundary. We can only
2190 * place the free pointer at word boundaries and this determines
2191 * the possible location of the free pointer.
2192 */
2193 size = ALIGN(size, sizeof(void *));
2194
2195#ifdef CONFIG_SLUB_DEBUG
81819f0f
CL
2196 /*
2197 * Determine if we can poison the object itself. If the user of
2198 * the slab may touch the object after free or before allocation
2199 * then we should never poison the object itself.
2200 */
2201 if ((flags & SLAB_POISON) && !(flags & SLAB_DESTROY_BY_RCU) &&
c59def9f 2202 !s->ctor)
81819f0f
CL
2203 s->flags |= __OBJECT_POISON;
2204 else
2205 s->flags &= ~__OBJECT_POISON;
2206
81819f0f
CL
2207
2208 /*
672bba3a 2209 * If we are Redzoning then check if there is some space between the
81819f0f 2210 * end of the object and the free pointer. If not then add an
672bba3a 2211 * additional word to have some bytes to store Redzone information.
81819f0f
CL
2212 */
2213 if ((flags & SLAB_RED_ZONE) && size == s->objsize)
2214 size += sizeof(void *);
41ecc55b 2215#endif
81819f0f
CL
2216
2217 /*
672bba3a
CL
2218 * With that we have determined the number of bytes in actual use
2219 * by the object. This is the potential offset to the free pointer.
81819f0f
CL
2220 */
2221 s->inuse = size;
2222
2223 if (((flags & (SLAB_DESTROY_BY_RCU | SLAB_POISON)) ||
c59def9f 2224 s->ctor)) {
81819f0f
CL
2225 /*
2226 * Relocate free pointer after the object if it is not
2227 * permitted to overwrite the first word of the object on
2228 * kmem_cache_free.
2229 *
2230 * This is the case if we do RCU, have a constructor or
2231 * destructor or are poisoning the objects.
2232 */
2233 s->offset = size;
2234 size += sizeof(void *);
2235 }
2236
c12b3c62 2237#ifdef CONFIG_SLUB_DEBUG
81819f0f
CL
2238 if (flags & SLAB_STORE_USER)
2239 /*
2240 * Need to store information about allocs and frees after
2241 * the object.
2242 */
2243 size += 2 * sizeof(struct track);
2244
be7b3fbc 2245 if (flags & SLAB_RED_ZONE)
81819f0f
CL
2246 /*
2247 * Add some empty padding so that we can catch
2248 * overwrites from earlier objects rather than let
2249 * tracking information or the free pointer be
 2251 * corrupted if a user writes before the start
2251 * of the object.
2252 */
2253 size += sizeof(void *);
41ecc55b 2254#endif
672bba3a 2255
81819f0f
CL
2256 /*
2257 * Determine the alignment based on various parameters that the
65c02d4c
CL
2258 * user specified and the dynamic determination of cache line size
2259 * on bootup.
81819f0f
CL
2260 */
2261 align = calculate_alignment(flags, align, s->objsize);
2262
2263 /*
2264 * SLUB stores one object immediately after another beginning from
2265 * offset 0. In order to align the objects we have to simply size
2266 * each object to conform to the alignment.
2267 */
2268 size = ALIGN(size, align);
2269 s->size = size;
06b285dc
CL
2270 if (forced_order >= 0)
2271 order = forced_order;
2272 else
2273 order = calculate_order(size);
81819f0f 2274
834f3d11 2275 if (order < 0)
81819f0f
CL
2276 return 0;
2277
b7a49f0d 2278 s->allocflags = 0;
834f3d11 2279 if (order)
b7a49f0d
CL
2280 s->allocflags |= __GFP_COMP;
2281
2282 if (s->flags & SLAB_CACHE_DMA)
2283 s->allocflags |= SLUB_DMA;
2284
2285 if (s->flags & SLAB_RECLAIM_ACCOUNT)
2286 s->allocflags |= __GFP_RECLAIMABLE;
2287
81819f0f
CL
2288 /*
2289 * Determine the number of objects per slab
2290 */
834f3d11 2291 s->oo = oo_make(order, size);
65c3376a 2292 s->min = oo_make(get_order(size), size);
205ab99d
CL
2293 if (oo_objects(s->oo) > oo_objects(s->max))
2294 s->max = s->oo;
81819f0f 2295
834f3d11 2296 return !!oo_objects(s->oo);
81819f0f
CL
2297
2298}
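/*
 * Worked example (illustrative; assumes a 64 bit build, no debug flags
 * and an ARCH_SLAB_MINALIGN of 8): an objsize of 52 rounds up to
 * size = 56. Without a constructor the free pointer overlays the first
 * word of a free object (s->offset = 0, s->inuse = 56) and s->size is
 * 56. With a constructor the free pointer must move behind the object,
 * so s->offset becomes 56 and s->size grows to 64.
 */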
2299
81819f0f
CL
2300static int kmem_cache_open(struct kmem_cache *s, gfp_t gfpflags,
2301 const char *name, size_t size,
2302 size_t align, unsigned long flags,
51cc5068 2303 void (*ctor)(void *))
81819f0f
CL
2304{
2305 memset(s, 0, kmem_size);
2306 s->name = name;
2307 s->ctor = ctor;
81819f0f 2308 s->objsize = size;
81819f0f 2309 s->align = align;
ba0268a8 2310 s->flags = kmem_cache_flags(size, flags, name, ctor);
81819f0f 2311
06b285dc 2312 if (!calculate_sizes(s, -1))
81819f0f
CL
2313 goto error;
2314
2315 s->refcount = 1;
2316#ifdef CONFIG_NUMA
e2cb96b7 2317 s->remote_node_defrag_ratio = 1000;
81819f0f 2318#endif
dfb4f096
CL
2319 if (!init_kmem_cache_nodes(s, gfpflags & ~SLUB_DMA))
2320 goto error;
81819f0f 2321
dfb4f096 2322 if (alloc_kmem_cache_cpus(s, gfpflags & ~SLUB_DMA))
81819f0f 2323 return 1;
4c93c355 2324 free_kmem_cache_nodes(s);
81819f0f
CL
2325error:
2326 if (flags & SLAB_PANIC)
2327 panic("Cannot create slab %s size=%lu realsize=%u "
2328 "order=%u offset=%u flags=%lx\n",
834f3d11 2329 s->name, (unsigned long)size, s->size, oo_order(s->oo),
81819f0f
CL
2330 s->offset, flags);
2331 return 0;
2332}
81819f0f
CL
2333
2334/*
2335 * Check if a given pointer is valid
2336 */
2337int kmem_ptr_validate(struct kmem_cache *s, const void *object)
2338{
06428780 2339 struct page *page;
81819f0f
CL
2340
2341 page = get_object_page(object);
2342
2343 if (!page || s != page->slab)
2344 /* No slab or wrong slab */
2345 return 0;
2346
abcd08a6 2347 if (!check_valid_pointer(s, page, object))
81819f0f
CL
2348 return 0;
2349
2350 /*
 2351 * We could also check if the object is on the slab's freelist.
 2352 * But this would be too expensive and it seems that the main
6446faa2 2353 * purpose of kmem_ptr_validate() is to check if the object belongs
81819f0f
CL
2354 * to a certain slab.
2355 */
2356 return 1;
2357}
2358EXPORT_SYMBOL(kmem_ptr_validate);
2359
2360/*
2361 * Determine the size of a slab object
2362 */
2363unsigned int kmem_cache_size(struct kmem_cache *s)
2364{
2365 return s->objsize;
2366}
2367EXPORT_SYMBOL(kmem_cache_size);
2368
2369const char *kmem_cache_name(struct kmem_cache *s)
2370{
2371 return s->name;
2372}
2373EXPORT_SYMBOL(kmem_cache_name);
2374
33b12c38
CL
2375static void list_slab_objects(struct kmem_cache *s, struct page *page,
2376 const char *text)
2377{
2378#ifdef CONFIG_SLUB_DEBUG
2379 void *addr = page_address(page);
2380 void *p;
2381 DECLARE_BITMAP(map, page->objects);
2382
2383 bitmap_zero(map, page->objects);
2384 slab_err(s, page, "%s", text);
2385 slab_lock(page);
2386 for_each_free_object(p, s, page->freelist)
2387 set_bit(slab_index(p, s, addr), map);
2388
2389 for_each_object(p, s, addr, page->objects) {
2390
2391 if (!test_bit(slab_index(p, s, addr), map)) {
2392 printk(KERN_ERR "INFO: Object 0x%p @offset=%tu\n",
2393 p, p - addr);
2394 print_tracking(s, p);
2395 }
2396 }
2397 slab_unlock(page);
2398#endif
2399}
2400
81819f0f 2401/*
599870b1 2402 * Attempt to free all partial slabs on a node.
81819f0f 2403 */
599870b1 2404static void free_partial(struct kmem_cache *s, struct kmem_cache_node *n)
81819f0f 2405{
81819f0f
CL
2406 unsigned long flags;
2407 struct page *page, *h;
2408
2409 spin_lock_irqsave(&n->list_lock, flags);
33b12c38 2410 list_for_each_entry_safe(page, h, &n->partial, lru) {
81819f0f
CL
2411 if (!page->inuse) {
2412 list_del(&page->lru);
2413 discard_slab(s, page);
599870b1 2414 n->nr_partial--;
33b12c38
CL
2415 } else {
2416 list_slab_objects(s, page,
2417 "Objects remaining on kmem_cache_close()");
599870b1 2418 }
33b12c38 2419 }
81819f0f 2420 spin_unlock_irqrestore(&n->list_lock, flags);
81819f0f
CL
2421}
2422
2423/*
672bba3a 2424 * Release all resources used by a slab cache.
81819f0f 2425 */
0c710013 2426static inline int kmem_cache_close(struct kmem_cache *s)
81819f0f
CL
2427{
2428 int node;
2429
2430 flush_all(s);
2431
2432 /* Attempt to free all objects */
4c93c355 2433 free_kmem_cache_cpus(s);
f64dc58c 2434 for_each_node_state(node, N_NORMAL_MEMORY) {
81819f0f
CL
2435 struct kmem_cache_node *n = get_node(s, node);
2436
599870b1
CL
2437 free_partial(s, n);
2438 if (n->nr_partial || slabs_node(s, node))
81819f0f
CL
2439 return 1;
2440 }
2441 free_kmem_cache_nodes(s);
2442 return 0;
2443}
2444
2445/*
2446 * Close a cache and release the kmem_cache structure
2447 * (must be used for caches created using kmem_cache_create)
2448 */
2449void kmem_cache_destroy(struct kmem_cache *s)
2450{
2451 down_write(&slub_lock);
2452 s->refcount--;
2453 if (!s->refcount) {
2454 list_del(&s->list);
a0e1d1be 2455 up_write(&slub_lock);
d629d819
PE
2456 if (kmem_cache_close(s)) {
2457 printk(KERN_ERR "SLUB %s: %s called for cache that "
2458 "still has objects.\n", s->name, __func__);
2459 dump_stack();
2460 }
81819f0f 2461 sysfs_slab_remove(s);
a0e1d1be
CL
2462 } else
2463 up_write(&slub_lock);
81819f0f
CL
2464}
2465EXPORT_SYMBOL(kmem_cache_destroy);
2466
2467/********************************************************************
2468 * Kmalloc subsystem
2469 *******************************************************************/
2470
331dc558 2471struct kmem_cache kmalloc_caches[PAGE_SHIFT + 1] __cacheline_aligned;
81819f0f
CL
2472EXPORT_SYMBOL(kmalloc_caches);
2473
81819f0f
CL
2474static int __init setup_slub_min_order(char *str)
2475{
06428780 2476 get_option(&str, &slub_min_order);
81819f0f
CL
2477
2478 return 1;
2479}
2480
2481__setup("slub_min_order=", setup_slub_min_order);
2482
2483static int __init setup_slub_max_order(char *str)
2484{
06428780 2485 get_option(&str, &slub_max_order);
81819f0f
CL
2486
2487 return 1;
2488}
2489
2490__setup("slub_max_order=", setup_slub_max_order);
2491
2492static int __init setup_slub_min_objects(char *str)
2493{
06428780 2494 get_option(&str, &slub_min_objects);
81819f0f
CL
2495
2496 return 1;
2497}
2498
2499__setup("slub_min_objects=", setup_slub_min_objects);
2500
2501static int __init setup_slub_nomerge(char *str)
2502{
2503 slub_nomerge = 1;
2504 return 1;
2505}
2506
2507__setup("slub_nomerge", setup_slub_nomerge);
2508
81819f0f
CL
2509static struct kmem_cache *create_kmalloc_cache(struct kmem_cache *s,
2510 const char *name, int size, gfp_t gfp_flags)
2511{
2512 unsigned int flags = 0;
2513
2514 if (gfp_flags & SLUB_DMA)
2515 flags = SLAB_CACHE_DMA;
2516
2517 down_write(&slub_lock);
2518 if (!kmem_cache_open(s, gfp_flags, name, size, ARCH_KMALLOC_MINALIGN,
319d1e24 2519 flags, NULL))
81819f0f
CL
2520 goto panic;
2521
2522 list_add(&s->list, &slab_caches);
2523 up_write(&slub_lock);
2524 if (sysfs_slab_add(s))
2525 goto panic;
2526 return s;
2527
2528panic:
2529 panic("Creation of kmalloc slab %s size=%d failed.\n", name, size);
2530}
2531
2e443fd0 2532#ifdef CONFIG_ZONE_DMA
4097d601 2533static struct kmem_cache *kmalloc_caches_dma[PAGE_SHIFT + 1];
1ceef402
CL
2534
2535static void sysfs_add_func(struct work_struct *w)
2536{
2537 struct kmem_cache *s;
2538
2539 down_write(&slub_lock);
2540 list_for_each_entry(s, &slab_caches, list) {
2541 if (s->flags & __SYSFS_ADD_DEFERRED) {
2542 s->flags &= ~__SYSFS_ADD_DEFERRED;
2543 sysfs_slab_add(s);
2544 }
2545 }
2546 up_write(&slub_lock);
2547}
2548
2549static DECLARE_WORK(sysfs_add_work, sysfs_add_func);
2550
2e443fd0
CL
2551static noinline struct kmem_cache *dma_kmalloc_cache(int index, gfp_t flags)
2552{
2553 struct kmem_cache *s;
2e443fd0
CL
2554 char *text;
2555 size_t realsize;
2556
2557 s = kmalloc_caches_dma[index];
2558 if (s)
2559 return s;
2560
2561 /* Dynamically create dma cache */
1ceef402
CL
2562 if (flags & __GFP_WAIT)
2563 down_write(&slub_lock);
2564 else {
2565 if (!down_write_trylock(&slub_lock))
2566 goto out;
2567 }
2568
2569 if (kmalloc_caches_dma[index])
2570 goto unlock_out;
2e443fd0 2571
7b55f620 2572 realsize = kmalloc_caches[index].objsize;
3adbefee
IM
2573 text = kasprintf(flags & ~SLUB_DMA, "kmalloc_dma-%d",
2574 (unsigned int)realsize);
1ceef402
CL
2575 s = kmalloc(kmem_size, flags & ~SLUB_DMA);
2576
2577 if (!s || !text || !kmem_cache_open(s, flags, text,
2578 realsize, ARCH_KMALLOC_MINALIGN,
2579 SLAB_CACHE_DMA|__SYSFS_ADD_DEFERRED, NULL)) {
2580 kfree(s);
2581 kfree(text);
2582 goto unlock_out;
dfce8648 2583 }
1ceef402
CL
2584
2585 list_add(&s->list, &slab_caches);
2586 kmalloc_caches_dma[index] = s;
2587
2588 schedule_work(&sysfs_add_work);
2589
2590unlock_out:
dfce8648 2591 up_write(&slub_lock);
1ceef402 2592out:
dfce8648 2593 return kmalloc_caches_dma[index];
2e443fd0
CL
2594}
2595#endif
2596
f1b26339
CL
2597/*
 2598 * Conversion table for small slab sizes / 8 to the index in the
2599 * kmalloc array. This is necessary for slabs < 192 since we have non power
2600 * of two cache sizes there. The size of larger slabs can be determined using
2601 * fls.
2602 */
2603static s8 size_index[24] = {
2604 3, /* 8 */
2605 4, /* 16 */
2606 5, /* 24 */
2607 5, /* 32 */
2608 6, /* 40 */
2609 6, /* 48 */
2610 6, /* 56 */
2611 6, /* 64 */
2612 1, /* 72 */
2613 1, /* 80 */
2614 1, /* 88 */
2615 1, /* 96 */
2616 7, /* 104 */
2617 7, /* 112 */
2618 7, /* 120 */
2619 7, /* 128 */
2620 2, /* 136 */
2621 2, /* 144 */
2622 2, /* 152 */
2623 2, /* 160 */
2624 2, /* 168 */
2625 2, /* 176 */
2626 2, /* 184 */
2627 2 /* 192 */
2628};
2629
81819f0f
CL
2630static struct kmem_cache *get_slab(size_t size, gfp_t flags)
2631{
f1b26339 2632 int index;
81819f0f 2633
f1b26339
CL
2634 if (size <= 192) {
2635 if (!size)
2636 return ZERO_SIZE_PTR;
81819f0f 2637
f1b26339 2638 index = size_index[(size - 1) / 8];
aadb4bc4 2639 } else
f1b26339 2640 index = fls(size - 1);
81819f0f
CL
2641
2642#ifdef CONFIG_ZONE_DMA
f1b26339 2643 if (unlikely((flags & SLUB_DMA)))
2e443fd0 2644 return dma_kmalloc_cache(index, flags);
f1b26339 2645
81819f0f
CL
2646#endif
2647 return &kmalloc_caches[index];
2648}
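/*
 * Worked example (illustrative): kmalloc(100) takes the table path,
 * size_index[(100 - 1) / 8] = size_index[12] = 7, and is served from
 * kmalloc-128. kmalloc(200) is above 192, so index = fls(199) = 8 and
 * the object comes from kmalloc-256.
 */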
2649
2650void *__kmalloc(size_t size, gfp_t flags)
2651{
aadb4bc4 2652 struct kmem_cache *s;
81819f0f 2653
331dc558 2654 if (unlikely(size > PAGE_SIZE))
eada35ef 2655 return kmalloc_large(size, flags);
aadb4bc4
CL
2656
2657 s = get_slab(size, flags);
2658
2659 if (unlikely(ZERO_OR_NULL_PTR(s)))
6cb8f913
CL
2660 return s;
2661
ce15fea8 2662 return slab_alloc(s, flags, -1, __builtin_return_address(0));
81819f0f
CL
2663}
2664EXPORT_SYMBOL(__kmalloc);
2665
f619cfe1
CL
2666static void *kmalloc_large_node(size_t size, gfp_t flags, int node)
2667{
2668 struct page *page = alloc_pages_node(node, flags | __GFP_COMP,
2669 get_order(size));
2670
2671 if (page)
2672 return page_address(page);
2673 else
2674 return NULL;
2675}
2676
81819f0f
CL
2677#ifdef CONFIG_NUMA
2678void *__kmalloc_node(size_t size, gfp_t flags, int node)
2679{
aadb4bc4 2680 struct kmem_cache *s;
81819f0f 2681
331dc558 2682 if (unlikely(size > PAGE_SIZE))
f619cfe1 2683 return kmalloc_large_node(size, flags, node);
aadb4bc4
CL
2684
2685 s = get_slab(size, flags);
2686
2687 if (unlikely(ZERO_OR_NULL_PTR(s)))
6cb8f913
CL
2688 return s;
2689
ce15fea8 2690 return slab_alloc(s, flags, node, __builtin_return_address(0));
81819f0f
CL
2691}
2692EXPORT_SYMBOL(__kmalloc_node);
2693#endif
2694
2695size_t ksize(const void *object)
2696{
272c1d21 2697 struct page *page;
81819f0f
CL
2698 struct kmem_cache *s;
2699
ef8b4520 2700 if (unlikely(object == ZERO_SIZE_PTR))
272c1d21
CL
2701 return 0;
2702
294a80a8 2703 page = virt_to_head_page(object);
294a80a8 2704
76994412
PE
2705 if (unlikely(!PageSlab(page))) {
2706 WARN_ON(!PageCompound(page));
294a80a8 2707 return PAGE_SIZE << compound_order(page);
76994412 2708 }
81819f0f 2709 s = page->slab;
81819f0f 2710
ae20bfda 2711#ifdef CONFIG_SLUB_DEBUG
81819f0f
CL
2712 /*
2713 * Debugging requires use of the padding between object
2714 * and whatever may come after it.
2715 */
2716 if (s->flags & (SLAB_RED_ZONE | SLAB_POISON))
2717 return s->objsize;
2718
ae20bfda 2719#endif
81819f0f
CL
2720 /*
2721 * If we have the need to store the freelist pointer
2722 * back there or track user information then we can
2723 * only use the space before that information.
2724 */
2725 if (s->flags & (SLAB_DESTROY_BY_RCU | SLAB_STORE_USER))
2726 return s->inuse;
81819f0f
CL
2727 /*
2728 * Else we can use all the padding etc for the allocation
2729 */
2730 return s->size;
2731}
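/*
 * Worked example (illustrative; assumes no debug flags): the kmalloc(100)
 * object from the example above lives in kmalloc-128, so ksize() reports
 * s->size = 128 even though only 100 bytes were requested. For a
 * kmalloc(2 * PAGE_SIZE) allocation the PageSlab test fails and the
 * compound page size, 2 * PAGE_SIZE, is returned.
 */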
81819f0f
CL
2732
2733void kfree(const void *x)
2734{
81819f0f 2735 struct page *page;
5bb983b0 2736 void *object = (void *)x;
81819f0f 2737
2408c550 2738 if (unlikely(ZERO_OR_NULL_PTR(x)))
81819f0f
CL
2739 return;
2740
b49af68f 2741 page = virt_to_head_page(x);
aadb4bc4 2742 if (unlikely(!PageSlab(page))) {
0937502a 2743 BUG_ON(!PageCompound(page));
aadb4bc4
CL
2744 put_page(page);
2745 return;
2746 }
5bb983b0 2747 slab_free(page->slab, page, object, __builtin_return_address(0));
81819f0f
CL
2748}
2749EXPORT_SYMBOL(kfree);
2750
2086d26a 2751/*
672bba3a
CL
2752 * kmem_cache_shrink removes empty slabs from the partial lists and sorts
2753 * the remaining slabs by the number of items in use. The slabs with the
2754 * most items in use come first. New allocations will then fill those up
2755 * and thus they can be removed from the partial lists.
2756 *
 2757 * The slabs with the least items are placed last. They are then the
 2758 * last to be allocated from, which increases the chance that their
 2759 * remaining objects are freed.
2086d26a
CL
2760 */
2761int kmem_cache_shrink(struct kmem_cache *s)
2762{
2763 int node;
2764 int i;
2765 struct kmem_cache_node *n;
2766 struct page *page;
2767 struct page *t;
205ab99d 2768 int objects = oo_objects(s->max);
2086d26a 2769 struct list_head *slabs_by_inuse =
834f3d11 2770 kmalloc(sizeof(struct list_head) * objects, GFP_KERNEL);
2086d26a
CL
2771 unsigned long flags;
2772
2773 if (!slabs_by_inuse)
2774 return -ENOMEM;
2775
2776 flush_all(s);
f64dc58c 2777 for_each_node_state(node, N_NORMAL_MEMORY) {
2086d26a
CL
2778 n = get_node(s, node);
2779
2780 if (!n->nr_partial)
2781 continue;
2782
834f3d11 2783 for (i = 0; i < objects; i++)
2086d26a
CL
2784 INIT_LIST_HEAD(slabs_by_inuse + i);
2785
2786 spin_lock_irqsave(&n->list_lock, flags);
2787
2788 /*
672bba3a 2789 * Build lists indexed by the items in use in each slab.
2086d26a 2790 *
672bba3a
CL
2791 * Note that concurrent frees may occur while we hold the
2792 * list_lock. page->inuse here is the upper limit.
2086d26a
CL
2793 */
2794 list_for_each_entry_safe(page, t, &n->partial, lru) {
2795 if (!page->inuse && slab_trylock(page)) {
2796 /*
2797 * Must hold slab lock here because slab_free
2798 * may have freed the last object and be
2799 * waiting to release the slab.
2800 */
2801 list_del(&page->lru);
2802 n->nr_partial--;
2803 slab_unlock(page);
2804 discard_slab(s, page);
2805 } else {
fcda3d89
CL
2806 list_move(&page->lru,
2807 slabs_by_inuse + page->inuse);
2086d26a
CL
2808 }
2809 }
2810
2086d26a 2811 /*
672bba3a
CL
2812 * Rebuild the partial list with the slabs filled up most
2813 * first and the least used slabs at the end.
2086d26a 2814 */
834f3d11 2815 for (i = objects - 1; i >= 0; i--)
2086d26a
CL
2816 list_splice(slabs_by_inuse + i, n->partial.prev);
2817
2086d26a
CL
2818 spin_unlock_irqrestore(&n->list_lock, flags);
2819 }
2820
2821 kfree(slabs_by_inuse);
2822 return 0;
2823}
2824EXPORT_SYMBOL(kmem_cache_shrink);
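/*
 * Editor's note, not part of the original file: the loop above is in
 * effect a counting sort. Each partial slab is bucketed into
 * slabs_by_inuse[page->inuse], and the buckets are spliced back to the
 * tail from index objects - 1 down to 0, so the fullest slabs end up
 * first without any comparison based sorting.
 */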
2825
b9049e23
YG
2826#if defined(CONFIG_NUMA) && defined(CONFIG_MEMORY_HOTPLUG)
2827static int slab_mem_going_offline_callback(void *arg)
2828{
2829 struct kmem_cache *s;
2830
2831 down_read(&slub_lock);
2832 list_for_each_entry(s, &slab_caches, list)
2833 kmem_cache_shrink(s);
2834 up_read(&slub_lock);
2835
2836 return 0;
2837}
2838
2839static void slab_mem_offline_callback(void *arg)
2840{
2841 struct kmem_cache_node *n;
2842 struct kmem_cache *s;
2843 struct memory_notify *marg = arg;
2844 int offline_node;
2845
2846 offline_node = marg->status_change_nid;
2847
2848 /*
 2849 * If the node still has available memory, we still need its
 2850 * kmem_cache_node.
2851 */
2852 if (offline_node < 0)
2853 return;
2854
2855 down_read(&slub_lock);
2856 list_for_each_entry(s, &slab_caches, list) {
2857 n = get_node(s, offline_node);
2858 if (n) {
2859 /*
2860 * if n->nr_slabs > 0, slabs still exist on the node
2861 * that is going down. We were unable to free them,
 2862 * and the offline_pages() function shouldn't call this
2863 * callback. So, we must fail.
2864 */
0f389ec6 2865 BUG_ON(slabs_node(s, offline_node));
b9049e23
YG
2866
2867 s->node[offline_node] = NULL;
2868 kmem_cache_free(kmalloc_caches, n);
2869 }
2870 }
2871 up_read(&slub_lock);
2872}
2873
2874static int slab_mem_going_online_callback(void *arg)
2875{
2876 struct kmem_cache_node *n;
2877 struct kmem_cache *s;
2878 struct memory_notify *marg = arg;
2879 int nid = marg->status_change_nid;
2880 int ret = 0;
2881
2882 /*
2883 * If the node's memory is already available, then kmem_cache_node is
2884 * already created. Nothing to do.
2885 */
2886 if (nid < 0)
2887 return 0;
2888
2889 /*
0121c619 2890 * We are bringing a node online. No memory is available yet. We must
b9049e23
YG
2891 * allocate a kmem_cache_node structure in order to bring the node
2892 * online.
2893 */
2894 down_read(&slub_lock);
2895 list_for_each_entry(s, &slab_caches, list) {
2896 /*
 2897 * XXX: kmem_cache_alloc_node will fall back to other nodes
2898 * since memory is not yet available from the node that
2899 * is brought up.
2900 */
2901 n = kmem_cache_alloc(kmalloc_caches, GFP_KERNEL);
2902 if (!n) {
2903 ret = -ENOMEM;
2904 goto out;
2905 }
5595cffc 2906 init_kmem_cache_node(n, s);
b9049e23
YG
2907 s->node[nid] = n;
2908 }
2909out:
2910 up_read(&slub_lock);
2911 return ret;
2912}
2913
2914static int slab_memory_callback(struct notifier_block *self,
2915 unsigned long action, void *arg)
2916{
2917 int ret = 0;
2918
2919 switch (action) {
2920 case MEM_GOING_ONLINE:
2921 ret = slab_mem_going_online_callback(arg);
2922 break;
2923 case MEM_GOING_OFFLINE:
2924 ret = slab_mem_going_offline_callback(arg);
2925 break;
2926 case MEM_OFFLINE:
2927 case MEM_CANCEL_ONLINE:
2928 slab_mem_offline_callback(arg);
2929 break;
2930 case MEM_ONLINE:
2931 case MEM_CANCEL_OFFLINE:
2932 break;
2933 }
2934
2935 ret = notifier_from_errno(ret);
2936 return ret;
2937}
2938
2939#endif /* CONFIG_MEMORY_HOTPLUG */
2940
81819f0f
CL
2941/********************************************************************
2942 * Basic setup of slabs
2943 *******************************************************************/
2944
2945void __init kmem_cache_init(void)
2946{
2947 int i;
4b356be0 2948 int caches = 0;
81819f0f 2949
4c93c355
CL
2950 init_alloc_cpu();
2951
81819f0f
CL
2952#ifdef CONFIG_NUMA
2953 /*
2954 * Must first have the slab cache available for the allocations of the
672bba3a 2955 * struct kmem_cache_node's. There is special bootstrap code in
81819f0f
CL
2956 * kmem_cache_open for slab_state == DOWN.
2957 */
2958 create_kmalloc_cache(&kmalloc_caches[0], "kmem_cache_node",
2959 sizeof(struct kmem_cache_node), GFP_KERNEL);
8ffa6875 2960 kmalloc_caches[0].refcount = -1;
4b356be0 2961 caches++;
b9049e23 2962
0c40ba4f 2963 hotplug_memory_notifier(slab_memory_callback, SLAB_CALLBACK_PRI);
81819f0f
CL
2964#endif
2965
2966 /* Able to allocate the per node structures */
2967 slab_state = PARTIAL;
2968
 2969 /* Caches that are not of power-of-two size */
4b356be0
CL
2970 if (KMALLOC_MIN_SIZE <= 64) {
2971 create_kmalloc_cache(&kmalloc_caches[1],
81819f0f 2972 "kmalloc-96", 96, GFP_KERNEL);
4b356be0 2973 caches++;
4b356be0 2974 create_kmalloc_cache(&kmalloc_caches[2],
81819f0f 2975 "kmalloc-192", 192, GFP_KERNEL);
4b356be0
CL
2976 caches++;
2977 }
81819f0f 2978
331dc558 2979 for (i = KMALLOC_SHIFT_LOW; i <= PAGE_SHIFT; i++) {
81819f0f
CL
2980 create_kmalloc_cache(&kmalloc_caches[i],
2981 "kmalloc", 1 << i, GFP_KERNEL);
4b356be0
CL
2982 caches++;
2983 }
81819f0f 2984
f1b26339
CL
2985
2986 /*
2987 * Patch up the size_index table if we have strange large alignment
2988 * requirements for the kmalloc array. This is only the case for
6446faa2 2989 * MIPS, it seems. The standard arches will not generate any code here.
f1b26339
CL
2990 *
2991 * Largest permitted alignment is 256 bytes due to the way we
2992 * handle the index determination for the smaller caches.
2993 *
2994 * Make sure that nothing crazy happens if someone starts tinkering
2995 * around with ARCH_KMALLOC_MINALIGN
2996 */
2997 BUILD_BUG_ON(KMALLOC_MIN_SIZE > 256 ||
2998 (KMALLOC_MIN_SIZE & (KMALLOC_MIN_SIZE - 1)));
2999
12ad6843 3000 for (i = 8; i < KMALLOC_MIN_SIZE; i += 8)
f1b26339
CL
3001 size_index[(i - 1) / 8] = KMALLOC_SHIFT_LOW;
3002
41d54d3b
CL
3003 if (KMALLOC_MIN_SIZE == 128) {
3004 /*
3005 * The 192 byte sized cache is not used if the alignment
3006 * is 128 byte. Redirect kmalloc to use the 256 byte cache
3007 * instead.
3008 */
3009 for (i = 128 + 8; i <= 192; i += 8)
3010 size_index[(i - 1) / 8] = 8;
3011 }
3012
81819f0f
CL
3013 slab_state = UP;
3014
3015 /* Provide the correct kmalloc names now that the caches are up */
331dc558 3016 for (i = KMALLOC_SHIFT_LOW; i <= PAGE_SHIFT; i++)
81819f0f
CL
 3017 kmalloc_caches[i].name =
3018 kasprintf(GFP_KERNEL, "kmalloc-%d", 1 << i);
3019
3020#ifdef CONFIG_SMP
3021 register_cpu_notifier(&slab_notifier);
4c93c355
CL
3022 kmem_size = offsetof(struct kmem_cache, cpu_slab) +
3023 nr_cpu_ids * sizeof(struct kmem_cache_cpu *);
3024#else
3025 kmem_size = sizeof(struct kmem_cache);
81819f0f
CL
3026#endif
3027
3adbefee
IM
3028 printk(KERN_INFO
3029 "SLUB: Genslabs=%d, HWalign=%d, Order=%d-%d, MinObjects=%d,"
4b356be0
CL
3030 " CPUs=%d, Nodes=%d\n",
3031 caches, cache_line_size(),
81819f0f
CL
3032 slub_min_order, slub_max_order, slub_min_objects,
3033 nr_cpu_ids, nr_node_ids);
3034}
3035
3036/*
3037 * Find a mergeable slab cache
3038 */
3039static int slab_unmergeable(struct kmem_cache *s)
3040{
3041 if (slub_nomerge || (s->flags & SLUB_NEVER_MERGE))
3042 return 1;
3043
c59def9f 3044 if (s->ctor)
81819f0f
CL
3045 return 1;
3046
8ffa6875
CL
3047 /*
3048 * We may have set a slab to be unmergeable during bootstrap.
3049 */
3050 if (s->refcount < 0)
3051 return 1;
3052
81819f0f
CL
3053 return 0;
3054}
3055
3056static struct kmem_cache *find_mergeable(size_t size,
ba0268a8 3057 size_t align, unsigned long flags, const char *name,
51cc5068 3058 void (*ctor)(void *))
81819f0f 3059{
5b95a4ac 3060 struct kmem_cache *s;
81819f0f
CL
3061
3062 if (slub_nomerge || (flags & SLUB_NEVER_MERGE))
3063 return NULL;
3064
c59def9f 3065 if (ctor)
81819f0f
CL
3066 return NULL;
3067
3068 size = ALIGN(size, sizeof(void *));
3069 align = calculate_alignment(flags, align, size);
3070 size = ALIGN(size, align);
ba0268a8 3071 flags = kmem_cache_flags(size, flags, name, NULL);
81819f0f 3072
5b95a4ac 3073 list_for_each_entry(s, &slab_caches, list) {
81819f0f
CL
3074 if (slab_unmergeable(s))
3075 continue;
3076
3077 if (size > s->size)
3078 continue;
3079
ba0268a8 3080 if ((flags & SLUB_MERGE_SAME) != (s->flags & SLUB_MERGE_SAME))
81819f0f
CL
3081 continue;
3082 /*
3083 * Check if alignment is compatible.
3084 * Courtesy of Adrian Drzewiecki
3085 */
06428780 3086 if ((s->size & ~(align - 1)) != s->size)
81819f0f
CL
3087 continue;
3088
3089 if (s->size - size >= sizeof(void *))
3090 continue;
3091
3092 return s;
3093 }
3094 return NULL;
3095}
3096
3097struct kmem_cache *kmem_cache_create(const char *name, size_t size,
51cc5068 3098 size_t align, unsigned long flags, void (*ctor)(void *))
81819f0f
CL
3099{
3100 struct kmem_cache *s;
3101
3102 down_write(&slub_lock);
ba0268a8 3103 s = find_mergeable(size, align, flags, name, ctor);
81819f0f 3104 if (s) {
42a9fdbb
CL
3105 int cpu;
3106
81819f0f
CL
3107 s->refcount++;
3108 /*
3109 * Adjust the object sizes so that we clear
3110 * the complete object on kzalloc.
3111 */
3112 s->objsize = max(s->objsize, (int)size);
42a9fdbb
CL
3113
3114 /*
3115 * And then we need to update the object size in the
3116 * per cpu structures
3117 */
3118 for_each_online_cpu(cpu)
3119 get_cpu_slab(s, cpu)->objsize = s->objsize;
6446faa2 3120
81819f0f 3121 s->inuse = max_t(int, s->inuse, ALIGN(size, sizeof(void *)));
a0e1d1be 3122 up_write(&slub_lock);
6446faa2 3123
81819f0f
CL
3124 if (sysfs_slab_alias(s, name))
3125 goto err;
a0e1d1be
CL
3126 return s;
3127 }
6446faa2 3128
a0e1d1be
CL
3129 s = kmalloc(kmem_size, GFP_KERNEL);
3130 if (s) {
3131 if (kmem_cache_open(s, GFP_KERNEL, name,
c59def9f 3132 size, align, flags, ctor)) {
81819f0f 3133 list_add(&s->list, &slab_caches);
a0e1d1be
CL
3134 up_write(&slub_lock);
3135 if (sysfs_slab_add(s))
3136 goto err;
3137 return s;
3138 }
3139 kfree(s);
81819f0f
CL
3140 }
3141 up_write(&slub_lock);
81819f0f
CL
3142
3143err:
81819f0f
CL
3144 if (flags & SLAB_PANIC)
3145 panic("Cannot create slabcache %s\n", name);
3146 else
3147 s = NULL;
3148 return s;
3149}
3150EXPORT_SYMBOL(kmem_cache_create);
3151
81819f0f 3152#ifdef CONFIG_SMP
81819f0f 3153/*
672bba3a
CL
 3154 * Use the cpu notifier to ensure that the cpu slabs are flushed when
3155 * necessary.
81819f0f
CL
3156 */
3157static int __cpuinit slab_cpuup_callback(struct notifier_block *nfb,
3158 unsigned long action, void *hcpu)
3159{
3160 long cpu = (long)hcpu;
5b95a4ac
CL
3161 struct kmem_cache *s;
3162 unsigned long flags;
81819f0f
CL
3163
3164 switch (action) {
4c93c355
CL
3165 case CPU_UP_PREPARE:
3166 case CPU_UP_PREPARE_FROZEN:
3167 init_alloc_cpu_cpu(cpu);
3168 down_read(&slub_lock);
3169 list_for_each_entry(s, &slab_caches, list)
3170 s->cpu_slab[cpu] = alloc_kmem_cache_cpu(s, cpu,
3171 GFP_KERNEL);
3172 up_read(&slub_lock);
3173 break;
3174
81819f0f 3175 case CPU_UP_CANCELED:
8bb78442 3176 case CPU_UP_CANCELED_FROZEN:
81819f0f 3177 case CPU_DEAD:
8bb78442 3178 case CPU_DEAD_FROZEN:
5b95a4ac
CL
3179 down_read(&slub_lock);
3180 list_for_each_entry(s, &slab_caches, list) {
4c93c355
CL
3181 struct kmem_cache_cpu *c = get_cpu_slab(s, cpu);
3182
5b95a4ac
CL
3183 local_irq_save(flags);
3184 __flush_cpu_slab(s, cpu);
3185 local_irq_restore(flags);
4c93c355
CL
3186 free_kmem_cache_cpu(c, cpu);
3187 s->cpu_slab[cpu] = NULL;
5b95a4ac
CL
3188 }
3189 up_read(&slub_lock);
81819f0f
CL
3190 break;
3191 default:
3192 break;
3193 }
3194 return NOTIFY_OK;
3195}
3196
06428780 3197static struct notifier_block __cpuinitdata slab_notifier = {
3adbefee 3198 .notifier_call = slab_cpuup_callback
06428780 3199};
81819f0f
CL
3200
3201#endif
3202
81819f0f
CL
3203void *__kmalloc_track_caller(size_t size, gfp_t gfpflags, void *caller)
3204{
aadb4bc4
CL
3205 struct kmem_cache *s;
3206
331dc558 3207 if (unlikely(size > PAGE_SIZE))
eada35ef
PE
3208 return kmalloc_large(size, gfpflags);
3209
aadb4bc4 3210 s = get_slab(size, gfpflags);
81819f0f 3211
2408c550 3212 if (unlikely(ZERO_OR_NULL_PTR(s)))
6cb8f913 3213 return s;
81819f0f 3214
ce15fea8 3215 return slab_alloc(s, gfpflags, -1, caller);
81819f0f
CL
3216}
3217
3218void *__kmalloc_node_track_caller(size_t size, gfp_t gfpflags,
3219 int node, void *caller)
3220{
aadb4bc4
CL
3221 struct kmem_cache *s;
3222
331dc558 3223 if (unlikely(size > PAGE_SIZE))
f619cfe1 3224 return kmalloc_large_node(size, gfpflags, node);
eada35ef 3225
aadb4bc4 3226 s = get_slab(size, gfpflags);
81819f0f 3227
2408c550 3228 if (unlikely(ZERO_OR_NULL_PTR(s)))
6cb8f913 3229 return s;
81819f0f 3230
ce15fea8 3231 return slab_alloc(s, gfpflags, node, caller);
81819f0f
CL
3232}
3233
f6acb635 3234#ifdef CONFIG_SLUB_DEBUG
205ab99d
CL
3235static unsigned long count_partial(struct kmem_cache_node *n,
3236 int (*get_count)(struct page *))
5b06c853
CL
3237{
3238 unsigned long flags;
3239 unsigned long x = 0;
3240 struct page *page;
3241
3242 spin_lock_irqsave(&n->list_lock, flags);
3243 list_for_each_entry(page, &n->partial, lru)
205ab99d 3244 x += get_count(page);
5b06c853
CL
3245 spin_unlock_irqrestore(&n->list_lock, flags);
3246 return x;
3247}
205ab99d
CL
3248
3249static int count_inuse(struct page *page)
3250{
3251 return page->inuse;
3252}
3253
3254static int count_total(struct page *page)
3255{
3256 return page->objects;
3257}
3258
3259static int count_free(struct page *page)
3260{
3261 return page->objects - page->inuse;
3262}
5b06c853 3263
434e245d
CL
3264static int validate_slab(struct kmem_cache *s, struct page *page,
3265 unsigned long *map)
53e15af0
CL
3266{
3267 void *p;
a973e9dd 3268 void *addr = page_address(page);
53e15af0
CL
3269
3270 if (!check_slab(s, page) ||
3271 !on_freelist(s, page, NULL))
3272 return 0;
3273
3274 /* Now we know that a valid freelist exists */
39b26464 3275 bitmap_zero(map, page->objects);
53e15af0 3276
7656c72b
CL
3277 for_each_free_object(p, s, page->freelist) {
3278 set_bit(slab_index(p, s, addr), map);
53e15af0
CL
3279 if (!check_object(s, page, p, 0))
3280 return 0;
3281 }
3282
224a88be 3283 for_each_object(p, s, addr, page->objects)
7656c72b 3284 if (!test_bit(slab_index(p, s, addr), map))
53e15af0
CL
3285 if (!check_object(s, page, p, 1))
3286 return 0;
3287 return 1;
3288}
3289
434e245d
CL
3290static void validate_slab_slab(struct kmem_cache *s, struct page *page,
3291 unsigned long *map)
53e15af0
CL
3292{
3293 if (slab_trylock(page)) {
434e245d 3294 validate_slab(s, page, map);
53e15af0
CL
3295 slab_unlock(page);
3296 } else
3297 printk(KERN_INFO "SLUB %s: Skipped busy slab 0x%p\n",
3298 s->name, page);
3299
3300 if (s->flags & DEBUG_DEFAULT_FLAGS) {
8a38082d
AW
3301 if (!PageSlubDebug(page))
3302 printk(KERN_ERR "SLUB %s: SlubDebug not set "
53e15af0
CL
3303 "on slab 0x%p\n", s->name, page);
3304 } else {
8a38082d
AW
3305 if (PageSlubDebug(page))
3306 printk(KERN_ERR "SLUB %s: SlubDebug set on "
53e15af0
CL
3307 "slab 0x%p\n", s->name, page);
3308 }
3309}
3310
434e245d
CL
3311static int validate_slab_node(struct kmem_cache *s,
3312 struct kmem_cache_node *n, unsigned long *map)
53e15af0
CL
3313{
3314 unsigned long count = 0;
3315 struct page *page;
3316 unsigned long flags;
3317
3318 spin_lock_irqsave(&n->list_lock, flags);
3319
3320 list_for_each_entry(page, &n->partial, lru) {
434e245d 3321 validate_slab_slab(s, page, map);
53e15af0
CL
3322 count++;
3323 }
3324 if (count != n->nr_partial)
3325 printk(KERN_ERR "SLUB %s: %ld partial slabs counted but "
3326 "counter=%ld\n", s->name, count, n->nr_partial);
3327
3328 if (!(s->flags & SLAB_STORE_USER))
3329 goto out;
3330
3331 list_for_each_entry(page, &n->full, lru) {
434e245d 3332 validate_slab_slab(s, page, map);
53e15af0
CL
3333 count++;
3334 }
3335 if (count != atomic_long_read(&n->nr_slabs))
3336 printk(KERN_ERR "SLUB: %s %ld slabs counted but "
3337 "counter=%ld\n", s->name, count,
3338 atomic_long_read(&n->nr_slabs));
3339
3340out:
3341 spin_unlock_irqrestore(&n->list_lock, flags);
3342 return count;
3343}
3344
434e245d 3345static long validate_slab_cache(struct kmem_cache *s)
53e15af0
CL
3346{
3347 int node;
3348 unsigned long count = 0;
205ab99d 3349 unsigned long *map = kmalloc(BITS_TO_LONGS(oo_objects(s->max)) *
434e245d
CL
3350 sizeof(unsigned long), GFP_KERNEL);
3351
3352 if (!map)
3353 return -ENOMEM;
53e15af0
CL
3354
3355 flush_all(s);
f64dc58c 3356 for_each_node_state(node, N_NORMAL_MEMORY) {
53e15af0
CL
3357 struct kmem_cache_node *n = get_node(s, node);
3358
434e245d 3359 count += validate_slab_node(s, n, map);
53e15af0 3360 }
434e245d 3361 kfree(map);
53e15af0
CL
3362 return count;
3363}
3364
b3459709
CL
3365#ifdef SLUB_RESILIENCY_TEST
3366static void resiliency_test(void)
3367{
3368 u8 *p;
3369
3370 printk(KERN_ERR "SLUB resiliency testing\n");
3371 printk(KERN_ERR "-----------------------\n");
3372 printk(KERN_ERR "A. Corruption after allocation\n");
3373
3374 p = kzalloc(16, GFP_KERNEL);
3375 p[16] = 0x12;
3376 printk(KERN_ERR "\n1. kmalloc-16: Clobber Redzone/next pointer"
3377 " 0x12->0x%p\n\n", p + 16);
3378
3379 validate_slab_cache(kmalloc_caches + 4);
3380
3381 /* Hmmm... The next two are dangerous */
3382 p = kzalloc(32, GFP_KERNEL);
3383 p[32 + sizeof(void *)] = 0x34;
3384 printk(KERN_ERR "\n2. kmalloc-32: Clobber next pointer/next slab"
3adbefee
IM
3385 " 0x34 -> -0x%p\n", p);
3386 printk(KERN_ERR
3387 "If allocated object is overwritten then not detectable\n\n");
b3459709
CL
3388
3389 validate_slab_cache(kmalloc_caches + 5);
3390 p = kzalloc(64, GFP_KERNEL);
3391 p += 64 + (get_cycles() & 0xff) * sizeof(void *);
3392 *p = 0x56;
3393 printk(KERN_ERR "\n3. kmalloc-64: corrupting random byte 0x56->0x%p\n",
3394 p);
3adbefee
IM
3395 printk(KERN_ERR
3396 "If allocated object is overwritten then not detectable\n\n");
b3459709
CL
3397 validate_slab_cache(kmalloc_caches + 6);
3398
3399 printk(KERN_ERR "\nB. Corruption after free\n");
3400 p = kzalloc(128, GFP_KERNEL);
3401 kfree(p);
3402 *p = 0x78;
3403 printk(KERN_ERR "1. kmalloc-128: Clobber first word 0x78->0x%p\n\n", p);
3404 validate_slab_cache(kmalloc_caches + 7);
3405
3406 p = kzalloc(256, GFP_KERNEL);
3407 kfree(p);
3408 p[50] = 0x9a;
3adbefee
IM
3409 printk(KERN_ERR "\n2. kmalloc-256: Clobber 50th byte 0x9a->0x%p\n\n",
3410 p);
b3459709
CL
3411 validate_slab_cache(kmalloc_caches + 8);
3412
3413 p = kzalloc(512, GFP_KERNEL);
3414 kfree(p);
3415 p[512] = 0xab;
3416 printk(KERN_ERR "\n3. kmalloc-512: Clobber redzone 0xab->0x%p\n\n", p);
3417 validate_slab_cache(kmalloc_caches + 9);
3418}
3419#else
 3420static void resiliency_test(void) {}
3421#endif
3422
88a420e4 3423/*
672bba3a 3424 * Generate lists of code addresses where slabcache objects are allocated
88a420e4
CL
3425 * and freed.
3426 */
3427
3428struct location {
3429 unsigned long count;
3430 void *addr;
45edfa58
CL
3431 long long sum_time;
3432 long min_time;
3433 long max_time;
3434 long min_pid;
3435 long max_pid;
3436 cpumask_t cpus;
3437 nodemask_t nodes;
88a420e4
CL
3438};
3439
3440struct loc_track {
3441 unsigned long max;
3442 unsigned long count;
3443 struct location *loc;
3444};
3445
3446static void free_loc_track(struct loc_track *t)
3447{
3448 if (t->max)
3449 free_pages((unsigned long)t->loc,
3450 get_order(sizeof(struct location) * t->max));
3451}
3452
68dff6a9 3453static int alloc_loc_track(struct loc_track *t, unsigned long max, gfp_t flags)
88a420e4
CL
3454{
3455 struct location *l;
3456 int order;
3457
88a420e4
CL
3458 order = get_order(sizeof(struct location) * max);
3459
68dff6a9 3460 l = (void *)__get_free_pages(flags, order);
88a420e4
CL
3461 if (!l)
3462 return 0;
3463
3464 if (t->count) {
3465 memcpy(l, t->loc, sizeof(struct location) * t->count);
3466 free_loc_track(t);
3467 }
3468 t->max = max;
3469 t->loc = l;
3470 return 1;
3471}
3472
3473static int add_location(struct loc_track *t, struct kmem_cache *s,
45edfa58 3474 const struct track *track)
88a420e4
CL
3475{
3476 long start, end, pos;
3477 struct location *l;
3478 void *caddr;
45edfa58 3479 unsigned long age = jiffies - track->when;
88a420e4
CL
3480
3481 start = -1;
3482 end = t->count;
3483
3484 for ( ; ; ) {
3485 pos = start + (end - start + 1) / 2;
3486
3487 /*
3488 * There is nothing at "end". If we end up there
 3489 * we need to add something before end.
3490 */
3491 if (pos == end)
3492 break;
3493
3494 caddr = t->loc[pos].addr;
45edfa58
CL
3495 if (track->addr == caddr) {
3496
3497 l = &t->loc[pos];
3498 l->count++;
3499 if (track->when) {
3500 l->sum_time += age;
3501 if (age < l->min_time)
3502 l->min_time = age;
3503 if (age > l->max_time)
3504 l->max_time = age;
3505
3506 if (track->pid < l->min_pid)
3507 l->min_pid = track->pid;
3508 if (track->pid > l->max_pid)
3509 l->max_pid = track->pid;
3510
3511 cpu_set(track->cpu, l->cpus);
3512 }
3513 node_set(page_to_nid(virt_to_page(track)), l->nodes);
88a420e4
CL
3514 return 1;
3515 }
3516
45edfa58 3517 if (track->addr < caddr)
88a420e4
CL
3518 end = pos;
3519 else
3520 start = pos;
3521 }
3522
3523 /*
672bba3a 3524 * Not found. Insert new tracking element.
88a420e4 3525 */
68dff6a9 3526 if (t->count >= t->max && !alloc_loc_track(t, 2 * t->max, GFP_ATOMIC))
88a420e4
CL
3527 return 0;
3528
3529 l = t->loc + pos;
3530 if (pos < t->count)
3531 memmove(l + 1, l,
3532 (t->count - pos) * sizeof(struct location));
3533 t->count++;
3534 l->count = 1;
45edfa58
CL
3535 l->addr = track->addr;
3536 l->sum_time = age;
3537 l->min_time = age;
3538 l->max_time = age;
3539 l->min_pid = track->pid;
3540 l->max_pid = track->pid;
3541 cpus_clear(l->cpus);
3542 cpu_set(track->cpu, l->cpus);
3543 nodes_clear(l->nodes);
3544 node_set(page_to_nid(virt_to_page(track)), l->nodes);
88a420e4
CL
3545 return 1;
3546}
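/*
 * Editor's note, not part of the original file: t->loc is kept sorted by
 * address, so the loop above is a binary search. On a hit only the
 * statistics are folded into the existing element; on a miss the
 * memmove() opens a gap at the search position so the array stays
 * sorted for the next lookup.
 */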
3547
3548static void process_slab(struct loc_track *t, struct kmem_cache *s,
3549 struct page *page, enum track_item alloc)
3550{
a973e9dd 3551 void *addr = page_address(page);
39b26464 3552 DECLARE_BITMAP(map, page->objects);
88a420e4
CL
3553 void *p;
3554
39b26464 3555 bitmap_zero(map, page->objects);
7656c72b
CL
3556 for_each_free_object(p, s, page->freelist)
3557 set_bit(slab_index(p, s, addr), map);
88a420e4 3558
224a88be 3559 for_each_object(p, s, addr, page->objects)
45edfa58
CL
3560 if (!test_bit(slab_index(p, s, addr), map))
3561 add_location(t, s, get_track(s, p, alloc));
88a420e4
CL
3562}
3563
3564static int list_locations(struct kmem_cache *s, char *buf,
3565 enum track_item alloc)
3566{
e374d483 3567 int len = 0;
88a420e4 3568 unsigned long i;
68dff6a9 3569 struct loc_track t = { 0, 0, NULL };
88a420e4
CL
3570 int node;
3571
68dff6a9 3572 if (!alloc_loc_track(&t, PAGE_SIZE / sizeof(struct location),
ea3061d2 3573 GFP_TEMPORARY))
68dff6a9 3574 return sprintf(buf, "Out of memory\n");
88a420e4
CL
3575
3576 /* Push back cpu slabs */
3577 flush_all(s);
3578
f64dc58c 3579 for_each_node_state(node, N_NORMAL_MEMORY) {
88a420e4
CL
3580 struct kmem_cache_node *n = get_node(s, node);
3581 unsigned long flags;
3582 struct page *page;
3583
9e86943b 3584 if (!atomic_long_read(&n->nr_slabs))
88a420e4
CL
3585 continue;
3586
3587 spin_lock_irqsave(&n->list_lock, flags);
3588 list_for_each_entry(page, &n->partial, lru)
3589 process_slab(&t, s, page, alloc);
3590 list_for_each_entry(page, &n->full, lru)
3591 process_slab(&t, s, page, alloc);
3592 spin_unlock_irqrestore(&n->list_lock, flags);
3593 }
3594
3595 for (i = 0; i < t.count; i++) {
45edfa58 3596 struct location *l = &t.loc[i];
88a420e4 3597
e374d483 3598 if (len > PAGE_SIZE - 100)
88a420e4 3599 break;
e374d483 3600 len += sprintf(buf + len, "%7ld ", l->count);
45edfa58
CL
3601
3602 if (l->addr)
e374d483 3603 len += sprint_symbol(buf + len, (unsigned long)l->addr);
88a420e4 3604 else
e374d483 3605 len += sprintf(buf + len, "<not-available>");
45edfa58
CL
3606
3607 if (l->sum_time != l->min_time) {
e374d483 3608 len += sprintf(buf + len, " age=%ld/%ld/%ld",
f8bd2258
RZ
3609 l->min_time,
3610 (long)div_u64(l->sum_time, l->count),
3611 l->max_time);
45edfa58 3612 } else
e374d483 3613 len += sprintf(buf + len, " age=%ld",
45edfa58
CL
3614 l->min_time);
3615
3616 if (l->min_pid != l->max_pid)
e374d483 3617 len += sprintf(buf + len, " pid=%ld-%ld",
45edfa58
CL
3618 l->min_pid, l->max_pid);
3619 else
e374d483 3620 len += sprintf(buf + len, " pid=%ld",
45edfa58
CL
3621 l->min_pid);
3622
84966343 3623 if (num_online_cpus() > 1 && !cpus_empty(l->cpus) &&
e374d483
HH
3624 len < PAGE_SIZE - 60) {
3625 len += sprintf(buf + len, " cpus=");
3626 len += cpulist_scnprintf(buf + len, PAGE_SIZE - len - 50,
45edfa58
CL
3627 l->cpus);
3628 }
3629
84966343 3630 if (num_online_nodes() > 1 && !nodes_empty(l->nodes) &&
e374d483
HH
3631 len < PAGE_SIZE - 60) {
3632 len += sprintf(buf + len, " nodes=");
3633 len += nodelist_scnprintf(buf + len, PAGE_SIZE - len - 50,
45edfa58
CL
3634 l->nodes);
3635 }
3636
e374d483 3637 len += sprintf(buf + len, "\n");
88a420e4
CL
3638 }
3639
3640 free_loc_track(&t);
3641 if (!t.count)
e374d483
HH
3642 len += sprintf(buf, "No data\n");
3643 return len;
88a420e4
CL
3644}
3645
81819f0f 3646enum slab_stat_type {
205ab99d
CL
3647 SL_ALL, /* All slabs */
3648 SL_PARTIAL, /* Only partially allocated slabs */
3649 SL_CPU, /* Only slabs used for cpu caches */
3650 SL_OBJECTS, /* Determine allocated objects not slabs */
3651 SL_TOTAL /* Determine object capacity not slabs */
81819f0f
CL
3652};
3653
205ab99d 3654#define SO_ALL (1 << SL_ALL)
81819f0f
CL
3655#define SO_PARTIAL (1 << SL_PARTIAL)
3656#define SO_CPU (1 << SL_CPU)
3657#define SO_OBJECTS (1 << SL_OBJECTS)
205ab99d 3658#define SO_TOTAL (1 << SL_TOTAL)
81819f0f 3659
static ssize_t show_slab_objects(struct kmem_cache *s,
			    char *buf, unsigned long flags)
{
	unsigned long total = 0;
	int node;
	int x;
	unsigned long *nodes;
	unsigned long *per_cpu;

	nodes = kzalloc(2 * sizeof(unsigned long) * nr_node_ids, GFP_KERNEL);
	if (!nodes)
		return -ENOMEM;
	per_cpu = nodes + nr_node_ids;

	if (flags & SO_CPU) {
		int cpu;

		for_each_possible_cpu(cpu) {
			struct kmem_cache_cpu *c = get_cpu_slab(s, cpu);

			if (!c || c->node < 0)
				continue;

			if (c->page) {
				if (flags & SO_TOTAL)
					x = c->page->objects;
				else if (flags & SO_OBJECTS)
					x = c->page->inuse;
				else
					x = 1;

				total += x;
				nodes[c->node] += x;
			}
			per_cpu[c->node]++;
		}
	}

	if (flags & SO_ALL) {
		for_each_node_state(node, N_NORMAL_MEMORY) {
			struct kmem_cache_node *n = get_node(s, node);

			if (flags & SO_TOTAL)
				x = atomic_long_read(&n->total_objects);
			else if (flags & SO_OBJECTS)
				x = atomic_long_read(&n->total_objects) -
					count_partial(n, count_free);
			else
				x = atomic_long_read(&n->nr_slabs);
			total += x;
			nodes[node] += x;
		}

	} else if (flags & SO_PARTIAL) {
		for_each_node_state(node, N_NORMAL_MEMORY) {
			struct kmem_cache_node *n = get_node(s, node);

			if (flags & SO_TOTAL)
				x = count_partial(n, count_total);
			else if (flags & SO_OBJECTS)
				x = count_partial(n, count_inuse);
			else
				x = n->nr_partial;
			total += x;
			nodes[node] += x;
		}
	}
	x = sprintf(buf, "%lu", total);
#ifdef CONFIG_NUMA
	for_each_node_state(node, N_NORMAL_MEMORY)
		if (nodes[node])
			x += sprintf(buf + x, " N%d=%lu",
					node, nodes[node]);
#endif
	kfree(nodes);
	return x + sprintf(buf + x, "\n");
}

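/* Does the cache still hold any slabs (and hence any objects)? */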
static int any_slab_objects(struct kmem_cache *s)
{
	int node;

	for_each_online_node(node) {
		struct kmem_cache_node *n = get_node(s, node);

		if (!n)
			continue;

		if (atomic_long_read(&n->total_objects))
			return 1;
	}
	return 0;
}

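/*
 * sysfs plumbing: each file under a cache's kobject is backed by a
 * slab_attribute whose show/store callbacks take the kmem_cache directly.
 */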
#define to_slab_attr(n) container_of(n, struct slab_attribute, attr)
#define to_slab(n) container_of(n, struct kmem_cache, kobj)

struct slab_attribute {
	struct attribute attr;
	ssize_t (*show)(struct kmem_cache *s, char *buf);
	ssize_t (*store)(struct kmem_cache *s, const char *x, size_t count);
};

#define SLAB_ATTR_RO(_name) \
	static struct slab_attribute _name##_attr = __ATTR_RO(_name)

#define SLAB_ATTR(_name) \
	static struct slab_attribute _name##_attr = \
	__ATTR(_name, 0644, _name##_show, _name##_store)

static ssize_t slab_size_show(struct kmem_cache *s, char *buf)
{
	return sprintf(buf, "%d\n", s->size);
}
SLAB_ATTR_RO(slab_size);

static ssize_t align_show(struct kmem_cache *s, char *buf)
{
	return sprintf(buf, "%d\n", s->align);
}
SLAB_ATTR_RO(align);

static ssize_t object_size_show(struct kmem_cache *s, char *buf)
{
	return sprintf(buf, "%d\n", s->objsize);
}
SLAB_ATTR_RO(object_size);

static ssize_t objs_per_slab_show(struct kmem_cache *s, char *buf)
{
	return sprintf(buf, "%d\n", oo_objects(s->oo));
}
SLAB_ATTR_RO(objs_per_slab);

static ssize_t order_store(struct kmem_cache *s,
				const char *buf, size_t length)
{
	unsigned long order;
	int err;

	err = strict_strtoul(buf, 10, &order);
	if (err)
		return err;

	if (order > slub_max_order || order < slub_min_order)
		return -EINVAL;

	calculate_sizes(s, order);
	return length;
}

static ssize_t order_show(struct kmem_cache *s, char *buf)
{
	return sprintf(buf, "%d\n", oo_order(s->oo));
}
SLAB_ATTR(order);

static ssize_t ctor_show(struct kmem_cache *s, char *buf)
{
	if (s->ctor) {
		int n = sprint_symbol(buf, (unsigned long)s->ctor);

		return n + sprintf(buf + n, "\n");
	}
	return 0;
}
SLAB_ATTR_RO(ctor);

static ssize_t aliases_show(struct kmem_cache *s, char *buf)
{
	return sprintf(buf, "%d\n", s->refcount - 1);
}
SLAB_ATTR_RO(aliases);

static ssize_t slabs_show(struct kmem_cache *s, char *buf)
{
	return show_slab_objects(s, buf, SO_ALL);
}
SLAB_ATTR_RO(slabs);

static ssize_t partial_show(struct kmem_cache *s, char *buf)
{
	return show_slab_objects(s, buf, SO_PARTIAL);
}
SLAB_ATTR_RO(partial);

static ssize_t cpu_slabs_show(struct kmem_cache *s, char *buf)
{
	return show_slab_objects(s, buf, SO_CPU);
}
SLAB_ATTR_RO(cpu_slabs);

static ssize_t objects_show(struct kmem_cache *s, char *buf)
{
	return show_slab_objects(s, buf, SO_ALL|SO_OBJECTS);
}
SLAB_ATTR_RO(objects);

static ssize_t objects_partial_show(struct kmem_cache *s, char *buf)
{
	return show_slab_objects(s, buf, SO_PARTIAL|SO_OBJECTS);
}
SLAB_ATTR_RO(objects_partial);

static ssize_t total_objects_show(struct kmem_cache *s, char *buf)
{
	return show_slab_objects(s, buf, SO_ALL|SO_TOTAL);
}
SLAB_ATTR_RO(total_objects);

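/*
 * The writable debug attributes below treat a leading '1' as "enable" and
 * anything else as "disable". Flags that change the object layout
 * (red_zone, poison, store_user) can only be switched while the cache is
 * empty and force a recalculation of the slab layout.
 */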
static ssize_t sanity_checks_show(struct kmem_cache *s, char *buf)
{
	return sprintf(buf, "%d\n", !!(s->flags & SLAB_DEBUG_FREE));
}

static ssize_t sanity_checks_store(struct kmem_cache *s,
				const char *buf, size_t length)
{
	s->flags &= ~SLAB_DEBUG_FREE;
	if (buf[0] == '1')
		s->flags |= SLAB_DEBUG_FREE;
	return length;
}
SLAB_ATTR(sanity_checks);

static ssize_t trace_show(struct kmem_cache *s, char *buf)
{
	return sprintf(buf, "%d\n", !!(s->flags & SLAB_TRACE));
}

static ssize_t trace_store(struct kmem_cache *s, const char *buf,
							size_t length)
{
	s->flags &= ~SLAB_TRACE;
	if (buf[0] == '1')
		s->flags |= SLAB_TRACE;
	return length;
}
SLAB_ATTR(trace);

static ssize_t reclaim_account_show(struct kmem_cache *s, char *buf)
{
	return sprintf(buf, "%d\n", !!(s->flags & SLAB_RECLAIM_ACCOUNT));
}

static ssize_t reclaim_account_store(struct kmem_cache *s,
				const char *buf, size_t length)
{
	s->flags &= ~SLAB_RECLAIM_ACCOUNT;
	if (buf[0] == '1')
		s->flags |= SLAB_RECLAIM_ACCOUNT;
	return length;
}
SLAB_ATTR(reclaim_account);

static ssize_t hwcache_align_show(struct kmem_cache *s, char *buf)
{
	return sprintf(buf, "%d\n", !!(s->flags & SLAB_HWCACHE_ALIGN));
}
SLAB_ATTR_RO(hwcache_align);

#ifdef CONFIG_ZONE_DMA
static ssize_t cache_dma_show(struct kmem_cache *s, char *buf)
{
	return sprintf(buf, "%d\n", !!(s->flags & SLAB_CACHE_DMA));
}
SLAB_ATTR_RO(cache_dma);
#endif

static ssize_t destroy_by_rcu_show(struct kmem_cache *s, char *buf)
{
	return sprintf(buf, "%d\n", !!(s->flags & SLAB_DESTROY_BY_RCU));
}
SLAB_ATTR_RO(destroy_by_rcu);

static ssize_t red_zone_show(struct kmem_cache *s, char *buf)
{
	return sprintf(buf, "%d\n", !!(s->flags & SLAB_RED_ZONE));
}

static ssize_t red_zone_store(struct kmem_cache *s,
				const char *buf, size_t length)
{
	if (any_slab_objects(s))
		return -EBUSY;

	s->flags &= ~SLAB_RED_ZONE;
	if (buf[0] == '1')
		s->flags |= SLAB_RED_ZONE;
	calculate_sizes(s, -1);
	return length;
}
SLAB_ATTR(red_zone);

static ssize_t poison_show(struct kmem_cache *s, char *buf)
{
	return sprintf(buf, "%d\n", !!(s->flags & SLAB_POISON));
}

static ssize_t poison_store(struct kmem_cache *s,
				const char *buf, size_t length)
{
	if (any_slab_objects(s))
		return -EBUSY;

	s->flags &= ~SLAB_POISON;
	if (buf[0] == '1')
		s->flags |= SLAB_POISON;
	calculate_sizes(s, -1);
	return length;
}
SLAB_ATTR(poison);

static ssize_t store_user_show(struct kmem_cache *s, char *buf)
{
	return sprintf(buf, "%d\n", !!(s->flags & SLAB_STORE_USER));
}

static ssize_t store_user_store(struct kmem_cache *s,
				const char *buf, size_t length)
{
	if (any_slab_objects(s))
		return -EBUSY;

	s->flags &= ~SLAB_STORE_USER;
	if (buf[0] == '1')
		s->flags |= SLAB_STORE_USER;
	calculate_sizes(s, -1);
	return length;
}
SLAB_ATTR(store_user);

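/* Writing '1' runs a consistency check over all slabs of this cache. */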
static ssize_t validate_show(struct kmem_cache *s, char *buf)
{
	return 0;
}

static ssize_t validate_store(struct kmem_cache *s,
			const char *buf, size_t length)
{
	int ret = -EINVAL;

	if (buf[0] == '1') {
		ret = validate_slab_cache(s);
		if (ret >= 0)
			ret = length;
	}
	return ret;
}
SLAB_ATTR(validate);

static ssize_t shrink_show(struct kmem_cache *s, char *buf)
{
	return 0;
}

static ssize_t shrink_store(struct kmem_cache *s,
			const char *buf, size_t length)
{
	if (buf[0] == '1') {
		int rc = kmem_cache_shrink(s);

		if (rc)
			return rc;
	} else
		return -EINVAL;
	return length;
}
SLAB_ATTR(shrink);

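/*
 * alloc_calls/free_calls dump the call-site tracking gathered by
 * list_locations(); they are only available with SLAB_STORE_USER set.
 */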
static ssize_t alloc_calls_show(struct kmem_cache *s, char *buf)
{
	if (!(s->flags & SLAB_STORE_USER))
		return -ENOSYS;
	return list_locations(s, buf, TRACK_ALLOC);
}
SLAB_ATTR_RO(alloc_calls);

static ssize_t free_calls_show(struct kmem_cache *s, char *buf)
{
	if (!(s->flags & SLAB_STORE_USER))
		return -ENOSYS;
	return list_locations(s, buf, TRACK_FREE);
}
SLAB_ATTR_RO(free_calls);

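/*
 * The defrag ratio is exposed as a percentage (0-100) but kept internally
 * scaled by 10, hence the conversions below.
 */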
#ifdef CONFIG_NUMA
static ssize_t remote_node_defrag_ratio_show(struct kmem_cache *s, char *buf)
{
	return sprintf(buf, "%d\n", s->remote_node_defrag_ratio / 10);
}

static ssize_t remote_node_defrag_ratio_store(struct kmem_cache *s,
				const char *buf, size_t length)
{
	unsigned long ratio;
	int err;

	err = strict_strtoul(buf, 10, &ratio);
	if (err)
		return err;

	if (ratio <= 100)
		s->remote_node_defrag_ratio = ratio * 10;

	return length;
}
SLAB_ATTR(remote_node_defrag_ratio);
#endif

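/*
 * Per cpu allocator statistics (CONFIG_SLUB_STATS): one sysfs file per
 * event, printed as the sum followed by " C<cpu>=<count>" breakdowns.
 */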
#ifdef CONFIG_SLUB_STATS
static int show_stat(struct kmem_cache *s, char *buf, enum stat_item si)
{
	unsigned long sum = 0;
	int cpu;
	int len;
	int *data = kmalloc(nr_cpu_ids * sizeof(int), GFP_KERNEL);

	if (!data)
		return -ENOMEM;

	for_each_online_cpu(cpu) {
		unsigned x = get_cpu_slab(s, cpu)->stat[si];

		data[cpu] = x;
		sum += x;
	}

	len = sprintf(buf, "%lu", sum);

#ifdef CONFIG_SMP
	for_each_online_cpu(cpu) {
		if (data[cpu] && len < PAGE_SIZE - 20)
			len += sprintf(buf + len, " C%d=%u", cpu, data[cpu]);
	}
#endif
	kfree(data);
	return len + sprintf(buf + len, "\n");
}

#define STAT_ATTR(si, text) \
static ssize_t text##_show(struct kmem_cache *s, char *buf) \
{ \
	return show_stat(s, buf, si); \
} \
SLAB_ATTR_RO(text);

STAT_ATTR(ALLOC_FASTPATH, alloc_fastpath);
STAT_ATTR(ALLOC_SLOWPATH, alloc_slowpath);
STAT_ATTR(FREE_FASTPATH, free_fastpath);
STAT_ATTR(FREE_SLOWPATH, free_slowpath);
STAT_ATTR(FREE_FROZEN, free_frozen);
STAT_ATTR(FREE_ADD_PARTIAL, free_add_partial);
STAT_ATTR(FREE_REMOVE_PARTIAL, free_remove_partial);
STAT_ATTR(ALLOC_FROM_PARTIAL, alloc_from_partial);
STAT_ATTR(ALLOC_SLAB, alloc_slab);
STAT_ATTR(ALLOC_REFILL, alloc_refill);
STAT_ATTR(FREE_SLAB, free_slab);
STAT_ATTR(CPUSLAB_FLUSH, cpuslab_flush);
STAT_ATTR(DEACTIVATE_FULL, deactivate_full);
STAT_ATTR(DEACTIVATE_EMPTY, deactivate_empty);
STAT_ATTR(DEACTIVATE_TO_HEAD, deactivate_to_head);
STAT_ATTR(DEACTIVATE_TO_TAIL, deactivate_to_tail);
STAT_ATTR(DEACTIVATE_REMOTE_FREES, deactivate_remote_frees);
STAT_ATTR(ORDER_FALLBACK, order_fallback);
#endif

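/* All attributes registered for every cache via slab_attr_group. */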
static struct attribute *slab_attrs[] = {
	&slab_size_attr.attr,
	&object_size_attr.attr,
	&objs_per_slab_attr.attr,
	&order_attr.attr,
	&objects_attr.attr,
	&objects_partial_attr.attr,
	&total_objects_attr.attr,
	&slabs_attr.attr,
	&partial_attr.attr,
	&cpu_slabs_attr.attr,
	&ctor_attr.attr,
	&aliases_attr.attr,
	&align_attr.attr,
	&sanity_checks_attr.attr,
	&trace_attr.attr,
	&hwcache_align_attr.attr,
	&reclaim_account_attr.attr,
	&destroy_by_rcu_attr.attr,
	&red_zone_attr.attr,
	&poison_attr.attr,
	&store_user_attr.attr,
	&validate_attr.attr,
	&shrink_attr.attr,
	&alloc_calls_attr.attr,
	&free_calls_attr.attr,
#ifdef CONFIG_ZONE_DMA
	&cache_dma_attr.attr,
#endif
#ifdef CONFIG_NUMA
	&remote_node_defrag_ratio_attr.attr,
#endif
#ifdef CONFIG_SLUB_STATS
	&alloc_fastpath_attr.attr,
	&alloc_slowpath_attr.attr,
	&free_fastpath_attr.attr,
	&free_slowpath_attr.attr,
	&free_frozen_attr.attr,
	&free_add_partial_attr.attr,
	&free_remove_partial_attr.attr,
	&alloc_from_partial_attr.attr,
	&alloc_slab_attr.attr,
	&alloc_refill_attr.attr,
	&free_slab_attr.attr,
	&cpuslab_flush_attr.attr,
	&deactivate_full_attr.attr,
	&deactivate_empty_attr.attr,
	&deactivate_to_head_attr.attr,
	&deactivate_to_tail_attr.attr,
	&deactivate_remote_frees_attr.attr,
	&order_fallback_attr.attr,
#endif
	NULL
};

static struct attribute_group slab_attr_group = {
	.attrs = slab_attrs,
};

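/*
 * Dispatch generic sysfs file operations to the matching slab_attribute
 * handler; attributes without a handler report -EIO.
 */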
static ssize_t slab_attr_show(struct kobject *kobj,
				struct attribute *attr,
				char *buf)
{
	struct slab_attribute *attribute;
	struct kmem_cache *s;
	int err;

	attribute = to_slab_attr(attr);
	s = to_slab(kobj);

	if (!attribute->show)
		return -EIO;

	err = attribute->show(s, buf);

	return err;
}

static ssize_t slab_attr_store(struct kobject *kobj,
				struct attribute *attr,
				const char *buf, size_t len)
{
	struct slab_attribute *attribute;
	struct kmem_cache *s;
	int err;

	attribute = to_slab_attr(attr);
	s = to_slab(kobj);

	if (!attribute->store)
		return -EIO;

	err = attribute->store(s, buf, len);

	return err;
}

static void kmem_cache_release(struct kobject *kobj)
{
	struct kmem_cache *s = to_slab(kobj);

	kfree(s);
}

static struct sysfs_ops slab_sysfs_ops = {
	.show = slab_attr_show,
	.store = slab_attr_store,
};

static struct kobj_type slab_ktype = {
	.sysfs_ops = &slab_sysfs_ops,
	.release = kmem_cache_release
};

static int uevent_filter(struct kset *kset, struct kobject *kobj)
{
	struct kobj_type *ktype = get_ktype(kobj);

	if (ktype == &slab_ktype)
		return 1;
	return 0;
}

static struct kset_uevent_ops slab_uevent_ops = {
	.filter = uevent_filter,
};

static struct kset *slab_kset;

#define ID_STR_LENGTH 64

/*
 * Create a unique string id for a slab cache:
 *
 * Format	:[flags-]size
 */
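/*
 * e.g. a SLAB_CACHE_DMA cache with s->size == 192 comes out as
 * ":d-0000192".
 */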
static char *create_unique_id(struct kmem_cache *s)
{
	char *name = kmalloc(ID_STR_LENGTH, GFP_KERNEL);
	char *p = name;

	BUG_ON(!name);

	*p++ = ':';
	/*
	 * First flags affecting slabcache operations. We will only
	 * get here for aliasable slabs so we do not need to support
	 * too many flags. The flags here must cover all flags that
	 * are matched during merging to guarantee that the id is
	 * unique.
	 */
	if (s->flags & SLAB_CACHE_DMA)
		*p++ = 'd';
	if (s->flags & SLAB_RECLAIM_ACCOUNT)
		*p++ = 'a';
	if (s->flags & SLAB_DEBUG_FREE)
		*p++ = 'F';
	if (p != name + 1)
		*p++ = '-';
	p += sprintf(p, "%07d", s->size);
	BUG_ON(p > name + ID_STR_LENGTH - 1);
	return name;
}

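/*
 * Register a cache in sysfs. Unmergeable caches keep their proper name;
 * mergeable ones get the unique id as the kobject name, with the real
 * name added as a symlink alias.
 */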
static int sysfs_slab_add(struct kmem_cache *s)
{
	int err;
	const char *name;
	int unmergeable;

	if (slab_state < SYSFS)
		/* Defer until later */
		return 0;

	unmergeable = slab_unmergeable(s);
	if (unmergeable) {
		/*
		 * Slabcache can never be merged so we can use the name proper.
		 * This is typically the case for debug situations. In that
		 * case we can catch duplicate names easily.
		 */
		sysfs_remove_link(&slab_kset->kobj, s->name);
		name = s->name;
	} else {
		/*
		 * Create a unique name for the slab as a target
		 * for the symlinks.
		 */
		name = create_unique_id(s);
	}

	s->kobj.kset = slab_kset;
	err = kobject_init_and_add(&s->kobj, &slab_ktype, NULL, name);
	if (err) {
		kobject_put(&s->kobj);
		return err;
	}

	err = sysfs_create_group(&s->kobj, &slab_attr_group);
	if (err)
		return err;
	kobject_uevent(&s->kobj, KOBJ_ADD);
	if (!unmergeable) {
		/* Setup first alias */
		sysfs_slab_alias(s, s->name);
		kfree(name);
	}
	return 0;
}

static void sysfs_slab_remove(struct kmem_cache *s)
{
	kobject_uevent(&s->kobj, KOBJ_REMOVE);
	kobject_del(&s->kobj);
	kobject_put(&s->kobj);
}

/*
 * Need to buffer aliases during bootup until sysfs becomes
 * available lest we lose that information.
 */
struct saved_alias {
	struct kmem_cache *s;
	const char *name;
	struct saved_alias *next;
};

static struct saved_alias *alias_list;

static int sysfs_slab_alias(struct kmem_cache *s, const char *name)
{
	struct saved_alias *al;

	if (slab_state == SYSFS) {
		/*
		 * If we have a leftover link then remove it.
		 */
		sysfs_remove_link(&slab_kset->kobj, name);
		return sysfs_create_link(&slab_kset->kobj, &s->kobj, name);
	}

	al = kmalloc(sizeof(struct saved_alias), GFP_KERNEL);
	if (!al)
		return -ENOMEM;

	al->s = s;
	al->name = name;
	al->next = alias_list;
	alias_list = al;
	return 0;
}

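/*
 * Register the sysfs kset, add all caches created before sysfs came up
 * and replay the aliases buffered during boot.
 */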
static int __init slab_sysfs_init(void)
{
	struct kmem_cache *s;
	int err;

	slab_kset = kset_create_and_add("slab", &slab_uevent_ops, kernel_kobj);
	if (!slab_kset) {
		printk(KERN_ERR "Cannot register slab subsystem.\n");
		return -ENOSYS;
	}

	slab_state = SYSFS;

	list_for_each_entry(s, &slab_caches, list) {
		err = sysfs_slab_add(s);
		if (err)
			printk(KERN_ERR "SLUB: Unable to add boot slab %s"
						" to sysfs\n", s->name);
	}

	while (alias_list) {
		struct saved_alias *al = alias_list;

		alias_list = alias_list->next;
		err = sysfs_slab_alias(al->s, al->name);
		if (err)
			printk(KERN_ERR "SLUB: Unable to add boot slab alias"
					" %s to sysfs\n", al->name);
		kfree(al);
	}

	resiliency_test();
	return 0;
}

__initcall(slab_sysfs_init);
#endif

/*
 * The /proc/slabinfo ABI
 */
#ifdef CONFIG_SLABINFO
static void print_slabinfo_header(struct seq_file *m)
{
	seq_puts(m, "slabinfo - version: 2.1\n");
	seq_puts(m, "# name            <active_objs> <num_objs> <objsize> "
		 "<objperslab> <pagesperslab>");
	seq_puts(m, " : tunables <limit> <batchcount> <sharedfactor>");
	seq_puts(m, " : slabdata <active_slabs> <num_slabs> <sharedavail>");
	seq_putc(m, '\n');
}

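/*
 * The iterator walks slab_caches under the slub_lock read lock; the
 * header is emitted once, when iteration starts at position 0.
 */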
static void *s_start(struct seq_file *m, loff_t *pos)
{
	loff_t n = *pos;

	down_read(&slub_lock);
	if (!n)
		print_slabinfo_header(m);

	return seq_list_start(&slab_caches, *pos);
}

static void *s_next(struct seq_file *m, void *p, loff_t *pos)
{
	return seq_list_next(p, &slab_caches, pos);
}

static void s_stop(struct seq_file *m, void *p)
{
	up_read(&slub_lock);
}

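/*
 * SLUB has none of SLAB's tunables or shared caches, so those slabinfo
 * columns are emitted as zeros purely for ABI compatibility.
 */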
static int s_show(struct seq_file *m, void *p)
{
	unsigned long nr_partials = 0;
	unsigned long nr_slabs = 0;
	unsigned long nr_inuse = 0;
	unsigned long nr_objs = 0;
	unsigned long nr_free = 0;
	struct kmem_cache *s;
	int node;

	s = list_entry(p, struct kmem_cache, list);

	for_each_online_node(node) {
		struct kmem_cache_node *n = get_node(s, node);

		if (!n)
			continue;

		nr_partials += n->nr_partial;
		nr_slabs += atomic_long_read(&n->nr_slabs);
		nr_objs += atomic_long_read(&n->total_objects);
		nr_free += count_partial(n, count_free);
	}

	nr_inuse = nr_objs - nr_free;

	seq_printf(m, "%-17s %6lu %6lu %6u %4u %4d", s->name, nr_inuse,
		   nr_objs, s->size, oo_objects(s->oo),
		   (1 << oo_order(s->oo)));
	seq_printf(m, " : tunables %4u %4u %4u", 0, 0, 0);
	seq_printf(m, " : slabdata %6lu %6lu %6lu", nr_slabs, nr_slabs,
		   0UL);
	seq_putc(m, '\n');
	return 0;
}

static const struct seq_operations slabinfo_op = {
	.start = s_start,
	.next = s_next,
	.stop = s_stop,
	.show = s_show,
};

static int slabinfo_open(struct inode *inode, struct file *file)
{
	return seq_open(file, &slabinfo_op);
}

static const struct file_operations proc_slabinfo_operations = {
	.open = slabinfo_open,
	.read = seq_read,
	.llseek = seq_lseek,
	.release = seq_release,
};

static int __init slab_proc_init(void)
{
	proc_create("slabinfo", S_IWUSR | S_IRUGO, NULL,
			&proc_slabinfo_operations);
	return 0;
}
module_init(slab_proc_init);
#endif /* CONFIG_SLABINFO */