1/*
2 * SLUB: A slab allocator that limits cache line use instead of queuing
3 * objects in per cpu and per node lists.
4 *
5 * The allocator synchronizes using per slab locks and only
6 * uses a centralized lock to manage a pool of partial slabs.
7 *
8 * (C) 2007 SGI, Christoph Lameter <clameter@sgi.com>
9 */
10
11#include <linux/mm.h>
12#include <linux/module.h>
13#include <linux/bit_spinlock.h>
14#include <linux/interrupt.h>
15#include <linux/bitops.h>
16#include <linux/slab.h>
17#include <linux/seq_file.h>
18#include <linux/cpu.h>
19#include <linux/cpuset.h>
20#include <linux/mempolicy.h>
21#include <linux/ctype.h>
22#include <linux/kallsyms.h>
23
24/*
25 * Lock order:
26 * 1. slab_lock(page)
27 * 2. slab->list_lock
28 *
29 * The slab_lock protects operations on the object of a particular
30 * slab and its metadata in the page struct. If the slab lock
31 * has been taken then no allocations nor frees can be performed
32 * on the objects in the slab nor can the slab be added or removed
33 * from the partial or full lists since this would mean modifying
34 * the page_struct of the slab.
35 *
36 * The list_lock protects the partial and full list on each node and
37 * the partial slab counter. If taken then no new slabs may be added or
38 * removed from the lists nor may the number of partial slabs be modified.
39 * (Note that the total number of slabs is an atomic value that may be
40 * modified without taking the list lock).
41 *
42 * The list_lock is a centralized lock and thus we avoid taking it as
43 * much as possible. As long as SLUB does not have to handle partial
44 * slabs, operations can continue without any centralized lock. F.e.
45 * allocating a long series of objects that fill up slabs does not require
46 * the list lock.
47 *
48 * The lock order is sometimes inverted when we are trying to get a slab
49 * off a list. We take the list_lock and then look for a page on the list
50 * to use. While we do that objects in the slabs may be freed. We can
51 * only operate on the slab if we have also taken the slab_lock. So we use
52 * a slab_trylock() on the slab. If trylock was successful then no frees
53 * can occur anymore and we can use the slab for allocations etc. If the
54 * slab_trylock() does not succeed then frees are in progress in the slab and
55 * we must stay away from it for a while since we may cause a bouncing
56 * cacheline if we try to acquire the lock. So go onto the next slab.
57 * If all pages are busy then we may allocate a new slab instead of reusing
58 * a partial slab. A new slab has no one operating on it and thus there is
59 * no danger of cacheline contention.
60 *
61 * Interrupts are disabled during allocation and deallocation in order to
62 * make the slab allocator safe to use in the context of an irq. In addition
63 * interrupts are disabled to ensure that the processor does not change
64 * while handling per_cpu slabs, due to kernel preemption.
65 *
66 * SLUB assigns one slab for allocation to each processor.
67 * Allocations only occur from these slabs called cpu slabs.
68 *
69 * Slabs with free elements are kept on a partial list and during regular
70 * operations no list for full slabs is used. If an object in a full slab is
71 * freed then the slab will show up again on the partial lists.
72 * We track full slabs for debugging purposes though because otherwise we
73 * cannot scan all objects.
74 *
75 * Slabs are freed when they become empty. Teardown and setup is
76 * minimal so we rely on the page allocators per cpu caches for
77 * fast frees and allocs.
78 *
79 * Overloading of page flags that are otherwise used for LRU management.
80 *
81 * PageActive The slab is frozen and exempt from list processing.
82 * This means that the slab is dedicated to a purpose
83 * such as satisfying allocations for a specific
84 * processor. Objects may be freed in the slab while
85 * it is frozen but slab_free will then skip the usual
86 * list operations. It is up to the processor holding
87 * the slab to integrate the slab into the slab lists
88 * when the slab is no longer needed.
89 *
90 * One use of this flag is to mark slabs that are
91 * used for allocations. Then such a slab becomes a cpu
92 * slab. The cpu slab may be equipped with an additional
93 * lockless_freelist that allows lockless access to
94 * free objects in addition to the regular freelist
95 * that requires the slab lock.
96 *
97 * PageError Slab requires special handling due to debug
98 * options set. This moves slab handling out of
99 * the fast path and disables lockless freelists.
100 */
101
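/*
 * Illustrative nesting sketch (mirrors get_partial_node()/lock_and_freeze_slab()
 * further down; not a verbatim excerpt):
 *
 *	spin_lock(&n->list_lock);
 *	list_for_each_entry(page, &n->partial, lru)
 *		if (slab_trylock(page)) { ... take the slab ... }
 *	spin_unlock(&n->list_lock);
 */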
102#define FROZEN (1 << PG_active)
103
104#ifdef CONFIG_SLUB_DEBUG
105#define SLABDEBUG (1 << PG_error)
106#else
107#define SLABDEBUG 0
108#endif
109
110static inline int SlabFrozen(struct page *page)
111{
112 return page->flags & FROZEN;
113}
114
115static inline void SetSlabFrozen(struct page *page)
116{
117 page->flags |= FROZEN;
118}
119
120static inline void ClearSlabFrozen(struct page *page)
121{
122 page->flags &= ~FROZEN;
123}
124
125static inline int SlabDebug(struct page *page)
126{
127 return page->flags & SLABDEBUG;
128}
129
130static inline void SetSlabDebug(struct page *page)
131{
132 page->flags |= SLABDEBUG;
133}
134
135static inline void ClearSlabDebug(struct page *page)
136{
137 page->flags &= ~SLABDEBUG;
138}
139
140/*
141 * Issues still to be resolved:
142 *
143 * - The per cpu array is updated for each new slab and is a remote
144 * cacheline for most nodes. This could become a bouncing cacheline given
145 * frequent enough updates. There are 16 pointers in a cacheline, so at
146 * max 16 cpus could compete for the cacheline which may be okay.
147 *
148 * - Support PAGE_ALLOC_DEBUG. Should be easy to do.
149 *
150 * - Variable sizing of the per node arrays
151 */
152
153/* Enable to test recovery from slab corruption on boot */
154#undef SLUB_RESILIENCY_TEST
155
156#if PAGE_SHIFT <= 12
157
158/*
159 * Small page size. Make sure that we do not fragment memory
160 */
161#define DEFAULT_MAX_ORDER 1
162#define DEFAULT_MIN_OBJECTS 4
163
164#else
165
166/*
167 * Large page machines are customarily able to handle larger
168 * page orders.
169 */
170#define DEFAULT_MAX_ORDER 2
171#define DEFAULT_MIN_OBJECTS 8
172
173#endif
174
175/*
176 * Minimum number of partial slabs. These will be left on the partial
177 * lists even if they are empty. kmem_cache_shrink may reclaim them.
178 */
179#define MIN_PARTIAL 2
180
181/*
182 * Maximum number of desirable partial slabs.
183 * The existence of more partial slabs makes kmem_cache_shrink
184 * sort the partial list by the number of objects in the list.
185 */
186#define MAX_PARTIAL 10
187
188#define DEBUG_DEFAULT_FLAGS (SLAB_DEBUG_FREE | SLAB_RED_ZONE | \
189 SLAB_POISON | SLAB_STORE_USER)
190
191/*
192 * Set of flags that will prevent slab merging
193 */
194#define SLUB_NEVER_MERGE (SLAB_RED_ZONE | SLAB_POISON | SLAB_STORE_USER | \
195 SLAB_TRACE | SLAB_DESTROY_BY_RCU)
196
197#define SLUB_MERGE_SAME (SLAB_DEBUG_FREE | SLAB_RECLAIM_ACCOUNT | \
198 SLAB_CACHE_DMA)
199
200#ifndef ARCH_KMALLOC_MINALIGN
201#define ARCH_KMALLOC_MINALIGN __alignof__(unsigned long long)
202#endif
203
204#ifndef ARCH_SLAB_MINALIGN
205#define ARCH_SLAB_MINALIGN __alignof__(unsigned long long)
206#endif
207
208/*
209 * The page->inuse field is 16 bits wide, thus we have this limitation
210 */
211#define MAX_OBJECTS_PER_SLAB 65535
212
213/* Internal SLUB flags */
214#define __OBJECT_POISON 0x80000000 /* Poison object */
215#define __SYSFS_ADD_DEFERRED 0x40000000 /* Not yet visible via sysfs */
216
217/* Not all arches define cache_line_size */
218#ifndef cache_line_size
219#define cache_line_size() L1_CACHE_BYTES
220#endif
221
222static int kmem_size = sizeof(struct kmem_cache);
223
224#ifdef CONFIG_SMP
225static struct notifier_block slab_notifier;
226#endif
227
228static enum {
229 DOWN, /* No slab functionality available */
230 PARTIAL, /* kmem_cache_open() works but kmalloc does not */
231 UP, /* Everything works but does not show up in sysfs */
232 SYSFS /* Sysfs up */
233} slab_state = DOWN;
234
235/* A list of all slab caches on the system */
236static DECLARE_RWSEM(slub_lock);
237static LIST_HEAD(slab_caches);
238
239/*
240 * Tracking user of a slab.
241 */
242struct track {
243 void *addr; /* Called from address */
244 int cpu; /* Was running on cpu */
245 int pid; /* Pid context */
246 unsigned long when; /* When did the operation occur */
247};
248
249enum track_item { TRACK_ALLOC, TRACK_FREE };
250
251#if defined(CONFIG_SYSFS) && defined(CONFIG_SLUB_DEBUG)
252static int sysfs_slab_add(struct kmem_cache *);
253static int sysfs_slab_alias(struct kmem_cache *, const char *);
254static void sysfs_slab_remove(struct kmem_cache *);
255#else
256static inline int sysfs_slab_add(struct kmem_cache *s) { return 0; }
257static inline int sysfs_slab_alias(struct kmem_cache *s, const char *p)
258 { return 0; }
259static inline void sysfs_slab_remove(struct kmem_cache *s) {}
260#endif
261
262/********************************************************************
263 * Core slab cache functions
264 *******************************************************************/
265
266int slab_is_available(void)
267{
268 return slab_state >= UP;
269}
270
271static inline struct kmem_cache_node *get_node(struct kmem_cache *s, int node)
272{
273#ifdef CONFIG_NUMA
274 return s->node[node];
275#else
276 return &s->local_node;
277#endif
278}
279
280static inline int check_valid_pointer(struct kmem_cache *s,
281 struct page *page, const void *object)
282{
283 void *base;
284
285 if (!object)
286 return 1;
287
288 base = page_address(page);
289 if (object < base || object >= base + s->objects * s->size ||
290 (object - base) % s->size) {
291 return 0;
292 }
293
294 return 1;
295}
296
297/*
298 * Slow version of get and set free pointer.
299 *
300 * This version requires touching the cache lines of kmem_cache which
301 * we avoid doing in the fast alloc/free paths. There we obtain the offset
302 * from the page struct.
303 */
304static inline void *get_freepointer(struct kmem_cache *s, void *object)
305{
306 return *(void **)(object + s->offset);
307}
308
309static inline void set_freepointer(struct kmem_cache *s, void *object, void *fp)
310{
311 *(void **)(object + s->offset) = fp;
312}
313
314/* Loop over all objects in a slab */
315#define for_each_object(__p, __s, __addr) \
316 for (__p = (__addr); __p < (__addr) + (__s)->objects * (__s)->size;\
317 __p += (__s)->size)
318
319/* Scan freelist */
320#define for_each_free_object(__p, __s, __free) \
321 for (__p = (__free); __p; __p = get_freepointer((__s), __p))
322
323/* Determine object index from a given position */
324static inline int slab_index(void *p, struct kmem_cache *s, void *addr)
325{
326 return (p - addr) / s->size;
327}
328
329#ifdef CONFIG_SLUB_DEBUG
330/*
331 * Debug settings:
332 */
333#ifdef CONFIG_SLUB_DEBUG_ON
334static int slub_debug = DEBUG_DEFAULT_FLAGS;
335#else
336static int slub_debug;
337#endif
338
339static char *slub_debug_slabs;
340
341/*
342 * Object debugging
343 */
344static void print_section(char *text, u8 *addr, unsigned int length)
345{
346 int i, offset;
347 int newline = 1;
348 char ascii[17];
349
350 ascii[16] = 0;
351
352 for (i = 0; i < length; i++) {
353 if (newline) {
354 printk(KERN_ERR "%8s 0x%p: ", text, addr + i);
355 newline = 0;
356 }
357 printk(" %02x", addr[i]);
358 offset = i % 16;
359 ascii[offset] = isgraph(addr[i]) ? addr[i] : '.';
360 if (offset == 15) {
361 printk(" %s\n",ascii);
362 newline = 1;
363 }
364 }
365 if (!newline) {
366 i %= 16;
367 while (i < 16) {
368 printk(" ");
369 ascii[i] = ' ';
370 i++;
371 }
372 printk(" %s\n", ascii);
373 }
374}
375
376static struct track *get_track(struct kmem_cache *s, void *object,
377 enum track_item alloc)
378{
379 struct track *p;
380
381 if (s->offset)
382 p = object + s->offset + sizeof(void *);
383 else
384 p = object + s->inuse;
385
386 return p + alloc;
387}
388
389static void set_track(struct kmem_cache *s, void *object,
390 enum track_item alloc, void *addr)
391{
392 struct track *p;
393
394 if (s->offset)
395 p = object + s->offset + sizeof(void *);
396 else
397 p = object + s->inuse;
398
399 p += alloc;
400 if (addr) {
401 p->addr = addr;
402 p->cpu = smp_processor_id();
403 p->pid = current ? current->pid : -1;
404 p->when = jiffies;
405 } else
406 memset(p, 0, sizeof(struct track));
407}
408
409static void init_tracking(struct kmem_cache *s, void *object)
410{
411 if (!(s->flags & SLAB_STORE_USER))
412 return;
413
414 set_track(s, object, TRACK_FREE, NULL);
415 set_track(s, object, TRACK_ALLOC, NULL);
416}
417
418static void print_track(const char *s, struct track *t)
419{
420 if (!t->addr)
421 return;
422
423 printk(KERN_ERR "INFO: %s in ", s);
424 __print_symbol("%s", (unsigned long)t->addr);
425 printk(" age=%lu cpu=%u pid=%d\n", jiffies - t->when, t->cpu, t->pid);
426}
427
428static void print_tracking(struct kmem_cache *s, void *object)
429{
430 if (!(s->flags & SLAB_STORE_USER))
431 return;
432
433 print_track("Allocated", get_track(s, object, TRACK_ALLOC));
434 print_track("Freed", get_track(s, object, TRACK_FREE));
435}
436
437static void print_page_info(struct page *page)
438{
439 printk(KERN_ERR "INFO: Slab 0x%p used=%u fp=0x%p flags=0x%04lx\n",
440 page, page->inuse, page->freelist, page->flags);
441
442}
443
444static void slab_bug(struct kmem_cache *s, char *fmt, ...)
445{
446 va_list args;
447 char buf[100];
448
449 va_start(args, fmt);
450 vsnprintf(buf, sizeof(buf), fmt, args);
451 va_end(args);
452 printk(KERN_ERR "========================================"
453 "=====================================\n");
454 printk(KERN_ERR "BUG %s: %s\n", s->name, buf);
455 printk(KERN_ERR "----------------------------------------"
456 "-------------------------------------\n\n");
457}
458
459static void slab_fix(struct kmem_cache *s, char *fmt, ...)
460{
461 va_list args;
462 char buf[100];
463
464 va_start(args, fmt);
465 vsnprintf(buf, sizeof(buf), fmt, args);
466 va_end(args);
467 printk(KERN_ERR "FIX %s: %s\n", s->name, buf);
468}
469
470static void print_trailer(struct kmem_cache *s, struct page *page, u8 *p)
471{
472 unsigned int off; /* Offset of last byte */
473 u8 *addr = page_address(page);
474
475 print_tracking(s, p);
476
477 print_page_info(page);
478
479 printk(KERN_ERR "INFO: Object 0x%p @offset=%tu fp=0x%p\n\n",
480 p, p - addr, get_freepointer(s, p));
481
482 if (p > addr + 16)
483 print_section("Bytes b4", p - 16, 16);
484
485 print_section("Object", p, min(s->objsize, 128));
486
487 if (s->flags & SLAB_RED_ZONE)
488 print_section("Redzone", p + s->objsize,
489 s->inuse - s->objsize);
490
491 if (s->offset)
492 off = s->offset + sizeof(void *);
493 else
494 off = s->inuse;
495
496 if (s->flags & SLAB_STORE_USER)
497 off += 2 * sizeof(struct track);
498
499 if (off != s->size)
500 /* Beginning of the filler is the free pointer */
501 print_section("Padding", p + off, s->size - off);
502
503 dump_stack();
504}
505
506static void object_err(struct kmem_cache *s, struct page *page,
507 u8 *object, char *reason)
508{
509 slab_bug(s, reason);
510 print_trailer(s, page, object);
511}
512
513static void slab_err(struct kmem_cache *s, struct page *page, char *fmt, ...)
514{
515 va_list args;
516 char buf[100];
517
518 va_start(args, fmt);
519 vsnprintf(buf, sizeof(buf), fmt, args);
520 va_end(args);
521 slab_bug(s, fmt);
522 print_page_info(page);
523 dump_stack();
524}
525
526static void init_object(struct kmem_cache *s, void *object, int active)
527{
528 u8 *p = object;
529
530 if (s->flags & __OBJECT_POISON) {
531 memset(p, POISON_FREE, s->objsize - 1);
532 p[s->objsize -1] = POISON_END;
533 }
534
535 if (s->flags & SLAB_RED_ZONE)
536 memset(p + s->objsize,
537 active ? SLUB_RED_ACTIVE : SLUB_RED_INACTIVE,
538 s->inuse - s->objsize);
539}
540
541static u8 *check_bytes(u8 *start, unsigned int value, unsigned int bytes)
542{
543 while (bytes) {
544 if (*start != (u8)value)
545 return start;
546 start++;
547 bytes--;
548 }
549 return NULL;
550}
551
552static void restore_bytes(struct kmem_cache *s, char *message, u8 data,
553 void *from, void *to)
554{
555 slab_fix(s, "Restoring 0x%p-0x%p=0x%x\n", from, to - 1, data);
556 memset(from, data, to - from);
557}
558
559static int check_bytes_and_report(struct kmem_cache *s, struct page *page,
560 u8 *object, char *what,
561 u8* start, unsigned int value, unsigned int bytes)
562{
563 u8 *fault;
564 u8 *end;
565
566 fault = check_bytes(start, value, bytes);
567 if (!fault)
568 return 1;
569
570 end = start + bytes;
571 while (end > fault && end[-1] == value)
572 end--;
573
574 slab_bug(s, "%s overwritten", what);
575 printk(KERN_ERR "INFO: 0x%p-0x%p. First byte 0x%x instead of 0x%x\n",
576 fault, end - 1, fault[0], value);
577 print_trailer(s, page, object);
578
579 restore_bytes(s, what, value, fault, end);
580 return 0;
581}
582
583/*
584 * Object layout:
585 *
586 * object address
587 * Bytes of the object to be managed.
588 * If the freepointer may overlay the object then the free
589 * pointer is the first word of the object.
590 *
591 * Poisoning uses 0x6b (POISON_FREE) and the last byte is
592 * 0xa5 (POISON_END)
593 *
594 * object + s->objsize
595 * Padding to reach word boundary. This is also used for Redzoning.
596 * Padding is extended by another word if Redzoning is enabled and
597 * objsize == inuse.
598 *
599 * We fill with 0xbb (RED_INACTIVE) for inactive objects and with
600 * 0xcc (RED_ACTIVE) for objects in use.
601 *
602 * object + s->inuse
603 * Meta data starts here.
604 *
605 * A. Free pointer (if we cannot overwrite object on free)
606 * B. Tracking data for SLAB_STORE_USER
607 * C. Padding to reach required alignment boundary or at minimum
608 * one word if debugging is on to be able to detect writes
609 * before the word boundary.
610 *
611 * Padding is done using 0x5a (POISON_INUSE)
612 *
613 * object + s->size
614 * Nothing is used beyond s->size.
615 *
616 * If slabcaches are merged then the objsize and inuse boundaries are mostly
617 * ignored. And therefore no slab options that rely on these boundaries
618 * may be used with merged slabcaches.
619 */
620
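/*
 * Illustrative example: with poisoning enabled a free object of objsize 8
 * holds 6b 6b 6b 6b 6b 6b 6b a5, i.e. POISON_FREE in every byte except the
 * last one, which holds POISON_END (see init_object() above).
 */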
621static int check_pad_bytes(struct kmem_cache *s, struct page *page, u8 *p)
622{
623 unsigned long off = s->inuse; /* The end of info */
624
625 if (s->offset)
626 /* Freepointer is placed after the object. */
627 off += sizeof(void *);
628
629 if (s->flags & SLAB_STORE_USER)
630 /* We also have user information there */
631 off += 2 * sizeof(struct track);
632
633 if (s->size == off)
634 return 1;
635
636 return check_bytes_and_report(s, page, p, "Object padding",
637 p + off, POISON_INUSE, s->size - off);
638}
639
640static int slab_pad_check(struct kmem_cache *s, struct page *page)
641{
642 u8 *start;
643 u8 *fault;
644 u8 *end;
645 int length;
646 int remainder;
647
648 if (!(s->flags & SLAB_POISON))
649 return 1;
650
651 start = page_address(page);
652 end = start + (PAGE_SIZE << s->order);
653 length = s->objects * s->size;
654 remainder = end - (start + length);
655 if (!remainder)
656 return 1;
657
658 fault = check_bytes(start + length, POISON_INUSE, remainder);
659 if (!fault)
660 return 1;
661 while (end > fault && end[-1] == POISON_INUSE)
662 end--;
663
664 slab_err(s, page, "Padding overwritten. 0x%p-0x%p", fault, end - 1);
665 print_section("Padding", start, length);
666
667 restore_bytes(s, "slab padding", POISON_INUSE, start, end);
668 return 0;
669}
670
671static int check_object(struct kmem_cache *s, struct page *page,
672 void *object, int active)
673{
674 u8 *p = object;
675 u8 *endobject = object + s->objsize;
676
677 if (s->flags & SLAB_RED_ZONE) {
678 unsigned int red =
679 active ? SLUB_RED_ACTIVE : SLUB_RED_INACTIVE;
680
681 if (!check_bytes_and_report(s, page, object, "Redzone",
682 endobject, red, s->inuse - s->objsize))
683 return 0;
684 } else {
685 if ((s->flags & SLAB_POISON) && s->objsize < s->inuse)
686 check_bytes_and_report(s, page, p, "Alignment padding", endobject,
687 POISON_INUSE, s->inuse - s->objsize);
688 }
689
690 if (s->flags & SLAB_POISON) {
691 if (!active && (s->flags & __OBJECT_POISON) &&
692 (!check_bytes_and_report(s, page, p, "Poison", p,
693 POISON_FREE, s->objsize - 1) ||
694 !check_bytes_and_report(s, page, p, "Poison",
695 p + s->objsize -1, POISON_END, 1)))
696 return 0;
697 /*
698 * check_pad_bytes cleans up on its own.
699 */
700 check_pad_bytes(s, page, p);
701 }
702
703 if (!s->offset && active)
704 /*
705 * Object and freepointer overlap. Cannot check
706 * freepointer while object is allocated.
707 */
708 return 1;
709
710 /* Check free pointer validity */
711 if (!check_valid_pointer(s, page, get_freepointer(s, p))) {
712 object_err(s, page, p, "Freepointer corrupt");
713 /*
714 * No choice but to zap it and thus lose the remainder
715 * of the free objects in this slab. May cause
716 * another error because the object count is now wrong.
717 */
718 set_freepointer(s, p, NULL);
719 return 0;
720 }
721 return 1;
722}
723
724static int check_slab(struct kmem_cache *s, struct page *page)
725{
726 VM_BUG_ON(!irqs_disabled());
727
728 if (!PageSlab(page)) {
729 slab_err(s, page, "Not a valid slab page");
730 return 0;
731 }
732 if (page->offset * sizeof(void *) != s->offset) {
733 slab_err(s, page, "Corrupted offset %lu",
734 (unsigned long)(page->offset * sizeof(void *)));
735 return 0;
736 }
737 if (page->inuse > s->objects) {
738 slab_err(s, page, "inuse %u > max %u",
739 page->inuse, s->objects);
740 return 0;
741 }
742 /* Slab_pad_check fixes things up after itself */
743 slab_pad_check(s, page);
744 return 1;
745}
746
747/*
748 * Determine if a certain object on a page is on the freelist. Must hold the
749 * slab lock to guarantee that the chains are in a consistent state.
750 */
751static int on_freelist(struct kmem_cache *s, struct page *page, void *search)
752{
753 int nr = 0;
754 void *fp = page->freelist;
755 void *object = NULL;
756
757 while (fp && nr <= s->objects) {
758 if (fp == search)
759 return 1;
760 if (!check_valid_pointer(s, page, fp)) {
761 if (object) {
762 object_err(s, page, object,
763 "Freechain corrupt");
764 set_freepointer(s, object, NULL);
765 break;
766 } else {
767 slab_err(s, page, "Freepointer corrupt");
768 page->freelist = NULL;
769 page->inuse = s->objects;
770 slab_fix(s, "Freelist cleared");
771 return 0;
772 }
773 break;
774 }
775 object = fp;
776 fp = get_freepointer(s, object);
777 nr++;
778 }
779
780 if (page->inuse != s->objects - nr) {
781 slab_err(s, page, "Wrong object count. Counter is %d but "
782 "counted were %d", page->inuse, s->objects - nr);
783 page->inuse = s->objects - nr;
784 slab_fix(s, "Object count adjusted.");
785 }
786 return search == NULL;
787}
788
789static void trace(struct kmem_cache *s, struct page *page, void *object, int alloc)
790{
791 if (s->flags & SLAB_TRACE) {
792 printk(KERN_INFO "TRACE %s %s 0x%p inuse=%d fp=0x%p\n",
793 s->name,
794 alloc ? "alloc" : "free",
795 object, page->inuse,
796 page->freelist);
797
798 if (!alloc)
799 print_section("Object", (void *)object, s->objsize);
800
801 dump_stack();
802 }
803}
804
805/*
806 * Tracking of fully allocated slabs for debugging purposes.
807 */
808static void add_full(struct kmem_cache_node *n, struct page *page)
809{
810 spin_lock(&n->list_lock);
811 list_add(&page->lru, &n->full);
812 spin_unlock(&n->list_lock);
813}
814
815static void remove_full(struct kmem_cache *s, struct page *page)
816{
817 struct kmem_cache_node *n;
818
819 if (!(s->flags & SLAB_STORE_USER))
820 return;
821
822 n = get_node(s, page_to_nid(page));
823
824 spin_lock(&n->list_lock);
825 list_del(&page->lru);
826 spin_unlock(&n->list_lock);
827}
828
829static void setup_object_debug(struct kmem_cache *s, struct page *page,
830 void *object)
831{
832 if (!(s->flags & (SLAB_STORE_USER|SLAB_RED_ZONE|__OBJECT_POISON)))
833 return;
834
835 init_object(s, object, 0);
836 init_tracking(s, object);
837}
838
839static int alloc_debug_processing(struct kmem_cache *s, struct page *page,
840 void *object, void *addr)
841{
842 if (!check_slab(s, page))
843 goto bad;
844
845 if (object && !on_freelist(s, page, object)) {
846 object_err(s, page, object, "Object already allocated");
847 goto bad;
848 }
849
850 if (!check_valid_pointer(s, page, object)) {
851 object_err(s, page, object, "Freelist Pointer check fails");
852 goto bad;
853 }
854
855 if (object && !check_object(s, page, object, 0))
856 goto bad;
857
858 /* Success perform special debug activities for allocs */
859 if (s->flags & SLAB_STORE_USER)
860 set_track(s, object, TRACK_ALLOC, addr);
861 trace(s, page, object, 1);
862 init_object(s, object, 1);
863 return 1;
864
865bad:
866 if (PageSlab(page)) {
867 /*
868 * If this is a slab page then lets do the best we can
869 * to avoid issues in the future. Marking all objects
870 * as used avoids touching the remaining objects.
871 */
872 slab_fix(s, "Marking all objects used");
873 page->inuse = s->objects;
874 page->freelist = NULL;
875 /* Fix up fields that may be corrupted */
876 page->offset = s->offset / sizeof(void *);
877 }
878 return 0;
879}
880
881static int free_debug_processing(struct kmem_cache *s, struct page *page,
882 void *object, void *addr)
883{
884 if (!check_slab(s, page))
885 goto fail;
886
887 if (!check_valid_pointer(s, page, object)) {
888 slab_err(s, page, "Invalid object pointer 0x%p", object);
889 goto fail;
890 }
891
892 if (on_freelist(s, page, object)) {
893 object_err(s, page, object, "Object already free");
894 goto fail;
895 }
896
897 if (!check_object(s, page, object, 1))
898 return 0;
899
900 if (unlikely(s != page->slab)) {
901 if (!PageSlab(page))
902 slab_err(s, page, "Attempt to free object(0x%p) "
903 "outside of slab", object);
904 else
905 if (!page->slab) {
906 printk(KERN_ERR
907 "SLUB <none>: no slab for object 0x%p.\n",
908 object);
909 dump_stack();
910 }
911 else
912 object_err(s, page, object,
913 "page slab pointer corrupt.");
914 goto fail;
915 }
916
917 /* Special debug activities for freeing objects */
918 if (!SlabFrozen(page) && !page->freelist)
919 remove_full(s, page);
920 if (s->flags & SLAB_STORE_USER)
921 set_track(s, object, TRACK_FREE, addr);
922 trace(s, page, object, 0);
923 init_object(s, object, 0);
924 return 1;
925
926fail:
927 slab_fix(s, "Object at 0x%p not freed", object);
928 return 0;
929}
930
931static int __init setup_slub_debug(char *str)
932{
933 slub_debug = DEBUG_DEFAULT_FLAGS;
934 if (*str++ != '=' || !*str)
935 /*
936 * No options specified. Switch on full debugging.
937 */
938 goto out;
939
940 if (*str == ',')
941 /*
942 * No options but restriction on slabs. This means full
943 * debugging for slabs matching a pattern.
944 */
945 goto check_slabs;
946
947 slub_debug = 0;
948 if (*str == '-')
949 /*
950 * Switch off all debugging measures.
951 */
952 goto out;
953
954 /*
955 * Determine which debug features should be switched on
956 */
957 for ( ;*str && *str != ','; str++) {
958 switch (tolower(*str)) {
959 case 'f':
960 slub_debug |= SLAB_DEBUG_FREE;
961 break;
962 case 'z':
963 slub_debug |= SLAB_RED_ZONE;
964 break;
965 case 'p':
966 slub_debug |= SLAB_POISON;
967 break;
968 case 'u':
969 slub_debug |= SLAB_STORE_USER;
970 break;
971 case 't':
972 slub_debug |= SLAB_TRACE;
973 break;
974 default:
975 printk(KERN_ERR "slub_debug option '%c' "
976 "unknown. skipped\n",*str);
977 }
978 }
979
980check_slabs:
981 if (*str == ',')
982 slub_debug_slabs = str + 1;
983out:
984 return 1;
985}
986
987__setup("slub_debug", setup_slub_debug);
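/*
 * Usage example (kernel command line, not code): booting with
 * "slub_debug=FZ,dentry" enables sanity checks (F) and red zoning (Z) only
 * for caches whose name starts with "dentry", while a bare "slub_debug"
 * switches on DEBUG_DEFAULT_FLAGS for all caches.
 */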
988
989static unsigned long kmem_cache_flags(unsigned long objsize,
990 unsigned long flags, const char *name,
991 void (*ctor)(void *, struct kmem_cache *, unsigned long))
992{
993 /*
994 * The page->offset field is only 16 bit wide. This is an offset
995 * in units of words from the beginning of an object. If the slab
996 * size is bigger then we cannot move the free pointer behind the
997 * object anymore.
998 *
999 * On 32 bit platforms the limit is 256k. On 64bit platforms
1000 * the limit is 512k.
1001 *
1002 * Debugging or ctor may create a need to move the free
1003 * pointer. Fail if this happens.
1004 */
1005 if (objsize >= 65535 * sizeof(void *)) {
1006 BUG_ON(flags & (SLAB_RED_ZONE | SLAB_POISON |
1007 SLAB_STORE_USER | SLAB_DESTROY_BY_RCU));
1008 BUG_ON(ctor);
1009 } else {
1010 /*
1011 * Enable debugging if selected on the kernel commandline.
1012 */
1013 if (slub_debug && (!slub_debug_slabs ||
1014 strncmp(slub_debug_slabs, name,
1015 strlen(slub_debug_slabs)) == 0))
1016 flags |= slub_debug;
1017 }
1018
1019 return flags;
1020}
1021#else
1022static inline void setup_object_debug(struct kmem_cache *s,
1023 struct page *page, void *object) {}
1024
1025static inline int alloc_debug_processing(struct kmem_cache *s,
1026 struct page *page, void *object, void *addr) { return 0; }
1027
1028static inline int free_debug_processing(struct kmem_cache *s,
1029 struct page *page, void *object, void *addr) { return 0; }
1030
1031static inline int slab_pad_check(struct kmem_cache *s, struct page *page)
1032 { return 1; }
1033static inline int check_object(struct kmem_cache *s, struct page *page,
1034 void *object, int active) { return 1; }
1035static inline void add_full(struct kmem_cache_node *n, struct page *page) {}
1036static inline unsigned long kmem_cache_flags(unsigned long objsize,
1037 unsigned long flags, const char *name,
1038 void (*ctor)(void *, struct kmem_cache *, unsigned long))
1039{
1040 return flags;
1041}
1042#define slub_debug 0
1043#endif
1044/*
1045 * Slab allocation and freeing
1046 */
1047static struct page *allocate_slab(struct kmem_cache *s, gfp_t flags, int node)
1048{
1049 struct page * page;
1050 int pages = 1 << s->order;
1051
1052 if (s->order)
1053 flags |= __GFP_COMP;
1054
1055 if (s->flags & SLAB_CACHE_DMA)
1056 flags |= SLUB_DMA;
1057
1058 if (s->flags & SLAB_RECLAIM_ACCOUNT)
1059 flags |= __GFP_RECLAIMABLE;
1060
1061 if (node == -1)
1062 page = alloc_pages(flags, s->order);
1063 else
1064 page = alloc_pages_node(node, flags, s->order);
1065
1066 if (!page)
1067 return NULL;
1068
1069 mod_zone_page_state(page_zone(page),
1070 (s->flags & SLAB_RECLAIM_ACCOUNT) ?
1071 NR_SLAB_RECLAIMABLE : NR_SLAB_UNRECLAIMABLE,
1072 pages);
1073
1074 return page;
1075}
1076
1077static void setup_object(struct kmem_cache *s, struct page *page,
1078 void *object)
1079{
1080 setup_object_debug(s, page, object);
1081 if (unlikely(s->ctor))
1082 s->ctor(object, s, 0);
1083}
1084
1085static struct page *new_slab(struct kmem_cache *s, gfp_t flags, int node)
1086{
1087 struct page *page;
1088 struct kmem_cache_node *n;
1089 void *start;
1090 void *end;
1091 void *last;
1092 void *p;
1093
1094 BUG_ON(flags & GFP_SLAB_BUG_MASK);
1095
1096 if (flags & __GFP_WAIT)
1097 local_irq_enable();
1098
1099 page = allocate_slab(s,
1100 flags & (GFP_RECLAIM_MASK | GFP_CONSTRAINT_MASK), node);
1101 if (!page)
1102 goto out;
1103
1104 n = get_node(s, page_to_nid(page));
1105 if (n)
1106 atomic_long_inc(&n->nr_slabs);
1107 page->offset = s->offset / sizeof(void *);
1108 page->slab = s;
1109 page->flags |= 1 << PG_slab;
1110 if (s->flags & (SLAB_DEBUG_FREE | SLAB_RED_ZONE | SLAB_POISON |
1111 SLAB_STORE_USER | SLAB_TRACE))
1112 SetSlabDebug(page);
1113
1114 start = page_address(page);
1115 end = start + s->objects * s->size;
1116
1117 if (unlikely(s->flags & SLAB_POISON))
1118 memset(start, POISON_INUSE, PAGE_SIZE << s->order);
1119
1120 last = start;
1121 for_each_object(p, s, start) {
1122 setup_object(s, page, last);
1123 set_freepointer(s, last, p);
1124 last = p;
1125 }
1126 setup_object(s, page, last);
1127 set_freepointer(s, last, NULL);
1128
1129 page->freelist = start;
1130 page->lockless_freelist = NULL;
1131 page->inuse = 0;
1132out:
1133 if (flags & __GFP_WAIT)
1134 local_irq_disable();
1135 return page;
1136}
1137
1138static void __free_slab(struct kmem_cache *s, struct page *page)
1139{
1140 int pages = 1 << s->order;
1141
1142 if (unlikely(SlabDebug(page))) {
1143 void *p;
1144
1145 slab_pad_check(s, page);
1146 for_each_object(p, s, page_address(page))
1147 check_object(s, page, p, 0);
1148 ClearSlabDebug(page);
1149 }
1150
1151 mod_zone_page_state(page_zone(page),
1152 (s->flags & SLAB_RECLAIM_ACCOUNT) ?
1153 NR_SLAB_RECLAIMABLE : NR_SLAB_UNRECLAIMABLE,
1154 - pages);
1155
1156 page->mapping = NULL;
1157 __free_pages(page, s->order);
1158}
1159
1160static void rcu_free_slab(struct rcu_head *h)
1161{
1162 struct page *page;
1163
1164 page = container_of((struct list_head *)h, struct page, lru);
1165 __free_slab(page->slab, page);
1166}
1167
1168static void free_slab(struct kmem_cache *s, struct page *page)
1169{
1170 if (unlikely(s->flags & SLAB_DESTROY_BY_RCU)) {
1171 /*
1172 * RCU free overloads the RCU head over the LRU
1173 */
1174 struct rcu_head *head = (void *)&page->lru;
1175
1176 call_rcu(head, rcu_free_slab);
1177 } else
1178 __free_slab(s, page);
1179}
1180
1181static void discard_slab(struct kmem_cache *s, struct page *page)
1182{
1183 struct kmem_cache_node *n = get_node(s, page_to_nid(page));
1184
1185 atomic_long_dec(&n->nr_slabs);
1186 reset_page_mapcount(page);
1187 __ClearPageSlab(page);
1188 free_slab(s, page);
1189}
1190
1191/*
1192 * Per slab locking using the pagelock
1193 */
1194static __always_inline void slab_lock(struct page *page)
1195{
1196 bit_spin_lock(PG_locked, &page->flags);
1197}
1198
1199static __always_inline void slab_unlock(struct page *page)
1200{
1201 bit_spin_unlock(PG_locked, &page->flags);
1202}
1203
1204static __always_inline int slab_trylock(struct page *page)
1205{
1206 int rc = 1;
1207
1208 rc = bit_spin_trylock(PG_locked, &page->flags);
1209 return rc;
1210}
1211
1212/*
1213 * Management of partially allocated slabs
1214 */
1215static void add_partial_tail(struct kmem_cache_node *n, struct page *page)
1216{
1217 spin_lock(&n->list_lock);
1218 n->nr_partial++;
1219 list_add_tail(&page->lru, &n->partial);
1220 spin_unlock(&n->list_lock);
1221}
1222
1223static void add_partial(struct kmem_cache_node *n, struct page *page)
1224{
1225 spin_lock(&n->list_lock);
1226 n->nr_partial++;
1227 list_add(&page->lru, &n->partial);
1228 spin_unlock(&n->list_lock);
1229}
1230
1231static void remove_partial(struct kmem_cache *s,
1232 struct page *page)
1233{
1234 struct kmem_cache_node *n = get_node(s, page_to_nid(page));
1235
1236 spin_lock(&n->list_lock);
1237 list_del(&page->lru);
1238 n->nr_partial--;
1239 spin_unlock(&n->list_lock);
1240}
1241
1242/*
1243 * Lock slab and remove from the partial list.
1244 *
1245 * Must hold list_lock.
1246 */
1247static inline int lock_and_freeze_slab(struct kmem_cache_node *n, struct page *page)
1248{
1249 if (slab_trylock(page)) {
1250 list_del(&page->lru);
1251 n->nr_partial--;
1252 SetSlabFrozen(page);
1253 return 1;
1254 }
1255 return 0;
1256}
1257
1258/*
1259 * Try to allocate a partial slab from a specific node.
1260 */
1261static struct page *get_partial_node(struct kmem_cache_node *n)
1262{
1263 struct page *page;
1264
1265 /*
1266 * Racy check. If we mistakenly see no partial slabs then we
1267 * just allocate an empty slab. If we mistakenly try to get a
1268 * partial slab and there is none available then get_partials()
1269 * will return NULL.
1270 */
1271 if (!n || !n->nr_partial)
1272 return NULL;
1273
1274 spin_lock(&n->list_lock);
1275 list_for_each_entry(page, &n->partial, lru)
1276 if (lock_and_freeze_slab(n, page))
1277 goto out;
1278 page = NULL;
1279out:
1280 spin_unlock(&n->list_lock);
1281 return page;
1282}
1283
1284/*
1285 * Get a page from somewhere. Search in increasing NUMA distances.
1286 */
1287static struct page *get_any_partial(struct kmem_cache *s, gfp_t flags)
1288{
1289#ifdef CONFIG_NUMA
1290 struct zonelist *zonelist;
1291 struct zone **z;
1292 struct page *page;
1293
1294 /*
1295 * The defrag ratio allows a configuration of the tradeoffs between
1296 * inter node defragmentation and node local allocations. A lower
1297 * defrag_ratio increases the tendency to do local allocations
1298 * instead of attempting to obtain partial slabs from other nodes.
1299 *
1300 * If the defrag_ratio is set to 0 then kmalloc() always
1301 * returns node local objects. If the ratio is higher then kmalloc()
1302 * may return off node objects because partial slabs are obtained
1303 * from other nodes and filled up.
1304 *
1305 * If /sys/slab/xx/defrag_ratio is set to 100 (which makes
1306 * defrag_ratio = 1000) then every (well almost) allocation will
1307 * first attempt to defrag slab caches on other nodes. This means
1308 * scanning over all nodes to look for partial slabs which may be
1309 * expensive if we do it every time we are trying to find a slab
1310 * with available objects.
1311 */
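	/*
	 * Rough example: a sysfs defrag_ratio of 10 is stored here as 100,
	 * so only about 100 out of every 1024 allocations reaching this
	 * point go on to scan remote nodes for partial slabs.
	 */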
1312 if (!s->defrag_ratio || get_cycles() % 1024 > s->defrag_ratio)
1313 return NULL;
1314
1315 zonelist = &NODE_DATA(slab_node(current->mempolicy))
1316 ->node_zonelists[gfp_zone(flags)];
1317 for (z = zonelist->zones; *z; z++) {
1318 struct kmem_cache_node *n;
1319
1320 n = get_node(s, zone_to_nid(*z));
1321
1322 if (n && cpuset_zone_allowed_hardwall(*z, flags) &&
1323 n->nr_partial > MIN_PARTIAL) {
1324 page = get_partial_node(n);
1325 if (page)
1326 return page;
1327 }
1328 }
1329#endif
1330 return NULL;
1331}
1332
1333/*
1334 * Get a partial page, lock it and return it.
1335 */
1336static struct page *get_partial(struct kmem_cache *s, gfp_t flags, int node)
1337{
1338 struct page *page;
1339 int searchnode = (node == -1) ? numa_node_id() : node;
1340
1341 page = get_partial_node(get_node(s, searchnode));
1342 if (page || (flags & __GFP_THISNODE))
1343 return page;
1344
1345 return get_any_partial(s, flags);
1346}
1347
1348/*
1349 * Move a page back to the lists.
1350 *
1351 * Must be called with the slab lock held.
1352 *
1353 * On exit the slab lock will have been dropped.
1354 */
1355static void unfreeze_slab(struct kmem_cache *s, struct page *page)
1356{
1357 struct kmem_cache_node *n = get_node(s, page_to_nid(page));
1358
1359 ClearSlabFrozen(page);
1360 if (page->inuse) {
1361
1362 if (page->freelist)
1363 add_partial(n, page);
1364 else if (SlabDebug(page) && (s->flags & SLAB_STORE_USER))
1365 add_full(n, page);
1366 slab_unlock(page);
1367
1368 } else {
1369 if (n->nr_partial < MIN_PARTIAL) {
1370 /*
1371 * Adding an empty slab to the partial slabs in order
1372 * to avoid page allocator overhead. This slab needs
1373 * to come after the other slabs with objects in
1374 * order to fill them up. That way the size of the
1375 * partial list stays small. kmem_cache_shrink can
1376 * reclaim empty slabs from the partial list.
1377 */
1378 add_partial_tail(n, page);
1379 slab_unlock(page);
1380 } else {
1381 slab_unlock(page);
1382 discard_slab(s, page);
1383 }
1384 }
1385}
1386
1387/*
1388 * Remove the cpu slab
1389 */
1390static void deactivate_slab(struct kmem_cache *s, struct page *page, int cpu)
1391{
1392 /*
1393 * Merge cpu freelist into freelist. Typically we get here
1394 * because both freelists are empty. So this is unlikely
1395 * to occur.
1396 */
1397 while (unlikely(page->lockless_freelist)) {
1398 void **object;
1399
1400 /* Retrieve object from cpu_freelist */
1401 object = page->lockless_freelist;
1402 page->lockless_freelist = page->lockless_freelist[page->offset];
1403
1404 /* And put onto the regular freelist */
1405 object[page->offset] = page->freelist;
1406 page->freelist = object;
1407 page->inuse--;
1408 }
1409 s->cpu_slab[cpu] = NULL;
1410 unfreeze_slab(s, page);
1411}
1412
1413static inline void flush_slab(struct kmem_cache *s, struct page *page, int cpu)
1414{
1415 slab_lock(page);
1416 deactivate_slab(s, page, cpu);
1417}
1418
1419/*
1420 * Flush cpu slab.
1421 * Called from IPI handler with interrupts disabled.
1422 */
1423static inline void __flush_cpu_slab(struct kmem_cache *s, int cpu)
1424{
1425 struct page *page = s->cpu_slab[cpu];
1426
1427 if (likely(page))
1428 flush_slab(s, page, cpu);
1429}
1430
1431static void flush_cpu_slab(void *d)
1432{
1433 struct kmem_cache *s = d;
1434 int cpu = smp_processor_id();
1435
1436 __flush_cpu_slab(s, cpu);
1437}
1438
1439static void flush_all(struct kmem_cache *s)
1440{
1441#ifdef CONFIG_SMP
1442 on_each_cpu(flush_cpu_slab, s, 1, 1);
1443#else
1444 unsigned long flags;
1445
1446 local_irq_save(flags);
1447 flush_cpu_slab(s);
1448 local_irq_restore(flags);
1449#endif
1450}
1451
1452/*
1453 * Slow path. The lockless freelist is empty or we need to perform
1454 * debugging duties.
1455 *
1456 * Interrupts are disabled.
1457 *
1458 * Processing is still very fast if new objects have been freed to the
1459 * regular freelist. In that case we simply take over the regular freelist
1460 * as the lockless freelist and zap the regular freelist.
1461 *
1462 * If that is not working then we fall back to the partial lists. We take the
1463 * first element of the freelist as the object to allocate now and move the
1464 * rest of the freelist to the lockless freelist.
1465 *
1466 * And if we were unable to get a new slab from the partial slab lists then
1467 * we need to allocate a new slab. This is the slowest path since we may sleep.
1468 */
1469static void *__slab_alloc(struct kmem_cache *s,
1470 gfp_t gfpflags, int node, void *addr, struct page *page)
1471{
1472 void **object;
1473 int cpu = smp_processor_id();
1474
1475 if (!page)
1476 goto new_slab;
1477
1478 slab_lock(page);
1479 if (unlikely(node != -1 && page_to_nid(page) != node))
1480 goto another_slab;
1481load_freelist:
1482 object = page->freelist;
1483 if (unlikely(!object))
1484 goto another_slab;
1485 if (unlikely(SlabDebug(page)))
1486 goto debug;
1487
1488 object = page->freelist;
1489 page->lockless_freelist = object[page->offset];
1490 page->inuse = s->objects;
1491 page->freelist = NULL;
1492 slab_unlock(page);
1493 return object;
1494
1495another_slab:
1496 deactivate_slab(s, page, cpu);
1497
1498new_slab:
1499 page = get_partial(s, gfpflags, node);
1500 if (page) {
1501 s->cpu_slab[cpu] = page;
1502 goto load_freelist;
1503 }
1504
1505 page = new_slab(s, gfpflags, node);
1506 if (page) {
1507 cpu = smp_processor_id();
1508 if (s->cpu_slab[cpu]) {
1509 /*
1510 * Someone else populated the cpu_slab while we
1511 * enabled interrupts, or we have gotten scheduled
1512 * on another cpu. The page may not be on the
1513 * requested node even if __GFP_THISNODE was
1514 * specified. So we need to recheck.
1515 */
1516 if (node == -1 ||
1517 page_to_nid(s->cpu_slab[cpu]) == node) {
1518 /*
1519 * Current cpuslab is acceptable and we
1520 * want the current one since its cache hot
1521 */
1522 discard_slab(s, page);
1523 page = s->cpu_slab[cpu];
1524 slab_lock(page);
1525 goto load_freelist;
1526 }
1527 /* New slab does not fit our expectations */
1528 flush_slab(s, s->cpu_slab[cpu], cpu);
1529 }
1530 slab_lock(page);
1531 SetSlabFrozen(page);
1532 s->cpu_slab[cpu] = page;
1533 goto load_freelist;
1534 }
1535 return NULL;
1536debug:
1537 object = page->freelist;
1538 if (!alloc_debug_processing(s, page, object, addr))
1539 goto another_slab;
1540
1541 page->inuse++;
1542 page->freelist = object[page->offset];
1543 slab_unlock(page);
1544 return object;
1545}
1546
1547/*
1548 * Inlined fastpath so that allocation functions (kmalloc, kmem_cache_alloc)
1549 * have the fastpath folded into their functions. So no function call
1550 * overhead for requests that can be satisfied on the fastpath.
1551 *
1552 * The fastpath works by first checking if the lockless freelist can be used.
1553 * If not then __slab_alloc is called for slow processing.
1554 *
1555 * Otherwise we can simply pick the next object from the lockless free list.
1556 */
1557static void __always_inline *slab_alloc(struct kmem_cache *s,
1558 gfp_t gfpflags, int node, void *addr)
1559{
1560 struct page *page;
1561 void **object;
1562 unsigned long flags;
1563
1564 local_irq_save(flags);
1565 page = s->cpu_slab[smp_processor_id()];
1566 if (unlikely(!page || !page->lockless_freelist ||
1567 (node != -1 && page_to_nid(page) != node)))
1568
1569 object = __slab_alloc(s, gfpflags, node, addr, page);
1570
1571 else {
1572 object = page->lockless_freelist;
1573 page->lockless_freelist = object[page->offset];
1574 }
1575 local_irq_restore(flags);
1576
1577 if (unlikely((gfpflags & __GFP_ZERO) && object))
1578 memset(object, 0, s->objsize);
1579
1580 return object;
1581}
1582
1583void *kmem_cache_alloc(struct kmem_cache *s, gfp_t gfpflags)
1584{
1585 return slab_alloc(s, gfpflags, -1, __builtin_return_address(0));
1586}
1587EXPORT_SYMBOL(kmem_cache_alloc);
1588
1589#ifdef CONFIG_NUMA
1590void *kmem_cache_alloc_node(struct kmem_cache *s, gfp_t gfpflags, int node)
1591{
1592 return slab_alloc(s, gfpflags, node, __builtin_return_address(0));
1593}
1594EXPORT_SYMBOL(kmem_cache_alloc_node);
1595#endif
1596
1597/*
1598 * Slow path handling. This may still be called frequently since objects
1599 * have a longer lifetime than the cpu slabs in most processing loads.
1600 *
1601 * So we still attempt to reduce cache line usage. Just take the slab
1602 * lock and free the item. If there is no additional partial page
1603 * handling required then we can return immediately.
1604 */
1605static void __slab_free(struct kmem_cache *s, struct page *page,
1606 void *x, void *addr)
1607{
1608 void *prior;
1609 void **object = (void *)x;
1610
1611 slab_lock(page);
1612
1613 if (unlikely(SlabDebug(page)))
1614 goto debug;
1615checks_ok:
1616 prior = object[page->offset] = page->freelist;
1617 page->freelist = object;
1618 page->inuse--;
1619
1620 if (unlikely(SlabFrozen(page)))
1621 goto out_unlock;
1622
1623 if (unlikely(!page->inuse))
1624 goto slab_empty;
1625
1626 /*
1627 * Objects left in the slab. If it
1628 * was not on the partial list before
1629 * then add it.
1630 */
1631 if (unlikely(!prior))
1632 add_partial(get_node(s, page_to_nid(page)), page);
1633
1634out_unlock:
1635 slab_unlock(page);
1636 return;
1637
1638slab_empty:
1639 if (prior)
1640 /*
1641 * Slab still on the partial list.
1642 */
1643 remove_partial(s, page);
1644
1645 slab_unlock(page);
1646 discard_slab(s, page);
1647 return;
1648
1649debug:
1650 if (!free_debug_processing(s, page, x, addr))
1651 goto out_unlock;
1652 goto checks_ok;
1653}
1654
1655/*
1656 * Fastpath with forced inlining to produce a kfree and kmem_cache_free that
1657 * can perform fastpath freeing without additional function calls.
1658 *
1659 * The fastpath is only possible if we are freeing to the current cpu slab
1660 * of this processor. This is typically the case if we have just allocated
1661 * the item before.
1662 *
1663 * If fastpath is not possible then fall back to __slab_free where we deal
1664 * with all sorts of special processing.
1665 */
1666static void __always_inline slab_free(struct kmem_cache *s,
1667 struct page *page, void *x, void *addr)
1668{
1669 void **object = (void *)x;
1670 unsigned long flags;
1671
1672 local_irq_save(flags);
1673 debug_check_no_locks_freed(object, s->objsize);
1674 if (likely(page == s->cpu_slab[smp_processor_id()] &&
1675 !SlabDebug(page))) {
1676 object[page->offset] = page->lockless_freelist;
1677 page->lockless_freelist = object;
1678 } else
1679 __slab_free(s, page, x, addr);
1680
1681 local_irq_restore(flags);
1682}
1683
1684void kmem_cache_free(struct kmem_cache *s, void *x)
1685{
1686 struct page *page;
1687
1688 page = virt_to_head_page(x);
1689
1690 slab_free(s, page, x, __builtin_return_address(0));
1691}
1692EXPORT_SYMBOL(kmem_cache_free);
1693
1694/* Figure out on which slab object the object resides */
1695static struct page *get_object_page(const void *x)
1696{
1697 struct page *page = virt_to_head_page(x);
1698
1699 if (!PageSlab(page))
1700 return NULL;
1701
1702 return page;
1703}
1704
1705/*
1706 * Object placement in a slab is made very easy because we always start at
1707 * offset 0. If we tune the size of the object to the alignment then we can
1708 * get the required alignment by putting one properly sized object after
1709 * another.
1710 *
1711 * Notice that the allocation order determines the sizes of the per cpu
1712 * caches. Each processor has always one slab available for allocations.
1713 * Increasing the allocation order reduces the number of times that slabs
1714 * must be moved on and off the partial lists and is therefore a factor in
1715 * locking overhead.
1716 */
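/*
 * Example: 512-byte objects in an order-0 slab on a 4K page yield eight
 * objects, each naturally aligned to 512 bytes because packing starts at
 * offset 0 of the page.
 */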
1717
1718/*
1719 * Minimum / Maximum order of slab pages. This influences locking overhead
1720 * and slab fragmentation. A higher order reduces the number of partial slabs
1721 * and increases the number of allocations possible without having to
1722 * take the list_lock.
1723 */
1724static int slub_min_order;
1725static int slub_max_order = DEFAULT_MAX_ORDER;
1726static int slub_min_objects = DEFAULT_MIN_OBJECTS;
1727
1728/*
1729 * Merge control. If this is set then no merging of slab caches will occur.
1730 * (Could be removed. This was introduced to pacify the merge skeptics.)
1731 */
1732static int slub_nomerge;
1733
1734/*
1735 * Calculate the order of allocation given an slab object size.
1736 *
1737 * The order of allocation has significant impact on performance and other
1738 * system components. Generally order 0 allocations should be preferred since
1739 * order 0 does not cause fragmentation in the page allocator. Larger objects
1740 * can be problematic to put into order 0 slabs because there may be too much
1741 * unused space left. We go to a higher order if more than 1/8th of the slab
1742 * would be wasted.
1743 *
1744 * In order to reach satisfactory performance we must ensure that a minimum
1745 * number of objects is in one slab. Otherwise we may generate too much
1746 * activity on the partial lists which requires taking the list_lock. This is
1747 * less a concern for large slabs though which are rarely used.
1748 *
1749 * slub_max_order specifies the order where we begin to stop considering the
1750 * number of objects in a slab as critical. If we reach slub_max_order then
1751 * we try to keep the page order as low as possible. So we accept more waste
1752 * of space in favor of a small page order.
1753 *
1754 * Higher order allocations also allow the placement of more objects in a
1755 * slab and thereby reduce object handling overhead. If the user has
1756 * requested a higher minimum order then we start with that one instead of
1757 * the smallest order which will fit the object.
1758 */
1759static inline int slab_order(int size, int min_objects,
1760 int max_order, int fract_leftover)
1761{
1762 int order;
1763 int rem;
1764 int min_order = slub_min_order;
1765
1766 /*
1767 * If we would create too many object per slab then reduce
1768 * the slab order even if it goes below slub_min_order.
1769 */
1770 while (min_order > 0 &&
1771 (PAGE_SIZE << min_order) >= MAX_OBJECTS_PER_SLAB * size)
1772 min_order--;
1773
1774 for (order = max(min_order,
1775 fls(min_objects * size - 1) - PAGE_SHIFT);
1776 order <= max_order; order++) {
1777
1778 unsigned long slab_size = PAGE_SIZE << order;
1779
1780 if (slab_size < min_objects * size)
1781 continue;
1782
1783 rem = slab_size % size;
1784
1785 if (rem <= slab_size / fract_leftover)
1786 break;
1787
1788 /* If the next size is too high then exit now */
1789 if (slab_size * 2 >= MAX_OBJECTS_PER_SLAB * size)
1790 break;
1791 }
1792
1793 return order;
1794}
1795
1796static inline int calculate_order(int size)
1797{
1798 int order;
1799 int min_objects;
1800 int fraction;
1801
1802 /*
1803 * Attempt to find best configuration for a slab. This
1804 * works by first attempting to generate a layout with
1805 * the best configuration and backing off gradually.
1806 *
1807 * First we reduce the acceptable waste in a slab. Then
1808 * we reduce the minimum objects required in a slab.
1809 */
1810 min_objects = slub_min_objects;
1811 while (min_objects > 1) {
1812 fraction = 8;
1813 while (fraction >= 4) {
1814 order = slab_order(size, min_objects,
1815 slub_max_order, fraction);
1816 if (order <= slub_max_order)
1817 return order;
1818 fraction /= 2;
1819 }
1820 min_objects /= 2;
1821 }
1822
1823 /*
1824 * We were unable to place multiple objects in a slab. Now
1825 * lets see if we can place a single object there.
1826 */
1827 order = slab_order(size, 1, slub_max_order, 1);
1828 if (order <= slub_max_order)
1829 return order;
1830
1831 /*
1832 * Doh this slab cannot be placed using slub_max_order.
1833 */
1834 order = slab_order(size, 1, MAX_ORDER, 1);
1835 if (order <= MAX_ORDER)
1836 return order;
1837 return -ENOSYS;
1838}
1839
81819f0f 1840/*
672bba3a 1841 * Figure out what the alignment of the objects will be.
81819f0f
CL
1842 */
1843static unsigned long calculate_alignment(unsigned long flags,
1844 unsigned long align, unsigned long size)
1845{
1846 /*
1847 * If the user wants hardware cache aligned objects then
1848 * follow that suggestion if the object is sufficiently
1849 * large.
1850 *
1851 * The hardware cache alignment cannot override the
1852 * specified alignment though. If the specified alignment is
1853 * greater, use it.
1854 */
5af60839 1855 if ((flags & SLAB_HWCACHE_ALIGN) &&
65c02d4c
CL
1856 size > cache_line_size() / 2)
1857 return max_t(unsigned long, align, cache_line_size());
81819f0f
CL
1858
1859 if (align < ARCH_SLAB_MINALIGN)
1860 return ARCH_SLAB_MINALIGN;
1861
1862 return ALIGN(align, sizeof(void *));
1863}
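/*
 * Illustration of calculate_alignment() (an assumption-laden sketch, not
 * from the original source; assumes a 64 byte cache line and
 * ARCH_SLAB_MINALIGN <= 8): with SLAB_HWCACHE_ALIGN, a requested align of 8
 * and a 100 byte object, 100 > 64 / 2 so the alignment becomes 64. A 24
 * byte object is not "sufficiently large" and keeps the requested
 * word-aligned value instead.
 */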
1864
1865static void init_kmem_cache_node(struct kmem_cache_node *n)
1866{
1867 n->nr_partial = 0;
1868 atomic_long_set(&n->nr_slabs, 0);
1869 spin_lock_init(&n->list_lock);
1870 INIT_LIST_HEAD(&n->partial);
8ab1372f 1871#ifdef CONFIG_SLUB_DEBUG
643b1138 1872 INIT_LIST_HEAD(&n->full);
8ab1372f 1873#endif
81819f0f
CL
1874}
1875
1876#ifdef CONFIG_NUMA
1877/*
1878 * No kmalloc_node yet so do it by hand. We know that this is the first
1879 * slab on the node for this slabcache. There are no concurrent accesses
1880 * possible.
1881 *
1882 * Note that this function only works on the kmalloc_node_cache
1883 * when allocating for the kmalloc_node_cache.
1884 */
1cd7daa5
AB
1885static struct kmem_cache_node *early_kmem_cache_node_alloc(gfp_t gfpflags,
1886 int node)
81819f0f
CL
1887{
1888 struct page *page;
1889 struct kmem_cache_node *n;
1890
1891 BUG_ON(kmalloc_caches->size < sizeof(struct kmem_cache_node));
1892
a2f92ee7 1893 page = new_slab(kmalloc_caches, gfpflags, node);
81819f0f
CL
1894
1895 BUG_ON(!page);
a2f92ee7
CL
1896 if (page_to_nid(page) != node) {
1897 printk(KERN_ERR "SLUB: Unable to allocate memory from "
1898 "node %d\n", node);
1899 printk(KERN_ERR "SLUB: Allocating a useless per node structure "
1900 "in order to be able to continue\n");
1901 }
1902
81819f0f
CL
1903 n = page->freelist;
1904 BUG_ON(!n);
1905 page->freelist = get_freepointer(kmalloc_caches, n);
1906 page->inuse++;
1907 kmalloc_caches->node[node] = n;
8ab1372f 1908#ifdef CONFIG_SLUB_DEBUG
d45f39cb
CL
1909 init_object(kmalloc_caches, n, 1);
1910 init_tracking(kmalloc_caches, n);
8ab1372f 1911#endif
81819f0f
CL
1912 init_kmem_cache_node(n);
1913 atomic_long_inc(&n->nr_slabs);
e95eed57 1914 add_partial(n, page);
dbc55faa
CL
1915
1916 /*
1917 * new_slab() disables interrupts. If we do not re-enable interrupts here
1918 * then bootup would continue with interrupts disabled.
1919 */
1920 local_irq_enable();
81819f0f
CL
1921 return n;
1922}
1923
1924static void free_kmem_cache_nodes(struct kmem_cache *s)
1925{
1926 int node;
1927
f64dc58c 1928 for_each_node_state(node, N_NORMAL_MEMORY) {
81819f0f
CL
1929 struct kmem_cache_node *n = s->node[node];
1930 if (n && n != &s->local_node)
1931 kmem_cache_free(kmalloc_caches, n);
1932 s->node[node] = NULL;
1933 }
1934}
1935
1936static int init_kmem_cache_nodes(struct kmem_cache *s, gfp_t gfpflags)
1937{
1938 int node;
1939 int local_node;
1940
1941 if (slab_state >= UP)
1942 local_node = page_to_nid(virt_to_page(s));
1943 else
1944 local_node = 0;
1945
f64dc58c 1946 for_each_node_state(node, N_NORMAL_MEMORY) {
81819f0f
CL
1947 struct kmem_cache_node *n;
1948
1949 if (local_node == node)
1950 n = &s->local_node;
1951 else {
1952 if (slab_state == DOWN) {
1953 n = early_kmem_cache_node_alloc(gfpflags,
1954 node);
1955 continue;
1956 }
1957 n = kmem_cache_alloc_node(kmalloc_caches,
1958 gfpflags, node);
1959
1960 if (!n) {
1961 free_kmem_cache_nodes(s);
1962 return 0;
1963 }
1964
1965 }
1966 s->node[node] = n;
1967 init_kmem_cache_node(n);
1968 }
1969 return 1;
1970}
1971#else
1972static void free_kmem_cache_nodes(struct kmem_cache *s)
1973{
1974}
1975
1976static int init_kmem_cache_nodes(struct kmem_cache *s, gfp_t gfpflags)
1977{
1978 init_kmem_cache_node(&s->local_node);
1979 return 1;
1980}
1981#endif
1982
1983/*
1984 * calculate_sizes() determines the order and the distribution of data within
1985 * a slab object.
1986 */
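/*
 * Sketch of the resulting per-object layout (depends on the flags handled
 * below; only the object itself is always present):
 *
 *   [ object (objsize) | red zone word | free pointer | alloc/free tracks |
 *     padding word | alignment padding ]
 *
 * Each of the optional fields is added by the corresponding flag check in
 * calculate_sizes() when debugging is enabled.
 */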
1987static int calculate_sizes(struct kmem_cache *s)
1988{
1989 unsigned long flags = s->flags;
1990 unsigned long size = s->objsize;
1991 unsigned long align = s->align;
1992
1993 /*
1994 * Determine if we can poison the object itself. If the user of
1995 * the slab may touch the object after free or before allocation
1996 * then we should never poison the object itself.
1997 */
1998 if ((flags & SLAB_POISON) && !(flags & SLAB_DESTROY_BY_RCU) &&
c59def9f 1999 !s->ctor)
81819f0f
CL
2000 s->flags |= __OBJECT_POISON;
2001 else
2002 s->flags &= ~__OBJECT_POISON;
2003
2004 /*
2005 * Round up object size to the next word boundary. We can only
2006 * place the free pointer at word boundaries and this determines
2007 * the possible location of the free pointer.
2008 */
2009 size = ALIGN(size, sizeof(void *));
2010
41ecc55b 2011#ifdef CONFIG_SLUB_DEBUG
81819f0f 2012 /*
672bba3a 2013 * If we are Redzoning then check if there is some space between the
81819f0f 2014 * end of the object and the free pointer. If not then add an
672bba3a 2015 * additional word to have some bytes to store Redzone information.
81819f0f
CL
2016 */
2017 if ((flags & SLAB_RED_ZONE) && size == s->objsize)
2018 size += sizeof(void *);
41ecc55b 2019#endif
81819f0f
CL
2020
2021 /*
672bba3a
CL
2022 * With that we have determined the number of bytes in actual use
2023 * by the object. This is the potential offset to the free pointer.
81819f0f
CL
2024 */
2025 s->inuse = size;
2026
2027 if (((flags & (SLAB_DESTROY_BY_RCU | SLAB_POISON)) ||
c59def9f 2028 s->ctor)) {
81819f0f
CL
2029 /*
2030 * Relocate free pointer after the object if it is not
2031 * permitted to overwrite the first word of the object on
2032 * kmem_cache_free.
2033 *
2034 * This is the case if we do RCU, have a constructor or
2035 * are poisoning the objects.
2036 */
2037 s->offset = size;
2038 size += sizeof(void *);
2039 }
2040
c12b3c62 2041#ifdef CONFIG_SLUB_DEBUG
81819f0f
CL
2042 if (flags & SLAB_STORE_USER)
2043 /*
2044 * Need to store information about allocs and frees after
2045 * the object.
2046 */
2047 size += 2 * sizeof(struct track);
2048
be7b3fbc 2049 if (flags & SLAB_RED_ZONE)
81819f0f
CL
2050 /*
2051 * Add some empty padding so that we can catch
2052 * overwrites from earlier objects rather than let
2053 * tracking information or the free pointer be
2054 * corrupted if a user writes before the start
2055 * of the object.
2056 */
2057 size += sizeof(void *);
41ecc55b 2058#endif
672bba3a 2059
81819f0f
CL
2060 /*
2061 * Determine the alignment based on various parameters that the
65c02d4c
CL
2062 * user specified and the dynamic determination of cache line size
2063 * on bootup.
81819f0f
CL
2064 */
2065 align = calculate_alignment(flags, align, s->objsize);
2066
2067 /*
2068 * SLUB stores one object immediately after another beginning from
2069 * offset 0. In order to align the objects we simply have to size
2070 * each object to conform to the alignment.
2071 */
2072 size = ALIGN(size, align);
2073 s->size = size;
2074
2075 s->order = calculate_order(size);
2076 if (s->order < 0)
2077 return 0;
2078
2079 /*
2080 * Determine the number of objects per slab
2081 */
2082 s->objects = (PAGE_SIZE << s->order) / size;
2083
2084 /*
2085 * Verify that the number of objects is within permitted limits.
2086 * The page->inuse field is only 16 bits wide! So we cannot have
2087 * more than 64k objects per slab.
2088 */
6300ea75 2089 if (!s->objects || s->objects > MAX_OBJECTS_PER_SLAB)
81819f0f
CL
2090 return 0;
2091 return 1;
2092
2093}
2094
81819f0f
CL
2095static int kmem_cache_open(struct kmem_cache *s, gfp_t gfpflags,
2096 const char *name, size_t size,
2097 size_t align, unsigned long flags,
c59def9f 2098 void (*ctor)(void *, struct kmem_cache *, unsigned long))
81819f0f
CL
2099{
2100 memset(s, 0, kmem_size);
2101 s->name = name;
2102 s->ctor = ctor;
81819f0f 2103 s->objsize = size;
81819f0f 2104 s->align = align;
ba0268a8 2105 s->flags = kmem_cache_flags(size, flags, name, ctor);
81819f0f
CL
2106
2107 if (!calculate_sizes(s))
2108 goto error;
2109
2110 s->refcount = 1;
2111#ifdef CONFIG_NUMA
2112 s->defrag_ratio = 100;
2113#endif
2114
2115 if (init_kmem_cache_nodes(s, gfpflags & ~SLUB_DMA))
2116 return 1;
2117error:
2118 if (flags & SLAB_PANIC)
2119 panic("Cannot create slab %s size=%lu realsize=%u "
2120 "order=%u offset=%u flags=%lx\n",
2121 s->name, (unsigned long)size, s->size, s->order,
2122 s->offset, flags);
2123 return 0;
2124}
81819f0f
CL
2125
2126/*
2127 * Check if a given pointer is valid
2128 */
2129int kmem_ptr_validate(struct kmem_cache *s, const void *object)
2130{
2131 struct page *page;
81819f0f
CL
2132
2133 page = get_object_page(object);
2134
2135 if (!page || s != page->slab)
2136 /* No slab or wrong slab */
2137 return 0;
2138
abcd08a6 2139 if (!check_valid_pointer(s, page, object))
81819f0f
CL
2140 return 0;
2141
2142 /*
2143 * We could also check if the object is on the slab's freelist.
2144 * But this would be too expensive and it seems that the main
2145 * purpose of kmem_ptr_validate() is to check if the object belongs
2146 * to a certain slab.
2147 */
2148 return 1;
2149}
2150EXPORT_SYMBOL(kmem_ptr_validate);
2151
2152/*
2153 * Determine the size of a slab object
2154 */
2155unsigned int kmem_cache_size(struct kmem_cache *s)
2156{
2157 return s->objsize;
2158}
2159EXPORT_SYMBOL(kmem_cache_size);
2160
2161const char *kmem_cache_name(struct kmem_cache *s)
2162{
2163 return s->name;
2164}
2165EXPORT_SYMBOL(kmem_cache_name);
2166
2167/*
672bba3a
CL
2168 * Attempt to free all slabs on a node. Return the number of slabs we
2169 * were unable to free.
81819f0f
CL
2170 */
2171static int free_list(struct kmem_cache *s, struct kmem_cache_node *n,
2172 struct list_head *list)
2173{
2174 int slabs_inuse = 0;
2175 unsigned long flags;
2176 struct page *page, *h;
2177
2178 spin_lock_irqsave(&n->list_lock, flags);
2179 list_for_each_entry_safe(page, h, list, lru)
2180 if (!page->inuse) {
2181 list_del(&page->lru);
2182 discard_slab(s, page);
2183 } else
2184 slabs_inuse++;
2185 spin_unlock_irqrestore(&n->list_lock, flags);
2186 return slabs_inuse;
2187}
2188
2189/*
672bba3a 2190 * Release all resources used by a slab cache.
81819f0f 2191 */
0c710013 2192static inline int kmem_cache_close(struct kmem_cache *s)
81819f0f
CL
2193{
2194 int node;
2195
2196 flush_all(s);
2197
2198 /* Attempt to free all objects */
f64dc58c 2199 for_each_node_state(node, N_NORMAL_MEMORY) {
81819f0f
CL
2200 struct kmem_cache_node *n = get_node(s, node);
2201
2086d26a 2202 n->nr_partial -= free_list(s, n, &n->partial);
81819f0f
CL
2203 if (atomic_long_read(&n->nr_slabs))
2204 return 1;
2205 }
2206 free_kmem_cache_nodes(s);
2207 return 0;
2208}
2209
2210/*
2211 * Close a cache and release the kmem_cache structure
2212 * (must be used for caches created using kmem_cache_create)
2213 */
2214void kmem_cache_destroy(struct kmem_cache *s)
2215{
2216 down_write(&slub_lock);
2217 s->refcount--;
2218 if (!s->refcount) {
2219 list_del(&s->list);
a0e1d1be 2220 up_write(&slub_lock);
81819f0f
CL
2221 if (kmem_cache_close(s))
2222 WARN_ON(1);
2223 sysfs_slab_remove(s);
2224 kfree(s);
a0e1d1be
CL
2225 } else
2226 up_write(&slub_lock);
81819f0f
CL
2227}
2228EXPORT_SYMBOL(kmem_cache_destroy);
2229
2230/********************************************************************
2231 * Kmalloc subsystem
2232 *******************************************************************/
2233
aadb4bc4 2234struct kmem_cache kmalloc_caches[PAGE_SHIFT] __cacheline_aligned;
81819f0f
CL
2235EXPORT_SYMBOL(kmalloc_caches);
2236
2237#ifdef CONFIG_ZONE_DMA
aadb4bc4 2238static struct kmem_cache *kmalloc_caches_dma[PAGE_SHIFT];
81819f0f
CL
2239#endif
2240
2241static int __init setup_slub_min_order(char *str)
2242{
2243 get_option (&str, &slub_min_order);
2244
2245 return 1;
2246}
2247
2248__setup("slub_min_order=", setup_slub_min_order);
2249
2250static int __init setup_slub_max_order(char *str)
2251{
2252 get_option (&str, &slub_max_order);
2253
2254 return 1;
2255}
2256
2257__setup("slub_max_order=", setup_slub_max_order);
2258
2259static int __init setup_slub_min_objects(char *str)
2260{
2261 get_option (&str, &slub_min_objects);
2262
2263 return 1;
2264}
2265
2266__setup("slub_min_objects=", setup_slub_min_objects);
2267
2268static int __init setup_slub_nomerge(char *str)
2269{
2270 slub_nomerge = 1;
2271 return 1;
2272}
2273
2274__setup("slub_nomerge", setup_slub_nomerge);
2275
81819f0f
CL
2276static struct kmem_cache *create_kmalloc_cache(struct kmem_cache *s,
2277 const char *name, int size, gfp_t gfp_flags)
2278{
2279 unsigned int flags = 0;
2280
2281 if (gfp_flags & SLUB_DMA)
2282 flags = SLAB_CACHE_DMA;
2283
2284 down_write(&slub_lock);
2285 if (!kmem_cache_open(s, gfp_flags, name, size, ARCH_KMALLOC_MINALIGN,
c59def9f 2286 flags, NULL))
81819f0f
CL
2287 goto panic;
2288
2289 list_add(&s->list, &slab_caches);
2290 up_write(&slub_lock);
2291 if (sysfs_slab_add(s))
2292 goto panic;
2293 return s;
2294
2295panic:
2296 panic("Creation of kmalloc slab %s size=%d failed.\n", name, size);
2297}
2298
2e443fd0 2299#ifdef CONFIG_ZONE_DMA
1ceef402
CL
2300
2301static void sysfs_add_func(struct work_struct *w)
2302{
2303 struct kmem_cache *s;
2304
2305 down_write(&slub_lock);
2306 list_for_each_entry(s, &slab_caches, list) {
2307 if (s->flags & __SYSFS_ADD_DEFERRED) {
2308 s->flags &= ~__SYSFS_ADD_DEFERRED;
2309 sysfs_slab_add(s);
2310 }
2311 }
2312 up_write(&slub_lock);
2313}
2314
2315static DECLARE_WORK(sysfs_add_work, sysfs_add_func);
2316
2e443fd0
CL
2317static noinline struct kmem_cache *dma_kmalloc_cache(int index, gfp_t flags)
2318{
2319 struct kmem_cache *s;
2e443fd0
CL
2320 char *text;
2321 size_t realsize;
2322
2323 s = kmalloc_caches_dma[index];
2324 if (s)
2325 return s;
2326
2327 /* Dynamically create dma cache */
1ceef402
CL
2328 if (flags & __GFP_WAIT)
2329 down_write(&slub_lock);
2330 else {
2331 if (!down_write_trylock(&slub_lock))
2332 goto out;
2333 }
2334
2335 if (kmalloc_caches_dma[index])
2336 goto unlock_out;
2e443fd0 2337
7b55f620 2338 realsize = kmalloc_caches[index].objsize;
1ceef402
CL
2339 text = kasprintf(flags & ~SLUB_DMA, "kmalloc_dma-%d", (unsigned int)realsize),
2340 s = kmalloc(kmem_size, flags & ~SLUB_DMA);
2341
2342 if (!s || !text || !kmem_cache_open(s, flags, text,
2343 realsize, ARCH_KMALLOC_MINALIGN,
2344 SLAB_CACHE_DMA|__SYSFS_ADD_DEFERRED, NULL)) {
2345 kfree(s);
2346 kfree(text);
2347 goto unlock_out;
dfce8648 2348 }
1ceef402
CL
2349
2350 list_add(&s->list, &slab_caches);
2351 kmalloc_caches_dma[index] = s;
2352
2353 schedule_work(&sysfs_add_work);
2354
2355unlock_out:
dfce8648 2356 up_write(&slub_lock);
1ceef402 2357out:
dfce8648 2358 return kmalloc_caches_dma[index];
2e443fd0
CL
2359}
2360#endif
2361
f1b26339
CL
2362/*
2363 * Conversion table from small slab sizes / 8 to the index in the
2364 * kmalloc array. This is necessary for slabs < 192 since we have
2365 * non-power-of-two cache sizes there. The size of larger slabs can be determined using
2366 * fls.
2367 */
2368static s8 size_index[24] = {
2369 3, /* 8 */
2370 4, /* 16 */
2371 5, /* 24 */
2372 5, /* 32 */
2373 6, /* 40 */
2374 6, /* 48 */
2375 6, /* 56 */
2376 6, /* 64 */
2377 1, /* 72 */
2378 1, /* 80 */
2379 1, /* 88 */
2380 1, /* 96 */
2381 7, /* 104 */
2382 7, /* 112 */
2383 7, /* 120 */
2384 7, /* 128 */
2385 2, /* 136 */
2386 2, /* 144 */
2387 2, /* 152 */
2388 2, /* 160 */
2389 2, /* 168 */
2390 2, /* 176 */
2391 2, /* 184 */
2392 2 /* 192 */
2393};
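/*
 * Example lookup (illustration only): a 100 byte request gives
 * size_index[(100 - 1) / 8] == size_index[12] == 7, i.e. the 128 byte
 * kmalloc cache. A 1000 byte request takes the fls() path instead:
 * fls(999) == 10, i.e. the 1024 byte cache.
 */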
2394
81819f0f
CL
2395static struct kmem_cache *get_slab(size_t size, gfp_t flags)
2396{
f1b26339 2397 int index;
81819f0f 2398
f1b26339
CL
2399 if (size <= 192) {
2400 if (!size)
2401 return ZERO_SIZE_PTR;
81819f0f 2402
f1b26339 2403 index = size_index[(size - 1) / 8];
aadb4bc4 2404 } else
f1b26339 2405 index = fls(size - 1);
81819f0f
CL
2406
2407#ifdef CONFIG_ZONE_DMA
f1b26339 2408 if (unlikely((flags & SLUB_DMA)))
2e443fd0 2409 return dma_kmalloc_cache(index, flags);
f1b26339 2410
81819f0f
CL
2411#endif
2412 return &kmalloc_caches[index];
2413}
2414
2415void *__kmalloc(size_t size, gfp_t flags)
2416{
aadb4bc4 2417 struct kmem_cache *s;
81819f0f 2418
aadb4bc4
CL
2419 if (unlikely(size > PAGE_SIZE / 2))
2420 return (void *)__get_free_pages(flags | __GFP_COMP,
2421 get_order(size));
2422
2423 s = get_slab(size, flags);
2424
2425 if (unlikely(ZERO_OR_NULL_PTR(s)))
6cb8f913
CL
2426 return s;
2427
ce15fea8 2428 return slab_alloc(s, flags, -1, __builtin_return_address(0));
81819f0f
CL
2429}
2430EXPORT_SYMBOL(__kmalloc);
2431
2432#ifdef CONFIG_NUMA
2433void *__kmalloc_node(size_t size, gfp_t flags, int node)
2434{
aadb4bc4 2435 struct kmem_cache *s;
81819f0f 2436
aadb4bc4
CL
2437 if (unlikely(size > PAGE_SIZE / 2))
2438 return (void *)__get_free_pages(flags | __GFP_COMP,
2439 get_order(size));
2440
2441 s = get_slab(size, flags);
2442
2443 if (unlikely(ZERO_OR_NULL_PTR(s)))
6cb8f913
CL
2444 return s;
2445
ce15fea8 2446 return slab_alloc(s, flags, node, __builtin_return_address(0));
81819f0f
CL
2447}
2448EXPORT_SYMBOL(__kmalloc_node);
2449#endif
2450
2451size_t ksize(const void *object)
2452{
272c1d21 2453 struct page *page;
81819f0f
CL
2454 struct kmem_cache *s;
2455
ef8b4520
CL
2456 BUG_ON(!object);
2457 if (unlikely(object == ZERO_SIZE_PTR))
272c1d21
CL
2458 return 0;
2459
2460 page = get_object_page(object);
81819f0f
CL
2461 BUG_ON(!page);
2462 s = page->slab;
2463 BUG_ON(!s);
2464
2465 /*
2466 * Debugging requires use of the padding between object
2467 * and whatever may come after it.
2468 */
2469 if (s->flags & (SLAB_RED_ZONE | SLAB_POISON))
2470 return s->objsize;
2471
2472 /*
2473 * If we have the need to store the freelist pointer
2474 * back there or track user information then we can
2475 * only use the space before that information.
2476 */
2477 if (s->flags & (SLAB_DESTROY_BY_RCU | SLAB_STORE_USER))
2478 return s->inuse;
2479
2480 /*
2481 * Else we can use all the padding etc for the allocation
2482 */
2483 return s->size;
2484}
2485EXPORT_SYMBOL(ksize);
2486
2487void kfree(const void *x)
2488{
81819f0f
CL
2489 struct page *page;
2490
2408c550 2491 if (unlikely(ZERO_OR_NULL_PTR(x)))
81819f0f
CL
2492 return;
2493
b49af68f 2494 page = virt_to_head_page(x);
aadb4bc4
CL
2495 if (unlikely(!PageSlab(page))) {
2496 put_page(page);
2497 return;
2498 }
2499 slab_free(page->slab, page, (void *)x, __builtin_return_address(0));
81819f0f
CL
2500}
2501EXPORT_SYMBOL(kfree);
2502
2086d26a 2503/*
672bba3a
CL
2504 * kmem_cache_shrink removes empty slabs from the partial lists and sorts
2505 * the remaining slabs by the number of items in use. The slabs with the
2506 * most items in use come first. New allocations will then fill those up
2507 * and thus they can be removed from the partial lists.
2508 *
2509 * The slabs with the fewest items in use are placed last. This makes them
2510 * the last to be allocated from, increasing the chance that their remaining
2511 * objects are freed and the slabs can eventually be discarded.
2086d26a
CL
2512 */
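/*
 * Illustration (not from the original source): for a cache with 8 objects
 * per slab, partial slabs are bucketed by page->inuse; empty slabs are
 * discarded, slabs with 7 objects in use end up at the head of the rebuilt
 * partial list and slabs with a single object in use end up at the tail.
 */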
2513int kmem_cache_shrink(struct kmem_cache *s)
2514{
2515 int node;
2516 int i;
2517 struct kmem_cache_node *n;
2518 struct page *page;
2519 struct page *t;
2520 struct list_head *slabs_by_inuse =
2521 kmalloc(sizeof(struct list_head) * s->objects, GFP_KERNEL);
2522 unsigned long flags;
2523
2524 if (!slabs_by_inuse)
2525 return -ENOMEM;
2526
2527 flush_all(s);
f64dc58c 2528 for_each_node_state(node, N_NORMAL_MEMORY) {
2086d26a
CL
2529 n = get_node(s, node);
2530
2531 if (!n->nr_partial)
2532 continue;
2533
2534 for (i = 0; i < s->objects; i++)
2535 INIT_LIST_HEAD(slabs_by_inuse + i);
2536
2537 spin_lock_irqsave(&n->list_lock, flags);
2538
2539 /*
672bba3a 2540 * Build lists indexed by the items in use in each slab.
2086d26a 2541 *
672bba3a
CL
2542 * Note that concurrent frees may occur while we hold the
2543 * list_lock. page->inuse here is the upper limit.
2086d26a
CL
2544 */
2545 list_for_each_entry_safe(page, t, &n->partial, lru) {
2546 if (!page->inuse && slab_trylock(page)) {
2547 /*
2548 * Must hold slab lock here because slab_free
2549 * may have freed the last object and be
2550 * waiting to release the slab.
2551 */
2552 list_del(&page->lru);
2553 n->nr_partial--;
2554 slab_unlock(page);
2555 discard_slab(s, page);
2556 } else {
fcda3d89
CL
2557 list_move(&page->lru,
2558 slabs_by_inuse + page->inuse);
2086d26a
CL
2559 }
2560 }
2561
2086d26a 2562 /*
672bba3a
CL
2563 * Rebuild the partial list with the slabs filled up most
2564 * first and the least used slabs at the end.
2086d26a
CL
2565 */
2566 for (i = s->objects - 1; i >= 0; i--)
2567 list_splice(slabs_by_inuse + i, n->partial.prev);
2568
2086d26a
CL
2569 spin_unlock_irqrestore(&n->list_lock, flags);
2570 }
2571
2572 kfree(slabs_by_inuse);
2573 return 0;
2574}
2575EXPORT_SYMBOL(kmem_cache_shrink);
2576
81819f0f
CL
2577/********************************************************************
2578 * Basic setup of slabs
2579 *******************************************************************/
2580
2581void __init kmem_cache_init(void)
2582{
2583 int i;
4b356be0 2584 int caches = 0;
81819f0f
CL
2585
2586#ifdef CONFIG_NUMA
2587 /*
2588 * Must first have the slab cache available for the allocations of the
672bba3a 2589 * struct kmem_cache_node's. There is special bootstrap code in
81819f0f
CL
2590 * kmem_cache_open for slab_state == DOWN.
2591 */
2592 create_kmalloc_cache(&kmalloc_caches[0], "kmem_cache_node",
2593 sizeof(struct kmem_cache_node), GFP_KERNEL);
8ffa6875 2594 kmalloc_caches[0].refcount = -1;
4b356be0 2595 caches++;
81819f0f
CL
2596#endif
2597
2598 /* Able to allocate the per node structures */
2599 slab_state = PARTIAL;
2600
2601 /* Caches that are not of the two-to-the-power-of size */
4b356be0
CL
2602 if (KMALLOC_MIN_SIZE <= 64) {
2603 create_kmalloc_cache(&kmalloc_caches[1],
81819f0f 2604 "kmalloc-96", 96, GFP_KERNEL);
4b356be0
CL
2605 caches++;
2606 }
2607 if (KMALLOC_MIN_SIZE <= 128) {
2608 create_kmalloc_cache(&kmalloc_caches[2],
81819f0f 2609 "kmalloc-192", 192, GFP_KERNEL);
4b356be0
CL
2610 caches++;
2611 }
81819f0f 2612
aadb4bc4 2613 for (i = KMALLOC_SHIFT_LOW; i < PAGE_SHIFT; i++) {
81819f0f
CL
2614 create_kmalloc_cache(&kmalloc_caches[i],
2615 "kmalloc", 1 << i, GFP_KERNEL);
4b356be0
CL
2616 caches++;
2617 }
81819f0f 2618
f1b26339
CL
2619
2620 /*
2621 * Patch up the size_index table if we have strange large alignment
2622 * requirements for the kmalloc array. This is only the case for
2623 * MIPS, it seems. The standard arches will not generate any code here.
2624 *
2625 * Largest permitted alignment is 256 bytes due to the way we
2626 * handle the index determination for the smaller caches.
2627 *
2628 * Make sure that nothing crazy happens if someone starts tinkering
2629 * around with ARCH_KMALLOC_MINALIGN
2630 */
2631 BUILD_BUG_ON(KMALLOC_MIN_SIZE > 256 ||
2632 (KMALLOC_MIN_SIZE & (KMALLOC_MIN_SIZE - 1)));
2633
12ad6843 2634 for (i = 8; i < KMALLOC_MIN_SIZE; i += 8)
f1b26339
CL
2635 size_index[(i - 1) / 8] = KMALLOC_SHIFT_LOW;
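	/*
	 * For example (illustration only, under the assumption that a large
	 * ARCH_KMALLOC_MINALIGN yields KMALLOC_MIN_SIZE == 64): the loop
	 * above redirects requests of 8..56 bytes to the 64 byte cache
	 * (index KMALLOC_SHIFT_LOW) since the smaller caches were never
	 * created.
	 */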
2636
81819f0f
CL
2637 slab_state = UP;
2638
2639 /* Provide the correct kmalloc names now that the caches are up */
aadb4bc4 2640 for (i = KMALLOC_SHIFT_LOW; i < PAGE_SHIFT; i++)
81819f0f
CL
2641 kmalloc_caches[i].name =
2642 kasprintf(GFP_KERNEL, "kmalloc-%d", 1 << i);
2643
2644#ifdef CONFIG_SMP
2645 register_cpu_notifier(&slab_notifier);
2646#endif
2647
bcf889f9
CL
2648 kmem_size = offsetof(struct kmem_cache, cpu_slab) +
2649 nr_cpu_ids * sizeof(struct page *);
81819f0f
CL
2650
2651 printk(KERN_INFO "SLUB: Genslabs=%d, HWalign=%d, Order=%d-%d, MinObjects=%d,"
4b356be0
CL
2652 " CPUs=%d, Nodes=%d\n",
2653 caches, cache_line_size(),
81819f0f
CL
2654 slub_min_order, slub_max_order, slub_min_objects,
2655 nr_cpu_ids, nr_node_ids);
2656}
2657
2658/*
2659 * Find a mergeable slab cache
2660 */
2661static int slab_unmergeable(struct kmem_cache *s)
2662{
2663 if (slub_nomerge || (s->flags & SLUB_NEVER_MERGE))
2664 return 1;
2665
c59def9f 2666 if (s->ctor)
81819f0f
CL
2667 return 1;
2668
8ffa6875
CL
2669 /*
2670 * We may have set a slab to be unmergeable during bootstrap.
2671 */
2672 if (s->refcount < 0)
2673 return 1;
2674
81819f0f
CL
2675 return 0;
2676}
2677
2678static struct kmem_cache *find_mergeable(size_t size,
ba0268a8 2679 size_t align, unsigned long flags, const char *name,
c59def9f 2680 void (*ctor)(void *, struct kmem_cache *, unsigned long))
81819f0f 2681{
5b95a4ac 2682 struct kmem_cache *s;
81819f0f
CL
2683
2684 if (slub_nomerge || (flags & SLUB_NEVER_MERGE))
2685 return NULL;
2686
c59def9f 2687 if (ctor)
81819f0f
CL
2688 return NULL;
2689
2690 size = ALIGN(size, sizeof(void *));
2691 align = calculate_alignment(flags, align, size);
2692 size = ALIGN(size, align);
ba0268a8 2693 flags = kmem_cache_flags(size, flags, name, NULL);
81819f0f 2694
5b95a4ac 2695 list_for_each_entry(s, &slab_caches, list) {
81819f0f
CL
2696 if (slab_unmergeable(s))
2697 continue;
2698
2699 if (size > s->size)
2700 continue;
2701
ba0268a8 2702 if ((flags & SLUB_MERGE_SAME) != (s->flags & SLUB_MERGE_SAME))
81819f0f
CL
2703 continue;
2704 /*
2705 * Check if alignment is compatible.
2706 * Courtesy of Adrian Drzewiecki
2707 */
2708 if ((s->size & ~(align -1)) != s->size)
2709 continue;
2710
2711 if (s->size - size >= sizeof(void *))
2712 continue;
2713
2714 return s;
2715 }
2716 return NULL;
2717}
2718
2719struct kmem_cache *kmem_cache_create(const char *name, size_t size,
2720 size_t align, unsigned long flags,
20c2df83 2721 void (*ctor)(void *, struct kmem_cache *, unsigned long))
81819f0f
CL
2722{
2723 struct kmem_cache *s;
2724
2725 down_write(&slub_lock);
ba0268a8 2726 s = find_mergeable(size, align, flags, name, ctor);
81819f0f
CL
2727 if (s) {
2728 s->refcount++;
2729 /*
2730 * Adjust the object sizes so that we clear
2731 * the complete object on kzalloc.
2732 */
2733 s->objsize = max(s->objsize, (int)size);
2734 s->inuse = max_t(int, s->inuse, ALIGN(size, sizeof(void *)));
a0e1d1be 2735 up_write(&slub_lock);
81819f0f
CL
2736 if (sysfs_slab_alias(s, name))
2737 goto err;
a0e1d1be
CL
2738 return s;
2739 }
2740 s = kmalloc(kmem_size, GFP_KERNEL);
2741 if (s) {
2742 if (kmem_cache_open(s, GFP_KERNEL, name,
c59def9f 2743 size, align, flags, ctor)) {
81819f0f 2744 list_add(&s->list, &slab_caches);
a0e1d1be
CL
2745 up_write(&slub_lock);
2746 if (sysfs_slab_add(s))
2747 goto err;
2748 return s;
2749 }
2750 kfree(s);
81819f0f
CL
2751 }
2752 up_write(&slub_lock);
81819f0f
CL
2753
2754err:
81819f0f
CL
2755 if (flags & SLAB_PANIC)
2756 panic("Cannot create slabcache %s\n", name);
2757 else
2758 s = NULL;
2759 return s;
2760}
2761EXPORT_SYMBOL(kmem_cache_create);
2762
81819f0f 2763#ifdef CONFIG_SMP
81819f0f 2764/*
672bba3a
CL
2765 * Use the cpu notifier to ensure that the cpu slabs are flushed when
2766 * necessary.
81819f0f
CL
2767 */
2768static int __cpuinit slab_cpuup_callback(struct notifier_block *nfb,
2769 unsigned long action, void *hcpu)
2770{
2771 long cpu = (long)hcpu;
5b95a4ac
CL
2772 struct kmem_cache *s;
2773 unsigned long flags;
81819f0f
CL
2774
2775 switch (action) {
2776 case CPU_UP_CANCELED:
8bb78442 2777 case CPU_UP_CANCELED_FROZEN:
81819f0f 2778 case CPU_DEAD:
8bb78442 2779 case CPU_DEAD_FROZEN:
5b95a4ac
CL
2780 down_read(&slub_lock);
2781 list_for_each_entry(s, &slab_caches, list) {
2782 local_irq_save(flags);
2783 __flush_cpu_slab(s, cpu);
2784 local_irq_restore(flags);
2785 }
2786 up_read(&slub_lock);
81819f0f
CL
2787 break;
2788 default:
2789 break;
2790 }
2791 return NOTIFY_OK;
2792}
2793
2794static struct notifier_block __cpuinitdata slab_notifier =
2795 { &slab_cpuup_callback, NULL, 0 };
2796
2797#endif
2798
81819f0f
CL
2799void *__kmalloc_track_caller(size_t size, gfp_t gfpflags, void *caller)
2800{
aadb4bc4
CL
2801 struct kmem_cache *s;
2802
2803 if (unlikely(size > PAGE_SIZE / 2))
2804 return (void *)__get_free_pages(gfpflags | __GFP_COMP,
2805 get_order(size));
2806 s = get_slab(size, gfpflags);
81819f0f 2807
2408c550 2808 if (unlikely(ZERO_OR_NULL_PTR(s)))
6cb8f913 2809 return s;
81819f0f 2810
ce15fea8 2811 return slab_alloc(s, gfpflags, -1, caller);
81819f0f
CL
2812}
2813
2814void *__kmalloc_node_track_caller(size_t size, gfp_t gfpflags,
2815 int node, void *caller)
2816{
aadb4bc4
CL
2817 struct kmem_cache *s;
2818
2819 if (unlikely(size > PAGE_SIZE / 2))
2820 return (void *)__get_free_pages(gfpflags | __GFP_COMP,
2821 get_order(size));
2822 s = get_slab(size, gfpflags);
81819f0f 2823
2408c550 2824 if (unlikely(ZERO_OR_NULL_PTR(s)))
6cb8f913 2825 return s;
81819f0f 2826
ce15fea8 2827 return slab_alloc(s, gfpflags, node, caller);
81819f0f
CL
2828}
2829
41ecc55b 2830#if defined(CONFIG_SYSFS) && defined(CONFIG_SLUB_DEBUG)
434e245d
CL
2831static int validate_slab(struct kmem_cache *s, struct page *page,
2832 unsigned long *map)
53e15af0
CL
2833{
2834 void *p;
2835 void *addr = page_address(page);
53e15af0
CL
2836
2837 if (!check_slab(s, page) ||
2838 !on_freelist(s, page, NULL))
2839 return 0;
2840
2841 /* Now we know that a valid freelist exists */
2842 bitmap_zero(map, s->objects);
2843
7656c72b
CL
2844 for_each_free_object(p, s, page->freelist) {
2845 set_bit(slab_index(p, s, addr), map);
53e15af0
CL
2846 if (!check_object(s, page, p, 0))
2847 return 0;
2848 }
2849
7656c72b
CL
2850 for_each_object(p, s, addr)
2851 if (!test_bit(slab_index(p, s, addr), map))
53e15af0
CL
2852 if (!check_object(s, page, p, 1))
2853 return 0;
2854 return 1;
2855}
2856
434e245d
CL
2857static void validate_slab_slab(struct kmem_cache *s, struct page *page,
2858 unsigned long *map)
53e15af0
CL
2859{
2860 if (slab_trylock(page)) {
434e245d 2861 validate_slab(s, page, map);
53e15af0
CL
2862 slab_unlock(page);
2863 } else
2864 printk(KERN_INFO "SLUB %s: Skipped busy slab 0x%p\n",
2865 s->name, page);
2866
2867 if (s->flags & DEBUG_DEFAULT_FLAGS) {
35e5d7ee
CL
2868 if (!SlabDebug(page))
2869 printk(KERN_ERR "SLUB %s: SlabDebug not set "
53e15af0
CL
2870 "on slab 0x%p\n", s->name, page);
2871 } else {
35e5d7ee
CL
2872 if (SlabDebug(page))
2873 printk(KERN_ERR "SLUB %s: SlabDebug set on "
53e15af0
CL
2874 "slab 0x%p\n", s->name, page);
2875 }
2876}
2877
434e245d
CL
2878static int validate_slab_node(struct kmem_cache *s,
2879 struct kmem_cache_node *n, unsigned long *map)
53e15af0
CL
2880{
2881 unsigned long count = 0;
2882 struct page *page;
2883 unsigned long flags;
2884
2885 spin_lock_irqsave(&n->list_lock, flags);
2886
2887 list_for_each_entry(page, &n->partial, lru) {
434e245d 2888 validate_slab_slab(s, page, map);
53e15af0
CL
2889 count++;
2890 }
2891 if (count != n->nr_partial)
2892 printk(KERN_ERR "SLUB %s: %ld partial slabs counted but "
2893 "counter=%ld\n", s->name, count, n->nr_partial);
2894
2895 if (!(s->flags & SLAB_STORE_USER))
2896 goto out;
2897
2898 list_for_each_entry(page, &n->full, lru) {
434e245d 2899 validate_slab_slab(s, page, map);
53e15af0
CL
2900 count++;
2901 }
2902 if (count != atomic_long_read(&n->nr_slabs))
2903 printk(KERN_ERR "SLUB: %s %ld slabs counted but "
2904 "counter=%ld\n", s->name, count,
2905 atomic_long_read(&n->nr_slabs));
2906
2907out:
2908 spin_unlock_irqrestore(&n->list_lock, flags);
2909 return count;
2910}
2911
434e245d 2912static long validate_slab_cache(struct kmem_cache *s)
53e15af0
CL
2913{
2914 int node;
2915 unsigned long count = 0;
434e245d
CL
2916 unsigned long *map = kmalloc(BITS_TO_LONGS(s->objects) *
2917 sizeof(unsigned long), GFP_KERNEL);
2918
2919 if (!map)
2920 return -ENOMEM;
53e15af0
CL
2921
2922 flush_all(s);
f64dc58c 2923 for_each_node_state(node, N_NORMAL_MEMORY) {
53e15af0
CL
2924 struct kmem_cache_node *n = get_node(s, node);
2925
434e245d 2926 count += validate_slab_node(s, n, map);
53e15af0 2927 }
434e245d 2928 kfree(map);
53e15af0
CL
2929 return count;
2930}
2931
b3459709
CL
2932#ifdef SLUB_RESILIENCY_TEST
2933static void resiliency_test(void)
2934{
2935 u8 *p;
2936
2937 printk(KERN_ERR "SLUB resiliency testing\n");
2938 printk(KERN_ERR "-----------------------\n");
2939 printk(KERN_ERR "A. Corruption after allocation\n");
2940
2941 p = kzalloc(16, GFP_KERNEL);
2942 p[16] = 0x12;
2943 printk(KERN_ERR "\n1. kmalloc-16: Clobber Redzone/next pointer"
2944 " 0x12->0x%p\n\n", p + 16);
2945
2946 validate_slab_cache(kmalloc_caches + 4);
2947
2948 /* Hmmm... The next two are dangerous */
2949 p = kzalloc(32, GFP_KERNEL);
2950 p[32 + sizeof(void *)] = 0x34;
2951 printk(KERN_ERR "\n2. kmalloc-32: Clobber next pointer/next slab"
2952 " 0x34 -> -0x%p\n", p);
2953 printk(KERN_ERR "If allocated object is overwritten then not detectable\n\n");
2954
2955 validate_slab_cache(kmalloc_caches + 5);
2956 p = kzalloc(64, GFP_KERNEL);
2957 p += 64 + (get_cycles() & 0xff) * sizeof(void *);
2958 *p = 0x56;
2959 printk(KERN_ERR "\n3. kmalloc-64: corrupting random byte 0x56->0x%p\n",
2960 p);
2961 printk(KERN_ERR "If allocated object is overwritten then not detectable\n\n");
2962 validate_slab_cache(kmalloc_caches + 6);
2963
2964 printk(KERN_ERR "\nB. Corruption after free\n");
2965 p = kzalloc(128, GFP_KERNEL);
2966 kfree(p);
2967 *p = 0x78;
2968 printk(KERN_ERR "1. kmalloc-128: Clobber first word 0x78->0x%p\n\n", p);
2969 validate_slab_cache(kmalloc_caches + 7);
2970
2971 p = kzalloc(256, GFP_KERNEL);
2972 kfree(p);
2973 p[50] = 0x9a;
2974 printk(KERN_ERR "\n2. kmalloc-256: Clobber 50th byte 0x9a->0x%p\n\n", p);
2975 validate_slab_cache(kmalloc_caches + 8);
2976
2977 p = kzalloc(512, GFP_KERNEL);
2978 kfree(p);
2979 p[512] = 0xab;
2980 printk(KERN_ERR "\n3. kmalloc-512: Clobber redzone 0xab->0x%p\n\n", p);
2981 validate_slab_cache(kmalloc_caches + 9);
2982}
2983#else
2984static void resiliency_test(void) {};
2985#endif
2986
88a420e4 2987/*
672bba3a 2988 * Generate lists of code addresses where slabcache objects are allocated
88a420e4
CL
2989 * and freed.
2990 */
2991
2992struct location {
2993 unsigned long count;
2994 void *addr;
45edfa58
CL
2995 long long sum_time;
2996 long min_time;
2997 long max_time;
2998 long min_pid;
2999 long max_pid;
3000 cpumask_t cpus;
3001 nodemask_t nodes;
88a420e4
CL
3002};
3003
3004struct loc_track {
3005 unsigned long max;
3006 unsigned long count;
3007 struct location *loc;
3008};
3009
3010static void free_loc_track(struct loc_track *t)
3011{
3012 if (t->max)
3013 free_pages((unsigned long)t->loc,
3014 get_order(sizeof(struct location) * t->max));
3015}
3016
68dff6a9 3017static int alloc_loc_track(struct loc_track *t, unsigned long max, gfp_t flags)
88a420e4
CL
3018{
3019 struct location *l;
3020 int order;
3021
88a420e4
CL
3022 order = get_order(sizeof(struct location) * max);
3023
68dff6a9 3024 l = (void *)__get_free_pages(flags, order);
88a420e4
CL
3025 if (!l)
3026 return 0;
3027
3028 if (t->count) {
3029 memcpy(l, t->loc, sizeof(struct location) * t->count);
3030 free_loc_track(t);
3031 }
3032 t->max = max;
3033 t->loc = l;
3034 return 1;
3035}
3036
3037static int add_location(struct loc_track *t, struct kmem_cache *s,
45edfa58 3038 const struct track *track)
88a420e4
CL
3039{
3040 long start, end, pos;
3041 struct location *l;
3042 void *caddr;
45edfa58 3043 unsigned long age = jiffies - track->when;
88a420e4
CL
3044
3045 start = -1;
3046 end = t->count;
3047
3048 for ( ; ; ) {
3049 pos = start + (end - start + 1) / 2;
3050
3051 /*
3052 * There is nothing at "end". If we end up there
3053 * we need to add something before end.
3054 */
3055 if (pos == end)
3056 break;
3057
3058 caddr = t->loc[pos].addr;
45edfa58
CL
3059 if (track->addr == caddr) {
3060
3061 l = &t->loc[pos];
3062 l->count++;
3063 if (track->when) {
3064 l->sum_time += age;
3065 if (age < l->min_time)
3066 l->min_time = age;
3067 if (age > l->max_time)
3068 l->max_time = age;
3069
3070 if (track->pid < l->min_pid)
3071 l->min_pid = track->pid;
3072 if (track->pid > l->max_pid)
3073 l->max_pid = track->pid;
3074
3075 cpu_set(track->cpu, l->cpus);
3076 }
3077 node_set(page_to_nid(virt_to_page(track)), l->nodes);
88a420e4
CL
3078 return 1;
3079 }
3080
45edfa58 3081 if (track->addr < caddr)
88a420e4
CL
3082 end = pos;
3083 else
3084 start = pos;
3085 }
3086
3087 /*
672bba3a 3088 * Not found. Insert new tracking element.
88a420e4 3089 */
68dff6a9 3090 if (t->count >= t->max && !alloc_loc_track(t, 2 * t->max, GFP_ATOMIC))
88a420e4
CL
3091 return 0;
3092
3093 l = t->loc + pos;
3094 if (pos < t->count)
3095 memmove(l + 1, l,
3096 (t->count - pos) * sizeof(struct location));
3097 t->count++;
3098 l->count = 1;
45edfa58
CL
3099 l->addr = track->addr;
3100 l->sum_time = age;
3101 l->min_time = age;
3102 l->max_time = age;
3103 l->min_pid = track->pid;
3104 l->max_pid = track->pid;
3105 cpus_clear(l->cpus);
3106 cpu_set(track->cpu, l->cpus);
3107 nodes_clear(l->nodes);
3108 node_set(page_to_nid(virt_to_page(track)), l->nodes);
88a420e4
CL
3109 return 1;
3110}
3111
3112static void process_slab(struct loc_track *t, struct kmem_cache *s,
3113 struct page *page, enum track_item alloc)
3114{
3115 void *addr = page_address(page);
7656c72b 3116 DECLARE_BITMAP(map, s->objects);
88a420e4
CL
3117 void *p;
3118
3119 bitmap_zero(map, s->objects);
7656c72b
CL
3120 for_each_free_object(p, s, page->freelist)
3121 set_bit(slab_index(p, s, addr), map);
88a420e4 3122
7656c72b 3123 for_each_object(p, s, addr)
45edfa58
CL
3124 if (!test_bit(slab_index(p, s, addr), map))
3125 add_location(t, s, get_track(s, p, alloc));
88a420e4
CL
3126}
3127
3128static int list_locations(struct kmem_cache *s, char *buf,
3129 enum track_item alloc)
3130{
3131 int n = 0;
3132 unsigned long i;
68dff6a9 3133 struct loc_track t = { 0, 0, NULL };
88a420e4
CL
3134 int node;
3135
68dff6a9
CL
3136 if (!alloc_loc_track(&t, PAGE_SIZE / sizeof(struct location),
3137 GFP_KERNEL))
3138 return sprintf(buf, "Out of memory\n");
88a420e4
CL
3139
3140 /* Push back cpu slabs */
3141 flush_all(s);
3142
f64dc58c 3143 for_each_node_state(node, N_NORMAL_MEMORY) {
88a420e4
CL
3144 struct kmem_cache_node *n = get_node(s, node);
3145 unsigned long flags;
3146 struct page *page;
3147
9e86943b 3148 if (!atomic_long_read(&n->nr_slabs))
88a420e4
CL
3149 continue;
3150
3151 spin_lock_irqsave(&n->list_lock, flags);
3152 list_for_each_entry(page, &n->partial, lru)
3153 process_slab(&t, s, page, alloc);
3154 list_for_each_entry(page, &n->full, lru)
3155 process_slab(&t, s, page, alloc);
3156 spin_unlock_irqrestore(&n->list_lock, flags);
3157 }
3158
3159 for (i = 0; i < t.count; i++) {
45edfa58 3160 struct location *l = &t.loc[i];
88a420e4
CL
3161
3162 if (n > PAGE_SIZE - 100)
3163 break;
45edfa58
CL
3164 n += sprintf(buf + n, "%7ld ", l->count);
3165
3166 if (l->addr)
3167 n += sprint_symbol(buf + n, (unsigned long)l->addr);
88a420e4
CL
3168 else
3169 n += sprintf(buf + n, "<not-available>");
45edfa58
CL
3170
3171 if (l->sum_time != l->min_time) {
3172 unsigned long remainder;
3173
3174 n += sprintf(buf + n, " age=%ld/%ld/%ld",
3175 l->min_time,
3176 div_long_long_rem(l->sum_time, l->count, &remainder),
3177 l->max_time);
3178 } else
3179 n += sprintf(buf + n, " age=%ld",
3180 l->min_time);
3181
3182 if (l->min_pid != l->max_pid)
3183 n += sprintf(buf + n, " pid=%ld-%ld",
3184 l->min_pid, l->max_pid);
3185 else
3186 n += sprintf(buf + n, " pid=%ld",
3187 l->min_pid);
3188
84966343
CL
3189 if (num_online_cpus() > 1 && !cpus_empty(l->cpus) &&
3190 n < PAGE_SIZE - 60) {
45edfa58
CL
3191 n += sprintf(buf + n, " cpus=");
3192 n += cpulist_scnprintf(buf + n, PAGE_SIZE - n - 50,
3193 l->cpus);
3194 }
3195
84966343
CL
3196 if (num_online_nodes() > 1 && !nodes_empty(l->nodes) &&
3197 n < PAGE_SIZE - 60) {
45edfa58
CL
3198 n += sprintf(buf + n, " nodes=");
3199 n += nodelist_scnprintf(buf + n, PAGE_SIZE - n - 50,
3200 l->nodes);
3201 }
3202
88a420e4
CL
3203 n += sprintf(buf + n, "\n");
3204 }
3205
3206 free_loc_track(&t);
3207 if (!t.count)
3208 n += sprintf(buf, "No data\n");
3209 return n;
3210}
3211
81819f0f
CL
3212static unsigned long count_partial(struct kmem_cache_node *n)
3213{
3214 unsigned long flags;
3215 unsigned long x = 0;
3216 struct page *page;
3217
3218 spin_lock_irqsave(&n->list_lock, flags);
3219 list_for_each_entry(page, &n->partial, lru)
3220 x += page->inuse;
3221 spin_unlock_irqrestore(&n->list_lock, flags);
3222 return x;
3223}
3224
3225enum slab_stat_type {
3226 SL_FULL,
3227 SL_PARTIAL,
3228 SL_CPU,
3229 SL_OBJECTS
3230};
3231
3232#define SO_FULL (1 << SL_FULL)
3233#define SO_PARTIAL (1 << SL_PARTIAL)
3234#define SO_CPU (1 << SL_CPU)
3235#define SO_OBJECTS (1 << SL_OBJECTS)
3236
3237static unsigned long slab_objects(struct kmem_cache *s,
3238 char *buf, unsigned long flags)
3239{
3240 unsigned long total = 0;
3241 int cpu;
3242 int node;
3243 int x;
3244 unsigned long *nodes;
3245 unsigned long *per_cpu;
3246
3247 nodes = kzalloc(2 * sizeof(unsigned long) * nr_node_ids, GFP_KERNEL);
3248 per_cpu = nodes + nr_node_ids;
3249
3250 for_each_possible_cpu(cpu) {
3251 struct page *page = s->cpu_slab[cpu];
3252 int node;
3253
3254 if (page) {
3255 node = page_to_nid(page);
3256 if (flags & SO_CPU) {
3257 int x = 0;
3258
3259 if (flags & SO_OBJECTS)
3260 x = page->inuse;
3261 else
3262 x = 1;
3263 total += x;
3264 nodes[node] += x;
3265 }
3266 per_cpu[node]++;
3267 }
3268 }
3269
f64dc58c 3270 for_each_node_state(node, N_NORMAL_MEMORY) {
81819f0f
CL
3271 struct kmem_cache_node *n = get_node(s, node);
3272
3273 if (flags & SO_PARTIAL) {
3274 if (flags & SO_OBJECTS)
3275 x = count_partial(n);
3276 else
3277 x = n->nr_partial;
3278 total += x;
3279 nodes[node] += x;
3280 }
3281
3282 if (flags & SO_FULL) {
9e86943b 3283 int full_slabs = atomic_long_read(&n->nr_slabs)
81819f0f
CL
3284 - per_cpu[node]
3285 - n->nr_partial;
3286
3287 if (flags & SO_OBJECTS)
3288 x = full_slabs * s->objects;
3289 else
3290 x = full_slabs;
3291 total += x;
3292 nodes[node] += x;
3293 }
3294 }
3295
3296 x = sprintf(buf, "%lu", total);
3297#ifdef CONFIG_NUMA
f64dc58c 3298 for_each_node_state(node, N_NORMAL_MEMORY)
81819f0f
CL
3299 if (nodes[node])
3300 x += sprintf(buf + x, " N%d=%lu",
3301 node, nodes[node]);
3302#endif
3303 kfree(nodes);
3304 return x + sprintf(buf + x, "\n");
3305}
3306
3307static int any_slab_objects(struct kmem_cache *s)
3308{
3309 int node;
3310 int cpu;
3311
3312 for_each_possible_cpu(cpu)
3313 if (s->cpu_slab[cpu])
3314 return 1;
3315
3316 for_each_node(node) {
3317 struct kmem_cache_node *n = get_node(s, node);
3318
9e86943b 3319 if (n->nr_partial || atomic_long_read(&n->nr_slabs))
81819f0f
CL
3320 return 1;
3321 }
3322 return 0;
3323}
3324
3325#define to_slab_attr(n) container_of(n, struct slab_attribute, attr)
3326#define to_slab(n) container_of(n, struct kmem_cache, kobj);
3327
3328struct slab_attribute {
3329 struct attribute attr;
3330 ssize_t (*show)(struct kmem_cache *s, char *buf);
3331 ssize_t (*store)(struct kmem_cache *s, const char *x, size_t count);
3332};
3333
3334#define SLAB_ATTR_RO(_name) \
3335 static struct slab_attribute _name##_attr = __ATTR_RO(_name)
3336
3337#define SLAB_ATTR(_name) \
3338 static struct slab_attribute _name##_attr = \
3339 __ATTR(_name, 0644, _name##_show, _name##_store)
3340
81819f0f
CL
3341static ssize_t slab_size_show(struct kmem_cache *s, char *buf)
3342{
3343 return sprintf(buf, "%d\n", s->size);
3344}
3345SLAB_ATTR_RO(slab_size);
3346
3347static ssize_t align_show(struct kmem_cache *s, char *buf)
3348{
3349 return sprintf(buf, "%d\n", s->align);
3350}
3351SLAB_ATTR_RO(align);
3352
3353static ssize_t object_size_show(struct kmem_cache *s, char *buf)
3354{
3355 return sprintf(buf, "%d\n", s->objsize);
3356}
3357SLAB_ATTR_RO(object_size);
3358
3359static ssize_t objs_per_slab_show(struct kmem_cache *s, char *buf)
3360{
3361 return sprintf(buf, "%d\n", s->objects);
3362}
3363SLAB_ATTR_RO(objs_per_slab);
3364
3365static ssize_t order_show(struct kmem_cache *s, char *buf)
3366{
3367 return sprintf(buf, "%d\n", s->order);
3368}
3369SLAB_ATTR_RO(order);
3370
3371static ssize_t ctor_show(struct kmem_cache *s, char *buf)
3372{
3373 if (s->ctor) {
3374 int n = sprint_symbol(buf, (unsigned long)s->ctor);
3375
3376 return n + sprintf(buf + n, "\n");
3377 }
3378 return 0;
3379}
3380SLAB_ATTR_RO(ctor);
3381
81819f0f
CL
3382static ssize_t aliases_show(struct kmem_cache *s, char *buf)
3383{
3384 return sprintf(buf, "%d\n", s->refcount - 1);
3385}
3386SLAB_ATTR_RO(aliases);
3387
3388static ssize_t slabs_show(struct kmem_cache *s, char *buf)
3389{
3390 return slab_objects(s, buf, SO_FULL|SO_PARTIAL|SO_CPU);
3391}
3392SLAB_ATTR_RO(slabs);
3393
3394static ssize_t partial_show(struct kmem_cache *s, char *buf)
3395{
3396 return slab_objects(s, buf, SO_PARTIAL);
3397}
3398SLAB_ATTR_RO(partial);
3399
3400static ssize_t cpu_slabs_show(struct kmem_cache *s, char *buf)
3401{
3402 return slab_objects(s, buf, SO_CPU);
3403}
3404SLAB_ATTR_RO(cpu_slabs);
3405
3406static ssize_t objects_show(struct kmem_cache *s, char *buf)
3407{
3408 return slab_objects(s, buf, SO_FULL|SO_PARTIAL|SO_CPU|SO_OBJECTS);
3409}
3410SLAB_ATTR_RO(objects);
3411
3412static ssize_t sanity_checks_show(struct kmem_cache *s, char *buf)
3413{
3414 return sprintf(buf, "%d\n", !!(s->flags & SLAB_DEBUG_FREE));
3415}
3416
3417static ssize_t sanity_checks_store(struct kmem_cache *s,
3418 const char *buf, size_t length)
3419{
3420 s->flags &= ~SLAB_DEBUG_FREE;
3421 if (buf[0] == '1')
3422 s->flags |= SLAB_DEBUG_FREE;
3423 return length;
3424}
3425SLAB_ATTR(sanity_checks);
3426
3427static ssize_t trace_show(struct kmem_cache *s, char *buf)
3428{
3429 return sprintf(buf, "%d\n", !!(s->flags & SLAB_TRACE));
3430}
3431
3432static ssize_t trace_store(struct kmem_cache *s, const char *buf,
3433 size_t length)
3434{
3435 s->flags &= ~SLAB_TRACE;
3436 if (buf[0] == '1')
3437 s->flags |= SLAB_TRACE;
3438 return length;
3439}
3440SLAB_ATTR(trace);
3441
3442static ssize_t reclaim_account_show(struct kmem_cache *s, char *buf)
3443{
3444 return sprintf(buf, "%d\n", !!(s->flags & SLAB_RECLAIM_ACCOUNT));
3445}
3446
3447static ssize_t reclaim_account_store(struct kmem_cache *s,
3448 const char *buf, size_t length)
3449{
3450 s->flags &= ~SLAB_RECLAIM_ACCOUNT;
3451 if (buf[0] == '1')
3452 s->flags |= SLAB_RECLAIM_ACCOUNT;
3453 return length;
3454}
3455SLAB_ATTR(reclaim_account);
3456
3457static ssize_t hwcache_align_show(struct kmem_cache *s, char *buf)
3458{
5af60839 3459 return sprintf(buf, "%d\n", !!(s->flags & SLAB_HWCACHE_ALIGN));
81819f0f
CL
3460}
3461SLAB_ATTR_RO(hwcache_align);
3462
3463#ifdef CONFIG_ZONE_DMA
3464static ssize_t cache_dma_show(struct kmem_cache *s, char *buf)
3465{
3466 return sprintf(buf, "%d\n", !!(s->flags & SLAB_CACHE_DMA));
3467}
3468SLAB_ATTR_RO(cache_dma);
3469#endif
3470
3471static ssize_t destroy_by_rcu_show(struct kmem_cache *s, char *buf)
3472{
3473 return sprintf(buf, "%d\n", !!(s->flags & SLAB_DESTROY_BY_RCU));
3474}
3475SLAB_ATTR_RO(destroy_by_rcu);
3476
3477static ssize_t red_zone_show(struct kmem_cache *s, char *buf)
3478{
3479 return sprintf(buf, "%d\n", !!(s->flags & SLAB_RED_ZONE));
3480}
3481
3482static ssize_t red_zone_store(struct kmem_cache *s,
3483 const char *buf, size_t length)
3484{
3485 if (any_slab_objects(s))
3486 return -EBUSY;
3487
3488 s->flags &= ~SLAB_RED_ZONE;
3489 if (buf[0] == '1')
3490 s->flags |= SLAB_RED_ZONE;
3491 calculate_sizes(s);
3492 return length;
3493}
3494SLAB_ATTR(red_zone);
3495
3496static ssize_t poison_show(struct kmem_cache *s, char *buf)
3497{
3498 return sprintf(buf, "%d\n", !!(s->flags & SLAB_POISON));
3499}
3500
3501static ssize_t poison_store(struct kmem_cache *s,
3502 const char *buf, size_t length)
3503{
3504 if (any_slab_objects(s))
3505 return -EBUSY;
3506
3507 s->flags &= ~SLAB_POISON;
3508 if (buf[0] == '1')
3509 s->flags |= SLAB_POISON;
3510 calculate_sizes(s);
3511 return length;
3512}
3513SLAB_ATTR(poison);
3514
3515static ssize_t store_user_show(struct kmem_cache *s, char *buf)
3516{
3517 return sprintf(buf, "%d\n", !!(s->flags & SLAB_STORE_USER));
3518}
3519
3520static ssize_t store_user_store(struct kmem_cache *s,
3521 const char *buf, size_t length)
3522{
3523 if (any_slab_objects(s))
3524 return -EBUSY;
3525
3526 s->flags &= ~SLAB_STORE_USER;
3527 if (buf[0] == '1')
3528 s->flags |= SLAB_STORE_USER;
3529 calculate_sizes(s);
3530 return length;
3531}
3532SLAB_ATTR(store_user);
3533
53e15af0
CL
3534static ssize_t validate_show(struct kmem_cache *s, char *buf)
3535{
3536 return 0;
3537}
3538
3539static ssize_t validate_store(struct kmem_cache *s,
3540 const char *buf, size_t length)
3541{
434e245d
CL
3542 int ret = -EINVAL;
3543
3544 if (buf[0] == '1') {
3545 ret = validate_slab_cache(s);
3546 if (ret >= 0)
3547 ret = length;
3548 }
3549 return ret;
53e15af0
CL
3550}
3551SLAB_ATTR(validate);
3552
2086d26a
CL
3553static ssize_t shrink_show(struct kmem_cache *s, char *buf)
3554{
3555 return 0;
3556}
3557
3558static ssize_t shrink_store(struct kmem_cache *s,
3559 const char *buf, size_t length)
3560{
3561 if (buf[0] == '1') {
3562 int rc = kmem_cache_shrink(s);
3563
3564 if (rc)
3565 return rc;
3566 } else
3567 return -EINVAL;
3568 return length;
3569}
3570SLAB_ATTR(shrink);
3571
88a420e4
CL
3572static ssize_t alloc_calls_show(struct kmem_cache *s, char *buf)
3573{
3574 if (!(s->flags & SLAB_STORE_USER))
3575 return -ENOSYS;
3576 return list_locations(s, buf, TRACK_ALLOC);
3577}
3578SLAB_ATTR_RO(alloc_calls);
3579
3580static ssize_t free_calls_show(struct kmem_cache *s, char *buf)
3581{
3582 if (!(s->flags & SLAB_STORE_USER))
3583 return -ENOSYS;
3584 return list_locations(s, buf, TRACK_FREE);
3585}
3586SLAB_ATTR_RO(free_calls);
3587
81819f0f
CL
3588#ifdef CONFIG_NUMA
3589static ssize_t defrag_ratio_show(struct kmem_cache *s, char *buf)
3590{
3591 return sprintf(buf, "%d\n", s->defrag_ratio / 10);
3592}
3593
3594static ssize_t defrag_ratio_store(struct kmem_cache *s,
3595 const char *buf, size_t length)
3596{
3597 int n = simple_strtoul(buf, NULL, 10);
3598
3599 if (n < 100)
3600 s->defrag_ratio = n * 10;
3601 return length;
3602}
3603SLAB_ATTR(defrag_ratio);
3604#endif
3605
3606static struct attribute * slab_attrs[] = {
3607 &slab_size_attr.attr,
3608 &object_size_attr.attr,
3609 &objs_per_slab_attr.attr,
3610 &order_attr.attr,
3611 &objects_attr.attr,
3612 &slabs_attr.attr,
3613 &partial_attr.attr,
3614 &cpu_slabs_attr.attr,
3615 &ctor_attr.attr,
81819f0f
CL
3616 &aliases_attr.attr,
3617 &align_attr.attr,
3618 &sanity_checks_attr.attr,
3619 &trace_attr.attr,
3620 &hwcache_align_attr.attr,
3621 &reclaim_account_attr.attr,
3622 &destroy_by_rcu_attr.attr,
3623 &red_zone_attr.attr,
3624 &poison_attr.attr,
3625 &store_user_attr.attr,
53e15af0 3626 &validate_attr.attr,
2086d26a 3627 &shrink_attr.attr,
88a420e4
CL
3628 &alloc_calls_attr.attr,
3629 &free_calls_attr.attr,
81819f0f
CL
3630#ifdef CONFIG_ZONE_DMA
3631 &cache_dma_attr.attr,
3632#endif
3633#ifdef CONFIG_NUMA
3634 &defrag_ratio_attr.attr,
3635#endif
3636 NULL
3637};
3638
3639static struct attribute_group slab_attr_group = {
3640 .attrs = slab_attrs,
3641};
3642
3643static ssize_t slab_attr_show(struct kobject *kobj,
3644 struct attribute *attr,
3645 char *buf)
3646{
3647 struct slab_attribute *attribute;
3648 struct kmem_cache *s;
3649 int err;
3650
3651 attribute = to_slab_attr(attr);
3652 s = to_slab(kobj);
3653
3654 if (!attribute->show)
3655 return -EIO;
3656
3657 err = attribute->show(s, buf);
3658
3659 return err;
3660}
3661
3662static ssize_t slab_attr_store(struct kobject *kobj,
3663 struct attribute *attr,
3664 const char *buf, size_t len)
3665{
3666 struct slab_attribute *attribute;
3667 struct kmem_cache *s;
3668 int err;
3669
3670 attribute = to_slab_attr(attr);
3671 s = to_slab(kobj);
3672
3673 if (!attribute->store)
3674 return -EIO;
3675
3676 err = attribute->store(s, buf, len);
3677
3678 return err;
3679}
3680
3681static struct sysfs_ops slab_sysfs_ops = {
3682 .show = slab_attr_show,
3683 .store = slab_attr_store,
3684};
3685
3686static struct kobj_type slab_ktype = {
3687 .sysfs_ops = &slab_sysfs_ops,
3688};
3689
3690static int uevent_filter(struct kset *kset, struct kobject *kobj)
3691{
3692 struct kobj_type *ktype = get_ktype(kobj);
3693
3694 if (ktype == &slab_ktype)
3695 return 1;
3696 return 0;
3697}
3698
3699static struct kset_uevent_ops slab_uevent_ops = {
3700 .filter = uevent_filter,
3701};
3702
5af328a5 3703static decl_subsys(slab, &slab_ktype, &slab_uevent_ops);
81819f0f
CL
3704
3705#define ID_STR_LENGTH 64
3706
3707/* Create a unique string id for a slab cache:
3708 * format
3709 * :[flags-]size
3710 */
3711static char *create_unique_id(struct kmem_cache *s)
3712{
3713 char *name = kmalloc(ID_STR_LENGTH, GFP_KERNEL);
3714 char *p = name;
3715
3716 BUG_ON(!name);
3717
3718 *p++ = ':';
3719 /*
3720 * First flags affecting slabcache operations. We will only
3721 * get here for aliasable slabs so we do not need to support
3722 * too many flags. The flags here must cover all flags that
3723 * are matched during merging to guarantee that the id is
3724 * unique.
3725 */
3726 if (s->flags & SLAB_CACHE_DMA)
3727 *p++ = 'd';
3728 if (s->flags & SLAB_RECLAIM_ACCOUNT)
3729 *p++ = 'a';
3730 if (s->flags & SLAB_DEBUG_FREE)
3731 *p++ = 'F';
3732 if (p != name + 1)
3733 *p++ = '-';
3734 p += sprintf(p, "%07d", s->size);
3735 BUG_ON(p > name + ID_STR_LENGTH - 1);
3736 return name;
3737}
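/*
 * Example (illustration, not from the original source): a DMA kmalloc
 * cache of size 192 with no debug flags set gets the id ":d-0000192";
 * a cache without any of the relevant flags gets just ":0000192".
 */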
3738
3739static int sysfs_slab_add(struct kmem_cache *s)
3740{
3741 int err;
3742 const char *name;
3743 int unmergeable;
3744
3745 if (slab_state < SYSFS)
3746 /* Defer until later */
3747 return 0;
3748
3749 unmergeable = slab_unmergeable(s);
3750 if (unmergeable) {
3751 /*
3752 * Slabcache can never be merged so we can use the name proper.
3753 * This is typically the case for debug situations. In that
3754 * case we can catch duplicate names easily.
3755 */
0f9008ef 3756 sysfs_remove_link(&slab_subsys.kobj, s->name);
81819f0f
CL
3757 name = s->name;
3758 } else {
3759 /*
3760 * Create a unique name for the slab as a target
3761 * for the symlinks.
3762 */
3763 name = create_unique_id(s);
3764 }
3765
3766 kobj_set_kset_s(s, slab_subsys);
3767 kobject_set_name(&s->kobj, name);
3768 kobject_init(&s->kobj);
3769 err = kobject_add(&s->kobj);
3770 if (err)
3771 return err;
3772
3773 err = sysfs_create_group(&s->kobj, &slab_attr_group);
3774 if (err)
3775 return err;
3776 kobject_uevent(&s->kobj, KOBJ_ADD);
3777 if (!unmergeable) {
3778 /* Setup first alias */
3779 sysfs_slab_alias(s, s->name);
3780 kfree(name);
3781 }
3782 return 0;
3783}
3784
3785static void sysfs_slab_remove(struct kmem_cache *s)
3786{
3787 kobject_uevent(&s->kobj, KOBJ_REMOVE);
3788 kobject_del(&s->kobj);
3789}
3790
3791/*
3792 * Need to buffer aliases during bootup until sysfs becomes
3793 * available lest we lose that information.
3794 */
3795struct saved_alias {
3796 struct kmem_cache *s;
3797 const char *name;
3798 struct saved_alias *next;
3799};
3800
5af328a5 3801static struct saved_alias *alias_list;
81819f0f
CL
3802
3803static int sysfs_slab_alias(struct kmem_cache *s, const char *name)
3804{
3805 struct saved_alias *al;
3806
3807 if (slab_state == SYSFS) {
3808 /*
3809 * If we have a leftover link then remove it.
3810 */
0f9008ef
LT
3811 sysfs_remove_link(&slab_subsys.kobj, name);
3812 return sysfs_create_link(&slab_subsys.kobj,
81819f0f
CL
3813 &s->kobj, name);
3814 }
3815
3816 al = kmalloc(sizeof(struct saved_alias), GFP_KERNEL);
3817 if (!al)
3818 return -ENOMEM;
3819
3820 al->s = s;
3821 al->name = name;
3822 al->next = alias_list;
3823 alias_list = al;
3824 return 0;
3825}
3826
3827static int __init slab_sysfs_init(void)
3828{
5b95a4ac 3829 struct kmem_cache *s;
81819f0f
CL
3830 int err;
3831
3832 err = subsystem_register(&slab_subsys);
3833 if (err) {
3834 printk(KERN_ERR "Cannot register slab subsystem.\n");
3835 return -ENOSYS;
3836 }
3837
26a7bd03
CL
3838 slab_state = SYSFS;
3839
5b95a4ac 3840 list_for_each_entry(s, &slab_caches, list) {
26a7bd03 3841 err = sysfs_slab_add(s);
5d540fb7
CL
3842 if (err)
3843 printk(KERN_ERR "SLUB: Unable to add boot slab %s"
3844 " to sysfs\n", s->name);
26a7bd03 3845 }
81819f0f
CL
3846
3847 while (alias_list) {
3848 struct saved_alias *al = alias_list;
3849
3850 alias_list = alias_list->next;
3851 err = sysfs_slab_alias(al->s, al->name);
5d540fb7
CL
3852 if (err)
3853 printk(KERN_ERR "SLUB: Unable to add boot slab alias"
3854 " %s to sysfs\n", s->name);
81819f0f
CL
3855 kfree(al);
3856 }
3857
3858 resiliency_test();
3859 return 0;
3860}
3861
3862__initcall(slab_sysfs_init);
81819f0f 3863#endif