SLUB: Place kmem_cache_cpu structures in a NUMA aware way
mm/slub.c
/*
 * SLUB: A slab allocator that limits cache line use instead of queuing
 * objects in per cpu and per node lists.
 *
 * The allocator synchronizes using per slab locks and only
 * uses a centralized lock to manage a pool of partial slabs.
 *
 * (C) 2007 SGI, Christoph Lameter <clameter@sgi.com>
 */

#include <linux/mm.h>
#include <linux/module.h>
#include <linux/bit_spinlock.h>
#include <linux/interrupt.h>
#include <linux/bitops.h>
#include <linux/slab.h>
#include <linux/seq_file.h>
#include <linux/cpu.h>
#include <linux/cpuset.h>
#include <linux/mempolicy.h>
#include <linux/ctype.h>
#include <linux/kallsyms.h>

/*
 * Lock order:
 * 1. slab_lock(page)
 * 2. slab->list_lock
 *
 * The slab_lock protects operations on the objects of a particular
 * slab and its metadata in the page struct. If the slab lock
 * has been taken then no allocations nor frees can be performed
 * on the objects in the slab nor can the slab be added or removed
 * from the partial or full lists since this would mean modifying
 * the page struct of the slab.
 *
 * The list_lock protects the partial and full lists on each node and
 * the partial slab counter. If taken then no new slabs may be added or
 * removed from the lists nor can the number of partial slabs be modified.
 * (Note that the total number of slabs is an atomic value that may be
 * modified without taking the list lock).
 *
 * The list_lock is a centralized lock and thus we avoid taking it as
 * much as possible. As long as SLUB does not have to handle partial
 * slabs, operations can continue without any centralized lock. F.e.
 * allocating a long series of objects that fill up slabs does not require
 * the list lock.
 *
 * The lock order is sometimes inverted when we are trying to get a slab
 * off a list. We take the list_lock and then look for a page on the list
 * to use. While we do that objects in the slabs may be freed. We can
 * only operate on the slab if we have also taken the slab_lock. So we use
 * a slab_trylock() on the slab. If trylock was successful then no frees
 * can occur anymore and we can use the slab for allocations etc. If the
 * slab_trylock() does not succeed then frees are in progress in the slab and
 * we must stay away from it for a while since we may cause a bouncing
 * cacheline if we try to acquire the lock. So go onto the next slab.
 * If all pages are busy then we may allocate a new slab instead of reusing
 * a partial slab. A new slab has no one operating on it and thus there is
 * no danger of cacheline contention.
 *
 * Interrupts are disabled during allocation and deallocation in order to
 * make the slab allocator safe to use in the context of an irq. In addition
 * interrupts are disabled to ensure that the processor does not change
 * while handling per_cpu slabs, due to kernel preemption.
 *
 * SLUB assigns one slab for allocation to each processor.
 * Allocations only occur from these slabs called cpu slabs.
 *
 * Slabs with free elements are kept on a partial list and during regular
 * operations no list for full slabs is used. If an object in a full slab is
 * freed then the slab will show up again on the partial lists.
 * We track full slabs for debugging purposes though because otherwise we
 * cannot scan all objects.
 *
 * Slabs are freed when they become empty. Teardown and setup is
 * minimal so we rely on the page allocator's per cpu caches for
 * fast frees and allocs.
 *
 * Overloading of page flags that are otherwise used for LRU management.
 *
 * PageActive	The slab is frozen and exempt from list processing.
 * 		This means that the slab is dedicated to a purpose
 * 		such as satisfying allocations for a specific
 * 		processor. Objects may be freed in the slab while
 * 		it is frozen but slab_free will then skip the usual
 * 		list operations. It is up to the processor holding
 * 		the slab to integrate the slab into the slab lists
 * 		when the slab is no longer needed.
 *
 * 		One use of this flag is to mark slabs that are
 * 		used for allocations. Then such a slab becomes a cpu
 * 		slab. The cpu slab may be equipped with an additional
 * 		freelist that allows lockless access to
 * 		free objects in addition to the regular freelist
 * 		that requires the slab lock.
 *
 * PageError	Slab requires special handling due to debug
 * 		options set. This moves slab handling out of
 * 		the fast path and disables lockless freelists.
 */

#define FROZEN (1 << PG_active)

#ifdef CONFIG_SLUB_DEBUG
#define SLABDEBUG (1 << PG_error)
#else
#define SLABDEBUG 0
#endif

static inline int SlabFrozen(struct page *page)
{
	return page->flags & FROZEN;
}

static inline void SetSlabFrozen(struct page *page)
{
	page->flags |= FROZEN;
}

static inline void ClearSlabFrozen(struct page *page)
{
	page->flags &= ~FROZEN;
}

static inline int SlabDebug(struct page *page)
{
	return page->flags & SLABDEBUG;
}

static inline void SetSlabDebug(struct page *page)
{
	page->flags |= SLABDEBUG;
}

static inline void ClearSlabDebug(struct page *page)
{
	page->flags &= ~SLABDEBUG;
}

/*
 * Issues still to be resolved:
 *
 * - Support PAGE_ALLOC_DEBUG. Should be easy to do.
 *
 * - Variable sizing of the per node arrays
 */

/* Enable to test recovery from slab corruption on boot */
#undef SLUB_RESILIENCY_TEST

#if PAGE_SHIFT <= 12

/*
 * Small page size. Make sure that we do not fragment memory
 */
#define DEFAULT_MAX_ORDER 1
#define DEFAULT_MIN_OBJECTS 4

#else

/*
 * Large page machines are customarily able to handle larger
 * page orders.
 */
#define DEFAULT_MAX_ORDER 2
#define DEFAULT_MIN_OBJECTS 8

#endif

/*
 * Minimum number of partial slabs. These will be left on the partial
 * lists even if they are empty. kmem_cache_shrink may reclaim them.
 */
#define MIN_PARTIAL 2

/*
 * Maximum number of desirable partial slabs.
 * The existence of more partial slabs makes kmem_cache_shrink
 * sort the partial list by the number of objects in them.
 */
#define MAX_PARTIAL 10

#define DEBUG_DEFAULT_FLAGS (SLAB_DEBUG_FREE | SLAB_RED_ZONE | \
				SLAB_POISON | SLAB_STORE_USER)

/*
 * Set of flags that will prevent slab merging
 */
#define SLUB_NEVER_MERGE (SLAB_RED_ZONE | SLAB_POISON | SLAB_STORE_USER | \
		SLAB_TRACE | SLAB_DESTROY_BY_RCU)

#define SLUB_MERGE_SAME (SLAB_DEBUG_FREE | SLAB_RECLAIM_ACCOUNT | \
		SLAB_CACHE_DMA)

#ifndef ARCH_KMALLOC_MINALIGN
#define ARCH_KMALLOC_MINALIGN __alignof__(unsigned long long)
#endif

#ifndef ARCH_SLAB_MINALIGN
#define ARCH_SLAB_MINALIGN __alignof__(unsigned long long)
#endif

/* Internal SLUB flags */
#define __OBJECT_POISON		0x80000000 /* Poison object */
#define __SYSFS_ADD_DEFERRED	0x40000000 /* Not yet visible via sysfs */

/* Not all arches define cache_line_size */
#ifndef cache_line_size
#define cache_line_size() L1_CACHE_BYTES
#endif

static int kmem_size = sizeof(struct kmem_cache);

#ifdef CONFIG_SMP
static struct notifier_block slab_notifier;
#endif

static enum {
	DOWN,		/* No slab functionality available */
	PARTIAL,	/* kmem_cache_open() works but kmalloc does not */
	UP,		/* Everything works but does not show up in sysfs */
	SYSFS		/* Sysfs up */
} slab_state = DOWN;

/* A list of all slab caches on the system */
static DECLARE_RWSEM(slub_lock);
static LIST_HEAD(slab_caches);

/*
 * Tracking user of a slab.
 */
struct track {
	void *addr;		/* Called from address */
	int cpu;		/* Was running on cpu */
	int pid;		/* Pid context */
	unsigned long when;	/* When did the operation occur */
};

enum track_item { TRACK_ALLOC, TRACK_FREE };

#if defined(CONFIG_SYSFS) && defined(CONFIG_SLUB_DEBUG)
static int sysfs_slab_add(struct kmem_cache *);
static int sysfs_slab_alias(struct kmem_cache *, const char *);
static void sysfs_slab_remove(struct kmem_cache *);
#else
static inline int sysfs_slab_add(struct kmem_cache *s) { return 0; }
static inline int sysfs_slab_alias(struct kmem_cache *s, const char *p)
							{ return 0; }
static inline void sysfs_slab_remove(struct kmem_cache *s) {}
#endif

/********************************************************************
 * 			Core slab cache functions
 *******************************************************************/

int slab_is_available(void)
{
	return slab_state >= UP;
}

static inline struct kmem_cache_node *get_node(struct kmem_cache *s, int node)
{
#ifdef CONFIG_NUMA
	return s->node[node];
#else
	return &s->local_node;
#endif
}

static inline struct kmem_cache_cpu *get_cpu_slab(struct kmem_cache *s, int cpu)
{
#ifdef CONFIG_SMP
	return s->cpu_slab[cpu];
#else
	return &s->cpu_slab;
#endif
}

static inline int check_valid_pointer(struct kmem_cache *s,
				struct page *page, const void *object)
{
	void *base;

	if (!object)
		return 1;

	base = page_address(page);
	if (object < base || object >= base + s->objects * s->size ||
		(object - base) % s->size) {
		return 0;
	}

	return 1;
}

/*
 * Slow version of get and set free pointer.
 *
 * This version requires touching the cache lines of kmem_cache which
 * we avoid doing in the fast alloc free paths. There we obtain the offset
 * from the page struct.
 */
static inline void *get_freepointer(struct kmem_cache *s, void *object)
{
	return *(void **)(object + s->offset);
}

static inline void set_freepointer(struct kmem_cache *s, void *object, void *fp)
{
	*(void **)(object + s->offset) = fp;
}

/* Loop over all objects in a slab */
#define for_each_object(__p, __s, __addr) \
	for (__p = (__addr); __p < (__addr) + (__s)->objects * (__s)->size;\
			__p += (__s)->size)

/* Scan freelist */
#define for_each_free_object(__p, __s, __free) \
	for (__p = (__free); __p; __p = get_freepointer((__s), __p))

/* Determine object index from a given position */
static inline int slab_index(void *p, struct kmem_cache *s, void *addr)
{
	return (p - addr) / s->size;
}

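/*
 * Editor's illustration (not part of the original source): how the
 * freelist chains through free objects. Assuming s->offset == 0, so that
 * the free pointer overlays the first word of each free object, and
 * three free objects A, B and C:
 *
 *	page->freelist -> A
 *	*(void **)A    -> B	(== get_freepointer(s, A))
 *	*(void **)B    -> C
 *	*(void **)C    -> NULL	(end of chain)
 *
 * for_each_free_object() above walks exactly this chain, and
 * set_freepointer() rewrites one link of it.
 */
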
#ifdef CONFIG_SLUB_DEBUG
/*
 * Debug settings:
 */
#ifdef CONFIG_SLUB_DEBUG_ON
static int slub_debug = DEBUG_DEFAULT_FLAGS;
#else
static int slub_debug;
#endif

static char *slub_debug_slabs;

/*
 * Object debugging
 */
static void print_section(char *text, u8 *addr, unsigned int length)
{
	int i, offset;
	int newline = 1;
	char ascii[17];

	ascii[16] = 0;

	for (i = 0; i < length; i++) {
		if (newline) {
			printk(KERN_ERR "%8s 0x%p: ", text, addr + i);
			newline = 0;
		}
		printk(" %02x", addr[i]);
		offset = i % 16;
		ascii[offset] = isgraph(addr[i]) ? addr[i] : '.';
		if (offset == 15) {
			printk(" %s\n", ascii);
			newline = 1;
		}
	}
	if (!newline) {
		i %= 16;
		while (i < 16) {
			printk("   ");
			ascii[i] = ' ';
			i++;
		}
		printk(" %s\n", ascii);
	}
}

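/*
 * Editor's note: sample print_section() output for a freed, poisoned
 * 16 byte object (hypothetical address). Free objects are filled with
 * POISON_FREE (0x6b, 'k') and end with POISON_END (0xa5, which is not
 * graphable and so is shown as '.'):
 *
 *	Object 0xffff810012345678:  6b 6b 6b 6b 6b 6b 6b 6b 6b 6b 6b 6b 6b 6b 6b a5 kkkkkkkkkkkkkkk.
 */
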
static struct track *get_track(struct kmem_cache *s, void *object,
	enum track_item alloc)
{
	struct track *p;

	if (s->offset)
		p = object + s->offset + sizeof(void *);
	else
		p = object + s->inuse;

	return p + alloc;
}

static void set_track(struct kmem_cache *s, void *object,
				enum track_item alloc, void *addr)
{
	struct track *p;

	if (s->offset)
		p = object + s->offset + sizeof(void *);
	else
		p = object + s->inuse;

	p += alloc;
	if (addr) {
		p->addr = addr;
		p->cpu = smp_processor_id();
		p->pid = current ? current->pid : -1;
		p->when = jiffies;
	} else
		memset(p, 0, sizeof(struct track));
}

static void init_tracking(struct kmem_cache *s, void *object)
{
	if (!(s->flags & SLAB_STORE_USER))
		return;

	set_track(s, object, TRACK_FREE, NULL);
	set_track(s, object, TRACK_ALLOC, NULL);
}

static void print_track(const char *s, struct track *t)
{
	if (!t->addr)
		return;

	printk(KERN_ERR "INFO: %s in ", s);
	__print_symbol("%s", (unsigned long)t->addr);
	printk(" age=%lu cpu=%u pid=%d\n", jiffies - t->when, t->cpu, t->pid);
}

static void print_tracking(struct kmem_cache *s, void *object)
{
	if (!(s->flags & SLAB_STORE_USER))
		return;

	print_track("Allocated", get_track(s, object, TRACK_ALLOC));
	print_track("Freed", get_track(s, object, TRACK_FREE));
}

static void print_page_info(struct page *page)
{
	printk(KERN_ERR "INFO: Slab 0x%p used=%u fp=0x%p flags=0x%04lx\n",
		page, page->inuse, page->freelist, page->flags);
}

static void slab_bug(struct kmem_cache *s, char *fmt, ...)
{
	va_list args;
	char buf[100];

	va_start(args, fmt);
	vsnprintf(buf, sizeof(buf), fmt, args);
	va_end(args);
	printk(KERN_ERR "========================================"
			"=====================================\n");
	printk(KERN_ERR "BUG %s: %s\n", s->name, buf);
	printk(KERN_ERR "----------------------------------------"
			"-------------------------------------\n\n");
}

static void slab_fix(struct kmem_cache *s, char *fmt, ...)
{
	va_list args;
	char buf[100];

	va_start(args, fmt);
	vsnprintf(buf, sizeof(buf), fmt, args);
	va_end(args);
	printk(KERN_ERR "FIX %s: %s\n", s->name, buf);
}

static void print_trailer(struct kmem_cache *s, struct page *page, u8 *p)
{
	unsigned int off;	/* Offset of last byte */
	u8 *addr = page_address(page);

	print_tracking(s, p);

	print_page_info(page);

	printk(KERN_ERR "INFO: Object 0x%p @offset=%tu fp=0x%p\n\n",
			p, p - addr, get_freepointer(s, p));

	if (p > addr + 16)
		print_section("Bytes b4", p - 16, 16);

	print_section("Object", p, min(s->objsize, 128));

	if (s->flags & SLAB_RED_ZONE)
		print_section("Redzone", p + s->objsize,
			s->inuse - s->objsize);

	if (s->offset)
		off = s->offset + sizeof(void *);
	else
		off = s->inuse;

	if (s->flags & SLAB_STORE_USER)
		off += 2 * sizeof(struct track);

	if (off != s->size)
		/* Beginning of the filler is the free pointer */
		print_section("Padding", p + off, s->size - off);

	dump_stack();
}

static void object_err(struct kmem_cache *s, struct page *page,
			u8 *object, char *reason)
{
	slab_bug(s, reason);
	print_trailer(s, page, object);
}

static void slab_err(struct kmem_cache *s, struct page *page, char *fmt, ...)
{
	va_list args;
	char buf[100];

	va_start(args, fmt);
	vsnprintf(buf, sizeof(buf), fmt, args);
	va_end(args);
	slab_bug(s, fmt);
	print_page_info(page);
	dump_stack();
}

static void init_object(struct kmem_cache *s, void *object, int active)
{
	u8 *p = object;

	if (s->flags & __OBJECT_POISON) {
		memset(p, POISON_FREE, s->objsize - 1);
		p[s->objsize - 1] = POISON_END;
	}

	if (s->flags & SLAB_RED_ZONE)
		memset(p + s->objsize,
			active ? SLUB_RED_ACTIVE : SLUB_RED_INACTIVE,
			s->inuse - s->objsize);
}

static u8 *check_bytes(u8 *start, unsigned int value, unsigned int bytes)
{
	while (bytes) {
		if (*start != (u8)value)
			return start;
		start++;
		bytes--;
	}
	return NULL;
}

static void restore_bytes(struct kmem_cache *s, char *message, u8 data,
						void *from, void *to)
{
	slab_fix(s, "Restoring 0x%p-0x%p=0x%x\n", from, to - 1, data);
	memset(from, data, to - from);
}

static int check_bytes_and_report(struct kmem_cache *s, struct page *page,
			u8 *object, char *what,
			u8 *start, unsigned int value, unsigned int bytes)
{
	u8 *fault;
	u8 *end;

	fault = check_bytes(start, value, bytes);
	if (!fault)
		return 1;

	end = start + bytes;
	while (end > fault && end[-1] == value)
		end--;

	slab_bug(s, "%s overwritten", what);
	printk(KERN_ERR "INFO: 0x%p-0x%p. First byte 0x%x instead of 0x%x\n",
					fault, end - 1, fault[0], value);
	print_trailer(s, page, object);

	restore_bytes(s, what, value, fault, end);
	return 0;
}

/*
 * Object layout:
 *
 * object address
 * 	Bytes of the object to be managed.
 * 	If the freepointer may overlay the object then the free
 * 	pointer is the first word of the object.
 *
 * 	Poisoning uses 0x6b (POISON_FREE) and the last byte is
 * 	0xa5 (POISON_END)
 *
 * object + s->objsize
 * 	Padding to reach word boundary. This is also used for Redzoning.
 * 	Padding is extended by another word if Redzoning is enabled and
 * 	objsize == inuse.
 *
 * 	We fill with 0xbb (RED_INACTIVE) for inactive objects and with
 * 	0xcc (RED_ACTIVE) for objects in use.
 *
 * object + s->inuse
 * 	Meta data starts here.
 *
 * 	A. Free pointer (if we cannot overwrite object on free)
 * 	B. Tracking data for SLAB_STORE_USER
 * 	C. Padding to reach required alignment boundary or at minimum
 * 		one word if debugging is on to be able to detect writes
 * 		before the word boundary.
 *
 *	Padding is done using 0x5a (POISON_INUSE)
 *
 * object + s->size
 * 	Nothing is used beyond s->size.
 *
 * If slabcaches are merged then the objsize and inuse boundaries are mostly
 * ignored. And therefore no slab options that rely on these boundaries
 * may be used with merged slabcaches.
 */

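/*
 * Editor's worked example (illustrative values, not from this file):
 * on a 64 bit box with SLAB_RED_ZONE | SLAB_STORE_USER | SLAB_POISON
 * and objsize = 20, the layout described above could come out as:
 *
 *	 0..19	object data; poisoned with 0x6b while free, last byte 0xa5
 *	20..23	padding to the word boundary, filled with the red zone
 *		value (0xbb free / 0xcc in use)
 *	24	s->inuse; the free pointer lives here because the object
 *		may not be overwritten on free
 *	32..	two struct track entries (TRACK_ALLOC, TRACK_FREE)
 *	...	0x5a padding up to s->size
 *
 * The exact offsets are computed by calculate_sizes(), which lies
 * outside this excerpt; the numbers here only instantiate the scheme.
 */
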
static int check_pad_bytes(struct kmem_cache *s, struct page *page, u8 *p)
{
	unsigned long off = s->inuse;	/* The end of info */

	if (s->offset)
		/* Freepointer is placed after the object. */
		off += sizeof(void *);

	if (s->flags & SLAB_STORE_USER)
		/* We also have user information there */
		off += 2 * sizeof(struct track);

	if (s->size == off)
		return 1;

	return check_bytes_and_report(s, page, p, "Object padding",
				p + off, POISON_INUSE, s->size - off);
}

static int slab_pad_check(struct kmem_cache *s, struct page *page)
{
	u8 *start;
	u8 *fault;
	u8 *end;
	int length;
	int remainder;

	if (!(s->flags & SLAB_POISON))
		return 1;

	start = page_address(page);
	end = start + (PAGE_SIZE << s->order);
	length = s->objects * s->size;
	remainder = end - (start + length);
	if (!remainder)
		return 1;

	fault = check_bytes(start + length, POISON_INUSE, remainder);
	if (!fault)
		return 1;
	while (end > fault && end[-1] == POISON_INUSE)
		end--;

	slab_err(s, page, "Padding overwritten. 0x%p-0x%p", fault, end - 1);
	print_section("Padding", start, length);

	restore_bytes(s, "slab padding", POISON_INUSE, start, end);
	return 0;
}

static int check_object(struct kmem_cache *s, struct page *page,
					void *object, int active)
{
	u8 *p = object;
	u8 *endobject = object + s->objsize;

	if (s->flags & SLAB_RED_ZONE) {
		unsigned int red =
			active ? SLUB_RED_ACTIVE : SLUB_RED_INACTIVE;

		if (!check_bytes_and_report(s, page, object, "Redzone",
			endobject, red, s->inuse - s->objsize))
			return 0;
	} else {
		if ((s->flags & SLAB_POISON) && s->objsize < s->inuse)
			check_bytes_and_report(s, page, p, "Alignment padding",
				endobject, POISON_INUSE, s->inuse - s->objsize);
	}

	if (s->flags & SLAB_POISON) {
		if (!active && (s->flags & __OBJECT_POISON) &&
			(!check_bytes_and_report(s, page, p, "Poison", p,
					POISON_FREE, s->objsize - 1) ||
			 !check_bytes_and_report(s, page, p, "Poison",
				p + s->objsize - 1, POISON_END, 1)))
			return 0;
		/*
		 * check_pad_bytes cleans up on its own.
		 */
		check_pad_bytes(s, page, p);
	}

	if (!s->offset && active)
		/*
		 * Object and freepointer overlap. Cannot check
		 * freepointer while object is allocated.
		 */
		return 1;

	/* Check free pointer validity */
	if (!check_valid_pointer(s, page, get_freepointer(s, p))) {
		object_err(s, page, p, "Freepointer corrupt");
		/*
		 * No choice but to zap it and thus lose the remainder
		 * of the free objects in this slab. May cause
		 * another error because the object count is now wrong.
		 */
		set_freepointer(s, p, NULL);
		return 0;
	}
	return 1;
}

static int check_slab(struct kmem_cache *s, struct page *page)
{
	VM_BUG_ON(!irqs_disabled());

	if (!PageSlab(page)) {
		slab_err(s, page, "Not a valid slab page");
		return 0;
	}
	if (page->inuse > s->objects) {
		slab_err(s, page, "inuse %u > max %u",
			page->inuse, s->objects);
		return 0;
	}
	/* Slab_pad_check fixes things up after itself */
	slab_pad_check(s, page);
	return 1;
}

/*
 * Determine if a certain object on a page is on the freelist. Must hold the
 * slab lock to guarantee that the chains are in a consistent state.
 */
static int on_freelist(struct kmem_cache *s, struct page *page, void *search)
{
	int nr = 0;
	void *fp = page->freelist;
	void *object = NULL;

	while (fp && nr <= s->objects) {
		if (fp == search)
			return 1;
		if (!check_valid_pointer(s, page, fp)) {
			if (object) {
				object_err(s, page, object,
					"Freechain corrupt");
				set_freepointer(s, object, NULL);
				break;
			} else {
				slab_err(s, page, "Freepointer corrupt");
				page->freelist = NULL;
				page->inuse = s->objects;
				slab_fix(s, "Freelist cleared");
				return 0;
			}
		}
		object = fp;
		fp = get_freepointer(s, object);
		nr++;
	}

	if (page->inuse != s->objects - nr) {
		slab_err(s, page, "Wrong object count. Counter is %d but "
			"counted were %d", page->inuse, s->objects - nr);
		page->inuse = s->objects - nr;
		slab_fix(s, "Object count adjusted.");
	}
	return search == NULL;
}

static void trace(struct kmem_cache *s, struct page *page, void *object, int alloc)
{
	if (s->flags & SLAB_TRACE) {
		printk(KERN_INFO "TRACE %s %s 0x%p inuse=%d fp=0x%p\n",
			s->name,
			alloc ? "alloc" : "free",
			object, page->inuse,
			page->freelist);

		if (!alloc)
			print_section("Object", (void *)object, s->objsize);

		dump_stack();
	}
}

/*
 * Tracking of fully allocated slabs for debugging purposes.
 */
static void add_full(struct kmem_cache_node *n, struct page *page)
{
	spin_lock(&n->list_lock);
	list_add(&page->lru, &n->full);
	spin_unlock(&n->list_lock);
}

static void remove_full(struct kmem_cache *s, struct page *page)
{
	struct kmem_cache_node *n;

	if (!(s->flags & SLAB_STORE_USER))
		return;

	n = get_node(s, page_to_nid(page));

	spin_lock(&n->list_lock);
	list_del(&page->lru);
	spin_unlock(&n->list_lock);
}

static void setup_object_debug(struct kmem_cache *s, struct page *page,
								void *object)
{
	if (!(s->flags & (SLAB_STORE_USER|SLAB_RED_ZONE|__OBJECT_POISON)))
		return;

	init_object(s, object, 0);
	init_tracking(s, object);
}

static int alloc_debug_processing(struct kmem_cache *s, struct page *page,
						void *object, void *addr)
{
	if (!check_slab(s, page))
		goto bad;

	if (object && !on_freelist(s, page, object)) {
		object_err(s, page, object, "Object already allocated");
		goto bad;
	}

	if (!check_valid_pointer(s, page, object)) {
		object_err(s, page, object, "Freelist Pointer check fails");
		goto bad;
	}

	if (object && !check_object(s, page, object, 0))
		goto bad;

	/* Success. Perform special debug activities for allocs */
	if (s->flags & SLAB_STORE_USER)
		set_track(s, object, TRACK_ALLOC, addr);
	trace(s, page, object, 1);
	init_object(s, object, 1);
	return 1;

bad:
	if (PageSlab(page)) {
		/*
		 * If this is a slab page then lets do the best we can
		 * to avoid issues in the future. Marking all objects
		 * as used avoids touching the remaining objects.
		 */
		slab_fix(s, "Marking all objects used");
		page->inuse = s->objects;
		page->freelist = NULL;
	}
	return 0;
}

static int free_debug_processing(struct kmem_cache *s, struct page *page,
						void *object, void *addr)
{
	if (!check_slab(s, page))
		goto fail;

	if (!check_valid_pointer(s, page, object)) {
		slab_err(s, page, "Invalid object pointer 0x%p", object);
		goto fail;
	}

	if (on_freelist(s, page, object)) {
		object_err(s, page, object, "Object already free");
		goto fail;
	}

	if (!check_object(s, page, object, 1))
		return 0;

	if (unlikely(s != page->slab)) {
		if (!PageSlab(page))
			slab_err(s, page, "Attempt to free object(0x%p) "
				"outside of slab", object);
		else if (!page->slab) {
			printk(KERN_ERR
				"SLUB <none>: no slab for object 0x%p.\n",
						object);
			dump_stack();
		} else
			object_err(s, page, object,
					"page slab pointer corrupt.");
		goto fail;
	}

	/* Special debug activities for freeing objects */
	if (!SlabFrozen(page) && !page->freelist)
		remove_full(s, page);
	if (s->flags & SLAB_STORE_USER)
		set_track(s, object, TRACK_FREE, addr);
	trace(s, page, object, 0);
	init_object(s, object, 0);
	return 1;

fail:
	slab_fix(s, "Object at 0x%p not freed", object);
	return 0;
}

static int __init setup_slub_debug(char *str)
{
	slub_debug = DEBUG_DEFAULT_FLAGS;
	if (*str++ != '=' || !*str)
		/*
		 * No options specified. Switch on full debugging.
		 */
		goto out;

	if (*str == ',')
		/*
		 * No options but restriction on slabs. This means full
		 * debugging for slabs matching a pattern.
		 */
		goto check_slabs;

	slub_debug = 0;
	if (*str == '-')
		/*
		 * Switch off all debugging measures.
		 */
		goto out;

	/*
	 * Determine which debug features should be switched on
	 */
	for ( ; *str && *str != ','; str++) {
		switch (tolower(*str)) {
		case 'f':
			slub_debug |= SLAB_DEBUG_FREE;
			break;
		case 'z':
			slub_debug |= SLAB_RED_ZONE;
			break;
		case 'p':
			slub_debug |= SLAB_POISON;
			break;
		case 'u':
			slub_debug |= SLAB_STORE_USER;
			break;
		case 't':
			slub_debug |= SLAB_TRACE;
			break;
		default:
			printk(KERN_ERR "slub_debug option '%c' "
				"unknown. skipped\n", *str);
		}
	}

check_slabs:
	if (*str == ',')
		slub_debug_slabs = str + 1;
out:
	return 1;
}

__setup("slub_debug", setup_slub_debug);

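/*
 * Editor's usage sketch, derived from the option letters parsed above
 * (the "dentry" cache name is only an example):
 *
 *	slub_debug		enable full debugging for all slabs
 *	slub_debug=FZ		only sanity checks (F) and red zoning (Z)
 *	slub_debug=,dentry	full debugging, but only for the dentry cache
 *	slub_debug=-		switch all debugging off
 */
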
static unsigned long kmem_cache_flags(unsigned long objsize,
	unsigned long flags, const char *name,
	void (*ctor)(void *, struct kmem_cache *, unsigned long))
{
	/*
	 * The page->offset field is only 16 bit wide. This is an offset
	 * in units of words from the beginning of an object. If the slab
	 * size is bigger than that, we cannot move the free pointer behind
	 * the object anymore.
	 *
	 * On 32 bit platforms the limit is 256k. On 64 bit platforms
	 * the limit is 512k.
	 *
	 * Debugging or ctor may create a need to move the free
	 * pointer. Fail if this happens.
	 */
	if (objsize >= 65535 * sizeof(void *)) {
		BUG_ON(flags & (SLAB_RED_ZONE | SLAB_POISON |
				SLAB_STORE_USER | SLAB_DESTROY_BY_RCU));
		BUG_ON(ctor);
	} else {
		/*
		 * Enable debugging if selected on the kernel commandline.
		 */
		if (slub_debug && (!slub_debug_slabs ||
		    strncmp(slub_debug_slabs, name,
			strlen(slub_debug_slabs)) == 0))
				flags |= slub_debug;
	}

	return flags;
}
#else
static inline void setup_object_debug(struct kmem_cache *s,
			struct page *page, void *object) {}

static inline int alloc_debug_processing(struct kmem_cache *s,
	struct page *page, void *object, void *addr) { return 0; }

static inline int free_debug_processing(struct kmem_cache *s,
	struct page *page, void *object, void *addr) { return 0; }

static inline int slab_pad_check(struct kmem_cache *s, struct page *page)
			{ return 1; }
static inline int check_object(struct kmem_cache *s, struct page *page,
			void *object, int active) { return 1; }
static inline void add_full(struct kmem_cache_node *n, struct page *page) {}
static inline unsigned long kmem_cache_flags(unsigned long objsize,
	unsigned long flags, const char *name,
	void (*ctor)(void *, struct kmem_cache *, unsigned long))
{
	return flags;
}
#define slub_debug 0
#endif
/*
 * Slab allocation and freeing
 */
static struct page *allocate_slab(struct kmem_cache *s, gfp_t flags, int node)
{
	struct page *page;
	int pages = 1 << s->order;

	if (s->order)
		flags |= __GFP_COMP;

	if (s->flags & SLAB_CACHE_DMA)
		flags |= SLUB_DMA;

	if (s->flags & SLAB_RECLAIM_ACCOUNT)
		flags |= __GFP_RECLAIMABLE;

	if (node == -1)
		page = alloc_pages(flags, s->order);
	else
		page = alloc_pages_node(node, flags, s->order);

	if (!page)
		return NULL;

	mod_zone_page_state(page_zone(page),
		(s->flags & SLAB_RECLAIM_ACCOUNT) ?
		NR_SLAB_RECLAIMABLE : NR_SLAB_UNRECLAIMABLE,
		pages);

	return page;
}

static void setup_object(struct kmem_cache *s, struct page *page,
				void *object)
{
	setup_object_debug(s, page, object);
	if (unlikely(s->ctor))
		s->ctor(object, s, 0);
}

static struct page *new_slab(struct kmem_cache *s, gfp_t flags, int node)
{
	struct page *page;
	struct kmem_cache_node *n;
	void *start;
	void *end;
	void *last;
	void *p;

	BUG_ON(flags & GFP_SLAB_BUG_MASK);

	if (flags & __GFP_WAIT)
		local_irq_enable();

	page = allocate_slab(s,
		flags & (GFP_RECLAIM_MASK | GFP_CONSTRAINT_MASK), node);
	if (!page)
		goto out;

	n = get_node(s, page_to_nid(page));
	if (n)
		atomic_long_inc(&n->nr_slabs);
	page->slab = s;
	page->flags |= 1 << PG_slab;
	if (s->flags & (SLAB_DEBUG_FREE | SLAB_RED_ZONE | SLAB_POISON |
			SLAB_STORE_USER | SLAB_TRACE))
		SetSlabDebug(page);

	start = page_address(page);
	end = start + s->objects * s->size;

	if (unlikely(s->flags & SLAB_POISON))
		memset(start, POISON_INUSE, PAGE_SIZE << s->order);

	last = start;
	for_each_object(p, s, start) {
		setup_object(s, page, last);
		set_freepointer(s, last, p);
		last = p;
	}
	setup_object(s, page, last);
	set_freepointer(s, last, NULL);

	page->freelist = start;
	page->inuse = 0;
out:
	if (flags & __GFP_WAIT)
		local_irq_disable();
	return page;
}

static void __free_slab(struct kmem_cache *s, struct page *page)
{
	int pages = 1 << s->order;

	if (unlikely(SlabDebug(page))) {
		void *p;

		slab_pad_check(s, page);
		for_each_object(p, s, page_address(page))
			check_object(s, page, p, 0);
		ClearSlabDebug(page);
	}

	mod_zone_page_state(page_zone(page),
		(s->flags & SLAB_RECLAIM_ACCOUNT) ?
		NR_SLAB_RECLAIMABLE : NR_SLAB_UNRECLAIMABLE,
		-pages);

	__free_pages(page, s->order);
}

static void rcu_free_slab(struct rcu_head *h)
{
	struct page *page;

	page = container_of((struct list_head *)h, struct page, lru);
	__free_slab(page->slab, page);
}

static void free_slab(struct kmem_cache *s, struct page *page)
{
	if (unlikely(s->flags & SLAB_DESTROY_BY_RCU)) {
		/*
		 * RCU free overloads the RCU head over the LRU
		 */
		struct rcu_head *head = (void *)&page->lru;

		call_rcu(head, rcu_free_slab);
	} else
		__free_slab(s, page);
}

static void discard_slab(struct kmem_cache *s, struct page *page)
{
	struct kmem_cache_node *n = get_node(s, page_to_nid(page));

	atomic_long_dec(&n->nr_slabs);
	reset_page_mapcount(page);
	__ClearPageSlab(page);
	free_slab(s, page);
}

/*
 * Per slab locking using the pagelock
 */
static __always_inline void slab_lock(struct page *page)
{
	bit_spin_lock(PG_locked, &page->flags);
}

static __always_inline void slab_unlock(struct page *page)
{
	bit_spin_unlock(PG_locked, &page->flags);
}

static __always_inline int slab_trylock(struct page *page)
{
	return bit_spin_trylock(PG_locked, &page->flags);
}

/*
 * Management of partially allocated slabs
 */
static void add_partial_tail(struct kmem_cache_node *n, struct page *page)
{
	spin_lock(&n->list_lock);
	n->nr_partial++;
	list_add_tail(&page->lru, &n->partial);
	spin_unlock(&n->list_lock);
}

static void add_partial(struct kmem_cache_node *n, struct page *page)
{
	spin_lock(&n->list_lock);
	n->nr_partial++;
	list_add(&page->lru, &n->partial);
	spin_unlock(&n->list_lock);
}

static void remove_partial(struct kmem_cache *s,
						struct page *page)
{
	struct kmem_cache_node *n = get_node(s, page_to_nid(page));

	spin_lock(&n->list_lock);
	list_del(&page->lru);
	n->nr_partial--;
	spin_unlock(&n->list_lock);
}

/*
 * Lock slab and remove from the partial list.
 *
 * Must hold list_lock.
 */
static inline int lock_and_freeze_slab(struct kmem_cache_node *n, struct page *page)
{
	if (slab_trylock(page)) {
		list_del(&page->lru);
		n->nr_partial--;
		SetSlabFrozen(page);
		return 1;
	}
	return 0;
}

/*
 * Try to allocate a partial slab from a specific node.
 */
static struct page *get_partial_node(struct kmem_cache_node *n)
{
	struct page *page;

	/*
	 * Racy check. If we mistakenly see no partial slabs then we
	 * just allocate an empty slab. If we mistakenly try to get a
	 * partial slab and there is none available then get_partial_node()
	 * will return NULL.
	 */
	if (!n || !n->nr_partial)
		return NULL;

	spin_lock(&n->list_lock);
	list_for_each_entry(page, &n->partial, lru)
		if (lock_and_freeze_slab(n, page))
			goto out;
	page = NULL;
out:
	spin_unlock(&n->list_lock);
	return page;
}

/*
 * Get a page from somewhere. Search in increasing NUMA distances.
 */
static struct page *get_any_partial(struct kmem_cache *s, gfp_t flags)
{
#ifdef CONFIG_NUMA
	struct zonelist *zonelist;
	struct zone **z;
	struct page *page;

	/*
	 * The defrag ratio allows a configuration of the tradeoffs between
	 * inter node defragmentation and node local allocations. A lower
	 * defrag_ratio increases the tendency to do local allocations
	 * instead of attempting to obtain partial slabs from other nodes.
	 *
	 * If the defrag_ratio is set to 0 then kmalloc() always
	 * returns node local objects. If the ratio is higher then kmalloc()
	 * may return off node objects because partial slabs are obtained
	 * from other nodes and filled up.
	 *
	 * If /sys/slab/xx/defrag_ratio is set to 100 (which makes
	 * defrag_ratio = 1000) then every (well almost) allocation will
	 * first attempt to defrag slab caches on other nodes. This means
	 * scanning over all nodes to look for partial slabs which may be
	 * expensive if we do it every time we are trying to find a slab
	 * with available objects.
	 */
	if (!s->defrag_ratio || get_cycles() % 1024 > s->defrag_ratio)
		return NULL;

	zonelist = &NODE_DATA(slab_node(current->mempolicy))
					->node_zonelists[gfp_zone(flags)];
	for (z = zonelist->zones; *z; z++) {
		struct kmem_cache_node *n;

		n = get_node(s, zone_to_nid(*z));

		if (n && cpuset_zone_allowed_hardwall(*z, flags) &&
				n->nr_partial > MIN_PARTIAL) {
			page = get_partial_node(n);
			if (page)
				return page;
		}
	}
#endif
	return NULL;
}

/*
 * Get a partial page, lock it and return it.
 */
static struct page *get_partial(struct kmem_cache *s, gfp_t flags, int node)
{
	struct page *page;
	int searchnode = (node == -1) ? numa_node_id() : node;

	page = get_partial_node(get_node(s, searchnode));
	if (page || (flags & __GFP_THISNODE))
		return page;

	return get_any_partial(s, flags);
}

/*
 * Move a page back to the lists.
 *
 * Must be called with the slab lock held.
 *
 * On exit the slab lock will have been dropped.
 */
static void unfreeze_slab(struct kmem_cache *s, struct page *page)
{
	struct kmem_cache_node *n = get_node(s, page_to_nid(page));

	ClearSlabFrozen(page);
	if (page->inuse) {

		if (page->freelist)
			add_partial(n, page);
		else if (SlabDebug(page) && (s->flags & SLAB_STORE_USER))
			add_full(n, page);
		slab_unlock(page);

	} else {
		if (n->nr_partial < MIN_PARTIAL) {
			/*
			 * Adding an empty slab to the partial slabs in order
			 * to avoid page allocator overhead. This slab needs
			 * to come after the other slabs with objects in
			 * order to fill them up. That way the size of the
			 * partial list stays small. kmem_cache_shrink can
			 * reclaim empty slabs from the partial list.
			 */
			add_partial_tail(n, page);
			slab_unlock(page);
		} else {
			slab_unlock(page);
			discard_slab(s, page);
		}
	}
}

/*
 * Remove the cpu slab
 */
static void deactivate_slab(struct kmem_cache *s, struct kmem_cache_cpu *c)
{
	struct page *page = c->page;
	/*
	 * Merge cpu freelist into freelist. Typically we get here
	 * because both freelists are empty. So this is unlikely
	 * to occur.
	 */
	while (unlikely(c->freelist)) {
		void **object;

		/* Retrieve object from cpu_freelist */
		object = c->freelist;
		c->freelist = c->freelist[c->offset];

		/* And put onto the regular freelist */
		object[c->offset] = page->freelist;
		page->freelist = object;
		page->inuse--;
	}
	c->page = NULL;
	unfreeze_slab(s, page);
}

static inline void flush_slab(struct kmem_cache *s, struct kmem_cache_cpu *c)
{
	slab_lock(c->page);
	deactivate_slab(s, c);
}

/*
 * Flush cpu slab.
 * Called from IPI handler with interrupts disabled.
 */
static inline void __flush_cpu_slab(struct kmem_cache *s, int cpu)
{
	struct kmem_cache_cpu *c = get_cpu_slab(s, cpu);

	if (likely(c && c->page))
		flush_slab(s, c);
}

static void flush_cpu_slab(void *d)
{
	struct kmem_cache *s = d;

	__flush_cpu_slab(s, smp_processor_id());
}

static void flush_all(struct kmem_cache *s)
{
#ifdef CONFIG_SMP
	on_each_cpu(flush_cpu_slab, s, 1, 1);
#else
	unsigned long flags;

	local_irq_save(flags);
	flush_cpu_slab(s);
	local_irq_restore(flags);
#endif
}

/*
 * Check if the objects in a per cpu structure fit numa
 * locality expectations.
 */
static inline int node_match(struct kmem_cache_cpu *c, int node)
{
#ifdef CONFIG_NUMA
	if (node != -1 && c->node != node)
		return 0;
#endif
	return 1;
}

/*
 * Slow path. The lockless freelist is empty or we need to perform
 * debugging duties.
 *
 * Interrupts are disabled.
 *
 * Processing is still very fast if new objects have been freed to the
 * regular freelist. In that case we simply take over the regular freelist
 * as the lockless freelist and zap the regular freelist.
 *
 * If that is not working then we fall back to the partial lists. We take the
 * first element of the freelist as the object to allocate now and move the
 * rest of the freelist to the lockless freelist.
 *
 * And if we were unable to get a new slab from the partial slab lists then
 * we need to allocate a new slab. This is the slowest path since we may sleep.
 */
static void *__slab_alloc(struct kmem_cache *s,
		gfp_t gfpflags, int node, void *addr, struct kmem_cache_cpu *c)
{
	void **object;
	struct page *new;

	if (!c->page)
		goto new_slab;

	slab_lock(c->page);
	if (unlikely(!node_match(c, node)))
		goto another_slab;
load_freelist:
	object = c->page->freelist;
	if (unlikely(!object))
		goto another_slab;
	if (unlikely(SlabDebug(c->page)))
		goto debug;

	object = c->page->freelist;
	c->freelist = object[c->offset];
	c->page->inuse = s->objects;
	c->page->freelist = NULL;
	c->node = page_to_nid(c->page);
	slab_unlock(c->page);
	return object;

another_slab:
	deactivate_slab(s, c);

new_slab:
	new = get_partial(s, gfpflags, node);
	if (new) {
		c->page = new;
		goto load_freelist;
	}

	new = new_slab(s, gfpflags, node);
	if (new) {
		c = get_cpu_slab(s, smp_processor_id());
		if (c->page) {
			/*
			 * Someone else populated the cpu_slab while we
			 * enabled interrupts, or we have gotten scheduled
			 * on another cpu. The page may not be on the
			 * requested node even if __GFP_THISNODE was
			 * specified. So we need to recheck.
			 */
			if (node_match(c, node)) {
				/*
				 * Current cpuslab is acceptable and we
				 * want the current one since its cache hot
				 */
				discard_slab(s, new);
				slab_lock(c->page);
				goto load_freelist;
			}
			/* New slab does not fit our expectations */
			flush_slab(s, c);
		}
		slab_lock(new);
		SetSlabFrozen(new);
		c->page = new;
		goto load_freelist;
	}
	return NULL;
debug:
	object = c->page->freelist;
	if (!alloc_debug_processing(s, c->page, object, addr))
		goto another_slab;

	c->page->inuse++;
	c->page->freelist = object[c->offset];
	c->node = -1;
	slab_unlock(c->page);
	return object;
}

/*
 * Inlined fastpath so that allocation functions (kmalloc, kmem_cache_alloc)
 * have the fastpath folded into their functions. So no function call
 * overhead for requests that can be satisfied on the fastpath.
 *
 * The fastpath works by first checking if the lockless freelist can be used.
 * If not then __slab_alloc is called for slow processing.
 *
 * Otherwise we can simply pick the next object from the lockless free list.
 */
static void __always_inline *slab_alloc(struct kmem_cache *s,
		gfp_t gfpflags, int node, void *addr)
{
	void **object;
	unsigned long flags;
	struct kmem_cache_cpu *c;

	local_irq_save(flags);
	c = get_cpu_slab(s, smp_processor_id());
	if (unlikely(!c->freelist || !node_match(c, node)))

		object = __slab_alloc(s, gfpflags, node, addr, c);

	else {
		object = c->freelist;
		c->freelist = object[c->offset];
	}
	local_irq_restore(flags);

	if (unlikely((gfpflags & __GFP_ZERO) && object))
		memset(object, 0, s->objsize);

	return object;
}

void *kmem_cache_alloc(struct kmem_cache *s, gfp_t gfpflags)
{
	return slab_alloc(s, gfpflags, -1, __builtin_return_address(0));
}
EXPORT_SYMBOL(kmem_cache_alloc);

#ifdef CONFIG_NUMA
void *kmem_cache_alloc_node(struct kmem_cache *s, gfp_t gfpflags, int node)
{
	return slab_alloc(s, gfpflags, node, __builtin_return_address(0));
}
EXPORT_SYMBOL(kmem_cache_alloc_node);
#endif

/*
 * Slow path handling. This may still be called frequently since objects
 * have a longer lifetime than the cpu slabs in most processing loads.
 *
 * So we still attempt to reduce cache line usage. Just take the slab
 * lock and free the item. If there is no additional partial page
 * handling required then we can return immediately.
 */
static void __slab_free(struct kmem_cache *s, struct page *page,
				void *x, void *addr, unsigned int offset)
{
	void *prior;
	void **object = (void *)x;

	slab_lock(page);

	if (unlikely(SlabDebug(page)))
		goto debug;
checks_ok:
	prior = object[offset] = page->freelist;
	page->freelist = object;
	page->inuse--;

	if (unlikely(SlabFrozen(page)))
		goto out_unlock;

	if (unlikely(!page->inuse))
		goto slab_empty;

	/*
	 * Objects left in the slab. If it
	 * was not on the partial list before
	 * then add it.
	 */
	if (unlikely(!prior))
		add_partial(get_node(s, page_to_nid(page)), page);

out_unlock:
	slab_unlock(page);
	return;

slab_empty:
	if (prior)
		/*
		 * Slab still on the partial list.
		 */
		remove_partial(s, page);

	slab_unlock(page);
	discard_slab(s, page);
	return;

debug:
	if (!free_debug_processing(s, page, x, addr))
		goto out_unlock;
	goto checks_ok;
}

/*
 * Fastpath with forced inlining to produce a kfree and kmem_cache_free that
 * can perform fastpath freeing without additional function calls.
 *
 * The fastpath is only possible if we are freeing to the current cpu slab
 * of this processor. This is typically the case if we have just allocated
 * the item before.
 *
 * If fastpath is not possible then fall back to __slab_free where we deal
 * with all sorts of special processing.
 */
static void __always_inline slab_free(struct kmem_cache *s,
			struct page *page, void *x, void *addr)
{
	void **object = (void *)x;
	unsigned long flags;
	struct kmem_cache_cpu *c;

	local_irq_save(flags);
	debug_check_no_locks_freed(object, s->objsize);
	c = get_cpu_slab(s, smp_processor_id());
	if (likely(page == c->page && c->node >= 0)) {
		object[c->offset] = c->freelist;
		c->freelist = object;
	} else
		__slab_free(s, page, x, addr, c->offset);

	local_irq_restore(flags);
}

void kmem_cache_free(struct kmem_cache *s, void *x)
{
	struct page *page;

	page = virt_to_head_page(x);

	slab_free(s, page, x, __builtin_return_address(0));
}
EXPORT_SYMBOL(kmem_cache_free);

/* Figure out on which slab object the object resides */
static struct page *get_object_page(const void *x)
{
	struct page *page = virt_to_head_page(x);

	if (!PageSlab(page))
		return NULL;

	return page;
}

/*
 * Object placement in a slab is made very easy because we always start at
 * offset 0. If we tune the size of the object to the alignment then we can
 * get the required alignment by putting one properly sized object after
 * another.
 *
 * Notice that the allocation order determines the sizes of the per cpu
 * caches. Each processor has always one slab available for allocations.
 * Increasing the allocation order reduces the number of times that slabs
 * must be moved on and off the partial lists and is therefore a factor in
 * locking overhead.
 */

/*
 * Minimum / Maximum order of slab pages. This influences locking overhead
 * and slab fragmentation. A higher order reduces the number of partial slabs
 * and increases the number of allocations possible without having to
 * take the list_lock.
 */
static int slub_min_order;
static int slub_max_order = DEFAULT_MAX_ORDER;
static int slub_min_objects = DEFAULT_MIN_OBJECTS;

/*
 * Merge control. If this is set then no merging of slab caches will occur.
 * (Could be removed. This was introduced to pacify the merge skeptics.)
 */
static int slub_nomerge;

/*
 * Calculate the order of allocation given a slab object size.
 *
 * The order of allocation has significant impact on performance and other
 * system components. Generally order 0 allocations should be preferred since
 * order 0 does not cause fragmentation in the page allocator. Larger objects
 * can be problematic to put into order 0 slabs because there may be too much
 * unused space left. We go to a higher order if more than 1/8th of the slab
 * would be wasted.
 *
 * In order to reach satisfactory performance we must ensure that a minimum
 * number of objects is in one slab. Otherwise we may generate too much
 * activity on the partial lists which requires taking the list_lock. This is
 * less a concern for large slabs though which are rarely used.
 *
 * slub_max_order specifies the order where we begin to stop considering the
 * number of objects in a slab as critical. If we reach slub_max_order then
 * we try to keep the page order as low as possible. So we accept more waste
 * of space in favor of a small page order.
 *
 * Higher order allocations also allow the placement of more objects in a
 * slab and thereby reduce object handling overhead. If the user has
 * requested a higher minimum order then we start with that one instead of
 * the smallest order which will fit the object.
 */
static inline int slab_order(int size, int min_objects,
				int max_order, int fract_leftover)
{
	int order;
	int rem;
	int min_order = slub_min_order;

	for (order = max(min_order,
				fls(min_objects * size - 1) - PAGE_SHIFT);
			order <= max_order; order++) {

		unsigned long slab_size = PAGE_SIZE << order;

		if (slab_size < min_objects * size)
			continue;

		rem = slab_size % size;

		if (rem <= slab_size / fract_leftover)
			break;

	}

	return order;
}

static inline int calculate_order(int size)
{
	int order;
	int min_objects;
	int fraction;

	/*
	 * Attempt to find best configuration for a slab. This
	 * works by first attempting to generate a layout with
	 * the best configuration and backing off gradually.
	 *
	 * First we reduce the acceptable waste in a slab. Then
	 * we reduce the minimum objects required in a slab.
	 */
	min_objects = slub_min_objects;
	while (min_objects > 1) {
		fraction = 8;
		while (fraction >= 4) {
			order = slab_order(size, min_objects,
						slub_max_order, fraction);
			if (order <= slub_max_order)
				return order;
			fraction /= 2;
		}
		min_objects /= 2;
	}

	/*
	 * We were unable to place multiple objects in a slab. Now
	 * lets see if we can place a single object there.
	 */
	order = slab_order(size, 1, slub_max_order, 1);
	if (order <= slub_max_order)
		return order;

	/*
	 * Doh this slab cannot be placed using slub_max_order.
	 */
	order = slab_order(size, 1, MAX_ORDER, 1);
	if (order <= MAX_ORDER)
		return order;
	return -ENOSYS;
}

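/*
 * Editor's worked example (assuming PAGE_SIZE = 4096, PAGE_SHIFT = 12
 * and slub_min_order = 0): slab_order(192, 4, slub_max_order, 8)
 * starts at order max(0, fls(4 * 192 - 1) - 12) = 0. An order 0 slab
 * holds 4096 / 192 = 21 objects, wasting rem = 4096 % 192 = 64 bytes;
 * since 64 <= 4096 / 8 = 512 that waste is acceptable and order 0 is
 * chosen.
 */
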
81819f0f 1831/*
672bba3a 1832 * Figure out what the alignment of the objects will be.
81819f0f
CL
1833 */
1834static unsigned long calculate_alignment(unsigned long flags,
1835 unsigned long align, unsigned long size)
1836{
1837 /*
1838 * If the user wants hardware cache aligned objects then
1839 * follow that suggestion if the object is sufficiently
1840 * large.
1841 *
1842 * The hardware cache alignment cannot override the
1843 * specified alignment though. If the specified alignment
1844 * is greater then use it.
1845 */
5af60839 1846 if ((flags & SLAB_HWCACHE_ALIGN) &&
65c02d4c
CL
1847 size > cache_line_size() / 2)
1848 return max_t(unsigned long, align, cache_line_size());
81819f0f
CL
1849
1850 if (align < ARCH_SLAB_MINALIGN)
1851 return ARCH_SLAB_MINALIGN;
1852
1853 return ALIGN(align, sizeof(void *));
1854}
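/*
 * Example (illustrative, assuming a 64 byte cache line and 8 byte
 * words): a cache created with SLAB_HWCACHE_ALIGN, align = 8 and
 * size = 192 gets align = max(8, 64) = 64 since 192 > 64 / 2. With
 * size = 24 the hardware alignment is skipped because aligning such a
 * small object would waste most of each cacheline, and align is merely
 * rounded up to the word size.
 */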
1855
dfb4f096
CL
1856static void init_kmem_cache_cpu(struct kmem_cache *s,
1857 struct kmem_cache_cpu *c)
1858{
1859 c->page = NULL;
1860 c->freelist = NULL;
b3fba8da 1861 c->offset = s->offset / sizeof(void *);
dfb4f096
CL
1862 c->node = 0;
1863}
1864
81819f0f
CL
1865static void init_kmem_cache_node(struct kmem_cache_node *n)
1866{
1867 n->nr_partial = 0;
1868 atomic_long_set(&n->nr_slabs, 0);
1869 spin_lock_init(&n->list_lock);
1870 INIT_LIST_HEAD(&n->partial);
8ab1372f 1871#ifdef CONFIG_SLUB_DEBUG
643b1138 1872 INIT_LIST_HEAD(&n->full);
8ab1372f 1873#endif
81819f0f
CL
1874}
1875
4c93c355
CL
1876#ifdef CONFIG_SMP
1877/*
1878 * Per cpu array for per cpu structures.
1879 *
1880 * The per cpu array places all kmem_cache_cpu structures from one processor
1881 * close together, so that multiple per cpu
1882 * structures may share one cacheline. This may be particularly
1883 * beneficial for the kmalloc caches.
1884 *
1885 * A desktop system typically has around 60-80 slabs. With 100 here we are
1886 * likely able to get per cpu structures for all caches from the array defined
1887 * here. We must be able to cover all kmalloc caches during bootstrap.
1888 *
1889 * If the per cpu array is exhausted then fall back to kmalloc
1890 * of individual cachelines. No sharing is possible then.
1891 */
1892#define NR_KMEM_CACHE_CPU 100
1893
1894static DEFINE_PER_CPU(struct kmem_cache_cpu,
1895 kmem_cache_cpu)[NR_KMEM_CACHE_CPU];
1896
1897static DEFINE_PER_CPU(struct kmem_cache_cpu *, kmem_cache_cpu_free);
1898static cpumask_t kmem_cach_cpu_free_init_once = CPU_MASK_NONE;
1899
1900static struct kmem_cache_cpu *alloc_kmem_cache_cpu(struct kmem_cache *s,
1901 int cpu, gfp_t flags)
1902{
1903 struct kmem_cache_cpu *c = per_cpu(kmem_cache_cpu_free, cpu);
1904
1905 if (c)
1906 per_cpu(kmem_cache_cpu_free, cpu) =
1907 (void *)c->freelist;
1908 else {
1909 /* Table overflow: So allocate ourselves */
1910 c = kmalloc_node(
1911 ALIGN(sizeof(struct kmem_cache_cpu), cache_line_size()),
1912 flags, cpu_to_node(cpu));
1913 if (!c)
1914 return NULL;
1915 }
1916
1917 init_kmem_cache_cpu(s, c);
1918 return c;
1919}
1920
1921static void free_kmem_cache_cpu(struct kmem_cache_cpu *c, int cpu)
1922{
1923 if (c < per_cpu(kmem_cache_cpu, cpu) ||
1924 c > per_cpu(kmem_cache_cpu, cpu) + NR_KMEM_CACHE_CPU) {
1925 kfree(c);
1926 return;
1927 }
1928 c->freelist = (void *)per_cpu(kmem_cache_cpu_free, cpu);
1929 per_cpu(kmem_cache_cpu_free, cpu) = c;
1930}
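/*
 * The pool above is a simple LIFO: an unused kmem_cache_cpu is
 * threaded onto the free list through its own freelist field. A
 * minimal userspace sketch of the same pattern (illustrative only,
 * plain globals instead of per cpu variables):
 */
#if 0
struct pool_entry {
	void *freelist;		/* doubles as the next pointer while free */
};

static struct pool_entry pool[100];
static struct pool_entry *pool_free;

static void pool_put(struct pool_entry *e)
{
	e->freelist = pool_free;
	pool_free = e;
}

static struct pool_entry *pool_get(void)
{
	struct pool_entry *e = pool_free;

	if (e)
		pool_free = e->freelist;
	return e;		/* NULL means: fall back to kmalloc */
}
#endif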
1931
1932static void free_kmem_cache_cpus(struct kmem_cache *s)
1933{
1934 int cpu;
1935
1936 for_each_online_cpu(cpu) {
1937 struct kmem_cache_cpu *c = get_cpu_slab(s, cpu);
1938
1939 if (c) {
1940 s->cpu_slab[cpu] = NULL;
1941 free_kmem_cache_cpu(c, cpu);
1942 }
1943 }
1944}
1945
1946static int alloc_kmem_cache_cpus(struct kmem_cache *s, gfp_t flags)
1947{
1948 int cpu;
1949
1950 for_each_online_cpu(cpu) {
1951 struct kmem_cache_cpu *c = get_cpu_slab(s, cpu);
1952
1953 if (c)
1954 continue;
1955
1956 c = alloc_kmem_cache_cpu(s, cpu, flags);
1957 if (!c) {
1958 free_kmem_cache_cpus(s);
1959 return 0;
1960 }
1961 s->cpu_slab[cpu] = c;
1962 }
1963 return 1;
1964}
1965
1966/*
1967 * Initialize the per cpu array.
1968 */
1969static void init_alloc_cpu_cpu(int cpu)
1970{
1971 int i;
1972
1973 if (cpu_isset(cpu, kmem_cach_cpu_free_init_once))
1974 return;
1975
1976 for (i = NR_KMEM_CACHE_CPU - 1; i >= 0; i--)
1977 free_kmem_cache_cpu(&per_cpu(kmem_cache_cpu, cpu)[i], cpu);
1978
1979 cpu_set(cpu, kmem_cach_cpu_free_init_once);
1980}
1981
1982static void __init init_alloc_cpu(void)
1983{
1984 int cpu;
1985
1986 for_each_online_cpu(cpu)
1987 init_alloc_cpu_cpu(cpu);
1988}
1989
1990#else
1991static inline void free_kmem_cache_cpus(struct kmem_cache *s) {}
1992static inline void init_alloc_cpu(void) {}
1993
1994static inline int alloc_kmem_cache_cpus(struct kmem_cache *s, gfp_t flags)
1995{
1996 init_kmem_cache_cpu(s, &s->cpu_slab);
1997 return 1;
1998}
1999#endif
2000
81819f0f
CL
2001#ifdef CONFIG_NUMA
2002/*
2003 * No kmalloc_node yet so do it by hand. We know that this is the first
2004 * slab on the node for this slabcache. There are no concurrent accesses
2005 * possible.
2006 *
2007 * Note that this function only works on the kmalloc_node_cache
4c93c355
CL
2008 * when allocating for the kmalloc_node_cache. This is used for bootstrapping
2009 * memory on a fresh node that has no slab structures yet.
81819f0f 2010 */
1cd7daa5
AB
2011static struct kmem_cache_node *early_kmem_cache_node_alloc(gfp_t gfpflags,
2012 int node)
81819f0f
CL
2013{
2014 struct page *page;
2015 struct kmem_cache_node *n;
2016
2017 BUG_ON(kmalloc_caches->size < sizeof(struct kmem_cache_node));
2018
a2f92ee7 2019 page = new_slab(kmalloc_caches, gfpflags, node);
81819f0f
CL
2020
2021 BUG_ON(!page);
a2f92ee7
CL
2022 if (page_to_nid(page) != node) {
2023 printk(KERN_ERR "SLUB: Unable to allocate memory from "
2024 "node %d\n", node);
2025 printk(KERN_ERR "SLUB: Allocating a useless per node structure "
2026 "in order to be able to continue\n");
2027 }
2028
81819f0f
CL
2029 n = page->freelist;
2030 BUG_ON(!n);
2031 page->freelist = get_freepointer(kmalloc_caches, n);
2032 page->inuse++;
2033 kmalloc_caches->node[node] = n;
8ab1372f 2034#ifdef CONFIG_SLUB_DEBUG
d45f39cb
CL
2035 init_object(kmalloc_caches, n, 1);
2036 init_tracking(kmalloc_caches, n);
8ab1372f 2037#endif
81819f0f
CL
2038 init_kmem_cache_node(n);
2039 atomic_long_inc(&n->nr_slabs);
e95eed57 2040 add_partial(n, page);
dbc55faa
CL
2041
2042 /*
2043 * new_slab() disables interrupts. If we do not reenable interrupts here
2044 * then bootup would continue with interrupts disabled.
2045 */
2046 local_irq_enable();
81819f0f
CL
2047 return n;
2048}
2049
2050static void free_kmem_cache_nodes(struct kmem_cache *s)
2051{
2052 int node;
2053
f64dc58c 2054 for_each_node_state(node, N_NORMAL_MEMORY) {
81819f0f
CL
2055 struct kmem_cache_node *n = s->node[node];
2056 if (n && n != &s->local_node)
2057 kmem_cache_free(kmalloc_caches, n);
2058 s->node[node] = NULL;
2059 }
2060}
2061
2062static int init_kmem_cache_nodes(struct kmem_cache *s, gfp_t gfpflags)
2063{
2064 int node;
2065 int local_node;
2066
2067 if (slab_state >= UP)
2068 local_node = page_to_nid(virt_to_page(s));
2069 else
2070 local_node = 0;
2071
f64dc58c 2072 for_each_node_state(node, N_NORMAL_MEMORY) {
81819f0f
CL
2073 struct kmem_cache_node *n;
2074
2075 if (local_node == node)
2076 n = &s->local_node;
2077 else {
2078 if (slab_state == DOWN) {
2079 n = early_kmem_cache_node_alloc(gfpflags,
2080 node);
2081 continue;
2082 }
2083 n = kmem_cache_alloc_node(kmalloc_caches,
2084 gfpflags, node);
2085
2086 if (!n) {
2087 free_kmem_cache_nodes(s);
2088 return 0;
2089 }
2090
2091 }
2092 s->node[node] = n;
2093 init_kmem_cache_node(n);
2094 }
2095 return 1;
2096}
2097#else
2098static void free_kmem_cache_nodes(struct kmem_cache *s)
2099{
2100}
2101
2102static int init_kmem_cache_nodes(struct kmem_cache *s, gfp_t gfpflags)
2103{
2104 init_kmem_cache_node(&s->local_node);
2105 return 1;
2106}
2107#endif
2108
2109/*
2110 * calculate_sizes() determines the order and the distribution of data within
2111 * a slab object.
2112 */
2113static int calculate_sizes(struct kmem_cache *s)
2114{
2115 unsigned long flags = s->flags;
2116 unsigned long size = s->objsize;
2117 unsigned long align = s->align;
2118
2119 /*
2120 * Determine if we can poison the object itself. If the user of
2121 * the slab may touch the object after free or before allocation
2122 * then we should never poison the object itself.
2123 */
2124 if ((flags & SLAB_POISON) && !(flags & SLAB_DESTROY_BY_RCU) &&
c59def9f 2125 !s->ctor)
81819f0f
CL
2126 s->flags |= __OBJECT_POISON;
2127 else
2128 s->flags &= ~__OBJECT_POISON;
2129
2130 /*
2131 * Round up object size to the next word boundary. We can only
2132 * place the free pointer at word boundaries and this determines
2133 * the possible location of the free pointer.
2134 */
2135 size = ALIGN(size, sizeof(void *));
2136
41ecc55b 2137#ifdef CONFIG_SLUB_DEBUG
81819f0f 2138 /*
672bba3a 2139 * If we are Redzoning then check if there is some space between the
81819f0f 2140 * end of the object and the free pointer. If not then add an
672bba3a 2141 * additional word to have some bytes to store Redzone information.
81819f0f
CL
2142 */
2143 if ((flags & SLAB_RED_ZONE) && size == s->objsize)
2144 size += sizeof(void *);
41ecc55b 2145#endif
81819f0f
CL
2146
2147 /*
672bba3a
CL
2148 * With that we have determined the number of bytes in actual use
2149 * by the object. This is the potential offset to the free pointer.
81819f0f
CL
2150 */
2151 s->inuse = size;
2152
2153 if (((flags & (SLAB_DESTROY_BY_RCU | SLAB_POISON)) ||
c59def9f 2154 s->ctor)) {
81819f0f
CL
2155 /*
2156 * Relocate free pointer after the object if it is not
2157 * permitted to overwrite the first word of the object on
2158 * kmem_cache_free.
2159 *
2160 * This is the case if we do RCU, have a constructor or
2161 * destructor or are poisoning the objects.
2162 */
2163 s->offset = size;
2164 size += sizeof(void *);
2165 }
2166
c12b3c62 2167#ifdef CONFIG_SLUB_DEBUG
81819f0f
CL
2168 if (flags & SLAB_STORE_USER)
2169 /*
2170 * Need to store information about allocs and frees after
2171 * the object.
2172 */
2173 size += 2 * sizeof(struct track);
2174
be7b3fbc 2175 if (flags & SLAB_RED_ZONE)
81819f0f
CL
2176 /*
2177 * Add some empty padding so that we can catch
2178 * overwrites from earlier objects rather than let
2179 * tracking information or the free pointer be
2180 * corrupted if a user writes before the start
2181 * of the object.
2182 */
2183 size += sizeof(void *);
41ecc55b 2184#endif
672bba3a 2185
81819f0f
CL
2186 /*
2187 * Determine the alignment based on various parameters that the
65c02d4c
CL
2188 * user specified and the dynamic determination of cache line size
2189 * on bootup.
81819f0f
CL
2190 */
2191 align = calculate_alignment(flags, align, s->objsize);
2192
2193 /*
2194 * SLUB stores one object immediately after another beginning from
2195 * offset 0. In order to align the objects we have to simply size
2196 * each object to conform to the alignment.
2197 */
2198 size = ALIGN(size, align);
2199 s->size = size;
2200
2201 s->order = calculate_order(size);
2202 if (s->order < 0)
2203 return 0;
2204
2205 /*
2206 * Determine the number of objects per slab
2207 */
2208 s->objects = (PAGE_SIZE << s->order) / size;
2209
b3fba8da 2210 return !!s->objects;
81819f0f
CL
2211
2212}
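/*
 * Layout example (illustrative, 8 byte words): a 17 byte object with
 * SLAB_POISON and SLAB_STORE_USER set is first rounded up to 24 bytes.
 * Poisoning forbids reusing the first word of a free object for the
 * freelist link, so the free pointer is placed behind the object:
 * offset = 24 and another word is added. The two struct track records
 * for alloc and free come last, giving
 * 24 + 8 + 2 * sizeof(struct track) bytes per object before the final
 * alignment to a word boundary.
 */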
2213
81819f0f
CL
2214static int kmem_cache_open(struct kmem_cache *s, gfp_t gfpflags,
2215 const char *name, size_t size,
2216 size_t align, unsigned long flags,
c59def9f 2217 void (*ctor)(void *, struct kmem_cache *, unsigned long))
81819f0f
CL
2218{
2219 memset(s, 0, kmem_size);
2220 s->name = name;
2221 s->ctor = ctor;
81819f0f 2222 s->objsize = size;
81819f0f 2223 s->align = align;
ba0268a8 2224 s->flags = kmem_cache_flags(size, flags, name, ctor);
81819f0f
CL
2225
2226 if (!calculate_sizes(s))
2227 goto error;
2228
2229 s->refcount = 1;
2230#ifdef CONFIG_NUMA
2231 s->defrag_ratio = 100;
2232#endif
dfb4f096
CL
2233 if (!init_kmem_cache_nodes(s, gfpflags & ~SLUB_DMA))
2234 goto error;
81819f0f 2235
dfb4f096 2236 if (alloc_kmem_cache_cpus(s, gfpflags & ~SLUB_DMA))
81819f0f 2237 return 1;
4c93c355 2238 free_kmem_cache_nodes(s);
81819f0f
CL
2239error:
2240 if (flags & SLAB_PANIC)
2241 panic("Cannot create slab %s size=%lu realsize=%u "
2242 "order=%u offset=%u flags=%lx\n",
2243 s->name, (unsigned long)size, s->size, s->order,
2244 s->offset, flags);
2245 return 0;
2246}
81819f0f
CL
2247
2248/*
2249 * Check if a given pointer is valid
2250 */
2251int kmem_ptr_validate(struct kmem_cache *s, const void *object)
2252{
2253 struct page *page;
81819f0f
CL
2254
2255 page = get_object_page(object);
2256
2257 if (!page || s != page->slab)
2258 /* No slab or wrong slab */
2259 return 0;
2260
abcd08a6 2261 if (!check_valid_pointer(s, page, object))
81819f0f
CL
2262 return 0;
2263
2264 /*
2265 * We could also check if the object is on the slab's freelist.
2266 * But this would be too expensive and it seems that the main
2267 * purpose of kmem_ptr_validate is to check if the object belongs
2268 * to a certain slab.
2269 */
2270 return 1;
2271}
2272EXPORT_SYMBOL(kmem_ptr_validate);
2273
2274/*
2275 * Determine the size of a slab object
2276 */
2277unsigned int kmem_cache_size(struct kmem_cache *s)
2278{
2279 return s->objsize;
2280}
2281EXPORT_SYMBOL(kmem_cache_size);
2282
2283const char *kmem_cache_name(struct kmem_cache *s)
2284{
2285 return s->name;
2286}
2287EXPORT_SYMBOL(kmem_cache_name);
2288
2289/*
672bba3a
CL
2290 * Attempt to free all slabs on a node. Return the number of slabs we
2291 * were unable to free.
81819f0f
CL
2292 */
2293static int free_list(struct kmem_cache *s, struct kmem_cache_node *n,
2294 struct list_head *list)
2295{
2296 int slabs_inuse = 0;
2297 unsigned long flags;
2298 struct page *page, *h;
2299
2300 spin_lock_irqsave(&n->list_lock, flags);
2301 list_for_each_entry_safe(page, h, list, lru)
2302 if (!page->inuse) {
2303 list_del(&page->lru);
2304 discard_slab(s, page);
2305 } else
2306 slabs_inuse++;
2307 spin_unlock_irqrestore(&n->list_lock, flags);
2308 return slabs_inuse;
2309}
2310
2311/*
672bba3a 2312 * Release all resources used by a slab cache.
81819f0f 2313 */
0c710013 2314static inline int kmem_cache_close(struct kmem_cache *s)
81819f0f
CL
2315{
2316 int node;
2317
2318 flush_all(s);
2319
2320 /* Attempt to free all objects */
4c93c355 2321 free_kmem_cache_cpus(s);
f64dc58c 2322 for_each_node_state(node, N_NORMAL_MEMORY) {
81819f0f
CL
2323 struct kmem_cache_node *n = get_node(s, node);
2324
2086d26a 2325 n->nr_partial -= free_list(s, n, &n->partial);
81819f0f
CL
2326 if (atomic_long_read(&n->nr_slabs))
2327 return 1;
2328 }
2329 free_kmem_cache_nodes(s);
2330 return 0;
2331}
2332
2333/*
2334 * Close a cache and release the kmem_cache structure
2335 * (must be used for caches created using kmem_cache_create)
2336 */
2337void kmem_cache_destroy(struct kmem_cache *s)
2338{
2339 down_write(&slub_lock);
2340 s->refcount--;
2341 if (!s->refcount) {
2342 list_del(&s->list);
a0e1d1be 2343 up_write(&slub_lock);
81819f0f
CL
2344 if (kmem_cache_close(s))
2345 WARN_ON(1);
2346 sysfs_slab_remove(s);
2347 kfree(s);
a0e1d1be
CL
2348 } else
2349 up_write(&slub_lock);
81819f0f
CL
2350}
2351EXPORT_SYMBOL(kmem_cache_destroy);
2352
2353/********************************************************************
2354 * Kmalloc subsystem
2355 *******************************************************************/
2356
aadb4bc4 2357struct kmem_cache kmalloc_caches[PAGE_SHIFT] __cacheline_aligned;
81819f0f
CL
2358EXPORT_SYMBOL(kmalloc_caches);
2359
2360#ifdef CONFIG_ZONE_DMA
aadb4bc4 2361static struct kmem_cache *kmalloc_caches_dma[PAGE_SHIFT];
81819f0f
CL
2362#endif
2363
2364static int __init setup_slub_min_order(char *str)
2365{
2366 get_option(&str, &slub_min_order);
2367
2368 return 1;
2369}
2370
2371__setup("slub_min_order=", setup_slub_min_order);
2372
2373static int __init setup_slub_max_order(char *str)
2374{
2375 get_option(&str, &slub_max_order);
2376
2377 return 1;
2378}
2379
2380__setup("slub_max_order=", setup_slub_max_order);
2381
2382static int __init setup_slub_min_objects(char *str)
2383{
2384 get_option(&str, &slub_min_objects);
2385
2386 return 1;
2387}
2388
2389__setup("slub_min_objects=", setup_slub_min_objects);
2390
2391static int __init setup_slub_nomerge(char *str)
2392{
2393 slub_nomerge = 1;
2394 return 1;
2395}
2396
2397__setup("slub_nomerge", setup_slub_nomerge);
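/*
 * Example kernel command line usage of the parameters above
 * (illustrative values):
 *
 *	slub_min_order=1 slub_max_order=4 slub_min_objects=16 slub_nomerge
 *
 * This forces at least order 1 slabs, allows up to order 4, requests a
 * minimum of 16 objects per slab and disables cache merging.
 */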
2398
81819f0f
CL
2399static struct kmem_cache *create_kmalloc_cache(struct kmem_cache *s,
2400 const char *name, int size, gfp_t gfp_flags)
2401{
2402 unsigned int flags = 0;
2403
2404 if (gfp_flags & SLUB_DMA)
2405 flags = SLAB_CACHE_DMA;
2406
2407 down_write(&slub_lock);
2408 if (!kmem_cache_open(s, gfp_flags, name, size, ARCH_KMALLOC_MINALIGN,
c59def9f 2409 flags, NULL))
81819f0f
CL
2410 goto panic;
2411
2412 list_add(&s->list, &slab_caches);
2413 up_write(&slub_lock);
2414 if (sysfs_slab_add(s))
2415 goto panic;
2416 return s;
2417
2418panic:
2419 panic("Creation of kmalloc slab %s size=%d failed.\n", name, size);
2420}
2421
2e443fd0 2422#ifdef CONFIG_ZONE_DMA
1ceef402
CL
2423
2424static void sysfs_add_func(struct work_struct *w)
2425{
2426 struct kmem_cache *s;
2427
2428 down_write(&slub_lock);
2429 list_for_each_entry(s, &slab_caches, list) {
2430 if (s->flags & __SYSFS_ADD_DEFERRED) {
2431 s->flags &= ~__SYSFS_ADD_DEFERRED;
2432 sysfs_slab_add(s);
2433 }
2434 }
2435 up_write(&slub_lock);
2436}
2437
2438static DECLARE_WORK(sysfs_add_work, sysfs_add_func);
2439
2e443fd0
CL
2440static noinline struct kmem_cache *dma_kmalloc_cache(int index, gfp_t flags)
2441{
2442 struct kmem_cache *s;
2e443fd0
CL
2443 char *text;
2444 size_t realsize;
2445
2446 s = kmalloc_caches_dma[index];
2447 if (s)
2448 return s;
2449
2450 /* Dynamically create dma cache */
1ceef402
CL
2451 if (flags & __GFP_WAIT)
2452 down_write(&slub_lock);
2453 else {
2454 if (!down_write_trylock(&slub_lock))
2455 goto out;
2456 }
2457
2458 if (kmalloc_caches_dma[index])
2459 goto unlock_out;
2e443fd0 2460
7b55f620 2461 realsize = kmalloc_caches[index].objsize;
1ceef402
CL
2462 text = kasprintf(flags & ~SLUB_DMA, "kmalloc_dma-%d", (unsigned int)realsize);
2463 s = kmalloc(kmem_size, flags & ~SLUB_DMA);
2464
2465 if (!s || !text || !kmem_cache_open(s, flags, text,
2466 realsize, ARCH_KMALLOC_MINALIGN,
2467 SLAB_CACHE_DMA|__SYSFS_ADD_DEFERRED, NULL)) {
2468 kfree(s);
2469 kfree(text);
2470 goto unlock_out;
dfce8648 2471 }
1ceef402
CL
2472
2473 list_add(&s->list, &slab_caches);
2474 kmalloc_caches_dma[index] = s;
2475
2476 schedule_work(&sysfs_add_work);
2477
2478unlock_out:
dfce8648 2479 up_write(&slub_lock);
1ceef402 2480out:
dfce8648 2481 return kmalloc_caches_dma[index];
2e443fd0
CL
2482}
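/*
 * Note the locking dance above: in atomic context (no __GFP_WAIT) the
 * code must not sleep, so it only trylocks slub_lock and on contention
 * falls through to return whatever kmalloc_caches_dma[index] currently
 * holds (usually NULL, failing the allocation so the caller can retry
 * later). Only sleepable contexts wait for the lock.
 */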
2483#endif
2484
f1b26339
CL
2485/*
2486 * Conversion table for small slab sizes / 8 to the index in the
2487 * kmalloc array. This is necessary for slabs < 192 since we have non power
2488 * of two cache sizes there. The size of larger slabs can be determined using
2489 * fls.
2490 */
2491static s8 size_index[24] = {
2492 3, /* 8 */
2493 4, /* 16 */
2494 5, /* 24 */
2495 5, /* 32 */
2496 6, /* 40 */
2497 6, /* 48 */
2498 6, /* 56 */
2499 6, /* 64 */
2500 1, /* 72 */
2501 1, /* 80 */
2502 1, /* 88 */
2503 1, /* 96 */
2504 7, /* 104 */
2505 7, /* 112 */
2506 7, /* 120 */
2507 7, /* 128 */
2508 2, /* 136 */
2509 2, /* 144 */
2510 2, /* 152 */
2511 2, /* 160 */
2512 2, /* 168 */
2513 2, /* 176 */
2514 2, /* 184 */
2515 2 /* 192 */
2516};
2517
81819f0f
CL
2518static struct kmem_cache *get_slab(size_t size, gfp_t flags)
2519{
f1b26339 2520 int index;
81819f0f 2521
f1b26339
CL
2522 if (size <= 192) {
2523 if (!size)
2524 return ZERO_SIZE_PTR;
81819f0f 2525
f1b26339 2526 index = size_index[(size - 1) / 8];
aadb4bc4 2527 } else
f1b26339 2528 index = fls(size - 1);
81819f0f
CL
2529
2530#ifdef CONFIG_ZONE_DMA
f1b26339 2531 if (unlikely((flags & SLUB_DMA)))
2e443fd0 2532 return dma_kmalloc_cache(index, flags);
f1b26339 2533
81819f0f
CL
2534#endif
2535 return &kmalloc_caches[index];
2536}
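/*
 * Lookup examples (illustrative): kmalloc(100) computes
 * (100 - 1) / 8 = 12 and size_index[12] = 7, i.e. the kmalloc-128
 * cache. kmalloc(200) is above 192, so fls(199) = 8 selects
 * kmalloc-256. The table entries 1 and 2 route the 65..96 and 129..192
 * byte ranges to the non power of two kmalloc-96 and kmalloc-192
 * caches created in kmem_cache_init().
 */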
2537
2538void *__kmalloc(size_t size, gfp_t flags)
2539{
aadb4bc4 2540 struct kmem_cache *s;
81819f0f 2541
aadb4bc4
CL
2542 if (unlikely(size > PAGE_SIZE / 2))
2543 return (void *)__get_free_pages(flags | __GFP_COMP,
2544 get_order(size));
2545
2546 s = get_slab(size, flags);
2547
2548 if (unlikely(ZERO_OR_NULL_PTR(s)))
6cb8f913
CL
2549 return s;
2550
ce15fea8 2551 return slab_alloc(s, flags, -1, __builtin_return_address(0));
81819f0f
CL
2552}
2553EXPORT_SYMBOL(__kmalloc);
2554
2555#ifdef CONFIG_NUMA
2556void *__kmalloc_node(size_t size, gfp_t flags, int node)
2557{
aadb4bc4 2558 struct kmem_cache *s;
81819f0f 2559
aadb4bc4
CL
2560 if (unlikely(size > PAGE_SIZE / 2))
2561 return (void *)__get_free_pages(flags | __GFP_COMP,
2562 get_order(size));
2563
2564 s = get_slab(size, flags);
2565
2566 if (unlikely(ZERO_OR_NULL_PTR(s)))
6cb8f913
CL
2567 return s;
2568
ce15fea8 2569 return slab_alloc(s, flags, node, __builtin_return_address(0));
81819f0f
CL
2570}
2571EXPORT_SYMBOL(__kmalloc_node);
2572#endif
2573
2574size_t ksize(const void *object)
2575{
272c1d21 2576 struct page *page;
81819f0f
CL
2577 struct kmem_cache *s;
2578
ef8b4520
CL
2579 BUG_ON(!object);
2580 if (unlikely(object == ZERO_SIZE_PTR))
272c1d21
CL
2581 return 0;
2582
2583 page = get_object_page(object);
81819f0f
CL
2584 BUG_ON(!page);
2585 s = page->slab;
2586 BUG_ON(!s);
2587
2588 /*
2589 * Debugging requires use of the padding between object
2590 * and whatever may come after it.
2591 */
2592 if (s->flags & (SLAB_RED_ZONE | SLAB_POISON))
2593 return s->objsize;
2594
2595 /*
2596 * If we have the need to store the freelist pointer
2597 * back there or track user information then we can
2598 * only use the space before that information.
2599 */
2600 if (s->flags & (SLAB_DESTROY_BY_RCU | SLAB_STORE_USER))
2601 return s->inuse;
2602
2603 /*
2604 * Else we can use all the padding etc for the allocation
2605 */
2606 return s->size;
2607}
2608EXPORT_SYMBOL(ksize);
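/*
 * Example (illustrative): without any debug flags,
 * ksize(kmalloc(100, GFP_KERNEL)) reports the full 128 bytes of the
 * underlying kmalloc-128 object rather than the 100 bytes requested;
 * the caller may legitimately use all of it. With SLAB_STORE_USER or
 * SLAB_DESTROY_BY_RCU only the bytes up to the free pointer (s->inuse)
 * are usable.
 */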
2609
2610void kfree(const void *x)
2611{
81819f0f
CL
2612 struct page *page;
2613
2408c550 2614 if (unlikely(ZERO_OR_NULL_PTR(x)))
81819f0f
CL
2615 return;
2616
b49af68f 2617 page = virt_to_head_page(x);
aadb4bc4
CL
2618 if (unlikely(!PageSlab(page))) {
2619 put_page(page);
2620 return;
2621 }
2622 slab_free(page->slab, page, (void *)x, __builtin_return_address(0));
81819f0f
CL
2623}
2624EXPORT_SYMBOL(kfree);
2625
2086d26a 2626/*
672bba3a
CL
2627 * kmem_cache_shrink removes empty slabs from the partial lists and sorts
2628 * the remaining slabs by the number of items in use. The slabs with the
2629 * most items in use come first. New allocations will then fill those up
2630 * and thus they can be removed from the partial lists.
2631 *
2632 * The slabs with the least items are placed last. This results in them
2633 * being allocated from last, increasing the chance that their remaining
2634 * objects are freed so that the slabs can eventually be discarded.
2086d26a
CL
2635 */
2636int kmem_cache_shrink(struct kmem_cache *s)
2637{
2638 int node;
2639 int i;
2640 struct kmem_cache_node *n;
2641 struct page *page;
2642 struct page *t;
2643 struct list_head *slabs_by_inuse =
2644 kmalloc(sizeof(struct list_head) * s->objects, GFP_KERNEL);
2645 unsigned long flags;
2646
2647 if (!slabs_by_inuse)
2648 return -ENOMEM;
2649
2650 flush_all(s);
f64dc58c 2651 for_each_node_state(node, N_NORMAL_MEMORY) {
2086d26a
CL
2652 n = get_node(s, node);
2653
2654 if (!n->nr_partial)
2655 continue;
2656
2657 for (i = 0; i < s->objects; i++)
2658 INIT_LIST_HEAD(slabs_by_inuse + i);
2659
2660 spin_lock_irqsave(&n->list_lock, flags);
2661
2662 /*
672bba3a 2663 * Build lists indexed by the items in use in each slab.
2086d26a 2664 *
672bba3a
CL
2665 * Note that concurrent frees may occur while we hold the
2666 * list_lock. page->inuse here is the upper limit.
2086d26a
CL
2667 */
2668 list_for_each_entry_safe(page, t, &n->partial, lru) {
2669 if (!page->inuse && slab_trylock(page)) {
2670 /*
2671 * Must hold slab lock here because slab_free
2672 * may have freed the last object and be
2673 * waiting to release the slab.
2674 */
2675 list_del(&page->lru);
2676 n->nr_partial--;
2677 slab_unlock(page);
2678 discard_slab(s, page);
2679 } else {
fcda3d89
CL
2680 list_move(&page->lru,
2681 slabs_by_inuse + page->inuse);
2086d26a
CL
2682 }
2683 }
2684
2086d26a 2685 /*
672bba3a
CL
2686 * Rebuild the partial list with the slabs filled up most
2687 * first and the least used slabs at the end.
2086d26a
CL
2688 */
2689 for (i = s->objects - 1; i >= 0; i--)
2690 list_splice(slabs_by_inuse + i, n->partial.prev);
2691
2086d26a
CL
2692 spin_unlock_irqrestore(&n->list_lock, flags);
2693 }
2694
2695 kfree(slabs_by_inuse);
2696 return 0;
2697}
2698EXPORT_SYMBOL(kmem_cache_shrink);
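/*
 * The core of kmem_cache_shrink() is an O(n) bucket sort: one list
 * head per possible inuse count. A standalone sketch of the same idea
 * (illustrative only, plain arrays instead of struct page lists and a
 * fixed small capacity):
 */
#if 0
#include <stdio.h>

static void demo_shrink_order(const int *inuse, int n, int max_objects)
{
	int buckets[16][16];	/* bucket -> page indices, assumes n <= 16 */
	int fill[16] = { 0 };
	int b, i;

	for (i = 0; i < n; i++)
		buckets[inuse[i]][fill[inuse[i]]++] = i;

	/* Fullest slabs first; empty slabs (bucket 0) would be freed */
	for (b = max_objects - 1; b >= 1; b--)
		for (i = 0; i < fill[b]; i++)
			printf("page %d (inuse=%d)\n", buckets[b][i], b);
}

int main(void)
{
	int inuse[] = { 3, 0, 7, 7, 1 };

	demo_shrink_order(inuse, 5, 8);
	return 0;
}
#endif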
2699
81819f0f
CL
2700/********************************************************************
2701 * Basic setup of slabs
2702 *******************************************************************/
2703
2704void __init kmem_cache_init(void)
2705{
2706 int i;
4b356be0 2707 int caches = 0;
81819f0f 2708
4c93c355
CL
2709 init_alloc_cpu();
2710
81819f0f
CL
2711#ifdef CONFIG_NUMA
2712 /*
2713 * Must first have the slab cache available for the allocations of the
672bba3a 2714 * struct kmem_cache_node's. There is special bootstrap code in
81819f0f
CL
2715 * kmem_cache_open for slab_state == DOWN.
2716 */
2717 create_kmalloc_cache(&kmalloc_caches[0], "kmem_cache_node",
2718 sizeof(struct kmem_cache_node), GFP_KERNEL);
8ffa6875 2719 kmalloc_caches[0].refcount = -1;
4b356be0 2720 caches++;
81819f0f
CL
2721#endif
2722
2723 /* Able to allocate the per node structures */
2724 slab_state = PARTIAL;
2725
2726 /* Caches that are not of the two-to-the-power-of size */
4b356be0
CL
2727 if (KMALLOC_MIN_SIZE <= 64) {
2728 create_kmalloc_cache(&kmalloc_caches[1],
81819f0f 2729 "kmalloc-96", 96, GFP_KERNEL);
4b356be0
CL
2730 caches++;
2731 }
2732 if (KMALLOC_MIN_SIZE <= 128) {
2733 create_kmalloc_cache(&kmalloc_caches[2],
81819f0f 2734 "kmalloc-192", 192, GFP_KERNEL);
4b356be0
CL
2735 caches++;
2736 }
81819f0f 2737
aadb4bc4 2738 for (i = KMALLOC_SHIFT_LOW; i < PAGE_SHIFT; i++) {
81819f0f
CL
2739 create_kmalloc_cache(&kmalloc_caches[i],
2740 "kmalloc", 1 << i, GFP_KERNEL);
4b356be0
CL
2741 caches++;
2742 }
81819f0f 2743
f1b26339
CL
2744
2745 /*
2746 * Patch up the size_index table if we have strange large alignment
2747 * requirements for the kmalloc array. This is only the case for
2748 * mips it seems. The standard arches will not generate any code here.
2749 *
2750 * Largest permitted alignment is 256 bytes due to the way we
2751 * handle the index determination for the smaller caches.
2752 *
2753 * Make sure that nothing crazy happens if someone starts tinkering
2754 * around with ARCH_KMALLOC_MINALIGN
2755 */
2756 BUILD_BUG_ON(KMALLOC_MIN_SIZE > 256 ||
2757 (KMALLOC_MIN_SIZE & (KMALLOC_MIN_SIZE - 1)));
2758
12ad6843 2759 for (i = 8; i < KMALLOC_MIN_SIZE; i += 8)
f1b26339
CL
2760 size_index[(i - 1) / 8] = KMALLOC_SHIFT_LOW;
2761
81819f0f
CL
2762 slab_state = UP;
2763
2764 /* Provide the correct kmalloc names now that the caches are up */
aadb4bc4 2765 for (i = KMALLOC_SHIFT_LOW; i < PAGE_SHIFT; i++)
81819f0f
CL
2766 kmalloc_caches[i].name =
2767 kasprintf(GFP_KERNEL, "kmalloc-%d", 1 << i);
2768
2769#ifdef CONFIG_SMP
2770 register_cpu_notifier(&slab_notifier);
4c93c355
CL
2771 kmem_size = offsetof(struct kmem_cache, cpu_slab) +
2772 nr_cpu_ids * sizeof(struct kmem_cache_cpu *);
2773#else
2774 kmem_size = sizeof(struct kmem_cache);
81819f0f
CL
2775#endif
2776
81819f0f
CL
2777
2778 printk(KERN_INFO "SLUB: Genslabs=%d, HWalign=%d, Order=%d-%d, MinObjects=%d,"
4b356be0
CL
2779 " CPUs=%d, Nodes=%d\n",
2780 caches, cache_line_size(),
81819f0f
CL
2781 slub_min_order, slub_max_order, slub_min_objects,
2782 nr_cpu_ids, nr_node_ids);
2783}
2784
2785/*
2786 * Find a mergeable slab cache
2787 */
2788static int slab_unmergeable(struct kmem_cache *s)
2789{
2790 if (slub_nomerge || (s->flags & SLUB_NEVER_MERGE))
2791 return 1;
2792
c59def9f 2793 if (s->ctor)
81819f0f
CL
2794 return 1;
2795
8ffa6875
CL
2796 /*
2797 * We may have set a slab to be unmergeable during bootstrap.
2798 */
2799 if (s->refcount < 0)
2800 return 1;
2801
81819f0f
CL
2802 return 0;
2803}
2804
2805static struct kmem_cache *find_mergeable(size_t size,
ba0268a8 2806 size_t align, unsigned long flags, const char *name,
c59def9f 2807 void (*ctor)(void *, struct kmem_cache *, unsigned long))
81819f0f 2808{
5b95a4ac 2809 struct kmem_cache *s;
81819f0f
CL
2810
2811 if (slub_nomerge || (flags & SLUB_NEVER_MERGE))
2812 return NULL;
2813
c59def9f 2814 if (ctor)
81819f0f
CL
2815 return NULL;
2816
2817 size = ALIGN(size, sizeof(void *));
2818 align = calculate_alignment(flags, align, size);
2819 size = ALIGN(size, align);
ba0268a8 2820 flags = kmem_cache_flags(size, flags, name, NULL);
81819f0f 2821
5b95a4ac 2822 list_for_each_entry(s, &slab_caches, list) {
81819f0f
CL
2823 if (slab_unmergeable(s))
2824 continue;
2825
2826 if (size > s->size)
2827 continue;
2828
ba0268a8 2829 if ((flags & SLUB_MERGE_SAME) != (s->flags & SLUB_MERGE_SAME))
81819f0f
CL
2830 continue;
2831 /*
2832 * Check if alignment is compatible.
2833 * Courtesy of Adrian Drzewiecki
2834 */
2835 if ((s->size & ~(align - 1)) != s->size)
2836 continue;
2837
2838 if (s->size - size >= sizeof(void *))
2839 continue;
2840
2841 return s;
2842 }
2843 return NULL;
2844}
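/*
 * Merge example (illustrative): a kmem_cache_create() request for 60
 * byte objects with no ctor and default flags is rounded up to 64
 * bytes here. An existing kmalloc-64 cache then qualifies: 64 <= 64,
 * the merge relevant flag bits match, the alignment divides s->size
 * and the slack (64 - 64 = 0) stays below sizeof(void *). The caller
 * receives the existing cache with an increased refcount instead of a
 * new one.
 */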
2845
2846struct kmem_cache *kmem_cache_create(const char *name, size_t size,
2847 size_t align, unsigned long flags,
20c2df83 2848 void (*ctor)(void *, struct kmem_cache *, unsigned long))
81819f0f
CL
2849{
2850 struct kmem_cache *s;
2851
2852 down_write(&slub_lock);
ba0268a8 2853 s = find_mergeable(size, align, flags, name, ctor);
81819f0f
CL
2854 if (s) {
2855 s->refcount++;
2856 /*
2857 * Adjust the object sizes so that we clear
2858 * the complete object on kzalloc.
2859 */
2860 s->objsize = max(s->objsize, (int)size);
2861 s->inuse = max_t(int, s->inuse, ALIGN(size, sizeof(void *)));
a0e1d1be 2862 up_write(&slub_lock);
81819f0f
CL
2863 if (sysfs_slab_alias(s, name))
2864 goto err;
a0e1d1be
CL
2865 return s;
2866 }
2867 s = kmalloc(kmem_size, GFP_KERNEL);
2868 if (s) {
2869 if (kmem_cache_open(s, GFP_KERNEL, name,
c59def9f 2870 size, align, flags, ctor)) {
81819f0f 2871 list_add(&s->list, &slab_caches);
a0e1d1be
CL
2872 up_write(&slub_lock);
2873 if (sysfs_slab_add(s))
2874 goto err;
2875 return s;
2876 }
2877 kfree(s);
81819f0f
CL
2878 }
2879 up_write(&slub_lock);
81819f0f
CL
2880
2881err:
81819f0f
CL
2882 if (flags & SLAB_PANIC)
2883 panic("Cannot create slabcache %s\n", name);
2884 else
2885 s = NULL;
2886 return s;
2887}
2888EXPORT_SYMBOL(kmem_cache_create);
2889
81819f0f 2890#ifdef CONFIG_SMP
81819f0f 2891/*
672bba3a
CL
2892 * Use the cpu notifier to ensure that the cpu slabs are flushed when
2893 * necessary.
81819f0f
CL
2894 */
2895static int __cpuinit slab_cpuup_callback(struct notifier_block *nfb,
2896 unsigned long action, void *hcpu)
2897{
2898 long cpu = (long)hcpu;
5b95a4ac
CL
2899 struct kmem_cache *s;
2900 unsigned long flags;
81819f0f
CL
2901
2902 switch (action) {
4c93c355
CL
2903 case CPU_UP_PREPARE:
2904 case CPU_UP_PREPARE_FROZEN:
2905 init_alloc_cpu_cpu(cpu);
2906 down_read(&slub_lock);
2907 list_for_each_entry(s, &slab_caches, list)
2908 s->cpu_slab[cpu] = alloc_kmem_cache_cpu(s, cpu,
2909 GFP_KERNEL);
2910 up_read(&slub_lock);
2911 break;
2912
81819f0f 2913 case CPU_UP_CANCELED:
8bb78442 2914 case CPU_UP_CANCELED_FROZEN:
81819f0f 2915 case CPU_DEAD:
8bb78442 2916 case CPU_DEAD_FROZEN:
5b95a4ac
CL
2917 down_read(&slub_lock);
2918 list_for_each_entry(s, &slab_caches, list) {
4c93c355
CL
2919 struct kmem_cache_cpu *c = get_cpu_slab(s, cpu);
2920
5b95a4ac
CL
2921 local_irq_save(flags);
2922 __flush_cpu_slab(s, cpu);
2923 local_irq_restore(flags);
4c93c355
CL
2924 free_kmem_cache_cpu(c, cpu);
2925 s->cpu_slab[cpu] = NULL;
5b95a4ac
CL
2926 }
2927 up_read(&slub_lock);
81819f0f
CL
2928 break;
2929 default:
2930 break;
2931 }
2932 return NOTIFY_OK;
2933}
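/*
 * Hotplug example (illustrative): when a cpu comes up, CPU_UP_PREPARE
 * allocates a kmem_cache_cpu for that cpu in every existing cache;
 * when it goes down, CPU_DEAD flushes its cpu slab back to the partial
 * lists and returns the structure to the per cpu pool.
 */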
2934
2935static struct notifier_block __cpuinitdata slab_notifier =
2936 { &slab_cpuup_callback, NULL, 0 };
2937
2938#endif
2939
81819f0f
CL
2940void *__kmalloc_track_caller(size_t size, gfp_t gfpflags, void *caller)
2941{
aadb4bc4
CL
2942 struct kmem_cache *s;
2943
2944 if (unlikely(size > PAGE_SIZE / 2))
2945 return (void *)__get_free_pages(gfpflags | __GFP_COMP,
2946 get_order(size));
2947 s = get_slab(size, gfpflags);
81819f0f 2948
2408c550 2949 if (unlikely(ZERO_OR_NULL_PTR(s)))
6cb8f913 2950 return s;
81819f0f 2951
ce15fea8 2952 return slab_alloc(s, gfpflags, -1, caller);
81819f0f
CL
2953}
2954
2955void *__kmalloc_node_track_caller(size_t size, gfp_t gfpflags,
2956 int node, void *caller)
2957{
aadb4bc4
CL
2958 struct kmem_cache *s;
2959
2960 if (unlikely(size > PAGE_SIZE / 2))
2961 return (void *)__get_free_pages(gfpflags | __GFP_COMP,
2962 get_order(size));
2963 s = get_slab(size, gfpflags);
81819f0f 2964
2408c550 2965 if (unlikely(ZERO_OR_NULL_PTR(s)))
6cb8f913 2966 return s;
81819f0f 2967
ce15fea8 2968 return slab_alloc(s, gfpflags, node, caller);
81819f0f
CL
2969}
2970
41ecc55b 2971#if defined(CONFIG_SYSFS) && defined(CONFIG_SLUB_DEBUG)
434e245d
CL
2972static int validate_slab(struct kmem_cache *s, struct page *page,
2973 unsigned long *map)
53e15af0
CL
2974{
2975 void *p;
2976 void *addr = page_address(page);
53e15af0
CL
2977
2978 if (!check_slab(s, page) ||
2979 !on_freelist(s, page, NULL))
2980 return 0;
2981
2982 /* Now we know that a valid freelist exists */
2983 bitmap_zero(map, s->objects);
2984
7656c72b
CL
2985 for_each_free_object(p, s, page->freelist) {
2986 set_bit(slab_index(p, s, addr), map);
53e15af0
CL
2987 if (!check_object(s, page, p, 0))
2988 return 0;
2989 }
2990
7656c72b
CL
2991 for_each_object(p, s, addr)
2992 if (!test_bit(slab_index(p, s, addr), map))
53e15af0
CL
2993 if (!check_object(s, page, p, 1))
2994 return 0;
2995 return 1;
2996}
2997
434e245d
CL
2998static void validate_slab_slab(struct kmem_cache *s, struct page *page,
2999 unsigned long *map)
53e15af0
CL
3000{
3001 if (slab_trylock(page)) {
434e245d 3002 validate_slab(s, page, map);
53e15af0
CL
3003 slab_unlock(page);
3004 } else
3005 printk(KERN_INFO "SLUB %s: Skipped busy slab 0x%p\n",
3006 s->name, page);
3007
3008 if (s->flags & DEBUG_DEFAULT_FLAGS) {
35e5d7ee
CL
3009 if (!SlabDebug(page))
3010 printk(KERN_ERR "SLUB %s: SlabDebug not set "
53e15af0
CL
3011 "on slab 0x%p\n", s->name, page);
3012 } else {
35e5d7ee
CL
3013 if (SlabDebug(page))
3014 printk(KERN_ERR "SLUB %s: SlabDebug set on "
53e15af0
CL
3015 "slab 0x%p\n", s->name, page);
3016 }
3017}
3018
434e245d
CL
3019static int validate_slab_node(struct kmem_cache *s,
3020 struct kmem_cache_node *n, unsigned long *map)
53e15af0
CL
3021{
3022 unsigned long count = 0;
3023 struct page *page;
3024 unsigned long flags;
3025
3026 spin_lock_irqsave(&n->list_lock, flags);
3027
3028 list_for_each_entry(page, &n->partial, lru) {
434e245d 3029 validate_slab_slab(s, page, map);
53e15af0
CL
3030 count++;
3031 }
3032 if (count != n->nr_partial)
3033 printk(KERN_ERR "SLUB %s: %ld partial slabs counted but "
3034 "counter=%ld\n", s->name, count, n->nr_partial);
3035
3036 if (!(s->flags & SLAB_STORE_USER))
3037 goto out;
3038
3039 list_for_each_entry(page, &n->full, lru) {
434e245d 3040 validate_slab_slab(s, page, map);
53e15af0
CL
3041 count++;
3042 }
3043 if (count != atomic_long_read(&n->nr_slabs))
3044 printk(KERN_ERR "SLUB: %s %ld slabs counted but "
3045 "counter=%ld\n", s->name, count,
3046 atomic_long_read(&n->nr_slabs));
3047
3048out:
3049 spin_unlock_irqrestore(&n->list_lock, flags);
3050 return count;
3051}
3052
434e245d 3053static long validate_slab_cache(struct kmem_cache *s)
53e15af0
CL
3054{
3055 int node;
3056 unsigned long count = 0;
434e245d
CL
3057 unsigned long *map = kmalloc(BITS_TO_LONGS(s->objects) *
3058 sizeof(unsigned long), GFP_KERNEL);
3059
3060 if (!map)
3061 return -ENOMEM;
53e15af0
CL
3062
3063 flush_all(s);
f64dc58c 3064 for_each_node_state(node, N_NORMAL_MEMORY) {
53e15af0
CL
3065 struct kmem_cache_node *n = get_node(s, node);
3066
434e245d 3067 count += validate_slab_node(s, n, map);
53e15af0 3068 }
434e245d 3069 kfree(map);
53e15af0
CL
3070 return count;
3071}
3072
b3459709
CL
3073#ifdef SLUB_RESILIENCY_TEST
3074static void resiliency_test(void)
3075{
3076 u8 *p;
3077
3078 printk(KERN_ERR "SLUB resiliency testing\n");
3079 printk(KERN_ERR "-----------------------\n");
3080 printk(KERN_ERR "A. Corruption after allocation\n");
3081
3082 p = kzalloc(16, GFP_KERNEL);
3083 p[16] = 0x12;
3084 printk(KERN_ERR "\n1. kmalloc-16: Clobber Redzone/next pointer"
3085 " 0x12->0x%p\n\n", p + 16);
3086
3087 validate_slab_cache(kmalloc_caches + 4);
3088
3089 /* Hmmm... The next two are dangerous */
3090 p = kzalloc(32, GFP_KERNEL);
3091 p[32 + sizeof(void *)] = 0x34;
3092 printk(KERN_ERR "\n2. kmalloc-32: Clobber next pointer/next slab"
3093 " 0x34 -> -0x%p\n", p);
3094 printk(KERN_ERR "If allocated object is overwritten then not detectable\n\n");
3095
3096 validate_slab_cache(kmalloc_caches + 5);
3097 p = kzalloc(64, GFP_KERNEL);
3098 p += 64 + (get_cycles() & 0xff) * sizeof(void *);
3099 *p = 0x56;
3100 printk(KERN_ERR "\n3. kmalloc-64: corrupting random byte 0x56->0x%p\n",
3101 p);
3102 printk(KERN_ERR "If allocated object is overwritten then not detectable\n\n");
3103 validate_slab_cache(kmalloc_caches + 6);
3104
3105 printk(KERN_ERR "\nB. Corruption after free\n");
3106 p = kzalloc(128, GFP_KERNEL);
3107 kfree(p);
3108 *p = 0x78;
3109 printk(KERN_ERR "1. kmalloc-128: Clobber first word 0x78->0x%p\n\n", p);
3110 validate_slab_cache(kmalloc_caches + 7);
3111
3112 p = kzalloc(256, GFP_KERNEL);
3113 kfree(p);
3114 p[50] = 0x9a;
3115 printk(KERN_ERR "\n2. kmalloc-256: Clobber 50th byte 0x9a->0x%p\n\n", p);
3116 validate_slab_cache(kmalloc_caches + 8);
3117
3118 p = kzalloc(512, GFP_KERNEL);
3119 kfree(p);
3120 p[512] = 0xab;
3121 printk(KERN_ERR "\n3. kmalloc-512: Clobber redzone 0xab->0x%p\n\n", p);
3122 validate_slab_cache(kmalloc_caches + 9);
3123}
3124#else
3125static void resiliency_test(void) {}
3126#endif
3127
88a420e4 3128/*
672bba3a 3129 * Generate lists of code addresses where slabcache objects are allocated
88a420e4
CL
3130 * and freed.
3131 */
3132
3133struct location {
3134 unsigned long count;
3135 void *addr;
45edfa58
CL
3136 long long sum_time;
3137 long min_time;
3138 long max_time;
3139 long min_pid;
3140 long max_pid;
3141 cpumask_t cpus;
3142 nodemask_t nodes;
88a420e4
CL
3143};
3144
3145struct loc_track {
3146 unsigned long max;
3147 unsigned long count;
3148 struct location *loc;
3149};
3150
3151static void free_loc_track(struct loc_track *t)
3152{
3153 if (t->max)
3154 free_pages((unsigned long)t->loc,
3155 get_order(sizeof(struct location) * t->max));
3156}
3157
68dff6a9 3158static int alloc_loc_track(struct loc_track *t, unsigned long max, gfp_t flags)
88a420e4
CL
3159{
3160 struct location *l;
3161 int order;
3162
88a420e4
CL
3163 order = get_order(sizeof(struct location) * max);
3164
68dff6a9 3165 l = (void *)__get_free_pages(flags, order);
88a420e4
CL
3166 if (!l)
3167 return 0;
3168
3169 if (t->count) {
3170 memcpy(l, t->loc, sizeof(struct location) * t->count);
3171 free_loc_track(t);
3172 }
3173 t->max = max;
3174 t->loc = l;
3175 return 1;
3176}
3177
3178static int add_location(struct loc_track *t, struct kmem_cache *s,
45edfa58 3179 const struct track *track)
88a420e4
CL
3180{
3181 long start, end, pos;
3182 struct location *l;
3183 void *caddr;
45edfa58 3184 unsigned long age = jiffies - track->when;
88a420e4
CL
3185
3186 start = -1;
3187 end = t->count;
3188
3189 for ( ; ; ) {
3190 pos = start + (end - start + 1) / 2;
3191
3192 /*
3193 * There is nothing at "end". If we end up there
3194 * we need to insert the new element before end.
3195 */
3196 if (pos == end)
3197 break;
3198
3199 caddr = t->loc[pos].addr;
45edfa58
CL
3200 if (track->addr == caddr) {
3201
3202 l = &t->loc[pos];
3203 l->count++;
3204 if (track->when) {
3205 l->sum_time += age;
3206 if (age < l->min_time)
3207 l->min_time = age;
3208 if (age > l->max_time)
3209 l->max_time = age;
3210
3211 if (track->pid < l->min_pid)
3212 l->min_pid = track->pid;
3213 if (track->pid > l->max_pid)
3214 l->max_pid = track->pid;
3215
3216 cpu_set(track->cpu, l->cpus);
3217 }
3218 node_set(page_to_nid(virt_to_page(track)), l->nodes);
88a420e4
CL
3219 return 1;
3220 }
3221
45edfa58 3222 if (track->addr < caddr)
88a420e4
CL
3223 end = pos;
3224 else
3225 start = pos;
3226 }
3227
3228 /*
672bba3a 3229 * Not found. Insert new tracking element.
88a420e4 3230 */
68dff6a9 3231 if (t->count >= t->max && !alloc_loc_track(t, 2 * t->max, GFP_ATOMIC))
88a420e4
CL
3232 return 0;
3233
3234 l = t->loc + pos;
3235 if (pos < t->count)
3236 memmove(l + 1, l,
3237 (t->count - pos) * sizeof(struct location));
3238 t->count++;
3239 l->count = 1;
45edfa58
CL
3240 l->addr = track->addr;
3241 l->sum_time = age;
3242 l->min_time = age;
3243 l->max_time = age;
3244 l->min_pid = track->pid;
3245 l->max_pid = track->pid;
3246 cpus_clear(l->cpus);
3247 cpu_set(track->cpu, l->cpus);
3248 nodes_clear(l->nodes);
3249 node_set(page_to_nid(virt_to_page(track)), l->nodes);
88a420e4
CL
3250 return 1;
3251}
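/*
 * The bisection in add_location() maintains the invariant that every
 * entry at index <= start has an address below track->addr and every
 * entry at index >= end has an address above it (start = -1 and
 * end = t->count act as virtual sentinels). When the loop exits with
 * pos == end, pos is therefore exactly the insertion point that keeps
 * the array sorted.
 */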
3252
3253static void process_slab(struct loc_track *t, struct kmem_cache *s,
3254 struct page *page, enum track_item alloc)
3255{
3256 void *addr = page_address(page);
7656c72b 3257 DECLARE_BITMAP(map, s->objects);
88a420e4
CL
3258 void *p;
3259
3260 bitmap_zero(map, s->objects);
7656c72b
CL
3261 for_each_free_object(p, s, page->freelist)
3262 set_bit(slab_index(p, s, addr), map);
88a420e4 3263
7656c72b 3264 for_each_object(p, s, addr)
45edfa58
CL
3265 if (!test_bit(slab_index(p, s, addr), map))
3266 add_location(t, s, get_track(s, p, alloc));
88a420e4
CL
3267}
3268
3269static int list_locations(struct kmem_cache *s, char *buf,
3270 enum track_item alloc)
3271{
3272 int n = 0;
3273 unsigned long i;
68dff6a9 3274 struct loc_track t = { 0, 0, NULL };
88a420e4
CL
3275 int node;
3276
68dff6a9
CL
3277 if (!alloc_loc_track(&t, PAGE_SIZE / sizeof(struct location),
3278 GFP_KERNEL))
3279 return sprintf(buf, "Out of memory\n");
88a420e4
CL
3280
3281 /* Push back cpu slabs */
3282 flush_all(s);
3283
f64dc58c 3284 for_each_node_state(node, N_NORMAL_MEMORY) {
88a420e4
CL
3285 struct kmem_cache_node *n = get_node(s, node);
3286 unsigned long flags;
3287 struct page *page;
3288
9e86943b 3289 if (!atomic_long_read(&n->nr_slabs))
88a420e4
CL
3290 continue;
3291
3292 spin_lock_irqsave(&n->list_lock, flags);
3293 list_for_each_entry(page, &n->partial, lru)
3294 process_slab(&t, s, page, alloc);
3295 list_for_each_entry(page, &n->full, lru)
3296 process_slab(&t, s, page, alloc);
3297 spin_unlock_irqrestore(&n->list_lock, flags);
3298 }
3299
3300 for (i = 0; i < t.count; i++) {
45edfa58 3301 struct location *l = &t.loc[i];
88a420e4
CL
3302
3303 if (n > PAGE_SIZE - 100)
3304 break;
45edfa58
CL
3305 n += sprintf(buf + n, "%7ld ", l->count);
3306
3307 if (l->addr)
3308 n += sprint_symbol(buf + n, (unsigned long)l->addr);
88a420e4
CL
3309 else
3310 n += sprintf(buf + n, "<not-available>");
45edfa58
CL
3311
3312 if (l->sum_time != l->min_time) {
3313 unsigned long remainder;
3314
3315 n += sprintf(buf + n, " age=%ld/%ld/%ld",
3316 l->min_time,
3317 div_long_long_rem(l->sum_time, l->count, &remainder),
3318 l->max_time);
3319 } else
3320 n += sprintf(buf + n, " age=%ld",
3321 l->min_time);
3322
3323 if (l->min_pid != l->max_pid)
3324 n += sprintf(buf + n, " pid=%ld-%ld",
3325 l->min_pid, l->max_pid);
3326 else
3327 n += sprintf(buf + n, " pid=%ld",
3328 l->min_pid);
3329
84966343
CL
3330 if (num_online_cpus() > 1 && !cpus_empty(l->cpus) &&
3331 n < PAGE_SIZE - 60) {
45edfa58
CL
3332 n += sprintf(buf + n, " cpus=");
3333 n += cpulist_scnprintf(buf + n, PAGE_SIZE - n - 50,
3334 l->cpus);
3335 }
3336
84966343
CL
3337 if (num_online_nodes() > 1 && !nodes_empty(l->nodes) &&
3338 n < PAGE_SIZE - 60) {
45edfa58
CL
3339 n += sprintf(buf + n, " nodes=");
3340 n += nodelist_scnprintf(buf + n, PAGE_SIZE - n - 50,
3341 l->nodes);
3342 }
3343
88a420e4
CL
3344 n += sprintf(buf + n, "\n");
3345 }
3346
3347 free_loc_track(&t);
3348 if (!t.count)
3349 n += sprintf(buf, "No data\n");
3350 return n;
3351}
3352
81819f0f
CL
3353static unsigned long count_partial(struct kmem_cache_node *n)
3354{
3355 unsigned long flags;
3356 unsigned long x = 0;
3357 struct page *page;
3358
3359 spin_lock_irqsave(&n->list_lock, flags);
3360 list_for_each_entry(page, &n->partial, lru)
3361 x += page->inuse;
3362 spin_unlock_irqrestore(&n->list_lock, flags);
3363 return x;
3364}
3365
3366enum slab_stat_type {
3367 SL_FULL,
3368 SL_PARTIAL,
3369 SL_CPU,
3370 SL_OBJECTS
3371};
3372
3373#define SO_FULL (1 << SL_FULL)
3374#define SO_PARTIAL (1 << SL_PARTIAL)
3375#define SO_CPU (1 << SL_CPU)
3376#define SO_OBJECTS (1 << SL_OBJECTS)
3377
3378static unsigned long slab_objects(struct kmem_cache *s,
3379 char *buf, unsigned long flags)
3380{
3381 unsigned long total = 0;
3382 int cpu;
3383 int node;
3384 int x;
3385 unsigned long *nodes;
3386 unsigned long *per_cpu;
3387
3388 nodes = kzalloc(2 * sizeof(unsigned long) * nr_node_ids, GFP_KERNEL);
3389 per_cpu = nodes + nr_node_ids;
3390
3391 for_each_possible_cpu(cpu) {
dfb4f096 3392 struct page *page;
ee3c72a1 3393 int node;
dfb4f096 3394 struct kmem_cache_cpu *c = get_cpu_slab(s, cpu);
81819f0f 3395
dfb4f096
CL
3396 if (!c)
3397 continue;
3398
3399 page = c->page;
ee3c72a1
CL
3400 node = c->node;
3401 if (node < 0)
3402 continue;
81819f0f 3403 if (page) {
81819f0f
CL
3404 if (flags & SO_CPU) {
3405 int x = 0;
3406
3407 if (flags & SO_OBJECTS)
3408 x = page->inuse;
3409 else
3410 x = 1;
3411 total += x;
ee3c72a1 3412 nodes[node] += x;
81819f0f 3413 }
ee3c72a1 3414 per_cpu[node]++;
81819f0f
CL
3415 }
3416 }
3417
f64dc58c 3418 for_each_node_state(node, N_NORMAL_MEMORY) {
81819f0f
CL
3419 struct kmem_cache_node *n = get_node(s, node);
3420
3421 if (flags & SO_PARTIAL) {
3422 if (flags & SO_OBJECTS)
3423 x = count_partial(n);
3424 else
3425 x = n->nr_partial;
3426 total += x;
3427 nodes[node] += x;
3428 }
3429
3430 if (flags & SO_FULL) {
9e86943b 3431 int full_slabs = atomic_long_read(&n->nr_slabs)
81819f0f
CL
3432 - per_cpu[node]
3433 - n->nr_partial;
3434
3435 if (flags & SO_OBJECTS)
3436 x = full_slabs * s->objects;
3437 else
3438 x = full_slabs;
3439 total += x;
3440 nodes[node] += x;
3441 }
3442 }
3443
3444 x = sprintf(buf, "%lu", total);
3445#ifdef CONFIG_NUMA
f64dc58c 3446 for_each_node_state(node, N_NORMAL_MEMORY)
81819f0f
CL
3447 if (nodes[node])
3448 x += sprintf(buf + x, " N%d=%lu",
3449 node, nodes[node]);
3450#endif
3451 kfree(nodes);
3452 return x + sprintf(buf + x, "\n");
3453}
3454
3455static int any_slab_objects(struct kmem_cache *s)
3456{
3457 int node;
3458 int cpu;
3459
dfb4f096
CL
3460 for_each_possible_cpu(cpu) {
3461 struct kmem_cache_cpu *c = get_cpu_slab(s, cpu);
3462
3463 if (c && c->page)
81819f0f 3464 return 1;
dfb4f096 3465 }
81819f0f 3466
dfb4f096 3467 for_each_online_node(node) {
81819f0f
CL
3468 struct kmem_cache_node *n = get_node(s, node);
3469
dfb4f096
CL
3470 if (!n)
3471 continue;
3472
9e86943b 3473 if (n->nr_partial || atomic_long_read(&n->nr_slabs))
81819f0f
CL
3474 return 1;
3475 }
3476 return 0;
3477}
3478
3479#define to_slab_attr(n) container_of(n, struct slab_attribute, attr)
3480#define to_slab(n) container_of(n, struct kmem_cache, kobj)
3481
3482struct slab_attribute {
3483 struct attribute attr;
3484 ssize_t (*show)(struct kmem_cache *s, char *buf);
3485 ssize_t (*store)(struct kmem_cache *s, const char *x, size_t count);
3486};
3487
3488#define SLAB_ATTR_RO(_name) \
3489 static struct slab_attribute _name##_attr = __ATTR_RO(_name)
3490
3491#define SLAB_ATTR(_name) \
3492 static struct slab_attribute _name##_attr = \
3493 __ATTR(_name, 0644, _name##_show, _name##_store)
3494
81819f0f
CL
3495static ssize_t slab_size_show(struct kmem_cache *s, char *buf)
3496{
3497 return sprintf(buf, "%d\n", s->size);
3498}
3499SLAB_ATTR_RO(slab_size);
3500
3501static ssize_t align_show(struct kmem_cache *s, char *buf)
3502{
3503 return sprintf(buf, "%d\n", s->align);
3504}
3505SLAB_ATTR_RO(align);
3506
3507static ssize_t object_size_show(struct kmem_cache *s, char *buf)
3508{
3509 return sprintf(buf, "%d\n", s->objsize);
3510}
3511SLAB_ATTR_RO(object_size);
3512
3513static ssize_t objs_per_slab_show(struct kmem_cache *s, char *buf)
3514{
3515 return sprintf(buf, "%d\n", s->objects);
3516}
3517SLAB_ATTR_RO(objs_per_slab);
3518
3519static ssize_t order_show(struct kmem_cache *s, char *buf)
3520{
3521 return sprintf(buf, "%d\n", s->order);
3522}
3523SLAB_ATTR_RO(order);
3524
3525static ssize_t ctor_show(struct kmem_cache *s, char *buf)
3526{
3527 if (s->ctor) {
3528 int n = sprint_symbol(buf, (unsigned long)s->ctor);
3529
3530 return n + sprintf(buf + n, "\n");
3531 }
3532 return 0;
3533}
3534SLAB_ATTR_RO(ctor);
3535
81819f0f
CL
3536static ssize_t aliases_show(struct kmem_cache *s, char *buf)
3537{
3538 return sprintf(buf, "%d\n", s->refcount - 1);
3539}
3540SLAB_ATTR_RO(aliases);
3541
3542static ssize_t slabs_show(struct kmem_cache *s, char *buf)
3543{
3544 return slab_objects(s, buf, SO_FULL|SO_PARTIAL|SO_CPU);
3545}
3546SLAB_ATTR_RO(slabs);
3547
3548static ssize_t partial_show(struct kmem_cache *s, char *buf)
3549{
3550 return slab_objects(s, buf, SO_PARTIAL);
3551}
3552SLAB_ATTR_RO(partial);
3553
3554static ssize_t cpu_slabs_show(struct kmem_cache *s, char *buf)
3555{
3556 return slab_objects(s, buf, SO_CPU);
3557}
3558SLAB_ATTR_RO(cpu_slabs);
3559
3560static ssize_t objects_show(struct kmem_cache *s, char *buf)
3561{
3562 return slab_objects(s, buf, SO_FULL|SO_PARTIAL|SO_CPU|SO_OBJECTS);
3563}
3564SLAB_ATTR_RO(objects);
3565
3566static ssize_t sanity_checks_show(struct kmem_cache *s, char *buf)
3567{
3568 return sprintf(buf, "%d\n", !!(s->flags & SLAB_DEBUG_FREE));
3569}
3570
3571static ssize_t sanity_checks_store(struct kmem_cache *s,
3572 const char *buf, size_t length)
3573{
3574 s->flags &= ~SLAB_DEBUG_FREE;
3575 if (buf[0] == '1')
3576 s->flags |= SLAB_DEBUG_FREE;
3577 return length;
3578}
3579SLAB_ATTR(sanity_checks);
3580
3581static ssize_t trace_show(struct kmem_cache *s, char *buf)
3582{
3583 return sprintf(buf, "%d\n", !!(s->flags & SLAB_TRACE));
3584}
3585
3586static ssize_t trace_store(struct kmem_cache *s, const char *buf,
3587 size_t length)
3588{
3589 s->flags &= ~SLAB_TRACE;
3590 if (buf[0] == '1')
3591 s->flags |= SLAB_TRACE;
3592 return length;
3593}
3594SLAB_ATTR(trace);
3595
3596static ssize_t reclaim_account_show(struct kmem_cache *s, char *buf)
3597{
3598 return sprintf(buf, "%d\n", !!(s->flags & SLAB_RECLAIM_ACCOUNT));
3599}
3600
3601static ssize_t reclaim_account_store(struct kmem_cache *s,
3602 const char *buf, size_t length)
3603{
3604 s->flags &= ~SLAB_RECLAIM_ACCOUNT;
3605 if (buf[0] == '1')
3606 s->flags |= SLAB_RECLAIM_ACCOUNT;
3607 return length;
3608}
3609SLAB_ATTR(reclaim_account);
3610
3611static ssize_t hwcache_align_show(struct kmem_cache *s, char *buf)
3612{
5af60839 3613 return sprintf(buf, "%d\n", !!(s->flags & SLAB_HWCACHE_ALIGN));
81819f0f
CL
3614}
3615SLAB_ATTR_RO(hwcache_align);
3616
3617#ifdef CONFIG_ZONE_DMA
3618static ssize_t cache_dma_show(struct kmem_cache *s, char *buf)
3619{
3620 return sprintf(buf, "%d\n", !!(s->flags & SLAB_CACHE_DMA));
3621}
3622SLAB_ATTR_RO(cache_dma);
3623#endif
3624
3625static ssize_t destroy_by_rcu_show(struct kmem_cache *s, char *buf)
3626{
3627 return sprintf(buf, "%d\n", !!(s->flags & SLAB_DESTROY_BY_RCU));
3628}
3629SLAB_ATTR_RO(destroy_by_rcu);
3630
3631static ssize_t red_zone_show(struct kmem_cache *s, char *buf)
3632{
3633 return sprintf(buf, "%d\n", !!(s->flags & SLAB_RED_ZONE));
3634}
3635
3636static ssize_t red_zone_store(struct kmem_cache *s,
3637 const char *buf, size_t length)
3638{
3639 if (any_slab_objects(s))
3640 return -EBUSY;
3641
3642 s->flags &= ~SLAB_RED_ZONE;
3643 if (buf[0] == '1')
3644 s->flags |= SLAB_RED_ZONE;
3645 calculate_sizes(s);
3646 return length;
3647}
3648SLAB_ATTR(red_zone);
3649
3650static ssize_t poison_show(struct kmem_cache *s, char *buf)
3651{
3652 return sprintf(buf, "%d\n", !!(s->flags & SLAB_POISON));
3653}
3654
3655static ssize_t poison_store(struct kmem_cache *s,
3656 const char *buf, size_t length)
3657{
3658 if (any_slab_objects(s))
3659 return -EBUSY;
3660
3661 s->flags &= ~SLAB_POISON;
3662 if (buf[0] == '1')
3663 s->flags |= SLAB_POISON;
3664 calculate_sizes(s);
3665 return length;
3666}
3667SLAB_ATTR(poison);
3668
3669static ssize_t store_user_show(struct kmem_cache *s, char *buf)
3670{
3671 return sprintf(buf, "%d\n", !!(s->flags & SLAB_STORE_USER));
3672}
3673
3674static ssize_t store_user_store(struct kmem_cache *s,
3675 const char *buf, size_t length)
3676{
3677 if (any_slab_objects(s))
3678 return -EBUSY;
3679
3680 s->flags &= ~SLAB_STORE_USER;
3681 if (buf[0] == '1')
3682 s->flags |= SLAB_STORE_USER;
3683 calculate_sizes(s);
3684 return length;
3685}
3686SLAB_ATTR(store_user);
3687
53e15af0
CL
3688static ssize_t validate_show(struct kmem_cache *s, char *buf)
3689{
3690 return 0;
3691}
3692
3693static ssize_t validate_store(struct kmem_cache *s,
3694 const char *buf, size_t length)
3695{
434e245d
CL
3696 int ret = -EINVAL;
3697
3698 if (buf[0] == '1') {
3699 ret = validate_slab_cache(s);
3700 if (ret >= 0)
3701 ret = length;
3702 }
3703 return ret;
53e15af0
CL
3704}
3705SLAB_ATTR(validate);
3706
2086d26a
CL
3707static ssize_t shrink_show(struct kmem_cache *s, char *buf)
3708{
3709 return 0;
3710}
3711
3712static ssize_t shrink_store(struct kmem_cache *s,
3713 const char *buf, size_t length)
3714{
3715 if (buf[0] == '1') {
3716 int rc = kmem_cache_shrink(s);
3717
3718 if (rc)
3719 return rc;
3720 } else
3721 return -EINVAL;
3722 return length;
3723}
3724SLAB_ATTR(shrink);
3725
88a420e4
CL
3726static ssize_t alloc_calls_show(struct kmem_cache *s, char *buf)
3727{
3728 if (!(s->flags & SLAB_STORE_USER))
3729 return -ENOSYS;
3730 return list_locations(s, buf, TRACK_ALLOC);
3731}
3732SLAB_ATTR_RO(alloc_calls);
3733
3734static ssize_t free_calls_show(struct kmem_cache *s, char *buf)
3735{
3736 if (!(s->flags & SLAB_STORE_USER))
3737 return -ENOSYS;
3738 return list_locations(s, buf, TRACK_FREE);
3739}
3740SLAB_ATTR_RO(free_calls);
3741
81819f0f
CL
3742#ifdef CONFIG_NUMA
3743static ssize_t defrag_ratio_show(struct kmem_cache *s, char *buf)
3744{
3745 return sprintf(buf, "%d\n", s->defrag_ratio / 10);
3746}
3747
3748static ssize_t defrag_ratio_store(struct kmem_cache *s,
3749 const char *buf, size_t length)
3750{
3751 int n = simple_strtoul(buf, NULL, 10);
3752
3753 if (n < 100)
3754 s->defrag_ratio = n * 10;
3755 return length;
3756}
3757SLAB_ATTR(defrag_ratio);
3758#endif
3759
3760static struct attribute * slab_attrs[] = {
3761 &slab_size_attr.attr,
3762 &object_size_attr.attr,
3763 &objs_per_slab_attr.attr,
3764 &order_attr.attr,
3765 &objects_attr.attr,
3766 &slabs_attr.attr,
3767 &partial_attr.attr,
3768 &cpu_slabs_attr.attr,
3769 &ctor_attr.attr,
81819f0f
CL
3770 &aliases_attr.attr,
3771 &align_attr.attr,
3772 &sanity_checks_attr.attr,
3773 &trace_attr.attr,
3774 &hwcache_align_attr.attr,
3775 &reclaim_account_attr.attr,
3776 &destroy_by_rcu_attr.attr,
3777 &red_zone_attr.attr,
3778 &poison_attr.attr,
3779 &store_user_attr.attr,
53e15af0 3780 &validate_attr.attr,
2086d26a 3781 &shrink_attr.attr,
88a420e4
CL
3782 &alloc_calls_attr.attr,
3783 &free_calls_attr.attr,
81819f0f
CL
3784#ifdef CONFIG_ZONE_DMA
3785 &cache_dma_attr.attr,
3786#endif
3787#ifdef CONFIG_NUMA
3788 &defrag_ratio_attr.attr,
3789#endif
3790 NULL
3791};
3792
3793static struct attribute_group slab_attr_group = {
3794 .attrs = slab_attrs,
3795};
3796
3797static ssize_t slab_attr_show(struct kobject *kobj,
3798 struct attribute *attr,
3799 char *buf)
3800{
3801 struct slab_attribute *attribute;
3802 struct kmem_cache *s;
3803 int err;
3804
3805 attribute = to_slab_attr(attr);
3806 s = to_slab(kobj);
3807
3808 if (!attribute->show)
3809 return -EIO;
3810
3811 err = attribute->show(s, buf);
3812
3813 return err;
3814}
3815
3816static ssize_t slab_attr_store(struct kobject *kobj,
3817 struct attribute *attr,
3818 const char *buf, size_t len)
3819{
3820 struct slab_attribute *attribute;
3821 struct kmem_cache *s;
3822 int err;
3823
3824 attribute = to_slab_attr(attr);
3825 s = to_slab(kobj);
3826
3827 if (!attribute->store)
3828 return -EIO;
3829
3830 err = attribute->store(s, buf, len);
3831
3832 return err;
3833}
3834
3835static struct sysfs_ops slab_sysfs_ops = {
3836 .show = slab_attr_show,
3837 .store = slab_attr_store,
3838};
3839
3840static struct kobj_type slab_ktype = {
3841 .sysfs_ops = &slab_sysfs_ops,
3842};
3843
3844static int uevent_filter(struct kset *kset, struct kobject *kobj)
3845{
3846 struct kobj_type *ktype = get_ktype(kobj);
3847
3848 if (ktype == &slab_ktype)
3849 return 1;
3850 return 0;
3851}
3852
3853static struct kset_uevent_ops slab_uevent_ops = {
3854 .filter = uevent_filter,
3855};
3856
5af328a5 3857static decl_subsys(slab, &slab_ktype, &slab_uevent_ops);
81819f0f
CL
3858
3859#define ID_STR_LENGTH 64
3860
3861/* Create a unique string id for a slab cache:
3862 * format
3863 * :[flags-]size
3864 */
3865static char *create_unique_id(struct kmem_cache *s)
3866{
3867 char *name = kmalloc(ID_STR_LENGTH, GFP_KERNEL);
3868 char *p = name;
3869
3870 BUG_ON(!name);
3871
3872 *p++ = ':';
3873 /*
3874 * First flags affecting slabcache operations. We will only
3875 * get here for aliasable slabs so we do not need to support
3876 * too many flags. The flags here must cover all flags that
3877 * are matched during merging to guarantee that the id is
3878 * unique.
3879 */
3880 if (s->flags & SLAB_CACHE_DMA)
3881 *p++ = 'd';
3882 if (s->flags & SLAB_RECLAIM_ACCOUNT)
3883 *p++ = 'a';
3884 if (s->flags & SLAB_DEBUG_FREE)
3885 *p++ = 'F';
3886 if (p != name + 1)
3887 *p++ = '-';
3888 p += sprintf(p, "%07d", s->size);
3889 BUG_ON(p > name + ID_STR_LENGTH - 1);
3890 return name;
3891}
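/*
 * Example ids produced above: a DMA cache of size 4096 yields
 * ":d-0004096", a cache with no special flags ":0004096", and one
 * with all three flags ":daF-0004096".
 */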

static int sysfs_slab_add(struct kmem_cache *s)
{
	int err;
	const char *name;
	int unmergeable;

	if (slab_state < SYSFS)
		/* Defer until later */
		return 0;

	unmergeable = slab_unmergeable(s);
	if (unmergeable) {
		/*
		 * Slabcache can never be merged so we can use the name proper.
		 * This is typically the case for debug situations. In that
		 * case we can catch duplicate names easily.
		 */
		sysfs_remove_link(&slab_subsys.kobj, s->name);
		name = s->name;
	} else {
		/*
		 * Create a unique name for the slab as a target
		 * for the symlinks.
		 */
		name = create_unique_id(s);
	}

	kobj_set_kset_s(s, slab_subsys);
	kobject_set_name(&s->kobj, name);
	kobject_init(&s->kobj);
	err = kobject_add(&s->kobj);
	if (err)
		return err;

	err = sysfs_create_group(&s->kobj, &slab_attr_group);
	if (err)
		return err;
	kobject_uevent(&s->kobj, KOBJ_ADD);
	if (!unmergeable) {
		/* Setup first alias */
		sysfs_slab_alias(s, s->name);
		kfree(name);
	}
	return 0;
}
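/*
 * Note: caches created before sysfs is up are accepted silently above
 * (slab_state < SYSFS) and registered later when slab_sysfs_init()
 * walks the slab_caches list.
 */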

static void sysfs_slab_remove(struct kmem_cache *s)
{
	kobject_uevent(&s->kobj, KOBJ_REMOVE);
	kobject_del(&s->kobj);
}

/*
 * Need to buffer aliases during bootup until sysfs becomes
 * available lest we lose that information.
 */
struct saved_alias {
	struct kmem_cache *s;
	const char *name;
	struct saved_alias *next;
};
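/*
 * sysfs_slab_alias() below pushes early aliases onto the alias_list
 * chain; slab_sysfs_init() pops and replays them once sysfs is up,
 * freeing each node as it goes.
 */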

static struct saved_alias *alias_list;

static int sysfs_slab_alias(struct kmem_cache *s, const char *name)
{
	struct saved_alias *al;

	if (slab_state == SYSFS) {
		/*
		 * If we have a leftover link then remove it.
		 */
		sysfs_remove_link(&slab_subsys.kobj, name);
		return sysfs_create_link(&slab_subsys.kobj,
						&s->kobj, name);
	}

	al = kmalloc(sizeof(struct saved_alias), GFP_KERNEL);
	if (!al)
		return -ENOMEM;

	al->s = s;
	al->name = name;
	al->next = alias_list;
	alias_list = al;
	return 0;
}

static int __init slab_sysfs_init(void)
{
	struct kmem_cache *s;
	int err;

	err = subsystem_register(&slab_subsys);
	if (err) {
		printk(KERN_ERR "Cannot register slab subsystem.\n");
		return -ENOSYS;
	}

	slab_state = SYSFS;

	list_for_each_entry(s, &slab_caches, list) {
		err = sysfs_slab_add(s);
		if (err)
			printk(KERN_ERR "SLUB: Unable to add boot slab %s"
				" to sysfs\n", s->name);
	}

	while (alias_list) {
		struct saved_alias *al = alias_list;

		alias_list = alias_list->next;
		err = sysfs_slab_alias(al->s, al->name);
		if (err)
			printk(KERN_ERR "SLUB: Unable to add boot slab alias"
				" %s to sysfs\n", al->name);
		kfree(al);
	}

	resiliency_test();
	return 0;
}

__initcall(slab_sysfs_init);
#endif