revert "mm: vmscan: do not swap anon pages just because free+file is low"
[linux-2.6-block.git] / mm / slub.c
CommitLineData
/*
 * SLUB: A slab allocator that limits cache line use instead of queuing
 * objects in per cpu and per node lists.
 *
 * The allocator synchronizes using per slab locks or atomic operations
 * and only uses a centralized lock to manage a pool of partial slabs.
 *
 * (C) 2007 SGI, Christoph Lameter
 * (C) 2011 Linux Foundation, Christoph Lameter
 */

#include <linux/mm.h>
#include <linux/swap.h> /* struct reclaim_state */
#include <linux/module.h>
#include <linux/bit_spinlock.h>
#include <linux/interrupt.h>
#include <linux/bitops.h>
#include <linux/slab.h>
#include "slab.h"
#include <linux/proc_fs.h>
#include <linux/notifier.h>
#include <linux/seq_file.h>
#include <linux/kmemcheck.h>
#include <linux/cpu.h>
#include <linux/cpuset.h>
#include <linux/mempolicy.h>
#include <linux/ctype.h>
#include <linux/debugobjects.h>
#include <linux/kallsyms.h>
#include <linux/memory.h>
#include <linux/math64.h>
#include <linux/fault-inject.h>
#include <linux/stacktrace.h>
#include <linux/prefetch.h>
#include <linux/memcontrol.h>

#include <trace/events/kmem.h>

#include "internal.h"

/*
 * Lock order:
 *   1. slab_mutex (Global Mutex)
 *   2. node->list_lock
 *   3. slab_lock(page) (Only on some arches and for debugging)
 *
 *   slab_mutex
 *
 *   The role of the slab_mutex is to protect the list of all the slabs
 *   and to synchronize major metadata changes to slab cache structures.
 *
 *   The slab_lock is only used for debugging and on arches that do not
 *   have the ability to do a cmpxchg_double. It only protects the second
 *   double word in the page struct. Meaning
 *	A. page->freelist	-> List of free objects in a page
 *	B. page->counters	-> Counters of objects
 *	C. page->frozen		-> frozen state
 *
 *   If a slab is frozen then it is exempt from list management. It is not
 *   on any list. The processor that froze the slab is the one who can
 *   perform list operations on the page. Other processors may put objects
 *   onto the freelist but the processor that froze the slab is the only
 *   one that can retrieve the objects from the page's freelist.
 *
 *   The list_lock protects the partial and full list on each node and
 *   the partial slab counter. If taken then no new slabs may be added or
 *   removed from the lists nor may the number of partial slabs be modified.
 *   (Note that the total number of slabs is an atomic value that may be
 *   modified without taking the list lock).
 *
 *   The list_lock is a centralized lock and thus we avoid taking it as
 *   much as possible. As long as SLUB does not have to handle partial
 *   slabs, operations can continue without any centralized lock. F.e.
 *   allocating a long series of objects that fill up slabs does not require
 *   the list lock.
 *   Interrupts are disabled during allocation and deallocation in order to
 *   make the slab allocator safe to use in the context of an irq. In addition
 *   interrupts are disabled to ensure that the processor does not change
 *   while handling per_cpu slabs, due to kernel preemption.
 *
 * SLUB assigns one slab for allocation to each processor.
 * Allocations only occur from these slabs called cpu slabs.
 *
 * Slabs with free elements are kept on a partial list and during regular
 * operations no list for full slabs is used. If an object in a full slab is
 * freed then the slab will show up again on the partial lists.
 * We track full slabs for debugging purposes though because otherwise we
 * cannot scan all objects.
 *
 * Slabs are freed when they become empty. Teardown and setup is
 * minimal so we rely on the page allocators per cpu caches for
 * fast frees and allocs.
 *
 * Overloading of page flags that are otherwise used for LRU management.
 *
 * PageActive		The slab is frozen and exempt from list processing.
 *			This means that the slab is dedicated to a purpose
 *			such as satisfying allocations for a specific
 *			processor. Objects may be freed in the slab while
 *			it is frozen but slab_free will then skip the usual
 *			list operations. It is up to the processor holding
 *			the slab to integrate the slab into the slab lists
 *			when the slab is no longer needed.
 *
 *			One use of this flag is to mark slabs that are
 *			used for allocations. Then such a slab becomes a cpu
 *			slab. The cpu slab may be equipped with an additional
 *			freelist that allows lockless access to
 *			free objects in addition to the regular freelist
 *			that requires the slab lock.
 *
 * PageError		Slab requires special handling due to debug
 *			options set. This moves slab handling out of
 *			the fast path and disables lockless freelists.
 */

static inline int kmem_cache_debug(struct kmem_cache *s)
{
#ifdef CONFIG_SLUB_DEBUG
	return unlikely(s->flags & SLAB_DEBUG_FLAGS);
#else
	return 0;
#endif
}

static inline bool kmem_cache_has_cpu_partial(struct kmem_cache *s)
{
#ifdef CONFIG_SLUB_CPU_PARTIAL
	return !kmem_cache_debug(s);
#else
	return false;
#endif
}

/*
 * Issues still to be resolved:
 *
 * - Support PAGE_ALLOC_DEBUG. Should be easy to do.
 *
 * - Variable sizing of the per node arrays
 */

/* Enable to test recovery from slab corruption on boot */
#undef SLUB_RESILIENCY_TEST

/* Enable to log cmpxchg failures */
#undef SLUB_DEBUG_CMPXCHG

/*
 * Minimum number of partial slabs. These will be left on the partial
 * lists even if they are empty. kmem_cache_shrink may reclaim them.
 */
#define MIN_PARTIAL 5

/*
 * Maximum number of desirable partial slabs.
 * The existence of more partial slabs makes kmem_cache_shrink
 * sort the partial list by the number of objects in use.
 */
#define MAX_PARTIAL 10

#define DEBUG_DEFAULT_FLAGS (SLAB_DEBUG_FREE | SLAB_RED_ZONE | \
				SLAB_POISON | SLAB_STORE_USER)

/*
 * Debugging flags that require metadata to be stored in the slab. These get
 * disabled when slub_debug=O is used and a cache's min order increases with
 * metadata.
 */
#define DEBUG_METADATA_FLAGS (SLAB_RED_ZONE | SLAB_POISON | SLAB_STORE_USER)

/*
 * Set of flags that will prevent slab merging
 */
#define SLUB_NEVER_MERGE (SLAB_RED_ZONE | SLAB_POISON | SLAB_STORE_USER | \
		SLAB_TRACE | SLAB_DESTROY_BY_RCU | SLAB_NOLEAKTRACE | \
		SLAB_FAILSLAB)

#define SLUB_MERGE_SAME (SLAB_DEBUG_FREE | SLAB_RECLAIM_ACCOUNT | \
		SLAB_CACHE_DMA | SLAB_NOTRACK)

#define OO_SHIFT	16
#define OO_MASK		((1 << OO_SHIFT) - 1)
#define MAX_OBJS_PER_PAGE	32767 /* since page.objects is u15 */

/* Internal SLUB flags */
#define __OBJECT_POISON		0x80000000UL /* Poison object */
#define __CMPXCHG_DOUBLE	0x40000000UL /* Use cmpxchg_double */

#ifdef CONFIG_SMP
static struct notifier_block slab_notifier;
#endif

/*
 * Tracking user of a slab.
 */
#define TRACK_ADDRS_COUNT 16
struct track {
	unsigned long addr;	/* Called from address */
#ifdef CONFIG_STACKTRACE
	unsigned long addrs[TRACK_ADDRS_COUNT];	/* Called from address */
#endif
	int cpu;		/* Was running on cpu */
	int pid;		/* Pid context */
	unsigned long when;	/* When did the operation occur */
};

enum track_item { TRACK_ALLOC, TRACK_FREE };

#ifdef CONFIG_SYSFS
static int sysfs_slab_add(struct kmem_cache *);
static int sysfs_slab_alias(struct kmem_cache *, const char *);
static void sysfs_slab_remove(struct kmem_cache *);
static void memcg_propagate_slab_attrs(struct kmem_cache *s);
#else
static inline int sysfs_slab_add(struct kmem_cache *s) { return 0; }
static inline int sysfs_slab_alias(struct kmem_cache *s, const char *p)
							{ return 0; }
static inline void sysfs_slab_remove(struct kmem_cache *s) { }

static inline void memcg_propagate_slab_attrs(struct kmem_cache *s) { }
#endif

static inline void stat(const struct kmem_cache *s, enum stat_item si)
{
#ifdef CONFIG_SLUB_STATS
	/*
	 * The rmw is racy on a preemptible kernel but this is acceptable, so
	 * avoid this_cpu_add()'s irq-disable overhead.
	 */
	raw_cpu_inc(s->cpu_slab->stat[si]);
#endif
}

/********************************************************************
 *			Core slab cache functions
 *******************************************************************/

static inline struct kmem_cache_node *get_node(struct kmem_cache *s, int node)
{
	return s->node[node];
}

/* Verify that a pointer has an address that is valid within a slab page */
static inline int check_valid_pointer(struct kmem_cache *s,
				struct page *page, const void *object)
{
	void *base;

	if (!object)
		return 1;

	base = page_address(page);
	if (object < base || object >= base + page->objects * s->size ||
		(object - base) % s->size) {
		return 0;
	}

	return 1;
}

static inline void *get_freepointer(struct kmem_cache *s, void *object)
{
	return *(void **)(object + s->offset);
}

static void prefetch_freepointer(const struct kmem_cache *s, void *object)
{
	prefetch(object + s->offset);
}

static inline void *get_freepointer_safe(struct kmem_cache *s, void *object)
{
	void *p;

#ifdef CONFIG_DEBUG_PAGEALLOC
	probe_kernel_read(&p, (void **)(object + s->offset), sizeof(p));
#else
	p = get_freepointer(s, object);
#endif
	return p;
}

static inline void set_freepointer(struct kmem_cache *s, void *object, void *fp)
{
	*(void **)(object + s->offset) = fp;
}

/* Loop over all objects in a slab */
#define for_each_object(__p, __s, __addr, __objects) \
	for (__p = (__addr); __p < (__addr) + (__objects) * (__s)->size;\
			__p += (__s)->size)

/* Determine object index from a given position */
static inline int slab_index(void *p, struct kmem_cache *s, void *addr)
{
	return (p - addr) / s->size;
}

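/*
 * Worked example (added for illustration, not in the original source): the
 * free pointer of an object lives at (object + s->offset), so with
 * s->offset == 0 the first word of a free object is reused as the link to
 * the next free object.  For a hypothetical cache with s->size == 64 and a
 * slab starting at addr, the object at addr + 192 has slab_index() == 3,
 * and for_each_object() visits addr, addr + 64, addr + 128, ... in address
 * order.
 */
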
static inline size_t slab_ksize(const struct kmem_cache *s)
{
#ifdef CONFIG_SLUB_DEBUG
	/*
	 * Debugging requires use of the padding between object
	 * and whatever may come after it.
	 */
	if (s->flags & (SLAB_RED_ZONE | SLAB_POISON))
		return s->object_size;

#endif
	/*
	 * If we have the need to store the freelist pointer
	 * back there or track user information then we can
	 * only use the space before that information.
	 */
	if (s->flags & (SLAB_DESTROY_BY_RCU | SLAB_STORE_USER))
		return s->inuse;
	/*
	 * Else we can use all the padding etc for the allocation
	 */
	return s->size;
}

static inline int order_objects(int order, unsigned long size, int reserved)
{
	return ((PAGE_SIZE << order) - reserved) / size;
}

static inline struct kmem_cache_order_objects oo_make(int order,
		unsigned long size, int reserved)
{
	struct kmem_cache_order_objects x = {
		(order << OO_SHIFT) + order_objects(order, size, reserved)
	};

	return x;
}

static inline int oo_order(struct kmem_cache_order_objects x)
{
	return x.x >> OO_SHIFT;
}

static inline int oo_objects(struct kmem_cache_order_objects x)
{
	return x.x & OO_MASK;
}

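/*
 * Worked example (illustrative only, not from the original source): oo_make()
 * packs the page order and the object count into one word.  With 4K pages,
 * order = 3 and size = 128 (reserved = 0), order_objects() is
 * (4096 << 3) / 128 = 256, so x.x becomes (3 << OO_SHIFT) + 256;
 * oo_order() then recovers 3 and oo_objects() recovers 256.
 */
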
/*
 * Per slab locking using the pagelock
 */
static __always_inline void slab_lock(struct page *page)
{
	bit_spin_lock(PG_locked, &page->flags);
}

static __always_inline void slab_unlock(struct page *page)
{
	__bit_spin_unlock(PG_locked, &page->flags);
}

static inline void set_page_slub_counters(struct page *page, unsigned long counters_new)
{
	struct page tmp;
	tmp.counters = counters_new;
	/*
	 * page->counters can cover frozen/inuse/objects as well
	 * as page->_count.  If we assign to ->counters directly
	 * we run the risk of losing updates to page->_count, so
	 * be careful and only assign to the fields we need.
	 */
	page->frozen  = tmp.frozen;
	page->inuse   = tmp.inuse;
	page->objects = tmp.objects;
}

/* Interrupts must be disabled (for the fallback code to work right) */
static inline bool __cmpxchg_double_slab(struct kmem_cache *s, struct page *page,
		void *freelist_old, unsigned long counters_old,
		void *freelist_new, unsigned long counters_new,
		const char *n)
{
	VM_BUG_ON(!irqs_disabled());
#if defined(CONFIG_HAVE_CMPXCHG_DOUBLE) && \
    defined(CONFIG_HAVE_ALIGNED_STRUCT_PAGE)
	if (s->flags & __CMPXCHG_DOUBLE) {
		if (cmpxchg_double(&page->freelist, &page->counters,
				   freelist_old, counters_old,
				   freelist_new, counters_new))
			return 1;
	} else
#endif
	{
		slab_lock(page);
		if (page->freelist == freelist_old &&
					page->counters == counters_old) {
			page->freelist = freelist_new;
			set_page_slub_counters(page, counters_new);
			slab_unlock(page);
			return 1;
		}
		slab_unlock(page);
	}

	cpu_relax();
	stat(s, CMPXCHG_DOUBLE_FAIL);

#ifdef SLUB_DEBUG_CMPXCHG
	printk(KERN_INFO "%s %s: cmpxchg double redo ", n, s->name);
#endif

	return 0;
}

static inline bool cmpxchg_double_slab(struct kmem_cache *s, struct page *page,
		void *freelist_old, unsigned long counters_old,
		void *freelist_new, unsigned long counters_new,
		const char *n)
{
#if defined(CONFIG_HAVE_CMPXCHG_DOUBLE) && \
    defined(CONFIG_HAVE_ALIGNED_STRUCT_PAGE)
	if (s->flags & __CMPXCHG_DOUBLE) {
		if (cmpxchg_double(&page->freelist, &page->counters,
				   freelist_old, counters_old,
				   freelist_new, counters_new))
			return 1;
	} else
#endif
	{
		unsigned long flags;

		local_irq_save(flags);
		slab_lock(page);
		if (page->freelist == freelist_old &&
					page->counters == counters_old) {
			page->freelist = freelist_new;
			set_page_slub_counters(page, counters_new);
			slab_unlock(page);
			local_irq_restore(flags);
			return 1;
		}
		slab_unlock(page);
		local_irq_restore(flags);
	}

	cpu_relax();
	stat(s, CMPXCHG_DOUBLE_FAIL);

#ifdef SLUB_DEBUG_CMPXCHG
	printk(KERN_INFO "%s %s: cmpxchg double redo ", n, s->name);
#endif

	return 0;
}

#ifdef CONFIG_SLUB_DEBUG
/*
 * Determine a map of objects in use on a page.
 *
 * Node listlock must be held to guarantee that the page does
 * not vanish from under us.
 */
static void get_map(struct kmem_cache *s, struct page *page, unsigned long *map)
{
	void *p;
	void *addr = page_address(page);

	for (p = page->freelist; p; p = get_freepointer(s, p))
		set_bit(slab_index(p, s, addr), map);
}

/*
 * Debug settings:
 */
#ifdef CONFIG_SLUB_DEBUG_ON
static int slub_debug = DEBUG_DEFAULT_FLAGS;
#else
static int slub_debug;
#endif

static char *slub_debug_slabs;
static int disable_higher_order_debug;

/*
 * Object debugging
 */
static void print_section(char *text, u8 *addr, unsigned int length)
{
	print_hex_dump(KERN_ERR, text, DUMP_PREFIX_ADDRESS, 16, 1, addr,
			length, 1);
}

static struct track *get_track(struct kmem_cache *s, void *object,
	enum track_item alloc)
{
	struct track *p;

	if (s->offset)
		p = object + s->offset + sizeof(void *);
	else
		p = object + s->inuse;

	return p + alloc;
}

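/*
 * Illustrative note (not from the original source): the two struct track
 * records are stored back to back after the object metadata, so get_track()
 * returns p + TRACK_ALLOC (== p) for the allocation record and p + TRACK_FREE
 * (== p + 1, i.e. one struct track further on) for the free record.  With a
 * hypothetical s->offset == 0 and s->inuse == 64, they start at object + 64.
 */
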
static void set_track(struct kmem_cache *s, void *object,
			enum track_item alloc, unsigned long addr)
{
	struct track *p = get_track(s, object, alloc);

	if (addr) {
#ifdef CONFIG_STACKTRACE
		struct stack_trace trace;
		int i;

		trace.nr_entries = 0;
		trace.max_entries = TRACK_ADDRS_COUNT;
		trace.entries = p->addrs;
		trace.skip = 3;
		save_stack_trace(&trace);

		/* See rant in lockdep.c */
		if (trace.nr_entries != 0 &&
		    trace.entries[trace.nr_entries - 1] == ULONG_MAX)
			trace.nr_entries--;

		for (i = trace.nr_entries; i < TRACK_ADDRS_COUNT; i++)
			p->addrs[i] = 0;
#endif
		p->addr = addr;
		p->cpu = smp_processor_id();
		p->pid = current->pid;
		p->when = jiffies;
	} else
		memset(p, 0, sizeof(struct track));
}

static void init_tracking(struct kmem_cache *s, void *object)
{
	if (!(s->flags & SLAB_STORE_USER))
		return;

	set_track(s, object, TRACK_FREE, 0UL);
	set_track(s, object, TRACK_ALLOC, 0UL);
}

static void print_track(const char *s, struct track *t)
{
	if (!t->addr)
		return;

	printk(KERN_ERR "INFO: %s in %pS age=%lu cpu=%u pid=%d\n",
		s, (void *)t->addr, jiffies - t->when, t->cpu, t->pid);
#ifdef CONFIG_STACKTRACE
	{
		int i;
		for (i = 0; i < TRACK_ADDRS_COUNT; i++)
			if (t->addrs[i])
				printk(KERN_ERR "\t%pS\n", (void *)t->addrs[i]);
			else
				break;
	}
#endif
}

static void print_tracking(struct kmem_cache *s, void *object)
{
	if (!(s->flags & SLAB_STORE_USER))
		return;

	print_track("Allocated", get_track(s, object, TRACK_ALLOC));
	print_track("Freed", get_track(s, object, TRACK_FREE));
}

static void print_page_info(struct page *page)
{
	printk(KERN_ERR
	       "INFO: Slab 0x%p objects=%u used=%u fp=0x%p flags=0x%04lx\n",
	       page, page->objects, page->inuse, page->freelist, page->flags);

}

static void slab_bug(struct kmem_cache *s, char *fmt, ...)
{
	va_list args;
	char buf[100];

	va_start(args, fmt);
	vsnprintf(buf, sizeof(buf), fmt, args);
	va_end(args);
	printk(KERN_ERR "========================================"
			"=====================================\n");
	printk(KERN_ERR "BUG %s (%s): %s\n", s->name, print_tainted(), buf);
	printk(KERN_ERR "----------------------------------------"
			"-------------------------------------\n\n");

	add_taint(TAINT_BAD_PAGE, LOCKDEP_NOW_UNRELIABLE);
}

static void slab_fix(struct kmem_cache *s, char *fmt, ...)
{
	va_list args;
	char buf[100];

	va_start(args, fmt);
	vsnprintf(buf, sizeof(buf), fmt, args);
	va_end(args);
	printk(KERN_ERR "FIX %s: %s\n", s->name, buf);
}

static void print_trailer(struct kmem_cache *s, struct page *page, u8 *p)
{
	unsigned int off;	/* Offset of last byte */
	u8 *addr = page_address(page);

	print_tracking(s, p);

	print_page_info(page);

	printk(KERN_ERR "INFO: Object 0x%p @offset=%tu fp=0x%p\n\n",
			p, p - addr, get_freepointer(s, p));

	if (p > addr + 16)
		print_section("Bytes b4 ", p - 16, 16);

	print_section("Object ", p, min_t(unsigned long, s->object_size,
				PAGE_SIZE));
	if (s->flags & SLAB_RED_ZONE)
		print_section("Redzone ", p + s->object_size,
			s->inuse - s->object_size);

	if (s->offset)
		off = s->offset + sizeof(void *);
	else
		off = s->inuse;

	if (s->flags & SLAB_STORE_USER)
		off += 2 * sizeof(struct track);

	if (off != s->size)
		/* Beginning of the filler is the free pointer */
		print_section("Padding ", p + off, s->size - off);

	dump_stack();
}

static void object_err(struct kmem_cache *s, struct page *page,
			u8 *object, char *reason)
{
	slab_bug(s, "%s", reason);
	print_trailer(s, page, object);
}

static void slab_err(struct kmem_cache *s, struct page *page,
			const char *fmt, ...)
{
	va_list args;
	char buf[100];

	va_start(args, fmt);
	vsnprintf(buf, sizeof(buf), fmt, args);
	va_end(args);
	slab_bug(s, "%s", buf);
	print_page_info(page);
	dump_stack();
}

static void init_object(struct kmem_cache *s, void *object, u8 val)
{
	u8 *p = object;

	if (s->flags & __OBJECT_POISON) {
		memset(p, POISON_FREE, s->object_size - 1);
		p[s->object_size - 1] = POISON_END;
	}

	if (s->flags & SLAB_RED_ZONE)
		memset(p + s->object_size, val, s->inuse - s->object_size);
}

static void restore_bytes(struct kmem_cache *s, char *message, u8 data,
						void *from, void *to)
{
	slab_fix(s, "Restoring 0x%p-0x%p=0x%x\n", from, to - 1, data);
	memset(from, data, to - from);
}

static int check_bytes_and_report(struct kmem_cache *s, struct page *page,
			u8 *object, char *what,
			u8 *start, unsigned int value, unsigned int bytes)
{
	u8 *fault;
	u8 *end;

	fault = memchr_inv(start, value, bytes);
	if (!fault)
		return 1;

	end = start + bytes;
	while (end > fault && end[-1] == value)
		end--;

	slab_bug(s, "%s overwritten", what);
	printk(KERN_ERR "INFO: 0x%p-0x%p. First byte 0x%x instead of 0x%x\n",
					fault, end - 1, fault[0], value);
	print_trailer(s, page, object);

	restore_bytes(s, what, value, fault, end);
	return 0;
}

/*
 * Object layout:
 *
 * object address
 * 	Bytes of the object to be managed.
 * 	If the freepointer may overlay the object then the free
 * 	pointer is the first word of the object.
 *
 * 	Poisoning uses 0x6b (POISON_FREE) and the last byte is
 * 	0xa5 (POISON_END)
 *
 * object + s->object_size
 * 	Padding to reach word boundary. This is also used for Redzoning.
 * 	Padding is extended by another word if Redzoning is enabled and
 * 	object_size == inuse.
 *
 * 	We fill with 0xbb (RED_INACTIVE) for inactive objects and with
 * 	0xcc (RED_ACTIVE) for objects in use.
 *
 * object + s->inuse
 * 	Meta data starts here.
 *
 * 	A. Free pointer (if we cannot overwrite object on free)
 * 	B. Tracking data for SLAB_STORE_USER
 * 	C. Padding to reach required alignment boundary or at minimum
 * 		one word if debugging is on to be able to detect writes
 * 		before the word boundary.
 *
 *	Padding is done using 0x5a (POISON_INUSE)
 *
 * object + s->size
 * 	Nothing is used beyond s->size.
 *
 * If slabcaches are merged then the object_size and inuse boundaries are mostly
 * ignored. And therefore no slab options that rely on these boundaries
 * may be used with merged slabcaches.
 */

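/*
 * Illustrative example (not from the original source): on a 64-bit kernel, a
 * hypothetical cache with object_size = 40, SLAB_RED_ZONE and SLAB_STORE_USER
 * would lay out roughly as
 *
 *	0..39	object payload (poisoned with 0x6b/0xa5 when free)
 *	40..47	red zone word (0xbb inactive / 0xcc active), so inuse = 48
 *	48..55	free pointer (cannot overlay the object when debugging)
 *	56..	two struct track records (alloc/free), then 0x5a padding
 *		up to s->size
 *
 * The exact offsets are computed in calculate_sizes(); this sketch only
 * illustrates the ordering described in the comment above.
 */
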
static int check_pad_bytes(struct kmem_cache *s, struct page *page, u8 *p)
{
	unsigned long off = s->inuse;	/* The end of info */

	if (s->offset)
		/* Freepointer is placed after the object. */
		off += sizeof(void *);

	if (s->flags & SLAB_STORE_USER)
		/* We also have user information there */
		off += 2 * sizeof(struct track);

	if (s->size == off)
		return 1;

	return check_bytes_and_report(s, page, p, "Object padding",
				p + off, POISON_INUSE, s->size - off);
}

/* Check the pad bytes at the end of a slab page */
static int slab_pad_check(struct kmem_cache *s, struct page *page)
{
	u8 *start;
	u8 *fault;
	u8 *end;
	int length;
	int remainder;

	if (!(s->flags & SLAB_POISON))
		return 1;

	start = page_address(page);
	length = (PAGE_SIZE << compound_order(page)) - s->reserved;
	end = start + length;
	remainder = length % s->size;
	if (!remainder)
		return 1;

	fault = memchr_inv(end - remainder, POISON_INUSE, remainder);
	if (!fault)
		return 1;
	while (end > fault && end[-1] == POISON_INUSE)
		end--;

	slab_err(s, page, "Padding overwritten. 0x%p-0x%p", fault, end - 1);
	print_section("Padding ", end - remainder, remainder);

	restore_bytes(s, "slab padding", POISON_INUSE, end - remainder, end);
	return 0;
}

static int check_object(struct kmem_cache *s, struct page *page,
					void *object, u8 val)
{
	u8 *p = object;
	u8 *endobject = object + s->object_size;

	if (s->flags & SLAB_RED_ZONE) {
		if (!check_bytes_and_report(s, page, object, "Redzone",
			endobject, val, s->inuse - s->object_size))
			return 0;
	} else {
		if ((s->flags & SLAB_POISON) && s->object_size < s->inuse) {
			check_bytes_and_report(s, page, p, "Alignment padding",
				endobject, POISON_INUSE,
				s->inuse - s->object_size);
		}
	}

	if (s->flags & SLAB_POISON) {
		if (val != SLUB_RED_ACTIVE && (s->flags & __OBJECT_POISON) &&
			(!check_bytes_and_report(s, page, p, "Poison", p,
					POISON_FREE, s->object_size - 1) ||
			 !check_bytes_and_report(s, page, p, "Poison",
				p + s->object_size - 1, POISON_END, 1)))
			return 0;
		/*
		 * check_pad_bytes cleans up on its own.
		 */
		check_pad_bytes(s, page, p);
	}

	if (!s->offset && val == SLUB_RED_ACTIVE)
		/*
		 * Object and freepointer overlap. Cannot check
		 * freepointer while object is allocated.
		 */
		return 1;

	/* Check free pointer validity */
	if (!check_valid_pointer(s, page, get_freepointer(s, p))) {
		object_err(s, page, p, "Freepointer corrupt");
		/*
		 * No choice but to zap it and thus lose the remainder
		 * of the free objects in this slab. May cause
		 * another error because the object count is now wrong.
		 */
		set_freepointer(s, p, NULL);
		return 0;
	}
	return 1;
}

static int check_slab(struct kmem_cache *s, struct page *page)
{
	int maxobj;

	VM_BUG_ON(!irqs_disabled());

	if (!PageSlab(page)) {
		slab_err(s, page, "Not a valid slab page");
		return 0;
	}

	maxobj = order_objects(compound_order(page), s->size, s->reserved);
	if (page->objects > maxobj) {
		slab_err(s, page, "objects %u > max %u",
			s->name, page->objects, maxobj);
		return 0;
	}
	if (page->inuse > page->objects) {
		slab_err(s, page, "inuse %u > max %u",
			s->name, page->inuse, page->objects);
		return 0;
	}
	/* Slab_pad_check fixes things up after itself */
	slab_pad_check(s, page);
	return 1;
}

/*
 * Determine if a certain object on a page is on the freelist. Must hold the
 * slab lock to guarantee that the chains are in a consistent state.
 */
static int on_freelist(struct kmem_cache *s, struct page *page, void *search)
{
	int nr = 0;
	void *fp;
	void *object = NULL;
	unsigned long max_objects;

	fp = page->freelist;
	while (fp && nr <= page->objects) {
		if (fp == search)
			return 1;
		if (!check_valid_pointer(s, page, fp)) {
			if (object) {
				object_err(s, page, object,
					"Freechain corrupt");
				set_freepointer(s, object, NULL);
			} else {
				slab_err(s, page, "Freepointer corrupt");
				page->freelist = NULL;
				page->inuse = page->objects;
				slab_fix(s, "Freelist cleared");
				return 0;
			}
			break;
		}
		object = fp;
		fp = get_freepointer(s, object);
		nr++;
	}

	max_objects = order_objects(compound_order(page), s->size, s->reserved);
	if (max_objects > MAX_OBJS_PER_PAGE)
		max_objects = MAX_OBJS_PER_PAGE;

	if (page->objects != max_objects) {
		slab_err(s, page, "Wrong number of objects. Found %d but "
			"should be %d", page->objects, max_objects);
		page->objects = max_objects;
		slab_fix(s, "Number of objects adjusted.");
	}
	if (page->inuse != page->objects - nr) {
		slab_err(s, page, "Wrong object count. Counter is %d but "
			"counted were %d", page->inuse, page->objects - nr);
		page->inuse = page->objects - nr;
		slab_fix(s, "Object count adjusted.");
	}
	return search == NULL;
}

static void trace(struct kmem_cache *s, struct page *page, void *object,
								int alloc)
{
	if (s->flags & SLAB_TRACE) {
		printk(KERN_INFO "TRACE %s %s 0x%p inuse=%d fp=0x%p\n",
			s->name,
			alloc ? "alloc" : "free",
			object, page->inuse,
			page->freelist);

		if (!alloc)
			print_section("Object ", (void *)object,
					s->object_size);

		dump_stack();
	}
}

/*
 * Hooks for other subsystems that check memory allocations. In a typical
 * production configuration these hooks all should produce no code at all.
 */
static inline void kmalloc_large_node_hook(void *ptr, size_t size, gfp_t flags)
{
	kmemleak_alloc(ptr, size, 1, flags);
}

static inline void kfree_hook(const void *x)
{
	kmemleak_free(x);
}

static inline int slab_pre_alloc_hook(struct kmem_cache *s, gfp_t flags)
{
	flags &= gfp_allowed_mask;
	lockdep_trace_alloc(flags);
	might_sleep_if(flags & __GFP_WAIT);

	return should_failslab(s->object_size, flags, s->flags);
}

static inline void slab_post_alloc_hook(struct kmem_cache *s,
					gfp_t flags, void *object)
{
	flags &= gfp_allowed_mask;
	kmemcheck_slab_alloc(s, flags, object, slab_ksize(s));
	kmemleak_alloc_recursive(object, s->object_size, 1, s->flags, flags);
}

static inline void slab_free_hook(struct kmem_cache *s, void *x)
{
	kmemleak_free_recursive(x, s->flags);

	/*
	 * Trouble is that we may no longer disable interrupts in the fast
	 * path, so in order to make the debug calls that expect irqs to be
	 * disabled we need to disable interrupts temporarily.
	 */
#if defined(CONFIG_KMEMCHECK) || defined(CONFIG_LOCKDEP)
	{
		unsigned long flags;

		local_irq_save(flags);
		kmemcheck_slab_free(s, x, s->object_size);
		debug_check_no_locks_freed(x, s->object_size);
		local_irq_restore(flags);
	}
#endif
	if (!(s->flags & SLAB_DEBUG_OBJECTS))
		debug_check_no_obj_freed(x, s->object_size);
}

/*
 * Tracking of fully allocated slabs for debugging purposes.
 */
static void add_full(struct kmem_cache *s,
	struct kmem_cache_node *n, struct page *page)
{
	if (!(s->flags & SLAB_STORE_USER))
		return;

	lockdep_assert_held(&n->list_lock);
	list_add(&page->lru, &n->full);
}

static void remove_full(struct kmem_cache *s, struct kmem_cache_node *n, struct page *page)
{
	if (!(s->flags & SLAB_STORE_USER))
		return;

	lockdep_assert_held(&n->list_lock);
	list_del(&page->lru);
}

/* Tracking of the number of slabs for debugging purposes */
static inline unsigned long slabs_node(struct kmem_cache *s, int node)
{
	struct kmem_cache_node *n = get_node(s, node);

	return atomic_long_read(&n->nr_slabs);
}

static inline unsigned long node_nr_slabs(struct kmem_cache_node *n)
{
	return atomic_long_read(&n->nr_slabs);
}

static inline void inc_slabs_node(struct kmem_cache *s, int node, int objects)
{
	struct kmem_cache_node *n = get_node(s, node);

	/*
	 * May be called early in order to allocate a slab for the
	 * kmem_cache_node structure. Solve the chicken-egg
	 * dilemma by deferring the increment of the count during
	 * bootstrap (see early_kmem_cache_node_alloc).
	 */
	if (likely(n)) {
		atomic_long_inc(&n->nr_slabs);
		atomic_long_add(objects, &n->total_objects);
	}
}
static inline void dec_slabs_node(struct kmem_cache *s, int node, int objects)
{
	struct kmem_cache_node *n = get_node(s, node);

	atomic_long_dec(&n->nr_slabs);
	atomic_long_sub(objects, &n->total_objects);
}

/* Object debug checks for alloc/free paths */
static void setup_object_debug(struct kmem_cache *s, struct page *page,
								void *object)
{
	if (!(s->flags & (SLAB_STORE_USER|SLAB_RED_ZONE|__OBJECT_POISON)))
		return;

	init_object(s, object, SLUB_RED_INACTIVE);
	init_tracking(s, object);
}

static noinline int alloc_debug_processing(struct kmem_cache *s,
					struct page *page,
					void *object, unsigned long addr)
{
	if (!check_slab(s, page))
		goto bad;

	if (!check_valid_pointer(s, page, object)) {
		object_err(s, page, object, "Freelist Pointer check fails");
		goto bad;
	}

	if (!check_object(s, page, object, SLUB_RED_INACTIVE))
		goto bad;

	/* Success. Perform special debug activities for allocs */
	if (s->flags & SLAB_STORE_USER)
		set_track(s, object, TRACK_ALLOC, addr);
	trace(s, page, object, 1);
	init_object(s, object, SLUB_RED_ACTIVE);
	return 1;

bad:
	if (PageSlab(page)) {
		/*
		 * If this is a slab page then let's do the best we can
		 * to avoid issues in the future. Marking all objects
		 * as used avoids touching the remaining objects.
		 */
		slab_fix(s, "Marking all objects used");
		page->inuse = page->objects;
		page->freelist = NULL;
	}
	return 0;
}

static noinline struct kmem_cache_node *free_debug_processing(
	struct kmem_cache *s, struct page *page, void *object,
	unsigned long addr, unsigned long *flags)
{
	struct kmem_cache_node *n = get_node(s, page_to_nid(page));

	spin_lock_irqsave(&n->list_lock, *flags);
	slab_lock(page);

	if (!check_slab(s, page))
		goto fail;

	if (!check_valid_pointer(s, page, object)) {
		slab_err(s, page, "Invalid object pointer 0x%p", object);
		goto fail;
	}

	if (on_freelist(s, page, object)) {
		object_err(s, page, object, "Object already free");
		goto fail;
	}

	if (!check_object(s, page, object, SLUB_RED_ACTIVE))
		goto out;

	if (unlikely(s != page->slab_cache)) {
		if (!PageSlab(page)) {
			slab_err(s, page, "Attempt to free object(0x%p) "
				"outside of slab", object);
		} else if (!page->slab_cache) {
			printk(KERN_ERR
				"SLUB <none>: no slab for object 0x%p.\n",
						object);
			dump_stack();
		} else
			object_err(s, page, object,
					"page slab pointer corrupt.");
		goto fail;
	}

	if (s->flags & SLAB_STORE_USER)
		set_track(s, object, TRACK_FREE, addr);
	trace(s, page, object, 0);
	init_object(s, object, SLUB_RED_INACTIVE);
out:
	slab_unlock(page);
	/*
	 * Keep node_lock to preserve integrity
	 * until the object is actually freed
	 */
	return n;

fail:
	slab_unlock(page);
	spin_unlock_irqrestore(&n->list_lock, *flags);
	slab_fix(s, "Object at 0x%p not freed", object);
	return NULL;
}

static int __init setup_slub_debug(char *str)
{
	slub_debug = DEBUG_DEFAULT_FLAGS;
	if (*str++ != '=' || !*str)
		/*
		 * No options specified. Switch on full debugging.
		 */
		goto out;

	if (*str == ',')
		/*
		 * No options but restriction on slabs. This means full
		 * debugging for slabs matching a pattern.
		 */
		goto check_slabs;

	if (tolower(*str) == 'o') {
		/*
		 * Avoid enabling debugging on caches if their minimum order
		 * would increase as a result.
		 */
		disable_higher_order_debug = 1;
		goto out;
	}

	slub_debug = 0;
	if (*str == '-')
		/*
		 * Switch off all debugging measures.
		 */
		goto out;

	/*
	 * Determine which debug features should be switched on
	 */
	for (; *str && *str != ','; str++) {
		switch (tolower(*str)) {
		case 'f':
			slub_debug |= SLAB_DEBUG_FREE;
			break;
		case 'z':
			slub_debug |= SLAB_RED_ZONE;
			break;
		case 'p':
			slub_debug |= SLAB_POISON;
			break;
		case 'u':
			slub_debug |= SLAB_STORE_USER;
			break;
		case 't':
			slub_debug |= SLAB_TRACE;
			break;
		case 'a':
			slub_debug |= SLAB_FAILSLAB;
			break;
		default:
			printk(KERN_ERR "slub_debug option '%c' "
				"unknown. skipped\n", *str);
		}
	}

check_slabs:
	if (*str == ',')
		slub_debug_slabs = str + 1;
out:
	return 1;
}

__setup("slub_debug", setup_slub_debug);

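/*
 * Usage sketch (illustrative, based on the option parsing above): booting with
 *
 *	slub_debug=FZP,kmalloc-64
 *
 * enables sanity checks (F -> SLAB_DEBUG_FREE), red zoning (Z) and poisoning
 * (P) only for caches whose name starts with "kmalloc-64", a bare
 * "slub_debug" turns on DEBUG_DEFAULT_FLAGS for all caches, and
 * "slub_debug=O" sets disable_higher_order_debug so caches whose minimum
 * order would grow because of the debug metadata are skipped.
 */
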
static unsigned long kmem_cache_flags(unsigned long object_size,
	unsigned long flags, const char *name,
	void (*ctor)(void *))
{
	/*
	 * Enable debugging if selected on the kernel commandline.
	 */
	if (slub_debug && (!slub_debug_slabs || (name &&
		!strncmp(slub_debug_slabs, name, strlen(slub_debug_slabs)))))
		flags |= slub_debug;

	return flags;
}
#else
static inline void setup_object_debug(struct kmem_cache *s,
			struct page *page, void *object) {}

static inline int alloc_debug_processing(struct kmem_cache *s,
	struct page *page, void *object, unsigned long addr) { return 0; }

static inline struct kmem_cache_node *free_debug_processing(
	struct kmem_cache *s, struct page *page, void *object,
	unsigned long addr, unsigned long *flags) { return NULL; }

static inline int slab_pad_check(struct kmem_cache *s, struct page *page)
			{ return 1; }
static inline int check_object(struct kmem_cache *s, struct page *page,
			void *object, u8 val) { return 1; }
static inline void add_full(struct kmem_cache *s, struct kmem_cache_node *n,
					struct page *page) {}
static inline void remove_full(struct kmem_cache *s, struct kmem_cache_node *n,
					struct page *page) {}
static inline unsigned long kmem_cache_flags(unsigned long object_size,
	unsigned long flags, const char *name,
	void (*ctor)(void *))
{
	return flags;
}
#define slub_debug 0

#define disable_higher_order_debug 0

static inline unsigned long slabs_node(struct kmem_cache *s, int node)
							{ return 0; }
static inline unsigned long node_nr_slabs(struct kmem_cache_node *n)
							{ return 0; }
static inline void inc_slabs_node(struct kmem_cache *s, int node,
							int objects) {}
static inline void dec_slabs_node(struct kmem_cache *s, int node,
							int objects) {}

static inline void kmalloc_large_node_hook(void *ptr, size_t size, gfp_t flags)
{
	kmemleak_alloc(ptr, size, 1, flags);
}

static inline void kfree_hook(const void *x)
{
	kmemleak_free(x);
}

static inline int slab_pre_alloc_hook(struct kmem_cache *s, gfp_t flags)
							{ return 0; }

static inline void slab_post_alloc_hook(struct kmem_cache *s, gfp_t flags,
		void *object)
{
	kmemleak_alloc_recursive(object, s->object_size, 1, s->flags,
		flags & gfp_allowed_mask);
}

static inline void slab_free_hook(struct kmem_cache *s, void *x)
{
	kmemleak_free_recursive(x, s->flags);
}

#endif /* CONFIG_SLUB_DEBUG */

/*
 * Slab allocation and freeing
 */
static inline struct page *alloc_slab_page(gfp_t flags, int node,
					struct kmem_cache_order_objects oo)
{
	int order = oo_order(oo);

	flags |= __GFP_NOTRACK;

	if (node == NUMA_NO_NODE)
		return alloc_pages(flags, order);
	else
		return alloc_pages_exact_node(node, flags, order);
}

static struct page *allocate_slab(struct kmem_cache *s, gfp_t flags, int node)
{
	struct page *page;
	struct kmem_cache_order_objects oo = s->oo;
	gfp_t alloc_gfp;

	flags &= gfp_allowed_mask;

	if (flags & __GFP_WAIT)
		local_irq_enable();

	flags |= s->allocflags;

	/*
	 * Let the initial higher-order allocation fail under memory pressure
	 * so we fall-back to the minimum order allocation.
	 */
	alloc_gfp = (flags | __GFP_NOWARN | __GFP_NORETRY) & ~__GFP_NOFAIL;

	page = alloc_slab_page(alloc_gfp, node, oo);
	if (unlikely(!page)) {
		oo = s->min;
		alloc_gfp = flags;
		/*
		 * Allocation may have failed due to fragmentation.
		 * Try a lower order alloc if possible
		 */
		page = alloc_slab_page(alloc_gfp, node, oo);

		if (page)
			stat(s, ORDER_FALLBACK);
	}

	if (kmemcheck_enabled && page
		&& !(s->flags & (SLAB_NOTRACK | DEBUG_DEFAULT_FLAGS))) {
		int pages = 1 << oo_order(oo);

		kmemcheck_alloc_shadow(page, oo_order(oo), alloc_gfp, node);

		/*
		 * Objects from caches that have a constructor don't get
		 * cleared when they're allocated, so we need to do it here.
		 */
		if (s->ctor)
			kmemcheck_mark_uninitialized_pages(page, pages);
		else
			kmemcheck_mark_unallocated_pages(page, pages);
	}

	if (flags & __GFP_WAIT)
		local_irq_disable();
	if (!page)
		return NULL;

	page->objects = oo_objects(oo);
	mod_zone_page_state(page_zone(page),
		(s->flags & SLAB_RECLAIM_ACCOUNT) ?
		NR_SLAB_RECLAIMABLE : NR_SLAB_UNRECLAIMABLE,
		1 << oo_order(oo));

	return page;
}

static void setup_object(struct kmem_cache *s, struct page *page,
				void *object)
{
	setup_object_debug(s, page, object);
	if (unlikely(s->ctor))
		s->ctor(object);
}

static struct page *new_slab(struct kmem_cache *s, gfp_t flags, int node)
{
	struct page *page;
	void *start;
	void *last;
	void *p;
	int order;

	BUG_ON(flags & GFP_SLAB_BUG_MASK);

	page = allocate_slab(s,
		flags & (GFP_RECLAIM_MASK | GFP_CONSTRAINT_MASK), node);
	if (!page)
		goto out;

	order = compound_order(page);
	inc_slabs_node(s, page_to_nid(page), page->objects);
	memcg_bind_pages(s, order);
	page->slab_cache = s;
	__SetPageSlab(page);
	if (page->pfmemalloc)
		SetPageSlabPfmemalloc(page);

	start = page_address(page);

	if (unlikely(s->flags & SLAB_POISON))
		memset(start, POISON_INUSE, PAGE_SIZE << order);

	last = start;
	for_each_object(p, s, start, page->objects) {
		setup_object(s, page, last);
		set_freepointer(s, last, p);
		last = p;
	}
	setup_object(s, page, last);
	set_freepointer(s, last, NULL);

	page->freelist = start;
	page->inuse = page->objects;
	page->frozen = 1;
out:
	return page;
}

static void __free_slab(struct kmem_cache *s, struct page *page)
{
	int order = compound_order(page);
	int pages = 1 << order;

	if (kmem_cache_debug(s)) {
		void *p;

		slab_pad_check(s, page);
		for_each_object(p, s, page_address(page),
						page->objects)
			check_object(s, page, p, SLUB_RED_INACTIVE);
	}

	kmemcheck_free_shadow(page, compound_order(page));

	mod_zone_page_state(page_zone(page),
		(s->flags & SLAB_RECLAIM_ACCOUNT) ?
		NR_SLAB_RECLAIMABLE : NR_SLAB_UNRECLAIMABLE,
		-pages);

	__ClearPageSlabPfmemalloc(page);
	__ClearPageSlab(page);

	memcg_release_pages(s, order);
	page_mapcount_reset(page);
	if (current->reclaim_state)
		current->reclaim_state->reclaimed_slab += pages;
	__free_memcg_kmem_pages(page, order);
}

#define need_reserve_slab_rcu						\
	(sizeof(((struct page *)NULL)->lru) < sizeof(struct rcu_head))

static void rcu_free_slab(struct rcu_head *h)
{
	struct page *page;

	if (need_reserve_slab_rcu)
		page = virt_to_head_page(h);
	else
		page = container_of((struct list_head *)h, struct page, lru);

	__free_slab(page->slab_cache, page);
}

static void free_slab(struct kmem_cache *s, struct page *page)
{
	if (unlikely(s->flags & SLAB_DESTROY_BY_RCU)) {
		struct rcu_head *head;

		if (need_reserve_slab_rcu) {
			int order = compound_order(page);
			int offset = (PAGE_SIZE << order) - s->reserved;

			VM_BUG_ON(s->reserved != sizeof(*head));
			head = page_address(page) + offset;
		} else {
			/*
			 * RCU free overloads the RCU head over the LRU
			 */
			head = (void *)&page->lru;
		}

		call_rcu(head, rcu_free_slab);
	} else
		__free_slab(s, page);
}

static void discard_slab(struct kmem_cache *s, struct page *page)
{
	dec_slabs_node(s, page_to_nid(page), page->objects);
	free_slab(s, page);
}

/*
 * Management of partially allocated slabs.
 */
static inline void
__add_partial(struct kmem_cache_node *n, struct page *page, int tail)
{
	n->nr_partial++;
	if (tail == DEACTIVATE_TO_TAIL)
		list_add_tail(&page->lru, &n->partial);
	else
		list_add(&page->lru, &n->partial);
}

static inline void add_partial(struct kmem_cache_node *n,
				struct page *page, int tail)
{
	lockdep_assert_held(&n->list_lock);
	__add_partial(n, page, tail);
}

static inline void
__remove_partial(struct kmem_cache_node *n, struct page *page)
{
	list_del(&page->lru);
	n->nr_partial--;
}

static inline void remove_partial(struct kmem_cache_node *n,
					struct page *page)
{
	lockdep_assert_held(&n->list_lock);
	__remove_partial(n, page);
}

/*
 * Remove slab from the partial list, freeze it and
 * return the pointer to the freelist.
 *
 * Returns a list of objects or NULL if it fails.
 */
static inline void *acquire_slab(struct kmem_cache *s,
		struct kmem_cache_node *n, struct page *page,
		int mode, int *objects)
{
	void *freelist;
	unsigned long counters;
	struct page new;

	lockdep_assert_held(&n->list_lock);

	/*
	 * Zap the freelist and set the frozen bit.
	 * The old freelist is the list of objects for the
	 * per cpu allocation list.
	 */
	freelist = page->freelist;
	counters = page->counters;
	new.counters = counters;
	*objects = new.objects - new.inuse;
	if (mode) {
		new.inuse = page->objects;
		new.freelist = NULL;
	} else {
		new.freelist = freelist;
	}

	VM_BUG_ON(new.frozen);
	new.frozen = 1;

	if (!__cmpxchg_double_slab(s, page,
			freelist, counters,
			new.freelist, new.counters,
			"acquire_slab"))
		return NULL;

	remove_partial(n, page);
	WARN_ON(!freelist);
	return freelist;
}

static void put_cpu_partial(struct kmem_cache *s, struct page *page, int drain);
static inline bool pfmemalloc_match(struct page *page, gfp_t gfpflags);

/*
 * Try to allocate a partial slab from a specific node.
 */
static void *get_partial_node(struct kmem_cache *s, struct kmem_cache_node *n,
				struct kmem_cache_cpu *c, gfp_t flags)
{
	struct page *page, *page2;
	void *object = NULL;
	int available = 0;
	int objects;

	/*
	 * Racy check. If we mistakenly see no partial slabs then we
	 * just allocate an empty slab. If we mistakenly try to get a
	 * partial slab and there is none available then get_partials()
	 * will return NULL.
	 */
	if (!n || !n->nr_partial)
		return NULL;

	spin_lock(&n->list_lock);
	list_for_each_entry_safe(page, page2, &n->partial, lru) {
		void *t;

		if (!pfmemalloc_match(page, flags))
			continue;

		t = acquire_slab(s, n, page, object == NULL, &objects);
		if (!t)
			break;

		available += objects;
		if (!object) {
			c->page = page;
			stat(s, ALLOC_FROM_PARTIAL);
			object = t;
		} else {
			put_cpu_partial(s, page, 0);
			stat(s, CPU_PARTIAL_NODE);
		}
		if (!kmem_cache_has_cpu_partial(s)
			|| available > s->cpu_partial / 2)
			break;

	}
	spin_unlock(&n->list_lock);
	return object;
}

/*
 * Get a page from somewhere. Search in increasing NUMA distances.
 */
static void *get_any_partial(struct kmem_cache *s, gfp_t flags,
		struct kmem_cache_cpu *c)
{
#ifdef CONFIG_NUMA
	struct zonelist *zonelist;
	struct zoneref *z;
	struct zone *zone;
	enum zone_type high_zoneidx = gfp_zone(flags);
	void *object;
	unsigned int cpuset_mems_cookie;

	/*
	 * The defrag ratio allows a configuration of the tradeoffs between
	 * inter node defragmentation and node local allocations. A lower
	 * defrag_ratio increases the tendency to do local allocations
	 * instead of attempting to obtain partial slabs from other nodes.
	 *
	 * If the defrag_ratio is set to 0 then kmalloc() always
	 * returns node local objects. If the ratio is higher then kmalloc()
	 * may return off node objects because partial slabs are obtained
	 * from other nodes and filled up.
	 *
	 * If /sys/kernel/slab/xx/defrag_ratio is set to 100 (which makes
	 * defrag_ratio = 1000) then every (well almost) allocation will
	 * first attempt to defrag slab caches on other nodes. This means
	 * scanning over all nodes to look for partial slabs which may be
	 * expensive if we do it every time we are trying to find a slab
	 * with available objects.
	 */
	if (!s->remote_node_defrag_ratio ||
			get_cycles() % 1024 > s->remote_node_defrag_ratio)
		return NULL;

	do {
		cpuset_mems_cookie = read_mems_allowed_begin();
		zonelist = node_zonelist(mempolicy_slab_node(), flags);
		for_each_zone_zonelist(zone, z, zonelist, high_zoneidx) {
			struct kmem_cache_node *n;

			n = get_node(s, zone_to_nid(zone));

			if (n && cpuset_zone_allowed_hardwall(zone, flags) &&
					n->nr_partial > s->min_partial) {
				object = get_partial_node(s, n, c, flags);
				if (object) {
					/*
					 * Don't check read_mems_allowed_retry()
					 * here - if mems_allowed was updated in
					 * parallel, that was a harmless race
					 * between allocation and the cpuset
					 * update
					 */
					return object;
				}
			}
		}
	} while (read_mems_allowed_retry(cpuset_mems_cookie));
#endif
	return NULL;
}

/*
 * Get a partial page, lock it and return it.
 */
static void *get_partial(struct kmem_cache *s, gfp_t flags, int node,
		struct kmem_cache_cpu *c)
{
	void *object;
	int searchnode = (node == NUMA_NO_NODE) ? numa_node_id() : node;

	object = get_partial_node(s, get_node(s, searchnode), c, flags);
	if (object || node != NUMA_NO_NODE)
		return object;

	return get_any_partial(s, flags, c);
}

8a5ec0ba
CL
1735#ifdef CONFIG_PREEMPT
1736/*
1737 * Calculate the next globally unique transaction for disambiguation
1738 * during cmpxchg. The transactions start with the cpu number and are then
1739 * incremented by TID_STEP (CONFIG_NR_CPUS rounded up to a power of two).
1740 */
1741#define TID_STEP roundup_pow_of_two(CONFIG_NR_CPUS)
1742#else
1743/*
1744 * No preemption supported therefore also no need to check for
1745 * different cpus.
1746 */
1747#define TID_STEP 1
1748#endif
1749
1750static inline unsigned long next_tid(unsigned long tid)
1751{
1752 return tid + TID_STEP;
1753}
1754
1755static inline unsigned int tid_to_cpu(unsigned long tid)
1756{
1757 return tid % TID_STEP;
1758}
1759
1760static inline unsigned long tid_to_event(unsigned long tid)
1761{
1762 return tid / TID_STEP;
1763}
1764
1765static inline unsigned int init_tid(int cpu)
1766{
1767 return cpu;
1768}
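/*
 * Illustrative example (added, not part of the original source): with
 * CONFIG_PREEMPT and CONFIG_NR_CPUS = 8, TID_STEP is 8.  CPU 3 starts at
 * tid 3 (init_tid) and advances 3 -> 11 -> 19 -> ... through next_tid().
 * tid_to_cpu(19) == 3 recovers the owning cpu and tid_to_event(19) == 2
 * counts completed operations, which is how note_cmpxchg_failure() below
 * distinguishes a cpu migration from a concurrent operation on the same cpu.
 */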
1769
1770static inline void note_cmpxchg_failure(const char *n,
1771 const struct kmem_cache *s, unsigned long tid)
1772{
1773#ifdef SLUB_DEBUG_CMPXCHG
1774 unsigned long actual_tid = __this_cpu_read(s->cpu_slab->tid);
1775
1776 printk(KERN_INFO "%s %s: cmpxchg redo ", n, s->name);
1777
1778#ifdef CONFIG_PREEMPT
1779 if (tid_to_cpu(tid) != tid_to_cpu(actual_tid))
1780 printk("due to cpu change %d -> %d\n",
1781 tid_to_cpu(tid), tid_to_cpu(actual_tid));
1782 else
1783#endif
1784 if (tid_to_event(tid) != tid_to_event(actual_tid))
1785 printk("due to cpu running other code. Event %ld->%ld\n",
1786 tid_to_event(tid), tid_to_event(actual_tid));
1787 else
1788 printk("for unknown reason: actual=%lx was=%lx target=%lx\n",
1789 actual_tid, tid, next_tid(tid));
1790#endif
4fdccdfb 1791 stat(s, CMPXCHG_DOUBLE_CPU_FAIL);
8a5ec0ba
CL
1792}
1793
788e1aad 1794static void init_kmem_cache_cpus(struct kmem_cache *s)
8a5ec0ba 1795{
8a5ec0ba
CL
1796 int cpu;
1797
1798 for_each_possible_cpu(cpu)
1799 per_cpu_ptr(s->cpu_slab, cpu)->tid = init_tid(cpu);
8a5ec0ba 1800}
2cfb7455 1801
81819f0f
CL
1802/*
1803 * Remove the cpu slab
1804 */
d0e0ac97
CG
1805static void deactivate_slab(struct kmem_cache *s, struct page *page,
1806 void *freelist)
81819f0f 1807{
2cfb7455 1808 enum slab_modes { M_NONE, M_PARTIAL, M_FULL, M_FREE };
2cfb7455
CL
1809 struct kmem_cache_node *n = get_node(s, page_to_nid(page));
1810 int lock = 0;
1811 enum slab_modes l = M_NONE, m = M_NONE;
2cfb7455 1812 void *nextfree;
136333d1 1813 int tail = DEACTIVATE_TO_HEAD;
2cfb7455
CL
1814 struct page new;
1815 struct page old;
1816
1817 if (page->freelist) {
84e554e6 1818 stat(s, DEACTIVATE_REMOTE_FREES);
136333d1 1819 tail = DEACTIVATE_TO_TAIL;
2cfb7455
CL
1820 }
1821
894b8788 1822 /*
2cfb7455
CL
1823 * Stage one: Free all available per cpu objects back
1824 * to the page freelist while it is still frozen. Leave the
1825 * last one.
1826 *
1827 * There is no need to take the list->lock because the page
1828 * is still frozen.
1829 */
1830 while (freelist && (nextfree = get_freepointer(s, freelist))) {
1831 void *prior;
1832 unsigned long counters;
1833
1834 do {
1835 prior = page->freelist;
1836 counters = page->counters;
1837 set_freepointer(s, freelist, prior);
1838 new.counters = counters;
1839 new.inuse--;
a0132ac0 1840 VM_BUG_ON(!new.frozen);
2cfb7455 1841
1d07171c 1842 } while (!__cmpxchg_double_slab(s, page,
2cfb7455
CL
1843 prior, counters,
1844 freelist, new.counters,
1845 "drain percpu freelist"));
1846
1847 freelist = nextfree;
1848 }
1849
894b8788 1850 /*
2cfb7455
CL
1851 * Stage two: Ensure that the page is unfrozen while the
1852 * list presence reflects the actual number of objects
1853 * during unfreeze.
1854 *
1855 * We setup the list membership and then perform a cmpxchg
1856 * with the count. If there is a mismatch then the page
1857 * is not unfrozen but the page is on the wrong list.
1858 *
1859 * Then we restart the process which may have to remove
1860 * the page from the list that we just put it on again
1861 * because the number of objects in the slab may have
1862 * changed.
894b8788 1863 */
2cfb7455 1864redo:
894b8788 1865
2cfb7455
CL
1866 old.freelist = page->freelist;
1867 old.counters = page->counters;
a0132ac0 1868 VM_BUG_ON(!old.frozen);
7c2e132c 1869
2cfb7455
CL
1870 /* Determine target state of the slab */
1871 new.counters = old.counters;
1872 if (freelist) {
1873 new.inuse--;
1874 set_freepointer(s, freelist, old.freelist);
1875 new.freelist = freelist;
1876 } else
1877 new.freelist = old.freelist;
1878
1879 new.frozen = 0;
1880
81107188 1881 if (!new.inuse && n->nr_partial > s->min_partial)
2cfb7455
CL
1882 m = M_FREE;
1883 else if (new.freelist) {
1884 m = M_PARTIAL;
1885 if (!lock) {
1886 lock = 1;
1887 /*
1888 * Taking the spinlock removes the possibility
1889 * that acquire_slab() will see a slab page that
1890 * is frozen
1891 */
1892 spin_lock(&n->list_lock);
1893 }
1894 } else {
1895 m = M_FULL;
1896 if (kmem_cache_debug(s) && !lock) {
1897 lock = 1;
1898 /*
1899 * This also ensures that the scanning of full
1900 * slabs from diagnostic functions will not see
1901 * any frozen slabs.
1902 */
1903 spin_lock(&n->list_lock);
1904 }
1905 }
1906
1907 if (l != m) {
1908
1909 if (l == M_PARTIAL)
1910
1911 remove_partial(n, page);
1912
1913 else if (l == M_FULL)
894b8788 1914
c65c1877 1915 remove_full(s, n, page);
2cfb7455
CL
1916
1917 if (m == M_PARTIAL) {
1918
1919 add_partial(n, page, tail);
136333d1 1920 stat(s, tail);
2cfb7455
CL
1921
1922 } else if (m == M_FULL) {
894b8788 1923
2cfb7455
CL
1924 stat(s, DEACTIVATE_FULL);
1925 add_full(s, n, page);
1926
1927 }
1928 }
1929
1930 l = m;
1d07171c 1931 if (!__cmpxchg_double_slab(s, page,
2cfb7455
CL
1932 old.freelist, old.counters,
1933 new.freelist, new.counters,
1934 "unfreezing slab"))
1935 goto redo;
1936
2cfb7455
CL
1937 if (lock)
1938 spin_unlock(&n->list_lock);
1939
1940 if (m == M_FREE) {
1941 stat(s, DEACTIVATE_EMPTY);
1942 discard_slab(s, page);
1943 stat(s, FREE_SLAB);
894b8788 1944 }
81819f0f
CL
1945}
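/*
 * Added summary (derived from the code above, not part of the original
 * source): l tracks the list the page currently sits on and m the list it
 * should end up on; partial/full list changes are made under n->list_lock.
 * A page that ends up with no objects in use while the node already holds
 * more than s->min_partial partial slabs becomes M_FREE and is discarded
 * once the final cmpxchg_double succeeds.  Pages that saw remote frees are
 * queued at the tail of the partial list (DEACTIVATE_TO_TAIL), otherwise
 * at the head.
 */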
1946
d24ac77f
JK
1947/*
1948 * Unfreeze all the cpu partial slabs.
1949 *
59a09917
CL
1950 * This function must be called with interrupts disabled
1951 * for the cpu using c (or some other mechanism must be in place
1952 * to guarantee no concurrent accesses).
d24ac77f 1953 */
59a09917
CL
1954static void unfreeze_partials(struct kmem_cache *s,
1955 struct kmem_cache_cpu *c)
49e22585 1956{
345c905d 1957#ifdef CONFIG_SLUB_CPU_PARTIAL
43d77867 1958 struct kmem_cache_node *n = NULL, *n2 = NULL;
9ada1934 1959 struct page *page, *discard_page = NULL;
49e22585
CL
1960
1961 while ((page = c->partial)) {
49e22585
CL
1962 struct page new;
1963 struct page old;
1964
1965 c->partial = page->next;
43d77867
JK
1966
1967 n2 = get_node(s, page_to_nid(page));
1968 if (n != n2) {
1969 if (n)
1970 spin_unlock(&n->list_lock);
1971
1972 n = n2;
1973 spin_lock(&n->list_lock);
1974 }
49e22585
CL
1975
1976 do {
1977
1978 old.freelist = page->freelist;
1979 old.counters = page->counters;
a0132ac0 1980 VM_BUG_ON(!old.frozen);
49e22585
CL
1981
1982 new.counters = old.counters;
1983 new.freelist = old.freelist;
1984
1985 new.frozen = 0;
1986
d24ac77f 1987 } while (!__cmpxchg_double_slab(s, page,
49e22585
CL
1988 old.freelist, old.counters,
1989 new.freelist, new.counters,
1990 "unfreezing slab"));
1991
43d77867 1992 if (unlikely(!new.inuse && n->nr_partial > s->min_partial)) {
9ada1934
SL
1993 page->next = discard_page;
1994 discard_page = page;
43d77867
JK
1995 } else {
1996 add_partial(n, page, DEACTIVATE_TO_TAIL);
1997 stat(s, FREE_ADD_PARTIAL);
49e22585
CL
1998 }
1999 }
2000
2001 if (n)
2002 spin_unlock(&n->list_lock);
9ada1934
SL
2003
2004 while (discard_page) {
2005 page = discard_page;
2006 discard_page = discard_page->next;
2007
2008 stat(s, DEACTIVATE_EMPTY);
2009 discard_slab(s, page);
2010 stat(s, FREE_SLAB);
2011 }
345c905d 2012#endif
49e22585
CL
2013}
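/*
 * Added note (derived from the code above): completely free pages are only
 * chained onto discard_page while n->list_lock is held; the actual
 * discard_slab() calls are done after the lock has been dropped, so pages
 * are never returned to the page allocator under the node list_lock.
 */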
2014
2015/*
2016 * Put a page that was just frozen (in __slab_free) into a partial page
2017 * slot if available. This is done without interrupts disabled and without
2018 * preemption disabled. The cmpxchg is racy and may put the partial page
2019 * onto a random cpu's partial slot.
2020 *
2021 * If we did not find a slot then simply move all the partials to the
2022 * per node partial list.
2023 */
633b0764 2024static void put_cpu_partial(struct kmem_cache *s, struct page *page, int drain)
49e22585 2025{
345c905d 2026#ifdef CONFIG_SLUB_CPU_PARTIAL
49e22585
CL
2027 struct page *oldpage;
2028 int pages;
2029 int pobjects;
2030
2031 do {
2032 pages = 0;
2033 pobjects = 0;
2034 oldpage = this_cpu_read(s->cpu_slab->partial);
2035
2036 if (oldpage) {
2037 pobjects = oldpage->pobjects;
2038 pages = oldpage->pages;
2039 if (drain && pobjects > s->cpu_partial) {
2040 unsigned long flags;
2041 /*
2042 * partial array is full. Move the existing
2043 * set to the per node partial list.
2044 */
2045 local_irq_save(flags);
59a09917 2046 unfreeze_partials(s, this_cpu_ptr(s->cpu_slab));
49e22585 2047 local_irq_restore(flags);
e24fc410 2048 oldpage = NULL;
49e22585
CL
2049 pobjects = 0;
2050 pages = 0;
8028dcea 2051 stat(s, CPU_PARTIAL_DRAIN);
49e22585
CL
2052 }
2053 }
2054
2055 pages++;
2056 pobjects += page->objects - page->inuse;
2057
2058 page->pages = pages;
2059 page->pobjects = pobjects;
2060 page->next = oldpage;
2061
d0e0ac97
CG
2062 } while (this_cpu_cmpxchg(s->cpu_slab->partial, oldpage, page)
2063 != oldpage);
345c905d 2064#endif
49e22585
CL
2065}
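/*
 * Worked example (added, not part of the original source): with the
 * defaults chosen in kmem_cache_open(), a cache whose s->size is 256
 * bytes gets s->cpu_partial = 13.  Freshly frozen pages are pushed onto
 * c->partial until the accumulated pobjects count exceeds 13; the next
 * call with drain set then flushes the whole per cpu set to the per node
 * partial list (CPU_PARTIAL_DRAIN) and starts a new chain consisting of
 * just the page being added.
 */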
2066
dfb4f096 2067static inline void flush_slab(struct kmem_cache *s, struct kmem_cache_cpu *c)
81819f0f 2068{
84e554e6 2069 stat(s, CPUSLAB_FLUSH);
c17dda40
CL
2070 deactivate_slab(s, c->page, c->freelist);
2071
2072 c->tid = next_tid(c->tid);
2073 c->page = NULL;
2074 c->freelist = NULL;
81819f0f
CL
2075}
2076
2077/*
2078 * Flush cpu slab.
6446faa2 2079 *
81819f0f
CL
2080 * Called from IPI handler with interrupts disabled.
2081 */
0c710013 2082static inline void __flush_cpu_slab(struct kmem_cache *s, int cpu)
81819f0f 2083{
9dfc6e68 2084 struct kmem_cache_cpu *c = per_cpu_ptr(s->cpu_slab, cpu);
81819f0f 2085
49e22585
CL
2086 if (likely(c)) {
2087 if (c->page)
2088 flush_slab(s, c);
2089
59a09917 2090 unfreeze_partials(s, c);
49e22585 2091 }
81819f0f
CL
2092}
2093
2094static void flush_cpu_slab(void *d)
2095{
2096 struct kmem_cache *s = d;
81819f0f 2097
dfb4f096 2098 __flush_cpu_slab(s, smp_processor_id());
81819f0f
CL
2099}
2100
a8364d55
GBY
2101static bool has_cpu_slab(int cpu, void *info)
2102{
2103 struct kmem_cache *s = info;
2104 struct kmem_cache_cpu *c = per_cpu_ptr(s->cpu_slab, cpu);
2105
02e1a9cd 2106 return c->page || c->partial;
a8364d55
GBY
2107}
2108
81819f0f
CL
2109static void flush_all(struct kmem_cache *s)
2110{
a8364d55 2111 on_each_cpu_cond(has_cpu_slab, flush_cpu_slab, s, 1, GFP_ATOMIC);
81819f0f
CL
2112}
2113
dfb4f096
CL
2114/*
2115 * Check if the objects in a per cpu structure fit numa
2116 * locality expectations.
2117 */
57d437d2 2118static inline int node_match(struct page *page, int node)
dfb4f096
CL
2119{
2120#ifdef CONFIG_NUMA
4d7868e6 2121 if (!page || (node != NUMA_NO_NODE && page_to_nid(page) != node))
dfb4f096
CL
2122 return 0;
2123#endif
2124 return 1;
2125}
2126
781b2ba6
PE
2127static int count_free(struct page *page)
2128{
2129 return page->objects - page->inuse;
2130}
2131
2132static unsigned long count_partial(struct kmem_cache_node *n,
2133 int (*get_count)(struct page *))
2134{
2135 unsigned long flags;
2136 unsigned long x = 0;
2137 struct page *page;
2138
2139 spin_lock_irqsave(&n->list_lock, flags);
2140 list_for_each_entry(page, &n->partial, lru)
2141 x += get_count(page);
2142 spin_unlock_irqrestore(&n->list_lock, flags);
2143 return x;
2144}
2145
26c02cf0
AB
2146static inline unsigned long node_nr_objs(struct kmem_cache_node *n)
2147{
2148#ifdef CONFIG_SLUB_DEBUG
2149 return atomic_long_read(&n->total_objects);
2150#else
2151 return 0;
2152#endif
2153}
2154
781b2ba6
PE
2155static noinline void
2156slab_out_of_memory(struct kmem_cache *s, gfp_t gfpflags, int nid)
2157{
2158 int node;
2159
2160 printk(KERN_WARNING
2161 "SLUB: Unable to allocate memory on node %d (gfp=0x%x)\n",
2162 nid, gfpflags);
2163 printk(KERN_WARNING " cache: %s, object size: %d, buffer size: %d, "
3b0efdfa 2164 "default order: %d, min order: %d\n", s->name, s->object_size,
781b2ba6
PE
2165 s->size, oo_order(s->oo), oo_order(s->min));
2166
3b0efdfa 2167 if (oo_order(s->min) > get_order(s->object_size))
fa5ec8a1
DR
2168 printk(KERN_WARNING " %s debugging increased min order, use "
2169 "slub_debug=O to disable.\n", s->name);
2170
781b2ba6
PE
2171 for_each_online_node(node) {
2172 struct kmem_cache_node *n = get_node(s, node);
2173 unsigned long nr_slabs;
2174 unsigned long nr_objs;
2175 unsigned long nr_free;
2176
2177 if (!n)
2178 continue;
2179
26c02cf0
AB
2180 nr_free = count_partial(n, count_free);
2181 nr_slabs = node_nr_slabs(n);
2182 nr_objs = node_nr_objs(n);
781b2ba6
PE
2183
2184 printk(KERN_WARNING
2185 " node %d: slabs: %ld, objs: %ld, free: %ld\n",
2186 node, nr_slabs, nr_objs, nr_free);
2187 }
2188}
2189
497b66f2
CL
2190static inline void *new_slab_objects(struct kmem_cache *s, gfp_t flags,
2191 int node, struct kmem_cache_cpu **pc)
2192{
6faa6833 2193 void *freelist;
188fd063
CL
2194 struct kmem_cache_cpu *c = *pc;
2195 struct page *page;
497b66f2 2196
188fd063 2197 freelist = get_partial(s, flags, node, c);
497b66f2 2198
188fd063
CL
2199 if (freelist)
2200 return freelist;
2201
2202 page = new_slab(s, flags, node);
497b66f2
CL
2203 if (page) {
2204 c = __this_cpu_ptr(s->cpu_slab);
2205 if (c->page)
2206 flush_slab(s, c);
2207
2208 /*
2209 * No other reference to the page yet so we can
2210 * muck around with it freely without cmpxchg
2211 */
6faa6833 2212 freelist = page->freelist;
497b66f2
CL
2213 page->freelist = NULL;
2214
2215 stat(s, ALLOC_SLAB);
497b66f2
CL
2216 c->page = page;
2217 *pc = c;
2218 } else
6faa6833 2219 freelist = NULL;
497b66f2 2220
6faa6833 2221 return freelist;
497b66f2
CL
2222}
2223
072bb0aa
MG
2224static inline bool pfmemalloc_match(struct page *page, gfp_t gfpflags)
2225{
2226 if (unlikely(PageSlabPfmemalloc(page)))
2227 return gfp_pfmemalloc_allowed(gfpflags);
2228
2229 return true;
2230}
2231
213eeb9f 2232/*
d0e0ac97
CG
2233 * Check the page->freelist of a page and either transfer the freelist to the
2234 * per cpu freelist or deactivate the page.
213eeb9f
CL
2235 *
2236 * The page is still frozen if the return value is not NULL.
2237 *
2238 * If this function returns NULL then the page has been unfrozen.
d24ac77f
JK
2239 *
2240 * This function must be called with interrupts disabled.
213eeb9f
CL
2241 */
2242static inline void *get_freelist(struct kmem_cache *s, struct page *page)
2243{
2244 struct page new;
2245 unsigned long counters;
2246 void *freelist;
2247
2248 do {
2249 freelist = page->freelist;
2250 counters = page->counters;
6faa6833 2251
213eeb9f 2252 new.counters = counters;
a0132ac0 2253 VM_BUG_ON(!new.frozen);
213eeb9f
CL
2254
2255 new.inuse = page->objects;
2256 new.frozen = freelist != NULL;
2257
d24ac77f 2258 } while (!__cmpxchg_double_slab(s, page,
213eeb9f
CL
2259 freelist, counters,
2260 NULL, new.counters,
2261 "get_freelist"));
2262
2263 return freelist;
2264}
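/*
 * Added note (derived from the code above): the cmpxchg takes the entire
 * page->freelist and marks every object in use.  new.frozen stays set only
 * if the freelist was non-empty, so the page remains frozen for the cpu
 * that now owns those objects; if the freelist was already empty the page
 * is unfrozen and the caller has to take the new_slab path.
 */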
2265
81819f0f 2266/*
894b8788
CL
2267 * Slow path. The lockless freelist is empty or we need to perform
2268 * debugging duties.
2269 *
894b8788
CL
2270 * Processing is still very fast if new objects have been freed to the
2271 * regular freelist. In that case we simply take over the regular freelist
2272 * as the lockless freelist and zap the regular freelist.
81819f0f 2273 *
894b8788
CL
2274 * If that is not working then we fall back to the partial lists. We take the
2275 * first element of the freelist as the object to allocate now and move the
2276 * rest of the freelist to the lockless freelist.
81819f0f 2277 *
894b8788 2278 * And if we were unable to get a new slab from the partial slab lists then
6446faa2
CL
2279 * we need to allocate a new slab. This is the slowest path since it involves
2280 * a call to the page allocator and the setup of a new slab.
81819f0f 2281 */
ce71e27c
EGM
2282static void *__slab_alloc(struct kmem_cache *s, gfp_t gfpflags, int node,
2283 unsigned long addr, struct kmem_cache_cpu *c)
81819f0f 2284{
6faa6833 2285 void *freelist;
f6e7def7 2286 struct page *page;
8a5ec0ba
CL
2287 unsigned long flags;
2288
2289 local_irq_save(flags);
2290#ifdef CONFIG_PREEMPT
2291 /*
2292 * We may have been preempted and rescheduled on a different
2293 * cpu before disabling interrupts. Need to reload cpu area
2294 * pointer.
2295 */
2296 c = this_cpu_ptr(s->cpu_slab);
8a5ec0ba 2297#endif
81819f0f 2298
f6e7def7
CL
2299 page = c->page;
2300 if (!page)
81819f0f 2301 goto new_slab;
49e22585 2302redo:
6faa6833 2303
57d437d2 2304 if (unlikely(!node_match(page, node))) {
e36a2652 2305 stat(s, ALLOC_NODE_MISMATCH);
f6e7def7 2306 deactivate_slab(s, page, c->freelist);
c17dda40
CL
2307 c->page = NULL;
2308 c->freelist = NULL;
fc59c053
CL
2309 goto new_slab;
2310 }
6446faa2 2311
072bb0aa
MG
2312 /*
2313 * By rights, we should be searching for a slab page that was
2314 * PFMEMALLOC but right now, we are losing the pfmemalloc
2315 * information when the page leaves the per-cpu allocator
2316 */
2317 if (unlikely(!pfmemalloc_match(page, gfpflags))) {
2318 deactivate_slab(s, page, c->freelist);
2319 c->page = NULL;
2320 c->freelist = NULL;
2321 goto new_slab;
2322 }
2323
73736e03 2324 /* must check again c->freelist in case of cpu migration or IRQ */
6faa6833
CL
2325 freelist = c->freelist;
2326 if (freelist)
73736e03 2327 goto load_freelist;
03e404af 2328
2cfb7455 2329 stat(s, ALLOC_SLOWPATH);
03e404af 2330
f6e7def7 2331 freelist = get_freelist(s, page);
6446faa2 2332
6faa6833 2333 if (!freelist) {
03e404af
CL
2334 c->page = NULL;
2335 stat(s, DEACTIVATE_BYPASS);
fc59c053 2336 goto new_slab;
03e404af 2337 }
6446faa2 2338
84e554e6 2339 stat(s, ALLOC_REFILL);
6446faa2 2340
894b8788 2341load_freelist:
507effea
CL
2342 /*
2343 * freelist is pointing to the list of objects to be used.
2344 * page is pointing to the page from which the objects are obtained.
2345 * That page must be frozen for per cpu allocations to work.
2346 */
a0132ac0 2347 VM_BUG_ON(!c->page->frozen);
6faa6833 2348 c->freelist = get_freepointer(s, freelist);
8a5ec0ba
CL
2349 c->tid = next_tid(c->tid);
2350 local_irq_restore(flags);
6faa6833 2351 return freelist;
81819f0f 2352
81819f0f 2353new_slab:
2cfb7455 2354
49e22585 2355 if (c->partial) {
f6e7def7
CL
2356 page = c->page = c->partial;
2357 c->partial = page->next;
49e22585
CL
2358 stat(s, CPU_PARTIAL_ALLOC);
2359 c->freelist = NULL;
2360 goto redo;
81819f0f
CL
2361 }
2362
188fd063 2363 freelist = new_slab_objects(s, gfpflags, node, &c);
01ad8a7b 2364
f4697436
CL
2365 if (unlikely(!freelist)) {
2366 if (!(gfpflags & __GFP_NOWARN) && printk_ratelimit())
2367 slab_out_of_memory(s, gfpflags, node);
2cfb7455 2368
f4697436
CL
2369 local_irq_restore(flags);
2370 return NULL;
81819f0f 2371 }
2cfb7455 2372
f6e7def7 2373 page = c->page;
5091b74a 2374 if (likely(!kmem_cache_debug(s) && pfmemalloc_match(page, gfpflags)))
4b6f0750 2375 goto load_freelist;
2cfb7455 2376
497b66f2 2377 /* Only entered in the debug case */
d0e0ac97
CG
2378 if (kmem_cache_debug(s) &&
2379 !alloc_debug_processing(s, page, freelist, addr))
497b66f2 2380 goto new_slab; /* Slab failed checks. Next slab needed */
894b8788 2381
f6e7def7 2382 deactivate_slab(s, page, get_freepointer(s, freelist));
c17dda40
CL
2383 c->page = NULL;
2384 c->freelist = NULL;
a71ae47a 2385 local_irq_restore(flags);
6faa6833 2386 return freelist;
894b8788
CL
2387}
2388
2389/*
2390 * Inlined fastpath so that allocation functions (kmalloc, kmem_cache_alloc)
2391 * have the fastpath folded into their functions. So no function call
2392 * overhead for requests that can be satisfied on the fastpath.
2393 *
2394 * The fastpath works by first checking if the lockless freelist can be used.
2395 * If not then __slab_alloc is called for slow processing.
2396 *
2397 * Otherwise we can simply pick the next object from the lockless free list.
2398 */
2b847c3c 2399static __always_inline void *slab_alloc_node(struct kmem_cache *s,
ce71e27c 2400 gfp_t gfpflags, int node, unsigned long addr)
894b8788 2401{
894b8788 2402 void **object;
dfb4f096 2403 struct kmem_cache_cpu *c;
57d437d2 2404 struct page *page;
8a5ec0ba 2405 unsigned long tid;
1f84260c 2406
c016b0bd 2407 if (slab_pre_alloc_hook(s, gfpflags))
773ff60e 2408 return NULL;
1f84260c 2409
d79923fa 2410 s = memcg_kmem_get_cache(s, gfpflags);
8a5ec0ba 2411redo:
8a5ec0ba
CL
2412 /*
2413 * Must read kmem_cache cpu data via this cpu ptr. Preemption is
2414 * enabled. We may switch back and forth between cpus while
2415 * reading from one cpu area. That does not matter as long
2416 * as we end up on the original cpu again when doing the cmpxchg.
7cccd80b
CL
2417 *
2418 * Preemption is disabled for the retrieval of the tid because that
2419 * must occur from the current processor. We cannot allow rescheduling
2420 * on a different processor between the determination of the pointer
2421 * and the retrieval of the tid.
8a5ec0ba 2422 */
7cccd80b 2423 preempt_disable();
9dfc6e68 2424 c = __this_cpu_ptr(s->cpu_slab);
8a5ec0ba 2425
8a5ec0ba
CL
2426 /*
2427 * The transaction ids are globally unique per cpu and per operation on
2428 * a per cpu queue. Thus they guarantee that the cmpxchg_double
2429 * occurs on the right processor and that there was no operation on the
2430 * linked list in between.
2431 */
2432 tid = c->tid;
7cccd80b 2433 preempt_enable();
8a5ec0ba 2434
9dfc6e68 2435 object = c->freelist;
57d437d2 2436 page = c->page;
ac6434e6 2437 if (unlikely(!object || !node_match(page, node)))
dfb4f096 2438 object = __slab_alloc(s, gfpflags, node, addr, c);
894b8788
CL
2439
2440 else {
0ad9500e
ED
2441 void *next_object = get_freepointer_safe(s, object);
2442
8a5ec0ba 2443 /*
25985edc 2444 * The cmpxchg will only match if there was no additional
8a5ec0ba
CL
2445 * operation and if we are on the right processor.
2446 *
d0e0ac97
CG
2447 * The cmpxchg does the following atomically (without lock
2448 * semantics!)
8a5ec0ba
CL
2449 * 1. Relocate first pointer to the current per cpu area.
2450 * 2. Verify that tid and freelist have not been changed
2451 * 3. If they were not changed replace tid and freelist
2452 *
d0e0ac97
CG
2453 * Since this is without lock semantics the protection is only
2454 * against code executing on this cpu *not* from access by
2455 * other cpus.
8a5ec0ba 2456 */
933393f5 2457 if (unlikely(!this_cpu_cmpxchg_double(
8a5ec0ba
CL
2458 s->cpu_slab->freelist, s->cpu_slab->tid,
2459 object, tid,
0ad9500e 2460 next_object, next_tid(tid)))) {
8a5ec0ba
CL
2461
2462 note_cmpxchg_failure("slab_alloc", s, tid);
2463 goto redo;
2464 }
0ad9500e 2465 prefetch_freepointer(s, next_object);
84e554e6 2466 stat(s, ALLOC_FASTPATH);
894b8788 2467 }
8a5ec0ba 2468
74e2134f 2469 if (unlikely(gfpflags & __GFP_ZERO) && object)
3b0efdfa 2470 memset(object, 0, s->object_size);
d07dbea4 2471
c016b0bd 2472 slab_post_alloc_hook(s, gfpflags, object);
5a896d9e 2473
894b8788 2474 return object;
81819f0f
CL
2475}
2476
2b847c3c
EG
2477static __always_inline void *slab_alloc(struct kmem_cache *s,
2478 gfp_t gfpflags, unsigned long addr)
2479{
2480 return slab_alloc_node(s, gfpflags, NUMA_NO_NODE, addr);
2481}
2482
81819f0f
CL
2483void *kmem_cache_alloc(struct kmem_cache *s, gfp_t gfpflags)
2484{
2b847c3c 2485 void *ret = slab_alloc(s, gfpflags, _RET_IP_);
5b882be4 2486
d0e0ac97
CG
2487 trace_kmem_cache_alloc(_RET_IP_, ret, s->object_size,
2488 s->size, gfpflags);
5b882be4
EGM
2489
2490 return ret;
81819f0f
CL
2491}
2492EXPORT_SYMBOL(kmem_cache_alloc);
2493
0f24f128 2494#ifdef CONFIG_TRACING
4a92379b
RK
2495void *kmem_cache_alloc_trace(struct kmem_cache *s, gfp_t gfpflags, size_t size)
2496{
2b847c3c 2497 void *ret = slab_alloc(s, gfpflags, _RET_IP_);
4a92379b
RK
2498 trace_kmalloc(_RET_IP_, ret, size, s->size, gfpflags);
2499 return ret;
2500}
2501EXPORT_SYMBOL(kmem_cache_alloc_trace);
5b882be4
EGM
2502#endif
2503
81819f0f
CL
2504#ifdef CONFIG_NUMA
2505void *kmem_cache_alloc_node(struct kmem_cache *s, gfp_t gfpflags, int node)
2506{
2b847c3c 2507 void *ret = slab_alloc_node(s, gfpflags, node, _RET_IP_);
5b882be4 2508
ca2b84cb 2509 trace_kmem_cache_alloc_node(_RET_IP_, ret,
3b0efdfa 2510 s->object_size, s->size, gfpflags, node);
5b882be4
EGM
2511
2512 return ret;
81819f0f
CL
2513}
2514EXPORT_SYMBOL(kmem_cache_alloc_node);
81819f0f 2515
0f24f128 2516#ifdef CONFIG_TRACING
4a92379b 2517void *kmem_cache_alloc_node_trace(struct kmem_cache *s,
5b882be4 2518 gfp_t gfpflags,
4a92379b 2519 int node, size_t size)
5b882be4 2520{
2b847c3c 2521 void *ret = slab_alloc_node(s, gfpflags, node, _RET_IP_);
4a92379b
RK
2522
2523 trace_kmalloc_node(_RET_IP_, ret,
2524 size, s->size, gfpflags, node);
2525 return ret;
5b882be4 2526}
4a92379b 2527EXPORT_SYMBOL(kmem_cache_alloc_node_trace);
5b882be4 2528#endif
5d1f57e4 2529#endif
5b882be4 2530
81819f0f 2531/*
894b8788
CL
2532 * Slow path handling. This may still be called frequently since objects
2533 * have a longer lifetime than the cpu slabs in most processing loads.
81819f0f 2534 *
894b8788
CL
2535 * So we still attempt to reduce cache line usage. Just take the slab
2536 * lock and free the item. If there is no additional partial page
2537 * handling required then we can return immediately.
81819f0f 2538 */
894b8788 2539static void __slab_free(struct kmem_cache *s, struct page *page,
ff12059e 2540 void *x, unsigned long addr)
81819f0f
CL
2541{
2542 void *prior;
2543 void **object = (void *)x;
2cfb7455 2544 int was_frozen;
2cfb7455
CL
2545 struct page new;
2546 unsigned long counters;
2547 struct kmem_cache_node *n = NULL;
61728d1e 2548 unsigned long uninitialized_var(flags);
81819f0f 2549
8a5ec0ba 2550 stat(s, FREE_SLOWPATH);
81819f0f 2551
19c7ff9e
CL
2552 if (kmem_cache_debug(s) &&
2553 !(n = free_debug_processing(s, page, x, addr, &flags)))
80f08c19 2554 return;
6446faa2 2555
2cfb7455 2556 do {
837d678d
JK
2557 if (unlikely(n)) {
2558 spin_unlock_irqrestore(&n->list_lock, flags);
2559 n = NULL;
2560 }
2cfb7455
CL
2561 prior = page->freelist;
2562 counters = page->counters;
2563 set_freepointer(s, object, prior);
2564 new.counters = counters;
2565 was_frozen = new.frozen;
2566 new.inuse--;
837d678d 2567 if ((!new.inuse || !prior) && !was_frozen) {
49e22585 2568
c65c1877 2569 if (kmem_cache_has_cpu_partial(s) && !prior) {
49e22585
CL
2570
2571 /*
d0e0ac97
CG
2572 * Slab was on no list before and will be
2573 * partially empty
2574 * We can defer the list move and instead
2575 * freeze it.
49e22585
CL
2576 */
2577 new.frozen = 1;
2578
c65c1877 2579 } else { /* Needs to be taken off a list */
49e22585
CL
2580
2581 n = get_node(s, page_to_nid(page));
2582 /*
2583 * Speculatively acquire the list_lock.
2584 * If the cmpxchg does not succeed then we may
2585 * drop the list_lock without any processing.
2586 *
2587 * Otherwise the list_lock will synchronize with
2588 * other processors updating the list of slabs.
2589 */
2590 spin_lock_irqsave(&n->list_lock, flags);
2591
2592 }
2cfb7455 2593 }
81819f0f 2594
2cfb7455
CL
2595 } while (!cmpxchg_double_slab(s, page,
2596 prior, counters,
2597 object, new.counters,
2598 "__slab_free"));
81819f0f 2599
2cfb7455 2600 if (likely(!n)) {
49e22585
CL
2601
2602 /*
2603 * If we just froze the page then put it onto the
2604 * per cpu partial list.
2605 */
8028dcea 2606 if (new.frozen && !was_frozen) {
49e22585 2607 put_cpu_partial(s, page, 1);
8028dcea
AS
2608 stat(s, CPU_PARTIAL_FREE);
2609 }
49e22585 2610 /*
2cfb7455
CL
2611 * The list lock was not taken therefore no list
2612 * activity can be necessary.
2613 */
2614 if (was_frozen)
2615 stat(s, FREE_FROZEN);
80f08c19 2616 return;
2cfb7455 2617 }
81819f0f 2618
837d678d
JK
2619 if (unlikely(!new.inuse && n->nr_partial > s->min_partial))
2620 goto slab_empty;
2621
81819f0f 2622 /*
837d678d
JK
2623 * Objects left in the slab. If it was not on the partial list before
2624 * then add it.
81819f0f 2625 */
345c905d
JK
2626 if (!kmem_cache_has_cpu_partial(s) && unlikely(!prior)) {
2627 if (kmem_cache_debug(s))
c65c1877 2628 remove_full(s, n, page);
837d678d
JK
2629 add_partial(n, page, DEACTIVATE_TO_TAIL);
2630 stat(s, FREE_ADD_PARTIAL);
8ff12cfc 2631 }
80f08c19 2632 spin_unlock_irqrestore(&n->list_lock, flags);
81819f0f
CL
2633 return;
2634
2635slab_empty:
a973e9dd 2636 if (prior) {
81819f0f 2637 /*
6fbabb20 2638 * Slab on the partial list.
81819f0f 2639 */
5cc6eee8 2640 remove_partial(n, page);
84e554e6 2641 stat(s, FREE_REMOVE_PARTIAL);
c65c1877 2642 } else {
6fbabb20 2643 /* Slab must be on the full list */
c65c1877
PZ
2644 remove_full(s, n, page);
2645 }
2cfb7455 2646
80f08c19 2647 spin_unlock_irqrestore(&n->list_lock, flags);
84e554e6 2648 stat(s, FREE_SLAB);
81819f0f 2649 discard_slab(s, page);
81819f0f
CL
2650}
2651
894b8788
CL
2652/*
2653 * Fastpath with forced inlining to produce a kfree and kmem_cache_free that
2654 * can perform fastpath freeing without additional function calls.
2655 *
2656 * The fastpath is only possible if we are freeing to the current cpu slab
2657 * of this processor. This is typically the case if we have just allocated
2658 * the item before.
2659 *
2660 * If fastpath is not possible then fall back to __slab_free where we deal
2661 * with all sorts of special processing.
2662 */
06428780 2663static __always_inline void slab_free(struct kmem_cache *s,
ce71e27c 2664 struct page *page, void *x, unsigned long addr)
894b8788
CL
2665{
2666 void **object = (void *)x;
dfb4f096 2667 struct kmem_cache_cpu *c;
8a5ec0ba 2668 unsigned long tid;
1f84260c 2669
c016b0bd
CL
2670 slab_free_hook(s, x);
2671
8a5ec0ba
CL
2672redo:
2673 /*
2674 * Determine the current cpu's per cpu slab.
2675 * The cpu may change afterward. However that does not matter since
2676 * data is retrieved via this pointer. If we are on the same cpu
2677 * during the cmpxchg then the free will succeed.
2678 */
7cccd80b 2679 preempt_disable();
9dfc6e68 2680 c = __this_cpu_ptr(s->cpu_slab);
c016b0bd 2681
8a5ec0ba 2682 tid = c->tid;
7cccd80b 2683 preempt_enable();
c016b0bd 2684
442b06bc 2685 if (likely(page == c->page)) {
ff12059e 2686 set_freepointer(s, object, c->freelist);
8a5ec0ba 2687
933393f5 2688 if (unlikely(!this_cpu_cmpxchg_double(
8a5ec0ba
CL
2689 s->cpu_slab->freelist, s->cpu_slab->tid,
2690 c->freelist, tid,
2691 object, next_tid(tid)))) {
2692
2693 note_cmpxchg_failure("slab_free", s, tid);
2694 goto redo;
2695 }
84e554e6 2696 stat(s, FREE_FASTPATH);
894b8788 2697 } else
ff12059e 2698 __slab_free(s, page, x, addr);
894b8788 2699
894b8788
CL
2700}
2701
81819f0f
CL
2702void kmem_cache_free(struct kmem_cache *s, void *x)
2703{
b9ce5ef4
GC
2704 s = cache_from_obj(s, x);
2705 if (!s)
79576102 2706 return;
b9ce5ef4 2707 slab_free(s, virt_to_head_page(x), x, _RET_IP_);
ca2b84cb 2708 trace_kmem_cache_free(_RET_IP_, x);
81819f0f
CL
2709}
2710EXPORT_SYMBOL(kmem_cache_free);
2711
81819f0f 2712/*
672bba3a
CL
2713 * Object placement in a slab is made very easy because we always start at
2714 * offset 0. If we tune the size of the object to the alignment then we can
2715 * get the required alignment by putting one properly sized object after
2716 * another.
81819f0f
CL
2717 *
2718 * Notice that the allocation order determines the sizes of the per cpu
2719 * caches. Each processor has always one slab available for allocations.
2720 * Increasing the allocation order reduces the number of times that slabs
672bba3a 2721 * must be moved on and off the partial lists and is therefore a factor in
81819f0f 2722 * locking overhead.
81819f0f
CL
2723 */
2724
2725/*
2726 * Minimum / Maximum order of slab pages. This influences locking overhead
2727 * and slab fragmentation. A higher order reduces the number of partial slabs
2728 * and increases the number of allocations possible without having to
2729 * take the list_lock.
2730 */
2731static int slub_min_order;
114e9e89 2732static int slub_max_order = PAGE_ALLOC_COSTLY_ORDER;
9b2cd506 2733static int slub_min_objects;
81819f0f
CL
2734
2735/*
2736 * Merge control. If this is set then no merging of slab caches will occur.
672bba3a 2737 * (Could be removed. This was introduced to pacify the merge skeptics.)
81819f0f
CL
2738 */
2739static int slub_nomerge;
2740
81819f0f
CL
2741/*
2742 * Calculate the order of allocation given an slab object size.
2743 *
672bba3a
CL
2744 * The order of allocation has significant impact on performance and other
2745 * system components. Generally order 0 allocations should be preferred since
2746 * order 0 does not cause fragmentation in the page allocator. Larger objects
2747 * can be problematic to put into order 0 slabs because there may be too much
c124f5b5 2748 * unused space left. We go to a higher order if more than 1/16th of the slab
672bba3a
CL
2749 * would be wasted.
2750 *
2751 * In order to reach satisfactory performance we must ensure that a minimum
2752 * number of objects is in one slab. Otherwise we may generate too much
2753 * activity on the partial lists which requires taking the list_lock. This is
2754 * less a concern for large slabs though which are rarely used.
81819f0f 2755 *
672bba3a
CL
2756 * slub_max_order specifies the order where we begin to stop considering the
2757 * number of objects in a slab as critical. If we reach slub_max_order then
2758 * we try to keep the page order as low as possible. So we accept more waste
2759 * of space in favor of a small page order.
81819f0f 2760 *
672bba3a
CL
2761 * Higher order allocations also allow the placement of more objects in a
2762 * slab and thereby reduce object handling overhead. If the user has
2763 * requested a higher minimum order then we start with that one instead of
2764 * the smallest order which will fit the object.
81819f0f 2765 */
5e6d444e 2766static inline int slab_order(int size, int min_objects,
ab9a0f19 2767 int max_order, int fract_leftover, int reserved)
81819f0f
CL
2768{
2769 int order;
2770 int rem;
6300ea75 2771 int min_order = slub_min_order;
81819f0f 2772
ab9a0f19 2773 if (order_objects(min_order, size, reserved) > MAX_OBJS_PER_PAGE)
210b5c06 2774 return get_order(size * MAX_OBJS_PER_PAGE) - 1;
39b26464 2775
6300ea75 2776 for (order = max(min_order,
5e6d444e
CL
2777 fls(min_objects * size - 1) - PAGE_SHIFT);
2778 order <= max_order; order++) {
81819f0f 2779
5e6d444e 2780 unsigned long slab_size = PAGE_SIZE << order;
81819f0f 2781
ab9a0f19 2782 if (slab_size < min_objects * size + reserved)
81819f0f
CL
2783 continue;
2784
ab9a0f19 2785 rem = (slab_size - reserved) % size;
81819f0f 2786
5e6d444e 2787 if (rem <= slab_size / fract_leftover)
81819f0f
CL
2788 break;
2789
2790 }
672bba3a 2791
81819f0f
CL
2792 return order;
2793}
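/*
 * Worked example (added, assuming 4K pages, i.e. PAGE_SHIFT == 12): for a
 * 720 byte object with min_objects = 8, fract_leftover = 16 and no
 * reserved space, the loop starts at order fls(8 * 720 - 1) - 12 = 1.  An
 * order 1 slab holds 8192 / 720 = 11 objects and wastes 8192 % 720 = 272
 * bytes, which is below 8192 / 16 = 512, so order 1 is accepted.
 */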
2794
ab9a0f19 2795static inline int calculate_order(int size, int reserved)
5e6d444e
CL
2796{
2797 int order;
2798 int min_objects;
2799 int fraction;
e8120ff1 2800 int max_objects;
5e6d444e
CL
2801
2802 /*
2803 * Attempt to find best configuration for a slab. This
2804 * works by first attempting to generate a layout with
2805 * the best configuration and backing off gradually.
2806 *
2807 * First we reduce the acceptable waste in a slab. Then
2808 * we reduce the minimum objects required in a slab.
2809 */
2810 min_objects = slub_min_objects;
9b2cd506
CL
2811 if (!min_objects)
2812 min_objects = 4 * (fls(nr_cpu_ids) + 1);
ab9a0f19 2813 max_objects = order_objects(slub_max_order, size, reserved);
e8120ff1
ZY
2814 min_objects = min(min_objects, max_objects);
2815
5e6d444e 2816 while (min_objects > 1) {
c124f5b5 2817 fraction = 16;
5e6d444e
CL
2818 while (fraction >= 4) {
2819 order = slab_order(size, min_objects,
ab9a0f19 2820 slub_max_order, fraction, reserved);
5e6d444e
CL
2821 if (order <= slub_max_order)
2822 return order;
2823 fraction /= 2;
2824 }
5086c389 2825 min_objects--;
5e6d444e
CL
2826 }
2827
2828 /*
2829 * We were unable to place multiple objects in a slab. Now
2830 * lets see if we can place a single object there.
2831 */
ab9a0f19 2832 order = slab_order(size, 1, slub_max_order, 1, reserved);
5e6d444e
CL
2833 if (order <= slub_max_order)
2834 return order;
2835
2836 /*
2837 * Doh this slab cannot be placed using slub_max_order.
2838 */
ab9a0f19 2839 order = slab_order(size, 1, MAX_ORDER, 1, reserved);
818cf590 2840 if (order < MAX_ORDER)
5e6d444e
CL
2841 return order;
2842 return -ENOSYS;
2843}
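/*
 * Worked example (added): without a slub_min_objects= boot parameter the
 * initial goal is 4 * (fls(nr_cpu_ids) + 1) objects, e.g. 24 on a machine
 * with 16 possible cpus (fls(16) == 5).  If no order up to slub_max_order
 * meets that goal with at most 1/16th waste, the acceptable waste is
 * relaxed to 1/8 and then 1/4, the object goal is reduced one step at a
 * time, and as a last resort a single object per slab is accepted.
 */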
2844
5595cffc 2845static void
4053497d 2846init_kmem_cache_node(struct kmem_cache_node *n)
81819f0f
CL
2847{
2848 n->nr_partial = 0;
81819f0f
CL
2849 spin_lock_init(&n->list_lock);
2850 INIT_LIST_HEAD(&n->partial);
8ab1372f 2851#ifdef CONFIG_SLUB_DEBUG
0f389ec6 2852 atomic_long_set(&n->nr_slabs, 0);
02b71b70 2853 atomic_long_set(&n->total_objects, 0);
643b1138 2854 INIT_LIST_HEAD(&n->full);
8ab1372f 2855#endif
81819f0f
CL
2856}
2857
55136592 2858static inline int alloc_kmem_cache_cpus(struct kmem_cache *s)
4c93c355 2859{
6c182dc0 2860 BUILD_BUG_ON(PERCPU_DYNAMIC_EARLY_SIZE <
95a05b42 2861 KMALLOC_SHIFT_HIGH * sizeof(struct kmem_cache_cpu));
4c93c355 2862
8a5ec0ba 2863 /*
d4d84fef
CM
2864 * Must align to double word boundary for the double cmpxchg
2865 * instructions to work; see __pcpu_double_call_return_bool().
8a5ec0ba 2866 */
d4d84fef
CM
2867 s->cpu_slab = __alloc_percpu(sizeof(struct kmem_cache_cpu),
2868 2 * sizeof(void *));
8a5ec0ba
CL
2869
2870 if (!s->cpu_slab)
2871 return 0;
2872
2873 init_kmem_cache_cpus(s);
4c93c355 2874
8a5ec0ba 2875 return 1;
4c93c355 2876}
4c93c355 2877
51df1142
CL
2878static struct kmem_cache *kmem_cache_node;
2879
81819f0f
CL
2880/*
2881 * No kmalloc_node yet so do it by hand. We know that this is the first
2882 * slab on the node for this slabcache. There are no concurrent accesses
2883 * possible.
2884 *
721ae22a
ZYW
2885 * Note that this function only works on the kmem_cache_node
2886 * when allocating for the kmem_cache_node. This is used for bootstrapping
4c93c355 2887 * memory on a fresh node that has no slab structures yet.
81819f0f 2888 */
55136592 2889static void early_kmem_cache_node_alloc(int node)
81819f0f
CL
2890{
2891 struct page *page;
2892 struct kmem_cache_node *n;
2893
51df1142 2894 BUG_ON(kmem_cache_node->size < sizeof(struct kmem_cache_node));
81819f0f 2895
51df1142 2896 page = new_slab(kmem_cache_node, GFP_NOWAIT, node);
81819f0f
CL
2897
2898 BUG_ON(!page);
a2f92ee7
CL
2899 if (page_to_nid(page) != node) {
2900 printk(KERN_ERR "SLUB: Unable to allocate memory from "
2901 "node %d\n", node);
2902 printk(KERN_ERR "SLUB: Allocating a useless per node structure "
2903 "in order to be able to continue\n");
2904 }
2905
81819f0f
CL
2906 n = page->freelist;
2907 BUG_ON(!n);
51df1142 2908 page->freelist = get_freepointer(kmem_cache_node, n);
e6e82ea1 2909 page->inuse = 1;
8cb0a506 2910 page->frozen = 0;
51df1142 2911 kmem_cache_node->node[node] = n;
8ab1372f 2912#ifdef CONFIG_SLUB_DEBUG
f7cb1933 2913 init_object(kmem_cache_node, n, SLUB_RED_ACTIVE);
51df1142 2914 init_tracking(kmem_cache_node, n);
8ab1372f 2915#endif
4053497d 2916 init_kmem_cache_node(n);
51df1142 2917 inc_slabs_node(kmem_cache_node, node, page->objects);
6446faa2 2918
67b6c900 2919 /*
1e4dd946
SR
2920 * No locks need to be taken here as it has just been
2921 * initialized and there is no concurrent access.
67b6c900 2922 */
1e4dd946 2923 __add_partial(n, page, DEACTIVATE_TO_HEAD);
81819f0f
CL
2924}
2925
2926static void free_kmem_cache_nodes(struct kmem_cache *s)
2927{
2928 int node;
2929
f64dc58c 2930 for_each_node_state(node, N_NORMAL_MEMORY) {
81819f0f 2931 struct kmem_cache_node *n = s->node[node];
51df1142 2932
73367bd8 2933 if (n)
51df1142
CL
2934 kmem_cache_free(kmem_cache_node, n);
2935
81819f0f
CL
2936 s->node[node] = NULL;
2937 }
2938}
2939
55136592 2940static int init_kmem_cache_nodes(struct kmem_cache *s)
81819f0f
CL
2941{
2942 int node;
81819f0f 2943
f64dc58c 2944 for_each_node_state(node, N_NORMAL_MEMORY) {
81819f0f
CL
2945 struct kmem_cache_node *n;
2946
73367bd8 2947 if (slab_state == DOWN) {
55136592 2948 early_kmem_cache_node_alloc(node);
73367bd8
AD
2949 continue;
2950 }
51df1142 2951 n = kmem_cache_alloc_node(kmem_cache_node,
55136592 2952 GFP_KERNEL, node);
81819f0f 2953
73367bd8
AD
2954 if (!n) {
2955 free_kmem_cache_nodes(s);
2956 return 0;
81819f0f 2957 }
73367bd8 2958
81819f0f 2959 s->node[node] = n;
4053497d 2960 init_kmem_cache_node(n);
81819f0f
CL
2961 }
2962 return 1;
2963}
81819f0f 2964
c0bdb232 2965static void set_min_partial(struct kmem_cache *s, unsigned long min)
3b89d7d8
DR
2966{
2967 if (min < MIN_PARTIAL)
2968 min = MIN_PARTIAL;
2969 else if (min > MAX_PARTIAL)
2970 min = MAX_PARTIAL;
2971 s->min_partial = min;
2972}
2973
81819f0f
CL
2974/*
2975 * calculate_sizes() determines the order and the distribution of data within
2976 * a slab object.
2977 */
06b285dc 2978static int calculate_sizes(struct kmem_cache *s, int forced_order)
81819f0f
CL
2979{
2980 unsigned long flags = s->flags;
3b0efdfa 2981 unsigned long size = s->object_size;
834f3d11 2982 int order;
81819f0f 2983
d8b42bf5
CL
2984 /*
2985 * Round up object size to the next word boundary. We can only
2986 * place the free pointer at word boundaries and this determines
2987 * the possible location of the free pointer.
2988 */
2989 size = ALIGN(size, sizeof(void *));
2990
2991#ifdef CONFIG_SLUB_DEBUG
81819f0f
CL
2992 /*
2993 * Determine if we can poison the object itself. If the user of
2994 * the slab may touch the object after free or before allocation
2995 * then we should never poison the object itself.
2996 */
2997 if ((flags & SLAB_POISON) && !(flags & SLAB_DESTROY_BY_RCU) &&
c59def9f 2998 !s->ctor)
81819f0f
CL
2999 s->flags |= __OBJECT_POISON;
3000 else
3001 s->flags &= ~__OBJECT_POISON;
3002
81819f0f
CL
3003
3004 /*
672bba3a 3005 * If we are Redzoning then check if there is some space between the
81819f0f 3006 * end of the object and the free pointer. If not then add an
672bba3a 3007 * additional word to have some bytes to store Redzone information.
81819f0f 3008 */
3b0efdfa 3009 if ((flags & SLAB_RED_ZONE) && size == s->object_size)
81819f0f 3010 size += sizeof(void *);
41ecc55b 3011#endif
81819f0f
CL
3012
3013 /*
672bba3a
CL
3014 * With that we have determined the number of bytes in actual use
3015 * by the object. This is the potential offset to the free pointer.
81819f0f
CL
3016 */
3017 s->inuse = size;
3018
3019 if (((flags & (SLAB_DESTROY_BY_RCU | SLAB_POISON)) ||
c59def9f 3020 s->ctor)) {
81819f0f
CL
3021 /*
3022 * Relocate free pointer after the object if it is not
3023 * permitted to overwrite the first word of the object on
3024 * kmem_cache_free.
3025 *
3026 * This is the case if we do RCU, have a constructor or
3027 * destructor or are poisoning the objects.
3028 */
3029 s->offset = size;
3030 size += sizeof(void *);
3031 }
3032
c12b3c62 3033#ifdef CONFIG_SLUB_DEBUG
81819f0f
CL
3034 if (flags & SLAB_STORE_USER)
3035 /*
3036 * Need to store information about allocs and frees after
3037 * the object.
3038 */
3039 size += 2 * sizeof(struct track);
3040
be7b3fbc 3041 if (flags & SLAB_RED_ZONE)
81819f0f
CL
3042 /*
3043 * Add some empty padding so that we can catch
3044 * overwrites from earlier objects rather than let
3045 * tracking information or the free pointer be
0211a9c8 3046 * corrupted if a user writes before the start
81819f0f
CL
3047 * of the object.
3048 */
3049 size += sizeof(void *);
41ecc55b 3050#endif
672bba3a 3051
81819f0f
CL
3052 /*
3053 * SLUB stores one object immediately after another beginning from
3054 * offset 0. In order to align the objects we have to simply size
3055 * each object to conform to the alignment.
3056 */
45906855 3057 size = ALIGN(size, s->align);
81819f0f 3058 s->size = size;
06b285dc
CL
3059 if (forced_order >= 0)
3060 order = forced_order;
3061 else
ab9a0f19 3062 order = calculate_order(size, s->reserved);
81819f0f 3063
834f3d11 3064 if (order < 0)
81819f0f
CL
3065 return 0;
3066
b7a49f0d 3067 s->allocflags = 0;
834f3d11 3068 if (order)
b7a49f0d
CL
3069 s->allocflags |= __GFP_COMP;
3070
3071 if (s->flags & SLAB_CACHE_DMA)
2c59dd65 3072 s->allocflags |= GFP_DMA;
b7a49f0d
CL
3073
3074 if (s->flags & SLAB_RECLAIM_ACCOUNT)
3075 s->allocflags |= __GFP_RECLAIMABLE;
3076
81819f0f
CL
3077 /*
3078 * Determine the number of objects per slab
3079 */
ab9a0f19
LJ
3080 s->oo = oo_make(order, size, s->reserved);
3081 s->min = oo_make(get_order(size), size, s->reserved);
205ab99d
CL
3082 if (oo_objects(s->oo) > oo_objects(s->max))
3083 s->max = s->oo;
81819f0f 3084
834f3d11 3085 return !!oo_objects(s->oo);
81819f0f
CL
3086}
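/*
 * Added layout summary (derived from the code above) for a debug-enabled
 * 64-bit build: each object consists of, in order, the object data rounded
 * up to a word, one red zone word when SLAB_RED_ZONE is set and the object
 * size was already word aligned, a relocated free pointer when RCU, a
 * constructor or poisoning forbids reusing the first object word, two
 * struct track records when SLAB_STORE_USER is set, and one padding word
 * under SLAB_RED_ZONE to catch writes before the start of the next object.
 * The sum is finally aligned to s->align to form s->size.
 */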
3087
8a13a4cc 3088static int kmem_cache_open(struct kmem_cache *s, unsigned long flags)
81819f0f 3089{
8a13a4cc 3090 s->flags = kmem_cache_flags(s->size, flags, s->name, s->ctor);
ab9a0f19 3091 s->reserved = 0;
81819f0f 3092
da9a638c
LJ
3093 if (need_reserve_slab_rcu && (s->flags & SLAB_DESTROY_BY_RCU))
3094 s->reserved = sizeof(struct rcu_head);
81819f0f 3095
06b285dc 3096 if (!calculate_sizes(s, -1))
81819f0f 3097 goto error;
3de47213
DR
3098 if (disable_higher_order_debug) {
3099 /*
3100 * Disable debugging flags that store metadata if the min slab
3101 * order increased.
3102 */
3b0efdfa 3103 if (get_order(s->size) > get_order(s->object_size)) {
3de47213
DR
3104 s->flags &= ~DEBUG_METADATA_FLAGS;
3105 s->offset = 0;
3106 if (!calculate_sizes(s, -1))
3107 goto error;
3108 }
3109 }
81819f0f 3110
2565409f
HC
3111#if defined(CONFIG_HAVE_CMPXCHG_DOUBLE) && \
3112 defined(CONFIG_HAVE_ALIGNED_STRUCT_PAGE)
b789ef51
CL
3113 if (system_has_cmpxchg_double() && (s->flags & SLAB_DEBUG_FLAGS) == 0)
3114 /* Enable fast mode */
3115 s->flags |= __CMPXCHG_DOUBLE;
3116#endif
3117
3b89d7d8
DR
3118 /*
3119 * The larger the object size is, the more pages we want on the partial
3120 * list to avoid pounding the page allocator excessively.
3121 */
49e22585
CL
3122 set_min_partial(s, ilog2(s->size) / 2);
3123
3124 /*
3125 * cpu_partial determines the maximum number of objects kept in the
3126 * per cpu partial lists of a processor.
3127 *
3128 * Per cpu partial lists mainly contain slabs that just have one
3129 * object freed. If they are used for allocation then they can be
3130 * filled up again with minimal effort. The slab will never hit the
3131 * per node partial lists and therefore no locking will be required.
3132 *
3133 * This setting also determines
3134 *
3135 * A) The number of objects from per cpu partial slabs dumped to the
3136 * per node list when we reach the limit.
9f264904 3137 * B) The number of objects in cpu partial slabs to extract from the
d0e0ac97
CG
3138 * per node list when we run out of per cpu objects. We only fetch
3139 * 50% to keep some capacity around for frees.
49e22585 3140 */
345c905d 3141 if (!kmem_cache_has_cpu_partial(s))
8f1e33da
CL
3142 s->cpu_partial = 0;
3143 else if (s->size >= PAGE_SIZE)
49e22585
CL
3144 s->cpu_partial = 2;
3145 else if (s->size >= 1024)
3146 s->cpu_partial = 6;
3147 else if (s->size >= 256)
3148 s->cpu_partial = 13;
3149 else
3150 s->cpu_partial = 30;
3151
81819f0f 3152#ifdef CONFIG_NUMA
e2cb96b7 3153 s->remote_node_defrag_ratio = 1000;
81819f0f 3154#endif
55136592 3155 if (!init_kmem_cache_nodes(s))
dfb4f096 3156 goto error;
81819f0f 3157
55136592 3158 if (alloc_kmem_cache_cpus(s))
278b1bb1 3159 return 0;
ff12059e 3160
4c93c355 3161 free_kmem_cache_nodes(s);
81819f0f
CL
3162error:
3163 if (flags & SLAB_PANIC)
3164 panic("Cannot create slab %s size=%lu realsize=%u "
3165 "order=%u offset=%u flags=%lx\n",
d0e0ac97
CG
3166 s->name, (unsigned long)s->size, s->size,
3167 oo_order(s->oo), s->offset, flags);
278b1bb1 3168 return -EINVAL;
81819f0f 3169}
81819f0f 3170
33b12c38
CL
3171static void list_slab_objects(struct kmem_cache *s, struct page *page,
3172 const char *text)
3173{
3174#ifdef CONFIG_SLUB_DEBUG
3175 void *addr = page_address(page);
3176 void *p;
a5dd5c11
NK
3177 unsigned long *map = kzalloc(BITS_TO_LONGS(page->objects) *
3178 sizeof(long), GFP_ATOMIC);
bbd7d57b
ED
3179 if (!map)
3180 return;
945cf2b6 3181 slab_err(s, page, text, s->name);
33b12c38 3182 slab_lock(page);
33b12c38 3183
5f80b13a 3184 get_map(s, page, map);
33b12c38
CL
3185 for_each_object(p, s, addr, page->objects) {
3186
3187 if (!test_bit(slab_index(p, s, addr), map)) {
3188 printk(KERN_ERR "INFO: Object 0x%p @offset=%tu\n",
3189 p, p - addr);
3190 print_tracking(s, p);
3191 }
3192 }
3193 slab_unlock(page);
bbd7d57b 3194 kfree(map);
33b12c38
CL
3195#endif
3196}
3197
81819f0f 3198/*
599870b1 3199 * Attempt to free all partial slabs on a node.
69cb8e6b
CL
3200 * This is called from kmem_cache_close(). We must be the last thread
3201 * using the cache and therefore we do not need to lock anymore.
81819f0f 3202 */
599870b1 3203static void free_partial(struct kmem_cache *s, struct kmem_cache_node *n)
81819f0f 3204{
81819f0f
CL
3205 struct page *page, *h;
3206
33b12c38 3207 list_for_each_entry_safe(page, h, &n->partial, lru) {
81819f0f 3208 if (!page->inuse) {
1e4dd946 3209 __remove_partial(n, page);
81819f0f 3210 discard_slab(s, page);
33b12c38
CL
3211 } else {
3212 list_slab_objects(s, page,
945cf2b6 3213 "Objects remaining in %s on kmem_cache_close()");
599870b1 3214 }
33b12c38 3215 }
81819f0f
CL
3216}
3217
3218/*
672bba3a 3219 * Release all resources used by a slab cache.
81819f0f 3220 */
0c710013 3221static inline int kmem_cache_close(struct kmem_cache *s)
81819f0f
CL
3222{
3223 int node;
3224
3225 flush_all(s);
81819f0f 3226 /* Attempt to free all objects */
f64dc58c 3227 for_each_node_state(node, N_NORMAL_MEMORY) {
81819f0f
CL
3228 struct kmem_cache_node *n = get_node(s, node);
3229
599870b1
CL
3230 free_partial(s, n);
3231 if (n->nr_partial || slabs_node(s, node))
81819f0f
CL
3232 return 1;
3233 }
945cf2b6 3234 free_percpu(s->cpu_slab);
81819f0f
CL
3235 free_kmem_cache_nodes(s);
3236 return 0;
3237}
3238
945cf2b6 3239int __kmem_cache_shutdown(struct kmem_cache *s)
81819f0f 3240{
12c3667f 3241 int rc = kmem_cache_close(s);
945cf2b6 3242
5413dfba
GC
3243 if (!rc) {
3244 /*
421af243
VD
3245 * Since slab_attr_store may take the slab_mutex, we should
3246 * release the lock while removing the sysfs entry in order to
3247 * avoid a deadlock. Because this is pretty much the last
5413dfba
GC
3248 * operation we do and the lock will be released shortly after
3249 * that in slab_common.c, we could just move sysfs_slab_remove
3250 * to a later point in common code. We should do that when we
3251 * have a common sysfs framework for all allocators.
3252 */
3253 mutex_unlock(&slab_mutex);
81819f0f 3254 sysfs_slab_remove(s);
5413dfba
GC
3255 mutex_lock(&slab_mutex);
3256 }
12c3667f
CL
3257
3258 return rc;
81819f0f 3259}
81819f0f
CL
3260
3261/********************************************************************
3262 * Kmalloc subsystem
3263 *******************************************************************/
3264
81819f0f
CL
3265static int __init setup_slub_min_order(char *str)
3266{
06428780 3267 get_option(&str, &slub_min_order);
81819f0f
CL
3268
3269 return 1;
3270}
3271
3272__setup("slub_min_order=", setup_slub_min_order);
3273
3274static int __init setup_slub_max_order(char *str)
3275{
06428780 3276 get_option(&str, &slub_max_order);
818cf590 3277 slub_max_order = min(slub_max_order, MAX_ORDER - 1);
81819f0f
CL
3278
3279 return 1;
3280}
3281
3282__setup("slub_max_order=", setup_slub_max_order);
3283
3284static int __init setup_slub_min_objects(char *str)
3285{
06428780 3286 get_option(&str, &slub_min_objects);
81819f0f
CL
3287
3288 return 1;
3289}
3290
3291__setup("slub_min_objects=", setup_slub_min_objects);
3292
3293static int __init setup_slub_nomerge(char *str)
3294{
3295 slub_nomerge = 1;
3296 return 1;
3297}
3298
3299__setup("slub_nomerge", setup_slub_nomerge);
3300
81819f0f
CL
3301void *__kmalloc(size_t size, gfp_t flags)
3302{
aadb4bc4 3303 struct kmem_cache *s;
5b882be4 3304 void *ret;
81819f0f 3305
95a05b42 3306 if (unlikely(size > KMALLOC_MAX_CACHE_SIZE))
eada35ef 3307 return kmalloc_large(size, flags);
aadb4bc4 3308
2c59dd65 3309 s = kmalloc_slab(size, flags);
aadb4bc4
CL
3310
3311 if (unlikely(ZERO_OR_NULL_PTR(s)))
6cb8f913
CL
3312 return s;
3313
2b847c3c 3314 ret = slab_alloc(s, flags, _RET_IP_);
5b882be4 3315
ca2b84cb 3316 trace_kmalloc(_RET_IP_, ret, size, s->size, flags);
5b882be4
EGM
3317
3318 return ret;
81819f0f
CL
3319}
3320EXPORT_SYMBOL(__kmalloc);
3321
5d1f57e4 3322#ifdef CONFIG_NUMA
f619cfe1
CL
3323static void *kmalloc_large_node(size_t size, gfp_t flags, int node)
3324{
b1eeab67 3325 struct page *page;
e4f7c0b4 3326 void *ptr = NULL;
f619cfe1 3327
d79923fa 3328 flags |= __GFP_COMP | __GFP_NOTRACK | __GFP_KMEMCG;
b1eeab67 3329 page = alloc_pages_node(node, flags, get_order(size));
f619cfe1 3330 if (page)
e4f7c0b4
CM
3331 ptr = page_address(page);
3332
d56791b3 3333 kmalloc_large_node_hook(ptr, size, flags);
e4f7c0b4 3334 return ptr;
f619cfe1
CL
3335}
3336
81819f0f
CL
3337void *__kmalloc_node(size_t size, gfp_t flags, int node)
3338{
aadb4bc4 3339 struct kmem_cache *s;
5b882be4 3340 void *ret;
81819f0f 3341
95a05b42 3342 if (unlikely(size > KMALLOC_MAX_CACHE_SIZE)) {
5b882be4
EGM
3343 ret = kmalloc_large_node(size, flags, node);
3344
ca2b84cb
EGM
3345 trace_kmalloc_node(_RET_IP_, ret,
3346 size, PAGE_SIZE << get_order(size),
3347 flags, node);
5b882be4
EGM
3348
3349 return ret;
3350 }
aadb4bc4 3351
2c59dd65 3352 s = kmalloc_slab(size, flags);
aadb4bc4
CL
3353
3354 if (unlikely(ZERO_OR_NULL_PTR(s)))
6cb8f913
CL
3355 return s;
3356
2b847c3c 3357 ret = slab_alloc_node(s, flags, node, _RET_IP_);
5b882be4 3358
ca2b84cb 3359 trace_kmalloc_node(_RET_IP_, ret, size, s->size, flags, node);
5b882be4
EGM
3360
3361 return ret;
81819f0f
CL
3362}
3363EXPORT_SYMBOL(__kmalloc_node);
3364#endif
3365
3366size_t ksize(const void *object)
3367{
272c1d21 3368 struct page *page;
81819f0f 3369
ef8b4520 3370 if (unlikely(object == ZERO_SIZE_PTR))
272c1d21
CL
3371 return 0;
3372
294a80a8 3373 page = virt_to_head_page(object);
294a80a8 3374
76994412
PE
3375 if (unlikely(!PageSlab(page))) {
3376 WARN_ON(!PageCompound(page));
294a80a8 3377 return PAGE_SIZE << compound_order(page);
76994412 3378 }
81819f0f 3379
1b4f59e3 3380 return slab_ksize(page->slab_cache);
81819f0f 3381}
b1aabecd 3382EXPORT_SYMBOL(ksize);
81819f0f
CL
3383
3384void kfree(const void *x)
3385{
81819f0f 3386 struct page *page;
5bb983b0 3387 void *object = (void *)x;
81819f0f 3388
2121db74
PE
3389 trace_kfree(_RET_IP_, x);
3390
2408c550 3391 if (unlikely(ZERO_OR_NULL_PTR(x)))
81819f0f
CL
3392 return;
3393
b49af68f 3394 page = virt_to_head_page(x);
aadb4bc4 3395 if (unlikely(!PageSlab(page))) {
0937502a 3396 BUG_ON(!PageCompound(page));
d56791b3 3397 kfree_hook(x);
d79923fa 3398 __free_memcg_kmem_pages(page, compound_order(page));
aadb4bc4
CL
3399 return;
3400 }
1b4f59e3 3401 slab_free(page->slab_cache, page, object, _RET_IP_);
81819f0f
CL
3402}
3403EXPORT_SYMBOL(kfree);
3404
2086d26a 3405/*
672bba3a
CL
3406 * kmem_cache_shrink removes empty slabs from the partial lists and sorts
3407 * the remaining slabs by the number of items in use. The slabs with the
3408 * most items in use come first. New allocations will then fill those up
3409 * and thus they can be removed from the partial lists.
3410 *
3411 * The slabs with the least items are placed last. This results in them
3412 * being allocated from last increasing the chance that the last objects
3413 * are freed in them.
2086d26a
CL
3414 */
3415int kmem_cache_shrink(struct kmem_cache *s)
3416{
3417 int node;
3418 int i;
3419 struct kmem_cache_node *n;
3420 struct page *page;
3421 struct page *t;
205ab99d 3422 int objects = oo_objects(s->max);
2086d26a 3423 struct list_head *slabs_by_inuse =
834f3d11 3424 kmalloc(sizeof(struct list_head) * objects, GFP_KERNEL);
2086d26a
CL
3425 unsigned long flags;
3426
3427 if (!slabs_by_inuse)
3428 return -ENOMEM;
3429
3430 flush_all(s);
f64dc58c 3431 for_each_node_state(node, N_NORMAL_MEMORY) {
2086d26a
CL
3432 n = get_node(s, node);
3433
3434 if (!n->nr_partial)
3435 continue;
3436
834f3d11 3437 for (i = 0; i < objects; i++)
2086d26a
CL
3438 INIT_LIST_HEAD(slabs_by_inuse + i);
3439
3440 spin_lock_irqsave(&n->list_lock, flags);
3441
3442 /*
672bba3a 3443 * Build lists indexed by the items in use in each slab.
2086d26a 3444 *
672bba3a
CL
3445 * Note that concurrent frees may occur while we hold the
3446 * list_lock. page->inuse here is the upper limit.
2086d26a
CL
3447 */
3448 list_for_each_entry_safe(page, t, &n->partial, lru) {
69cb8e6b
CL
3449 list_move(&page->lru, slabs_by_inuse + page->inuse);
3450 if (!page->inuse)
3451 n->nr_partial--;
2086d26a
CL
3452 }
3453
2086d26a 3454 /*
672bba3a
CL
3455 * Rebuild the partial list with the slabs filled up most
3456 * first and the least used slabs at the end.
2086d26a 3457 */
69cb8e6b 3458 for (i = objects - 1; i > 0; i--)
2086d26a
CL
3459 list_splice(slabs_by_inuse + i, n->partial.prev);
3460
2086d26a 3461 spin_unlock_irqrestore(&n->list_lock, flags);
69cb8e6b
CL
3462
3463 /* Release empty slabs */
3464 list_for_each_entry_safe(page, t, slabs_by_inuse, lru)
3465 discard_slab(s, page);
2086d26a
CL
3466 }
3467
3468 kfree(slabs_by_inuse);
3469 return 0;
3470}
3471EXPORT_SYMBOL(kmem_cache_shrink);
3472
b9049e23
YG
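/*
 * Before a node goes offline, shrink every cache so that as many empty
 * slabs as possible are returned to the page allocator.
 */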
3473static int slab_mem_going_offline_callback(void *arg)
3474{
3475 struct kmem_cache *s;
3476
18004c5d 3477 mutex_lock(&slab_mutex);
b9049e23
YG
3478 list_for_each_entry(s, &slab_caches, list)
3479 kmem_cache_shrink(s);
18004c5d 3480 mutex_unlock(&slab_mutex);
b9049e23
YG
3481
3482 return 0;
3483}
3484
3485static void slab_mem_offline_callback(void *arg)
3486{
3487 struct kmem_cache_node *n;
3488 struct kmem_cache *s;
3489 struct memory_notify *marg = arg;
3490 int offline_node;
3491
b9d5ab25 3492 offline_node = marg->status_change_nid_normal;
b9049e23
YG
3493
3494 /*
 3495 * If the node still has available memory, we still need its
 3496 * kmem_cache_node, so there is nothing to do here.
3497 */
3498 if (offline_node < 0)
3499 return;
3500
18004c5d 3501 mutex_lock(&slab_mutex);
b9049e23
YG
3502 list_for_each_entry(s, &slab_caches, list) {
3503 n = get_node(s, offline_node);
3504 if (n) {
3505 /*
3506 * if n->nr_slabs > 0, slabs still exist on the node
3507 * that is going down. We were unable to free them,
c9404c9c 3508 * and offline_pages() shouldn't have called this
b9049e23
YG
3509 * callback. So, we must fail.
3510 */
0f389ec6 3511 BUG_ON(slabs_node(s, offline_node));
b9049e23
YG
3512
3513 s->node[offline_node] = NULL;
8de66a0c 3514 kmem_cache_free(kmem_cache_node, n);
b9049e23
YG
3515 }
3516 }
18004c5d 3517 mutex_unlock(&slab_mutex);
b9049e23
YG
3518}
3519
3520static int slab_mem_going_online_callback(void *arg)
3521{
3522 struct kmem_cache_node *n;
3523 struct kmem_cache *s;
3524 struct memory_notify *marg = arg;
b9d5ab25 3525 int nid = marg->status_change_nid_normal;
b9049e23
YG
3526 int ret = 0;
3527
3528 /*
3529 * If the node's memory is already available, then kmem_cache_node is
3530 * already created. Nothing to do.
3531 */
3532 if (nid < 0)
3533 return 0;
3534
3535 /*
0121c619 3536 * We are bringing a node online. No memory is available yet. We must
b9049e23
YG
3537 * allocate a kmem_cache_node structure in order to bring the node
3538 * online.
3539 */
18004c5d 3540 mutex_lock(&slab_mutex);
b9049e23
YG
3541 list_for_each_entry(s, &slab_caches, list) {
3542 /*
3543 * XXX: kmem_cache_alloc_node will fallback to other nodes
3544 * since memory is not yet available from the node that
3545 * is brought up.
3546 */
8de66a0c 3547 n = kmem_cache_alloc(kmem_cache_node, GFP_KERNEL);
b9049e23
YG
3548 if (!n) {
3549 ret = -ENOMEM;
3550 goto out;
3551 }
4053497d 3552 init_kmem_cache_node(n);
b9049e23
YG
3553 s->node[nid] = n;
3554 }
3555out:
18004c5d 3556 mutex_unlock(&slab_mutex);
b9049e23
YG
3557 return ret;
3558}
3559
3560static int slab_memory_callback(struct notifier_block *self,
3561 unsigned long action, void *arg)
3562{
3563 int ret = 0;
3564
3565 switch (action) {
3566 case MEM_GOING_ONLINE:
3567 ret = slab_mem_going_online_callback(arg);
3568 break;
3569 case MEM_GOING_OFFLINE:
3570 ret = slab_mem_going_offline_callback(arg);
3571 break;
3572 case MEM_OFFLINE:
3573 case MEM_CANCEL_ONLINE:
3574 slab_mem_offline_callback(arg);
3575 break;
3576 case MEM_ONLINE:
3577 case MEM_CANCEL_OFFLINE:
3578 break;
3579 }
dc19f9db
KH
3580 if (ret)
3581 ret = notifier_from_errno(ret);
3582 else
3583 ret = NOTIFY_OK;
b9049e23
YG
3584 return ret;
3585}
3586
3ac38faa
AM
3587static struct notifier_block slab_memory_callback_nb = {
3588 .notifier_call = slab_memory_callback,
3589 .priority = SLAB_CALLBACK_PRI,
3590};
b9049e23 3591
81819f0f
CL
3592/********************************************************************
3593 * Basic setup of slabs
3594 *******************************************************************/
3595
51df1142
CL
3596/*
3597 * Used for early kmem_cache structures that were allocated using
dffb4d60
CL
3598 * the page allocator. Allocate them properly then fix up the pointers
3599 * that may be pointing to the wrong kmem_cache structure.
51df1142
CL
3600 */
3601
dffb4d60 3602static struct kmem_cache * __init bootstrap(struct kmem_cache *static_cache)
51df1142
CL
3603{
3604 int node;
dffb4d60 3605 struct kmem_cache *s = kmem_cache_zalloc(kmem_cache, GFP_NOWAIT);
51df1142 3606
dffb4d60 3607 memcpy(s, static_cache, kmem_cache->object_size);
51df1142 3608
7d557b3c
GC
3609 /*
3610 * This runs very early, and only the boot processor is supposed to be
3611 * up. Even if it weren't true, IRQs are not up so we couldn't fire
3612 * IPIs around.
3613 */
3614 __flush_cpu_slab(s, smp_processor_id());
51df1142
CL
3615 for_each_node_state(node, N_NORMAL_MEMORY) {
3616 struct kmem_cache_node *n = get_node(s, node);
3617 struct page *p;
3618
3619 if (n) {
3620 list_for_each_entry(p, &n->partial, lru)
1b4f59e3 3621 p->slab_cache = s;
51df1142 3622
607bf324 3623#ifdef CONFIG_SLUB_DEBUG
51df1142 3624 list_for_each_entry(p, &n->full, lru)
1b4f59e3 3625 p->slab_cache = s;
51df1142
CL
3626#endif
3627 }
3628 }
dffb4d60
CL
3629 list_add(&s->list, &slab_caches);
3630 return s;
51df1142
CL
3631}
3632
81819f0f
CL
3633void __init kmem_cache_init(void)
3634{
dffb4d60
CL
3635 static __initdata struct kmem_cache boot_kmem_cache,
3636 boot_kmem_cache_node;
51df1142 3637
fc8d8620
SG
3638 if (debug_guardpage_minorder())
3639 slub_max_order = 0;
3640
dffb4d60
CL
3641 kmem_cache_node = &boot_kmem_cache_node;
3642 kmem_cache = &boot_kmem_cache;
51df1142 3643
dffb4d60
CL
3644 create_boot_cache(kmem_cache_node, "kmem_cache_node",
3645 sizeof(struct kmem_cache_node), SLAB_HWCACHE_ALIGN);
b9049e23 3646
3ac38faa 3647 register_hotmemory_notifier(&slab_memory_callback_nb);
81819f0f
CL
3648
3649 /* Able to allocate the per node structures */
3650 slab_state = PARTIAL;
3651
dffb4d60
CL
3652 create_boot_cache(kmem_cache, "kmem_cache",
3653 offsetof(struct kmem_cache, node) +
3654 nr_node_ids * sizeof(struct kmem_cache_node *),
3655 SLAB_HWCACHE_ALIGN);
8a13a4cc 3656
dffb4d60 3657 kmem_cache = bootstrap(&boot_kmem_cache);
81819f0f 3658
51df1142
CL
3659 /*
3660 * Allocate kmem_cache_node properly from the kmem_cache slab.
3661 * kmem_cache_node is separately allocated so no need to
3662 * update any list pointers.
3663 */
dffb4d60 3664 kmem_cache_node = bootstrap(&boot_kmem_cache_node);
51df1142
CL
3665
3666 /* Now we can use the kmem_cache to allocate kmalloc slabs */
f97d5f63 3667 create_kmalloc_caches(0);
81819f0f
CL
3668
3669#ifdef CONFIG_SMP
3670 register_cpu_notifier(&slab_notifier);
9dfc6e68 3671#endif
81819f0f 3672
3adbefee 3673 printk(KERN_INFO
f97d5f63 3674 "SLUB: HWalign=%d, Order=%d-%d, MinObjects=%d,"
4b356be0 3675 " CPUs=%d, Nodes=%d\n",
f97d5f63 3676 cache_line_size(),
81819f0f
CL
3677 slub_min_order, slub_max_order, slub_min_objects,
3678 nr_cpu_ids, nr_node_ids);
3679}
3680
7e85ee0c
PE
3681void __init kmem_cache_init_late(void)
3682{
7e85ee0c
PE
3683}
3684
81819f0f
CL
3685/*
3686 * Find a mergeable slab cache
3687 */
3688static int slab_unmergeable(struct kmem_cache *s)
3689{
3690 if (slub_nomerge || (s->flags & SLUB_NEVER_MERGE))
3691 return 1;
3692
a44cb944
VD
3693 if (!is_root_cache(s))
3694 return 1;
3695
c59def9f 3696 if (s->ctor)
81819f0f
CL
3697 return 1;
3698
8ffa6875
CL
3699 /*
3700 * We may have set a slab to be unmergeable during bootstrap.
3701 */
3702 if (s->refcount < 0)
3703 return 1;
3704
81819f0f
CL
3705 return 0;
3706}
3707
a44cb944
VD
3708static struct kmem_cache *find_mergeable(size_t size, size_t align,
3709 unsigned long flags, const char *name, void (*ctor)(void *))
81819f0f 3710{
5b95a4ac 3711 struct kmem_cache *s;
81819f0f
CL
3712
3713 if (slub_nomerge || (flags & SLUB_NEVER_MERGE))
3714 return NULL;
3715
c59def9f 3716 if (ctor)
81819f0f
CL
3717 return NULL;
3718
3719 size = ALIGN(size, sizeof(void *));
3720 align = calculate_alignment(flags, align, size);
3721 size = ALIGN(size, align);
ba0268a8 3722 flags = kmem_cache_flags(size, flags, name, NULL);
81819f0f 3723
5b95a4ac 3724 list_for_each_entry(s, &slab_caches, list) {
81819f0f
CL
3725 if (slab_unmergeable(s))
3726 continue;
3727
3728 if (size > s->size)
3729 continue;
3730
ba0268a8 3731 if ((flags & SLUB_MERGE_SAME) != (s->flags & SLUB_MERGE_SAME))
a44cb944 3732 continue;
81819f0f
CL
3733 /*
3734 * Check if alignment is compatible.
3735 * Courtesy of Adrian Drzewiecki
3736 */
06428780 3737 if ((s->size & ~(align - 1)) != s->size)
81819f0f
CL
3738 continue;
3739
3740 if (s->size - size >= sizeof(void *))
3741 continue;
3742
3743 return s;
3744 }
3745 return NULL;
3746}
3747
2633d7a0 3748struct kmem_cache *
a44cb944
VD
3749__kmem_cache_alias(const char *name, size_t size, size_t align,
3750 unsigned long flags, void (*ctor)(void *))
81819f0f
CL
3751{
3752 struct kmem_cache *s;
3753
a44cb944 3754 s = find_mergeable(size, align, flags, name, ctor);
81819f0f 3755 if (s) {
84d0ddd6
VD
3756 int i;
3757 struct kmem_cache *c;
3758
81819f0f 3759 s->refcount++;
84d0ddd6 3760
81819f0f
CL
3761 /*
3762 * Adjust the object sizes so that we clear
3763 * the complete object on kzalloc.
3764 */
3b0efdfa 3765 s->object_size = max(s->object_size, (int)size);
81819f0f 3766 s->inuse = max_t(int, s->inuse, ALIGN(size, sizeof(void *)));
6446faa2 3767
84d0ddd6
VD
3768 for_each_memcg_cache_index(i) {
3769 c = cache_from_memcg_idx(s, i);
3770 if (!c)
3771 continue;
3772 c->object_size = s->object_size;
3773 c->inuse = max_t(int, c->inuse,
3774 ALIGN(size, sizeof(void *)));
3775 }
3776
7b8f3b66 3777 if (sysfs_slab_alias(s, name)) {
7b8f3b66 3778 s->refcount--;
cbb79694 3779 s = NULL;
7b8f3b66 3780 }
a0e1d1be 3781 }
6446faa2 3782
cbb79694
CL
3783 return s;
3784}
84c1cf62 3785
8a13a4cc 3786int __kmem_cache_create(struct kmem_cache *s, unsigned long flags)
cbb79694 3787{
aac3a166
PE
3788 int err;
3789
3790 err = kmem_cache_open(s, flags);
3791 if (err)
3792 return err;
20cea968 3793
45530c44
CL
3794 /* Mutex is not taken during early boot */
3795 if (slab_state <= UP)
3796 return 0;
3797
107dab5c 3798 memcg_propagate_slab_attrs(s);
aac3a166 3799 err = sysfs_slab_add(s);
aac3a166
PE
3800 if (err)
3801 kmem_cache_close(s);
20cea968 3802
aac3a166 3803 return err;
81819f0f 3804}
81819f0f 3805
81819f0f 3806#ifdef CONFIG_SMP
81819f0f 3807/*
672bba3a
CL
 3808 * Use the cpu notifier to ensure that the cpu slabs are flushed when
3809 * necessary.
81819f0f 3810 */
0db0628d 3811static int slab_cpuup_callback(struct notifier_block *nfb,
81819f0f
CL
3812 unsigned long action, void *hcpu)
3813{
3814 long cpu = (long)hcpu;
5b95a4ac
CL
3815 struct kmem_cache *s;
3816 unsigned long flags;
81819f0f
CL
3817
3818 switch (action) {
3819 case CPU_UP_CANCELED:
8bb78442 3820 case CPU_UP_CANCELED_FROZEN:
81819f0f 3821 case CPU_DEAD:
8bb78442 3822 case CPU_DEAD_FROZEN:
18004c5d 3823 mutex_lock(&slab_mutex);
5b95a4ac
CL
3824 list_for_each_entry(s, &slab_caches, list) {
3825 local_irq_save(flags);
3826 __flush_cpu_slab(s, cpu);
3827 local_irq_restore(flags);
3828 }
18004c5d 3829 mutex_unlock(&slab_mutex);
81819f0f
CL
3830 break;
3831 default:
3832 break;
3833 }
3834 return NOTIFY_OK;
3835}
3836
0db0628d 3837static struct notifier_block slab_notifier = {
3adbefee 3838 .notifier_call = slab_cpuup_callback
06428780 3839};
81819f0f
CL
3840
3841#endif
3842
ce71e27c 3843void *__kmalloc_track_caller(size_t size, gfp_t gfpflags, unsigned long caller)
81819f0f 3844{
aadb4bc4 3845 struct kmem_cache *s;
94b528d0 3846 void *ret;
aadb4bc4 3847
95a05b42 3848 if (unlikely(size > KMALLOC_MAX_CACHE_SIZE))
eada35ef
PE
3849 return kmalloc_large(size, gfpflags);
3850
2c59dd65 3851 s = kmalloc_slab(size, gfpflags);
81819f0f 3852
2408c550 3853 if (unlikely(ZERO_OR_NULL_PTR(s)))
6cb8f913 3854 return s;
81819f0f 3855
2b847c3c 3856 ret = slab_alloc(s, gfpflags, caller);
94b528d0 3857
25985edc 3858 /* Honor the call site pointer we received. */
ca2b84cb 3859 trace_kmalloc(caller, ret, size, s->size, gfpflags);
94b528d0
EGM
3860
3861 return ret;
81819f0f
CL
3862}
3863
5d1f57e4 3864#ifdef CONFIG_NUMA
81819f0f 3865void *__kmalloc_node_track_caller(size_t size, gfp_t gfpflags,
ce71e27c 3866 int node, unsigned long caller)
81819f0f 3867{
aadb4bc4 3868 struct kmem_cache *s;
94b528d0 3869 void *ret;
aadb4bc4 3870
95a05b42 3871 if (unlikely(size > KMALLOC_MAX_CACHE_SIZE)) {
d3e14aa3
XF
3872 ret = kmalloc_large_node(size, gfpflags, node);
3873
3874 trace_kmalloc_node(caller, ret,
3875 size, PAGE_SIZE << get_order(size),
3876 gfpflags, node);
3877
3878 return ret;
3879 }
eada35ef 3880
2c59dd65 3881 s = kmalloc_slab(size, gfpflags);
81819f0f 3882
2408c550 3883 if (unlikely(ZERO_OR_NULL_PTR(s)))
6cb8f913 3884 return s;
81819f0f 3885
2b847c3c 3886 ret = slab_alloc_node(s, gfpflags, node, caller);
94b528d0 3887
25985edc 3888 /* Honor the call site pointer we received. */
ca2b84cb 3889 trace_kmalloc_node(caller, ret, size, s->size, gfpflags, node);
94b528d0
EGM
3890
3891 return ret;
81819f0f 3892}
5d1f57e4 3893#endif
81819f0f 3894
ab4d5ed5 3895#ifdef CONFIG_SYSFS
205ab99d
CL
3896static int count_inuse(struct page *page)
3897{
3898 return page->inuse;
3899}
3900
3901static int count_total(struct page *page)
3902{
3903 return page->objects;
3904}
ab4d5ed5 3905#endif
205ab99d 3906
ab4d5ed5 3907#ifdef CONFIG_SLUB_DEBUG
434e245d
CL
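/*
 * Consistency check for one slab: verify the slab metadata and freelist,
 * then make sure every free object carries the inactive red zone pattern
 * and every allocated object the active one.
 */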
3908static int validate_slab(struct kmem_cache *s, struct page *page,
3909 unsigned long *map)
53e15af0
CL
3910{
3911 void *p;
a973e9dd 3912 void *addr = page_address(page);
53e15af0
CL
3913
3914 if (!check_slab(s, page) ||
3915 !on_freelist(s, page, NULL))
3916 return 0;
3917
3918 /* Now we know that a valid freelist exists */
39b26464 3919 bitmap_zero(map, page->objects);
53e15af0 3920
5f80b13a
CL
3921 get_map(s, page, map);
3922 for_each_object(p, s, addr, page->objects) {
3923 if (test_bit(slab_index(p, s, addr), map))
3924 if (!check_object(s, page, p, SLUB_RED_INACTIVE))
3925 return 0;
53e15af0
CL
3926 }
3927
224a88be 3928 for_each_object(p, s, addr, page->objects)
7656c72b 3929 if (!test_bit(slab_index(p, s, addr), map))
37d57443 3930 if (!check_object(s, page, p, SLUB_RED_ACTIVE))
53e15af0
CL
3931 return 0;
3932 return 1;
3933}
3934
434e245d
CL
3935static void validate_slab_slab(struct kmem_cache *s, struct page *page,
3936 unsigned long *map)
53e15af0 3937{
881db7fb
CL
3938 slab_lock(page);
3939 validate_slab(s, page, map);
3940 slab_unlock(page);
53e15af0
CL
3941}
3942
434e245d
CL
3943static int validate_slab_node(struct kmem_cache *s,
3944 struct kmem_cache_node *n, unsigned long *map)
53e15af0
CL
3945{
3946 unsigned long count = 0;
3947 struct page *page;
3948 unsigned long flags;
3949
3950 spin_lock_irqsave(&n->list_lock, flags);
3951
3952 list_for_each_entry(page, &n->partial, lru) {
434e245d 3953 validate_slab_slab(s, page, map);
53e15af0
CL
3954 count++;
3955 }
3956 if (count != n->nr_partial)
3957 printk(KERN_ERR "SLUB %s: %ld partial slabs counted but "
3958 "counter=%ld\n", s->name, count, n->nr_partial);
3959
3960 if (!(s->flags & SLAB_STORE_USER))
3961 goto out;
3962
3963 list_for_each_entry(page, &n->full, lru) {
434e245d 3964 validate_slab_slab(s, page, map);
53e15af0
CL
3965 count++;
3966 }
3967 if (count != atomic_long_read(&n->nr_slabs))
3968 printk(KERN_ERR "SLUB: %s %ld slabs counted but "
3969 "counter=%ld\n", s->name, count,
3970 atomic_long_read(&n->nr_slabs));
3971
3972out:
3973 spin_unlock_irqrestore(&n->list_lock, flags);
3974 return count;
3975}
3976
434e245d 3977static long validate_slab_cache(struct kmem_cache *s)
53e15af0
CL
3978{
3979 int node;
3980 unsigned long count = 0;
205ab99d 3981 unsigned long *map = kmalloc(BITS_TO_LONGS(oo_objects(s->max)) *
434e245d
CL
3982 sizeof(unsigned long), GFP_KERNEL);
3983
3984 if (!map)
3985 return -ENOMEM;
53e15af0
CL
3986
3987 flush_all(s);
f64dc58c 3988 for_each_node_state(node, N_NORMAL_MEMORY) {
53e15af0
CL
3989 struct kmem_cache_node *n = get_node(s, node);
3990
434e245d 3991 count += validate_slab_node(s, n, map);
53e15af0 3992 }
434e245d 3993 kfree(map);
53e15af0
CL
3994 return count;
3995}
88a420e4 3996/*
672bba3a 3997 * Generate lists of code addresses where slabcache objects are allocated
88a420e4
CL
3998 * and freed.
3999 */
4000
4001struct location {
4002 unsigned long count;
ce71e27c 4003 unsigned long addr;
45edfa58
CL
4004 long long sum_time;
4005 long min_time;
4006 long max_time;
4007 long min_pid;
4008 long max_pid;
174596a0 4009 DECLARE_BITMAP(cpus, NR_CPUS);
45edfa58 4010 nodemask_t nodes;
88a420e4
CL
4011};
4012
4013struct loc_track {
4014 unsigned long max;
4015 unsigned long count;
4016 struct location *loc;
4017};
4018
4019static void free_loc_track(struct loc_track *t)
4020{
4021 if (t->max)
4022 free_pages((unsigned long)t->loc,
4023 get_order(sizeof(struct location) * t->max));
4024}
4025
68dff6a9 4026static int alloc_loc_track(struct loc_track *t, unsigned long max, gfp_t flags)
88a420e4
CL
4027{
4028 struct location *l;
4029 int order;
4030
88a420e4
CL
4031 order = get_order(sizeof(struct location) * max);
4032
68dff6a9 4033 l = (void *)__get_free_pages(flags, order);
88a420e4
CL
4034 if (!l)
4035 return 0;
4036
4037 if (t->count) {
4038 memcpy(l, t->loc, sizeof(struct location) * t->count);
4039 free_loc_track(t);
4040 }
4041 t->max = max;
4042 t->loc = l;
4043 return 1;
4044}
4045
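/*
 * Record one tracking entry: binary search the table (kept sorted by
 * caller address); on a hit update the hit count, age, pid and cpu/node
 * statistics, otherwise grow the table if necessary and insert a new
 * entry at the sorted position.
 */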
4046static int add_location(struct loc_track *t, struct kmem_cache *s,
45edfa58 4047 const struct track *track)
88a420e4
CL
4048{
4049 long start, end, pos;
4050 struct location *l;
ce71e27c 4051 unsigned long caddr;
45edfa58 4052 unsigned long age = jiffies - track->when;
88a420e4
CL
4053
4054 start = -1;
4055 end = t->count;
4056
4057 for ( ; ; ) {
4058 pos = start + (end - start + 1) / 2;
4059
4060 /*
4061 * There is nothing at "end". If we end up there
 4062 * we need to insert the new element before end.
4063 */
4064 if (pos == end)
4065 break;
4066
4067 caddr = t->loc[pos].addr;
45edfa58
CL
4068 if (track->addr == caddr) {
4069
4070 l = &t->loc[pos];
4071 l->count++;
4072 if (track->when) {
4073 l->sum_time += age;
4074 if (age < l->min_time)
4075 l->min_time = age;
4076 if (age > l->max_time)
4077 l->max_time = age;
4078
4079 if (track->pid < l->min_pid)
4080 l->min_pid = track->pid;
4081 if (track->pid > l->max_pid)
4082 l->max_pid = track->pid;
4083
174596a0
RR
4084 cpumask_set_cpu(track->cpu,
4085 to_cpumask(l->cpus));
45edfa58
CL
4086 }
4087 node_set(page_to_nid(virt_to_page(track)), l->nodes);
88a420e4
CL
4088 return 1;
4089 }
4090
45edfa58 4091 if (track->addr < caddr)
88a420e4
CL
4092 end = pos;
4093 else
4094 start = pos;
4095 }
4096
4097 /*
672bba3a 4098 * Not found. Insert new tracking element.
88a420e4 4099 */
68dff6a9 4100 if (t->count >= t->max && !alloc_loc_track(t, 2 * t->max, GFP_ATOMIC))
88a420e4
CL
4101 return 0;
4102
4103 l = t->loc + pos;
4104 if (pos < t->count)
4105 memmove(l + 1, l,
4106 (t->count - pos) * sizeof(struct location));
4107 t->count++;
4108 l->count = 1;
45edfa58
CL
4109 l->addr = track->addr;
4110 l->sum_time = age;
4111 l->min_time = age;
4112 l->max_time = age;
4113 l->min_pid = track->pid;
4114 l->max_pid = track->pid;
174596a0
RR
4115 cpumask_clear(to_cpumask(l->cpus));
4116 cpumask_set_cpu(track->cpu, to_cpumask(l->cpus));
45edfa58
CL
4117 nodes_clear(l->nodes);
4118 node_set(page_to_nid(virt_to_page(track)), l->nodes);
88a420e4
CL
4119 return 1;
4120}
4121
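/*
 * Walk one slab and add a location entry for every object that is
 * currently allocated, using the requested alloc or free track slot.
 */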
4122static void process_slab(struct loc_track *t, struct kmem_cache *s,
bbd7d57b 4123 struct page *page, enum track_item alloc,
a5dd5c11 4124 unsigned long *map)
88a420e4 4125{
a973e9dd 4126 void *addr = page_address(page);
88a420e4
CL
4127 void *p;
4128
39b26464 4129 bitmap_zero(map, page->objects);
5f80b13a 4130 get_map(s, page, map);
88a420e4 4131
224a88be 4132 for_each_object(p, s, addr, page->objects)
45edfa58
CL
4133 if (!test_bit(slab_index(p, s, addr), map))
4134 add_location(t, s, get_track(s, p, alloc));
88a420e4
CL
4135}
4136
4137static int list_locations(struct kmem_cache *s, char *buf,
4138 enum track_item alloc)
4139{
e374d483 4140 int len = 0;
88a420e4 4141 unsigned long i;
68dff6a9 4142 struct loc_track t = { 0, 0, NULL };
88a420e4 4143 int node;
bbd7d57b
ED
4144 unsigned long *map = kmalloc(BITS_TO_LONGS(oo_objects(s->max)) *
4145 sizeof(unsigned long), GFP_KERNEL);
88a420e4 4146
bbd7d57b
ED
4147 if (!map || !alloc_loc_track(&t, PAGE_SIZE / sizeof(struct location),
4148 GFP_TEMPORARY)) {
4149 kfree(map);
68dff6a9 4150 return sprintf(buf, "Out of memory\n");
bbd7d57b 4151 }
88a420e4
CL
4152 /* Push back cpu slabs */
4153 flush_all(s);
4154
f64dc58c 4155 for_each_node_state(node, N_NORMAL_MEMORY) {
88a420e4
CL
4156 struct kmem_cache_node *n = get_node(s, node);
4157 unsigned long flags;
4158 struct page *page;
4159
9e86943b 4160 if (!atomic_long_read(&n->nr_slabs))
88a420e4
CL
4161 continue;
4162
4163 spin_lock_irqsave(&n->list_lock, flags);
4164 list_for_each_entry(page, &n->partial, lru)
bbd7d57b 4165 process_slab(&t, s, page, alloc, map);
88a420e4 4166 list_for_each_entry(page, &n->full, lru)
bbd7d57b 4167 process_slab(&t, s, page, alloc, map);
88a420e4
CL
4168 spin_unlock_irqrestore(&n->list_lock, flags);
4169 }
4170
4171 for (i = 0; i < t.count; i++) {
45edfa58 4172 struct location *l = &t.loc[i];
88a420e4 4173
9c246247 4174 if (len > PAGE_SIZE - KSYM_SYMBOL_LEN - 100)
88a420e4 4175 break;
e374d483 4176 len += sprintf(buf + len, "%7ld ", l->count);
45edfa58
CL
4177
4178 if (l->addr)
62c70bce 4179 len += sprintf(buf + len, "%pS", (void *)l->addr);
88a420e4 4180 else
e374d483 4181 len += sprintf(buf + len, "<not-available>");
45edfa58
CL
4182
4183 if (l->sum_time != l->min_time) {
e374d483 4184 len += sprintf(buf + len, " age=%ld/%ld/%ld",
f8bd2258
RZ
4185 l->min_time,
4186 (long)div_u64(l->sum_time, l->count),
4187 l->max_time);
45edfa58 4188 } else
e374d483 4189 len += sprintf(buf + len, " age=%ld",
45edfa58
CL
4190 l->min_time);
4191
4192 if (l->min_pid != l->max_pid)
e374d483 4193 len += sprintf(buf + len, " pid=%ld-%ld",
45edfa58
CL
4194 l->min_pid, l->max_pid);
4195 else
e374d483 4196 len += sprintf(buf + len, " pid=%ld",
45edfa58
CL
4197 l->min_pid);
4198
174596a0
RR
4199 if (num_online_cpus() > 1 &&
4200 !cpumask_empty(to_cpumask(l->cpus)) &&
e374d483
HH
4201 len < PAGE_SIZE - 60) {
4202 len += sprintf(buf + len, " cpus=");
d0e0ac97
CG
4203 len += cpulist_scnprintf(buf + len,
4204 PAGE_SIZE - len - 50,
174596a0 4205 to_cpumask(l->cpus));
45edfa58
CL
4206 }
4207
62bc62a8 4208 if (nr_online_nodes > 1 && !nodes_empty(l->nodes) &&
e374d483
HH
4209 len < PAGE_SIZE - 60) {
4210 len += sprintf(buf + len, " nodes=");
d0e0ac97
CG
4211 len += nodelist_scnprintf(buf + len,
4212 PAGE_SIZE - len - 50,
4213 l->nodes);
45edfa58
CL
4214 }
4215
e374d483 4216 len += sprintf(buf + len, "\n");
88a420e4
CL
4217 }
4218
4219 free_loc_track(&t);
bbd7d57b 4220 kfree(map);
88a420e4 4221 if (!t.count)
e374d483
HH
4222 len += sprintf(buf, "No data\n");
4223 return len;
88a420e4 4224}
ab4d5ed5 4225#endif
88a420e4 4226
a5a84755
CL
4227#ifdef SLUB_RESILIENCY_TEST
4228static void resiliency_test(void)
4229{
4230 u8 *p;
4231
95a05b42 4232 BUILD_BUG_ON(KMALLOC_MIN_SIZE > 16 || KMALLOC_SHIFT_HIGH < 10);
a5a84755
CL
4233
4234 printk(KERN_ERR "SLUB resiliency testing\n");
4235 printk(KERN_ERR "-----------------------\n");
4236 printk(KERN_ERR "A. Corruption after allocation\n");
4237
4238 p = kzalloc(16, GFP_KERNEL);
4239 p[16] = 0x12;
4240 printk(KERN_ERR "\n1. kmalloc-16: Clobber Redzone/next pointer"
4241 " 0x12->0x%p\n\n", p + 16);
4242
4243 validate_slab_cache(kmalloc_caches[4]);
4244
4245 /* Hmmm... The next two are dangerous */
4246 p = kzalloc(32, GFP_KERNEL);
4247 p[32 + sizeof(void *)] = 0x34;
4248 printk(KERN_ERR "\n2. kmalloc-32: Clobber next pointer/next slab"
4249 " 0x34 -> -0x%p\n", p);
4250 printk(KERN_ERR
4251 "If allocated object is overwritten then not detectable\n\n");
4252
4253 validate_slab_cache(kmalloc_caches[5]);
4254 p = kzalloc(64, GFP_KERNEL);
4255 p += 64 + (get_cycles() & 0xff) * sizeof(void *);
4256 *p = 0x56;
4257 printk(KERN_ERR "\n3. kmalloc-64: corrupting random byte 0x56->0x%p\n",
4258 p);
4259 printk(KERN_ERR
4260 "If allocated object is overwritten then not detectable\n\n");
4261 validate_slab_cache(kmalloc_caches[6]);
4262
4263 printk(KERN_ERR "\nB. Corruption after free\n");
4264 p = kzalloc(128, GFP_KERNEL);
4265 kfree(p);
4266 *p = 0x78;
4267 printk(KERN_ERR "1. kmalloc-128: Clobber first word 0x78->0x%p\n\n", p);
4268 validate_slab_cache(kmalloc_caches[7]);
4269
4270 p = kzalloc(256, GFP_KERNEL);
4271 kfree(p);
4272 p[50] = 0x9a;
4273 printk(KERN_ERR "\n2. kmalloc-256: Clobber 50th byte 0x9a->0x%p\n\n",
4274 p);
4275 validate_slab_cache(kmalloc_caches[8]);
4276
4277 p = kzalloc(512, GFP_KERNEL);
4278 kfree(p);
4279 p[512] = 0xab;
4280 printk(KERN_ERR "\n3. kmalloc-512: Clobber redzone 0xab->0x%p\n\n", p);
4281 validate_slab_cache(kmalloc_caches[9]);
4282}
4283#else
4284#ifdef CONFIG_SYSFS
4285static void resiliency_test(void) {};
4286#endif
4287#endif
4288
ab4d5ed5 4289#ifdef CONFIG_SYSFS
81819f0f 4290enum slab_stat_type {
205ab99d
CL
4291 SL_ALL, /* All slabs */
4292 SL_PARTIAL, /* Only partially allocated slabs */
4293 SL_CPU, /* Only slabs used for cpu caches */
4294 SL_OBJECTS, /* Determine allocated objects not slabs */
4295 SL_TOTAL /* Determine object capacity not slabs */
81819f0f
CL
4296};
4297
205ab99d 4298#define SO_ALL (1 << SL_ALL)
81819f0f
CL
4299#define SO_PARTIAL (1 << SL_PARTIAL)
4300#define SO_CPU (1 << SL_CPU)
4301#define SO_OBJECTS (1 << SL_OBJECTS)
205ab99d 4302#define SO_TOTAL (1 << SL_TOTAL)
81819f0f 4303
62e5c4b4
CG
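/*
 * Common helper for the object/slab counting sysfs attributes: depending
 * on the SO_* flags, count cpu slabs, partial slabs or all slabs, either
 * as slabs, objects in use or total object capacity, and print the grand
 * total followed by a per-node breakdown.
 */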
4304static ssize_t show_slab_objects(struct kmem_cache *s,
4305 char *buf, unsigned long flags)
81819f0f
CL
4306{
4307 unsigned long total = 0;
81819f0f
CL
4308 int node;
4309 int x;
4310 unsigned long *nodes;
81819f0f 4311
e35e1a97 4312 nodes = kzalloc(sizeof(unsigned long) * nr_node_ids, GFP_KERNEL);
62e5c4b4
CG
4313 if (!nodes)
4314 return -ENOMEM;
81819f0f 4315
205ab99d
CL
4316 if (flags & SO_CPU) {
4317 int cpu;
81819f0f 4318
205ab99d 4319 for_each_possible_cpu(cpu) {
d0e0ac97
CG
4320 struct kmem_cache_cpu *c = per_cpu_ptr(s->cpu_slab,
4321 cpu);
ec3ab083 4322 int node;
49e22585 4323 struct page *page;
dfb4f096 4324
bc6697d8 4325 page = ACCESS_ONCE(c->page);
ec3ab083
CL
4326 if (!page)
4327 continue;
205ab99d 4328
ec3ab083
CL
4329 node = page_to_nid(page);
4330 if (flags & SO_TOTAL)
4331 x = page->objects;
4332 else if (flags & SO_OBJECTS)
4333 x = page->inuse;
4334 else
4335 x = 1;
49e22585 4336
ec3ab083
CL
4337 total += x;
4338 nodes[node] += x;
4339
4340 page = ACCESS_ONCE(c->partial);
49e22585 4341 if (page) {
8afb1474
LZ
4342 node = page_to_nid(page);
4343 if (flags & SO_TOTAL)
4344 WARN_ON_ONCE(1);
4345 else if (flags & SO_OBJECTS)
4346 WARN_ON_ONCE(1);
4347 else
4348 x = page->pages;
bc6697d8
ED
4349 total += x;
4350 nodes[node] += x;
49e22585 4351 }
81819f0f
CL
4352 }
4353 }
4354
04d94879 4355 lock_memory_hotplug();
ab4d5ed5 4356#ifdef CONFIG_SLUB_DEBUG
205ab99d
CL
4357 if (flags & SO_ALL) {
4358 for_each_node_state(node, N_NORMAL_MEMORY) {
4359 struct kmem_cache_node *n = get_node(s, node);
4360
d0e0ac97
CG
4361 if (flags & SO_TOTAL)
4362 x = atomic_long_read(&n->total_objects);
4363 else if (flags & SO_OBJECTS)
4364 x = atomic_long_read(&n->total_objects) -
4365 count_partial(n, count_free);
81819f0f 4366 else
205ab99d 4367 x = atomic_long_read(&n->nr_slabs);
81819f0f
CL
4368 total += x;
4369 nodes[node] += x;
4370 }
4371
ab4d5ed5
CL
4372 } else
4373#endif
4374 if (flags & SO_PARTIAL) {
205ab99d
CL
4375 for_each_node_state(node, N_NORMAL_MEMORY) {
4376 struct kmem_cache_node *n = get_node(s, node);
81819f0f 4377
205ab99d
CL
4378 if (flags & SO_TOTAL)
4379 x = count_partial(n, count_total);
4380 else if (flags & SO_OBJECTS)
4381 x = count_partial(n, count_inuse);
81819f0f 4382 else
205ab99d 4383 x = n->nr_partial;
81819f0f
CL
4384 total += x;
4385 nodes[node] += x;
4386 }
4387 }
81819f0f
CL
4388 x = sprintf(buf, "%lu", total);
4389#ifdef CONFIG_NUMA
f64dc58c 4390 for_each_node_state(node, N_NORMAL_MEMORY)
81819f0f
CL
4391 if (nodes[node])
4392 x += sprintf(buf + x, " N%d=%lu",
4393 node, nodes[node]);
4394#endif
04d94879 4395 unlock_memory_hotplug();
81819f0f
CL
4396 kfree(nodes);
4397 return x + sprintf(buf + x, "\n");
4398}
4399
ab4d5ed5 4400#ifdef CONFIG_SLUB_DEBUG
81819f0f
CL
4401static int any_slab_objects(struct kmem_cache *s)
4402{
4403 int node;
81819f0f 4404
dfb4f096 4405 for_each_online_node(node) {
81819f0f
CL
4406 struct kmem_cache_node *n = get_node(s, node);
4407
dfb4f096
CL
4408 if (!n)
4409 continue;
4410
4ea33e2d 4411 if (atomic_long_read(&n->total_objects))
81819f0f
CL
4412 return 1;
4413 }
4414 return 0;
4415}
ab4d5ed5 4416#endif
81819f0f
CL
4417
4418#define to_slab_attr(n) container_of(n, struct slab_attribute, attr)
497888cf 4419#define to_slab(n) container_of(n, struct kmem_cache, kobj)
81819f0f
CL
4420
4421struct slab_attribute {
4422 struct attribute attr;
4423 ssize_t (*show)(struct kmem_cache *s, char *buf);
4424 ssize_t (*store)(struct kmem_cache *s, const char *x, size_t count);
4425};
4426
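/*
 * SLAB_ATTR_RO() defines a read-only (0400) attribute with only a show
 * method; SLAB_ATTR() defines a read-write (0600) attribute with both
 * show and store. The files appear under /sys/kernel/slab/<cache>/,
 * e.g. /sys/kernel/slab/kmalloc-64/order (example path).
 */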
4427#define SLAB_ATTR_RO(_name) \
ab067e99
VK
4428 static struct slab_attribute _name##_attr = \
4429 __ATTR(_name, 0400, _name##_show, NULL)
81819f0f
CL
4430
4431#define SLAB_ATTR(_name) \
4432 static struct slab_attribute _name##_attr = \
ab067e99 4433 __ATTR(_name, 0600, _name##_show, _name##_store)
81819f0f 4434
81819f0f
CL
4435static ssize_t slab_size_show(struct kmem_cache *s, char *buf)
4436{
4437 return sprintf(buf, "%d\n", s->size);
4438}
4439SLAB_ATTR_RO(slab_size);
4440
4441static ssize_t align_show(struct kmem_cache *s, char *buf)
4442{
4443 return sprintf(buf, "%d\n", s->align);
4444}
4445SLAB_ATTR_RO(align);
4446
4447static ssize_t object_size_show(struct kmem_cache *s, char *buf)
4448{
3b0efdfa 4449 return sprintf(buf, "%d\n", s->object_size);
81819f0f
CL
4450}
4451SLAB_ATTR_RO(object_size);
4452
4453static ssize_t objs_per_slab_show(struct kmem_cache *s, char *buf)
4454{
834f3d11 4455 return sprintf(buf, "%d\n", oo_objects(s->oo));
81819f0f
CL
4456}
4457SLAB_ATTR_RO(objs_per_slab);
4458
06b285dc
CL
4459static ssize_t order_store(struct kmem_cache *s,
4460 const char *buf, size_t length)
4461{
0121c619
CL
4462 unsigned long order;
4463 int err;
4464
3dbb95f7 4465 err = kstrtoul(buf, 10, &order);
0121c619
CL
4466 if (err)
4467 return err;
06b285dc
CL
4468
4469 if (order > slub_max_order || order < slub_min_order)
4470 return -EINVAL;
4471
4472 calculate_sizes(s, order);
4473 return length;
4474}
4475
81819f0f
CL
4476static ssize_t order_show(struct kmem_cache *s, char *buf)
4477{
834f3d11 4478 return sprintf(buf, "%d\n", oo_order(s->oo));
81819f0f 4479}
06b285dc 4480SLAB_ATTR(order);
81819f0f 4481
73d342b1
DR
4482static ssize_t min_partial_show(struct kmem_cache *s, char *buf)
4483{
4484 return sprintf(buf, "%lu\n", s->min_partial);
4485}
4486
4487static ssize_t min_partial_store(struct kmem_cache *s, const char *buf,
4488 size_t length)
4489{
4490 unsigned long min;
4491 int err;
4492
3dbb95f7 4493 err = kstrtoul(buf, 10, &min);
73d342b1
DR
4494 if (err)
4495 return err;
4496
c0bdb232 4497 set_min_partial(s, min);
73d342b1
DR
4498 return length;
4499}
4500SLAB_ATTR(min_partial);
4501
49e22585
CL
4502static ssize_t cpu_partial_show(struct kmem_cache *s, char *buf)
4503{
4504 return sprintf(buf, "%u\n", s->cpu_partial);
4505}
4506
4507static ssize_t cpu_partial_store(struct kmem_cache *s, const char *buf,
4508 size_t length)
4509{
4510 unsigned long objects;
4511 int err;
4512
3dbb95f7 4513 err = kstrtoul(buf, 10, &objects);
49e22585
CL
4514 if (err)
4515 return err;
345c905d 4516 if (objects && !kmem_cache_has_cpu_partial(s))
74ee4ef1 4517 return -EINVAL;
49e22585
CL
4518
4519 s->cpu_partial = objects;
4520 flush_all(s);
4521 return length;
4522}
4523SLAB_ATTR(cpu_partial);
4524
81819f0f
CL
4525static ssize_t ctor_show(struct kmem_cache *s, char *buf)
4526{
62c70bce
JP
4527 if (!s->ctor)
4528 return 0;
4529 return sprintf(buf, "%pS\n", s->ctor);
81819f0f
CL
4530}
4531SLAB_ATTR_RO(ctor);
4532
81819f0f
CL
4533static ssize_t aliases_show(struct kmem_cache *s, char *buf)
4534{
4535 return sprintf(buf, "%d\n", s->refcount - 1);
4536}
4537SLAB_ATTR_RO(aliases);
4538
81819f0f
CL
4539static ssize_t partial_show(struct kmem_cache *s, char *buf)
4540{
d9acf4b7 4541 return show_slab_objects(s, buf, SO_PARTIAL);
81819f0f
CL
4542}
4543SLAB_ATTR_RO(partial);
4544
4545static ssize_t cpu_slabs_show(struct kmem_cache *s, char *buf)
4546{
d9acf4b7 4547 return show_slab_objects(s, buf, SO_CPU);
81819f0f
CL
4548}
4549SLAB_ATTR_RO(cpu_slabs);
4550
4551static ssize_t objects_show(struct kmem_cache *s, char *buf)
4552{
205ab99d 4553 return show_slab_objects(s, buf, SO_ALL|SO_OBJECTS);
81819f0f
CL
4554}
4555SLAB_ATTR_RO(objects);
4556
205ab99d
CL
4557static ssize_t objects_partial_show(struct kmem_cache *s, char *buf)
4558{
4559 return show_slab_objects(s, buf, SO_PARTIAL|SO_OBJECTS);
4560}
4561SLAB_ATTR_RO(objects_partial);
4562
49e22585
CL
4563static ssize_t slabs_cpu_partial_show(struct kmem_cache *s, char *buf)
4564{
4565 int objects = 0;
4566 int pages = 0;
4567 int cpu;
4568 int len;
4569
4570 for_each_online_cpu(cpu) {
4571 struct page *page = per_cpu_ptr(s->cpu_slab, cpu)->partial;
4572
4573 if (page) {
4574 pages += page->pages;
4575 objects += page->pobjects;
4576 }
4577 }
4578
4579 len = sprintf(buf, "%d(%d)", objects, pages);
4580
4581#ifdef CONFIG_SMP
4582 for_each_online_cpu(cpu) {
 4583 struct page *page = per_cpu_ptr(s->cpu_slab, cpu)->partial;
4584
4585 if (page && len < PAGE_SIZE - 20)
4586 len += sprintf(buf + len, " C%d=%d(%d)", cpu,
4587 page->pobjects, page->pages);
4588 }
4589#endif
4590 return len + sprintf(buf + len, "\n");
4591}
4592SLAB_ATTR_RO(slabs_cpu_partial);
4593
a5a84755
CL
4594static ssize_t reclaim_account_show(struct kmem_cache *s, char *buf)
4595{
4596 return sprintf(buf, "%d\n", !!(s->flags & SLAB_RECLAIM_ACCOUNT));
4597}
4598
4599static ssize_t reclaim_account_store(struct kmem_cache *s,
4600 const char *buf, size_t length)
4601{
4602 s->flags &= ~SLAB_RECLAIM_ACCOUNT;
4603 if (buf[0] == '1')
4604 s->flags |= SLAB_RECLAIM_ACCOUNT;
4605 return length;
4606}
4607SLAB_ATTR(reclaim_account);
4608
4609static ssize_t hwcache_align_show(struct kmem_cache *s, char *buf)
4610{
4611 return sprintf(buf, "%d\n", !!(s->flags & SLAB_HWCACHE_ALIGN));
4612}
4613SLAB_ATTR_RO(hwcache_align);
4614
4615#ifdef CONFIG_ZONE_DMA
4616static ssize_t cache_dma_show(struct kmem_cache *s, char *buf)
4617{
4618 return sprintf(buf, "%d\n", !!(s->flags & SLAB_CACHE_DMA));
4619}
4620SLAB_ATTR_RO(cache_dma);
4621#endif
4622
4623static ssize_t destroy_by_rcu_show(struct kmem_cache *s, char *buf)
4624{
4625 return sprintf(buf, "%d\n", !!(s->flags & SLAB_DESTROY_BY_RCU));
4626}
4627SLAB_ATTR_RO(destroy_by_rcu);
4628
ab9a0f19
LJ
4629static ssize_t reserved_show(struct kmem_cache *s, char *buf)
4630{
4631 return sprintf(buf, "%d\n", s->reserved);
4632}
4633SLAB_ATTR_RO(reserved);
4634
ab4d5ed5 4635#ifdef CONFIG_SLUB_DEBUG
a5a84755
CL
4636static ssize_t slabs_show(struct kmem_cache *s, char *buf)
4637{
4638 return show_slab_objects(s, buf, SO_ALL);
4639}
4640SLAB_ATTR_RO(slabs);
4641
205ab99d
CL
4642static ssize_t total_objects_show(struct kmem_cache *s, char *buf)
4643{
4644 return show_slab_objects(s, buf, SO_ALL|SO_TOTAL);
4645}
4646SLAB_ATTR_RO(total_objects);
4647
81819f0f
CL
4648static ssize_t sanity_checks_show(struct kmem_cache *s, char *buf)
4649{
4650 return sprintf(buf, "%d\n", !!(s->flags & SLAB_DEBUG_FREE));
4651}
4652
4653static ssize_t sanity_checks_store(struct kmem_cache *s,
4654 const char *buf, size_t length)
4655{
4656 s->flags &= ~SLAB_DEBUG_FREE;
b789ef51
CL
4657 if (buf[0] == '1') {
4658 s->flags &= ~__CMPXCHG_DOUBLE;
81819f0f 4659 s->flags |= SLAB_DEBUG_FREE;
b789ef51 4660 }
81819f0f
CL
4661 return length;
4662}
4663SLAB_ATTR(sanity_checks);
4664
4665static ssize_t trace_show(struct kmem_cache *s, char *buf)
4666{
4667 return sprintf(buf, "%d\n", !!(s->flags & SLAB_TRACE));
4668}
4669
4670static ssize_t trace_store(struct kmem_cache *s, const char *buf,
4671 size_t length)
4672{
4673 s->flags &= ~SLAB_TRACE;
b789ef51
CL
4674 if (buf[0] == '1') {
4675 s->flags &= ~__CMPXCHG_DOUBLE;
81819f0f 4676 s->flags |= SLAB_TRACE;
b789ef51 4677 }
81819f0f
CL
4678 return length;
4679}
4680SLAB_ATTR(trace);
4681
81819f0f
CL
4682static ssize_t red_zone_show(struct kmem_cache *s, char *buf)
4683{
4684 return sprintf(buf, "%d\n", !!(s->flags & SLAB_RED_ZONE));
4685}
4686
4687static ssize_t red_zone_store(struct kmem_cache *s,
4688 const char *buf, size_t length)
4689{
4690 if (any_slab_objects(s))
4691 return -EBUSY;
4692
4693 s->flags &= ~SLAB_RED_ZONE;
b789ef51
CL
4694 if (buf[0] == '1') {
4695 s->flags &= ~__CMPXCHG_DOUBLE;
81819f0f 4696 s->flags |= SLAB_RED_ZONE;
b789ef51 4697 }
06b285dc 4698 calculate_sizes(s, -1);
81819f0f
CL
4699 return length;
4700}
4701SLAB_ATTR(red_zone);
4702
4703static ssize_t poison_show(struct kmem_cache *s, char *buf)
4704{
4705 return sprintf(buf, "%d\n", !!(s->flags & SLAB_POISON));
4706}
4707
4708static ssize_t poison_store(struct kmem_cache *s,
4709 const char *buf, size_t length)
4710{
4711 if (any_slab_objects(s))
4712 return -EBUSY;
4713
4714 s->flags &= ~SLAB_POISON;
b789ef51
CL
4715 if (buf[0] == '1') {
4716 s->flags &= ~__CMPXCHG_DOUBLE;
81819f0f 4717 s->flags |= SLAB_POISON;
b789ef51 4718 }
06b285dc 4719 calculate_sizes(s, -1);
81819f0f
CL
4720 return length;
4721}
4722SLAB_ATTR(poison);
4723
4724static ssize_t store_user_show(struct kmem_cache *s, char *buf)
4725{
4726 return sprintf(buf, "%d\n", !!(s->flags & SLAB_STORE_USER));
4727}
4728
4729static ssize_t store_user_store(struct kmem_cache *s,
4730 const char *buf, size_t length)
4731{
4732 if (any_slab_objects(s))
4733 return -EBUSY;
4734
4735 s->flags &= ~SLAB_STORE_USER;
b789ef51
CL
4736 if (buf[0] == '1') {
4737 s->flags &= ~__CMPXCHG_DOUBLE;
81819f0f 4738 s->flags |= SLAB_STORE_USER;
b789ef51 4739 }
06b285dc 4740 calculate_sizes(s, -1);
81819f0f
CL
4741 return length;
4742}
4743SLAB_ATTR(store_user);
4744
53e15af0
CL
4745static ssize_t validate_show(struct kmem_cache *s, char *buf)
4746{
4747 return 0;
4748}
4749
4750static ssize_t validate_store(struct kmem_cache *s,
4751 const char *buf, size_t length)
4752{
434e245d
CL
4753 int ret = -EINVAL;
4754
4755 if (buf[0] == '1') {
4756 ret = validate_slab_cache(s);
4757 if (ret >= 0)
4758 ret = length;
4759 }
4760 return ret;
53e15af0
CL
4761}
4762SLAB_ATTR(validate);
a5a84755
CL
4763
4764static ssize_t alloc_calls_show(struct kmem_cache *s, char *buf)
4765{
4766 if (!(s->flags & SLAB_STORE_USER))
4767 return -ENOSYS;
4768 return list_locations(s, buf, TRACK_ALLOC);
4769}
4770SLAB_ATTR_RO(alloc_calls);
4771
4772static ssize_t free_calls_show(struct kmem_cache *s, char *buf)
4773{
4774 if (!(s->flags & SLAB_STORE_USER))
4775 return -ENOSYS;
4776 return list_locations(s, buf, TRACK_FREE);
4777}
4778SLAB_ATTR_RO(free_calls);
4779#endif /* CONFIG_SLUB_DEBUG */
4780
4781#ifdef CONFIG_FAILSLAB
4782static ssize_t failslab_show(struct kmem_cache *s, char *buf)
4783{
4784 return sprintf(buf, "%d\n", !!(s->flags & SLAB_FAILSLAB));
4785}
4786
4787static ssize_t failslab_store(struct kmem_cache *s, const char *buf,
4788 size_t length)
4789{
4790 s->flags &= ~SLAB_FAILSLAB;
4791 if (buf[0] == '1')
4792 s->flags |= SLAB_FAILSLAB;
4793 return length;
4794}
4795SLAB_ATTR(failslab);
ab4d5ed5 4796#endif
53e15af0 4797
2086d26a
CL
4798static ssize_t shrink_show(struct kmem_cache *s, char *buf)
4799{
4800 return 0;
4801}
4802
4803static ssize_t shrink_store(struct kmem_cache *s,
4804 const char *buf, size_t length)
4805{
4806 if (buf[0] == '1') {
4807 int rc = kmem_cache_shrink(s);
4808
4809 if (rc)
4810 return rc;
4811 } else
4812 return -EINVAL;
4813 return length;
4814}
4815SLAB_ATTR(shrink);
4816
81819f0f 4817#ifdef CONFIG_NUMA
9824601e 4818static ssize_t remote_node_defrag_ratio_show(struct kmem_cache *s, char *buf)
81819f0f 4819{
9824601e 4820 return sprintf(buf, "%d\n", s->remote_node_defrag_ratio / 10);
81819f0f
CL
4821}
4822
9824601e 4823static ssize_t remote_node_defrag_ratio_store(struct kmem_cache *s,
81819f0f
CL
4824 const char *buf, size_t length)
4825{
0121c619
CL
4826 unsigned long ratio;
4827 int err;
4828
3dbb95f7 4829 err = kstrtoul(buf, 10, &ratio);
0121c619
CL
4830 if (err)
4831 return err;
4832
e2cb96b7 4833 if (ratio <= 100)
0121c619 4834 s->remote_node_defrag_ratio = ratio * 10;
81819f0f 4835
81819f0f
CL
4836 return length;
4837}
9824601e 4838SLAB_ATTR(remote_node_defrag_ratio);
81819f0f
CL
4839#endif
4840
8ff12cfc 4841#ifdef CONFIG_SLUB_STATS
8ff12cfc
CL
4842static int show_stat(struct kmem_cache *s, char *buf, enum stat_item si)
4843{
4844 unsigned long sum = 0;
4845 int cpu;
4846 int len;
4847 int *data = kmalloc(nr_cpu_ids * sizeof(int), GFP_KERNEL);
4848
4849 if (!data)
4850 return -ENOMEM;
4851
4852 for_each_online_cpu(cpu) {
9dfc6e68 4853 unsigned x = per_cpu_ptr(s->cpu_slab, cpu)->stat[si];
8ff12cfc
CL
4854
4855 data[cpu] = x;
4856 sum += x;
4857 }
4858
4859 len = sprintf(buf, "%lu", sum);
4860
50ef37b9 4861#ifdef CONFIG_SMP
8ff12cfc
CL
4862 for_each_online_cpu(cpu) {
4863 if (data[cpu] && len < PAGE_SIZE - 20)
50ef37b9 4864 len += sprintf(buf + len, " C%d=%u", cpu, data[cpu]);
8ff12cfc 4865 }
50ef37b9 4866#endif
8ff12cfc
CL
4867 kfree(data);
4868 return len + sprintf(buf + len, "\n");
4869}
4870
78eb00cc
DR
4871static void clear_stat(struct kmem_cache *s, enum stat_item si)
4872{
4873 int cpu;
4874
4875 for_each_online_cpu(cpu)
9dfc6e68 4876 per_cpu_ptr(s->cpu_slab, cpu)->stat[si] = 0;
78eb00cc
DR
4877}
4878
8ff12cfc
CL
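/*
 * One sysfs attribute per statistics counter: reading sums the per-cpu
 * counters (with a per-cpu breakdown on SMP), writing '0' clears them.
 */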
4879#define STAT_ATTR(si, text) \
4880static ssize_t text##_show(struct kmem_cache *s, char *buf) \
4881{ \
4882 return show_stat(s, buf, si); \
4883} \
78eb00cc
DR
4884static ssize_t text##_store(struct kmem_cache *s, \
4885 const char *buf, size_t length) \
4886{ \
4887 if (buf[0] != '0') \
4888 return -EINVAL; \
4889 clear_stat(s, si); \
4890 return length; \
4891} \
4892SLAB_ATTR(text); \
8ff12cfc
CL
4893
4894STAT_ATTR(ALLOC_FASTPATH, alloc_fastpath);
4895STAT_ATTR(ALLOC_SLOWPATH, alloc_slowpath);
4896STAT_ATTR(FREE_FASTPATH, free_fastpath);
4897STAT_ATTR(FREE_SLOWPATH, free_slowpath);
4898STAT_ATTR(FREE_FROZEN, free_frozen);
4899STAT_ATTR(FREE_ADD_PARTIAL, free_add_partial);
4900STAT_ATTR(FREE_REMOVE_PARTIAL, free_remove_partial);
4901STAT_ATTR(ALLOC_FROM_PARTIAL, alloc_from_partial);
4902STAT_ATTR(ALLOC_SLAB, alloc_slab);
4903STAT_ATTR(ALLOC_REFILL, alloc_refill);
e36a2652 4904STAT_ATTR(ALLOC_NODE_MISMATCH, alloc_node_mismatch);
8ff12cfc
CL
4905STAT_ATTR(FREE_SLAB, free_slab);
4906STAT_ATTR(CPUSLAB_FLUSH, cpuslab_flush);
4907STAT_ATTR(DEACTIVATE_FULL, deactivate_full);
4908STAT_ATTR(DEACTIVATE_EMPTY, deactivate_empty);
4909STAT_ATTR(DEACTIVATE_TO_HEAD, deactivate_to_head);
4910STAT_ATTR(DEACTIVATE_TO_TAIL, deactivate_to_tail);
4911STAT_ATTR(DEACTIVATE_REMOTE_FREES, deactivate_remote_frees);
03e404af 4912STAT_ATTR(DEACTIVATE_BYPASS, deactivate_bypass);
65c3376a 4913STAT_ATTR(ORDER_FALLBACK, order_fallback);
b789ef51
CL
4914STAT_ATTR(CMPXCHG_DOUBLE_CPU_FAIL, cmpxchg_double_cpu_fail);
4915STAT_ATTR(CMPXCHG_DOUBLE_FAIL, cmpxchg_double_fail);
49e22585
CL
4916STAT_ATTR(CPU_PARTIAL_ALLOC, cpu_partial_alloc);
4917STAT_ATTR(CPU_PARTIAL_FREE, cpu_partial_free);
8028dcea
AS
4918STAT_ATTR(CPU_PARTIAL_NODE, cpu_partial_node);
4919STAT_ATTR(CPU_PARTIAL_DRAIN, cpu_partial_drain);
8ff12cfc
CL
4920#endif
4921
06428780 4922static struct attribute *slab_attrs[] = {
81819f0f
CL
4923 &slab_size_attr.attr,
4924 &object_size_attr.attr,
4925 &objs_per_slab_attr.attr,
4926 &order_attr.attr,
73d342b1 4927 &min_partial_attr.attr,
49e22585 4928 &cpu_partial_attr.attr,
81819f0f 4929 &objects_attr.attr,
205ab99d 4930 &objects_partial_attr.attr,
81819f0f
CL
4931 &partial_attr.attr,
4932 &cpu_slabs_attr.attr,
4933 &ctor_attr.attr,
81819f0f
CL
4934 &aliases_attr.attr,
4935 &align_attr.attr,
81819f0f
CL
4936 &hwcache_align_attr.attr,
4937 &reclaim_account_attr.attr,
4938 &destroy_by_rcu_attr.attr,
a5a84755 4939 &shrink_attr.attr,
ab9a0f19 4940 &reserved_attr.attr,
49e22585 4941 &slabs_cpu_partial_attr.attr,
ab4d5ed5 4942#ifdef CONFIG_SLUB_DEBUG
a5a84755
CL
4943 &total_objects_attr.attr,
4944 &slabs_attr.attr,
4945 &sanity_checks_attr.attr,
4946 &trace_attr.attr,
81819f0f
CL
4947 &red_zone_attr.attr,
4948 &poison_attr.attr,
4949 &store_user_attr.attr,
53e15af0 4950 &validate_attr.attr,
88a420e4
CL
4951 &alloc_calls_attr.attr,
4952 &free_calls_attr.attr,
ab4d5ed5 4953#endif
81819f0f
CL
4954#ifdef CONFIG_ZONE_DMA
4955 &cache_dma_attr.attr,
4956#endif
4957#ifdef CONFIG_NUMA
9824601e 4958 &remote_node_defrag_ratio_attr.attr,
8ff12cfc
CL
4959#endif
4960#ifdef CONFIG_SLUB_STATS
4961 &alloc_fastpath_attr.attr,
4962 &alloc_slowpath_attr.attr,
4963 &free_fastpath_attr.attr,
4964 &free_slowpath_attr.attr,
4965 &free_frozen_attr.attr,
4966 &free_add_partial_attr.attr,
4967 &free_remove_partial_attr.attr,
4968 &alloc_from_partial_attr.attr,
4969 &alloc_slab_attr.attr,
4970 &alloc_refill_attr.attr,
e36a2652 4971 &alloc_node_mismatch_attr.attr,
8ff12cfc
CL
4972 &free_slab_attr.attr,
4973 &cpuslab_flush_attr.attr,
4974 &deactivate_full_attr.attr,
4975 &deactivate_empty_attr.attr,
4976 &deactivate_to_head_attr.attr,
4977 &deactivate_to_tail_attr.attr,
4978 &deactivate_remote_frees_attr.attr,
03e404af 4979 &deactivate_bypass_attr.attr,
65c3376a 4980 &order_fallback_attr.attr,
b789ef51
CL
4981 &cmpxchg_double_fail_attr.attr,
4982 &cmpxchg_double_cpu_fail_attr.attr,
49e22585
CL
4983 &cpu_partial_alloc_attr.attr,
4984 &cpu_partial_free_attr.attr,
8028dcea
AS
4985 &cpu_partial_node_attr.attr,
4986 &cpu_partial_drain_attr.attr,
81819f0f 4987#endif
4c13dd3b
DM
4988#ifdef CONFIG_FAILSLAB
4989 &failslab_attr.attr,
4990#endif
4991
81819f0f
CL
4992 NULL
4993};
4994
4995static struct attribute_group slab_attr_group = {
4996 .attrs = slab_attrs,
4997};
4998
4999static ssize_t slab_attr_show(struct kobject *kobj,
5000 struct attribute *attr,
5001 char *buf)
5002{
5003 struct slab_attribute *attribute;
5004 struct kmem_cache *s;
5005 int err;
5006
5007 attribute = to_slab_attr(attr);
5008 s = to_slab(kobj);
5009
5010 if (!attribute->show)
5011 return -EIO;
5012
5013 err = attribute->show(s, buf);
5014
5015 return err;
5016}
5017
5018static ssize_t slab_attr_store(struct kobject *kobj,
5019 struct attribute *attr,
5020 const char *buf, size_t len)
5021{
5022 struct slab_attribute *attribute;
5023 struct kmem_cache *s;
5024 int err;
5025
5026 attribute = to_slab_attr(attr);
5027 s = to_slab(kobj);
5028
5029 if (!attribute->store)
5030 return -EIO;
5031
5032 err = attribute->store(s, buf, len);
107dab5c
GC
5033#ifdef CONFIG_MEMCG_KMEM
5034 if (slab_state >= FULL && err >= 0 && is_root_cache(s)) {
5035 int i;
81819f0f 5036
107dab5c
GC
5037 mutex_lock(&slab_mutex);
5038 if (s->max_attr_size < len)
5039 s->max_attr_size = len;
5040
ebe945c2
GC
5041 /*
5042 * This is a best effort propagation, so this function's return
5043 * value will be determined by the parent cache only. This is
5044 * basically because not all attributes will have a well
5045 * defined semantics for rollbacks - most of the actions will
5046 * have permanent effects.
5047 *
5048 * Returning the error value of any of the children that fail
5049 * is not 100 % defined, in the sense that users seeing the
5050 * error code won't be able to know anything about the state of
5051 * the cache.
5052 *
5053 * Only returning the error code for the parent cache at least
5054 * has well defined semantics. The cache being written to
5055 * directly either failed or succeeded, in which case we loop
5056 * through the descendants with best-effort propagation.
5057 */
107dab5c 5058 for_each_memcg_cache_index(i) {
2ade4de8 5059 struct kmem_cache *c = cache_from_memcg_idx(s, i);
107dab5c
GC
5060 if (c)
5061 attribute->store(c, buf, len);
5062 }
5063 mutex_unlock(&slab_mutex);
5064 }
5065#endif
81819f0f
CL
5066 return err;
5067}
5068
107dab5c
GC
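/*
 * If any attribute was ever written on the root cache, copy the current
 * value of every writable attribute from the root onto a newly created
 * memcg child cache so it starts out with the same tuning.
 */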
5069static void memcg_propagate_slab_attrs(struct kmem_cache *s)
5070{
5071#ifdef CONFIG_MEMCG_KMEM
5072 int i;
5073 char *buffer = NULL;
93030d83 5074 struct kmem_cache *root_cache;
107dab5c 5075
93030d83 5076 if (is_root_cache(s))
107dab5c
GC
5077 return;
5078
93030d83
VD
5079 root_cache = s->memcg_params->root_cache;
5080
107dab5c
GC
5081 /*
 5082 * This means no attribute was ever written to the root cache, so there
 5083 * is no point in copying default values around.
5084 */
93030d83 5085 if (!root_cache->max_attr_size)
107dab5c
GC
5086 return;
5087
5088 for (i = 0; i < ARRAY_SIZE(slab_attrs); i++) {
5089 char mbuf[64];
5090 char *buf;
5091 struct slab_attribute *attr = to_slab_attr(slab_attrs[i]);
5092
5093 if (!attr || !attr->store || !attr->show)
5094 continue;
5095
5096 /*
5097 * It is really bad that we have to allocate here, so we will
5098 * do it only as a fallback. If we actually allocate, though,
5099 * we can just use the allocated buffer until the end.
5100 *
5101 * Most of the slub attributes will tend to be very small in
 5102 * size, but sysfs allows buffers up to a page, so page-sized values
 5103 * can theoretically occur.
5104 */
5105 if (buffer)
5106 buf = buffer;
93030d83 5107 else if (root_cache->max_attr_size < ARRAY_SIZE(mbuf))
107dab5c
GC
5108 buf = mbuf;
5109 else {
5110 buffer = (char *) get_zeroed_page(GFP_KERNEL);
5111 if (WARN_ON(!buffer))
5112 continue;
5113 buf = buffer;
5114 }
5115
93030d83 5116 attr->show(root_cache, buf);
107dab5c
GC
5117 attr->store(s, buf, strlen(buf));
5118 }
5119
5120 if (buffer)
5121 free_page((unsigned long)buffer);
5122#endif
5123}
5124
52cf25d0 5125static const struct sysfs_ops slab_sysfs_ops = {
81819f0f
CL
5126 .show = slab_attr_show,
5127 .store = slab_attr_store,
5128};
5129
5130static struct kobj_type slab_ktype = {
5131 .sysfs_ops = &slab_sysfs_ops,
5132};
5133
5134static int uevent_filter(struct kset *kset, struct kobject *kobj)
5135{
5136 struct kobj_type *ktype = get_ktype(kobj);
5137
5138 if (ktype == &slab_ktype)
5139 return 1;
5140 return 0;
5141}
5142
9cd43611 5143static const struct kset_uevent_ops slab_uevent_ops = {
81819f0f
CL
5144 .filter = uevent_filter,
5145};
5146
27c3a314 5147static struct kset *slab_kset;
81819f0f 5148
9a41707b
VD
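/*
 * Root caches are placed directly in /sys/kernel/slab; memcg child caches
 * are grouped under the "cgroup" kset of their root cache.
 */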
5149static inline struct kset *cache_kset(struct kmem_cache *s)
5150{
5151#ifdef CONFIG_MEMCG_KMEM
5152 if (!is_root_cache(s))
5153 return s->memcg_params->root_cache->memcg_kset;
5154#endif
5155 return slab_kset;
5156}
5157
81819f0f
CL
5158#define ID_STR_LENGTH 64
5159
5160/* Create a unique string id for a slab cache:
6446faa2
CL
5161 *
5162 * Format :[flags-]size
81819f0f
CL
5163 */
5164static char *create_unique_id(struct kmem_cache *s)
5165{
5166 char *name = kmalloc(ID_STR_LENGTH, GFP_KERNEL);
5167 char *p = name;
5168
5169 BUG_ON(!name);
5170
5171 *p++ = ':';
5172 /*
5173 * First flags affecting slabcache operations. We will only
5174 * get here for aliasable slabs so we do not need to support
5175 * too many flags. The flags here must cover all flags that
5176 * are matched during merging to guarantee that the id is
5177 * unique.
5178 */
5179 if (s->flags & SLAB_CACHE_DMA)
5180 *p++ = 'd';
5181 if (s->flags & SLAB_RECLAIM_ACCOUNT)
5182 *p++ = 'a';
5183 if (s->flags & SLAB_DEBUG_FREE)
5184 *p++ = 'F';
5a896d9e
VN
5185 if (!(s->flags & SLAB_NOTRACK))
5186 *p++ = 't';
81819f0f
CL
5187 if (p != name + 1)
5188 *p++ = '-';
5189 p += sprintf(p, "%07d", s->size);
2633d7a0
GC
5190
5191#ifdef CONFIG_MEMCG_KMEM
5192 if (!is_root_cache(s))
d0e0ac97
CG
5193 p += sprintf(p, "-%08d",
5194 memcg_cache_id(s->memcg_params->memcg));
2633d7a0
GC
5195#endif
5196
81819f0f
CL
5197 BUG_ON(p > name + ID_STR_LENGTH - 1);
5198 return name;
5199}
5200
5201static int sysfs_slab_add(struct kmem_cache *s)
5202{
5203 int err;
5204 const char *name;
45530c44 5205 int unmergeable = slab_unmergeable(s);
81819f0f 5206
81819f0f
CL
5207 if (unmergeable) {
5208 /*
5209 * Slabcache can never be merged so we can use the name proper.
5210 * This is typically the case for debug situations. In that
5211 * case we can catch duplicate names easily.
5212 */
27c3a314 5213 sysfs_remove_link(&slab_kset->kobj, s->name);
81819f0f
CL
5214 name = s->name;
5215 } else {
5216 /*
5217 * Create a unique name for the slab as a target
5218 * for the symlinks.
5219 */
5220 name = create_unique_id(s);
5221 }
5222
9a41707b 5223 s->kobj.kset = cache_kset(s);
26e4f205 5224 err = kobject_init_and_add(&s->kobj, &slab_ktype, NULL, "%s", name);
54b6a731
DJ
5225 if (err)
5226 goto out_put_kobj;
81819f0f
CL
5227
5228 err = sysfs_create_group(&s->kobj, &slab_attr_group);
54b6a731
DJ
5229 if (err)
5230 goto out_del_kobj;
9a41707b
VD
5231
5232#ifdef CONFIG_MEMCG_KMEM
5233 if (is_root_cache(s)) {
5234 s->memcg_kset = kset_create_and_add("cgroup", NULL, &s->kobj);
5235 if (!s->memcg_kset) {
54b6a731
DJ
5236 err = -ENOMEM;
5237 goto out_del_kobj;
9a41707b
VD
5238 }
5239 }
5240#endif
5241
81819f0f
CL
5242 kobject_uevent(&s->kobj, KOBJ_ADD);
5243 if (!unmergeable) {
5244 /* Setup first alias */
5245 sysfs_slab_alias(s, s->name);
81819f0f 5246 }
54b6a731
DJ
5247out:
5248 if (!unmergeable)
5249 kfree(name);
5250 return err;
5251out_del_kobj:
5252 kobject_del(&s->kobj);
5253out_put_kobj:
5254 kobject_put(&s->kobj);
5255 goto out;
81819f0f
CL
5256}
5257
5258static void sysfs_slab_remove(struct kmem_cache *s)
5259{
97d06609 5260 if (slab_state < FULL)
2bce6485
CL
5261 /*
5262 * Sysfs has not been setup yet so no need to remove the
5263 * cache from sysfs.
5264 */
5265 return;
5266
9a41707b
VD
5267#ifdef CONFIG_MEMCG_KMEM
5268 kset_unregister(s->memcg_kset);
5269#endif
81819f0f
CL
5270 kobject_uevent(&s->kobj, KOBJ_REMOVE);
5271 kobject_del(&s->kobj);
151c602f 5272 kobject_put(&s->kobj);
81819f0f
CL
5273}
5274
5275/*
5276 * Need to buffer aliases during bootup until sysfs becomes
9f6c708e 5277 * available lest we lose that information.
81819f0f
CL
5278 */
5279struct saved_alias {
5280 struct kmem_cache *s;
5281 const char *name;
5282 struct saved_alias *next;
5283};
5284
5af328a5 5285static struct saved_alias *alias_list;
81819f0f
CL
5286
5287static int sysfs_slab_alias(struct kmem_cache *s, const char *name)
5288{
5289 struct saved_alias *al;
5290
97d06609 5291 if (slab_state == FULL) {
81819f0f
CL
5292 /*
5293 * If we have a leftover link then remove it.
5294 */
27c3a314
GKH
5295 sysfs_remove_link(&slab_kset->kobj, name);
5296 return sysfs_create_link(&slab_kset->kobj, &s->kobj, name);
81819f0f
CL
5297 }
5298
5299 al = kmalloc(sizeof(struct saved_alias), GFP_KERNEL);
5300 if (!al)
5301 return -ENOMEM;
5302
5303 al->s = s;
5304 al->name = name;
5305 al->next = alias_list;
5306 alias_list = al;
5307 return 0;
5308}
5309
5310static int __init slab_sysfs_init(void)
5311{
5b95a4ac 5312 struct kmem_cache *s;
81819f0f
CL
5313 int err;
5314
18004c5d 5315 mutex_lock(&slab_mutex);
2bce6485 5316
0ff21e46 5317 slab_kset = kset_create_and_add("slab", &slab_uevent_ops, kernel_kobj);
27c3a314 5318 if (!slab_kset) {
18004c5d 5319 mutex_unlock(&slab_mutex);
81819f0f
CL
5320 printk(KERN_ERR "Cannot register slab subsystem.\n");
5321 return -ENOSYS;
5322 }
5323
97d06609 5324 slab_state = FULL;
26a7bd03 5325
5b95a4ac 5326 list_for_each_entry(s, &slab_caches, list) {
26a7bd03 5327 err = sysfs_slab_add(s);
5d540fb7
CL
5328 if (err)
5329 printk(KERN_ERR "SLUB: Unable to add boot slab %s"
5330 " to sysfs\n", s->name);
26a7bd03 5331 }
81819f0f
CL
5332
5333 while (alias_list) {
5334 struct saved_alias *al = alias_list;
5335
5336 alias_list = alias_list->next;
5337 err = sysfs_slab_alias(al->s, al->name);
5d540fb7
CL
5338 if (err)
5339 printk(KERN_ERR "SLUB: Unable to add boot slab alias"
068ce415 5340 " %s to sysfs\n", al->name);
81819f0f
CL
5341 kfree(al);
5342 }
5343
18004c5d 5344 mutex_unlock(&slab_mutex);
81819f0f
CL
5345 resiliency_test();
5346 return 0;
5347}
5348
5349__initcall(slab_sysfs_init);
ab4d5ed5 5350#endif /* CONFIG_SYSFS */
57ed3eda
PE
5351
5352/*
5353 * The /proc/slabinfo ABI
5354 */
158a9624 5355#ifdef CONFIG_SLABINFO
0d7561c6 5356void get_slabinfo(struct kmem_cache *s, struct slabinfo *sinfo)
57ed3eda 5357{
57ed3eda 5358 unsigned long nr_slabs = 0;
205ab99d
CL
5359 unsigned long nr_objs = 0;
5360 unsigned long nr_free = 0;
57ed3eda
PE
5361 int node;
5362
57ed3eda
PE
5363 for_each_online_node(node) {
5364 struct kmem_cache_node *n = get_node(s, node);
5365
5366 if (!n)
5367 continue;
5368
c17fd13e
WL
5369 nr_slabs += node_nr_slabs(n);
5370 nr_objs += node_nr_objs(n);
205ab99d 5371 nr_free += count_partial(n, count_free);
57ed3eda
PE
5372 }
5373
0d7561c6
GC
5374 sinfo->active_objs = nr_objs - nr_free;
5375 sinfo->num_objs = nr_objs;
5376 sinfo->active_slabs = nr_slabs;
5377 sinfo->num_slabs = nr_slabs;
5378 sinfo->objects_per_slab = oo_objects(s->oo);
5379 sinfo->cache_order = oo_order(s->oo);
57ed3eda
PE
5380}
5381
0d7561c6 5382void slabinfo_show_stats(struct seq_file *m, struct kmem_cache *s)
7b3c3a50 5383{
7b3c3a50
AD
5384}
5385
b7454ad3
GC
5386ssize_t slabinfo_write(struct file *file, const char __user *buffer,
5387 size_t count, loff_t *ppos)
7b3c3a50 5388{
b7454ad3 5389 return -EIO;
7b3c3a50 5390}
158a9624 5391#endif /* CONFIG_SLABINFO */