mm/sl[aou]b: Move sysfs_slab_add to common
/*
 * linux/mm/slab.c
 * Written by Mark Hemment, 1996/97.
 * (markhe@nextd.demon.co.uk)
 *
 * kmem_cache_destroy() + some cleanup - 1999 Andrea Arcangeli
 *
 * Major cleanup, different bufctl logic, per-cpu arrays
 *	(c) 2000 Manfred Spraul
 *
 * Cleanup, make the head arrays unconditional, preparation for NUMA
 *	(c) 2002 Manfred Spraul
 *
 * An implementation of the Slab Allocator as described in outline in;
 *	UNIX Internals: The New Frontiers by Uresh Vahalia
 *	Pub: Prentice Hall	ISBN 0-13-101908-2
 * or with a little more detail in;
 *	The Slab Allocator: An Object-Caching Kernel Memory Allocator
 *	Jeff Bonwick (Sun Microsystems).
 *	Presented at: USENIX Summer 1994 Technical Conference
 *
 * The memory is organized in caches, one cache for each object type.
 * (e.g. inode_cache, dentry_cache, buffer_head, vm_area_struct)
 * Each cache consists of many slabs (they are small (usually one
 * page long) and always contiguous), and each slab contains multiple
 * initialized objects.
 *
 * This means that your constructor is used only for newly allocated
 * slabs and you must pass objects with the same initializations to
 * kmem_cache_free.
 *
 * Each cache can only support one memory type (GFP_DMA, GFP_HIGHMEM,
 * normal). If you need a special memory type, then you must create a new
 * cache for that memory type.
 *
 * In order to reduce fragmentation, the slabs are sorted in 3 groups:
 *   full slabs with 0 free objects
 *   partial slabs
 *   empty slabs with no allocated objects
 *
 * If partial slabs exist, then new allocations come from these slabs,
 * otherwise from empty slabs or new slabs are allocated.
 *
 * kmem_cache_destroy() CAN CRASH if you try to allocate from the cache
 * during kmem_cache_destroy(). The caller must prevent concurrent allocs.
 *
 * Each cache has a short per-cpu head array, most allocs
 * and frees go into that array, and if that array overflows, then 1/2
 * of the entries in the array are given back into the global cache.
 * The head array is strictly LIFO and should improve the cache hit rates.
 * On SMP, it additionally reduces the spinlock operations.
 *
 * The c_cpuarray may not be read with enabled local interrupts -
 * it's changed with a smp_call_function().
 *
 * SMP synchronization:
 *  constructors and destructors are called without any locking.
 *  Several members in struct kmem_cache and struct slab never change, they
 *	are accessed without any locking.
 *  The per-cpu arrays are never accessed from the wrong cpu, no locking,
 *	and local interrupts are disabled so slab code is preempt-safe.
 *  The non-constant members are protected with a per-cache irq spinlock.
 *
 * Many thanks to Mark Hemment, who wrote another per-cpu slab patch
 * in 2000 - many ideas in the current implementation are derived from
 * his patch.
 *
 * Further notes from the original documentation:
 *
 * 11 April '97.  Started multi-threading - markhe
 *	The global cache-chain is protected by the mutex 'slab_mutex'.
 *	The mutex is only needed when accessing/extending the cache-chain, which
 *	can never happen inside an interrupt (kmem_cache_create(),
 *	kmem_cache_shrink() and kmem_cache_reap()).
 *
 * At present, each engine can be growing a cache.  This should be blocked.
 *
 * 15 March 2005. NUMA slab allocator.
 *	Shai Fultheim <shai@scalex86.org>.
 *	Shobhit Dayal <shobhit@calsoftinc.com>
 *	Alok N Kataria <alokk@calsoftinc.com>
 *	Christoph Lameter <christoph@lameter.com>
 *
 * Modified the slab allocator to be node aware on NUMA systems.
 * Each node has its own list of partial, free and full slabs.
 * All object allocations for a node occur from node specific slab lists.
 */

#include <linux/slab.h>
#include "slab.h"
#include <linux/mm.h>
#include <linux/poison.h>
#include <linux/swap.h>
#include <linux/cache.h>
#include <linux/interrupt.h>
#include <linux/init.h>
#include <linux/compiler.h>
#include <linux/cpuset.h>
#include <linux/proc_fs.h>
#include <linux/seq_file.h>
#include <linux/notifier.h>
#include <linux/kallsyms.h>
#include <linux/cpu.h>
#include <linux/sysctl.h>
#include <linux/module.h>
#include <linux/rcupdate.h>
#include <linux/string.h>
#include <linux/uaccess.h>
#include <linux/nodemask.h>
#include <linux/kmemleak.h>
#include <linux/mempolicy.h>
#include <linux/mutex.h>
#include <linux/fault-inject.h>
#include <linux/rtmutex.h>
#include <linux/reciprocal_div.h>
#include <linux/debugobjects.h>
#include <linux/kmemcheck.h>
#include <linux/memory.h>
#include <linux/prefetch.h>

#include <net/sock.h>

#include <asm/cacheflush.h>
#include <asm/tlbflush.h>
#include <asm/page.h>

#include <trace/events/kmem.h>

#include "internal.h"

/*
 * DEBUG	- 1 for kmem_cache_create() to honour; SLAB_RED_ZONE & SLAB_POISON.
 *		  0 for faster, smaller code (especially in the critical paths).
 *
 * STATS	- 1 to collect stats for /proc/slabinfo.
 *		  0 for faster, smaller code (especially in the critical paths).
 *
 * FORCED_DEBUG	- 1 enables SLAB_RED_ZONE and SLAB_POISON (if possible)
 */

#ifdef CONFIG_DEBUG_SLAB
#define	DEBUG		1
#define	STATS		1
#define	FORCED_DEBUG	1
#else
#define	DEBUG		0
#define	STATS		0
#define	FORCED_DEBUG	0
#endif

/* Shouldn't this be in a header file somewhere? */
#define	BYTES_PER_WORD		sizeof(void *)
#define	REDZONE_ALIGN		max(BYTES_PER_WORD, __alignof__(unsigned long long))

#ifndef ARCH_KMALLOC_FLAGS
#define ARCH_KMALLOC_FLAGS SLAB_HWCACHE_ALIGN
#endif

/*
 * true if a page was allocated from pfmemalloc reserves for network-based
 * swap
 */
static bool pfmemalloc_active __read_mostly;

/* Legal flag mask for kmem_cache_create(). */
#if DEBUG
# define CREATE_MASK	(SLAB_RED_ZONE | \
			 SLAB_POISON | SLAB_HWCACHE_ALIGN | \
			 SLAB_CACHE_DMA | \
			 SLAB_STORE_USER | \
			 SLAB_RECLAIM_ACCOUNT | SLAB_PANIC | \
			 SLAB_DESTROY_BY_RCU | SLAB_MEM_SPREAD | \
			 SLAB_DEBUG_OBJECTS | SLAB_NOLEAKTRACE | SLAB_NOTRACK)
#else
# define CREATE_MASK	(SLAB_HWCACHE_ALIGN | \
			 SLAB_CACHE_DMA | \
			 SLAB_RECLAIM_ACCOUNT | SLAB_PANIC | \
			 SLAB_DESTROY_BY_RCU | SLAB_MEM_SPREAD | \
			 SLAB_DEBUG_OBJECTS | SLAB_NOLEAKTRACE | SLAB_NOTRACK)
#endif

/*
 * kmem_bufctl_t:
 *
 * Bufctls are used for linking objs within a slab
 * linked offsets.
 *
 * This implementation relies on "struct page" for locating the cache &
 * slab an object belongs to.
 * This allows the bufctl structure to be small (one int), but limits
 * the number of objects a slab (not a cache) can contain when off-slab
 * bufctls are used. The limit is the size of the largest general cache
 * that does not use off-slab slabs.
 * For 32bit archs with 4 kB pages, this is 56.
 * This is not serious, as it is only for large objects, when it is unwise
 * to have too many per slab.
 * Note: This limit can be raised by introducing a general cache whose size
 * is less than 512 (PAGE_SIZE<<3), but greater than 256.
 */

typedef unsigned int kmem_bufctl_t;
#define BUFCTL_END	(((kmem_bufctl_t)(~0U))-0)
#define BUFCTL_FREE	(((kmem_bufctl_t)(~0U))-1)
#define	BUFCTL_ACTIVE	(((kmem_bufctl_t)(~0U))-2)
#define	SLAB_LIMIT	(((kmem_bufctl_t)(~0U))-3)

/*
 * struct slab_rcu
 *
 * slab_destroy on a SLAB_DESTROY_BY_RCU cache uses this structure to
 * arrange for kmem_freepages to be called via RCU.  This is useful if
 * we need to approach a kernel structure obliquely, from its address
 * obtained without the usual locking.  We can lock the structure to
 * stabilize it and check it's still at the given address, only if we
 * can be sure that the memory has not been meanwhile reused for some
 * other kind of object (which our subsystem's lock might corrupt).
 *
 * rcu_read_lock before reading the address, then rcu_read_unlock after
 * taking the spinlock within the structure expected at that address.
 */
struct slab_rcu {
	struct rcu_head head;
	struct kmem_cache *cachep;
	void *addr;
};

/*
 * struct slab
 *
 * Manages the objs in a slab. Placed either at the beginning of mem allocated
 * for a slab, or allocated from a general cache.
 * Slabs are chained into three lists: fully used, partial, fully free slabs.
 */
struct slab {
	union {
		struct {
			struct list_head list;
			unsigned long colouroff;
			void *s_mem;		/* including colour offset */
			unsigned int inuse;	/* num of objs active in slab */
			kmem_bufctl_t free;
			unsigned short nodeid;
		};
		struct slab_rcu __slab_cover_slab_rcu;
	};
};

/*
 * struct array_cache
 *
 * Purpose:
 * - LIFO ordering, to hand out cache-warm objects from _alloc
 * - reduce the number of linked list operations
 * - reduce spinlock operations
 *
 * The limit is stored in the per-cpu structure to reduce the data cache
 * footprint.
 *
 */
struct array_cache {
	unsigned int avail;
	unsigned int limit;
	unsigned int batchcount;
	unsigned int touched;
	spinlock_t lock;
	void *entry[];	/*
			 * Must have this definition in here for the proper
			 * alignment of array_cache. Also simplifies accessing
			 * the entries.
			 *
			 * Entries should not be directly dereferenced as
			 * entries belonging to slabs marked pfmemalloc will
			 * have the lower bits set SLAB_OBJ_PFMEMALLOC
			 */
};

#define SLAB_OBJ_PFMEMALLOC	1
static inline bool is_obj_pfmemalloc(void *objp)
{
	return (unsigned long)objp & SLAB_OBJ_PFMEMALLOC;
}

static inline void set_obj_pfmemalloc(void **objp)
{
	*objp = (void *)((unsigned long)*objp | SLAB_OBJ_PFMEMALLOC);
	return;
}

static inline void clear_obj_pfmemalloc(void **objp)
{
	*objp = (void *)((unsigned long)*objp & ~SLAB_OBJ_PFMEMALLOC);
}

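/*
 * Illustrative example (not part of the original source): slab objects are
 * at least word aligned, so bit 0 of an object pointer is normally clear and
 * can be reused as the pfmemalloc tag:
 *
 *	void *obj = ...;		e.g. 0xffff880012345680
 *	set_obj_pfmemalloc(&obj);	obj is now 0xffff880012345681
 *	is_obj_pfmemalloc(obj);		returns true
 *	clear_obj_pfmemalloc(&obj);	obj is back to 0xffff880012345680
 */
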
/*
 * bootstrap: The caches do not work without cpuarrays anymore, but the
 * cpuarrays are allocated from the generic caches...
 */
#define BOOT_CPUCACHE_ENTRIES	1
struct arraycache_init {
	struct array_cache cache;
	void *entries[BOOT_CPUCACHE_ENTRIES];
};

/*
 * The slab lists for all objects.
 */
struct kmem_list3 {
	struct list_head slabs_partial;	/* partial list first, better asm code */
	struct list_head slabs_full;
	struct list_head slabs_free;
	unsigned long free_objects;
	unsigned int free_limit;
	unsigned int colour_next;	/* Per-node cache coloring */
	spinlock_t list_lock;
	struct array_cache *shared;	/* shared per node */
	struct array_cache **alien;	/* on other nodes */
	unsigned long next_reap;	/* updated without locking */
	int free_touched;		/* updated without locking */
};

/*
 * Need this for bootstrapping a per node allocator.
 */
#define NUM_INIT_LISTS (3 * MAX_NUMNODES)
static struct kmem_list3 __initdata initkmem_list3[NUM_INIT_LISTS];
#define	CACHE_CACHE 0
#define	SIZE_AC MAX_NUMNODES
#define	SIZE_L3 (2 * MAX_NUMNODES)

static int drain_freelist(struct kmem_cache *cache,
			struct kmem_list3 *l3, int tofree);
static void free_block(struct kmem_cache *cachep, void **objpp, int len,
			int node);
static int enable_cpucache(struct kmem_cache *cachep, gfp_t gfp);
static void cache_reap(struct work_struct *unused);

/*
 * This function must be completely optimized away if a constant is passed to
 * it. Mostly the same as what is in linux/slab.h except it returns an index.
 */
static __always_inline int index_of(const size_t size)
{
	extern void __bad_size(void);

	if (__builtin_constant_p(size)) {
		int i = 0;

#define CACHE(x) \
	if (size <= x) \
		return i; \
	else \
		i++;
#include <linux/kmalloc_sizes.h>
#undef CACHE
		__bad_size();
	} else
		__bad_size();
	return 0;
}

static int slab_early_init = 1;

#define INDEX_AC index_of(sizeof(struct arraycache_init))
#define INDEX_L3 index_of(sizeof(struct kmem_list3))

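/*
 * Illustrative note (not part of the original source): index_of() folds to a
 * compile-time constant, so the two defines above simply record which general
 * kmalloc cache is the smallest one able to hold the bootstrap structures.
 * For example, if sizeof(struct arraycache_init) were 32 and the first
 * entries generated from kmalloc_sizes.h were 32, 64, 96, ..., INDEX_AC
 * would evaluate to 0.
 */
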
5295a74c 366static void kmem_list3_init(struct kmem_list3 *parent)
e498be7d
CL
367{
368 INIT_LIST_HEAD(&parent->slabs_full);
369 INIT_LIST_HEAD(&parent->slabs_partial);
370 INIT_LIST_HEAD(&parent->slabs_free);
371 parent->shared = NULL;
372 parent->alien = NULL;
2e1217cf 373 parent->colour_next = 0;
e498be7d
CL
374 spin_lock_init(&parent->list_lock);
375 parent->free_objects = 0;
376 parent->free_touched = 0;
377}
378
a737b3e2
AM
379#define MAKE_LIST(cachep, listp, slab, nodeid) \
380 do { \
381 INIT_LIST_HEAD(listp); \
382 list_splice(&(cachep->nodelists[nodeid]->slab), listp); \
e498be7d
CL
383 } while (0)
384
a737b3e2
AM
385#define MAKE_ALL_LISTS(cachep, ptr, nodeid) \
386 do { \
e498be7d
CL
387 MAKE_LIST((cachep), (&(ptr)->slabs_full), slabs_full, nodeid); \
388 MAKE_LIST((cachep), (&(ptr)->slabs_partial), slabs_partial, nodeid); \
389 MAKE_LIST((cachep), (&(ptr)->slabs_free), slabs_free, nodeid); \
390 } while (0)
1da177e4 391
1da177e4
LT
392#define CFLGS_OFF_SLAB (0x80000000UL)
393#define OFF_SLAB(x) ((x)->flags & CFLGS_OFF_SLAB)
394
395#define BATCHREFILL_LIMIT 16
a737b3e2
AM
396/*
 397 * Optimization question: fewer reaps means less probability for unnecessary
398 * cpucache drain/refill cycles.
1da177e4 399 *
dc6f3f27 400 * OTOH the cpuarrays can contain lots of objects,
1da177e4
LT
401 * which could lock up otherwise freeable slabs.
402 */
403#define REAPTIMEOUT_CPUC (2*HZ)
404#define REAPTIMEOUT_LIST3 (4*HZ)
405
406#if STATS
407#define STATS_INC_ACTIVE(x) ((x)->num_active++)
408#define STATS_DEC_ACTIVE(x) ((x)->num_active--)
409#define STATS_INC_ALLOCED(x) ((x)->num_allocations++)
410#define STATS_INC_GROWN(x) ((x)->grown++)
ed11d9eb 411#define STATS_ADD_REAPED(x,y) ((x)->reaped += (y))
a737b3e2
AM
412#define STATS_SET_HIGH(x) \
413 do { \
414 if ((x)->num_active > (x)->high_mark) \
415 (x)->high_mark = (x)->num_active; \
416 } while (0)
1da177e4
LT
417#define STATS_INC_ERR(x) ((x)->errors++)
418#define STATS_INC_NODEALLOCS(x) ((x)->node_allocs++)
e498be7d 419#define STATS_INC_NODEFREES(x) ((x)->node_frees++)
fb7faf33 420#define STATS_INC_ACOVERFLOW(x) ((x)->node_overflow++)
a737b3e2
AM
421#define STATS_SET_FREEABLE(x, i) \
422 do { \
423 if ((x)->max_freeable < i) \
424 (x)->max_freeable = i; \
425 } while (0)
1da177e4
LT
426#define STATS_INC_ALLOCHIT(x) atomic_inc(&(x)->allochit)
427#define STATS_INC_ALLOCMISS(x) atomic_inc(&(x)->allocmiss)
428#define STATS_INC_FREEHIT(x) atomic_inc(&(x)->freehit)
429#define STATS_INC_FREEMISS(x) atomic_inc(&(x)->freemiss)
430#else
431#define STATS_INC_ACTIVE(x) do { } while (0)
432#define STATS_DEC_ACTIVE(x) do { } while (0)
433#define STATS_INC_ALLOCED(x) do { } while (0)
434#define STATS_INC_GROWN(x) do { } while (0)
4e60c86b 435#define STATS_ADD_REAPED(x,y) do { (void)(y); } while (0)
1da177e4
LT
436#define STATS_SET_HIGH(x) do { } while (0)
437#define STATS_INC_ERR(x) do { } while (0)
438#define STATS_INC_NODEALLOCS(x) do { } while (0)
e498be7d 439#define STATS_INC_NODEFREES(x) do { } while (0)
fb7faf33 440#define STATS_INC_ACOVERFLOW(x) do { } while (0)
a737b3e2 441#define STATS_SET_FREEABLE(x, i) do { } while (0)
1da177e4
LT
442#define STATS_INC_ALLOCHIT(x) do { } while (0)
443#define STATS_INC_ALLOCMISS(x) do { } while (0)
444#define STATS_INC_FREEHIT(x) do { } while (0)
445#define STATS_INC_FREEMISS(x) do { } while (0)
446#endif
447
448#if DEBUG
1da177e4 449
a737b3e2
AM
450/*
451 * memory layout of objects:
1da177e4 452 * 0 : objp
3dafccf2 453 * 0 .. cachep->obj_offset - BYTES_PER_WORD - 1: padding. This ensures that
1da177e4
LT
454 * the end of an object is aligned with the end of the real
455 * allocation. Catches writes behind the end of the allocation.
3dafccf2 456 * cachep->obj_offset - BYTES_PER_WORD .. cachep->obj_offset - 1:
1da177e4 457 * redzone word.
3dafccf2 458 * cachep->obj_offset: The real object.
3b0efdfa
CL
459 * cachep->size - 2* BYTES_PER_WORD: redzone word [BYTES_PER_WORD long]
460 * cachep->size - 1* BYTES_PER_WORD: last caller address
a737b3e2 461 * [BYTES_PER_WORD long]
1da177e4 462 */
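/*
 * Illustrative example (not part of the original source): with SLAB_RED_ZONE
 * and SLAB_STORE_USER set for a 64-byte object, each object's buffer within
 * the slab is roughly laid out as
 *	[red-zone word][64-byte object][red-zone word][last caller address]
 * so obj_offset() points past the leading red zone while cachep->size covers
 * the whole decorated buffer, not just the 64 payload bytes.
 */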
343e0d7a 463static int obj_offset(struct kmem_cache *cachep)
1da177e4 464{
3dafccf2 465 return cachep->obj_offset;
1da177e4
LT
466}
467
b46b8f19 468static unsigned long long *dbg_redzone1(struct kmem_cache *cachep, void *objp)
1da177e4
LT
469{
470 BUG_ON(!(cachep->flags & SLAB_RED_ZONE));
b46b8f19
DW
471 return (unsigned long long*) (objp + obj_offset(cachep) -
472 sizeof(unsigned long long));
1da177e4
LT
473}
474
b46b8f19 475static unsigned long long *dbg_redzone2(struct kmem_cache *cachep, void *objp)
1da177e4
LT
476{
477 BUG_ON(!(cachep->flags & SLAB_RED_ZONE));
478 if (cachep->flags & SLAB_STORE_USER)
3b0efdfa 479 return (unsigned long long *)(objp + cachep->size -
b46b8f19 480 sizeof(unsigned long long) -
87a927c7 481 REDZONE_ALIGN);
3b0efdfa 482 return (unsigned long long *) (objp + cachep->size -
b46b8f19 483 sizeof(unsigned long long));
1da177e4
LT
484}
485
343e0d7a 486static void **dbg_userword(struct kmem_cache *cachep, void *objp)
1da177e4
LT
487{
488 BUG_ON(!(cachep->flags & SLAB_STORE_USER));
3b0efdfa 489 return (void **)(objp + cachep->size - BYTES_PER_WORD);
1da177e4
LT
490}
491
492#else
493
3dafccf2 494#define obj_offset(x) 0
b46b8f19
DW
495#define dbg_redzone1(cachep, objp) ({BUG(); (unsigned long long *)NULL;})
496#define dbg_redzone2(cachep, objp) ({BUG(); (unsigned long long *)NULL;})
1da177e4
LT
497#define dbg_userword(cachep, objp) ({BUG(); (void **)NULL;})
498
499#endif
500
0f24f128 501#ifdef CONFIG_TRACING
36555751
EGM
502size_t slab_buffer_size(struct kmem_cache *cachep)
503{
3b0efdfa 504 return cachep->size;
36555751
EGM
505}
506EXPORT_SYMBOL(slab_buffer_size);
507#endif
508
1da177e4 509/*
3df1cccd
DR
510 * Do not go above this order unless 0 objects fit into the slab or
511 * overridden on the command line.
1da177e4 512 */
543585cc
DR
513#define SLAB_MAX_ORDER_HI 1
514#define SLAB_MAX_ORDER_LO 0
515static int slab_max_order = SLAB_MAX_ORDER_LO;
3df1cccd 516static bool slab_max_order_set __initdata;
1da177e4 517
6ed5eb22
PE
518static inline struct kmem_cache *virt_to_cache(const void *obj)
519{
b49af68f 520 struct page *page = virt_to_head_page(obj);
35026088 521 return page->slab_cache;
6ed5eb22
PE
522}
523
524static inline struct slab *virt_to_slab(const void *obj)
525{
b49af68f 526 struct page *page = virt_to_head_page(obj);
35026088
CL
527
528 VM_BUG_ON(!PageSlab(page));
529 return page->slab_page;
6ed5eb22
PE
530}
531
8fea4e96
PE
532static inline void *index_to_obj(struct kmem_cache *cache, struct slab *slab,
533 unsigned int idx)
534{
3b0efdfa 535 return slab->s_mem + cache->size * idx;
8fea4e96
PE
536}
537
6a2d7a95 538/*
3b0efdfa
CL
539 * We want to avoid an expensive divide : (offset / cache->size)
540 * Using the fact that size is a constant for a particular cache,
541 * we can replace (offset / cache->size) by
6a2d7a95
ED
542 * reciprocal_divide(offset, cache->reciprocal_buffer_size)
543 */
544static inline unsigned int obj_to_index(const struct kmem_cache *cache,
545 const struct slab *slab, void *obj)
8fea4e96 546{
6a2d7a95
ED
547 u32 offset = (obj - slab->s_mem);
548 return reciprocal_divide(offset, cache->reciprocal_buffer_size);
8fea4e96
PE
549}
550
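/*
 * Illustrative example (not part of the original source): for a cache with
 * 256-byte objects, obj_to_index() replaces the divide
 *	(obj - slab->s_mem) / 256
 * with a multiply and shift through reciprocal_divide(), so an object that
 * starts 1024 bytes into the slab maps to index 4 either way.
 */
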
a737b3e2
AM
551/*
552 * These are the default caches for kmalloc. Custom caches can have other sizes.
553 */
1da177e4
LT
554struct cache_sizes malloc_sizes[] = {
555#define CACHE(x) { .cs_size = (x) },
556#include <linux/kmalloc_sizes.h>
557 CACHE(ULONG_MAX)
558#undef CACHE
559};
560EXPORT_SYMBOL(malloc_sizes);
561
562/* Must match cache_sizes above. Out of line to keep cache footprint low. */
563struct cache_names {
564 char *name;
565 char *name_dma;
566};
567
568static struct cache_names __initdata cache_names[] = {
569#define CACHE(x) { .name = "size-" #x, .name_dma = "size-" #x "(DMA)" },
570#include <linux/kmalloc_sizes.h>
b28a02de 571 {NULL,}
1da177e4
LT
572#undef CACHE
573};
574
575static struct arraycache_init initarray_cache __initdata =
b28a02de 576 { {0, BOOT_CPUCACHE_ENTRIES, 1, 0} };
1da177e4 577static struct arraycache_init initarray_generic =
b28a02de 578 { {0, BOOT_CPUCACHE_ENTRIES, 1, 0} };
1da177e4
LT
579
580/* internal cache of cache description objs */
9b030cb8
CL
581static struct kmem_list3 *kmem_cache_nodelists[MAX_NUMNODES];
582static struct kmem_cache kmem_cache_boot = {
583 .nodelists = kmem_cache_nodelists,
b28a02de
PE
584 .batchcount = 1,
585 .limit = BOOT_CPUCACHE_ENTRIES,
586 .shared = 1,
3b0efdfa 587 .size = sizeof(struct kmem_cache),
b28a02de 588 .name = "kmem_cache",
1da177e4
LT
589};
590
056c6241
RT
591#define BAD_ALIEN_MAGIC 0x01020304ul
592
f1aaee53
AV
593#ifdef CONFIG_LOCKDEP
594
595/*
596 * Slab sometimes uses the kmalloc slabs to store the slab headers
597 * for other slabs "off slab".
598 * The locking for this is tricky in that it nests within the locks
599 * of all other slabs in a few places; to deal with this special
600 * locking we put on-slab caches into a separate lock-class.
056c6241
RT
601 *
602 * We set lock class for alien array caches which are up during init.
603 * The lock annotation will be lost if all cpus of a node goes down and
604 * then comes back up during hotplug
f1aaee53 605 */
056c6241
RT
606static struct lock_class_key on_slab_l3_key;
607static struct lock_class_key on_slab_alc_key;
608
83835b3d
PZ
609static struct lock_class_key debugobj_l3_key;
610static struct lock_class_key debugobj_alc_key;
611
612static void slab_set_lock_classes(struct kmem_cache *cachep,
613 struct lock_class_key *l3_key, struct lock_class_key *alc_key,
614 int q)
615{
616 struct array_cache **alc;
617 struct kmem_list3 *l3;
618 int r;
619
620 l3 = cachep->nodelists[q];
621 if (!l3)
622 return;
623
624 lockdep_set_class(&l3->list_lock, l3_key);
625 alc = l3->alien;
626 /*
627 * FIXME: This check for BAD_ALIEN_MAGIC
628 * should go away when common slab code is taught to
629 * work even without alien caches.
630 * Currently, non NUMA code returns BAD_ALIEN_MAGIC
631 * for alloc_alien_cache,
632 */
633 if (!alc || (unsigned long)alc == BAD_ALIEN_MAGIC)
634 return;
635 for_each_node(r) {
636 if (alc[r])
637 lockdep_set_class(&alc[r]->lock, alc_key);
638 }
639}
640
641static void slab_set_debugobj_lock_classes_node(struct kmem_cache *cachep, int node)
642{
643 slab_set_lock_classes(cachep, &debugobj_l3_key, &debugobj_alc_key, node);
644}
645
646static void slab_set_debugobj_lock_classes(struct kmem_cache *cachep)
647{
648 int node;
649
650 for_each_online_node(node)
651 slab_set_debugobj_lock_classes_node(cachep, node);
652}
653
ce79ddc8 654static void init_node_lock_keys(int q)
f1aaee53 655{
056c6241
RT
656 struct cache_sizes *s = malloc_sizes;
657
97d06609 658 if (slab_state < UP)
ce79ddc8
PE
659 return;
660
661 for (s = malloc_sizes; s->cs_size != ULONG_MAX; s++) {
ce79ddc8 662 struct kmem_list3 *l3;
ce79ddc8
PE
663
664 l3 = s->cs_cachep->nodelists[q];
665 if (!l3 || OFF_SLAB(s->cs_cachep))
00afa758 666 continue;
83835b3d
PZ
667
668 slab_set_lock_classes(s->cs_cachep, &on_slab_l3_key,
669 &on_slab_alc_key, q);
f1aaee53
AV
670 }
671}
ce79ddc8
PE
672
673static inline void init_lock_keys(void)
674{
675 int node;
676
677 for_each_node(node)
678 init_node_lock_keys(node);
679}
f1aaee53 680#else
ce79ddc8
PE
681static void init_node_lock_keys(int q)
682{
683}
684
056c6241 685static inline void init_lock_keys(void)
f1aaee53
AV
686{
687}
83835b3d
PZ
688
689static void slab_set_debugobj_lock_classes_node(struct kmem_cache *cachep, int node)
690{
691}
692
693static void slab_set_debugobj_lock_classes(struct kmem_cache *cachep)
694{
695}
f1aaee53
AV
696#endif
697
1871e52c 698static DEFINE_PER_CPU(struct delayed_work, slab_reap_work);
1da177e4 699
343e0d7a 700static inline struct array_cache *cpu_cache_get(struct kmem_cache *cachep)
1da177e4
LT
701{
702 return cachep->array[smp_processor_id()];
703}
704
a737b3e2
AM
705static inline struct kmem_cache *__find_general_cachep(size_t size,
706 gfp_t gfpflags)
1da177e4
LT
707{
708 struct cache_sizes *csizep = malloc_sizes;
709
710#if DEBUG
711 /* This happens if someone tries to call
b28a02de
PE
712 * kmem_cache_create(), or __kmalloc(), before
713 * the generic caches are initialized.
714 */
c7e43c78 715 BUG_ON(malloc_sizes[INDEX_AC].cs_cachep == NULL);
1da177e4 716#endif
6cb8f913
CL
717 if (!size)
718 return ZERO_SIZE_PTR;
719
1da177e4
LT
720 while (size > csizep->cs_size)
721 csizep++;
722
723 /*
0abf40c1 724 * Really subtle: The last entry with cs->cs_size==ULONG_MAX
1da177e4
LT
725 * has cs_{dma,}cachep==NULL. Thus no special case
726 * for large kmalloc calls required.
727 */
4b51d669 728#ifdef CONFIG_ZONE_DMA
1da177e4
LT
729 if (unlikely(gfpflags & GFP_DMA))
730 return csizep->cs_dmacachep;
4b51d669 731#endif
1da177e4
LT
732 return csizep->cs_cachep;
733}
734
b221385b 735static struct kmem_cache *kmem_find_general_cachep(size_t size, gfp_t gfpflags)
97e2bde4
MS
736{
737 return __find_general_cachep(size, gfpflags);
738}
97e2bde4 739
fbaccacf 740static size_t slab_mgmt_size(size_t nr_objs, size_t align)
1da177e4 741{
fbaccacf
SR
742 return ALIGN(sizeof(struct slab)+nr_objs*sizeof(kmem_bufctl_t), align);
743}
1da177e4 744
a737b3e2
AM
745/*
746 * Calculate the number of objects and left-over bytes for a given buffer size.
747 */
fbaccacf
SR
748static void cache_estimate(unsigned long gfporder, size_t buffer_size,
749 size_t align, int flags, size_t *left_over,
750 unsigned int *num)
751{
752 int nr_objs;
753 size_t mgmt_size;
754 size_t slab_size = PAGE_SIZE << gfporder;
1da177e4 755
fbaccacf
SR
756 /*
757 * The slab management structure can be either off the slab or
758 * on it. For the latter case, the memory allocated for a
759 * slab is used for:
760 *
761 * - The struct slab
762 * - One kmem_bufctl_t for each object
763 * - Padding to respect alignment of @align
764 * - @buffer_size bytes for each object
765 *
766 * If the slab management structure is off the slab, then the
767 * alignment will already be calculated into the size. Because
768 * the slabs are all pages aligned, the objects will be at the
769 * correct alignment when allocated.
770 */
771 if (flags & CFLGS_OFF_SLAB) {
772 mgmt_size = 0;
773 nr_objs = slab_size / buffer_size;
774
775 if (nr_objs > SLAB_LIMIT)
776 nr_objs = SLAB_LIMIT;
777 } else {
778 /*
779 * Ignore padding for the initial guess. The padding
780 * is at most @align-1 bytes, and @buffer_size is at
781 * least @align. In the worst case, this result will
782 * be one greater than the number of objects that fit
783 * into the memory allocation when taking the padding
784 * into account.
785 */
786 nr_objs = (slab_size - sizeof(struct slab)) /
787 (buffer_size + sizeof(kmem_bufctl_t));
788
789 /*
790 * This calculated number will be either the right
791 * amount, or one greater than what we want.
792 */
793 if (slab_mgmt_size(nr_objs, align) + nr_objs*buffer_size
794 > slab_size)
795 nr_objs--;
796
797 if (nr_objs > SLAB_LIMIT)
798 nr_objs = SLAB_LIMIT;
799
800 mgmt_size = slab_mgmt_size(nr_objs, align);
801 }
802 *num = nr_objs;
803 *left_over = slab_size - nr_objs*buffer_size - mgmt_size;
1da177e4
LT
804}
805
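/*
 * Worked example (illustrative, not part of the original source; assumes
 * 4 kB pages, a 56-byte struct slab, 4-byte kmem_bufctl_t and 64-byte
 * alignment): for an on-slab cache with 256-byte objects at order 0, the
 * initial guess is (4096 - 56) / (256 + 4) = 15 objects; slab_mgmt_size(15,
 * 64) = 128 plus 15 * 256 = 3840 still fits in 4096, so *num stays 15 and
 * *left_over ends up as 4096 - 3840 - 128 = 128 bytes, later usable for
 * cache colouring.
 */
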
d40cee24 806#define slab_error(cachep, msg) __slab_error(__func__, cachep, msg)
1da177e4 807
a737b3e2
AM
808static void __slab_error(const char *function, struct kmem_cache *cachep,
809 char *msg)
1da177e4
LT
810{
811 printk(KERN_ERR "slab error in %s(): cache `%s': %s\n",
b28a02de 812 function, cachep->name, msg);
1da177e4
LT
813 dump_stack();
814}
815
3395ee05
PM
816/*
817 * By default on NUMA we use alien caches to stage the freeing of
818 * objects allocated from other nodes. This causes massive memory
819 * inefficiencies when using fake NUMA setup to split memory into a
820 * large number of small nodes, so it can be disabled on the command
821 * line
822 */
823
824static int use_alien_caches __read_mostly = 1;
825static int __init noaliencache_setup(char *s)
826{
827 use_alien_caches = 0;
828 return 1;
829}
830__setup("noaliencache", noaliencache_setup);
831
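/*
 * Usage note (illustrative, not part of the original source): booting with
 * "noaliencache" makes the handler above clear use_alien_caches, so no alien
 * arrays are allocated and frees of remote-node objects fall back to taking
 * the owning node's list_lock directly - less array_cache memory on setups
 * with very many (e.g. fake NUMA) nodes, at the cost of free-path batching.
 */
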
3df1cccd
DR
832static int __init slab_max_order_setup(char *str)
833{
834 get_option(&str, &slab_max_order);
835 slab_max_order = slab_max_order < 0 ? 0 :
836 min(slab_max_order, MAX_ORDER - 1);
837 slab_max_order_set = true;
838
839 return 1;
840}
841__setup("slab_max_order=", slab_max_order_setup);
842
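/*
 * Usage note (illustrative, not part of the original source): booting with
 * "slab_max_order=1" caps slab pages at order 1 (two contiguous pages) and,
 * because slab_max_order_set becomes true, also prevents kmem_cache_init()
 * from bumping the default to SLAB_MAX_ORDER_HI on large-memory machines;
 * out-of-range values are clamped to 0..MAX_ORDER-1 by the handler above.
 */
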
8fce4d8e
CL
843#ifdef CONFIG_NUMA
844/*
845 * Special reaping functions for NUMA systems called from cache_reap().
846 * These take care of doing round robin flushing of alien caches (containing
847 * objects freed on different nodes from which they were allocated) and the
848 * flushing of remote pcps by calling drain_node_pages.
849 */
1871e52c 850static DEFINE_PER_CPU(unsigned long, slab_reap_node);
8fce4d8e
CL
851
852static void init_reap_node(int cpu)
853{
854 int node;
855
7d6e6d09 856 node = next_node(cpu_to_mem(cpu), node_online_map);
8fce4d8e 857 if (node == MAX_NUMNODES)
442295c9 858 node = first_node(node_online_map);
8fce4d8e 859
1871e52c 860 per_cpu(slab_reap_node, cpu) = node;
8fce4d8e
CL
861}
862
863static void next_reap_node(void)
864{
909ea964 865 int node = __this_cpu_read(slab_reap_node);
8fce4d8e 866
8fce4d8e
CL
867 node = next_node(node, node_online_map);
868 if (unlikely(node >= MAX_NUMNODES))
869 node = first_node(node_online_map);
909ea964 870 __this_cpu_write(slab_reap_node, node);
8fce4d8e
CL
871}
872
873#else
874#define init_reap_node(cpu) do { } while (0)
875#define next_reap_node(void) do { } while (0)
876#endif
877
1da177e4
LT
878/*
879 * Initiate the reap timer running on the target CPU. We run at around 1 to 2Hz
880 * via the workqueue/eventd.
881 * Add the CPU number into the expiration time to minimize the possibility of
882 * the CPUs getting into lockstep and contending for the global cache chain
883 * lock.
884 */
897e679b 885static void __cpuinit start_cpu_timer(int cpu)
1da177e4 886{
1871e52c 887 struct delayed_work *reap_work = &per_cpu(slab_reap_work, cpu);
1da177e4
LT
888
889 /*
890 * When this gets called from do_initcalls via cpucache_init(),
891 * init_workqueues() has already run, so keventd will be setup
892 * at that time.
893 */
52bad64d 894 if (keventd_up() && reap_work->work.func == NULL) {
8fce4d8e 895 init_reap_node(cpu);
78b43536 896 INIT_DELAYED_WORK_DEFERRABLE(reap_work, cache_reap);
2b284214
AV
897 schedule_delayed_work_on(cpu, reap_work,
898 __round_jiffies_relative(HZ, cpu));
1da177e4
LT
899 }
900}
901
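/*
 * Illustrative note (not part of the original source): the
 * __round_jiffies_relative(HZ, cpu) delay above rounds the first expiry to a
 * whole second and skews it per cpu, so e.g. cpu0 and cpu1 both start
 * reaping roughly a second later but on different jiffies, which keeps their
 * cache_reap() work from running in lockstep.
 */
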
e498be7d 902static struct array_cache *alloc_arraycache(int node, int entries,
83b519e8 903 int batchcount, gfp_t gfp)
1da177e4 904{
b28a02de 905 int memsize = sizeof(void *) * entries + sizeof(struct array_cache);
1da177e4
LT
906 struct array_cache *nc = NULL;
907
83b519e8 908 nc = kmalloc_node(memsize, gfp, node);
d5cff635
CM
909 /*
 910 * The array_cache structures contain pointers to free objects.
25985edc 911 * However, when such objects are allocated or transferred to another
d5cff635
CM
912 * cache the pointers are not cleared and they could be counted as
913 * valid references during a kmemleak scan. Therefore, kmemleak must
914 * not scan such objects.
915 */
916 kmemleak_no_scan(nc);
1da177e4
LT
917 if (nc) {
918 nc->avail = 0;
919 nc->limit = entries;
920 nc->batchcount = batchcount;
921 nc->touched = 0;
e498be7d 922 spin_lock_init(&nc->lock);
1da177e4
LT
923 }
924 return nc;
925}
926
072bb0aa
MG
927static inline bool is_slab_pfmemalloc(struct slab *slabp)
928{
929 struct page *page = virt_to_page(slabp->s_mem);
930
931 return PageSlabPfmemalloc(page);
932}
933
 934/* Clears pfmemalloc_active if no slabs have pfmemalloc set */
935static void recheck_pfmemalloc_active(struct kmem_cache *cachep,
936 struct array_cache *ac)
937{
938 struct kmem_list3 *l3 = cachep->nodelists[numa_mem_id()];
939 struct slab *slabp;
940 unsigned long flags;
941
942 if (!pfmemalloc_active)
943 return;
944
945 spin_lock_irqsave(&l3->list_lock, flags);
946 list_for_each_entry(slabp, &l3->slabs_full, list)
947 if (is_slab_pfmemalloc(slabp))
948 goto out;
949
950 list_for_each_entry(slabp, &l3->slabs_partial, list)
951 if (is_slab_pfmemalloc(slabp))
952 goto out;
953
954 list_for_each_entry(slabp, &l3->slabs_free, list)
955 if (is_slab_pfmemalloc(slabp))
956 goto out;
957
958 pfmemalloc_active = false;
959out:
960 spin_unlock_irqrestore(&l3->list_lock, flags);
961}
962
381760ea 963static void *__ac_get_obj(struct kmem_cache *cachep, struct array_cache *ac,
072bb0aa
MG
964 gfp_t flags, bool force_refill)
965{
966 int i;
967 void *objp = ac->entry[--ac->avail];
968
969 /* Ensure the caller is allowed to use objects from PFMEMALLOC slab */
970 if (unlikely(is_obj_pfmemalloc(objp))) {
971 struct kmem_list3 *l3;
972
973 if (gfp_pfmemalloc_allowed(flags)) {
974 clear_obj_pfmemalloc(&objp);
975 return objp;
976 }
977
978 /* The caller cannot use PFMEMALLOC objects, find another one */
979 for (i = 1; i < ac->avail; i++) {
980 /* If a !PFMEMALLOC object is found, swap them */
981 if (!is_obj_pfmemalloc(ac->entry[i])) {
982 objp = ac->entry[i];
983 ac->entry[i] = ac->entry[ac->avail];
984 ac->entry[ac->avail] = objp;
985 return objp;
986 }
987 }
988
989 /*
990 * If there are empty slabs on the slabs_free list and we are
991 * being forced to refill the cache, mark this one !pfmemalloc.
992 */
993 l3 = cachep->nodelists[numa_mem_id()];
994 if (!list_empty(&l3->slabs_free) && force_refill) {
995 struct slab *slabp = virt_to_slab(objp);
996 ClearPageSlabPfmemalloc(virt_to_page(slabp->s_mem));
997 clear_obj_pfmemalloc(&objp);
998 recheck_pfmemalloc_active(cachep, ac);
999 return objp;
1000 }
1001
1002 /* No !PFMEMALLOC objects available */
1003 ac->avail++;
1004 objp = NULL;
1005 }
1006
1007 return objp;
1008}
1009
381760ea
MG
1010static inline void *ac_get_obj(struct kmem_cache *cachep,
1011 struct array_cache *ac, gfp_t flags, bool force_refill)
1012{
1013 void *objp;
1014
1015 if (unlikely(sk_memalloc_socks()))
1016 objp = __ac_get_obj(cachep, ac, flags, force_refill);
1017 else
1018 objp = ac->entry[--ac->avail];
1019
1020 return objp;
1021}
1022
1023static void *__ac_put_obj(struct kmem_cache *cachep, struct array_cache *ac,
072bb0aa
MG
1024 void *objp)
1025{
1026 if (unlikely(pfmemalloc_active)) {
1027 /* Some pfmemalloc slabs exist, check if this is one */
1028 struct page *page = virt_to_page(objp);
1029 if (PageSlabPfmemalloc(page))
1030 set_obj_pfmemalloc(&objp);
1031 }
1032
381760ea
MG
1033 return objp;
1034}
1035
1036static inline void ac_put_obj(struct kmem_cache *cachep, struct array_cache *ac,
1037 void *objp)
1038{
1039 if (unlikely(sk_memalloc_socks()))
1040 objp = __ac_put_obj(cachep, ac, objp);
1041
072bb0aa
MG
1042 ac->entry[ac->avail++] = objp;
1043}
1044
3ded175a
CL
1045/*
1046 * Transfer objects in one arraycache to another.
1047 * Locking must be handled by the caller.
1048 *
1049 * Return the number of entries transferred.
1050 */
1051static int transfer_objects(struct array_cache *to,
1052 struct array_cache *from, unsigned int max)
1053{
1054 /* Figure out how many entries to transfer */
732eacc0 1055 int nr = min3(from->avail, max, to->limit - to->avail);
3ded175a
CL
1056
1057 if (!nr)
1058 return 0;
1059
1060 memcpy(to->entry + to->avail, from->entry + from->avail -nr,
1061 sizeof(void *) *nr);
1062
1063 from->avail -= nr;
1064 to->avail += nr;
3ded175a
CL
1065 return nr;
1066}
1067
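/*
 * Illustrative example (not part of the original source): with from->avail
 * = 10, max = 6 and to->limit - to->avail = 4, transfer_objects() moves
 * min3(10, 6, 4) = 4 pointers, copying the topmost (most recently pushed,
 * i.e. cache-warmest) entries of 'from' onto the top of 'to'.
 */
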
765c4507
CL
1068#ifndef CONFIG_NUMA
1069
1070#define drain_alien_cache(cachep, alien) do { } while (0)
1071#define reap_alien(cachep, l3) do { } while (0)
1072
83b519e8 1073static inline struct array_cache **alloc_alien_cache(int node, int limit, gfp_t gfp)
765c4507
CL
1074{
1075 return (struct array_cache **)BAD_ALIEN_MAGIC;
1076}
1077
1078static inline void free_alien_cache(struct array_cache **ac_ptr)
1079{
1080}
1081
1082static inline int cache_free_alien(struct kmem_cache *cachep, void *objp)
1083{
1084 return 0;
1085}
1086
1087static inline void *alternate_node_alloc(struct kmem_cache *cachep,
1088 gfp_t flags)
1089{
1090 return NULL;
1091}
1092
8b98c169 1093static inline void *____cache_alloc_node(struct kmem_cache *cachep,
765c4507
CL
1094 gfp_t flags, int nodeid)
1095{
1096 return NULL;
1097}
1098
1099#else /* CONFIG_NUMA */
1100
8b98c169 1101static void *____cache_alloc_node(struct kmem_cache *, gfp_t, int);
c61afb18 1102static void *alternate_node_alloc(struct kmem_cache *, gfp_t);
dc85da15 1103
83b519e8 1104static struct array_cache **alloc_alien_cache(int node, int limit, gfp_t gfp)
e498be7d
CL
1105{
1106 struct array_cache **ac_ptr;
8ef82866 1107 int memsize = sizeof(void *) * nr_node_ids;
e498be7d
CL
1108 int i;
1109
1110 if (limit > 1)
1111 limit = 12;
f3186a9c 1112 ac_ptr = kzalloc_node(memsize, gfp, node);
e498be7d
CL
1113 if (ac_ptr) {
1114 for_each_node(i) {
f3186a9c 1115 if (i == node || !node_online(i))
e498be7d 1116 continue;
83b519e8 1117 ac_ptr[i] = alloc_arraycache(node, limit, 0xbaadf00d, gfp);
e498be7d 1118 if (!ac_ptr[i]) {
cc550def 1119 for (i--; i >= 0; i--)
e498be7d
CL
1120 kfree(ac_ptr[i]);
1121 kfree(ac_ptr);
1122 return NULL;
1123 }
1124 }
1125 }
1126 return ac_ptr;
1127}
1128
5295a74c 1129static void free_alien_cache(struct array_cache **ac_ptr)
e498be7d
CL
1130{
1131 int i;
1132
1133 if (!ac_ptr)
1134 return;
e498be7d 1135 for_each_node(i)
b28a02de 1136 kfree(ac_ptr[i]);
e498be7d
CL
1137 kfree(ac_ptr);
1138}
1139
343e0d7a 1140static void __drain_alien_cache(struct kmem_cache *cachep,
5295a74c 1141 struct array_cache *ac, int node)
e498be7d
CL
1142{
1143 struct kmem_list3 *rl3 = cachep->nodelists[node];
1144
1145 if (ac->avail) {
1146 spin_lock(&rl3->list_lock);
e00946fe
CL
1147 /*
1148 * Stuff objects into the remote nodes shared array first.
1149 * That way we could avoid the overhead of putting the objects
1150 * into the free lists and getting them back later.
1151 */
693f7d36 1152 if (rl3->shared)
1153 transfer_objects(rl3->shared, ac, ac->limit);
e00946fe 1154
ff69416e 1155 free_block(cachep, ac->entry, ac->avail, node);
e498be7d
CL
1156 ac->avail = 0;
1157 spin_unlock(&rl3->list_lock);
1158 }
1159}
1160
8fce4d8e
CL
1161/*
1162 * Called from cache_reap() to regularly drain alien caches round robin.
1163 */
1164static void reap_alien(struct kmem_cache *cachep, struct kmem_list3 *l3)
1165{
909ea964 1166 int node = __this_cpu_read(slab_reap_node);
8fce4d8e
CL
1167
1168 if (l3->alien) {
1169 struct array_cache *ac = l3->alien[node];
e00946fe
CL
1170
1171 if (ac && ac->avail && spin_trylock_irq(&ac->lock)) {
8fce4d8e
CL
1172 __drain_alien_cache(cachep, ac, node);
1173 spin_unlock_irq(&ac->lock);
1174 }
1175 }
1176}
1177
a737b3e2
AM
1178static void drain_alien_cache(struct kmem_cache *cachep,
1179 struct array_cache **alien)
e498be7d 1180{
b28a02de 1181 int i = 0;
e498be7d
CL
1182 struct array_cache *ac;
1183 unsigned long flags;
1184
1185 for_each_online_node(i) {
4484ebf1 1186 ac = alien[i];
e498be7d
CL
1187 if (ac) {
1188 spin_lock_irqsave(&ac->lock, flags);
1189 __drain_alien_cache(cachep, ac, i);
1190 spin_unlock_irqrestore(&ac->lock, flags);
1191 }
1192 }
1193}
729bd0b7 1194
873623df 1195static inline int cache_free_alien(struct kmem_cache *cachep, void *objp)
729bd0b7
PE
1196{
1197 struct slab *slabp = virt_to_slab(objp);
1198 int nodeid = slabp->nodeid;
1199 struct kmem_list3 *l3;
1200 struct array_cache *alien = NULL;
1ca4cb24
PE
1201 int node;
1202
7d6e6d09 1203 node = numa_mem_id();
729bd0b7
PE
1204
1205 /*
 1206 * Make sure we are not freeing an object from another node to the array
1207 * cache on this cpu.
1208 */
62918a03 1209 if (likely(slabp->nodeid == node))
729bd0b7
PE
1210 return 0;
1211
1ca4cb24 1212 l3 = cachep->nodelists[node];
729bd0b7
PE
1213 STATS_INC_NODEFREES(cachep);
1214 if (l3->alien && l3->alien[nodeid]) {
1215 alien = l3->alien[nodeid];
873623df 1216 spin_lock(&alien->lock);
729bd0b7
PE
1217 if (unlikely(alien->avail == alien->limit)) {
1218 STATS_INC_ACOVERFLOW(cachep);
1219 __drain_alien_cache(cachep, alien, nodeid);
1220 }
072bb0aa 1221 ac_put_obj(cachep, alien, objp);
729bd0b7
PE
1222 spin_unlock(&alien->lock);
1223 } else {
1224 spin_lock(&(cachep->nodelists[nodeid])->list_lock);
1225 free_block(cachep, &objp, 1, nodeid);
1226 spin_unlock(&(cachep->nodelists[nodeid])->list_lock);
1227 }
1228 return 1;
1229}
e498be7d
CL
1230#endif
1231
8f9f8d9e
DR
1232/*
1233 * Allocates and initializes nodelists for a node on each slab cache, used for
1234 * either memory or cpu hotplug. If memory is being hot-added, the kmem_list3
1235 * will be allocated off-node since memory is not yet online for the new node.
1236 * When hotplugging memory or a cpu, existing nodelists are not replaced if
1237 * already in use.
1238 *
18004c5d 1239 * Must hold slab_mutex.
8f9f8d9e
DR
1240 */
1241static int init_cache_nodelists_node(int node)
1242{
1243 struct kmem_cache *cachep;
1244 struct kmem_list3 *l3;
1245 const int memsize = sizeof(struct kmem_list3);
1246
18004c5d 1247 list_for_each_entry(cachep, &slab_caches, list) {
8f9f8d9e
DR
1248 /*
1249 * Set up the size64 kmemlist for cpu before we can
1250 * begin anything. Make sure some other cpu on this
1251 * node has not already allocated this
1252 */
1253 if (!cachep->nodelists[node]) {
1254 l3 = kmalloc_node(memsize, GFP_KERNEL, node);
1255 if (!l3)
1256 return -ENOMEM;
1257 kmem_list3_init(l3);
1258 l3->next_reap = jiffies + REAPTIMEOUT_LIST3 +
1259 ((unsigned long)cachep) % REAPTIMEOUT_LIST3;
1260
1261 /*
1262 * The l3s don't come and go as CPUs come and
18004c5d 1263 * go. slab_mutex is sufficient
8f9f8d9e
DR
1264 * protection here.
1265 */
1266 cachep->nodelists[node] = l3;
1267 }
1268
1269 spin_lock_irq(&cachep->nodelists[node]->list_lock);
1270 cachep->nodelists[node]->free_limit =
1271 (1 + nr_cpus_node(node)) *
1272 cachep->batchcount + cachep->num;
1273 spin_unlock_irq(&cachep->nodelists[node]->list_lock);
1274 }
1275 return 0;
1276}
1277
fbf1e473
AM
1278static void __cpuinit cpuup_canceled(long cpu)
1279{
1280 struct kmem_cache *cachep;
1281 struct kmem_list3 *l3 = NULL;
7d6e6d09 1282 int node = cpu_to_mem(cpu);
a70f7302 1283 const struct cpumask *mask = cpumask_of_node(node);
fbf1e473 1284
18004c5d 1285 list_for_each_entry(cachep, &slab_caches, list) {
fbf1e473
AM
1286 struct array_cache *nc;
1287 struct array_cache *shared;
1288 struct array_cache **alien;
fbf1e473 1289
fbf1e473
AM
1290 /* cpu is dead; no one can alloc from it. */
1291 nc = cachep->array[cpu];
1292 cachep->array[cpu] = NULL;
1293 l3 = cachep->nodelists[node];
1294
1295 if (!l3)
1296 goto free_array_cache;
1297
1298 spin_lock_irq(&l3->list_lock);
1299
1300 /* Free limit for this kmem_list3 */
1301 l3->free_limit -= cachep->batchcount;
1302 if (nc)
1303 free_block(cachep, nc->entry, nc->avail, node);
1304
58463c1f 1305 if (!cpumask_empty(mask)) {
fbf1e473
AM
1306 spin_unlock_irq(&l3->list_lock);
1307 goto free_array_cache;
1308 }
1309
1310 shared = l3->shared;
1311 if (shared) {
1312 free_block(cachep, shared->entry,
1313 shared->avail, node);
1314 l3->shared = NULL;
1315 }
1316
1317 alien = l3->alien;
1318 l3->alien = NULL;
1319
1320 spin_unlock_irq(&l3->list_lock);
1321
1322 kfree(shared);
1323 if (alien) {
1324 drain_alien_cache(cachep, alien);
1325 free_alien_cache(alien);
1326 }
1327free_array_cache:
1328 kfree(nc);
1329 }
1330 /*
1331 * In the previous loop, all the objects were freed to
1332 * the respective cache's slabs, now we can go ahead and
1333 * shrink each nodelist to its limit.
1334 */
18004c5d 1335 list_for_each_entry(cachep, &slab_caches, list) {
fbf1e473
AM
1336 l3 = cachep->nodelists[node];
1337 if (!l3)
1338 continue;
1339 drain_freelist(cachep, l3, l3->free_objects);
1340 }
1341}
1342
1343static int __cpuinit cpuup_prepare(long cpu)
1da177e4 1344{
343e0d7a 1345 struct kmem_cache *cachep;
e498be7d 1346 struct kmem_list3 *l3 = NULL;
7d6e6d09 1347 int node = cpu_to_mem(cpu);
8f9f8d9e 1348 int err;
1da177e4 1349
fbf1e473
AM
1350 /*
1351 * We need to do this right in the beginning since
1352 * alloc_arraycache's are going to use this list.
1353 * kmalloc_node allows us to add the slab to the right
1354 * kmem_list3 and not this cpu's kmem_list3
1355 */
8f9f8d9e
DR
1356 err = init_cache_nodelists_node(node);
1357 if (err < 0)
1358 goto bad;
fbf1e473
AM
1359
1360 /*
1361 * Now we can go ahead with allocating the shared arrays and
1362 * array caches
1363 */
18004c5d 1364 list_for_each_entry(cachep, &slab_caches, list) {
fbf1e473
AM
1365 struct array_cache *nc;
1366 struct array_cache *shared = NULL;
1367 struct array_cache **alien = NULL;
1368
1369 nc = alloc_arraycache(node, cachep->limit,
83b519e8 1370 cachep->batchcount, GFP_KERNEL);
fbf1e473
AM
1371 if (!nc)
1372 goto bad;
1373 if (cachep->shared) {
1374 shared = alloc_arraycache(node,
1375 cachep->shared * cachep->batchcount,
83b519e8 1376 0xbaadf00d, GFP_KERNEL);
12d00f6a
AM
1377 if (!shared) {
1378 kfree(nc);
1da177e4 1379 goto bad;
12d00f6a 1380 }
fbf1e473
AM
1381 }
1382 if (use_alien_caches) {
83b519e8 1383 alien = alloc_alien_cache(node, cachep->limit, GFP_KERNEL);
12d00f6a
AM
1384 if (!alien) {
1385 kfree(shared);
1386 kfree(nc);
fbf1e473 1387 goto bad;
12d00f6a 1388 }
fbf1e473
AM
1389 }
1390 cachep->array[cpu] = nc;
1391 l3 = cachep->nodelists[node];
1392 BUG_ON(!l3);
1393
1394 spin_lock_irq(&l3->list_lock);
1395 if (!l3->shared) {
1396 /*
1397 * We are serialised from CPU_DEAD or
1398 * CPU_UP_CANCELLED by the cpucontrol lock
1399 */
1400 l3->shared = shared;
1401 shared = NULL;
1402 }
4484ebf1 1403#ifdef CONFIG_NUMA
fbf1e473
AM
1404 if (!l3->alien) {
1405 l3->alien = alien;
1406 alien = NULL;
1da177e4 1407 }
fbf1e473
AM
1408#endif
1409 spin_unlock_irq(&l3->list_lock);
1410 kfree(shared);
1411 free_alien_cache(alien);
83835b3d
PZ
1412 if (cachep->flags & SLAB_DEBUG_OBJECTS)
1413 slab_set_debugobj_lock_classes_node(cachep, node);
fbf1e473 1414 }
ce79ddc8
PE
1415 init_node_lock_keys(node);
1416
fbf1e473
AM
1417 return 0;
1418bad:
12d00f6a 1419 cpuup_canceled(cpu);
fbf1e473
AM
1420 return -ENOMEM;
1421}
1422
1423static int __cpuinit cpuup_callback(struct notifier_block *nfb,
1424 unsigned long action, void *hcpu)
1425{
1426 long cpu = (long)hcpu;
1427 int err = 0;
1428
1429 switch (action) {
fbf1e473
AM
1430 case CPU_UP_PREPARE:
1431 case CPU_UP_PREPARE_FROZEN:
18004c5d 1432 mutex_lock(&slab_mutex);
fbf1e473 1433 err = cpuup_prepare(cpu);
18004c5d 1434 mutex_unlock(&slab_mutex);
1da177e4
LT
1435 break;
1436 case CPU_ONLINE:
8bb78442 1437 case CPU_ONLINE_FROZEN:
1da177e4
LT
1438 start_cpu_timer(cpu);
1439 break;
1440#ifdef CONFIG_HOTPLUG_CPU
5830c590 1441 case CPU_DOWN_PREPARE:
8bb78442 1442 case CPU_DOWN_PREPARE_FROZEN:
5830c590 1443 /*
18004c5d 1444 * Shutdown cache reaper. Note that the slab_mutex is
5830c590
CL
1445 * held so that if cache_reap() is invoked it cannot do
1446 * anything expensive but will only modify reap_work
1447 * and reschedule the timer.
1448 */
afe2c511 1449 cancel_delayed_work_sync(&per_cpu(slab_reap_work, cpu));
5830c590 1450 /* Now the cache_reaper is guaranteed to be not running. */
1871e52c 1451 per_cpu(slab_reap_work, cpu).work.func = NULL;
5830c590
CL
1452 break;
1453 case CPU_DOWN_FAILED:
8bb78442 1454 case CPU_DOWN_FAILED_FROZEN:
5830c590
CL
1455 start_cpu_timer(cpu);
1456 break;
1da177e4 1457 case CPU_DEAD:
8bb78442 1458 case CPU_DEAD_FROZEN:
4484ebf1
RT
1459 /*
1460 * Even if all the cpus of a node are down, we don't free the
 1461 * kmem_list3 of any cache. This is to avoid a race between
1462 * cpu_down, and a kmalloc allocation from another cpu for
1463 * memory from the node of the cpu going down. The list3
1464 * structure is usually allocated from kmem_cache_create() and
1465 * gets destroyed at kmem_cache_destroy().
1466 */
183ff22b 1467 /* fall through */
8f5be20b 1468#endif
1da177e4 1469 case CPU_UP_CANCELED:
8bb78442 1470 case CPU_UP_CANCELED_FROZEN:
18004c5d 1471 mutex_lock(&slab_mutex);
fbf1e473 1472 cpuup_canceled(cpu);
18004c5d 1473 mutex_unlock(&slab_mutex);
1da177e4 1474 break;
1da177e4 1475 }
eac40680 1476 return notifier_from_errno(err);
1da177e4
LT
1477}
1478
74b85f37
CS
1479static struct notifier_block __cpuinitdata cpucache_notifier = {
1480 &cpuup_callback, NULL, 0
1481};
1da177e4 1482
8f9f8d9e
DR
1483#if defined(CONFIG_NUMA) && defined(CONFIG_MEMORY_HOTPLUG)
1484/*
1485 * Drains freelist for a node on each slab cache, used for memory hot-remove.
1486 * Returns -EBUSY if all objects cannot be drained so that the node is not
1487 * removed.
1488 *
18004c5d 1489 * Must hold slab_mutex.
8f9f8d9e
DR
1490 */
1491static int __meminit drain_cache_nodelists_node(int node)
1492{
1493 struct kmem_cache *cachep;
1494 int ret = 0;
1495
18004c5d 1496 list_for_each_entry(cachep, &slab_caches, list) {
8f9f8d9e
DR
1497 struct kmem_list3 *l3;
1498
1499 l3 = cachep->nodelists[node];
1500 if (!l3)
1501 continue;
1502
1503 drain_freelist(cachep, l3, l3->free_objects);
1504
1505 if (!list_empty(&l3->slabs_full) ||
1506 !list_empty(&l3->slabs_partial)) {
1507 ret = -EBUSY;
1508 break;
1509 }
1510 }
1511 return ret;
1512}
1513
1514static int __meminit slab_memory_callback(struct notifier_block *self,
1515 unsigned long action, void *arg)
1516{
1517 struct memory_notify *mnb = arg;
1518 int ret = 0;
1519 int nid;
1520
1521 nid = mnb->status_change_nid;
1522 if (nid < 0)
1523 goto out;
1524
1525 switch (action) {
1526 case MEM_GOING_ONLINE:
18004c5d 1527 mutex_lock(&slab_mutex);
8f9f8d9e 1528 ret = init_cache_nodelists_node(nid);
18004c5d 1529 mutex_unlock(&slab_mutex);
8f9f8d9e
DR
1530 break;
1531 case MEM_GOING_OFFLINE:
18004c5d 1532 mutex_lock(&slab_mutex);
8f9f8d9e 1533 ret = drain_cache_nodelists_node(nid);
18004c5d 1534 mutex_unlock(&slab_mutex);
8f9f8d9e
DR
1535 break;
1536 case MEM_ONLINE:
1537 case MEM_OFFLINE:
1538 case MEM_CANCEL_ONLINE:
1539 case MEM_CANCEL_OFFLINE:
1540 break;
1541 }
1542out:
5fda1bd5 1543 return notifier_from_errno(ret);
8f9f8d9e
DR
1544}
1545#endif /* CONFIG_NUMA && CONFIG_MEMORY_HOTPLUG */
1546
e498be7d
CL
1547/*
1548 * swap the static kmem_list3 with kmalloced memory
1549 */
8f9f8d9e
DR
1550static void __init init_list(struct kmem_cache *cachep, struct kmem_list3 *list,
1551 int nodeid)
e498be7d
CL
1552{
1553 struct kmem_list3 *ptr;
1554
83b519e8 1555 ptr = kmalloc_node(sizeof(struct kmem_list3), GFP_NOWAIT, nodeid);
e498be7d
CL
1556 BUG_ON(!ptr);
1557
e498be7d 1558 memcpy(ptr, list, sizeof(struct kmem_list3));
2b2d5493
IM
1559 /*
1560 * Do not assume that spinlocks can be initialized via memcpy:
1561 */
1562 spin_lock_init(&ptr->list_lock);
1563
e498be7d
CL
1564 MAKE_ALL_LISTS(cachep, ptr, nodeid);
1565 cachep->nodelists[nodeid] = ptr;
e498be7d
CL
1566}
1567
556a169d
PE
1568/*
1569 * For setting up all the kmem_list3s for cache whose buffer_size is same as
1570 * size of kmem_list3.
1571 */
1572static void __init set_up_list3s(struct kmem_cache *cachep, int index)
1573{
1574 int node;
1575
1576 for_each_online_node(node) {
1577 cachep->nodelists[node] = &initkmem_list3[index + node];
1578 cachep->nodelists[node]->next_reap = jiffies +
1579 REAPTIMEOUT_LIST3 +
1580 ((unsigned long)cachep) % REAPTIMEOUT_LIST3;
1581 }
1582}
1583
a737b3e2
AM
1584/*
 1585 * Initialisation. Called after the page allocator has been initialised and
1586 * before smp_init().
1da177e4
LT
1587 */
1588void __init kmem_cache_init(void)
1589{
1590 size_t left_over;
1591 struct cache_sizes *sizes;
1592 struct cache_names *names;
e498be7d 1593 int i;
07ed76b2 1594 int order;
1ca4cb24 1595 int node;
e498be7d 1596
9b030cb8
CL
1597 kmem_cache = &kmem_cache_boot;
1598
b6e68bc1 1599 if (num_possible_nodes() == 1)
62918a03
SS
1600 use_alien_caches = 0;
1601
e498be7d
CL
1602 for (i = 0; i < NUM_INIT_LISTS; i++) {
1603 kmem_list3_init(&initkmem_list3[i]);
1604 if (i < MAX_NUMNODES)
9b030cb8 1605 kmem_cache->nodelists[i] = NULL;
e498be7d 1606 }
9b030cb8 1607 set_up_list3s(kmem_cache, CACHE_CACHE);
1da177e4
LT
1608
1609 /*
1610 * Fragmentation resistance on low memory - only use bigger
3df1cccd
DR
1611 * page orders on machines with more than 32MB of memory if
1612 * not overridden on the command line.
1da177e4 1613 */
3df1cccd 1614 if (!slab_max_order_set && totalram_pages > (32 << 20) >> PAGE_SHIFT)
543585cc 1615 slab_max_order = SLAB_MAX_ORDER_HI;
1da177e4 1616
1da177e4
LT
1617 /* Bootstrap is tricky, because several objects are allocated
1618 * from caches that do not exist yet:
9b030cb8
CL
1619 * 1) initialize the kmem_cache cache: it contains the struct
1620 * kmem_cache structures of all caches, except kmem_cache itself:
1621 * kmem_cache is statically allocated.
e498be7d
CL
1622 * Initially an __init data area is used for the head array and the
1623 * kmem_list3 structures, it's replaced with a kmalloc allocated
1624 * array at the end of the bootstrap.
1da177e4 1625 * 2) Create the first kmalloc cache.
343e0d7a 1626 * The struct kmem_cache for the new cache is allocated normally.
e498be7d
CL
1627 * An __init data area is used for the head array.
1628 * 3) Create the remaining kmalloc caches, with minimally sized
1629 * head arrays.
9b030cb8 1630 * 4) Replace the __init data head arrays for kmem_cache and the first
1da177e4 1631 * kmalloc cache with kmalloc allocated arrays.
9b030cb8 1632 * 5) Replace the __init data for kmem_list3 for kmem_cache and
e498be7d
CL
1633 * the other cache's with kmalloc allocated memory.
1634 * 6) Resize the head arrays of the kmalloc caches to their final sizes.
1da177e4
LT
1635 */
1636
7d6e6d09 1637 node = numa_mem_id();
1ca4cb24 1638
9b030cb8 1639 /* 1) create the kmem_cache */
18004c5d 1640 INIT_LIST_HEAD(&slab_caches);
9b030cb8
CL
1641 list_add(&kmem_cache->list, &slab_caches);
1642 kmem_cache->colour_off = cache_line_size();
1643 kmem_cache->array[smp_processor_id()] = &initarray_cache.cache;
1644 kmem_cache->nodelists[node] = &initkmem_list3[CACHE_CACHE + node];
1da177e4 1645
8da3430d 1646 /*
b56efcf0 1647 * struct kmem_cache size depends on nr_node_ids & nr_cpu_ids
8da3430d 1648 */
9b030cb8 1649 kmem_cache->size = offsetof(struct kmem_cache, array[nr_cpu_ids]) +
b56efcf0 1650 nr_node_ids * sizeof(struct kmem_list3 *);
9b030cb8
CL
1651 kmem_cache->object_size = kmem_cache->size;
1652 kmem_cache->size = ALIGN(kmem_cache->object_size,
a737b3e2 1653 cache_line_size());
9b030cb8
CL
1654 kmem_cache->reciprocal_buffer_size =
1655 reciprocal_value(kmem_cache->size);
1da177e4 1656
07ed76b2 1657 for (order = 0; order < MAX_ORDER; order++) {
9b030cb8
CL
1658 cache_estimate(order, kmem_cache->size,
1659 cache_line_size(), 0, &left_over, &kmem_cache->num);
1660 if (kmem_cache->num)
07ed76b2
JS
1661 break;
1662 }
9b030cb8
CL
1663 BUG_ON(!kmem_cache->num);
1664 kmem_cache->gfporder = order;
1665 kmem_cache->colour = left_over / kmem_cache->colour_off;
1666 kmem_cache->slab_size = ALIGN(kmem_cache->num * sizeof(kmem_bufctl_t) +
b28a02de 1667 sizeof(struct slab), cache_line_size());
1da177e4
LT
1668
1669 /* 2+3) create the kmalloc caches */
1670 sizes = malloc_sizes;
1671 names = cache_names;
1672
a737b3e2
AM
1673 /*
1674 * Initialize the caches that provide memory for the array cache and the
1675 * kmem_list3 structures first. Without this, further allocations will
1676 * bug.
e498be7d
CL
1677 */
1678
039363f3 1679 sizes[INDEX_AC].cs_cachep = __kmem_cache_create(names[INDEX_AC].name,
a737b3e2
AM
1680 sizes[INDEX_AC].cs_size,
1681 ARCH_KMALLOC_MINALIGN,
1682 ARCH_KMALLOC_FLAGS|SLAB_PANIC,
20c2df83 1683 NULL);
e498be7d 1684
7c9adf5a 1685 list_add(&sizes[INDEX_AC].cs_cachep->list, &slab_caches);
a737b3e2 1686 if (INDEX_AC != INDEX_L3) {
e498be7d 1687 sizes[INDEX_L3].cs_cachep =
039363f3 1688 __kmem_cache_create(names[INDEX_L3].name,
a737b3e2
AM
1689 sizes[INDEX_L3].cs_size,
1690 ARCH_KMALLOC_MINALIGN,
1691 ARCH_KMALLOC_FLAGS|SLAB_PANIC,
20c2df83 1692 NULL);
7c9adf5a 1693 list_add(&sizes[INDEX_L3].cs_cachep->list, &slab_caches);
a737b3e2 1694 }
e498be7d 1695
e0a42726
IM
1696 slab_early_init = 0;
1697
1da177e4 1698 while (sizes->cs_size != ULONG_MAX) {
e498be7d
CL
1699 /*
1700 * For performance, all the general caches are L1 aligned.
1da177e4
LT
1701 * This should be particularly beneficial on SMP boxes, as it
1702 * eliminates "false sharing".
1703 * Note that for systems short on memory, removing the alignment will
e498be7d
CL
1704 * allow tighter packing of the smaller caches.
1705 */
a737b3e2 1706 if (!sizes->cs_cachep) {
039363f3 1707 sizes->cs_cachep = __kmem_cache_create(names->name,
a737b3e2
AM
1708 sizes->cs_size,
1709 ARCH_KMALLOC_MINALIGN,
1710 ARCH_KMALLOC_FLAGS|SLAB_PANIC,
20c2df83 1711 NULL);
7c9adf5a 1712 list_add(&sizes->cs_cachep->list, &slab_caches);
a737b3e2 1713 }
4b51d669 1714#ifdef CONFIG_ZONE_DMA
039363f3 1715 sizes->cs_dmacachep = __kmem_cache_create(
4b51d669 1716 names->name_dma,
a737b3e2
AM
1717 sizes->cs_size,
1718 ARCH_KMALLOC_MINALIGN,
1719 ARCH_KMALLOC_FLAGS|SLAB_CACHE_DMA|
1720 SLAB_PANIC,
20c2df83 1721 NULL);
7c9adf5a 1722 list_add(&sizes->cs_dmacachep->list, &slab_caches);
4b51d669 1723#endif
1da177e4
LT
1724 sizes++;
1725 names++;
1726 }
1727 /* 4) Replace the bootstrap head arrays */
1728 {
2b2d5493 1729 struct array_cache *ptr;
e498be7d 1730
83b519e8 1731 ptr = kmalloc(sizeof(struct arraycache_init), GFP_NOWAIT);
e498be7d 1732
9b030cb8
CL
1733 BUG_ON(cpu_cache_get(kmem_cache) != &initarray_cache.cache);
1734 memcpy(ptr, cpu_cache_get(kmem_cache),
b28a02de 1735 sizeof(struct arraycache_init));
2b2d5493
IM
1736 /*
1737 * Do not assume that spinlocks can be initialized via memcpy:
1738 */
1739 spin_lock_init(&ptr->lock);
1740
9b030cb8 1741 kmem_cache->array[smp_processor_id()] = ptr;
e498be7d 1742
83b519e8 1743 ptr = kmalloc(sizeof(struct arraycache_init), GFP_NOWAIT);
e498be7d 1744
9a2dba4b 1745 BUG_ON(cpu_cache_get(malloc_sizes[INDEX_AC].cs_cachep)
b28a02de 1746 != &initarray_generic.cache);
9a2dba4b 1747 memcpy(ptr, cpu_cache_get(malloc_sizes[INDEX_AC].cs_cachep),
b28a02de 1748 sizeof(struct arraycache_init));
2b2d5493
IM
1749 /*
1750 * Do not assume that spinlocks can be initialized via memcpy:
1751 */
1752 spin_lock_init(&ptr->lock);
1753
e498be7d 1754 malloc_sizes[INDEX_AC].cs_cachep->array[smp_processor_id()] =
b28a02de 1755 ptr;
1da177e4 1756 }
e498be7d
CL
1757 /* 5) Replace the bootstrap kmem_list3's */
1758 {
1ca4cb24
PE
1759 int nid;
1760
9c09a95c 1761 for_each_online_node(nid) {
9b030cb8 1762 init_list(kmem_cache, &initkmem_list3[CACHE_CACHE + nid], nid);
556a169d 1763
e498be7d 1764 init_list(malloc_sizes[INDEX_AC].cs_cachep,
1ca4cb24 1765 &initkmem_list3[SIZE_AC + nid], nid);
e498be7d
CL
1766
1767 if (INDEX_AC != INDEX_L3) {
1768 init_list(malloc_sizes[INDEX_L3].cs_cachep,
1ca4cb24 1769 &initkmem_list3[SIZE_L3 + nid], nid);
e498be7d
CL
1770 }
1771 }
1772 }
1da177e4 1773
97d06609 1774 slab_state = UP;
8429db5c
PE
1775}
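
The reciprocal_buffer_size set up above (and again in __kmem_cache_create() below) lets obj_to_index() replace the divide "offset / cachep->size" with a multiply and a shift. Below is a minimal userspace model of that trick; the helpers mirror the kernel's reciprocal_value()/reciprocal_divide() in spirit, but this is an illustrative sketch, not the kernel code.

#include <stdint.h>
#include <stdio.h>

/* Precompute ceil(2^32 / d); accurate for the small dividends used here. */
static uint32_t reciprocal_value(uint32_t d)
{
	return (uint32_t)(((1ULL << 32) + d - 1) / d);
}

/* a / d computed as one multiply and one shift, avoiding a hardware divide. */
static uint32_t reciprocal_divide(uint32_t a, uint32_t r)
{
	return (uint32_t)(((uint64_t)a * r) >> 32);
}

int main(void)
{
	uint32_t size = 192;		/* hypothetical object size */
	uint32_t r = reciprocal_value(size);
	uint32_t offset = 7 * size;	/* offset of object #7 from s_mem */

	/* Mirrors obj_to_index(): index = (obj - s_mem) / cachep->size. */
	printf("index = %u\n", reciprocal_divide(offset, r));
	return 0;
}
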
1776
1777void __init kmem_cache_init_late(void)
1778{
1779 struct kmem_cache *cachep;
1780
97d06609 1781 slab_state = UP;
52cef189 1782
30765b92
PZ
1783 /* Annotate slab for lockdep -- annotate the malloc caches */
1784 init_lock_keys();
1785
8429db5c 1786 /* 6) resize the head arrays to their final sizes */
18004c5d
CL
1787 mutex_lock(&slab_mutex);
1788 list_for_each_entry(cachep, &slab_caches, list)
8429db5c
PE
1789 if (enable_cpucache(cachep, GFP_NOWAIT))
1790 BUG();
18004c5d 1791 mutex_unlock(&slab_mutex);
056c6241 1792
97d06609
CL
1793 /* Done! */
1794 slab_state = FULL;
1795
a737b3e2
AM
1796 /*
1797 * Register a cpu startup notifier callback that initializes
1798 * cpu_cache_get for all new cpus
1da177e4
LT
1799 */
1800 register_cpu_notifier(&cpucache_notifier);
1da177e4 1801
8f9f8d9e
DR
1802#ifdef CONFIG_NUMA
1803 /*
1804 * Register a memory hotplug callback that initializes and frees
1805 * nodelists.
1806 */
1807 hotplug_memory_notifier(slab_memory_callback, SLAB_CALLBACK_PRI);
1808#endif
1809
a737b3e2
AM
1810 /*
1811 * The reap timers are started later, with a module init call: That part
1812 * of the kernel is not yet operational.
1da177e4
LT
1813 */
1814}
1815
1816static int __init cpucache_init(void)
1817{
1818 int cpu;
1819
a737b3e2
AM
1820 /*
1821 * Register the timers that return unneeded pages to the page allocator
1da177e4 1822 */
e498be7d 1823 for_each_online_cpu(cpu)
a737b3e2 1824 start_cpu_timer(cpu);
a164f896
GC
1825
1826 /* Done! */
97d06609 1827 slab_state = FULL;
1da177e4
LT
1828 return 0;
1829}
1da177e4
LT
1830__initcall(cpucache_init);
1831
8bdec192
RA
1832static noinline void
1833slab_out_of_memory(struct kmem_cache *cachep, gfp_t gfpflags, int nodeid)
1834{
1835 struct kmem_list3 *l3;
1836 struct slab *slabp;
1837 unsigned long flags;
1838 int node;
1839
1840 printk(KERN_WARNING
1841 "SLAB: Unable to allocate memory on node %d (gfp=0x%x)\n",
1842 nodeid, gfpflags);
1843 printk(KERN_WARNING " cache: %s, object size: %d, order: %d\n",
3b0efdfa 1844 cachep->name, cachep->size, cachep->gfporder);
8bdec192
RA
1845
1846 for_each_online_node(node) {
1847 unsigned long active_objs = 0, num_objs = 0, free_objects = 0;
1848 unsigned long active_slabs = 0, num_slabs = 0;
1849
1850 l3 = cachep->nodelists[node];
1851 if (!l3)
1852 continue;
1853
1854 spin_lock_irqsave(&l3->list_lock, flags);
1855 list_for_each_entry(slabp, &l3->slabs_full, list) {
1856 active_objs += cachep->num;
1857 active_slabs++;
1858 }
1859 list_for_each_entry(slabp, &l3->slabs_partial, list) {
1860 active_objs += slabp->inuse;
1861 active_slabs++;
1862 }
1863 list_for_each_entry(slabp, &l3->slabs_free, list)
1864 num_slabs++;
1865
1866 free_objects += l3->free_objects;
1867 spin_unlock_irqrestore(&l3->list_lock, flags);
1868
1869 num_slabs += active_slabs;
1870 num_objs = num_slabs * cachep->num;
1871 printk(KERN_WARNING
1872 " node %d: slabs: %ld/%ld, objs: %ld/%ld, free: %ld\n",
1873 node, active_slabs, num_slabs, active_objs, num_objs,
1874 free_objects);
1875 }
1876}
1877
1da177e4
LT
1878/*
1879 * Interface to system's page allocator. No need to hold the cache-lock.
1880 *
1881 * If we requested dmaable memory, we will get it. Even if we
1882 * did not request dmaable memory, we might get it, but that
1883 * would be relatively rare and ignorable.
1884 */
343e0d7a 1885static void *kmem_getpages(struct kmem_cache *cachep, gfp_t flags, int nodeid)
1da177e4
LT
1886{
1887 struct page *page;
e1b6aa6f 1888 int nr_pages;
1da177e4
LT
1889 int i;
1890
d6fef9da 1891#ifndef CONFIG_MMU
e1b6aa6f
CH
1892 /*
1893 * Nommu uses slabs for process anonymous memory allocations, and thus
1894 * requires __GFP_COMP to properly refcount higher order allocations
d6fef9da 1895 */
e1b6aa6f 1896 flags |= __GFP_COMP;
d6fef9da 1897#endif
765c4507 1898
a618e89f 1899 flags |= cachep->allocflags;
e12ba74d
MG
1900 if (cachep->flags & SLAB_RECLAIM_ACCOUNT)
1901 flags |= __GFP_RECLAIMABLE;
e1b6aa6f 1902
517d0869 1903 page = alloc_pages_exact_node(nodeid, flags | __GFP_NOTRACK, cachep->gfporder);
8bdec192
RA
1904 if (!page) {
1905 if (!(flags & __GFP_NOWARN) && printk_ratelimit())
1906 slab_out_of_memory(cachep, flags, nodeid);
1da177e4 1907 return NULL;
8bdec192 1908 }
1da177e4 1909
b37f1dd0 1910 /* Record if ALLOC_NO_WATERMARKS was set when allocating the slab */
072bb0aa
MG
1911 if (unlikely(page->pfmemalloc))
1912 pfmemalloc_active = true;
1913
e1b6aa6f 1914 nr_pages = (1 << cachep->gfporder);
1da177e4 1915 if (cachep->flags & SLAB_RECLAIM_ACCOUNT)
972d1a7b
CL
1916 add_zone_page_state(page_zone(page),
1917 NR_SLAB_RECLAIMABLE, nr_pages);
1918 else
1919 add_zone_page_state(page_zone(page),
1920 NR_SLAB_UNRECLAIMABLE, nr_pages);
072bb0aa 1921 for (i = 0; i < nr_pages; i++) {
e1b6aa6f 1922 __SetPageSlab(page + i);
c175eea4 1923
072bb0aa
MG
1924 if (page->pfmemalloc)
1925 SetPageSlabPfmemalloc(page + i);
1926 }
1927
b1eeab67
VN
1928 if (kmemcheck_enabled && !(cachep->flags & SLAB_NOTRACK)) {
1929 kmemcheck_alloc_shadow(page, cachep->gfporder, flags, nodeid);
1930
1931 if (cachep->ctor)
1932 kmemcheck_mark_uninitialized_pages(page, nr_pages);
1933 else
1934 kmemcheck_mark_unallocated_pages(page, nr_pages);
1935 }
c175eea4 1936
e1b6aa6f 1937 return page_address(page);
1da177e4
LT
1938}
1939
1940/*
1941 * Interface to system's page release.
1942 */
343e0d7a 1943static void kmem_freepages(struct kmem_cache *cachep, void *addr)
1da177e4 1944{
b28a02de 1945 unsigned long i = (1 << cachep->gfporder);
1da177e4
LT
1946 struct page *page = virt_to_page(addr);
1947 const unsigned long nr_freed = i;
1948
b1eeab67 1949 kmemcheck_free_shadow(page, cachep->gfporder);
c175eea4 1950
972d1a7b
CL
1951 if (cachep->flags & SLAB_RECLAIM_ACCOUNT)
1952 sub_zone_page_state(page_zone(page),
1953 NR_SLAB_RECLAIMABLE, nr_freed);
1954 else
1955 sub_zone_page_state(page_zone(page),
1956 NR_SLAB_UNRECLAIMABLE, nr_freed);
1da177e4 1957 while (i--) {
f205b2fe 1958 BUG_ON(!PageSlab(page));
072bb0aa 1959 __ClearPageSlabPfmemalloc(page);
f205b2fe 1960 __ClearPageSlab(page);
1da177e4
LT
1961 page++;
1962 }
1da177e4
LT
1963 if (current->reclaim_state)
1964 current->reclaim_state->reclaimed_slab += nr_freed;
1965 free_pages((unsigned long)addr, cachep->gfporder);
1da177e4
LT
1966}
1967
1968static void kmem_rcu_free(struct rcu_head *head)
1969{
b28a02de 1970 struct slab_rcu *slab_rcu = (struct slab_rcu *)head;
343e0d7a 1971 struct kmem_cache *cachep = slab_rcu->cachep;
1da177e4
LT
1972
1973 kmem_freepages(cachep, slab_rcu->addr);
1974 if (OFF_SLAB(cachep))
1975 kmem_cache_free(cachep->slabp_cache, slab_rcu);
1976}
1977
1978#if DEBUG
1979
1980#ifdef CONFIG_DEBUG_PAGEALLOC
343e0d7a 1981static void store_stackinfo(struct kmem_cache *cachep, unsigned long *addr,
b28a02de 1982 unsigned long caller)
1da177e4 1983{
8c138bc0 1984 int size = cachep->object_size;
1da177e4 1985
3dafccf2 1986 addr = (unsigned long *)&((char *)addr)[obj_offset(cachep)];
1da177e4 1987
b28a02de 1988 if (size < 5 * sizeof(unsigned long))
1da177e4
LT
1989 return;
1990
b28a02de
PE
1991 *addr++ = 0x12345678;
1992 *addr++ = caller;
1993 *addr++ = smp_processor_id();
1994 size -= 3 * sizeof(unsigned long);
1da177e4
LT
1995 {
1996 unsigned long *sptr = &caller;
1997 unsigned long svalue;
1998
1999 while (!kstack_end(sptr)) {
2000 svalue = *sptr++;
2001 if (kernel_text_address(svalue)) {
b28a02de 2002 *addr++ = svalue;
1da177e4
LT
2003 size -= sizeof(unsigned long);
2004 if (size <= sizeof(unsigned long))
2005 break;
2006 }
2007 }
2008
2009 }
b28a02de 2010 *addr++ = 0x87654321;
1da177e4
LT
2011}
2012#endif
2013
343e0d7a 2014static void poison_obj(struct kmem_cache *cachep, void *addr, unsigned char val)
1da177e4 2015{
8c138bc0 2016 int size = cachep->object_size;
3dafccf2 2017 addr = &((char *)addr)[obj_offset(cachep)];
1da177e4
LT
2018
2019 memset(addr, val, size);
b28a02de 2020 *(unsigned char *)(addr + size - 1) = POISON_END;
1da177e4
LT
2021}
2022
2023static void dump_line(char *data, int offset, int limit)
2024{
2025 int i;
aa83aa40
DJ
2026 unsigned char error = 0;
2027 int bad_count = 0;
2028
fdde6abb 2029 printk(KERN_ERR "%03x: ", offset);
aa83aa40
DJ
2030 for (i = 0; i < limit; i++) {
2031 if (data[offset + i] != POISON_FREE) {
2032 error = data[offset + i];
2033 bad_count++;
2034 }
aa83aa40 2035 }
fdde6abb
SAS
2036 print_hex_dump(KERN_CONT, "", 0, 16, 1,
2037 &data[offset], limit, 1);
aa83aa40
DJ
2038
2039 if (bad_count == 1) {
2040 error ^= POISON_FREE;
2041 if (!(error & (error - 1))) {
2042 printk(KERN_ERR "Single bit error detected. Probably "
2043 "bad RAM.\n");
2044#ifdef CONFIG_X86
2045 printk(KERN_ERR "Run memtest86+ or a similar memory "
2046 "test tool.\n");
2047#else
2048 printk(KERN_ERR "Run a memory test tool.\n");
2049#endif
2050 }
2051 }
1da177e4
LT
2052}
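
The XOR-and-power-of-two test in dump_line() above reports a mismatch that differs from the poison byte in exactly one bit as probable bad RAM. A minimal sketch of the same check; POISON_FREE is assumed to be 0x6b here, matching the usual slab poison value.

#include <stdio.h>

#define POISON_FREE 0x6b	/* assumed value of the free-object poison byte */

/* True when 'observed' differs from the poison pattern in exactly one bit. */
static int single_bit_flip(unsigned char observed)
{
	unsigned char diff = observed ^ POISON_FREE;

	return diff && !(diff & (diff - 1));	/* power of two => one bit set */
}

int main(void)
{
	printf("%d\n", single_bit_flip(0x6b ^ 0x08));	/* 1: one flipped bit  */
	printf("%d\n", single_bit_flip(0x00));		/* 0: many bits differ */
	return 0;
}
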
2053#endif
2054
2055#if DEBUG
2056
343e0d7a 2057static void print_objinfo(struct kmem_cache *cachep, void *objp, int lines)
1da177e4
LT
2058{
2059 int i, size;
2060 char *realobj;
2061
2062 if (cachep->flags & SLAB_RED_ZONE) {
b46b8f19 2063 printk(KERN_ERR "Redzone: 0x%llx/0x%llx.\n",
a737b3e2
AM
2064 *dbg_redzone1(cachep, objp),
2065 *dbg_redzone2(cachep, objp));
1da177e4
LT
2066 }
2067
2068 if (cachep->flags & SLAB_STORE_USER) {
2069 printk(KERN_ERR "Last user: [<%p>]",
a737b3e2 2070 *dbg_userword(cachep, objp));
1da177e4 2071 print_symbol("(%s)",
a737b3e2 2072 (unsigned long)*dbg_userword(cachep, objp));
1da177e4
LT
2073 printk("\n");
2074 }
3dafccf2 2075 realobj = (char *)objp + obj_offset(cachep);
8c138bc0 2076 size = cachep->object_size;
b28a02de 2077 for (i = 0; i < size && lines; i += 16, lines--) {
1da177e4
LT
2078 int limit;
2079 limit = 16;
b28a02de
PE
2080 if (i + limit > size)
2081 limit = size - i;
1da177e4
LT
2082 dump_line(realobj, i, limit);
2083 }
2084}
2085
343e0d7a 2086static void check_poison_obj(struct kmem_cache *cachep, void *objp)
1da177e4
LT
2087{
2088 char *realobj;
2089 int size, i;
2090 int lines = 0;
2091
3dafccf2 2092 realobj = (char *)objp + obj_offset(cachep);
8c138bc0 2093 size = cachep->object_size;
1da177e4 2094
b28a02de 2095 for (i = 0; i < size; i++) {
1da177e4 2096 char exp = POISON_FREE;
b28a02de 2097 if (i == size - 1)
1da177e4
LT
2098 exp = POISON_END;
2099 if (realobj[i] != exp) {
2100 int limit;
2101 /* Mismatch ! */
2102 /* Print header */
2103 if (lines == 0) {
b28a02de 2104 printk(KERN_ERR
face37f5
DJ
2105 "Slab corruption (%s): %s start=%p, len=%d\n",
2106 print_tainted(), cachep->name, realobj, size);
1da177e4
LT
2107 print_objinfo(cachep, objp, 0);
2108 }
2109 /* Hexdump the affected line */
b28a02de 2110 i = (i / 16) * 16;
1da177e4 2111 limit = 16;
b28a02de
PE
2112 if (i + limit > size)
2113 limit = size - i;
1da177e4
LT
2114 dump_line(realobj, i, limit);
2115 i += 16;
2116 lines++;
2117 /* Limit to 5 lines */
2118 if (lines > 5)
2119 break;
2120 }
2121 }
2122 if (lines != 0) {
2123 /* Print some data about the neighboring objects, if they
2124 * exist:
2125 */
6ed5eb22 2126 struct slab *slabp = virt_to_slab(objp);
8fea4e96 2127 unsigned int objnr;
1da177e4 2128
8fea4e96 2129 objnr = obj_to_index(cachep, slabp, objp);
1da177e4 2130 if (objnr) {
8fea4e96 2131 objp = index_to_obj(cachep, slabp, objnr - 1);
3dafccf2 2132 realobj = (char *)objp + obj_offset(cachep);
1da177e4 2133 printk(KERN_ERR "Prev obj: start=%p, len=%d\n",
b28a02de 2134 realobj, size);
1da177e4
LT
2135 print_objinfo(cachep, objp, 2);
2136 }
b28a02de 2137 if (objnr + 1 < cachep->num) {
8fea4e96 2138 objp = index_to_obj(cachep, slabp, objnr + 1);
3dafccf2 2139 realobj = (char *)objp + obj_offset(cachep);
1da177e4 2140 printk(KERN_ERR "Next obj: start=%p, len=%d\n",
b28a02de 2141 realobj, size);
1da177e4
LT
2142 print_objinfo(cachep, objp, 2);
2143 }
2144 }
2145}
2146#endif
2147
12dd36fa 2148#if DEBUG
e79aec29 2149static void slab_destroy_debugcheck(struct kmem_cache *cachep, struct slab *slabp)
1da177e4 2150{
1da177e4
LT
2151 int i;
2152 for (i = 0; i < cachep->num; i++) {
8fea4e96 2153 void *objp = index_to_obj(cachep, slabp, i);
1da177e4
LT
2154
2155 if (cachep->flags & SLAB_POISON) {
2156#ifdef CONFIG_DEBUG_PAGEALLOC
3b0efdfa 2157 if (cachep->size % PAGE_SIZE == 0 &&
a737b3e2 2158 OFF_SLAB(cachep))
b28a02de 2159 kernel_map_pages(virt_to_page(objp),
3b0efdfa 2160 cachep->size / PAGE_SIZE, 1);
1da177e4
LT
2161 else
2162 check_poison_obj(cachep, objp);
2163#else
2164 check_poison_obj(cachep, objp);
2165#endif
2166 }
2167 if (cachep->flags & SLAB_RED_ZONE) {
2168 if (*dbg_redzone1(cachep, objp) != RED_INACTIVE)
2169 slab_error(cachep, "start of a freed object "
b28a02de 2170 "was overwritten");
1da177e4
LT
2171 if (*dbg_redzone2(cachep, objp) != RED_INACTIVE)
2172 slab_error(cachep, "end of a freed object "
b28a02de 2173 "was overwritten");
1da177e4 2174 }
1da177e4 2175 }
12dd36fa 2176}
1da177e4 2177#else
e79aec29 2178static void slab_destroy_debugcheck(struct kmem_cache *cachep, struct slab *slabp)
12dd36fa 2179{
12dd36fa 2180}
1da177e4
LT
2181#endif
2182
911851e6
RD
2183/**
2184 * slab_destroy - destroy and release all objects in a slab
2185 * @cachep: cache pointer being destroyed
2186 * @slabp: slab pointer being destroyed
2187 *
12dd36fa 2188 * Destroy all the objs in a slab, and release the mem back to the system.
a737b3e2
AM
2189 * Before calling, the slab must have been unlinked from the cache. The
2190 * cache-lock is not held/needed.
12dd36fa 2191 */
343e0d7a 2192static void slab_destroy(struct kmem_cache *cachep, struct slab *slabp)
12dd36fa
MD
2193{
2194 void *addr = slabp->s_mem - slabp->colouroff;
2195
e79aec29 2196 slab_destroy_debugcheck(cachep, slabp);
1da177e4
LT
2197 if (unlikely(cachep->flags & SLAB_DESTROY_BY_RCU)) {
2198 struct slab_rcu *slab_rcu;
2199
b28a02de 2200 slab_rcu = (struct slab_rcu *)slabp;
1da177e4
LT
2201 slab_rcu->cachep = cachep;
2202 slab_rcu->addr = addr;
2203 call_rcu(&slab_rcu->head, kmem_rcu_free);
2204 } else {
2205 kmem_freepages(cachep, addr);
873623df
IM
2206 if (OFF_SLAB(cachep))
2207 kmem_cache_free(cachep->slabp_cache, slabp);
1da177e4
LT
2208 }
2209}
2210
4d268eba 2211/**
a70773dd
RD
2212 * calculate_slab_order - calculate size (page order) of slabs
2213 * @cachep: pointer to the cache that is being created
2214 * @size: size of objects to be created in this cache.
2215 * @align: required alignment for the objects.
2216 * @flags: slab allocation flags
2217 *
2218 * Also calculates the number of objects per slab.
4d268eba
PE
2219 *
2220 * This could be made much more intelligent. For now, try to avoid using
2221 * high order pages for slabs. When the gfp() functions are more friendly
2222 * towards high-order requests, this should be changed.
2223 */
a737b3e2 2224static size_t calculate_slab_order(struct kmem_cache *cachep,
ee13d785 2225 size_t size, size_t align, unsigned long flags)
4d268eba 2226{
b1ab41c4 2227 unsigned long offslab_limit;
4d268eba 2228 size_t left_over = 0;
9888e6fa 2229 int gfporder;
4d268eba 2230
0aa817f0 2231 for (gfporder = 0; gfporder <= KMALLOC_MAX_ORDER; gfporder++) {
4d268eba
PE
2232 unsigned int num;
2233 size_t remainder;
2234
9888e6fa 2235 cache_estimate(gfporder, size, align, flags, &remainder, &num);
4d268eba
PE
2236 if (!num)
2237 continue;
9888e6fa 2238
b1ab41c4
IM
2239 if (flags & CFLGS_OFF_SLAB) {
2240 /*
2241 * Max number of objs-per-slab for caches which
2242 * use off-slab slabs. Needed to avoid a possible
2243 * looping condition in cache_grow().
2244 */
2245 offslab_limit = size - sizeof(struct slab);
2246 offslab_limit /= sizeof(kmem_bufctl_t);
2247
2248 if (num > offslab_limit)
2249 break;
2250 }
4d268eba 2251
9888e6fa 2252 /* Found something acceptable - save it away */
4d268eba 2253 cachep->num = num;
9888e6fa 2254 cachep->gfporder = gfporder;
4d268eba
PE
2255 left_over = remainder;
2256
f78bb8ad
LT
2257 /*
2258 * A VFS-reclaimable slab tends to have most allocations
2259 * as GFP_NOFS and we really don't want to have to be allocating
2260 * higher-order pages when we are unable to shrink dcache.
2261 */
2262 if (flags & SLAB_RECLAIM_ACCOUNT)
2263 break;
2264
4d268eba
PE
2265 /*
2266 * Large number of objects is good, but very large slabs are
2267 * currently bad for the gfp()s.
2268 */
543585cc 2269 if (gfporder >= slab_max_order)
4d268eba
PE
2270 break;
2271
9888e6fa
LT
2272 /*
2273 * Acceptable internal fragmentation?
2274 */
a737b3e2 2275 if (left_over * 8 <= (PAGE_SIZE << gfporder))
4d268eba
PE
2276 break;
2277 }
2278 return left_over;
2279}
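
A simplified userspace model of the order-selection loop above: grow the page order until at least one object fits, then accept the order once internal fragmentation drops to 1/8 of the slab or the slab_max_order cap is reached. This deliberately ignores the bufctl and struct slab overhead that cache_estimate() accounts for.

#include <stdio.h>

#define PAGE_SIZE	4096UL
#define SLAB_MAX_ORDER	1	/* assumed default cap, like slab_max_order */

/* Crude stand-in for cache_estimate(): no management overhead modelled. */
static void pick_order(unsigned long size)
{
	unsigned long order, num = 0, left_over = 0;

	for (order = 0; order <= 10; order++) {
		unsigned long slab_bytes = PAGE_SIZE << order;

		num = slab_bytes / size;
		left_over = slab_bytes - num * size;
		if (!num)
			continue;		/* object does not even fit   */
		if (order >= SLAB_MAX_ORDER)
			break;			/* avoid high-order pages     */
		if (left_over * 8 <= slab_bytes)
			break;			/* <= 1/8 wasted: good enough */
	}
	printf("size %lu: order %lu, %lu objs/slab, %lu bytes wasted\n",
	       size, order, num, left_over);
}

int main(void)
{
	pick_order(1000);	/* hypothetical object sizes */
	pick_order(5000);
	return 0;
}
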
2280
83b519e8 2281static int __init_refok setup_cpu_cache(struct kmem_cache *cachep, gfp_t gfp)
f30cf7d1 2282{
97d06609 2283 if (slab_state >= FULL)
83b519e8 2284 return enable_cpucache(cachep, gfp);
2ed3a4ef 2285
97d06609 2286 if (slab_state == DOWN) {
f30cf7d1
PE
2287 /*
2288 * Note: the first kmem_cache_create must create the cache
2289 * that's used by kmalloc(24), otherwise the creation of
2290 * further caches will BUG().
2291 */
2292 cachep->array[smp_processor_id()] = &initarray_generic.cache;
2293
2294 /*
2295 * If the cache that's used by kmalloc(sizeof(kmem_list3)) is
2296 * the first cache, then we need to set up all its list3s,
2297 * otherwise the creation of further caches will BUG().
2298 */
2299 set_up_list3s(cachep, SIZE_AC);
2300 if (INDEX_AC == INDEX_L3)
97d06609 2301 slab_state = PARTIAL_L3;
f30cf7d1 2302 else
97d06609 2303 slab_state = PARTIAL_ARRAYCACHE;
f30cf7d1
PE
2304 } else {
2305 cachep->array[smp_processor_id()] =
83b519e8 2306 kmalloc(sizeof(struct arraycache_init), gfp);
f30cf7d1 2307
97d06609 2308 if (slab_state == PARTIAL_ARRAYCACHE) {
f30cf7d1 2309 set_up_list3s(cachep, SIZE_L3);
97d06609 2310 slab_state = PARTIAL_L3;
f30cf7d1
PE
2311 } else {
2312 int node;
556a169d 2313 for_each_online_node(node) {
f30cf7d1
PE
2314 cachep->nodelists[node] =
2315 kmalloc_node(sizeof(struct kmem_list3),
eb91f1d0 2316 gfp, node);
f30cf7d1
PE
2317 BUG_ON(!cachep->nodelists[node]);
2318 kmem_list3_init(cachep->nodelists[node]);
2319 }
2320 }
2321 }
7d6e6d09 2322 cachep->nodelists[numa_mem_id()]->next_reap =
f30cf7d1
PE
2323 jiffies + REAPTIMEOUT_LIST3 +
2324 ((unsigned long)cachep) % REAPTIMEOUT_LIST3;
2325
2326 cpu_cache_get(cachep)->avail = 0;
2327 cpu_cache_get(cachep)->limit = BOOT_CPUCACHE_ENTRIES;
2328 cpu_cache_get(cachep)->batchcount = 1;
2329 cpu_cache_get(cachep)->touched = 0;
2330 cachep->batchcount = 1;
2331 cachep->limit = BOOT_CPUCACHE_ENTRIES;
2ed3a4ef 2332 return 0;
f30cf7d1
PE
2333}
2334
1da177e4 2335/**
039363f3 2336 * __kmem_cache_create - Create a cache.
1da177e4
LT
2337 * @name: A string which is used in /proc/slabinfo to identify this cache.
2338 * @size: The size of objects to be created in this cache.
2339 * @align: The required alignment for the objects.
2340 * @flags: SLAB flags
2341 * @ctor: A constructor for the objects.
1da177e4
LT
2342 *
2343 * Returns a ptr to the cache on success, NULL on failure.
2344 * Cannot be called within an interrupt, but can be interrupted.
20c2df83 2345 * The @ctor is run when new pages are allocated by the cache.
1da177e4 2346 *
1da177e4
LT
2347 * The flags are
2348 *
2349 * %SLAB_POISON - Poison the slab with a known test pattern (a5a5a5a5)
2350 * to catch references to uninitialised memory.
2351 *
2352 * %SLAB_RED_ZONE - Insert `Red' zones around the allocated memory to check
2353 * for buffer overruns.
2354 *
1da177e4
LT
2355 * %SLAB_HWCACHE_ALIGN - Align the objects in this cache to a hardware
2356 * cacheline. This can be beneficial if you're counting cycles as closely
2357 * as davem.
2358 */
343e0d7a 2359struct kmem_cache *
039363f3 2360__kmem_cache_create (const char *name, size_t size, size_t align,
51cc5068 2361 unsigned long flags, void (*ctor)(void *))
1da177e4
LT
2362{
2363 size_t left_over, slab_size, ralign;
20cea968 2364 struct kmem_cache *cachep = NULL;
83b519e8 2365 gfp_t gfp;
1da177e4 2366
1da177e4 2367#if DEBUG
1da177e4
LT
2368#if FORCED_DEBUG
2369 /*
2370 * Enable redzoning and last user accounting, except for caches with
2371 * large objects, if the increased size would increase the object size
2372 * above the next power of two: caches with object sizes just above a
2373 * power of two have a significant amount of internal fragmentation.
2374 */
87a927c7
DW
2375 if (size < 4096 || fls(size - 1) == fls(size-1 + REDZONE_ALIGN +
2376 2 * sizeof(unsigned long long)))
b28a02de 2377 flags |= SLAB_RED_ZONE | SLAB_STORE_USER;
1da177e4
LT
2378 if (!(flags & SLAB_DESTROY_BY_RCU))
2379 flags |= SLAB_POISON;
2380#endif
2381 if (flags & SLAB_DESTROY_BY_RCU)
2382 BUG_ON(flags & SLAB_POISON);
2383#endif
1da177e4 2384 /*
a737b3e2
AM
2385 * Always checks flags; a caller might be expecting debug support which
2386 * isn't available.
1da177e4 2387 */
40094fa6 2388 BUG_ON(flags & ~CREATE_MASK);
1da177e4 2389
a737b3e2
AM
2390 /*
2391 * Check that size is in terms of words. This is needed to avoid
1da177e4
LT
2392 * unaligned accesses for some archs when redzoning is used, and makes
2393 * sure any on-slab bufctl's are also correctly aligned.
2394 */
b28a02de
PE
2395 if (size & (BYTES_PER_WORD - 1)) {
2396 size += (BYTES_PER_WORD - 1);
2397 size &= ~(BYTES_PER_WORD - 1);
1da177e4
LT
2398 }
2399
a737b3e2
AM
2400 /* calculate the final buffer alignment: */
2401
1da177e4
LT
2402 /* 1) arch recommendation: can be overridden for debug */
2403 if (flags & SLAB_HWCACHE_ALIGN) {
a737b3e2
AM
2404 /*
2405 * Default alignment: as specified by the arch code. Except if
2406 * an object is really small, then squeeze multiple objects into
2407 * one cacheline.
1da177e4
LT
2408 */
2409 ralign = cache_line_size();
b28a02de 2410 while (size <= ralign / 2)
1da177e4
LT
2411 ralign /= 2;
2412 } else {
2413 ralign = BYTES_PER_WORD;
2414 }
ca5f9703
PE
2415
2416 /*
87a927c7
DW
2417 * Redzoning and user store require word alignment or possibly larger.
2418 * Note this will be overridden by architecture or caller mandated
2419 * alignment if either is greater than BYTES_PER_WORD.
ca5f9703 2420 */
87a927c7
DW
2421 if (flags & SLAB_STORE_USER)
2422 ralign = BYTES_PER_WORD;
2423
2424 if (flags & SLAB_RED_ZONE) {
2425 ralign = REDZONE_ALIGN;
2426 /* If redzoning, ensure that the second redzone is suitably
2427 * aligned, by adjusting the object size accordingly. */
2428 size += REDZONE_ALIGN - 1;
2429 size &= ~(REDZONE_ALIGN - 1);
2430 }
ca5f9703 2431
a44b56d3 2432 /* 2) arch mandated alignment */
1da177e4
LT
2433 if (ralign < ARCH_SLAB_MINALIGN) {
2434 ralign = ARCH_SLAB_MINALIGN;
1da177e4 2435 }
a44b56d3 2436 /* 3) caller mandated alignment */
1da177e4
LT
2437 if (ralign < align) {
2438 ralign = align;
1da177e4 2439 }
3ff84a7f
PE
2440 /* disable debug if necessary */
2441 if (ralign > __alignof__(unsigned long long))
a44b56d3 2442 flags &= ~(SLAB_RED_ZONE | SLAB_STORE_USER);
a737b3e2 2443 /*
ca5f9703 2444 * 4) Store it.
1da177e4
LT
2445 */
2446 align = ralign;
2447
83b519e8
PE
2448 if (slab_is_available())
2449 gfp = GFP_KERNEL;
2450 else
2451 gfp = GFP_NOWAIT;
2452
1da177e4 2453 /* Get cache's description obj. */
9b030cb8 2454 cachep = kmem_cache_zalloc(kmem_cache, gfp);
1da177e4 2455 if (!cachep)
039363f3 2456 return NULL;
1da177e4 2457
b56efcf0 2458 cachep->nodelists = (struct kmem_list3 **)&cachep->array[nr_cpu_ids];
3b0efdfa
CL
2459 cachep->object_size = size;
2460 cachep->align = align;
1da177e4 2461#if DEBUG
1da177e4 2462
ca5f9703
PE
2463 /*
2464 * Both debugging options require word-alignment which is calculated
2465 * into align above.
2466 */
1da177e4 2467 if (flags & SLAB_RED_ZONE) {
1da177e4 2468 /* add space for red zone words */
3ff84a7f
PE
2469 cachep->obj_offset += sizeof(unsigned long long);
2470 size += 2 * sizeof(unsigned long long);
1da177e4
LT
2471 }
2472 if (flags & SLAB_STORE_USER) {
ca5f9703 2473 /* user store requires one word storage behind the end of
87a927c7
DW
2474 * the real object. But if the second red zone needs to be
2475 * aligned to 64 bits, we must allow that much space.
1da177e4 2476 */
87a927c7
DW
2477 if (flags & SLAB_RED_ZONE)
2478 size += REDZONE_ALIGN;
2479 else
2480 size += BYTES_PER_WORD;
1da177e4
LT
2481 }
2482#if FORCED_DEBUG && defined(CONFIG_DEBUG_PAGEALLOC)
b28a02de 2483 if (size >= malloc_sizes[INDEX_L3 + 1].cs_size
3b0efdfa 2484 && cachep->object_size > cache_line_size() && ALIGN(size, align) < PAGE_SIZE) {
1ab335d8 2485 cachep->obj_offset += PAGE_SIZE - ALIGN(size, align);
1da177e4
LT
2486 size = PAGE_SIZE;
2487 }
2488#endif
2489#endif
2490
e0a42726
IM
2491 /*
2492 * Determine if the slab management is 'on' or 'off' slab.
2493 * (bootstrapping cannot cope with offslab caches so don't do
e7cb55b9
CM
2494 * it too early on. Always use on-slab management when
2495 * SLAB_NOLEAKTRACE is set, to avoid recursive calls into kmemleak)
e0a42726 2496 */
e7cb55b9
CM
2497 if ((size >= (PAGE_SIZE >> 3)) && !slab_early_init &&
2498 !(flags & SLAB_NOLEAKTRACE))
1da177e4
LT
2499 /*
2500 * Size is large, assume best to place the slab management obj
2501 * off-slab (should allow better packing of objs).
2502 */
2503 flags |= CFLGS_OFF_SLAB;
2504
2505 size = ALIGN(size, align);
2506
f78bb8ad 2507 left_over = calculate_slab_order(cachep, size, align, flags);
1da177e4
LT
2508
2509 if (!cachep->num) {
b4169525 2510 printk(KERN_ERR
2511 "kmem_cache_create: couldn't create cache %s.\n", name);
9b030cb8 2512 kmem_cache_free(kmem_cache, cachep);
039363f3 2513 return NULL;
1da177e4 2514 }
b28a02de
PE
2515 slab_size = ALIGN(cachep->num * sizeof(kmem_bufctl_t)
2516 + sizeof(struct slab), align);
1da177e4
LT
2517
2518 /*
2519 * If the slab has been placed off-slab, and we have enough space then
2520 * move it on-slab. This is at the expense of any extra colouring.
2521 */
2522 if (flags & CFLGS_OFF_SLAB && left_over >= slab_size) {
2523 flags &= ~CFLGS_OFF_SLAB;
2524 left_over -= slab_size;
2525 }
2526
2527 if (flags & CFLGS_OFF_SLAB) {
2528 /* really off slab. No need for manual alignment */
b28a02de
PE
2529 slab_size =
2530 cachep->num * sizeof(kmem_bufctl_t) + sizeof(struct slab);
67461365
RL
2531
2532#ifdef CONFIG_PAGE_POISONING
2533 /* If we're going to use the generic kernel_map_pages()
2534 * poisoning, then it's going to smash the contents of
2535 * the redzone and userword anyhow, so switch them off.
2536 */
2537 if (size % PAGE_SIZE == 0 && flags & SLAB_POISON)
2538 flags &= ~(SLAB_RED_ZONE | SLAB_STORE_USER);
2539#endif
1da177e4
LT
2540 }
2541
2542 cachep->colour_off = cache_line_size();
2543 /* Offset must be a multiple of the alignment. */
2544 if (cachep->colour_off < align)
2545 cachep->colour_off = align;
b28a02de 2546 cachep->colour = left_over / cachep->colour_off;
1da177e4
LT
2547 cachep->slab_size = slab_size;
2548 cachep->flags = flags;
a618e89f 2549 cachep->allocflags = 0;
4b51d669 2550 if (CONFIG_ZONE_DMA_FLAG && (flags & SLAB_CACHE_DMA))
a618e89f 2551 cachep->allocflags |= GFP_DMA;
3b0efdfa 2552 cachep->size = size;
6a2d7a95 2553 cachep->reciprocal_buffer_size = reciprocal_value(size);
1da177e4 2554
e5ac9c5a 2555 if (flags & CFLGS_OFF_SLAB) {
b2d55073 2556 cachep->slabp_cache = kmem_find_general_cachep(slab_size, 0u);
e5ac9c5a
RT
2557 /*
2558 * This is a possibility for one of the malloc_sizes caches.
2559 * But since we go off slab only for object size greater than
2560 * PAGE_SIZE/8, and malloc_sizes gets created in ascending order,
2561 * this should not happen at all.
2562 * But leave a BUG_ON for some lucky dude.
2563 */
6cb8f913 2564 BUG_ON(ZERO_OR_NULL_PTR(cachep->slabp_cache));
e5ac9c5a 2565 }
1da177e4 2566 cachep->ctor = ctor;
1da177e4 2567 cachep->name = name;
7c9adf5a 2568 cachep->refcount = 1;
1da177e4 2569
83b519e8 2570 if (setup_cpu_cache(cachep, gfp)) {
12c3667f 2571 __kmem_cache_shutdown(cachep);
039363f3 2572 return NULL;
2ed3a4ef 2573 }
1da177e4 2574
83835b3d
PZ
2575 if (flags & SLAB_DEBUG_OBJECTS) {
2576 /*
2577 * Would deadlock through slab_destroy()->call_rcu()->
2578 * debug_object_activate()->kmem_cache_alloc().
2579 */
2580 WARN_ON_ONCE(flags & SLAB_DESTROY_BY_RCU);
2581
2582 slab_set_debugobj_lock_classes(cachep);
2583 }
2584
1da177e4
LT
2585 return cachep;
2586}
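
For context, a minimal sketch of how a caller typically reaches this path through the public wrapper kmem_cache_create(); the structure and cache name below are made up for illustration.

#include <linux/slab.h>
#include <linux/errno.h>

struct my_obj {				/* hypothetical object type */
	int id;
	char name[16];
};

static struct kmem_cache *my_cachep;

static int my_cache_setup(void)
{
	my_cachep = kmem_cache_create("my_obj_cache", sizeof(struct my_obj),
				      0, SLAB_HWCACHE_ALIGN, NULL);
	if (!my_cachep)
		return -ENOMEM;
	return 0;
}

static void my_cache_use(void)
{
	struct my_obj *p = kmem_cache_alloc(my_cachep, GFP_KERNEL);

	if (p)
		kmem_cache_free(my_cachep, p);
}

static void my_cache_teardown(void)
{
	kmem_cache_destroy(my_cachep);
}
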
1da177e4
LT
2587
2588#if DEBUG
2589static void check_irq_off(void)
2590{
2591 BUG_ON(!irqs_disabled());
2592}
2593
2594static void check_irq_on(void)
2595{
2596 BUG_ON(irqs_disabled());
2597}
2598
343e0d7a 2599static void check_spinlock_acquired(struct kmem_cache *cachep)
1da177e4
LT
2600{
2601#ifdef CONFIG_SMP
2602 check_irq_off();
7d6e6d09 2603 assert_spin_locked(&cachep->nodelists[numa_mem_id()]->list_lock);
1da177e4
LT
2604#endif
2605}
e498be7d 2606
343e0d7a 2607static void check_spinlock_acquired_node(struct kmem_cache *cachep, int node)
e498be7d
CL
2608{
2609#ifdef CONFIG_SMP
2610 check_irq_off();
2611 assert_spin_locked(&cachep->nodelists[node]->list_lock);
2612#endif
2613}
2614
1da177e4
LT
2615#else
2616#define check_irq_off() do { } while(0)
2617#define check_irq_on() do { } while(0)
2618#define check_spinlock_acquired(x) do { } while(0)
e498be7d 2619#define check_spinlock_acquired_node(x, y) do { } while(0)
1da177e4
LT
2620#endif
2621
aab2207c
CL
2622static void drain_array(struct kmem_cache *cachep, struct kmem_list3 *l3,
2623 struct array_cache *ac,
2624 int force, int node);
2625
1da177e4
LT
2626static void do_drain(void *arg)
2627{
a737b3e2 2628 struct kmem_cache *cachep = arg;
1da177e4 2629 struct array_cache *ac;
7d6e6d09 2630 int node = numa_mem_id();
1da177e4
LT
2631
2632 check_irq_off();
9a2dba4b 2633 ac = cpu_cache_get(cachep);
ff69416e
CL
2634 spin_lock(&cachep->nodelists[node]->list_lock);
2635 free_block(cachep, ac->entry, ac->avail, node);
2636 spin_unlock(&cachep->nodelists[node]->list_lock);
1da177e4
LT
2637 ac->avail = 0;
2638}
2639
343e0d7a 2640static void drain_cpu_caches(struct kmem_cache *cachep)
1da177e4 2641{
e498be7d
CL
2642 struct kmem_list3 *l3;
2643 int node;
2644
15c8b6c1 2645 on_each_cpu(do_drain, cachep, 1);
1da177e4 2646 check_irq_on();
b28a02de 2647 for_each_online_node(node) {
e498be7d 2648 l3 = cachep->nodelists[node];
a4523a8b
RD
2649 if (l3 && l3->alien)
2650 drain_alien_cache(cachep, l3->alien);
2651 }
2652
2653 for_each_online_node(node) {
2654 l3 = cachep->nodelists[node];
2655 if (l3)
aab2207c 2656 drain_array(cachep, l3, l3->shared, 1, node);
e498be7d 2657 }
1da177e4
LT
2658}
2659
ed11d9eb
CL
2660/*
2661 * Remove slabs from the list of free slabs.
2662 * Specify the number of slabs to drain in tofree.
2663 *
2664 * Returns the actual number of slabs released.
2665 */
2666static int drain_freelist(struct kmem_cache *cache,
2667 struct kmem_list3 *l3, int tofree)
1da177e4 2668{
ed11d9eb
CL
2669 struct list_head *p;
2670 int nr_freed;
1da177e4 2671 struct slab *slabp;
1da177e4 2672
ed11d9eb
CL
2673 nr_freed = 0;
2674 while (nr_freed < tofree && !list_empty(&l3->slabs_free)) {
1da177e4 2675
ed11d9eb 2676 spin_lock_irq(&l3->list_lock);
e498be7d 2677 p = l3->slabs_free.prev;
ed11d9eb
CL
2678 if (p == &l3->slabs_free) {
2679 spin_unlock_irq(&l3->list_lock);
2680 goto out;
2681 }
1da177e4 2682
ed11d9eb 2683 slabp = list_entry(p, struct slab, list);
1da177e4 2684#if DEBUG
40094fa6 2685 BUG_ON(slabp->inuse);
1da177e4
LT
2686#endif
2687 list_del(&slabp->list);
ed11d9eb
CL
2688 /*
2689 * Safe to drop the lock. The slab is no longer linked
2690 * to the cache.
2691 */
2692 l3->free_objects -= cache->num;
e498be7d 2693 spin_unlock_irq(&l3->list_lock);
ed11d9eb
CL
2694 slab_destroy(cache, slabp);
2695 nr_freed++;
1da177e4 2696 }
ed11d9eb
CL
2697out:
2698 return nr_freed;
1da177e4
LT
2699}
2700
18004c5d 2701/* Called with slab_mutex held to protect against cpu hotplug */
343e0d7a 2702static int __cache_shrink(struct kmem_cache *cachep)
e498be7d
CL
2703{
2704 int ret = 0, i = 0;
2705 struct kmem_list3 *l3;
2706
2707 drain_cpu_caches(cachep);
2708
2709 check_irq_on();
2710 for_each_online_node(i) {
2711 l3 = cachep->nodelists[i];
ed11d9eb
CL
2712 if (!l3)
2713 continue;
2714
2715 drain_freelist(cachep, l3, l3->free_objects);
2716
2717 ret += !list_empty(&l3->slabs_full) ||
2718 !list_empty(&l3->slabs_partial);
e498be7d
CL
2719 }
2720 return (ret ? 1 : 0);
2721}
2722
1da177e4
LT
2723/**
2724 * kmem_cache_shrink - Shrink a cache.
2725 * @cachep: The cache to shrink.
2726 *
2727 * Releases as many slabs as possible for a cache.
2728 * To help debugging, a zero exit status indicates all slabs were released.
2729 */
343e0d7a 2730int kmem_cache_shrink(struct kmem_cache *cachep)
1da177e4 2731{
8f5be20b 2732 int ret;
40094fa6 2733 BUG_ON(!cachep || in_interrupt());
1da177e4 2734
95402b38 2735 get_online_cpus();
18004c5d 2736 mutex_lock(&slab_mutex);
8f5be20b 2737 ret = __cache_shrink(cachep);
18004c5d 2738 mutex_unlock(&slab_mutex);
95402b38 2739 put_online_cpus();
8f5be20b 2740 return ret;
1da177e4
LT
2741}
2742EXPORT_SYMBOL(kmem_cache_shrink);
2743
945cf2b6 2744int __kmem_cache_shutdown(struct kmem_cache *cachep)
1da177e4 2745{
12c3667f
CL
2746 int i;
2747 struct kmem_list3 *l3;
2748 int rc = __cache_shrink(cachep);
2749
2750 if (rc)
2751 return rc;
2752
2753 for_each_online_cpu(i)
2754 kfree(cachep->array[i]);
2755
2756 /* NUMA: free the list3 structures */
2757 for_each_online_node(i) {
2758 l3 = cachep->nodelists[i];
2759 if (l3) {
2760 kfree(l3->shared);
2761 free_alien_cache(l3->alien);
2762 kfree(l3);
2763 }
2764 }
2765 return 0;
1da177e4 2766}
1da177e4 2767
e5ac9c5a
RT
2768/*
2769 * Get the memory for a slab management obj.
2770 * For a slab cache when the slab descriptor is off-slab, slab descriptors
2771 * always come from malloc_sizes caches. The slab descriptor cannot
2772 * come from the same cache which is getting created because,
2773 * when we are searching for an appropriate cache for these
2774 * descriptors in kmem_cache_create, we search through the malloc_sizes array.
2775 * If we are creating a malloc_sizes cache here it would not be visible to
2776 * kmem_find_general_cachep till the initialization is complete.
2777 * Hence we cannot have slabp_cache same as the original cache.
2778 */
343e0d7a 2779static struct slab *alloc_slabmgmt(struct kmem_cache *cachep, void *objp,
5b74ada7
RT
2780 int colour_off, gfp_t local_flags,
2781 int nodeid)
1da177e4
LT
2782{
2783 struct slab *slabp;
b28a02de 2784
1da177e4
LT
2785 if (OFF_SLAB(cachep)) {
2786 /* Slab management obj is off-slab. */
5b74ada7 2787 slabp = kmem_cache_alloc_node(cachep->slabp_cache,
8759ec50 2788 local_flags, nodeid);
d5cff635
CM
2789 /*
2790 * If the first object in the slab is leaked (it's allocated
2791 * but no one has a reference to it), we want to make sure
2792 * kmemleak does not treat the ->s_mem pointer as a reference
2793 * to the object. Otherwise we will not report the leak.
2794 */
c017b4be
CM
2795 kmemleak_scan_area(&slabp->list, sizeof(struct list_head),
2796 local_flags);
1da177e4
LT
2797 if (!slabp)
2798 return NULL;
2799 } else {
b28a02de 2800 slabp = objp + colour_off;
1da177e4
LT
2801 colour_off += cachep->slab_size;
2802 }
2803 slabp->inuse = 0;
2804 slabp->colouroff = colour_off;
b28a02de 2805 slabp->s_mem = objp + colour_off;
5b74ada7 2806 slabp->nodeid = nodeid;
e51bfd0a 2807 slabp->free = 0;
1da177e4
LT
2808 return slabp;
2809}
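
For the on-slab case above, the slab descriptor and its bufctl array occupy the first slab_size bytes after the colour offset, and s_mem points just past them. A rough userspace model of that arithmetic; the sizes are illustrative and the struct is a stand-in, not the real struct slab.

#include <stdio.h>
#include <stddef.h>

#define ALIGN_UP(x, a)	(((x) + (a) - 1) & ~((size_t)(a) - 1))

/* Illustrative stand-ins; the real types live in mm/slab.c. */
typedef unsigned int kmem_bufctl_t;
struct slab_hdr { void *s_mem; unsigned int inuse; kmem_bufctl_t free; };

int main(void)
{
	size_t num = 56;		/* objects per slab (example)      */
	size_t align = 64;		/* cache line size (example)       */
	size_t colour_off = 128;	/* this slab's colour offset       */

	/* Mirrors: slab_size = ALIGN(num * sizeof(kmem_bufctl_t)
	 *                            + sizeof(struct slab), align)        */
	size_t slab_size = ALIGN_UP(num * sizeof(kmem_bufctl_t)
				    + sizeof(struct slab_hdr), align);

	/* On-slab: descriptor at colour_off, objects start right after.  */
	printf("descriptor at offset %zu, s_mem at offset %zu\n",
	       colour_off, colour_off + slab_size);
	return 0;
}
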
2810
2811static inline kmem_bufctl_t *slab_bufctl(struct slab *slabp)
2812{
b28a02de 2813 return (kmem_bufctl_t *) (slabp + 1);
1da177e4
LT
2814}
2815
343e0d7a 2816static void cache_init_objs(struct kmem_cache *cachep,
a35afb83 2817 struct slab *slabp)
1da177e4
LT
2818{
2819 int i;
2820
2821 for (i = 0; i < cachep->num; i++) {
8fea4e96 2822 void *objp = index_to_obj(cachep, slabp, i);
1da177e4
LT
2823#if DEBUG
2824 /* need to poison the objs? */
2825 if (cachep->flags & SLAB_POISON)
2826 poison_obj(cachep, objp, POISON_FREE);
2827 if (cachep->flags & SLAB_STORE_USER)
2828 *dbg_userword(cachep, objp) = NULL;
2829
2830 if (cachep->flags & SLAB_RED_ZONE) {
2831 *dbg_redzone1(cachep, objp) = RED_INACTIVE;
2832 *dbg_redzone2(cachep, objp) = RED_INACTIVE;
2833 }
2834 /*
a737b3e2
AM
2835 * Constructors are not allowed to allocate memory from the same
2836 * cache which they are a constructor for. Otherwise, deadlock.
2837 * They must also be threaded.
1da177e4
LT
2838 */
2839 if (cachep->ctor && !(cachep->flags & SLAB_POISON))
51cc5068 2840 cachep->ctor(objp + obj_offset(cachep));
1da177e4
LT
2841
2842 if (cachep->flags & SLAB_RED_ZONE) {
2843 if (*dbg_redzone2(cachep, objp) != RED_INACTIVE)
2844 slab_error(cachep, "constructor overwrote the"
b28a02de 2845 " end of an object");
1da177e4
LT
2846 if (*dbg_redzone1(cachep, objp) != RED_INACTIVE)
2847 slab_error(cachep, "constructor overwrote the"
b28a02de 2848 " start of an object");
1da177e4 2849 }
3b0efdfa 2850 if ((cachep->size % PAGE_SIZE) == 0 &&
a737b3e2 2851 OFF_SLAB(cachep) && cachep->flags & SLAB_POISON)
b28a02de 2852 kernel_map_pages(virt_to_page(objp),
3b0efdfa 2853 cachep->size / PAGE_SIZE, 0);
1da177e4
LT
2854#else
2855 if (cachep->ctor)
51cc5068 2856 cachep->ctor(objp);
1da177e4 2857#endif
b28a02de 2858 slab_bufctl(slabp)[i] = i + 1;
1da177e4 2859 }
b28a02de 2860 slab_bufctl(slabp)[i - 1] = BUFCTL_END;
1da177e4
LT
2861}
2862
343e0d7a 2863static void kmem_flagcheck(struct kmem_cache *cachep, gfp_t flags)
1da177e4 2864{
4b51d669
CL
2865 if (CONFIG_ZONE_DMA_FLAG) {
2866 if (flags & GFP_DMA)
a618e89f 2867 BUG_ON(!(cachep->allocflags & GFP_DMA));
4b51d669 2868 else
a618e89f 2869 BUG_ON(cachep->allocflags & GFP_DMA);
4b51d669 2870 }
1da177e4
LT
2871}
2872
a737b3e2
AM
2873static void *slab_get_obj(struct kmem_cache *cachep, struct slab *slabp,
2874 int nodeid)
78d382d7 2875{
8fea4e96 2876 void *objp = index_to_obj(cachep, slabp, slabp->free);
78d382d7
MD
2877 kmem_bufctl_t next;
2878
2879 slabp->inuse++;
2880 next = slab_bufctl(slabp)[slabp->free];
2881#if DEBUG
2882 slab_bufctl(slabp)[slabp->free] = BUFCTL_FREE;
2883 WARN_ON(slabp->nodeid != nodeid);
2884#endif
2885 slabp->free = next;
2886
2887 return objp;
2888}
2889
a737b3e2
AM
2890static void slab_put_obj(struct kmem_cache *cachep, struct slab *slabp,
2891 void *objp, int nodeid)
78d382d7 2892{
8fea4e96 2893 unsigned int objnr = obj_to_index(cachep, slabp, objp);
78d382d7
MD
2894
2895#if DEBUG
2896 /* Verify that the slab belongs to the intended node */
2897 WARN_ON(slabp->nodeid != nodeid);
2898
871751e2 2899 if (slab_bufctl(slabp)[objnr] + 1 <= SLAB_LIMIT + 1) {
78d382d7 2900 printk(KERN_ERR "slab: double free detected in cache "
a737b3e2 2901 "'%s', objp %p\n", cachep->name, objp);
78d382d7
MD
2902 BUG();
2903 }
2904#endif
2905 slab_bufctl(slabp)[objnr] = slabp->free;
2906 slabp->free = objnr;
2907 slabp->inuse--;
2908}
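
slab_get_obj() and slab_put_obj() treat the bufctl array as an index-linked free list: entry i names the next free object, BUFCTL_END terminates the chain, and slabp->free is its head. A minimal userspace model, initialised the way cache_init_objs() chains the entries:

#include <stdio.h>

#define NUM		8		/* objects per slab (example)     */
#define BUFCTL_END	(~0U)

static unsigned int bufctl[NUM];	/* bufctl[i]: next free index     */
static unsigned int free_head;		/* plays the role of slabp->free  */

static void init_slab(void)
{
	unsigned int i;

	for (i = 0; i < NUM; i++)
		bufctl[i] = i + 1;	/* as in cache_init_objs()        */
	bufctl[NUM - 1] = BUFCTL_END;
	free_head = 0;
}

static unsigned int get_obj(void)	/* like slab_get_obj()            */
{
	unsigned int idx = free_head;

	free_head = bufctl[idx];
	return idx;
}

static void put_obj(unsigned int idx)	/* like slab_put_obj()            */
{
	bufctl[idx] = free_head;
	free_head = idx;
}

int main(void)
{
	init_slab();
	printf("%u %u\n", get_obj(), get_obj());	/* 0 1            */
	put_obj(0);
	printf("%u\n", get_obj());			/* 0 again (LIFO) */
	return 0;
}
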
2909
4776874f
PE
2910/*
2911 * Map pages beginning at addr to the given cache and slab. This is required
2912 * for the slab allocator to be able to lookup the cache and slab of a
ccd35fb9 2913 * virtual address for kfree, ksize, and slab debugging.
4776874f
PE
2914 */
2915static void slab_map_pages(struct kmem_cache *cache, struct slab *slab,
2916 void *addr)
1da177e4 2917{
4776874f 2918 int nr_pages;
1da177e4
LT
2919 struct page *page;
2920
4776874f 2921 page = virt_to_page(addr);
84097518 2922
4776874f 2923 nr_pages = 1;
84097518 2924 if (likely(!PageCompound(page)))
4776874f
PE
2925 nr_pages <<= cache->gfporder;
2926
1da177e4 2927 do {
35026088
CL
2928 page->slab_cache = cache;
2929 page->slab_page = slab;
1da177e4 2930 page++;
4776874f 2931 } while (--nr_pages);
1da177e4
LT
2932}
2933
2934/*
2935 * Grow (by 1) the number of slabs within a cache. This is called by
2936 * kmem_cache_alloc() when there are no active objs left in a cache.
2937 */
3c517a61
CL
2938static int cache_grow(struct kmem_cache *cachep,
2939 gfp_t flags, int nodeid, void *objp)
1da177e4 2940{
b28a02de 2941 struct slab *slabp;
b28a02de
PE
2942 size_t offset;
2943 gfp_t local_flags;
e498be7d 2944 struct kmem_list3 *l3;
1da177e4 2945
a737b3e2
AM
2946 /*
2947 * Be lazy and only check for valid flags here, keeping it out of the
2948 * critical path in kmem_cache_alloc().
1da177e4 2949 */
6cb06229
CL
2950 BUG_ON(flags & GFP_SLAB_BUG_MASK);
2951 local_flags = flags & (GFP_CONSTRAINT_MASK|GFP_RECLAIM_MASK);
1da177e4 2952
2e1217cf 2953 /* Take the l3 list lock to change the colour_next on this node */
1da177e4 2954 check_irq_off();
2e1217cf
RT
2955 l3 = cachep->nodelists[nodeid];
2956 spin_lock(&l3->list_lock);
1da177e4
LT
2957
2958 /* Get colour for the slab, and calculate the next value. */
2e1217cf
RT
2959 offset = l3->colour_next;
2960 l3->colour_next++;
2961 if (l3->colour_next >= cachep->colour)
2962 l3->colour_next = 0;
2963 spin_unlock(&l3->list_lock);
1da177e4 2964
2e1217cf 2965 offset *= cachep->colour_off;
1da177e4
LT
2966
2967 if (local_flags & __GFP_WAIT)
2968 local_irq_enable();
2969
2970 /*
2971 * The test for missing atomic flag is performed here, rather than
2972 * the more obvious place, simply to reduce the critical path length
2973 * in kmem_cache_alloc(). If a caller is seriously mis-behaving they
2974 * will eventually be caught here (where it matters).
2975 */
2976 kmem_flagcheck(cachep, flags);
2977
a737b3e2
AM
2978 /*
2979 * Get mem for the objs. Attempt to allocate a physical page from
2980 * 'nodeid'.
e498be7d 2981 */
3c517a61 2982 if (!objp)
b8c1c5da 2983 objp = kmem_getpages(cachep, local_flags, nodeid);
a737b3e2 2984 if (!objp)
1da177e4
LT
2985 goto failed;
2986
2987 /* Get slab management. */
3c517a61 2988 slabp = alloc_slabmgmt(cachep, objp, offset,
6cb06229 2989 local_flags & ~GFP_CONSTRAINT_MASK, nodeid);
a737b3e2 2990 if (!slabp)
1da177e4
LT
2991 goto opps1;
2992
4776874f 2993 slab_map_pages(cachep, slabp, objp);
1da177e4 2994
a35afb83 2995 cache_init_objs(cachep, slabp);
1da177e4
LT
2996
2997 if (local_flags & __GFP_WAIT)
2998 local_irq_disable();
2999 check_irq_off();
e498be7d 3000 spin_lock(&l3->list_lock);
1da177e4
LT
3001
3002 /* Make slab active. */
e498be7d 3003 list_add_tail(&slabp->list, &(l3->slabs_free));
1da177e4 3004 STATS_INC_GROWN(cachep);
e498be7d
CL
3005 l3->free_objects += cachep->num;
3006 spin_unlock(&l3->list_lock);
1da177e4 3007 return 1;
a737b3e2 3008opps1:
1da177e4 3009 kmem_freepages(cachep, objp);
a737b3e2 3010failed:
1da177e4
LT
3011 if (local_flags & __GFP_WAIT)
3012 local_irq_disable();
3013 return 0;
3014}
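
Each call to cache_grow() above gives the new slab the next colour offset, so successive slabs start their objects on different cache lines. A short model of how l3->colour_next cycles and becomes a byte offset (the values are made up):

#include <stdio.h>

int main(void)
{
	unsigned int colour = 4;	/* cachep->colour: left_over / colour_off */
	unsigned int colour_off = 64;	/* cachep->colour_off (cache line size)   */
	unsigned int colour_next = 0;	/* l3->colour_next                        */
	int slab;

	for (slab = 0; slab < 6; slab++) {
		unsigned int offset = colour_next;

		colour_next++;
		if (colour_next >= colour)
			colour_next = 0;
		offset *= colour_off;	/* same order of operations as cache_grow() */
		printf("slab %d: first object at byte offset %u\n", slab, offset);
	}
	return 0;
}
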
3015
3016#if DEBUG
3017
3018/*
3019 * Perform extra freeing checks:
3020 * - detect bad pointers.
3021 * - POISON/RED_ZONE checking
1da177e4
LT
3022 */
3023static void kfree_debugcheck(const void *objp)
3024{
1da177e4
LT
3025 if (!virt_addr_valid(objp)) {
3026 printk(KERN_ERR "kfree_debugcheck: out of range ptr %lxh.\n",
b28a02de
PE
3027 (unsigned long)objp);
3028 BUG();
1da177e4 3029 }
1da177e4
LT
3030}
3031
58ce1fd5
PE
3032static inline void verify_redzone_free(struct kmem_cache *cache, void *obj)
3033{
b46b8f19 3034 unsigned long long redzone1, redzone2;
58ce1fd5
PE
3035
3036 redzone1 = *dbg_redzone1(cache, obj);
3037 redzone2 = *dbg_redzone2(cache, obj);
3038
3039 /*
3040 * Redzone is ok.
3041 */
3042 if (redzone1 == RED_ACTIVE && redzone2 == RED_ACTIVE)
3043 return;
3044
3045 if (redzone1 == RED_INACTIVE && redzone2 == RED_INACTIVE)
3046 slab_error(cache, "double free detected");
3047 else
3048 slab_error(cache, "memory outside object was overwritten");
3049
b46b8f19 3050 printk(KERN_ERR "%p: redzone 1:0x%llx, redzone 2:0x%llx.\n",
58ce1fd5
PE
3051 obj, redzone1, redzone2);
3052}
3053
343e0d7a 3054static void *cache_free_debugcheck(struct kmem_cache *cachep, void *objp,
b28a02de 3055 void *caller)
1da177e4
LT
3056{
3057 struct page *page;
3058 unsigned int objnr;
3059 struct slab *slabp;
3060
80cbd911
MW
3061 BUG_ON(virt_to_cache(objp) != cachep);
3062
3dafccf2 3063 objp -= obj_offset(cachep);
1da177e4 3064 kfree_debugcheck(objp);
b49af68f 3065 page = virt_to_head_page(objp);
1da177e4 3066
35026088 3067 slabp = page->slab_page;
1da177e4
LT
3068
3069 if (cachep->flags & SLAB_RED_ZONE) {
58ce1fd5 3070 verify_redzone_free(cachep, objp);
1da177e4
LT
3071 *dbg_redzone1(cachep, objp) = RED_INACTIVE;
3072 *dbg_redzone2(cachep, objp) = RED_INACTIVE;
3073 }
3074 if (cachep->flags & SLAB_STORE_USER)
3075 *dbg_userword(cachep, objp) = caller;
3076
8fea4e96 3077 objnr = obj_to_index(cachep, slabp, objp);
1da177e4
LT
3078
3079 BUG_ON(objnr >= cachep->num);
8fea4e96 3080 BUG_ON(objp != index_to_obj(cachep, slabp, objnr));
1da177e4 3081
871751e2
AV
3082#ifdef CONFIG_DEBUG_SLAB_LEAK
3083 slab_bufctl(slabp)[objnr] = BUFCTL_FREE;
3084#endif
1da177e4
LT
3085 if (cachep->flags & SLAB_POISON) {
3086#ifdef CONFIG_DEBUG_PAGEALLOC
3b0efdfa 3087 if ((cachep->size % PAGE_SIZE)==0 && OFF_SLAB(cachep)) {
1da177e4 3088 store_stackinfo(cachep, objp, (unsigned long)caller);
b28a02de 3089 kernel_map_pages(virt_to_page(objp),
3b0efdfa 3090 cachep->size / PAGE_SIZE, 0);
1da177e4
LT
3091 } else {
3092 poison_obj(cachep, objp, POISON_FREE);
3093 }
3094#else
3095 poison_obj(cachep, objp, POISON_FREE);
3096#endif
3097 }
3098 return objp;
3099}
3100
343e0d7a 3101static void check_slabp(struct kmem_cache *cachep, struct slab *slabp)
1da177e4
LT
3102{
3103 kmem_bufctl_t i;
3104 int entries = 0;
b28a02de 3105
1da177e4
LT
3106 /* Check slab's freelist to see if this obj is there. */
3107 for (i = slabp->free; i != BUFCTL_END; i = slab_bufctl(slabp)[i]) {
3108 entries++;
3109 if (entries > cachep->num || i >= cachep->num)
3110 goto bad;
3111 }
3112 if (entries != cachep->num - slabp->inuse) {
a737b3e2
AM
3113bad:
3114 printk(KERN_ERR "slab: Internal list corruption detected in "
face37f5
DJ
3115 "cache '%s'(%d), slabp %p(%d). Tainted(%s). Hexdump:\n",
3116 cachep->name, cachep->num, slabp, slabp->inuse,
3117 print_tainted());
fdde6abb
SAS
3118 print_hex_dump(KERN_ERR, "", DUMP_PREFIX_OFFSET, 16, 1, slabp,
3119 sizeof(*slabp) + cachep->num * sizeof(kmem_bufctl_t),
3120 1);
1da177e4
LT
3121 BUG();
3122 }
3123}
3124#else
3125#define kfree_debugcheck(x) do { } while(0)
3126#define cache_free_debugcheck(x,objp,z) (objp)
3127#define check_slabp(x,y) do { } while(0)
3128#endif
3129
072bb0aa
MG
3130static void *cache_alloc_refill(struct kmem_cache *cachep, gfp_t flags,
3131 bool force_refill)
1da177e4
LT
3132{
3133 int batchcount;
3134 struct kmem_list3 *l3;
3135 struct array_cache *ac;
1ca4cb24
PE
3136 int node;
3137
1da177e4 3138 check_irq_off();
7d6e6d09 3139 node = numa_mem_id();
072bb0aa
MG
3140 if (unlikely(force_refill))
3141 goto force_grow;
3142retry:
9a2dba4b 3143 ac = cpu_cache_get(cachep);
1da177e4
LT
3144 batchcount = ac->batchcount;
3145 if (!ac->touched && batchcount > BATCHREFILL_LIMIT) {
a737b3e2
AM
3146 /*
3147 * If there was little recent activity on this cache, then
3148 * perform only a partial refill. Otherwise we could generate
3149 * refill bouncing.
1da177e4
LT
3150 */
3151 batchcount = BATCHREFILL_LIMIT;
3152 }
1ca4cb24 3153 l3 = cachep->nodelists[node];
e498be7d
CL
3154
3155 BUG_ON(ac->avail > 0 || !l3);
3156 spin_lock(&l3->list_lock);
1da177e4 3157
3ded175a 3158 /* See if we can refill from the shared array */
44b57f1c
NP
3159 if (l3->shared && transfer_objects(ac, l3->shared, batchcount)) {
3160 l3->shared->touched = 1;
3ded175a 3161 goto alloc_done;
44b57f1c 3162 }
3ded175a 3163
1da177e4
LT
3164 while (batchcount > 0) {
3165 struct list_head *entry;
3166 struct slab *slabp;
3167 /* Get the slab that the allocation will come from. */
3168 entry = l3->slabs_partial.next;
3169 if (entry == &l3->slabs_partial) {
3170 l3->free_touched = 1;
3171 entry = l3->slabs_free.next;
3172 if (entry == &l3->slabs_free)
3173 goto must_grow;
3174 }
3175
3176 slabp = list_entry(entry, struct slab, list);
3177 check_slabp(cachep, slabp);
3178 check_spinlock_acquired(cachep);
714b8171
PE
3179
3180 /*
3181 * The slab was either on partial or free list so
3182 * there must be at least one object available for
3183 * allocation.
3184 */
249b9f33 3185 BUG_ON(slabp->inuse >= cachep->num);
714b8171 3186
1da177e4 3187 while (slabp->inuse < cachep->num && batchcount--) {
1da177e4
LT
3188 STATS_INC_ALLOCED(cachep);
3189 STATS_INC_ACTIVE(cachep);
3190 STATS_SET_HIGH(cachep);
3191
072bb0aa
MG
3192 ac_put_obj(cachep, ac, slab_get_obj(cachep, slabp,
3193 node));
1da177e4
LT
3194 }
3195 check_slabp(cachep, slabp);
3196
3197 /* move slabp to correct slabp list: */
3198 list_del(&slabp->list);
3199 if (slabp->free == BUFCTL_END)
3200 list_add(&slabp->list, &l3->slabs_full);
3201 else
3202 list_add(&slabp->list, &l3->slabs_partial);
3203 }
3204
a737b3e2 3205must_grow:
1da177e4 3206 l3->free_objects -= ac->avail;
a737b3e2 3207alloc_done:
e498be7d 3208 spin_unlock(&l3->list_lock);
1da177e4
LT
3209
3210 if (unlikely(!ac->avail)) {
3211 int x;
072bb0aa 3212force_grow:
3c517a61 3213 x = cache_grow(cachep, flags | GFP_THISNODE, node, NULL);
e498be7d 3214
a737b3e2 3215 /* cache_grow can reenable interrupts, then ac could change. */
9a2dba4b 3216 ac = cpu_cache_get(cachep);
072bb0aa
MG
3217
3218 /* no objects in sight? abort */
3219 if (!x && (ac->avail == 0 || force_refill))
1da177e4
LT
3220 return NULL;
3221
a737b3e2 3222 if (!ac->avail) /* objects refilled by interrupt? */
1da177e4
LT
3223 goto retry;
3224 }
3225 ac->touched = 1;
072bb0aa
MG
3226
3227 return ac_get_obj(cachep, ac, flags, force_refill);
1da177e4
LT
3228}
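
The allocation fast path pops the most recently freed object from the per-CPU array and only falls back to cache_alloc_refill() when the array is empty. A minimal userspace model of that LIFO array; the refill here just manufactures dummy pointers instead of walking the slab lists.

#include <stdio.h>

#define LIMIT		16		/* ac->limit (example)        */
#define BATCHCOUNT	4		/* ac->batchcount (example)   */

static void *entry[LIMIT];		/* ac->entry[]                */
static unsigned int avail;		/* ac->avail                  */

static void refill(void)		/* stands in for cache_alloc_refill() */
{
	static unsigned long serial;
	unsigned int i;

	for (i = 0; i < BATCHCOUNT; i++)
		entry[avail++] = (void *)++serial;	/* dummy objects */
}

static void *alloc_obj(void)		/* ____cache_alloc() fast path */
{
	if (!avail)
		refill();
	return entry[--avail];		/* LIFO: most recently added first */
}

static void free_obj(void *obj)		/* fast-path free               */
{
	if (avail < LIMIT)
		entry[avail++] = obj;	/* real code flushes a batch when full */
}

int main(void)
{
	void *a = alloc_obj();
	void *b = alloc_obj();

	free_obj(b);
	printf("%s\n", alloc_obj() == b ? "LIFO hit" : "miss");
	free_obj(a);
	return 0;
}
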
3229
a737b3e2
AM
3230static inline void cache_alloc_debugcheck_before(struct kmem_cache *cachep,
3231 gfp_t flags)
1da177e4
LT
3232{
3233 might_sleep_if(flags & __GFP_WAIT);
3234#if DEBUG
3235 kmem_flagcheck(cachep, flags);
3236#endif
3237}
3238
3239#if DEBUG
a737b3e2
AM
3240static void *cache_alloc_debugcheck_after(struct kmem_cache *cachep,
3241 gfp_t flags, void *objp, void *caller)
1da177e4 3242{
b28a02de 3243 if (!objp)
1da177e4 3244 return objp;
b28a02de 3245 if (cachep->flags & SLAB_POISON) {
1da177e4 3246#ifdef CONFIG_DEBUG_PAGEALLOC
3b0efdfa 3247 if ((cachep->size % PAGE_SIZE) == 0 && OFF_SLAB(cachep))
b28a02de 3248 kernel_map_pages(virt_to_page(objp),
3b0efdfa 3249 cachep->size / PAGE_SIZE, 1);
1da177e4
LT
3250 else
3251 check_poison_obj(cachep, objp);
3252#else
3253 check_poison_obj(cachep, objp);
3254#endif
3255 poison_obj(cachep, objp, POISON_INUSE);
3256 }
3257 if (cachep->flags & SLAB_STORE_USER)
3258 *dbg_userword(cachep, objp) = caller;
3259
3260 if (cachep->flags & SLAB_RED_ZONE) {
a737b3e2
AM
3261 if (*dbg_redzone1(cachep, objp) != RED_INACTIVE ||
3262 *dbg_redzone2(cachep, objp) != RED_INACTIVE) {
3263 slab_error(cachep, "double free, or memory outside"
3264 " object was overwritten");
b28a02de 3265 printk(KERN_ERR
b46b8f19 3266 "%p: redzone 1:0x%llx, redzone 2:0x%llx\n",
a737b3e2
AM
3267 objp, *dbg_redzone1(cachep, objp),
3268 *dbg_redzone2(cachep, objp));
1da177e4
LT
3269 }
3270 *dbg_redzone1(cachep, objp) = RED_ACTIVE;
3271 *dbg_redzone2(cachep, objp) = RED_ACTIVE;
3272 }
871751e2
AV
3273#ifdef CONFIG_DEBUG_SLAB_LEAK
3274 {
3275 struct slab *slabp;
3276 unsigned objnr;
3277
35026088 3278 slabp = virt_to_head_page(objp)->slab_page;
3b0efdfa 3279 objnr = (unsigned)(objp - slabp->s_mem) / cachep->size;
871751e2
AV
3280 slab_bufctl(slabp)[objnr] = BUFCTL_ACTIVE;
3281 }
3282#endif
3dafccf2 3283 objp += obj_offset(cachep);
4f104934 3284 if (cachep->ctor && cachep->flags & SLAB_POISON)
51cc5068 3285 cachep->ctor(objp);
7ea466f2
TH
3286 if (ARCH_SLAB_MINALIGN &&
3287 ((unsigned long)objp & (ARCH_SLAB_MINALIGN-1))) {
a44b56d3 3288 printk(KERN_ERR "0x%p: not aligned to ARCH_SLAB_MINALIGN=%d\n",
c225150b 3289 objp, (int)ARCH_SLAB_MINALIGN);
a44b56d3 3290 }
1da177e4
LT
3291 return objp;
3292}
3293#else
3294#define cache_alloc_debugcheck_after(a,b,objp,d) (objp)
3295#endif
3296
773ff60e 3297static bool slab_should_failslab(struct kmem_cache *cachep, gfp_t flags)
8a8b6502 3298{
9b030cb8 3299 if (cachep == kmem_cache)
773ff60e 3300 return false;
8a8b6502 3301
8c138bc0 3302 return should_failslab(cachep->object_size, flags, cachep->flags);
8a8b6502
AM
3303}
3304
343e0d7a 3305static inline void *____cache_alloc(struct kmem_cache *cachep, gfp_t flags)
1da177e4 3306{
b28a02de 3307 void *objp;
1da177e4 3308 struct array_cache *ac;
072bb0aa 3309 bool force_refill = false;
1da177e4 3310
5c382300 3311 check_irq_off();
8a8b6502 3312
9a2dba4b 3313 ac = cpu_cache_get(cachep);
1da177e4 3314 if (likely(ac->avail)) {
1da177e4 3315 ac->touched = 1;
072bb0aa
MG
3316 objp = ac_get_obj(cachep, ac, flags, false);
3317
ddbf2e83 3318 /*
072bb0aa
MG
3319 * Allow for the possibility that all available objects are disallowed
3320 * by the current flags
ddbf2e83 3321 */
072bb0aa
MG
3322 if (objp) {
3323 STATS_INC_ALLOCHIT(cachep);
3324 goto out;
3325 }
3326 force_refill = true;
1da177e4 3327 }
072bb0aa
MG
3328
3329 STATS_INC_ALLOCMISS(cachep);
3330 objp = cache_alloc_refill(cachep, flags, force_refill);
3331 /*
3332 * the 'ac' may be updated by cache_alloc_refill(),
3333 * and kmemleak_erase() requires its correct value.
3334 */
3335 ac = cpu_cache_get(cachep);
3336
3337out:
d5cff635
CM
3338 /*
3339 * To avoid a false negative, if an object that is in one of the
3340 * per-CPU caches is leaked, we need to make sure kmemleak doesn't
3341 * treat the array pointers as a reference to the object.
3342 */
f3d8b53a
O
3343 if (objp)
3344 kmemleak_erase(&ac->entry[ac->avail]);
5c382300
AK
3345 return objp;
3346}
3347
e498be7d 3348#ifdef CONFIG_NUMA
c61afb18 3349/*
b2455396 3350 * Try allocating on another node if PF_SPREAD_SLAB|PF_MEMPOLICY.
c61afb18
PJ
3351 *
3352 * If we are in_interrupt, then process context, including cpusets and
3353 * mempolicy, may not apply and should not be used for allocation policy.
3354 */
3355static void *alternate_node_alloc(struct kmem_cache *cachep, gfp_t flags)
3356{
3357 int nid_alloc, nid_here;
3358
765c4507 3359 if (in_interrupt() || (flags & __GFP_THISNODE))
c61afb18 3360 return NULL;
7d6e6d09 3361 nid_alloc = nid_here = numa_mem_id();
c61afb18 3362 if (cpuset_do_slab_mem_spread() && (cachep->flags & SLAB_MEM_SPREAD))
6adef3eb 3363 nid_alloc = cpuset_slab_spread_node();
c61afb18 3364 else if (current->mempolicy)
e7b691b0 3365 nid_alloc = slab_node();
c61afb18 3366 if (nid_alloc != nid_here)
8b98c169 3367 return ____cache_alloc_node(cachep, flags, nid_alloc);
c61afb18
PJ
3368 return NULL;
3369}
3370
765c4507
CL
3371/*
3372 * Fallback function if there was no memory available and no objects on a
3c517a61
CL
3373 * certain node and fall back is permitted. First we scan all the
3374 * available nodelists for available objects. If that fails then we
3375 * perform an allocation without specifying a node. This allows the page
3376 * allocator to do its reclaim / fallback magic. We then insert the
3377 * slab into the proper nodelist and then allocate from it.
765c4507 3378 */
8c8cc2c1 3379static void *fallback_alloc(struct kmem_cache *cache, gfp_t flags)
765c4507 3380{
8c8cc2c1
PE
3381 struct zonelist *zonelist;
3382 gfp_t local_flags;
dd1a239f 3383 struct zoneref *z;
54a6eb5c
MG
3384 struct zone *zone;
3385 enum zone_type high_zoneidx = gfp_zone(flags);
765c4507 3386 void *obj = NULL;
3c517a61 3387 int nid;
cc9a6c87 3388 unsigned int cpuset_mems_cookie;
8c8cc2c1
PE
3389
3390 if (flags & __GFP_THISNODE)
3391 return NULL;
3392
6cb06229 3393 local_flags = flags & (GFP_CONSTRAINT_MASK|GFP_RECLAIM_MASK);
765c4507 3394
cc9a6c87
MG
3395retry_cpuset:
3396 cpuset_mems_cookie = get_mems_allowed();
e7b691b0 3397 zonelist = node_zonelist(slab_node(), flags);
cc9a6c87 3398
3c517a61
CL
3399retry:
3400 /*
3401 * Look through allowed nodes for objects available
3402 * from existing per node queues.
3403 */
54a6eb5c
MG
3404 for_each_zone_zonelist(zone, z, zonelist, high_zoneidx) {
3405 nid = zone_to_nid(zone);
aedb0eb1 3406
54a6eb5c 3407 if (cpuset_zone_allowed_hardwall(zone, flags) &&
3c517a61 3408 cache->nodelists[nid] &&
481c5346 3409 cache->nodelists[nid]->free_objects) {
3c517a61
CL
3410 obj = ____cache_alloc_node(cache,
3411 flags | GFP_THISNODE, nid);
481c5346
CL
3412 if (obj)
3413 break;
3414 }
3c517a61
CL
3415 }
3416
cfce6604 3417 if (!obj) {
3c517a61
CL
3418 /*
3419 * This allocation will be performed within the constraints
3420 * of the current cpuset / memory policy requirements.
3421 * We may trigger various forms of reclaim on the allowed
3422 * set and go into memory reserves if necessary.
3423 */
dd47ea75
CL
3424 if (local_flags & __GFP_WAIT)
3425 local_irq_enable();
3426 kmem_flagcheck(cache, flags);
7d6e6d09 3427 obj = kmem_getpages(cache, local_flags, numa_mem_id());
dd47ea75
CL
3428 if (local_flags & __GFP_WAIT)
3429 local_irq_disable();
3c517a61
CL
3430 if (obj) {
3431 /*
3432 * Insert into the appropriate per node queues
3433 */
3434 nid = page_to_nid(virt_to_page(obj));
3435 if (cache_grow(cache, flags, nid, obj)) {
3436 obj = ____cache_alloc_node(cache,
3437 flags | GFP_THISNODE, nid);
3438 if (!obj)
3439 /*
3440 * Another processor may allocate the
3441 * objects in the slab since we are
3442 * not holding any locks.
3443 */
3444 goto retry;
3445 } else {
b6a60451 3446 /* cache_grow already freed obj */
3c517a61
CL
3447 obj = NULL;
3448 }
3449 }
aedb0eb1 3450 }
cc9a6c87
MG
3451
3452 if (unlikely(!put_mems_allowed(cpuset_mems_cookie) && !obj))
3453 goto retry_cpuset;
765c4507
CL
3454 return obj;
3455}
3456
e498be7d
CL
3457/*
 3458 * An interface to enable slab creation on nodeid
1da177e4 3459 */
8b98c169 3460static void *____cache_alloc_node(struct kmem_cache *cachep, gfp_t flags,
a737b3e2 3461 int nodeid)
e498be7d
CL
3462{
3463 struct list_head *entry;
b28a02de
PE
3464 struct slab *slabp;
3465 struct kmem_list3 *l3;
3466 void *obj;
b28a02de
PE
3467 int x;
3468
3469 l3 = cachep->nodelists[nodeid];
3470 BUG_ON(!l3);
3471
a737b3e2 3472retry:
ca3b9b91 3473 check_irq_off();
b28a02de
PE
3474 spin_lock(&l3->list_lock);
3475 entry = l3->slabs_partial.next;
3476 if (entry == &l3->slabs_partial) {
3477 l3->free_touched = 1;
3478 entry = l3->slabs_free.next;
3479 if (entry == &l3->slabs_free)
3480 goto must_grow;
3481 }
3482
3483 slabp = list_entry(entry, struct slab, list);
3484 check_spinlock_acquired_node(cachep, nodeid);
3485 check_slabp(cachep, slabp);
3486
3487 STATS_INC_NODEALLOCS(cachep);
3488 STATS_INC_ACTIVE(cachep);
3489 STATS_SET_HIGH(cachep);
3490
3491 BUG_ON(slabp->inuse == cachep->num);
3492
78d382d7 3493 obj = slab_get_obj(cachep, slabp, nodeid);
b28a02de
PE
3494 check_slabp(cachep, slabp);
3495 l3->free_objects--;
3496 /* move slabp to correct slabp list: */
3497 list_del(&slabp->list);
3498
a737b3e2 3499 if (slabp->free == BUFCTL_END)
b28a02de 3500 list_add(&slabp->list, &l3->slabs_full);
a737b3e2 3501 else
b28a02de 3502 list_add(&slabp->list, &l3->slabs_partial);
e498be7d 3503
b28a02de
PE
3504 spin_unlock(&l3->list_lock);
3505 goto done;
e498be7d 3506
a737b3e2 3507must_grow:
b28a02de 3508 spin_unlock(&l3->list_lock);
3c517a61 3509 x = cache_grow(cachep, flags | GFP_THISNODE, nodeid, NULL);
765c4507
CL
3510 if (x)
3511 goto retry;
1da177e4 3512
8c8cc2c1 3513 return fallback_alloc(cachep, flags);
e498be7d 3514
a737b3e2 3515done:
b28a02de 3516 return obj;
e498be7d 3517}
8c8cc2c1
PE
3518
3519/**
3520 * kmem_cache_alloc_node - Allocate an object on the specified node
3521 * @cachep: The cache to allocate from.
3522 * @flags: See kmalloc().
3523 * @nodeid: node number of the target node.
3524 * @caller: return address of caller, used for debug information
3525 *
3526 * Identical to kmem_cache_alloc but it will allocate memory on the given
3527 * node, which can improve the performance for cpu bound structures.
3528 *
3529 * Fallback to other node is possible if __GFP_THISNODE is not set.
3530 */
3531static __always_inline void *
3532__cache_alloc_node(struct kmem_cache *cachep, gfp_t flags, int nodeid,
3533 void *caller)
3534{
3535 unsigned long save_flags;
3536 void *ptr;
7d6e6d09 3537 int slab_node = numa_mem_id();
8c8cc2c1 3538
dcce284a 3539 flags &= gfp_allowed_mask;
7e85ee0c 3540
cf40bd16
NP
3541 lockdep_trace_alloc(flags);
3542
773ff60e 3543 if (slab_should_failslab(cachep, flags))
824ebef1
AM
3544 return NULL;
3545
8c8cc2c1
PE
3546 cache_alloc_debugcheck_before(cachep, flags);
3547 local_irq_save(save_flags);
3548
eacbbae3 3549 if (nodeid == NUMA_NO_NODE)
7d6e6d09 3550 nodeid = slab_node;
8c8cc2c1
PE
3551
3552 if (unlikely(!cachep->nodelists[nodeid])) {
3553 /* Node not bootstrapped yet */
3554 ptr = fallback_alloc(cachep, flags);
3555 goto out;
3556 }
3557
7d6e6d09 3558 if (nodeid == slab_node) {
8c8cc2c1
PE
3559 /*
3560 * Use the locally cached objects if possible.
3561 * However ____cache_alloc does not allow fallback
3562 * to other nodes. It may fail while we still have
3563 * objects on other nodes available.
3564 */
3565 ptr = ____cache_alloc(cachep, flags);
3566 if (ptr)
3567 goto out;
3568 }
3569 /* ___cache_alloc_node can fall back to other nodes */
3570 ptr = ____cache_alloc_node(cachep, flags, nodeid);
3571 out:
3572 local_irq_restore(save_flags);
3573 ptr = cache_alloc_debugcheck_after(cachep, flags, ptr, caller);
8c138bc0 3574 kmemleak_alloc_recursive(ptr, cachep->object_size, 1, cachep->flags,
d5cff635 3575 flags);
8c8cc2c1 3576
c175eea4 3577 if (likely(ptr))
8c138bc0 3578 kmemcheck_slab_alloc(cachep, flags, ptr, cachep->object_size);
c175eea4 3579
d07dbea4 3580 if (unlikely((flags & __GFP_ZERO) && ptr))
8c138bc0 3581 memset(ptr, 0, cachep->object_size);
d07dbea4 3582
8c8cc2c1
PE
3583 return ptr;
3584}
3585
3586static __always_inline void *
3587__do_cache_alloc(struct kmem_cache *cache, gfp_t flags)
3588{
3589 void *objp;
3590
3591 if (unlikely(current->flags & (PF_SPREAD_SLAB | PF_MEMPOLICY))) {
3592 objp = alternate_node_alloc(cache, flags);
3593 if (objp)
3594 goto out;
3595 }
3596 objp = ____cache_alloc(cache, flags);
3597
3598 /*
3599 * We may just have run out of memory on the local node.
3600 * ____cache_alloc_node() knows how to locate memory on other nodes
3601 */
7d6e6d09
LS
3602 if (!objp)
3603 objp = ____cache_alloc_node(cache, flags, numa_mem_id());
8c8cc2c1
PE
3604
3605 out:
3606 return objp;
3607}
3608#else
3609
3610static __always_inline void *
3611__do_cache_alloc(struct kmem_cache *cachep, gfp_t flags)
3612{
3613 return ____cache_alloc(cachep, flags);
3614}
3615
3616#endif /* CONFIG_NUMA */
3617
3618static __always_inline void *
3619__cache_alloc(struct kmem_cache *cachep, gfp_t flags, void *caller)
3620{
3621 unsigned long save_flags;
3622 void *objp;
3623
dcce284a 3624 flags &= gfp_allowed_mask;
7e85ee0c 3625
cf40bd16
NP
3626 lockdep_trace_alloc(flags);
3627
773ff60e 3628 if (slab_should_failslab(cachep, flags))
824ebef1
AM
3629 return NULL;
3630
8c8cc2c1
PE
3631 cache_alloc_debugcheck_before(cachep, flags);
3632 local_irq_save(save_flags);
3633 objp = __do_cache_alloc(cachep, flags);
3634 local_irq_restore(save_flags);
3635 objp = cache_alloc_debugcheck_after(cachep, flags, objp, caller);
8c138bc0 3636 kmemleak_alloc_recursive(objp, cachep->object_size, 1, cachep->flags,
d5cff635 3637 flags);
8c8cc2c1
PE
3638 prefetchw(objp);
3639
c175eea4 3640 if (likely(objp))
8c138bc0 3641 kmemcheck_slab_alloc(cachep, flags, objp, cachep->object_size);
c175eea4 3642
d07dbea4 3643 if (unlikely((flags & __GFP_ZERO) && objp))
8c138bc0 3644 memset(objp, 0, cachep->object_size);
d07dbea4 3645
8c8cc2c1
PE
3646 return objp;
3647}
e498be7d
CL
3648
3649/*
 3650 * Caller needs to acquire the correct kmem_list3's list_lock
3651 */
343e0d7a 3652static void free_block(struct kmem_cache *cachep, void **objpp, int nr_objects,
b28a02de 3653 int node)
1da177e4
LT
3654{
3655 int i;
e498be7d 3656 struct kmem_list3 *l3;
1da177e4
LT
3657
3658 for (i = 0; i < nr_objects; i++) {
072bb0aa 3659 void *objp;
1da177e4 3660 struct slab *slabp;
1da177e4 3661
072bb0aa
MG
3662 clear_obj_pfmemalloc(&objpp[i]);
3663 objp = objpp[i];
3664
6ed5eb22 3665 slabp = virt_to_slab(objp);
ff69416e 3666 l3 = cachep->nodelists[node];
1da177e4 3667 list_del(&slabp->list);
ff69416e 3668 check_spinlock_acquired_node(cachep, node);
1da177e4 3669 check_slabp(cachep, slabp);
78d382d7 3670 slab_put_obj(cachep, slabp, objp, node);
1da177e4 3671 STATS_DEC_ACTIVE(cachep);
e498be7d 3672 l3->free_objects++;
1da177e4
LT
3673 check_slabp(cachep, slabp);
3674
3675 /* fixup slab chains */
3676 if (slabp->inuse == 0) {
e498be7d
CL
3677 if (l3->free_objects > l3->free_limit) {
3678 l3->free_objects -= cachep->num;
e5ac9c5a
RT
 3679				/* No need to drop any previously held
 3680				 * lock here; even if we have an off-slab slab
 3681				 * descriptor, it is guaranteed to come from
 3682				 * a different cache (refer to the comments before
 3683				 * alloc_slabmgmt).
3684 */
1da177e4
LT
3685 slab_destroy(cachep, slabp);
3686 } else {
e498be7d 3687 list_add(&slabp->list, &l3->slabs_free);
1da177e4
LT
3688 }
3689 } else {
3690 /* Unconditionally move a slab to the end of the
3691 * partial list on free - maximum time for the
3692 * other objects to be freed, too.
3693 */
e498be7d 3694 list_add_tail(&slabp->list, &l3->slabs_partial);
1da177e4
LT
3695 }
3696 }
3697}
3698
343e0d7a 3699static void cache_flusharray(struct kmem_cache *cachep, struct array_cache *ac)
1da177e4
LT
3700{
3701 int batchcount;
e498be7d 3702 struct kmem_list3 *l3;
7d6e6d09 3703 int node = numa_mem_id();
1da177e4
LT
3704
3705 batchcount = ac->batchcount;
3706#if DEBUG
3707 BUG_ON(!batchcount || batchcount > ac->avail);
3708#endif
3709 check_irq_off();
ff69416e 3710 l3 = cachep->nodelists[node];
873623df 3711 spin_lock(&l3->list_lock);
e498be7d
CL
3712 if (l3->shared) {
3713 struct array_cache *shared_array = l3->shared;
b28a02de 3714 int max = shared_array->limit - shared_array->avail;
1da177e4
LT
3715 if (max) {
3716 if (batchcount > max)
3717 batchcount = max;
e498be7d 3718 memcpy(&(shared_array->entry[shared_array->avail]),
b28a02de 3719 ac->entry, sizeof(void *) * batchcount);
1da177e4
LT
3720 shared_array->avail += batchcount;
3721 goto free_done;
3722 }
3723 }
3724
ff69416e 3725 free_block(cachep, ac->entry, batchcount, node);
a737b3e2 3726free_done:
1da177e4
LT
3727#if STATS
3728 {
3729 int i = 0;
3730 struct list_head *p;
3731
e498be7d
CL
3732 p = l3->slabs_free.next;
3733 while (p != &(l3->slabs_free)) {
1da177e4
LT
3734 struct slab *slabp;
3735
3736 slabp = list_entry(p, struct slab, list);
3737 BUG_ON(slabp->inuse);
3738
3739 i++;
3740 p = p->next;
3741 }
3742 STATS_SET_FREEABLE(cachep, i);
3743 }
3744#endif
e498be7d 3745 spin_unlock(&l3->list_lock);
1da177e4 3746 ac->avail -= batchcount;
a737b3e2 3747 memmove(ac->entry, &(ac->entry[batchcount]), sizeof(void *)*ac->avail);
1da177e4
LT
3748}
3749
3750/*
a737b3e2
AM
 3751 * Release an obj back to its cache. If the obj has a constructed state, it must
 3752 * be in this state _before_ it is released. Called with interrupts disabled.
1da177e4 3753 */
a947eb95
SS
3754static inline void __cache_free(struct kmem_cache *cachep, void *objp,
3755 void *caller)
1da177e4 3756{
9a2dba4b 3757 struct array_cache *ac = cpu_cache_get(cachep);
1da177e4
LT
3758
3759 check_irq_off();
d5cff635 3760 kmemleak_free_recursive(objp, cachep->flags);
a947eb95 3761 objp = cache_free_debugcheck(cachep, objp, caller);
1da177e4 3762
8c138bc0 3763 kmemcheck_slab_free(cachep, objp, cachep->object_size);
c175eea4 3764
1807a1aa
SS
3765 /*
 3766	 * Skip calling cache_free_alien() when the platform is not NUMA.
 3767	 * This avoids the cache misses that happen while accessing slabp (which
 3768	 * is a per-page memory reference) to get the nodeid. Instead use a global
 3769	 * variable to skip the call, which is most likely to be present in
 3770	 * the cache.
3771 */
b6e68bc1 3772 if (nr_online_nodes > 1 && cache_free_alien(cachep, objp))
729bd0b7
PE
3773 return;
3774
1da177e4
LT
3775 if (likely(ac->avail < ac->limit)) {
3776 STATS_INC_FREEHIT(cachep);
1da177e4
LT
3777 } else {
3778 STATS_INC_FREEMISS(cachep);
3779 cache_flusharray(cachep, ac);
1da177e4 3780 }
42c8c99c 3781
072bb0aa 3782 ac_put_obj(cachep, ac, objp);
1da177e4
LT
3783}
3784
3785/**
3786 * kmem_cache_alloc - Allocate an object
3787 * @cachep: The cache to allocate from.
3788 * @flags: See kmalloc().
3789 *
3790 * Allocate an object from this cache. The flags are only relevant
3791 * if the cache has no available objects.
3792 */
343e0d7a 3793void *kmem_cache_alloc(struct kmem_cache *cachep, gfp_t flags)
1da177e4 3794{
36555751
EGM
3795 void *ret = __cache_alloc(cachep, flags, __builtin_return_address(0));
3796
ca2b84cb 3797 trace_kmem_cache_alloc(_RET_IP_, ret,
8c138bc0 3798 cachep->object_size, cachep->size, flags);
36555751
EGM
3799
3800 return ret;
1da177e4
LT
3801}
3802EXPORT_SYMBOL(kmem_cache_alloc);
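/*
 * Illustrative usage sketch (not part of slab.c; the struct, cache and
 * variable names below are hypothetical). A subsystem typically creates a
 * cache for its object type once and then allocates and frees through it:
 *
 *	struct my_obj {
 *		int id;
 *		struct list_head list;
 *	};
 *	static struct kmem_cache *my_cache;
 *
 *	my_cache = kmem_cache_create("my_obj", sizeof(struct my_obj),
 *				     0, SLAB_HWCACHE_ALIGN, NULL);
 *	obj = kmem_cache_alloc(my_cache, GFP_KERNEL);
 *	if (!obj)
 *		return -ENOMEM;
 *	...
 *	kmem_cache_free(my_cache, obj);
 */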
3803
0f24f128 3804#ifdef CONFIG_TRACING
85beb586
SR
3805void *
3806kmem_cache_alloc_trace(size_t size, struct kmem_cache *cachep, gfp_t flags)
36555751 3807{
85beb586
SR
3808 void *ret;
3809
3810 ret = __cache_alloc(cachep, flags, __builtin_return_address(0));
3811
3812 trace_kmalloc(_RET_IP_, ret,
3813 size, slab_buffer_size(cachep), flags);
3814 return ret;
36555751 3815}
85beb586 3816EXPORT_SYMBOL(kmem_cache_alloc_trace);
36555751
EGM
3817#endif
3818
1da177e4 3819#ifdef CONFIG_NUMA
8b98c169
CH
3820void *kmem_cache_alloc_node(struct kmem_cache *cachep, gfp_t flags, int nodeid)
3821{
36555751
EGM
3822 void *ret = __cache_alloc_node(cachep, flags, nodeid,
3823 __builtin_return_address(0));
3824
ca2b84cb 3825 trace_kmem_cache_alloc_node(_RET_IP_, ret,
8c138bc0 3826 cachep->object_size, cachep->size,
ca2b84cb 3827 flags, nodeid);
36555751
EGM
3828
3829 return ret;
8b98c169 3830}
1da177e4
LT
3831EXPORT_SYMBOL(kmem_cache_alloc_node);
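/*
 * Illustrative sketch (not part of slab.c; names are hypothetical):
 * allocating a helper structure close to the CPU that will use it. Passing
 * NUMA_NO_NODE instead of a node id makes the allocation fall back to the
 * local node.
 *
 *	int nid = cpu_to_node(cpu);
 *
 *	obj = kmem_cache_alloc_node(my_cache, GFP_KERNEL, nid);
 *	if (!obj)
 *		obj = kmem_cache_alloc(my_cache, GFP_KERNEL);
 */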
3832
0f24f128 3833#ifdef CONFIG_TRACING
85beb586
SR
3834void *kmem_cache_alloc_node_trace(size_t size,
3835 struct kmem_cache *cachep,
3836 gfp_t flags,
3837 int nodeid)
36555751 3838{
85beb586
SR
3839 void *ret;
3840
3841 ret = __cache_alloc_node(cachep, flags, nodeid,
36555751 3842 __builtin_return_address(0));
85beb586
SR
3843 trace_kmalloc_node(_RET_IP_, ret,
3844 size, slab_buffer_size(cachep),
3845 flags, nodeid);
3846 return ret;
36555751 3847}
85beb586 3848EXPORT_SYMBOL(kmem_cache_alloc_node_trace);
36555751
EGM
3849#endif
3850
8b98c169
CH
3851static __always_inline void *
3852__do_kmalloc_node(size_t size, gfp_t flags, int node, void *caller)
97e2bde4 3853{
343e0d7a 3854 struct kmem_cache *cachep;
97e2bde4
MS
3855
3856 cachep = kmem_find_general_cachep(size, flags);
6cb8f913
CL
3857 if (unlikely(ZERO_OR_NULL_PTR(cachep)))
3858 return cachep;
85beb586 3859 return kmem_cache_alloc_node_trace(size, cachep, flags, node);
97e2bde4 3860}
8b98c169 3861
0bb38a5c 3862#if defined(CONFIG_DEBUG_SLAB) || defined(CONFIG_TRACING)
8b98c169
CH
3863void *__kmalloc_node(size_t size, gfp_t flags, int node)
3864{
3865 return __do_kmalloc_node(size, flags, node,
3866 __builtin_return_address(0));
3867}
dbe5e69d 3868EXPORT_SYMBOL(__kmalloc_node);
8b98c169
CH
3869
3870void *__kmalloc_node_track_caller(size_t size, gfp_t flags,
ce71e27c 3871 int node, unsigned long caller)
8b98c169 3872{
ce71e27c 3873 return __do_kmalloc_node(size, flags, node, (void *)caller);
8b98c169
CH
3874}
3875EXPORT_SYMBOL(__kmalloc_node_track_caller);
3876#else
3877void *__kmalloc_node(size_t size, gfp_t flags, int node)
3878{
3879 return __do_kmalloc_node(size, flags, node, NULL);
3880}
3881EXPORT_SYMBOL(__kmalloc_node);
0bb38a5c 3882#endif /* CONFIG_DEBUG_SLAB || CONFIG_TRACING */
8b98c169 3883#endif /* CONFIG_NUMA */
1da177e4
LT
3884
3885/**
800590f5 3886 * __do_kmalloc - allocate memory
1da177e4 3887 * @size: how many bytes of memory are required.
800590f5 3888 * @flags: the type of memory to allocate (see kmalloc).
911851e6 3889 * @caller: function caller for debug tracking of the caller
1da177e4 3890 */
7fd6b141
PE
3891static __always_inline void *__do_kmalloc(size_t size, gfp_t flags,
3892 void *caller)
1da177e4 3893{
343e0d7a 3894 struct kmem_cache *cachep;
36555751 3895 void *ret;
1da177e4 3896
97e2bde4
MS
3897 /* If you want to save a few bytes .text space: replace
3898 * __ with kmem_.
3899 * Then kmalloc uses the uninlined functions instead of the inline
3900 * functions.
3901 */
3902 cachep = __find_general_cachep(size, flags);
a5c96d8a
LT
3903 if (unlikely(ZERO_OR_NULL_PTR(cachep)))
3904 return cachep;
36555751
EGM
3905 ret = __cache_alloc(cachep, flags, caller);
3906
ca2b84cb 3907 trace_kmalloc((unsigned long) caller, ret,
3b0efdfa 3908 size, cachep->size, flags);
36555751
EGM
3909
3910 return ret;
7fd6b141
PE
3911}
3912
7fd6b141 3913
0bb38a5c 3914#if defined(CONFIG_DEBUG_SLAB) || defined(CONFIG_TRACING)
7fd6b141
PE
3915void *__kmalloc(size_t size, gfp_t flags)
3916{
871751e2 3917 return __do_kmalloc(size, flags, __builtin_return_address(0));
1da177e4
LT
3918}
3919EXPORT_SYMBOL(__kmalloc);
3920
ce71e27c 3921void *__kmalloc_track_caller(size_t size, gfp_t flags, unsigned long caller)
7fd6b141 3922{
ce71e27c 3923 return __do_kmalloc(size, flags, (void *)caller);
7fd6b141
PE
3924}
3925EXPORT_SYMBOL(__kmalloc_track_caller);
1d2c8eea
CH
3926
3927#else
3928void *__kmalloc(size_t size, gfp_t flags)
3929{
3930 return __do_kmalloc(size, flags, NULL);
3931}
3932EXPORT_SYMBOL(__kmalloc);
7fd6b141
PE
3933#endif
3934
1da177e4
LT
3935/**
3936 * kmem_cache_free - Deallocate an object
3937 * @cachep: The cache the allocation was from.
3938 * @objp: The previously allocated object.
3939 *
3940 * Free an object which was previously allocated from this
3941 * cache.
3942 */
343e0d7a 3943void kmem_cache_free(struct kmem_cache *cachep, void *objp)
1da177e4
LT
3944{
3945 unsigned long flags;
3946
3947 local_irq_save(flags);
d97d476b 3948 debug_check_no_locks_freed(objp, cachep->object_size);
3ac7fe5a 3949 if (!(cachep->flags & SLAB_DEBUG_OBJECTS))
8c138bc0 3950 debug_check_no_obj_freed(objp, cachep->object_size);
a947eb95 3951 __cache_free(cachep, objp, __builtin_return_address(0));
1da177e4 3952 local_irq_restore(flags);
36555751 3953
ca2b84cb 3954 trace_kmem_cache_free(_RET_IP_, objp);
1da177e4
LT
3955}
3956EXPORT_SYMBOL(kmem_cache_free);
3957
1da177e4
LT
3958/**
3959 * kfree - free previously allocated memory
3960 * @objp: pointer returned by kmalloc.
3961 *
80e93eff
PE
3962 * If @objp is NULL, no operation is performed.
3963 *
1da177e4
LT
3964 * Don't free memory not originally allocated by kmalloc()
3965 * or you will run into trouble.
3966 */
3967void kfree(const void *objp)
3968{
343e0d7a 3969 struct kmem_cache *c;
1da177e4
LT
3970 unsigned long flags;
3971
2121db74
PE
3972 trace_kfree(_RET_IP_, objp);
3973
6cb8f913 3974 if (unlikely(ZERO_OR_NULL_PTR(objp)))
1da177e4
LT
3975 return;
3976 local_irq_save(flags);
3977 kfree_debugcheck(objp);
6ed5eb22 3978 c = virt_to_cache(objp);
8c138bc0
CL
3979 debug_check_no_locks_freed(objp, c->object_size);
3980
3981 debug_check_no_obj_freed(objp, c->object_size);
a947eb95 3982 __cache_free(c, (void *)objp, __builtin_return_address(0));
1da177e4
LT
3983 local_irq_restore(flags);
3984}
3985EXPORT_SYMBOL(kfree);
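/*
 * Illustrative sketch (not part of slab.c; the buffer name is hypothetical):
 * the usual kmalloc()/kfree() pairing. kfree(NULL) and kfree(ZERO_SIZE_PTR)
 * are both no-ops, so error paths may free unconditionally:
 *
 *	buf = kmalloc(len, GFP_KERNEL);
 *	if (!buf)
 *		return -ENOMEM;
 *	...
 *	kfree(buf);
 */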
3986
343e0d7a 3987unsigned int kmem_cache_size(struct kmem_cache *cachep)
1da177e4 3988{
8c138bc0 3989 return cachep->object_size;
1da177e4
LT
3990}
3991EXPORT_SYMBOL(kmem_cache_size);
3992
e498be7d 3993/*
183ff22b 3994 * This initializes kmem_list3 or resizes various caches for all nodes.
e498be7d 3995 */
83b519e8 3996static int alloc_kmemlist(struct kmem_cache *cachep, gfp_t gfp)
e498be7d
CL
3997{
3998 int node;
3999 struct kmem_list3 *l3;
cafeb02e 4000 struct array_cache *new_shared;
3395ee05 4001 struct array_cache **new_alien = NULL;
e498be7d 4002
9c09a95c 4003 for_each_online_node(node) {
cafeb02e 4004
3395ee05 4005 if (use_alien_caches) {
83b519e8 4006 new_alien = alloc_alien_cache(node, cachep->limit, gfp);
3395ee05
PM
4007 if (!new_alien)
4008 goto fail;
4009 }
cafeb02e 4010
63109846
ED
4011 new_shared = NULL;
4012 if (cachep->shared) {
4013 new_shared = alloc_arraycache(node,
0718dc2a 4014 cachep->shared*cachep->batchcount,
83b519e8 4015 0xbaadf00d, gfp);
63109846
ED
4016 if (!new_shared) {
4017 free_alien_cache(new_alien);
4018 goto fail;
4019 }
0718dc2a 4020 }
cafeb02e 4021
a737b3e2
AM
4022 l3 = cachep->nodelists[node];
4023 if (l3) {
cafeb02e
CL
4024 struct array_cache *shared = l3->shared;
4025
e498be7d
CL
4026 spin_lock_irq(&l3->list_lock);
4027
cafeb02e 4028 if (shared)
0718dc2a
CL
4029 free_block(cachep, shared->entry,
4030 shared->avail, node);
e498be7d 4031
cafeb02e
CL
4032 l3->shared = new_shared;
4033 if (!l3->alien) {
e498be7d
CL
4034 l3->alien = new_alien;
4035 new_alien = NULL;
4036 }
b28a02de 4037 l3->free_limit = (1 + nr_cpus_node(node)) *
a737b3e2 4038 cachep->batchcount + cachep->num;
e498be7d 4039 spin_unlock_irq(&l3->list_lock);
cafeb02e 4040 kfree(shared);
e498be7d
CL
4041 free_alien_cache(new_alien);
4042 continue;
4043 }
83b519e8 4044 l3 = kmalloc_node(sizeof(struct kmem_list3), gfp, node);
0718dc2a
CL
4045 if (!l3) {
4046 free_alien_cache(new_alien);
4047 kfree(new_shared);
e498be7d 4048 goto fail;
0718dc2a 4049 }
e498be7d
CL
4050
4051 kmem_list3_init(l3);
4052 l3->next_reap = jiffies + REAPTIMEOUT_LIST3 +
a737b3e2 4053 ((unsigned long)cachep) % REAPTIMEOUT_LIST3;
cafeb02e 4054 l3->shared = new_shared;
e498be7d 4055 l3->alien = new_alien;
b28a02de 4056 l3->free_limit = (1 + nr_cpus_node(node)) *
a737b3e2 4057 cachep->batchcount + cachep->num;
e498be7d
CL
4058 cachep->nodelists[node] = l3;
4059 }
cafeb02e 4060 return 0;
0718dc2a 4061
a737b3e2 4062fail:
3b0efdfa 4063 if (!cachep->list.next) {
0718dc2a
CL
4064 /* Cache is not active yet. Roll back what we did */
4065 node--;
4066 while (node >= 0) {
4067 if (cachep->nodelists[node]) {
4068 l3 = cachep->nodelists[node];
4069
4070 kfree(l3->shared);
4071 free_alien_cache(l3->alien);
4072 kfree(l3);
4073 cachep->nodelists[node] = NULL;
4074 }
4075 node--;
4076 }
4077 }
cafeb02e 4078 return -ENOMEM;
e498be7d
CL
4079}
4080
1da177e4 4081struct ccupdate_struct {
343e0d7a 4082 struct kmem_cache *cachep;
acfe7d74 4083 struct array_cache *new[0];
1da177e4
LT
4084};
4085
4086static void do_ccupdate_local(void *info)
4087{
a737b3e2 4088 struct ccupdate_struct *new = info;
1da177e4
LT
4089 struct array_cache *old;
4090
4091 check_irq_off();
9a2dba4b 4092 old = cpu_cache_get(new->cachep);
e498be7d 4093
1da177e4
LT
4094 new->cachep->array[smp_processor_id()] = new->new[smp_processor_id()];
4095 new->new[smp_processor_id()] = old;
4096}
4097
18004c5d 4098/* Always called with the slab_mutex held */
a737b3e2 4099static int do_tune_cpucache(struct kmem_cache *cachep, int limit,
83b519e8 4100 int batchcount, int shared, gfp_t gfp)
1da177e4 4101{
d2e7b7d0 4102 struct ccupdate_struct *new;
2ed3a4ef 4103 int i;
1da177e4 4104
acfe7d74
ED
4105 new = kzalloc(sizeof(*new) + nr_cpu_ids * sizeof(struct array_cache *),
4106 gfp);
d2e7b7d0
SS
4107 if (!new)
4108 return -ENOMEM;
4109
e498be7d 4110 for_each_online_cpu(i) {
7d6e6d09 4111 new->new[i] = alloc_arraycache(cpu_to_mem(i), limit,
83b519e8 4112 batchcount, gfp);
d2e7b7d0 4113 if (!new->new[i]) {
b28a02de 4114 for (i--; i >= 0; i--)
d2e7b7d0
SS
4115 kfree(new->new[i]);
4116 kfree(new);
e498be7d 4117 return -ENOMEM;
1da177e4
LT
4118 }
4119 }
d2e7b7d0 4120 new->cachep = cachep;
1da177e4 4121
15c8b6c1 4122 on_each_cpu(do_ccupdate_local, (void *)new, 1);
e498be7d 4123
1da177e4 4124 check_irq_on();
1da177e4
LT
4125 cachep->batchcount = batchcount;
4126 cachep->limit = limit;
e498be7d 4127 cachep->shared = shared;
1da177e4 4128
e498be7d 4129 for_each_online_cpu(i) {
d2e7b7d0 4130 struct array_cache *ccold = new->new[i];
1da177e4
LT
4131 if (!ccold)
4132 continue;
7d6e6d09
LS
4133 spin_lock_irq(&cachep->nodelists[cpu_to_mem(i)]->list_lock);
4134 free_block(cachep, ccold->entry, ccold->avail, cpu_to_mem(i));
4135 spin_unlock_irq(&cachep->nodelists[cpu_to_mem(i)]->list_lock);
1da177e4
LT
4136 kfree(ccold);
4137 }
d2e7b7d0 4138 kfree(new);
83b519e8 4139 return alloc_kmemlist(cachep, gfp);
1da177e4
LT
4140}
4141
18004c5d 4142/* Called with slab_mutex held always */
83b519e8 4143static int enable_cpucache(struct kmem_cache *cachep, gfp_t gfp)
1da177e4
LT
4144{
4145 int err;
4146 int limit, shared;
4147
a737b3e2
AM
4148 /*
4149 * The head array serves three purposes:
1da177e4
LT
4150 * - create a LIFO ordering, i.e. return objects that are cache-warm
4151 * - reduce the number of spinlock operations.
a737b3e2 4152 * - reduce the number of linked list operations on the slab and
1da177e4
LT
4153 * bufctl chains: array operations are cheaper.
 4154	 * The numbers are guessed; we should auto-tune as described by
4155 * Bonwick.
4156 */
3b0efdfa 4157 if (cachep->size > 131072)
1da177e4 4158 limit = 1;
3b0efdfa 4159 else if (cachep->size > PAGE_SIZE)
1da177e4 4160 limit = 8;
3b0efdfa 4161 else if (cachep->size > 1024)
1da177e4 4162 limit = 24;
3b0efdfa 4163 else if (cachep->size > 256)
1da177e4
LT
4164 limit = 54;
4165 else
4166 limit = 120;
4167
a737b3e2
AM
4168 /*
4169 * CPU bound tasks (e.g. network routing) can exhibit cpu bound
1da177e4
LT
4170 * allocation behaviour: Most allocs on one cpu, most free operations
4171 * on another cpu. For these cases, an efficient object passing between
4172 * cpus is necessary. This is provided by a shared array. The array
4173 * replaces Bonwick's magazine layer.
4174 * On uniprocessor, it's functionally equivalent (but less efficient)
4175 * to a larger limit. Thus disabled by default.
4176 */
4177 shared = 0;
3b0efdfa 4178 if (cachep->size <= PAGE_SIZE && num_possible_cpus() > 1)
1da177e4 4179 shared = 8;
1da177e4
LT
4180
4181#if DEBUG
a737b3e2
AM
4182 /*
 4183	 * With debugging enabled, a large batchcount leads to excessively long
 4184	 * periods with local interrupts disabled. Limit the batchcount.
1da177e4
LT
4185 */
4186 if (limit > 32)
4187 limit = 32;
4188#endif
83b519e8 4189 err = do_tune_cpucache(cachep, limit, (limit + 1) / 2, shared, gfp);
1da177e4
LT
4190 if (err)
4191 printk(KERN_ERR "enable_cpucache failed for %s, error %d.\n",
b28a02de 4192 cachep->name, -err);
2ed3a4ef 4193 return err;
1da177e4
LT
4194}
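/*
 * Worked example of the heuristic above (illustrative, assuming an SMP
 * system and !DEBUG): a cache with 512-byte objects falls into the "> 256"
 * bucket, so enable_cpucache() picks limit = 54 and a batchcount of
 * (54 + 1) / 2 = 27; because 512 <= PAGE_SIZE, the shared array is sized 8.
 */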
4195
1b55253a
CL
4196/*
 4197 * Drain an array if it contains any elements, taking the l3 lock only if
b18e7e65
CL
4198 * necessary. Note that the l3 listlock also protects the array_cache
4199 * if drain_array() is used on the shared array.
1b55253a 4200 */
68a1b195 4201static void drain_array(struct kmem_cache *cachep, struct kmem_list3 *l3,
1b55253a 4202 struct array_cache *ac, int force, int node)
1da177e4
LT
4203{
4204 int tofree;
4205
1b55253a
CL
4206 if (!ac || !ac->avail)
4207 return;
1da177e4
LT
4208 if (ac->touched && !force) {
4209 ac->touched = 0;
b18e7e65 4210 } else {
1b55253a 4211 spin_lock_irq(&l3->list_lock);
b18e7e65
CL
4212 if (ac->avail) {
4213 tofree = force ? ac->avail : (ac->limit + 4) / 5;
4214 if (tofree > ac->avail)
4215 tofree = (ac->avail + 1) / 2;
4216 free_block(cachep, ac->entry, tofree, node);
4217 ac->avail -= tofree;
4218 memmove(ac->entry, &(ac->entry[tofree]),
4219 sizeof(void *) * ac->avail);
4220 }
1b55253a 4221 spin_unlock_irq(&l3->list_lock);
1da177e4
LT
4222 }
4223}
4224
4225/**
4226 * cache_reap - Reclaim memory from caches.
05fb6bf0 4227 * @w: work descriptor
1da177e4
LT
4228 *
4229 * Called from workqueue/eventd every few seconds.
4230 * Purpose:
4231 * - clear the per-cpu caches for this CPU.
4232 * - return freeable pages to the main free memory pool.
4233 *
a737b3e2
AM
4234 * If we cannot acquire the cache chain mutex then just give up - we'll try
4235 * again on the next iteration.
1da177e4 4236 */
7c5cae36 4237static void cache_reap(struct work_struct *w)
1da177e4 4238{
7a7c381d 4239 struct kmem_cache *searchp;
e498be7d 4240 struct kmem_list3 *l3;
7d6e6d09 4241 int node = numa_mem_id();
bf6aede7 4242 struct delayed_work *work = to_delayed_work(w);
1da177e4 4243
18004c5d 4244 if (!mutex_trylock(&slab_mutex))
1da177e4 4245 /* Give up. Setup the next iteration. */
7c5cae36 4246 goto out;
1da177e4 4247
18004c5d 4248 list_for_each_entry(searchp, &slab_caches, list) {
1da177e4
LT
4249 check_irq_on();
4250
35386e3b
CL
4251 /*
4252 * We only take the l3 lock if absolutely necessary and we
4253 * have established with reasonable certainty that
4254 * we can do some work if the lock was obtained.
4255 */
aab2207c 4256 l3 = searchp->nodelists[node];
35386e3b 4257
8fce4d8e 4258 reap_alien(searchp, l3);
1da177e4 4259
aab2207c 4260 drain_array(searchp, l3, cpu_cache_get(searchp), 0, node);
1da177e4 4261
35386e3b
CL
4262 /*
4263 * These are racy checks but it does not matter
4264 * if we skip one check or scan twice.
4265 */
e498be7d 4266 if (time_after(l3->next_reap, jiffies))
35386e3b 4267 goto next;
1da177e4 4268
e498be7d 4269 l3->next_reap = jiffies + REAPTIMEOUT_LIST3;
1da177e4 4270
aab2207c 4271 drain_array(searchp, l3, l3->shared, 0, node);
1da177e4 4272
ed11d9eb 4273 if (l3->free_touched)
e498be7d 4274 l3->free_touched = 0;
ed11d9eb
CL
4275 else {
4276 int freed;
1da177e4 4277
ed11d9eb
CL
4278 freed = drain_freelist(searchp, l3, (l3->free_limit +
4279 5 * searchp->num - 1) / (5 * searchp->num));
4280 STATS_ADD_REAPED(searchp, freed);
4281 }
35386e3b 4282next:
1da177e4
LT
4283 cond_resched();
4284 }
4285 check_irq_on();
18004c5d 4286 mutex_unlock(&slab_mutex);
8fce4d8e 4287 next_reap_node();
7c5cae36 4288out:
a737b3e2 4289 /* Set up the next iteration */
7c5cae36 4290 schedule_delayed_work(work, round_jiffies_relative(REAPTIMEOUT_CPUC));
1da177e4
LT
4291}
4292
158a9624 4293#ifdef CONFIG_SLABINFO
1da177e4 4294
85289f98 4295static void print_slabinfo_header(struct seq_file *m)
1da177e4 4296{
85289f98
PE
4297 /*
4298 * Output format version, so at least we can change it
4299 * without _too_ many complaints.
4300 */
1da177e4 4301#if STATS
85289f98 4302 seq_puts(m, "slabinfo - version: 2.1 (statistics)\n");
1da177e4 4303#else
85289f98 4304 seq_puts(m, "slabinfo - version: 2.1\n");
1da177e4 4305#endif
85289f98
PE
4306 seq_puts(m, "# name <active_objs> <num_objs> <objsize> "
4307 "<objperslab> <pagesperslab>");
4308 seq_puts(m, " : tunables <limit> <batchcount> <sharedfactor>");
4309 seq_puts(m, " : slabdata <active_slabs> <num_slabs> <sharedavail>");
1da177e4 4310#if STATS
85289f98 4311 seq_puts(m, " : globalstat <listallocs> <maxobjs> <grown> <reaped> "
fb7faf33 4312 "<error> <maxfreeable> <nodeallocs> <remotefrees> <alienoverflow>");
85289f98 4313 seq_puts(m, " : cpustat <allochit> <allocmiss> <freehit> <freemiss>");
1da177e4 4314#endif
85289f98
PE
4315 seq_putc(m, '\n');
4316}
4317
4318static void *s_start(struct seq_file *m, loff_t *pos)
4319{
4320 loff_t n = *pos;
85289f98 4321
18004c5d 4322 mutex_lock(&slab_mutex);
85289f98
PE
4323 if (!n)
4324 print_slabinfo_header(m);
b92151ba 4325
18004c5d 4326 return seq_list_start(&slab_caches, *pos);
1da177e4
LT
4327}
4328
4329static void *s_next(struct seq_file *m, void *p, loff_t *pos)
4330{
18004c5d 4331 return seq_list_next(p, &slab_caches, pos);
1da177e4
LT
4332}
4333
4334static void s_stop(struct seq_file *m, void *p)
4335{
18004c5d 4336 mutex_unlock(&slab_mutex);
1da177e4
LT
4337}
4338
4339static int s_show(struct seq_file *m, void *p)
4340{
3b0efdfa 4341 struct kmem_cache *cachep = list_entry(p, struct kmem_cache, list);
b28a02de
PE
4342 struct slab *slabp;
4343 unsigned long active_objs;
4344 unsigned long num_objs;
4345 unsigned long active_slabs = 0;
4346 unsigned long num_slabs, free_objects = 0, shared_avail = 0;
e498be7d 4347 const char *name;
1da177e4 4348 char *error = NULL;
e498be7d
CL
4349 int node;
4350 struct kmem_list3 *l3;
1da177e4 4351
1da177e4
LT
4352 active_objs = 0;
4353 num_slabs = 0;
e498be7d
CL
4354 for_each_online_node(node) {
4355 l3 = cachep->nodelists[node];
4356 if (!l3)
4357 continue;
4358
ca3b9b91
RT
4359 check_irq_on();
4360 spin_lock_irq(&l3->list_lock);
e498be7d 4361
7a7c381d 4362 list_for_each_entry(slabp, &l3->slabs_full, list) {
e498be7d
CL
4363 if (slabp->inuse != cachep->num && !error)
4364 error = "slabs_full accounting error";
4365 active_objs += cachep->num;
4366 active_slabs++;
4367 }
7a7c381d 4368 list_for_each_entry(slabp, &l3->slabs_partial, list) {
e498be7d
CL
4369 if (slabp->inuse == cachep->num && !error)
4370 error = "slabs_partial inuse accounting error";
4371 if (!slabp->inuse && !error)
4372 error = "slabs_partial/inuse accounting error";
4373 active_objs += slabp->inuse;
4374 active_slabs++;
4375 }
7a7c381d 4376 list_for_each_entry(slabp, &l3->slabs_free, list) {
e498be7d
CL
4377 if (slabp->inuse && !error)
4378 error = "slabs_free/inuse accounting error";
4379 num_slabs++;
4380 }
4381 free_objects += l3->free_objects;
4484ebf1
RT
4382 if (l3->shared)
4383 shared_avail += l3->shared->avail;
e498be7d 4384
ca3b9b91 4385 spin_unlock_irq(&l3->list_lock);
1da177e4 4386 }
b28a02de
PE
4387 num_slabs += active_slabs;
4388 num_objs = num_slabs * cachep->num;
e498be7d 4389 if (num_objs - active_objs != free_objects && !error)
1da177e4
LT
4390 error = "free_objects accounting error";
4391
b28a02de 4392 name = cachep->name;
1da177e4
LT
4393 if (error)
4394 printk(KERN_ERR "slab: cache %s error: %s\n", name, error);
4395
4396 seq_printf(m, "%-17s %6lu %6lu %6u %4u %4d",
3b0efdfa 4397 name, active_objs, num_objs, cachep->size,
b28a02de 4398 cachep->num, (1 << cachep->gfporder));
1da177e4 4399 seq_printf(m, " : tunables %4u %4u %4u",
b28a02de 4400 cachep->limit, cachep->batchcount, cachep->shared);
e498be7d 4401 seq_printf(m, " : slabdata %6lu %6lu %6lu",
b28a02de 4402 active_slabs, num_slabs, shared_avail);
1da177e4 4403#if STATS
b28a02de 4404 { /* list3 stats */
1da177e4
LT
4405 unsigned long high = cachep->high_mark;
4406 unsigned long allocs = cachep->num_allocations;
4407 unsigned long grown = cachep->grown;
4408 unsigned long reaped = cachep->reaped;
4409 unsigned long errors = cachep->errors;
4410 unsigned long max_freeable = cachep->max_freeable;
1da177e4 4411 unsigned long node_allocs = cachep->node_allocs;
e498be7d 4412 unsigned long node_frees = cachep->node_frees;
fb7faf33 4413 unsigned long overflows = cachep->node_overflow;
1da177e4 4414
e92dd4fd
JP
4415 seq_printf(m, " : globalstat %7lu %6lu %5lu %4lu "
4416 "%4lu %4lu %4lu %4lu %4lu",
4417 allocs, high, grown,
4418 reaped, errors, max_freeable, node_allocs,
4419 node_frees, overflows);
1da177e4
LT
4420 }
4421 /* cpu stats */
4422 {
4423 unsigned long allochit = atomic_read(&cachep->allochit);
4424 unsigned long allocmiss = atomic_read(&cachep->allocmiss);
4425 unsigned long freehit = atomic_read(&cachep->freehit);
4426 unsigned long freemiss = atomic_read(&cachep->freemiss);
4427
4428 seq_printf(m, " : cpustat %6lu %6lu %6lu %6lu",
b28a02de 4429 allochit, allocmiss, freehit, freemiss);
1da177e4
LT
4430 }
4431#endif
4432 seq_putc(m, '\n');
1da177e4
LT
4433 return 0;
4434}
4435
4436/*
4437 * slabinfo_op - iterator that generates /proc/slabinfo
4438 *
4439 * Output layout:
4440 * cache-name
4441 * num-active-objs
4442 * total-objs
4443 * object size
4444 * num-active-slabs
4445 * total-slabs
4446 * num-pages-per-slab
4447 * + further values on SMP and with statistics enabled
4448 */
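/*
 * Example /proc/slabinfo line in the 2.1 format described above (the cache
 * name and all values are illustrative only, not measured):
 *
 *	dentry            20193  20517    192   21    1 : tunables  120   60    8 : slabdata    977    977      0
 */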
4449
7b3c3a50 4450static const struct seq_operations slabinfo_op = {
b28a02de
PE
4451 .start = s_start,
4452 .next = s_next,
4453 .stop = s_stop,
4454 .show = s_show,
1da177e4
LT
4455};
4456
4457#define MAX_SLABINFO_WRITE 128
4458/**
4459 * slabinfo_write - Tuning for the slab allocator
4460 * @file: unused
4461 * @buffer: user buffer
4462 * @count: data length
4463 * @ppos: unused
4464 */
68a1b195 4465static ssize_t slabinfo_write(struct file *file, const char __user *buffer,
b28a02de 4466 size_t count, loff_t *ppos)
1da177e4 4467{
b28a02de 4468 char kbuf[MAX_SLABINFO_WRITE + 1], *tmp;
1da177e4 4469 int limit, batchcount, shared, res;
7a7c381d 4470 struct kmem_cache *cachep;
b28a02de 4471
1da177e4
LT
4472 if (count > MAX_SLABINFO_WRITE)
4473 return -EINVAL;
4474 if (copy_from_user(&kbuf, buffer, count))
4475 return -EFAULT;
b28a02de 4476 kbuf[MAX_SLABINFO_WRITE] = '\0';
1da177e4
LT
4477
4478 tmp = strchr(kbuf, ' ');
4479 if (!tmp)
4480 return -EINVAL;
4481 *tmp = '\0';
4482 tmp++;
4483 if (sscanf(tmp, " %d %d %d", &limit, &batchcount, &shared) != 3)
4484 return -EINVAL;
4485
4486 /* Find the cache in the chain of caches. */
18004c5d 4487 mutex_lock(&slab_mutex);
1da177e4 4488 res = -EINVAL;
18004c5d 4489 list_for_each_entry(cachep, &slab_caches, list) {
1da177e4 4490 if (!strcmp(cachep->name, kbuf)) {
a737b3e2
AM
4491 if (limit < 1 || batchcount < 1 ||
4492 batchcount > limit || shared < 0) {
e498be7d 4493 res = 0;
1da177e4 4494 } else {
e498be7d 4495 res = do_tune_cpucache(cachep, limit,
83b519e8
PE
4496 batchcount, shared,
4497 GFP_KERNEL);
1da177e4
LT
4498 }
4499 break;
4500 }
4501 }
18004c5d 4502 mutex_unlock(&slab_mutex);
1da177e4
LT
4503 if (res >= 0)
4504 res = count;
4505 return res;
4506}
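/*
 * Illustrative tuning example (cache name and values are examples only):
 * writing "<name> <limit> <batchcount> <shared>" to /proc/slabinfo invokes
 * do_tune_cpucache() for the named cache, e.g. from a shell:
 *
 *	# echo "dentry 120 60 8" > /proc/slabinfo
 */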
871751e2 4507
7b3c3a50
AD
4508static int slabinfo_open(struct inode *inode, struct file *file)
4509{
4510 return seq_open(file, &slabinfo_op);
4511}
4512
4513static const struct file_operations proc_slabinfo_operations = {
4514 .open = slabinfo_open,
4515 .read = seq_read,
4516 .write = slabinfo_write,
4517 .llseek = seq_lseek,
4518 .release = seq_release,
4519};
4520
871751e2
AV
4521#ifdef CONFIG_DEBUG_SLAB_LEAK
4522
4523static void *leaks_start(struct seq_file *m, loff_t *pos)
4524{
18004c5d
CL
4525 mutex_lock(&slab_mutex);
4526 return seq_list_start(&slab_caches, *pos);
871751e2
AV
4527}
4528
4529static inline int add_caller(unsigned long *n, unsigned long v)
4530{
4531 unsigned long *p;
4532 int l;
4533 if (!v)
4534 return 1;
4535 l = n[1];
4536 p = n + 2;
4537 while (l) {
4538 int i = l/2;
4539 unsigned long *q = p + 2 * i;
4540 if (*q == v) {
4541 q[1]++;
4542 return 1;
4543 }
4544 if (*q > v) {
4545 l = i;
4546 } else {
4547 p = q + 2;
4548 l -= i + 1;
4549 }
4550 }
4551 if (++n[1] == n[0])
4552 return 0;
4553 memmove(p + 2, p, n[1] * 2 * sizeof(unsigned long) - ((void *)p - (void *)n));
4554 p[0] = v;
4555 p[1] = 1;
4556 return 1;
4557}
4558
4559static void handle_slab(unsigned long *n, struct kmem_cache *c, struct slab *s)
4560{
4561 void *p;
4562 int i;
4563 if (n[0] == n[1])
4564 return;
3b0efdfa 4565 for (i = 0, p = s->s_mem; i < c->num; i++, p += c->size) {
871751e2
AV
4566 if (slab_bufctl(s)[i] != BUFCTL_ACTIVE)
4567 continue;
4568 if (!add_caller(n, (unsigned long)*dbg_userword(c, p)))
4569 return;
4570 }
4571}
4572
4573static void show_symbol(struct seq_file *m, unsigned long address)
4574{
4575#ifdef CONFIG_KALLSYMS
871751e2 4576 unsigned long offset, size;
9281acea 4577 char modname[MODULE_NAME_LEN], name[KSYM_NAME_LEN];
871751e2 4578
a5c43dae 4579 if (lookup_symbol_attrs(address, &size, &offset, modname, name) == 0) {
871751e2 4580 seq_printf(m, "%s+%#lx/%#lx", name, offset, size);
a5c43dae 4581 if (modname[0])
871751e2
AV
4582 seq_printf(m, " [%s]", modname);
4583 return;
4584 }
4585#endif
4586 seq_printf(m, "%p", (void *)address);
4587}
4588
4589static int leaks_show(struct seq_file *m, void *p)
4590{
0672aa7c 4591 struct kmem_cache *cachep = list_entry(p, struct kmem_cache, list);
871751e2
AV
4592 struct slab *slabp;
4593 struct kmem_list3 *l3;
4594 const char *name;
4595 unsigned long *n = m->private;
4596 int node;
4597 int i;
4598
4599 if (!(cachep->flags & SLAB_STORE_USER))
4600 return 0;
4601 if (!(cachep->flags & SLAB_RED_ZONE))
4602 return 0;
4603
4604 /* OK, we can do it */
4605
4606 n[1] = 0;
4607
4608 for_each_online_node(node) {
4609 l3 = cachep->nodelists[node];
4610 if (!l3)
4611 continue;
4612
4613 check_irq_on();
4614 spin_lock_irq(&l3->list_lock);
4615
7a7c381d 4616 list_for_each_entry(slabp, &l3->slabs_full, list)
871751e2 4617 handle_slab(n, cachep, slabp);
7a7c381d 4618 list_for_each_entry(slabp, &l3->slabs_partial, list)
871751e2 4619 handle_slab(n, cachep, slabp);
871751e2
AV
4620 spin_unlock_irq(&l3->list_lock);
4621 }
4622 name = cachep->name;
4623 if (n[0] == n[1]) {
4624 /* Increase the buffer size */
18004c5d 4625 mutex_unlock(&slab_mutex);
871751e2
AV
4626 m->private = kzalloc(n[0] * 4 * sizeof(unsigned long), GFP_KERNEL);
4627 if (!m->private) {
4628 /* Too bad, we are really out */
4629 m->private = n;
18004c5d 4630 mutex_lock(&slab_mutex);
871751e2
AV
4631 return -ENOMEM;
4632 }
4633 *(unsigned long *)m->private = n[0] * 2;
4634 kfree(n);
18004c5d 4635 mutex_lock(&slab_mutex);
871751e2
AV
4636 /* Now make sure this entry will be retried */
4637 m->count = m->size;
4638 return 0;
4639 }
4640 for (i = 0; i < n[1]; i++) {
4641 seq_printf(m, "%s: %lu ", name, n[2*i+3]);
4642 show_symbol(m, n[2*i+2]);
4643 seq_putc(m, '\n');
4644 }
d2e7b7d0 4645
871751e2
AV
4646 return 0;
4647}
4648
a0ec95a8 4649static const struct seq_operations slabstats_op = {
871751e2
AV
4650 .start = leaks_start,
4651 .next = s_next,
4652 .stop = s_stop,
4653 .show = leaks_show,
4654};
a0ec95a8
AD
4655
4656static int slabstats_open(struct inode *inode, struct file *file)
4657{
4658 unsigned long *n = kzalloc(PAGE_SIZE, GFP_KERNEL);
4659 int ret = -ENOMEM;
4660 if (n) {
4661 ret = seq_open(file, &slabstats_op);
4662 if (!ret) {
4663 struct seq_file *m = file->private_data;
4664 *n = PAGE_SIZE / (2 * sizeof(unsigned long));
4665 m->private = n;
4666 n = NULL;
4667 }
4668 kfree(n);
4669 }
4670 return ret;
4671}
4672
4673static const struct file_operations proc_slabstats_operations = {
4674 .open = slabstats_open,
4675 .read = seq_read,
4676 .llseek = seq_lseek,
4677 .release = seq_release_private,
4678};
4679#endif
4680
4681static int __init slab_proc_init(void)
4682{
ab067e99 4683 proc_create("slabinfo",S_IWUSR|S_IRUSR,NULL,&proc_slabinfo_operations);
a0ec95a8
AD
4684#ifdef CONFIG_DEBUG_SLAB_LEAK
4685 proc_create("slab_allocators", 0, NULL, &proc_slabstats_operations);
871751e2 4686#endif
a0ec95a8
AD
4687 return 0;
4688}
4689module_init(slab_proc_init);
1da177e4
LT
4690#endif
4691
00e145b6
MS
4692/**
4693 * ksize - get the actual amount of memory allocated for a given object
4694 * @objp: Pointer to the object
4695 *
4696 * kmalloc may internally round up allocations and return more memory
4697 * than requested. ksize() can be used to determine the actual amount of
4698 * memory allocated. The caller may use this additional memory, even though
4699 * a smaller amount of memory was initially specified with the kmalloc call.
4700 * The caller must guarantee that objp points to a valid object previously
4701 * allocated with either kmalloc() or kmem_cache_alloc(). The object
4702 * must not be freed during the duration of the call.
4703 */
fd76bab2 4704size_t ksize(const void *objp)
1da177e4 4705{
ef8b4520
CL
4706 BUG_ON(!objp);
4707 if (unlikely(objp == ZERO_SIZE_PTR))
00e145b6 4708 return 0;
1da177e4 4709
8c138bc0 4710 return virt_to_cache(objp)->object_size;
1da177e4 4711}
b1aabecd 4712EXPORT_SYMBOL(ksize);
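/*
 * Illustrative sketch (not part of slab.c; variable names are hypothetical):
 * because kmalloc() rounds the request up to the size of the backing general
 * cache, ksize() may report more usable space than was asked for:
 *
 *	buf = kmalloc(100, GFP_KERNEL);
 *	if (buf)
 *		avail = ksize(buf);
 *
 * Here avail could legitimately be 128 rather than 100, and the caller is
 * allowed to use the full reported size.
 */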