SLUB: cleanup - define macros instead of hardcoded numbers
include/linux/slab.h
/*
 * Written by Mark Hemment, 1996 (markhe@nextd.demon.co.uk).
 *
 * (C) SGI 2006, Christoph Lameter
 *	Cleaned up and restructured to ease the addition of alternative
 *	implementations of SLAB allocators.
 */

#ifndef _LINUX_SLAB_H
#define _LINUX_SLAB_H

#include <linux/gfp.h>
#include <linux/types.h>

/*
 * Flags to pass to kmem_cache_create().
 * The ones marked DEBUG are only valid if CONFIG_DEBUG_SLAB is set.
 */
#define SLAB_DEBUG_FREE		0x00000100UL	/* DEBUG: Perform (expensive) checks on free */
#define SLAB_RED_ZONE		0x00000400UL	/* DEBUG: Red zone objs in a cache */
#define SLAB_POISON		0x00000800UL	/* DEBUG: Poison objects */
#define SLAB_HWCACHE_ALIGN	0x00002000UL	/* Align objs on cache lines */
#define SLAB_CACHE_DMA		0x00004000UL	/* Use GFP_DMA memory */
#define SLAB_STORE_USER		0x00010000UL	/* DEBUG: Store the last owner for bug hunting */
#define SLAB_PANIC		0x00040000UL	/* Panic if kmem_cache_create() fails */
/*
 * SLAB_DESTROY_BY_RCU - **WARNING** READ THIS!
 *
 * This delays freeing the SLAB page by a grace period; it does _NOT_
 * delay object freeing. This means that if you do kmem_cache_free()
 * that memory location is free to be reused at any time. Thus it may
 * be possible to see another object there in the same RCU grace period.
 *
 * This feature only ensures the memory location backing the object
 * stays valid; the trick to using this is relying on an independent
 * object validation pass. Something like:
 *
 *	rcu_read_lock();
 * again:
 *	obj = lockless_lookup(key);
 *	if (obj) {
 *		if (!try_get_ref(obj))	// might fail for free objects
 *			goto again;
 *
 *		if (obj->key != key) {	// not the object we expected
 *			put_ref(obj);
 *			goto again;
 *		}
 *	}
 *	rcu_read_unlock();
 *
 * See also the comment on struct slab_rcu in mm/slab.c.
 */
#define SLAB_DESTROY_BY_RCU	0x00080000UL	/* Defer freeing slabs to RCU */
#define SLAB_MEM_SPREAD		0x00100000UL	/* Spread some memory over cpuset */
#define SLAB_TRACE		0x00200000UL	/* Trace allocations and frees */

/* Flag to prevent checks on free */
#ifdef CONFIG_DEBUG_OBJECTS
# define SLAB_DEBUG_OBJECTS	0x00400000UL
#else
# define SLAB_DEBUG_OBJECTS	0x00000000UL
#endif

/* The following flags affect the page allocator grouping pages by mobility */
#define SLAB_RECLAIM_ACCOUNT	0x00020000UL	/* Objects are reclaimable */
#define SLAB_TEMPORARY		SLAB_RECLAIM_ACCOUNT	/* Objects are short-lived */
/*
 * ZERO_SIZE_PTR will be returned for zero sized kmalloc requests.
 *
 * Dereferencing ZERO_SIZE_PTR will lead to a distinct access fault.
 *
 * ZERO_SIZE_PTR can be passed to kfree though in the same way that NULL can.
 * Both make kfree a no-op.
 */
#define ZERO_SIZE_PTR ((void *)16)

#define ZERO_OR_NULL_PTR(x) ((unsigned long)(x) <= \
				(unsigned long)ZERO_SIZE_PTR)

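/*
 * Illustrative sketch (not part of the API; the variable name is
 * hypothetical): a zero-length request returns ZERO_SIZE_PTR rather
 * than NULL, so success can still be distinguished from allocation
 * failure, and the result may be passed straight to kfree():
 *
 *	char *buf = kmalloc(0, GFP_KERNEL);	// returns ZERO_SIZE_PTR
 *
 *	if (ZERO_OR_NULL_PTR(buf))		// true for NULL and ZERO_SIZE_PTR
 *		;				// do not dereference buf
 *	kfree(buf);				// no-op for ZERO_SIZE_PTR
 */
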
/*
 * struct kmem_cache related prototypes
 */
void __init kmem_cache_init(void);
int slab_is_available(void);

struct kmem_cache *kmem_cache_create(const char *, size_t, size_t,
			unsigned long,
			void (*)(void *));
void kmem_cache_destroy(struct kmem_cache *);
int kmem_cache_shrink(struct kmem_cache *);
void kmem_cache_free(struct kmem_cache *, void *);
unsigned int kmem_cache_size(struct kmem_cache *);
const char *kmem_cache_name(struct kmem_cache *);
int kmem_ptr_validate(struct kmem_cache *cachep, const void *ptr);

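/*
 * Typical lifecycle of a cache built from the prototypes above (a
 * sketch; "struct foo" and foo_cachep are hypothetical names):
 *
 *	static struct kmem_cache *foo_cachep;
 *
 *	foo_cachep = kmem_cache_create("foo", sizeof(struct foo),
 *				       0, SLAB_HWCACHE_ALIGN, NULL);
 *	if (!foo_cachep)
 *		return -ENOMEM;
 *
 *	struct foo *f = kmem_cache_alloc(foo_cachep, GFP_KERNEL);
 *	...
 *	kmem_cache_free(foo_cachep, f);
 *	kmem_cache_destroy(foo_cachep);
 */
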
/*
 * Please use this macro to create slab caches. Simply specify the
 * name of the structure and maybe some flags that are listed above.
 *
 * The alignment of the struct determines object alignment. If you
 * add, e.g., ____cacheline_aligned_in_smp to the struct declaration,
 * the objects will be properly aligned in SMP configurations.
 */
#define KMEM_CACHE(__struct, __flags) kmem_cache_create(#__struct,\
		sizeof(struct __struct), __alignof__(struct __struct),\
		(__flags), NULL)

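/*
 * For example (a sketch; struct foo is hypothetical):
 *
 *	struct foo {
 *		int bar;
 *	} ____cacheline_aligned_in_smp;
 *
 *	foo_cachep = KMEM_CACHE(foo, SLAB_PANIC);
 *
 * expands to
 *
 *	foo_cachep = kmem_cache_create("foo", sizeof(struct foo),
 *				       __alignof__(struct foo),
 *				       (SLAB_PANIC), NULL);
 */
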
/*
 * The largest kmalloc size supported by the slab allocators is
 * 32 megabyte (2^25) or the maximum allocatable page order if that is
 * less than 32 MB.
 *
 * WARNING: It's not easy to increase this value since the allocators have
 * to do various tricks to work around compiler limitations in order to
 * ensure proper constant folding.
 */
#define KMALLOC_SHIFT_HIGH	((MAX_ORDER + PAGE_SHIFT - 1) <= 25 ? \
				(MAX_ORDER + PAGE_SHIFT - 1) : 25)

#define KMALLOC_MAX_SIZE	(1UL << KMALLOC_SHIFT_HIGH)
#define KMALLOC_MAX_ORDER	(KMALLOC_SHIFT_HIGH - PAGE_SHIFT)

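/*
 * Worked example, assuming the common x86 defaults PAGE_SHIFT = 12 and
 * MAX_ORDER = 11:
 *
 *	MAX_ORDER + PAGE_SHIFT - 1 = 22 (<= 25), so
 *	KMALLOC_SHIFT_HIGH = 22
 *	KMALLOC_MAX_SIZE   = 1UL << 22 = 4 MB
 *	KMALLOC_MAX_ORDER  = 22 - 12 = 10
 */
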
/*
 * Common kmalloc functions provided by all allocators
 */
void * __must_check __krealloc(const void *, size_t, gfp_t);
void * __must_check krealloc(const void *, size_t, gfp_t);
void kfree(const void *);
size_t ksize(const void *);

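/*
 * krealloc() usage sketch (buf and new_len are hypothetical names). On
 * failure krealloc() returns NULL and leaves the original buffer
 * intact, so the result must not be assigned over the old pointer
 * unconditionally:
 *
 *	new_buf = krealloc(buf, new_len, GFP_KERNEL);
 *	if (!new_buf) {
 *		kfree(buf);		// old buffer is still valid here
 *		return -ENOMEM;
 *	}
 *	buf = new_buf;
 */
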
/*
 * Allocator specific definitions. These are mainly used to establish optimized
 * ways to convert kmalloc() calls to kmem_cache_alloc() invocations by
 * selecting the appropriate general cache at compile time.
 *
 * Allocators must define at least:
 *
 *	kmem_cache_alloc()
 *	__kmalloc()
 *	kmalloc()
 *
 * Those wishing to support NUMA must also define:
 *
 *	kmem_cache_alloc_node()
 *	kmalloc_node()
 *
 * See each allocator definition file for additional comments and
 * implementation notes.
 */
#ifdef CONFIG_SLUB
#include <linux/slub_def.h>
#elif defined(CONFIG_SLOB)
#include <linux/slob_def.h>
#else
#include <linux/slab_def.h>
#endif

/**
 * kcalloc - allocate memory for an array. The memory is set to zero.
 * @n: number of elements.
 * @size: element size.
 * @flags: the type of memory to allocate.
 *
 * The @flags argument may be one of:
 *
 * %GFP_USER - Allocate memory on behalf of user. May sleep.
 *
 * %GFP_KERNEL - Allocate normal kernel RAM. May sleep.
 *
 * %GFP_ATOMIC - Allocation will not sleep. May use emergency pools.
 *   For example, use this inside interrupt handlers.
 *
 * %GFP_HIGHUSER - Allocate pages from high memory.
 *
 * %GFP_NOIO - Do not do any I/O at all while trying to get memory.
 *
 * %GFP_NOFS - Do not make any fs calls while trying to get memory.
 *
 * %GFP_NOWAIT - Allocation will not sleep.
 *
 * %GFP_THISNODE - Allocate node-local memory only.
 *
 * %GFP_DMA - Allocation suitable for DMA.
 *   Should only be used for kmalloc() caches. Otherwise, use a
 *   slab created with SLAB_CACHE_DMA.
 *
 * It is also possible to set different flags by OR'ing
 * in one or more of the following additional @flags:
 *
 * %__GFP_COLD - Request cache-cold pages instead of
 *   trying to return cache-warm pages.
 *
 * %__GFP_HIGH - This allocation has high priority and may use emergency pools.
 *
 * %__GFP_NOFAIL - Indicate that this allocation is in no way allowed to fail
 *   (think twice before using).
 *
 * %__GFP_NORETRY - If memory is not immediately available,
 *   then give up at once.
 *
 * %__GFP_NOWARN - If allocation fails, don't issue any warnings.
 *
 * %__GFP_REPEAT - If allocation fails initially, try once more before failing.
 *
 * There are other flags available as well, but these are not intended
 * for general use, and so are not documented here. For a full list of
 * potential flags, always refer to linux/gfp.h.
 */
static inline void *kcalloc(size_t n, size_t size, gfp_t flags)
{
	if (size != 0 && n > ULONG_MAX / size)
		return NULL;
	return __kmalloc(n * size, flags | __GFP_ZERO);
}

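/*
 * The division above guards the n * size multiplication against
 * overflow. Worked example on a 32-bit machine (ULONG_MAX == 2^32 - 1):
 * kcalloc(0x20000000, 16, GFP_KERNEL) would wrap 0x20000000 * 16 to 0,
 * but 0x20000000 > ULONG_MAX / 16 (== 0x0fffffff), so kcalloc()
 * returns NULL rather than handing back a far-too-small allocation.
 */
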
#if !defined(CONFIG_NUMA) && !defined(CONFIG_SLOB)
/**
 * kmalloc_node - allocate memory from a specific node
 * @size: how many bytes of memory are required.
 * @flags: the type of memory to allocate (see kcalloc).
 * @node: node to allocate from.
 *
 * kmalloc() for non-local nodes, used to allocate from a specific node
 * if available. Equivalent to kmalloc() in the non-NUMA single-node
 * case.
 */
static inline void *kmalloc_node(size_t size, gfp_t flags, int node)
{
	return kmalloc(size, flags);
}

static inline void *__kmalloc_node(size_t size, gfp_t flags, int node)
{
	return __kmalloc(size, flags);
}

void *kmem_cache_alloc(struct kmem_cache *, gfp_t);

static inline void *kmem_cache_alloc_node(struct kmem_cache *cachep,
					gfp_t flags, int node)
{
	return kmem_cache_alloc(cachep, flags);
}
#endif /* !CONFIG_NUMA && !CONFIG_SLOB */

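/*
 * Sketch of NUMA-aware allocation (dev is a hypothetical struct device
 * pointer). On !CONFIG_NUMA kernels the stubs above make this identical
 * to plain kmalloc():
 *
 *	buf = kmalloc_node(size, GFP_KERNEL, dev_to_node(dev));
 */
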
/*
 * kmalloc_track_caller is a special version of kmalloc that records the
 * calling function of the routine calling it for slab leak tracking instead
 * of just the calling function (confusing, eh?).
 * It's useful when the call to kmalloc comes from a widely-used standard
 * allocator where we care about the real place the memory allocation
 * request comes from.
 */
#if defined(CONFIG_DEBUG_SLAB) || defined(CONFIG_SLUB)
extern void *__kmalloc_track_caller(size_t, gfp_t, void*);
#define kmalloc_track_caller(size, flags) \
	__kmalloc_track_caller(size, flags, __builtin_return_address(0))
#else
#define kmalloc_track_caller(size, flags) \
	__kmalloc(size, flags)
#endif /* DEBUG_SLAB || SLUB */

#ifdef CONFIG_NUMA
/*
 * kmalloc_node_track_caller is a special version of kmalloc_node that
 * records the calling function of the routine calling it for slab leak
 * tracking instead of just the calling function (confusing, eh?).
 * It's useful when the call to kmalloc_node comes from a widely-used
 * standard allocator where we care about the real place the memory
 * allocation request comes from.
 */
#if defined(CONFIG_DEBUG_SLAB) || defined(CONFIG_SLUB)
extern void *__kmalloc_node_track_caller(size_t, gfp_t, int, void *);
#define kmalloc_node_track_caller(size, flags, node) \
	__kmalloc_node_track_caller(size, flags, node, \
			__builtin_return_address(0))
#else
#define kmalloc_node_track_caller(size, flags, node) \
	__kmalloc_node(size, flags, node)
#endif

#else /* CONFIG_NUMA */

#define kmalloc_node_track_caller(size, flags, node) \
	kmalloc_track_caller(size, flags)

#endif /* CONFIG_NUMA */

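/*
 * Example of when the _track_caller variants matter (a sketch modeled
 * on helpers like kstrdup(); my_strdup is hypothetical): an allocation
 * wrapper should charge the allocation to *its* caller, not to itself:
 *
 *	char *my_strdup(const char *s, gfp_t gfp)
 *	{
 *		size_t len = strlen(s) + 1;
 *		char *buf = kmalloc_track_caller(len, gfp);
 *
 *		if (buf)
 *			memcpy(buf, s, len);
 *		return buf;
 *	}
 */
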
/*
 * Shortcuts
 */
static inline void *kmem_cache_zalloc(struct kmem_cache *k, gfp_t flags)
{
	return kmem_cache_alloc(k, flags | __GFP_ZERO);
}

/**
 * kzalloc - allocate memory. The memory is set to zero.
 * @size: how many bytes of memory are required.
 * @flags: the type of memory to allocate (see kmalloc).
 */
static inline void *kzalloc(size_t size, gfp_t flags)
{
	return kmalloc(size, flags | __GFP_ZERO);
}

/**
 * kzalloc_node - allocate zeroed memory from a particular memory node.
 * @size: how many bytes of memory are required.
 * @flags: the type of memory to allocate (see kmalloc).
 * @node: memory node from which to allocate
 */
static inline void *kzalloc_node(size_t size, gfp_t flags, int node)
{
	return kmalloc_node(size, flags | __GFP_ZERO, node);
}

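/*
 * Usage sketch (struct foo is hypothetical): the shortcuts above are
 * the preferred spelling of an allocation followed by memset(.., 0, ..):
 *
 *	struct foo *p = kzalloc(sizeof(*p), GFP_KERNEL);
 *	if (!p)
 *		return -ENOMEM;
 */
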
#endif /* _LINUX_SLAB_H */