SLUB: Do not use length parameter in slab_alloc()
[linux-2.6-block.git] / include/linux/slab.h
/*
 * Written by Mark Hemment, 1996 (markhe@nextd.demon.co.uk).
 *
 * (C) SGI 2006, Christoph Lameter <clameter@sgi.com>
 *	Cleaned up and restructured to ease the addition of alternative
 *	implementations of SLAB allocators.
 */

#ifndef _LINUX_SLAB_H
#define _LINUX_SLAB_H

#ifdef __KERNEL__

#include <linux/gfp.h>
#include <linux/types.h>

/*
 * Flags to pass to kmem_cache_create().
 * The ones marked DEBUG are only valid if CONFIG_DEBUG_SLAB is set.
 */
#define SLAB_DEBUG_FREE		0x00000100UL	/* DEBUG: Perform (expensive) checks on free */
#define SLAB_RED_ZONE		0x00000400UL	/* DEBUG: Red zone objs in a cache */
#define SLAB_POISON		0x00000800UL	/* DEBUG: Poison objects */
#define SLAB_HWCACHE_ALIGN	0x00002000UL	/* Align objs on cache lines */
#define SLAB_CACHE_DMA		0x00004000UL	/* Use GFP_DMA memory */
#define SLAB_STORE_USER		0x00010000UL	/* DEBUG: Store the last owner for bug hunting */
#define SLAB_RECLAIM_ACCOUNT	0x00020000UL	/* Objects are reclaimable */
#define SLAB_PANIC		0x00040000UL	/* Panic if kmem_cache_create() fails */
#define SLAB_DESTROY_BY_RCU	0x00080000UL	/* Defer freeing slabs to RCU */
#define SLAB_MEM_SPREAD		0x00100000UL	/* Spread some memory over cpuset */
#define SLAB_TRACE		0x00200000UL	/* Trace allocations and frees */

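/*
 * Example (illustrative sketch, not part of this header): combining the
 * flags above when creating a cache. "struct foo" and foo_cache are
 * hypothetical names used only for this example.
 *
 *	static struct kmem_cache *foo_cache;
 *
 *	foo_cache = kmem_cache_create("foo", sizeof(struct foo), 0,
 *				      SLAB_HWCACHE_ALIGN | SLAB_PANIC,
 *				      NULL, NULL);
 *
 * SLAB_HWCACHE_ALIGN aligns objects to cache lines, and SLAB_PANIC makes
 * the kernel panic instead of returning NULL if cache creation fails, so
 * no error check is needed afterwards.
 */
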
/*
 * ZERO_SIZE_PTR will be returned for zero sized kmalloc requests.
 *
 * Dereferencing ZERO_SIZE_PTR will lead to a distinct access fault.
 *
 * ZERO_SIZE_PTR can be passed to kfree though in the same way that NULL can.
 * Both make kfree a no-op.
 */
#define ZERO_SIZE_PTR ((void *)16)

#define ZERO_OR_NULL_PTR(x) ((unsigned long)(x) <= \
				(unsigned long)ZERO_SIZE_PTR)

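/*
 * Example (illustrative sketch): a zero-length request does not return
 * NULL, so callers that may pass a zero size must test with
 * ZERO_OR_NULL_PTR() rather than a bare NULL check.
 *
 *	void *p = kmalloc(0, GFP_KERNEL);
 *
 *	if (ZERO_OR_NULL_PTR(p))
 *		return;		(p is ZERO_SIZE_PTR or NULL: nothing usable)
 *	...
 *	kfree(p);		(safe either way: kfree of ZERO_SIZE_PTR or
 *				 NULL is a no-op)
 */
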
/*
 * struct kmem_cache related prototypes
 */
void __init kmem_cache_init(void);
int slab_is_available(void);

struct kmem_cache *kmem_cache_create(const char *, size_t, size_t,
			unsigned long,
			void (*)(void *, struct kmem_cache *, unsigned long),
			void (*)(void *, struct kmem_cache *, unsigned long));
void kmem_cache_destroy(struct kmem_cache *);
int kmem_cache_shrink(struct kmem_cache *);
void *kmem_cache_zalloc(struct kmem_cache *, gfp_t);
void kmem_cache_free(struct kmem_cache *, void *);
unsigned int kmem_cache_size(struct kmem_cache *);
const char *kmem_cache_name(struct kmem_cache *);
int kmem_ptr_validate(struct kmem_cache *cachep, const void *ptr);

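/*
 * Example (illustrative sketch): typical lifetime of a cache and its
 * objects. foo_cache is the hypothetical cache created in the example
 * above; error paths are elided for brevity.
 *
 *	struct foo *obj;
 *
 *	obj = kmem_cache_zalloc(foo_cache, GFP_KERNEL);	(zeroed object)
 *	if (!obj)
 *		return -ENOMEM;
 *	...
 *	kmem_cache_free(foo_cache, obj);    (return object to the cache)
 *
 *	kmem_cache_shrink(foo_cache);       (optional: release empty slabs)
 *	kmem_cache_destroy(foo_cache);      (all objects must be freed first)
 */
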
/*
 * Please use this macro to create slab caches. Simply specify the
 * name of the structure and maybe some flags that are listed above.
 *
 * The alignment of the struct determines object alignment. If you
 * e.g. add ____cacheline_aligned_in_smp to the struct declaration
 * then the objects will be properly aligned in SMP configurations.
 */
#define KMEM_CACHE(__struct, __flags) kmem_cache_create(#__struct,\
		sizeof(struct __struct), __alignof__(struct __struct),\
		(__flags), NULL, NULL)

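/*
 * Example (illustrative sketch): the hypothetical struct below is cache
 * line aligned, so KMEM_CACHE() creates a cache named "foo" whose objects
 * inherit that alignment.
 *
 *	struct foo {
 *		spinlock_t lock;
 *		unsigned long state;
 *	} ____cacheline_aligned_in_smp;
 *
 *	foo_cache = KMEM_CACHE(foo, SLAB_PANIC);
 *
 * This expands to kmem_cache_create("foo", sizeof(struct foo),
 * __alignof__(struct foo), SLAB_PANIC, NULL, NULL).
 */
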
/*
 * The largest kmalloc size supported by the slab allocators is
 * 32 megabytes (2^25), or the maximum allocatable page order if that is
 * less than 32 MB.
 *
 * WARNING: It is not easy to increase this value since the allocators have
 * to do various tricks to work around compiler limitations in order to
 * ensure proper constant folding.
 */
#define KMALLOC_SHIFT_HIGH	((MAX_ORDER + PAGE_SHIFT - 1) <= 25 ? \
				(MAX_ORDER + PAGE_SHIFT - 1) : 25)

#define KMALLOC_MAX_SIZE	(1UL << KMALLOC_SHIFT_HIGH)
#define KMALLOC_MAX_ORDER	(KMALLOC_SHIFT_HIGH - PAGE_SHIFT)

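/*
 * Worked example (assuming typical x86 values PAGE_SHIFT = 12 and
 * MAX_ORDER = 11, i.e. 4 KB pages and at most order-10 page allocations):
 *
 *	MAX_ORDER + PAGE_SHIFT - 1 = 11 + 12 - 1 = 22, which is <= 25, so
 *	KMALLOC_SHIFT_HIGH = 22
 *	KMALLOC_MAX_SIZE   = 1UL << 22 = 4 MB
 *	KMALLOC_MAX_ORDER  = 22 - 12 = 10
 *
 * Only on configurations where MAX_ORDER + PAGE_SHIFT - 1 exceeds 25 is
 * the shift clamped to 25, giving the 32 MB ceiling described above.
 */
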
/*
 * Common kmalloc functions provided by all allocators
 */
void *__kzalloc(size_t, gfp_t);
void * __must_check krealloc(const void *, size_t, gfp_t);
void kfree(const void *);
size_t ksize(const void *);

/**
 * kcalloc - allocate memory for an array. The memory is set to zero.
 * @n: number of elements.
 * @size: element size.
 * @flags: the type of memory to allocate.
 *
 * The @flags argument may be one of:
 *
 * %GFP_USER - Allocate memory on behalf of user. May sleep.
 *
 * %GFP_KERNEL - Allocate normal kernel ram. May sleep.
 *
 * %GFP_ATOMIC - Allocation will not sleep. May use emergency pools.
 *   For example, use this inside interrupt handlers.
 *
 * %GFP_HIGHUSER - Allocate pages from high memory.
 *
 * %GFP_NOIO - Do not do any I/O at all while trying to get memory.
 *
 * %GFP_NOFS - Do not make any fs calls while trying to get memory.
 *
 * %GFP_NOWAIT - Allocation will not sleep.
 *
 * %GFP_THISNODE - Allocate node-local memory only.
 *
 * %GFP_DMA - Allocation suitable for DMA.
 *   Should only be used for kmalloc() caches. Otherwise, use a
 *   slab created with SLAB_CACHE_DMA.
 *
 * It is also possible to set different flags by OR'ing
 * in one or more of the following additional @flags:
 *
 * %__GFP_COLD - Request cache-cold pages instead of
 *   trying to return cache-warm pages.
 *
 * %__GFP_HIGH - This allocation has high priority and may use emergency pools.
 *
 * %__GFP_NOFAIL - Indicate that this allocation is in no way allowed to fail
 *   (think twice before using).
 *
 * %__GFP_NORETRY - If memory is not immediately available,
 *   then give up at once.
 *
 * %__GFP_NOWARN - If allocation fails, don't issue any warnings.
 *
 * %__GFP_REPEAT - If allocation fails initially, try once more before failing.
 *
 * There are other flags available as well, but these are not intended
 * for general use, and so are not documented here. For a full list of
 * potential flags, always refer to linux/gfp.h.
 */
static inline void *kcalloc(size_t n, size_t size, gfp_t flags)
{
	if (n != 0 && size > ULONG_MAX / n)
		return NULL;
	return __kzalloc(n * size, flags);
}

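/*
 * Example (illustrative sketch): allocating a zeroed array while letting
 * kcalloc() catch multiplication overflow. "nr_items" is a hypothetical
 * count that may be influenced by userspace.
 *
 *	struct item *items;
 *
 *	items = kcalloc(nr_items, sizeof(*items), GFP_KERNEL);
 *	if (!items)
 *		return -ENOMEM;
 *
 * If nr_items * sizeof(*items) would overflow, kcalloc() returns NULL
 * rather than allocating a too-small buffer.
 */
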
/*
 * Allocator specific definitions. These are mainly used to establish optimized
 * ways to convert kmalloc() calls to kmem_cache_alloc() invocations by
 * selecting the appropriate general cache at compile time.
 *
 * Allocators must define at least:
 *
 *	kmem_cache_alloc()
 *	__kmalloc()
 *	kmalloc()
 *	kzalloc()
 *
 * Those wishing to support NUMA must also define:
 *
 *	kmem_cache_alloc_node()
 *	kmalloc_node()
 *
 * See each allocator definition file for additional comments and
 * implementation notes.
 */
#ifdef CONFIG_SLUB
#include <linux/slub_def.h>
#elif defined(CONFIG_SLOB)
#include <linux/slob_def.h>
#else
#include <linux/slab_def.h>
#endif

#if !defined(CONFIG_NUMA) && !defined(CONFIG_SLOB)
/**
 * kmalloc_node - allocate memory from a specific node
 * @size: how many bytes of memory are required.
 * @flags: the type of memory to allocate (see kcalloc).
 * @node: node to allocate from.
 *
 * kmalloc() for non-local nodes, used to allocate from a specific node
 * if available. Equivalent to kmalloc() in the non-NUMA single-node
 * case.
 */
static inline void *kmalloc_node(size_t size, gfp_t flags, int node)
{
	return kmalloc(size, flags);
}

static inline void *__kmalloc_node(size_t size, gfp_t flags, int node)
{
	return __kmalloc(size, flags);
}

void *kmem_cache_alloc(struct kmem_cache *, gfp_t);

static inline void *kmem_cache_alloc_node(struct kmem_cache *cachep,
					gfp_t flags, int node)
{
	return kmem_cache_alloc(cachep, flags);
}
#endif /* !CONFIG_NUMA && !CONFIG_SLOB */

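/*
 * Example (illustrative sketch): allocating per-node data close to the
 * CPU that will use it. "cpu" is a hypothetical CPU number; on !CONFIG_NUMA
 * kernels the node argument is simply ignored, as shown above.
 *
 *	struct pernode_data *d;
 *
 *	d = kmalloc_node(sizeof(*d), GFP_KERNEL, cpu_to_node(cpu));
 *	if (!d)
 *		return -ENOMEM;
 */
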
/*
 * kmalloc_track_caller is a special version of kmalloc that records the
 * calling function of the routine calling it for slab leak tracking instead
 * of just the calling function (confusing, eh?).
 * It's useful when the call to kmalloc comes from a widely-used standard
 * allocator where we care about the real place the memory allocation
 * request comes from.
 */
#if defined(CONFIG_DEBUG_SLAB) || defined(CONFIG_SLUB)
extern void *__kmalloc_track_caller(size_t, gfp_t, void*);
#define kmalloc_track_caller(size, flags) \
	__kmalloc_track_caller(size, flags, __builtin_return_address(0))
#else
#define kmalloc_track_caller(size, flags) \
	__kmalloc(size, flags)
#endif /* CONFIG_DEBUG_SLAB || CONFIG_SLUB */

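/*
 * Example (illustrative sketch): a hypothetical wrapper that forwards
 * allocations. With plain kmalloc(), every leak report would point at
 * my_alloc() itself; kmalloc_track_caller() records
 * __builtin_return_address(0), so the allocation is attributed to
 * my_alloc()'s caller instead.
 *
 *	void *my_alloc(size_t len)
 *	{
 *		return kmalloc_track_caller(len, GFP_KERNEL);
 *	}
 */
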
#ifdef CONFIG_NUMA
/*
 * kmalloc_node_track_caller is a special version of kmalloc_node that
 * records the calling function of the routine calling it for slab leak
 * tracking instead of just the calling function (confusing, eh?).
 * It's useful when the call to kmalloc_node comes from a widely-used
 * standard allocator where we care about the real place the memory
 * allocation request comes from.
 */
#if defined(CONFIG_DEBUG_SLAB) || defined(CONFIG_SLUB)
extern void *__kmalloc_node_track_caller(size_t, gfp_t, int, void *);
#define kmalloc_node_track_caller(size, flags, node) \
	__kmalloc_node_track_caller(size, flags, node, \
			__builtin_return_address(0))
#else
#define kmalloc_node_track_caller(size, flags, node) \
	__kmalloc_node(size, flags, node)
#endif /* CONFIG_DEBUG_SLAB || CONFIG_SLUB */

#else /* CONFIG_NUMA */

#define kmalloc_node_track_caller(size, flags, node) \
	kmalloc_track_caller(size, flags)

#endif /* CONFIG_NUMA */

#endif /* __KERNEL__ */
#endif /* _LINUX_SLAB_H */