#ifndef _LINUX_SLAB_DEF_H
#define _LINUX_SLAB_DEF_H

/*
 * Definitions unique to the original Linux SLAB allocator.
 *
 * What we provide here is a way to optimize the frequent kmalloc
 * calls in the kernel by selecting the appropriate general cache
 * if kmalloc was called with a size that can be established at
 * compile time.
 */

#include <linux/init.h>
#include <asm/page.h>		/* kmalloc_sizes.h needs PAGE_SIZE */
#include <asm/cache.h>		/* kmalloc_sizes.h needs L1_CACHE_BYTES */
#include <linux/compiler.h>

#include <trace/events/kmem.h>

/*
 * struct kmem_cache
 *
 * manages a cache.
 */

struct kmem_cache {
/* 1) Cache tunables. Protected by cache_chain_mutex */
	unsigned int batchcount;
	unsigned int limit;
	unsigned int shared;

	unsigned int buffer_size;
	u32 reciprocal_buffer_size;
/* 2) touched by every alloc & free from the backend */

	unsigned int flags;		/* constant flags */
	unsigned int num;		/* # of objs per slab */

/* 3) cache_grow/shrink */
	/* order of pgs per slab (2^n) */
	unsigned int gfporder;

	/* force GFP flags, e.g. GFP_DMA */
	gfp_t gfpflags;

	size_t colour;			/* cache colouring range */
	unsigned int colour_off;	/* colour offset */
	struct kmem_cache *slabp_cache;
	unsigned int slab_size;
	unsigned int dflags;		/* dynamic flags */

	/* constructor func */
	void (*ctor)(void *obj);

/* 4) cache creation/removal */
	const char *name;
	struct list_head next;

/* 5) statistics */
#ifdef CONFIG_DEBUG_SLAB
	unsigned long num_active;
	unsigned long num_allocations;
	unsigned long high_mark;
	unsigned long grown;
	unsigned long reaped;
	unsigned long errors;
	unsigned long max_freeable;
	unsigned long node_allocs;
	unsigned long node_frees;
	unsigned long node_overflow;
	atomic_t allochit;
	atomic_t allocmiss;
	atomic_t freehit;
	atomic_t freemiss;

	/*
	 * If debugging is enabled, then the allocator can add additional
	 * fields and/or padding to every object. buffer_size contains the total
	 * object size including these internal fields, the following two
	 * variables contain the offset to the user object and its size.
	 */
	int obj_offset;
	int obj_size;
#endif /* CONFIG_DEBUG_SLAB */

/* 6) per-cpu/per-node data, touched during every alloc/free */
	/*
	 * We put array[] at the end of kmem_cache, because we want to size
	 * this array to nr_cpu_ids slots instead of NR_CPUS
	 * (see kmem_cache_init())
	 * We still use [NR_CPUS] and not [1] or [0] because cache_cache
	 * is statically defined, so we reserve the max number of cpus.
	 */
	struct kmem_list3 **nodelists;
	struct array_cache *array[NR_CPUS];
	/*
	 * Do not add fields after array[]
	 */
};
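
/*
 * Illustrative sketch, not part of the original header: reciprocal_buffer_size
 * holds reciprocal_value(buffer_size) so that mm/slab.c can map an object
 * pointer back to its index within a slab without a division on the hot
 * path, roughly:
 *
 *	static inline unsigned int obj_to_index(const struct kmem_cache *cache,
 *						const struct slab *slab, void *obj)
 *	{
 *		u32 offset = (obj - slab->s_mem);
 *		return reciprocal_divide(offset, cache->reciprocal_buffer_size);
 *	}
 *
 * (struct slab lives in mm/slab.c; reciprocal_value() and reciprocal_divide()
 * come from <linux/reciprocal_div.h>.)
 */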

/* Size description struct for general caches. */
struct cache_sizes {
	size_t cs_size;
	struct kmem_cache *cs_cachep;
#ifdef CONFIG_ZONE_DMA
	struct kmem_cache *cs_dmacachep;
#endif
};
extern struct cache_sizes malloc_sizes[];

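/*
 * Illustrative sketch, not part of the original header: mm/slab.c builds
 * malloc_sizes[] from the same <linux/kmalloc_sizes.h> list that kmalloc()
 * below walks, so the index computed at compile time lines up with the
 * matching general cache, roughly:
 *
 *	struct cache_sizes malloc_sizes[] = {
 *	#define CACHE(x) { .cs_size = (x) },
 *	#include <linux/kmalloc_sizes.h>
 *		CACHE(ULONG_MAX)
 *	#undef CACHE
 *	};
 */
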
void *kmem_cache_alloc(struct kmem_cache *, gfp_t);
void *__kmalloc(size_t size, gfp_t flags);

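/*
 * Usage sketch, illustrative only ("my_cache" and "struct my_obj" are made-up
 * names): a dedicated cache is created with kmem_cache_create(), declared in
 * <linux/slab.h>, and objects are then allocated and freed through it:
 *
 *	struct kmem_cache *my_cache;
 *
 *	my_cache = kmem_cache_create("my_obj", sizeof(struct my_obj), 0,
 *				     SLAB_HWCACHE_ALIGN, NULL);
 *	obj = kmem_cache_alloc(my_cache, GFP_KERNEL);
 *	...
 *	kmem_cache_free(my_cache, obj);
 */
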
#ifdef CONFIG_TRACING
extern void *kmem_cache_alloc_trace(size_t size,
				    struct kmem_cache *cachep, gfp_t flags);
extern size_t slab_buffer_size(struct kmem_cache *cachep);
#else
static __always_inline void *
kmem_cache_alloc_trace(size_t size, struct kmem_cache *cachep, gfp_t flags)
{
	return kmem_cache_alloc(cachep, flags);
}

static inline size_t slab_buffer_size(struct kmem_cache *cachep)
{
	return 0;
}
#endif

static __always_inline void *kmalloc(size_t size, gfp_t flags)
{
	struct kmem_cache *cachep;
	void *ret;

	if (__builtin_constant_p(size)) {
		int i = 0;

		if (!size)
			return ZERO_SIZE_PTR;

#define CACHE(x) \
		if (size <= x) \
			goto found; \
		else \
			i++;
#include <linux/kmalloc_sizes.h>
#undef CACHE
		return NULL;
found:
#ifdef CONFIG_ZONE_DMA
		if (flags & GFP_DMA)
			cachep = malloc_sizes[i].cs_dmacachep;
		else
#endif
			cachep = malloc_sizes[i].cs_cachep;

		ret = kmem_cache_alloc_trace(size, cachep, flags);

		return ret;
	}
	return __kmalloc(size, flags);
}
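
/*
 * Usage sketch, illustrative only: with a compile-time-constant size the
 * if/goto chain above folds away and kmalloc() reduces to a direct
 * kmem_cache_alloc_trace() on the matching general cache; a size only known
 * at runtime takes the __kmalloc() path instead:
 *
 *	p = kmalloc(sizeof(*p), GFP_KERNEL);	constant: general cache picked
 *	buf = kmalloc(len, GFP_KERNEL);		variable: __kmalloc(len, flags)
 */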

#ifdef CONFIG_NUMA
extern void *__kmalloc_node(size_t size, gfp_t flags, int node);
extern void *kmem_cache_alloc_node(struct kmem_cache *, gfp_t flags, int node);

#ifdef CONFIG_TRACING
extern void *kmem_cache_alloc_node_trace(size_t size,
					 struct kmem_cache *cachep,
					 gfp_t flags,
					 int nodeid);
#else
static __always_inline void *
kmem_cache_alloc_node_trace(size_t size,
			    struct kmem_cache *cachep,
			    gfp_t flags,
			    int nodeid)
{
	return kmem_cache_alloc_node(cachep, flags, nodeid);
}
#endif

static __always_inline void *kmalloc_node(size_t size, gfp_t flags, int node)
{
	struct kmem_cache *cachep;

	if (__builtin_constant_p(size)) {
		int i = 0;

		if (!size)
			return ZERO_SIZE_PTR;

#define CACHE(x) \
		if (size <= x) \
			goto found; \
		else \
			i++;
#include <linux/kmalloc_sizes.h>
#undef CACHE
		return NULL;
found:
#ifdef CONFIG_ZONE_DMA
		if (flags & GFP_DMA)
			cachep = malloc_sizes[i].cs_dmacachep;
		else
#endif
			cachep = malloc_sizes[i].cs_cachep;

		return kmem_cache_alloc_node_trace(size, cachep, flags, node);
	}
	return __kmalloc_node(size, flags, node);
}
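
/*
 * Usage sketch, illustrative only: kmalloc_node() follows the same
 * constant-size fast path as kmalloc() above but allocates from the
 * requested NUMA node, e.g. to keep per-CPU data local:
 *
 *	data = kmalloc_node(sizeof(*data), GFP_KERNEL, cpu_to_node(cpu));
 */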

#endif	/* CONFIG_NUMA */

#endif	/* _LINUX_SLAB_DEF_H */