Merge branch 'for-2.6.30' of git://git.kernel.org/pub/scm/linux/kernel/git/broonie...
[linux-2.6-block.git] / include / linux / slab_def.h
CommitLineData
2e892f43
CL
1#ifndef _LINUX_SLAB_DEF_H
2#define _LINUX_SLAB_DEF_H
3
4/*
5 * Definitions unique to the original Linux SLAB allocator.
6 *
7 * What we provide here is a way to optimize the frequent kmalloc
8 * calls in the kernel by selecting the appropriate general cache
9 * if kmalloc was called with a size that can be established at
10 * compile time.
11 */
12
13#include <linux/init.h>
14#include <asm/page.h> /* kmalloc_sizes.h needs PAGE_SIZE */
15#include <asm/cache.h> /* kmalloc_sizes.h needs L1_CACHE_BYTES */
16#include <linux/compiler.h>
36994e58 17#include <trace/kmemtrace.h>
2e892f43
CL
18
/*
 * Size description struct for the general (kmalloc) caches.
 * malloc_sizes[] below holds one entry per power-of-two-ish size class
 * generated from kmalloc_sizes.h, in ascending cs_size order.
 */
struct cache_sizes {
	size_t cs_size;			/* max object size served by this class */
	struct kmem_cache *cs_cachep;	/* cache for normal-zone allocations */
#ifdef CONFIG_ZONE_DMA
	struct kmem_cache *cs_dmacachep; /* cache backed by ZONE_DMA memory */
#endif
};
/* Defined in mm/slab.c; indexed by the CACHE() expansion in kmalloc(). */
extern struct cache_sizes malloc_sizes[];
28
/* Allocator entry points implemented in mm/slab.c. */
void *kmem_cache_alloc(struct kmem_cache *, gfp_t);
void *__kmalloc(size_t size, gfp_t flags);
36555751
EGM
#ifdef CONFIG_KMEMTRACE
/* Tracing build: real, uninstrumented implementations live in mm/slab.c. */
extern void *kmem_cache_alloc_notrace(struct kmem_cache *cachep, gfp_t flags);
extern size_t slab_buffer_size(struct kmem_cache *cachep);
#else
/*
 * kmem_cache_alloc_notrace - allocate without emitting a kmemtrace event.
 * With kmemtrace compiled out this is simply kmem_cache_alloc(); kmalloc()
 * below emits its own trace_kmalloc() event instead.
 */
static __always_inline void *
kmem_cache_alloc_notrace(struct kmem_cache *cachep, gfp_t flags)
{
	return kmem_cache_alloc(cachep, flags);
}
/*
 * slab_buffer_size - size of the buffer actually backing @cachep's objects.
 * Only consumed by the trace hooks; returns 0 when tracing is disabled.
 */
static inline size_t slab_buffer_size(struct kmem_cache *cachep)
{
	return 0;
}
#endif
46
/*
 * kmalloc - allocate memory
 * @size: number of bytes required
 * @flags: allocation type (GFP_* mask)
 *
 * Fast path for the common case where @size is a compile-time constant:
 * the matching general cache is selected at compile time and the request
 * goes straight to kmem_cache_alloc_notrace(), bypassing the runtime
 * size lookup in __kmalloc().
 */
static __always_inline void *kmalloc(size_t size, gfp_t flags)
{
	struct kmem_cache *cachep;
	void *ret;

	if (__builtin_constant_p(size)) {
		int i = 0;

		if (!size)
			return ZERO_SIZE_PTR;

		/*
		 * kmalloc_sizes.h expands CACHE(x) once per size class in
		 * ascending order, so the first match is the smallest cache
		 * that fits; i ends up as the index into malloc_sizes[].
		 * The whole chain folds to a constant under -O.
		 */
#define CACHE(x) \
	if (size <= x) \
		goto found; \
	else \
		i++;
#include <linux/kmalloc_sizes.h>
#undef CACHE
		/* Constant size larger than the largest general cache. */
		return NULL;
found:
#ifdef CONFIG_ZONE_DMA
		if (flags & GFP_DMA)
			cachep = malloc_sizes[i].cs_dmacachep;
		else
#endif
			cachep = malloc_sizes[i].cs_cachep;

		ret = kmem_cache_alloc_notrace(cachep, flags);

		/* Emit one event here rather than in the _notrace callee. */
		trace_kmalloc(_THIS_IP_, ret,
			      size, slab_buffer_size(cachep), flags);

		return ret;
	}
	/* Non-constant size: generic slow path does the lookup at runtime. */
	return __kmalloc(size, flags);
}
83
2e892f43
CL
#ifdef CONFIG_NUMA
/* Node-aware entry points implemented in mm/slab.c. */
extern void *__kmalloc_node(size_t size, gfp_t flags, int node);
extern void *kmem_cache_alloc_node(struct kmem_cache *, gfp_t flags, int node);

#ifdef CONFIG_KMEMTRACE
/* Tracing build: uninstrumented implementation lives in mm/slab.c. */
extern void *kmem_cache_alloc_node_notrace(struct kmem_cache *cachep,
					   gfp_t flags,
					   int nodeid);
#else
/*
 * kmem_cache_alloc_node_notrace - node-aware allocation with no kmemtrace
 * event. With kmemtrace compiled out this is kmem_cache_alloc_node();
 * kmalloc_node() below emits its own trace_kmalloc_node() event.
 */
static __always_inline void *
kmem_cache_alloc_node_notrace(struct kmem_cache *cachep,
			      gfp_t flags,
			      int nodeid)
{
	return kmem_cache_alloc_node(cachep, flags, nodeid);
}
#endif
101
/*
 * kmalloc_node - allocate memory from a specific NUMA node
 * @size: number of bytes required
 * @flags: allocation type (GFP_* mask)
 * @node: NUMA node id to allocate from
 *
 * Node-aware mirror of kmalloc(): for a compile-time-constant @size the
 * general cache index is resolved at compile time via the CACHE()
 * expansion of kmalloc_sizes.h; otherwise fall back to __kmalloc_node().
 */
static __always_inline void *kmalloc_node(size_t size, gfp_t flags, int node)
{
	struct kmem_cache *cachep;
	void *ret;

	if (__builtin_constant_p(size)) {
		int i = 0;

		if (!size)
			return ZERO_SIZE_PTR;

		/*
		 * Size classes are listed in ascending order, so the first
		 * CACHE(x) that fits selects the smallest suitable general
		 * cache; i is its index into malloc_sizes[].
		 */
#define CACHE(x) \
	if (size <= x) \
		goto found; \
	else \
		i++;
#include <linux/kmalloc_sizes.h>
#undef CACHE
		/* Constant size larger than the largest general cache. */
		return NULL;
found:
#ifdef CONFIG_ZONE_DMA
		if (flags & GFP_DMA)
			cachep = malloc_sizes[i].cs_dmacachep;
		else
#endif
			cachep = malloc_sizes[i].cs_cachep;

		ret = kmem_cache_alloc_node_notrace(cachep, flags, node);

		/* Emit one event here rather than in the _notrace callee. */
		trace_kmalloc_node(_THIS_IP_, ret,
				   size, slab_buffer_size(cachep),
				   flags, node);

		return ret;
	}
	/* Non-constant size: generic node-aware slow path. */
	return __kmalloc_node(size, flags, node);
}
139
140#endif /* CONFIG_NUMA */
141
142#endif /* _LINUX_SLAB_DEF_H */