#ifndef MM_SLAB_H
#define MM_SLAB_H
/*
 * Internal slab definitions
 */

/*
 * State of the slab allocator.
 *
 * This is used to describe the states of the allocator during bootup.
 * Allocators use this to gradually bootstrap themselves. Most allocators
 * have the problem that the structures used for managing slab caches are
 * allocated from slab caches themselves.
 */
enum slab_state {
	DOWN,			/* No slab functionality yet */
	PARTIAL,		/* SLUB: kmem_cache_node available */
	PARTIAL_ARRAYCACHE,	/* SLAB: kmalloc size for arraycache available */
	PARTIAL_L3,		/* SLAB: kmalloc size for l3 struct available */
	UP,			/* Slab caches usable but not all extras yet */
	FULL			/* Everything is working */
};

extern enum slab_state slab_state;

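/*
 * Illustrative sketch (not part of this header): code that must not touch
 * the allocator too early can gate on the bootstrap state. For example,
 * the allocators' slab_is_available() helper is essentially:
 *
 *	int slab_is_available(void)
 *	{
 *		return slab_state >= UP;
 *	}
 */
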
/* The slab cache mutex protects the management structures during changes */
extern struct mutex slab_mutex;

/* The list of all slab caches on the system */
extern struct list_head slab_caches;

/* The slab cache that manages slab cache information */
extern struct kmem_cache *kmem_cache;

unsigned long calculate_alignment(unsigned long flags,
		unsigned long align, unsigned long size);

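/*
 * Illustrative sketch (assumed caller, modeled on mm/slab_common.c of
 * this era): cache creation resolves the final object alignment before
 * handing the cache to the allocator:
 *
 *	s->align = calculate_alignment(flags, align, size);
 *
 * With SLAB_HWCACHE_ALIGN set, the result follows the hardware cache
 * line size, halved while the object fits in half a line; the returned
 * alignment is always at least pointer-sized.
 */
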
/* Functions provided by the slab allocators */
extern int __kmem_cache_create(struct kmem_cache *, unsigned long flags);

extern struct kmem_cache *create_kmalloc_cache(const char *name, size_t size,
			unsigned long flags);
extern void create_boot_cache(struct kmem_cache *, const char *name,
			size_t size, unsigned long flags);

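/*
 * Illustrative sketch (assumed call site, modeled on SLUB's bootstrap):
 * the very first caches are carved out of statically allocated
 * kmem_cache structures, since no cache exists yet to allocate them from:
 *
 *	static struct kmem_cache boot_kmem_cache;
 *
 *	create_boot_cache(&boot_kmem_cache, "kmem_cache",
 *			  sizeof(struct kmem_cache), SLAB_HWCACHE_ALIGN);
 *
 * Once the boot caches are up, slab_state advances and the ordinary
 * create_kmalloc_cache()/kmem_cache_create() paths take over.
 */
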
struct mem_cgroup;
#ifdef CONFIG_SLUB
struct kmem_cache *
__kmem_cache_alias(struct mem_cgroup *memcg, const char *name, size_t size,
		   size_t align, unsigned long flags, void (*ctor)(void *));
#else
static inline struct kmem_cache *
__kmem_cache_alias(struct mem_cgroup *memcg, const char *name, size_t size,
		   size_t align, unsigned long flags, void (*ctor)(void *))
{ return NULL; }
#endif

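/*
 * Illustrative sketch (assumed caller in mm/slab_common.c): before
 * creating a new cache, kmem_cache_create() gives the allocator a chance
 * to reuse a compatible existing cache. Only SLUB implements this
 * merging; the stub above always returns NULL:
 *
 *	s = __kmem_cache_alias(memcg, name, size, align, flags, ctor);
 *	if (s)
 *		goto out_locked;	// reuse the existing cache
 */
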
/* Legal flag mask for kmem_cache_create(), for various configurations */
#define SLAB_CORE_FLAGS (SLAB_HWCACHE_ALIGN | SLAB_CACHE_DMA | SLAB_PANIC | \
			 SLAB_DESTROY_BY_RCU | SLAB_DEBUG_OBJECTS)

#if defined(CONFIG_DEBUG_SLAB)
#define SLAB_DEBUG_FLAGS (SLAB_RED_ZONE | SLAB_POISON | SLAB_STORE_USER)
#elif defined(CONFIG_SLUB_DEBUG)
#define SLAB_DEBUG_FLAGS (SLAB_RED_ZONE | SLAB_POISON | SLAB_STORE_USER | \
			  SLAB_TRACE | SLAB_DEBUG_FREE)
#else
#define SLAB_DEBUG_FLAGS (0)
#endif

#if defined(CONFIG_SLAB)
#define SLAB_CACHE_FLAGS (SLAB_MEM_SPREAD | SLAB_NOLEAKTRACE | \
			  SLAB_RECLAIM_ACCOUNT | SLAB_TEMPORARY | SLAB_NOTRACK)
#elif defined(CONFIG_SLUB)
#define SLAB_CACHE_FLAGS (SLAB_NOLEAKTRACE | SLAB_RECLAIM_ACCOUNT | \
			  SLAB_TEMPORARY | SLAB_NOTRACK)
#else
#define SLAB_CACHE_FLAGS (0)
#endif

#define CACHE_CREATE_MASK (SLAB_CORE_FLAGS | SLAB_DEBUG_FLAGS | SLAB_CACHE_FLAGS)

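/*
 * Illustrative sketch (assumed usage in the creation path): the caller's
 * flags are masked down to the set this configuration understands, so
 * flags that belong to a different allocator are silently dropped:
 *
 *	flags &= CACHE_CREATE_MASK;
 */
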
int __kmem_cache_shutdown(struct kmem_cache *);

struct seq_file;
struct file;

struct slabinfo {
	unsigned long active_objs;
	unsigned long num_objs;
	unsigned long active_slabs;
	unsigned long num_slabs;
	unsigned long shared_avail;
	unsigned int limit;
	unsigned int batchcount;
	unsigned int shared;
	unsigned int objects_per_slab;
	unsigned int cache_order;
};

void get_slabinfo(struct kmem_cache *s, struct slabinfo *sinfo);
void slabinfo_show_stats(struct seq_file *m, struct kmem_cache *s);
ssize_t slabinfo_write(struct file *file, const char __user *buffer,
		       size_t count, loff_t *ppos);
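
/*
 * For reference: these fields back one line of /proc/slabinfo, which in
 * the version 2.x format is laid out roughly as:
 *
 *	name <active_objs> <num_objs> <objsize> <objperslab> <pagesperslab>
 *	  : tunables <limit> <batchcount> <sharedfactor>
 *	  : slabdata <active_slabs> <num_slabs> <sharedavail>
 */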

#ifdef CONFIG_MEMCG_KMEM
static inline bool is_root_cache(struct kmem_cache *s)
{
	return !s->memcg_params || s->memcg_params->is_root_cache;
}

static inline bool cache_match_memcg(struct kmem_cache *cachep,
				     struct mem_cgroup *memcg)
{
	return (is_root_cache(cachep) && !memcg) ||
				(cachep->memcg_params->memcg == memcg);
}

static inline void memcg_bind_pages(struct kmem_cache *s, int order)
{
	if (!is_root_cache(s))
		atomic_add(1 << order, &s->memcg_params->nr_pages);
}

static inline void memcg_release_pages(struct kmem_cache *s, int order)
{
	if (is_root_cache(s))
		return;

	if (atomic_sub_and_test((1 << order), &s->memcg_params->nr_pages))
		mem_cgroup_destroy_cache(s);
}

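/*
 * Illustrative sketch (assumed call sites, modeled on the allocators of
 * this era): the slab page allocation and free paths keep nr_pages in
 * sync, so a dead memcg cache can be destroyed when its last page goes:
 *
 *	memcg_bind_pages(s, order);	// after allocating a slab page
 *	...
 *	memcg_release_pages(s, order);	// before freeing a slab page
 */
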
static inline bool slab_equal_or_root(struct kmem_cache *s,
				      struct kmem_cache *p)
{
	return (p == s) ||
		(s->memcg_params && (p == s->memcg_params->root_cache));
}

/*
 * memcg caches get a suffix appended to their name, because no two
 * caches in the system may share a name. When printing them locally,
 * though, it is better to refer to them by the base name.
 */
static inline const char *cache_name(struct kmem_cache *s)
{
	if (!is_root_cache(s))
		return s->memcg_params->root_cache->name;
	return s->name;
}

static inline struct kmem_cache *cache_from_memcg(struct kmem_cache *s, int idx)
{
	return s->memcg_params->memcg_caches[idx];
}

static inline struct kmem_cache *memcg_root_cache(struct kmem_cache *s)
{
	if (is_root_cache(s))
		return s;
	return s->memcg_params->root_cache;
}
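
/*
 * Illustrative sketch (assumed caller, loosely modeled on the memcg kmem
 * paths in mm/memcontrol.c): a per-memcg clone of a root cache is found
 * through the cgroup's kmem index:
 *
 *	idx = memcg_cache_id(memcg);
 *	s = cache_from_memcg(root_cachep, idx);
 */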
#else
static inline bool is_root_cache(struct kmem_cache *s)
{
	return true;
}

static inline bool cache_match_memcg(struct kmem_cache *cachep,
				     struct mem_cgroup *memcg)
{
	return true;
}

static inline void memcg_bind_pages(struct kmem_cache *s, int order)
{
}

static inline void memcg_release_pages(struct kmem_cache *s, int order)
{
}

static inline bool slab_equal_or_root(struct kmem_cache *s,
				      struct kmem_cache *p)
{
	return true;
}

static inline const char *cache_name(struct kmem_cache *s)
{
	return s->name;
}

static inline struct kmem_cache *cache_from_memcg(struct kmem_cache *s, int idx)
{
	return NULL;
}

static inline struct kmem_cache *memcg_root_cache(struct kmem_cache *s)
{
	return s;
}
#endif

static inline struct kmem_cache *cache_from_obj(struct kmem_cache *s, void *x)
{
	struct kmem_cache *cachep;
	struct page *page;

	/*
	 * When kmemcg is not being used, both assignments should return the
	 * same value, but we don't want to pay the assignment price in that
	 * case. If it is not compiled in, the compiler should be smart enough
	 * not to do even the assignment. In that case, slab_equal_or_root
	 * will also be a constant.
	 */
	if (!memcg_kmem_enabled() && !unlikely(s->flags & SLAB_DEBUG_FREE))
		return s;

	page = virt_to_head_page(x);
	cachep = page->slab_cache;
	if (slab_equal_or_root(cachep, s))
		return cachep;

	pr_err("%s: Wrong slab cache. %s but object is from %s\n",
	       __func__, cachep->name, s->name);
	WARN_ON_ONCE(1);
	return s;
}
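
/*
 * Illustrative sketch (assumed caller, modeled on kmem_cache_free() in
 * the allocators): the free path re-resolves the cache from the object's
 * page, so freeing a memcg-cloned object through the root cache still
 * lands in the right cache:
 *
 *	void kmem_cache_free(struct kmem_cache *s, void *x)
 *	{
 *		s = cache_from_obj(s, x);
 *		...
 *	}
 */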
#endif