/* SPDX-License-Identifier: GPL-2.0 */
#ifndef MM_SLAB_H
#define MM_SLAB_H
/*
 * Internal slab definitions
 */

#ifdef CONFIG_SLOB
/*
 * Common fields provided in kmem_cache by all slab allocators
 * This struct is either used directly by the allocator (SLOB)
 * or the allocator must include definitions for all fields
 * provided in kmem_cache_common in their definition of kmem_cache.
 *
 * Once we can do anonymous structs (C11 standard) we could put an
 * anonymous struct definition in these allocators so that the
 * separate allocations in the kmem_cache structure of SLAB and
 * SLUB are no longer needed.
 */
struct kmem_cache {
        unsigned int object_size;       /* The original size of the object */
        unsigned int size;              /* The aligned/padded/added-on size */
        unsigned int align;             /* Alignment as calculated */
        slab_flags_t flags;             /* Active flags on the slab */
        unsigned int useroffset;        /* Usercopy region offset */
        unsigned int usersize;          /* Usercopy region size */
        const char *name;               /* Slab name for sysfs */
        int refcount;                   /* Use counter */
        void (*ctor)(void *);           /* Called on object slot creation */
        struct list_head list;          /* List of all slab caches on the system */
};

#endif /* CONFIG_SLOB */

#ifdef CONFIG_SLAB
#include <linux/slab_def.h>
#endif

#ifdef CONFIG_SLUB
#include <linux/slub_def.h>
#endif

#include <linux/memcontrol.h>
#include <linux/fault-inject.h>
#include <linux/kasan.h>
#include <linux/kmemleak.h>
#include <linux/random.h>
#include <linux/sched/mm.h>

/*
 * State of the slab allocator.
 *
 * This is used to describe the states of the allocator during bootup.
 * Allocators use this to gradually bootstrap themselves. Most allocators
 * have the problem that the structures used for managing slab caches are
 * allocated from slab caches themselves.
 */
enum slab_state {
        DOWN,                   /* No slab functionality yet */
        PARTIAL,                /* SLUB: kmem_cache_node available */
        PARTIAL_NODE,           /* SLAB: kmalloc size for node struct available */
        UP,                     /* Slab caches usable but not all extras yet */
        FULL                    /* Everything is working */
};

extern enum slab_state slab_state;

/* The slab cache mutex protects the management structures during changes */
extern struct mutex slab_mutex;

/* The list of all slab caches on the system */
extern struct list_head slab_caches;

/* The slab cache that manages slab cache information */
extern struct kmem_cache *kmem_cache;

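/*
 * Note that kmem_cache itself is allocated from a slab cache: this is the
 * bootstrap problem described above for enum slab_state. create_boot_cache()
 * (declared below) exists so that the very first caches can be set up in
 * statically allocated struct kmem_cache instances before kmem_cache_create()
 * is usable.
 */
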
/* A table of kmalloc cache names and sizes */
extern const struct kmalloc_info_struct {
        const char *name;
        unsigned int size;
} kmalloc_info[];

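/*
 * Each kmalloc_info[] entry pairs a cache name ("kmalloc-<size>") with its
 * object size; create_kmalloc_caches(), declared below, walks this table when
 * it populates the kmalloc cache array during boot.
 */
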
#ifndef CONFIG_SLOB
/* Kmalloc array related functions */
void setup_kmalloc_cache_index_table(void);
void create_kmalloc_caches(slab_flags_t);

/* Find the kmalloc slab corresponding to a certain size */
struct kmem_cache *kmalloc_slab(size_t, gfp_t);
#endif


/* Functions provided by the slab allocators */
int __kmem_cache_create(struct kmem_cache *, slab_flags_t flags);

struct kmem_cache *create_kmalloc_cache(const char *name, unsigned int size,
                        slab_flags_t flags, unsigned int useroffset,
                        unsigned int usersize);
extern void create_boot_cache(struct kmem_cache *, const char *name,
                        unsigned int size, slab_flags_t flags,
                        unsigned int useroffset, unsigned int usersize);

int slab_unmergeable(struct kmem_cache *s);
struct kmem_cache *find_mergeable(unsigned size, unsigned align,
                slab_flags_t flags, const char *name, void (*ctor)(void *));
#ifndef CONFIG_SLOB
struct kmem_cache *
__kmem_cache_alias(const char *name, unsigned int size, unsigned int align,
                   slab_flags_t flags, void (*ctor)(void *));

slab_flags_t kmem_cache_flags(unsigned int object_size,
        slab_flags_t flags, const char *name,
        void (*ctor)(void *));
#else
static inline struct kmem_cache *
__kmem_cache_alias(const char *name, unsigned int size, unsigned int align,
                   slab_flags_t flags, void (*ctor)(void *))
{ return NULL; }

static inline slab_flags_t kmem_cache_flags(unsigned int object_size,
        slab_flags_t flags, const char *name,
        void (*ctor)(void *))
{
        return flags;
}
#endif


/* Legal flag mask for kmem_cache_create(), for various configurations */
#define SLAB_CORE_FLAGS (SLAB_HWCACHE_ALIGN | SLAB_CACHE_DMA | \
                         SLAB_CACHE_DMA32 | SLAB_PANIC | \
                         SLAB_TYPESAFE_BY_RCU | SLAB_DEBUG_OBJECTS)

#if defined(CONFIG_DEBUG_SLAB)
#define SLAB_DEBUG_FLAGS (SLAB_RED_ZONE | SLAB_POISON | SLAB_STORE_USER)
#elif defined(CONFIG_SLUB_DEBUG)
#define SLAB_DEBUG_FLAGS (SLAB_RED_ZONE | SLAB_POISON | SLAB_STORE_USER | \
                          SLAB_TRACE | SLAB_CONSISTENCY_CHECKS)
#else
#define SLAB_DEBUG_FLAGS (0)
#endif

#if defined(CONFIG_SLAB)
#define SLAB_CACHE_FLAGS (SLAB_MEM_SPREAD | SLAB_NOLEAKTRACE | \
                          SLAB_RECLAIM_ACCOUNT | SLAB_TEMPORARY | \
                          SLAB_ACCOUNT)
#elif defined(CONFIG_SLUB)
#define SLAB_CACHE_FLAGS (SLAB_NOLEAKTRACE | SLAB_RECLAIM_ACCOUNT | \
                          SLAB_TEMPORARY | SLAB_ACCOUNT)
#else
#define SLAB_CACHE_FLAGS (0)
#endif

/* Common flags available with current configuration */
#define CACHE_CREATE_MASK (SLAB_CORE_FLAGS | SLAB_DEBUG_FLAGS | SLAB_CACHE_FLAGS)

/* Common flags permitted for kmem_cache_create */
#define SLAB_FLAGS_PERMITTED (SLAB_CORE_FLAGS | \
                              SLAB_RED_ZONE | \
                              SLAB_POISON | \
                              SLAB_STORE_USER | \
                              SLAB_TRACE | \
                              SLAB_CONSISTENCY_CHECKS | \
                              SLAB_MEM_SPREAD | \
                              SLAB_NOLEAKTRACE | \
                              SLAB_RECLAIM_ACCOUNT | \
                              SLAB_TEMPORARY | \
                              SLAB_ACCOUNT)

bool __kmem_cache_empty(struct kmem_cache *);
int __kmem_cache_shutdown(struct kmem_cache *);
void __kmem_cache_release(struct kmem_cache *);
int __kmem_cache_shrink(struct kmem_cache *);
void __kmemcg_cache_deactivate(struct kmem_cache *s);
void __kmemcg_cache_deactivate_after_rcu(struct kmem_cache *s);
void slab_kmem_cache_release(struct kmem_cache *);

struct seq_file;
struct file;

struct slabinfo {
        unsigned long active_objs;
        unsigned long num_objs;
        unsigned long active_slabs;
        unsigned long num_slabs;
        unsigned long shared_avail;
        unsigned int limit;
        unsigned int batchcount;
        unsigned int shared;
        unsigned int objects_per_slab;
        unsigned int cache_order;
};

void get_slabinfo(struct kmem_cache *s, struct slabinfo *sinfo);
void slabinfo_show_stats(struct seq_file *m, struct kmem_cache *s);
ssize_t slabinfo_write(struct file *file, const char __user *buffer,
                       size_t count, loff_t *ppos);

/*
 * Generic implementation of bulk operations
 * These are useful for situations in which the allocator cannot
 * perform optimizations. In that case segments of the object list
 * may be allocated or freed using these operations.
 */
void __kmem_cache_free_bulk(struct kmem_cache *, size_t, void **);
int __kmem_cache_alloc_bulk(struct kmem_cache *, gfp_t, size_t, void **);

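/*
 * Slab pages are accounted either as reclaimable or as unreclaimable slab
 * memory in the per-node vmstat counters; cache_vmstat_idx() selects the
 * counter based on whether the cache was created with SLAB_RECLAIM_ACCOUNT.
 */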
static inline int cache_vmstat_idx(struct kmem_cache *s)
{
        return (s->flags & SLAB_RECLAIM_ACCOUNT) ?
                NR_SLAB_RECLAIMABLE : NR_SLAB_UNRECLAIMABLE;
}

#ifdef CONFIG_MEMCG_KMEM

/* List of all root caches. */
extern struct list_head         slab_root_caches;
#define root_caches_node        memcg_params.__root_caches_node

/*
 * Iterate over all memcg caches of the given root cache. The caller must hold
 * slab_mutex.
 */
#define for_each_memcg_cache(iter, root) \
        list_for_each_entry(iter, &(root)->memcg_params.children, \
                            memcg_params.children_node)

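/*
 * A minimal usage sketch (illustrative only, not part of the original
 * header), assuming "root" is a root kmem_cache:
 *
 *      struct kmem_cache *c;
 *
 *      mutex_lock(&slab_mutex);
 *      for_each_memcg_cache(c, root)
 *              pr_info("child cache: %s\n", c->name);
 *      mutex_unlock(&slab_mutex);
 */
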
static inline bool is_root_cache(struct kmem_cache *s)
{
        return !s->memcg_params.root_cache;
}

static inline bool slab_equal_or_root(struct kmem_cache *s,
                                      struct kmem_cache *p)
{
        return p == s || p == s->memcg_params.root_cache;
}

/*
 * We use suffixes to the name in memcg because we can't have caches
 * created in the system with the same name. But when we print them
 * locally, it is better to refer to them by the base name.
 */
static inline const char *cache_name(struct kmem_cache *s)
{
        if (!is_root_cache(s))
                s = s->memcg_params.root_cache;
        return s->name;
}

static inline struct kmem_cache *memcg_root_cache(struct kmem_cache *s)
{
        if (is_root_cache(s))
                return s;
        return s->memcg_params.root_cache;
}

/*
 * Expects a pointer to a slab page. Please note that the PageSlab() check
 * isn't sufficient, as it returns true also for tail compound slab pages,
 * which do not have the slab_cache pointer set.
 * So this function assumes that the page can pass PageHead() and PageSlab()
 * checks.
 *
 * The kmem_cache can be reparented asynchronously. The caller must ensure
 * the memcg lifetime, e.g. by taking rcu_read_lock() or cgroup_mutex.
 */
static inline struct mem_cgroup *memcg_from_slab_page(struct page *page)
{
        struct kmem_cache *s;

        s = READ_ONCE(page->slab_cache);
        if (s && !is_root_cache(s))
                return READ_ONCE(s->memcg_params.memcg);

        return NULL;
}

/*
 * Charge the slab page belonging to the non-root kmem_cache.
 * Can be called for non-root kmem_caches only.
 */
static __always_inline int memcg_charge_slab(struct page *page,
                                             gfp_t gfp, int order,
                                             struct kmem_cache *s)
{
        struct mem_cgroup *memcg;
        struct lruvec *lruvec;
        int ret;

        rcu_read_lock();
        memcg = READ_ONCE(s->memcg_params.memcg);
        while (memcg && !css_tryget_online(&memcg->css))
                memcg = parent_mem_cgroup(memcg);
        rcu_read_unlock();

        if (unlikely(!memcg || mem_cgroup_is_root(memcg))) {
                mod_node_page_state(page_pgdat(page), cache_vmstat_idx(s),
                                    (1 << order));
                percpu_ref_get_many(&s->memcg_params.refcnt, 1 << order);
                return 0;
        }

        ret = memcg_kmem_charge_memcg(page, gfp, order, memcg);
        if (ret)
                goto out;

        lruvec = mem_cgroup_lruvec(page_pgdat(page), memcg);
        mod_lruvec_state(lruvec, cache_vmstat_idx(s), 1 << order);

        /* transfer try_charge() page references to kmem_cache */
        percpu_ref_get_many(&s->memcg_params.refcnt, 1 << order);
        css_put_many(&memcg->css, 1 << order);
out:
        css_put(&memcg->css);
        return ret;
}

/*
 * Uncharge a slab page belonging to a non-root kmem_cache.
 * Can be called for non-root kmem_caches only.
 */
static __always_inline void memcg_uncharge_slab(struct page *page, int order,
                                                struct kmem_cache *s)
{
        struct mem_cgroup *memcg;
        struct lruvec *lruvec;

        rcu_read_lock();
        memcg = READ_ONCE(s->memcg_params.memcg);
        if (likely(!mem_cgroup_is_root(memcg))) {
                lruvec = mem_cgroup_lruvec(page_pgdat(page), memcg);
                mod_lruvec_state(lruvec, cache_vmstat_idx(s), -(1 << order));
                memcg_kmem_uncharge_memcg(page, order, memcg);
        } else {
                mod_node_page_state(page_pgdat(page), cache_vmstat_idx(s),
                                    -(1 << order));
        }
        rcu_read_unlock();

        percpu_ref_put_many(&s->memcg_params.refcnt, 1 << order);
}

extern void slab_init_memcg_params(struct kmem_cache *);
extern void memcg_link_cache(struct kmem_cache *s, struct mem_cgroup *memcg);

#else /* CONFIG_MEMCG_KMEM */

/* If !memcg, all caches are root. */
#define slab_root_caches        slab_caches
#define root_caches_node        list

#define for_each_memcg_cache(iter, root) \
        for ((void)(iter), (void)(root); 0; )

static inline bool is_root_cache(struct kmem_cache *s)
{
        return true;
}

static inline bool slab_equal_or_root(struct kmem_cache *s,
                                      struct kmem_cache *p)
{
        return s == p;
}

static inline const char *cache_name(struct kmem_cache *s)
{
        return s->name;
}

static inline struct kmem_cache *memcg_root_cache(struct kmem_cache *s)
{
        return s;
}

static inline struct mem_cgroup *memcg_from_slab_page(struct page *page)
{
        return NULL;
}

static inline int memcg_charge_slab(struct page *page, gfp_t gfp, int order,
                                    struct kmem_cache *s)
{
        return 0;
}

static inline void memcg_uncharge_slab(struct page *page, int order,
                                       struct kmem_cache *s)
{
}

static inline void slab_init_memcg_params(struct kmem_cache *s)
{
}

static inline void memcg_link_cache(struct kmem_cache *s,
                                    struct mem_cgroup *memcg)
{
}

#endif /* CONFIG_MEMCG_KMEM */

static inline struct kmem_cache *virt_to_cache(const void *obj)
{
        struct page *page;

        page = virt_to_head_page(obj);
        if (WARN_ONCE(!PageSlab(page), "%s: Object is not a Slab page!\n",
                                        __func__))
                return NULL;
        return page->slab_cache;
}

static __always_inline int charge_slab_page(struct page *page,
                                            gfp_t gfp, int order,
                                            struct kmem_cache *s)
{
        if (is_root_cache(s)) {
                mod_node_page_state(page_pgdat(page), cache_vmstat_idx(s),
                                    1 << order);
                return 0;
        }

        return memcg_charge_slab(page, gfp, order, s);
}

static __always_inline void uncharge_slab_page(struct page *page, int order,
                                               struct kmem_cache *s)
{
        if (is_root_cache(s)) {
                mod_node_page_state(page_pgdat(page), cache_vmstat_idx(s),
                                    -(1 << order));
                return;
        }

        memcg_uncharge_slab(page, order, s);
}

static inline struct kmem_cache *cache_from_obj(struct kmem_cache *s, void *x)
{
        struct kmem_cache *cachep;

        /*
         * When kmemcg is not being used, both assignments should return the
         * same value. But we don't want to pay the assignment price in that
         * case. If it is not compiled in, the compiler should be smart enough
         * to not do even the assignment. In that case, slab_equal_or_root
         * will also be a constant.
         */
        if (!memcg_kmem_enabled() &&
            !IS_ENABLED(CONFIG_SLAB_FREELIST_HARDENED) &&
            !unlikely(s->flags & SLAB_CONSISTENCY_CHECKS))
                return s;

        cachep = virt_to_cache(x);
        WARN_ONCE(cachep && !slab_equal_or_root(cachep, s),
                  "%s: Wrong slab cache. %s but object is from %s\n",
                  __func__, s->name, cachep->name);
        return cachep;
}

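/*
 * Illustrative sketch (not part of the original header) of how the allocators
 * are expected to use cache_from_obj() on their free paths; the real
 * implementations live in slab.c/slub.c:
 *
 *      s = cache_from_obj(s, x);
 *      if (!s)
 *              return;
 *      ...free x into s...
 *
 * This way, freeing an object against the wrong cache is caught by the
 * WARN_ONCE() above instead of silently corrupting a freelist.
 */
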
static inline size_t slab_ksize(const struct kmem_cache *s)
{
#ifndef CONFIG_SLUB
        return s->object_size;

#else /* CONFIG_SLUB */
# ifdef CONFIG_SLUB_DEBUG
        /*
         * Debugging requires use of the padding between object
         * and whatever may come after it.
         */
        if (s->flags & (SLAB_RED_ZONE | SLAB_POISON))
                return s->object_size;
# endif
        if (s->flags & SLAB_KASAN)
                return s->object_size;
        /*
         * If we have the need to store the freelist pointer
         * back there or track user information then we can
         * only use the space before that information.
         */
        if (s->flags & (SLAB_TYPESAFE_BY_RCU | SLAB_STORE_USER))
                return s->inuse;
        /*
         * Else we can use all the padding etc for the allocation
         */
        return s->size;
#endif
}

static inline struct kmem_cache *slab_pre_alloc_hook(struct kmem_cache *s,
                                                     gfp_t flags)
{
        flags &= gfp_allowed_mask;

        fs_reclaim_acquire(flags);
        fs_reclaim_release(flags);

        might_sleep_if(gfpflags_allow_blocking(flags));

        if (should_failslab(s, flags))
                return NULL;

        if (memcg_kmem_enabled() &&
            ((flags & __GFP_ACCOUNT) || (s->flags & SLAB_ACCOUNT)))
                return memcg_kmem_get_cache(s);

        return s;
}

static inline void slab_post_alloc_hook(struct kmem_cache *s, gfp_t flags,
                                        size_t size, void **p)
{
        size_t i;

        flags &= gfp_allowed_mask;
        for (i = 0; i < size; i++) {
                p[i] = kasan_slab_alloc(s, p[i], flags);
                /* As p[i] might get tagged, call kmemleak hook after KASAN. */
                kmemleak_alloc_recursive(p[i], s->object_size, 1,
                                         s->flags, flags);
        }

        if (memcg_kmem_enabled())
                memcg_kmem_put_cache(s);
}

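/*
 * Sketch of the expected calling sequence for the two hooks above
 * (illustrative only; the real fast paths are in slab.c/slub.c):
 *
 *      s = slab_pre_alloc_hook(s, gfpflags);
 *      if (!s)
 *              return NULL;
 *      object = ...allocate from s...;
 *      slab_post_alloc_hook(s, gfpflags, 1, &object);
 *
 * Note that slab_pre_alloc_hook() may return a different (per-memcg) cache
 * than the one passed in; the post hook must be called on that cache.
 */
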
#ifndef CONFIG_SLOB
/*
 * The slab lists for all objects.
 */
struct kmem_cache_node {
        spinlock_t list_lock;

#ifdef CONFIG_SLAB
        struct list_head slabs_partial; /* partial list first, better asm code */
        struct list_head slabs_full;
        struct list_head slabs_free;
        unsigned long total_slabs;      /* length of all slab lists */
        unsigned long free_slabs;       /* length of free slab list only */
        unsigned long free_objects;
        unsigned int free_limit;
        unsigned int colour_next;       /* Per-node cache coloring */
        struct array_cache *shared;     /* shared per node */
        struct alien_cache **alien;     /* on other nodes */
        unsigned long next_reap;        /* updated without locking */
        int free_touched;               /* updated without locking */
#endif

#ifdef CONFIG_SLUB
        unsigned long nr_partial;
        struct list_head partial;
#ifdef CONFIG_SLUB_DEBUG
        atomic_long_t nr_slabs;
        atomic_long_t total_objects;
        struct list_head full;
#endif
#endif

};

static inline struct kmem_cache_node *get_node(struct kmem_cache *s, int node)
{
        return s->node[node];
}

/*
 * Iterator over all nodes. The body will be executed for each node that has
 * a kmem_cache_node structure allocated (which is true for all online nodes)
 */
#define for_each_kmem_cache_node(__s, __node, __n) \
        for (__node = 0; __node < nr_node_ids; __node++) \
                 if ((__n = get_node(__s, __node)))

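/*
 * A minimal usage sketch (illustrative only, not part of the original
 * header): summing the partial slabs of a SLUB cache across all nodes
 * might look like
 *
 *      struct kmem_cache_node *n;
 *      unsigned long nr_partial = 0;
 *      int node;
 *
 *      for_each_kmem_cache_node(s, node, n)
 *              nr_partial += n->nr_partial;
 */
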
#endif

void *slab_start(struct seq_file *m, loff_t *pos);
void *slab_next(struct seq_file *m, void *p, loff_t *pos);
void slab_stop(struct seq_file *m, void *p);
void *memcg_slab_start(struct seq_file *m, loff_t *pos);
void *memcg_slab_next(struct seq_file *m, void *p, loff_t *pos);
void memcg_slab_stop(struct seq_file *m, void *p);
int memcg_slab_show(struct seq_file *m, void *p);

#if defined(CONFIG_SLAB) || defined(CONFIG_SLUB_DEBUG)
void dump_unreclaimable_slab(void);
#else
static inline void dump_unreclaimable_slab(void)
{
}
#endif

void ___cache_free(struct kmem_cache *cache, void *x, unsigned long addr);

#ifdef CONFIG_SLAB_FREELIST_RANDOM
int cache_random_seq_create(struct kmem_cache *cachep, unsigned int count,
                        gfp_t gfp);
void cache_random_seq_destroy(struct kmem_cache *cachep);
#else
static inline int cache_random_seq_create(struct kmem_cache *cachep,
                                        unsigned int count, gfp_t gfp)
{
        return 0;
}
static inline void cache_random_seq_destroy(struct kmem_cache *cachep) { }
#endif /* CONFIG_SLAB_FREELIST_RANDOM */

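/*
 * init_on_alloc and init_on_free are static keys controlled by the
 * init_on_alloc=/init_on_free= boot parameters (and their Kconfig defaults).
 * The helpers below decide whether a given allocation or free needs to be
 * zeroed; caches with constructors, SLAB_TYPESAFE_BY_RCU or SLAB_POISON are
 * excluded because unconditional zeroing would be wrong or redundant there.
 */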
static inline bool slab_want_init_on_alloc(gfp_t flags, struct kmem_cache *c)
{
        if (static_branch_unlikely(&init_on_alloc)) {
                if (c->ctor)
                        return false;
                if (c->flags & (SLAB_TYPESAFE_BY_RCU | SLAB_POISON))
                        return flags & __GFP_ZERO;
                return true;
        }
        return flags & __GFP_ZERO;
}

static inline bool slab_want_init_on_free(struct kmem_cache *c)
{
        if (static_branch_unlikely(&init_on_free))
                return !(c->ctor ||
                         (c->flags & (SLAB_TYPESAFE_BY_RCU | SLAB_POISON)));
        return false;
}

#endif /* MM_SLAB_H */