/* SPDX-License-Identifier: GPL-2.0 */
#ifndef MM_SLAB_H
#define MM_SLAB_H

#include <linux/reciprocal_div.h>
#include <linux/list_lru.h>
#include <linux/local_lock.h>
#include <linux/random.h>
#include <linux/kobject.h>
#include <linux/sched/mm.h>
#include <linux/memcontrol.h>
#include <linux/kfence.h>
#include <linux/kasan.h>

/*
 * Internal slab definitions
 */

#ifdef CONFIG_64BIT
# ifdef system_has_cmpxchg128
# define system_has_freelist_aba()	system_has_cmpxchg128()
# define try_cmpxchg_freelist		try_cmpxchg128
# endif
#define this_cpu_try_cmpxchg_freelist	this_cpu_try_cmpxchg128
typedef u128 freelist_full_t;
#else /* CONFIG_64BIT */
# ifdef system_has_cmpxchg64
# define system_has_freelist_aba()	system_has_cmpxchg64()
# define try_cmpxchg_freelist		try_cmpxchg64
# endif
#define this_cpu_try_cmpxchg_freelist	this_cpu_try_cmpxchg64
typedef u64 freelist_full_t;
#endif /* CONFIG_64BIT */

#if defined(system_has_freelist_aba) && !defined(CONFIG_HAVE_ALIGNED_STRUCT_PAGE)
#undef system_has_freelist_aba
#endif

/*
 * Freelist pointer and counter to cmpxchg together, avoids the typical ABA
 * problems with cmpxchg of just a pointer.
 */
typedef union {
	struct {
		void *freelist;
		unsigned long counter;
	};
	freelist_full_t full;
} freelist_aba_t;
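/*
 * Example of the ABA problem this avoids: CPU A reads freelist head X and
 * stalls; CPU B allocates X plus another object, then frees X again, so the
 * head is X once more but now points to a different successor. A cmpxchg on
 * the bare pointer would wrongly succeed; pairing the pointer with a
 * transaction counter in one double-word cmpxchg makes the stale update fail.
 */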
/* Reuses the bits in struct page */
struct slab {
	unsigned long __page_flags;

	struct kmem_cache *slab_cache;
	union {
		struct {
			union {
				struct list_head slab_list;
#ifdef CONFIG_SLUB_CPU_PARTIAL
				struct {
					struct slab *next;
					int slabs;	/* Nr of slabs left */
				};
#endif
			};
			/* Double-word boundary */
			union {
				struct {
					void *freelist;		/* first free object */
					union {
						unsigned long counters;
						struct {
							unsigned inuse:16;
							unsigned objects:15;
							/*
							 * If slab debugging is enabled then the
							 * frozen bit can be reused to indicate
							 * that the slab was corrupted
							 */
							unsigned frozen:1;
						};
					};
				};
#ifdef system_has_freelist_aba
				freelist_aba_t freelist_counter;
#endif
			};
		};
		struct rcu_head rcu_head;
	};

	unsigned int __page_type;
	atomic_t __page_refcount;
#ifdef CONFIG_SLAB_OBJ_EXT
	unsigned long obj_exts;
#endif
};

#define SLAB_MATCH(pg, sl)						\
	static_assert(offsetof(struct page, pg) == offsetof(struct slab, sl))
SLAB_MATCH(flags, __page_flags);
SLAB_MATCH(compound_head, slab_cache);	/* Ensure bit 0 is clear */
SLAB_MATCH(_refcount, __page_refcount);
#ifdef CONFIG_MEMCG
SLAB_MATCH(memcg_data, obj_exts);
#elif defined(CONFIG_SLAB_OBJ_EXT)
SLAB_MATCH(_unused_slab_obj_exts, obj_exts);
#endif
#undef SLAB_MATCH
static_assert(sizeof(struct slab) <= sizeof(struct page));
#if defined(system_has_freelist_aba)
static_assert(IS_ALIGNED(offsetof(struct slab, freelist), sizeof(freelist_aba_t)));
#endif
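/*
 * The SLAB_MATCH assertions keep fields that generic mm code still touches
 * through struct page (flags, _refcount, ...) at identical offsets in both
 * views. The IS_ALIGNED assertion guarantees that freelist/counters sit on
 * a boundary suitable for the double-word cmpxchg above; cmpxchg128 in
 * particular requires a 16-byte aligned operand.
 */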
/**
 * folio_slab - Converts from folio to slab.
 * @folio: The folio.
 *
 * Currently struct slab is a different representation of a folio where
 * folio_test_slab() is true.
 *
 * Return: The slab which contains this folio.
 */
#define folio_slab(folio)	(_Generic((folio),			\
	const struct folio *:	(const struct slab *)(folio),		\
	struct folio *:		(struct slab *)(folio)))

/**
 * slab_folio - The folio allocated for a slab
 * @s: The slab.
 *
 * Slabs are allocated as folios that contain the individual objects and are
 * using some fields in the first struct page of the folio - those fields are
 * now accessed by struct slab. It is occasionally necessary to convert back to
 * a folio in order to communicate with the rest of the mm. Please use this
 * helper function instead of casting yourself, as the implementation may
 * change in the future.
 */
#define slab_folio(s)		(_Generic((s),				\
	const struct slab *:	(const struct folio *)s,		\
	struct slab *:		(struct folio *)s))

/**
 * page_slab - Converts from first struct page to slab.
 * @p: The first (either head of compound or single) page of slab.
 *
 * A temporary wrapper to convert struct page to struct slab in situations
 * where we know the page is the compound head, or single order-0 page.
 *
 * Long-term ideally everything would work with struct slab directly or go
 * through folio to struct slab.
 *
 * Return: The slab which contains this page
 */
#define page_slab(p)		(_Generic((p),				\
	const struct page *:	(const struct slab *)(p),		\
	struct page *:		(struct slab *)(p)))

/**
 * slab_page - The first struct page allocated for a slab
 * @s: The slab.
 *
 * A convenience wrapper for converting slab to the first struct page of the
 * underlying folio, to communicate with code not yet converted to folio or
 * struct slab.
 */
#define slab_page(s) folio_page(slab_folio(s), 0)

/*
 * If network-based swap is enabled, sl*b must keep track of whether pages
 * were allocated from pfmemalloc reserves.
 */
static inline bool slab_test_pfmemalloc(const struct slab *slab)
{
	return folio_test_active(slab_folio(slab));
}

static inline void slab_set_pfmemalloc(struct slab *slab)
{
	folio_set_active(slab_folio(slab));
}

static inline void slab_clear_pfmemalloc(struct slab *slab)
{
	folio_clear_active(slab_folio(slab));
}

static inline void __slab_clear_pfmemalloc(struct slab *slab)
{
	__folio_clear_active(slab_folio(slab));
}

static inline void *slab_address(const struct slab *slab)
{
	return folio_address(slab_folio(slab));
}

static inline int slab_nid(const struct slab *slab)
{
	return folio_nid(slab_folio(slab));
}

static inline pg_data_t *slab_pgdat(const struct slab *slab)
{
	return folio_pgdat(slab_folio(slab));
}

static inline struct slab *virt_to_slab(const void *addr)
{
	struct folio *folio = virt_to_folio(addr);

	if (!folio_test_slab(folio))
		return NULL;

	return folio_slab(folio);
}

static inline int slab_order(const struct slab *slab)
{
	return folio_order(slab_folio(slab));
}

static inline size_t slab_size(const struct slab *slab)
{
	return PAGE_SIZE << slab_order(slab);
}
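/*
 * E.g. with 4K pages, an order-3 slab spans 8 pages, so slab_size()
 * returns 32768 bytes.
 */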
#ifdef CONFIG_SLUB_CPU_PARTIAL
#define slub_percpu_partial(c)			((c)->partial)

#define slub_set_percpu_partial(c, p)		\
({						\
	slub_percpu_partial(c) = (p)->next;	\
})

#define slub_percpu_partial_read_once(c)	READ_ONCE(slub_percpu_partial(c))
#else
#define slub_percpu_partial(c)			NULL

#define slub_set_percpu_partial(c, p)

#define slub_percpu_partial_read_once(c)	NULL
#endif // CONFIG_SLUB_CPU_PARTIAL
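/*
 * The per-cpu partial list is a singly linked list of slabs chained via
 * slab->next; slub_set_percpu_partial(c, p) removes the head slab p by
 * making its successor the new head.
 */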
/*
 * Word size structure that can be atomically updated or read and that
 * contains both the order and the number of objects that a slab of the
 * given order would contain.
 */
struct kmem_cache_order_objects {
	unsigned int x;
};
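/*
 * The encoding packs the page order into the upper bits and the object
 * count into the lower bits of x; see the oo_make()/oo_order()/oo_objects()
 * helpers in mm/slub.c.
 */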
/*
 * Slab cache management.
 */
struct kmem_cache {
#ifndef CONFIG_SLUB_TINY
	struct kmem_cache_cpu __percpu *cpu_slab;
#endif
	/* Used for retrieving partial slabs, etc. */
	slab_flags_t flags;
	unsigned long min_partial;
	unsigned int size;		/* Object size including metadata */
	unsigned int object_size;	/* Object size without metadata */
	struct reciprocal_value reciprocal_size;
	unsigned int offset;		/* Free pointer offset */
#ifdef CONFIG_SLUB_CPU_PARTIAL
	/* Number of per cpu partial objects to keep around */
	unsigned int cpu_partial;
	/* Number of per cpu partial slabs to keep around */
	unsigned int cpu_partial_slabs;
#endif
	struct kmem_cache_order_objects oo;

	/* Allocation and freeing of slabs */
	struct kmem_cache_order_objects min;
	gfp_t allocflags;		/* gfp flags to use on each alloc */
	int refcount;			/* Refcount for slab cache destroy */
	void (*ctor)(void *object);	/* Object constructor */
	unsigned int inuse;		/* Offset to metadata */
	unsigned int align;		/* Alignment */
	unsigned int red_left_pad;	/* Left redzone padding size */
	const char *name;		/* Name (only for display!) */
	struct list_head list;		/* List of slab caches */
#ifdef CONFIG_SYSFS
	struct kobject kobj;		/* For sysfs */
#endif
#ifdef CONFIG_SLAB_FREELIST_HARDENED
	unsigned long random;
#endif

#ifdef CONFIG_NUMA
	/*
	 * Defragmentation by allocating from a remote node.
	 */
	unsigned int remote_node_defrag_ratio;
#endif

#ifdef CONFIG_SLAB_FREELIST_RANDOM
	unsigned int *random_seq;
#endif

#ifdef CONFIG_KASAN_GENERIC
	struct kasan_cache kasan_info;
#endif

#ifdef CONFIG_HARDENED_USERCOPY
	unsigned int useroffset;	/* Usercopy region offset */
	unsigned int usersize;		/* Usercopy region size */
#endif

	struct kmem_cache_node *node[MAX_NUMNODES];
};
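/*
 * Note the layout above: fields consulted on the allocation fast path
 * (cpu_slab, flags, size, offset) come first, keeping the hot data in the
 * leading cache lines, while cold configuration and per-node data trail.
 */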
#if defined(CONFIG_SYSFS) && !defined(CONFIG_SLUB_TINY)
#define SLAB_SUPPORTS_SYSFS 1
void sysfs_slab_unlink(struct kmem_cache *s);
void sysfs_slab_release(struct kmem_cache *s);
#else
static inline void sysfs_slab_unlink(struct kmem_cache *s) { }
static inline void sysfs_slab_release(struct kmem_cache *s) { }
#endif

void *fixup_red_left(struct kmem_cache *s, void *p);
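/*
 * Round an arbitrary pointer into a slab down to the start of the object
 * it falls within, clamped to the last valid object; used e.g. by KASAN
 * to attribute an access to an object when reporting.
 */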
static inline void *nearest_obj(struct kmem_cache *cache,
				const struct slab *slab, void *x)
{
	void *object = x - (x - slab_address(slab)) % cache->size;
	void *last_object = slab_address(slab) +
		(slab->objects - 1) * cache->size;
	void *result = (unlikely(object > last_object)) ? last_object : object;

	result = fixup_red_left(cache, result);
	return result;
}

/* Determine object index from a given position */
static inline unsigned int __obj_to_index(const struct kmem_cache *cache,
					  void *addr, void *obj)
{
	return reciprocal_divide(kasan_reset_tag(obj) - addr,
				 cache->reciprocal_size);
}

static inline unsigned int obj_to_index(const struct kmem_cache *cache,
					const struct slab *slab, void *obj)
{
	if (is_kfence_address(obj))
		return 0;
	return __obj_to_index(cache, slab_address(slab), obj);
}
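/*
 * reciprocal_divide() replaces the division by cache->size with a
 * multiply-and-shift using the precomputed cache->reciprocal_size.
 * KFENCE objects live in a dedicated pool rather than in a real slab
 * slot, so they are mapped to index 0.
 */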
static inline int objs_per_slab(const struct kmem_cache *cache,
				const struct slab *slab)
{
	return slab->objects;
}

/*
 * State of the slab allocator.
 *
 * This is used to describe the states of the allocator during bootup.
 * Allocators use this to gradually bootstrap themselves. Most allocators
 * have the problem that the structures used for managing slab caches are
 * allocated from slab caches themselves.
 */
enum slab_state {
	DOWN,			/* No slab functionality yet */
	PARTIAL,		/* SLUB: kmem_cache_node available */
	UP,			/* Slab caches usable but not all extras yet */
	FULL			/* Everything is working */
};
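/*
 * Roughly: slab_state starts as DOWN, becomes PARTIAL once the
 * kmem_cache_node cache exists, UP after the boot and kmalloc caches are
 * created, and FULL when late initialization (e.g. sysfs registration)
 * has completed.
 */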
extern enum slab_state slab_state;

/* The slab cache mutex protects the management structures during changes */
extern struct mutex slab_mutex;

/* The list of all slab caches on the system */
extern struct list_head slab_caches;

/* The slab cache that manages slab cache information */
extern struct kmem_cache *kmem_cache;

/* A table of kmalloc cache names and sizes */
extern const struct kmalloc_info_struct {
	const char *name[NR_KMALLOC_TYPES];
	unsigned int size;
} kmalloc_info[];

/* Kmalloc array related functions */
void setup_kmalloc_cache_index_table(void);
void create_kmalloc_caches(void);

extern u8 kmalloc_size_index[24];

static inline unsigned int size_index_elem(unsigned int bytes)
{
	return (bytes - 1) / 8;
}
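/*
 * kmalloc_size_index covers allocations up to 192 bytes in 8-byte steps;
 * e.g. a 24-byte request gives elem (24 - 1) / 8 = 2, and
 * kmalloc_size_index[2] holds the index of the cache serving that size.
 */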
/*
 * Find the kmem_cache structure that serves a given size of
 * allocation
 *
 * This assumes size is larger than zero and not larger than
 * KMALLOC_MAX_CACHE_SIZE and the caller must check that.
 */
static inline struct kmem_cache *
kmalloc_slab(size_t size, kmem_buckets *b, gfp_t flags, unsigned long caller)
{
	unsigned int index;

	if (!b)
		b = &kmalloc_caches[kmalloc_type(flags, caller)];
	if (size <= 192)
		index = kmalloc_size_index[size_index_elem(size)];
	else
		index = fls(size - 1);

	return (*b)[index];
}
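/*
 * Above 192 bytes the caches are power-of-two sized, so the index is just
 * the bit length of (size - 1): e.g. size 1000 -> fls(999) = 10 -> the
 * kmalloc-1024 cache.
 */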
gfp_t kmalloc_fix_flags(gfp_t flags);

/* Functions provided by the slab allocators */
int do_kmem_cache_create(struct kmem_cache *s, const char *name,
			 unsigned int size, struct kmem_cache_args *args,
			 slab_flags_t flags);

void __init kmem_cache_init(void);
extern void create_boot_cache(struct kmem_cache *, const char *name,
			      unsigned int size, slab_flags_t flags,
			      unsigned int useroffset, unsigned int usersize);

int slab_unmergeable(struct kmem_cache *s);
struct kmem_cache *find_mergeable(unsigned size, unsigned align,
		slab_flags_t flags, const char *name, void (*ctor)(void *));
struct kmem_cache *
__kmem_cache_alias(const char *name, unsigned int size, unsigned int align,
		   slab_flags_t flags, void (*ctor)(void *));

slab_flags_t kmem_cache_flags(slab_flags_t flags, const char *name);

static inline bool is_kmalloc_cache(struct kmem_cache *s)
{
	return (s->flags & SLAB_KMALLOC);
}

static inline bool is_kmalloc_normal(struct kmem_cache *s)
{
	if (!is_kmalloc_cache(s))
		return false;
	return !(s->flags & (SLAB_CACHE_DMA|SLAB_ACCOUNT|SLAB_RECLAIM_ACCOUNT));
}

#define SLAB_CORE_FLAGS (SLAB_HWCACHE_ALIGN | SLAB_CACHE_DMA | \
			 SLAB_CACHE_DMA32 | SLAB_PANIC | \
			 SLAB_TYPESAFE_BY_RCU | SLAB_DEBUG_OBJECTS | \
			 SLAB_NOLEAKTRACE | SLAB_RECLAIM_ACCOUNT | \
			 SLAB_TEMPORARY | SLAB_ACCOUNT | \
			 SLAB_NO_USER_FLAGS | SLAB_KMALLOC | SLAB_NO_MERGE)

#define SLAB_DEBUG_FLAGS (SLAB_RED_ZONE | SLAB_POISON | SLAB_STORE_USER | \
			  SLAB_TRACE | SLAB_CONSISTENCY_CHECKS)

#define SLAB_FLAGS_PERMITTED (SLAB_CORE_FLAGS | SLAB_DEBUG_FLAGS)
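/*
 * SLAB_FLAGS_PERMITTED is the whitelist checked at cache creation time;
 * flags outside this mask cause kmem_cache_create() to reject the cache.
 */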
bool __kmem_cache_empty(struct kmem_cache *);
int __kmem_cache_shutdown(struct kmem_cache *);
void __kmem_cache_release(struct kmem_cache *);
int __kmem_cache_shrink(struct kmem_cache *);
void slab_kmem_cache_release(struct kmem_cache *);

struct seq_file;
struct file;

struct slabinfo {
	unsigned long active_objs;
	unsigned long num_objs;
	unsigned long active_slabs;
	unsigned long num_slabs;
	unsigned long shared_avail;
	unsigned int limit;
	unsigned int batchcount;
	unsigned int shared;
	unsigned int objects_per_slab;
	unsigned int cache_order;
};

void get_slabinfo(struct kmem_cache *s, struct slabinfo *sinfo);

#ifdef CONFIG_SLUB_DEBUG
#ifdef CONFIG_SLUB_DEBUG_ON
DECLARE_STATIC_KEY_TRUE(slub_debug_enabled);
#else
DECLARE_STATIC_KEY_FALSE(slub_debug_enabled);
#endif
extern void print_tracking(struct kmem_cache *s, void *object);
long validate_slab_cache(struct kmem_cache *s);
static inline bool __slub_debug_enabled(void)
{
	return static_branch_unlikely(&slub_debug_enabled);
}
#else
static inline void print_tracking(struct kmem_cache *s, void *object)
{
}
static inline bool __slub_debug_enabled(void)
{
	return false;
}
#endif

/*
 * Returns true if any of the specified slab_debug flags is enabled for the
 * cache. Use only for flags parsed by setup_slub_debug() as it also enables
 * the static key.
 */
static inline bool kmem_cache_debug_flags(struct kmem_cache *s, slab_flags_t flags)
{
	if (IS_ENABLED(CONFIG_SLUB_DEBUG))
		VM_WARN_ON_ONCE(!(flags & SLAB_DEBUG_FLAGS));
	if (__slub_debug_enabled())
		return s->flags & flags;
	return false;
}

#if IS_ENABLED(CONFIG_SLUB_DEBUG) && IS_ENABLED(CONFIG_KUNIT)
bool slab_in_kunit_test(void);
#else
static inline bool slab_in_kunit_test(void) { return false; }
#endif

#ifdef CONFIG_SLAB_OBJ_EXT

/*
 * slab_obj_exts - get the pointer to the slab object extension vector
 * associated with a slab.
 * @slab: a pointer to the slab struct
 *
 * Returns a pointer to the object extension vector associated with the slab,
 * or NULL if no such vector has been associated yet.
 */
static inline struct slabobj_ext *slab_obj_exts(struct slab *slab)
{
	unsigned long obj_exts = READ_ONCE(slab->obj_exts);

#ifdef CONFIG_MEMCG
	VM_BUG_ON_PAGE(obj_exts && !(obj_exts & MEMCG_DATA_OBJEXTS),
							slab_page(slab));
	VM_BUG_ON_PAGE(obj_exts & MEMCG_DATA_KMEM, slab_page(slab));
#endif
	return (struct slabobj_ext *)(obj_exts & ~OBJEXTS_FLAGS_MASK);
}
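/*
 * slab->obj_exts stores a pointer with flag bits kept in the low,
 * alignment-guaranteed bits (OBJEXTS_FLAGS_MASK); masking them off recovers
 * the slabobj_ext vector, which has one entry per object in the slab.
 */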
int alloc_slab_obj_exts(struct slab *slab, struct kmem_cache *s,
			gfp_t gfp, bool new_slab);

#else /* CONFIG_SLAB_OBJ_EXT */

static inline struct slabobj_ext *slab_obj_exts(struct slab *slab)
{
	return NULL;
}

#endif /* CONFIG_SLAB_OBJ_EXT */

static inline enum node_stat_item cache_vmstat_idx(struct kmem_cache *s)
{
	return (s->flags & SLAB_RECLAIM_ACCOUNT) ?
		NR_SLAB_RECLAIMABLE_B : NR_SLAB_UNRECLAIMABLE_B;
}

#ifdef CONFIG_MEMCG
bool __memcg_slab_post_alloc_hook(struct kmem_cache *s, struct list_lru *lru,
				  gfp_t flags, size_t size, void **p);
void __memcg_slab_free_hook(struct kmem_cache *s, struct slab *slab,
			    void **p, int objects, struct slabobj_ext *obj_exts);
#endif

void kvfree_rcu_cb(struct rcu_head *head);

size_t __ksize(const void *objp);

static inline size_t slab_ksize(const struct kmem_cache *s)
{
#ifdef CONFIG_SLUB_DEBUG
	/*
	 * Debugging requires use of the padding between object
	 * and whatever may come after it.
	 */
	if (s->flags & (SLAB_RED_ZONE | SLAB_POISON))
		return s->object_size;
#endif
	if (s->flags & SLAB_KASAN)
		return s->object_size;
	/*
	 * If we have the need to store the freelist pointer
	 * back there or track user information then we can
	 * only use the space before that information.
	 */
	if (s->flags & (SLAB_TYPESAFE_BY_RCU | SLAB_STORE_USER))
		return s->inuse;
	/*
	 * Else we can use all the padding etc for the allocation
	 */
	return s->size;
}

#ifdef CONFIG_SLUB_DEBUG
void dump_unreclaimable_slab(void);
#else
static inline void dump_unreclaimable_slab(void)
{
}
#endif

void ___cache_free(struct kmem_cache *cache, void *x, unsigned long addr);

#ifdef CONFIG_SLAB_FREELIST_RANDOM
int cache_random_seq_create(struct kmem_cache *cachep, unsigned int count,
			    gfp_t gfp);
void cache_random_seq_destroy(struct kmem_cache *cachep);
#else
static inline int cache_random_seq_create(struct kmem_cache *cachep,
					  unsigned int count, gfp_t gfp)
{
	return 0;
}
static inline void cache_random_seq_destroy(struct kmem_cache *cachep) { }
#endif /* CONFIG_SLAB_FREELIST_RANDOM */
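/*
 * slab_want_init_on_alloc()/slab_want_init_on_free() decide whether memory
 * is zeroed at allocation or scrubbed at free. Caches with constructors are
 * skipped (zeroing would wipe the constructed state); SLAB_TYPESAFE_BY_RCU
 * caches (objects may still be read after free) and poisoned caches
 * (poisoning already overwrites the memory) are only zeroed on allocation
 * when the caller explicitly passes __GFP_ZERO.
 */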
static inline bool slab_want_init_on_alloc(gfp_t flags, struct kmem_cache *c)
{
	if (static_branch_maybe(CONFIG_INIT_ON_ALLOC_DEFAULT_ON,
				&init_on_alloc)) {
		if (c->ctor)
			return false;
		if (c->flags & (SLAB_TYPESAFE_BY_RCU | SLAB_POISON))
			return flags & __GFP_ZERO;
		return true;
	}
	return flags & __GFP_ZERO;
}

static inline bool slab_want_init_on_free(struct kmem_cache *c)
{
	if (static_branch_maybe(CONFIG_INIT_ON_FREE_DEFAULT_ON,
				&init_on_free))
		return !(c->ctor ||
			 (c->flags & (SLAB_TYPESAFE_BY_RCU | SLAB_POISON)));
	return false;
}

#if defined(CONFIG_DEBUG_FS) && defined(CONFIG_SLUB_DEBUG)
void debugfs_slab_release(struct kmem_cache *);
#else
static inline void debugfs_slab_release(struct kmem_cache *s) { }
#endif

#ifdef CONFIG_PRINTK
#define KS_ADDRS_COUNT 16
struct kmem_obj_info {
	void *kp_ptr;
	struct slab *kp_slab;
	void *kp_objp;
	unsigned long kp_data_offset;
	struct kmem_cache *kp_slab_cache;
	void *kp_ret;
	void *kp_stack[KS_ADDRS_COUNT];
	void *kp_free_stack[KS_ADDRS_COUNT];
};
void __kmem_obj_info(struct kmem_obj_info *kpp, void *object, struct slab *slab);
#endif

void __check_heap_object(const void *ptr, unsigned long n,
			 const struct slab *slab, bool to_user);

static inline bool slub_debug_orig_size(struct kmem_cache *s)
{
	return (kmem_cache_debug_flags(s, SLAB_STORE_USER) &&
			(s->flags & SLAB_KMALLOC));
}

#ifdef CONFIG_SLUB_DEBUG
void skip_orig_size_check(struct kmem_cache *s, const void *object);
#endif

#endif /* MM_SLAB_H */