/* SPDX-License-Identifier: GPL-2.0 */
#ifndef MM_SLAB_H
#define MM_SLAB_H
/*
 * Internal slab definitions
 */

/* Reuses the bits in struct page */
struct slab {
	unsigned long __page_flags;

#if defined(CONFIG_SLAB)

	union {
		struct list_head slab_list;
		struct rcu_head rcu_head;
	};
	struct kmem_cache *slab_cache;
	void *freelist;	/* array of free object indexes */
	void *s_mem;	/* first object */
	unsigned int active;

#elif defined(CONFIG_SLUB)

	union {
		struct list_head slab_list;
		struct rcu_head rcu_head;
		struct {
			struct slab *next;
			int slabs;	/* Nr of slabs left */
		};
	};
	struct kmem_cache *slab_cache;
	/* Double-word boundary */
	void *freelist;		/* first free object */
	union {
		unsigned long counters;
		struct {
			unsigned inuse:16;
			unsigned objects:15;
			unsigned frozen:1;
		};
	};
	unsigned int __unused;

#elif defined(CONFIG_SLOB)

	struct list_head slab_list;
	void *__unused_1;
	void *freelist;		/* first free block */
	void *__unused_2;
	int units;

#else
#error "Unexpected slab allocator configured"
#endif

	atomic_t __page_refcount;
#ifdef CONFIG_MEMCG
	unsigned long memcg_data;
#endif
};

#define SLAB_MATCH(pg, sl)						\
	static_assert(offsetof(struct page, pg) == offsetof(struct slab, sl))
SLAB_MATCH(flags, __page_flags);
SLAB_MATCH(compound_head, slab_list);	/* Ensure bit 0 is clear */
SLAB_MATCH(slab_list, slab_list);
#ifndef CONFIG_SLOB
SLAB_MATCH(rcu_head, rcu_head);
SLAB_MATCH(slab_cache, slab_cache);
#endif
#ifdef CONFIG_SLAB
SLAB_MATCH(s_mem, s_mem);
SLAB_MATCH(active, active);
#endif
SLAB_MATCH(_refcount, __page_refcount);
#ifdef CONFIG_MEMCG
SLAB_MATCH(memcg_data, memcg_data);
#endif
#undef SLAB_MATCH
static_assert(sizeof(struct slab) <= sizeof(struct page));

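/*
 * Illustrative note (ours, not part of the original header): the
 * SLAB_MATCH() asserts above pin each struct slab field to the offset of
 * the struct page field it overlays, so the two views alias the same
 * memory. E.g. slab->__page_refcount and slab_page(slab)->_refcount name
 * the same bits under every allocator config.
 */
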
/**
 * folio_slab - Converts from folio to slab.
 * @folio: The folio.
 *
 * Currently struct slab is a different representation of a folio where
 * folio_test_slab() is true.
 *
 * Return: The slab which contains this folio.
 */
#define folio_slab(folio)	(_Generic((folio),			\
	const struct folio *:	(const struct slab *)(folio),		\
	struct folio *:		(struct slab *)(folio)))

/**
 * slab_folio - The folio allocated for a slab
 * @slab: The slab.
 *
 * Slabs are allocated as folios that contain the individual objects and are
 * using some fields in the first struct page of the folio - those fields are
 * now accessed by struct slab. It is occasionally necessary to convert back to
 * a folio in order to communicate with the rest of the mm. Please use this
 * helper function instead of casting yourself, as the implementation may
 * change in the future.
 */
#define slab_folio(s)		(_Generic((s),				\
	const struct slab *:	(const struct folio *)s,		\
	struct slab *:		(struct folio *)s))

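/*
 * Usage sketch (ours, hypothetical helper name): the _Generic-based
 * converters above preserve constness and cost nothing at runtime, so a
 * round trip returns the original pointer. Only valid when
 * folio_test_slab(folio) is true.
 */
static inline bool slab_folio_roundtrip_ok(struct folio *folio)
{
	return slab_folio(folio_slab(folio)) == folio;
}
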
/**
 * page_slab - Converts from first struct page to slab.
 * @p: The first (either head of compound or single) page of slab.
 *
 * A temporary wrapper to convert struct page to struct slab in situations
 * where we know the page is the compound head, or single order-0 page.
 *
 * Long-term ideally everything would work with struct slab directly or go
 * through folio to struct slab.
 *
 * Return: The slab which contains this page
 */
#define page_slab(p)		(_Generic((p),				\
	const struct page *:	(const struct slab *)(p),		\
	struct page *:		(struct slab *)(p)))

/**
 * slab_page - The first struct page allocated for a slab
 * @slab: The slab.
 *
 * A convenience wrapper for converting slab to the first struct page of the
 * underlying folio, to communicate with code not yet converted to folio or
 * struct slab.
 */
#define slab_page(s) folio_page(slab_folio(s), 0)

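/*
 * Example (ours): legacy code holding the head struct page can hop into
 * the slab view and back without pointer arithmetic:
 *
 *	struct slab *slab = page_slab(compound_head(page));
 *	struct page *first = slab_page(slab);
 */
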
/*
 * If network-based swap is enabled, sl*b must keep track of whether pages
 * were allocated from pfmemalloc reserves.
 */
static inline bool slab_test_pfmemalloc(const struct slab *slab)
{
	return folio_test_active((struct folio *)slab_folio(slab));
}

static inline void slab_set_pfmemalloc(struct slab *slab)
{
	folio_set_active(slab_folio(slab));
}

static inline void slab_clear_pfmemalloc(struct slab *slab)
{
	folio_clear_active(slab_folio(slab));
}

static inline void __slab_clear_pfmemalloc(struct slab *slab)
{
	__folio_clear_active(slab_folio(slab));
}

static inline void *slab_address(const struct slab *slab)
{
	return folio_address(slab_folio(slab));
}

static inline int slab_nid(const struct slab *slab)
{
	return folio_nid(slab_folio(slab));
}

static inline pg_data_t *slab_pgdat(const struct slab *slab)
{
	return folio_pgdat(slab_folio(slab));
}

static inline struct slab *virt_to_slab(const void *addr)
{
	struct folio *folio = virt_to_folio(addr);

	if (!folio_test_slab(folio))
		return NULL;

	return folio_slab(folio);
}

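/*
 * Usage sketch (ours, hypothetical helper): virt_to_slab() returns NULL
 * for non-slab memory such as kmalloc_large() objects, so callers that may
 * be handed arbitrary kernel pointers must check the result. NUMA_NO_NODE
 * is assumed visible via the mm includes.
 */
static inline int example_obj_nid(const void *obj)
{
	struct slab *slab = virt_to_slab(obj);

	return slab ? slab_nid(slab) : NUMA_NO_NODE;
}
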
static inline int slab_order(const struct slab *slab)
{
	return folio_order((struct folio *)slab_folio(slab));
}

static inline size_t slab_size(const struct slab *slab)
{
	return PAGE_SIZE << slab_order(slab);
}

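/*
 * Worked example (ours): an order-1 slab with 4K pages spans
 * slab_size() == PAGE_SIZE << 1 == 8192 bytes, i.e. two struct pages, of
 * which only the first carries the struct slab overlay.
 */
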
#ifdef CONFIG_SLOB
/*
 * Common fields provided in kmem_cache by all slab allocators.
 * This struct is either used directly by the allocator (SLOB)
 * or the allocator must include definitions for all fields
 * provided in kmem_cache_common in their definition of kmem_cache.
 *
 * Once we can do anonymous structs (C11 standard) we could put an
 * anonymous struct definition in these allocators so that the
 * separate allocations in the kmem_cache structure of SLAB and
 * SLUB are no longer needed.
 */
struct kmem_cache {
	unsigned int object_size;/* The original size of the object */
	unsigned int size;	/* The aligned/padded/added-on size */
	unsigned int align;	/* Alignment as calculated */
	slab_flags_t flags;	/* Active flags on the slab */
	unsigned int useroffset;/* Usercopy region offset */
	unsigned int usersize;	/* Usercopy region size */
	const char *name;	/* Slab name for sysfs */
	int refcount;		/* Use counter */
	void (*ctor)(void *);	/* Called on object slot creation */
	struct list_head list;	/* List of all slab caches on the system */
};

#endif /* CONFIG_SLOB */

#ifdef CONFIG_SLAB
#include <linux/slab_def.h>
#endif

#ifdef CONFIG_SLUB
#include <linux/slub_def.h>
#endif

#include <linux/memcontrol.h>
#include <linux/fault-inject.h>
#include <linux/kasan.h>
#include <linux/kmemleak.h>
#include <linux/random.h>
#include <linux/sched/mm.h>

/*
 * State of the slab allocator.
 *
 * This is used to describe the states of the allocator during bootup.
 * Allocators use this to gradually bootstrap themselves. Most allocators
 * have the problem that the structures used for managing slab caches are
 * allocated from slab caches themselves.
 */
enum slab_state {
	DOWN,			/* No slab functionality yet */
	PARTIAL,		/* SLUB: kmem_cache_node available */
	PARTIAL_NODE,		/* SLAB: kmalloc size for node struct available */
	UP,			/* Slab caches usable but not all extras yet */
	FULL			/* Everything is working */
};

extern enum slab_state slab_state;

/* The slab cache mutex protects the management structures during changes */
extern struct mutex slab_mutex;

/* The list of all slab caches on the system */
extern struct list_head slab_caches;

/* The slab cache that manages slab cache information */
extern struct kmem_cache *kmem_cache;

/* A table of kmalloc cache names and sizes */
extern const struct kmalloc_info_struct {
	const char *name[NR_KMALLOC_TYPES];
	unsigned int size;
} kmalloc_info[];

#ifndef CONFIG_SLOB
/* Kmalloc array related functions */
void setup_kmalloc_cache_index_table(void);
void create_kmalloc_caches(slab_flags_t);

/* Find the kmalloc slab corresponding to a certain size */
struct kmem_cache *kmalloc_slab(size_t, gfp_t);
#endif

gfp_t kmalloc_fix_flags(gfp_t flags);

/* Functions provided by the slab allocators */
int __kmem_cache_create(struct kmem_cache *, slab_flags_t flags);

struct kmem_cache *create_kmalloc_cache(const char *name, unsigned int size,
			slab_flags_t flags, unsigned int useroffset,
			unsigned int usersize);
extern void create_boot_cache(struct kmem_cache *, const char *name,
			unsigned int size, slab_flags_t flags,
			unsigned int useroffset, unsigned int usersize);

int slab_unmergeable(struct kmem_cache *s);
struct kmem_cache *find_mergeable(unsigned size, unsigned align,
		slab_flags_t flags, const char *name, void (*ctor)(void *));
#ifndef CONFIG_SLOB
struct kmem_cache *
__kmem_cache_alias(const char *name, unsigned int size, unsigned int align,
		   slab_flags_t flags, void (*ctor)(void *));

slab_flags_t kmem_cache_flags(unsigned int object_size,
	slab_flags_t flags, const char *name);
#else
static inline struct kmem_cache *
__kmem_cache_alias(const char *name, unsigned int size, unsigned int align,
		   slab_flags_t flags, void (*ctor)(void *))
{ return NULL; }

static inline slab_flags_t kmem_cache_flags(unsigned int object_size,
	slab_flags_t flags, const char *name)
{
	return flags;
}
#endif

/* Legal flag mask for kmem_cache_create(), for various configurations */
#define SLAB_CORE_FLAGS (SLAB_HWCACHE_ALIGN | SLAB_CACHE_DMA | \
			 SLAB_CACHE_DMA32 | SLAB_PANIC | \
			 SLAB_TYPESAFE_BY_RCU | SLAB_DEBUG_OBJECTS)

#if defined(CONFIG_DEBUG_SLAB)
#define SLAB_DEBUG_FLAGS (SLAB_RED_ZONE | SLAB_POISON | SLAB_STORE_USER)
#elif defined(CONFIG_SLUB_DEBUG)
#define SLAB_DEBUG_FLAGS (SLAB_RED_ZONE | SLAB_POISON | SLAB_STORE_USER | \
			  SLAB_TRACE | SLAB_CONSISTENCY_CHECKS)
#else
#define SLAB_DEBUG_FLAGS (0)
#endif

#if defined(CONFIG_SLAB)
#define SLAB_CACHE_FLAGS (SLAB_MEM_SPREAD | SLAB_NOLEAKTRACE | \
			  SLAB_RECLAIM_ACCOUNT | SLAB_TEMPORARY | \
			  SLAB_ACCOUNT)
#elif defined(CONFIG_SLUB)
#define SLAB_CACHE_FLAGS (SLAB_NOLEAKTRACE | SLAB_RECLAIM_ACCOUNT | \
			  SLAB_TEMPORARY | SLAB_ACCOUNT)
#else
#define SLAB_CACHE_FLAGS (SLAB_NOLEAKTRACE)
#endif

/* Common flags available with current configuration */
#define CACHE_CREATE_MASK (SLAB_CORE_FLAGS | SLAB_DEBUG_FLAGS | SLAB_CACHE_FLAGS)

/* Common flags permitted for kmem_cache_create */
#define SLAB_FLAGS_PERMITTED (SLAB_CORE_FLAGS | \
			      SLAB_RED_ZONE | \
			      SLAB_POISON | \
			      SLAB_STORE_USER | \
			      SLAB_TRACE | \
			      SLAB_CONSISTENCY_CHECKS | \
			      SLAB_MEM_SPREAD | \
			      SLAB_NOLEAKTRACE | \
			      SLAB_RECLAIM_ACCOUNT | \
			      SLAB_TEMPORARY | \
			      SLAB_ACCOUNT)

bool __kmem_cache_empty(struct kmem_cache *);
int __kmem_cache_shutdown(struct kmem_cache *);
void __kmem_cache_release(struct kmem_cache *);
int __kmem_cache_shrink(struct kmem_cache *);
void slab_kmem_cache_release(struct kmem_cache *);

struct seq_file;
struct file;

struct slabinfo {
	unsigned long active_objs;
	unsigned long num_objs;
	unsigned long active_slabs;
	unsigned long num_slabs;
	unsigned long shared_avail;
	unsigned int limit;
	unsigned int batchcount;
	unsigned int shared;
	unsigned int objects_per_slab;
	unsigned int cache_order;
};

void get_slabinfo(struct kmem_cache *s, struct slabinfo *sinfo);
void slabinfo_show_stats(struct seq_file *m, struct kmem_cache *s);
ssize_t slabinfo_write(struct file *file, const char __user *buffer,
		       size_t count, loff_t *ppos);

/*
 * Generic implementation of bulk operations.
 * These are useful for situations in which the allocator cannot
 * perform optimizations. In that case segments of the object list
 * may be allocated or freed using these operations.
 */
void __kmem_cache_free_bulk(struct kmem_cache *, size_t, void **);
int __kmem_cache_alloc_bulk(struct kmem_cache *, gfp_t, size_t, void **);

static inline enum node_stat_item cache_vmstat_idx(struct kmem_cache *s)
{
	return (s->flags & SLAB_RECLAIM_ACCOUNT) ?
		NR_SLAB_RECLAIMABLE_B : NR_SLAB_UNRECLAIMABLE_B;
}

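/*
 * Example (ours): a cache created with SLAB_RECLAIM_ACCOUNT (the dentry
 * cache, for instance) is accounted under NR_SLAB_RECLAIMABLE_B; all
 * other caches land in NR_SLAB_UNRECLAIMABLE_B.
 */
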
#ifdef CONFIG_SLUB_DEBUG
#ifdef CONFIG_SLUB_DEBUG_ON
DECLARE_STATIC_KEY_TRUE(slub_debug_enabled);
#else
DECLARE_STATIC_KEY_FALSE(slub_debug_enabled);
#endif
extern void print_tracking(struct kmem_cache *s, void *object);
long validate_slab_cache(struct kmem_cache *s);
static inline bool __slub_debug_enabled(void)
{
	return static_branch_unlikely(&slub_debug_enabled);
}
#else
static inline void print_tracking(struct kmem_cache *s, void *object)
{
}
static inline bool __slub_debug_enabled(void)
{
	return false;
}
#endif

/*
 * Returns true if any of the specified slub_debug flags is enabled for the
 * cache. Use only for flags parsed by setup_slub_debug() as it also enables
 * the static key.
 */
static inline bool kmem_cache_debug_flags(struct kmem_cache *s, slab_flags_t flags)
{
	if (IS_ENABLED(CONFIG_SLUB_DEBUG))
		VM_WARN_ON_ONCE(!(flags & SLAB_DEBUG_FLAGS));
	if (__slub_debug_enabled())
		return s->flags & flags;
	return false;
}

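/*
 * Usage sketch (ours): fast paths gate debug-only work behind this helper
 * so the static key keeps the cost at zero when slub_debug is off, e.g.
 * something along the lines of:
 *
 *	if (kmem_cache_debug_flags(s, SLAB_CONSISTENCY_CHECKS))
 *		do_expensive_consistency_checks(s, slab, object);
 *
 * (do_expensive_consistency_checks is hypothetical.)
 */
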
#ifdef CONFIG_MEMCG_KMEM
/*
 * slab_objcgs - get the object cgroups vector associated with a slab
 * @slab: a pointer to the slab struct
 *
 * Returns a pointer to the object cgroups vector associated with the slab,
 * or NULL if no such vector has been associated yet.
 */
static inline struct obj_cgroup **slab_objcgs(struct slab *slab)
{
	unsigned long memcg_data = READ_ONCE(slab->memcg_data);

	VM_BUG_ON_PAGE(memcg_data && !(memcg_data & MEMCG_DATA_OBJCGS),
							slab_page(slab));
	VM_BUG_ON_PAGE(memcg_data & MEMCG_DATA_KMEM, slab_page(slab));

	return (struct obj_cgroup **)(memcg_data & ~MEMCG_DATA_FLAGS_MASK);
}

int memcg_alloc_slab_cgroups(struct slab *slab, struct kmem_cache *s,
				 gfp_t gfp, bool new_slab);
void mod_objcg_state(struct obj_cgroup *objcg, struct pglist_data *pgdat,
		     enum node_stat_item idx, int nr);

static inline void memcg_free_slab_cgroups(struct slab *slab)
{
	kfree(slab_objcgs(slab));
	slab->memcg_data = 0;
}

static inline size_t obj_full_size(struct kmem_cache *s)
{
	/*
	 * For each accounted object there is an extra space which is used
	 * to store obj_cgroup membership. Charge it too.
	 */
	return s->size + sizeof(struct obj_cgroup *);
}

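/*
 * Worked example (ours): for a cache with s->size == 64 on a 64-bit
 * kernel, each accounted object is charged obj_full_size() == 64 + 8 = 72
 * bytes, the extra pointer covering its slot in the objcg vector.
 */
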
/*
 * Returns false if the allocation should fail.
 */
static inline bool memcg_slab_pre_alloc_hook(struct kmem_cache *s,
					     struct obj_cgroup **objcgp,
					     size_t objects, gfp_t flags)
{
	struct obj_cgroup *objcg;

	if (!memcg_kmem_enabled())
		return true;

	if (!(flags & __GFP_ACCOUNT) && !(s->flags & SLAB_ACCOUNT))
		return true;

	objcg = get_obj_cgroup_from_current();
	if (!objcg)
		return true;

	if (obj_cgroup_charge(objcg, flags, objects * obj_full_size(s))) {
		obj_cgroup_put(objcg);
		return false;
	}

	*objcgp = objcg;
	return true;
}

static inline void memcg_slab_post_alloc_hook(struct kmem_cache *s,
					      struct obj_cgroup *objcg,
					      gfp_t flags, size_t size,
					      void **p)
{
	struct slab *slab;
	unsigned long off;
	size_t i;

	if (!memcg_kmem_enabled() || !objcg)
		return;

	for (i = 0; i < size; i++) {
		if (likely(p[i])) {
			slab = virt_to_slab(p[i]);

			if (!slab_objcgs(slab) &&
			    memcg_alloc_slab_cgroups(slab, s, flags,
							 false)) {
				obj_cgroup_uncharge(objcg, obj_full_size(s));
				continue;
			}

			off = obj_to_index(s, slab, p[i]);
			obj_cgroup_get(objcg);
			slab_objcgs(slab)[off] = objcg;
			mod_objcg_state(objcg, slab_pgdat(slab),
					cache_vmstat_idx(s), obj_full_size(s));
		} else {
			obj_cgroup_uncharge(objcg, obj_full_size(s));
		}
	}
	obj_cgroup_put(objcg);
}

static inline void memcg_slab_free_hook(struct kmem_cache *s_orig,
					void **p, int objects)
{
	struct kmem_cache *s;
	struct obj_cgroup **objcgs;
	struct obj_cgroup *objcg;
	struct slab *slab;
	unsigned int off;
	int i;

	if (!memcg_kmem_enabled())
		return;

	for (i = 0; i < objects; i++) {
		if (unlikely(!p[i]))
			continue;

		slab = virt_to_slab(p[i]);
		/* we could be given a kmalloc_large() object, skip those */
		if (!slab)
			continue;

		objcgs = slab_objcgs(slab);
		if (!objcgs)
			continue;

		if (!s_orig)
			s = slab->slab_cache;
		else
			s = s_orig;

		off = obj_to_index(s, slab, p[i]);
		objcg = objcgs[off];
		if (!objcg)
			continue;

		objcgs[off] = NULL;
		obj_cgroup_uncharge(objcg, obj_full_size(s));
		mod_objcg_state(objcg, slab_pgdat(slab), cache_vmstat_idx(s),
				-obj_full_size(s));
		obj_cgroup_put(objcg);
	}
}

#else /* CONFIG_MEMCG_KMEM */
static inline struct obj_cgroup **slab_objcgs(struct slab *slab)
{
	return NULL;
}

static inline struct mem_cgroup *memcg_from_slab_obj(void *ptr)
{
	return NULL;
}

static inline int memcg_alloc_slab_cgroups(struct slab *slab,
					       struct kmem_cache *s, gfp_t gfp,
					       bool new_slab)
{
	return 0;
}

static inline void memcg_free_slab_cgroups(struct slab *slab)
{
}

static inline bool memcg_slab_pre_alloc_hook(struct kmem_cache *s,
					     struct obj_cgroup **objcgp,
					     size_t objects, gfp_t flags)
{
	return true;
}

static inline void memcg_slab_post_alloc_hook(struct kmem_cache *s,
					      struct obj_cgroup *objcg,
					      gfp_t flags, size_t size,
					      void **p)
{
}

static inline void memcg_slab_free_hook(struct kmem_cache *s,
					void **p, int objects)
{
}
#endif /* CONFIG_MEMCG_KMEM */

#ifndef CONFIG_SLOB
static inline struct kmem_cache *virt_to_cache(const void *obj)
{
	struct slab *slab;

	slab = virt_to_slab(obj);
	if (WARN_ONCE(!slab, "%s: Object is not a Slab page!\n",
					__func__))
		return NULL;
	return slab->slab_cache;
}

static __always_inline void account_slab(struct slab *slab, int order,
					 struct kmem_cache *s, gfp_t gfp)
{
	if (memcg_kmem_enabled() && (s->flags & SLAB_ACCOUNT))
		memcg_alloc_slab_cgroups(slab, s, gfp, true);

	mod_node_page_state(slab_pgdat(slab), cache_vmstat_idx(s),
			    PAGE_SIZE << order);
}

static __always_inline void unaccount_slab(struct slab *slab, int order,
					   struct kmem_cache *s)
{
	if (memcg_kmem_enabled())
		memcg_free_slab_cgroups(slab);

	mod_node_page_state(slab_pgdat(slab), cache_vmstat_idx(s),
			    -(PAGE_SIZE << order));
}

static inline struct kmem_cache *cache_from_obj(struct kmem_cache *s, void *x)
{
	struct kmem_cache *cachep;

	if (!IS_ENABLED(CONFIG_SLAB_FREELIST_HARDENED) &&
	    !kmem_cache_debug_flags(s, SLAB_CONSISTENCY_CHECKS))
		return s;

	cachep = virt_to_cache(x);
	if (WARN(cachep && cachep != s,
		  "%s: Wrong slab cache. %s but object is from %s\n",
		  __func__, s->name, cachep->name))
		print_tracking(cachep, x);
	return cachep;
}
#endif /* CONFIG_SLOB */

static inline size_t slab_ksize(const struct kmem_cache *s)
{
#ifndef CONFIG_SLUB
	return s->object_size;

#else /* CONFIG_SLUB */
# ifdef CONFIG_SLUB_DEBUG
	/*
	 * Debugging requires use of the padding between object
	 * and whatever may come after it.
	 */
	if (s->flags & (SLAB_RED_ZONE | SLAB_POISON))
		return s->object_size;
# endif
	if (s->flags & SLAB_KASAN)
		return s->object_size;
	/*
	 * If we have the need to store the freelist pointer
	 * back there or track user information then we can
	 * only use the space before that information.
	 */
	if (s->flags & (SLAB_TYPESAFE_BY_RCU | SLAB_STORE_USER))
		return s->inuse;
	/*
	 * Else we can use all the padding etc for the allocation
	 */
	return s->size;
#endif
}

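/*
 * Example (ours): a SLUB cache with object_size == 100 is typically padded
 * to size == 128; with no debug or RCU flags set, slab_ksize() reports the
 * full 128 usable bytes even though only 100 were requested.
 */
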
static inline struct kmem_cache *slab_pre_alloc_hook(struct kmem_cache *s,
						     struct obj_cgroup **objcgp,
						     size_t size, gfp_t flags)
{
	flags &= gfp_allowed_mask;

	might_alloc(flags);

	if (should_failslab(s, flags))
		return NULL;

	if (!memcg_slab_pre_alloc_hook(s, objcgp, size, flags))
		return NULL;

	return s;
}

static inline void slab_post_alloc_hook(struct kmem_cache *s,
					struct obj_cgroup *objcg, gfp_t flags,
					size_t size, void **p, bool init)
{
	size_t i;

	flags &= gfp_allowed_mask;

	/*
	 * As memory initialization might be integrated into KASAN,
	 * kasan_slab_alloc and initialization memset must be
	 * kept together to avoid discrepancies in behavior.
	 *
	 * As p[i] might get tagged, memset and kmemleak hook come after KASAN.
	 */
	for (i = 0; i < size; i++) {
		p[i] = kasan_slab_alloc(s, p[i], flags, init);
		if (p[i] && init && !kasan_has_integrated_init())
			memset(p[i], 0, s->object_size);
		kmemleak_alloc_recursive(p[i], s->object_size, 1,
					 s->flags, flags);
	}

	memcg_slab_post_alloc_hook(s, objcg, flags, size, p);
}

#ifndef CONFIG_SLOB
/*
 * The slab lists for all objects.
 */
struct kmem_cache_node {
	spinlock_t list_lock;

#ifdef CONFIG_SLAB
	struct list_head slabs_partial;	/* partial list first, better asm code */
	struct list_head slabs_full;
	struct list_head slabs_free;
	unsigned long total_slabs;	/* length of all slab lists */
	unsigned long free_slabs;	/* length of free slab list only */
	unsigned long free_objects;
	unsigned int free_limit;
	unsigned int colour_next;	/* Per-node cache coloring */
	struct array_cache *shared;	/* shared per node */
	struct alien_cache **alien;	/* on other nodes */
	unsigned long next_reap;	/* updated without locking */
	int free_touched;		/* updated without locking */
#endif

#ifdef CONFIG_SLUB
	unsigned long nr_partial;
	struct list_head partial;
#ifdef CONFIG_SLUB_DEBUG
	atomic_long_t nr_slabs;
	atomic_long_t total_objects;
	struct list_head full;
#endif
#endif

};

static inline struct kmem_cache_node *get_node(struct kmem_cache *s, int node)
{
	return s->node[node];
}

/*
 * Iterator over all nodes. The body will be executed for each node that has
 * a kmem_cache_node structure allocated (which is true for all online nodes)
 */
#define for_each_kmem_cache_node(__s, __node, __n) \
	for (__node = 0; __node < nr_node_ids; __node++) \
		 if ((__n = get_node(__s, __node)))

#endif

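/*
 * Usage sketch (ours, assuming CONFIG_SLUB so nr_partial exists): summing
 * partial slabs across all nodes with the iterator above.
 *
 *	struct kmem_cache_node *n;
 *	unsigned long nr = 0;
 *	int node;
 *
 *	for_each_kmem_cache_node(s, node, n)
 *		nr += n->nr_partial;
 */
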
void *slab_start(struct seq_file *m, loff_t *pos);
void *slab_next(struct seq_file *m, void *p, loff_t *pos);
void slab_stop(struct seq_file *m, void *p);
int memcg_slab_show(struct seq_file *m, void *p);

#if defined(CONFIG_SLAB) || defined(CONFIG_SLUB_DEBUG)
void dump_unreclaimable_slab(void);
#else
static inline void dump_unreclaimable_slab(void)
{
}
#endif

void ___cache_free(struct kmem_cache *cache, void *x, unsigned long addr);

#ifdef CONFIG_SLAB_FREELIST_RANDOM
int cache_random_seq_create(struct kmem_cache *cachep, unsigned int count,
			gfp_t gfp);
void cache_random_seq_destroy(struct kmem_cache *cachep);
#else
static inline int cache_random_seq_create(struct kmem_cache *cachep,
					unsigned int count, gfp_t gfp)
{
	return 0;
}
static inline void cache_random_seq_destroy(struct kmem_cache *cachep) { }
#endif /* CONFIG_SLAB_FREELIST_RANDOM */

static inline bool slab_want_init_on_alloc(gfp_t flags, struct kmem_cache *c)
{
	if (static_branch_maybe(CONFIG_INIT_ON_ALLOC_DEFAULT_ON,
				&init_on_alloc)) {
		if (c->ctor)
			return false;
		if (c->flags & (SLAB_TYPESAFE_BY_RCU | SLAB_POISON))
			return flags & __GFP_ZERO;
		return true;
	}
	return flags & __GFP_ZERO;
}

static inline bool slab_want_init_on_free(struct kmem_cache *c)
{
	if (static_branch_maybe(CONFIG_INIT_ON_FREE_DEFAULT_ON,
				&init_on_free))
		return !(c->ctor ||
			 (c->flags & (SLAB_TYPESAFE_BY_RCU | SLAB_POISON)));
	return false;
}

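/*
 * Example (ours): with init_on_alloc enabled, a cache that has a
 * constructor is never pre-zeroed (zeroing would clobber the constructed
 * state), while SLAB_TYPESAFE_BY_RCU or SLAB_POISON caches are zeroed
 * only on an explicit __GFP_ZERO request; all other caches are zeroed
 * unconditionally.
 */
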
#if defined(CONFIG_DEBUG_FS) && defined(CONFIG_SLUB_DEBUG)
void debugfs_slab_release(struct kmem_cache *);
#else
static inline void debugfs_slab_release(struct kmem_cache *s) { }
#endif

#ifdef CONFIG_PRINTK
#define KS_ADDRS_COUNT 16
struct kmem_obj_info {
	void *kp_ptr;
	struct slab *kp_slab;
	void *kp_objp;
	unsigned long kp_data_offset;
	struct kmem_cache *kp_slab_cache;
	void *kp_ret;
	void *kp_stack[KS_ADDRS_COUNT];
	void *kp_free_stack[KS_ADDRS_COUNT];
};
void kmem_obj_info(struct kmem_obj_info *kpp, void *object, struct slab *slab);
#endif

#ifdef CONFIG_HAVE_HARDENED_USERCOPY_ALLOCATOR
void __check_heap_object(const void *ptr, unsigned long n,
			 const struct slab *slab, bool to_user);
#else
static inline
void __check_heap_object(const void *ptr, unsigned long n,
			 const struct slab *slab, bool to_user)
{
}
#endif

#endif /* MM_SLAB_H */