/* SPDX-License-Identifier: GPL-2.0 */
#ifndef MM_SLAB_H
#define MM_SLAB_H
/*
 * Internal slab definitions
 */

#ifdef CONFIG_SLOB
/*
 * Common fields provided in kmem_cache by all slab allocators
 * This struct is either used directly by the allocator (SLOB)
 * or the allocator must include definitions for all fields
 * provided in kmem_cache_common in their definition of kmem_cache.
 *
 * Once we can do anonymous structs (C11 standard) we could put a
 * anonymous struct definition in these allocators so that the
 * separate allocations in the kmem_cache structure of SLAB and
 * SLUB is no longer needed.
 */
struct kmem_cache {
        unsigned int object_size;       /* The original size of the object */
        unsigned int size;              /* The aligned/padded/added on size */
        unsigned int align;             /* Alignment as calculated */
        slab_flags_t flags;             /* Active flags on the slab */
        const char *name;               /* Slab name for sysfs */
        int refcount;                   /* Use counter */
        void (*ctor)(void *);           /* Called on object slot creation */
        struct list_head list;          /* List of all slab caches on the system */
};

#endif /* CONFIG_SLOB */
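For context, a minimal sketch of how these common fields end up populated for an ordinary cache; the "widget" names are made up for the example, everything else is the regular `kmem_cache_create()` API:

```c
/* Illustrative only: a cache created with kmem_cache_create() carries the
 * common fields listed above (name, object_size, size, align, flags, ...).
 */
struct widget {
        int id;
        struct list_head link;
};

static struct kmem_cache *widget_cache;

static int __init widget_cache_init(void)
{
        widget_cache = kmem_cache_create("widget", sizeof(struct widget),
                                         0, SLAB_HWCACHE_ALIGN, NULL);
        if (!widget_cache)
                return -ENOMEM;
        /*
         * widget_cache->object_size == sizeof(struct widget), ->size is the
         * padded/aligned size, ->name == "widget", ->refcount == 1, and
         * ->list links the cache into the global slab_caches list.
         */
        return 0;
}
```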

#ifdef CONFIG_SLAB
#include <linux/slab_def.h>
#endif

#ifdef CONFIG_SLUB
#include <linux/slub_def.h>
#endif

#include <linux/memcontrol.h>
#include <linux/fault-inject.h>
#include <linux/kasan.h>
#include <linux/kmemleak.h>
#include <linux/random.h>
#include <linux/sched/mm.h>

/*
 * State of the slab allocator.
 *
 * This is used to describe the states of the allocator during bootup.
 * Allocators use this to gradually bootstrap themselves. Most allocators
 * have the problem that the structures used for managing slab caches are
 * allocated from slab caches themselves.
 */
enum slab_state {
        DOWN,                   /* No slab functionality yet */
        PARTIAL,                /* SLUB: kmem_cache_node available */
        PARTIAL_NODE,           /* SLAB: kmalloc size for node struct available */
        UP,                     /* Slab caches usable but not all extras yet */
        FULL                    /* Everything is working */
};

extern enum slab_state slab_state;
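A small sketch of how this state is used: `slab_is_available()` in mm/slab_common.c is essentially the comparison below, and early boot code checks it before trusting `kmalloc()`. The `early_buffer_setup()` caller is made up for illustration:

```c
/* Sketch, assuming the helper name early_buffer_setup() is hypothetical. */
static void * __init early_buffer_setup(size_t len)
{
        if (slab_state < UP)
                return NULL;    /* too early: caller must use a boot allocator */

        return kmalloc(len, GFP_KERNEL);
}
```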

/* The slab cache mutex protects the management structures during changes */
extern struct mutex slab_mutex;

/* The list of all slab caches on the system */
extern struct list_head slab_caches;

/* The slab cache that manages slab cache information */
extern struct kmem_cache *kmem_cache;

/* A table of kmalloc cache names and sizes */
extern const struct kmalloc_info_struct {
        const char *name;
        unsigned long size;
} kmalloc_info[];
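For orientation, an abridged sketch of what the definition in mm/slab_common.c looks like; the index of each entry matches the kmalloc index (96 and 192 occupy the low slots), but the exact list should be checked against the source:

```c
/* Abridged sketch of the table defined in mm/slab_common.c. */
const struct kmalloc_info_struct kmalloc_info[] = {
        {NULL,            0},   {"kmalloc-96",   96},
        {"kmalloc-192", 192},   {"kmalloc-8",     8},
        {"kmalloc-16",   16},   {"kmalloc-32",   32},
        {"kmalloc-64",   64},   {"kmalloc-128", 128},
        {"kmalloc-256", 256},   {"kmalloc-512", 512},
        /* ... continues up to KMALLOC_MAX_CACHE_SIZE ... */
};
```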

unsigned long calculate_alignment(slab_flags_t flags,
                unsigned long align, unsigned long size);

#ifndef CONFIG_SLOB
/* Kmalloc array related functions */
void setup_kmalloc_cache_index_table(void);
void create_kmalloc_caches(slab_flags_t);

/* Find the kmalloc slab corresponding for a certain size */
struct kmem_cache *kmalloc_slab(size_t, gfp_t);
#endif
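The typical caller pattern for `kmalloc_slab()` looks roughly like the sketch below, loosely modelled on the SLUB `__kmalloc()` path; the function name is made up and the large-allocation branch is simplified:

```c
/* Sketch only: resolve a request size to its kmalloc cache, then allocate. */
static void *kmalloc_sketch(size_t size, gfp_t flags)
{
        struct kmem_cache *s;

        if (unlikely(size > KMALLOC_MAX_CACHE_SIZE))
                return kmalloc_large(size, flags);      /* page allocator path */

        s = kmalloc_slab(size, flags);
        if (unlikely(ZERO_OR_NULL_PTR(s)))
                return s;       /* size == 0 yields ZERO_SIZE_PTR */

        return kmem_cache_alloc(s, flags);
}
```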

/* Functions provided by the slab allocators */
int __kmem_cache_create(struct kmem_cache *, slab_flags_t flags);

extern struct kmem_cache *create_kmalloc_cache(const char *name, size_t size,
                        slab_flags_t flags);
extern void create_boot_cache(struct kmem_cache *, const char *name,
                        size_t size, slab_flags_t flags);

int slab_unmergeable(struct kmem_cache *s);
struct kmem_cache *find_mergeable(size_t size, size_t align,
                slab_flags_t flags, const char *name, void (*ctor)(void *));
#ifndef CONFIG_SLOB
struct kmem_cache *
__kmem_cache_alias(const char *name, size_t size, size_t align,
                   slab_flags_t flags, void (*ctor)(void *));

slab_flags_t kmem_cache_flags(unsigned long object_size,
        slab_flags_t flags, const char *name,
        void (*ctor)(void *));
#else
static inline struct kmem_cache *
__kmem_cache_alias(const char *name, size_t size, size_t align,
                   slab_flags_t flags, void (*ctor)(void *))
{ return NULL; }

static inline slab_flags_t kmem_cache_flags(unsigned long object_size,
        slab_flags_t flags, const char *name,
        void (*ctor)(void *))
{
        return flags;
}
#endif
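A hedged illustration of what `find_mergeable()`/`__kmem_cache_alias()` mean in practice: with slab merging active, two compatible caches may end up aliased to one underlying cache. The "foo"/"bar" names and the init function are made up:

```c
/* Illustrative only: cache merging may alias compatible caches. */
static int __init merge_demo_init(void)
{
        struct kmem_cache *a, *b;

        a = kmem_cache_create("foo", 128, 0, 0, NULL);
        b = kmem_cache_create("bar", 128, 0, 0, NULL);
        /*
         * With merging enabled, a == b is possible: find_mergeable() matched
         * the layouts and __kmem_cache_alias() reused the existing cache
         * (bumping its refcount) instead of creating a new one.
         * slab_unmergeable() reports caches that must never be merged,
         * e.g. those with a constructor or debug flags.
         */
        return (a && b) ? 0 : -ENOMEM;
}
```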

/* Legal flag mask for kmem_cache_create(), for various configurations */
#define SLAB_CORE_FLAGS (SLAB_HWCACHE_ALIGN | SLAB_CACHE_DMA | SLAB_PANIC | \
                         SLAB_TYPESAFE_BY_RCU | SLAB_DEBUG_OBJECTS )

#if defined(CONFIG_DEBUG_SLAB)
#define SLAB_DEBUG_FLAGS (SLAB_RED_ZONE | SLAB_POISON | SLAB_STORE_USER)
#elif defined(CONFIG_SLUB_DEBUG)
#define SLAB_DEBUG_FLAGS (SLAB_RED_ZONE | SLAB_POISON | SLAB_STORE_USER | \
                          SLAB_TRACE | SLAB_CONSISTENCY_CHECKS)
#else
#define SLAB_DEBUG_FLAGS (0)
#endif

#if defined(CONFIG_SLAB)
#define SLAB_CACHE_FLAGS (SLAB_MEM_SPREAD | SLAB_NOLEAKTRACE | \
                          SLAB_RECLAIM_ACCOUNT | SLAB_TEMPORARY | \
                          SLAB_ACCOUNT)
#elif defined(CONFIG_SLUB)
#define SLAB_CACHE_FLAGS (SLAB_NOLEAKTRACE | SLAB_RECLAIM_ACCOUNT | \
                          SLAB_TEMPORARY | SLAB_ACCOUNT)
#else
#define SLAB_CACHE_FLAGS (0)
#endif

/* Common flags available with current configuration */
#define CACHE_CREATE_MASK (SLAB_CORE_FLAGS | SLAB_DEBUG_FLAGS | SLAB_CACHE_FLAGS)

/* Common flags permitted for kmem_cache_create */
#define SLAB_FLAGS_PERMITTED (SLAB_CORE_FLAGS | \
                              SLAB_RED_ZONE | \
                              SLAB_POISON | \
                              SLAB_STORE_USER | \
                              SLAB_TRACE | \
                              SLAB_CONSISTENCY_CHECKS | \
                              SLAB_MEM_SPREAD | \
                              SLAB_NOLEAKTRACE | \
                              SLAB_RECLAIM_ACCOUNT | \
                              SLAB_TEMPORARY | \
                              SLAB_ACCOUNT)
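A simplified sketch of how the two masks are used by `kmem_cache_create()` in mm/slab_common.c: reject anything outside the permitted set, then keep only the flags the current configuration supports. The helper name below is hypothetical:

```c
/* Simplified sketch of the flag handling in kmem_cache_create(). */
static slab_flags_t sanitize_cache_flags(slab_flags_t flags)
{
        /* Refuse flags that callers are never allowed to pass. */
        if (WARN_ON(flags & ~SLAB_FLAGS_PERMITTED))
                return 0;

        /* Keep only the flags valid for this configuration. */
        return flags & CACHE_CREATE_MASK;
}
```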

int __kmem_cache_shutdown(struct kmem_cache *);
void __kmem_cache_release(struct kmem_cache *);
int __kmem_cache_shrink(struct kmem_cache *);
void __kmemcg_cache_deactivate(struct kmem_cache *s);
void slab_kmem_cache_release(struct kmem_cache *);

struct seq_file;
struct file;

struct slabinfo {
        unsigned long active_objs;
        unsigned long num_objs;
        unsigned long active_slabs;
        unsigned long num_slabs;
        unsigned long shared_avail;
        unsigned int limit;
        unsigned int batchcount;
        unsigned int shared;
        unsigned int objects_per_slab;
        unsigned int cache_order;
};

void get_slabinfo(struct kmem_cache *s, struct slabinfo *sinfo);
void slabinfo_show_stats(struct seq_file *m, struct kmem_cache *s);
ssize_t slabinfo_write(struct file *file, const char __user *buffer,
                       size_t count, loff_t *ppos);

/*
 * Generic implementation of bulk operations
 * These are useful for situations in which the allocator cannot
 * perform optimizations. In that case segments of the object listed
 * may be allocated or freed using these operations.
 */
void __kmem_cache_free_bulk(struct kmem_cache *, size_t, void **);
int __kmem_cache_alloc_bulk(struct kmem_cache *, gfp_t, size_t, void **);
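The generic fallbacks are essentially per-object loops; a sketch along the lines of the mm/slab_common.c implementations (consult the source for the exact code):

```c
/* Sketch of the generic bulk fallbacks: one object at a time. */
void __kmem_cache_free_bulk_sketch(struct kmem_cache *s, size_t nr, void **p)
{
        size_t i;

        for (i = 0; i < nr; i++) {
                if (s)
                        kmem_cache_free(s, p[i]);
                else
                        kfree(p[i]);
        }
}

int __kmem_cache_alloc_bulk_sketch(struct kmem_cache *s, gfp_t flags,
                                   size_t nr, void **p)
{
        size_t i;

        for (i = 0; i < nr; i++) {
                void *x = p[i] = kmem_cache_alloc(s, flags);

                if (!x) {
                        /* undo everything allocated so far */
                        __kmem_cache_free_bulk(s, i, p);
                        return 0;
                }
        }
        return i;
}
```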

#if defined(CONFIG_MEMCG) && !defined(CONFIG_SLOB)

/* List of all root caches. */
extern struct list_head         slab_root_caches;
#define root_caches_node        memcg_params.__root_caches_node

/*
 * Iterate over all memcg caches of the given root cache. The caller must hold
 * slab_mutex.
 */
#define for_each_memcg_cache(iter, root) \
        list_for_each_entry(iter, &(root)->memcg_params.children, \
                            memcg_params.children_node)
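Usage sketch for the iterator, with the locking rule from the comment made explicit; `print_children()` is a made-up caller:

```c
/* Sketch: walk every per-memcg child of a root cache under slab_mutex. */
static void print_children(struct kmem_cache *root)
{
        struct kmem_cache *c;

        mutex_lock(&slab_mutex);
        for_each_memcg_cache(c, root)
                pr_info("child of %s: %s\n", root->name, c->name);
        mutex_unlock(&slab_mutex);
}
```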

static inline bool is_root_cache(struct kmem_cache *s)
{
        return !s->memcg_params.root_cache;
}

static inline bool slab_equal_or_root(struct kmem_cache *s,
                                      struct kmem_cache *p)
{
        return p == s || p == s->memcg_params.root_cache;
}

/*
 * We use suffixes to the name in memcg because we can't have caches
 * created in the system with the same name. But when we print them
 * locally, better refer to them with the base name
 */
static inline const char *cache_name(struct kmem_cache *s)
{
        if (!is_root_cache(s))
                s = s->memcg_params.root_cache;
        return s->name;
}

/*
 * Note, we protect with RCU only the memcg_caches array, not per-memcg caches.
 * That said the caller must assure the memcg's cache won't go away by either
 * taking a css reference to the owner cgroup, or holding the slab_mutex.
 */
static inline struct kmem_cache *
cache_from_memcg_idx(struct kmem_cache *s, int idx)
{
        struct kmem_cache *cachep;
        struct memcg_cache_array *arr;

        rcu_read_lock();
        arr = rcu_dereference(s->memcg_params.memcg_caches);

        /*
         * Make sure we will access the up-to-date value. The code updating
         * memcg_caches issues a write barrier to match this (see
         * memcg_create_kmem_cache()).
         */
        cachep = READ_ONCE(arr->entries[idx]);
        rcu_read_unlock();

        return cachep;
}

static inline struct kmem_cache *memcg_root_cache(struct kmem_cache *s)
{
        if (is_root_cache(s))
                return s;
        return s->memcg_params.root_cache;
}

static __always_inline int memcg_charge_slab(struct page *page,
                                             gfp_t gfp, int order,
                                             struct kmem_cache *s)
{
        if (!memcg_kmem_enabled())
                return 0;
        if (is_root_cache(s))
                return 0;
        return memcg_kmem_charge_memcg(page, gfp, order, s->memcg_params.memcg);
}

static __always_inline void memcg_uncharge_slab(struct page *page, int order,
                                                struct kmem_cache *s)
{
        if (!memcg_kmem_enabled())
                return;
        memcg_kmem_uncharge(page, order);
}
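The charge helper is meant to be called right after a slab page is allocated; a sketch of that caller pattern (compare the page-allocation paths in the allocators, e.g. SLAB's `kmem_getpages()`), with a hypothetical function name:

```c
/* Sketch: charge a freshly allocated slab page to the cache's memcg,
 * or give the page back if the charge fails.
 */
static struct page *alloc_slab_page_sketch(struct kmem_cache *s, gfp_t flags,
                                           int order, int node)
{
        struct page *page = __alloc_pages_node(node, flags, order);

        if (page && memcg_charge_slab(page, flags, order, s)) {
                __free_pages(page, order);
                page = NULL;
        }
        return page;
}
```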

extern void slab_init_memcg_params(struct kmem_cache *);
extern void memcg_link_cache(struct kmem_cache *s);
extern void slab_deactivate_memcg_cache_rcu_sched(struct kmem_cache *s,
                                void (*deact_fn)(struct kmem_cache *));

#else /* CONFIG_MEMCG && !CONFIG_SLOB */

/* If !memcg, all caches are root. */
#define slab_root_caches        slab_caches
#define root_caches_node        list

#define for_each_memcg_cache(iter, root) \
        for ((void)(iter), (void)(root); 0; )

static inline bool is_root_cache(struct kmem_cache *s)
{
        return true;
}

static inline bool slab_equal_or_root(struct kmem_cache *s,
                                      struct kmem_cache *p)
{
        return true;
}

static inline const char *cache_name(struct kmem_cache *s)
{
        return s->name;
}

static inline struct kmem_cache *
cache_from_memcg_idx(struct kmem_cache *s, int idx)
{
        return NULL;
}

static inline struct kmem_cache *memcg_root_cache(struct kmem_cache *s)
{
        return s;
}

static inline int memcg_charge_slab(struct page *page, gfp_t gfp, int order,
                                    struct kmem_cache *s)
{
        return 0;
}

static inline void memcg_uncharge_slab(struct page *page, int order,
                                       struct kmem_cache *s)
{
}

static inline void slab_init_memcg_params(struct kmem_cache *s)
{
}

static inline void memcg_link_cache(struct kmem_cache *s)
{
}

#endif /* CONFIG_MEMCG && !CONFIG_SLOB */

static inline struct kmem_cache *cache_from_obj(struct kmem_cache *s, void *x)
{
        struct kmem_cache *cachep;
        struct page *page;

        /*
         * When kmemcg is not being used, both assignments should return the
         * same value. but we don't want to pay the assignment price in that
         * case. If it is not compiled in, the compiler should be smart enough
         * to not do even the assignment. In that case, slab_equal_or_root
         * will also be a constant.
         */
        if (!memcg_kmem_enabled() &&
            !unlikely(s->flags & SLAB_CONSISTENCY_CHECKS))
                return s;

        page = virt_to_head_page(x);
        cachep = page->slab_cache;
        if (slab_equal_or_root(cachep, s))
                return cachep;

        pr_err("%s: Wrong slab cache. %s but object is from %s\n",
               __func__, s->name, cachep->name);
        WARN_ON_ONCE(1);
        return s;
}
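A sketch of how the free paths use `cache_from_obj()`: the object's real cache (looked up from its page) wins over whatever cache the caller named, and mismatches are reported. The function name below is illustrative, not the allocators' actual code:

```c
/* Sketch: resolve the real cache of an object before freeing it. */
static void kmem_cache_free_sketch(struct kmem_cache *s, void *x)
{
        s = cache_from_obj(s, x);       /* may warn and fix up a mismatch */
        if (!s)
                return;
        /* ... proceed to return x to cache s ... */
}
```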

static inline size_t slab_ksize(const struct kmem_cache *s)
{
#ifndef CONFIG_SLUB
        return s->object_size;

#else /* CONFIG_SLUB */
# ifdef CONFIG_SLUB_DEBUG
        /*
         * Debugging requires use of the padding between object
         * and whatever may come after it.
         */
        if (s->flags & (SLAB_RED_ZONE | SLAB_POISON))
                return s->object_size;
# endif
        if (s->flags & SLAB_KASAN)
                return s->object_size;
        /*
         * If we have the need to store the freelist pointer
         * back there or track user information then we can
         * only use the space before that information.
         */
        if (s->flags & (SLAB_TYPESAFE_BY_RCU | SLAB_STORE_USER))
                return s->inuse;
        /*
         * Else we can use all the padding etc for the allocation
         */
        return s->size;
#endif
}

static inline struct kmem_cache *slab_pre_alloc_hook(struct kmem_cache *s,
                                                     gfp_t flags)
{
        flags &= gfp_allowed_mask;

        fs_reclaim_acquire(flags);
        fs_reclaim_release(flags);

        might_sleep_if(gfpflags_allow_blocking(flags));

        if (should_failslab(s, flags))
                return NULL;

        if (memcg_kmem_enabled() &&
            ((flags & __GFP_ACCOUNT) || (s->flags & SLAB_ACCOUNT)))
                return memcg_kmem_get_cache(s);

        return s;
}

static inline void slab_post_alloc_hook(struct kmem_cache *s, gfp_t flags,
                                        size_t size, void **p)
{
        size_t i;

        flags &= gfp_allowed_mask;
        for (i = 0; i < size; i++) {
                void *object = p[i];

                kmemleak_alloc_recursive(object, s->object_size, 1,
                                         s->flags, flags);
                kasan_slab_alloc(s, object, flags);
        }

        if (memcg_kmem_enabled())
                memcg_kmem_put_cache(s);
}
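The two hooks bracket the allocators' fast paths; a sketch of the wrapping (compare `slab_alloc_node()` in SLUB), where `actual_alloc()` stands in for the allocator-specific work:

```c
/* Sketch: how an allocator wraps its fast path with the common hooks. */
static void *slab_alloc_sketch(struct kmem_cache *s, gfp_t gfpflags)
{
        void *object;

        s = slab_pre_alloc_hook(s, gfpflags);   /* may fail or redirect to a memcg cache */
        if (!s)
                return NULL;

        object = actual_alloc(s, gfpflags);     /* hypothetical: cpu freelist / new slab */

        slab_post_alloc_hook(s, gfpflags, 1, &object);
        return object;
}
```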

#ifndef CONFIG_SLOB
/*
 * The slab lists for all objects.
 */
struct kmem_cache_node {
        spinlock_t list_lock;

#ifdef CONFIG_SLAB
        struct list_head slabs_partial; /* partial list first, better asm code */
        struct list_head slabs_full;
        struct list_head slabs_free;
        unsigned long total_slabs;      /* length of all slab lists */
        unsigned long free_slabs;       /* length of free slab list only */
        unsigned long free_objects;
        unsigned int free_limit;
        unsigned int colour_next;       /* Per-node cache coloring */
        struct array_cache *shared;     /* shared per node */
        struct alien_cache **alien;     /* on other nodes */
        unsigned long next_reap;        /* updated without locking */
        int free_touched;               /* updated without locking */
#endif

#ifdef CONFIG_SLUB
        unsigned long nr_partial;
        struct list_head partial;
#ifdef CONFIG_SLUB_DEBUG
        atomic_long_t nr_slabs;
        atomic_long_t total_objects;
        struct list_head full;
#endif
#endif

};

static inline struct kmem_cache_node *get_node(struct kmem_cache *s, int node)
{
        return s->node[node];
}

/*
 * Iterator over all nodes. The body will be executed for each node that has
 * a kmem_cache_node structure allocated (which is true for all online nodes)
 */
#define for_each_kmem_cache_node(__s, __node, __n) \
        for (__node = 0; __node < nr_node_ids; __node++) \
                 if ((__n = get_node(__s, __node)))

#endif
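Usage sketch for the per-node iterator, summing a SLUB-only field across nodes; the helper name is made up:

```c
/* Sketch: iterate a cache's per-node structures (SLUB field shown). */
static unsigned long count_partial_sketch(struct kmem_cache *s)
{
        struct kmem_cache_node *n;
        unsigned long total = 0;
        int node;

        for_each_kmem_cache_node(s, node, n)
                total += n->nr_partial;

        return total;
}
```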

void *slab_start(struct seq_file *m, loff_t *pos);
void *slab_next(struct seq_file *m, void *p, loff_t *pos);
void slab_stop(struct seq_file *m, void *p);
void *memcg_slab_start(struct seq_file *m, loff_t *pos);
void *memcg_slab_next(struct seq_file *m, void *p, loff_t *pos);
void memcg_slab_stop(struct seq_file *m, void *p);
int memcg_slab_show(struct seq_file *m, void *p);

#if defined(CONFIG_SLAB) || defined(CONFIG_SLUB_DEBUG)
void dump_unreclaimable_slab(void);
#else
static inline void dump_unreclaimable_slab(void)
{
}
#endif

void ___cache_free(struct kmem_cache *cache, void *x, unsigned long addr);

#ifdef CONFIG_SLAB_FREELIST_RANDOM
int cache_random_seq_create(struct kmem_cache *cachep, unsigned int count,
                        gfp_t gfp);
void cache_random_seq_destroy(struct kmem_cache *cachep);
#else
static inline int cache_random_seq_create(struct kmem_cache *cachep,
                                        unsigned int count, gfp_t gfp)
{
        return 0;
}
static inline void cache_random_seq_destroy(struct kmem_cache *cachep) { }
#endif /* CONFIG_SLAB_FREELIST_RANDOM */
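A rough sketch of the idea behind freelist randomization: `cache_random_seq_create()` fills a per-cache array with a shuffled permutation of object indices, and a new slab links its free objects in that order instead of address order. The field access and helper below are illustrative stand-ins, not the allocators' exact code:

```c
/* Sketch only: build a slab's initial freelist in shuffled order,
 * assuming cachep->random_seq was precomputed by cache_random_seq_create()
 * and link_free_object() is a hypothetical stand-in.
 */
static void init_freelist_random_sketch(struct kmem_cache *cachep,
                                        void *slab_base, unsigned int nr_objs)
{
        unsigned int i;

        for (i = 0; i < nr_objs; i++) {
                unsigned int idx = cachep->random_seq[i];   /* shuffled index */

                link_free_object(slab_base, idx, cachep->size);
        }
}
```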

#endif /* MM_SLAB_H */