/* SPDX-License-Identifier: GPL-2.0 */
#ifndef MM_SLAB_H
#define MM_SLAB_H
/*
 * Internal slab definitions
 */

#ifdef CONFIG_SLOB
/*
 * Common fields provided in kmem_cache by all slab allocators.
 * This struct is either used directly by the allocator (SLOB)
 * or the allocator must include definitions for all fields
 * provided in kmem_cache_common in their definition of kmem_cache.
 *
 * Once we can use anonymous structs (C11 standard) we could put an
 * anonymous struct definition in these allocators so that the
 * separate allocations in the kmem_cache structure of SLAB and
 * SLUB are no longer needed.
 */
struct kmem_cache {
	unsigned int object_size;/* The original size of the object */
	unsigned int size;	/* The aligned/padded/added on size */
	unsigned int align;	/* Alignment as calculated */
	slab_flags_t flags;	/* Active flags on the slab */
	unsigned int useroffset;/* Usercopy region offset */
	unsigned int usersize;	/* Usercopy region size */
	const char *name;	/* Slab name for sysfs */
	int refcount;		/* Use counter */
	void (*ctor)(void *);	/* Called on object slot creation */
	struct list_head list;	/* List of all slab caches on the system */
};

#endif /* CONFIG_SLOB */

#ifdef CONFIG_SLAB
#include <linux/slab_def.h>
#endif

#ifdef CONFIG_SLUB
#include <linux/slub_def.h>
#endif

#include <linux/memcontrol.h>
#include <linux/fault-inject.h>
#include <linux/kasan.h>
#include <linux/kmemleak.h>
#include <linux/random.h>
#include <linux/sched/mm.h>

/*
 * State of the slab allocator.
 *
 * This is used to describe the states of the allocator during bootup.
 * Allocators use this to gradually bootstrap themselves. Most allocators
 * have the problem that the structures used for managing slab caches are
 * allocated from slab caches themselves.
 */
enum slab_state {
	DOWN,			/* No slab functionality yet */
	PARTIAL,		/* SLUB: kmem_cache_node available */
	PARTIAL_NODE,		/* SLAB: kmalloc size for node struct available */
	UP,			/* Slab caches usable but not all extras yet */
	FULL			/* Everything is working */
};

extern enum slab_state slab_state;
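
/*
 * Illustrative sketch only (not a call site in this header): allocator code
 * typically gates optional work on how far bootstrap has progressed, e.g.
 *
 *	if (slab_state >= UP)
 *		register_optional_bits();
 *
 * where register_optional_bits() is a placeholder, not a real function.
 * See the individual allocators for the checks they actually perform.
 */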

/* The slab cache mutex protects the management structures during changes */
extern struct mutex slab_mutex;

/* The list of all slab caches on the system */
extern struct list_head slab_caches;

/* The slab cache that manages slab cache information */
extern struct kmem_cache *kmem_cache;

/* A table of kmalloc cache names and sizes */
extern const struct kmalloc_info_struct {
	const char *name;
	unsigned int size;
} kmalloc_info[];
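
/*
 * For illustration only: the entries of kmalloc_info[] (defined in
 * mm/slab_common.c) pair a cache name with its object size, roughly
 *
 *	{ "kmalloc-64", 64 }, { "kmalloc-128", 128 }, ...
 *
 * The exact set of sizes depends on the KMALLOC_SHIFT_* limits of the build.
 */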

#ifndef CONFIG_SLOB
/* Kmalloc array related functions */
void setup_kmalloc_cache_index_table(void);
void create_kmalloc_caches(slab_flags_t);

/* Find the kmalloc slab corresponding to a certain size */
struct kmem_cache *kmalloc_slab(size_t, gfp_t);
#endif
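
/*
 * Sketch of how the lookup above is used, simplified from the kmalloc()
 * implementation (error handling trimmed):
 *
 *	struct kmem_cache *s = kmalloc_slab(size, flags);
 *
 *	if (unlikely(ZERO_OR_NULL_PTR(s)))
 *		return s;
 *	return kmem_cache_alloc(s, flags);
 *
 * i.e. a request size is mapped to one of the fixed kmalloc caches and the
 * allocation is then served from that cache.
 */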


/* Functions provided by the slab allocators */
int __kmem_cache_create(struct kmem_cache *, slab_flags_t flags);

struct kmem_cache *create_kmalloc_cache(const char *name, unsigned int size,
			slab_flags_t flags, unsigned int useroffset,
			unsigned int usersize);
extern void create_boot_cache(struct kmem_cache *, const char *name,
			unsigned int size, slab_flags_t flags,
			unsigned int useroffset, unsigned int usersize);

int slab_unmergeable(struct kmem_cache *s);
struct kmem_cache *find_mergeable(unsigned size, unsigned align,
		slab_flags_t flags, const char *name, void (*ctor)(void *));
#ifndef CONFIG_SLOB
struct kmem_cache *
__kmem_cache_alias(const char *name, unsigned int size, unsigned int align,
		   slab_flags_t flags, void (*ctor)(void *));

slab_flags_t kmem_cache_flags(unsigned int object_size,
	slab_flags_t flags, const char *name,
	void (*ctor)(void *));
#else
static inline struct kmem_cache *
__kmem_cache_alias(const char *name, unsigned int size, unsigned int align,
		   slab_flags_t flags, void (*ctor)(void *))
{ return NULL; }

static inline slab_flags_t kmem_cache_flags(unsigned int object_size,
	slab_flags_t flags, const char *name,
	void (*ctor)(void *))
{
	return flags;
}
#endif


/* Legal flag mask for kmem_cache_create(), for various configurations */
#define SLAB_CORE_FLAGS (SLAB_HWCACHE_ALIGN | SLAB_CACHE_DMA | SLAB_PANIC | \
			 SLAB_TYPESAFE_BY_RCU | SLAB_DEBUG_OBJECTS)

#if defined(CONFIG_DEBUG_SLAB)
#define SLAB_DEBUG_FLAGS (SLAB_RED_ZONE | SLAB_POISON | SLAB_STORE_USER)
#elif defined(CONFIG_SLUB_DEBUG)
#define SLAB_DEBUG_FLAGS (SLAB_RED_ZONE | SLAB_POISON | SLAB_STORE_USER | \
			  SLAB_TRACE | SLAB_CONSISTENCY_CHECKS)
#else
#define SLAB_DEBUG_FLAGS (0)
#endif

#if defined(CONFIG_SLAB)
#define SLAB_CACHE_FLAGS (SLAB_MEM_SPREAD | SLAB_NOLEAKTRACE | \
			  SLAB_RECLAIM_ACCOUNT | SLAB_TEMPORARY | \
			  SLAB_ACCOUNT)
#elif defined(CONFIG_SLUB)
#define SLAB_CACHE_FLAGS (SLAB_NOLEAKTRACE | SLAB_RECLAIM_ACCOUNT | \
			  SLAB_TEMPORARY | SLAB_ACCOUNT)
#else
#define SLAB_CACHE_FLAGS (0)
#endif

/* Common flags available with current configuration */
#define CACHE_CREATE_MASK (SLAB_CORE_FLAGS | SLAB_DEBUG_FLAGS | SLAB_CACHE_FLAGS)

/* Common flags permitted for kmem_cache_create */
#define SLAB_FLAGS_PERMITTED (SLAB_CORE_FLAGS | \
			      SLAB_RED_ZONE | \
			      SLAB_POISON | \
			      SLAB_STORE_USER | \
			      SLAB_TRACE | \
			      SLAB_CONSISTENCY_CHECKS | \
			      SLAB_MEM_SPREAD | \
			      SLAB_NOLEAKTRACE | \
			      SLAB_RECLAIM_ACCOUNT | \
			      SLAB_TEMPORARY | \
			      SLAB_ACCOUNT)
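
/*
 * Illustrative use of the two masks above (simplified; the real checks live
 * in mm/slab_common.c):
 *
 *	if (flags & ~SLAB_FLAGS_PERMITTED)
 *		return NULL;			reject unknown flags
 *	flags &= CACHE_CREATE_MASK;		drop flags this build ignores
 *
 * SLAB_FLAGS_PERMITTED bounds what callers may request, while
 * CACHE_CREATE_MASK is what the current configuration actually honours.
 */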

bool __kmem_cache_empty(struct kmem_cache *);
int __kmem_cache_shutdown(struct kmem_cache *);
void __kmem_cache_release(struct kmem_cache *);
int __kmem_cache_shrink(struct kmem_cache *);
void __kmemcg_cache_deactivate(struct kmem_cache *s);
void slab_kmem_cache_release(struct kmem_cache *);

struct seq_file;
struct file;

struct slabinfo {
	unsigned long active_objs;
	unsigned long num_objs;
	unsigned long active_slabs;
	unsigned long num_slabs;
	unsigned long shared_avail;
	unsigned int limit;
	unsigned int batchcount;
	unsigned int shared;
	unsigned int objects_per_slab;
	unsigned int cache_order;
};

void get_slabinfo(struct kmem_cache *s, struct slabinfo *sinfo);
void slabinfo_show_stats(struct seq_file *m, struct kmem_cache *s);
ssize_t slabinfo_write(struct file *file, const char __user *buffer,
		       size_t count, loff_t *ppos);

/*
 * Generic implementation of bulk operations
 * These are useful for situations in which the allocator cannot
 * perform optimizations. In that case segments of the objects listed
 * may be allocated or freed using these operations.
 */
void __kmem_cache_free_bulk(struct kmem_cache *, size_t, void **);
int __kmem_cache_alloc_bulk(struct kmem_cache *, gfp_t, size_t, void **);

#ifdef CONFIG_MEMCG_KMEM

/* List of all root caches. */
extern struct list_head		slab_root_caches;
#define root_caches_node	memcg_params.__root_caches_node

/*
 * Iterate over all memcg caches of the given root cache. The caller must hold
 * slab_mutex.
 */
#define for_each_memcg_cache(iter, root) \
	list_for_each_entry(iter, &(root)->memcg_params.children, \
			    memcg_params.children_node)
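
/*
 * Illustrative sketch of walking the per-memcg children of a root cache
 * (the names below are placeholders, not declarations from this header):
 *
 *	struct kmem_cache *c;
 *
 *	mutex_lock(&slab_mutex);
 *	for_each_memcg_cache(c, root_cache)
 *		do_something_with(c);
 *	mutex_unlock(&slab_mutex);
 */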

static inline bool is_root_cache(struct kmem_cache *s)
{
	return !s->memcg_params.root_cache;
}

static inline bool slab_equal_or_root(struct kmem_cache *s,
				      struct kmem_cache *p)
{
	return p == s || p == s->memcg_params.root_cache;
}

/*
 * We use suffixes to the name in memcg because we can't have caches
 * created in the system with the same name. But when we print them
 * locally, it is better to refer to them by the base name.
 */
static inline const char *cache_name(struct kmem_cache *s)
{
	if (!is_root_cache(s))
		s = s->memcg_params.root_cache;
	return s->name;
}

/*
 * Note, we protect with RCU only the memcg_caches array, not per-memcg caches.
 * That said, the caller must ensure the memcg's cache won't go away by either
 * taking a css reference to the owner cgroup, or holding the slab_mutex.
 */
static inline struct kmem_cache *
cache_from_memcg_idx(struct kmem_cache *s, int idx)
{
	struct kmem_cache *cachep;
	struct memcg_cache_array *arr;

	rcu_read_lock();
	arr = rcu_dereference(s->memcg_params.memcg_caches);

	/*
	 * Make sure we will access the up-to-date value. The code updating
	 * memcg_caches issues a write barrier to match this (see
	 * memcg_create_kmem_cache()).
	 */
	cachep = READ_ONCE(arr->entries[idx]);
	rcu_read_unlock();

	return cachep;
}

static inline struct kmem_cache *memcg_root_cache(struct kmem_cache *s)
{
	if (is_root_cache(s))
		return s;
	return s->memcg_params.root_cache;
}

static __always_inline int memcg_charge_slab(struct page *page,
					     gfp_t gfp, int order,
					     struct kmem_cache *s)
{
	if (!memcg_kmem_enabled())
		return 0;
	if (is_root_cache(s))
		return 0;
	return memcg_kmem_charge_memcg(page, gfp, order, s->memcg_params.memcg);
}

static __always_inline void memcg_uncharge_slab(struct page *page, int order,
						struct kmem_cache *s)
{
	if (!memcg_kmem_enabled())
		return;
	memcg_kmem_uncharge(page, order);
}

extern void slab_init_memcg_params(struct kmem_cache *);
extern void memcg_link_cache(struct kmem_cache *s);
extern void slab_deactivate_memcg_cache_rcu_sched(struct kmem_cache *s,
				void (*deact_fn)(struct kmem_cache *));

#else /* CONFIG_MEMCG_KMEM */

/* If !memcg, all caches are root. */
#define slab_root_caches	slab_caches
#define root_caches_node	list

#define for_each_memcg_cache(iter, root) \
	for ((void)(iter), (void)(root); 0; )

static inline bool is_root_cache(struct kmem_cache *s)
{
	return true;
}

static inline bool slab_equal_or_root(struct kmem_cache *s,
				      struct kmem_cache *p)
{
	return true;
}

static inline const char *cache_name(struct kmem_cache *s)
{
	return s->name;
}

static inline struct kmem_cache *
cache_from_memcg_idx(struct kmem_cache *s, int idx)
{
	return NULL;
}

static inline struct kmem_cache *memcg_root_cache(struct kmem_cache *s)
{
	return s;
}

static inline int memcg_charge_slab(struct page *page, gfp_t gfp, int order,
				    struct kmem_cache *s)
{
	return 0;
}

static inline void memcg_uncharge_slab(struct page *page, int order,
				       struct kmem_cache *s)
{
}

static inline void slab_init_memcg_params(struct kmem_cache *s)
{
}

static inline void memcg_link_cache(struct kmem_cache *s)
{
}

#endif /* CONFIG_MEMCG_KMEM */

static inline struct kmem_cache *cache_from_obj(struct kmem_cache *s, void *x)
{
	struct kmem_cache *cachep;
	struct page *page;

	/*
	 * When kmemcg is not being used, both assignments should return the
	 * same value, but we don't want to pay the assignment price in that
	 * case. If it is not compiled in, the compiler should be smart enough
	 * to not do even the assignment. In that case, slab_equal_or_root
	 * will also be a constant.
	 */
	if (!memcg_kmem_enabled() &&
	    !unlikely(s->flags & SLAB_CONSISTENCY_CHECKS))
		return s;

	page = virt_to_head_page(x);
	cachep = page->slab_cache;
	if (slab_equal_or_root(cachep, s))
		return cachep;

	pr_err("%s: Wrong slab cache. %s but object is from %s\n",
	       __func__, s->name, cachep->name);
	WARN_ON_ONCE(1);
	return s;
}

static inline size_t slab_ksize(const struct kmem_cache *s)
{
#ifndef CONFIG_SLUB
	return s->object_size;

#else /* CONFIG_SLUB */
# ifdef CONFIG_SLUB_DEBUG
	/*
	 * Debugging requires use of the padding between object
	 * and whatever may come after it.
	 */
	if (s->flags & (SLAB_RED_ZONE | SLAB_POISON))
		return s->object_size;
# endif
	if (s->flags & SLAB_KASAN)
		return s->object_size;
	/*
	 * If we have the need to store the freelist pointer
	 * back there or track user information then we can
	 * only use the space before that information.
	 */
	if (s->flags & (SLAB_TYPESAFE_BY_RCU | SLAB_STORE_USER))
		return s->inuse;
	/*
	 * Else we can use all the padding etc for the allocation
	 */
	return s->size;
#endif
}

static inline struct kmem_cache *slab_pre_alloc_hook(struct kmem_cache *s,
						     gfp_t flags)
{
	flags &= gfp_allowed_mask;

	fs_reclaim_acquire(flags);
	fs_reclaim_release(flags);

	might_sleep_if(gfpflags_allow_blocking(flags));

	if (should_failslab(s, flags))
		return NULL;

	if (memcg_kmem_enabled() &&
	    ((flags & __GFP_ACCOUNT) || (s->flags & SLAB_ACCOUNT)))
		return memcg_kmem_get_cache(s);

	return s;
}

static inline void slab_post_alloc_hook(struct kmem_cache *s, gfp_t flags,
					size_t size, void **p)
{
	size_t i;

	flags &= gfp_allowed_mask;
	for (i = 0; i < size; i++) {
		void *object = p[i];

		kmemleak_alloc_recursive(object, s->object_size, 1,
					 s->flags, flags);
		p[i] = kasan_slab_alloc(s, object, flags);
	}

	if (memcg_kmem_enabled())
		memcg_kmem_put_cache(s);
}
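
/*
 * Rough shape of how an allocator pairs the two hooks above (illustrative;
 * the real SLAB/SLUB fast paths have more steps):
 *
 *	s = slab_pre_alloc_hook(s, gfpflags);
 *	if (!s)
 *		return NULL;
 *	object = ...allocate from s...;
 *	slab_post_alloc_hook(s, gfpflags, 1, &object);
 *	return object;
 *
 * The pre hook may redirect to a memcg cache or fail the allocation; the
 * post hook runs the kmemleak/KASAN handling on each new object.
 */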

#ifndef CONFIG_SLOB
/*
 * The slab lists for all objects.
 */
struct kmem_cache_node {
	spinlock_t list_lock;

#ifdef CONFIG_SLAB
	struct list_head slabs_partial;	/* partial list first, better asm code */
	struct list_head slabs_full;
	struct list_head slabs_free;
	unsigned long total_slabs;	/* length of all slab lists */
	unsigned long free_slabs;	/* length of free slab list only */
	unsigned long free_objects;
	unsigned int free_limit;
	unsigned int colour_next;	/* Per-node cache coloring */
	struct array_cache *shared;	/* shared per node */
	struct alien_cache **alien;	/* on other nodes */
	unsigned long next_reap;	/* updated without locking */
	int free_touched;		/* updated without locking */
#endif

#ifdef CONFIG_SLUB
	unsigned long nr_partial;
	struct list_head partial;
#ifdef CONFIG_SLUB_DEBUG
	atomic_long_t nr_slabs;
	atomic_long_t total_objects;
	struct list_head full;
#endif
#endif

};

static inline struct kmem_cache_node *get_node(struct kmem_cache *s, int node)
{
	return s->node[node];
}

/*
 * Iterator over all nodes. The body will be executed for each node that has
 * a kmem_cache_node structure allocated (which is true for all online nodes)
 */
#define for_each_kmem_cache_node(__s, __node, __n) \
	for (__node = 0; __node < nr_node_ids; __node++) \
		 if ((__n = get_node(__s, __node)))
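
/*
 * Typical use (illustrative; the loop body is a placeholder):
 *
 *	struct kmem_cache_node *n;
 *	int node;
 *
 *	for_each_kmem_cache_node(s, node, n) {
 *		spin_lock_irq(&n->list_lock);
 *		...walk n->partial or the per-node SLAB lists...
 *		spin_unlock_irq(&n->list_lock);
 *	}
 */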

#endif

void *slab_start(struct seq_file *m, loff_t *pos);
void *slab_next(struct seq_file *m, void *p, loff_t *pos);
void slab_stop(struct seq_file *m, void *p);
void *memcg_slab_start(struct seq_file *m, loff_t *pos);
void *memcg_slab_next(struct seq_file *m, void *p, loff_t *pos);
void memcg_slab_stop(struct seq_file *m, void *p);
int memcg_slab_show(struct seq_file *m, void *p);

#if defined(CONFIG_SLAB) || defined(CONFIG_SLUB_DEBUG)
void dump_unreclaimable_slab(void);
#else
static inline void dump_unreclaimable_slab(void)
{
}
#endif

void ___cache_free(struct kmem_cache *cache, void *x, unsigned long addr);

#ifdef CONFIG_SLAB_FREELIST_RANDOM
int cache_random_seq_create(struct kmem_cache *cachep, unsigned int count,
			gfp_t gfp);
void cache_random_seq_destroy(struct kmem_cache *cachep);
#else
static inline int cache_random_seq_create(struct kmem_cache *cachep,
					unsigned int count, gfp_t gfp)
{
	return 0;
}
static inline void cache_random_seq_destroy(struct kmem_cache *cachep) { }
#endif /* CONFIG_SLAB_FREELIST_RANDOM */

#endif /* MM_SLAB_H */