/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _LINUX_SLUB_DEF_H
#define _LINUX_SLUB_DEF_H

/*
 * SLUB : A Slab allocator without object queues.
 *
 * (C) 2007 SGI, Christoph Lameter
 */
#include <linux/kfence.h>
#include <linux/kobject.h>
#include <linux/reciprocal_div.h>
#include <linux/local_lock.h>

enum stat_item {
	ALLOC_FASTPATH,		/* Allocation from cpu slab */
	ALLOC_SLOWPATH,		/* Allocation by getting a new cpu slab */
	FREE_FASTPATH,		/* Free to cpu slab */
	FREE_SLOWPATH,		/* Freeing not to cpu slab */
	FREE_FROZEN,		/* Freeing to frozen slab */
	FREE_ADD_PARTIAL,	/* Freeing moves slab to partial list */
	FREE_REMOVE_PARTIAL,	/* Freeing removes last object */
	ALLOC_FROM_PARTIAL,	/* Cpu slab acquired from node partial list */
	ALLOC_SLAB,		/* Cpu slab acquired from page allocator */
	ALLOC_REFILL,		/* Refill cpu slab from slab freelist */
	ALLOC_NODE_MISMATCH,	/* Switching cpu slab */
	FREE_SLAB,		/* Slab freed to the page allocator */
	CPUSLAB_FLUSH,		/* Abandoning of the cpu slab */
	DEACTIVATE_FULL,	/* Cpu slab was full when deactivated */
	DEACTIVATE_EMPTY,	/* Cpu slab was empty when deactivated */
	DEACTIVATE_TO_HEAD,	/* Cpu slab was moved to the head of partials */
	DEACTIVATE_TO_TAIL,	/* Cpu slab was moved to the tail of partials */
	DEACTIVATE_REMOTE_FREES,/* Slab contained remotely freed objects */
	DEACTIVATE_BYPASS,	/* Implicit deactivation */
	ORDER_FALLBACK,		/* Number of times fallback was necessary */
	CMPXCHG_DOUBLE_CPU_FAIL,/* Failure of this_cpu_cmpxchg_double */
	CMPXCHG_DOUBLE_FAIL,	/* Number of times that cmpxchg double did not match */
	CPU_PARTIAL_ALLOC,	/* Used cpu partial on alloc */
	CPU_PARTIAL_FREE,	/* Refill cpu partial on free */
	CPU_PARTIAL_NODE,	/* Refill cpu partial from node partial */
	CPU_PARTIAL_DRAIN,	/* Drain cpu partial to node partial */
	NR_SLUB_STAT_ITEMS };

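/*
 * Hedged illustration, not part of this header: with CONFIG_SLUB_STATS=y
 * each item above is exported as a per-cache file under /sys/kernel/slab/.
 * The real name tables live in mm/slub.c; the (truncated) array below,
 * including its _example suffix, is an assumption made for illustration.
 */
#ifdef CONFIG_SLUB_STATS
static const char * const slub_stat_name_example[NR_SLUB_STAT_ITEMS] = {
	[ALLOC_FASTPATH]	= "alloc_fastpath",
	[ALLOC_SLOWPATH]	= "alloc_slowpath",
	[FREE_FASTPATH]		= "free_fastpath",
	[FREE_SLOWPATH]		= "free_slowpath",
	/* ... remaining items follow the same lower-case naming ... */
};
#endif
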
#ifndef CONFIG_SLUB_TINY
/*
 * When changing the layout, make sure freelist and tid are still compatible
 * with this_cpu_cmpxchg_double() alignment requirements.
 */
struct kmem_cache_cpu {
	void **freelist;	/* Pointer to next available object */
	unsigned long tid;	/* Globally unique transaction id */
	struct slab *slab;	/* The slab from which we are allocating */
#ifdef CONFIG_SLUB_CPU_PARTIAL
	struct slab *partial;	/* Partially allocated frozen slabs */
#endif
	local_lock_t lock;	/* Protects the fields above */
#ifdef CONFIG_SLUB_STATS
	unsigned stat[NR_SLUB_STAT_ITEMS];
#endif
};
#endif /* CONFIG_SLUB_TINY */

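/*
 * Hedged sketch of the pattern the layout comment above protects; the
 * real fast paths live in mm/slub.c, and the function name below is an
 * assumption for illustration, not kernel API.
 */
#ifndef CONFIG_SLUB_TINY
static inline bool update_freelist_example(struct kmem_cache_cpu __percpu *cpu_slab,
					   void *old_head, unsigned long old_tid,
					   void *new_head, unsigned long new_tid)
{
	/*
	 * freelist and tid are replaced as one unit: a free (or a
	 * migration to another CPU) in the window since they were read
	 * bumps tid and makes the exchange fail, forcing a retry.
	 */
	return this_cpu_cmpxchg_double(cpu_slab->freelist, cpu_slab->tid,
				       old_head, old_tid,
				       new_head, new_tid);
}
#endif
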
#ifdef CONFIG_SLUB_CPU_PARTIAL
#define slub_percpu_partial(c)		((c)->partial)

#define slub_set_percpu_partial(c, p)		\
({						\
	slub_percpu_partial(c) = (p)->next;	\
})

#define slub_percpu_partial_read_once(c)	READ_ONCE(slub_percpu_partial(c))
#else
#define slub_percpu_partial(c)			NULL

#define slub_set_percpu_partial(c, p)

#define slub_percpu_partial_read_once(c)	NULL
#endif // CONFIG_SLUB_CPU_PARTIAL

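/*
 * Hedged usage sketch: the accessors above compile away when
 * CONFIG_SLUB_CPU_PARTIAL=n. The helper name is an assumption for
 * illustration; the real users are the slow paths in mm/slub.c.
 */
#ifndef CONFIG_SLUB_TINY
static inline bool has_cpu_partial_example(struct kmem_cache_cpu *c)
{
	/*
	 * Racy peek (hence READ_ONCE() in the accessor); with the
	 * config off this constant-folds to false.
	 */
	return slub_percpu_partial_read_once(c) != NULL;
}
#endif
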
/*
 * Word size structure that can be atomically updated or read and that
 * contains both the order and the number of objects that a slab of the
 * given order would contain.
 */
struct kmem_cache_order_objects {
	unsigned int x;
};

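/*
 * Hedged sketch of the encoding: the high bits of ->x hold the page
 * order, the low bits the object count, so both are read with a single
 * load. mm/slub.c's OO_SHIFT is the authority; the value and _example
 * names below are assumptions for illustration.
 */
#define OO_SHIFT_EXAMPLE	16

static inline unsigned int oo_order_example(struct kmem_cache_order_objects x)
{
	return x.x >> OO_SHIFT_EXAMPLE;
}

static inline unsigned int oo_objects_example(struct kmem_cache_order_objects x)
{
	return x.x & ((1U << OO_SHIFT_EXAMPLE) - 1);
}
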
/*
 * Slab cache management.
 */
struct kmem_cache {
#ifndef CONFIG_SLUB_TINY
	struct kmem_cache_cpu __percpu *cpu_slab;
#endif
	/* Used for retrieving partial slabs, etc. */
	slab_flags_t flags;
	unsigned long min_partial;
	unsigned int size;		/* The size of an object including metadata */
	unsigned int object_size;	/* The size of an object without metadata */
	struct reciprocal_value reciprocal_size;
	unsigned int offset;		/* Free pointer offset */
#ifdef CONFIG_SLUB_CPU_PARTIAL
	/* Number of per cpu partial objects to keep around */
	unsigned int cpu_partial;
	/* Number of per cpu partial slabs to keep around */
	unsigned int cpu_partial_slabs;
#endif
	struct kmem_cache_order_objects oo;

	/* Allocation and freeing of slabs */
	struct kmem_cache_order_objects min;
	gfp_t allocflags;		/* gfp flags to use on each alloc */
	int refcount;			/* Refcount for slab cache destroy */
	void (*ctor)(void *);
	unsigned int inuse;		/* Offset to metadata */
	unsigned int align;		/* Alignment */
	unsigned int red_left_pad;	/* Left redzone padding size */
	const char *name;		/* Name (only for display!) */
	struct list_head list;		/* List of slab caches */
#ifdef CONFIG_SYSFS
	struct kobject kobj;		/* For sysfs */
#endif
#ifdef CONFIG_SLAB_FREELIST_HARDENED
	unsigned long random;
#endif

#ifdef CONFIG_NUMA
	/*
	 * Defragmentation by allocating from a remote node.
	 */
	unsigned int remote_node_defrag_ratio;
#endif

#ifdef CONFIG_SLAB_FREELIST_RANDOM
	unsigned int *random_seq;
#endif

#ifdef CONFIG_KASAN_GENERIC
	struct kasan_cache kasan_info;
#endif

#ifdef CONFIG_HARDENED_USERCOPY
	unsigned int useroffset;	/* Usercopy region offset */
	unsigned int usersize;		/* Usercopy region size */
#endif

	struct kmem_cache_node *node[MAX_NUMNODES];
};

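/*
 * Hedged usage sketch: every cache carries one kmem_cache_node per
 * populated NUMA node. mm/slab.h provides a for_each_kmem_cache_node()
 * helper for this walk; the open-coded count below is an illustration
 * and its name is an assumption.
 */
static inline unsigned int nr_populated_nodes_example(struct kmem_cache *s)
{
	unsigned int nr = 0;
	int node;

	for (node = 0; node < MAX_NUMNODES; node++)
		if (s->node[node])
			nr++;
	return nr;
}
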
#if defined(CONFIG_SYSFS) && !defined(CONFIG_SLUB_TINY)
#define SLAB_SUPPORTS_SYSFS
void sysfs_slab_unlink(struct kmem_cache *);
void sysfs_slab_release(struct kmem_cache *);
#else
static inline void sysfs_slab_unlink(struct kmem_cache *s)
{
}
static inline void sysfs_slab_release(struct kmem_cache *s)
{
}
#endif

void *fixup_red_left(struct kmem_cache *s, void *p);

/*
 * Round an arbitrary pointer into a slab down to the start of the
 * object containing it, clamped to the last object, and skip past the
 * left redzone when one is present.
 */
static inline void *nearest_obj(struct kmem_cache *cache, const struct slab *slab,
				void *x)
{
	void *object = x - (x - slab_address(slab)) % cache->size;
	void *last_object = slab_address(slab) +
		(slab->objects - 1) * cache->size;
	void *result = (unlikely(object > last_object)) ? last_object : object;

	result = fixup_red_left(cache, result);
	return result;
}

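/*
 * Hedged usage sketch: nearest_obj() lets error reporting (e.g. KASAN)
 * map an arbitrary, possibly interior, pointer to the object that
 * contains it. The helper name below is an assumption for illustration.
 */
static inline bool ptr_is_object_start_example(struct kmem_cache *cache,
					       const struct slab *slab, void *x)
{
	return x == nearest_obj(cache, slab, x);
}
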
/* Determine object index from a given position */
static inline unsigned int __obj_to_index(const struct kmem_cache *cache,
					  void *addr, void *obj)
{
	return reciprocal_divide(kasan_reset_tag(obj) - addr,
				 cache->reciprocal_size);
}

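/*
 * Hedged sketch of the arithmetic: reciprocal_size is expected to be
 * initialized as reciprocal_value(cache->size) (done in mm/slub.c), so
 * reciprocal_divide() above equals the plain division below while
 * avoiding a hardware divide on the hot path. The _example name is an
 * assumption for illustration.
 */
static inline unsigned int __obj_to_index_slow_example(const struct kmem_cache *cache,
						       void *addr, void *obj)
{
	return (unsigned int)((kasan_reset_tag(obj) - addr) / cache->size);
}
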
static inline unsigned int obj_to_index(const struct kmem_cache *cache,
					const struct slab *slab, void *obj)
{
	/* KFENCE objects live outside the slab's object grid */
	if (is_kfence_address(obj))
		return 0;
	return __obj_to_index(cache, slab_address(slab), obj);
}

static inline int objs_per_slab(const struct kmem_cache *cache,
				const struct slab *slab)
{
	return slab->objects;
}
#endif /* _LINUX_SLUB_DEF_H */