/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _LINUX_SLUB_DEF_H
#define _LINUX_SLUB_DEF_H

/*
 * SLUB : A Slab allocator without object queues.
 *
 * (C) 2007 SGI, Christoph Lameter
 */
#include <linux/kobject.h>

enum stat_item {
	ALLOC_FASTPATH,		/* Allocation from cpu slab */
	ALLOC_SLOWPATH,		/* Allocation by getting a new cpu slab */
	FREE_FASTPATH,		/* Free to cpu slab */
	FREE_SLOWPATH,		/* Freeing not to cpu slab */
	FREE_FROZEN,		/* Freeing to frozen slab */
	FREE_ADD_PARTIAL,	/* Freeing moves slab to partial list */
	FREE_REMOVE_PARTIAL,	/* Freeing removes last object */
	ALLOC_FROM_PARTIAL,	/* Cpu slab acquired from node partial list */
	ALLOC_SLAB,		/* Cpu slab acquired from page allocator */
	ALLOC_REFILL,		/* Refill cpu slab from slab freelist */
	ALLOC_NODE_MISMATCH,	/* Switching cpu slab */
	FREE_SLAB,		/* Slab freed to the page allocator */
	CPUSLAB_FLUSH,		/* Abandoning of the cpu slab */
	DEACTIVATE_FULL,	/* Cpu slab was full when deactivated */
	DEACTIVATE_EMPTY,	/* Cpu slab was empty when deactivated */
	DEACTIVATE_TO_HEAD,	/* Cpu slab was moved to the head of partials */
	DEACTIVATE_TO_TAIL,	/* Cpu slab was moved to the tail of partials */
	DEACTIVATE_REMOTE_FREES,/* Slab contained remotely freed objects */
	DEACTIVATE_BYPASS,	/* Implicit deactivation */
	ORDER_FALLBACK,		/* Number of times fallback was necessary */
	CMPXCHG_DOUBLE_CPU_FAIL,/* Failure of this_cpu_cmpxchg_double */
	CMPXCHG_DOUBLE_FAIL,	/* Number of times that cmpxchg double did not match */
	CPU_PARTIAL_ALLOC,	/* Used cpu partial on alloc */
	CPU_PARTIAL_FREE,	/* Refill cpu partial on free */
	CPU_PARTIAL_NODE,	/* Refill cpu partial from node partial */
	CPU_PARTIAL_DRAIN,	/* Drain cpu partial to node partial */
	NR_SLUB_STAT_ITEMS };

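/*
 * Editorial note (not part of the original header): with CONFIG_SLUB_STATS
 * enabled, mm/slub.c exposes each of the counters above as a read-only
 * sysfs file, e.g. /sys/kernel/slab/<cache>/alloc_fastpath.
 */
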
struct kmem_cache_cpu {
	void **freelist;	/* Pointer to next available object */
	unsigned long tid;	/* Globally unique transaction id */
	struct page *page;	/* The slab from which we are allocating */
#ifdef CONFIG_SLUB_CPU_PARTIAL
	struct page *partial;	/* Partially allocated frozen slabs */
#endif
#ifdef CONFIG_SLUB_STATS
	unsigned stat[NR_SLUB_STAT_ITEMS];
#endif
};

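/*
 * Editorial note (not part of the original header): the lockless allocation
 * fastpath in mm/slub.c updates 'freelist' and 'tid' together with
 * this_cpu_cmpxchg_double(); preemption or an interrupt between reading the
 * pair and committing the update changes the transaction id, so the cmpxchg
 * fails and the fastpath retries.
 */
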
#ifdef CONFIG_SLUB_CPU_PARTIAL
#define slub_percpu_partial(c)		((c)->partial)

#define slub_set_percpu_partial(c, p)		\
({						\
	slub_percpu_partial(c) = (p)->next;	\
})

#define slub_percpu_partial_read_once(c)	READ_ONCE(slub_percpu_partial(c))
#else
#define slub_percpu_partial(c)			NULL

#define slub_set_percpu_partial(c, p)

#define slub_percpu_partial_read_once(c)	NULL
#endif // CONFIG_SLUB_CPU_PARTIAL

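/*
 * Illustrative sketch (not part of the original header) of how mm/slub.c
 * typically walks the per-cpu partial list: the head slab is detached and
 * the list head is advanced through the page->next link, e.g.
 *
 *	while ((page = slub_percpu_partial(c))) {
 *		slub_set_percpu_partial(c, page);
 *		unfreeze or allocate from 'page' here
 *	}
 */
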
/*
 * Word size structure that can be atomically updated or read and that
 * contains both the order and the number of objects that a slab of the
 * given order would contain.
 */
struct kmem_cache_order_objects {
	unsigned int x;
};

/*
 * Slab cache management.
 */
struct kmem_cache {
	struct kmem_cache_cpu __percpu *cpu_slab;
	/* Used for retrieving partial slabs, etc. */
	slab_flags_t flags;
	unsigned long min_partial;
	unsigned int size;	/* The size of an object including metadata */
	unsigned int object_size;/* The size of an object without metadata */
	unsigned int offset;	/* Free pointer offset. */
#ifdef CONFIG_SLUB_CPU_PARTIAL
	/* Number of per cpu partial objects to keep around */
	unsigned int cpu_partial;
#endif
	struct kmem_cache_order_objects oo;

	/* Allocation and freeing of slabs */
	struct kmem_cache_order_objects max;
	struct kmem_cache_order_objects min;
	gfp_t allocflags;	/* gfp flags to use on each alloc */
	int refcount;		/* Refcount for slab cache destroy */
	void (*ctor)(void *);
	unsigned int inuse;		/* Offset to metadata */
	unsigned int align;		/* Alignment */
	unsigned int red_left_pad;	/* Left redzone padding size */
	const char *name;	/* Name (only for display!) */
	struct list_head list;	/* List of slab caches */
#ifdef CONFIG_SYSFS
	struct kobject kobj;	/* For sysfs */
	struct work_struct kobj_remove_work;
#endif
#ifdef CONFIG_MEMCG
	struct memcg_cache_params memcg_params;
	/* For propagation, maximum size of a stored attr */
	unsigned int max_attr_size;
#ifdef CONFIG_SYSFS
	struct kset *memcg_kset;
#endif
#endif

#ifdef CONFIG_SLAB_FREELIST_HARDENED
	unsigned long random;
#endif

#ifdef CONFIG_NUMA
	/*
	 * Defragmentation by allocating from a remote node.
	 */
	unsigned int remote_node_defrag_ratio;
#endif

#ifdef CONFIG_SLAB_FREELIST_RANDOM
	unsigned int *random_seq;
#endif

#ifdef CONFIG_KASAN
	struct kasan_cache kasan_info;
#endif

	unsigned int useroffset;	/* Usercopy region offset */
	unsigned int usersize;		/* Usercopy region size */

	struct kmem_cache_node *node[MAX_NUMNODES];
};

#ifdef CONFIG_SLUB_CPU_PARTIAL
#define slub_cpu_partial(s)		((s)->cpu_partial)
#define slub_set_cpu_partial(s, n)		\
({						\
	slub_cpu_partial(s) = (n);		\
})
#else
#define slub_cpu_partial(s)		(0)
#define slub_set_cpu_partial(s, n)
#endif // CONFIG_SLUB_CPU_PARTIAL

#ifdef CONFIG_SYSFS
#define SLAB_SUPPORTS_SYSFS
void sysfs_slab_release(struct kmem_cache *);
#else
static inline void sysfs_slab_release(struct kmem_cache *s)
{
}
#endif

void object_err(struct kmem_cache *s, struct page *page,
		u8 *object, char *reason);

void *fixup_red_left(struct kmem_cache *s, void *p);

static inline void *nearest_obj(struct kmem_cache *cache, struct page *page,
				void *x) {
	void *object = x - (x - page_address(page)) % cache->size;
	void *last_object = page_address(page) +
		(page->objects - 1) * cache->size;
	void *result = (unlikely(object > last_object)) ? last_object : object;

	result = fixup_red_left(cache, result);
	return result;
}
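
/*
 * Worked example (editorial, not part of the original header; assumes
 * cache->size == 64 and no left red zone): for x == page_address(page) + 100,
 * (x - page_address(page)) % 64 is 36, so 'object' lands at offset 64, the
 * start of the object containing x.  fixup_red_left() then skips the left
 * red zone padding when red zoning is active for the cache.
 */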

#endif /* _LINUX_SLUB_DEF_H */