/* SPDX-License-Identifier: GPL-2.0 */
/*
 * Written by Mark Hemment, 1996 (markhe@nextd.demon.co.uk).
 *
 * (C) SGI 2006, Christoph Lameter
 *	Cleaned up and restructured to ease the addition of alternative
 *	implementations of SLAB allocators.
 * (C) Linux Foundation 2008-2013
 *	Unified interface for all slab allocators
 */

#ifndef _LINUX_SLAB_H
#define _LINUX_SLAB_H

#include <linux/gfp.h>
#include <linux/overflow.h>
#include <linux/types.h>
#include <linux/workqueue.h>
#include <linux/percpu-refcount.h>

/*
 * Flags to pass to kmem_cache_create().
 * The ones marked DEBUG are only valid if CONFIG_DEBUG_SLAB is set.
 */
/* DEBUG: Perform (expensive) checks on alloc/free */
#define SLAB_CONSISTENCY_CHECKS	((slab_flags_t __force)0x00000100U)
/* DEBUG: Red zone objs in a cache */
#define SLAB_RED_ZONE		((slab_flags_t __force)0x00000400U)
/* DEBUG: Poison objects */
#define SLAB_POISON		((slab_flags_t __force)0x00000800U)
/* Align objs on cache lines */
#define SLAB_HWCACHE_ALIGN	((slab_flags_t __force)0x00002000U)
/* Use GFP_DMA memory */
#define SLAB_CACHE_DMA		((slab_flags_t __force)0x00004000U)
/* Use GFP_DMA32 memory */
#define SLAB_CACHE_DMA32	((slab_flags_t __force)0x00008000U)
/* DEBUG: Store the last owner for bug hunting */
#define SLAB_STORE_USER		((slab_flags_t __force)0x00010000U)
/* Panic if kmem_cache_create() fails */
#define SLAB_PANIC		((slab_flags_t __force)0x00040000U)
/*
 * SLAB_TYPESAFE_BY_RCU - **WARNING** READ THIS!
 *
 * This delays freeing the SLAB page by a grace period; it does _NOT_
 * delay object freeing. This means that if you do kmem_cache_free()
 * that memory location is free to be reused at any time. Thus it may
 * be possible to see another object there in the same RCU grace period.
 *
 * This feature only ensures the memory location backing the object
 * stays valid; the trick to using this is relying on an independent
 * object validation pass. Something like:
 *
 *	rcu_read_lock();
 * again:
 *	obj = lockless_lookup(key);
 *	if (obj) {
 *		if (!try_get_ref(obj)) // might fail for free objects
 *			goto again;
 *
 *		if (obj->key != key) { // not the object we expected
 *			put_ref(obj);
 *			goto again;
 *		}
 *	}
 *	rcu_read_unlock();
 *
 * This is useful if we need to approach a kernel structure obliquely,
 * from its address obtained without the usual locking. We can lock
 * the structure to stabilize it and check it's still at the given address,
 * but only if we can be sure that the memory has not meanwhile been reused
 * for some other kind of object (which our subsystem's lock might corrupt).
 *
 * rcu_read_lock() before reading the address, then rcu_read_unlock() after
 * taking the spinlock within the structure expected at that address.
 *
 * Note that SLAB_TYPESAFE_BY_RCU was originally named SLAB_DESTROY_BY_RCU.
 */
/* Defer freeing slabs to RCU */
#define SLAB_TYPESAFE_BY_RCU	((slab_flags_t __force)0x00080000U)
/* Spread some memory over cpuset */
#define SLAB_MEM_SPREAD		((slab_flags_t __force)0x00100000U)
/* Trace allocations and frees */
#define SLAB_TRACE		((slab_flags_t __force)0x00200000U)

/* Flag to prevent checks on free */
#ifdef CONFIG_DEBUG_OBJECTS
# define SLAB_DEBUG_OBJECTS	((slab_flags_t __force)0x00400000U)
#else
# define SLAB_DEBUG_OBJECTS	0
#endif

/* Avoid kmemleak tracing */
#define SLAB_NOLEAKTRACE	((slab_flags_t __force)0x00800000U)

/* Fault injection mark */
#ifdef CONFIG_FAILSLAB
# define SLAB_FAILSLAB		((slab_flags_t __force)0x02000000U)
#else
# define SLAB_FAILSLAB		0
#endif
/* Account to memcg */
#ifdef CONFIG_MEMCG_KMEM
# define SLAB_ACCOUNT		((slab_flags_t __force)0x04000000U)
#else
# define SLAB_ACCOUNT		0
#endif

#ifdef CONFIG_KASAN
#define SLAB_KASAN		((slab_flags_t __force)0x08000000U)
#else
#define SLAB_KASAN		0
#endif

/*
 * Ignore user specified debugging flags.
 * Intended for caches created for self-tests so they have only flags
 * specified in the code and other flags are ignored.
 */
#define SLAB_NO_USER_FLAGS	((slab_flags_t __force)0x10000000U)

/* The following flags affect the page allocator grouping pages by mobility */
/* Objects are reclaimable */
#define SLAB_RECLAIM_ACCOUNT	((slab_flags_t __force)0x00020000U)
#define SLAB_TEMPORARY		SLAB_RECLAIM_ACCOUNT	/* Objects are short-lived */

/*
 * ZERO_SIZE_PTR will be returned for zero sized kmalloc requests.
 *
 * Dereferencing ZERO_SIZE_PTR will lead to a distinct access fault.
 *
 * ZERO_SIZE_PTR can be passed to kfree though in the same way that NULL can.
 * Both make kfree a no-op.
 */
#define ZERO_SIZE_PTR ((void *)16)

#define ZERO_OR_NULL_PTR(x) ((unsigned long)(x) <= \
				(unsigned long)ZERO_SIZE_PTR)

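/*
 * Illustrative sketch (not part of the API; @len is hypothetical): callers
 * that may request zero bytes can test the result with ZERO_OR_NULL_PTR()
 * before touching the memory; kfree() accepts NULL and ZERO_SIZE_PTR alike.
 *
 *	void *buf = kmalloc(len, GFP_KERNEL);	// len may be 0
 *
 *	if (ZERO_OR_NULL_PTR(buf))
 *		return;				// nothing usable was allocated
 *	memset(buf, 0, len);
 *	kfree(buf);				// no-op for NULL/ZERO_SIZE_PTR
 */
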
#include <linux/kasan.h>

struct list_lru;
struct mem_cgroup;
/*
 * struct kmem_cache related prototypes
 */
void __init kmem_cache_init(void);
bool slab_is_available(void);

struct kmem_cache *kmem_cache_create(const char *name, unsigned int size,
			unsigned int align, slab_flags_t flags,
			void (*ctor)(void *));
struct kmem_cache *kmem_cache_create_usercopy(const char *name,
			unsigned int size, unsigned int align,
			slab_flags_t flags,
			unsigned int useroffset, unsigned int usersize,
			void (*ctor)(void *));
void kmem_cache_destroy(struct kmem_cache *s);
int kmem_cache_shrink(struct kmem_cache *s);

/*
 * Please use this macro to create slab caches. Simply specify the
 * name of the structure and maybe some flags that are listed above.
 *
 * The alignment of the struct determines object alignment. If you
 * e.g. add ____cacheline_aligned_in_smp to the struct declaration
 * then the objects will be properly aligned in SMP configurations.
 */
#define KMEM_CACHE(__struct, __flags)					\
		kmem_cache_create(#__struct, sizeof(struct __struct),	\
			__alignof__(struct __struct), (__flags), NULL)

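/*
 * Illustrative sketch (the struct and cache names are hypothetical):
 *
 *	struct foo {
 *		spinlock_t lock;
 *		int refcnt;
 *	};
 *	static struct kmem_cache *foo_cachep;
 *
 *	foo_cachep = KMEM_CACHE(foo, SLAB_HWCACHE_ALIGN | SLAB_PANIC);
 *
 * Objects then come from kmem_cache_alloc(foo_cachep, GFP_KERNEL) and go
 * back via kmem_cache_free(foo_cachep, obj).
 */
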
/*
 * To whitelist a single field for copying to/from userspace, use this
 * macro instead of KMEM_CACHE() above.
 */
#define KMEM_CACHE_USERCOPY(__struct, __flags, __field)			\
		kmem_cache_create_usercopy(#__struct,			\
			sizeof(struct __struct),			\
			__alignof__(struct __struct), (__flags),	\
			offsetof(struct __struct, __field),		\
			sizeof_field(struct __struct, __field), NULL)

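/*
 * Illustrative sketch (hypothetical names): only the @data field below may
 * be copied to/from user space; copies overlapping the other fields would
 * be rejected by the usercopy hardening checks.
 *
 *	struct foo_req {
 *		struct list_head list;
 *		char data[64];
 *	};
 *
 *	foo_req_cachep = KMEM_CACHE_USERCOPY(foo_req, 0, data);
 */
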
/*
 * Common kmalloc functions provided by all allocators
 */
void * __must_check krealloc(const void *objp, size_t new_size, gfp_t flags) __alloc_size(2);
void kfree(const void *objp);
void kfree_sensitive(const void *objp);
size_t __ksize(const void *objp);
size_t ksize(const void *objp);
#ifdef CONFIG_PRINTK
bool kmem_valid_obj(void *object);
void kmem_dump_obj(void *object);
#endif

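/*
 * Illustrative sketch (hypothetical buffer): growing an allocation with
 * krealloc(). As with userspace realloc(), the old buffer is left intact
 * on failure, so assign the result to a temporary first.
 *
 *	char *tmp = krealloc(buf, new_len, GFP_KERNEL);
 *
 *	if (!tmp)
 *		return -ENOMEM;		// buf is still valid here
 *	buf = tmp;
 */
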
/*
 * Some archs want to perform DMA into kmalloc caches and need a guaranteed
 * alignment larger than the alignment of a 64-bit integer.
 * Setting ARCH_DMA_MINALIGN in arch headers allows that.
 */
#if defined(ARCH_DMA_MINALIGN) && ARCH_DMA_MINALIGN > 8
#define ARCH_KMALLOC_MINALIGN ARCH_DMA_MINALIGN
#define KMALLOC_MIN_SIZE ARCH_DMA_MINALIGN
#define KMALLOC_SHIFT_LOW ilog2(ARCH_DMA_MINALIGN)
#else
#define ARCH_KMALLOC_MINALIGN __alignof__(unsigned long long)
#endif

/*
 * Setting ARCH_SLAB_MINALIGN in arch headers allows a different alignment.
 * Intended for arches that get misalignment faults even for 64 bit integer
 * aligned buffers.
 */
#ifndef ARCH_SLAB_MINALIGN
#define ARCH_SLAB_MINALIGN __alignof__(unsigned long long)
#endif

/*
 * Arches can define this function if they want to decide the minimum slab
 * alignment at runtime. The value returned by the function must be a power
 * of two and >= ARCH_SLAB_MINALIGN.
 */
#ifndef arch_slab_minalign
static inline unsigned int arch_slab_minalign(void)
{
	return ARCH_SLAB_MINALIGN;
}
#endif

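/*
 * Illustrative sketch of an arch override (hypothetical; an arch header
 * would provide something along these lines before this point):
 *
 *	// in arch/<arch>/include/asm/..., assuming a boot-time detected
 *	// cacheline size kept in a hypothetical variable boot_cache_line
 *	#define arch_slab_minalign arch_slab_minalign
 *	static inline unsigned int arch_slab_minalign(void)
 *	{
 *		return max(ARCH_SLAB_MINALIGN, boot_cache_line);
 *	}
 */
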
/*
 * kmem_cache_alloc and friends return pointers aligned to ARCH_SLAB_MINALIGN.
 * kmalloc and friends return pointers aligned to both ARCH_KMALLOC_MINALIGN
 * and ARCH_SLAB_MINALIGN, but here we only assume the former alignment.
 */
#define __assume_kmalloc_alignment __assume_aligned(ARCH_KMALLOC_MINALIGN)
#define __assume_slab_alignment __assume_aligned(ARCH_SLAB_MINALIGN)
#define __assume_page_alignment __assume_aligned(PAGE_SIZE)

/*
 * Kmalloc array related definitions
 */

#ifdef CONFIG_SLAB
/*
 * The largest kmalloc size supported by the SLAB allocators is
 * 32 megabytes (2^25) or the maximum allocatable page order if that is
 * less than 32 MB.
 *
 * WARNING: It's not easy to increase this value since the allocators have
 * to do various tricks to work around compiler limitations in order to
 * ensure proper constant folding.
 */
#define KMALLOC_SHIFT_HIGH	((MAX_ORDER + PAGE_SHIFT - 1) <= 25 ? \
				(MAX_ORDER + PAGE_SHIFT - 1) : 25)
#define KMALLOC_SHIFT_MAX	KMALLOC_SHIFT_HIGH
#ifndef KMALLOC_SHIFT_LOW
#define KMALLOC_SHIFT_LOW	5
#endif
#endif

#ifdef CONFIG_SLUB
/*
 * SLUB directly allocates requests fitting into an order-1 page
 * (PAGE_SIZE*2). Larger requests are passed to the page allocator.
 */
#define KMALLOC_SHIFT_HIGH	(PAGE_SHIFT + 1)
#define KMALLOC_SHIFT_MAX	(MAX_ORDER + PAGE_SHIFT - 1)
#ifndef KMALLOC_SHIFT_LOW
#define KMALLOC_SHIFT_LOW	3
#endif
#endif

#ifdef CONFIG_SLOB
/*
 * SLOB passes all requests larger than one page to the page allocator.
 * No kmalloc array is necessary since objects of different sizes can
 * be allocated from the same page.
 */
#define KMALLOC_SHIFT_HIGH	PAGE_SHIFT
#define KMALLOC_SHIFT_MAX	(MAX_ORDER + PAGE_SHIFT - 1)
#ifndef KMALLOC_SHIFT_LOW
#define KMALLOC_SHIFT_LOW	3
#endif
#endif

/* Maximum allocatable size */
#define KMALLOC_MAX_SIZE	(1UL << KMALLOC_SHIFT_MAX)
/* Maximum size for which we actually use a slab cache */
#define KMALLOC_MAX_CACHE_SIZE	(1UL << KMALLOC_SHIFT_HIGH)
/* Maximum order allocatable via the slab allocator */
#define KMALLOC_MAX_ORDER	(KMALLOC_SHIFT_MAX - PAGE_SHIFT)

/*
 * Kmalloc subsystem.
 */
#ifndef KMALLOC_MIN_SIZE
#define KMALLOC_MIN_SIZE (1 << KMALLOC_SHIFT_LOW)
#endif

/*
 * This restriction comes from the byte sized index implementation.
 * Page size is normally 2^12 bytes and, in this case, if we want to use
 * a byte sized index which can represent 2^8 entries, the size of the
 * object should be equal to or greater than 2^12 / 2^8 = 2^4 = 16.
 * If the minimum kmalloc size is less than 16, we use it as the minimum
 * object size and give up on using the byte sized index.
 */
#define SLAB_OBJ_MIN_SIZE	(KMALLOC_MIN_SIZE < 16 ? \
				(KMALLOC_MIN_SIZE) : 16)

/*
 * Whenever changing this, take care that kmalloc_type() and
 * create_kmalloc_caches() still work as intended.
 *
 * KMALLOC_NORMAL can contain only unaccounted objects whereas KMALLOC_CGROUP
 * is for accounted but unreclaimable and non-dma objects. All the other
 * kmem caches can have both accounted and unaccounted objects.
 */
enum kmalloc_cache_type {
	KMALLOC_NORMAL = 0,
#ifndef CONFIG_ZONE_DMA
	KMALLOC_DMA = KMALLOC_NORMAL,
#endif
#ifndef CONFIG_MEMCG_KMEM
	KMALLOC_CGROUP = KMALLOC_NORMAL,
#else
	KMALLOC_CGROUP,
#endif
	KMALLOC_RECLAIM,
#ifdef CONFIG_ZONE_DMA
	KMALLOC_DMA,
#endif
	NR_KMALLOC_TYPES
};

#ifndef CONFIG_SLOB
extern struct kmem_cache *
kmalloc_caches[NR_KMALLOC_TYPES][KMALLOC_SHIFT_HIGH + 1];

/*
 * Define gfp bits that should not be set for KMALLOC_NORMAL.
 */
#define KMALLOC_NOT_NORMAL_BITS					\
	(__GFP_RECLAIMABLE |					\
	(IS_ENABLED(CONFIG_ZONE_DMA)   ? __GFP_DMA : 0) |	\
	(IS_ENABLED(CONFIG_MEMCG_KMEM) ? __GFP_ACCOUNT : 0))

static __always_inline enum kmalloc_cache_type kmalloc_type(gfp_t flags)
{
	/*
	 * The most common case is KMALLOC_NORMAL, so test for it
	 * with a single branch for all the relevant flags.
	 */
	if (likely((flags & KMALLOC_NOT_NORMAL_BITS) == 0))
		return KMALLOC_NORMAL;

	/*
	 * At least one of the flags has to be set. Their priorities in
	 * decreasing order are:
	 *  1) __GFP_DMA
	 *  2) __GFP_RECLAIMABLE
	 *  3) __GFP_ACCOUNT
	 */
	if (IS_ENABLED(CONFIG_ZONE_DMA) && (flags & __GFP_DMA))
		return KMALLOC_DMA;
	if (!IS_ENABLED(CONFIG_MEMCG_KMEM) || (flags & __GFP_RECLAIMABLE))
		return KMALLOC_RECLAIM;
	else
		return KMALLOC_CGROUP;
}

/*
 * Figure out which kmalloc slab an allocation of a certain size
 * belongs to.
 * 0 = zero alloc
 * 1 =  65 .. 96 bytes
 * 2 = 129 .. 192 bytes
 * n = 2^(n-1)+1 .. 2^n
 * (e.g. a 100-byte request maps to index 7, the 128-byte cache)
 *
 * Note: __kmalloc_index() is compile-time optimized, and not runtime optimized;
 * typical usage is via kmalloc_index() and therefore evaluated at compile-time.
 * Callers where !size_is_constant should only be test modules, where runtime
 * overheads of __kmalloc_index() can be tolerated. Also see kmalloc_slab().
 */
static __always_inline unsigned int __kmalloc_index(size_t size,
						    bool size_is_constant)
{
	if (!size)
		return 0;

	if (size <= KMALLOC_MIN_SIZE)
		return KMALLOC_SHIFT_LOW;

	if (KMALLOC_MIN_SIZE <= 32 && size > 64 && size <= 96)
		return 1;
	if (KMALLOC_MIN_SIZE <= 64 && size > 128 && size <= 192)
		return 2;
	if (size <=          8) return 3;
	if (size <=         16) return 4;
	if (size <=         32) return 5;
	if (size <=         64) return 6;
	if (size <=        128) return 7;
	if (size <=        256) return 8;
	if (size <=        512) return 9;
	if (size <=       1024) return 10;
	if (size <=   2 * 1024) return 11;
	if (size <=   4 * 1024) return 12;
	if (size <=   8 * 1024) return 13;
	if (size <=  16 * 1024) return 14;
	if (size <=  32 * 1024) return 15;
	if (size <=  64 * 1024) return 16;
	if (size <= 128 * 1024) return 17;
	if (size <= 256 * 1024) return 18;
	if (size <= 512 * 1024) return 19;
	if (size <= 1024 * 1024) return 20;
	if (size <=  2 * 1024 * 1024) return 21;
	if (size <=  4 * 1024 * 1024) return 22;
	if (size <=  8 * 1024 * 1024) return 23;
	if (size <= 16 * 1024 * 1024) return 24;
	if (size <= 32 * 1024 * 1024) return 25;

	if (!IS_ENABLED(CONFIG_PROFILE_ALL_BRANCHES) && size_is_constant)
		BUILD_BUG_ON_MSG(1, "unexpected size in kmalloc_index()");
	else
		BUG();

	/* Will never be reached. Needed because the compiler may complain */
	return -1;
}
#define kmalloc_index(s) __kmalloc_index(s, true)
#endif /* !CONFIG_SLOB */

void *__kmalloc(size_t size, gfp_t flags) __assume_kmalloc_alignment __alloc_size(1);
void *kmem_cache_alloc(struct kmem_cache *s, gfp_t flags) __assume_slab_alignment __malloc;
void *kmem_cache_alloc_lru(struct kmem_cache *s, struct list_lru *lru,
			   gfp_t gfpflags) __assume_slab_alignment __malloc;
void kmem_cache_free(struct kmem_cache *s, void *objp);

/*
 * Bulk allocation and freeing operations. These are accelerated in an
 * allocator specific way to avoid taking locks repeatedly or building
 * metadata structures unnecessarily.
 *
 * Note that interrupts must be enabled when calling these functions.
 */
void kmem_cache_free_bulk(struct kmem_cache *s, size_t size, void **p);
int kmem_cache_alloc_bulk(struct kmem_cache *s, gfp_t flags, size_t size, void **p);

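/*
 * Illustrative sketch (hypothetical cache): allocating and freeing a batch
 * of objects with one call each. The return convention assumed below
 * (count of allocated objects, 0 on failure) should be checked against
 * the allocator in use.
 *
 *	void *objs[16];
 *
 *	if (!kmem_cache_alloc_bulk(foo_cachep, GFP_KERNEL,
 *				   ARRAY_SIZE(objs), objs))
 *		return -ENOMEM;
 *	// ... use the objects ...
 *	kmem_cache_free_bulk(foo_cachep, ARRAY_SIZE(objs), objs);
 */
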
/*
 * Caller must not use kfree_bulk() on memory not originally allocated
 * by kmalloc(), because the SLOB allocator cannot handle this.
 */
static __always_inline void kfree_bulk(size_t size, void **p)
{
	kmem_cache_free_bulk(NULL, size, p);
}

#ifdef CONFIG_NUMA
void *__kmalloc_node(size_t size, gfp_t flags, int node) __assume_kmalloc_alignment
							 __alloc_size(1);
void *kmem_cache_alloc_node(struct kmem_cache *s, gfp_t flags, int node) __assume_slab_alignment
									 __malloc;
#else
static __always_inline __alloc_size(1) void *__kmalloc_node(size_t size, gfp_t flags, int node)
{
	return __kmalloc(size, flags);
}

static __always_inline void *kmem_cache_alloc_node(struct kmem_cache *s, gfp_t flags, int node)
{
	return kmem_cache_alloc(s, flags);
}
#endif

#ifdef CONFIG_TRACING
extern void *kmem_cache_alloc_trace(struct kmem_cache *s, gfp_t flags, size_t size)
				    __assume_slab_alignment __alloc_size(3);

#ifdef CONFIG_NUMA
extern void *kmem_cache_alloc_node_trace(struct kmem_cache *s, gfp_t gfpflags,
					 int node, size_t size) __assume_slab_alignment
								__alloc_size(4);
#else
static __always_inline __alloc_size(4) void *kmem_cache_alloc_node_trace(struct kmem_cache *s,
						 gfp_t gfpflags, int node, size_t size)
{
	return kmem_cache_alloc_trace(s, gfpflags, size);
}
#endif /* CONFIG_NUMA */

#else /* CONFIG_TRACING */
static __always_inline __alloc_size(3) void *kmem_cache_alloc_trace(struct kmem_cache *s,
								    gfp_t flags, size_t size)
{
	void *ret = kmem_cache_alloc(s, flags);

	ret = kasan_kmalloc(s, ret, size, flags);
	return ret;
}

static __always_inline void *kmem_cache_alloc_node_trace(struct kmem_cache *s, gfp_t gfpflags,
							 int node, size_t size)
{
	void *ret = kmem_cache_alloc_node(s, gfpflags, node);

	ret = kasan_kmalloc(s, ret, size, gfpflags);
	return ret;
}
#endif /* CONFIG_TRACING */

extern void *kmalloc_order(size_t size, gfp_t flags, unsigned int order) __assume_page_alignment
									 __alloc_size(1);

#ifdef CONFIG_TRACING
extern void *kmalloc_order_trace(size_t size, gfp_t flags, unsigned int order)
				 __assume_page_alignment __alloc_size(1);
#else
static __always_inline __alloc_size(1) void *kmalloc_order_trace(size_t size, gfp_t flags,
								 unsigned int order)
{
	return kmalloc_order(size, flags, order);
}
#endif

static __always_inline __alloc_size(1) void *kmalloc_large(size_t size, gfp_t flags)
{
	unsigned int order = get_order(size);
	return kmalloc_order_trace(size, flags, order);
}

/**
 * kmalloc - allocate memory
 * @size: how many bytes of memory are required.
 * @flags: the type of memory to allocate.
 *
 * kmalloc is the normal method of allocating memory
 * for objects smaller than page size in the kernel.
 *
 * The allocated object address is aligned to at least ARCH_KMALLOC_MINALIGN
 * bytes. For @size which is a power of two, the alignment is also guaranteed
 * to be at least the size.
 *
 * The @flags argument may be one of the GFP flags defined at
 * include/linux/gfp.h and described at
 * :ref:`Documentation/core-api/mm-api.rst <mm-api-gfp-flags>`
 *
 * The recommended usage of the @flags is described at
 * :ref:`Documentation/core-api/memory-allocation.rst <memory_allocation>`
 *
 * Below is a brief outline of the most useful GFP flags
 *
 * %GFP_KERNEL
 *	Allocate normal kernel ram. May sleep.
 *
 * %GFP_NOWAIT
 *	Allocation will not sleep.
 *
 * %GFP_ATOMIC
 *	Allocation will not sleep. May use emergency pools.
 *
 * %GFP_HIGHUSER
 *	Allocate memory from high memory on behalf of user.
 *
 * Also it is possible to set different flags by OR'ing
 * in one or more of the following additional @flags:
 *
 * %__GFP_HIGH
 *	This allocation has high priority and may use emergency pools.
 *
 * %__GFP_NOFAIL
 *	Indicate that this allocation is in no way allowed to fail
 *	(think twice before using).
 *
 * %__GFP_NORETRY
 *	If memory is not immediately available,
 *	then give up at once.
 *
 * %__GFP_NOWARN
 *	If allocation fails, don't issue any warnings.
 *
 * %__GFP_RETRY_MAYFAIL
 *	Try really hard to satisfy the allocation, but eventually fail.
 */
static __always_inline __alloc_size(1) void *kmalloc(size_t size, gfp_t flags)
{
	if (__builtin_constant_p(size)) {
#ifndef CONFIG_SLOB
		unsigned int index;
#endif
		if (size > KMALLOC_MAX_CACHE_SIZE)
			return kmalloc_large(size, flags);
#ifndef CONFIG_SLOB
		index = kmalloc_index(size);

		if (!index)
			return ZERO_SIZE_PTR;

		return kmem_cache_alloc_trace(
				kmalloc_caches[kmalloc_type(flags)][index],
				flags, size);
#endif
	}
	return __kmalloc(size, flags);
}

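/*
 * Illustrative sketch (hypothetical struct): the usual allocate/check/free
 * pattern. With a compile-time constant size, the cache index above is
 * folded at compile time and the call reduces to kmem_cache_alloc_trace()
 * on the matching kmalloc cache.
 *
 *	struct foo *p = kmalloc(sizeof(*p), GFP_KERNEL);
 *
 *	if (!p)
 *		return -ENOMEM;
 *	// ... use p ...
 *	kfree(p);
 */
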
static __always_inline __alloc_size(1) void *kmalloc_node(size_t size, gfp_t flags, int node)
{
#ifndef CONFIG_SLOB
	if (__builtin_constant_p(size) &&
		size <= KMALLOC_MAX_CACHE_SIZE) {
		unsigned int i = kmalloc_index(size);

		if (!i)
			return ZERO_SIZE_PTR;

		return kmem_cache_alloc_node_trace(
				kmalloc_caches[kmalloc_type(flags)][i],
				flags, node, size);
	}
#endif
	return __kmalloc_node(size, flags, node);
}

/**
 * kmalloc_array - allocate memory for an array.
 * @n: number of elements.
 * @size: element size.
 * @flags: the type of memory to allocate (see kmalloc).
 */
static inline __alloc_size(1, 2) void *kmalloc_array(size_t n, size_t size, gfp_t flags)
{
	size_t bytes;

	if (unlikely(check_mul_overflow(n, size, &bytes)))
		return NULL;
	if (__builtin_constant_p(n) && __builtin_constant_p(size))
		return kmalloc(bytes, flags);
	return __kmalloc(bytes, flags);
}

642
f0dbd2bd
BG
643/**
644 * krealloc_array - reallocate memory for an array.
645 * @p: pointer to the memory chunk to reallocate
646 * @new_n: new number of elements to alloc
647 * @new_size: new size of a single member of the array
648 * @flags: the type of memory to allocate (see kmalloc)
649 */
c37495d6
KC
650static inline __alloc_size(2, 3) void * __must_check krealloc_array(void *p,
651 size_t new_n,
652 size_t new_size,
653 gfp_t flags)
f0dbd2bd
BG
654{
655 size_t bytes;
656
657 if (unlikely(check_mul_overflow(new_n, new_size, &bytes)))
658 return NULL;
659
660 return krealloc(p, bytes, flags);
661}
662
/**
 * kcalloc - allocate memory for an array. The memory is set to zero.
 * @n: number of elements.
 * @size: element size.
 * @flags: the type of memory to allocate (see kmalloc).
 */
static inline __alloc_size(1, 2) void *kcalloc(size_t n, size_t size, gfp_t flags)
{
	return kmalloc_array(n, size, flags | __GFP_ZERO);
}

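/*
 * Illustrative sketch (hypothetical table): prefer these array helpers over
 * an open-coded kmalloc(n * size, ...), because the multiplication is
 * checked with check_mul_overflow() and overflow yields NULL rather than
 * a too-small buffer.
 *
 *	struct foo *tbl = kcalloc(nr_entries, sizeof(*tbl), GFP_KERNEL);
 *
 *	if (!tbl)
 *		return -ENOMEM;
 */
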
/*
 * kmalloc_track_caller is a special version of kmalloc that records the
 * calling function of the routine calling it for slab leak tracking instead
 * of just the calling function (confusing, eh?).
 * It's useful when the call to kmalloc comes from a widely-used standard
 * allocator where we care about the real place the memory allocation
 * request comes from.
 */
extern void *__kmalloc_track_caller(size_t size, gfp_t flags, unsigned long caller);
#define kmalloc_track_caller(size, flags) \
	__kmalloc_track_caller(size, flags, _RET_IP_)

static inline __alloc_size(1, 2) void *kmalloc_array_node(size_t n, size_t size, gfp_t flags,
							  int node)
{
	size_t bytes;

	if (unlikely(check_mul_overflow(n, size, &bytes)))
		return NULL;
	if (__builtin_constant_p(n) && __builtin_constant_p(size))
		return kmalloc_node(bytes, flags, node);
	return __kmalloc_node(bytes, flags, node);
}

static inline __alloc_size(1, 2) void *kcalloc_node(size_t n, size_t size, gfp_t flags, int node)
{
	return kmalloc_array_node(n, size, flags | __GFP_ZERO, node);
}

#ifdef CONFIG_NUMA
extern void *__kmalloc_node_track_caller(size_t size, gfp_t flags, int node,
					 unsigned long caller) __alloc_size(1);
#define kmalloc_node_track_caller(size, flags, node) \
	__kmalloc_node_track_caller(size, flags, node, \
			_RET_IP_)

#else /* CONFIG_NUMA */

#define kmalloc_node_track_caller(size, flags, node) \
	kmalloc_track_caller(size, flags)

#endif /* CONFIG_NUMA */

/*
 * Shortcuts
 */
static inline void *kmem_cache_zalloc(struct kmem_cache *k, gfp_t flags)
{
	return kmem_cache_alloc(k, flags | __GFP_ZERO);
}

/**
 * kzalloc - allocate memory. The memory is set to zero.
 * @size: how many bytes of memory are required.
 * @flags: the type of memory to allocate (see kmalloc).
 */
static inline __alloc_size(1) void *kzalloc(size_t size, gfp_t flags)
{
	return kmalloc(size, flags | __GFP_ZERO);
}

/**
 * kzalloc_node - allocate zeroed memory from a particular memory node.
 * @size: how many bytes of memory are required.
 * @flags: the type of memory to allocate (see kmalloc).
 * @node: memory node from which to allocate
 */
static inline __alloc_size(1) void *kzalloc_node(size_t size, gfp_t flags, int node)
{
	return kmalloc_node(size, flags | __GFP_ZERO, node);
}

extern void *kvmalloc_node(size_t size, gfp_t flags, int node) __alloc_size(1);
static inline __alloc_size(1) void *kvmalloc(size_t size, gfp_t flags)
{
	return kvmalloc_node(size, flags, NUMA_NO_NODE);
}
static inline __alloc_size(1) void *kvzalloc_node(size_t size, gfp_t flags, int node)
{
	return kvmalloc_node(size, flags | __GFP_ZERO, node);
}
static inline __alloc_size(1) void *kvzalloc(size_t size, gfp_t flags)
{
	return kvmalloc(size, flags | __GFP_ZERO);
}

static inline __alloc_size(1, 2) void *kvmalloc_array(size_t n, size_t size, gfp_t flags)
{
	size_t bytes;

	if (unlikely(check_mul_overflow(n, size, &bytes)))
		return NULL;

	return kvmalloc(bytes, flags);
}

static inline __alloc_size(1, 2) void *kvcalloc(size_t n, size_t size, gfp_t flags)
{
	return kvmalloc_array(n, size, flags | __GFP_ZERO);
}

extern void *kvrealloc(const void *p, size_t oldsize, size_t newsize, gfp_t flags)
		      __alloc_size(3);
extern void kvfree(const void *addr);
extern void kvfree_sensitive(const void *addr, size_t len);

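/*
 * Illustrative sketch (hypothetical @nr and @entries): kvmalloc() tries
 * kmalloc() first and falls back to vmalloc() for larger or fragmented
 * requests, so it suits allocations whose size scales with variable or
 * untrusted input. Memory from any of the kv* helpers must be released
 * with kvfree(), never kfree().
 *
 *	entries = kvcalloc(nr, sizeof(*entries), GFP_KERNEL);
 *
 *	if (!entries)
 *		return -ENOMEM;
 *	// ... use entries ...
 *	kvfree(entries);
 */
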
unsigned int kmem_cache_size(struct kmem_cache *s);
void __init kmem_cache_init_late(void);

#if defined(CONFIG_SMP) && defined(CONFIG_SLAB)
int slab_prepare_cpu(unsigned int cpu);
int slab_dead_cpu(unsigned int cpu);
#else
#define slab_prepare_cpu	NULL
#define slab_dead_cpu		NULL
#endif

#endif	/* _LINUX_SLAB_H */