/* SPDX-License-Identifier: GPL-2.0 */
/*
 * Written by Mark Hemment, 1996 (markhe@nextd.demon.co.uk).
 *
 * (C) SGI 2006, Christoph Lameter
 *	Cleaned up and restructured to ease the addition of alternative
 *	implementations of SLAB allocators.
 * (C) Linux Foundation 2008-2013
 *	Unified interface for all slab allocators
 */

#ifndef _LINUX_SLAB_H
#define	_LINUX_SLAB_H

#include <linux/gfp.h>
#include <linux/overflow.h>
#include <linux/types.h>
#include <linux/workqueue.h>
#include <linux/percpu-refcount.h>


/*
 * Flags to pass to kmem_cache_create().
 * The ones marked DEBUG are only valid if CONFIG_DEBUG_SLAB is set.
 */
/* DEBUG: Perform (expensive) checks on alloc/free */
#define SLAB_CONSISTENCY_CHECKS	((slab_flags_t __force)0x00000100U)
/* DEBUG: Red zone objs in a cache */
#define SLAB_RED_ZONE		((slab_flags_t __force)0x00000400U)
/* DEBUG: Poison objects */
#define SLAB_POISON		((slab_flags_t __force)0x00000800U)
/* Align objs on cache lines */
#define SLAB_HWCACHE_ALIGN	((slab_flags_t __force)0x00002000U)
/* Use GFP_DMA memory */
#define SLAB_CACHE_DMA		((slab_flags_t __force)0x00004000U)
/* Use GFP_DMA32 memory */
#define SLAB_CACHE_DMA32	((slab_flags_t __force)0x00008000U)
/* DEBUG: Store the last owner for bug hunting */
#define SLAB_STORE_USER		((slab_flags_t __force)0x00010000U)
/* Panic if kmem_cache_create() fails */
#define SLAB_PANIC		((slab_flags_t __force)0x00040000U)
/*
 * SLAB_TYPESAFE_BY_RCU - **WARNING** READ THIS!
 *
 * This delays freeing the SLAB page by a grace period; it does _NOT_
 * delay object freeing. This means that if you do kmem_cache_free()
 * that memory location is free to be reused at any time. Thus it may
 * be possible to see another object there in the same RCU grace period.
 *
 * This feature only ensures the memory location backing the object
 * stays valid; the trick to using this is relying on an independent
 * object validation pass. Something like:
 *
 *  rcu_read_lock()
 * again:
 *  obj = lockless_lookup(key);
 *  if (obj) {
 *    if (!try_get_ref(obj)) // might fail for free objects
 *      goto again;
 *
 *    if (obj->key != key) { // not the object we expected
 *      put_ref(obj);
 *      goto again;
 *    }
 *  }
 *  rcu_read_unlock();
 *
 * This is useful if we need to approach a kernel structure obliquely,
 * from its address obtained without the usual locking. We can lock
 * the structure to stabilize it and check it's still at the given address,
 * but only if we can be sure that the memory has not meanwhile been reused
 * for some other kind of object (which our subsystem's lock might corrupt).
 *
 * rcu_read_lock before reading the address, then rcu_read_unlock after
 * taking the spinlock within the structure expected at that address.
 *
 * Note that SLAB_TYPESAFE_BY_RCU was originally named SLAB_DESTROY_BY_RCU.
 */
/* Defer freeing slabs to RCU */
#define SLAB_TYPESAFE_BY_RCU	((slab_flags_t __force)0x00080000U)
/* Spread some memory over cpuset */
#define SLAB_MEM_SPREAD		((slab_flags_t __force)0x00100000U)
/* Trace allocations and frees */
#define SLAB_TRACE		((slab_flags_t __force)0x00200000U)

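/*
 * Illustrative sketch (not part of this header): creating a cache whose
 * objects are type-stable under RCU, to pair with the validation loop shown
 * above. The "struct foo" type, "foo_cache" name and "ref" field are
 * hypothetical; try_get_ref() from the example above could be built on
 * refcount_inc_not_zero(), which safely fails for already-freed objects.
 *
 *	struct foo {
 *		refcount_t ref;
 *		unsigned long key;
 *	};
 *	static struct kmem_cache *foo_cache;
 *
 *	foo_cache = kmem_cache_create("foo", sizeof(struct foo), 0,
 *				      SLAB_TYPESAFE_BY_RCU, NULL);
 */
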
/* Flag to prevent checks on free */
#ifdef CONFIG_DEBUG_OBJECTS
# define SLAB_DEBUG_OBJECTS	((slab_flags_t __force)0x00400000U)
#else
# define SLAB_DEBUG_OBJECTS	0
#endif

/* Avoid kmemleak tracing */
#define SLAB_NOLEAKTRACE	((slab_flags_t __force)0x00800000U)

/* Fault injection mark */
#ifdef CONFIG_FAILSLAB
# define SLAB_FAILSLAB		((slab_flags_t __force)0x02000000U)
#else
# define SLAB_FAILSLAB		0
#endif
/* Account to memcg */
#ifdef CONFIG_MEMCG_KMEM
# define SLAB_ACCOUNT		((slab_flags_t __force)0x04000000U)
#else
# define SLAB_ACCOUNT		0
#endif

#ifdef CONFIG_KASAN
#define SLAB_KASAN		((slab_flags_t __force)0x08000000U)
#else
#define SLAB_KASAN		0
#endif

/* The following flags affect the page allocator grouping pages by mobility */
/* Objects are reclaimable */
#define SLAB_RECLAIM_ACCOUNT	((slab_flags_t __force)0x00020000U)
#define SLAB_TEMPORARY		SLAB_RECLAIM_ACCOUNT	/* Objects are short-lived */

/* Slab deactivation flag */
#define SLAB_DEACTIVATED	((slab_flags_t __force)0x10000000U)

/*
 * ZERO_SIZE_PTR will be returned for zero sized kmalloc requests.
 *
 * Dereferencing ZERO_SIZE_PTR will lead to a distinct access fault.
 *
 * ZERO_SIZE_PTR can be passed to kfree though in the same way that NULL can.
 * Both make kfree a no-op.
 */
#define ZERO_SIZE_PTR ((void *)16)

#define ZERO_OR_NULL_PTR(x) ((unsigned long)(x) <= \
				(unsigned long)ZERO_SIZE_PTR)

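/*
 * Illustrative sketch (not part of this header): a zero-sized request does
 * not return NULL, so failure checks and cleanup still work unchanged.
 *
 *	void *p = kmalloc(0, GFP_KERNEL);	// returns ZERO_SIZE_PTR
 *
 *	if (ZERO_OR_NULL_PTR(p))
 *		;	// nothing usable was allocated; do not dereference p
 *	kfree(p);	// no-op for ZERO_SIZE_PTR, just like for NULL
 */
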
#include <linux/kasan.h>

struct mem_cgroup;
/*
 * struct kmem_cache related prototypes
 */
void __init kmem_cache_init(void);
bool slab_is_available(void);

struct kmem_cache *kmem_cache_create(const char *name, unsigned int size,
			unsigned int align, slab_flags_t flags,
			void (*ctor)(void *));
struct kmem_cache *kmem_cache_create_usercopy(const char *name,
			unsigned int size, unsigned int align,
			slab_flags_t flags,
			unsigned int useroffset, unsigned int usersize,
			void (*ctor)(void *));
void kmem_cache_destroy(struct kmem_cache *s);
int kmem_cache_shrink(struct kmem_cache *s);

/*
 * Please use this macro to create slab caches. Simply specify the
 * name of the structure and maybe some flags that are listed above.
 *
 * The alignment of the struct determines object alignment. If you
 * e.g. add ____cacheline_aligned_in_smp to the struct declaration
 * then the objects will be properly aligned in SMP configurations.
 */
#define KMEM_CACHE(__struct, __flags)					\
		kmem_cache_create(#__struct, sizeof(struct __struct),	\
			__alignof__(struct __struct), (__flags), NULL)

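/*
 * Illustrative sketch (not part of this header): the "struct my_obj" type
 * and "my_cache" name are hypothetical.
 *
 *	struct my_obj {
 *		spinlock_t lock;
 *		int value;
 *	} ____cacheline_aligned_in_smp;
 *
 *	static struct kmem_cache *my_cache;
 *
 *	my_cache = KMEM_CACHE(my_obj, SLAB_HWCACHE_ALIGN | SLAB_PANIC);
 *	// objects are naturally aligned to __alignof__(struct my_obj)
 *	obj = kmem_cache_alloc(my_cache, GFP_KERNEL);
 */
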
/*
 * To whitelist a single field for copying to/from usercopy, use this
 * macro instead of KMEM_CACHE() above.
 */
#define KMEM_CACHE_USERCOPY(__struct, __flags, __field)			\
		kmem_cache_create_usercopy(#__struct,			\
			sizeof(struct __struct),			\
			__alignof__(struct __struct), (__flags),	\
			offsetof(struct __struct, __field),		\
			sizeof_field(struct __struct, __field), NULL)

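/*
 * Illustrative sketch (not part of this header): with hardened usercopy
 * enabled, only the hypothetical "data" field of "struct session" may be
 * copied to/from user space.
 *
 *	struct session {
 *		unsigned long flags;
 *		char data[64];
 *	};
 *
 *	session_cache = KMEM_CACHE_USERCOPY(session, SLAB_ACCOUNT, data);
 */
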
/*
 * Common kmalloc functions provided by all allocators
 */
void * __must_check krealloc(const void *objp, size_t new_size, gfp_t flags) __alloc_size(2);
void kfree(const void *objp);
void kfree_sensitive(const void *objp);
size_t __ksize(const void *objp);
size_t ksize(const void *objp);
#ifdef CONFIG_PRINTK
bool kmem_valid_obj(void *object);
void kmem_dump_obj(void *object);
#endif

#ifdef CONFIG_HAVE_HARDENED_USERCOPY_ALLOCATOR
void __check_heap_object(const void *ptr, unsigned long n, struct page *page,
			bool to_user);
#else
static inline void __check_heap_object(const void *ptr, unsigned long n,
				       struct page *page, bool to_user) { }
#endif

/*
 * Some archs want to perform DMA into kmalloc caches and need a guaranteed
 * alignment larger than the alignment of a 64-bit integer.
 * Setting ARCH_KMALLOC_MINALIGN in arch headers allows that.
 */
#if defined(ARCH_DMA_MINALIGN) && ARCH_DMA_MINALIGN > 8
#define ARCH_KMALLOC_MINALIGN ARCH_DMA_MINALIGN
#define KMALLOC_MIN_SIZE ARCH_DMA_MINALIGN
#define KMALLOC_SHIFT_LOW ilog2(ARCH_DMA_MINALIGN)
#else
#define ARCH_KMALLOC_MINALIGN __alignof__(unsigned long long)
#endif

/*
 * Setting ARCH_SLAB_MINALIGN in arch headers allows a different alignment.
 * Intended for arches that get misalignment faults even for 64 bit integer
 * aligned buffers.
 */
#ifndef ARCH_SLAB_MINALIGN
#define ARCH_SLAB_MINALIGN __alignof__(unsigned long long)
#endif

/*
 * kmalloc and friends return ARCH_KMALLOC_MINALIGN aligned
 * pointers. kmem_cache_alloc and friends return ARCH_SLAB_MINALIGN
 * aligned pointers.
 */
#define __assume_kmalloc_alignment __assume_aligned(ARCH_KMALLOC_MINALIGN)
#define __assume_slab_alignment __assume_aligned(ARCH_SLAB_MINALIGN)
#define __assume_page_alignment __assume_aligned(PAGE_SIZE)

/*
 * Kmalloc array related definitions
 */

#ifdef CONFIG_SLAB
/*
 * The largest kmalloc size supported by the SLAB allocators is
 * 32 megabytes (2^25), or the maximum allocatable page order if that is
 * less than 32 MB.
 *
 * WARNING: It's not easy to increase this value since the allocators have
 * to do various tricks to work around compiler limitations in order to
 * ensure proper constant folding.
 */
#define KMALLOC_SHIFT_HIGH	((MAX_ORDER + PAGE_SHIFT - 1) <= 25 ? \
				(MAX_ORDER + PAGE_SHIFT - 1) : 25)
#define KMALLOC_SHIFT_MAX	KMALLOC_SHIFT_HIGH
#ifndef KMALLOC_SHIFT_LOW
#define KMALLOC_SHIFT_LOW	5
#endif
#endif

#ifdef CONFIG_SLUB
/*
 * SLUB directly allocates requests fitting into an order-1 page
 * (PAGE_SIZE*2). Larger requests are passed to the page allocator.
 */
#define KMALLOC_SHIFT_HIGH	(PAGE_SHIFT + 1)
#define KMALLOC_SHIFT_MAX	(MAX_ORDER + PAGE_SHIFT - 1)
#ifndef KMALLOC_SHIFT_LOW
#define KMALLOC_SHIFT_LOW	3
#endif
#endif

#ifdef CONFIG_SLOB
/*
 * SLOB passes all requests larger than one page to the page allocator.
 * No kmalloc array is necessary since objects of different sizes can
 * be allocated from the same page.
 */
#define KMALLOC_SHIFT_HIGH	PAGE_SHIFT
#define KMALLOC_SHIFT_MAX	(MAX_ORDER + PAGE_SHIFT - 1)
#ifndef KMALLOC_SHIFT_LOW
#define KMALLOC_SHIFT_LOW	3
#endif
#endif

/* Maximum allocatable size */
#define KMALLOC_MAX_SIZE	(1UL << KMALLOC_SHIFT_MAX)
/* Maximum size for which we actually use a slab cache */
#define KMALLOC_MAX_CACHE_SIZE	(1UL << KMALLOC_SHIFT_HIGH)
/* Maximum order allocatable via the slab allocator */
#define KMALLOC_MAX_ORDER	(KMALLOC_SHIFT_MAX - PAGE_SHIFT)

/*
 * Kmalloc subsystem.
 */
#ifndef KMALLOC_MIN_SIZE
#define KMALLOC_MIN_SIZE (1 << KMALLOC_SHIFT_LOW)
#endif

/*
 * This restriction comes from the byte sized index implementation.
 * Page size is normally 2^12 bytes and, in this case, if we want to use
 * a byte sized index which can represent 2^8 entries, the size of the object
 * should be equal to or greater than 2^12 / 2^8 = 2^4 = 16.
 * If the minimum size of kmalloc is less than 16, we use it as the minimum
 * object size and give up on using the byte sized index.
 */
#define SLAB_OBJ_MIN_SIZE	(KMALLOC_MIN_SIZE < 16 ? \
				(KMALLOC_MIN_SIZE) : 16)

/*
 * Whenever changing this, take care that kmalloc_type() and
 * create_kmalloc_caches() still work as intended.
 *
 * KMALLOC_NORMAL can contain only unaccounted objects whereas KMALLOC_CGROUP
 * is for accounted but unreclaimable and non-dma objects. All the other
 * kmem caches can have both accounted and unaccounted objects.
 */
enum kmalloc_cache_type {
	KMALLOC_NORMAL = 0,
#ifndef CONFIG_ZONE_DMA
	KMALLOC_DMA = KMALLOC_NORMAL,
#endif
#ifndef CONFIG_MEMCG_KMEM
	KMALLOC_CGROUP = KMALLOC_NORMAL,
#else
	KMALLOC_CGROUP,
#endif
	KMALLOC_RECLAIM,
#ifdef CONFIG_ZONE_DMA
	KMALLOC_DMA,
#endif
	NR_KMALLOC_TYPES
};

#ifndef CONFIG_SLOB
extern struct kmem_cache *
kmalloc_caches[NR_KMALLOC_TYPES][KMALLOC_SHIFT_HIGH + 1];

/*
 * Define gfp bits that should not be set for KMALLOC_NORMAL.
 */
#define KMALLOC_NOT_NORMAL_BITS					\
	(__GFP_RECLAIMABLE |					\
	(IS_ENABLED(CONFIG_ZONE_DMA)   ? __GFP_DMA : 0) |	\
	(IS_ENABLED(CONFIG_MEMCG_KMEM) ? __GFP_ACCOUNT : 0))

static __always_inline enum kmalloc_cache_type kmalloc_type(gfp_t flags)
{
	/*
	 * The most common case is KMALLOC_NORMAL, so test for it
	 * with a single branch for all the relevant flags.
	 */
	if (likely((flags & KMALLOC_NOT_NORMAL_BITS) == 0))
		return KMALLOC_NORMAL;

	/*
	 * At least one of the flags has to be set. Their priorities in
	 * decreasing order are:
	 *  1) __GFP_DMA
	 *  2) __GFP_RECLAIMABLE
	 *  3) __GFP_ACCOUNT
	 */
	if (IS_ENABLED(CONFIG_ZONE_DMA) && (flags & __GFP_DMA))
		return KMALLOC_DMA;
	if (!IS_ENABLED(CONFIG_MEMCG_KMEM) || (flags & __GFP_RECLAIMABLE))
		return KMALLOC_RECLAIM;
	else
		return KMALLOC_CGROUP;
}

/*
 * Figure out which kmalloc slab an allocation of a certain size
 * belongs to.
 * 0 = zero alloc
 * 1 =  65 .. 96 bytes
 * 2 = 129 .. 192 bytes
 * n = 2^(n-1)+1 .. 2^n
 *
 * Note: __kmalloc_index() is compile-time optimized, and not runtime optimized;
 * typical usage is via kmalloc_index() and therefore evaluated at compile-time.
 * Callers where !size_is_constant should only be test modules, where runtime
 * overheads of __kmalloc_index() can be tolerated. Also see kmalloc_slab().
 */
static __always_inline unsigned int __kmalloc_index(size_t size,
						    bool size_is_constant)
{
	if (!size)
		return 0;

	if (size <= KMALLOC_MIN_SIZE)
		return KMALLOC_SHIFT_LOW;

	if (KMALLOC_MIN_SIZE <= 32 && size > 64 && size <= 96)
		return 1;
	if (KMALLOC_MIN_SIZE <= 64 && size > 128 && size <= 192)
		return 2;
	if (size <= 8) return 3;
	if (size <= 16) return 4;
	if (size <= 32) return 5;
	if (size <= 64) return 6;
	if (size <= 128) return 7;
	if (size <= 256) return 8;
	if (size <= 512) return 9;
	if (size <= 1024) return 10;
	if (size <= 2 * 1024) return 11;
	if (size <= 4 * 1024) return 12;
	if (size <= 8 * 1024) return 13;
	if (size <= 16 * 1024) return 14;
	if (size <= 32 * 1024) return 15;
	if (size <= 64 * 1024) return 16;
	if (size <= 128 * 1024) return 17;
	if (size <= 256 * 1024) return 18;
	if (size <= 512 * 1024) return 19;
	if (size <= 1024 * 1024) return 20;
	if (size <= 2 * 1024 * 1024) return 21;
	if (size <= 4 * 1024 * 1024) return 22;
	if (size <= 8 * 1024 * 1024) return 23;
	if (size <= 16 * 1024 * 1024) return 24;
	if (size <= 32 * 1024 * 1024) return 25;

	if ((IS_ENABLED(CONFIG_CC_IS_GCC) || CONFIG_CLANG_VERSION >= 110000)
	    && !IS_ENABLED(CONFIG_PROFILE_ALL_BRANCHES) && size_is_constant)
		BUILD_BUG_ON_MSG(1, "unexpected size in kmalloc_index()");
	else
		BUG();

	/* Will never be reached. Needed because the compiler may complain */
	return -1;
}
#define kmalloc_index(s) __kmalloc_index(s, true)
#endif /* !CONFIG_SLOB */

void *__kmalloc(size_t size, gfp_t flags) __assume_kmalloc_alignment __alloc_size(1);
void *kmem_cache_alloc(struct kmem_cache *s, gfp_t flags) __assume_slab_alignment __malloc;
void kmem_cache_free(struct kmem_cache *s, void *objp);

/*
 * Bulk allocation and freeing operations. These are accelerated in an
 * allocator specific way to avoid taking locks repeatedly or building
 * metadata structures unnecessarily.
 *
 * Note that interrupts must be enabled when calling these functions.
 */
void kmem_cache_free_bulk(struct kmem_cache *s, size_t size, void **p);
int kmem_cache_alloc_bulk(struct kmem_cache *s, gfp_t flags, size_t size, void **p);

/*
 * Caller must not use kfree_bulk() on memory not originally allocated
 * by kmalloc(), because the SLOB allocator cannot handle this.
 */
static __always_inline void kfree_bulk(size_t size, void **p)
{
	kmem_cache_free_bulk(NULL, size, p);
}

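/*
 * Illustrative sketch (not part of this header): allocating and freeing a
 * small batch of objects from a hypothetical "foo_cache" with one call each.
 * kmem_cache_alloc_bulk() returns the number of objects allocated, or 0 on
 * failure.
 *
 *	void *objs[16];
 *
 *	if (kmem_cache_alloc_bulk(foo_cache, GFP_KERNEL, ARRAY_SIZE(objs), objs)) {
 *		// ... use objs[0] .. objs[15] ...
 *		kmem_cache_free_bulk(foo_cache, ARRAY_SIZE(objs), objs);
 *	}
 */
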
#ifdef CONFIG_NUMA
void *__kmalloc_node(size_t size, gfp_t flags, int node) __assume_kmalloc_alignment
							 __alloc_size(1);
void *kmem_cache_alloc_node(struct kmem_cache *s, gfp_t flags, int node) __assume_slab_alignment
									 __malloc;
#else
static __always_inline __alloc_size(1) void *__kmalloc_node(size_t size, gfp_t flags, int node)
{
	return __kmalloc(size, flags);
}

static __always_inline void *kmem_cache_alloc_node(struct kmem_cache *s, gfp_t flags, int node)
{
	return kmem_cache_alloc(s, flags);
}
#endif

#ifdef CONFIG_TRACING
extern void *kmem_cache_alloc_trace(struct kmem_cache *s, gfp_t flags, size_t size)
				    __assume_slab_alignment __alloc_size(3);

#ifdef CONFIG_NUMA
extern void *kmem_cache_alloc_node_trace(struct kmem_cache *s, gfp_t gfpflags,
					 int node, size_t size) __assume_slab_alignment
								__alloc_size(4);
#else
static __always_inline __alloc_size(4) void *kmem_cache_alloc_node_trace(struct kmem_cache *s,
						 gfp_t gfpflags, int node, size_t size)
{
	return kmem_cache_alloc_trace(s, gfpflags, size);
}
#endif /* CONFIG_NUMA */

#else /* CONFIG_TRACING */
static __always_inline __alloc_size(3) void *kmem_cache_alloc_trace(struct kmem_cache *s,
								    gfp_t flags, size_t size)
{
	void *ret = kmem_cache_alloc(s, flags);

	ret = kasan_kmalloc(s, ret, size, flags);
	return ret;
}

static __always_inline void *kmem_cache_alloc_node_trace(struct kmem_cache *s, gfp_t gfpflags,
							  int node, size_t size)
{
	void *ret = kmem_cache_alloc_node(s, gfpflags, node);

	ret = kasan_kmalloc(s, ret, size, gfpflags);
	return ret;
}
#endif /* CONFIG_TRACING */

extern void *kmalloc_order(size_t size, gfp_t flags, unsigned int order) __assume_page_alignment
									 __alloc_size(1);

#ifdef CONFIG_TRACING
extern void *kmalloc_order_trace(size_t size, gfp_t flags, unsigned int order)
				__assume_page_alignment __alloc_size(1);
#else
static __always_inline __alloc_size(1) void *kmalloc_order_trace(size_t size, gfp_t flags,
								  unsigned int order)
{
	return kmalloc_order(size, flags, order);
}
#endif

static __always_inline __alloc_size(1) void *kmalloc_large(size_t size, gfp_t flags)
{
	unsigned int order = get_order(size);
	return kmalloc_order_trace(size, flags, order);
}

/**
 * kmalloc - allocate memory
 * @size: how many bytes of memory are required.
 * @flags: the type of memory to allocate.
 *
 * kmalloc is the normal method of allocating memory
 * for objects smaller than page size in the kernel.
 *
 * The allocated object address is aligned to at least ARCH_KMALLOC_MINALIGN
 * bytes. For @size of power of two bytes, the alignment is also guaranteed
 * to be at least the size.
 *
 * The @flags argument may be one of the GFP flags defined at
 * include/linux/gfp.h and described at
 * :ref:`Documentation/core-api/mm-api.rst <mm-api-gfp-flags>`
 *
 * The recommended usage of the @flags is described at
 * :ref:`Documentation/core-api/memory-allocation.rst <memory_allocation>`
 *
 * Below is a brief outline of the most useful GFP flags
 *
 * %GFP_KERNEL
 *	Allocate normal kernel ram. May sleep.
 *
 * %GFP_NOWAIT
 *	Allocation will not sleep.
 *
 * %GFP_ATOMIC
 *	Allocation will not sleep. May use emergency pools.
 *
 * %GFP_HIGHUSER
 *	Allocate memory from high memory on behalf of user.
 *
 * Also it is possible to set different flags by OR'ing
 * in one or more of the following additional @flags:
 *
 * %__GFP_HIGH
 *	This allocation has high priority and may use emergency pools.
 *
 * %__GFP_NOFAIL
 *	Indicate that this allocation is in no way allowed to fail
 *	(think twice before using).
 *
 * %__GFP_NORETRY
 *	If memory is not immediately available,
 *	then give up at once.
 *
 * %__GFP_NOWARN
 *	If allocation fails, don't issue any warnings.
 *
 * %__GFP_RETRY_MAYFAIL
 *	Try really hard to satisfy the allocation, but fail
 *	eventually.
 */
static __always_inline __alloc_size(1) void *kmalloc(size_t size, gfp_t flags)
{
	if (__builtin_constant_p(size)) {
#ifndef CONFIG_SLOB
		unsigned int index;
#endif
		if (size > KMALLOC_MAX_CACHE_SIZE)
			return kmalloc_large(size, flags);
#ifndef CONFIG_SLOB
		index = kmalloc_index(size);

		if (!index)
			return ZERO_SIZE_PTR;

		return kmem_cache_alloc_trace(
				kmalloc_caches[kmalloc_type(flags)][index],
				flags, size);
#endif
	}
	return __kmalloc(size, flags);
}

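/*
 * Illustrative sketch (not part of this header): typical process-context and
 * atomic-context allocations; "struct foo", "buf" and "len" are hypothetical.
 *
 *	struct foo *p = kmalloc(sizeof(*p), GFP_KERNEL);	// may sleep
 *	if (!p)
 *		return -ENOMEM;
 *	// ...
 *	kfree(p);
 *
 *	buf = kmalloc(len, GFP_ATOMIC);		// from atomic/interrupt context
 */
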
static __always_inline __alloc_size(1) void *kmalloc_node(size_t size, gfp_t flags, int node)
{
#ifndef CONFIG_SLOB
	if (__builtin_constant_p(size) &&
		size <= KMALLOC_MAX_CACHE_SIZE) {
		unsigned int i = kmalloc_index(size);

		if (!i)
			return ZERO_SIZE_PTR;

		return kmem_cache_alloc_node_trace(
				kmalloc_caches[kmalloc_type(flags)][i],
						flags, node, size);
	}
#endif
	return __kmalloc_node(size, flags, node);
}

/**
 * kmalloc_array - allocate memory for an array.
 * @n: number of elements.
 * @size: element size.
 * @flags: the type of memory to allocate (see kmalloc).
 */
static inline __alloc_size(1, 2) void *kmalloc_array(size_t n, size_t size, gfp_t flags)
{
	size_t bytes;

	if (unlikely(check_mul_overflow(n, size, &bytes)))
		return NULL;
	if (__builtin_constant_p(n) && __builtin_constant_p(size))
		return kmalloc(bytes, flags);
	return __kmalloc(bytes, flags);
}

/**
 * krealloc_array - reallocate memory for an array.
 * @p: pointer to the memory chunk to reallocate
 * @new_n: new number of elements to alloc
 * @new_size: new size of a single member of the array
 * @flags: the type of memory to allocate (see kmalloc)
 */
static inline __alloc_size(2, 3) void * __must_check krealloc_array(void *p,
								     size_t new_n,
								     size_t new_size,
								     gfp_t flags)
{
	size_t bytes;

	if (unlikely(check_mul_overflow(new_n, new_size, &bytes)))
		return NULL;

	return krealloc(p, bytes, flags);
}

/**
 * kcalloc - allocate memory for an array. The memory is set to zero.
 * @n: number of elements.
 * @size: element size.
 * @flags: the type of memory to allocate (see kmalloc).
 */
static inline __alloc_size(1, 2) void *kcalloc(size_t n, size_t size, gfp_t flags)
{
	return kmalloc_array(n, size, flags | __GFP_ZERO);
}

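/*
 * Illustrative sketch (not part of this header): the overflow-checked array
 * helpers above; "n", "elems" and "tmp" are hypothetical.
 *
 *	elems = kcalloc(n, sizeof(*elems), GFP_KERNEL);		// zeroed array
 *	// ...
 *	tmp = krealloc_array(elems, 2 * n, sizeof(*elems), GFP_KERNEL);
 *	if (!tmp)
 *		kfree(elems);	// the old buffer is left untouched on failure
 *	else
 *		elems = tmp;
 */
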
/*
 * kmalloc_track_caller is a special version of kmalloc that records the
 * calling function of the routine calling it for slab leak tracking instead
 * of just the calling function (confusing, eh?).
 * It's useful when the call to kmalloc comes from a widely-used standard
 * allocator where we care about the real place the memory allocation
 * request comes from.
 */
extern void *__kmalloc_track_caller(size_t size, gfp_t flags, unsigned long caller)
				   __alloc_size(1);
#define kmalloc_track_caller(size, flags) \
	__kmalloc_track_caller(size, flags, _RET_IP_)

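/*
 * Illustrative sketch (not part of this header): a hypothetical wrapper that
 * wants leak reports and allocation tracking to blame its caller rather than
 * the wrapper itself.
 *
 *	void *my_subsys_alloc(size_t len)
 *	{
 *		return kmalloc_track_caller(len, GFP_KERNEL);
 *	}
 */
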
static inline __alloc_size(1, 2) void *kmalloc_array_node(size_t n, size_t size, gfp_t flags,
							   int node)
{
	size_t bytes;

	if (unlikely(check_mul_overflow(n, size, &bytes)))
		return NULL;
	if (__builtin_constant_p(n) && __builtin_constant_p(size))
		return kmalloc_node(bytes, flags, node);
	return __kmalloc_node(bytes, flags, node);
}

static inline __alloc_size(1, 2) void *kcalloc_node(size_t n, size_t size, gfp_t flags, int node)
{
	return kmalloc_array_node(n, size, flags | __GFP_ZERO, node);
}


#ifdef CONFIG_NUMA
extern void *__kmalloc_node_track_caller(size_t size, gfp_t flags, int node,
					 unsigned long caller) __alloc_size(1);
#define kmalloc_node_track_caller(size, flags, node) \
	__kmalloc_node_track_caller(size, flags, node, \
			_RET_IP_)

#else /* CONFIG_NUMA */

#define kmalloc_node_track_caller(size, flags, node) \
	kmalloc_track_caller(size, flags)

#endif /* CONFIG_NUMA */

/*
 * Shortcuts
 */
static inline void *kmem_cache_zalloc(struct kmem_cache *k, gfp_t flags)
{
	return kmem_cache_alloc(k, flags | __GFP_ZERO);
}

/**
 * kzalloc - allocate memory. The memory is set to zero.
 * @size: how many bytes of memory are required.
 * @flags: the type of memory to allocate (see kmalloc).
 */
static inline __alloc_size(1) void *kzalloc(size_t size, gfp_t flags)
{
	return kmalloc(size, flags | __GFP_ZERO);
}

/**
 * kzalloc_node - allocate zeroed memory from a particular memory node.
 * @size: how many bytes of memory are required.
 * @flags: the type of memory to allocate (see kmalloc).
 * @node: memory node from which to allocate
 */
static inline __alloc_size(1) void *kzalloc_node(size_t size, gfp_t flags, int node)
{
	return kmalloc_node(size, flags | __GFP_ZERO, node);
}

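/*
 * Illustrative sketch (not part of this header): zeroed allocations, the
 * second one bound to a specific NUMA node; "state", "pcpu" and "nid" are
 * hypothetical.
 *
 *	state = kzalloc(sizeof(*state), GFP_KERNEL);
 *	pcpu = kzalloc_node(sizeof(*pcpu), GFP_KERNEL, nid);
 */
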
extern void *kvmalloc_node(size_t size, gfp_t flags, int node) __alloc_size(1);
static inline __alloc_size(1) void *kvmalloc(size_t size, gfp_t flags)
{
	return kvmalloc_node(size, flags, NUMA_NO_NODE);
}
static inline __alloc_size(1) void *kvzalloc_node(size_t size, gfp_t flags, int node)
{
	return kvmalloc_node(size, flags | __GFP_ZERO, node);
}
static inline __alloc_size(1) void *kvzalloc(size_t size, gfp_t flags)
{
	return kvmalloc(size, flags | __GFP_ZERO);
}

static inline __alloc_size(1, 2) void *kvmalloc_array(size_t n, size_t size, gfp_t flags)
{
	size_t bytes;

	if (unlikely(check_mul_overflow(n, size, &bytes)))
		return NULL;

	return kvmalloc(bytes, flags);
}

static inline __alloc_size(1, 2) void *kvcalloc(size_t n, size_t size, gfp_t flags)
{
	return kvmalloc_array(n, size, flags | __GFP_ZERO);
}

extern void *kvrealloc(const void *p, size_t oldsize, size_t newsize, gfp_t flags)
		      __alloc_size(3);
extern void kvfree(const void *addr);
extern void kvfree_sensitive(const void *addr, size_t len);

unsigned int kmem_cache_size(struct kmem_cache *s);
void __init kmem_cache_init_late(void);

#if defined(CONFIG_SMP) && defined(CONFIG_SLAB)
int slab_prepare_cpu(unsigned int cpu);
int slab_dead_cpu(unsigned int cpu);
#else
#define slab_prepare_cpu	NULL
#define slab_dead_cpu		NULL
#endif

#endif	/* _LINUX_SLAB_H */