// SPDX-License-Identifier: GPL-2.0
/*
 * Slab allocator functions that are independent of the allocator strategy
 *
 * (C) 2012 Christoph Lameter <cl@linux.com>
 */
#include <linux/slab.h>

#include <linux/mm.h>
#include <linux/poison.h>
#include <linux/interrupt.h>
#include <linux/memory.h>
#include <linux/cache.h>
#include <linux/compiler.h>
#include <linux/kfence.h>
#include <linux/module.h>
#include <linux/cpu.h>
#include <linux/uaccess.h>
#include <linux/seq_file.h>
#include <linux/proc_fs.h>
#include <linux/debugfs.h>
#include <linux/kasan.h>
#include <asm/cacheflush.h>
#include <asm/tlbflush.h>
#include <asm/page.h>
#include <linux/memcontrol.h>
#include <linux/stackdepot.h>

#include "internal.h"
#include "slab.h"

#define CREATE_TRACE_POINTS
#include <trace/events/kmem.h>

enum slab_state slab_state;
LIST_HEAD(slab_caches);
DEFINE_MUTEX(slab_mutex);
struct kmem_cache *kmem_cache;

static LIST_HEAD(slab_caches_to_rcu_destroy);
static void slab_caches_to_rcu_destroy_workfn(struct work_struct *work);
static DECLARE_WORK(slab_caches_to_rcu_destroy_work,
		    slab_caches_to_rcu_destroy_workfn);

/*
 * Set of flags that will prevent slab merging
 */
#define SLAB_NEVER_MERGE (SLAB_RED_ZONE | SLAB_POISON | SLAB_STORE_USER | \
		SLAB_TRACE | SLAB_TYPESAFE_BY_RCU | SLAB_NOLEAKTRACE | \
		SLAB_FAILSLAB | kasan_never_merge())

#define SLAB_MERGE_SAME (SLAB_RECLAIM_ACCOUNT | SLAB_CACHE_DMA | \
			 SLAB_CACHE_DMA32 | SLAB_ACCOUNT)

/*
 * Merge control. If this is set then no merging of slab caches will occur.
 */
static bool slab_nomerge = !IS_ENABLED(CONFIG_SLAB_MERGE_DEFAULT);

static int __init setup_slab_nomerge(char *str)
{
	slab_nomerge = true;
	return 1;
}

static int __init setup_slab_merge(char *str)
{
	slab_nomerge = false;
	return 1;
}

#ifdef CONFIG_SLUB
__setup_param("slub_nomerge", slub_nomerge, setup_slab_nomerge, 0);
__setup_param("slub_merge", slub_merge, setup_slab_merge, 0);
#endif

__setup("slab_nomerge", setup_slab_nomerge);
__setup("slab_merge", setup_slab_merge);

/*
 * Determine the size of a slab object
 */
unsigned int kmem_cache_size(struct kmem_cache *s)
{
	return s->object_size;
}
EXPORT_SYMBOL(kmem_cache_size);

#ifdef CONFIG_DEBUG_VM
static int kmem_cache_sanity_check(const char *name, unsigned int size)
{
	if (!name || in_interrupt() || size > KMALLOC_MAX_SIZE) {
		pr_err("kmem_cache_create(%s) integrity check failed\n", name);
		return -EINVAL;
	}

	WARN_ON(strchr(name, ' '));	/* It confuses parsers */
	return 0;
}
#else
static inline int kmem_cache_sanity_check(const char *name, unsigned int size)
{
	return 0;
}
#endif

/*
 * Figure out what the alignment of the objects will be given a set of
 * flags, a user specified alignment and the size of the objects.
 */
static unsigned int calculate_alignment(slab_flags_t flags,
		unsigned int align, unsigned int size)
{
	/*
	 * If the user wants hardware cache aligned objects then follow that
	 * suggestion if the object is sufficiently large.
	 *
	 * The hardware cache alignment cannot override the specified
	 * alignment though. If that is greater, then use it.
	 */
	if (flags & SLAB_HWCACHE_ALIGN) {
		unsigned int ralign;

		ralign = cache_line_size();
		while (size <= ralign / 2)
			ralign /= 2;
		align = max(align, ralign);
	}

	align = max(align, arch_slab_minalign());

	return ALIGN(align, sizeof(void *));
}

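/*
 * Worked example (editor's illustration, not part of the original file):
 * with SLAB_HWCACHE_ALIGN on a 64-byte cache line, align == 0 and
 * size == 20, ralign halves once, 64 -> 32 (the loop then stops because
 * 20 > 32/2), so small objects are not padded out to a full cache line.
 * The result is ALIGN(max(32, arch_slab_minalign()), sizeof(void *)).
 */
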
/*
 * Find a mergeable slab cache
 */
int slab_unmergeable(struct kmem_cache *s)
{
	if (slab_nomerge || (s->flags & SLAB_NEVER_MERGE))
		return 1;

	if (s->ctor)
		return 1;

	if (s->usersize)
		return 1;

	/*
	 * We may have set a slab to be unmergeable during bootstrap.
	 */
	if (s->refcount < 0)
		return 1;

	return 0;
}

struct kmem_cache *find_mergeable(unsigned int size, unsigned int align,
		slab_flags_t flags, const char *name, void (*ctor)(void *))
{
	struct kmem_cache *s;

	if (slab_nomerge)
		return NULL;

	if (ctor)
		return NULL;

	size = ALIGN(size, sizeof(void *));
	align = calculate_alignment(flags, align, size);
	size = ALIGN(size, align);
	flags = kmem_cache_flags(size, flags, name);

	if (flags & SLAB_NEVER_MERGE)
		return NULL;

	list_for_each_entry_reverse(s, &slab_caches, list) {
		if (slab_unmergeable(s))
			continue;

		if (size > s->size)
			continue;

		if ((flags & SLAB_MERGE_SAME) != (s->flags & SLAB_MERGE_SAME))
			continue;
		/*
		 * Check if alignment is compatible.
		 * Courtesy of Adrian Drzewiecki
		 */
		if ((s->size & ~(align - 1)) != s->size)
			continue;

		if (s->size - size >= sizeof(void *))
			continue;

		if (IS_ENABLED(CONFIG_SLAB) && align &&
			(align > s->align || s->align % align))
			continue;

		return s;
	}
	return NULL;
}

static struct kmem_cache *create_cache(const char *name,
		unsigned int object_size, unsigned int align,
		slab_flags_t flags, unsigned int useroffset,
		unsigned int usersize, void (*ctor)(void *),
		struct kmem_cache *root_cache)
{
	struct kmem_cache *s;
	int err;

	if (WARN_ON(useroffset + usersize > object_size))
		useroffset = usersize = 0;

	err = -ENOMEM;
	s = kmem_cache_zalloc(kmem_cache, GFP_KERNEL);
	if (!s)
		goto out;

	s->name = name;
	s->size = s->object_size = object_size;
	s->align = align;
	s->ctor = ctor;
	s->useroffset = useroffset;
	s->usersize = usersize;

	err = __kmem_cache_create(s, flags);
	if (err)
		goto out_free_cache;

	s->refcount = 1;
	list_add(&s->list, &slab_caches);
out:
	if (err)
		return ERR_PTR(err);
	return s;

out_free_cache:
	kmem_cache_free(kmem_cache, s);
	goto out;
}

/**
 * kmem_cache_create_usercopy - Create a cache with a region suitable
 * for copying to userspace
 * @name: A string which is used in /proc/slabinfo to identify this cache.
 * @size: The size of objects to be created in this cache.
 * @align: The required alignment for the objects.
 * @flags: SLAB flags
 * @useroffset: Usercopy region offset
 * @usersize: Usercopy region size
 * @ctor: A constructor for the objects.
 *
 * Cannot be called within an interrupt, but can be interrupted.
 * The @ctor is run when new pages are allocated by the cache.
 *
 * The flags are
 *
 * %SLAB_POISON - Poison the slab with a known test pattern (a5a5a5a5)
 * to catch references to uninitialised memory.
 *
 * %SLAB_RED_ZONE - Insert `Red` zones around the allocated memory to check
 * for buffer overruns.
 *
 * %SLAB_HWCACHE_ALIGN - Align the objects in this cache to a hardware
 * cacheline. This can be beneficial if you're counting cycles as closely
 * as davem.
 *
 * Return: a pointer to the cache on success, NULL on failure.
 */
struct kmem_cache *
kmem_cache_create_usercopy(const char *name,
		  unsigned int size, unsigned int align,
		  slab_flags_t flags,
		  unsigned int useroffset, unsigned int usersize,
		  void (*ctor)(void *))
{
	struct kmem_cache *s = NULL;
	const char *cache_name;
	int err;

#ifdef CONFIG_SLUB_DEBUG
	/*
	 * If no slub_debug was enabled globally, the static key is not yet
	 * enabled by setup_slub_debug(). Enable it if the cache is being
	 * created with any of the debugging flags passed explicitly.
	 * It's also possible that this is the first cache created with
	 * SLAB_STORE_USER and we should init stack_depot for it.
	 */
	if (flags & SLAB_DEBUG_FLAGS)
		static_branch_enable(&slub_debug_enabled);
	if (flags & SLAB_STORE_USER)
		stack_depot_init();
#endif

	mutex_lock(&slab_mutex);

	err = kmem_cache_sanity_check(name, size);
	if (err) {
		goto out_unlock;
	}

	/* Refuse requests with allocator specific flags */
	if (flags & ~SLAB_FLAGS_PERMITTED) {
		err = -EINVAL;
		goto out_unlock;
	}

	/*
	 * Some allocators will constrain the set of valid flags to a subset
	 * of all flags. We expect them to define CACHE_CREATE_MASK in this
	 * case, and we'll just provide them with a sanitized version of the
	 * passed flags.
	 */
	flags &= CACHE_CREATE_MASK;

	/* Fail closed on bad usersize or useroffset values. */
	if (WARN_ON(!usersize && useroffset) ||
	    WARN_ON(size < usersize || size - usersize < useroffset))
		usersize = useroffset = 0;

	if (!usersize)
		s = __kmem_cache_alias(name, size, align, flags, ctor);
	if (s)
		goto out_unlock;

	cache_name = kstrdup_const(name, GFP_KERNEL);
	if (!cache_name) {
		err = -ENOMEM;
		goto out_unlock;
	}

	s = create_cache(cache_name, size,
			 calculate_alignment(flags, align, size),
			 flags, useroffset, usersize, ctor, NULL);
	if (IS_ERR(s)) {
		err = PTR_ERR(s);
		kfree_const(cache_name);
	}

out_unlock:
	mutex_unlock(&slab_mutex);

	if (err) {
		if (flags & SLAB_PANIC)
			panic("%s: Failed to create slab '%s'. Error %d\n",
				__func__, name, err);
		else {
			pr_warn("%s(%s) failed with error %d\n",
				__func__, name, err);
			dump_stack();
		}
		return NULL;
	}
	return s;
}
EXPORT_SYMBOL(kmem_cache_create_usercopy);

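/*
 * Usage sketch (editor's illustration; struct sess and its fields are
 * hypothetical): expose only the "key" field of each object to the
 * hardened copy_to_user()/copy_from_user() checks:
 *
 *	struct sess { u32 id; u8 key[32]; void *priv; };
 *
 *	sess_cache = kmem_cache_create_usercopy("sess",
 *				sizeof(struct sess), 0, SLAB_HWCACHE_ALIGN,
 *				offsetof(struct sess, key),
 *				sizeof_field(struct sess, key), NULL);
 */
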
/**
 * kmem_cache_create - Create a cache.
 * @name: A string which is used in /proc/slabinfo to identify this cache.
 * @size: The size of objects to be created in this cache.
 * @align: The required alignment for the objects.
 * @flags: SLAB flags
 * @ctor: A constructor for the objects.
 *
 * Cannot be called within an interrupt, but can be interrupted.
 * The @ctor is run when new pages are allocated by the cache.
 *
 * The flags are
 *
 * %SLAB_POISON - Poison the slab with a known test pattern (a5a5a5a5)
 * to catch references to uninitialised memory.
 *
 * %SLAB_RED_ZONE - Insert `Red` zones around the allocated memory to check
 * for buffer overruns.
 *
 * %SLAB_HWCACHE_ALIGN - Align the objects in this cache to a hardware
 * cacheline. This can be beneficial if you're counting cycles as closely
 * as davem.
 *
 * Return: a pointer to the cache on success, NULL on failure.
 */
struct kmem_cache *
kmem_cache_create(const char *name, unsigned int size, unsigned int align,
		slab_flags_t flags, void (*ctor)(void *))
{
	return kmem_cache_create_usercopy(name, size, align, flags, 0, 0,
					  ctor);
}
EXPORT_SYMBOL(kmem_cache_create);

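/*
 * Typical call sequence (editor's illustration; the "foo" names are
 * hypothetical): create the cache once at init time, then allocate and
 * free objects from it:
 *
 *	foo_cache = kmem_cache_create("foo", sizeof(struct foo), 0,
 *				      SLAB_HWCACHE_ALIGN, NULL);
 *	struct foo *f = kmem_cache_alloc(foo_cache, GFP_KERNEL);
 *	...
 *	kmem_cache_free(foo_cache, f);
 */
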
#ifdef SLAB_SUPPORTS_SYSFS
/*
 * For a given kmem_cache, kmem_cache_destroy() should only be called
 * once or there will be a use-after-free problem. The actual deletion
 * and release of the kobject does not need slab_mutex or cpu_hotplug_lock
 * protection. So they are now done without holding those locks.
 *
 * Note that there will be a slight delay in the deletion of sysfs files
 * if kmem_cache_release() is called indirectly from a work function.
 */
static void kmem_cache_release(struct kmem_cache *s)
{
	sysfs_slab_unlink(s);
	sysfs_slab_release(s);
}
#else
static void kmem_cache_release(struct kmem_cache *s)
{
	slab_kmem_cache_release(s);
}
#endif

static void slab_caches_to_rcu_destroy_workfn(struct work_struct *work)
{
	LIST_HEAD(to_destroy);
	struct kmem_cache *s, *s2;

	/*
	 * On destruction, SLAB_TYPESAFE_BY_RCU kmem_caches are put on the
	 * @slab_caches_to_rcu_destroy list. The slab pages are freed
	 * through RCU and the associated kmem_cache is dereferenced
	 * while freeing the pages, so the kmem_caches should be freed only
	 * after the pending RCU operations are finished. As rcu_barrier()
	 * is a pretty slow operation, we batch all pending destructions
	 * asynchronously.
	 */
	mutex_lock(&slab_mutex);
	list_splice_init(&slab_caches_to_rcu_destroy, &to_destroy);
	mutex_unlock(&slab_mutex);

	if (list_empty(&to_destroy))
		return;

	rcu_barrier();

	list_for_each_entry_safe(s, s2, &to_destroy, list) {
		debugfs_slab_release(s);
		kfence_shutdown_cache(s);
		kmem_cache_release(s);
	}
}

static int shutdown_cache(struct kmem_cache *s)
{
	/* free asan quarantined objects */
	kasan_cache_shutdown(s);

	if (__kmem_cache_shutdown(s) != 0)
		return -EBUSY;

	list_del(&s->list);

	if (s->flags & SLAB_TYPESAFE_BY_RCU) {
		list_add_tail(&s->list, &slab_caches_to_rcu_destroy);
		schedule_work(&slab_caches_to_rcu_destroy_work);
	} else {
		kfence_shutdown_cache(s);
		debugfs_slab_release(s);
	}

	return 0;
}

void slab_kmem_cache_release(struct kmem_cache *s)
{
	__kmem_cache_release(s);
	kfree_const(s->name);
	kmem_cache_free(kmem_cache, s);
}

void kmem_cache_destroy(struct kmem_cache *s)
{
	int refcnt;
	bool rcu_set;

	if (unlikely(!s) || !kasan_check_byte(s))
		return;

	cpus_read_lock();
	mutex_lock(&slab_mutex);

	rcu_set = s->flags & SLAB_TYPESAFE_BY_RCU;

	refcnt = --s->refcount;
	if (refcnt)
		goto out_unlock;

	WARN(shutdown_cache(s),
	     "%s %s: Slab cache still has objects when called from %pS",
	     __func__, s->name, (void *)_RET_IP_);
out_unlock:
	mutex_unlock(&slab_mutex);
	cpus_read_unlock();
	if (!refcnt && !rcu_set)
		kmem_cache_release(s);
}
EXPORT_SYMBOL(kmem_cache_destroy);

/**
 * kmem_cache_shrink - Shrink a cache.
 * @cachep: The cache to shrink.
 *
 * Releases as many slabs as possible for a cache.
 * To help debugging, a zero exit status indicates all slabs were released.
 *
 * Return: %0 if all slabs were released, non-zero otherwise
 */
int kmem_cache_shrink(struct kmem_cache *cachep)
{
	kasan_cache_shrink(cachep);

	return __kmem_cache_shrink(cachep);
}
EXPORT_SYMBOL(kmem_cache_shrink);

bool slab_is_available(void)
{
	return slab_state >= UP;
}

#ifdef CONFIG_PRINTK
/**
 * kmem_valid_obj - does the pointer reference a valid slab object?
 * @object: pointer to query.
 *
 * Return: %true if the pointer is to a not-yet-freed object from
 * kmalloc() or kmem_cache_alloc(), either %true or %false if the pointer
 * is to an already-freed object, and %false otherwise.
 */
bool kmem_valid_obj(void *object)
{
	struct folio *folio;

	/* Some arches consider ZERO_SIZE_PTR to be a valid address. */
	if (object < (void *)PAGE_SIZE || !virt_addr_valid(object))
		return false;
	folio = virt_to_folio(object);
	return folio_test_slab(folio);
}
EXPORT_SYMBOL_GPL(kmem_valid_obj);

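/*
 * Illustrative use (editor's sketch): a debugging helper that only wants
 * to dump provenance for slab-backed memory can gate on kmem_valid_obj():
 *
 *	if (kmem_valid_obj(ptr))
 *		kmem_dump_obj(ptr);
 */
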
static void kmem_obj_info(struct kmem_obj_info *kpp, void *object, struct slab *slab)
{
	if (__kfence_obj_info(kpp, object, slab))
		return;
	__kmem_obj_info(kpp, object, slab);
}

/**
 * kmem_dump_obj - Print available slab provenance information
 * @object: slab object for which to find provenance information.
 *
 * This function uses pr_cont(), so that the caller is expected to have
 * printed out whatever preamble is appropriate. The provenance information
 * depends on the type of object and on how much debugging is enabled.
 * For a slab-cache object, the fact that it is a slab object is printed,
 * and, if available, the slab name, return address, and stack trace from
 * the allocation and last free path of that object.
 *
 * This function will splat if passed a pointer to a non-slab object.
 * If you are not sure what type of object you have, you should instead
 * use mem_dump_obj().
 */
void kmem_dump_obj(void *object)
{
	char *cp = IS_ENABLED(CONFIG_MMU) ? "" : "/vmalloc";
	int i;
	struct slab *slab;
	unsigned long ptroffset;
	struct kmem_obj_info kp = { };

	if (WARN_ON_ONCE(!virt_addr_valid(object)))
		return;
	slab = virt_to_slab(object);
	if (WARN_ON_ONCE(!slab)) {
		pr_cont(" non-slab memory.\n");
		return;
	}
	kmem_obj_info(&kp, object, slab);
	if (kp.kp_slab_cache)
		pr_cont(" slab%s %s", cp, kp.kp_slab_cache->name);
	else
		pr_cont(" slab%s", cp);
	if (is_kfence_address(object))
		pr_cont(" (kfence)");
	if (kp.kp_objp)
		pr_cont(" start %px", kp.kp_objp);
	if (kp.kp_data_offset)
		pr_cont(" data offset %lu", kp.kp_data_offset);
	if (kp.kp_objp) {
		ptroffset = ((char *)object - (char *)kp.kp_objp) - kp.kp_data_offset;
		pr_cont(" pointer offset %lu", ptroffset);
	}
	if (kp.kp_slab_cache && kp.kp_slab_cache->usersize)
		pr_cont(" size %u", kp.kp_slab_cache->usersize);
	if (kp.kp_ret)
		pr_cont(" allocated at %pS\n", kp.kp_ret);
	else
		pr_cont("\n");
	for (i = 0; i < ARRAY_SIZE(kp.kp_stack); i++) {
		if (!kp.kp_stack[i])
			break;
		pr_info("    %pS\n", kp.kp_stack[i]);
	}

	if (kp.kp_free_stack[0])
		pr_cont(" Free path:\n");

	for (i = 0; i < ARRAY_SIZE(kp.kp_free_stack); i++) {
		if (!kp.kp_free_stack[i])
			break;
		pr_info("    %pS\n", kp.kp_free_stack[i]);
	}

}
EXPORT_SYMBOL_GPL(kmem_dump_obj);
#endif

#ifndef CONFIG_SLOB
/* Create a cache during boot when no slab services are available yet */
void __init create_boot_cache(struct kmem_cache *s, const char *name,
		unsigned int size, slab_flags_t flags,
		unsigned int useroffset, unsigned int usersize)
{
	int err;
	unsigned int align = ARCH_KMALLOC_MINALIGN;

	s->name = name;
	s->size = s->object_size = size;

	/*
	 * For power of two sizes, guarantee natural alignment for kmalloc
	 * caches, regardless of SL*B debugging options.
	 */
	if (is_power_of_2(size))
		align = max(align, size);
	s->align = calculate_alignment(flags, align, size);

	s->useroffset = useroffset;
	s->usersize = usersize;

	err = __kmem_cache_create(s, flags);

	if (err)
		panic("Creation of kmalloc slab %s size=%u failed. Reason %d\n",
					name, size, err);

	s->refcount = -1;	/* Exempt from merging for now */
}

struct kmem_cache *__init create_kmalloc_cache(const char *name,
		unsigned int size, slab_flags_t flags,
		unsigned int useroffset, unsigned int usersize)
{
	struct kmem_cache *s = kmem_cache_zalloc(kmem_cache, GFP_NOWAIT);

	if (!s)
		panic("Out of memory when creating slab %s\n", name);

	create_boot_cache(s, name, size, flags | SLAB_KMALLOC, useroffset,
								usersize);
	kasan_cache_create_kmalloc(s);
	list_add(&s->list, &slab_caches);
	s->refcount = 1;
	return s;
}

struct kmem_cache *
kmalloc_caches[NR_KMALLOC_TYPES][KMALLOC_SHIFT_HIGH + 1] __ro_after_init =
{ /* initialization for https://bugs.llvm.org/show_bug.cgi?id=42570 */ };
EXPORT_SYMBOL(kmalloc_caches);

/*
 * Conversion table for small slab sizes / 8 to the index in the
 * kmalloc array. This is necessary for slabs < 192 since we have non power
 * of two cache sizes there. The size of larger slabs can be determined using
 * fls.
 */
static u8 size_index[24] __ro_after_init = {
	3,	/* 8 */
	4,	/* 16 */
	5,	/* 24 */
	5,	/* 32 */
	6,	/* 40 */
	6,	/* 48 */
	6,	/* 56 */
	6,	/* 64 */
	1,	/* 72 */
	1,	/* 80 */
	1,	/* 88 */
	1,	/* 96 */
	7,	/* 104 */
	7,	/* 112 */
	7,	/* 120 */
	7,	/* 128 */
	2,	/* 136 */
	2,	/* 144 */
	2,	/* 152 */
	2,	/* 160 */
	2,	/* 168 */
	2,	/* 176 */
	2,	/* 184 */
	2	/* 192 */
};

static inline unsigned int size_index_elem(unsigned int bytes)
{
	return (bytes - 1) / 8;
}

/*
 * Find the kmem_cache structure that serves a given size of
 * allocation
 */
struct kmem_cache *kmalloc_slab(size_t size, gfp_t flags)
{
	unsigned int index;

	if (size <= 192) {
		if (!size)
			return ZERO_SIZE_PTR;

		index = size_index[size_index_elem(size)];
	} else {
		if (WARN_ON_ONCE(size > KMALLOC_MAX_CACHE_SIZE))
			return NULL;
		index = fls(size - 1);
	}

	return kmalloc_caches[kmalloc_type(flags)][index];
}

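/*
 * Worked example (editor's illustration): kmalloc(100, GFP_KERNEL) takes
 * the small-size path: size_index_elem(100) = (100 - 1) / 8 = 12, and
 * size_index[12] = 7, i.e. the 128-byte cache. A 1000-byte request takes
 * the fls path: index = fls(999) = 10, i.e. the 1024-byte cache.
 */
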
size_t kmalloc_size_roundup(size_t size)
{
	struct kmem_cache *c;

	/* Short-circuit the 0 size case. */
	if (unlikely(size == 0))
		return 0;
	/* Short-circuit saturated "too-large" case. */
	if (unlikely(size == SIZE_MAX))
		return SIZE_MAX;
	/* Above the smaller buckets, size is a multiple of page size. */
	if (size > KMALLOC_MAX_CACHE_SIZE)
		return PAGE_SIZE << get_order(size);

	/* The flags don't matter since size_index is common to all. */
	c = kmalloc_slab(size, GFP_KERNEL);
	return c ? c->object_size : 0;
}
EXPORT_SYMBOL(kmalloc_size_roundup);

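/*
 * Usage sketch (editor's illustration; buf and len are hypothetical):
 * callers that intend to use the whole bucket can round the request up
 * front, rather than discovering the slack later via ksize():
 *
 *	size_t alloc = kmalloc_size_roundup(len);	e.g. 100 -> 128
 *	buf = kmalloc(alloc, GFP_KERNEL);
 */
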
#ifdef CONFIG_ZONE_DMA
#define KMALLOC_DMA_NAME(sz)	.name[KMALLOC_DMA] = "dma-kmalloc-" #sz,
#else
#define KMALLOC_DMA_NAME(sz)
#endif

#ifdef CONFIG_MEMCG_KMEM
#define KMALLOC_CGROUP_NAME(sz)	.name[KMALLOC_CGROUP] = "kmalloc-cg-" #sz,
#else
#define KMALLOC_CGROUP_NAME(sz)
#endif

#define INIT_KMALLOC_INFO(__size, __short_size)			\
{								\
	.name[KMALLOC_NORMAL]  = "kmalloc-" #__short_size,	\
	.name[KMALLOC_RECLAIM] = "kmalloc-rcl-" #__short_size,	\
	KMALLOC_CGROUP_NAME(__short_size)			\
	KMALLOC_DMA_NAME(__short_size)				\
	.size = __size,						\
}

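/*
 * Expansion example (editor's illustration): with CONFIG_ZONE_DMA and
 * CONFIG_MEMCG_KMEM enabled, INIT_KMALLOC_INFO(1024, 1k) produces the
 * initializer { .name[KMALLOC_NORMAL] = "kmalloc-1k",
 * .name[KMALLOC_RECLAIM] = "kmalloc-rcl-1k", .name[KMALLOC_CGROUP] =
 * "kmalloc-cg-1k", .name[KMALLOC_DMA] = "dma-kmalloc-1k", .size = 1024 }.
 */
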
/*
 * kmalloc_info[] is to make slub_debug=,kmalloc-xx option work at boot time.
 * kmalloc_index() supports up to 2^21=2MB, so the final entry of the table is
 * kmalloc-2M.
 */
const struct kmalloc_info_struct kmalloc_info[] __initconst = {
	INIT_KMALLOC_INFO(0, 0),
	INIT_KMALLOC_INFO(96, 96),
	INIT_KMALLOC_INFO(192, 192),
	INIT_KMALLOC_INFO(8, 8),
	INIT_KMALLOC_INFO(16, 16),
	INIT_KMALLOC_INFO(32, 32),
	INIT_KMALLOC_INFO(64, 64),
	INIT_KMALLOC_INFO(128, 128),
	INIT_KMALLOC_INFO(256, 256),
	INIT_KMALLOC_INFO(512, 512),
	INIT_KMALLOC_INFO(1024, 1k),
	INIT_KMALLOC_INFO(2048, 2k),
	INIT_KMALLOC_INFO(4096, 4k),
	INIT_KMALLOC_INFO(8192, 8k),
	INIT_KMALLOC_INFO(16384, 16k),
	INIT_KMALLOC_INFO(32768, 32k),
	INIT_KMALLOC_INFO(65536, 64k),
	INIT_KMALLOC_INFO(131072, 128k),
	INIT_KMALLOC_INFO(262144, 256k),
	INIT_KMALLOC_INFO(524288, 512k),
	INIT_KMALLOC_INFO(1048576, 1M),
	INIT_KMALLOC_INFO(2097152, 2M)
};

/*
 * Patch up the size_index table if we have strange large alignment
 * requirements for the kmalloc array. This is only the case for
 * MIPS it seems. The standard arches will not generate any code here.
 *
 * Largest permitted alignment is 256 bytes due to the way we
 * handle the index determination for the smaller caches.
 *
 * Make sure that nothing crazy happens if someone starts tinkering
 * around with ARCH_KMALLOC_MINALIGN
 */
void __init setup_kmalloc_cache_index_table(void)
{
	unsigned int i;

	BUILD_BUG_ON(KMALLOC_MIN_SIZE > 256 ||
		!is_power_of_2(KMALLOC_MIN_SIZE));

	for (i = 8; i < KMALLOC_MIN_SIZE; i += 8) {
		unsigned int elem = size_index_elem(i);

		if (elem >= ARRAY_SIZE(size_index))
			break;
		size_index[elem] = KMALLOC_SHIFT_LOW;
	}

	if (KMALLOC_MIN_SIZE >= 64) {
		/*
		 * The 96 byte sized cache is not used if the alignment
		 * is 64 byte.
		 */
		for (i = 64 + 8; i <= 96; i += 8)
			size_index[size_index_elem(i)] = 7;

	}

	if (KMALLOC_MIN_SIZE >= 128) {
		/*
		 * The 192 byte sized cache is not used if the alignment
		 * is 128 byte. Redirect kmalloc to use the 256 byte cache
		 * instead.
		 */
		for (i = 128 + 8; i <= 192; i += 8)
			size_index[size_index_elem(i)] = 8;
	}
}

static void __init
new_kmalloc_cache(int idx, enum kmalloc_cache_type type, slab_flags_t flags)
{
	if (type == KMALLOC_RECLAIM) {
		flags |= SLAB_RECLAIM_ACCOUNT;
	} else if (IS_ENABLED(CONFIG_MEMCG_KMEM) && (type == KMALLOC_CGROUP)) {
		if (mem_cgroup_kmem_disabled()) {
			kmalloc_caches[type][idx] = kmalloc_caches[KMALLOC_NORMAL][idx];
			return;
		}
		flags |= SLAB_ACCOUNT;
	} else if (IS_ENABLED(CONFIG_ZONE_DMA) && (type == KMALLOC_DMA)) {
		flags |= SLAB_CACHE_DMA;
	}

	kmalloc_caches[type][idx] = create_kmalloc_cache(
					kmalloc_info[idx].name[type],
					kmalloc_info[idx].size, flags, 0,
					kmalloc_info[idx].size);

	/*
	 * If CONFIG_MEMCG_KMEM is enabled, disable cache merging for
	 * KMALLOC_NORMAL caches.
	 */
	if (IS_ENABLED(CONFIG_MEMCG_KMEM) && (type == KMALLOC_NORMAL))
		kmalloc_caches[type][idx]->refcount = -1;
}

/*
 * Create the kmalloc array. Some of the regular kmalloc arrays
 * may already have been created because they were needed to
 * enable allocations for slab creation.
 */
void __init create_kmalloc_caches(slab_flags_t flags)
{
	int i;
	enum kmalloc_cache_type type;

	/*
	 * Including KMALLOC_CGROUP if CONFIG_MEMCG_KMEM defined
	 */
	for (type = KMALLOC_NORMAL; type < NR_KMALLOC_TYPES; type++) {
		for (i = KMALLOC_SHIFT_LOW; i <= KMALLOC_SHIFT_HIGH; i++) {
			if (!kmalloc_caches[type][i])
				new_kmalloc_cache(i, type, flags);

			/*
			 * Caches that are not of the two-to-the-power-of size.
			 * These have to be created immediately after the
			 * earlier power of two caches
			 */
			if (KMALLOC_MIN_SIZE <= 32 && i == 6 &&
					!kmalloc_caches[type][1])
				new_kmalloc_cache(1, type, flags);
			if (KMALLOC_MIN_SIZE <= 64 && i == 7 &&
					!kmalloc_caches[type][2])
				new_kmalloc_cache(2, type, flags);
		}
	}

	/* Kmalloc array is now usable */
	slab_state = UP;
}

void free_large_kmalloc(struct folio *folio, void *object)
{
	unsigned int order = folio_order(folio);

	if (WARN_ON_ONCE(order == 0))
		pr_warn_once("object pointer: 0x%p\n", object);

	kmemleak_free(object);
	kasan_kfree_large(object);
	kmsan_kfree_large(object);

	mod_lruvec_page_state(folio_page(folio, 0), NR_SLAB_UNRECLAIMABLE_B,
			      -(PAGE_SIZE << order));
	__free_pages(folio_page(folio, 0), order);
}

static void *__kmalloc_large_node(size_t size, gfp_t flags, int node);
static __always_inline
void *__do_kmalloc_node(size_t size, gfp_t flags, int node, unsigned long caller)
{
	struct kmem_cache *s;
	void *ret;

	if (unlikely(size > KMALLOC_MAX_CACHE_SIZE)) {
		ret = __kmalloc_large_node(size, flags, node);
		trace_kmalloc(_RET_IP_, ret, size,
			      PAGE_SIZE << get_order(size), flags, node);
		return ret;
	}

	s = kmalloc_slab(size, flags);

	if (unlikely(ZERO_OR_NULL_PTR(s)))
		return s;

	ret = __kmem_cache_alloc_node(s, flags, node, size, caller);
	ret = kasan_kmalloc(s, ret, size, flags);
	trace_kmalloc(_RET_IP_, ret, size, s->size, flags, node);
	return ret;
}

void *__kmalloc_node(size_t size, gfp_t flags, int node)
{
	return __do_kmalloc_node(size, flags, node, _RET_IP_);
}
EXPORT_SYMBOL(__kmalloc_node);

void *__kmalloc(size_t size, gfp_t flags)
{
	return __do_kmalloc_node(size, flags, NUMA_NO_NODE, _RET_IP_);
}
EXPORT_SYMBOL(__kmalloc);

void *__kmalloc_node_track_caller(size_t size, gfp_t flags,
				  int node, unsigned long caller)
{
	return __do_kmalloc_node(size, flags, node, caller);
}
EXPORT_SYMBOL(__kmalloc_node_track_caller);

/**
 * kfree - free previously allocated memory
 * @object: pointer returned by kmalloc.
 *
 * If @object is NULL, no operation is performed.
 *
 * Don't free memory not originally allocated by kmalloc()
 * or you will run into trouble.
 */
void kfree(const void *object)
{
	struct folio *folio;
	struct slab *slab;
	struct kmem_cache *s;

	trace_kfree(_RET_IP_, object);

	if (unlikely(ZERO_OR_NULL_PTR(object)))
		return;

	folio = virt_to_folio(object);
	if (unlikely(!folio_test_slab(folio))) {
		free_large_kmalloc(folio, (void *)object);
		return;
	}

	slab = folio_slab(folio);
	s = slab->slab_cache;
	__kmem_cache_free(s, (void *)object, _RET_IP_);
}
EXPORT_SYMBOL(kfree);

/**
 * __ksize -- Report full size of underlying allocation
 * @object: pointer to the object
 *
 * This should only be used internally to query the true size of allocations.
 * It is not meant to be a way to discover the usable size of an allocation
 * after the fact. Instead, use kmalloc_size_roundup(). Using memory beyond
 * the originally requested allocation size may trigger KASAN, UBSAN_BOUNDS,
 * and/or FORTIFY_SOURCE.
 *
 * Return: size of the actual memory used by @object in bytes
 */
size_t __ksize(const void *object)
{
	struct folio *folio;

	if (unlikely(object == ZERO_SIZE_PTR))
		return 0;

	folio = virt_to_folio(object);

	if (unlikely(!folio_test_slab(folio))) {
		if (WARN_ON(folio_size(folio) <= KMALLOC_MAX_CACHE_SIZE))
			return 0;
		if (WARN_ON(object != folio_address(folio)))
			return 0;
		return folio_size(folio);
	}

	return slab_ksize(folio_slab(folio)->slab_cache);
}

#ifdef CONFIG_TRACING
void *kmalloc_trace(struct kmem_cache *s, gfp_t gfpflags, size_t size)
{
	void *ret = __kmem_cache_alloc_node(s, gfpflags, NUMA_NO_NODE,
					    size, _RET_IP_);

	trace_kmalloc(_RET_IP_, ret, size, s->size, gfpflags, NUMA_NO_NODE);

	ret = kasan_kmalloc(s, ret, size, gfpflags);
	return ret;
}
EXPORT_SYMBOL(kmalloc_trace);

void *kmalloc_node_trace(struct kmem_cache *s, gfp_t gfpflags,
			 int node, size_t size)
{
	void *ret = __kmem_cache_alloc_node(s, gfpflags, node, size, _RET_IP_);

	trace_kmalloc(_RET_IP_, ret, size, s->size, gfpflags, node);

	ret = kasan_kmalloc(s, ret, size, gfpflags);
	return ret;
}
EXPORT_SYMBOL(kmalloc_node_trace);
#endif /* CONFIG_TRACING */
#endif /* !CONFIG_SLOB */

gfp_t kmalloc_fix_flags(gfp_t flags)
{
	gfp_t invalid_mask = flags & GFP_SLAB_BUG_MASK;

	flags &= ~GFP_SLAB_BUG_MASK;
	pr_warn("Unexpected gfp: %#x (%pGg). Fixing up to gfp: %#x (%pGg). Fix your code!\n",
			invalid_mask, &invalid_mask, flags, &flags);
	dump_stack();

	return flags;
}

/*
 * To avoid unnecessary overhead, we pass through large allocation requests
 * directly to the page allocator. We use __GFP_COMP, because we will need to
 * know the allocation order to free the pages properly in kfree.
 */

static void *__kmalloc_large_node(size_t size, gfp_t flags, int node)
{
	struct page *page;
	void *ptr = NULL;
	unsigned int order = get_order(size);

	if (unlikely(flags & GFP_SLAB_BUG_MASK))
		flags = kmalloc_fix_flags(flags);

	flags |= __GFP_COMP;
	page = alloc_pages_node(node, flags, order);
	if (page) {
		ptr = page_address(page);
		mod_lruvec_page_state(page, NR_SLAB_UNRECLAIMABLE_B,
				      PAGE_SIZE << order);
	}

	ptr = kasan_kmalloc_large(ptr, size, flags);
	/* As ptr might get tagged, call kmemleak hook after KASAN. */
	kmemleak_alloc(ptr, size, 1, flags);
	kmsan_kmalloc_large(ptr, size, flags);

	return ptr;
}

void *kmalloc_large(size_t size, gfp_t flags)
{
	void *ret = __kmalloc_large_node(size, flags, NUMA_NO_NODE);

	trace_kmalloc(_RET_IP_, ret, size, PAGE_SIZE << get_order(size),
		      flags, NUMA_NO_NODE);
	return ret;
}
EXPORT_SYMBOL(kmalloc_large);

void *kmalloc_large_node(size_t size, gfp_t flags, int node)
{
	void *ret = __kmalloc_large_node(size, flags, node);

	trace_kmalloc(_RET_IP_, ret, size, PAGE_SIZE << get_order(size),
		      flags, node);
	return ret;
}
EXPORT_SYMBOL(kmalloc_large_node);

#ifdef CONFIG_SLAB_FREELIST_RANDOM
/* Randomize a generic freelist */
static void freelist_randomize(struct rnd_state *state, unsigned int *list,
			       unsigned int count)
{
	unsigned int rand;
	unsigned int i;

	for (i = 0; i < count; i++)
		list[i] = i;

	/* Fisher-Yates shuffle */
	for (i = count - 1; i > 0; i--) {
		rand = prandom_u32_state(state);
		rand %= (i + 1);
		swap(list[i], list[rand]);
	}
}

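/*
 * Worked example (editor's illustration): for count == 4 the loop visits
 * i = 3, 2, 1 and swaps list[i] with a uniformly chosen list[rand],
 * rand <= i, e.g. {0,1,2,3} -> {0,3,2,1} -> {2,3,0,1} -> {3,2,0,1};
 * every permutation of the freelist is equally likely.
 */
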
/* Create a random sequence per cache */
int cache_random_seq_create(struct kmem_cache *cachep, unsigned int count,
				    gfp_t gfp)
{
	struct rnd_state state;

	if (count < 2 || cachep->random_seq)
		return 0;

	cachep->random_seq = kcalloc(count, sizeof(unsigned int), gfp);
	if (!cachep->random_seq)
		return -ENOMEM;

	/* Get best entropy at this stage of boot */
	prandom_seed_state(&state, get_random_long());

	freelist_randomize(&state, cachep->random_seq, count);
	return 0;
}

/* Destroy the per-cache random freelist sequence */
void cache_random_seq_destroy(struct kmem_cache *cachep)
{
	kfree(cachep->random_seq);
	cachep->random_seq = NULL;
}
#endif /* CONFIG_SLAB_FREELIST_RANDOM */

#if defined(CONFIG_SLAB) || defined(CONFIG_SLUB_DEBUG)
#ifdef CONFIG_SLAB
#define SLABINFO_RIGHTS (0600)
#else
#define SLABINFO_RIGHTS (0400)
#endif

static void print_slabinfo_header(struct seq_file *m)
{
	/*
	 * Output format version, so at least we can change it
	 * without _too_ many complaints.
	 */
#ifdef CONFIG_DEBUG_SLAB
	seq_puts(m, "slabinfo - version: 2.1 (statistics)\n");
#else
	seq_puts(m, "slabinfo - version: 2.1\n");
#endif
	seq_puts(m, "# name            <active_objs> <num_objs> <objsize> <objperslab> <pagesperslab>");
	seq_puts(m, " : tunables <limit> <batchcount> <sharedfactor>");
	seq_puts(m, " : slabdata <active_slabs> <num_slabs> <sharedavail>");
#ifdef CONFIG_DEBUG_SLAB
	seq_puts(m, " : globalstat <listallocs> <maxobjs> <grown> <reaped> <error> <maxfreeable> <nodeallocs> <remotefrees> <alienoverflow>");
	seq_puts(m, " : cpustat <allochit> <allocmiss> <freehit> <freemiss>");
#endif
	seq_putc(m, '\n');
}

static void *slab_start(struct seq_file *m, loff_t *pos)
{
	mutex_lock(&slab_mutex);
	return seq_list_start(&slab_caches, *pos);
}

static void *slab_next(struct seq_file *m, void *p, loff_t *pos)
{
	return seq_list_next(p, &slab_caches, pos);
}

static void slab_stop(struct seq_file *m, void *p)
{
	mutex_unlock(&slab_mutex);
}

static void cache_show(struct kmem_cache *s, struct seq_file *m)
{
	struct slabinfo sinfo;

	memset(&sinfo, 0, sizeof(sinfo));
	get_slabinfo(s, &sinfo);

	seq_printf(m, "%-17s %6lu %6lu %6u %4u %4d",
		   s->name, sinfo.active_objs, sinfo.num_objs, s->size,
		   sinfo.objects_per_slab, (1 << sinfo.cache_order));

	seq_printf(m, " : tunables %4u %4u %4u",
		   sinfo.limit, sinfo.batchcount, sinfo.shared);
	seq_printf(m, " : slabdata %6lu %6lu %6lu",
		   sinfo.active_slabs, sinfo.num_slabs, sinfo.shared_avail);
	slabinfo_show_stats(m, s);
	seq_putc(m, '\n');
}

static int slab_show(struct seq_file *m, void *p)
{
	struct kmem_cache *s = list_entry(p, struct kmem_cache, list);

	if (p == slab_caches.next)
		print_slabinfo_header(m);
	cache_show(s, m);
	return 0;
}

void dump_unreclaimable_slab(void)
{
	struct kmem_cache *s;
	struct slabinfo sinfo;

	/*
	 * Here acquiring slab_mutex is risky since we don't want to
	 * sleep in the oom path. But without the mutex held, traversing
	 * the list risks a crash.
	 * Use mutex_trylock to protect the traversal: dump nothing
	 * without acquiring the mutex.
	 */
	if (!mutex_trylock(&slab_mutex)) {
		pr_warn("excessive unreclaimable slab but cannot dump stats\n");
		return;
	}

	pr_info("Unreclaimable slab info:\n");
	pr_info("Name                      Used          Total\n");

	list_for_each_entry(s, &slab_caches, list) {
		if (s->flags & SLAB_RECLAIM_ACCOUNT)
			continue;

		get_slabinfo(s, &sinfo);

		if (sinfo.num_objs > 0)
			pr_info("%-17s %10luKB %10luKB\n", s->name,
				(sinfo.active_objs * s->size) / 1024,
				(sinfo.num_objs * s->size) / 1024);
	}
	mutex_unlock(&slab_mutex);
}

/*
 * slabinfo_op - iterator that generates /proc/slabinfo
 *
 * Output layout:
 * cache-name
 * num-active-objs
 * total-objs
 * object size
 * num-active-slabs
 * total-slabs
 * num-pages-per-slab
 * + further values on SMP and with statistics enabled
 */
static const struct seq_operations slabinfo_op = {
	.start = slab_start,
	.next = slab_next,
	.stop = slab_stop,
	.show = slab_show,
};

static int slabinfo_open(struct inode *inode, struct file *file)
{
	return seq_open(file, &slabinfo_op);
}

static const struct proc_ops slabinfo_proc_ops = {
	.proc_flags	= PROC_ENTRY_PERMANENT,
	.proc_open	= slabinfo_open,
	.proc_read	= seq_read,
	.proc_write	= slabinfo_write,
	.proc_lseek	= seq_lseek,
	.proc_release	= seq_release,
};

static int __init slab_proc_init(void)
{
	proc_create("slabinfo", SLABINFO_RIGHTS, NULL, &slabinfo_proc_ops);
	return 0;
}
module_init(slab_proc_init);

#endif /* CONFIG_SLAB || CONFIG_SLUB_DEBUG */

static __always_inline __realloc_size(2) void *
__do_krealloc(const void *p, size_t new_size, gfp_t flags)
{
	void *ret;
	size_t ks;

	/* Don't use instrumented ksize to allow precise KASAN poisoning. */
	if (likely(!ZERO_OR_NULL_PTR(p))) {
		if (!kasan_check_byte(p))
			return NULL;
		ks = kfence_ksize(p) ?: __ksize(p);
	} else
		ks = 0;

	/* If the object still fits, repoison it precisely. */
	if (ks >= new_size) {
		p = kasan_krealloc((void *)p, new_size, flags);
		return (void *)p;
	}

	ret = kmalloc_track_caller(new_size, flags);
	if (ret && p) {
		/* Disable KASAN checks as the object's redzone is accessed. */
		kasan_disable_current();
		memcpy(ret, kasan_reset_tag(p), ks);
		kasan_enable_current();
	}

	return ret;
}

/**
 * krealloc - reallocate memory. The contents will remain unchanged.
 * @p: object to reallocate memory for.
 * @new_size: how many bytes of memory are required.
 * @flags: the type of memory to allocate.
 *
 * The contents of the object pointed to are preserved up to the
 * lesser of the new and old sizes (__GFP_ZERO flag is effectively ignored).
 * If @p is %NULL, krealloc() behaves exactly like kmalloc(). If @new_size
 * is 0 and @p is not a %NULL pointer, the object pointed to is freed.
 *
 * Return: pointer to the allocated memory or %NULL in case of error
 */
void *krealloc(const void *p, size_t new_size, gfp_t flags)
{
	void *ret;

	if (unlikely(!new_size)) {
		kfree(p);
		return ZERO_SIZE_PTR;
	}

	ret = __do_krealloc(p, new_size, flags);
	if (ret && kasan_reset_tag(p) != kasan_reset_tag(ret))
		kfree(p);

	return ret;
}
EXPORT_SYMBOL(krealloc);

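/*
 * Usage sketch (editor's illustration; buf and n are hypothetical): on
 * success the old pointer must no longer be used or freed, while on
 * failure the original buffer is left intact:
 *
 *	new = krealloc(buf, n * sizeof(*buf), GFP_KERNEL);
 *	if (!new)
 *		kfree(buf);
 *	else
 *		buf = new;
 */
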
/**
 * kfree_sensitive - Clear sensitive information in memory before freeing
 * @p: object to free memory of
 *
 * The memory of the object @p points to is zeroed before it is freed.
 * If @p is %NULL, kfree_sensitive() does nothing.
 *
 * Note: this function zeroes the whole allocated buffer which can be a good
 * deal bigger than the requested buffer size passed to kmalloc(). So be
 * careful when using this function in performance sensitive code.
 */
void kfree_sensitive(const void *p)
{
	size_t ks;
	void *mem = (void *)p;

	ks = ksize(mem);
	if (ks)
		memzero_explicit(mem, ks);
	kfree(mem);
}
EXPORT_SYMBOL(kfree_sensitive);

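/*
 * Typical use (editor's illustration; the key buffer is hypothetical):
 * free key material without leaving a copy behind in the slab. Note that
 * the whole bucket is zeroed, not just the requested KEY_LEN bytes:
 *
 *	key = kmalloc(KEY_LEN, GFP_KERNEL);
 *	...
 *	kfree_sensitive(key);
 */
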
/**
 * ksize - get the actual amount of memory allocated for a given object
 * @objp: Pointer to the object
 *
 * kmalloc may internally round up allocations and return more memory
 * than requested. ksize() can be used to determine the actual amount of
 * memory allocated. The caller may use this additional memory, even though
 * a smaller amount of memory was initially specified with the kmalloc call.
 * The caller must guarantee that objp points to a valid object previously
 * allocated with either kmalloc() or kmem_cache_alloc(). The object
 * must not be freed during the duration of the call.
 *
 * Return: size of the actual memory used by @objp in bytes
 */
size_t ksize(const void *objp)
{
	size_t size;

	/*
	 * We need to first check that the pointer to the object is valid, and
	 * only then unpoison the memory. The report printed from ksize() is
	 * more useful than when it's printed later when the behaviour could
	 * be undefined due to a potential use-after-free or double-free.
	 *
	 * We use kasan_check_byte(), which is supported for the hardware
	 * tag-based KASAN mode, unlike kasan_check_read/write().
	 *
	 * If the pointed to memory is invalid, we return 0 to avoid users of
	 * ksize() writing to and potentially corrupting the memory region.
	 *
	 * We want to perform the check before __ksize(), to avoid potentially
	 * crashing in __ksize() due to accessing invalid metadata.
	 */
	if (unlikely(ZERO_OR_NULL_PTR(objp)) || !kasan_check_byte(objp))
		return 0;

	size = kfence_ksize(objp) ?: __ksize(objp);
	/*
	 * We assume that ksize callers could use whole allocated area,
	 * so we need to unpoison this area.
	 */
	kasan_unpoison_range(objp, size);
	return size;
}
EXPORT_SYMBOL(ksize);

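/*
 * Worked example (editor's illustration): with the default bucket layout,
 * kmalloc(100, ...) is served from the 128-byte cache, so ksize() on that
 * pointer returns 128 and the caller may legitimately use all 128 bytes
 * (the unpoisoning above makes KASAN agree).
 */
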
/* Tracepoints definitions. */
EXPORT_TRACEPOINT_SYMBOL(kmalloc);
EXPORT_TRACEPOINT_SYMBOL(kmem_cache_alloc);
EXPORT_TRACEPOINT_SYMBOL(kfree);
EXPORT_TRACEPOINT_SYMBOL(kmem_cache_free);

int should_failslab(struct kmem_cache *s, gfp_t gfpflags)
{
	if (__should_failslab(s, gfpflags))
		return -ENOMEM;
	return 0;
}
ALLOW_ERROR_INJECTION(should_failslab, ERRNO);