mm/slab_common.c
1// SPDX-License-Identifier: GPL-2.0
2/*
3 * Slab allocator functions that are independent of the allocator strategy
4 *
5 * (C) 2012 Christoph Lameter <cl@linux.com>
6 */
7#include <linux/slab.h>
8
9#include <linux/mm.h>
10#include <linux/poison.h>
11#include <linux/interrupt.h>
12#include <linux/memory.h>
13#include <linux/cache.h>
14#include <linux/compiler.h>
15#include <linux/kfence.h>
16#include <linux/module.h>
17#include <linux/cpu.h>
18#include <linux/uaccess.h>
19#include <linux/seq_file.h>
20#include <linux/dma-mapping.h>
21#include <linux/swiotlb.h>
22#include <linux/proc_fs.h>
23#include <linux/debugfs.h>
24#include <linux/kmemleak.h>
25#include <linux/kasan.h>
26#include <asm/cacheflush.h>
27#include <asm/tlbflush.h>
28#include <asm/page.h>
29#include <linux/memcontrol.h>
30#include <linux/stackdepot.h>
31
32#include "internal.h"
33#include "slab.h"
34
35#define CREATE_TRACE_POINTS
36#include <trace/events/kmem.h>
37
38enum slab_state slab_state;
39LIST_HEAD(slab_caches);
40DEFINE_MUTEX(slab_mutex);
41struct kmem_cache *kmem_cache;
42
43static LIST_HEAD(slab_caches_to_rcu_destroy);
44static void slab_caches_to_rcu_destroy_workfn(struct work_struct *work);
45static DECLARE_WORK(slab_caches_to_rcu_destroy_work,
46 slab_caches_to_rcu_destroy_workfn);
47
48/*
49 * Set of flags that will prevent slab merging
50 */
51#define SLAB_NEVER_MERGE (SLAB_RED_ZONE | SLAB_POISON | SLAB_STORE_USER | \
52 SLAB_TRACE | SLAB_TYPESAFE_BY_RCU | SLAB_NOLEAKTRACE | \
53 SLAB_FAILSLAB | SLAB_NO_MERGE)
54
55#define SLAB_MERGE_SAME (SLAB_RECLAIM_ACCOUNT | SLAB_CACHE_DMA | \
56 SLAB_CACHE_DMA32 | SLAB_ACCOUNT)
57
58/*
59 * Merge control. If this is set then no merging of slab caches will occur.
60 */
61static bool slab_nomerge = !IS_ENABLED(CONFIG_SLAB_MERGE_DEFAULT);
62
63static int __init setup_slab_nomerge(char *str)
64{
65 slab_nomerge = true;
66 return 1;
67}
68
69static int __init setup_slab_merge(char *str)
70{
71 slab_nomerge = false;
72 return 1;
73}
74
75__setup_param("slub_nomerge", slub_nomerge, setup_slab_nomerge, 0);
76__setup_param("slub_merge", slub_merge, setup_slab_merge, 0);
77
78__setup("slab_nomerge", setup_slab_nomerge);
79__setup("slab_merge", setup_slab_merge);
80
81/*
82 * Determine the size of a slab object
83 */
84unsigned int kmem_cache_size(struct kmem_cache *s)
85{
86 return s->object_size;
87}
88EXPORT_SYMBOL(kmem_cache_size);
89
90#ifdef CONFIG_DEBUG_VM
91static int kmem_cache_sanity_check(const char *name, unsigned int size)
92{
93 if (!name || in_interrupt() || size > KMALLOC_MAX_SIZE) {
94 pr_err("kmem_cache_create(%s) integrity check failed\n", name);
95 return -EINVAL;
96 }
97
98 WARN_ON(strchr(name, ' ')); /* It confuses parsers */
99 return 0;
100}
101#else
102static inline int kmem_cache_sanity_check(const char *name, unsigned int size)
103{
104 return 0;
105}
106#endif
107
108/*
109 * Figure out what the alignment of the objects will be given a set of
110 * flags, a user specified alignment and the size of the objects.
111 */
112static unsigned int calculate_alignment(slab_flags_t flags,
113 unsigned int align, unsigned int size)
114{
115 /*
116 * If the user wants hardware cache aligned objects then follow that
117 * suggestion if the object is sufficiently large.
118 *
119 * The hardware cache alignment cannot override the specified
 120 * alignment though. If that is greater, then use it.
121 */
122 if (flags & SLAB_HWCACHE_ALIGN) {
123 unsigned int ralign;
124
125 ralign = cache_line_size();
126 while (size <= ralign / 2)
127 ralign /= 2;
128 align = max(align, ralign);
129 }
130
131 align = max(align, arch_slab_minalign());
132
133 return ALIGN(align, sizeof(void *));
134}
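
/*
 * Worked example (illustrative; assumes a 64-byte cache line and an
 * arch_slab_minalign() no larger than that): for a 20-byte object created
 * with SLAB_HWCACHE_ALIGN and no explicit alignment, ralign starts at 64
 * and is halved while the object fits in half of it (20 <= 32), ending at
 * 32. After rounding up to a multiple of sizeof(void *), objects end up on
 * 32-byte boundaries instead of each wasting a full cache line.
 */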
135
136/*
137 * Find a mergeable slab cache
138 */
139int slab_unmergeable(struct kmem_cache *s)
140{
141 if (slab_nomerge || (s->flags & SLAB_NEVER_MERGE))
142 return 1;
143
144 if (s->ctor)
145 return 1;
146
147#ifdef CONFIG_HARDENED_USERCOPY
148 if (s->usersize)
149 return 1;
150#endif
151
152 /*
153 * We may have set a slab to be unmergeable during bootstrap.
154 */
155 if (s->refcount < 0)
156 return 1;
157
158 return 0;
159}
160
161struct kmem_cache *find_mergeable(unsigned int size, unsigned int align,
162 slab_flags_t flags, const char *name, void (*ctor)(void *))
163{
164 struct kmem_cache *s;
165
166 if (slab_nomerge)
167 return NULL;
168
169 if (ctor)
170 return NULL;
171
172 size = ALIGN(size, sizeof(void *));
173 align = calculate_alignment(flags, align, size);
174 size = ALIGN(size, align);
175 flags = kmem_cache_flags(flags, name);
176
177 if (flags & SLAB_NEVER_MERGE)
178 return NULL;
179
180 list_for_each_entry_reverse(s, &slab_caches, list) {
181 if (slab_unmergeable(s))
182 continue;
183
184 if (size > s->size)
185 continue;
186
187 if ((flags & SLAB_MERGE_SAME) != (s->flags & SLAB_MERGE_SAME))
188 continue;
189 /*
190 * Check if alignment is compatible.
191 * Courtesy of Adrian Drzewiecki
192 */
193 if ((s->size & ~(align - 1)) != s->size)
194 continue;
195
196 if (s->size - size >= sizeof(void *))
197 continue;
198
199 return s;
200 }
201 return NULL;
202}
203
204static struct kmem_cache *create_cache(const char *name,
205 unsigned int object_size, unsigned int align,
206 slab_flags_t flags, unsigned int useroffset,
207 unsigned int usersize, void (*ctor)(void *),
208 struct kmem_cache *root_cache)
209{
210 struct kmem_cache *s;
211 int err;
212
213 if (WARN_ON(useroffset + usersize > object_size))
214 useroffset = usersize = 0;
215
216 err = -ENOMEM;
217 s = kmem_cache_zalloc(kmem_cache, GFP_KERNEL);
218 if (!s)
219 goto out;
220
221 s->name = name;
222 s->size = s->object_size = object_size;
223 s->align = align;
224 s->ctor = ctor;
225#ifdef CONFIG_HARDENED_USERCOPY
226 s->useroffset = useroffset;
227 s->usersize = usersize;
228#endif
229
230 err = __kmem_cache_create(s, flags);
231 if (err)
232 goto out_free_cache;
233
234 s->refcount = 1;
235 list_add(&s->list, &slab_caches);
236 return s;
237
238out_free_cache:
239 kmem_cache_free(kmem_cache, s);
240out:
241 return ERR_PTR(err);
242}
243
244/**
245 * kmem_cache_create_usercopy - Create a cache with a region suitable
246 * for copying to userspace
247 * @name: A string which is used in /proc/slabinfo to identify this cache.
248 * @size: The size of objects to be created in this cache.
249 * @align: The required alignment for the objects.
250 * @flags: SLAB flags
251 * @useroffset: Usercopy region offset
252 * @usersize: Usercopy region size
253 * @ctor: A constructor for the objects.
254 *
 255 * Cannot be called within an interrupt, but can be interrupted.
256 * The @ctor is run when new pages are allocated by the cache.
257 *
258 * The flags are
259 *
260 * %SLAB_POISON - Poison the slab with a known test pattern (a5a5a5a5)
261 * to catch references to uninitialised memory.
262 *
263 * %SLAB_RED_ZONE - Insert `Red` zones around the allocated memory to check
264 * for buffer overruns.
265 *
266 * %SLAB_HWCACHE_ALIGN - Align the objects in this cache to a hardware
267 * cacheline. This can be beneficial if you're counting cycles as closely
268 * as davem.
269 *
270 * Return: a pointer to the cache on success, NULL on failure.
271 */
272struct kmem_cache *
273kmem_cache_create_usercopy(const char *name,
274 unsigned int size, unsigned int align,
275 slab_flags_t flags,
276 unsigned int useroffset, unsigned int usersize,
277 void (*ctor)(void *))
278{
279 struct kmem_cache *s = NULL;
280 const char *cache_name;
281 int err;
282
283#ifdef CONFIG_SLUB_DEBUG
284 /*
285 * If no slab_debug was enabled globally, the static key is not yet
286 * enabled by setup_slub_debug(). Enable it if the cache is being
287 * created with any of the debugging flags passed explicitly.
288 * It's also possible that this is the first cache created with
289 * SLAB_STORE_USER and we should init stack_depot for it.
290 */
291 if (flags & SLAB_DEBUG_FLAGS)
292 static_branch_enable(&slub_debug_enabled);
293 if (flags & SLAB_STORE_USER)
294 stack_depot_init();
295#endif
296
297 mutex_lock(&slab_mutex);
298
299 err = kmem_cache_sanity_check(name, size);
300 if (err) {
301 goto out_unlock;
302 }
303
304 /* Refuse requests with allocator specific flags */
305 if (flags & ~SLAB_FLAGS_PERMITTED) {
306 err = -EINVAL;
307 goto out_unlock;
308 }
309
310 /*
 311 * Some allocators will constrain the set of valid flags to a subset
312 * of all flags. We expect them to define CACHE_CREATE_MASK in this
313 * case, and we'll just provide them with a sanitized version of the
314 * passed flags.
315 */
316 flags &= CACHE_CREATE_MASK;
317
 318 /* Fail closed on bad usersize or useroffset values. */
319 if (!IS_ENABLED(CONFIG_HARDENED_USERCOPY) ||
320 WARN_ON(!usersize && useroffset) ||
321 WARN_ON(size < usersize || size - usersize < useroffset))
322 usersize = useroffset = 0;
323
324 if (!usersize)
325 s = __kmem_cache_alias(name, size, align, flags, ctor);
326 if (s)
327 goto out_unlock;
328
329 cache_name = kstrdup_const(name, GFP_KERNEL);
330 if (!cache_name) {
331 err = -ENOMEM;
332 goto out_unlock;
333 }
334
335 s = create_cache(cache_name, size,
336 calculate_alignment(flags, align, size),
337 flags, useroffset, usersize, ctor, NULL);
338 if (IS_ERR(s)) {
339 err = PTR_ERR(s);
340 kfree_const(cache_name);
341 }
342
343out_unlock:
344 mutex_unlock(&slab_mutex);
345
346 if (err) {
347 if (flags & SLAB_PANIC)
348 panic("%s: Failed to create slab '%s'. Error %d\n",
349 __func__, name, err);
350 else {
351 pr_warn("%s(%s) failed with error %d\n",
352 __func__, name, err);
353 dump_stack();
354 }
355 return NULL;
356 }
357 return s;
358}
359EXPORT_SYMBOL(kmem_cache_create_usercopy);
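
/*
 * Usage sketch (the struct and cache names below are illustrative, not
 * taken from this file): only the @data member is ever copied to or from
 * userspace, so the usercopy region is restricted to it and hardened
 * usercopy will reject copies that touch the lock.
 *
 *	struct blob {
 *		spinlock_t lock;
 *		unsigned char data[128];
 *	};
 *
 *	static struct kmem_cache *blob_cachep;
 *
 *	blob_cachep = kmem_cache_create_usercopy("blob", sizeof(struct blob),
 *				0, SLAB_HWCACHE_ALIGN,
 *				offsetof(struct blob, data),
 *				sizeof_field(struct blob, data), NULL);
 */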
360
361/**
362 * kmem_cache_create - Create a cache.
363 * @name: A string which is used in /proc/slabinfo to identify this cache.
364 * @size: The size of objects to be created in this cache.
365 * @align: The required alignment for the objects.
366 * @flags: SLAB flags
367 * @ctor: A constructor for the objects.
368 *
 369 * Cannot be called within an interrupt, but can be interrupted.
370 * The @ctor is run when new pages are allocated by the cache.
371 *
372 * The flags are
373 *
374 * %SLAB_POISON - Poison the slab with a known test pattern (a5a5a5a5)
375 * to catch references to uninitialised memory.
376 *
377 * %SLAB_RED_ZONE - Insert `Red` zones around the allocated memory to check
378 * for buffer overruns.
379 *
380 * %SLAB_HWCACHE_ALIGN - Align the objects in this cache to a hardware
381 * cacheline. This can be beneficial if you're counting cycles as closely
382 * as davem.
383 *
384 * Return: a pointer to the cache on success, NULL on failure.
385 */
386struct kmem_cache *
387kmem_cache_create(const char *name, unsigned int size, unsigned int align,
388 slab_flags_t flags, void (*ctor)(void *))
389{
390 return kmem_cache_create_usercopy(name, size, align, flags, 0, 0,
391 ctor);
392}
393EXPORT_SYMBOL(kmem_cache_create);
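
/*
 * Usage sketch (names are illustrative): the common pattern is to create
 * the cache once at init time and then allocate and free objects from it.
 *
 *	static struct kmem_cache *foo_cachep;
 *
 *	foo_cachep = kmem_cache_create("foo", sizeof(struct foo), 0,
 *				       SLAB_HWCACHE_ALIGN | SLAB_PANIC, NULL);
 *
 *	struct foo *f = kmem_cache_zalloc(foo_cachep, GFP_KERNEL);
 *	...
 *	kmem_cache_free(foo_cachep, f);
 */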
394
395static struct kmem_cache *kmem_buckets_cache __ro_after_init;
396
397/**
398 * kmem_buckets_create - Create a set of caches that handle dynamic sized
399 * allocations via kmem_buckets_alloc()
400 * @name: A prefix string which is used in /proc/slabinfo to identify this
 401 * cache. The individual caches will have their sizes as the suffix.
402 * @flags: SLAB flags (see kmem_cache_create() for details).
403 * @useroffset: Starting offset within an allocation that may be copied
404 * to/from userspace.
405 * @usersize: How many bytes, starting at @useroffset, may be copied
406 * to/from userspace.
407 * @ctor: A constructor for the objects, run when new allocations are made.
408 *
409 * Cannot be called within an interrupt, but can be interrupted.
410 *
411 * Return: a pointer to the cache on success, NULL on failure. When
412 * CONFIG_SLAB_BUCKETS is not enabled, ZERO_SIZE_PTR is returned, and
413 * subsequent calls to kmem_buckets_alloc() will fall back to kmalloc().
414 * (i.e. callers only need to check for NULL on failure.)
415 */
416kmem_buckets *kmem_buckets_create(const char *name, slab_flags_t flags,
417 unsigned int useroffset,
418 unsigned int usersize,
419 void (*ctor)(void *))
420{
421 kmem_buckets *b;
422 int idx;
423
424 /*
425 * When the separate buckets API is not built in, just return
426 * a non-NULL value for the kmem_buckets pointer, which will be
427 * unused when performing allocations.
428 */
429 if (!IS_ENABLED(CONFIG_SLAB_BUCKETS))
430 return ZERO_SIZE_PTR;
431
432 if (WARN_ON(!kmem_buckets_cache))
433 return NULL;
434
435 b = kmem_cache_alloc(kmem_buckets_cache, GFP_KERNEL|__GFP_ZERO);
436 if (WARN_ON(!b))
437 return NULL;
438
439 flags |= SLAB_NO_MERGE;
440
441 for (idx = 0; idx < ARRAY_SIZE(kmalloc_caches[KMALLOC_NORMAL]); idx++) {
442 char *short_size, *cache_name;
443 unsigned int cache_useroffset, cache_usersize;
444 unsigned int size;
445
446 if (!kmalloc_caches[KMALLOC_NORMAL][idx])
447 continue;
448
449 size = kmalloc_caches[KMALLOC_NORMAL][idx]->object_size;
450 if (!size)
451 continue;
452
453 short_size = strchr(kmalloc_caches[KMALLOC_NORMAL][idx]->name, '-');
454 if (WARN_ON(!short_size))
455 goto fail;
456
457 cache_name = kasprintf(GFP_KERNEL, "%s-%s", name, short_size + 1);
458 if (WARN_ON(!cache_name))
459 goto fail;
460
461 if (useroffset >= size) {
462 cache_useroffset = 0;
463 cache_usersize = 0;
464 } else {
465 cache_useroffset = useroffset;
466 cache_usersize = min(size - cache_useroffset, usersize);
467 }
468 (*b)[idx] = kmem_cache_create_usercopy(cache_name, size,
469 0, flags, cache_useroffset,
470 cache_usersize, ctor);
471 kfree(cache_name);
472 if (WARN_ON(!(*b)[idx]))
473 goto fail;
474 }
475
476 return b;
477
478fail:
479 for (idx = 0; idx < ARRAY_SIZE(kmalloc_caches[KMALLOC_NORMAL]); idx++)
480 kmem_cache_destroy((*b)[idx]);
481 kfree(b);
482
483 return NULL;
484}
485EXPORT_SYMBOL(kmem_buckets_create);
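
/*
 * Usage sketch (names are illustrative): a subsystem whose dynamically
 * sized allocations are attacker-influenced can give them a dedicated set
 * of buckets so they do not share kmalloc slabs with unrelated objects.
 *
 *	static kmem_buckets *msg_buckets;
 *
 *	msg_buckets = kmem_buckets_create("msg", SLAB_ACCOUNT, 0, 0, NULL);
 *
 *	void *p = kmem_buckets_alloc(msg_buckets, len, GFP_KERNEL);
 *	...
 *	kfree(p);
 */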
486
487#ifdef SLAB_SUPPORTS_SYSFS
488/*
489 * For a given kmem_cache, kmem_cache_destroy() should only be called
490 * once or there will be a use-after-free problem. The actual deletion
491 * and release of the kobject does not need slab_mutex or cpu_hotplug_lock
492 * protection. So they are now done without holding those locks.
493 *
494 * Note that there will be a slight delay in the deletion of sysfs files
 495 * if kmem_cache_release() is called indirectly from a work function.
496 */
497static void kmem_cache_release(struct kmem_cache *s)
498{
499 if (slab_state >= FULL) {
500 sysfs_slab_unlink(s);
501 sysfs_slab_release(s);
502 } else {
503 slab_kmem_cache_release(s);
504 }
505}
506#else
507static void kmem_cache_release(struct kmem_cache *s)
508{
509 slab_kmem_cache_release(s);
510}
511#endif
512
513static void slab_caches_to_rcu_destroy_workfn(struct work_struct *work)
514{
515 LIST_HEAD(to_destroy);
516 struct kmem_cache *s, *s2;
517
518 /*
519 * On destruction, SLAB_TYPESAFE_BY_RCU kmem_caches are put on the
520 * @slab_caches_to_rcu_destroy list. The slab pages are freed
 521 * through RCU and the associated kmem_cache is dereferenced
522 * while freeing the pages, so the kmem_caches should be freed only
523 * after the pending RCU operations are finished. As rcu_barrier()
524 * is a pretty slow operation, we batch all pending destructions
525 * asynchronously.
526 */
527 mutex_lock(&slab_mutex);
528 list_splice_init(&slab_caches_to_rcu_destroy, &to_destroy);
529 mutex_unlock(&slab_mutex);
530
531 if (list_empty(&to_destroy))
532 return;
533
534 rcu_barrier();
535
536 list_for_each_entry_safe(s, s2, &to_destroy, list) {
537 debugfs_slab_release(s);
538 kfence_shutdown_cache(s);
539 kmem_cache_release(s);
540 }
541}
542
543static int shutdown_cache(struct kmem_cache *s)
544{
545 /* free asan quarantined objects */
546 kasan_cache_shutdown(s);
547
548 if (__kmem_cache_shutdown(s) != 0)
549 return -EBUSY;
550
551 list_del(&s->list);
552
553 if (s->flags & SLAB_TYPESAFE_BY_RCU) {
554 list_add_tail(&s->list, &slab_caches_to_rcu_destroy);
555 schedule_work(&slab_caches_to_rcu_destroy_work);
556 } else {
557 kfence_shutdown_cache(s);
558 debugfs_slab_release(s);
559 }
560
561 return 0;
562}
563
564void slab_kmem_cache_release(struct kmem_cache *s)
565{
566 __kmem_cache_release(s);
567 kfree_const(s->name);
568 kmem_cache_free(kmem_cache, s);
569}
570
571void kmem_cache_destroy(struct kmem_cache *s)
572{
573 int err = -EBUSY;
574 bool rcu_set;
575
576 if (unlikely(!s) || !kasan_check_byte(s))
577 return;
578
579 cpus_read_lock();
580 mutex_lock(&slab_mutex);
581
582 rcu_set = s->flags & SLAB_TYPESAFE_BY_RCU;
583
584 s->refcount--;
585 if (s->refcount)
586 goto out_unlock;
587
588 err = shutdown_cache(s);
589 WARN(err, "%s %s: Slab cache still has objects when called from %pS",
590 __func__, s->name, (void *)_RET_IP_);
591out_unlock:
592 mutex_unlock(&slab_mutex);
593 cpus_read_unlock();
594 if (!err && !rcu_set)
595 kmem_cache_release(s);
596}
597EXPORT_SYMBOL(kmem_cache_destroy);
598
599/**
600 * kmem_cache_shrink - Shrink a cache.
601 * @cachep: The cache to shrink.
602 *
603 * Releases as many slabs as possible for a cache.
604 * To help debugging, a zero exit status indicates all slabs were released.
605 *
606 * Return: %0 if all slabs were released, non-zero otherwise
607 */
608int kmem_cache_shrink(struct kmem_cache *cachep)
609{
610 kasan_cache_shrink(cachep);
611
612 return __kmem_cache_shrink(cachep);
613}
614EXPORT_SYMBOL(kmem_cache_shrink);
615
616bool slab_is_available(void)
617{
618 return slab_state >= UP;
619}
620
621#ifdef CONFIG_PRINTK
622static void kmem_obj_info(struct kmem_obj_info *kpp, void *object, struct slab *slab)
623{
624 if (__kfence_obj_info(kpp, object, slab))
625 return;
626 __kmem_obj_info(kpp, object, slab);
627}
628
629/**
630 * kmem_dump_obj - Print available slab provenance information
631 * @object: slab object for which to find provenance information.
632 *
633 * This function uses pr_cont(), so that the caller is expected to have
634 * printed out whatever preamble is appropriate. The provenance information
635 * depends on the type of object and on how much debugging is enabled.
636 * For a slab-cache object, the fact that it is a slab object is printed,
637 * and, if available, the slab name, return address, and stack trace from
638 * the allocation and last free path of that object.
639 *
640 * Return: %true if the pointer is to a not-yet-freed object from
641 * kmalloc() or kmem_cache_alloc(), either %true or %false if the pointer
642 * is to an already-freed object, and %false otherwise.
643 */
644bool kmem_dump_obj(void *object)
645{
646 char *cp = IS_ENABLED(CONFIG_MMU) ? "" : "/vmalloc";
647 int i;
648 struct slab *slab;
649 unsigned long ptroffset;
650 struct kmem_obj_info kp = { };
651
652 /* Some arches consider ZERO_SIZE_PTR to be a valid address. */
653 if (object < (void *)PAGE_SIZE || !virt_addr_valid(object))
654 return false;
655 slab = virt_to_slab(object);
656 if (!slab)
657 return false;
658
659 kmem_obj_info(&kp, object, slab);
660 if (kp.kp_slab_cache)
661 pr_cont(" slab%s %s", cp, kp.kp_slab_cache->name);
662 else
663 pr_cont(" slab%s", cp);
664 if (is_kfence_address(object))
665 pr_cont(" (kfence)");
666 if (kp.kp_objp)
667 pr_cont(" start %px", kp.kp_objp);
668 if (kp.kp_data_offset)
669 pr_cont(" data offset %lu", kp.kp_data_offset);
670 if (kp.kp_objp) {
671 ptroffset = ((char *)object - (char *)kp.kp_objp) - kp.kp_data_offset;
672 pr_cont(" pointer offset %lu", ptroffset);
673 }
674 if (kp.kp_slab_cache && kp.kp_slab_cache->object_size)
675 pr_cont(" size %u", kp.kp_slab_cache->object_size);
676 if (kp.kp_ret)
677 pr_cont(" allocated at %pS\n", kp.kp_ret);
678 else
679 pr_cont("\n");
680 for (i = 0; i < ARRAY_SIZE(kp.kp_stack); i++) {
681 if (!kp.kp_stack[i])
682 break;
683 pr_info(" %pS\n", kp.kp_stack[i]);
684 }
685
686 if (kp.kp_free_stack[0])
687 pr_cont(" Free path:\n");
688
689 for (i = 0; i < ARRAY_SIZE(kp.kp_free_stack); i++) {
690 if (!kp.kp_free_stack[i])
691 break;
692 pr_info(" %pS\n", kp.kp_free_stack[i]);
693 }
694
695 return true;
696}
697EXPORT_SYMBOL_GPL(kmem_dump_obj);
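
/*
 * Usage sketch: the caller prints its own preamble and lets
 * kmem_dump_obj() append whatever provenance is available; a false
 * return means the pointer was not a slab object at all.
 *
 *	pr_info("suspect pointer %px:", ptr);
 *	if (!kmem_dump_obj(ptr))
 *		pr_cont(" not a slab-allocated object\n");
 */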
698#endif
699
700/* Create a cache during boot when no slab services are available yet */
701void __init create_boot_cache(struct kmem_cache *s, const char *name,
702 unsigned int size, slab_flags_t flags,
703 unsigned int useroffset, unsigned int usersize)
704{
705 int err;
706 unsigned int align = ARCH_KMALLOC_MINALIGN;
707
708 s->name = name;
709 s->size = s->object_size = size;
710
711 /*
712 * kmalloc caches guarantee alignment of at least the largest
713 * power-of-two divisor of the size. For power-of-two sizes,
714 * it is the size itself.
715 */
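	/* e.g. size 96 = 32 * 3: ffs(96) - 1 == 5, so align becomes at least 32 */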
716 if (flags & SLAB_KMALLOC)
717 align = max(align, 1U << (ffs(size) - 1));
718 s->align = calculate_alignment(flags, align, size);
719
720#ifdef CONFIG_HARDENED_USERCOPY
721 s->useroffset = useroffset;
722 s->usersize = usersize;
723#endif
724
725 err = __kmem_cache_create(s, flags);
726
727 if (err)
728 panic("Creation of kmalloc slab %s size=%u failed. Reason %d\n",
729 name, size, err);
730
731 s->refcount = -1; /* Exempt from merging for now */
732}
733
734static struct kmem_cache *__init create_kmalloc_cache(const char *name,
735 unsigned int size,
736 slab_flags_t flags)
737{
738 struct kmem_cache *s = kmem_cache_zalloc(kmem_cache, GFP_NOWAIT);
739
740 if (!s)
741 panic("Out of memory when creating slab %s\n", name);
742
743 create_boot_cache(s, name, size, flags | SLAB_KMALLOC, 0, size);
744 list_add(&s->list, &slab_caches);
745 s->refcount = 1;
746 return s;
747}
748
749kmem_buckets kmalloc_caches[NR_KMALLOC_TYPES] __ro_after_init =
750{ /* initialization for https://llvm.org/pr42570 */ };
751EXPORT_SYMBOL(kmalloc_caches);
752
753#ifdef CONFIG_RANDOM_KMALLOC_CACHES
754unsigned long random_kmalloc_seed __ro_after_init;
755EXPORT_SYMBOL(random_kmalloc_seed);
756#endif
757
758/*
 759 * Conversion table for small slab sizes / 8 to the index in the
 760 * kmalloc array. This is necessary for slabs < 192 since we have non-
 761 * power-of-two cache sizes there. The size of larger slabs can be determined using
762 * fls.
763 */
764u8 kmalloc_size_index[24] __ro_after_init = {
765 3, /* 8 */
766 4, /* 16 */
767 5, /* 24 */
768 5, /* 32 */
769 6, /* 40 */
770 6, /* 48 */
771 6, /* 56 */
772 6, /* 64 */
773 1, /* 72 */
774 1, /* 80 */
775 1, /* 88 */
776 1, /* 96 */
777 7, /* 104 */
778 7, /* 112 */
779 7, /* 120 */
780 7, /* 128 */
781 2, /* 136 */
782 2, /* 144 */
783 2, /* 152 */
784 2, /* 160 */
785 2, /* 168 */
786 2, /* 176 */
787 2, /* 184 */
788 2 /* 192 */
789};
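
/*
 * Worked example: a 40-byte request has size_index_elem(40) == 4 and
 * kmalloc_size_index[4] == 6, so it is served from the 64-byte cache;
 * a 70-byte request maps through element 8 to index 1, the 96-byte cache.
 * Requests above 192 bytes bypass this table and use fls() instead.
 */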
790
791size_t kmalloc_size_roundup(size_t size)
792{
793 if (size && size <= KMALLOC_MAX_CACHE_SIZE) {
794 /*
795 * The flags don't matter since size_index is common to all.
796 * Neither does the caller for just getting ->object_size.
797 */
798 return kmalloc_slab(size, NULL, GFP_KERNEL, 0)->object_size;
799 }
800
801 /* Above the smaller buckets, size is a multiple of page size. */
802 if (size && size <= KMALLOC_MAX_SIZE)
803 return PAGE_SIZE << get_order(size);
804
805 /*
806 * Return 'size' for 0 - kmalloc() returns ZERO_SIZE_PTR
807 * and very large size - kmalloc() may fail.
808 */
809 return size;
810
811}
812EXPORT_SYMBOL(kmalloc_size_roundup);
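
/*
 * Usage sketch: callers that track a buffer's capacity can round the
 * request up front so the bookkeeping matches what kmalloc() will
 * actually provide, instead of querying ksize() after the fact.
 *
 *	size_t alloc = kmalloc_size_roundup(needed);
 *	void *buf = kmalloc(alloc, GFP_KERNEL);
 *	if (!buf)
 *		return -ENOMEM;
 *	buf_capacity = alloc;
 */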
813
814#ifdef CONFIG_ZONE_DMA
815#define KMALLOC_DMA_NAME(sz) .name[KMALLOC_DMA] = "dma-kmalloc-" #sz,
816#else
817#define KMALLOC_DMA_NAME(sz)
818#endif
819
820#ifdef CONFIG_MEMCG
821#define KMALLOC_CGROUP_NAME(sz) .name[KMALLOC_CGROUP] = "kmalloc-cg-" #sz,
822#else
823#define KMALLOC_CGROUP_NAME(sz)
824#endif
825
826#ifndef CONFIG_SLUB_TINY
827#define KMALLOC_RCL_NAME(sz) .name[KMALLOC_RECLAIM] = "kmalloc-rcl-" #sz,
828#else
829#define KMALLOC_RCL_NAME(sz)
830#endif
831
832#ifdef CONFIG_RANDOM_KMALLOC_CACHES
833#define __KMALLOC_RANDOM_CONCAT(a, b) a ## b
834#define KMALLOC_RANDOM_NAME(N, sz) __KMALLOC_RANDOM_CONCAT(KMA_RAND_, N)(sz)
835#define KMA_RAND_1(sz) .name[KMALLOC_RANDOM_START + 1] = "kmalloc-rnd-01-" #sz,
836#define KMA_RAND_2(sz) KMA_RAND_1(sz) .name[KMALLOC_RANDOM_START + 2] = "kmalloc-rnd-02-" #sz,
837#define KMA_RAND_3(sz) KMA_RAND_2(sz) .name[KMALLOC_RANDOM_START + 3] = "kmalloc-rnd-03-" #sz,
838#define KMA_RAND_4(sz) KMA_RAND_3(sz) .name[KMALLOC_RANDOM_START + 4] = "kmalloc-rnd-04-" #sz,
839#define KMA_RAND_5(sz) KMA_RAND_4(sz) .name[KMALLOC_RANDOM_START + 5] = "kmalloc-rnd-05-" #sz,
840#define KMA_RAND_6(sz) KMA_RAND_5(sz) .name[KMALLOC_RANDOM_START + 6] = "kmalloc-rnd-06-" #sz,
841#define KMA_RAND_7(sz) KMA_RAND_6(sz) .name[KMALLOC_RANDOM_START + 7] = "kmalloc-rnd-07-" #sz,
842#define KMA_RAND_8(sz) KMA_RAND_7(sz) .name[KMALLOC_RANDOM_START + 8] = "kmalloc-rnd-08-" #sz,
843#define KMA_RAND_9(sz) KMA_RAND_8(sz) .name[KMALLOC_RANDOM_START + 9] = "kmalloc-rnd-09-" #sz,
844#define KMA_RAND_10(sz) KMA_RAND_9(sz) .name[KMALLOC_RANDOM_START + 10] = "kmalloc-rnd-10-" #sz,
845#define KMA_RAND_11(sz) KMA_RAND_10(sz) .name[KMALLOC_RANDOM_START + 11] = "kmalloc-rnd-11-" #sz,
846#define KMA_RAND_12(sz) KMA_RAND_11(sz) .name[KMALLOC_RANDOM_START + 12] = "kmalloc-rnd-12-" #sz,
847#define KMA_RAND_13(sz) KMA_RAND_12(sz) .name[KMALLOC_RANDOM_START + 13] = "kmalloc-rnd-13-" #sz,
848#define KMA_RAND_14(sz) KMA_RAND_13(sz) .name[KMALLOC_RANDOM_START + 14] = "kmalloc-rnd-14-" #sz,
849#define KMA_RAND_15(sz) KMA_RAND_14(sz) .name[KMALLOC_RANDOM_START + 15] = "kmalloc-rnd-15-" #sz,
850#else // CONFIG_RANDOM_KMALLOC_CACHES
851#define KMALLOC_RANDOM_NAME(N, sz)
852#endif
853
854#define INIT_KMALLOC_INFO(__size, __short_size) \
855{ \
856 .name[KMALLOC_NORMAL] = "kmalloc-" #__short_size, \
857 KMALLOC_RCL_NAME(__short_size) \
858 KMALLOC_CGROUP_NAME(__short_size) \
859 KMALLOC_DMA_NAME(__short_size) \
860 KMALLOC_RANDOM_NAME(RANDOM_KMALLOC_CACHES_NR, __short_size) \
861 .size = __size, \
862}
863
864/*
865 * kmalloc_info[] is to make slab_debug=,kmalloc-xx option work at boot time.
866 * kmalloc_index() supports up to 2^21=2MB, so the final entry of the table is
867 * kmalloc-2M.
868 */
869const struct kmalloc_info_struct kmalloc_info[] __initconst = {
870 INIT_KMALLOC_INFO(0, 0),
871 INIT_KMALLOC_INFO(96, 96),
872 INIT_KMALLOC_INFO(192, 192),
873 INIT_KMALLOC_INFO(8, 8),
874 INIT_KMALLOC_INFO(16, 16),
875 INIT_KMALLOC_INFO(32, 32),
876 INIT_KMALLOC_INFO(64, 64),
877 INIT_KMALLOC_INFO(128, 128),
878 INIT_KMALLOC_INFO(256, 256),
879 INIT_KMALLOC_INFO(512, 512),
880 INIT_KMALLOC_INFO(1024, 1k),
881 INIT_KMALLOC_INFO(2048, 2k),
882 INIT_KMALLOC_INFO(4096, 4k),
883 INIT_KMALLOC_INFO(8192, 8k),
884 INIT_KMALLOC_INFO(16384, 16k),
885 INIT_KMALLOC_INFO(32768, 32k),
886 INIT_KMALLOC_INFO(65536, 64k),
887 INIT_KMALLOC_INFO(131072, 128k),
888 INIT_KMALLOC_INFO(262144, 256k),
889 INIT_KMALLOC_INFO(524288, 512k),
890 INIT_KMALLOC_INFO(1048576, 1M),
891 INIT_KMALLOC_INFO(2097152, 2M)
892};
893
894/*
 895 * Patch up the kmalloc_size_index table if we have strange large alignment
896 * requirements for the kmalloc array. This is only the case for
897 * MIPS it seems. The standard arches will not generate any code here.
898 *
899 * Largest permitted alignment is 256 bytes due to the way we
900 * handle the index determination for the smaller caches.
901 *
902 * Make sure that nothing crazy happens if someone starts tinkering
903 * around with ARCH_KMALLOC_MINALIGN
904 */
905void __init setup_kmalloc_cache_index_table(void)
906{
907 unsigned int i;
908
909 BUILD_BUG_ON(KMALLOC_MIN_SIZE > 256 ||
910 !is_power_of_2(KMALLOC_MIN_SIZE));
911
912 for (i = 8; i < KMALLOC_MIN_SIZE; i += 8) {
913 unsigned int elem = size_index_elem(i);
914
915 if (elem >= ARRAY_SIZE(kmalloc_size_index))
916 break;
917 kmalloc_size_index[elem] = KMALLOC_SHIFT_LOW;
918 }
919
920 if (KMALLOC_MIN_SIZE >= 64) {
921 /*
922 * The 96 byte sized cache is not used if the alignment
 923 * is 64 bytes.
924 */
925 for (i = 64 + 8; i <= 96; i += 8)
926 kmalloc_size_index[size_index_elem(i)] = 7;
927
928 }
929
930 if (KMALLOC_MIN_SIZE >= 128) {
931 /*
932 * The 192 byte sized cache is not used if the alignment
 933 * is 128 bytes. Redirect kmalloc to use the 256 byte cache
934 * instead.
935 */
936 for (i = 128 + 8; i <= 192; i += 8)
937 kmalloc_size_index[size_index_elem(i)] = 8;
938 }
939}
940
941static unsigned int __kmalloc_minalign(void)
942{
943 unsigned int minalign = dma_get_cache_alignment();
944
945 if (IS_ENABLED(CONFIG_DMA_BOUNCE_UNALIGNED_KMALLOC) &&
946 is_swiotlb_allocated())
947 minalign = ARCH_KMALLOC_MINALIGN;
948
949 return max(minalign, arch_slab_minalign());
950}
951
952static void __init
953new_kmalloc_cache(int idx, enum kmalloc_cache_type type)
954{
955 slab_flags_t flags = 0;
956 unsigned int minalign = __kmalloc_minalign();
957 unsigned int aligned_size = kmalloc_info[idx].size;
958 int aligned_idx = idx;
959
960 if ((KMALLOC_RECLAIM != KMALLOC_NORMAL) && (type == KMALLOC_RECLAIM)) {
961 flags |= SLAB_RECLAIM_ACCOUNT;
962 } else if (IS_ENABLED(CONFIG_MEMCG) && (type == KMALLOC_CGROUP)) {
963 if (mem_cgroup_kmem_disabled()) {
964 kmalloc_caches[type][idx] = kmalloc_caches[KMALLOC_NORMAL][idx];
965 return;
966 }
967 flags |= SLAB_ACCOUNT;
968 } else if (IS_ENABLED(CONFIG_ZONE_DMA) && (type == KMALLOC_DMA)) {
969 flags |= SLAB_CACHE_DMA;
970 }
971
972#ifdef CONFIG_RANDOM_KMALLOC_CACHES
973 if (type >= KMALLOC_RANDOM_START && type <= KMALLOC_RANDOM_END)
974 flags |= SLAB_NO_MERGE;
975#endif
976
977 /*
978 * If CONFIG_MEMCG is enabled, disable cache merging for
979 * KMALLOC_NORMAL caches.
980 */
981 if (IS_ENABLED(CONFIG_MEMCG) && (type == KMALLOC_NORMAL))
982 flags |= SLAB_NO_MERGE;
983
984 if (minalign > ARCH_KMALLOC_MINALIGN) {
985 aligned_size = ALIGN(aligned_size, minalign);
986 aligned_idx = __kmalloc_index(aligned_size, false);
987 }
988
989 if (!kmalloc_caches[type][aligned_idx])
990 kmalloc_caches[type][aligned_idx] = create_kmalloc_cache(
991 kmalloc_info[aligned_idx].name[type],
992 aligned_size, flags);
993 if (idx != aligned_idx)
994 kmalloc_caches[type][idx] = kmalloc_caches[type][aligned_idx];
995}
996
997/*
998 * Create the kmalloc array. Some of the regular kmalloc arrays
999 * may already have been created because they were needed to
1000 * enable allocations for slab creation.
1001 */
1002void __init create_kmalloc_caches(void)
1003{
1004 int i;
1005 enum kmalloc_cache_type type;
1006
1007 /*
1008 * Including KMALLOC_CGROUP if CONFIG_MEMCG is defined
1009 */
1010 for (type = KMALLOC_NORMAL; type < NR_KMALLOC_TYPES; type++) {
1011 /* Caches that are NOT of the two-to-the-power-of size. */
1012 if (KMALLOC_MIN_SIZE <= 32)
1013 new_kmalloc_cache(1, type);
1014 if (KMALLOC_MIN_SIZE <= 64)
1015 new_kmalloc_cache(2, type);
1016
1017 /* Caches that are of the two-to-the-power-of size. */
1018 for (i = KMALLOC_SHIFT_LOW; i <= KMALLOC_SHIFT_HIGH; i++)
1019 new_kmalloc_cache(i, type);
1020 }
1021#ifdef CONFIG_RANDOM_KMALLOC_CACHES
1022 random_kmalloc_seed = get_random_u64();
1023#endif
1024
1025 /* Kmalloc array is now usable */
1026 slab_state = UP;
1027
1028 if (IS_ENABLED(CONFIG_SLAB_BUCKETS))
1029 kmem_buckets_cache = kmem_cache_create("kmalloc_buckets",
1030 sizeof(kmem_buckets),
1031 0, SLAB_NO_MERGE, NULL);
1032}
1033
1034/**
1035 * __ksize -- Report full size of underlying allocation
1036 * @object: pointer to the object
1037 *
1038 * This should only be used internally to query the true size of allocations.
1039 * It is not meant to be a way to discover the usable size of an allocation
1040 * after the fact. Instead, use kmalloc_size_roundup(). Using memory beyond
1041 * the originally requested allocation size may trigger KASAN, UBSAN_BOUNDS,
1042 * and/or FORTIFY_SOURCE.
1043 *
1044 * Return: size of the actual memory used by @object in bytes
1045 */
1046size_t __ksize(const void *object)
1047{
1048 struct folio *folio;
1049
1050 if (unlikely(object == ZERO_SIZE_PTR))
1051 return 0;
1052
1053 folio = virt_to_folio(object);
1054
1055 if (unlikely(!folio_test_slab(folio))) {
1056 if (WARN_ON(folio_size(folio) <= KMALLOC_MAX_CACHE_SIZE))
1057 return 0;
1058 if (WARN_ON(object != folio_address(folio)))
1059 return 0;
1060 return folio_size(folio);
1061 }
1062
1063#ifdef CONFIG_SLUB_DEBUG
1064 skip_orig_size_check(folio_slab(folio)->slab_cache, object);
1065#endif
1066
1067 return slab_ksize(folio_slab(folio)->slab_cache);
1068}
1069
1070gfp_t kmalloc_fix_flags(gfp_t flags)
1071{
1072 gfp_t invalid_mask = flags & GFP_SLAB_BUG_MASK;
1073
1074 flags &= ~GFP_SLAB_BUG_MASK;
1075 pr_warn("Unexpected gfp: %#x (%pGg). Fixing up to gfp: %#x (%pGg). Fix your code!\n",
1076 invalid_mask, &invalid_mask, flags, &flags);
1077 dump_stack();
1078
1079 return flags;
1080}
1081
1082#ifdef CONFIG_SLAB_FREELIST_RANDOM
1083/* Randomize a generic freelist */
1084static void freelist_randomize(unsigned int *list,
1085 unsigned int count)
1086{
1087 unsigned int rand;
1088 unsigned int i;
1089
1090 for (i = 0; i < count; i++)
1091 list[i] = i;
1092
1093 /* Fisher-Yates shuffle */
1094 for (i = count - 1; i > 0; i--) {
1095 rand = get_random_u32_below(i + 1);
1096 swap(list[i], list[rand]);
1097 }
1098}
1099
1100/* Create a random sequence per cache */
1101int cache_random_seq_create(struct kmem_cache *cachep, unsigned int count,
1102 gfp_t gfp)
1103{
1104
1105 if (count < 2 || cachep->random_seq)
1106 return 0;
1107
1108 cachep->random_seq = kcalloc(count, sizeof(unsigned int), gfp);
1109 if (!cachep->random_seq)
1110 return -ENOMEM;
1111
1112 freelist_randomize(cachep->random_seq, count);
1113 return 0;
1114}
1115
1116/* Destroy the per-cache random freelist sequence */
1117void cache_random_seq_destroy(struct kmem_cache *cachep)
1118{
1119 kfree(cachep->random_seq);
1120 cachep->random_seq = NULL;
1121}
1122#endif /* CONFIG_SLAB_FREELIST_RANDOM */
1123
1124#ifdef CONFIG_SLUB_DEBUG
1125#define SLABINFO_RIGHTS (0400)
1126
1127static void print_slabinfo_header(struct seq_file *m)
1128{
1129 /*
1130 * Output format version, so at least we can change it
1131 * without _too_ many complaints.
1132 */
1133 seq_puts(m, "slabinfo - version: 2.1\n");
1134 seq_puts(m, "# name <active_objs> <num_objs> <objsize> <objperslab> <pagesperslab>");
1135 seq_puts(m, " : tunables <limit> <batchcount> <sharedfactor>");
1136 seq_puts(m, " : slabdata <active_slabs> <num_slabs> <sharedavail>");
1137 seq_putc(m, '\n');
1138}
1139
1140static void *slab_start(struct seq_file *m, loff_t *pos)
1141{
1142 mutex_lock(&slab_mutex);
1143 return seq_list_start(&slab_caches, *pos);
1144}
1145
1146static void *slab_next(struct seq_file *m, void *p, loff_t *pos)
1147{
1148 return seq_list_next(p, &slab_caches, pos);
1149}
1150
1151static void slab_stop(struct seq_file *m, void *p)
1152{
1153 mutex_unlock(&slab_mutex);
1154}
1155
1156static void cache_show(struct kmem_cache *s, struct seq_file *m)
1157{
1158 struct slabinfo sinfo;
1159
1160 memset(&sinfo, 0, sizeof(sinfo));
1161 get_slabinfo(s, &sinfo);
1162
1163 seq_printf(m, "%-17s %6lu %6lu %6u %4u %4d",
1164 s->name, sinfo.active_objs, sinfo.num_objs, s->size,
1165 sinfo.objects_per_slab, (1 << sinfo.cache_order));
1166
1167 seq_printf(m, " : tunables %4u %4u %4u",
1168 sinfo.limit, sinfo.batchcount, sinfo.shared);
1169 seq_printf(m, " : slabdata %6lu %6lu %6lu",
1170 sinfo.active_slabs, sinfo.num_slabs, sinfo.shared_avail);
1171 seq_putc(m, '\n');
1172}
1173
1174static int slab_show(struct seq_file *m, void *p)
1175{
1176 struct kmem_cache *s = list_entry(p, struct kmem_cache, list);
1177
1178 if (p == slab_caches.next)
1179 print_slabinfo_header(m);
1180 cache_show(s, m);
1181 return 0;
1182}
1183
1184void dump_unreclaimable_slab(void)
1185{
1186 struct kmem_cache *s;
1187 struct slabinfo sinfo;
1188
1189 /*
1190 * Acquiring slab_mutex here is risky since we do not want to
1191 * sleep in the OOM path, but traversing the list without holding
1192 * the mutex risks a crash.
1193 * Use mutex_trylock() to protect the traversal and dump nothing
1194 * if the mutex cannot be acquired.
1195 */
1196 if (!mutex_trylock(&slab_mutex)) {
1197 pr_warn("excessive unreclaimable slab but cannot dump stats\n");
1198 return;
1199 }
1200
1201 pr_info("Unreclaimable slab info:\n");
1202 pr_info("Name Used Total\n");
1203
1204 list_for_each_entry(s, &slab_caches, list) {
1205 if (s->flags & SLAB_RECLAIM_ACCOUNT)
1206 continue;
1207
1208 get_slabinfo(s, &sinfo);
1209
1210 if (sinfo.num_objs > 0)
1211 pr_info("%-17s %10luKB %10luKB\n", s->name,
1212 (sinfo.active_objs * s->size) / 1024,
1213 (sinfo.num_objs * s->size) / 1024);
1214 }
1215 mutex_unlock(&slab_mutex);
1216}
1217
1218/*
1219 * slabinfo_op - iterator that generates /proc/slabinfo
1220 *
1221 * Output layout:
1222 * cache-name
1223 * num-active-objs
1224 * total-objs
1225 * object size
1226 * num-active-slabs
1227 * total-slabs
1228 * num-pages-per-slab
1229 * + further values on SMP and with statistics enabled
1230 */
1231static const struct seq_operations slabinfo_op = {
1232 .start = slab_start,
1233 .next = slab_next,
1234 .stop = slab_stop,
1235 .show = slab_show,
1236};
1237
1238static int slabinfo_open(struct inode *inode, struct file *file)
1239{
1240 return seq_open(file, &slabinfo_op);
1241}
1242
1243static const struct proc_ops slabinfo_proc_ops = {
1244 .proc_flags = PROC_ENTRY_PERMANENT,
1245 .proc_open = slabinfo_open,
1246 .proc_read = seq_read,
1247 .proc_lseek = seq_lseek,
1248 .proc_release = seq_release,
1249};
1250
1251static int __init slab_proc_init(void)
1252{
1253 proc_create("slabinfo", SLABINFO_RIGHTS, NULL, &slabinfo_proc_ops);
1254 return 0;
1255}
1256module_init(slab_proc_init);
1257
1258#endif /* CONFIG_SLUB_DEBUG */
1259
1260static __always_inline __realloc_size(2) void *
1261__do_krealloc(const void *p, size_t new_size, gfp_t flags)
1262{
1263 void *ret;
1264 size_t ks;
1265
1266 /* Check for double-free before calling ksize. */
1267 if (likely(!ZERO_OR_NULL_PTR(p))) {
1268 if (!kasan_check_byte(p))
1269 return NULL;
1270 ks = ksize(p);
1271 } else
1272 ks = 0;
1273
1274 /* If the object still fits, repoison it precisely. */
1275 if (ks >= new_size) {
1276 p = kasan_krealloc((void *)p, new_size, flags);
1277 return (void *)p;
1278 }
1279
1280 ret = kmalloc_node_track_caller_noprof(new_size, flags, NUMA_NO_NODE, _RET_IP_);
1281 if (ret && p) {
1282 /* Disable KASAN checks as the object's redzone is accessed. */
1283 kasan_disable_current();
1284 memcpy(ret, kasan_reset_tag(p), ks);
1285 kasan_enable_current();
1286 }
1287
1288 return ret;
1289}
1290
1291/**
1292 * krealloc - reallocate memory. The contents will remain unchanged.
1293 * @p: object to reallocate memory for.
1294 * @new_size: how many bytes of memory are required.
1295 * @flags: the type of memory to allocate.
1296 *
1297 * The contents of the object pointed to are preserved up to the
1298 * lesser of the new and old sizes (__GFP_ZERO flag is effectively ignored).
1299 * If @p is %NULL, krealloc() behaves exactly like kmalloc(). If @new_size
1300 * is 0 and @p is not a %NULL pointer, the object pointed to is freed.
1301 *
1302 * Return: pointer to the allocated memory or %NULL in case of error
1303 */
1304void *krealloc_noprof(const void *p, size_t new_size, gfp_t flags)
1305{
1306 void *ret;
1307
1308 if (unlikely(!new_size)) {
1309 kfree(p);
1310 return ZERO_SIZE_PTR;
1311 }
1312
1313 ret = __do_krealloc(p, new_size, flags);
1314 if (ret && kasan_reset_tag(p) != kasan_reset_tag(ret))
1315 kfree(p);
1316
1317 return ret;
1318}
1319EXPORT_SYMBOL(krealloc_noprof);
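
/*
 * Usage sketch: keep the old pointer until the reallocation is known to
 * have succeeded, otherwise the original buffer would leak on failure.
 *
 *	new = krealloc(buf, new_size, GFP_KERNEL);
 *	if (!new)
 *		return -ENOMEM;
 *	buf = new;
 *
 * On failure the original @buf is untouched and still owned by the caller.
 */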
1320
1321/**
1322 * kfree_sensitive - Clear sensitive information in memory before freeing
1323 * @p: object to free memory of
1324 *
1325 * The memory of the object @p points to is zeroed before freed.
1326 * If @p is %NULL, kfree_sensitive() does nothing.
1327 *
1328 * Note: this function zeroes the whole allocated buffer which can be a good
1329 * deal bigger than the requested buffer size passed to kmalloc(). So be
1330 * careful when using this function in performance sensitive code.
1331 */
1332void kfree_sensitive(const void *p)
1333{
1334 size_t ks;
1335 void *mem = (void *)p;
1336
1337 ks = ksize(mem);
1338 if (ks) {
1339 kasan_unpoison_range(mem, ks);
1340 memzero_explicit(mem, ks);
1341 }
1342 kfree(mem);
1343}
1344EXPORT_SYMBOL(kfree_sensitive);
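
/*
 * Usage sketch: buffers that held key material are scrubbed on free; note
 * that the whole slab object is zeroed, not just the bytes originally
 * requested, which is exactly why ksize() is used above.
 *
 *	u8 *key = kmalloc(key_len, GFP_KERNEL);
 *	...
 *	kfree_sensitive(key);
 */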
1345
1346size_t ksize(const void *objp)
1347{
1348 /*
1349 * We need to first check that the pointer to the object is valid.
1350 * The KASAN report printed from ksize() is more useful than one
1351 * printed later, when the behaviour could be undefined due to
1352 * a potential use-after-free or double-free.
1353 *
1354 * We use kasan_check_byte(), which is supported for the hardware
1355 * tag-based KASAN mode, unlike kasan_check_read/write().
1356 *
1357 * If the pointed to memory is invalid, we return 0 to avoid users of
1358 * ksize() writing to and potentially corrupting the memory region.
1359 *
1360 * We want to perform the check before __ksize(), to avoid potentially
1361 * crashing in __ksize() due to accessing invalid metadata.
1362 */
1363 if (unlikely(ZERO_OR_NULL_PTR(objp)) || !kasan_check_byte(objp))
1364 return 0;
1365
1366 return kfence_ksize(objp) ?: __ksize(objp);
1367}
1368EXPORT_SYMBOL(ksize);
1369
1370/* Tracepoints definitions. */
1371EXPORT_TRACEPOINT_SYMBOL(kmalloc);
1372EXPORT_TRACEPOINT_SYMBOL(kmem_cache_alloc);
1373EXPORT_TRACEPOINT_SYMBOL(kfree);
1374EXPORT_TRACEPOINT_SYMBOL(kmem_cache_free);
1375