mm/kfence: Convert kfence_guarded_alloc() to struct slab
mm/kfence/core.c

// SPDX-License-Identifier: GPL-2.0
/*
 * KFENCE guarded object allocator and fault handling.
 *
 * Copyright (C) 2020, Google LLC.
 */

#define pr_fmt(fmt) "kfence: " fmt

#include <linux/atomic.h>
#include <linux/bug.h>
#include <linux/debugfs.h>
#include <linux/hash.h>
#include <linux/irq_work.h>
#include <linux/jhash.h>
#include <linux/kcsan-checks.h>
#include <linux/kfence.h>
#include <linux/kmemleak.h>
#include <linux/list.h>
#include <linux/lockdep.h>
#include <linux/log2.h>
#include <linux/memblock.h>
#include <linux/moduleparam.h>
#include <linux/random.h>
#include <linux/rcupdate.h>
#include <linux/sched/clock.h>
#include <linux/sched/sysctl.h>
#include <linux/seq_file.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/string.h>

#include <asm/kfence.h>

#include "kfence.h"

/* Disables KFENCE on the first warning assuming an irrecoverable error. */
#define KFENCE_WARN_ON(cond)                                                   \
	({                                                                     \
		const bool __cond = WARN_ON(cond);                             \
		if (unlikely(__cond))                                          \
			WRITE_ONCE(kfence_enabled, false);                     \
		__cond;                                                        \
	})

/* === Data ================================================================= */

static bool kfence_enabled __read_mostly;

static unsigned long kfence_sample_interval __read_mostly = CONFIG_KFENCE_SAMPLE_INTERVAL;

#ifdef MODULE_PARAM_PREFIX
#undef MODULE_PARAM_PREFIX
#endif
#define MODULE_PARAM_PREFIX "kfence."

static int param_set_sample_interval(const char *val, const struct kernel_param *kp)
{
	unsigned long num;
	int ret = kstrtoul(val, 0, &num);

	if (ret < 0)
		return ret;

	if (!num) /* Using 0 to indicate KFENCE is disabled. */
		WRITE_ONCE(kfence_enabled, false);
	else if (!READ_ONCE(kfence_enabled) && system_state != SYSTEM_BOOTING)
		return -EINVAL; /* Cannot (re-)enable KFENCE on-the-fly. */

	*((unsigned long *)kp->arg) = num;
	return 0;
}

static int param_get_sample_interval(char *buffer, const struct kernel_param *kp)
{
	if (!READ_ONCE(kfence_enabled))
		return sprintf(buffer, "0\n");

	return param_get_ulong(buffer, kp);
}

static const struct kernel_param_ops sample_interval_param_ops = {
	.set = param_set_sample_interval,
	.get = param_get_sample_interval,
};
module_param_cb(sample_interval, &sample_interval_param_ops, &kfence_sample_interval, 0600);
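
/*
 * Example usage (illustrative): the sample interval can be set at boot with
 * "kfence.sample_interval=100" (milliseconds), and KFENCE can be disabled at
 * runtime via:
 *
 *   echo 0 > /sys/module/kfence/parameters/sample_interval
 *
 * Re-enabling after boot is rejected by param_set_sample_interval() above.
 */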

/* Pool usage threshold in percent, above which currently covered allocations are skipped. */
static unsigned long kfence_skip_covered_thresh __read_mostly = 75;
module_param_named(skip_covered_thresh, kfence_skip_covered_thresh, ulong, 0644);

/* The pool of pages used for guard pages and objects. */
char *__kfence_pool __ro_after_init;
EXPORT_SYMBOL(__kfence_pool); /* Export for test modules. */

/*
 * Per-object metadata, with one-to-one mapping of object metadata to
 * backing pages (in __kfence_pool).
 */
static_assert(CONFIG_KFENCE_NUM_OBJECTS > 0);
struct kfence_metadata kfence_metadata[CONFIG_KFENCE_NUM_OBJECTS];

/* Freelist with available objects. */
static struct list_head kfence_freelist = LIST_HEAD_INIT(kfence_freelist);
static DEFINE_RAW_SPINLOCK(kfence_freelist_lock); /* Lock protecting freelist. */

/*
 * The static key to set up a KFENCE allocation; or, if static keys are not
 * used to gate allocations, to avoid a load and compare if KFENCE is disabled.
 */
DEFINE_STATIC_KEY_FALSE(kfence_allocation_key);

/* Gates the allocation, ensuring only one succeeds in a given period. */
atomic_t kfence_allocation_gate = ATOMIC_INIT(1);

/*
 * A Counting Bloom filter of allocation coverage: limits currently covered
 * allocations of the same source filling up the pool.
 *
 * Assuming a range of 15%-85% unique allocations in the pool at any point in
 * time, the below parameters provide a probability of 0.02-0.33 for false
 * positive hits respectively:
 *
 *	P(alloc_traces) = (1 - e^(-HNUM * (alloc_traces / SIZE)))^HNUM
 */
#define ALLOC_COVERED_HNUM	2
#define ALLOC_COVERED_ORDER	(const_ilog2(CONFIG_KFENCE_NUM_OBJECTS) + 2)
#define ALLOC_COVERED_SIZE	(1 << ALLOC_COVERED_ORDER)
#define ALLOC_COVERED_HNEXT(h)	hash_32(h, ALLOC_COVERED_ORDER)
#define ALLOC_COVERED_MASK	(ALLOC_COVERED_SIZE - 1)
static atomic_t alloc_covered[ALLOC_COVERED_SIZE];
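
/*
 * Illustrative sizing, assuming CONFIG_KFENCE_NUM_OBJECTS == 255 (the
 * default): const_ilog2(255) == 7, so ALLOC_COVERED_ORDER == 9 and the
 * filter has ALLOC_COVERED_SIZE == 512 counters -- roughly 2x the object
 * count -- with ALLOC_COVERED_HNUM == 2 hash functions per stack trace.
 */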

/* Stack depth used to determine uniqueness of an allocation. */
#define UNIQUE_ALLOC_STACK_DEPTH ((size_t)8)

/*
 * Randomness for stack hashes, making it less likely that the same stack-hash
 * collisions recur across reboots and different machines.
 */
static u32 stack_hash_seed __ro_after_init;

/* Statistics counters for debugfs. */
enum kfence_counter_id {
	KFENCE_COUNTER_ALLOCATED,
	KFENCE_COUNTER_ALLOCS,
	KFENCE_COUNTER_FREES,
	KFENCE_COUNTER_ZOMBIES,
	KFENCE_COUNTER_BUGS,
	KFENCE_COUNTER_SKIP_INCOMPAT,
	KFENCE_COUNTER_SKIP_CAPACITY,
	KFENCE_COUNTER_SKIP_COVERED,
	KFENCE_COUNTER_COUNT,
};
static atomic_long_t counters[KFENCE_COUNTER_COUNT];
static const char *const counter_names[] = {
	[KFENCE_COUNTER_ALLOCATED]	= "currently allocated",
	[KFENCE_COUNTER_ALLOCS]		= "total allocations",
	[KFENCE_COUNTER_FREES]		= "total frees",
	[KFENCE_COUNTER_ZOMBIES]	= "zombie allocations",
	[KFENCE_COUNTER_BUGS]		= "total bugs",
	[KFENCE_COUNTER_SKIP_INCOMPAT]	= "skipped allocations (incompatible)",
	[KFENCE_COUNTER_SKIP_CAPACITY]	= "skipped allocations (capacity)",
	[KFENCE_COUNTER_SKIP_COVERED]	= "skipped allocations (covered)",
};
static_assert(ARRAY_SIZE(counter_names) == KFENCE_COUNTER_COUNT);

/* === Internals ============================================================ */

static inline bool should_skip_covered(void)
{
	unsigned long thresh = (CONFIG_KFENCE_NUM_OBJECTS * kfence_skip_covered_thresh) / 100;

	return atomic_long_read(&counters[KFENCE_COUNTER_ALLOCATED]) > thresh;
}

static u32 get_alloc_stack_hash(unsigned long *stack_entries, size_t num_entries)
{
	num_entries = min(num_entries, UNIQUE_ALLOC_STACK_DEPTH);
	num_entries = filter_irq_stacks(stack_entries, num_entries);
	return jhash(stack_entries, num_entries * sizeof(stack_entries[0]), stack_hash_seed);
}

/*
 * Adds (or subtracts) count @val for allocation stack trace hash
 * @alloc_stack_hash from Counting Bloom filter.
 */
static void alloc_covered_add(u32 alloc_stack_hash, int val)
{
	int i;

	for (i = 0; i < ALLOC_COVERED_HNUM; i++) {
		atomic_add(val, &alloc_covered[alloc_stack_hash & ALLOC_COVERED_MASK]);
		alloc_stack_hash = ALLOC_COVERED_HNEXT(alloc_stack_hash);
	}
}

/*
 * Returns true if the allocation stack trace hash @alloc_stack_hash is
 * currently contained (non-zero count) in Counting Bloom filter.
 */
static bool alloc_covered_contains(u32 alloc_stack_hash)
{
	int i;

	for (i = 0; i < ALLOC_COVERED_HNUM; i++) {
		if (!atomic_read(&alloc_covered[alloc_stack_hash & ALLOC_COVERED_MASK]))
			return false;
		alloc_stack_hash = ALLOC_COVERED_HNEXT(alloc_stack_hash);
	}

	return true;
}

static bool kfence_protect(unsigned long addr)
{
	return !KFENCE_WARN_ON(!kfence_protect_page(ALIGN_DOWN(addr, PAGE_SIZE), true));
}

static bool kfence_unprotect(unsigned long addr)
{
	return !KFENCE_WARN_ON(!kfence_protect_page(ALIGN_DOWN(addr, PAGE_SIZE), false));
}

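/*
 * Illustrative pool layout (a sketch derived from the address arithmetic
 * below, not authoritative documentation):
 *
 *   +------------+------------+----------+------------+----------+-----
 *   | guard page | guard page | object 0 | guard page | object 1 | ...
 *   +------------+------------+----------+------------+----------+-----
 *   ^ __kfence_pool           ^ page 2                ^ page 4
 *
 * Object i lives in page 2 * (i + 1); every odd-indexed page is a protected
 * guard page. Hence addr_to_metadata() divides the offset into the pool by
 * (PAGE_SIZE * 2) and subtracts 1, and metadata_to_pageaddr() inverts this.
 */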
static inline struct kfence_metadata *addr_to_metadata(unsigned long addr)
{
	long index;

	/* The checks do not affect performance; only called from slow-paths. */

	if (!is_kfence_address((void *)addr))
		return NULL;

	/*
	 * May be an invalid index if called with an address at the edge of
	 * __kfence_pool, in which case we would report an "invalid access"
	 * error.
	 */
	index = (addr - (unsigned long)__kfence_pool) / (PAGE_SIZE * 2) - 1;
	if (index < 0 || index >= CONFIG_KFENCE_NUM_OBJECTS)
		return NULL;

	return &kfence_metadata[index];
}

static inline unsigned long metadata_to_pageaddr(const struct kfence_metadata *meta)
{
	unsigned long offset = (meta - kfence_metadata + 1) * PAGE_SIZE * 2;
	unsigned long pageaddr = (unsigned long)&__kfence_pool[offset];

	/* The checks do not affect performance; only called from slow-paths. */

	/* Only call with a pointer into kfence_metadata. */
	if (KFENCE_WARN_ON(meta < kfence_metadata ||
			   meta >= kfence_metadata + CONFIG_KFENCE_NUM_OBJECTS))
		return 0;

	/*
	 * This metadata object only ever maps to 1 page; verify that the stored
	 * address is in the expected range.
	 */
	if (KFENCE_WARN_ON(ALIGN_DOWN(meta->addr, PAGE_SIZE) != pageaddr))
		return 0;

	return pageaddr;
}

/*
 * Update the object's metadata state, including updating the alloc/free stacks
 * depending on the state transition.
 */
static noinline void
metadata_update_state(struct kfence_metadata *meta, enum kfence_object_state next,
		      unsigned long *stack_entries, size_t num_stack_entries)
{
	struct kfence_track *track =
		next == KFENCE_OBJECT_FREED ? &meta->free_track : &meta->alloc_track;

	lockdep_assert_held(&meta->lock);

	if (stack_entries) {
		memcpy(track->stack_entries, stack_entries,
		       num_stack_entries * sizeof(stack_entries[0]));
	} else {
		/*
		 * Skip over 1 (this) function; noinline ensures we do not
		 * accidentally skip over the caller by never inlining.
		 */
		num_stack_entries = stack_trace_save(track->stack_entries, KFENCE_STACK_DEPTH, 1);
	}
	track->num_stack_entries = num_stack_entries;
	track->pid = task_pid_nr(current);
	track->cpu = raw_smp_processor_id();
	track->ts_nsec = local_clock(); /* Same source as printk timestamps. */

	/*
	 * Pairs with READ_ONCE() in
	 * kfence_shutdown_cache(),
	 * kfence_handle_page_fault().
	 */
	WRITE_ONCE(meta->state, next);
}

/* Write canary byte to @addr. */
static inline bool set_canary_byte(u8 *addr)
{
	*addr = KFENCE_CANARY_PATTERN(addr);
	return true;
}

/* Check canary byte at @addr. */
static inline bool check_canary_byte(u8 *addr)
{
	struct kfence_metadata *meta;
	unsigned long flags;

	if (likely(*addr == KFENCE_CANARY_PATTERN(addr)))
		return true;

	atomic_long_inc(&counters[KFENCE_COUNTER_BUGS]);

	meta = addr_to_metadata((unsigned long)addr);
	raw_spin_lock_irqsave(&meta->lock, flags);
	kfence_report_error((unsigned long)addr, false, NULL, meta, KFENCE_ERROR_CORRUPTION);
	raw_spin_unlock_irqrestore(&meta->lock, flags);

	return false;
}

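/*
 * Example (illustrative, assuming KFENCE_CANARY_PATTERN() in kfence.h derives
 * the canary from the low address bits, e.g. 0xaa ^ (addr & 0x7)): the redzone
 * bytes then cycle through aa ab a8 a9 ae af ac ad, which lets a corruption
 * report show exactly which redzone bytes were clobbered and with what.
 */
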
/* __always_inline this to ensure we won't do an indirect call to fn. */
static __always_inline void for_each_canary(const struct kfence_metadata *meta, bool (*fn)(u8 *))
{
	const unsigned long pageaddr = ALIGN_DOWN(meta->addr, PAGE_SIZE);
	unsigned long addr;

	/*
	 * We'll iterate over each canary byte per-side until fn() returns
	 * false. However, we'll still iterate over the canary bytes to the
	 * right of the object even if there was an error in the canary bytes
	 * to the left of the object: if check_canary_byte() reports an error,
	 * showing both sides gives more clues as to which bytes were
	 * corrupted and what the error is about.
	 */

	/* Apply to left of object. */
	for (addr = pageaddr; addr < meta->addr; addr++) {
		if (!fn((u8 *)addr))
			break;
	}

	/* Apply to right of object. */
	for (addr = meta->addr + meta->size; addr < pageaddr + PAGE_SIZE; addr++) {
		if (!fn((u8 *)addr))
			break;
	}
}

static void *kfence_guarded_alloc(struct kmem_cache *cache, size_t size, gfp_t gfp,
				  unsigned long *stack_entries, size_t num_stack_entries,
				  u32 alloc_stack_hash)
{
	struct kfence_metadata *meta = NULL;
	unsigned long flags;
	struct slab *slab;
	void *addr;

	/* Try to obtain a free object. */
	raw_spin_lock_irqsave(&kfence_freelist_lock, flags);
	if (!list_empty(&kfence_freelist)) {
		meta = list_entry(kfence_freelist.next, struct kfence_metadata, list);
		list_del_init(&meta->list);
	}
	raw_spin_unlock_irqrestore(&kfence_freelist_lock, flags);
	if (!meta) {
		atomic_long_inc(&counters[KFENCE_COUNTER_SKIP_CAPACITY]);
		return NULL;
	}

	if (unlikely(!raw_spin_trylock_irqsave(&meta->lock, flags))) {
		/*
		 * This is extremely unlikely -- we are reporting on a
		 * use-after-free, which locked meta->lock, and the reporting
		 * code via printk calls kmalloc() which ends up in
		 * kfence_alloc() and tries to grab the same object that we're
		 * reporting on. While it has never been observed, lockdep does
		 * report that there is a possibility of deadlock. Fix it by
		 * using trylock and bailing out gracefully.
		 */
		raw_spin_lock_irqsave(&kfence_freelist_lock, flags);
		/* Put the object back on the freelist. */
		list_add_tail(&meta->list, &kfence_freelist);
		raw_spin_unlock_irqrestore(&kfence_freelist_lock, flags);

		return NULL;
	}

	meta->addr = metadata_to_pageaddr(meta);
	/* Unprotect if we're reusing this page. */
	if (meta->state == KFENCE_OBJECT_FREED)
		kfence_unprotect(meta->addr);

	/*
	 * Note: for allocations made before RNG initialization,
	 * prandom_u32_max() will always return zero. We still benefit from
	 * enabling KFENCE as early as possible, even when the RNG is not yet
	 * available, as this will allow KFENCE to detect bugs due to earlier
	 * allocations. The only downside is that the out-of-bounds accesses
	 * detected are deterministic for such allocations.
	 */
	if (prandom_u32_max(2)) {
		/* Allocate on the "right" side, re-calculate address. */
		meta->addr += PAGE_SIZE - size;
		meta->addr = ALIGN_DOWN(meta->addr, cache->align);
	}
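
	/*
	 * Worked example (illustrative, PAGE_SIZE == 4096): for size == 120
	 * and cache->align == 64, the "right" placement above computes
	 * ALIGN_DOWN(4096 - 120, 64) == 3968 as the offset into the page,
	 * leaving 8 canary bytes between the object end and the guard page.
	 * An overflow of more than 8 bytes faults on the guard page
	 * immediately; smaller overflows are caught by the canary check on
	 * free.
	 */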

	addr = (void *)meta->addr;

	/* Update remaining metadata. */
	metadata_update_state(meta, KFENCE_OBJECT_ALLOCATED, stack_entries, num_stack_entries);
	/* Pairs with READ_ONCE() in kfence_shutdown_cache(). */
	WRITE_ONCE(meta->cache, cache);
	meta->size = size;
	meta->alloc_stack_hash = alloc_stack_hash;
	raw_spin_unlock_irqrestore(&meta->lock, flags);

	alloc_covered_add(alloc_stack_hash, 1);

	/* Set required slab fields. */
	slab = virt_to_slab((void *)meta->addr);
	slab->slab_cache = cache;
	if (IS_ENABLED(CONFIG_SLUB))
		slab->objects = 1;
	if (IS_ENABLED(CONFIG_SLAB))
		slab->s_mem = addr;

	/* Memory initialization. */
	for_each_canary(meta, set_canary_byte);

	/*
	 * We check slab_want_init_on_alloc() ourselves, rather than letting
	 * SL*B do the initialization, as otherwise we might overwrite KFENCE's
	 * redzone.
	 */
	if (unlikely(slab_want_init_on_alloc(gfp, cache)))
		memzero_explicit(addr, size);
	if (cache->ctor)
		cache->ctor(addr);

	if (CONFIG_KFENCE_STRESS_TEST_FAULTS && !prandom_u32_max(CONFIG_KFENCE_STRESS_TEST_FAULTS))
		kfence_protect(meta->addr); /* Random "faults" by protecting the object. */

	atomic_long_inc(&counters[KFENCE_COUNTER_ALLOCATED]);
	atomic_long_inc(&counters[KFENCE_COUNTER_ALLOCS]);

	return addr;
}

static void kfence_guarded_free(void *addr, struct kfence_metadata *meta, bool zombie)
{
	struct kcsan_scoped_access assert_page_exclusive;
	unsigned long flags;
	bool init;

	raw_spin_lock_irqsave(&meta->lock, flags);

	if (meta->state != KFENCE_OBJECT_ALLOCATED || meta->addr != (unsigned long)addr) {
		/* Invalid or double-free, bail out. */
		atomic_long_inc(&counters[KFENCE_COUNTER_BUGS]);
		kfence_report_error((unsigned long)addr, false, NULL, meta,
				    KFENCE_ERROR_INVALID_FREE);
		raw_spin_unlock_irqrestore(&meta->lock, flags);
		return;
	}

	/* Detect racy use-after-free, or incorrect reallocation of this page by KFENCE. */
	kcsan_begin_scoped_access((void *)ALIGN_DOWN((unsigned long)addr, PAGE_SIZE), PAGE_SIZE,
				  KCSAN_ACCESS_SCOPED | KCSAN_ACCESS_WRITE | KCSAN_ACCESS_ASSERT,
				  &assert_page_exclusive);

	if (CONFIG_KFENCE_STRESS_TEST_FAULTS)
		kfence_unprotect((unsigned long)addr); /* To check canary bytes. */

	/* Restore page protection if there was an OOB access. */
	if (meta->unprotected_page) {
		memzero_explicit((void *)ALIGN_DOWN(meta->unprotected_page, PAGE_SIZE), PAGE_SIZE);
		kfence_protect(meta->unprotected_page);
		meta->unprotected_page = 0;
	}

	/* Mark the object as freed. */
	metadata_update_state(meta, KFENCE_OBJECT_FREED, NULL, 0);
	init = slab_want_init_on_free(meta->cache);
	raw_spin_unlock_irqrestore(&meta->lock, flags);

	alloc_covered_add(meta->alloc_stack_hash, -1);

	/* Check canary bytes for memory corruption. */
	for_each_canary(meta, check_canary_byte);

	/*
	 * Clear memory if init-on-free is set. While we protect the page, the
	 * data is still there, and after a use-after-free is detected, we
	 * unprotect the page, so the data is still accessible.
	 */
	if (!zombie && unlikely(init))
		memzero_explicit(addr, meta->size);

	/* Protect to detect use-after-frees. */
	kfence_protect((unsigned long)addr);

	kcsan_end_scoped_access(&assert_page_exclusive);
	if (!zombie) {
		/* Add it to the tail of the freelist for reuse. */
		raw_spin_lock_irqsave(&kfence_freelist_lock, flags);
		KFENCE_WARN_ON(!list_empty(&meta->list));
		list_add_tail(&meta->list, &kfence_freelist);
		raw_spin_unlock_irqrestore(&kfence_freelist_lock, flags);

		atomic_long_dec(&counters[KFENCE_COUNTER_ALLOCATED]);
		atomic_long_inc(&counters[KFENCE_COUNTER_FREES]);
	} else {
		/* See kfence_shutdown_cache(). */
		atomic_long_inc(&counters[KFENCE_COUNTER_ZOMBIES]);
	}
}

static void rcu_guarded_free(struct rcu_head *h)
{
	struct kfence_metadata *meta = container_of(h, struct kfence_metadata, rcu_head);

	kfence_guarded_free((void *)meta->addr, meta, false);
}

static bool __init kfence_init_pool(void)
{
	unsigned long addr = (unsigned long)__kfence_pool;
	struct page *pages;
	int i;

	if (!__kfence_pool)
		return false;

	if (!arch_kfence_init_pool())
		goto err;

	pages = virt_to_page(addr);

	/*
	 * Set up object pages: they must have PG_slab set, to avoid freeing
	 * these as real pages.
	 *
	 * We also want to avoid inserting kfence_free() in the kfree()
	 * fast-path in SLUB, and therefore need to ensure kfree() correctly
	 * enters __slab_free() slow-path.
	 */
	for (i = 0; i < KFENCE_POOL_SIZE / PAGE_SIZE; i++) {
		if (!i || (i % 2))
			continue;

		/* Verify we do not have a compound head page. */
		if (WARN_ON(compound_head(&pages[i]) != &pages[i]))
			goto err;

		__SetPageSlab(&pages[i]);
	}

	/*
	 * Protect the first 2 pages. The first page is mostly unnecessary, and
	 * merely serves as an extended guard page. However, adding one
	 * additional page in the beginning gives us an even number of pages,
	 * which simplifies the mapping of address to metadata index.
	 */
	for (i = 0; i < 2; i++) {
		if (unlikely(!kfence_protect(addr)))
			goto err;

		addr += PAGE_SIZE;
	}

	for (i = 0; i < CONFIG_KFENCE_NUM_OBJECTS; i++) {
		struct kfence_metadata *meta = &kfence_metadata[i];

		/* Initialize metadata. */
		INIT_LIST_HEAD(&meta->list);
		raw_spin_lock_init(&meta->lock);
		meta->state = KFENCE_OBJECT_UNUSED;
		meta->addr = addr; /* Initialize for validation in metadata_to_pageaddr(). */
		list_add_tail(&meta->list, &kfence_freelist);

		/* Protect the right redzone. */
		if (unlikely(!kfence_protect(addr + PAGE_SIZE)))
			goto err;

		addr += 2 * PAGE_SIZE;
	}

	/*
	 * The pool is live and will never be deallocated from this point on.
	 * Remove the pool object from the kmemleak object tree, as it would
	 * otherwise overlap with allocations returned by kfence_alloc(), which
	 * are registered with kmemleak through the slab post-alloc hook.
	 */
	kmemleak_free(__kfence_pool);

	return true;

err:
	/*
	 * Only release unprotected pages, and do not try to go back and change
	 * page attributes due to risk of failing to do so as well. If changing
	 * page attributes for some pages fails, it is very likely that it also
	 * fails for the first page, and therefore expect addr==__kfence_pool in
	 * most failure cases.
	 */
	memblock_free_late(__pa(addr), KFENCE_POOL_SIZE - (addr - (unsigned long)__kfence_pool));
	__kfence_pool = NULL;
	return false;
}
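
/*
 * Pool sizing (illustrative, assuming KFENCE_POOL_SIZE in kfence.h is
 * (CONFIG_KFENCE_NUM_OBJECTS + 1) * 2 * PAGE_SIZE): with the default 255
 * objects and 4 KiB pages, the pool occupies 256 * 2 * 4096 bytes == 2 MiB.
 */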

/* === DebugFS Interface ==================================================== */

static int stats_show(struct seq_file *seq, void *v)
{
	int i;

	seq_printf(seq, "enabled: %i\n", READ_ONCE(kfence_enabled));
	for (i = 0; i < KFENCE_COUNTER_COUNT; i++)
		seq_printf(seq, "%s: %ld\n", counter_names[i], atomic_long_read(&counters[i]));

	return 0;
}
DEFINE_SHOW_ATTRIBUTE(stats);

/*
 * debugfs seq_file operations for /sys/kernel/debug/kfence/objects.
 * start_object() and next_object() return the object index + 1, because NULL is used
 * to stop iteration.
 */
static void *start_object(struct seq_file *seq, loff_t *pos)
{
	if (*pos < CONFIG_KFENCE_NUM_OBJECTS)
		return (void *)((long)*pos + 1);
	return NULL;
}

static void stop_object(struct seq_file *seq, void *v)
{
}

static void *next_object(struct seq_file *seq, void *v, loff_t *pos)
{
	++*pos;
	if (*pos < CONFIG_KFENCE_NUM_OBJECTS)
		return (void *)((long)*pos + 1);
	return NULL;
}

static int show_object(struct seq_file *seq, void *v)
{
	struct kfence_metadata *meta = &kfence_metadata[(long)v - 1];
	unsigned long flags;

	raw_spin_lock_irqsave(&meta->lock, flags);
	kfence_print_object(seq, meta);
	raw_spin_unlock_irqrestore(&meta->lock, flags);
	seq_puts(seq, "---------------------------------\n");

	return 0;
}

static const struct seq_operations object_seqops = {
	.start = start_object,
	.next = next_object,
	.stop = stop_object,
	.show = show_object,
};

static int open_objects(struct inode *inode, struct file *file)
{
	return seq_open(file, &object_seqops);
}

static const struct file_operations objects_fops = {
	.open = open_objects,
	.read = seq_read,
	.llseek = seq_lseek,
};

static int __init kfence_debugfs_init(void)
{
	struct dentry *kfence_dir = debugfs_create_dir("kfence", NULL);

	debugfs_create_file("stats", 0444, kfence_dir, NULL, &stats_fops);
	debugfs_create_file("objects", 0400, kfence_dir, NULL, &objects_fops);
	return 0;
}

late_initcall(kfence_debugfs_init);
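
/*
 * Example usage (illustrative): with debugfs mounted at /sys/kernel/debug,
 * counters and object state can be inspected with:
 *
 *   cat /sys/kernel/debug/kfence/stats
 *   cat /sys/kernel/debug/kfence/objects
 */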

/* === Allocation Gate Timer ================================================ */

#ifdef CONFIG_KFENCE_STATIC_KEYS
/* Wait queue to wake up allocation-gate timer task. */
static DECLARE_WAIT_QUEUE_HEAD(allocation_wait);

static void wake_up_kfence_timer(struct irq_work *work)
{
	wake_up(&allocation_wait);
}
static DEFINE_IRQ_WORK(wake_up_kfence_timer_work, wake_up_kfence_timer);
#endif

/*
 * Set up delayed work, which will enable and disable the static key. We need to
 * use a work queue (rather than a simple timer), since enabling and disabling a
 * static key cannot be done from an interrupt.
 *
 * Note: Toggling a static branch currently causes IPIs, and here we'll end up
 * with a total of 2 IPIs to all CPUs. If this ends up a problem in future (with
 * more aggressive sampling intervals), we could get away with a variant that
 * avoids IPIs, at the cost of not immediately capturing allocations if the
 * instructions remain cached.
 */
static struct delayed_work kfence_timer;
static void toggle_allocation_gate(struct work_struct *work)
{
	if (!READ_ONCE(kfence_enabled))
		return;

	atomic_set(&kfence_allocation_gate, 0);
#ifdef CONFIG_KFENCE_STATIC_KEYS
	/* Enable static key, and await allocation to happen. */
	static_branch_enable(&kfence_allocation_key);

	if (sysctl_hung_task_timeout_secs) {
		/*
		 * During low activity with no allocations we might wait a
		 * while; let's avoid the hung task warning.
		 */
		wait_event_idle_timeout(allocation_wait, atomic_read(&kfence_allocation_gate),
					sysctl_hung_task_timeout_secs * HZ / 2);
	} else {
		wait_event_idle(allocation_wait, atomic_read(&kfence_allocation_gate));
	}

	/* Disable static key and reset timer. */
	static_branch_disable(&kfence_allocation_key);
#endif
	queue_delayed_work(system_unbound_wq, &kfence_timer,
			   msecs_to_jiffies(kfence_sample_interval));
}
static DECLARE_DELAYED_WORK(kfence_timer, toggle_allocation_gate);
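
/*
 * Example (illustrative, assuming the default sample interval of 100ms): the
 * timer re-arms after each gated allocation, so at most one KFENCE allocation
 * is set up roughly every 100ms, i.e. on the order of ten guarded allocations
 * per second system-wide.
 */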

/* === Public interface ===================================================== */

void __init kfence_alloc_pool(void)
{
	if (!kfence_sample_interval)
		return;

	__kfence_pool = memblock_alloc(KFENCE_POOL_SIZE, PAGE_SIZE);

	if (!__kfence_pool)
		pr_err("failed to allocate pool\n");
}

void __init kfence_init(void)
{
	/* Setting kfence_sample_interval to 0 on boot disables KFENCE. */
	if (!kfence_sample_interval)
		return;

	stack_hash_seed = (u32)random_get_entropy();
	if (!kfence_init_pool()) {
		pr_err("%s failed\n", __func__);
		return;
	}

	if (!IS_ENABLED(CONFIG_KFENCE_STATIC_KEYS))
		static_branch_enable(&kfence_allocation_key);
	WRITE_ONCE(kfence_enabled, true);
	queue_delayed_work(system_unbound_wq, &kfence_timer, 0);
	pr_info("initialized - using %lu bytes for %d objects at 0x%p-0x%p\n", KFENCE_POOL_SIZE,
		CONFIG_KFENCE_NUM_OBJECTS, (void *)__kfence_pool,
		(void *)(__kfence_pool + KFENCE_POOL_SIZE));
}

void kfence_shutdown_cache(struct kmem_cache *s)
{
	unsigned long flags;
	struct kfence_metadata *meta;
	int i;

	for (i = 0; i < CONFIG_KFENCE_NUM_OBJECTS; i++) {
		bool in_use;

		meta = &kfence_metadata[i];

		/*
		 * If we observe some inconsistent cache and state pair where we
		 * should have returned false here, cache destruction is racing
		 * with either kmem_cache_alloc() or kmem_cache_free(). Taking
		 * the lock will not help, as different critical section
		 * serialization will have the same outcome.
		 */
		if (READ_ONCE(meta->cache) != s ||
		    READ_ONCE(meta->state) != KFENCE_OBJECT_ALLOCATED)
			continue;

		raw_spin_lock_irqsave(&meta->lock, flags);
		in_use = meta->cache == s && meta->state == KFENCE_OBJECT_ALLOCATED;
		raw_spin_unlock_irqrestore(&meta->lock, flags);

		if (in_use) {
			/*
			 * This cache still has allocations, and we should not
			 * release them back into the freelist so they can still
			 * safely be used and retain the kernel's default
			 * behaviour of keeping the allocations alive (leak the
			 * cache); however, they effectively become "zombie
			 * allocations" as the KFENCE objects are the only ones
			 * still in use and the owning cache is being destroyed.
			 *
			 * We mark them freed, so that any subsequent use shows
			 * more useful error messages that will include stack
			 * traces of the user of the object, the original
			 * allocation, and caller to shutdown_cache().
			 */
			kfence_guarded_free((void *)meta->addr, meta, /*zombie=*/true);
		}
	}

	for (i = 0; i < CONFIG_KFENCE_NUM_OBJECTS; i++) {
		meta = &kfence_metadata[i];

		/* See above. */
		if (READ_ONCE(meta->cache) != s || READ_ONCE(meta->state) != KFENCE_OBJECT_FREED)
			continue;

		raw_spin_lock_irqsave(&meta->lock, flags);
		if (meta->cache == s && meta->state == KFENCE_OBJECT_FREED)
			meta->cache = NULL;
		raw_spin_unlock_irqrestore(&meta->lock, flags);
	}
}

void *__kfence_alloc(struct kmem_cache *s, size_t size, gfp_t flags)
{
	unsigned long stack_entries[KFENCE_STACK_DEPTH];
	size_t num_stack_entries;
	u32 alloc_stack_hash;

	/*
	 * Perform size check before switching kfence_allocation_gate, so that
	 * we don't disable KFENCE without making an allocation.
	 */
	if (size > PAGE_SIZE) {
		atomic_long_inc(&counters[KFENCE_COUNTER_SKIP_INCOMPAT]);
		return NULL;
	}

	/*
	 * Skip allocations from non-default zones, including DMA. We cannot
	 * guarantee that pages in the KFENCE pool will have the requested
	 * properties (e.g. reside in DMAable memory).
	 */
	if ((flags & GFP_ZONEMASK) ||
	    (s->flags & (SLAB_CACHE_DMA | SLAB_CACHE_DMA32))) {
		atomic_long_inc(&counters[KFENCE_COUNTER_SKIP_INCOMPAT]);
		return NULL;
	}

	if (atomic_inc_return(&kfence_allocation_gate) > 1)
		return NULL;
#ifdef CONFIG_KFENCE_STATIC_KEYS
	/*
	 * waitqueue_active() is fully ordered after the update of
	 * kfence_allocation_gate per atomic_inc_return().
	 */
	if (waitqueue_active(&allocation_wait)) {
		/*
		 * Calling wake_up() here may deadlock when allocations happen
		 * from within timer code. Use an irq_work to defer it.
		 */
		irq_work_queue(&wake_up_kfence_timer_work);
	}
#endif

	if (!READ_ONCE(kfence_enabled))
		return NULL;

	num_stack_entries = stack_trace_save(stack_entries, KFENCE_STACK_DEPTH, 0);

	/*
	 * Do expensive check for coverage of allocation in slow-path after
	 * allocation_gate has already become non-zero, even though it might
	 * mean not making any allocation within a given sample interval.
	 *
	 * This ensures reasonable allocation coverage when the pool is almost
	 * full, including avoiding long-lived allocations of the same source
	 * filling up the pool (e.g. pagecache allocations).
	 */
	alloc_stack_hash = get_alloc_stack_hash(stack_entries, num_stack_entries);
	if (should_skip_covered() && alloc_covered_contains(alloc_stack_hash)) {
		atomic_long_inc(&counters[KFENCE_COUNTER_SKIP_COVERED]);
		return NULL;
	}

	return kfence_guarded_alloc(s, size, flags, stack_entries, num_stack_entries,
				    alloc_stack_hash);
}
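
/*
 * Note (illustrative): __kfence_alloc() is not called directly by slab users;
 * the allocator fast paths call the kfence_alloc() wrapper (in
 * include/linux/kfence.h), which uses kfence_allocation_key /
 * kfence_allocation_gate above to keep the common, non-sampled case cheap.
 */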

size_t kfence_ksize(const void *addr)
{
	const struct kfence_metadata *meta = addr_to_metadata((unsigned long)addr);

	/*
	 * Read locklessly -- if there is a race with __kfence_alloc(), this is
	 * either a use-after-free or invalid access.
	 */
	return meta ? meta->size : 0;
}

void *kfence_object_start(const void *addr)
{
	const struct kfence_metadata *meta = addr_to_metadata((unsigned long)addr);

	/*
	 * Read locklessly -- if there is a race with __kfence_alloc(), this is
	 * either a use-after-free or invalid access.
	 */
	return meta ? (void *)meta->addr : NULL;
}

void __kfence_free(void *addr)
{
	struct kfence_metadata *meta = addr_to_metadata((unsigned long)addr);

	/*
	 * If the objects of the cache are SLAB_TYPESAFE_BY_RCU, defer freeing
	 * the object, as the object page may be recycled for other-typed
	 * objects once it has been freed. meta->cache may be NULL if the cache
	 * was destroyed.
	 */
	if (unlikely(meta->cache && (meta->cache->flags & SLAB_TYPESAFE_BY_RCU)))
		call_rcu(&meta->rcu_head, rcu_guarded_free);
	else
		kfence_guarded_free(addr, meta, false);
}

bool kfence_handle_page_fault(unsigned long addr, bool is_write, struct pt_regs *regs)
{
	const int page_index = (addr - (unsigned long)__kfence_pool) / PAGE_SIZE;
	struct kfence_metadata *to_report = NULL;
	enum kfence_error_type error_type;
	unsigned long flags;

	if (!is_kfence_address((void *)addr))
		return false;

	if (!READ_ONCE(kfence_enabled)) /* If disabled at runtime ... */
		return kfence_unprotect(addr); /* ... unprotect and proceed. */

	atomic_long_inc(&counters[KFENCE_COUNTER_BUGS]);

	if (page_index % 2) {
		/* This is a redzone, report a buffer overflow. */
		struct kfence_metadata *meta;
		int distance = 0;

		meta = addr_to_metadata(addr - PAGE_SIZE);
		if (meta && READ_ONCE(meta->state) == KFENCE_OBJECT_ALLOCATED) {
			to_report = meta;
			/* Data race ok; distance calculation approximate. */
			distance = addr - data_race(meta->addr + meta->size);
		}

		meta = addr_to_metadata(addr + PAGE_SIZE);
		if (meta && READ_ONCE(meta->state) == KFENCE_OBJECT_ALLOCATED) {
			/* Data race ok; distance calculation approximate. */
			if (!to_report || distance > data_race(meta->addr) - addr)
				to_report = meta;
		}

		if (!to_report)
			goto out;

		raw_spin_lock_irqsave(&to_report->lock, flags);
		to_report->unprotected_page = addr;
		error_type = KFENCE_ERROR_OOB;

		/*
		 * If the object was freed before we took the lock, we can
		 * still report this as an OOB -- the report will simply show
		 * the stack trace of the free as well.
		 */
	} else {
		to_report = addr_to_metadata(addr);
		if (!to_report)
			goto out;

		raw_spin_lock_irqsave(&to_report->lock, flags);
		error_type = KFENCE_ERROR_UAF;
		/*
		 * We may race with __kfence_alloc(), and it is possible that a
		 * freed object may be reallocated. We simply report this as a
		 * use-after-free, with the stack trace showing the place where
		 * the object was re-allocated.
		 */
	}

out:
	if (to_report) {
		kfence_report_error(addr, is_write, regs, to_report, error_type);
		raw_spin_unlock_irqrestore(&to_report->lock, flags);
	} else {
		/* This may be a UAF or OOB access, but we can't be sure. */
		kfence_report_error(addr, is_write, regs, NULL, KFENCE_ERROR_INVALID);
	}

	return kfence_unprotect(addr); /* Unprotect and let access proceed. */
}
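
/*
 * Example (illustrative): for a linear overflow off the end of object i, the
 * faulting address lies in the guard page following the object, whose
 * page_index is odd; the fault is therefore classified as an out-of-bounds
 * access and attributed to the closer of the two neighbouring allocated
 * objects.
 */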