// SPDX-License-Identifier: GPL-2.0
/*
 * KFENCE guarded object allocator and fault handling.
 *
 * Copyright (C) 2020, Google LLC.
 */

#define pr_fmt(fmt) "kfence: " fmt

#include <linux/atomic.h>
#include <linux/bug.h>
#include <linux/debugfs.h>
#include <linux/hash.h>
#include <linux/irq_work.h>
#include <linux/jhash.h>
#include <linux/kcsan-checks.h>
#include <linux/kfence.h>
#include <linux/kmemleak.h>
#include <linux/list.h>
#include <linux/lockdep.h>
#include <linux/log2.h>
#include <linux/memblock.h>
#include <linux/moduleparam.h>
#include <linux/notifier.h>
#include <linux/panic_notifier.h>
#include <linux/random.h>
#include <linux/rcupdate.h>
#include <linux/sched/clock.h>
#include <linux/sched/sysctl.h>
#include <linux/seq_file.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/string.h>

#include <asm/kfence.h>

#include "kfence.h"

/* Disables KFENCE on the first warning assuming an irrecoverable error. */
#define KFENCE_WARN_ON(cond)                                                   \
	({                                                                     \
		const bool __cond = WARN_ON(cond);                             \
		if (unlikely(__cond)) {                                        \
			WRITE_ONCE(kfence_enabled, false);                     \
			disabled_by_warn = true;                               \
		}                                                              \
		__cond;                                                        \
	})

/* === Data ================================================================= */

static bool kfence_enabled __read_mostly;
static bool disabled_by_warn __read_mostly;

unsigned long kfence_sample_interval __read_mostly = CONFIG_KFENCE_SAMPLE_INTERVAL;
EXPORT_SYMBOL_GPL(kfence_sample_interval); /* Export for test modules. */

#ifdef MODULE_PARAM_PREFIX
#undef MODULE_PARAM_PREFIX
#endif
#define MODULE_PARAM_PREFIX "kfence."

static int kfence_enable_late(void);
static int param_set_sample_interval(const char *val, const struct kernel_param *kp)
{
	unsigned long num;
	int ret = kstrtoul(val, 0, &num);

	if (ret < 0)
		return ret;

	/* Using 0 to indicate KFENCE is disabled. */
	if (!num && READ_ONCE(kfence_enabled)) {
		pr_info("disabled\n");
		WRITE_ONCE(kfence_enabled, false);
	}

	*((unsigned long *)kp->arg) = num;

	if (num && !READ_ONCE(kfence_enabled) && system_state != SYSTEM_BOOTING)
		return disabled_by_warn ? -EINVAL : kfence_enable_late();
	return 0;
}

static int param_get_sample_interval(char *buffer, const struct kernel_param *kp)
{
	if (!READ_ONCE(kfence_enabled))
		return sprintf(buffer, "0\n");

	return param_get_ulong(buffer, kp);
}

static const struct kernel_param_ops sample_interval_param_ops = {
	.set = param_set_sample_interval,
	.get = param_get_sample_interval,
};
module_param_cb(sample_interval, &sample_interval_param_ops, &kfence_sample_interval, 0600);
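
/*
 * Example usage (illustrative): the sample interval can be set at boot via
 * "kfence.sample_interval=100" on the kernel command line, or at runtime via
 *   echo 100 > /sys/module/kfence/parameters/sample_interval
 * Writing 0 disables KFENCE; writing a non-zero value after boot re-enables
 * it, unless KFENCE was previously disabled by a warning.
 */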

/* Pool usage% threshold when currently covered allocations are skipped. */
static unsigned long kfence_skip_covered_thresh __read_mostly = 75;
module_param_named(skip_covered_thresh, kfence_skip_covered_thresh, ulong, 0644);

/* If true, use a deferrable timer. */
static bool kfence_deferrable __read_mostly = IS_ENABLED(CONFIG_KFENCE_DEFERRABLE);
module_param_named(deferrable, kfence_deferrable, bool, 0444);

/* If true, check all canary bytes on panic. */
static bool kfence_check_on_panic __read_mostly;
module_param_named(check_on_panic, kfence_check_on_panic, bool, 0444);

/* The pool of pages used for guard pages and objects. */
char *__kfence_pool __read_mostly;
EXPORT_SYMBOL(__kfence_pool); /* Export for test modules. */

/*
 * Per-object metadata, with one-to-one mapping of object metadata to
 * backing pages (in __kfence_pool).
 */
static_assert(CONFIG_KFENCE_NUM_OBJECTS > 0);
struct kfence_metadata kfence_metadata[CONFIG_KFENCE_NUM_OBJECTS];

/* Freelist with available objects. */
static struct list_head kfence_freelist = LIST_HEAD_INIT(kfence_freelist);
static DEFINE_RAW_SPINLOCK(kfence_freelist_lock); /* Lock protecting freelist. */

/*
 * The static key to set up a KFENCE allocation; or if static keys are not used
 * to gate allocations, to avoid a load and compare if KFENCE is disabled.
 */
DEFINE_STATIC_KEY_FALSE(kfence_allocation_key);

/* Gates the allocation, ensuring only one succeeds in a given period. */
atomic_t kfence_allocation_gate = ATOMIC_INIT(1);

/*
 * A Counting Bloom filter of allocation coverage: limits currently covered
 * allocations of the same source filling up the pool.
 *
 * Assuming a range of 15%-85% unique allocations in the pool at any point in
 * time, the below parameters provide a probability of 0.02-0.33 for false
 * positive hits respectively:
 *
 *	P(alloc_traces) = (1 - e^(-HNUM * (alloc_traces / SIZE)))^HNUM
 */
#define ALLOC_COVERED_HNUM	2
#define ALLOC_COVERED_ORDER	(const_ilog2(CONFIG_KFENCE_NUM_OBJECTS) + 2)
#define ALLOC_COVERED_SIZE	(1 << ALLOC_COVERED_ORDER)
#define ALLOC_COVERED_HNEXT(h)	hash_32(h, ALLOC_COVERED_ORDER)
#define ALLOC_COVERED_MASK	(ALLOC_COVERED_SIZE - 1)
static atomic_t alloc_covered[ALLOC_COVERED_SIZE];
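
/*
 * Worked example (illustrative, assuming the default CONFIG_KFENCE_NUM_OBJECTS
 * of 255): ALLOC_COVERED_ORDER = const_ilog2(255) + 2 = 9, so SIZE = 512. With
 * ~38 unique allocations in the pool (15%), the false-positive probability is
 * (1 - e^(-2 * 38/512))^2 ~= 0.02; with ~217 (85%) it is
 * (1 - e^(-2 * 217/512))^2 ~= 0.33, matching the range quoted above.
 */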

/* Stack depth used to determine uniqueness of an allocation. */
#define UNIQUE_ALLOC_STACK_DEPTH ((size_t)8)

/*
 * Randomness for stack hashes, making the same collisions across reboots and
 * different machines less likely.
 */
static u32 stack_hash_seed __ro_after_init;

/* Statistics counters for debugfs. */
enum kfence_counter_id {
	KFENCE_COUNTER_ALLOCATED,
	KFENCE_COUNTER_ALLOCS,
	KFENCE_COUNTER_FREES,
	KFENCE_COUNTER_ZOMBIES,
	KFENCE_COUNTER_BUGS,
	KFENCE_COUNTER_SKIP_INCOMPAT,
	KFENCE_COUNTER_SKIP_CAPACITY,
	KFENCE_COUNTER_SKIP_COVERED,
	KFENCE_COUNTER_COUNT,
};
static atomic_long_t counters[KFENCE_COUNTER_COUNT];
static const char *const counter_names[] = {
	[KFENCE_COUNTER_ALLOCATED]	= "currently allocated",
	[KFENCE_COUNTER_ALLOCS]		= "total allocations",
	[KFENCE_COUNTER_FREES]		= "total frees",
	[KFENCE_COUNTER_ZOMBIES]	= "zombie allocations",
	[KFENCE_COUNTER_BUGS]		= "total bugs",
	[KFENCE_COUNTER_SKIP_INCOMPAT]	= "skipped allocations (incompatible)",
	[KFENCE_COUNTER_SKIP_CAPACITY]	= "skipped allocations (capacity)",
	[KFENCE_COUNTER_SKIP_COVERED]	= "skipped allocations (covered)",
};
static_assert(ARRAY_SIZE(counter_names) == KFENCE_COUNTER_COUNT);

/* === Internals ============================================================ */

static inline bool should_skip_covered(void)
{
	unsigned long thresh = (CONFIG_KFENCE_NUM_OBJECTS * kfence_skip_covered_thresh) / 100;

	return atomic_long_read(&counters[KFENCE_COUNTER_ALLOCATED]) > thresh;
}
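
/*
 * For example (illustrative, assuming the default CONFIG_KFENCE_NUM_OBJECTS of
 * 255 and skip_covered_thresh of 75): covered allocations are skipped once
 * more than 191 objects are currently allocated from the pool.
 */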

static u32 get_alloc_stack_hash(unsigned long *stack_entries, size_t num_entries)
{
	num_entries = min(num_entries, UNIQUE_ALLOC_STACK_DEPTH);
	num_entries = filter_irq_stacks(stack_entries, num_entries);
	return jhash(stack_entries, num_entries * sizeof(stack_entries[0]), stack_hash_seed);
}

/*
 * Adds (or subtracts) count @val for allocation stack trace hash
 * @alloc_stack_hash from Counting Bloom filter.
 */
static void alloc_covered_add(u32 alloc_stack_hash, int val)
{
	int i;

	for (i = 0; i < ALLOC_COVERED_HNUM; i++) {
		atomic_add(val, &alloc_covered[alloc_stack_hash & ALLOC_COVERED_MASK]);
		alloc_stack_hash = ALLOC_COVERED_HNEXT(alloc_stack_hash);
	}
}

/*
 * Returns true if the allocation stack trace hash @alloc_stack_hash is
 * currently contained (non-zero count) in Counting Bloom filter.
 */
static bool alloc_covered_contains(u32 alloc_stack_hash)
{
	int i;

	for (i = 0; i < ALLOC_COVERED_HNUM; i++) {
		if (!atomic_read(&alloc_covered[alloc_stack_hash & ALLOC_COVERED_MASK]))
			return false;
		alloc_stack_hash = ALLOC_COVERED_HNEXT(alloc_stack_hash);
	}

	return true;
}

static bool kfence_protect(unsigned long addr)
{
	return !KFENCE_WARN_ON(!kfence_protect_page(ALIGN_DOWN(addr, PAGE_SIZE), true));
}

static bool kfence_unprotect(unsigned long addr)
{
	return !KFENCE_WARN_ON(!kfence_protect_page(ALIGN_DOWN(addr, PAGE_SIZE), false));
}

static inline unsigned long metadata_to_pageaddr(const struct kfence_metadata *meta)
{
	unsigned long offset = (meta - kfence_metadata + 1) * PAGE_SIZE * 2;
	unsigned long pageaddr = (unsigned long)&__kfence_pool[offset];

	/* The checks do not affect performance; only called from slow-paths. */

	/* Only call with a pointer into kfence_metadata. */
	if (KFENCE_WARN_ON(meta < kfence_metadata ||
			   meta >= kfence_metadata + CONFIG_KFENCE_NUM_OBJECTS))
		return 0;

	/*
	 * This metadata object only ever maps to 1 page; verify that the stored
	 * address is in the expected range.
	 */
	if (KFENCE_WARN_ON(ALIGN_DOWN(meta->addr, PAGE_SIZE) != pageaddr))
		return 0;

	return pageaddr;
}
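
/*
 * Pool layout sketch (illustrative): the first two pages are guard pages, and
 * object i's data page sits at __kfence_pool + (2 * i + 2) * PAGE_SIZE, with a
 * guard page on either side of it:
 *
 *   | guard | guard | obj 0 | guard | obj 1 | guard | ... | obj N-1 | guard |
 *
 * which is what the "(meta - kfence_metadata + 1) * PAGE_SIZE * 2" offset
 * above encodes.
 */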

/*
 * Update the object's metadata state, including updating the alloc/free stacks
 * depending on the state transition.
 */
static noinline void
metadata_update_state(struct kfence_metadata *meta, enum kfence_object_state next,
		      unsigned long *stack_entries, size_t num_stack_entries)
{
	struct kfence_track *track =
		next == KFENCE_OBJECT_FREED ? &meta->free_track : &meta->alloc_track;

	lockdep_assert_held(&meta->lock);

	if (stack_entries) {
		memcpy(track->stack_entries, stack_entries,
		       num_stack_entries * sizeof(stack_entries[0]));
	} else {
		/*
		 * Skip over 1 (this) function; noinline ensures we do not
		 * accidentally skip over the caller by never inlining.
		 */
		num_stack_entries = stack_trace_save(track->stack_entries, KFENCE_STACK_DEPTH, 1);
	}
	track->num_stack_entries = num_stack_entries;
	track->pid = task_pid_nr(current);
	track->cpu = raw_smp_processor_id();
	track->ts_nsec = local_clock(); /* Same source as printk timestamps. */

	/*
	 * Pairs with READ_ONCE() in
	 *	kfence_shutdown_cache(),
	 *	kfence_handle_page_fault().
	 */
	WRITE_ONCE(meta->state, next);
}

/* Write canary byte to @addr. */
static inline bool set_canary_byte(u8 *addr)
{
	*addr = KFENCE_CANARY_PATTERN(addr);
	return true;
}

/* Check canary byte at @addr. */
static inline bool check_canary_byte(u8 *addr)
{
	struct kfence_metadata *meta;
	unsigned long flags;

	if (likely(*addr == KFENCE_CANARY_PATTERN(addr)))
		return true;

	atomic_long_inc(&counters[KFENCE_COUNTER_BUGS]);

	meta = addr_to_metadata((unsigned long)addr);
	raw_spin_lock_irqsave(&meta->lock, flags);
	kfence_report_error((unsigned long)addr, false, NULL, meta, KFENCE_ERROR_CORRUPTION);
	raw_spin_unlock_irqrestore(&meta->lock, flags);

	return false;
}
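
/*
 * Note (informational): KFENCE_CANARY_PATTERN, defined in kfence.h, derives
 * the expected canary value from the low bits of the byte's address, so
 * neighbouring canary bytes differ from one another; this makes it easier for
 * the corruption report to show exactly which bytes were overwritten.
 */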

/* __always_inline this to ensure we won't do an indirect call to fn. */
static __always_inline void for_each_canary(const struct kfence_metadata *meta, bool (*fn)(u8 *))
{
	const unsigned long pageaddr = ALIGN_DOWN(meta->addr, PAGE_SIZE);
	unsigned long addr;

	/*
	 * We'll iterate over each canary byte per-side until fn() returns
	 * false. However, we'll still iterate over the canary bytes to the
	 * right of the object even if there was an error in the canary bytes to
	 * the left of the object. Specifically, if check_canary_byte()
	 * generates an error, showing both sides might give more clues as to
	 * what the error is about when displaying which bytes were corrupted.
	 */

	/* Apply to left of object. */
	for (addr = pageaddr; addr < meta->addr; addr++) {
		if (!fn((u8 *)addr))
			break;
	}

	/* Apply to right of object. */
	for (addr = meta->addr + meta->size; addr < pageaddr + PAGE_SIZE; addr++) {
		if (!fn((u8 *)addr))
			break;
	}
}

static void *kfence_guarded_alloc(struct kmem_cache *cache, size_t size, gfp_t gfp,
				  unsigned long *stack_entries, size_t num_stack_entries,
				  u32 alloc_stack_hash)
{
	struct kfence_metadata *meta = NULL;
	unsigned long flags;
	struct slab *slab;
	void *addr;

	/* Try to obtain a free object. */
	raw_spin_lock_irqsave(&kfence_freelist_lock, flags);
	if (!list_empty(&kfence_freelist)) {
		meta = list_entry(kfence_freelist.next, struct kfence_metadata, list);
		list_del_init(&meta->list);
	}
	raw_spin_unlock_irqrestore(&kfence_freelist_lock, flags);
	if (!meta) {
		atomic_long_inc(&counters[KFENCE_COUNTER_SKIP_CAPACITY]);
		return NULL;
	}

	if (unlikely(!raw_spin_trylock_irqsave(&meta->lock, flags))) {
		/*
		 * This is extremely unlikely -- we are reporting on a
		 * use-after-free, which locked meta->lock, and the reporting
		 * code via printk calls kmalloc() which ends up in
		 * kfence_alloc() and tries to grab the same object that we're
		 * reporting on. While it has never been observed, lockdep does
		 * report that there is a possibility of deadlock. Fix it by
		 * using trylock and bailing out gracefully.
		 */
		raw_spin_lock_irqsave(&kfence_freelist_lock, flags);
		/* Put the object back on the freelist. */
		list_add_tail(&meta->list, &kfence_freelist);
		raw_spin_unlock_irqrestore(&kfence_freelist_lock, flags);

		return NULL;
	}

	meta->addr = metadata_to_pageaddr(meta);
	/* Unprotect if we're reusing this page. */
	if (meta->state == KFENCE_OBJECT_FREED)
		kfence_unprotect(meta->addr);

	/*
	 * Note: for allocations made before RNG initialization, prandom_u32_max()
	 * will always return zero. We still benefit from enabling KFENCE as early
	 * as possible, even when the RNG is not yet available, as this will allow
	 * KFENCE to detect bugs due to earlier allocations. The only downside is
	 * that the out-of-bounds accesses detected are deterministic for such
	 * allocations.
	 */
	if (prandom_u32_max(2)) {
		/* Allocate on the "right" side, re-calculate address. */
		meta->addr += PAGE_SIZE - size;
		meta->addr = ALIGN_DOWN(meta->addr, cache->align);
	}

	addr = (void *)meta->addr;

	/* Update remaining metadata. */
	metadata_update_state(meta, KFENCE_OBJECT_ALLOCATED, stack_entries, num_stack_entries);
	/* Pairs with READ_ONCE() in kfence_shutdown_cache(). */
	WRITE_ONCE(meta->cache, cache);
	meta->size = size;
	meta->alloc_stack_hash = alloc_stack_hash;
	raw_spin_unlock_irqrestore(&meta->lock, flags);

	alloc_covered_add(alloc_stack_hash, 1);

	/* Set required slab fields. */
	slab = virt_to_slab((void *)meta->addr);
	slab->slab_cache = cache;
#if defined(CONFIG_SLUB)
	slab->objects = 1;
#elif defined(CONFIG_SLAB)
	slab->s_mem = addr;
#endif

	/* Memory initialization. */
	for_each_canary(meta, set_canary_byte);

	/*
	 * We check slab_want_init_on_alloc() ourselves, rather than letting
	 * SL*B do the initialization, as otherwise we might overwrite KFENCE's
	 * redzone.
	 */
	if (unlikely(slab_want_init_on_alloc(gfp, cache)))
		memzero_explicit(addr, size);
	if (cache->ctor)
		cache->ctor(addr);

	if (CONFIG_KFENCE_STRESS_TEST_FAULTS && !prandom_u32_max(CONFIG_KFENCE_STRESS_TEST_FAULTS))
		kfence_protect(meta->addr); /* Random "faults" by protecting the object. */

	atomic_long_inc(&counters[KFENCE_COUNTER_ALLOCATED]);
	atomic_long_inc(&counters[KFENCE_COUNTER_ALLOCS]);

	return addr;
}

static void kfence_guarded_free(void *addr, struct kfence_metadata *meta, bool zombie)
{
	struct kcsan_scoped_access assert_page_exclusive;
	unsigned long flags;
	bool init;

	raw_spin_lock_irqsave(&meta->lock, flags);

	if (meta->state != KFENCE_OBJECT_ALLOCATED || meta->addr != (unsigned long)addr) {
		/* Invalid or double-free, bail out. */
		atomic_long_inc(&counters[KFENCE_COUNTER_BUGS]);
		kfence_report_error((unsigned long)addr, false, NULL, meta,
				    KFENCE_ERROR_INVALID_FREE);
		raw_spin_unlock_irqrestore(&meta->lock, flags);
		return;
	}

	/* Detect racy use-after-free, or incorrect reallocation of this page by KFENCE. */
	kcsan_begin_scoped_access((void *)ALIGN_DOWN((unsigned long)addr, PAGE_SIZE), PAGE_SIZE,
				  KCSAN_ACCESS_SCOPED | KCSAN_ACCESS_WRITE | KCSAN_ACCESS_ASSERT,
				  &assert_page_exclusive);

	if (CONFIG_KFENCE_STRESS_TEST_FAULTS)
		kfence_unprotect((unsigned long)addr); /* To check canary bytes. */

	/* Restore page protection if there was an OOB access. */
	if (meta->unprotected_page) {
		memzero_explicit((void *)ALIGN_DOWN(meta->unprotected_page, PAGE_SIZE), PAGE_SIZE);
		kfence_protect(meta->unprotected_page);
		meta->unprotected_page = 0;
	}

	/* Mark the object as freed. */
	metadata_update_state(meta, KFENCE_OBJECT_FREED, NULL, 0);
	init = slab_want_init_on_free(meta->cache);
	raw_spin_unlock_irqrestore(&meta->lock, flags);

	alloc_covered_add(meta->alloc_stack_hash, -1);

	/* Check canary bytes for memory corruption. */
	for_each_canary(meta, check_canary_byte);

	/*
	 * Clear memory if init-on-free is set. While we protect the page, the
	 * data is still there, and after a use-after-free is detected, we
	 * unprotect the page, so the data is still accessible.
	 */
	if (!zombie && unlikely(init))
		memzero_explicit(addr, meta->size);

	/* Protect to detect use-after-frees. */
	kfence_protect((unsigned long)addr);

	kcsan_end_scoped_access(&assert_page_exclusive);
	if (!zombie) {
		/* Add it to the tail of the freelist for reuse. */
		raw_spin_lock_irqsave(&kfence_freelist_lock, flags);
		KFENCE_WARN_ON(!list_empty(&meta->list));
		list_add_tail(&meta->list, &kfence_freelist);
		raw_spin_unlock_irqrestore(&kfence_freelist_lock, flags);

		atomic_long_dec(&counters[KFENCE_COUNTER_ALLOCATED]);
		atomic_long_inc(&counters[KFENCE_COUNTER_FREES]);
	} else {
		/* See kfence_shutdown_cache(). */
		atomic_long_inc(&counters[KFENCE_COUNTER_ZOMBIES]);
	}
}

static void rcu_guarded_free(struct rcu_head *h)
{
	struct kfence_metadata *meta = container_of(h, struct kfence_metadata, rcu_head);

	kfence_guarded_free((void *)meta->addr, meta, false);
}

/*
 * Initialization of the KFENCE pool after its allocation.
 * Returns 0 on success; otherwise returns the address up to
 * which partial initialization succeeded.
 */
static unsigned long kfence_init_pool(void)
{
	unsigned long addr = (unsigned long)__kfence_pool;
	struct page *pages;
	int i;

	if (!arch_kfence_init_pool())
		return addr;

	pages = virt_to_page(addr);

	/*
	 * Set up object pages: they must have PG_slab set, to avoid freeing
	 * these as real pages.
	 *
	 * We also want to avoid inserting kfence_free() in the kfree()
	 * fast-path in SLUB, and therefore need to ensure kfree() correctly
	 * enters __slab_free() slow-path.
	 */
	for (i = 0; i < KFENCE_POOL_SIZE / PAGE_SIZE; i++) {
		struct slab *slab = page_slab(&pages[i]);

		if (!i || (i % 2))
			continue;

		/* Verify we do not have a compound head page. */
		if (WARN_ON(compound_head(&pages[i]) != &pages[i]))
			return addr;

		__folio_set_slab(slab_folio(slab));
#ifdef CONFIG_MEMCG
		slab->memcg_data = (unsigned long)&kfence_metadata[i / 2 - 1].objcg |
				   MEMCG_DATA_OBJCGS;
#endif
	}

	/*
	 * Protect the first 2 pages. The first page is mostly unnecessary, and
	 * merely serves as an extended guard page. However, adding one
	 * additional page in the beginning gives us an even number of pages,
	 * which simplifies the mapping of address to metadata index.
	 */
	for (i = 0; i < 2; i++) {
		if (unlikely(!kfence_protect(addr)))
			return addr;

		addr += PAGE_SIZE;
	}

	for (i = 0; i < CONFIG_KFENCE_NUM_OBJECTS; i++) {
		struct kfence_metadata *meta = &kfence_metadata[i];

		/* Initialize metadata. */
		INIT_LIST_HEAD(&meta->list);
		raw_spin_lock_init(&meta->lock);
		meta->state = KFENCE_OBJECT_UNUSED;
		meta->addr = addr; /* Initialize for validation in metadata_to_pageaddr(). */
		list_add_tail(&meta->list, &kfence_freelist);

		/* Protect the right redzone. */
		if (unlikely(!kfence_protect(addr + PAGE_SIZE)))
			return addr;

		addr += 2 * PAGE_SIZE;
	}

	/*
	 * The pool is live and will never be deallocated from this point on.
	 * Remove the pool object from the kmemleak object tree, as it would
	 * otherwise overlap with allocations returned by kfence_alloc(), which
	 * are registered with kmemleak through the slab post-alloc hook.
	 */
	kmemleak_free(__kfence_pool);

	return 0;
}

static bool __init kfence_init_pool_early(void)
{
	unsigned long addr;

	if (!__kfence_pool)
		return false;

	addr = kfence_init_pool();

	if (!addr)
		return true;

	/*
	 * Only release unprotected pages, and do not try to go back and change
	 * page attributes due to risk of failing to do so as well. If changing
	 * page attributes for some pages fails, it is very likely that it also
	 * fails for the first page, and therefore expect addr==__kfence_pool in
	 * most failure cases.
	 */
	for (char *p = (char *)addr; p < __kfence_pool + KFENCE_POOL_SIZE; p += PAGE_SIZE) {
		struct slab *slab = virt_to_slab(p);

		if (!slab)
			continue;
#ifdef CONFIG_MEMCG
		slab->memcg_data = 0;
#endif
		__folio_clear_slab(slab_folio(slab));
	}
	memblock_free_late(__pa(addr), KFENCE_POOL_SIZE - (addr - (unsigned long)__kfence_pool));
	__kfence_pool = NULL;
	return false;
}

static bool kfence_init_pool_late(void)
{
	unsigned long addr, free_size;

	addr = kfence_init_pool();

	if (!addr)
		return true;

	/* Same as above. */
	free_size = KFENCE_POOL_SIZE - (addr - (unsigned long)__kfence_pool);
#ifdef CONFIG_CONTIG_ALLOC
	free_contig_range(page_to_pfn(virt_to_page(addr)), free_size / PAGE_SIZE);
#else
	free_pages_exact((void *)addr, free_size);
#endif
	__kfence_pool = NULL;
	return false;
}

/* === DebugFS Interface ==================================================== */

static int stats_show(struct seq_file *seq, void *v)
{
	int i;

	seq_printf(seq, "enabled: %i\n", READ_ONCE(kfence_enabled));
	for (i = 0; i < KFENCE_COUNTER_COUNT; i++)
		seq_printf(seq, "%s: %ld\n", counter_names[i], atomic_long_read(&counters[i]));

	return 0;
}
DEFINE_SHOW_ATTRIBUTE(stats);

/*
 * debugfs seq_file operations for /sys/kernel/debug/kfence/objects.
 * start_object() and next_object() return the object index + 1, because NULL is used
 * to stop iteration.
 */
static void *start_object(struct seq_file *seq, loff_t *pos)
{
	if (*pos < CONFIG_KFENCE_NUM_OBJECTS)
		return (void *)((long)*pos + 1);
	return NULL;
}

static void stop_object(struct seq_file *seq, void *v)
{
}

static void *next_object(struct seq_file *seq, void *v, loff_t *pos)
{
	++*pos;
	if (*pos < CONFIG_KFENCE_NUM_OBJECTS)
		return (void *)((long)*pos + 1);
	return NULL;
}

static int show_object(struct seq_file *seq, void *v)
{
	struct kfence_metadata *meta = &kfence_metadata[(long)v - 1];
	unsigned long flags;

	raw_spin_lock_irqsave(&meta->lock, flags);
	kfence_print_object(seq, meta);
	raw_spin_unlock_irqrestore(&meta->lock, flags);
	seq_puts(seq, "---------------------------------\n");

	return 0;
}

static const struct seq_operations object_seqops = {
	.start = start_object,
	.next = next_object,
	.stop = stop_object,
	.show = show_object,
};

static int open_objects(struct inode *inode, struct file *file)
{
	return seq_open(file, &object_seqops);
}

static const struct file_operations objects_fops = {
	.open = open_objects,
	.read = seq_read,
	.llseek = seq_lseek,
	.release = seq_release,
};

static int __init kfence_debugfs_init(void)
{
	struct dentry *kfence_dir = debugfs_create_dir("kfence", NULL);

	debugfs_create_file("stats", 0444, kfence_dir, NULL, &stats_fops);
	debugfs_create_file("objects", 0400, kfence_dir, NULL, &objects_fops);
	return 0;
}

late_initcall(kfence_debugfs_init);

/* === Panic Notifier ====================================================== */

static void kfence_check_all_canary(void)
{
	int i;

	for (i = 0; i < CONFIG_KFENCE_NUM_OBJECTS; i++) {
		struct kfence_metadata *meta = &kfence_metadata[i];

		if (meta->state == KFENCE_OBJECT_ALLOCATED)
			for_each_canary(meta, check_canary_byte);
	}
}

static int kfence_check_canary_callback(struct notifier_block *nb,
					unsigned long reason, void *arg)
{
	kfence_check_all_canary();
	return NOTIFY_OK;
}

static struct notifier_block kfence_check_canary_notifier = {
	.notifier_call = kfence_check_canary_callback,
};

/* === Allocation Gate Timer ================================================ */

static struct delayed_work kfence_timer;

#ifdef CONFIG_KFENCE_STATIC_KEYS
/* Wait queue to wake up allocation-gate timer task. */
static DECLARE_WAIT_QUEUE_HEAD(allocation_wait);

static void wake_up_kfence_timer(struct irq_work *work)
{
	wake_up(&allocation_wait);
}
static DEFINE_IRQ_WORK(wake_up_kfence_timer_work, wake_up_kfence_timer);
#endif

/*
 * Set up delayed work, which will enable and disable the static key. We need to
 * use a work queue (rather than a simple timer), since enabling and disabling a
 * static key cannot be done from an interrupt.
 *
 * Note: Toggling a static branch currently causes IPIs, and here we'll end up
 * with a total of 2 IPIs to all CPUs. If this ends up a problem in future (with
 * more aggressive sampling intervals), we could get away with a variant that
 * avoids IPIs, at the cost of not immediately capturing allocations if the
 * instructions remain cached.
 */
static void toggle_allocation_gate(struct work_struct *work)
{
	if (!READ_ONCE(kfence_enabled))
		return;

	atomic_set(&kfence_allocation_gate, 0);
#ifdef CONFIG_KFENCE_STATIC_KEYS
	/* Enable static key, and await allocation to happen. */
	static_branch_enable(&kfence_allocation_key);

	if (sysctl_hung_task_timeout_secs) {
		/*
		 * During low activity with no allocations we might wait a
		 * while; let's avoid the hung task warning.
		 */
		wait_event_idle_timeout(allocation_wait, atomic_read(&kfence_allocation_gate),
					sysctl_hung_task_timeout_secs * HZ / 2);
	} else {
		wait_event_idle(allocation_wait, atomic_read(&kfence_allocation_gate));
	}

	/* Disable static key and reset timer. */
	static_branch_disable(&kfence_allocation_key);
#endif
	queue_delayed_work(system_unbound_wq, &kfence_timer,
			   msecs_to_jiffies(kfence_sample_interval));
}
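
/*
 * Illustrative timing (assuming the default CONFIG_KFENCE_SAMPLE_INTERVAL of
 * 100ms): the work above re-arms itself roughly ten times per second, and each
 * period admits at most one guarded allocation through kfence_allocation_gate.
 * Actual behaviour depends on kfence.sample_interval and system activity.
 */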

/* === Public interface ===================================================== */

void __init kfence_alloc_pool(void)
{
	if (!kfence_sample_interval)
		return;

	__kfence_pool = memblock_alloc(KFENCE_POOL_SIZE, PAGE_SIZE);

	if (!__kfence_pool)
		pr_err("failed to allocate pool\n");
}

static void kfence_init_enable(void)
{
	if (!IS_ENABLED(CONFIG_KFENCE_STATIC_KEYS))
		static_branch_enable(&kfence_allocation_key);

	if (kfence_deferrable)
		INIT_DEFERRABLE_WORK(&kfence_timer, toggle_allocation_gate);
	else
		INIT_DELAYED_WORK(&kfence_timer, toggle_allocation_gate);

	if (kfence_check_on_panic)
		atomic_notifier_chain_register(&panic_notifier_list, &kfence_check_canary_notifier);

	WRITE_ONCE(kfence_enabled, true);
	queue_delayed_work(system_unbound_wq, &kfence_timer, 0);

	pr_info("initialized - using %lu bytes for %d objects at 0x%p-0x%p\n", KFENCE_POOL_SIZE,
		CONFIG_KFENCE_NUM_OBJECTS, (void *)__kfence_pool,
		(void *)(__kfence_pool + KFENCE_POOL_SIZE));
}

void __init kfence_init(void)
{
	stack_hash_seed = (u32)random_get_entropy();

	/* Setting kfence_sample_interval to 0 on boot disables KFENCE. */
	if (!kfence_sample_interval)
		return;

	if (!kfence_init_pool_early()) {
		pr_err("%s failed\n", __func__);
		return;
	}

	kfence_init_enable();
}

static int kfence_init_late(void)
{
	const unsigned long nr_pages = KFENCE_POOL_SIZE / PAGE_SIZE;
#ifdef CONFIG_CONTIG_ALLOC
	struct page *pages;

	pages = alloc_contig_pages(nr_pages, GFP_KERNEL, first_online_node, NULL);
	if (!pages)
		return -ENOMEM;
	__kfence_pool = page_to_virt(pages);
#else
	if (nr_pages > MAX_ORDER_NR_PAGES) {
		pr_warn("KFENCE_NUM_OBJECTS too large for buddy allocator\n");
		return -EINVAL;
	}
	__kfence_pool = alloc_pages_exact(KFENCE_POOL_SIZE, GFP_KERNEL);
	if (!__kfence_pool)
		return -ENOMEM;
#endif

	if (!kfence_init_pool_late()) {
		pr_err("%s failed\n", __func__);
		return -EBUSY;
	}

	kfence_init_enable();
	return 0;
}

static int kfence_enable_late(void)
{
	if (!__kfence_pool)
		return kfence_init_late();

	WRITE_ONCE(kfence_enabled, true);
	queue_delayed_work(system_unbound_wq, &kfence_timer, 0);
	pr_info("re-enabled\n");
	return 0;
}

void kfence_shutdown_cache(struct kmem_cache *s)
{
	unsigned long flags;
	struct kfence_metadata *meta;
	int i;

	for (i = 0; i < CONFIG_KFENCE_NUM_OBJECTS; i++) {
		bool in_use;

		meta = &kfence_metadata[i];

		/*
		 * If we observe some inconsistent cache and state pair where we
		 * should have returned false here, cache destruction is racing
		 * with either kmem_cache_alloc() or kmem_cache_free(). Taking
		 * the lock will not help, as different critical section
		 * serialization will have the same outcome.
		 */
		if (READ_ONCE(meta->cache) != s ||
		    READ_ONCE(meta->state) != KFENCE_OBJECT_ALLOCATED)
			continue;

		raw_spin_lock_irqsave(&meta->lock, flags);
		in_use = meta->cache == s && meta->state == KFENCE_OBJECT_ALLOCATED;
		raw_spin_unlock_irqrestore(&meta->lock, flags);

		if (in_use) {
			/*
			 * This cache still has allocations, and we should not
			 * release them back into the freelist so they can still
			 * safely be used and retain the kernel's default
			 * behaviour of keeping the allocations alive (leak the
			 * cache); however, they effectively become "zombie
			 * allocations" as the KFENCE objects are the only ones
			 * still in use and the owning cache is being destroyed.
			 *
			 * We mark them freed, so that any subsequent use shows
			 * more useful error messages that will include stack
			 * traces of the user of the object, the original
			 * allocation, and caller to shutdown_cache().
			 */
			kfence_guarded_free((void *)meta->addr, meta, /*zombie=*/true);
		}
	}

	for (i = 0; i < CONFIG_KFENCE_NUM_OBJECTS; i++) {
		meta = &kfence_metadata[i];

		/* See above. */
		if (READ_ONCE(meta->cache) != s || READ_ONCE(meta->state) != KFENCE_OBJECT_FREED)
			continue;

		raw_spin_lock_irqsave(&meta->lock, flags);
		if (meta->cache == s && meta->state == KFENCE_OBJECT_FREED)
			meta->cache = NULL;
		raw_spin_unlock_irqrestore(&meta->lock, flags);
	}
}

void *__kfence_alloc(struct kmem_cache *s, size_t size, gfp_t flags)
{
	unsigned long stack_entries[KFENCE_STACK_DEPTH];
	size_t num_stack_entries;
	u32 alloc_stack_hash;

	/*
	 * Perform size check before switching kfence_allocation_gate, so that
	 * we don't disable KFENCE without making an allocation.
	 */
	if (size > PAGE_SIZE) {
		atomic_long_inc(&counters[KFENCE_COUNTER_SKIP_INCOMPAT]);
		return NULL;
	}

	/*
	 * Skip allocations from non-default zones, including DMA. We cannot
	 * guarantee that pages in the KFENCE pool will have the requested
	 * properties (e.g. reside in DMAable memory).
	 */
	if ((flags & GFP_ZONEMASK) ||
	    (s->flags & (SLAB_CACHE_DMA | SLAB_CACHE_DMA32))) {
		atomic_long_inc(&counters[KFENCE_COUNTER_SKIP_INCOMPAT]);
		return NULL;
	}

	if (atomic_inc_return(&kfence_allocation_gate) > 1)
		return NULL;
#ifdef CONFIG_KFENCE_STATIC_KEYS
	/*
	 * waitqueue_active() is fully ordered after the update of
	 * kfence_allocation_gate per atomic_inc_return().
	 */
	if (waitqueue_active(&allocation_wait)) {
		/*
		 * Calling wake_up() here may deadlock when allocations happen
		 * from within timer code. Use an irq_work to defer it.
		 */
		irq_work_queue(&wake_up_kfence_timer_work);
	}
#endif

	if (!READ_ONCE(kfence_enabled))
		return NULL;

	num_stack_entries = stack_trace_save(stack_entries, KFENCE_STACK_DEPTH, 0);

	/*
	 * Do expensive check for coverage of allocation in slow-path after
	 * allocation_gate has already become non-zero, even though it might
	 * mean not making any allocation within a given sample interval.
	 *
	 * This ensures reasonable allocation coverage when the pool is almost
	 * full, including avoiding long-lived allocations of the same source
	 * filling up the pool (e.g. pagecache allocations).
	 */
	alloc_stack_hash = get_alloc_stack_hash(stack_entries, num_stack_entries);
	if (should_skip_covered() && alloc_covered_contains(alloc_stack_hash)) {
		atomic_long_inc(&counters[KFENCE_COUNTER_SKIP_COVERED]);
		return NULL;
	}

	return kfence_guarded_alloc(s, size, flags, stack_entries, num_stack_entries,
				    alloc_stack_hash);
}
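
/*
 * Note (informational): __kfence_alloc() is not called directly by slab users;
 * the kfence_alloc() wrapper in include/linux/kfence.h first checks
 * kfence_allocation_key and kfence_allocation_gate, so the slab fast path stays
 * cheap whenever no sample is due.
 */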

size_t kfence_ksize(const void *addr)
{
	const struct kfence_metadata *meta = addr_to_metadata((unsigned long)addr);

	/*
	 * Read locklessly -- if there is a race with __kfence_alloc(), this is
	 * either a use-after-free or invalid access.
	 */
	return meta ? meta->size : 0;
}

void *kfence_object_start(const void *addr)
{
	const struct kfence_metadata *meta = addr_to_metadata((unsigned long)addr);

	/*
	 * Read locklessly -- if there is a race with __kfence_alloc(), this is
	 * either a use-after-free or invalid access.
	 */
	return meta ? (void *)meta->addr : NULL;
}

void __kfence_free(void *addr)
{
	struct kfence_metadata *meta = addr_to_metadata((unsigned long)addr);

#ifdef CONFIG_MEMCG
	KFENCE_WARN_ON(meta->objcg);
#endif
	/*
	 * If the objects of the cache are SLAB_TYPESAFE_BY_RCU, defer freeing
	 * the object, as the object page may be recycled for other-typed
	 * objects once it has been freed. meta->cache may be NULL if the cache
	 * was destroyed.
	 */
	if (unlikely(meta->cache && (meta->cache->flags & SLAB_TYPESAFE_BY_RCU)))
		call_rcu(&meta->rcu_head, rcu_guarded_free);
	else
		kfence_guarded_free(addr, meta, false);
}

bool kfence_handle_page_fault(unsigned long addr, bool is_write, struct pt_regs *regs)
{
	const int page_index = (addr - (unsigned long)__kfence_pool) / PAGE_SIZE;
	struct kfence_metadata *to_report = NULL;
	enum kfence_error_type error_type;
	unsigned long flags;

	if (!is_kfence_address((void *)addr))
		return false;

	if (!READ_ONCE(kfence_enabled)) /* If disabled at runtime ... */
		return kfence_unprotect(addr); /* ... unprotect and proceed. */

	atomic_long_inc(&counters[KFENCE_COUNTER_BUGS]);

	if (page_index % 2) {
		/* This is a redzone, report a buffer overflow. */
		struct kfence_metadata *meta;
		int distance = 0;

		meta = addr_to_metadata(addr - PAGE_SIZE);
		if (meta && READ_ONCE(meta->state) == KFENCE_OBJECT_ALLOCATED) {
			to_report = meta;
			/* Data race ok; distance calculation approximate. */
			distance = addr - data_race(meta->addr + meta->size);
		}

		meta = addr_to_metadata(addr + PAGE_SIZE);
		if (meta && READ_ONCE(meta->state) == KFENCE_OBJECT_ALLOCATED) {
			/* Data race ok; distance calculation approximate. */
			if (!to_report || distance > data_race(meta->addr) - addr)
				to_report = meta;
		}

		if (!to_report)
			goto out;

		raw_spin_lock_irqsave(&to_report->lock, flags);
		to_report->unprotected_page = addr;
		error_type = KFENCE_ERROR_OOB;

		/*
		 * If the object was freed before we took the lock we can still
		 * report this as an OOB -- the report will simply show the
		 * stacktrace of the free as well.
		 */
	} else {
		to_report = addr_to_metadata(addr);
		if (!to_report)
			goto out;

		raw_spin_lock_irqsave(&to_report->lock, flags);
		error_type = KFENCE_ERROR_UAF;
		/*
		 * We may race with __kfence_alloc(), and it is possible that a
		 * freed object may be reallocated. We simply report this as a
		 * use-after-free, with the stack trace showing the place where
		 * the object was re-allocated.
		 */
	}

out:
	if (to_report) {
		kfence_report_error(addr, is_write, regs, to_report, error_type);
		raw_spin_unlock_irqrestore(&to_report->lock, flags);
	} else {
		/* This may be a UAF or OOB access, but we can't be sure. */
		kfence_report_error(addr, is_write, regs, NULL, KFENCE_ERROR_INVALID);
	}

	return kfence_unprotect(addr); /* Unprotect and let access proceed. */
}