// SPDX-License-Identifier: GPL-2.0
/*
 * KFENCE guarded object allocator and fault handling.
 *
 * Copyright (C) 2020, Google LLC.
 */

#define pr_fmt(fmt) "kfence: " fmt

#include <linux/atomic.h>
#include <linux/bug.h>
#include <linux/debugfs.h>
#include <linux/hash.h>
#include <linux/irq_work.h>
#include <linux/jhash.h>
#include <linux/kcsan-checks.h>
#include <linux/kfence.h>
#include <linux/kmemleak.h>
#include <linux/list.h>
#include <linux/lockdep.h>
#include <linux/log2.h>
#include <linux/memblock.h>
#include <linux/moduleparam.h>
#include <linux/notifier.h>
#include <linux/panic_notifier.h>
#include <linux/random.h>
#include <linux/rcupdate.h>
#include <linux/sched/clock.h>
#include <linux/seq_file.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/string.h>

#include <asm/kfence.h>

#include "kfence.h"

/* Disables KFENCE on the first warning assuming an irrecoverable error. */
#define KFENCE_WARN_ON(cond)                                            \
        ({                                                              \
                const bool __cond = WARN_ON(cond);                      \
                if (unlikely(__cond)) {                                 \
                        WRITE_ONCE(kfence_enabled, false);              \
                        disabled_by_warn = true;                        \
                }                                                       \
                __cond;                                                 \
        })

/* === Data ================================================================= */

static bool kfence_enabled __read_mostly;
static bool disabled_by_warn __read_mostly;

unsigned long kfence_sample_interval __read_mostly = CONFIG_KFENCE_SAMPLE_INTERVAL;
EXPORT_SYMBOL_GPL(kfence_sample_interval); /* Export for test modules. */

#ifdef MODULE_PARAM_PREFIX
#undef MODULE_PARAM_PREFIX
#endif
#define MODULE_PARAM_PREFIX "kfence."

static int kfence_enable_late(void);
static int param_set_sample_interval(const char *val, const struct kernel_param *kp)
{
        unsigned long num;
        int ret = kstrtoul(val, 0, &num);

        if (ret < 0)
                return ret;

        /* Using 0 to indicate KFENCE is disabled. */
        if (!num && READ_ONCE(kfence_enabled)) {
                pr_info("disabled\n");
                WRITE_ONCE(kfence_enabled, false);
        }

        *((unsigned long *)kp->arg) = num;

        if (num && !READ_ONCE(kfence_enabled) && system_state != SYSTEM_BOOTING)
                return disabled_by_warn ? -EINVAL : kfence_enable_late();
        return 0;
}

static int param_get_sample_interval(char *buffer, const struct kernel_param *kp)
{
        if (!READ_ONCE(kfence_enabled))
                return sprintf(buffer, "0\n");

        return param_get_ulong(buffer, kp);
}

static const struct kernel_param_ops sample_interval_param_ops = {
        .set = param_set_sample_interval,
        .get = param_get_sample_interval,
};
module_param_cb(sample_interval, &sample_interval_param_ops, &kfence_sample_interval, 0600);
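
/*
 * Usage sketch (illustrative, assuming the standard module-param plumbing):
 * with MODULE_PARAM_PREFIX "kfence.", the interval can be set at boot via
 * "kfence.sample_interval=100" on the kernel command line, or at runtime via
 * /sys/module/kfence/parameters/sample_interval. Writing 0 disables KFENCE;
 * writing a non-zero value after boot re-enables it, unless KFENCE was
 * disabled by a warning (disabled_by_warn), in which case the setter above
 * returns -EINVAL.
 */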

/* Pool usage% threshold when currently covered allocations are skipped. */
static unsigned long kfence_skip_covered_thresh __read_mostly = 75;
module_param_named(skip_covered_thresh, kfence_skip_covered_thresh, ulong, 0644);

/* If true, use a deferrable timer. */
static bool kfence_deferrable __read_mostly = IS_ENABLED(CONFIG_KFENCE_DEFERRABLE);
module_param_named(deferrable, kfence_deferrable, bool, 0444);

/* If true, check all canary bytes on panic. */
static bool kfence_check_on_panic __read_mostly;
module_param_named(check_on_panic, kfence_check_on_panic, bool, 0444);

/* The pool of pages used for guard pages and objects. */
char *__kfence_pool __read_mostly;
EXPORT_SYMBOL(__kfence_pool); /* Export for test modules. */

/*
 * Per-object metadata, with one-to-one mapping of object metadata to
 * backing pages (in __kfence_pool).
 */
static_assert(CONFIG_KFENCE_NUM_OBJECTS > 0);
struct kfence_metadata *kfence_metadata __read_mostly;

/*
 * If kfence_metadata is not NULL, it may be accessed by kfence_shutdown_cache().
 * So introduce kfence_metadata_init to initialize metadata, and then make
 * kfence_metadata visible after initialization is successful. This prevents
 * potential UAF or access to uninitialized metadata.
 */
static struct kfence_metadata *kfence_metadata_init __read_mostly;

/* Freelist with available objects. */
static struct list_head kfence_freelist = LIST_HEAD_INIT(kfence_freelist);
static DEFINE_RAW_SPINLOCK(kfence_freelist_lock); /* Lock protecting freelist. */

/*
 * The static key to set up a KFENCE allocation; or if static keys are not used
 * to gate allocations, to avoid a load and compare if KFENCE is disabled.
 */
DEFINE_STATIC_KEY_FALSE(kfence_allocation_key);

/* Gates the allocation, ensuring only one succeeds in a given period. */
atomic_t kfence_allocation_gate = ATOMIC_INIT(1);

/*
 * A Counting Bloom filter of allocation coverage: limits currently covered
 * allocations of the same source filling up the pool.
 *
 * Assuming a range of 15%-85% unique allocations in the pool at any point in
 * time, the below parameters provide a probability of 0.02-0.33 for false
 * positive hits respectively:
 *
 *      P(alloc_traces) = (1 - e^(-HNUM * (alloc_traces / SIZE)))^HNUM
 */
#define ALLOC_COVERED_HNUM      2
#define ALLOC_COVERED_ORDER     (const_ilog2(CONFIG_KFENCE_NUM_OBJECTS) + 2)
#define ALLOC_COVERED_SIZE      (1 << ALLOC_COVERED_ORDER)
#define ALLOC_COVERED_HNEXT(h)  hash_32(h, ALLOC_COVERED_ORDER)
#define ALLOC_COVERED_MASK      (ALLOC_COVERED_SIZE - 1)
static atomic_t alloc_covered[ALLOC_COVERED_SIZE];
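
/*
 * Worked example for the false-positive formula above (illustrative only,
 * assuming the default CONFIG_KFENCE_NUM_OBJECTS=255, so SIZE = 2^(7+2) = 512):
 * with 15% of the pool unique (~38 covered allocations),
 *      P = (1 - e^(-2 * 38/512))^2 = (1 - 0.862)^2 ~= 0.02
 * and with 85% unique (~217 covered allocations),
 *      P = (1 - e^(-2 * 217/512))^2 = (1 - 0.428)^2 ~= 0.33
 */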

/* Stack depth used to determine uniqueness of an allocation. */
#define UNIQUE_ALLOC_STACK_DEPTH ((size_t)8)

/*
 * Randomness for stack hashes, making the same collisions across reboots and
 * different machines less likely.
 */
static u32 stack_hash_seed __ro_after_init;

/* Statistics counters for debugfs. */
enum kfence_counter_id {
        KFENCE_COUNTER_ALLOCATED,
        KFENCE_COUNTER_ALLOCS,
        KFENCE_COUNTER_FREES,
        KFENCE_COUNTER_ZOMBIES,
        KFENCE_COUNTER_BUGS,
        KFENCE_COUNTER_SKIP_INCOMPAT,
        KFENCE_COUNTER_SKIP_CAPACITY,
        KFENCE_COUNTER_SKIP_COVERED,
        KFENCE_COUNTER_COUNT,
};
static atomic_long_t counters[KFENCE_COUNTER_COUNT];
static const char *const counter_names[] = {
        [KFENCE_COUNTER_ALLOCATED]      = "currently allocated",
        [KFENCE_COUNTER_ALLOCS]         = "total allocations",
        [KFENCE_COUNTER_FREES]          = "total frees",
        [KFENCE_COUNTER_ZOMBIES]        = "zombie allocations",
        [KFENCE_COUNTER_BUGS]           = "total bugs",
        [KFENCE_COUNTER_SKIP_INCOMPAT]  = "skipped allocations (incompatible)",
        [KFENCE_COUNTER_SKIP_CAPACITY]  = "skipped allocations (capacity)",
        [KFENCE_COUNTER_SKIP_COVERED]   = "skipped allocations (covered)",
};
static_assert(ARRAY_SIZE(counter_names) == KFENCE_COUNTER_COUNT);

/* === Internals ============================================================ */

static inline bool should_skip_covered(void)
{
        unsigned long thresh = (CONFIG_KFENCE_NUM_OBJECTS * kfence_skip_covered_thresh) / 100;

        return atomic_long_read(&counters[KFENCE_COUNTER_ALLOCATED]) > thresh;
}

static u32 get_alloc_stack_hash(unsigned long *stack_entries, size_t num_entries)
{
        num_entries = min(num_entries, UNIQUE_ALLOC_STACK_DEPTH);
        num_entries = filter_irq_stacks(stack_entries, num_entries);
        return jhash(stack_entries, num_entries * sizeof(stack_entries[0]), stack_hash_seed);
}

/*
 * Adds (or subtracts) count @val for allocation stack trace hash
 * @alloc_stack_hash from Counting Bloom filter.
 */
static void alloc_covered_add(u32 alloc_stack_hash, int val)
{
        int i;

        for (i = 0; i < ALLOC_COVERED_HNUM; i++) {
                atomic_add(val, &alloc_covered[alloc_stack_hash & ALLOC_COVERED_MASK]);
                alloc_stack_hash = ALLOC_COVERED_HNEXT(alloc_stack_hash);
        }
}
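
/*
 * Note (editorial): unlike a plain Bloom filter, the per-bucket counters
 * support removal: each guarded allocation increments its HNUM buckets via
 * alloc_covered_add(hash, 1), and the matching free decrements them via
 * alloc_covered_add(hash, -1), so long-gone allocations do not permanently
 * poison the filter.
 */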

/*
 * Returns true if the allocation stack trace hash @alloc_stack_hash is
 * currently contained (non-zero count) in Counting Bloom filter.
 */
static bool alloc_covered_contains(u32 alloc_stack_hash)
{
        int i;

        for (i = 0; i < ALLOC_COVERED_HNUM; i++) {
                if (!atomic_read(&alloc_covered[alloc_stack_hash & ALLOC_COVERED_MASK]))
                        return false;
                alloc_stack_hash = ALLOC_COVERED_HNEXT(alloc_stack_hash);
        }

        return true;
}

static bool kfence_protect(unsigned long addr)
{
        return !KFENCE_WARN_ON(!kfence_protect_page(ALIGN_DOWN(addr, PAGE_SIZE), true));
}

static bool kfence_unprotect(unsigned long addr)
{
        return !KFENCE_WARN_ON(!kfence_protect_page(ALIGN_DOWN(addr, PAGE_SIZE), false));
}

static inline unsigned long metadata_to_pageaddr(const struct kfence_metadata *meta)
{
        unsigned long offset = (meta - kfence_metadata + 1) * PAGE_SIZE * 2;
        unsigned long pageaddr = (unsigned long)&__kfence_pool[offset];

        /* The checks do not affect performance; only called from slow-paths. */

        /* Only call with a pointer into kfence_metadata. */
        if (KFENCE_WARN_ON(meta < kfence_metadata ||
                           meta >= kfence_metadata + CONFIG_KFENCE_NUM_OBJECTS))
                return 0;

        /*
         * This metadata object only ever maps to 1 page; verify that the stored
         * address is in the expected range.
         */
        if (KFENCE_WARN_ON(ALIGN_DOWN(meta->addr, PAGE_SIZE) != pageaddr))
                return 0;

        return pageaddr;
}
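
/*
 * Illustration of the pool layout implied by the mapping above (editorial,
 * not from the original source): object i lives at
 * __kfence_pool + (i + 1) * 2 * PAGE_SIZE, every odd-indexed page is a
 * protected guard page, and pages 0-1 are leading guards:
 *
 *      +-------+-------+-------+-------+-------+-------+
 *      | guard | guard | obj 0 | guard | obj 1 | guard | ...
 *      +-------+-------+-------+-------+-------+-------+
 *       page 0  page 1  page 2  page 3  page 4  page 5
 */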

/*
 * Update the object's metadata state, including updating the alloc/free stacks
 * depending on the state transition.
 */
static noinline void
metadata_update_state(struct kfence_metadata *meta, enum kfence_object_state next,
                      unsigned long *stack_entries, size_t num_stack_entries)
{
        struct kfence_track *track =
                next == KFENCE_OBJECT_FREED ? &meta->free_track : &meta->alloc_track;

        lockdep_assert_held(&meta->lock);

        if (stack_entries) {
                memcpy(track->stack_entries, stack_entries,
                       num_stack_entries * sizeof(stack_entries[0]));
        } else {
                /*
                 * Skip over 1 (this) function; noinline ensures we do not
                 * accidentally skip over the caller by never inlining.
                 */
                num_stack_entries = stack_trace_save(track->stack_entries, KFENCE_STACK_DEPTH, 1);
        }
        track->num_stack_entries = num_stack_entries;
        track->pid = task_pid_nr(current);
        track->cpu = raw_smp_processor_id();
        track->ts_nsec = local_clock(); /* Same source as printk timestamps. */

        /*
         * Pairs with READ_ONCE() in
         *      kfence_shutdown_cache(),
         *      kfence_handle_page_fault().
         */
        WRITE_ONCE(meta->state, next);
}

/* Check canary byte at @addr. */
static inline bool check_canary_byte(u8 *addr)
{
        struct kfence_metadata *meta;
        unsigned long flags;

        if (likely(*addr == KFENCE_CANARY_PATTERN_U8(addr)))
                return true;

        atomic_long_inc(&counters[KFENCE_COUNTER_BUGS]);

        meta = addr_to_metadata((unsigned long)addr);
        raw_spin_lock_irqsave(&meta->lock, flags);
        kfence_report_error((unsigned long)addr, false, NULL, meta, KFENCE_ERROR_CORRUPTION);
        raw_spin_unlock_irqrestore(&meta->lock, flags);

        return false;
}

static inline void set_canary(const struct kfence_metadata *meta)
{
        const unsigned long pageaddr = ALIGN_DOWN(meta->addr, PAGE_SIZE);
        unsigned long addr = pageaddr;

        /*
         * The canary pattern may be written over part of the object's memory;
         * this is harmless, as the user is expected to initialize the object
         * before using it.
         */
        for (; addr < meta->addr; addr += sizeof(u64))
                *((u64 *)addr) = KFENCE_CANARY_PATTERN_U64;

        addr = ALIGN_DOWN(meta->addr + meta->size, sizeof(u64));
        for (; addr - pageaddr < PAGE_SIZE; addr += sizeof(u64))
                *((u64 *)addr) = KFENCE_CANARY_PATTERN_U64;
}
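
/*
 * Canary layout within an object's page (editorial illustration, not from the
 * original source): all bytes outside [meta->addr, meta->addr + meta->size)
 * are filled with the KFENCE canary pattern:
 *
 *      pageaddr            meta->addr     meta->addr + size          page end
 *      |<--- left canary --->|<--- object --->|<----- right canary ----->|
 *
 * check_canary() below verifies both sides to detect small out-of-bounds
 * writes that stayed within the page and thus never hit a guard page.
 */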

static inline void check_canary(const struct kfence_metadata *meta)
{
        const unsigned long pageaddr = ALIGN_DOWN(meta->addr, PAGE_SIZE);
        unsigned long addr = pageaddr;

        /*
         * We'll iterate over each canary byte per-side until a corrupted byte
         * is found. However, we'll still iterate over the canary bytes to the
         * right of the object even if there was an error in the canary bytes to
         * the left of the object. Specifically, if check_canary_byte()
         * generates an error, showing both sides might give more clues as to
         * what the error is about when displaying which bytes were corrupted.
         */

        /* Apply to left of object. */
        for (; meta->addr - addr >= sizeof(u64); addr += sizeof(u64)) {
                if (unlikely(*((u64 *)addr) != KFENCE_CANARY_PATTERN_U64))
                        break;
        }

        /*
         * If a corruption was detected within a whole u64 word, or the
         * remaining canary bytes left of the object do not fill a whole u64,
         * fall back to checking the canary byte by byte.
         */
        for (; addr < meta->addr; addr++) {
                if (unlikely(!check_canary_byte((u8 *)addr)))
                        break;
        }

        /* Apply to right of object. */
        for (addr = meta->addr + meta->size; addr % sizeof(u64) != 0; addr++) {
                if (unlikely(!check_canary_byte((u8 *)addr)))
                        return;
        }
        for (; addr - pageaddr < PAGE_SIZE; addr += sizeof(u64)) {
                if (unlikely(*((u64 *)addr) != KFENCE_CANARY_PATTERN_U64)) {

                        for (; addr - pageaddr < PAGE_SIZE; addr++) {
                                if (!check_canary_byte((u8 *)addr))
                                        return;
                        }
                }
        }
}

static void *kfence_guarded_alloc(struct kmem_cache *cache, size_t size, gfp_t gfp,
                                  unsigned long *stack_entries, size_t num_stack_entries,
                                  u32 alloc_stack_hash)
{
        struct kfence_metadata *meta = NULL;
        unsigned long flags;
        struct slab *slab;
        void *addr;
        const bool random_right_allocate = get_random_u32_below(2);
        const bool random_fault = CONFIG_KFENCE_STRESS_TEST_FAULTS &&
                                  !get_random_u32_below(CONFIG_KFENCE_STRESS_TEST_FAULTS);

        /* Try to obtain a free object. */
        raw_spin_lock_irqsave(&kfence_freelist_lock, flags);
        if (!list_empty(&kfence_freelist)) {
                meta = list_entry(kfence_freelist.next, struct kfence_metadata, list);
                list_del_init(&meta->list);
        }
        raw_spin_unlock_irqrestore(&kfence_freelist_lock, flags);
        if (!meta) {
                atomic_long_inc(&counters[KFENCE_COUNTER_SKIP_CAPACITY]);
                return NULL;
        }

        if (unlikely(!raw_spin_trylock_irqsave(&meta->lock, flags))) {
                /*
                 * This is extremely unlikely -- we are reporting on a
                 * use-after-free, which locked meta->lock, and the reporting
                 * code via printk calls kmalloc() which ends up in
                 * kfence_alloc() and tries to grab the same object that we're
                 * reporting on. While it has never been observed, lockdep does
                 * report that there is a possibility of deadlock. Fix it by
                 * using trylock and bailing out gracefully.
                 */
                raw_spin_lock_irqsave(&kfence_freelist_lock, flags);
                /* Put the object back on the freelist. */
                list_add_tail(&meta->list, &kfence_freelist);
                raw_spin_unlock_irqrestore(&kfence_freelist_lock, flags);

                return NULL;
        }

        meta->addr = metadata_to_pageaddr(meta);
        /* Unprotect if we're reusing this page. */
        if (meta->state == KFENCE_OBJECT_FREED)
                kfence_unprotect(meta->addr);

        /*
         * Note: for allocations made before RNG initialization,
         * get_random_u32_below() will always return zero. We still benefit
         * from enabling KFENCE as early as possible, even when the RNG is not
         * yet available, as this will allow KFENCE to detect bugs due to
         * earlier allocations. The only downside is that the out-of-bounds
         * accesses detected are deterministic for such allocations.
         */
        if (random_right_allocate) {
                /* Allocate on the "right" side, re-calculate address. */
                meta->addr += PAGE_SIZE - size;
                meta->addr = ALIGN_DOWN(meta->addr, cache->align);
        }
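
        /*
         * Worked example (editorial, values illustrative): with
         * PAGE_SIZE == 4096, size == 100 and cache->align == 64, the
         * right-allocated object is placed at ALIGN_DOWN(base + 3996, 64) =
         * base + 3968. An access past the page boundary then faults
         * immediately on the right guard page, while the 28 trailing bytes
         * between object end and page end are covered by canaries (checked at
         * free). Left allocation mirrors this for left-of-object bugs.
         */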

        addr = (void *)meta->addr;

        /* Update remaining metadata. */
        metadata_update_state(meta, KFENCE_OBJECT_ALLOCATED, stack_entries, num_stack_entries);
        /* Pairs with READ_ONCE() in kfence_shutdown_cache(). */
        WRITE_ONCE(meta->cache, cache);
        meta->size = size;
        meta->alloc_stack_hash = alloc_stack_hash;
        raw_spin_unlock_irqrestore(&meta->lock, flags);

        alloc_covered_add(alloc_stack_hash, 1);

        /* Set required slab fields. */
        slab = virt_to_slab((void *)meta->addr);
        slab->slab_cache = cache;
        slab->objects = 1;

        /* Memory initialization. */
        set_canary(meta);

        /*
         * We check slab_want_init_on_alloc() ourselves, rather than letting
         * SL*B do the initialization, as otherwise we might overwrite KFENCE's
         * redzone.
         */
        if (unlikely(slab_want_init_on_alloc(gfp, cache)))
                memzero_explicit(addr, size);
        if (cache->ctor)
                cache->ctor(addr);

        if (random_fault)
                kfence_protect(meta->addr); /* Random "faults" by protecting the object. */

        atomic_long_inc(&counters[KFENCE_COUNTER_ALLOCATED]);
        atomic_long_inc(&counters[KFENCE_COUNTER_ALLOCS]);

        return addr;
}

static void kfence_guarded_free(void *addr, struct kfence_metadata *meta, bool zombie)
{
        struct kcsan_scoped_access assert_page_exclusive;
        unsigned long flags;
        bool init;

        raw_spin_lock_irqsave(&meta->lock, flags);

        if (meta->state != KFENCE_OBJECT_ALLOCATED || meta->addr != (unsigned long)addr) {
                /* Invalid or double-free, bail out. */
                atomic_long_inc(&counters[KFENCE_COUNTER_BUGS]);
                kfence_report_error((unsigned long)addr, false, NULL, meta,
                                    KFENCE_ERROR_INVALID_FREE);
                raw_spin_unlock_irqrestore(&meta->lock, flags);
                return;
        }

        /* Detect racy use-after-free, or incorrect reallocation of this page by KFENCE. */
        kcsan_begin_scoped_access((void *)ALIGN_DOWN((unsigned long)addr, PAGE_SIZE), PAGE_SIZE,
                                  KCSAN_ACCESS_SCOPED | KCSAN_ACCESS_WRITE | KCSAN_ACCESS_ASSERT,
                                  &assert_page_exclusive);

        if (CONFIG_KFENCE_STRESS_TEST_FAULTS)
                kfence_unprotect((unsigned long)addr); /* To check canary bytes. */

        /* Restore page protection if there was an OOB access. */
        if (meta->unprotected_page) {
                memzero_explicit((void *)ALIGN_DOWN(meta->unprotected_page, PAGE_SIZE), PAGE_SIZE);
                kfence_protect(meta->unprotected_page);
                meta->unprotected_page = 0;
        }

        /* Mark the object as freed. */
        metadata_update_state(meta, KFENCE_OBJECT_FREED, NULL, 0);
        init = slab_want_init_on_free(meta->cache);
        raw_spin_unlock_irqrestore(&meta->lock, flags);

        alloc_covered_add(meta->alloc_stack_hash, -1);

        /* Check canary bytes for memory corruption. */
        check_canary(meta);

        /*
         * Clear memory if init-on-free is set. While we protect the page, the
         * data is still there, and after a use-after-free is detected, we
         * unprotect the page, so the data is still accessible.
         */
        if (!zombie && unlikely(init))
                memzero_explicit(addr, meta->size);

        /* Protect to detect use-after-frees. */
        kfence_protect((unsigned long)addr);

        kcsan_end_scoped_access(&assert_page_exclusive);
        if (!zombie) {
                /* Add it to the tail of the freelist for reuse. */
                raw_spin_lock_irqsave(&kfence_freelist_lock, flags);
                KFENCE_WARN_ON(!list_empty(&meta->list));
                list_add_tail(&meta->list, &kfence_freelist);
                raw_spin_unlock_irqrestore(&kfence_freelist_lock, flags);

                atomic_long_dec(&counters[KFENCE_COUNTER_ALLOCATED]);
                atomic_long_inc(&counters[KFENCE_COUNTER_FREES]);
        } else {
                /* See kfence_shutdown_cache(). */
                atomic_long_inc(&counters[KFENCE_COUNTER_ZOMBIES]);
        }
}

static void rcu_guarded_free(struct rcu_head *h)
{
        struct kfence_metadata *meta = container_of(h, struct kfence_metadata, rcu_head);

        kfence_guarded_free((void *)meta->addr, meta, false);
}

/*
 * Initialization of the KFENCE pool after its allocation.
 * Returns 0 on success; otherwise returns the address up to
 * which partial initialization succeeded.
 */
static unsigned long kfence_init_pool(void)
{
        unsigned long addr;
        struct page *pages;
        int i;

        if (!arch_kfence_init_pool())
                return (unsigned long)__kfence_pool;

        addr = (unsigned long)__kfence_pool;
        pages = virt_to_page(__kfence_pool);

        /*
         * Set up object pages: they must have PG_slab set, to avoid freeing
         * these as real pages.
         *
         * We also want to avoid inserting kfence_free() in the kfree()
         * fast-path in SLUB, and therefore need to ensure kfree() correctly
         * enters __slab_free() slow-path.
         */
        for (i = 0; i < KFENCE_POOL_SIZE / PAGE_SIZE; i++) {
                struct slab *slab = page_slab(nth_page(pages, i));

                if (!i || (i % 2))
                        continue;

                __folio_set_slab(slab_folio(slab));
#ifdef CONFIG_MEMCG
                slab->memcg_data = (unsigned long)&kfence_metadata_init[i / 2 - 1].objcg |
                                   MEMCG_DATA_OBJCGS;
#endif
        }

        /*
         * Protect the first 2 pages. The first page is mostly unnecessary, and
         * merely serves as an extended guard page. However, adding one
         * additional page in the beginning gives us an even number of pages,
         * which simplifies the mapping of address to metadata index.
         */
        for (i = 0; i < 2; i++) {
                if (unlikely(!kfence_protect(addr)))
                        return addr;

                addr += PAGE_SIZE;
        }

        for (i = 0; i < CONFIG_KFENCE_NUM_OBJECTS; i++) {
                struct kfence_metadata *meta = &kfence_metadata_init[i];

                /* Initialize metadata. */
                INIT_LIST_HEAD(&meta->list);
                raw_spin_lock_init(&meta->lock);
                meta->state = KFENCE_OBJECT_UNUSED;
                meta->addr = addr; /* Initialize for validation in metadata_to_pageaddr(). */
                list_add_tail(&meta->list, &kfence_freelist);

                /* Protect the right redzone. */
                if (unlikely(!kfence_protect(addr + PAGE_SIZE)))
                        goto reset_slab;

                addr += 2 * PAGE_SIZE;
        }

        /*
         * Make kfence_metadata visible only when initialization is successful.
         * Otherwise, if the initialization fails and kfence_metadata is freed,
         * it may cause UAF in kfence_shutdown_cache().
         */
        smp_store_release(&kfence_metadata, kfence_metadata_init);
        return 0;

reset_slab:
        for (i = 0; i < KFENCE_POOL_SIZE / PAGE_SIZE; i++) {
                struct slab *slab = page_slab(nth_page(pages, i));

                if (!i || (i % 2))
                        continue;
#ifdef CONFIG_MEMCG
                slab->memcg_data = 0;
#endif
                __folio_clear_slab(slab_folio(slab));
        }

        return addr;
}

static bool __init kfence_init_pool_early(void)
{
        unsigned long addr;

        if (!__kfence_pool)
                return false;

        addr = kfence_init_pool();

        if (!addr) {
                /*
                 * The pool is live and will never be deallocated from this point on.
                 * Ignore the pool object from the kmemleak phys object tree, as it would
                 * otherwise overlap with allocations returned by kfence_alloc(), which
                 * are registered with kmemleak through the slab post-alloc hook.
                 */
                kmemleak_ignore_phys(__pa(__kfence_pool));
                return true;
        }

        /*
         * Only release unprotected pages, and do not try to go back and change
         * page attributes due to risk of failing to do so as well. If changing
         * page attributes for some pages fails, it is very likely that it also
         * fails for the first page, and therefore expect addr==__kfence_pool in
         * most failure cases.
         */
        memblock_free_late(__pa(addr), KFENCE_POOL_SIZE - (addr - (unsigned long)__kfence_pool));
        __kfence_pool = NULL;

        memblock_free_late(__pa(kfence_metadata_init), KFENCE_METADATA_SIZE);
        kfence_metadata_init = NULL;

        return false;
}

/* === DebugFS Interface ==================================================== */

static int stats_show(struct seq_file *seq, void *v)
{
        int i;

        seq_printf(seq, "enabled: %i\n", READ_ONCE(kfence_enabled));
        for (i = 0; i < KFENCE_COUNTER_COUNT; i++)
                seq_printf(seq, "%s: %ld\n", counter_names[i], atomic_long_read(&counters[i]));

        return 0;
}
DEFINE_SHOW_ATTRIBUTE(stats);

/*
 * debugfs seq_file operations for /sys/kernel/debug/kfence/objects.
 * start_object() and next_object() return the object index + 1, because NULL is used
 * to stop iteration.
 */
static void *start_object(struct seq_file *seq, loff_t *pos)
{
        if (*pos < CONFIG_KFENCE_NUM_OBJECTS)
                return (void *)((long)*pos + 1);
        return NULL;
}

static void stop_object(struct seq_file *seq, void *v)
{
}

static void *next_object(struct seq_file *seq, void *v, loff_t *pos)
{
        ++*pos;
        if (*pos < CONFIG_KFENCE_NUM_OBJECTS)
                return (void *)((long)*pos + 1);
        return NULL;
}

static int show_object(struct seq_file *seq, void *v)
{
        struct kfence_metadata *meta = &kfence_metadata[(long)v - 1];
        unsigned long flags;

        raw_spin_lock_irqsave(&meta->lock, flags);
        kfence_print_object(seq, meta);
        raw_spin_unlock_irqrestore(&meta->lock, flags);
        seq_puts(seq, "---------------------------------\n");

        return 0;
}

static const struct seq_operations objects_sops = {
        .start = start_object,
        .next = next_object,
        .stop = stop_object,
        .show = show_object,
};
DEFINE_SEQ_ATTRIBUTE(objects);

static int kfence_debugfs_init(void)
{
        struct dentry *kfence_dir;

        if (!READ_ONCE(kfence_enabled))
                return 0;

        kfence_dir = debugfs_create_dir("kfence", NULL);
        debugfs_create_file("stats", 0444, kfence_dir, NULL, &stats_fops);
        debugfs_create_file("objects", 0400, kfence_dir, NULL, &objects_fops);
        return 0;
}

late_initcall(kfence_debugfs_init);
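
/*
 * Example /sys/kernel/debug/kfence/stats output as produced by stats_show()
 * above (counter values are made up for illustration):
 *
 *      enabled: 1
 *      currently allocated: 42
 *      total allocations: 3018
 *      total frees: 2976
 *      ...
 */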

/* === Panic Notifier ====================================================== */

static void kfence_check_all_canary(void)
{
        int i;

        for (i = 0; i < CONFIG_KFENCE_NUM_OBJECTS; i++) {
                struct kfence_metadata *meta = &kfence_metadata[i];

                if (meta->state == KFENCE_OBJECT_ALLOCATED)
                        check_canary(meta);
        }
}

static int kfence_check_canary_callback(struct notifier_block *nb,
                                        unsigned long reason, void *arg)
{
        kfence_check_all_canary();
        return NOTIFY_OK;
}

static struct notifier_block kfence_check_canary_notifier = {
        .notifier_call = kfence_check_canary_callback,
};

/* === Allocation Gate Timer ================================================ */

static struct delayed_work kfence_timer;

#ifdef CONFIG_KFENCE_STATIC_KEYS
/* Wait queue to wake up allocation-gate timer task. */
static DECLARE_WAIT_QUEUE_HEAD(allocation_wait);

static void wake_up_kfence_timer(struct irq_work *work)
{
        wake_up(&allocation_wait);
}
static DEFINE_IRQ_WORK(wake_up_kfence_timer_work, wake_up_kfence_timer);
#endif

/*
 * Set up delayed work, which will enable and disable the static key. We need to
 * use a work queue (rather than a simple timer), since enabling and disabling a
 * static key cannot be done from an interrupt.
 *
 * Note: Toggling a static branch currently causes IPIs, and here we'll end up
 * with a total of 2 IPIs to all CPUs. If this ends up a problem in future (with
 * more aggressive sampling intervals), we could get away with a variant that
 * avoids IPIs, at the cost of not immediately capturing allocations if the
 * instructions remain cached.
 */
static void toggle_allocation_gate(struct work_struct *work)
{
        if (!READ_ONCE(kfence_enabled))
                return;

        atomic_set(&kfence_allocation_gate, 0);
#ifdef CONFIG_KFENCE_STATIC_KEYS
        /* Enable static key, and await allocation to happen. */
        static_branch_enable(&kfence_allocation_key);

        wait_event_idle(allocation_wait, atomic_read(&kfence_allocation_gate));

        /* Disable static key and reset timer. */
        static_branch_disable(&kfence_allocation_key);
#endif
        queue_delayed_work(system_unbound_wq, &kfence_timer,
                           msecs_to_jiffies(kfence_sample_interval));
}

/* === Public interface ===================================================== */

void __init kfence_alloc_pool_and_metadata(void)
{
        if (!kfence_sample_interval)
                return;

        /*
         * If the pool has already been initialized by arch, there is no need to
         * re-allocate the memory pool.
         */
        if (!__kfence_pool)
                __kfence_pool = memblock_alloc(KFENCE_POOL_SIZE, PAGE_SIZE);

        if (!__kfence_pool) {
                pr_err("failed to allocate pool\n");
                return;
        }

        /* The memory allocated by memblock has been zeroed out. */
        kfence_metadata_init = memblock_alloc(KFENCE_METADATA_SIZE, PAGE_SIZE);
        if (!kfence_metadata_init) {
                pr_err("failed to allocate metadata\n");
                memblock_free(__kfence_pool, KFENCE_POOL_SIZE);
                __kfence_pool = NULL;
        }
}

static void kfence_init_enable(void)
{
        if (!IS_ENABLED(CONFIG_KFENCE_STATIC_KEYS))
                static_branch_enable(&kfence_allocation_key);

        if (kfence_deferrable)
                INIT_DEFERRABLE_WORK(&kfence_timer, toggle_allocation_gate);
        else
                INIT_DELAYED_WORK(&kfence_timer, toggle_allocation_gate);

        if (kfence_check_on_panic)
                atomic_notifier_chain_register(&panic_notifier_list, &kfence_check_canary_notifier);

        WRITE_ONCE(kfence_enabled, true);
        queue_delayed_work(system_unbound_wq, &kfence_timer, 0);

        pr_info("initialized - using %lu bytes for %d objects at 0x%p-0x%p\n", KFENCE_POOL_SIZE,
                CONFIG_KFENCE_NUM_OBJECTS, (void *)__kfence_pool,
                (void *)(__kfence_pool + KFENCE_POOL_SIZE));
}

void __init kfence_init(void)
{
        stack_hash_seed = get_random_u32();

        /* Setting kfence_sample_interval to 0 on boot disables KFENCE. */
        if (!kfence_sample_interval)
                return;

        if (!kfence_init_pool_early()) {
                pr_err("%s failed\n", __func__);
                return;
        }

        kfence_init_enable();
}

static int kfence_init_late(void)
{
        const unsigned long nr_pages_pool = KFENCE_POOL_SIZE / PAGE_SIZE;
        const unsigned long nr_pages_meta = KFENCE_METADATA_SIZE / PAGE_SIZE;
        unsigned long addr = (unsigned long)__kfence_pool;
        unsigned long free_size = KFENCE_POOL_SIZE;
        int err = -ENOMEM;

#ifdef CONFIG_CONTIG_ALLOC
        struct page *pages;

        pages = alloc_contig_pages(nr_pages_pool, GFP_KERNEL, first_online_node,
                                   NULL);
        if (!pages)
                return -ENOMEM;

        __kfence_pool = page_to_virt(pages);
        pages = alloc_contig_pages(nr_pages_meta, GFP_KERNEL, first_online_node,
                                   NULL);
        if (pages)
                kfence_metadata_init = page_to_virt(pages);
#else
        if (nr_pages_pool > MAX_ORDER_NR_PAGES ||
            nr_pages_meta > MAX_ORDER_NR_PAGES) {
                pr_warn("KFENCE_NUM_OBJECTS too large for buddy allocator\n");
                return -EINVAL;
        }

        __kfence_pool = alloc_pages_exact(KFENCE_POOL_SIZE, GFP_KERNEL);
        if (!__kfence_pool)
                return -ENOMEM;

        kfence_metadata_init = alloc_pages_exact(KFENCE_METADATA_SIZE, GFP_KERNEL);
#endif

        if (!kfence_metadata_init)
                goto free_pool;

        memzero_explicit(kfence_metadata_init, KFENCE_METADATA_SIZE);
        addr = kfence_init_pool();
        if (!addr) {
                kfence_init_enable();
                kfence_debugfs_init();
                return 0;
        }

        pr_err("%s failed\n", __func__);
        free_size = KFENCE_POOL_SIZE - (addr - (unsigned long)__kfence_pool);
        err = -EBUSY;

#ifdef CONFIG_CONTIG_ALLOC
        free_contig_range(page_to_pfn(virt_to_page((void *)kfence_metadata_init)),
                          nr_pages_meta);
free_pool:
        free_contig_range(page_to_pfn(virt_to_page((void *)addr)),
                          free_size / PAGE_SIZE);
#else
        free_pages_exact((void *)kfence_metadata_init, KFENCE_METADATA_SIZE);
free_pool:
        free_pages_exact((void *)addr, free_size);
#endif

        kfence_metadata_init = NULL;
        __kfence_pool = NULL;
        return err;
}

static int kfence_enable_late(void)
{
        if (!__kfence_pool)
                return kfence_init_late();

        WRITE_ONCE(kfence_enabled, true);
        queue_delayed_work(system_unbound_wq, &kfence_timer, 0);
        pr_info("re-enabled\n");
        return 0;
}

void kfence_shutdown_cache(struct kmem_cache *s)
{
        unsigned long flags;
        struct kfence_metadata *meta;
        int i;

        /* Pairs with release in kfence_init_pool(). */
        if (!smp_load_acquire(&kfence_metadata))
                return;

        for (i = 0; i < CONFIG_KFENCE_NUM_OBJECTS; i++) {
                bool in_use;

                meta = &kfence_metadata[i];

                /*
                 * If we observe some inconsistent cache and state pair where we
                 * should have returned false here, cache destruction is racing
                 * with either kmem_cache_alloc() or kmem_cache_free(). Taking
                 * the lock will not help, as different critical section
                 * serialization will have the same outcome.
                 */
                if (READ_ONCE(meta->cache) != s ||
                    READ_ONCE(meta->state) != KFENCE_OBJECT_ALLOCATED)
                        continue;

                raw_spin_lock_irqsave(&meta->lock, flags);
                in_use = meta->cache == s && meta->state == KFENCE_OBJECT_ALLOCATED;
                raw_spin_unlock_irqrestore(&meta->lock, flags);

                if (in_use) {
                        /*
                         * This cache still has allocations, and we should not
                         * release them back into the freelist so they can still
                         * safely be used and retain the kernel's default
                         * behaviour of keeping the allocations alive (leak the
                         * cache); however, they effectively become "zombie
                         * allocations" as the KFENCE objects are the only ones
                         * still in use and the owning cache is being destroyed.
                         *
                         * We mark them freed, so that any subsequent use shows
                         * more useful error messages that will include stack
                         * traces of the user of the object, the original
                         * allocation, and caller to shutdown_cache().
                         */
                        kfence_guarded_free((void *)meta->addr, meta, /*zombie=*/true);
                }
        }

        for (i = 0; i < CONFIG_KFENCE_NUM_OBJECTS; i++) {
                meta = &kfence_metadata[i];

                /* See above. */
                if (READ_ONCE(meta->cache) != s || READ_ONCE(meta->state) != KFENCE_OBJECT_FREED)
                        continue;

                raw_spin_lock_irqsave(&meta->lock, flags);
                if (meta->cache == s && meta->state == KFENCE_OBJECT_FREED)
                        meta->cache = NULL;
                raw_spin_unlock_irqrestore(&meta->lock, flags);
        }
}

void *__kfence_alloc(struct kmem_cache *s, size_t size, gfp_t flags)
{
        unsigned long stack_entries[KFENCE_STACK_DEPTH];
        size_t num_stack_entries;
        u32 alloc_stack_hash;

        /*
         * Perform size check before switching kfence_allocation_gate, so that
         * we don't disable KFENCE without making an allocation.
         */
        if (size > PAGE_SIZE) {
                atomic_long_inc(&counters[KFENCE_COUNTER_SKIP_INCOMPAT]);
                return NULL;
        }

        /*
         * Skip allocations from non-default zones, including DMA. We cannot
         * guarantee that pages in the KFENCE pool will have the requested
         * properties (e.g. reside in DMAable memory).
         */
        if ((flags & GFP_ZONEMASK) ||
            (s->flags & (SLAB_CACHE_DMA | SLAB_CACHE_DMA32))) {
                atomic_long_inc(&counters[KFENCE_COUNTER_SKIP_INCOMPAT]);
                return NULL;
        }

        /*
         * Skip allocations for this slab, if KFENCE has been disabled for
         * this slab.
         */
        if (s->flags & SLAB_SKIP_KFENCE)
                return NULL;

        if (atomic_inc_return(&kfence_allocation_gate) > 1)
                return NULL;
#ifdef CONFIG_KFENCE_STATIC_KEYS
        /*
         * waitqueue_active() is fully ordered after the update of
         * kfence_allocation_gate per atomic_inc_return().
         */
        if (waitqueue_active(&allocation_wait)) {
                /*
                 * Calling wake_up() here may deadlock when allocations happen
                 * from within timer code. Use an irq_work to defer it.
                 */
                irq_work_queue(&wake_up_kfence_timer_work);
        }
#endif

        if (!READ_ONCE(kfence_enabled))
                return NULL;

        num_stack_entries = stack_trace_save(stack_entries, KFENCE_STACK_DEPTH, 0);

        /*
         * Do expensive check for coverage of allocation in slow-path after
         * allocation_gate has already become non-zero, even though it might
         * mean not making any allocation within a given sample interval.
         *
         * This ensures reasonable allocation coverage when the pool is almost
         * full, including avoiding long-lived allocations of the same source
         * filling up the pool (e.g. pagecache allocations).
         */
        alloc_stack_hash = get_alloc_stack_hash(stack_entries, num_stack_entries);
        if (should_skip_covered() && alloc_covered_contains(alloc_stack_hash)) {
                atomic_long_inc(&counters[KFENCE_COUNTER_SKIP_COVERED]);
                return NULL;
        }

        return kfence_guarded_alloc(s, size, flags, stack_entries, num_stack_entries,
                                    alloc_stack_hash);
}

size_t kfence_ksize(const void *addr)
{
        const struct kfence_metadata *meta = addr_to_metadata((unsigned long)addr);

        /*
         * Read locklessly -- if there is a race with __kfence_alloc(), this is
         * either a use-after-free or invalid access.
         */
        return meta ? meta->size : 0;
}

void *kfence_object_start(const void *addr)
{
        const struct kfence_metadata *meta = addr_to_metadata((unsigned long)addr);

        /*
         * Read locklessly -- if there is a race with __kfence_alloc(), this is
         * either a use-after-free or invalid access.
         */
        return meta ? (void *)meta->addr : NULL;
}

void __kfence_free(void *addr)
{
        struct kfence_metadata *meta = addr_to_metadata((unsigned long)addr);

#ifdef CONFIG_MEMCG
        KFENCE_WARN_ON(meta->objcg);
#endif
        /*
         * If the objects of the cache are SLAB_TYPESAFE_BY_RCU, defer freeing
         * the object, as the object page may be recycled for other-typed
         * objects once it has been freed. meta->cache may be NULL if the cache
         * was destroyed.
         */
        if (unlikely(meta->cache && (meta->cache->flags & SLAB_TYPESAFE_BY_RCU)))
                call_rcu(&meta->rcu_head, rcu_guarded_free);
        else
                kfence_guarded_free(addr, meta, false);
}

bool kfence_handle_page_fault(unsigned long addr, bool is_write, struct pt_regs *regs)
{
        const int page_index = (addr - (unsigned long)__kfence_pool) / PAGE_SIZE;
        struct kfence_metadata *to_report = NULL;
        enum kfence_error_type error_type;
        unsigned long flags;

        if (!is_kfence_address((void *)addr))
                return false;

        if (!READ_ONCE(kfence_enabled)) /* If disabled at runtime ... */
                return kfence_unprotect(addr); /* ... unprotect and proceed. */

        atomic_long_inc(&counters[KFENCE_COUNTER_BUGS]);

        if (page_index % 2) {
                /* This is a redzone, report a buffer overflow. */
                struct kfence_metadata *meta;
                int distance = 0;

                meta = addr_to_metadata(addr - PAGE_SIZE);
                if (meta && READ_ONCE(meta->state) == KFENCE_OBJECT_ALLOCATED) {
                        to_report = meta;
                        /* Data race ok; distance calculation approximate. */
                        distance = addr - data_race(meta->addr + meta->size);
                }

                meta = addr_to_metadata(addr + PAGE_SIZE);
                if (meta && READ_ONCE(meta->state) == KFENCE_OBJECT_ALLOCATED) {
                        /* Data race ok; distance calculation approximate. */
                        if (!to_report || distance > data_race(meta->addr) - addr)
                                to_report = meta;
                }

                if (!to_report)
                        goto out;

                raw_spin_lock_irqsave(&to_report->lock, flags);
                to_report->unprotected_page = addr;
                error_type = KFENCE_ERROR_OOB;

                /*
                 * If the object was freed before we took the lock we can still
                 * report this as an OOB -- the report will simply show the
                 * stacktrace of the free as well.
                 */
        } else {
                to_report = addr_to_metadata(addr);
                if (!to_report)
                        goto out;

                raw_spin_lock_irqsave(&to_report->lock, flags);
                error_type = KFENCE_ERROR_UAF;
                /*
                 * We may race with __kfence_alloc(), and it is possible that a
                 * freed object may be reallocated. We simply report this as a
                 * use-after-free, with the stack trace showing the place where
                 * the object was re-allocated.
                 */
        }

out:
        if (to_report) {
                kfence_report_error(addr, is_write, regs, to_report, error_type);
                raw_spin_unlock_irqrestore(&to_report->lock, flags);
        } else {
                /* This may be a UAF or OOB access, but we can't be sure. */
                kfence_report_error(addr, is_write, regs, NULL, KFENCE_ERROR_INVALID);
        }

        return kfence_unprotect(addr); /* Unprotect and let access proceed. */
}
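
/*
 * Fault decoding example for kfence_handle_page_fault() above (editorial,
 * values illustrative): for an access at __kfence_pool + 3 * PAGE_SIZE + 5,
 * page_index is 3 (odd), so the fault hit a redzone and is reported as an
 * out-of-bounds access, attributed to whichever neighbouring allocated object
 * (page 2 or page 4) is closer. For an even page_index, the faulting page is
 * an object page that was protected at free time, so the access is reported
 * as a use-after-free.
 */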