kasan: add hooks implementation for tag-based mode
mm/kasan/common.c
/*
 * This file contains common generic and tag-based KASAN code.
 *
 * Copyright (c) 2014 Samsung Electronics Co., Ltd.
 * Author: Andrey Ryabinin <ryabinin.a.a@gmail.com>
 *
 * Some code borrowed from https://github.com/xairy/kasan-prototype by
 * Andrey Konovalov <andreyknvl@gmail.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 */

#include <linux/export.h>
#include <linux/interrupt.h>
#include <linux/init.h>
#include <linux/kasan.h>
#include <linux/kernel.h>
#include <linux/kmemleak.h>
#include <linux/linkage.h>
#include <linux/memblock.h>
#include <linux/memory.h>
#include <linux/mm.h>
#include <linux/module.h>
#include <linux/printk.h>
#include <linux/sched.h>
#include <linux/sched/task_stack.h>
#include <linux/slab.h>
#include <linux/stacktrace.h>
#include <linux/string.h>
#include <linux/types.h>
#include <linux/vmalloc.h>
#include <linux/bug.h>

#include "kasan.h"
#include "../slab.h"

static inline int in_irqentry_text(unsigned long ptr)
{
	return (ptr >= (unsigned long)&__irqentry_text_start &&
		ptr < (unsigned long)&__irqentry_text_end) ||
		(ptr >= (unsigned long)&__softirqentry_text_start &&
		 ptr < (unsigned long)&__softirqentry_text_end);
}

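/*
 * Cut a stack trace at the first IRQ-entry frame: deeper entries belong to
 * the context that was interrupted and would only obscure the report.
 */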
static inline void filter_irq_stacks(struct stack_trace *trace)
{
	int i;

	if (!trace->nr_entries)
		return;
	for (i = 0; i < trace->nr_entries; i++)
		if (in_irqentry_text(trace->entries[i])) {
			/* Include the irqentry function into the stack. */
			trace->nr_entries = i + 1;
			break;
		}
}

static inline depot_stack_handle_t save_stack(gfp_t flags)
{
	unsigned long entries[KASAN_STACK_DEPTH];
	struct stack_trace trace = {
		.nr_entries = 0,
		.entries = entries,
		.max_entries = KASAN_STACK_DEPTH,
		.skip = 0
	};

	save_stack_trace(&trace);
	filter_irq_stacks(&trace);
	if (trace.nr_entries != 0 &&
	    trace.entries[trace.nr_entries-1] == ULONG_MAX)
		trace.nr_entries--;

	return depot_save_stack(&trace, flags);
}

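/*
 * Record the current pid and a stack depot handle for the current stack
 * trace; used to fill both the alloc and free tracks of an object.
 */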
static inline void set_track(struct kasan_track *track, gfp_t flags)
{
	track->pid = current->pid;
	track->stack = save_stack(flags);
}

void kasan_enable_current(void)
{
	current->kasan_depth++;
}

void kasan_disable_current(void)
{
	current->kasan_depth--;
}

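/*
 * Explicit checks for code whose accesses the compiler can't instrument;
 * e.g. a helper can call kasan_check_write(buf, len) before filling buf
 * through an uninstrumented path.
 */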
void kasan_check_read(const volatile void *p, unsigned int size)
{
	check_memory_region((unsigned long)p, size, false, _RET_IP_);
}
EXPORT_SYMBOL(kasan_check_read);

void kasan_check_write(const volatile void *p, unsigned int size)
{
	check_memory_region((unsigned long)p, size, true, _RET_IP_);
}
EXPORT_SYMBOL(kasan_check_write);

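/*
 * KASAN's replacements for memset/memmove/memcpy: check every byte range
 * they touch, then delegate to the uninstrumented __mem*() implementations.
 */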
#undef memset
void *memset(void *addr, int c, size_t len)
{
	check_memory_region((unsigned long)addr, len, true, _RET_IP_);

	return __memset(addr, c, len);
}

#undef memmove
void *memmove(void *dest, const void *src, size_t len)
{
	check_memory_region((unsigned long)src, len, false, _RET_IP_);
	check_memory_region((unsigned long)dest, len, true, _RET_IP_);

	return __memmove(dest, src, len);
}

#undef memcpy
void *memcpy(void *dest, const void *src, size_t len)
{
	check_memory_region((unsigned long)src, len, false, _RET_IP_);
	check_memory_region((unsigned long)dest, len, true, _RET_IP_);

	return __memcpy(dest, src, len);
}

/*
 * Poisons the shadow memory for 'size' bytes starting from 'addr'.
 * Memory addresses should be aligned to KASAN_SHADOW_SCALE_SIZE.
 */
void kasan_poison_shadow(const void *address, size_t size, u8 value)
{
	void *shadow_start, *shadow_end;

	/*
	 * Perform shadow offset calculation based on untagged address, as
	 * some of the callers (e.g. kasan_poison_object_data) pass tagged
	 * addresses to this function.
	 */
	address = reset_tag(address);

	shadow_start = kasan_mem_to_shadow(address);
	shadow_end = kasan_mem_to_shadow(address + size);

	__memset(shadow_start, value, shadow_end - shadow_start);
}

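/*
 * Unpoison 'size' bytes at 'address'. In generic mode a partially covered
 * last granule stores how many of its bytes are accessible: e.g. with an
 * 8-byte granule, unpoisoning 13 bytes writes 0 to the first shadow byte
 * and 5 to the second, so accesses beyond the 13th byte still fault. In
 * tag-based mode the shadow simply holds the pointer's tag.
 */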
void kasan_unpoison_shadow(const void *address, size_t size)
{
	u8 tag = get_tag(address);

	/*
	 * Perform shadow offset calculation based on untagged address, as
	 * some of the callers (e.g. kasan_unpoison_object_data) pass tagged
	 * addresses to this function.
	 */
	address = reset_tag(address);

	kasan_poison_shadow(address, size, tag);

	if (size & KASAN_SHADOW_MASK) {
		u8 *shadow = (u8 *)kasan_mem_to_shadow(address + size);

		if (IS_ENABLED(CONFIG_KASAN_SW_TAGS))
			*shadow = tag;
		else
			*shadow = size & KASAN_SHADOW_MASK;
	}
}

static void __kasan_unpoison_stack(struct task_struct *task, const void *sp)
{
	void *base = task_stack_page(task);
	size_t size = sp - base;

	kasan_unpoison_shadow(base, size);
}

/* Unpoison the entire stack for a task. */
void kasan_unpoison_task_stack(struct task_struct *task)
{
	__kasan_unpoison_stack(task, task_stack_page(task) + THREAD_SIZE);
}

/* Unpoison the stack for the current task beyond a watermark sp value. */
asmlinkage void kasan_unpoison_task_stack_below(const void *watermark)
{
	/*
	 * Calculate the task stack base address. Avoid using 'current'
	 * because this function is called by early resume code which hasn't
	 * yet set up the percpu register (%gs).
	 */
	void *base = (void *)((unsigned long)watermark & ~(THREAD_SIZE - 1));

	kasan_unpoison_shadow(base, watermark - base);
}

/*
 * Clear all poison for the region between the current SP and a provided
 * watermark value, as is sometimes required prior to hand-crafted asm function
 * returns in the middle of functions.
 */
void kasan_unpoison_stack_above_sp_to(const void *watermark)
{
	const void *sp = __builtin_frame_address(0);
	size_t size = watermark - sp;

	if (WARN_ON(sp > watermark))
		return;
	kasan_unpoison_shadow(sp, size);
}

void kasan_alloc_pages(struct page *page, unsigned int order)
{
	if (unlikely(PageHighMem(page)))
		return;
	kasan_unpoison_shadow(page_address(page), PAGE_SIZE << order);
}

void kasan_free_pages(struct page *page, unsigned int order)
{
	if (likely(!PageHighMem(page)))
		kasan_poison_shadow(page_address(page),
				PAGE_SIZE << order,
				KASAN_FREE_PAGE);
}

/*
 * Adaptive redzone policy taken from the userspace AddressSanitizer runtime.
 * For larger allocations larger redzones are used.
 */
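/*
 * For instance, a 48-byte object satisfies "object_size <= 64 - 16", so it
 * gets a 16-byte redzone and object plus redzone fill a 64-byte slot.
 */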
static inline unsigned int optimal_redzone(unsigned int object_size)
{
	if (IS_ENABLED(CONFIG_KASAN_SW_TAGS))
		return 0;

	return
		object_size <= 64        - 16   ? 16 :
		object_size <= 128       - 32   ? 32 :
		object_size <= 512       - 64   ? 64 :
		object_size <= 4096      - 128  ? 128 :
		object_size <= (1 << 14) - 256  ? 256 :
		object_size <= (1 << 15) - 512  ? 512 :
		object_size <= (1 << 16) - 1024 ? 1024 : 2048;
}

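/*
 * Grow the object size to make room for the alloc metadata, for the free
 * metadata where it cannot live inside the object (generic mode only), and
 * for an adaptive redzone. If the metadata no longer fits below
 * KMALLOC_MAX_SIZE, KASAN is left disabled for this cache.
 */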
void kasan_cache_create(struct kmem_cache *cache, unsigned int *size,
			slab_flags_t *flags)
{
	unsigned int orig_size = *size;
	unsigned int redzone_size;
	int redzone_adjust;

	/* Add alloc meta. */
	cache->kasan_info.alloc_meta_offset = *size;
	*size += sizeof(struct kasan_alloc_meta);

	/* Add free meta. */
	if (IS_ENABLED(CONFIG_KASAN_GENERIC) &&
	    (cache->flags & SLAB_TYPESAFE_BY_RCU || cache->ctor ||
	     cache->object_size < sizeof(struct kasan_free_meta))) {
		cache->kasan_info.free_meta_offset = *size;
		*size += sizeof(struct kasan_free_meta);
	}

	redzone_size = optimal_redzone(cache->object_size);
	redzone_adjust = redzone_size - (*size - cache->object_size);
	if (redzone_adjust > 0)
		*size += redzone_adjust;

	*size = min_t(unsigned int, KMALLOC_MAX_SIZE,
			max(*size, cache->object_size + redzone_size));

	/*
	 * If the metadata doesn't fit, don't enable KASAN at all.
	 */
	if (*size <= cache->kasan_info.alloc_meta_offset ||
	    *size <= cache->kasan_info.free_meta_offset) {
		cache->kasan_info.alloc_meta_offset = 0;
		cache->kasan_info.free_meta_offset = 0;
		*size = orig_size;
		return;
	}

	cache->align = round_up(cache->align, KASAN_SHADOW_SCALE_SIZE);

	*flags |= SLAB_KASAN;
}

size_t kasan_metadata_size(struct kmem_cache *cache)
{
	return (cache->kasan_info.alloc_meta_offset ?
		sizeof(struct kasan_alloc_meta) : 0) +
		(cache->kasan_info.free_meta_offset ?
		sizeof(struct kasan_free_meta) : 0);
}

struct kasan_alloc_meta *get_alloc_info(struct kmem_cache *cache,
					const void *object)
{
	BUILD_BUG_ON(sizeof(struct kasan_alloc_meta) > 32);
	return (void *)object + cache->kasan_info.alloc_meta_offset;
}

struct kasan_free_meta *get_free_info(struct kmem_cache *cache,
				      const void *object)
{
	BUILD_BUG_ON(sizeof(struct kasan_free_meta) > 32);
	return (void *)object + cache->kasan_info.free_meta_offset;
}

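/*
 * A new slab page starts out with all of its shadow poisoned as redzone;
 * individual objects are unpoisoned (e.g. around constructor calls) and
 * re-poisoned, rounded up to a whole granule, as they go in and out of use.
 */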
void kasan_poison_slab(struct page *page)
{
	kasan_poison_shadow(page_address(page),
			PAGE_SIZE << compound_order(page),
			KASAN_KMALLOC_REDZONE);
}

void kasan_unpoison_object_data(struct kmem_cache *cache, void *object)
{
	kasan_unpoison_shadow(object, cache->object_size);
}

void kasan_poison_object_data(struct kmem_cache *cache, void *object)
{
	kasan_poison_shadow(object,
			round_up(cache->object_size, KASAN_SHADOW_SCALE_SIZE),
			KASAN_KMALLOC_REDZONE);
}

/*
 * Since it's desirable to only call object constructors once during slab
 * allocation, we preassign tags to all such objects. Also preassign tags for
 * SLAB_TYPESAFE_BY_RCU slabs to avoid use-after-free reports.
 * For the SLAB allocator we can't preassign tags randomly since the freelist
 * is stored as an array of indexes instead of a linked list. Assign tags
 * based on object indexes, so that objects that are next to each other get
 * different tags.
 * After a tag is assigned, the object always gets allocated with the same tag.
 * The reason is that we can't change tags for objects with constructors on
 * reallocation (even for non-SLAB_TYPESAFE_BY_RCU), because the constructor
 * code can save the pointer to the object somewhere (e.g. in the object
 * itself). Then if we retag it, the old saved pointer will become invalid.
 */
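/*
 * With CONFIG_SLAB, for example, the first objects of a cache get tags 0, 1,
 * 2, ... (the object index truncated to a u8), so neighbouring objects always
 * carry different tags.
 */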
static u8 assign_tag(struct kmem_cache *cache, const void *object, bool new)
{
	if (!cache->ctor && !(cache->flags & SLAB_TYPESAFE_BY_RCU))
		return new ? KASAN_TAG_KERNEL : random_tag();

#ifdef CONFIG_SLAB
	return (u8)obj_to_index(cache, virt_to_page(object), (void *)object);
#else
	return new ? random_tag() : get_tag(object);
#endif
}

void *kasan_init_slab_obj(struct kmem_cache *cache, const void *object)
{
	struct kasan_alloc_meta *alloc_info;

	if (!(cache->flags & SLAB_KASAN))
		return (void *)object;

	alloc_info = get_alloc_info(cache, object);
	__memset(alloc_info, 0, sizeof(*alloc_info));

	if (IS_ENABLED(CONFIG_KASAN_SW_TAGS))
		object = set_tag(object, assign_tag(cache, object, true));

	return (void *)object;
}

void *kasan_slab_alloc(struct kmem_cache *cache, void *object, gfp_t flags)
{
	return kasan_kmalloc(cache, object, cache->object_size, flags);
}

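/*
 * A shadow byte is invalid if it marks the granule as not fully accessible
 * (generic mode) or if it doesn't match the pointer's tag (tag-based mode).
 */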
static inline bool shadow_invalid(u8 tag, s8 shadow_byte)
{
	if (IS_ENABLED(CONFIG_KASAN_GENERIC))
		return shadow_byte < 0 ||
			shadow_byte >= KASAN_SHADOW_SCALE_SIZE;
	else
		return tag != (u8)shadow_byte;
}

static bool __kasan_slab_free(struct kmem_cache *cache, void *object,
			      unsigned long ip, bool quarantine)
{
	s8 shadow_byte;
	u8 tag;
	void *tagged_object;
	unsigned long rounded_up_size;

	tag = get_tag(object);
	tagged_object = object;
	object = reset_tag(object);

	if (unlikely(nearest_obj(cache, virt_to_head_page(object), object) !=
	    object)) {
		kasan_report_invalid_free(tagged_object, ip);
		return true;
	}

	/* RCU slabs could be legally used after free within the RCU period */
	if (unlikely(cache->flags & SLAB_TYPESAFE_BY_RCU))
		return false;

	shadow_byte = READ_ONCE(*(s8 *)kasan_mem_to_shadow(object));
	if (shadow_invalid(tag, shadow_byte)) {
		kasan_report_invalid_free(tagged_object, ip);
		return true;
	}

	rounded_up_size = round_up(cache->object_size, KASAN_SHADOW_SCALE_SIZE);
	kasan_poison_shadow(object, rounded_up_size, KASAN_KMALLOC_FREE);

	if ((IS_ENABLED(CONFIG_KASAN_GENERIC) && !quarantine) ||
			unlikely(!(cache->flags & SLAB_KASAN)))
		return false;

	set_track(&get_alloc_info(cache, object)->free_track, GFP_NOWAIT);
	quarantine_put(get_free_info(cache, object), cache);

	return IS_ENABLED(CONFIG_KASAN_GENERIC);
}

bool kasan_slab_free(struct kmem_cache *cache, void *object, unsigned long ip)
{
	return __kasan_slab_free(cache, object, ip, true);
}

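/*
 * Unpoison [object, object + size), poison the rest of the object up to
 * object_size as redzone, and, in tag-based mode, return the pointer with
 * its (re)assigned tag.
 */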
void *kasan_kmalloc(struct kmem_cache *cache, const void *object, size_t size,
		    gfp_t flags)
{
	unsigned long redzone_start;
	unsigned long redzone_end;
	u8 tag = 0xff;

	if (gfpflags_allow_blocking(flags))
		quarantine_reduce();

	if (unlikely(object == NULL))
		return NULL;

	redzone_start = round_up((unsigned long)(object + size),
				KASAN_SHADOW_SCALE_SIZE);
	redzone_end = round_up((unsigned long)object + cache->object_size,
				KASAN_SHADOW_SCALE_SIZE);

	if (IS_ENABLED(CONFIG_KASAN_SW_TAGS))
		tag = assign_tag(cache, object, false);

	/* Tag is ignored in set_tag without CONFIG_KASAN_SW_TAGS */
	kasan_unpoison_shadow(set_tag(object, tag), size);
	kasan_poison_shadow((void *)redzone_start, redzone_end - redzone_start,
		KASAN_KMALLOC_REDZONE);

	if (cache->flags & SLAB_KASAN)
		set_track(&get_alloc_info(cache, object)->alloc_track, flags);

	return set_tag(object, tag);
}
EXPORT_SYMBOL(kasan_kmalloc);

void *kasan_kmalloc_large(const void *ptr, size_t size, gfp_t flags)
{
	struct page *page;
	unsigned long redzone_start;
	unsigned long redzone_end;

	if (gfpflags_allow_blocking(flags))
		quarantine_reduce();

	if (unlikely(ptr == NULL))
		return NULL;

	page = virt_to_page(ptr);
	redzone_start = round_up((unsigned long)(ptr + size),
				KASAN_SHADOW_SCALE_SIZE);
	redzone_end = (unsigned long)ptr + (PAGE_SIZE << compound_order(page));

	kasan_unpoison_shadow(ptr, size);
	kasan_poison_shadow((void *)redzone_start, redzone_end - redzone_start,
		KASAN_PAGE_REDZONE);

	return (void *)ptr;
}

void *kasan_krealloc(const void *object, size_t size, gfp_t flags)
{
	struct page *page;

	if (unlikely(object == ZERO_SIZE_PTR))
		return (void *)object;

	page = virt_to_head_page(object);

	if (unlikely(!PageSlab(page)))
		return kasan_kmalloc_large(object, size, flags);
	else
		return kasan_kmalloc(page->slab_cache, object, size, flags);
}

void kasan_poison_kfree(void *ptr, unsigned long ip)
{
	struct page *page;

	page = virt_to_head_page(ptr);

	if (unlikely(!PageSlab(page))) {
		if (reset_tag(ptr) != page_address(page)) {
			kasan_report_invalid_free(ptr, ip);
			return;
		}
		kasan_poison_shadow(ptr, PAGE_SIZE << compound_order(page),
				KASAN_FREE_PAGE);
	} else {
		__kasan_slab_free(page->slab_cache, ptr, ip, false);
	}
}

void kasan_kfree_large(void *ptr, unsigned long ip)
{
	if (reset_tag(ptr) != page_address(virt_to_head_page(ptr)))
		kasan_report_invalid_free(ptr, ip);
	/* The object will be poisoned by page_alloc. */
}

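/*
 * Map shadow for a module's address range. One shadow byte covers
 * KASAN_SHADOW_SCALE_SIZE bytes, so e.g. an 8 KB range with an 8-byte scale
 * needs 1 KB of shadow, which round_up() then pads to a whole page.
 */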
int kasan_module_alloc(void *addr, size_t size)
{
	void *ret;
	size_t scaled_size;
	size_t shadow_size;
	unsigned long shadow_start;

	shadow_start = (unsigned long)kasan_mem_to_shadow(addr);
	scaled_size = (size + KASAN_SHADOW_MASK) >> KASAN_SHADOW_SCALE_SHIFT;
	shadow_size = round_up(scaled_size, PAGE_SIZE);

	if (WARN_ON(!PAGE_ALIGNED(shadow_start)))
		return -EINVAL;

	ret = __vmalloc_node_range(shadow_size, 1, shadow_start,
			shadow_start + shadow_size,
			GFP_KERNEL,
			PAGE_KERNEL, VM_NO_GUARD, NUMA_NO_NODE,
			__builtin_return_address(0));

	if (ret) {
		__memset(ret, KASAN_SHADOW_INIT, shadow_size);
		find_vm_area(addr)->flags |= VM_KASAN;
		kmemleak_ignore(ret);
		return 0;
	}

	return -ENOMEM;
}

void kasan_free_shadow(const struct vm_struct *vm)
{
	if (vm->flags & VM_KASAN)
		vfree(kasan_mem_to_shadow(vm->addr));
}

#ifdef CONFIG_MEMORY_HOTPLUG
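/* Walk the kernel page table to check whether shadow is already mapped. */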
static bool shadow_mapped(unsigned long addr)
{
	pgd_t *pgd = pgd_offset_k(addr);
	p4d_t *p4d;
	pud_t *pud;
	pmd_t *pmd;
	pte_t *pte;

	if (pgd_none(*pgd))
		return false;
	p4d = p4d_offset(pgd, addr);
	if (p4d_none(*p4d))
		return false;
	pud = pud_offset(p4d, addr);
	if (pud_none(*pud))
		return false;

	/*
	 * We can't use pud_large() or pud_huge(): the first one is
	 * arch-specific, the second depends on HUGETLB_PAGE. So let's abuse
	 * pud_bad(); if the pud is bad, it's bad because it's huge.
	 */
	if (pud_bad(*pud))
		return true;
	pmd = pmd_offset(pud, addr);
	if (pmd_none(*pmd))
		return false;

	if (pmd_bad(*pmd))
		return true;
	pte = pte_offset_kernel(pmd, addr);
	return !pte_none(*pte);
}

static int __meminit kasan_mem_notifier(struct notifier_block *nb,
			unsigned long action, void *data)
{
	struct memory_notify *mem_data = data;
	unsigned long nr_shadow_pages, start_kaddr, shadow_start;
	unsigned long shadow_end, shadow_size;

	nr_shadow_pages = mem_data->nr_pages >> KASAN_SHADOW_SCALE_SHIFT;
	start_kaddr = (unsigned long)pfn_to_kaddr(mem_data->start_pfn);
	shadow_start = (unsigned long)kasan_mem_to_shadow((void *)start_kaddr);
	shadow_size = nr_shadow_pages << PAGE_SHIFT;
	shadow_end = shadow_start + shadow_size;

	if (WARN_ON(mem_data->nr_pages % KASAN_SHADOW_SCALE_SIZE) ||
		WARN_ON(start_kaddr % (KASAN_SHADOW_SCALE_SIZE << PAGE_SHIFT)))
		return NOTIFY_BAD;

	switch (action) {
	case MEM_GOING_ONLINE: {
		void *ret;

		/*
		 * If the shadow is already mapped, it must have been mapped
		 * during boot. This can happen when onlining previously
		 * offlined memory.
		 */
		if (shadow_mapped(shadow_start))
			return NOTIFY_OK;

		ret = __vmalloc_node_range(shadow_size, PAGE_SIZE, shadow_start,
					shadow_end, GFP_KERNEL,
					PAGE_KERNEL, VM_NO_GUARD,
					pfn_to_nid(mem_data->start_pfn),
					__builtin_return_address(0));
		if (!ret)
			return NOTIFY_BAD;

		kmemleak_ignore(ret);
		return NOTIFY_OK;
	}
	case MEM_CANCEL_ONLINE:
	case MEM_OFFLINE: {
		struct vm_struct *vm;

		/*
		 * shadow_start was either mapped during boot by kasan_init()
		 * or during memory online by __vmalloc_node_range().
		 * In the latter case we can use vfree() to free the shadow;
		 * a non-NULL result from find_vm_area() tells us that this
		 * was indeed the latter case.
		 *
		 * Currently it's not possible to free shadow mapped
		 * during boot by kasan_init(). It's because the code
		 * to do that hasn't been written yet. So we'll just
		 * leak the memory.
		 */
		vm = find_vm_area((void *)shadow_start);
		if (vm)
			vfree((void *)shadow_start);
	}
	}

	return NOTIFY_OK;
}

static int __init kasan_memhotplug_init(void)
{
	hotplug_memory_notifier(kasan_mem_notifier, 0);

	return 0;
}

core_initcall(kasan_memhotplug_init);
#endif