mm/kasan/common.c
// SPDX-License-Identifier: GPL-2.0
/*
 * This file contains common generic and tag-based KASAN code.
 *
 * Copyright (c) 2014 Samsung Electronics Co., Ltd.
 * Author: Andrey Ryabinin <ryabinin.a.a@gmail.com>
 *
 * Some code borrowed from https://github.com/xairy/kasan-prototype by
 *        Andrey Konovalov <andreyknvl@gmail.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 */

#define __KASAN_INTERNAL

#include <linux/export.h>
#include <linux/interrupt.h>
#include <linux/init.h>
#include <linux/kasan.h>
#include <linux/kernel.h>
#include <linux/kmemleak.h>
#include <linux/linkage.h>
#include <linux/memblock.h>
#include <linux/memory.h>
#include <linux/mm.h>
#include <linux/module.h>
#include <linux/printk.h>
#include <linux/sched.h>
#include <linux/sched/task_stack.h>
#include <linux/slab.h>
#include <linux/stacktrace.h>
#include <linux/string.h>
#include <linux/types.h>
#include <linux/vmalloc.h>
#include <linux/bug.h>

#include "kasan.h"
#include "../slab.h"

static inline int in_irqentry_text(unsigned long ptr)
{
        return (ptr >= (unsigned long)&__irqentry_text_start &&
                ptr < (unsigned long)&__irqentry_text_end) ||
                (ptr >= (unsigned long)&__softirqentry_text_start &&
                 ptr < (unsigned long)&__softirqentry_text_end);
}

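/*
 * Cut the stack trace at the first IRQ entry frame: the irqentry function
 * itself is kept, while the frames beyond it belong to the interrupted
 * context and are dropped from the report.
 */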
static inline void filter_irq_stacks(struct stack_trace *trace)
{
        int i;

        if (!trace->nr_entries)
                return;
        for (i = 0; i < trace->nr_entries; i++)
                if (in_irqentry_text(trace->entries[i])) {
                        /* Include the irqentry function in the stack trace. */
                        trace->nr_entries = i + 1;
                        break;
                }
}

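/*
 * Capture the current stack trace, filter out IRQ frames and store the
 * result in the stack depot, returning a handle to it.
 */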
static inline depot_stack_handle_t save_stack(gfp_t flags)
{
        unsigned long entries[KASAN_STACK_DEPTH];
        struct stack_trace trace = {
                .nr_entries = 0,
                .entries = entries,
                .max_entries = KASAN_STACK_DEPTH,
                .skip = 0
        };

        save_stack_trace(&trace);
        filter_irq_stacks(&trace);
        if (trace.nr_entries != 0 &&
            trace.entries[trace.nr_entries-1] == ULONG_MAX)
                trace.nr_entries--;

        return depot_save_stack(&trace, flags);
}

static inline void set_track(struct kasan_track *track, gfp_t flags)
{
        track->pid = current->pid;
        track->stack = save_stack(flags);
}

void kasan_enable_current(void)
{
        current->kasan_depth++;
}

void kasan_disable_current(void)
{
        current->kasan_depth--;
}

void kasan_check_read(const volatile void *p, unsigned int size)
{
        check_memory_region((unsigned long)p, size, false, _RET_IP_);
}
EXPORT_SYMBOL(kasan_check_read);

void kasan_check_write(const volatile void *p, unsigned int size)
{
        check_memory_region((unsigned long)p, size, true, _RET_IP_);
}
EXPORT_SYMBOL(kasan_check_write);

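/*
 * KASAN replacements for the string functions: check the source and/or
 * destination regions first, then defer to the uninstrumented __mem*()
 * implementations.
 */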
#undef memset
void *memset(void *addr, int c, size_t len)
{
        check_memory_region((unsigned long)addr, len, true, _RET_IP_);

        return __memset(addr, c, len);
}

#undef memmove
void *memmove(void *dest, const void *src, size_t len)
{
        check_memory_region((unsigned long)src, len, false, _RET_IP_);
        check_memory_region((unsigned long)dest, len, true, _RET_IP_);

        return __memmove(dest, src, len);
}

#undef memcpy
void *memcpy(void *dest, const void *src, size_t len)
{
        check_memory_region((unsigned long)src, len, false, _RET_IP_);
        check_memory_region((unsigned long)dest, len, true, _RET_IP_);

        return __memcpy(dest, src, len);
}

/*
 * Poisons the shadow memory for 'size' bytes starting from 'address'.
 * Memory addresses should be aligned to KASAN_SHADOW_SCALE_SIZE.
 */
void kasan_poison_shadow(const void *address, size_t size, u8 value)
{
        void *shadow_start, *shadow_end;

        /*
         * Perform shadow offset calculation based on untagged address, as
         * some of the callers (e.g. kasan_poison_object_data) pass tagged
         * addresses to this function.
         */
        address = reset_tag(address);

        shadow_start = kasan_mem_to_shadow(address);
        shadow_end = kasan_mem_to_shadow(address + size);

        __memset(shadow_start, value, shadow_end - shadow_start);
}

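/*
 * Marks the shadow for 'size' bytes starting from 'address' as accessible.
 * With tag-based KASAN the pointer tag is written to the shadow; with
 * generic KASAN a partial last granule is encoded by storing the number of
 * accessible bytes in its shadow byte.
 */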
void kasan_unpoison_shadow(const void *address, size_t size)
{
        u8 tag = get_tag(address);

        /*
         * Perform shadow offset calculation based on untagged address, as
         * some of the callers (e.g. kasan_unpoison_object_data) pass tagged
         * addresses to this function.
         */
        address = reset_tag(address);

        kasan_poison_shadow(address, size, tag);

        if (size & KASAN_SHADOW_MASK) {
                u8 *shadow = (u8 *)kasan_mem_to_shadow(address + size);

                if (IS_ENABLED(CONFIG_KASAN_SW_TAGS))
                        *shadow = tag;
                else
                        *shadow = size & KASAN_SHADOW_MASK;
        }
}

static void __kasan_unpoison_stack(struct task_struct *task, const void *sp)
{
        void *base = task_stack_page(task);
        size_t size = sp - base;

        kasan_unpoison_shadow(base, size);
}

/* Unpoison the entire stack for a task. */
void kasan_unpoison_task_stack(struct task_struct *task)
{
        __kasan_unpoison_stack(task, task_stack_page(task) + THREAD_SIZE);
}

/* Unpoison the stack for the current task beyond a watermark sp value. */
asmlinkage void kasan_unpoison_task_stack_below(const void *watermark)
{
        /*
         * Calculate the task stack base address.  Avoid using 'current'
         * because this function is called by early resume code which hasn't
         * yet set up the percpu register (%gs).
         */
        void *base = (void *)((unsigned long)watermark & ~(THREAD_SIZE - 1));

        kasan_unpoison_shadow(base, watermark - base);
}

/*
 * Clear all poison for the region between the current SP and a provided
 * watermark value, as is sometimes required prior to hand-crafted asm function
 * returns in the middle of functions.
 */
void kasan_unpoison_stack_above_sp_to(const void *watermark)
{
        const void *sp = __builtin_frame_address(0);
        size_t size = watermark - sp;

        if (WARN_ON(sp > watermark))
                return;
        kasan_unpoison_shadow(sp, size);
}

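/*
 * Page allocation hook: give the freshly allocated pages a random tag
 * (tag-based KASAN) and unpoison their shadow.
 */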
void kasan_alloc_pages(struct page *page, unsigned int order)
{
        u8 tag;
        unsigned long i;

        if (unlikely(PageHighMem(page)))
                return;

        tag = random_tag();
        for (i = 0; i < (1 << order); i++)
                page_kasan_tag_set(page + i, tag);
        kasan_unpoison_shadow(page_address(page), PAGE_SIZE << order);
}

void kasan_free_pages(struct page *page, unsigned int order)
{
        if (likely(!PageHighMem(page)))
                kasan_poison_shadow(page_address(page),
                                PAGE_SIZE << order,
                                KASAN_FREE_PAGE);
}

/*
 * Adaptive redzone policy taken from the userspace AddressSanitizer runtime.
 * For larger allocations larger redzones are used.
 */
static inline unsigned int optimal_redzone(unsigned int object_size)
{
        if (IS_ENABLED(CONFIG_KASAN_SW_TAGS))
                return 0;

        return
                object_size <= 64        - 16   ? 16 :
                object_size <= 128       - 32   ? 32 :
                object_size <= 512       - 64   ? 64 :
                object_size <= 4096      - 128  ? 128 :
                object_size <= (1 << 14) - 256  ? 256 :
                object_size <= (1 << 15) - 512  ? 512 :
                object_size <= (1 << 16) - 1024 ? 1024 : 2048;
}

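/*
 * Grow the object size to make room for KASAN metadata (allocation and,
 * where required, free tracking) plus a redzone, and set SLAB_KASAN if
 * the resulting layout still fits within KMALLOC_MAX_SIZE.
 */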
void kasan_cache_create(struct kmem_cache *cache, unsigned int *size,
                        slab_flags_t *flags)
{
        unsigned int orig_size = *size;
        unsigned int redzone_size;
        int redzone_adjust;

        /* Add alloc meta. */
        cache->kasan_info.alloc_meta_offset = *size;
        *size += sizeof(struct kasan_alloc_meta);

        /* Add free meta. */
        if (IS_ENABLED(CONFIG_KASAN_GENERIC) &&
            (cache->flags & SLAB_TYPESAFE_BY_RCU || cache->ctor ||
             cache->object_size < sizeof(struct kasan_free_meta))) {
                cache->kasan_info.free_meta_offset = *size;
                *size += sizeof(struct kasan_free_meta);
        }

        redzone_size = optimal_redzone(cache->object_size);
        redzone_adjust = redzone_size - (*size - cache->object_size);
        if (redzone_adjust > 0)
                *size += redzone_adjust;

        *size = min_t(unsigned int, KMALLOC_MAX_SIZE,
                        max(*size, cache->object_size + redzone_size));

        /*
         * If the metadata doesn't fit, don't enable KASAN at all.
         */
        if (*size <= cache->kasan_info.alloc_meta_offset ||
                        *size <= cache->kasan_info.free_meta_offset) {
                cache->kasan_info.alloc_meta_offset = 0;
                cache->kasan_info.free_meta_offset = 0;
                *size = orig_size;
                return;
        }

        *flags |= SLAB_KASAN;
}

size_t kasan_metadata_size(struct kmem_cache *cache)
{
        return (cache->kasan_info.alloc_meta_offset ?
                sizeof(struct kasan_alloc_meta) : 0) +
                (cache->kasan_info.free_meta_offset ?
                sizeof(struct kasan_free_meta) : 0);
}

struct kasan_alloc_meta *get_alloc_info(struct kmem_cache *cache,
                                        const void *object)
{
        BUILD_BUG_ON(sizeof(struct kasan_alloc_meta) > 32);
        return (void *)object + cache->kasan_info.alloc_meta_offset;
}

struct kasan_free_meta *get_free_info(struct kmem_cache *cache,
                                      const void *object)
{
        BUILD_BUG_ON(sizeof(struct kasan_free_meta) > 32);
        return (void *)object + cache->kasan_info.free_meta_offset;
}

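/*
 * Called when a slab page is allocated: reset the per-page KASAN tags and
 * poison the whole page; individual objects are unpoisoned as the
 * allocator hands them out.
 */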
void kasan_poison_slab(struct page *page)
{
        unsigned long i;

        for (i = 0; i < (1 << compound_order(page)); i++)
                page_kasan_tag_reset(page + i);
        kasan_poison_shadow(page_address(page),
                        PAGE_SIZE << compound_order(page),
                        KASAN_KMALLOC_REDZONE);
}

void kasan_unpoison_object_data(struct kmem_cache *cache, void *object)
{
        kasan_unpoison_shadow(object, cache->object_size);
}

void kasan_poison_object_data(struct kmem_cache *cache, void *object)
{
        kasan_poison_shadow(object,
                        round_up(cache->object_size, KASAN_SHADOW_SCALE_SIZE),
                        KASAN_KMALLOC_REDZONE);
}

/*
 * This function assigns a tag to an object considering the following:
 * 1. A cache might have a constructor, which might save a pointer to a slab
 *    object somewhere (e.g. in the object itself). We preassign a tag for
 *    each object in caches with constructors during slab creation and reuse
 *    the same tag each time a particular object is allocated.
 * 2. A cache might be SLAB_TYPESAFE_BY_RCU, which means objects can be
 *    accessed after being freed. We preassign tags for objects in these
 *    caches as well.
 * 3. For SLAB allocator we can't preassign tags randomly since the freelist
 *    is stored as an array of indexes instead of a linked list. Assign tags
 *    based on object indexes, so that objects that are next to each other
 *    get different tags.
 */
static u8 assign_tag(struct kmem_cache *cache, const void *object,
                        bool init, bool keep_tag)
{
        /*
         * 1. When an object is kmalloc()'ed, two hooks are called:
         *    kasan_slab_alloc() and kasan_kmalloc(). We assign the
         *    tag only in the first one.
         * 2. We reuse the same tag for krealloc'ed objects.
         */
        if (keep_tag)
                return get_tag(object);

        /*
         * If the cache neither has a constructor nor has SLAB_TYPESAFE_BY_RCU
         * set, assign a tag when the object is being allocated (init == false).
         */
        if (!cache->ctor && !(cache->flags & SLAB_TYPESAFE_BY_RCU))
                return init ? KASAN_TAG_KERNEL : random_tag();

        /* For caches that either have a constructor or SLAB_TYPESAFE_BY_RCU: */
#ifdef CONFIG_SLAB
        /* For SLAB assign tags based on the object index in the freelist. */
        return (u8)obj_to_index(cache, virt_to_page(object), (void *)object);
#else
        /*
         * For SLUB assign a random tag during slab creation, otherwise reuse
         * the already assigned tag.
         */
        return init ? random_tag() : get_tag(object);
#endif
}

void * __must_check kasan_init_slab_obj(struct kmem_cache *cache,
                                                const void *object)
{
        struct kasan_alloc_meta *alloc_info;

        if (!(cache->flags & SLAB_KASAN))
                return (void *)object;

        alloc_info = get_alloc_info(cache, object);
        __memset(alloc_info, 0, sizeof(*alloc_info));

        if (IS_ENABLED(CONFIG_KASAN_SW_TAGS))
                object = set_tag(object,
                                assign_tag(cache, object, true, false));

        return (void *)object;
}

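/*
 * Return true if the shadow byte does not correspond to an accessible
 * object: any value outside the accessible-bytes encoding for generic
 * KASAN, or a mismatch between the pointer tag and the shadow byte for
 * tag-based KASAN.
 */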
static inline bool shadow_invalid(u8 tag, s8 shadow_byte)
{
        if (IS_ENABLED(CONFIG_KASAN_GENERIC))
                return shadow_byte < 0 ||
                        shadow_byte >= KASAN_SHADOW_SCALE_SIZE;
        else
                return tag != (u8)shadow_byte;
}

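/*
 * Common free hook: report an invalid free if the pointer or the shadow
 * state looks wrong, poison the object, record the free stack and, for
 * generic KASAN, put the object into the quarantine. Returns true if the
 * slab allocator must not immediately reuse the object.
 */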
static bool __kasan_slab_free(struct kmem_cache *cache, void *object,
                              unsigned long ip, bool quarantine)
{
        s8 shadow_byte;
        u8 tag;
        void *tagged_object;
        unsigned long rounded_up_size;

        tag = get_tag(object);
        tagged_object = object;
        object = reset_tag(object);

        if (unlikely(nearest_obj(cache, virt_to_head_page(object), object) !=
            object)) {
                kasan_report_invalid_free(tagged_object, ip);
                return true;
        }

        /* RCU slabs could be legally used after free within the RCU period */
        if (unlikely(cache->flags & SLAB_TYPESAFE_BY_RCU))
                return false;

        shadow_byte = READ_ONCE(*(s8 *)kasan_mem_to_shadow(object));
        if (shadow_invalid(tag, shadow_byte)) {
                kasan_report_invalid_free(tagged_object, ip);
                return true;
        }

        rounded_up_size = round_up(cache->object_size, KASAN_SHADOW_SCALE_SIZE);
        kasan_poison_shadow(object, rounded_up_size, KASAN_KMALLOC_FREE);

        if ((IS_ENABLED(CONFIG_KASAN_GENERIC) && !quarantine) ||
                        unlikely(!(cache->flags & SLAB_KASAN)))
                return false;

        set_track(&get_alloc_info(cache, object)->free_track, GFP_NOWAIT);
        quarantine_put(get_free_info(cache, object), cache);

        return IS_ENABLED(CONFIG_KASAN_GENERIC);
}

bool kasan_slab_free(struct kmem_cache *cache, void *object, unsigned long ip)
{
        return __kasan_slab_free(cache, object, ip, true);
}

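/*
 * Common allocation hook: unpoison the object for the requested size,
 * poison the rest of the object as a redzone, record the allocation stack
 * for SLAB_KASAN caches and return the (possibly tagged) object pointer.
 */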
static void *__kasan_kmalloc(struct kmem_cache *cache, const void *object,
                                size_t size, gfp_t flags, bool keep_tag)
{
        unsigned long redzone_start;
        unsigned long redzone_end;
        u8 tag = 0xff;

        if (gfpflags_allow_blocking(flags))
                quarantine_reduce();

        if (unlikely(object == NULL))
                return NULL;

        redzone_start = round_up((unsigned long)(object + size),
                                KASAN_SHADOW_SCALE_SIZE);
        redzone_end = round_up((unsigned long)object + cache->object_size,
                                KASAN_SHADOW_SCALE_SIZE);

        if (IS_ENABLED(CONFIG_KASAN_SW_TAGS))
                tag = assign_tag(cache, object, false, keep_tag);

        /* Tag is ignored in set_tag() without CONFIG_KASAN_SW_TAGS. */
        kasan_unpoison_shadow(set_tag(object, tag), size);
        kasan_poison_shadow((void *)redzone_start, redzone_end - redzone_start,
                KASAN_KMALLOC_REDZONE);

        if (cache->flags & SLAB_KASAN)
                set_track(&get_alloc_info(cache, object)->alloc_track, flags);

        return set_tag(object, tag);
}

void * __must_check kasan_slab_alloc(struct kmem_cache *cache, void *object,
                                        gfp_t flags)
{
        return __kasan_kmalloc(cache, object, cache->object_size, flags, false);
}

void * __must_check kasan_kmalloc(struct kmem_cache *cache, const void *object,
                                size_t size, gfp_t flags)
{
        return __kasan_kmalloc(cache, object, size, flags, true);
}
EXPORT_SYMBOL(kasan_kmalloc);

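/*
 * Hook for allocations that bypass the slab allocator and come straight
 * from the page allocator: unpoison the requested size and poison the
 * remainder of the compound page as a redzone.
 */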
void * __must_check kasan_kmalloc_large(const void *ptr, size_t size,
                                                gfp_t flags)
{
        struct page *page;
        unsigned long redzone_start;
        unsigned long redzone_end;

        if (gfpflags_allow_blocking(flags))
                quarantine_reduce();

        if (unlikely(ptr == NULL))
                return NULL;

        page = virt_to_page(ptr);
        redzone_start = round_up((unsigned long)(ptr + size),
                                KASAN_SHADOW_SCALE_SIZE);
        redzone_end = (unsigned long)ptr + (PAGE_SIZE << compound_order(page));

        kasan_unpoison_shadow(ptr, size);
        kasan_poison_shadow((void *)redzone_start, redzone_end - redzone_start,
                KASAN_PAGE_REDZONE);

        return (void *)ptr;
}

void * __must_check kasan_krealloc(const void *object, size_t size, gfp_t flags)
{
        struct page *page;

        if (unlikely(object == ZERO_SIZE_PTR))
                return (void *)object;

        page = virt_to_head_page(object);

        if (unlikely(!PageSlab(page)))
                return kasan_kmalloc_large(object, size, flags);
        else
                return __kasan_kmalloc(page->slab_cache, object, size,
                                                flags, true);
}

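/*
 * Poison an object on a kfree-style path without actually freeing it:
 * page_alloc-backed allocations are poisoned whole, slab objects go
 * through the common free path with quarantine disabled.
 */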
void kasan_poison_kfree(void *ptr, unsigned long ip)
{
        struct page *page;

        page = virt_to_head_page(ptr);

        if (unlikely(!PageSlab(page))) {
                if (ptr != page_address(page)) {
                        kasan_report_invalid_free(ptr, ip);
                        return;
                }
                kasan_poison_shadow(ptr, PAGE_SIZE << compound_order(page),
                                KASAN_FREE_PAGE);
        } else {
                __kasan_slab_free(page->slab_cache, ptr, ip, false);
        }
}

void kasan_kfree_large(void *ptr, unsigned long ip)
{
        if (ptr != page_address(virt_to_head_page(ptr)))
                kasan_report_invalid_free(ptr, ip);
        /* The object will be poisoned by page_alloc. */
}

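/*
 * Allocate and initialize shadow memory for a module mapping: one shadow
 * byte covers KASAN_SHADOW_SCALE_SIZE bytes of the region. The shadow is
 * vmalloc'ed into the corresponding shadow address range and pre-filled
 * with KASAN_SHADOW_INIT.
 */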
int kasan_module_alloc(void *addr, size_t size)
{
        void *ret;
        size_t scaled_size;
        size_t shadow_size;
        unsigned long shadow_start;

        shadow_start = (unsigned long)kasan_mem_to_shadow(addr);
        scaled_size = (size + KASAN_SHADOW_MASK) >> KASAN_SHADOW_SCALE_SHIFT;
        shadow_size = round_up(scaled_size, PAGE_SIZE);

        if (WARN_ON(!PAGE_ALIGNED(shadow_start)))
                return -EINVAL;

        ret = __vmalloc_node_range(shadow_size, 1, shadow_start,
                        shadow_start + shadow_size,
                        GFP_KERNEL,
                        PAGE_KERNEL, VM_NO_GUARD, NUMA_NO_NODE,
                        __builtin_return_address(0));

        if (ret) {
                __memset(ret, KASAN_SHADOW_INIT, shadow_size);
                find_vm_area(addr)->flags |= VM_KASAN;
                kmemleak_ignore(ret);
                return 0;
        }

        return -ENOMEM;
}

void kasan_free_shadow(const struct vm_struct *vm)
{
        if (vm->flags & VM_KASAN)
                vfree(kasan_mem_to_shadow(vm->addr));
}

#ifdef CONFIG_MEMORY_HOTPLUG
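/*
 * Walk the kernel page tables to find out whether the shadow page for
 * @addr is already mapped; huge PUD/PMD mappings count as mapped.
 */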
static bool shadow_mapped(unsigned long addr)
{
        pgd_t *pgd = pgd_offset_k(addr);
        p4d_t *p4d;
        pud_t *pud;
        pmd_t *pmd;
        pte_t *pte;

        if (pgd_none(*pgd))
                return false;
        p4d = p4d_offset(pgd, addr);
        if (p4d_none(*p4d))
                return false;
        pud = pud_offset(p4d, addr);
        if (pud_none(*pud))
                return false;

        /*
         * We can't use pud_large() or pud_huge(): the former is
         * arch-specific and the latter depends on HUGETLB_PAGE. So we
         * abuse pud_bad(): here a pud can only be "bad" because it is huge.
         */
        if (pud_bad(*pud))
                return true;
        pmd = pmd_offset(pud, addr);
        if (pmd_none(*pmd))
                return false;

        if (pmd_bad(*pmd))
                return true;
        pte = pte_offset_kernel(pmd, addr);
        return !pte_none(*pte);
}

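/*
 * Memory hotplug notifier: map shadow for memory that is about to come
 * online and free vmalloc'ed shadow again when the memory goes offline.
 */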
static int __meminit kasan_mem_notifier(struct notifier_block *nb,
                        unsigned long action, void *data)
{
        struct memory_notify *mem_data = data;
        unsigned long nr_shadow_pages, start_kaddr, shadow_start;
        unsigned long shadow_end, shadow_size;

        nr_shadow_pages = mem_data->nr_pages >> KASAN_SHADOW_SCALE_SHIFT;
        start_kaddr = (unsigned long)pfn_to_kaddr(mem_data->start_pfn);
        shadow_start = (unsigned long)kasan_mem_to_shadow((void *)start_kaddr);
        shadow_size = nr_shadow_pages << PAGE_SHIFT;
        shadow_end = shadow_start + shadow_size;

        if (WARN_ON(mem_data->nr_pages % KASAN_SHADOW_SCALE_SIZE) ||
                WARN_ON(start_kaddr % (KASAN_SHADOW_SCALE_SIZE << PAGE_SHIFT)))
                return NOTIFY_BAD;

        switch (action) {
        case MEM_GOING_ONLINE: {
                void *ret;

                /*
                 * If the shadow is already mapped, it must have been mapped
                 * during boot. This can happen when previously offlined
                 * memory is onlined again.
                 */
                if (shadow_mapped(shadow_start))
                        return NOTIFY_OK;

                ret = __vmalloc_node_range(shadow_size, PAGE_SIZE, shadow_start,
                                        shadow_end, GFP_KERNEL,
                                        PAGE_KERNEL, VM_NO_GUARD,
                                        pfn_to_nid(mem_data->start_pfn),
                                        __builtin_return_address(0));
                if (!ret)
                        return NOTIFY_BAD;

                kmemleak_ignore(ret);
                return NOTIFY_OK;
        }
        case MEM_CANCEL_ONLINE:
        case MEM_OFFLINE: {
                struct vm_struct *vm;

                /*
                 * shadow_start was either mapped during boot by kasan_init()
                 * or during memory online by __vmalloc_node_range().
                 * In the latter case we can use vfree() to free the shadow;
                 * a non-NULL result from find_vm_area() tells us that this
                 * was indeed the latter case.
                 *
                 * Shadow mapped during boot by kasan_init() currently cannot
                 * be freed, because the code to do that hasn't been written
                 * yet, so that memory is simply leaked.
                 */
                vm = find_vm_area((void *)shadow_start);
                if (vm)
                        vfree((void *)shadow_start);
        }
        }

        return NOTIFY_OK;
}

static int __init kasan_memhotplug_init(void)
{
        hotplug_memory_notifier(kasan_mem_notifier, 0);

        return 0;
}

core_initcall(kasan_memhotplug_init);
#endif