// SPDX-License-Identifier: GPL-2.0
/*
 * This file contains common generic and tag-based KASAN code.
 *
 * Copyright (c) 2014 Samsung Electronics Co., Ltd.
 * Author: Andrey Ryabinin <ryabinin.a.a@gmail.com>
 *
 * Some code borrowed from https://github.com/xairy/kasan-prototype by
 * Andrey Konovalov <andreyknvl@gmail.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 */

#define __KASAN_INTERNAL

#include <linux/export.h>
#include <linux/interrupt.h>
#include <linux/init.h>
#include <linux/kasan.h>
#include <linux/kernel.h>
#include <linux/kmemleak.h>
#include <linux/linkage.h>
#include <linux/memblock.h>
#include <linux/memory.h>
#include <linux/mm.h>
#include <linux/module.h>
#include <linux/printk.h>
#include <linux/sched.h>
#include <linux/sched/task_stack.h>
#include <linux/slab.h>
#include <linux/stacktrace.h>
#include <linux/string.h>
#include <linux/types.h>
#include <linux/vmalloc.h>
#include <linux/bug.h>
#include <linux/uaccess.h>

#include "kasan.h"
#include "../slab.h"

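/*
 * Hard/soft IRQ handlers run on a stack that still contains the frames of
 * whatever task was interrupted. Cut saved traces at the IRQ entry point
 * (see filter_irq_stacks() below) so that otherwise-identical interrupt
 * stacks deduplicate properly in the stack depot.
 */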
static inline int in_irqentry_text(unsigned long ptr)
{
	return (ptr >= (unsigned long)&__irqentry_text_start &&
		ptr < (unsigned long)&__irqentry_text_end) ||
	       (ptr >= (unsigned long)&__softirqentry_text_start &&
		ptr < (unsigned long)&__softirqentry_text_end);
}

static inline unsigned int filter_irq_stacks(unsigned long *entries,
					     unsigned int nr_entries)
{
	unsigned int i;

	for (i = 0; i < nr_entries; i++) {
		if (in_irqentry_text(entries[i])) {
			/* Include the irqentry function in the stack trace. */
			return i + 1;
		}
	}
	return nr_entries;
}

static inline depot_stack_handle_t save_stack(gfp_t flags)
{
	unsigned long entries[KASAN_STACK_DEPTH];
	unsigned int nr_entries;

	nr_entries = stack_trace_save(entries, ARRAY_SIZE(entries), 0);
	nr_entries = filter_irq_stacks(entries, nr_entries);
	return stack_depot_save(entries, nr_entries, flags);
}

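/* Record the current pid and a stack depot handle for alloc/free tracking. */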
static inline void set_track(struct kasan_track *track, gfp_t flags)
{
	track->pid = current->pid;
	track->stack = save_stack(flags);
}

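/*
 * Paired helpers that adjust the per-task kasan_depth counter, used to
 * temporarily suppress KASAN reports around code that legitimately touches
 * poisoned memory (see e.g. kmemleak's object scanning).
 */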
void kasan_enable_current(void)
{
	current->kasan_depth++;
}

void kasan_disable_current(void)
{
	current->kasan_depth--;
}

void kasan_check_read(const volatile void *p, unsigned int size)
{
	check_memory_region((unsigned long)p, size, false, _RET_IP_);
}
EXPORT_SYMBOL(kasan_check_read);

void kasan_check_write(const volatile void *p, unsigned int size)
{
	check_memory_region((unsigned long)p, size, true, _RET_IP_);
}
EXPORT_SYMBOL(kasan_check_write);

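/*
 * Instrumented kernels get these definitions of memset/memmove/memcpy:
 * check the shadow for every byte of the source and destination ranges,
 * then defer to the uninstrumented __mem*() implementations.
 */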
#undef memset
void *memset(void *addr, int c, size_t len)
{
	check_memory_region((unsigned long)addr, len, true, _RET_IP_);

	return __memset(addr, c, len);
}

#undef memmove
void *memmove(void *dest, const void *src, size_t len)
{
	check_memory_region((unsigned long)src, len, false, _RET_IP_);
	check_memory_region((unsigned long)dest, len, true, _RET_IP_);

	return __memmove(dest, src, len);
}

#undef memcpy
void *memcpy(void *dest, const void *src, size_t len)
{
	check_memory_region((unsigned long)src, len, false, _RET_IP_);
	check_memory_region((unsigned long)dest, len, true, _RET_IP_);

	return __memcpy(dest, src, len);
}

/*
 * Poisons the shadow memory for 'size' bytes starting from 'addr'.
 * Memory addresses should be aligned to KASAN_SHADOW_SCALE_SIZE.
 */
void kasan_poison_shadow(const void *address, size_t size, u8 value)
{
	void *shadow_start, *shadow_end;

	/*
	 * Perform shadow offset calculation based on untagged address, as
	 * some of the callers (e.g. kasan_poison_object_data) pass tagged
	 * addresses to this function.
	 */
	address = reset_tag(address);

	shadow_start = kasan_mem_to_shadow(address);
	shadow_end = kasan_mem_to_shadow(address + size);

	__memset(shadow_start, value, shadow_end - shadow_start);
}

void kasan_unpoison_shadow(const void *address, size_t size)
{
	u8 tag = get_tag(address);

	/*
	 * Perform shadow offset calculation based on untagged address, as
	 * some of the callers (e.g. kasan_unpoison_object_data) pass tagged
	 * addresses to this function.
	 */
	address = reset_tag(address);

	kasan_poison_shadow(address, size, tag);

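	/*
	 * A partial shadow granule at the end is encoded in the last shadow
	 * byte: the pointer tag for tag-based mode, or the number of
	 * accessible bytes in that granule for generic mode.
	 */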
	if (size & KASAN_SHADOW_MASK) {
		u8 *shadow = (u8 *)kasan_mem_to_shadow(address + size);

		if (IS_ENABLED(CONFIG_KASAN_SW_TAGS))
			*shadow = tag;
		else
			*shadow = size & KASAN_SHADOW_MASK;
	}
}

static void __kasan_unpoison_stack(struct task_struct *task, const void *sp)
{
	void *base = task_stack_page(task);
	size_t size = sp - base;

	kasan_unpoison_shadow(base, size);
}

/* Unpoison the entire stack for a task. */
void kasan_unpoison_task_stack(struct task_struct *task)
{
	__kasan_unpoison_stack(task, task_stack_page(task) + THREAD_SIZE);
}

/* Unpoison the stack for the current task beyond a watermark sp value. */
asmlinkage void kasan_unpoison_task_stack_below(const void *watermark)
{
	/*
	 * Calculate the task stack base address. Avoid using 'current'
	 * because this function is called by early resume code which hasn't
	 * yet set up the percpu register (%gs).
	 */
	void *base = (void *)((unsigned long)watermark & ~(THREAD_SIZE - 1));

	kasan_unpoison_shadow(base, watermark - base);
}

/*
 * Clear all poison for the region between the current SP and a provided
 * watermark value, as is sometimes required prior to hand-crafted asm function
 * returns in the middle of functions.
 */
void kasan_unpoison_stack_above_sp_to(const void *watermark)
{
	const void *sp = __builtin_frame_address(0);
	size_t size = watermark - sp;

	if (WARN_ON(sp > watermark))
		return;
	kasan_unpoison_shadow(sp, size);
}

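/*
 * With CONFIG_KASAN_SW_TAGS the whole allocation gets one random tag,
 * recorded in each page's flags (page_kasan_tag_set() is a no-op
 * otherwise); the range is then unpoisoned with that tag.
 */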
void kasan_alloc_pages(struct page *page, unsigned int order)
{
	u8 tag;
	unsigned long i;

	if (unlikely(PageHighMem(page)))
		return;

	tag = random_tag();
	for (i = 0; i < (1 << order); i++)
		page_kasan_tag_set(page + i, tag);
	kasan_unpoison_shadow(page_address(page), PAGE_SIZE << order);
}

void kasan_free_pages(struct page *page, unsigned int order)
{
	if (likely(!PageHighMem(page)))
		kasan_poison_shadow(page_address(page),
				PAGE_SIZE << order,
				KASAN_FREE_PAGE);
}

/*
 * Adaptive redzone policy taken from the userspace AddressSanitizer runtime.
 * For larger allocations larger redzones are used.
 */
static inline unsigned int optimal_redzone(unsigned int object_size)
{
	if (IS_ENABLED(CONFIG_KASAN_SW_TAGS))
		return 0;

	return
		object_size <= 64 - 16 ? 16 :
		object_size <= 128 - 32 ? 32 :
		object_size <= 512 - 64 ? 64 :
		object_size <= 4096 - 128 ? 128 :
		object_size <= (1 << 14) - 256 ? 256 :
		object_size <= (1 << 15) - 512 ? 512 :
		object_size <= (1 << 16) - 1024 ? 1024 : 2048;
}

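/*
 * Grow the slab object size to hold KASAN's alloc/free metadata past the
 * object, then round the total up so the area past the object is at least
 * optimal_redzone() bytes. If the metadata can't fit within
 * KMALLOC_MAX_SIZE, leave the cache size unchanged and keep KASAN disabled
 * for this cache (SLAB_KASAN stays clear).
 */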
void kasan_cache_create(struct kmem_cache *cache, unsigned int *size,
			slab_flags_t *flags)
{
	unsigned int orig_size = *size;
	unsigned int redzone_size;
	int redzone_adjust;

	/* Add alloc meta. */
	cache->kasan_info.alloc_meta_offset = *size;
	*size += sizeof(struct kasan_alloc_meta);

	/* Add free meta. */
	if (IS_ENABLED(CONFIG_KASAN_GENERIC) &&
	    (cache->flags & SLAB_TYPESAFE_BY_RCU || cache->ctor ||
	     cache->object_size < sizeof(struct kasan_free_meta))) {
		cache->kasan_info.free_meta_offset = *size;
		*size += sizeof(struct kasan_free_meta);
	}

	redzone_size = optimal_redzone(cache->object_size);
	redzone_adjust = redzone_size - (*size - cache->object_size);
	if (redzone_adjust > 0)
		*size += redzone_adjust;

	*size = min_t(unsigned int, KMALLOC_MAX_SIZE,
			max(*size, cache->object_size + redzone_size));

	/*
	 * If the metadata doesn't fit, don't enable KASAN at all.
	 */
	if (*size <= cache->kasan_info.alloc_meta_offset ||
			*size <= cache->kasan_info.free_meta_offset) {
		cache->kasan_info.alloc_meta_offset = 0;
		cache->kasan_info.free_meta_offset = 0;
		*size = orig_size;
		return;
	}

	*flags |= SLAB_KASAN;
}

size_t kasan_metadata_size(struct kmem_cache *cache)
{
	return (cache->kasan_info.alloc_meta_offset ?
		sizeof(struct kasan_alloc_meta) : 0) +
		(cache->kasan_info.free_meta_offset ?
		sizeof(struct kasan_free_meta) : 0);
}

struct kasan_alloc_meta *get_alloc_info(struct kmem_cache *cache,
					const void *object)
{
	BUILD_BUG_ON(sizeof(struct kasan_alloc_meta) > 32);
	return (void *)object + cache->kasan_info.alloc_meta_offset;
}

struct kasan_free_meta *get_free_info(struct kmem_cache *cache,
				      const void *object)
{
	BUILD_BUG_ON(sizeof(struct kasan_free_meta) > 32);
	return (void *)object + cache->kasan_info.free_meta_offset;
}

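/*
 * A new slab page starts out fully poisoned as redzone, with the page tags
 * reset to the default; objects are unpoisoned individually as they are
 * handed out.
 */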
void kasan_poison_slab(struct page *page)
{
	unsigned long i;

	for (i = 0; i < (1 << compound_order(page)); i++)
		page_kasan_tag_reset(page + i);
	kasan_poison_shadow(page_address(page),
			PAGE_SIZE << compound_order(page),
			KASAN_KMALLOC_REDZONE);
}

void kasan_unpoison_object_data(struct kmem_cache *cache, void *object)
{
	kasan_unpoison_shadow(object, cache->object_size);
}

void kasan_poison_object_data(struct kmem_cache *cache, void *object)
{
	kasan_poison_shadow(object,
			round_up(cache->object_size, KASAN_SHADOW_SCALE_SIZE),
			KASAN_KMALLOC_REDZONE);
}

/*
 * This function assigns a tag to an object considering the following:
 * 1. A cache might have a constructor, which might save a pointer to a slab
 *    object somewhere (e.g. in the object itself). We preassign a tag for
 *    each object in caches with constructors during slab creation and reuse
 *    the same tag each time a particular object is allocated.
 * 2. A cache might be SLAB_TYPESAFE_BY_RCU, which means objects can be
 *    accessed after being freed. We preassign tags for objects in these
 *    caches as well.
 * 3. For the SLAB allocator we can't preassign tags randomly since the
 *    freelist is stored as an array of indexes instead of a linked list.
 *    Assign tags based on object indexes, so that objects that are next
 *    to each other get different tags.
 */
static u8 assign_tag(struct kmem_cache *cache, const void *object,
			bool init, bool keep_tag)
{
	/*
	 * 1. When an object is kmalloc()'ed, two hooks are called:
	 *    kasan_slab_alloc() and kasan_kmalloc(). We assign the
	 *    tag only in the first one.
	 * 2. We reuse the same tag for krealloc'ed objects.
	 */
	if (keep_tag)
		return get_tag(object);

	/*
	 * If the cache neither has a constructor nor has SLAB_TYPESAFE_BY_RCU
	 * set, assign a tag when the object is being allocated (init == false).
	 */
	if (!cache->ctor && !(cache->flags & SLAB_TYPESAFE_BY_RCU))
		return init ? KASAN_TAG_KERNEL : random_tag();

	/* For caches that either have a constructor or SLAB_TYPESAFE_BY_RCU: */
#ifdef CONFIG_SLAB
	/* For SLAB assign tags based on the object index in the freelist. */
	return (u8)obj_to_index(cache, virt_to_page(object), (void *)object);
#else
	/*
	 * For SLUB assign a random tag during slab creation, otherwise reuse
	 * the already assigned tag.
	 */
	return init ? random_tag() : get_tag(object);
#endif
}

void * __must_check kasan_init_slab_obj(struct kmem_cache *cache,
					const void *object)
{
	struct kasan_alloc_meta *alloc_info;

	if (!(cache->flags & SLAB_KASAN))
		return (void *)object;

	alloc_info = get_alloc_info(cache, object);
	__memset(alloc_info, 0, sizeof(*alloc_info));

	if (IS_ENABLED(CONFIG_KASAN_SW_TAGS))
		object = set_tag(object,
				assign_tag(cache, object, true, false));

	return (void *)object;
}

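/*
 * Used on the free path to catch double-free and invalid-free: for generic
 * KASAN a live object's shadow byte must lie in [0, KASAN_SHADOW_SCALE_SIZE),
 * while for tag-based KASAN it must match the pointer tag exactly.
 */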
static inline bool shadow_invalid(u8 tag, s8 shadow_byte)
{
	if (IS_ENABLED(CONFIG_KASAN_GENERIC))
		return shadow_byte < 0 ||
			shadow_byte >= KASAN_SHADOW_SCALE_SIZE;
	else
		return tag != (u8)shadow_byte;
}

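/*
 * Returns true when the free was fully handled here, either because it was
 * invalid (and got reported) or because the object went into quarantine,
 * meaning the allocator must not actually free the object.
 */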
static bool __kasan_slab_free(struct kmem_cache *cache, void *object,
			      unsigned long ip, bool quarantine)
{
	s8 shadow_byte;
	u8 tag;
	void *tagged_object;
	unsigned long rounded_up_size;

	tag = get_tag(object);
	tagged_object = object;
	object = reset_tag(object);

	if (unlikely(nearest_obj(cache, virt_to_head_page(object), object) !=
	    object)) {
		kasan_report_invalid_free(tagged_object, ip);
		return true;
	}

	/* RCU slabs could be legally used after free within the RCU period */
	if (unlikely(cache->flags & SLAB_TYPESAFE_BY_RCU))
		return false;

	shadow_byte = READ_ONCE(*(s8 *)kasan_mem_to_shadow(object));
	if (shadow_invalid(tag, shadow_byte)) {
		kasan_report_invalid_free(tagged_object, ip);
		return true;
	}

	rounded_up_size = round_up(cache->object_size, KASAN_SHADOW_SCALE_SIZE);
	kasan_poison_shadow(object, rounded_up_size, KASAN_KMALLOC_FREE);

	if ((IS_ENABLED(CONFIG_KASAN_GENERIC) && !quarantine) ||
			unlikely(!(cache->flags & SLAB_KASAN)))
		return false;

	set_track(&get_alloc_info(cache, object)->free_track, GFP_NOWAIT);
	quarantine_put(get_free_info(cache, object), cache);

	return IS_ENABLED(CONFIG_KASAN_GENERIC);
}

bool kasan_slab_free(struct kmem_cache *cache, void *object, unsigned long ip)
{
	return __kasan_slab_free(cache, object, ip, true);
}

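/*
 * Common tail for all allocation hooks: unpoison the first 'size' bytes of
 * the object, poison the rest of the slot up to object_size as redzone,
 * and record the allocation stack trace in the object's metadata.
 */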
static void *__kasan_kmalloc(struct kmem_cache *cache, const void *object,
				size_t size, gfp_t flags, bool keep_tag)
{
	unsigned long redzone_start;
	unsigned long redzone_end;
	u8 tag = 0xff;

	if (gfpflags_allow_blocking(flags))
		quarantine_reduce();

	if (unlikely(object == NULL))
		return NULL;

	redzone_start = round_up((unsigned long)(object + size),
				KASAN_SHADOW_SCALE_SIZE);
	redzone_end = round_up((unsigned long)object + cache->object_size,
				KASAN_SHADOW_SCALE_SIZE);

	if (IS_ENABLED(CONFIG_KASAN_SW_TAGS))
		tag = assign_tag(cache, object, false, keep_tag);

	/* Tag is ignored in set_tag without CONFIG_KASAN_SW_TAGS */
	kasan_unpoison_shadow(set_tag(object, tag), size);
	kasan_poison_shadow((void *)redzone_start, redzone_end - redzone_start,
		KASAN_KMALLOC_REDZONE);

	if (cache->flags & SLAB_KASAN)
		set_track(&get_alloc_info(cache, object)->alloc_track, flags);

	return set_tag(object, tag);
}

void * __must_check kasan_slab_alloc(struct kmem_cache *cache, void *object,
					gfp_t flags)
{
	return __kasan_kmalloc(cache, object, cache->object_size, flags, false);
}

void * __must_check kasan_kmalloc(struct kmem_cache *cache, const void *object,
				size_t size, gfp_t flags)
{
	return __kasan_kmalloc(cache, object, size, flags, true);
}
EXPORT_SYMBOL(kasan_kmalloc);

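/*
 * Large kmalloc allocations bypass the slab allocator; unpoison the
 * requested size and poison the rest of the compound page as page redzone.
 */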
void * __must_check kasan_kmalloc_large(const void *ptr, size_t size,
						gfp_t flags)
{
	struct page *page;
	unsigned long redzone_start;
	unsigned long redzone_end;

	if (gfpflags_allow_blocking(flags))
		quarantine_reduce();

	if (unlikely(ptr == NULL))
		return NULL;

	page = virt_to_page(ptr);
	redzone_start = round_up((unsigned long)(ptr + size),
				KASAN_SHADOW_SCALE_SIZE);
	redzone_end = (unsigned long)ptr + (PAGE_SIZE << compound_order(page));

	kasan_unpoison_shadow(ptr, size);
	kasan_poison_shadow((void *)redzone_start, redzone_end - redzone_start,
		KASAN_PAGE_REDZONE);

	return (void *)ptr;
}

void * __must_check kasan_krealloc(const void *object, size_t size, gfp_t flags)
{
	struct page *page;

	if (unlikely(object == ZERO_SIZE_PTR))
		return (void *)object;

	page = virt_to_head_page(object);

	if (unlikely(!PageSlab(page)))
		return kasan_kmalloc_large(object, size, flags);
	else
		return __kasan_kmalloc(page->slab_cache, object, size,
						flags, true);
}

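/*
 * Poison an allocation that is being released but may be kept around by the
 * caller (e.g. a mempool element): handle both slab objects and
 * page-allocator backed allocations, and report a pointer that isn't the
 * start of the allocation as an invalid free.
 */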
void kasan_poison_kfree(void *ptr, unsigned long ip)
{
	struct page *page;

	page = virt_to_head_page(ptr);

	if (unlikely(!PageSlab(page))) {
		if (ptr != page_address(page)) {
			kasan_report_invalid_free(ptr, ip);
			return;
		}
		kasan_poison_shadow(ptr, PAGE_SIZE << compound_order(page),
				KASAN_FREE_PAGE);
	} else {
		__kasan_slab_free(page->slab_cache, ptr, ip, false);
	}
}

void kasan_kfree_large(void *ptr, unsigned long ip)
{
	if (ptr != page_address(virt_to_head_page(ptr)))
		kasan_report_invalid_free(ptr, ip);
	/* The object will be poisoned by page_alloc. */
}

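/*
 * Module space lies outside the shadow mapped at boot, so allocate shadow
 * for a new module mapping here: one shadow byte per KASAN_SHADOW_SCALE_SIZE
 * bytes of module space, rounded up to whole pages. The VM_KASAN flag tells
 * kasan_free_shadow() that this shadow needs to be freed with the area.
 */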
int kasan_module_alloc(void *addr, size_t size)
{
	void *ret;
	size_t scaled_size;
	size_t shadow_size;
	unsigned long shadow_start;

	shadow_start = (unsigned long)kasan_mem_to_shadow(addr);
	scaled_size = (size + KASAN_SHADOW_MASK) >> KASAN_SHADOW_SCALE_SHIFT;
	shadow_size = round_up(scaled_size, PAGE_SIZE);

	if (WARN_ON(!PAGE_ALIGNED(shadow_start)))
		return -EINVAL;

	ret = __vmalloc_node_range(shadow_size, 1, shadow_start,
			shadow_start + shadow_size,
			GFP_KERNEL,
			PAGE_KERNEL, VM_NO_GUARD, NUMA_NO_NODE,
			__builtin_return_address(0));

	if (ret) {
		__memset(ret, KASAN_SHADOW_INIT, shadow_size);
		find_vm_area(addr)->flags |= VM_KASAN;
		kmemleak_ignore(ret);
		return 0;
	}

	return -ENOMEM;
}

void kasan_free_shadow(const struct vm_struct *vm)
{
	if (vm->flags & VM_KASAN)
		vfree(kasan_mem_to_shadow(vm->addr));
}

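/*
 * A report can fire from within a user-access region (e.g. inside
 * copy_from_user()). Save and restore the user-access state (such as the
 * AC flag on x86) around the real report so that state stays consistent.
 */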
extern void __kasan_report(unsigned long addr, size_t size, bool is_write, unsigned long ip);

void kasan_report(unsigned long addr, size_t size, bool is_write, unsigned long ip)
{
	unsigned long flags = user_access_save();
	__kasan_report(addr, size, is_write, ip);
	user_access_restore(flags);
}

#ifdef CONFIG_MEMORY_HOTPLUG
static bool shadow_mapped(unsigned long addr)
{
	pgd_t *pgd = pgd_offset_k(addr);
	p4d_t *p4d;
	pud_t *pud;
	pmd_t *pmd;
	pte_t *pte;

	if (pgd_none(*pgd))
		return false;
	p4d = p4d_offset(pgd, addr);
	if (p4d_none(*p4d))
		return false;
	pud = pud_offset(p4d, addr);
	if (pud_none(*pud))
		return false;

	/*
	 * We can't use pud_large() or pud_huge(): the former is
	 * arch-specific, the latter depends on HUGETLB_PAGE. So let's abuse
	 * pud_bad(): if the pud is bad, it's bad because it's huge.
	 */
	if (pud_bad(*pud))
		return true;
	pmd = pmd_offset(pud, addr);
	if (pmd_none(*pmd))
		return false;

	if (pmd_bad(*pmd))
		return true;
	pte = pte_offset_kernel(pmd, addr);
	return !pte_none(*pte);
}

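/*
 * Hotplugged memory needs shadow as well: map shadow when a region goes
 * online, and free it again on offline if it was mapped here rather than
 * at boot.
 */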
static int __meminit kasan_mem_notifier(struct notifier_block *nb,
			unsigned long action, void *data)
{
	struct memory_notify *mem_data = data;
	unsigned long nr_shadow_pages, start_kaddr, shadow_start;
	unsigned long shadow_end, shadow_size;

	nr_shadow_pages = mem_data->nr_pages >> KASAN_SHADOW_SCALE_SHIFT;
	start_kaddr = (unsigned long)pfn_to_kaddr(mem_data->start_pfn);
	shadow_start = (unsigned long)kasan_mem_to_shadow((void *)start_kaddr);
	shadow_size = nr_shadow_pages << PAGE_SHIFT;
	shadow_end = shadow_start + shadow_size;

	if (WARN_ON(mem_data->nr_pages % KASAN_SHADOW_SCALE_SIZE) ||
		WARN_ON(start_kaddr % (KASAN_SHADOW_SCALE_SIZE << PAGE_SHIFT)))
		return NOTIFY_BAD;

	switch (action) {
	case MEM_GOING_ONLINE: {
		void *ret;

		/*
		 * If the shadow is already mapped, it must have been mapped
		 * during boot. This can happen when onlining previously
		 * offlined memory.
		 */
		if (shadow_mapped(shadow_start))
			return NOTIFY_OK;

		ret = __vmalloc_node_range(shadow_size, PAGE_SIZE, shadow_start,
					shadow_end, GFP_KERNEL,
					PAGE_KERNEL, VM_NO_GUARD,
					pfn_to_nid(mem_data->start_pfn),
					__builtin_return_address(0));
		if (!ret)
			return NOTIFY_BAD;

		kmemleak_ignore(ret);
		return NOTIFY_OK;
	}
	case MEM_CANCEL_ONLINE:
	case MEM_OFFLINE: {
		struct vm_struct *vm;

		/*
		 * shadow_start was either mapped during boot by kasan_init()
		 * or during memory online by __vmalloc_node_range().
		 * In the latter case we can use vfree() to free the shadow.
		 * A non-NULL result from find_vm_area() tells us that we are
		 * dealing with the latter case.
		 *
		 * Currently it's not possible to free shadow mapped during
		 * boot by kasan_init(): the code to do that hasn't been
		 * written yet, so we simply leak the memory.
		 */
		vm = find_vm_area((void *)shadow_start);
		if (vm)
			vfree((void *)shadow_start);
	}
	}

	return NOTIFY_OK;
}

static int __init kasan_memhotplug_init(void)
{
	hotplug_memory_notifier(kasan_mem_notifier, 0);

	return 0;
}

core_initcall(kasan_memhotplug_init);
#endif