/*
 * This file contains shadow memory manipulation code.
 *
 * Copyright (c) 2014 Samsung Electronics Co., Ltd.
 * Author: Andrey Ryabinin <ryabinin.a.a@gmail.com>
 *
 * Some code borrowed from https://github.com/xairy/kasan-prototype by
 *        Andrey Konovalov <adech.fo@gmail.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
#define DISABLE_BRANCH_PROFILING

#include <linux/export.h>
#include <linux/interrupt.h>
#include <linux/init.h>
#include <linux/kasan.h>
#include <linux/kernel.h>
#include <linux/kmemleak.h>
#include <linux/linkage.h>
#include <linux/memblock.h>
#include <linux/memory.h>
#include <linux/mm.h>
#include <linux/module.h>
#include <linux/printk.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/stacktrace.h>
#include <linux/string.h>
#include <linux/types.h>
#include <linux/vmalloc.h>

#include "kasan.h"
#include "../slab.h"

/*
 * Poisons the shadow memory for 'size' bytes starting from 'addr'.
 * Memory addresses should be aligned to KASAN_SHADOW_SCALE_SIZE.
 */
static void kasan_poison_shadow(const void *address, size_t size, u8 value)
{
        void *shadow_start, *shadow_end;

        shadow_start = kasan_mem_to_shadow(address);
        shadow_end = kasan_mem_to_shadow(address + size);

        memset(shadow_start, value, shadow_end - shadow_start);
}

void kasan_unpoison_shadow(const void *address, size_t size)
{
        kasan_poison_shadow(address, size, 0);

        if (size & KASAN_SHADOW_MASK) {
                u8 *shadow = (u8 *)kasan_mem_to_shadow(address + size);
                *shadow = size & KASAN_SHADOW_MASK;
        }
}
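
/*
 * Worked example (illustrative, assuming the usual 8-byte shadow granule,
 * i.e. KASAN_SHADOW_SCALE_SIZE == 8): kasan_unpoison_shadow(p, 13) clears
 * one whole shadow byte for object bytes 0..7, then writes
 * 13 & KASAN_SHADOW_MASK == 5 into the next shadow byte, meaning "only
 * the first 5 bytes (bytes 8..12) of this granule are accessible".
 */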

static void __kasan_unpoison_stack(struct task_struct *task, void *sp)
{
        void *base = task_stack_page(task);
        size_t size = sp - base;

        kasan_unpoison_shadow(base, size);
}

/* Unpoison the entire stack for a task. */
void kasan_unpoison_task_stack(struct task_struct *task)
{
        __kasan_unpoison_stack(task, task_stack_page(task) + THREAD_SIZE);
}

/* Unpoison the stack for the current task beyond a watermark sp value. */
asmlinkage void kasan_unpoison_remaining_stack(void *sp)
{
        __kasan_unpoison_stack(current, sp);
}

/*
 * All functions below are always inlined so the compiler can perform
 * better optimizations in each of __asan_loadX/__asan_storeX
 * depending on memory access size X.
 */

static __always_inline bool memory_is_poisoned_1(unsigned long addr)
{
        s8 shadow_value = *(s8 *)kasan_mem_to_shadow((void *)addr);

        if (unlikely(shadow_value)) {
                s8 last_accessible_byte = addr & KASAN_SHADOW_MASK;
                return unlikely(last_accessible_byte >= shadow_value);
        }

        return false;
}
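
/*
 * Shadow semantics used above: value 0 means the whole 8-byte granule
 * is accessible, a positive value N means only the first N bytes are,
 * and negative values mark fully poisoned memory. Reading the shadow
 * as s8 lets one signed comparison catch both the partially-accessible
 * and the poisoned cases.
 */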

static __always_inline bool memory_is_poisoned_2(unsigned long addr)
{
        u16 *shadow_addr = (u16 *)kasan_mem_to_shadow((void *)addr);

        if (unlikely(*shadow_addr)) {
                if (memory_is_poisoned_1(addr + 1))
                        return true;

                /*
                 * If a single shadow byte covers the 2-byte access, we don't
                 * need to do anything more. Otherwise, test the first
                 * shadow byte.
                 */
                if (likely(((addr + 1) & KASAN_SHADOW_MASK) != 0))
                        return false;

                return unlikely(*(u8 *)shadow_addr);
        }

        return false;
}

static __always_inline bool memory_is_poisoned_4(unsigned long addr)
{
        u16 *shadow_addr = (u16 *)kasan_mem_to_shadow((void *)addr);

        if (unlikely(*shadow_addr)) {
                if (memory_is_poisoned_1(addr + 3))
                        return true;

                /*
                 * If a single shadow byte covers the 4-byte access, we don't
                 * need to do anything more. Otherwise, test the first
                 * shadow byte.
                 */
                if (likely(((addr + 3) & KASAN_SHADOW_MASK) >= 3))
                        return false;

                return unlikely(*(u8 *)shadow_addr);
        }

        return false;
}

static __always_inline bool memory_is_poisoned_8(unsigned long addr)
{
        u16 *shadow_addr = (u16 *)kasan_mem_to_shadow((void *)addr);

        if (unlikely(*shadow_addr)) {
                if (memory_is_poisoned_1(addr + 7))
                        return true;

                /*
                 * If a single shadow byte covers the 8-byte access, we don't
                 * need to do anything more. Otherwise, test the first
                 * shadow byte.
                 */
                if (likely(IS_ALIGNED(addr, KASAN_SHADOW_SCALE_SIZE)))
                        return false;

                return unlikely(*(u8 *)shadow_addr);
        }

        return false;
}

static __always_inline bool memory_is_poisoned_16(unsigned long addr)
{
        u32 *shadow_addr = (u32 *)kasan_mem_to_shadow((void *)addr);

        if (unlikely(*shadow_addr)) {
                u16 shadow_first_bytes = *(u16 *)shadow_addr;

                if (unlikely(shadow_first_bytes))
                        return true;

                /*
                 * If two shadow bytes cover the 16-byte access, we don't
                 * need to do anything more. Otherwise, test the last
                 * shadow byte.
                 */
                if (likely(IS_ALIGNED(addr, KASAN_SHADOW_SCALE_SIZE)))
                        return false;

                return memory_is_poisoned_1(addr + 15);
        }

        return false;
}

static __always_inline unsigned long bytes_is_zero(const u8 *start,
                                        size_t size)
{
        while (size) {
                if (unlikely(*start))
                        return (unsigned long)start;
                start++;
                size--;
        }

        return 0;
}

static __always_inline unsigned long memory_is_zero(const void *start,
                                                const void *end)
{
        unsigned int words;
        unsigned long ret;
        unsigned int prefix = (unsigned long)start % 8;

        if (end - start <= 16)
                return bytes_is_zero(start, end - start);

        if (prefix) {
                prefix = 8 - prefix;
                ret = bytes_is_zero(start, prefix);
                if (unlikely(ret))
                        return ret;
                start += prefix;
        }

        words = (end - start) / 8;
        while (words) {
                if (unlikely(*(u64 *)start))
                        return bytes_is_zero(start, 8);
                start += 8;
                words--;
        }

        return bytes_is_zero(start, (end - start) % 8);
}
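
/*
 * memory_is_zero() walks the shadow region byte-by-byte up to an
 * 8-byte boundary, then in u64 words, and falls back to bytes_is_zero()
 * to pinpoint the exact non-zero shadow byte once an offending word is
 * found. The returned address lets memory_is_poisoned_n() below decide
 * whether the hit is a real violation or just a partial last granule.
 */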

static __always_inline bool memory_is_poisoned_n(unsigned long addr,
                                                size_t size)
{
        unsigned long ret;

        ret = memory_is_zero(kasan_mem_to_shadow((void *)addr),
                        kasan_mem_to_shadow((void *)addr + size - 1) + 1);

        if (unlikely(ret)) {
                unsigned long last_byte = addr + size - 1;
                s8 *last_shadow = (s8 *)kasan_mem_to_shadow((void *)last_byte);

                if (unlikely(ret != (unsigned long)last_shadow ||
                        ((long)(last_byte & KASAN_SHADOW_MASK) >= *last_shadow)))
                        return true;
        }
        return false;
}

static __always_inline bool memory_is_poisoned(unsigned long addr, size_t size)
{
        if (__builtin_constant_p(size)) {
                switch (size) {
                case 1:
                        return memory_is_poisoned_1(addr);
                case 2:
                        return memory_is_poisoned_2(addr);
                case 4:
                        return memory_is_poisoned_4(addr);
                case 8:
                        return memory_is_poisoned_8(addr);
                case 16:
                        return memory_is_poisoned_16(addr);
                default:
                        BUILD_BUG();
                }
        }

        return memory_is_poisoned_n(addr, size);
}
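
/*
 * The instrumentation only emits accesses of constant size 1, 2, 4, 8
 * or 16 bytes through this path, so __builtin_constant_p() collapses
 * the switch at compile time and inlines the matching specialized
 * helper; any other constant size would trip BUILD_BUG().
 */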

static __always_inline void check_memory_region_inline(unsigned long addr,
                                                size_t size, bool write,
                                                unsigned long ret_ip)
{
        if (unlikely(size == 0))
                return;

        if (unlikely((void *)addr <
                kasan_shadow_to_mem((void *)KASAN_SHADOW_START))) {
                kasan_report(addr, size, write, ret_ip);
                return;
        }

        if (likely(!memory_is_poisoned(addr, size)))
                return;

        kasan_report(addr, size, write, ret_ip);
}

static void check_memory_region(unsigned long addr,
                                size_t size, bool write,
                                unsigned long ret_ip)
{
        check_memory_region_inline(addr, size, write, ret_ip);
}

#undef memset
void *memset(void *addr, int c, size_t len)
{
        check_memory_region((unsigned long)addr, len, true, _RET_IP_);

        return __memset(addr, c, len);
}

#undef memmove
void *memmove(void *dest, const void *src, size_t len)
{
        check_memory_region((unsigned long)src, len, false, _RET_IP_);
        check_memory_region((unsigned long)dest, len, true, _RET_IP_);

        return __memmove(dest, src, len);
}

#undef memcpy
void *memcpy(void *dest, const void *src, size_t len)
{
        check_memory_region((unsigned long)src, len, false, _RET_IP_);
        check_memory_region((unsigned long)dest, len, true, _RET_IP_);

        return __memcpy(dest, src, len);
}
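
/*
 * These interceptors replace the normal mem*() entry points: the
 * #undef drops the macro aliases that KASAN-enabled arch headers
 * install, every access is checked first, and __memset/__memmove/
 * __memcpy are the uninstrumented arch implementations that do the
 * actual work.
 */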

void kasan_alloc_pages(struct page *page, unsigned int order)
{
        if (likely(!PageHighMem(page)))
                kasan_unpoison_shadow(page_address(page), PAGE_SIZE << order);
}

void kasan_free_pages(struct page *page, unsigned int order)
{
        if (likely(!PageHighMem(page)))
                kasan_poison_shadow(page_address(page),
                                PAGE_SIZE << order,
                                KASAN_FREE_PAGE);
}

#ifdef CONFIG_SLAB
/*
 * Adaptive redzone policy taken from the userspace AddressSanitizer runtime.
 * For larger allocations larger redzones are used.
 */
static size_t optimal_redzone(size_t object_size)
{
        int rz =
                object_size <= 64        - 16   ? 16 :
                object_size <= 128       - 32   ? 32 :
                object_size <= 512       - 64   ? 64 :
                object_size <= 4096      - 128  ? 128 :
                object_size <= (1 << 14) - 256  ? 256 :
                object_size <= (1 << 15) - 512  ? 512 :
                object_size <= (1 << 16) - 1024 ? 1024 : 2048;
        return rz;
}
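
/*
 * For example, a 40-byte object fits the first bucket (40 <= 64 - 16)
 * and gets a 16-byte redzone, while a 1000-byte object falls into the
 * 4096 bucket and gets 128 bytes, keeping redzone overhead roughly
 * proportional to object size.
 */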

void kasan_cache_create(struct kmem_cache *cache, size_t *size,
                        unsigned long *flags)
{
        int redzone_adjust;

        /*
         * Make sure the adjusted size is still less than
         * KMALLOC_MAX_CACHE_SIZE.
         * TODO: this check is only useful for SLAB, but not SLUB. We'll need
         * to skip it for SLUB when it starts using kasan_cache_create().
         */
        if (*size > KMALLOC_MAX_CACHE_SIZE -
            sizeof(struct kasan_alloc_meta) -
            sizeof(struct kasan_free_meta))
                return;
        *flags |= SLAB_KASAN;

        /* Add alloc meta. */
        cache->kasan_info.alloc_meta_offset = *size;
        *size += sizeof(struct kasan_alloc_meta);

        /* Add free meta. */
        if (cache->flags & SLAB_DESTROY_BY_RCU || cache->ctor ||
            cache->object_size < sizeof(struct kasan_free_meta)) {
                cache->kasan_info.free_meta_offset = *size;
                *size += sizeof(struct kasan_free_meta);
        }
        redzone_adjust = optimal_redzone(cache->object_size) -
                (*size - cache->object_size);
        if (redzone_adjust > 0)
                *size += redzone_adjust;
        *size = min(KMALLOC_MAX_CACHE_SIZE,
                    max(*size,
                        cache->object_size +
                        optimal_redzone(cache->object_size)));
}
#endif

void kasan_cache_shrink(struct kmem_cache *cache)
{
        quarantine_remove_cache(cache);
}

void kasan_cache_destroy(struct kmem_cache *cache)
{
        quarantine_remove_cache(cache);
}
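
/*
 * Both callbacks above drain this cache's objects from the quarantine:
 * shrinking wants quarantined objects released so the memory can
 * actually be reclaimed, and destruction requires it, since no
 * quarantined object may outlive its cache.
 */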

void kasan_poison_slab(struct page *page)
{
        kasan_poison_shadow(page_address(page),
                        PAGE_SIZE << compound_order(page),
                        KASAN_KMALLOC_REDZONE);
}

void kasan_unpoison_object_data(struct kmem_cache *cache, void *object)
{
        kasan_unpoison_shadow(object, cache->object_size);
}

void kasan_poison_object_data(struct kmem_cache *cache, void *object)
{
        kasan_poison_shadow(object,
                        round_up(cache->object_size, KASAN_SHADOW_SCALE_SIZE),
                        KASAN_KMALLOC_REDZONE);
#ifdef CONFIG_SLAB
        if (cache->flags & SLAB_KASAN) {
                struct kasan_alloc_meta *alloc_info =
                        get_alloc_info(cache, object);

                alloc_info->state = KASAN_STATE_INIT;
        }
#endif
}

#ifdef CONFIG_SLAB
static inline int in_irqentry_text(unsigned long ptr)
{
        return (ptr >= (unsigned long)&__irqentry_text_start &&
                ptr < (unsigned long)&__irqentry_text_end) ||
                (ptr >= (unsigned long)&__softirqentry_text_start &&
                 ptr < (unsigned long)&__softirqentry_text_end);
}

static inline void filter_irq_stacks(struct stack_trace *trace)
{
        int i;

        if (!trace->nr_entries)
                return;
        for (i = 0; i < trace->nr_entries; i++)
                if (in_irqentry_text(trace->entries[i])) {
                        /* Include the irqentry function into the stack. */
                        trace->nr_entries = i + 1;
                        break;
                }
}
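
/*
 * Cutting the trace at the IRQ entry keeps the arbitrary stack of the
 * interrupted context out of the saved trace; otherwise nearly every
 * interrupt would produce a unique stack and bloat the stack depot.
 */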

static inline depot_stack_handle_t save_stack(gfp_t flags)
{
        unsigned long entries[KASAN_STACK_DEPTH];
        struct stack_trace trace = {
                .nr_entries = 0,
                .entries = entries,
                .max_entries = KASAN_STACK_DEPTH,
                .skip = 0
        };

        save_stack_trace(&trace);
        filter_irq_stacks(&trace);
        if (trace.nr_entries != 0 &&
            trace.entries[trace.nr_entries-1] == ULONG_MAX)
                trace.nr_entries--;

        return depot_save_stack(&trace, flags);
}
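
/*
 * Note: save_stack_trace() may terminate a trace that does not fill
 * the buffer with a ULONG_MAX sentinel entry; it is stripped above so
 * only real return addresses reach depot_save_stack().
 */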

static inline void set_track(struct kasan_track *track, gfp_t flags)
{
        track->pid = current->pid;
        track->stack = save_stack(flags);
}

struct kasan_alloc_meta *get_alloc_info(struct kmem_cache *cache,
                                        const void *object)
{
        BUILD_BUG_ON(sizeof(struct kasan_alloc_meta) > 32);
        return (void *)object + cache->kasan_info.alloc_meta_offset;
}

struct kasan_free_meta *get_free_info(struct kmem_cache *cache,
                                      const void *object)
{
        BUILD_BUG_ON(sizeof(struct kasan_free_meta) > 32);
        return (void *)object + cache->kasan_info.free_meta_offset;
}
#endif

void kasan_slab_alloc(struct kmem_cache *cache, void *object, gfp_t flags)
{
        kasan_kmalloc(cache, object, cache->object_size, flags);
}

void kasan_poison_slab_free(struct kmem_cache *cache, void *object)
{
        unsigned long size = cache->object_size;
        unsigned long rounded_up_size = round_up(size, KASAN_SHADOW_SCALE_SIZE);

        /* RCU slabs could legally be used after free within the RCU period */
        if (unlikely(cache->flags & SLAB_DESTROY_BY_RCU))
                return;

        kasan_poison_shadow(object, rounded_up_size, KASAN_KMALLOC_FREE);
}

bool kasan_slab_free(struct kmem_cache *cache, void *object)
{
#ifdef CONFIG_SLAB
        /* RCU slabs could legally be used after free within the RCU period */
        if (unlikely(cache->flags & SLAB_DESTROY_BY_RCU))
                return false;

        if (likely(cache->flags & SLAB_KASAN)) {
                struct kasan_alloc_meta *alloc_info =
                        get_alloc_info(cache, object);
                struct kasan_free_meta *free_info =
                        get_free_info(cache, object);

                switch (alloc_info->state) {
                case KASAN_STATE_ALLOC:
                        alloc_info->state = KASAN_STATE_QUARANTINE;
                        quarantine_put(free_info, cache);
                        set_track(&free_info->track, GFP_NOWAIT);
                        kasan_poison_slab_free(cache, object);
                        return true;
                case KASAN_STATE_QUARANTINE:
                case KASAN_STATE_FREE:
                        pr_err("Double free\n");
                        dump_stack();
                        break;
                default:
                        break;
                }
        }
        return false;
#else
        kasan_poison_slab_free(cache, object);
        return false;
#endif
}
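
/*
 * Object states under CONFIG_SLAB (see also quarantine.c):
 * KASAN_STATE_ALLOC after allocation, KASAN_STATE_QUARANTINE once
 * freed and parked in the quarantine, KASAN_STATE_FREE after the
 * quarantine actually releases the object. A free observed in either
 * of the last two states is a double free.
 */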

void kasan_kmalloc(struct kmem_cache *cache, const void *object, size_t size,
                   gfp_t flags)
{
        unsigned long redzone_start;
        unsigned long redzone_end;

        if (flags & __GFP_RECLAIM)
                quarantine_reduce();

        if (unlikely(object == NULL))
                return;

        redzone_start = round_up((unsigned long)(object + size),
                                KASAN_SHADOW_SCALE_SIZE);
        redzone_end = round_up((unsigned long)object + cache->object_size,
                                KASAN_SHADOW_SCALE_SIZE);

        kasan_unpoison_shadow(object, size);
        kasan_poison_shadow((void *)redzone_start, redzone_end - redzone_start,
                KASAN_KMALLOC_REDZONE);
#ifdef CONFIG_SLAB
        if (cache->flags & SLAB_KASAN) {
                struct kasan_alloc_meta *alloc_info =
                        get_alloc_info(cache, object);

                alloc_info->state = KASAN_STATE_ALLOC;
                alloc_info->alloc_size = size;
                set_track(&alloc_info->track, flags);
        }
#endif
}
EXPORT_SYMBOL(kasan_kmalloc);
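
/*
 * Layout sketch (illustrative, assuming an 8-byte shadow granule): for
 * a kmalloc(10) served from a 32-byte cache, bytes 0..9 are unpoisoned,
 * the granule covering bytes 8..15 gets partial shadow value 2, and
 * bytes 16..31 are poisoned as KASAN_KMALLOC_REDZONE, so an access at
 * offset 10 or beyond is reported.
 */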

void kasan_kmalloc_large(const void *ptr, size_t size, gfp_t flags)
{
        struct page *page;
        unsigned long redzone_start;
        unsigned long redzone_end;

        if (flags & __GFP_RECLAIM)
                quarantine_reduce();

        if (unlikely(ptr == NULL))
                return;

        page = virt_to_page(ptr);
        redzone_start = round_up((unsigned long)(ptr + size),
                                KASAN_SHADOW_SCALE_SIZE);
        redzone_end = (unsigned long)ptr + (PAGE_SIZE << compound_order(page));

        kasan_unpoison_shadow(ptr, size);
        kasan_poison_shadow((void *)redzone_start, redzone_end - redzone_start,
                KASAN_PAGE_REDZONE);
}

void kasan_krealloc(const void *object, size_t size, gfp_t flags)
{
        struct page *page;

        if (unlikely(object == ZERO_SIZE_PTR))
                return;

        page = virt_to_head_page(object);

        if (unlikely(!PageSlab(page)))
                kasan_kmalloc_large(object, size, flags);
        else
                kasan_kmalloc(page->slab_cache, object, size, flags);
}

void kasan_kfree(void *ptr)
{
        struct page *page;

        page = virt_to_head_page(ptr);

        if (unlikely(!PageSlab(page)))
                kasan_poison_shadow(ptr, PAGE_SIZE << compound_order(page),
                                KASAN_FREE_PAGE);
        else
                kasan_slab_free(page->slab_cache, ptr);
}

void kasan_kfree_large(const void *ptr)
{
        struct page *page = virt_to_page(ptr);

        kasan_poison_shadow(ptr, PAGE_SIZE << compound_order(page),
                        KASAN_FREE_PAGE);
}

int kasan_module_alloc(void *addr, size_t size)
{
        void *ret;
        size_t shadow_size;
        unsigned long shadow_start;

        shadow_start = (unsigned long)kasan_mem_to_shadow(addr);
        shadow_size = round_up(size >> KASAN_SHADOW_SCALE_SHIFT,
                        PAGE_SIZE);

        if (WARN_ON(!PAGE_ALIGNED(shadow_start)))
                return -EINVAL;

        ret = __vmalloc_node_range(shadow_size, 1, shadow_start,
                        shadow_start + shadow_size,
                        GFP_KERNEL | __GFP_HIGHMEM | __GFP_ZERO,
                        PAGE_KERNEL, VM_NO_GUARD, NUMA_NO_NODE,
                        __builtin_return_address(0));

        if (ret) {
                find_vm_area(addr)->flags |= VM_KASAN;
                kmemleak_ignore(ret);
                return 0;
        }

        return -ENOMEM;
}
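
/*
 * Module shadow is allocated on demand at the fixed 1:8 ratio: mapping
 * a 1 MB module region, for example, needs 128 KB of shadow, rounded
 * up to whole pages and placed at kasan_mem_to_shadow(addr).
 */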

void kasan_free_shadow(const struct vm_struct *vm)
{
        if (vm->flags & VM_KASAN)
                vfree(kasan_mem_to_shadow(vm->addr));
}

static void register_global(struct kasan_global *global)
{
        size_t aligned_size = round_up(global->size, KASAN_SHADOW_SCALE_SIZE);

        kasan_unpoison_shadow(global->beg, global->size);

        kasan_poison_shadow(global->beg + aligned_size,
                global->size_with_redzone - aligned_size,
                KASAN_GLOBAL_REDZONE);
}

void __asan_register_globals(struct kasan_global *globals, size_t size)
{
        int i;

        for (i = 0; i < size; i++)
                register_global(&globals[i]);
}
EXPORT_SYMBOL(__asan_register_globals);

void __asan_unregister_globals(struct kasan_global *globals, size_t size)
{
}
EXPORT_SYMBOL(__asan_unregister_globals);

#define DEFINE_ASAN_LOAD_STORE(size)                                    \
        void __asan_load##size(unsigned long addr)                      \
        {                                                               \
                check_memory_region_inline(addr, size, false, _RET_IP_);\
        }                                                               \
        EXPORT_SYMBOL(__asan_load##size);                               \
        __alias(__asan_load##size)                                      \
        void __asan_load##size##_noabort(unsigned long);                \
        EXPORT_SYMBOL(__asan_load##size##_noabort);                     \
        void __asan_store##size(unsigned long addr)                     \
        {                                                               \
                check_memory_region_inline(addr, size, true, _RET_IP_); \
        }                                                               \
        EXPORT_SYMBOL(__asan_store##size);                              \
        __alias(__asan_store##size)                                     \
        void __asan_store##size##_noabort(unsigned long);               \
        EXPORT_SYMBOL(__asan_store##size##_noabort)

DEFINE_ASAN_LOAD_STORE(1);
DEFINE_ASAN_LOAD_STORE(2);
DEFINE_ASAN_LOAD_STORE(4);
DEFINE_ASAN_LOAD_STORE(8);
DEFINE_ASAN_LOAD_STORE(16);
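
/*
 * Each invocation above expands to a pair of checking functions plus
 * aliases: DEFINE_ASAN_LOAD_STORE(1), for example, emits __asan_load1()
 * and __asan_store1() together with __asan_load1_noabort() and
 * __asan_store1_noabort(), the entry points newer compilers call.
 */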

void __asan_loadN(unsigned long addr, size_t size)
{
        check_memory_region(addr, size, false, _RET_IP_);
}
EXPORT_SYMBOL(__asan_loadN);

__alias(__asan_loadN)
void __asan_loadN_noabort(unsigned long, size_t);
EXPORT_SYMBOL(__asan_loadN_noabort);

void __asan_storeN(unsigned long addr, size_t size)
{
        check_memory_region(addr, size, true, _RET_IP_);
}
EXPORT_SYMBOL(__asan_storeN);

__alias(__asan_storeN)
void __asan_storeN_noabort(unsigned long, size_t);
EXPORT_SYMBOL(__asan_storeN_noabort);

/* to shut up compiler complaints */
void __asan_handle_no_return(void) {}
EXPORT_SYMBOL(__asan_handle_no_return);

#ifdef CONFIG_MEMORY_HOTPLUG
static int kasan_mem_notifier(struct notifier_block *nb,
                        unsigned long action, void *data)
{
        return (action == MEM_GOING_ONLINE) ? NOTIFY_BAD : NOTIFY_OK;
}

static int __init kasan_memhotplug_init(void)
{
        pr_err("WARNING: KASAN doesn't support memory hot-add\n");
        pr_err("Memory hot-add will be disabled\n");

        hotplug_memory_notifier(kasan_mem_notifier, 0);

        return 0;
}

module_init(kasan_memhotplug_init);
#endif