/* mm/kasan/kasan.h */
/* SPDX-License-Identifier: GPL-2.0 */
#ifndef __MM_KASAN_KASAN_H
#define __MM_KASAN_KASAN_H

#include <linux/atomic.h>
#include <linux/kasan.h>
#include <linux/kasan-tags.h>
#include <linux/kfence.h>
#include <linux/stackdepot.h>

#if defined(CONFIG_KASAN_SW_TAGS) || defined(CONFIG_KASAN_HW_TAGS)

#include <linux/static_key.h>

DECLARE_STATIC_KEY_TRUE(kasan_flag_stacktrace);

static inline bool kasan_stack_collection_enabled(void)
{
	return static_branch_unlikely(&kasan_flag_stacktrace);
}

#else /* CONFIG_KASAN_SW_TAGS || CONFIG_KASAN_HW_TAGS */

static inline bool kasan_stack_collection_enabled(void)
{
	return true;
}

#endif /* CONFIG_KASAN_SW_TAGS || CONFIG_KASAN_HW_TAGS */

#ifdef CONFIG_KASAN_HW_TAGS

#include "../slab.h"

DECLARE_STATIC_KEY_TRUE(kasan_flag_vmalloc);

enum kasan_mode {
	KASAN_MODE_SYNC,
	KASAN_MODE_ASYNC,
	KASAN_MODE_ASYMM,
};
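
/*
 * Note on the modes, summarizing the helpers below (the asymmetric
 * behavior is arch-defined, e.g. arm64 MTE): sync reports tag faults
 * precisely at the faulting access, async only records that a fault
 * happened and reports it later, and asymm checks reads synchronously
 * but writes asynchronously.
 */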

extern enum kasan_mode kasan_mode __ro_after_init;

extern unsigned long kasan_page_alloc_sample;
extern unsigned int kasan_page_alloc_sample_order;
DECLARE_PER_CPU(long, kasan_page_alloc_skip);

static inline bool kasan_vmalloc_enabled(void)
{
	return static_branch_likely(&kasan_flag_vmalloc);
}

static inline bool kasan_async_fault_possible(void)
{
	return kasan_mode == KASAN_MODE_ASYNC || kasan_mode == KASAN_MODE_ASYMM;
}

static inline bool kasan_sync_fault_possible(void)
{
	return kasan_mode == KASAN_MODE_SYNC || kasan_mode == KASAN_MODE_ASYMM;
}

static inline bool kasan_sample_page_alloc(unsigned int order)
{
	/* Fast-path for when sampling is disabled. */
	if (kasan_page_alloc_sample == 1)
		return true;

	if (order < kasan_page_alloc_sample_order)
		return true;

	if (this_cpu_dec_return(kasan_page_alloc_skip) < 0) {
		this_cpu_write(kasan_page_alloc_skip,
			       kasan_page_alloc_sample - 1);
		return true;
	}

	return false;
}
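
/*
 * Illustrative example (values assumed, not defaults): with
 * kasan_page_alloc_sample == 100 and kasan_page_alloc_sample_order == 3,
 * allocations of order < 3 always return true (and get tagged), while
 * for order >= 3 the per-CPU skip counter makes roughly 1 out of every
 * 100 allocations return true; the other 99 skip KASAN poisoning.
 */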

#else /* CONFIG_KASAN_HW_TAGS */

static inline bool kasan_async_fault_possible(void)
{
	return false;
}

static inline bool kasan_sync_fault_possible(void)
{
	return true;
}

static inline bool kasan_sample_page_alloc(unsigned int order)
{
	return true;
}

#endif /* CONFIG_KASAN_HW_TAGS */

#ifdef CONFIG_KASAN_GENERIC

/* Generic KASAN uses per-object metadata to store stack traces. */
static inline bool kasan_requires_meta(void)
{
	/*
	 * Technically, Generic KASAN always collects stack traces right now.
	 * However, let's use kasan_stack_collection_enabled() in case the
	 * kasan.stacktrace command-line argument is changed to affect
	 * Generic KASAN.
	 */
	return kasan_stack_collection_enabled();
}

#else /* CONFIG_KASAN_GENERIC */

/* Tag-based KASAN modes do not use per-object metadata. */
static inline bool kasan_requires_meta(void)
{
	return false;
}

#endif /* CONFIG_KASAN_GENERIC */

#if defined(CONFIG_KASAN_GENERIC) || defined(CONFIG_KASAN_SW_TAGS)
#define KASAN_GRANULE_SIZE	(1UL << KASAN_SHADOW_SCALE_SHIFT)
#else
#include <asm/mte-kasan.h>
#define KASAN_GRANULE_SIZE	MTE_GRANULE_SIZE
#endif

#define KASAN_GRANULE_MASK	(KASAN_GRANULE_SIZE - 1)

#define KASAN_MEMORY_PER_SHADOW_PAGE	(KASAN_GRANULE_SIZE << PAGE_SHIFT)
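
/*
 * Worked example (assuming the common generic-mode values
 * KASAN_SHADOW_SCALE_SHIFT == 3 and PAGE_SHIFT == 12): each granule is
 * 8 bytes and each shadow byte describes one granule, so one 4 KiB
 * shadow page describes 8 << 12 == 32 KiB of memory.
 */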

#ifdef CONFIG_KASAN_GENERIC
#define KASAN_PAGE_FREE		0xFF  /* freed page */
#define KASAN_PAGE_REDZONE	0xFE  /* redzone for kmalloc_large allocation */
#define KASAN_SLAB_REDZONE	0xFC  /* redzone for slab object */
#define KASAN_SLAB_FREE		0xFB  /* freed slab object */
#define KASAN_VMALLOC_INVALID	0xF8  /* inaccessible space in vmap area */
#else
#define KASAN_PAGE_FREE		KASAN_TAG_INVALID
#define KASAN_PAGE_REDZONE	KASAN_TAG_INVALID
#define KASAN_SLAB_REDZONE	KASAN_TAG_INVALID
#define KASAN_SLAB_FREE		KASAN_TAG_INVALID
#define KASAN_VMALLOC_INVALID	KASAN_TAG_INVALID  /* only used for SW_TAGS */
#endif

#ifdef CONFIG_KASAN_GENERIC

#define KASAN_SLAB_FREETRACK	0xFA  /* freed slab object with free track */
#define KASAN_GLOBAL_REDZONE	0xF9  /* redzone for global variable */

/* Stack redzone shadow values. Compiler ABI, do not change. */
#define KASAN_STACK_LEFT	0xF1
#define KASAN_STACK_MID		0xF2
#define KASAN_STACK_RIGHT	0xF3
#define KASAN_STACK_PARTIAL	0xF4

/* alloca redzone shadow values. */
#define KASAN_ALLOCA_LEFT	0xCA
#define KASAN_ALLOCA_RIGHT	0xCB

/* alloca redzone size. Compiler ABI, do not change. */
#define KASAN_ALLOCA_REDZONE_SIZE	32

/* Stack frame marker. Compiler ABI, do not change. */
#define KASAN_CURRENT_STACK_FRAME_MAGIC	0x41B58AB3

/* Dummy value to avoid breaking randconfig/all*config builds. */
#ifndef KASAN_ABI_VERSION
#define KASAN_ABI_VERSION 1
#endif

#endif /* CONFIG_KASAN_GENERIC */

/* Metadata layout customization. */
#define META_BYTES_PER_BLOCK	1
#define META_BLOCKS_PER_ROW	16
#define META_BYTES_PER_ROW	(META_BLOCKS_PER_ROW * META_BYTES_PER_BLOCK)
#define META_MEM_BYTES_PER_ROW	(META_BYTES_PER_ROW * KASAN_GRANULE_SIZE)
#define META_ROWS_AROUND_ADDR	2
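
/*
 * These control the memory-state dump in reports: each printed row
 * shows META_BYTES_PER_ROW (16) metadata bytes, i.e. (assuming an
 * 8-byte generic-mode granule) META_MEM_BYTES_PER_ROW == 128 bytes of
 * memory, with META_ROWS_AROUND_ADDR rows printed above and below the
 * row containing the bad address.
 */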

#define KASAN_STACK_DEPTH	64

struct kasan_track {
	u32 pid;
	depot_stack_handle_t stack;
};

enum kasan_report_type {
	KASAN_REPORT_ACCESS,
	KASAN_REPORT_INVALID_FREE,
	KASAN_REPORT_DOUBLE_FREE,
};

struct kasan_report_info {
	/* Filled in by kasan_report_*(). */
	enum kasan_report_type type;
	void *access_addr;
	size_t access_size;
	bool is_write;
	unsigned long ip;

	/* Filled in by the common reporting code. */
	void *first_bad_addr;
	struct kmem_cache *cache;
	void *object;
	size_t alloc_size;

	/* Filled in by the mode-specific reporting code. */
	const char *bug_type;
	struct kasan_track alloc_track;
	struct kasan_track free_track;
};

/* Do not change the struct layout: compiler ABI. */
struct kasan_source_location {
	const char *filename;
	int line_no;
	int column_no;
};

/* Do not change the struct layout: compiler ABI. */
struct kasan_global {
	const void *beg;		/* Address of the beginning of the global variable. */
	size_t size;			/* Size of the global variable. */
	size_t size_with_redzone;	/* Size of the variable + size of the redzone, 32-byte aligned. */
	const void *name;
	const void *module_name;	/* Name of the module where the global variable is declared. */
	unsigned long has_dynamic_init;	/* This is needed for C++. */
#if KASAN_ABI_VERSION >= 4
	struct kasan_source_location *location;
#endif
#if KASAN_ABI_VERSION >= 5
	char *odr_indicator;
#endif
};

/* Structures for keeping alloc and free meta. */

#ifdef CONFIG_KASAN_GENERIC

struct kasan_alloc_meta {
	struct kasan_track alloc_track;
	/* Free track is stored in kasan_free_meta. */
	depot_stack_handle_t aux_stack[2];
};

struct qlist_node {
	struct qlist_node *next;
};

/*
 * Free meta is stored either in the object itself or in the redzone after the
 * object. In the former case, the free meta offset is 0. In the latter case,
 * the offset is between 0 and INT_MAX. INT_MAX marks that free meta is not
 * present.
 */
#define KASAN_NO_FREE_META	INT_MAX
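
/*
 * For example (hypothetical layout): a cache that keeps free meta in
 * the redzone might use a free_meta_offset equal to the object size,
 * while free_meta_offset == KASAN_NO_FREE_META means no free meta (and
 * thus no free stack trace) is stored for that cache's objects.
 */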

/*
 * Free meta is only used by Generic mode while the object is in quarantine.
 * After that, the slab allocator stores the freelist pointer in the object.
 */
struct kasan_free_meta {
	struct qlist_node quarantine_link;
	struct kasan_track free_track;
};

#endif /* CONFIG_KASAN_GENERIC */

#if defined(CONFIG_KASAN_SW_TAGS) || defined(CONFIG_KASAN_HW_TAGS)

struct kasan_stack_ring_entry {
	void *ptr;
	size_t size;
	u32 pid;
	depot_stack_handle_t stack;
	bool is_free;
};

struct kasan_stack_ring {
	rwlock_t lock;
	size_t size;
	atomic64_t pos;
	struct kasan_stack_ring_entry *entries;
};

#endif /* CONFIG_KASAN_SW_TAGS || CONFIG_KASAN_HW_TAGS */

#if defined(CONFIG_KASAN_GENERIC) || defined(CONFIG_KASAN_SW_TAGS)

static inline const void *kasan_shadow_to_mem(const void *shadow_addr)
{
	return (void *)(((unsigned long)shadow_addr - KASAN_SHADOW_OFFSET)
		<< KASAN_SHADOW_SCALE_SHIFT);
}
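
/*
 * kasan_shadow_to_mem() is the inverse of the mem-to-shadow mapping
 * used by the generic and SW_TAGS modes:
 *
 *	shadow_addr = (mem_addr >> KASAN_SHADOW_SCALE_SHIFT) + KASAN_SHADOW_OFFSET
 *
 * e.g. with a scale shift of 3, every 8-byte granule of memory maps to
 * one shadow byte.
 */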

static __always_inline bool addr_has_metadata(const void *addr)
{
	return (kasan_reset_tag(addr) >=
		kasan_shadow_to_mem((void *)KASAN_SHADOW_START));
}

/**
 * kasan_check_range - Check memory region, and report if invalid access.
 * @addr: the accessed address
 * @size: the accessed size
 * @write: true if access is a write access
 * @ret_ip: return address
 *
 * Return: true if access was valid, false if invalid.
 */
bool kasan_check_range(unsigned long addr, size_t size, bool write,
			unsigned long ret_ip);

#else /* CONFIG_KASAN_GENERIC || CONFIG_KASAN_SW_TAGS */

static __always_inline bool addr_has_metadata(const void *addr)
{
	return (is_vmalloc_addr(addr) || virt_addr_valid(addr));
}

#endif /* CONFIG_KASAN_GENERIC || CONFIG_KASAN_SW_TAGS */

void *kasan_find_first_bad_addr(void *addr, size_t size);
size_t kasan_get_alloc_size(void *object, struct kmem_cache *cache);
void kasan_complete_mode_report_info(struct kasan_report_info *info);
void kasan_metadata_fetch_row(char *buffer, void *row);

#if defined(CONFIG_KASAN_SW_TAGS) || defined(CONFIG_KASAN_HW_TAGS)
void kasan_print_tags(u8 addr_tag, const void *addr);
#else
static inline void kasan_print_tags(u8 addr_tag, const void *addr) { }
#endif

#if defined(CONFIG_KASAN_STACK)
void kasan_print_address_stack_frame(const void *addr);
#else
static inline void kasan_print_address_stack_frame(const void *addr) { }
#endif

#ifdef CONFIG_KASAN_GENERIC
void kasan_print_aux_stacks(struct kmem_cache *cache, const void *object);
#else
static inline void kasan_print_aux_stacks(struct kmem_cache *cache, const void *object) { }
#endif

bool kasan_report(unsigned long addr, size_t size,
		bool is_write, unsigned long ip);
void kasan_report_invalid_free(void *object, unsigned long ip, enum kasan_report_type type);

struct slab *kasan_addr_to_slab(const void *addr);

#ifdef CONFIG_KASAN_GENERIC
void kasan_init_cache_meta(struct kmem_cache *cache, unsigned int *size);
void kasan_init_object_meta(struct kmem_cache *cache, const void *object);
struct kasan_alloc_meta *kasan_get_alloc_meta(struct kmem_cache *cache,
						const void *object);
struct kasan_free_meta *kasan_get_free_meta(struct kmem_cache *cache,
						const void *object);
#else
static inline void kasan_init_cache_meta(struct kmem_cache *cache, unsigned int *size) { }
static inline void kasan_init_object_meta(struct kmem_cache *cache, const void *object) { }
#endif

depot_stack_handle_t kasan_save_stack(gfp_t flags, bool can_alloc);
void kasan_set_track(struct kasan_track *track, gfp_t flags);
void kasan_save_alloc_info(struct kmem_cache *cache, void *object, gfp_t flags);
void kasan_save_free_info(struct kmem_cache *cache, void *object);

#if defined(CONFIG_KASAN_GENERIC) && \
	(defined(CONFIG_SLAB) || defined(CONFIG_SLUB))
bool kasan_quarantine_put(struct kmem_cache *cache, void *object);
void kasan_quarantine_reduce(void);
void kasan_quarantine_remove_cache(struct kmem_cache *cache);
#else
static inline bool kasan_quarantine_put(struct kmem_cache *cache, void *object) { return false; }
static inline void kasan_quarantine_reduce(void) { }
static inline void kasan_quarantine_remove_cache(struct kmem_cache *cache) { }
#endif

#ifndef arch_kasan_set_tag
static inline const void *arch_kasan_set_tag(const void *addr, u8 tag)
{
	return addr;
}
#endif
#ifndef arch_kasan_get_tag
#define arch_kasan_get_tag(addr)	0
#endif

#define set_tag(addr, tag)	((void *)arch_kasan_set_tag((addr), (tag)))
#define get_tag(addr)		arch_kasan_get_tag(addr)

#ifdef CONFIG_KASAN_HW_TAGS

#define hw_enable_tag_checks_sync()		arch_enable_tag_checks_sync()
#define hw_enable_tag_checks_async()		arch_enable_tag_checks_async()
#define hw_enable_tag_checks_asymm()		arch_enable_tag_checks_asymm()
#define hw_suppress_tag_checks_start()		arch_suppress_tag_checks_start()
#define hw_suppress_tag_checks_stop()		arch_suppress_tag_checks_stop()
#define hw_force_async_tag_fault()		arch_force_async_tag_fault()
#define hw_get_random_tag()			arch_get_random_tag()
#define hw_get_mem_tag(addr)			arch_get_mem_tag(addr)
#define hw_set_mem_tag_range(addr, size, tag, init) \
			arch_set_mem_tag_range((addr), (size), (tag), (init))

void kasan_enable_hw_tags(void);

#else /* CONFIG_KASAN_HW_TAGS */

static inline void kasan_enable_hw_tags(void) { }

#endif /* CONFIG_KASAN_HW_TAGS */

#if defined(CONFIG_KASAN_SW_TAGS) || defined(CONFIG_KASAN_HW_TAGS)
void __init kasan_init_tags(void);
#endif /* CONFIG_KASAN_SW_TAGS || CONFIG_KASAN_HW_TAGS */

#if defined(CONFIG_KASAN_HW_TAGS) && IS_ENABLED(CONFIG_KASAN_KUNIT_TEST)

void kasan_force_async_fault(void);

#else /* CONFIG_KASAN_HW_TAGS && CONFIG_KASAN_KUNIT_TEST */

static inline void kasan_force_async_fault(void) { }

#endif /* CONFIG_KASAN_HW_TAGS && CONFIG_KASAN_KUNIT_TEST */

#ifdef CONFIG_KASAN_SW_TAGS
u8 kasan_random_tag(void);
#elif defined(CONFIG_KASAN_HW_TAGS)
static inline u8 kasan_random_tag(void) { return hw_get_random_tag(); }
#else
static inline u8 kasan_random_tag(void) { return 0; }
#endif

#ifdef CONFIG_KASAN_HW_TAGS

static inline void kasan_poison(const void *addr, size_t size, u8 value, bool init)
{
	addr = kasan_reset_tag(addr);

	/* Skip KFENCE memory if called explicitly outside of sl*b. */
	if (is_kfence_address(addr))
		return;

	if (WARN_ON((unsigned long)addr & KASAN_GRANULE_MASK))
		return;
	if (WARN_ON(size & KASAN_GRANULE_MASK))
		return;

	hw_set_mem_tag_range((void *)addr, size, value, init);
}

static inline void kasan_unpoison(const void *addr, size_t size, bool init)
{
	u8 tag = get_tag(addr);

	addr = kasan_reset_tag(addr);

	/* Skip KFENCE memory if called explicitly outside of sl*b. */
	if (is_kfence_address(addr))
		return;

	if (WARN_ON((unsigned long)addr & KASAN_GRANULE_MASK))
		return;
	/*
	 * Explicitly initialize the memory with the precise object size to
	 * avoid overwriting the slab redzone. This disables initialization in
	 * the arch code and may thus lead to a performance penalty. This
	 * penalty does not affect production builds, as slab redzones are not
	 * enabled there.
	 */
	if (__slub_debug_enabled() &&
	    init && ((unsigned long)size & KASAN_GRANULE_MASK)) {
		init = false;
		memzero_explicit((void *)addr, size);
	}
	size = round_up(size, KASAN_GRANULE_SIZE);

	hw_set_mem_tag_range((void *)addr, size, tag, init);
}
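
/*
 * Example of the rounding behavior above (HW_TAGS with 16-byte MTE
 * granules; values illustrative): kasan_unpoison(ptr, 40, false) tags
 * round_up(40, 16) == 48 bytes with the tag of ptr, so the last granule
 * is fully retagged even though the object covers only part of it.
 */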

static inline bool kasan_byte_accessible(const void *addr)
{
	u8 ptr_tag = get_tag(addr);
	u8 mem_tag = hw_get_mem_tag((void *)addr);

	return ptr_tag == KASAN_TAG_KERNEL || ptr_tag == mem_tag;
}

#else /* CONFIG_KASAN_HW_TAGS */

/**
 * kasan_poison - mark the memory range as inaccessible
 * @addr: range start address, must be aligned to KASAN_GRANULE_SIZE
 * @size: range size, must be aligned to KASAN_GRANULE_SIZE
 * @value: value that's written to metadata for the range
 * @init: whether to initialize the memory range (only for hardware tag-based)
 *
 * The size gets aligned to KASAN_GRANULE_SIZE before marking the range.
 */
void kasan_poison(const void *addr, size_t size, u8 value, bool init);

/**
 * kasan_unpoison - mark the memory range as accessible
 * @addr: range start address, must be aligned to KASAN_GRANULE_SIZE
 * @size: range size, can be unaligned
 * @init: whether to initialize the memory range (only for hardware tag-based)
 *
 * For the tag-based modes, the @size gets aligned to KASAN_GRANULE_SIZE before
 * marking the range.
 * For the generic mode, the last granule of the memory range gets partially
 * unpoisoned based on the @size.
 */
void kasan_unpoison(const void *addr, size_t size, bool init);

bool kasan_byte_accessible(const void *addr);

#endif /* CONFIG_KASAN_HW_TAGS */

#ifdef CONFIG_KASAN_GENERIC

/**
 * kasan_poison_last_granule - mark the last granule of the memory range as
 * inaccessible
 * @address: range start address, must be aligned to KASAN_GRANULE_SIZE
 * @size: range size
 *
 * This function is only available for the generic mode, as it's the only mode
 * that has partially poisoned memory granules.
 */
void kasan_poison_last_granule(const void *address, size_t size);

#else /* CONFIG_KASAN_GENERIC */

static inline void kasan_poison_last_granule(const void *address, size_t size) { }

#endif /* CONFIG_KASAN_GENERIC */

#ifndef kasan_arch_is_ready
static inline bool kasan_arch_is_ready(void)	{ return true; }
#elif !defined(CONFIG_KASAN_GENERIC) || !defined(CONFIG_KASAN_OUTLINE)
#error kasan_arch_is_ready only works in KASAN generic outline mode!
#endif

#if IS_ENABLED(CONFIG_KASAN_KUNIT_TEST)

void kasan_kunit_test_suite_start(void);
void kasan_kunit_test_suite_end(void);

#else /* CONFIG_KASAN_KUNIT_TEST */

static inline void kasan_kunit_test_suite_start(void) { }
static inline void kasan_kunit_test_suite_end(void) { }

#endif /* CONFIG_KASAN_KUNIT_TEST */

#if IS_ENABLED(CONFIG_KASAN_KUNIT_TEST) || IS_ENABLED(CONFIG_KASAN_MODULE_TEST)

bool kasan_save_enable_multi_shot(void);
void kasan_restore_multi_shot(bool enabled);

#endif

/*
 * Exported functions for interfaces called from assembly or from generated
 * code. Declared here to avoid warnings about missing declarations.
 */

asmlinkage void kasan_unpoison_task_stack_below(const void *watermark);
void __asan_register_globals(struct kasan_global *globals, size_t size);
void __asan_unregister_globals(struct kasan_global *globals, size_t size);
void __asan_handle_no_return(void);
void __asan_alloca_poison(unsigned long addr, size_t size);
void __asan_allocas_unpoison(const void *stack_top, const void *stack_bottom);

void __asan_load1(unsigned long addr);
void __asan_store1(unsigned long addr);
void __asan_load2(unsigned long addr);
void __asan_store2(unsigned long addr);
void __asan_load4(unsigned long addr);
void __asan_store4(unsigned long addr);
void __asan_load8(unsigned long addr);
void __asan_store8(unsigned long addr);
void __asan_load16(unsigned long addr);
void __asan_store16(unsigned long addr);
void __asan_loadN(unsigned long addr, size_t size);
void __asan_storeN(unsigned long addr, size_t size);

void __asan_load1_noabort(unsigned long addr);
void __asan_store1_noabort(unsigned long addr);
void __asan_load2_noabort(unsigned long addr);
void __asan_store2_noabort(unsigned long addr);
void __asan_load4_noabort(unsigned long addr);
void __asan_store4_noabort(unsigned long addr);
void __asan_load8_noabort(unsigned long addr);
void __asan_store8_noabort(unsigned long addr);
void __asan_load16_noabort(unsigned long addr);
void __asan_store16_noabort(unsigned long addr);
void __asan_loadN_noabort(unsigned long addr, size_t size);
void __asan_storeN_noabort(unsigned long addr, size_t size);

void __asan_report_load1_noabort(unsigned long addr);
void __asan_report_store1_noabort(unsigned long addr);
void __asan_report_load2_noabort(unsigned long addr);
void __asan_report_store2_noabort(unsigned long addr);
void __asan_report_load4_noabort(unsigned long addr);
void __asan_report_store4_noabort(unsigned long addr);
void __asan_report_load8_noabort(unsigned long addr);
void __asan_report_store8_noabort(unsigned long addr);
void __asan_report_load16_noabort(unsigned long addr);
void __asan_report_store16_noabort(unsigned long addr);
void __asan_report_load_n_noabort(unsigned long addr, size_t size);
void __asan_report_store_n_noabort(unsigned long addr, size_t size);

void __asan_set_shadow_00(const void *addr, size_t size);
void __asan_set_shadow_f1(const void *addr, size_t size);
void __asan_set_shadow_f2(const void *addr, size_t size);
void __asan_set_shadow_f3(const void *addr, size_t size);
void __asan_set_shadow_f5(const void *addr, size_t size);
void __asan_set_shadow_f8(const void *addr, size_t size);

void *__asan_memset(void *addr, int c, size_t len);
void *__asan_memmove(void *dest, const void *src, size_t len);
void *__asan_memcpy(void *dest, const void *src, size_t len);

void __hwasan_load1_noabort(unsigned long addr);
void __hwasan_store1_noabort(unsigned long addr);
void __hwasan_load2_noabort(unsigned long addr);
void __hwasan_store2_noabort(unsigned long addr);
void __hwasan_load4_noabort(unsigned long addr);
void __hwasan_store4_noabort(unsigned long addr);
void __hwasan_load8_noabort(unsigned long addr);
void __hwasan_store8_noabort(unsigned long addr);
void __hwasan_load16_noabort(unsigned long addr);
void __hwasan_store16_noabort(unsigned long addr);
void __hwasan_loadN_noabort(unsigned long addr, size_t size);
void __hwasan_storeN_noabort(unsigned long addr, size_t size);

void __hwasan_tag_memory(unsigned long addr, u8 tag, unsigned long size);

void *__hwasan_memset(void *addr, int c, size_t len);
void *__hwasan_memmove(void *dest, const void *src, size_t len);
void *__hwasan_memcpy(void *dest, const void *src, size_t len);

#endif /* __MM_KASAN_KASAN_H */