kasan, vmalloc: add vmalloc tagging for SW_TAGS
[linux-block.git] include/linux/kasan.h
/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _LINUX_KASAN_H
#define _LINUX_KASAN_H

#include <linux/bug.h>
#include <linux/kasan-enabled.h>
#include <linux/kernel.h>
#include <linux/static_key.h>
#include <linux/types.h>

struct kmem_cache;
struct page;
struct slab;
struct vm_struct;
struct task_struct;

#ifdef CONFIG_KASAN

#include <linux/linkage.h>
#include <asm/kasan.h>

/* kasan_data struct is used in KUnit tests for KASAN expected failures */
struct kunit_kasan_expectation {
	bool report_found;
};

#endif

#if defined(CONFIG_KASAN_GENERIC) || defined(CONFIG_KASAN_SW_TAGS)

#include <linux/pgtable.h>

/* Software KASAN implementations use shadow memory. */

#ifdef CONFIG_KASAN_SW_TAGS
/* This matches KASAN_TAG_INVALID. */
#define KASAN_SHADOW_INIT 0xFE
#else
#define KASAN_SHADOW_INIT 0
#endif

#ifndef PTE_HWTABLE_PTRS
#define PTE_HWTABLE_PTRS 0
#endif

extern unsigned char kasan_early_shadow_page[PAGE_SIZE];
extern pte_t kasan_early_shadow_pte[MAX_PTRS_PER_PTE + PTE_HWTABLE_PTRS];
extern pmd_t kasan_early_shadow_pmd[MAX_PTRS_PER_PMD];
extern pud_t kasan_early_shadow_pud[MAX_PTRS_PER_PUD];
extern p4d_t kasan_early_shadow_p4d[MAX_PTRS_PER_P4D];

int kasan_populate_early_shadow(const void *shadow_start,
				const void *shadow_end);

static inline void *kasan_mem_to_shadow(const void *addr)
{
	return (void *)((unsigned long)addr >> KASAN_SHADOW_SCALE_SHIFT)
		+ KASAN_SHADOW_OFFSET;
}
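
/*
 * Illustrative sketch (not part of the kernel API): in the generic mode,
 * every 8-byte granule of kernel memory maps to one shadow byte at the
 * address computed by kasan_mem_to_shadow(). A shadow value of 0 means the
 * whole granule is accessible, 1..7 means only that many leading bytes are,
 * and negative values encode redzones or freed memory. A single-byte check
 * could, in outline, look like:
 *
 *	s8 shadow = *(s8 *)kasan_mem_to_shadow(addr);
 *	s8 offset = (unsigned long)addr & (KASAN_GRANULE_SIZE - 1);
 *	bool ok = shadow == 0 || offset < shadow;
 *
 * KASAN_GRANULE_SIZE stands in for the internal granule definition in
 * mm/kasan/; the real checks live there and are emitted by compiler
 * instrumentation.
 */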

int kasan_add_zero_shadow(void *start, unsigned long size);
void kasan_remove_zero_shadow(void *start, unsigned long size);

/* Enable reporting bugs after kasan_disable_current() */
extern void kasan_enable_current(void);

/* Disable reporting bugs for current task */
extern void kasan_disable_current(void);

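/*
 * Illustrative use: code that deliberately touches poisoned memory, e.g. a
 * debugging dumper, can temporarily suppress reports for the current task:
 *
 *	kasan_disable_current();
 *	dump_object_memory(obj);	// hypothetical helper; may read redzones
 *	kasan_enable_current();
 *
 * This is a sketch of the intended calling pattern, not a kernel helper.
 */
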
#else /* CONFIG_KASAN_GENERIC || CONFIG_KASAN_SW_TAGS */

static inline int kasan_add_zero_shadow(void *start, unsigned long size)
{
	return 0;
}
static inline void kasan_remove_zero_shadow(void *start,
					unsigned long size)
{}

static inline void kasan_enable_current(void) {}
static inline void kasan_disable_current(void) {}

#endif /* CONFIG_KASAN_GENERIC || CONFIG_KASAN_SW_TAGS */

#ifdef CONFIG_KASAN_HW_TAGS

#else /* CONFIG_KASAN_HW_TAGS */

#endif /* CONFIG_KASAN_HW_TAGS */

static inline bool kasan_has_integrated_init(void)
{
	return kasan_hw_tags_enabled();
}
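
/*
 * Illustrative note: when kasan_has_integrated_init() is true (HW_TAGS),
 * setting memory tags can initialize the memory at the same time, so an
 * allocator is expected to pass its "init" decision down to the KASAN hooks
 * instead of memsetting the object separately, roughly:
 *
 *	init = slab_want_init_on_alloc(flags, s);	// allocator-side policy
 *	obj = kasan_slab_alloc(s, obj, flags, init);
 *	if (obj && init && !kasan_has_integrated_init())
 *		memset(obj, 0, size);
 *
 * This is a simplified sketch of the allocator-side pattern, not a copy of
 * the slab code.
 */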

#ifdef CONFIG_KASAN

struct kasan_cache {
	int alloc_meta_offset;
	int free_meta_offset;
	bool is_kmalloc;
};

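/*
 * The hooks below follow a common pattern: the out-of-line __kasan_*()
 * implementation lives in mm/kasan/, and the inline wrapper only calls it
 * when kasan_enabled() is true (a static-key check for HW_TAGS, a
 * compile-time constant for the software modes), keeping the fast path
 * cheap when KASAN is compiled in but not active.
 */
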
slab_flags_t __kasan_never_merge(void);
static __always_inline slab_flags_t kasan_never_merge(void)
{
	if (kasan_enabled())
		return __kasan_never_merge();
	return 0;
}

void __kasan_unpoison_range(const void *addr, size_t size);
static __always_inline void kasan_unpoison_range(const void *addr, size_t size)
{
	if (kasan_enabled())
		__kasan_unpoison_range(addr, size);
}

void __kasan_poison_pages(struct page *page, unsigned int order, bool init);
static __always_inline void kasan_poison_pages(struct page *page,
						unsigned int order, bool init)
{
	if (kasan_enabled())
		__kasan_poison_pages(page, order, init);
}

void __kasan_unpoison_pages(struct page *page, unsigned int order, bool init);
static __always_inline void kasan_unpoison_pages(struct page *page,
						 unsigned int order, bool init)
{
	if (kasan_enabled())
		__kasan_unpoison_pages(page, order, init);
}

void __kasan_cache_create(struct kmem_cache *cache, unsigned int *size,
			  slab_flags_t *flags);
static __always_inline void kasan_cache_create(struct kmem_cache *cache,
				unsigned int *size, slab_flags_t *flags)
{
	if (kasan_enabled())
		__kasan_cache_create(cache, size, flags);
}

void __kasan_cache_create_kmalloc(struct kmem_cache *cache);
static __always_inline void kasan_cache_create_kmalloc(struct kmem_cache *cache)
{
	if (kasan_enabled())
		__kasan_cache_create_kmalloc(cache);
}

size_t __kasan_metadata_size(struct kmem_cache *cache);
static __always_inline size_t kasan_metadata_size(struct kmem_cache *cache)
{
	if (kasan_enabled())
		return __kasan_metadata_size(cache);
	return 0;
}

void __kasan_poison_slab(struct slab *slab);
static __always_inline void kasan_poison_slab(struct slab *slab)
{
	if (kasan_enabled())
		__kasan_poison_slab(slab);
}

void __kasan_unpoison_object_data(struct kmem_cache *cache, void *object);
static __always_inline void kasan_unpoison_object_data(struct kmem_cache *cache,
							void *object)
{
	if (kasan_enabled())
		__kasan_unpoison_object_data(cache, object);
}

void __kasan_poison_object_data(struct kmem_cache *cache, void *object);
static __always_inline void kasan_poison_object_data(struct kmem_cache *cache,
						      void *object)
{
	if (kasan_enabled())
		__kasan_poison_object_data(cache, object);
}

void * __must_check __kasan_init_slab_obj(struct kmem_cache *cache,
					  const void *object);
static __always_inline void * __must_check kasan_init_slab_obj(
				struct kmem_cache *cache, const void *object)
{
	if (kasan_enabled())
		return __kasan_init_slab_obj(cache, object);
	return (void *)object;
}

bool __kasan_slab_free(struct kmem_cache *s, void *object,
			unsigned long ip, bool init);
static __always_inline bool kasan_slab_free(struct kmem_cache *s,
						void *object, bool init)
{
	if (kasan_enabled())
		return __kasan_slab_free(s, object, _RET_IP_, init);
	return false;
}
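
/*
 * Illustrative note: a true return value means KASAN has taken the object
 * (e.g. placed it in the generic mode's quarantine), so the allocator must
 * not actually free it yet. The expected caller-side shape is roughly:
 *
 *	if (kasan_slab_free(s, object, init))
 *		return;		// deferred; released later from the quarantine
 *	// ... continue with the normal slab free path ...
 *
 * This is a simplified sketch of how the slab allocator consumes the return
 * value, not a copy of the slab code.
 */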

void __kasan_kfree_large(void *ptr, unsigned long ip);
static __always_inline void kasan_kfree_large(void *ptr)
{
	if (kasan_enabled())
		__kasan_kfree_large(ptr, _RET_IP_);
}

void __kasan_slab_free_mempool(void *ptr, unsigned long ip);
static __always_inline void kasan_slab_free_mempool(void *ptr)
{
	if (kasan_enabled())
		__kasan_slab_free_mempool(ptr, _RET_IP_);
}

void * __must_check __kasan_slab_alloc(struct kmem_cache *s,
				       void *object, gfp_t flags, bool init);
static __always_inline void * __must_check kasan_slab_alloc(
		struct kmem_cache *s, void *object, gfp_t flags, bool init)
{
	if (kasan_enabled())
		return __kasan_slab_alloc(s, object, flags, init);
	return object;
}

void * __must_check __kasan_kmalloc(struct kmem_cache *s, const void *object,
				    size_t size, gfp_t flags);
static __always_inline void * __must_check kasan_kmalloc(struct kmem_cache *s,
		const void *object, size_t size, gfp_t flags)
{
	if (kasan_enabled())
		return __kasan_kmalloc(s, object, size, flags);
	return (void *)object;
}
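
/*
 * Illustrative note: the return value must be used as the new object
 * pointer, since the tag-based modes may return a differently tagged
 * pointer. For a kmalloc-style allocation, the expected call sequence is
 * roughly:
 *
 *	object = kasan_slab_alloc(s, object, flags, init);
 *	object = kasan_kmalloc(s, object, orig_size, flags);
 *
 * where kasan_kmalloc() additionally poisons the unused space beyond
 * orig_size as a redzone. This is a simplified sketch of the allocator-side
 * usage, not a copy of the slab code.
 */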

void * __must_check __kasan_kmalloc_large(const void *ptr,
					  size_t size, gfp_t flags);
static __always_inline void * __must_check kasan_kmalloc_large(const void *ptr,
		size_t size, gfp_t flags)
{
	if (kasan_enabled())
		return __kasan_kmalloc_large(ptr, size, flags);
	return (void *)ptr;
}

void * __must_check __kasan_krealloc(const void *object,
				     size_t new_size, gfp_t flags);
static __always_inline void * __must_check kasan_krealloc(const void *object,
		size_t new_size, gfp_t flags)
{
	if (kasan_enabled())
		return __kasan_krealloc(object, new_size, flags);
	return (void *)object;
}

/*
 * Unlike kasan_check_read/write(), kasan_check_byte() is performed even for
 * the hardware tag-based mode that doesn't rely on compiler instrumentation.
 */
bool __kasan_check_byte(const void *addr, unsigned long ip);
static __always_inline bool kasan_check_byte(const void *addr)
{
	if (kasan_enabled())
		return __kasan_check_byte(addr, _RET_IP_);
	return true;
}
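
/*
 * Illustrative use: callers that are about to act on an object of unknown
 * validity can probe it first, e.g. a ksize()-style helper:
 *
 *	if (!kasan_check_byte(ptr))
 *		return 0;	// invalid access; KASAN has already reported it
 *
 * A false return value means the byte is not accessible and a report has
 * been produced for the attempted access.
 */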

bool kasan_save_enable_multi_shot(void);
void kasan_restore_multi_shot(bool enabled);

#else /* CONFIG_KASAN */

static inline slab_flags_t kasan_never_merge(void)
{
	return 0;
}
static inline void kasan_unpoison_range(const void *address, size_t size) {}
static inline void kasan_poison_pages(struct page *page, unsigned int order,
				      bool init) {}
static inline void kasan_unpoison_pages(struct page *page, unsigned int order,
					bool init) {}
static inline void kasan_cache_create(struct kmem_cache *cache,
				      unsigned int *size,
				      slab_flags_t *flags) {}
static inline void kasan_cache_create_kmalloc(struct kmem_cache *cache) {}
static inline size_t kasan_metadata_size(struct kmem_cache *cache) { return 0; }
static inline void kasan_poison_slab(struct slab *slab) {}
static inline void kasan_unpoison_object_data(struct kmem_cache *cache,
					      void *object) {}
static inline void kasan_poison_object_data(struct kmem_cache *cache,
					    void *object) {}
static inline void *kasan_init_slab_obj(struct kmem_cache *cache,
					const void *object)
{
	return (void *)object;
}
static inline bool kasan_slab_free(struct kmem_cache *s, void *object, bool init)
{
	return false;
}
static inline void kasan_kfree_large(void *ptr) {}
static inline void kasan_slab_free_mempool(void *ptr) {}
static inline void *kasan_slab_alloc(struct kmem_cache *s, void *object,
				     gfp_t flags, bool init)
{
	return object;
}
static inline void *kasan_kmalloc(struct kmem_cache *s, const void *object,
				  size_t size, gfp_t flags)
{
	return (void *)object;
}
static inline void *kasan_kmalloc_large(const void *ptr, size_t size, gfp_t flags)
{
	return (void *)ptr;
}
static inline void *kasan_krealloc(const void *object, size_t new_size,
				   gfp_t flags)
{
	return (void *)object;
}
static inline bool kasan_check_byte(const void *address)
{
	return true;
}

#endif /* CONFIG_KASAN */

#if defined(CONFIG_KASAN) && defined(CONFIG_KASAN_STACK)
void kasan_unpoison_task_stack(struct task_struct *task);
#else
static inline void kasan_unpoison_task_stack(struct task_struct *task) {}
#endif

#ifdef CONFIG_KASAN_GENERIC

void kasan_cache_shrink(struct kmem_cache *cache);
void kasan_cache_shutdown(struct kmem_cache *cache);
void kasan_record_aux_stack(void *ptr);
void kasan_record_aux_stack_noalloc(void *ptr);
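
/*
 * Illustrative note: these record an auxiliary stack trace in the object's
 * allocation metadata so that a later use-after-free report can show, for
 * example, where an RCU callback or work item touching the object was
 * queued. The _noalloc variant is intended for contexts that must not
 * allocate from the stack depot.
 */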

#else /* CONFIG_KASAN_GENERIC */

static inline void kasan_cache_shrink(struct kmem_cache *cache) {}
static inline void kasan_cache_shutdown(struct kmem_cache *cache) {}
static inline void kasan_record_aux_stack(void *ptr) {}
static inline void kasan_record_aux_stack_noalloc(void *ptr) {}

#endif /* CONFIG_KASAN_GENERIC */

#if defined(CONFIG_KASAN_SW_TAGS) || defined(CONFIG_KASAN_HW_TAGS)

static inline void *kasan_reset_tag(const void *addr)
{
	return (void *)arch_kasan_reset_tag(addr);
}
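
/*
 * Illustrative use: the tag-based modes keep a tag in the top byte of the
 * pointer, so code that needs the untagged address (address arithmetic,
 * physical-address conversion, or comparing pointers that may carry
 * different tags) strips it first:
 *
 *	if (kasan_reset_tag(p) == kasan_reset_tag(q))
 *		...	// same underlying address, possibly different tags
 */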

/**
 * kasan_report - print a report about a bad memory access detected by KASAN
 * @addr: address of the bad access
 * @size: size of the bad access
 * @is_write: whether the bad access is a write or a read
 * @ip: instruction pointer for the accessibility check or the bad access itself
 */
bool kasan_report(unsigned long addr, size_t size,
		bool is_write, unsigned long ip);

#else /* CONFIG_KASAN_SW_TAGS || CONFIG_KASAN_HW_TAGS */

static inline void *kasan_reset_tag(const void *addr)
{
	return (void *)addr;
}

#endif /* CONFIG_KASAN_SW_TAGS || CONFIG_KASAN_HW_TAGS */

#ifdef CONFIG_KASAN_HW_TAGS

void kasan_report_async(void);

#endif /* CONFIG_KASAN_HW_TAGS */

#ifdef CONFIG_KASAN_SW_TAGS
void __init kasan_init_sw_tags(void);
#else
static inline void kasan_init_sw_tags(void) { }
#endif

#ifdef CONFIG_KASAN_HW_TAGS
void kasan_init_hw_tags_cpu(void);
void __init kasan_init_hw_tags(void);
#else
static inline void kasan_init_hw_tags_cpu(void) { }
static inline void kasan_init_hw_tags(void) { }
#endif

#ifdef CONFIG_KASAN_VMALLOC

void kasan_populate_early_vm_area_shadow(void *start, unsigned long size);
int kasan_populate_vmalloc(unsigned long addr, unsigned long size);
void kasan_release_vmalloc(unsigned long start, unsigned long end,
			   unsigned long free_region_start,
			   unsigned long free_region_end);

void *__kasan_unpoison_vmalloc(const void *start, unsigned long size);
static __always_inline void *kasan_unpoison_vmalloc(const void *start,
						     unsigned long size)
{
	if (kasan_enabled())
		return __kasan_unpoison_vmalloc(start, size);
	return (void *)start;
}

void __kasan_poison_vmalloc(const void *start, unsigned long size);
static __always_inline void kasan_poison_vmalloc(const void *start,
						 unsigned long size)
{
	if (kasan_enabled())
		__kasan_poison_vmalloc(start, size);
}

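/*
 * Illustrative note (this is the point of the "vmalloc tagging for SW_TAGS"
 * change): kasan_unpoison_vmalloc() may return a pointer carrying a newly
 * assigned tag, so the vmalloc code is expected to use the return value
 * rather than the original address, roughly:
 *
 *	area->addr = kasan_unpoison_vmalloc(area->addr, size);
 *
 * This is a simplified sketch of the expected vmalloc-side usage, not a
 * copy of mm/vmalloc.c.
 */
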
#else /* CONFIG_KASAN_VMALLOC */

static inline void kasan_populate_early_vm_area_shadow(void *start,
							unsigned long size) { }
static inline int kasan_populate_vmalloc(unsigned long start,
					 unsigned long size)
{
	return 0;
}
static inline void kasan_release_vmalloc(unsigned long start,
					 unsigned long end,
					 unsigned long free_region_start,
					 unsigned long free_region_end) { }

static inline void *kasan_unpoison_vmalloc(const void *start,
					   unsigned long size)
{
	return (void *)start;
}
static inline void kasan_poison_vmalloc(const void *start, unsigned long size)
{ }

#endif /* CONFIG_KASAN_VMALLOC */

#if (defined(CONFIG_KASAN_GENERIC) || defined(CONFIG_KASAN_SW_TAGS)) && \
	!defined(CONFIG_KASAN_VMALLOC)

/*
 * These functions allocate and free shadow memory for kernel modules.
 * They are only required when KASAN_VMALLOC is not supported, as otherwise
 * shadow memory is allocated by the generic vmalloc handlers.
 */
int kasan_alloc_module_shadow(void *addr, size_t size, gfp_t gfp_mask);
void kasan_free_module_shadow(const struct vm_struct *vm);

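/*
 * Illustrative note: an architecture's module_alloc() is expected to call
 * kasan_alloc_module_shadow() for the freshly vmalloc'ed module area and to
 * back out (free the area) if shadow allocation fails; the shadow is torn
 * down via kasan_free_module_shadow() when the area itself is freed. This
 * describes the expected caller contract, not any specific architecture's
 * implementation.
 */
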
#else /* (CONFIG_KASAN_GENERIC || CONFIG_KASAN_SW_TAGS) && !CONFIG_KASAN_VMALLOC */

static inline int kasan_alloc_module_shadow(void *addr, size_t size, gfp_t gfp_mask) { return 0; }
static inline void kasan_free_module_shadow(const struct vm_struct *vm) {}

#endif /* (CONFIG_KASAN_GENERIC || CONFIG_KASAN_SW_TAGS) && !CONFIG_KASAN_VMALLOC */

#ifdef CONFIG_KASAN_INLINE
void kasan_non_canonical_hook(unsigned long addr);
#else /* CONFIG_KASAN_INLINE */
static inline void kasan_non_canonical_hook(unsigned long addr) { }
#endif /* CONFIG_KASAN_INLINE */

#endif /* LINUX_KASAN_H */