/* SPDX-License-Identifier: GPL-2.0 */
/*
 * include/linux/kasan.h
 * From commit: "kasan: init memory in kasan_(un)poison for HW_TAGS" (linux-block.git)
 */
0b24becc
AR
2#ifndef _LINUX_KASAN_H
3#define _LINUX_KASAN_H
4
34303244 5#include <linux/static_key.h>
0b24becc
AR
6#include <linux/types.h>
7
8struct kmem_cache;
9struct page;
a5af5aa8 10struct vm_struct;
5be9b730 11struct task_struct;
0b24becc
AR
12
13#ifdef CONFIG_KASAN
14
d5750edf 15#include <linux/linkage.h>
65fddcfc 16#include <asm/kasan.h>
0b24becc 17
83c4e7a0
PA
/* kasan_data struct is used in KUnit tests for KASAN expected failures */
struct kunit_kasan_expectation {
	bool report_expected;
	bool report_found;
};
23
d5750edf
AK
24#endif
25
26#if defined(CONFIG_KASAN_GENERIC) || defined(CONFIG_KASAN_SW_TAGS)
27
28#include <linux/pgtable.h>
29
30/* Software KASAN implementations use shadow memory. */
31
32#ifdef CONFIG_KASAN_SW_TAGS
a064cb00
AK
33/* This matches KASAN_TAG_INVALID. */
34#define KASAN_SHADOW_INIT 0xFE
d5750edf
AK
35#else
36#define KASAN_SHADOW_INIT 0
37#endif
38
29970dc2
HL
39#ifndef PTE_HWTABLE_PTRS
40#define PTE_HWTABLE_PTRS 0
41#endif
42
9577dd74 43extern unsigned char kasan_early_shadow_page[PAGE_SIZE];
29970dc2 44extern pte_t kasan_early_shadow_pte[PTRS_PER_PTE + PTE_HWTABLE_PTRS];
9577dd74
AK
45extern pmd_t kasan_early_shadow_pmd[PTRS_PER_PMD];
46extern pud_t kasan_early_shadow_pud[PTRS_PER_PUD];
47extern p4d_t kasan_early_shadow_p4d[MAX_PTRS_PER_P4D];
69786cdb 48
9577dd74 49int kasan_populate_early_shadow(const void *shadow_start,
69786cdb
AR
50 const void *shadow_end);
51
0b24becc
AR
52static inline void *kasan_mem_to_shadow(const void *addr)
53{
54 return (void *)((unsigned long)addr >> KASAN_SHADOW_SCALE_SHIFT)
55 + KASAN_SHADOW_OFFSET;
56}
57
d5750edf
AK
58int kasan_add_zero_shadow(void *start, unsigned long size);
59void kasan_remove_zero_shadow(void *start, unsigned long size);
60
d73b4936
AK
61/* Enable reporting bugs after kasan_disable_current() */
62extern void kasan_enable_current(void);
63
64/* Disable reporting bugs for current task */
65extern void kasan_disable_current(void);
66
d5750edf
AK
67#else /* CONFIG_KASAN_GENERIC || CONFIG_KASAN_SW_TAGS */
68
/* Stubs for builds without software KASAN shadow memory. */
static inline int kasan_add_zero_shadow(void *start, unsigned long size)
{
	return 0;
}
static inline void kasan_remove_zero_shadow(void *start,
					unsigned long size)
{}

static inline void kasan_enable_current(void) {}
static inline void kasan_disable_current(void) {}
79
d5750edf
AK
80#endif /* CONFIG_KASAN_GENERIC || CONFIG_KASAN_SW_TAGS */
81
82#ifdef CONFIG_KASAN
83
34303244
AK
/* Per-cache KASAN bookkeeping stored in struct kmem_cache. */
struct kasan_cache {
	int alloc_meta_offset;
	int free_meta_offset;
	bool is_kmalloc;
};
0b24becc 89
#ifdef CONFIG_KASAN_HW_TAGS

DECLARE_STATIC_KEY_FALSE(kasan_flag_enabled);

/* HW_TAGS can be toggled at boot; gate all hooks behind a static key. */
static __always_inline bool kasan_enabled(void)
{
	return static_branch_likely(&kasan_flag_enabled);
}

#else /* CONFIG_KASAN_HW_TAGS */

/* Other KASAN modes are always on when compiled in. */
static inline bool kasan_enabled(void)
{
	return true;
}

#endif /* CONFIG_KASAN_HW_TAGS */
107
108slab_flags_t __kasan_never_merge(void);
109static __always_inline slab_flags_t kasan_never_merge(void)
110{
111 if (kasan_enabled())
112 return __kasan_never_merge();
113 return 0;
114}
b8c73fc2 115
34303244
AK
116void __kasan_unpoison_range(const void *addr, size_t size);
117static __always_inline void kasan_unpoison_range(const void *addr, size_t size)
118{
119 if (kasan_enabled())
120 __kasan_unpoison_range(addr, size);
121}
7ed2f9e6 122
34303244
AK
123void __kasan_alloc_pages(struct page *page, unsigned int order);
124static __always_inline void kasan_alloc_pages(struct page *page,
125 unsigned int order)
126{
127 if (kasan_enabled())
128 __kasan_alloc_pages(page, order);
129}
0316bec2 130
34303244
AK
131void __kasan_free_pages(struct page *page, unsigned int order);
132static __always_inline void kasan_free_pages(struct page *page,
133 unsigned int order)
134{
135 if (kasan_enabled())
136 __kasan_free_pages(page, order);
137}
0316bec2 138
34303244
AK
139void __kasan_cache_create(struct kmem_cache *cache, unsigned int *size,
140 slab_flags_t *flags);
141static __always_inline void kasan_cache_create(struct kmem_cache *cache,
142 unsigned int *size, slab_flags_t *flags)
143{
144 if (kasan_enabled())
145 __kasan_cache_create(cache, size, flags);
146}
0316bec2 147
92850134
AK
148void __kasan_cache_create_kmalloc(struct kmem_cache *cache);
149static __always_inline void kasan_cache_create_kmalloc(struct kmem_cache *cache)
150{
151 if (kasan_enabled())
152 __kasan_cache_create_kmalloc(cache);
153}
154
34303244
AK
155size_t __kasan_metadata_size(struct kmem_cache *cache);
156static __always_inline size_t kasan_metadata_size(struct kmem_cache *cache)
157{
158 if (kasan_enabled())
159 return __kasan_metadata_size(cache);
160 return 0;
161}
7ed2f9e6 162
34303244
AK
163void __kasan_poison_slab(struct page *page);
164static __always_inline void kasan_poison_slab(struct page *page)
165{
166 if (kasan_enabled())
167 __kasan_poison_slab(page);
168}
169
170void __kasan_unpoison_object_data(struct kmem_cache *cache, void *object);
171static __always_inline void kasan_unpoison_object_data(struct kmem_cache *cache,
172 void *object)
173{
174 if (kasan_enabled())
175 __kasan_unpoison_object_data(cache, object);
176}
177
178void __kasan_poison_object_data(struct kmem_cache *cache, void *object);
179static __always_inline void kasan_poison_object_data(struct kmem_cache *cache,
180 void *object)
181{
182 if (kasan_enabled())
183 __kasan_poison_object_data(cache, object);
184}
185
186void * __must_check __kasan_init_slab_obj(struct kmem_cache *cache,
187 const void *object);
188static __always_inline void * __must_check kasan_init_slab_obj(
189 struct kmem_cache *cache, const void *object)
190{
191 if (kasan_enabled())
192 return __kasan_init_slab_obj(cache, object);
193 return (void *)object;
194}
195
196bool __kasan_slab_free(struct kmem_cache *s, void *object, unsigned long ip);
027b37b5 197static __always_inline bool kasan_slab_free(struct kmem_cache *s, void *object)
34303244
AK
198{
199 if (kasan_enabled())
027b37b5 200 return __kasan_slab_free(s, object, _RET_IP_);
34303244
AK
201 return false;
202}
203
200072ce
AK
204void __kasan_kfree_large(void *ptr, unsigned long ip);
205static __always_inline void kasan_kfree_large(void *ptr)
206{
207 if (kasan_enabled())
208 __kasan_kfree_large(ptr, _RET_IP_);
209}
210
eeb3160c 211void __kasan_slab_free_mempool(void *ptr, unsigned long ip);
027b37b5 212static __always_inline void kasan_slab_free_mempool(void *ptr)
eeb3160c
AK
213{
214 if (kasan_enabled())
027b37b5 215 __kasan_slab_free_mempool(ptr, _RET_IP_);
eeb3160c
AK
216}
217
34303244
AK
218void * __must_check __kasan_slab_alloc(struct kmem_cache *s,
219 void *object, gfp_t flags);
220static __always_inline void * __must_check kasan_slab_alloc(
221 struct kmem_cache *s, void *object, gfp_t flags)
222{
223 if (kasan_enabled())
224 return __kasan_slab_alloc(s, object, flags);
225 return object;
226}
227
228void * __must_check __kasan_kmalloc(struct kmem_cache *s, const void *object,
229 size_t size, gfp_t flags);
230static __always_inline void * __must_check kasan_kmalloc(struct kmem_cache *s,
231 const void *object, size_t size, gfp_t flags)
232{
233 if (kasan_enabled())
234 return __kasan_kmalloc(s, object, size, flags);
235 return (void *)object;
236}
237
238void * __must_check __kasan_kmalloc_large(const void *ptr,
239 size_t size, gfp_t flags);
240static __always_inline void * __must_check kasan_kmalloc_large(const void *ptr,
241 size_t size, gfp_t flags)
242{
243 if (kasan_enabled())
244 return __kasan_kmalloc_large(ptr, size, flags);
245 return (void *)ptr;
246}
247
248void * __must_check __kasan_krealloc(const void *object,
249 size_t new_size, gfp_t flags);
250static __always_inline void * __must_check kasan_krealloc(const void *object,
251 size_t new_size, gfp_t flags)
252{
253 if (kasan_enabled())
254 return __kasan_krealloc(object, new_size, flags);
255 return (void *)object;
256}
257
611806b4
AK
258/*
259 * Unlike kasan_check_read/write(), kasan_check_byte() is performed even for
260 * the hardware tag-based mode that doesn't rely on compiler instrumentation.
261 */
262bool __kasan_check_byte(const void *addr, unsigned long ip);
263static __always_inline bool kasan_check_byte(const void *addr)
264{
265 if (kasan_enabled())
266 return __kasan_check_byte(addr, _RET_IP_);
267 return true;
268}
269
270
b0845ce5
MR
271bool kasan_save_enable_multi_shot(void);
272void kasan_restore_multi_shot(bool enabled);
273
0b24becc
AR
274#else /* CONFIG_KASAN */
275
34303244
AK
276static inline bool kasan_enabled(void)
277{
278 return false;
279}
e86f8b09
AK
280static inline slab_flags_t kasan_never_merge(void)
281{
282 return 0;
283}
cebd0eb2 284static inline void kasan_unpoison_range(const void *address, size_t size) {}
b8c73fc2
AR
285static inline void kasan_alloc_pages(struct page *page, unsigned int order) {}
286static inline void kasan_free_pages(struct page *page, unsigned int order) {}
7ed2f9e6 287static inline void kasan_cache_create(struct kmem_cache *cache,
be4a7988 288 unsigned int *size,
d50112ed 289 slab_flags_t *flags) {}
92850134 290static inline void kasan_cache_create_kmalloc(struct kmem_cache *cache) {}
34303244 291static inline size_t kasan_metadata_size(struct kmem_cache *cache) { return 0; }
0316bec2
AR
292static inline void kasan_poison_slab(struct page *page) {}
293static inline void kasan_unpoison_object_data(struct kmem_cache *cache,
294 void *object) {}
295static inline void kasan_poison_object_data(struct kmem_cache *cache,
296 void *object) {}
0116523c
AK
297static inline void *kasan_init_slab_obj(struct kmem_cache *cache,
298 const void *object)
299{
300 return (void *)object;
301}
027b37b5 302static inline bool kasan_slab_free(struct kmem_cache *s, void *object)
34303244
AK
303{
304 return false;
305}
200072ce 306static inline void kasan_kfree_large(void *ptr) {}
027b37b5 307static inline void kasan_slab_free_mempool(void *ptr) {}
34303244
AK
308static inline void *kasan_slab_alloc(struct kmem_cache *s, void *object,
309 gfp_t flags)
0116523c 310{
34303244 311 return object;
0116523c 312}
0116523c
AK
313static inline void *kasan_kmalloc(struct kmem_cache *s, const void *object,
314 size_t size, gfp_t flags)
315{
316 return (void *)object;
317}
34303244
AK
318static inline void *kasan_kmalloc_large(const void *ptr, size_t size, gfp_t flags)
319{
320 return (void *)ptr;
321}
0116523c
AK
322static inline void *kasan_krealloc(const void *object, size_t new_size,
323 gfp_t flags)
324{
325 return (void *)object;
326}
611806b4
AK
327static inline bool kasan_check_byte(const void *address)
328{
329 return true;
330}
9b75a867 331
0b24becc
AR
332#endif /* CONFIG_KASAN */
333
#if defined(CONFIG_KASAN) && defined(CONFIG_KASAN_STACK)
void kasan_unpoison_task_stack(struct task_struct *task);
#else
static inline void kasan_unpoison_task_stack(struct task_struct *task) {}
#endif
339
2bd926b4
AK
#ifdef CONFIG_KASAN_GENERIC

void kasan_cache_shrink(struct kmem_cache *cache);
void kasan_cache_shutdown(struct kmem_cache *cache);
void kasan_record_aux_stack(void *ptr);

#else /* CONFIG_KASAN_GENERIC */

static inline void kasan_cache_shrink(struct kmem_cache *cache) {}
static inline void kasan_cache_shutdown(struct kmem_cache *cache) {}
static inline void kasan_record_aux_stack(void *ptr) {}

#endif /* CONFIG_KASAN_GENERIC */
353
2e903b91 354#if defined(CONFIG_KASAN_SW_TAGS) || defined(CONFIG_KASAN_HW_TAGS)
3c9e3aa1 355
c0054c56
AK
/* Strip the KASAN tag bits from a tagged pointer (arch-provided helper). */
static inline void *kasan_reset_tag(const void *addr)
{
	return (void *)arch_kasan_reset_tag(addr);
}
3c9e3aa1 360
49c6631d
VF
361/**
362 * kasan_report - print a report about a bad memory access detected by KASAN
363 * @addr: address of the bad access
364 * @size: size of the bad access
365 * @is_write: whether the bad access is a write or a read
366 * @ip: instruction pointer for the accessibility check or the bad access itself
367 */
8cceeff4 368bool kasan_report(unsigned long addr, size_t size,
41eea9cd
AK
369 bool is_write, unsigned long ip);
370
2e903b91 371#else /* CONFIG_KASAN_SW_TAGS || CONFIG_KASAN_HW_TAGS */
3c9e3aa1
AK
372
/* No tag bits without a tag-based mode: return the pointer unchanged. */
static inline void *kasan_reset_tag(const void *addr)
{
	return (void *)addr;
}
377
2e903b91
AK
378#endif /* CONFIG_KASAN_SW_TAGS || CONFIG_KASAN_HW_TAGS*/
379
8f7b5054
VF
380#ifdef CONFIG_KASAN_HW_TAGS
381
382void kasan_report_async(void);
383
384#endif /* CONFIG_KASAN_HW_TAGS */
385
2e903b91
AK
#ifdef CONFIG_KASAN_SW_TAGS
void __init kasan_init_sw_tags(void);
#else
static inline void kasan_init_sw_tags(void) { }
#endif

#ifdef CONFIG_KASAN_HW_TAGS
void kasan_init_hw_tags_cpu(void);
void __init kasan_init_hw_tags(void);
#else
static inline void kasan_init_hw_tags_cpu(void) { }
static inline void kasan_init_hw_tags(void) { }
#endif
080eb83f 399
3c5c3cfb 400#ifdef CONFIG_KASAN_VMALLOC
3b1a4a86 401
d98c9e83
AR
402int kasan_populate_vmalloc(unsigned long addr, unsigned long size);
403void kasan_poison_vmalloc(const void *start, unsigned long size);
404void kasan_unpoison_vmalloc(const void *start, unsigned long size);
3c5c3cfb
DA
405void kasan_release_vmalloc(unsigned long start, unsigned long end,
406 unsigned long free_region_start,
407 unsigned long free_region_end);
3b1a4a86
AK
408
409#else /* CONFIG_KASAN_VMALLOC */
410
d98c9e83
AR
/* No-op stubs used when CONFIG_KASAN_VMALLOC is disabled. */
static inline int kasan_populate_vmalloc(unsigned long start,
					 unsigned long size)
{
	return 0;
}

static inline void kasan_poison_vmalloc(const void *start, unsigned long size)
{ }
static inline void kasan_unpoison_vmalloc(const void *start, unsigned long size)
{ }
static inline void kasan_release_vmalloc(unsigned long start,
					 unsigned long end,
					 unsigned long free_region_start,
					 unsigned long free_region_end) {}
3b1a4a86
AK
425
426#endif /* CONFIG_KASAN_VMALLOC */
427
0fea6e9a
AK
428#if (defined(CONFIG_KASAN_GENERIC) || defined(CONFIG_KASAN_SW_TAGS)) && \
429 !defined(CONFIG_KASAN_VMALLOC)
3b1a4a86
AK
430
431/*
432 * These functions provide a special case to support backing module
433 * allocations with real shadow memory. With KASAN vmalloc, the special
434 * case is unnecessary, as the work is handled in the generic case.
435 */
436int kasan_module_alloc(void *addr, size_t size);
437void kasan_free_shadow(const struct vm_struct *vm);
438
0fea6e9a 439#else /* (CONFIG_KASAN_GENERIC || CONFIG_KASAN_SW_TAGS) && !CONFIG_KASAN_VMALLOC */
3b1a4a86
AK
440
/* No-op stubs: module shadow handling not needed in this configuration. */
static inline int kasan_module_alloc(void *addr, size_t size) { return 0; }
static inline void kasan_free_shadow(const struct vm_struct *vm) {}
443
0fea6e9a 444#endif /* (CONFIG_KASAN_GENERIC || CONFIG_KASAN_SW_TAGS) && !CONFIG_KASAN_VMALLOC */
3c5c3cfb 445
2f004eea
JH
#ifdef CONFIG_KASAN_INLINE
void kasan_non_canonical_hook(unsigned long addr);
#else /* CONFIG_KASAN_INLINE */
static inline void kasan_non_canonical_hook(unsigned long addr) { }
#endif /* CONFIG_KASAN_INLINE */
451
0b24becc 452#endif /* LINUX_KASAN_H */