/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _LINUX_KASAN_H
#define _LINUX_KASAN_H

#include <linux/bug.h>
#include <linux/kernel.h>
#include <linux/static_key.h>
#include <linux/types.h>

struct kmem_cache;
struct page;
struct vm_struct;
struct task_struct;

#ifdef CONFIG_KASAN

#include <linux/linkage.h>
#include <asm/kasan.h>

/* kasan_data struct is used in KUnit tests for KASAN expected failures */
struct kunit_kasan_expectation {
	bool report_found;
};

#endif

#if defined(CONFIG_KASAN_GENERIC) || defined(CONFIG_KASAN_SW_TAGS)

#include <linux/pgtable.h>

/* Software KASAN implementations use shadow memory. */

#ifdef CONFIG_KASAN_SW_TAGS
/* This matches KASAN_TAG_INVALID. */
#define KASAN_SHADOW_INIT 0xFE
#else
#define KASAN_SHADOW_INIT 0
#endif

#ifndef PTE_HWTABLE_PTRS
#define PTE_HWTABLE_PTRS 0
#endif

extern unsigned char kasan_early_shadow_page[PAGE_SIZE];
extern pte_t kasan_early_shadow_pte[MAX_PTRS_PER_PTE + PTE_HWTABLE_PTRS];
extern pmd_t kasan_early_shadow_pmd[MAX_PTRS_PER_PMD];
extern pud_t kasan_early_shadow_pud[MAX_PTRS_PER_PUD];
extern p4d_t kasan_early_shadow_p4d[MAX_PTRS_PER_P4D];

int kasan_populate_early_shadow(const void *shadow_start,
				const void *shadow_end);

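/*
 * Illustration, not part of the API: kasan_mem_to_shadow() below translates
 * an address into the address of the shadow byte describing it. As a rough
 * sketch, the generic mode typically uses a scale shift of 3, so one shadow
 * byte covers an 8-byte granule:
 *
 *	u8 *shadow = kasan_mem_to_shadow(ptr);
 *	shadow describes the 8 bytes starting at round_down((unsigned long)ptr, 8)
 *
 * The software tag-based mode typically uses a shift of 4 (16-byte granules).
 */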
static inline void *kasan_mem_to_shadow(const void *addr)
{
	return (void *)((unsigned long)addr >> KASAN_SHADOW_SCALE_SHIFT)
		+ KASAN_SHADOW_OFFSET;
}

int kasan_add_zero_shadow(void *start, unsigned long size);
void kasan_remove_zero_shadow(void *start, unsigned long size);

/* Enable reporting bugs after kasan_disable_current() */
extern void kasan_enable_current(void);

/* Disable reporting bugs for current task */
extern void kasan_disable_current(void);

#else /* CONFIG_KASAN_GENERIC || CONFIG_KASAN_SW_TAGS */

static inline int kasan_add_zero_shadow(void *start, unsigned long size)
{
	return 0;
}
static inline void kasan_remove_zero_shadow(void *start,
					unsigned long size)
{}

static inline void kasan_enable_current(void) {}
static inline void kasan_disable_current(void) {}

#endif /* CONFIG_KASAN_GENERIC || CONFIG_KASAN_SW_TAGS */

#ifdef CONFIG_KASAN_HW_TAGS

DECLARE_STATIC_KEY_FALSE(kasan_flag_enabled);

static __always_inline bool kasan_enabled(void)
{
	return static_branch_likely(&kasan_flag_enabled);
}

static inline bool kasan_hw_tags_enabled(void)
{
	return kasan_enabled();
}

void kasan_alloc_pages(struct page *page, unsigned int order, gfp_t flags);
void kasan_free_pages(struct page *page, unsigned int order);

#else /* CONFIG_KASAN_HW_TAGS */

static inline bool kasan_enabled(void)
{
	return IS_ENABLED(CONFIG_KASAN);
}

static inline bool kasan_hw_tags_enabled(void)
{
	return false;
}

static __always_inline void kasan_alloc_pages(struct page *page,
					      unsigned int order, gfp_t flags)
{
	/* Only available for integrated init. */
	BUILD_BUG();
}

static __always_inline void kasan_free_pages(struct page *page,
					     unsigned int order)
{
	/* Only available for integrated init. */
	BUILD_BUG();
}

#endif /* CONFIG_KASAN_HW_TAGS */

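/*
 * "Integrated init" means the KASAN mode can initialize (zero) memory while
 * it tags it, so a separate zeroing pass is unnecessary. A rough sketch of
 * how the page allocator is expected to use this (the real code lives in
 * mm/page_alloc.c and is simplified here; local names are illustrative):
 *
 *	if (kasan_has_integrated_init())
 *		kasan_alloc_pages(page, order, gfp_flags);
 *	else {
 *		kasan_unpoison_pages(page, order, init);
 *		if (init)
 *			... zero the pages separately ...
 *	}
 *
 * This is why the kasan_alloc_pages()/kasan_free_pages() stubs above are
 * BUILD_BUG(): they must only be reachable when integrated init is possible
 * (currently the hardware tag-based mode).
 */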
static inline bool kasan_has_integrated_init(void)
{
	return kasan_hw_tags_enabled();
}

#ifdef CONFIG_KASAN

struct kasan_cache {
	int alloc_meta_offset;
	int free_meta_offset;
	bool is_kmalloc;
};

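/*
 * Pattern used throughout the rest of this section: each hook comes as an
 * out-of-line __kasan_<op>() implementation plus an inline kasan_<op>()
 * wrapper that first checks kasan_enabled(). With CONFIG_KASAN_HW_TAGS the
 * check is a static branch patched at boot, so disabled KASAN costs callers
 * almost nothing; for the other modes it is constant-folded. A hypothetical
 * new hook would follow the same shape, e.g.:
 *
 *	void __kasan_frob(void *object);
 *	static __always_inline void kasan_frob(void *object)
 *	{
 *		if (kasan_enabled())
 *			__kasan_frob(object);
 *	}
 *
 * (kasan_frob() is purely illustrative, not a real hook.)
 */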
slab_flags_t __kasan_never_merge(void);
static __always_inline slab_flags_t kasan_never_merge(void)
{
	if (kasan_enabled())
		return __kasan_never_merge();
	return 0;
}

void __kasan_unpoison_range(const void *addr, size_t size);
static __always_inline void kasan_unpoison_range(const void *addr, size_t size)
{
	if (kasan_enabled())
		__kasan_unpoison_range(addr, size);
}

void __kasan_poison_pages(struct page *page, unsigned int order, bool init);
static __always_inline void kasan_poison_pages(struct page *page,
						unsigned int order, bool init)
{
	if (kasan_enabled())
		__kasan_poison_pages(page, order, init);
}

void __kasan_unpoison_pages(struct page *page, unsigned int order, bool init);
static __always_inline void kasan_unpoison_pages(struct page *page,
						 unsigned int order, bool init)
{
	if (kasan_enabled())
		__kasan_unpoison_pages(page, order, init);
}

void __kasan_cache_create(struct kmem_cache *cache, unsigned int *size,
			  slab_flags_t *flags);
static __always_inline void kasan_cache_create(struct kmem_cache *cache,
					unsigned int *size, slab_flags_t *flags)
{
	if (kasan_enabled())
		__kasan_cache_create(cache, size, flags);
}

void __kasan_cache_create_kmalloc(struct kmem_cache *cache);
static __always_inline void kasan_cache_create_kmalloc(struct kmem_cache *cache)
{
	if (kasan_enabled())
		__kasan_cache_create_kmalloc(cache);
}

size_t __kasan_metadata_size(struct kmem_cache *cache);
static __always_inline size_t kasan_metadata_size(struct kmem_cache *cache)
{
	if (kasan_enabled())
		return __kasan_metadata_size(cache);
	return 0;
}

void __kasan_poison_slab(struct page *page);
static __always_inline void kasan_poison_slab(struct page *page)
{
	if (kasan_enabled())
		__kasan_poison_slab(page);
}

void __kasan_unpoison_object_data(struct kmem_cache *cache, void *object);
static __always_inline void kasan_unpoison_object_data(struct kmem_cache *cache,
							void *object)
{
	if (kasan_enabled())
		__kasan_unpoison_object_data(cache, object);
}

void __kasan_poison_object_data(struct kmem_cache *cache, void *object);
static __always_inline void kasan_poison_object_data(struct kmem_cache *cache,
						      void *object)
{
	if (kasan_enabled())
		__kasan_poison_object_data(cache, object);
}

void * __must_check __kasan_init_slab_obj(struct kmem_cache *cache,
					  const void *object);
static __always_inline void * __must_check kasan_init_slab_obj(
				struct kmem_cache *cache, const void *object)
{
	if (kasan_enabled())
		return __kasan_init_slab_obj(cache, object);
	return (void *)object;
}

bool __kasan_slab_free(struct kmem_cache *s, void *object,
			unsigned long ip, bool init);
static __always_inline bool kasan_slab_free(struct kmem_cache *s,
						void *object, bool init)
{
	if (kasan_enabled())
		return __kasan_slab_free(s, object, _RET_IP_, init);
	return false;
}

void __kasan_kfree_large(void *ptr, unsigned long ip);
static __always_inline void kasan_kfree_large(void *ptr)
{
	if (kasan_enabled())
		__kasan_kfree_large(ptr, _RET_IP_);
}

void __kasan_slab_free_mempool(void *ptr, unsigned long ip);
static __always_inline void kasan_slab_free_mempool(void *ptr)
{
	if (kasan_enabled())
		__kasan_slab_free_mempool(ptr, _RET_IP_);
}

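/*
 * Allocation-side hooks. A rough sketch of the expected order of calls on
 * the slab allocation path (the real callers are in mm/slub.c / mm/slab.c
 * and are simplified here): kasan_slab_alloc() unpoisons and tags a newly
 * allocated object, and kasan_kmalloc() is additionally applied to
 * kmalloc()-style allocations to poison the redzone beyond the requested
 * size:
 *
 *	object = kasan_slab_alloc(s, object, flags, init);
 *	if (is_kmalloc)
 *		object = kasan_kmalloc(s, object, orig_size, flags);
 *
 * (is_kmalloc and orig_size are illustrative names, not kernel identifiers.)
 */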
void * __must_check __kasan_slab_alloc(struct kmem_cache *s,
					void *object, gfp_t flags, bool init);
static __always_inline void * __must_check kasan_slab_alloc(
		struct kmem_cache *s, void *object, gfp_t flags, bool init)
{
	if (kasan_enabled())
		return __kasan_slab_alloc(s, object, flags, init);
	return object;
}

void * __must_check __kasan_kmalloc(struct kmem_cache *s, const void *object,
				    size_t size, gfp_t flags);
static __always_inline void * __must_check kasan_kmalloc(struct kmem_cache *s,
				const void *object, size_t size, gfp_t flags)
{
	if (kasan_enabled())
		return __kasan_kmalloc(s, object, size, flags);
	return (void *)object;
}

void * __must_check __kasan_kmalloc_large(const void *ptr,
					  size_t size, gfp_t flags);
static __always_inline void * __must_check kasan_kmalloc_large(const void *ptr,
						      size_t size, gfp_t flags)
{
	if (kasan_enabled())
		return __kasan_kmalloc_large(ptr, size, flags);
	return (void *)ptr;
}

void * __must_check __kasan_krealloc(const void *object,
				     size_t new_size, gfp_t flags);
static __always_inline void * __must_check kasan_krealloc(const void *object,
						 size_t new_size, gfp_t flags)
{
	if (kasan_enabled())
		return __kasan_krealloc(object, new_size, flags);
	return (void *)object;
}

/*
 * Unlike kasan_check_read/write(), kasan_check_byte() is performed even for
 * the hardware tag-based mode that doesn't rely on compiler instrumentation.
 */
bool __kasan_check_byte(const void *addr, unsigned long ip);
static __always_inline bool kasan_check_byte(const void *addr)
{
	if (kasan_enabled())
		return __kasan_check_byte(addr, _RET_IP_);
	return true;
}

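/*
 * A sketch of typical kasan_check_byte() use, loosely modelled on ksize():
 * probe one byte to decide whether the rest of the object may be touched,
 * e.g.:
 *
 *	if (!ptr || !kasan_check_byte(ptr))
 *		return 0;
 *
 * so that an invalid pointer produces a KASAN report instead of being
 * silently unpoisoned.
 */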

bool kasan_save_enable_multi_shot(void);
void kasan_restore_multi_shot(bool enabled);

#else /* CONFIG_KASAN */

static inline slab_flags_t kasan_never_merge(void)
{
	return 0;
}
static inline void kasan_unpoison_range(const void *address, size_t size) {}
static inline void kasan_poison_pages(struct page *page, unsigned int order,
				      bool init) {}
static inline void kasan_unpoison_pages(struct page *page, unsigned int order,
					bool init) {}
static inline void kasan_cache_create(struct kmem_cache *cache,
				      unsigned int *size,
				      slab_flags_t *flags) {}
static inline void kasan_cache_create_kmalloc(struct kmem_cache *cache) {}
static inline size_t kasan_metadata_size(struct kmem_cache *cache) { return 0; }
static inline void kasan_poison_slab(struct page *page) {}
static inline void kasan_unpoison_object_data(struct kmem_cache *cache,
					      void *object) {}
static inline void kasan_poison_object_data(struct kmem_cache *cache,
					     void *object) {}
static inline void *kasan_init_slab_obj(struct kmem_cache *cache,
					const void *object)
{
	return (void *)object;
}
static inline bool kasan_slab_free(struct kmem_cache *s, void *object, bool init)
{
	return false;
}
static inline void kasan_kfree_large(void *ptr) {}
static inline void kasan_slab_free_mempool(void *ptr) {}
static inline void *kasan_slab_alloc(struct kmem_cache *s, void *object,
				     gfp_t flags, bool init)
{
	return object;
}
static inline void *kasan_kmalloc(struct kmem_cache *s, const void *object,
				  size_t size, gfp_t flags)
{
	return (void *)object;
}
static inline void *kasan_kmalloc_large(const void *ptr, size_t size, gfp_t flags)
{
	return (void *)ptr;
}
static inline void *kasan_krealloc(const void *object, size_t new_size,
				   gfp_t flags)
{
	return (void *)object;
}
static inline bool kasan_check_byte(const void *address)
{
	return true;
}

#endif /* CONFIG_KASAN */

#if defined(CONFIG_KASAN) && defined(CONFIG_KASAN_STACK)
void kasan_unpoison_task_stack(struct task_struct *task);
#else
static inline void kasan_unpoison_task_stack(struct task_struct *task) {}
#endif

#ifdef CONFIG_KASAN_GENERIC

void kasan_cache_shrink(struct kmem_cache *cache);
void kasan_cache_shutdown(struct kmem_cache *cache);
void kasan_record_aux_stack(void *ptr);
void kasan_record_aux_stack_noalloc(void *ptr);

#else /* CONFIG_KASAN_GENERIC */

static inline void kasan_cache_shrink(struct kmem_cache *cache) {}
static inline void kasan_cache_shutdown(struct kmem_cache *cache) {}
static inline void kasan_record_aux_stack(void *ptr) {}
static inline void kasan_record_aux_stack_noalloc(void *ptr) {}

#endif /* CONFIG_KASAN_GENERIC */

#if defined(CONFIG_KASAN_SW_TAGS) || defined(CONFIG_KASAN_HW_TAGS)

static inline void *kasan_reset_tag(const void *addr)
{
	return (void *)arch_kasan_reset_tag(addr);
}

/**
 * kasan_report - print a report about a bad memory access detected by KASAN
 * @addr: address of the bad access
 * @size: size of the bad access
 * @is_write: whether the bad access is a write or a read
 * @ip: instruction pointer for the accessibility check or the bad access itself
 */
bool kasan_report(unsigned long addr, size_t size,
		bool is_write, unsigned long ip);

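/*
 * A sketch of how the tag-based modes are expected to report a bad access
 * (the real callers live in mm/kasan/; the check below is illustrative, not
 * the actual implementation):
 *
 *	if (unlikely(tag_of(ptr) != tag_of_shadow(ptr)))
 *		kasan_report((unsigned long)ptr, size, write, _RET_IP_);
 *
 * The bool return lets callers propagate whether an invalid access was
 * detected and reported.
 */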
#else /* CONFIG_KASAN_SW_TAGS || CONFIG_KASAN_HW_TAGS */

static inline void *kasan_reset_tag(const void *addr)
{
	return (void *)addr;
}

#endif /* CONFIG_KASAN_SW_TAGS || CONFIG_KASAN_HW_TAGS */

#ifdef CONFIG_KASAN_HW_TAGS

void kasan_report_async(void);

#endif /* CONFIG_KASAN_HW_TAGS */

#ifdef CONFIG_KASAN_SW_TAGS
void __init kasan_init_sw_tags(void);
#else
static inline void kasan_init_sw_tags(void) { }
#endif

#ifdef CONFIG_KASAN_HW_TAGS
void kasan_init_hw_tags_cpu(void);
void __init kasan_init_hw_tags(void);
#else
static inline void kasan_init_hw_tags_cpu(void) { }
static inline void kasan_init_hw_tags(void) { }
#endif

#ifdef CONFIG_KASAN_VMALLOC

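/*
 * With CONFIG_KASAN_VMALLOC, shadow for the vmalloc area is allocated on
 * demand rather than up front. A rough sketch of the expected flow (the
 * real callers are in mm/vmalloc.c and are simplified here):
 *
 *	kasan_populate_vmalloc(addr, size);	map shadow for a new area
 *	kasan_unpoison_vmalloc(start, size);	mark the allocation valid
 *	...
 *	kasan_poison_vmalloc(start, size);	on free of the allocation
 *	kasan_release_vmalloc(...);		let KASAN reclaim shadow of
 *						freed regions
 */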
int kasan_populate_vmalloc(unsigned long addr, unsigned long size);
void kasan_poison_vmalloc(const void *start, unsigned long size);
void kasan_unpoison_vmalloc(const void *start, unsigned long size);
void kasan_release_vmalloc(unsigned long start, unsigned long end,
			   unsigned long free_region_start,
			   unsigned long free_region_end);

void kasan_populate_early_vm_area_shadow(void *start, unsigned long size);

#else /* CONFIG_KASAN_VMALLOC */

static inline int kasan_populate_vmalloc(unsigned long start,
					unsigned long size)
{
	return 0;
}

static inline void kasan_poison_vmalloc(const void *start, unsigned long size)
{ }
static inline void kasan_unpoison_vmalloc(const void *start, unsigned long size)
{ }
static inline void kasan_release_vmalloc(unsigned long start,
					 unsigned long end,
					 unsigned long free_region_start,
					 unsigned long free_region_end) {}

static inline void kasan_populate_early_vm_area_shadow(void *start,
							unsigned long size)
{ }

#endif /* CONFIG_KASAN_VMALLOC */

#if (defined(CONFIG_KASAN_GENERIC) || defined(CONFIG_KASAN_SW_TAGS)) && \
	!defined(CONFIG_KASAN_VMALLOC)

/*
 * These functions provide a special case to support backing module
 * allocations with real shadow memory. With KASAN vmalloc, the special
 * case is unnecessary, as the work is handled in the generic case.
 */
int kasan_module_alloc(void *addr, size_t size, gfp_t gfp_mask);
void kasan_free_shadow(const struct vm_struct *vm);

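/*
 * A sketch of how an architecture's module_alloc() is expected to pair these
 * with its vmalloc-based allocation (simplified; the exact code lives in the
 * arch module loaders):
 *
 *	p = __vmalloc_node_range(size, ...);
 *	if (p && kasan_module_alloc(p, size, gfp_mask) < 0) {
 *		vfree(p);
 *		return NULL;
 *	}
 *	return p;
 *
 * kasan_free_shadow() is the teardown counterpart, used when the vm area
 * backing such an allocation is released.
 */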
#else /* (CONFIG_KASAN_GENERIC || CONFIG_KASAN_SW_TAGS) && !CONFIG_KASAN_VMALLOC */

static inline int kasan_module_alloc(void *addr, size_t size, gfp_t gfp_mask) { return 0; }
static inline void kasan_free_shadow(const struct vm_struct *vm) {}

#endif /* (CONFIG_KASAN_GENERIC || CONFIG_KASAN_SW_TAGS) && !CONFIG_KASAN_VMALLOC */

#ifdef CONFIG_KASAN_INLINE
void kasan_non_canonical_hook(unsigned long addr);
#else /* CONFIG_KASAN_INLINE */
static inline void kasan_non_canonical_hook(unsigned long addr) { }
#endif /* CONFIG_KASAN_INLINE */

#endif /* LINUX_KASAN_H */