/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _LINUX_KASAN_H
#define _LINUX_KASAN_H

#include <linux/static_key.h>
#include <linux/types.h>

struct kmem_cache;
struct page;
struct vm_struct;
struct task_struct;

#ifdef CONFIG_KASAN

#include <linux/linkage.h>
#include <asm/kasan.h>

/* kasan_data struct is used in KUnit tests for KASAN expected failures */
struct kunit_kasan_expectation {
	bool report_expected;
	bool report_found;
};

#endif

#if defined(CONFIG_KASAN_GENERIC) || defined(CONFIG_KASAN_SW_TAGS)

#include <linux/pgtable.h>

/* Software KASAN implementations use shadow memory. */

#ifdef CONFIG_KASAN_SW_TAGS
#define KASAN_SHADOW_INIT 0xFF
#else
#define KASAN_SHADOW_INIT 0
#endif
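
/*
 * Note: a shadow value of 0 marks memory as fully accessible under
 * generic KASAN, while the tag-based modes initialize shadow to 0xFF,
 * the default tag carried by untagged kernel pointers.
 */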

#ifndef PTE_HWTABLE_PTRS
#define PTE_HWTABLE_PTRS 0
#endif

extern unsigned char kasan_early_shadow_page[PAGE_SIZE];
extern pte_t kasan_early_shadow_pte[PTRS_PER_PTE + PTE_HWTABLE_PTRS];
extern pmd_t kasan_early_shadow_pmd[PTRS_PER_PMD];
extern pud_t kasan_early_shadow_pud[PTRS_PER_PUD];
extern p4d_t kasan_early_shadow_p4d[MAX_PTRS_PER_P4D];

int kasan_populate_early_shadow(const void *shadow_start,
				const void *shadow_end);

static inline void *kasan_mem_to_shadow(const void *addr)
{
	return (void *)((unsigned long)addr >> KASAN_SHADOW_SCALE_SHIFT)
		+ KASAN_SHADOW_OFFSET;
}
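
/*
 * Illustrative example (values are arch- and mode-specific): with
 * generic KASAN on x86-64, KASAN_SHADOW_SCALE_SHIFT is 3, so each
 * shadow byte covers an 8-byte granule and the shadow byte for an
 * address p lives at (p >> 3) + KASAN_SHADOW_OFFSET.
 */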

int kasan_add_zero_shadow(void *start, unsigned long size);
void kasan_remove_zero_shadow(void *start, unsigned long size);

/* Enable reporting bugs after kasan_disable_current() */
extern void kasan_enable_current(void);

/* Disable reporting bugs for current task */
extern void kasan_disable_current(void);
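
/*
 * Hypothetical usage sketch: code that legitimately touches poisoned
 * memory can suppress reports for the current task by pairing these:
 *
 *	kasan_disable_current();
 *	access_poisoned_object();	(hypothetical helper)
 *	kasan_enable_current();
 */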

#else /* CONFIG_KASAN_GENERIC || CONFIG_KASAN_SW_TAGS */

static inline int kasan_add_zero_shadow(void *start, unsigned long size)
{
	return 0;
}
static inline void kasan_remove_zero_shadow(void *start,
					unsigned long size)
{}

static inline void kasan_enable_current(void) {}
static inline void kasan_disable_current(void) {}

#endif /* CONFIG_KASAN_GENERIC || CONFIG_KASAN_SW_TAGS */

#ifdef CONFIG_KASAN

struct kasan_cache {
	int alloc_meta_offset;
	int free_meta_offset;
};

#ifdef CONFIG_KASAN_HW_TAGS

DECLARE_STATIC_KEY_FALSE(kasan_flag_enabled);

static __always_inline bool kasan_enabled(void)
{
	return static_branch_likely(&kasan_flag_enabled);
}

#else /* CONFIG_KASAN_HW_TAGS */

static inline bool kasan_enabled(void)
{
	return true;
}

#endif /* CONFIG_KASAN_HW_TAGS */
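
/*
 * The hooks below share a pattern: an __always_inline kasan_*() wrapper
 * checks kasan_enabled() before calling the out-of-line __kasan_*()
 * implementation. Under HW_TAGS, kasan_enabled() is a static branch
 * patched during boot, so when KASAN is off each hook costs little more
 * than a NOP; in the other modes it constant-folds to true.
 */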

slab_flags_t __kasan_never_merge(void);
static __always_inline slab_flags_t kasan_never_merge(void)
{
	if (kasan_enabled())
		return __kasan_never_merge();
	return 0;
}

void __kasan_unpoison_range(const void *addr, size_t size);
static __always_inline void kasan_unpoison_range(const void *addr, size_t size)
{
	if (kasan_enabled())
		__kasan_unpoison_range(addr, size);
}

void __kasan_alloc_pages(struct page *page, unsigned int order);
static __always_inline void kasan_alloc_pages(struct page *page,
						unsigned int order)
{
	if (kasan_enabled())
		__kasan_alloc_pages(page, order);
}

void __kasan_free_pages(struct page *page, unsigned int order);
static __always_inline void kasan_free_pages(struct page *page,
						unsigned int order)
{
	if (kasan_enabled())
		__kasan_free_pages(page, order);
}

void __kasan_cache_create(struct kmem_cache *cache, unsigned int *size,
				slab_flags_t *flags);
static __always_inline void kasan_cache_create(struct kmem_cache *cache,
				unsigned int *size, slab_flags_t *flags)
{
	if (kasan_enabled())
		__kasan_cache_create(cache, size, flags);
}

size_t __kasan_metadata_size(struct kmem_cache *cache);
static __always_inline size_t kasan_metadata_size(struct kmem_cache *cache)
{
	if (kasan_enabled())
		return __kasan_metadata_size(cache);
	return 0;
}

void __kasan_poison_slab(struct page *page);
static __always_inline void kasan_poison_slab(struct page *page)
{
	if (kasan_enabled())
		__kasan_poison_slab(page);
}

void __kasan_unpoison_object_data(struct kmem_cache *cache, void *object);
static __always_inline void kasan_unpoison_object_data(struct kmem_cache *cache,
							void *object)
{
	if (kasan_enabled())
		__kasan_unpoison_object_data(cache, object);
}

void __kasan_poison_object_data(struct kmem_cache *cache, void *object);
static __always_inline void kasan_poison_object_data(struct kmem_cache *cache,
							void *object)
{
	if (kasan_enabled())
		__kasan_poison_object_data(cache, object);
}

void * __must_check __kasan_init_slab_obj(struct kmem_cache *cache,
					  const void *object);
static __always_inline void * __must_check kasan_init_slab_obj(
				struct kmem_cache *cache, const void *object)
{
	if (kasan_enabled())
		return __kasan_init_slab_obj(cache, object);
	return (void *)object;
}

bool __kasan_slab_free(struct kmem_cache *s, void *object, unsigned long ip);
static __always_inline bool kasan_slab_free(struct kmem_cache *s, void *object,
						unsigned long ip)
{
	if (kasan_enabled())
		return __kasan_slab_free(s, object, ip);
	return false;
}
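
/*
 * Note: a true return from kasan_slab_free() means KASAN took ownership
 * of the object (for example by placing it in the generic-KASAN
 * quarantine), so the slab allocator must not free it immediately.
 */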

void __kasan_slab_free_mempool(void *ptr, unsigned long ip);
static __always_inline void kasan_slab_free_mempool(void *ptr, unsigned long ip)
{
	if (kasan_enabled())
		__kasan_slab_free_mempool(ptr, ip);
}

void * __must_check __kasan_slab_alloc(struct kmem_cache *s,
				       void *object, gfp_t flags);
static __always_inline void * __must_check kasan_slab_alloc(
				struct kmem_cache *s, void *object, gfp_t flags)
{
	if (kasan_enabled())
		return __kasan_slab_alloc(s, object, flags);
	return object;
}
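
/*
 * Note: in the tag-based modes the allocation hooks may return a pointer
 * with a newly assigned tag, so callers must use the returned pointer
 * rather than the one passed in (hence the __must_check annotations).
 */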

void * __must_check __kasan_kmalloc(struct kmem_cache *s, const void *object,
				    size_t size, gfp_t flags);
static __always_inline void * __must_check kasan_kmalloc(struct kmem_cache *s,
				const void *object, size_t size, gfp_t flags)
{
	if (kasan_enabled())
		return __kasan_kmalloc(s, object, size, flags);
	return (void *)object;
}

void * __must_check __kasan_kmalloc_large(const void *ptr,
					  size_t size, gfp_t flags);
static __always_inline void * __must_check kasan_kmalloc_large(const void *ptr,
						size_t size, gfp_t flags)
{
	if (kasan_enabled())
		return __kasan_kmalloc_large(ptr, size, flags);
	return (void *)ptr;
}

void * __must_check __kasan_krealloc(const void *object,
				     size_t new_size, gfp_t flags);
static __always_inline void * __must_check kasan_krealloc(const void *object,
						size_t new_size, gfp_t flags)
{
	if (kasan_enabled())
		return __kasan_krealloc(object, new_size, flags);
	return (void *)object;
}

void __kasan_kfree_large(void *ptr, unsigned long ip);
static __always_inline void kasan_kfree_large(void *ptr, unsigned long ip)
{
	if (kasan_enabled())
		__kasan_kfree_large(ptr, ip);
}

bool kasan_save_enable_multi_shot(void);
void kasan_restore_multi_shot(bool enabled);
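
/*
 * Usage sketch: tests that deliberately trigger several reports switch
 * to multi-shot reporting and restore the previous mode afterwards:
 *
 *	bool multishot = kasan_save_enable_multi_shot();
 *	(trigger multiple bad accesses)
 *	kasan_restore_multi_shot(multishot);
 */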

#else /* CONFIG_KASAN */

static inline bool kasan_enabled(void)
{
	return false;
}
static inline slab_flags_t kasan_never_merge(void)
{
	return 0;
}
static inline void kasan_unpoison_range(const void *address, size_t size) {}
static inline void kasan_alloc_pages(struct page *page, unsigned int order) {}
static inline void kasan_free_pages(struct page *page, unsigned int order) {}
static inline void kasan_cache_create(struct kmem_cache *cache,
				      unsigned int *size,
				      slab_flags_t *flags) {}
static inline size_t kasan_metadata_size(struct kmem_cache *cache) { return 0; }
static inline void kasan_poison_slab(struct page *page) {}
static inline void kasan_unpoison_object_data(struct kmem_cache *cache,
					void *object) {}
static inline void kasan_poison_object_data(struct kmem_cache *cache,
					void *object) {}
static inline void *kasan_init_slab_obj(struct kmem_cache *cache,
				const void *object)
{
	return (void *)object;
}
static inline bool kasan_slab_free(struct kmem_cache *s, void *object,
				   unsigned long ip)
{
	return false;
}
static inline void kasan_slab_free_mempool(void *ptr, unsigned long ip) {}
static inline void *kasan_slab_alloc(struct kmem_cache *s, void *object,
				   gfp_t flags)
{
	return object;
}
static inline void *kasan_kmalloc(struct kmem_cache *s, const void *object,
				size_t size, gfp_t flags)
{
	return (void *)object;
}
static inline void *kasan_kmalloc_large(const void *ptr, size_t size, gfp_t flags)
{
	return (void *)ptr;
}
static inline void *kasan_krealloc(const void *object, size_t new_size,
				 gfp_t flags)
{
	return (void *)object;
}
static inline void kasan_kfree_large(void *ptr, unsigned long ip) {}

#endif /* CONFIG_KASAN */

#if defined(CONFIG_KASAN) && CONFIG_KASAN_STACK
void kasan_unpoison_task_stack(struct task_struct *task);
#else
static inline void kasan_unpoison_task_stack(struct task_struct *task) {}
#endif

#ifdef CONFIG_KASAN_GENERIC

void kasan_cache_shrink(struct kmem_cache *cache);
void kasan_cache_shutdown(struct kmem_cache *cache);
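/*
 * kasan_record_aux_stack() saves an auxiliary call stack in the object's
 * KASAN metadata; callers such as call_rcu() use it so that reports can
 * also show where an object was queued, not only where it was allocated
 * and freed.
 */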
void kasan_record_aux_stack(void *ptr);

#else /* CONFIG_KASAN_GENERIC */

static inline void kasan_cache_shrink(struct kmem_cache *cache) {}
static inline void kasan_cache_shutdown(struct kmem_cache *cache) {}
static inline void kasan_record_aux_stack(void *ptr) {}

#endif /* CONFIG_KASAN_GENERIC */

#if defined(CONFIG_KASAN_SW_TAGS) || defined(CONFIG_KASAN_HW_TAGS)

static inline void *kasan_reset_tag(const void *addr)
{
	return (void *)arch_kasan_reset_tag(addr);
}
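
/*
 * Note: kasan_reset_tag() returns the pointer with its tag bits reset to
 * the native value; use it before pointer comparisons or arithmetic on
 * pointers that may carry different tags.
 */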

bool kasan_report(unsigned long addr, size_t size,
		bool is_write, unsigned long ip);

#else /* CONFIG_KASAN_SW_TAGS || CONFIG_KASAN_HW_TAGS */

static inline void *kasan_reset_tag(const void *addr)
{
	return (void *)addr;
}

#endif /* CONFIG_KASAN_SW_TAGS || CONFIG_KASAN_HW_TAGS */

#ifdef CONFIG_KASAN_SW_TAGS
void __init kasan_init_sw_tags(void);
#else
static inline void kasan_init_sw_tags(void) { }
#endif

#ifdef CONFIG_KASAN_HW_TAGS
void kasan_init_hw_tags_cpu(void);
void __init kasan_init_hw_tags(void);
#else
static inline void kasan_init_hw_tags_cpu(void) { }
static inline void kasan_init_hw_tags(void) { }
#endif

#ifdef CONFIG_KASAN_VMALLOC

int kasan_populate_vmalloc(unsigned long addr, unsigned long size);
void kasan_poison_vmalloc(const void *start, unsigned long size);
void kasan_unpoison_vmalloc(const void *start, unsigned long size);
void kasan_release_vmalloc(unsigned long start, unsigned long end,
			   unsigned long free_region_start,
			   unsigned long free_region_end);
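
/*
 * Shadow lifecycle for vmalloc: kasan_populate_vmalloc() backs a new
 * allocation with real shadow memory, the poison/unpoison helpers mark
 * its validity, and kasan_release_vmalloc() frees the shadow behind a
 * region once it has become part of a larger free area.
 */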

#else /* CONFIG_KASAN_VMALLOC */

static inline int kasan_populate_vmalloc(unsigned long start,
					unsigned long size)
{
	return 0;
}

static inline void kasan_poison_vmalloc(const void *start, unsigned long size)
{ }
static inline void kasan_unpoison_vmalloc(const void *start, unsigned long size)
{ }
static inline void kasan_release_vmalloc(unsigned long start,
					 unsigned long end,
					 unsigned long free_region_start,
					 unsigned long free_region_end) {}

#endif /* CONFIG_KASAN_VMALLOC */

#if (defined(CONFIG_KASAN_GENERIC) || defined(CONFIG_KASAN_SW_TAGS)) && \
		!defined(CONFIG_KASAN_VMALLOC)

/*
 * These functions provide a special case to support backing module
 * allocations with real shadow memory. With KASAN vmalloc, the special
 * case is unnecessary, as the work is handled in the generic case.
 */
int kasan_module_alloc(void *addr, size_t size);
void kasan_free_shadow(const struct vm_struct *vm);

#else /* (CONFIG_KASAN_GENERIC || CONFIG_KASAN_SW_TAGS) && !CONFIG_KASAN_VMALLOC */

static inline int kasan_module_alloc(void *addr, size_t size) { return 0; }
static inline void kasan_free_shadow(const struct vm_struct *vm) {}

#endif /* (CONFIG_KASAN_GENERIC || CONFIG_KASAN_SW_TAGS) && !CONFIG_KASAN_VMALLOC */
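
/*
 * With inline instrumentation a wild access faults inside the
 * compiler-generated shadow check rather than producing a KASAN report;
 * kasan_non_canonical_hook() lets the architecture's fault handler print
 * a hint when a fault address looks like such a shadow dereference.
 */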
#ifdef CONFIG_KASAN_INLINE
void kasan_non_canonical_hook(unsigned long addr);
#else /* CONFIG_KASAN_INLINE */
static inline void kasan_non_canonical_hook(unsigned long addr) { }
#endif /* CONFIG_KASAN_INLINE */

#endif /* _LINUX_KASAN_H */