/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _LINUX_KASAN_H
#define _LINUX_KASAN_H

#include <linux/kasan-enabled.h>
#include <linux/kasan-tags.h>
#include <linux/kernel.h>
#include <linux/static_key.h>
#include <linux/types.h>

struct kmem_cache;
struct page;
struct slab;
struct vm_struct;
struct task_struct;

#ifdef CONFIG_KASAN

#include <linux/linkage.h>
#include <asm/kasan.h>

#endif

typedef unsigned int __bitwise kasan_vmalloc_flags_t;

#define KASAN_VMALLOC_NONE		((__force kasan_vmalloc_flags_t)0x00u)
#define KASAN_VMALLOC_INIT		((__force kasan_vmalloc_flags_t)0x01u)
#define KASAN_VMALLOC_VM_ALLOC		((__force kasan_vmalloc_flags_t)0x02u)
#define KASAN_VMALLOC_PROT_NORMAL	((__force kasan_vmalloc_flags_t)0x04u)

#if defined(CONFIG_KASAN_GENERIC) || defined(CONFIG_KASAN_SW_TAGS)

#include <linux/pgtable.h>

/* Software KASAN implementations use shadow memory. */

#ifdef CONFIG_KASAN_SW_TAGS
/* This matches KASAN_TAG_INVALID. */
#define KASAN_SHADOW_INIT 0xFE
#else
#define KASAN_SHADOW_INIT 0
#endif

#ifndef PTE_HWTABLE_PTRS
#define PTE_HWTABLE_PTRS 0
#endif

extern unsigned char kasan_early_shadow_page[PAGE_SIZE];
extern pte_t kasan_early_shadow_pte[MAX_PTRS_PER_PTE + PTE_HWTABLE_PTRS];
extern pmd_t kasan_early_shadow_pmd[MAX_PTRS_PER_PMD];
extern pud_t kasan_early_shadow_pud[MAX_PTRS_PER_PUD];
extern p4d_t kasan_early_shadow_p4d[MAX_PTRS_PER_P4D];

int kasan_populate_early_shadow(const void *shadow_start,
				const void *shadow_end);

#ifndef kasan_mem_to_shadow
static inline void *kasan_mem_to_shadow(const void *addr)
{
	return (void *)((unsigned long)addr >> KASAN_SHADOW_SCALE_SHIFT)
		+ KASAN_SHADOW_OFFSET;
}
#endif

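/*
 * Illustrative sketch, not part of the API above: in the generic mode one
 * shadow byte tracks an 8-byte granule (KASAN_SHADOW_SCALE_SHIFT == 3), so
 * addresses 8 bytes apart map to adjacent shadow bytes:
 *
 *	u8 *s0 = kasan_mem_to_shadow(p);	// p is some mapped address
 *	u8 *s1 = kasan_mem_to_shadow(p + 8);	// s1 == s0 + 1
 */
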
int kasan_add_zero_shadow(void *start, unsigned long size);
void kasan_remove_zero_shadow(void *start, unsigned long size);

/* Enable reporting bugs after kasan_disable_current() */
extern void kasan_enable_current(void);

/* Disable reporting bugs for current task */
extern void kasan_disable_current(void);

#else /* CONFIG_KASAN_GENERIC || CONFIG_KASAN_SW_TAGS */

static inline int kasan_add_zero_shadow(void *start, unsigned long size)
{
	return 0;
}
static inline void kasan_remove_zero_shadow(void *start, unsigned long size) {}

static inline void kasan_enable_current(void) {}
static inline void kasan_disable_current(void) {}

#endif /* CONFIG_KASAN_GENERIC || CONFIG_KASAN_SW_TAGS */

#ifdef CONFIG_KASAN_HW_TAGS

#else /* CONFIG_KASAN_HW_TAGS */

#endif /* CONFIG_KASAN_HW_TAGS */

static inline bool kasan_has_integrated_init(void)
{
	return kasan_hw_tags_enabled();
}

#ifdef CONFIG_KASAN

void __kasan_unpoison_range(const void *addr, size_t size);
static __always_inline void kasan_unpoison_range(const void *addr, size_t size)
{
	if (kasan_enabled())
		__kasan_unpoison_range(addr, size);
}

void __kasan_poison_pages(struct page *page, unsigned int order, bool init);
static __always_inline void kasan_poison_pages(struct page *page,
					       unsigned int order, bool init)
{
	if (kasan_enabled())
		__kasan_poison_pages(page, order, init);
}

bool __kasan_unpoison_pages(struct page *page, unsigned int order, bool init);
static __always_inline bool kasan_unpoison_pages(struct page *page,
						 unsigned int order, bool init)
{
	if (kasan_enabled())
		return __kasan_unpoison_pages(page, order, init);
	return false;
}

void __kasan_poison_slab(struct slab *slab);
static __always_inline void kasan_poison_slab(struct slab *slab)
{
	if (kasan_enabled())
		__kasan_poison_slab(slab);
}

void __kasan_unpoison_new_object(struct kmem_cache *cache, void *object);
/**
 * kasan_unpoison_new_object - Temporarily unpoison a new slab object.
 * @cache: Cache the object belongs to.
 * @object: Pointer to the object.
 *
 * This function is intended for the slab allocator's internal use. It
 * temporarily unpoisons an object from a newly allocated slab without doing
 * anything else. The object must later be repoisoned by
 * kasan_poison_new_object().
 */
static __always_inline void kasan_unpoison_new_object(struct kmem_cache *cache,
						      void *object)
{
	if (kasan_enabled())
		__kasan_unpoison_new_object(cache, object);
}

void __kasan_poison_new_object(struct kmem_cache *cache, void *object);
/**
 * kasan_poison_new_object - Repoison a new slab object.
 * @cache: Cache the object belongs to.
 * @object: Pointer to the object.
 *
 * This function is intended for the slab allocator's internal use. It
 * repoisons an object that was previously unpoisoned by
 * kasan_unpoison_new_object() without doing anything else.
 */
static __always_inline void kasan_poison_new_object(struct kmem_cache *cache,
						    void *object)
{
	if (kasan_enabled())
		__kasan_poison_new_object(cache, object);
}

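/*
 * Illustrative sketch of the intended pairing, assuming some slab-internal
 * metadata_init() helper (hypothetical name):
 *
 *	kasan_unpoison_new_object(cache, object);
 *	metadata_init(object);			// legally touch the object
 *	kasan_poison_new_object(cache, object);	// repoison until allocation
 */
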
void * __must_check __kasan_init_slab_obj(struct kmem_cache *cache,
					  const void *object);
static __always_inline void * __must_check kasan_init_slab_obj(
				struct kmem_cache *cache, const void *object)
{
	if (kasan_enabled())
		return __kasan_init_slab_obj(cache, object);
	return (void *)object;
}

bool __kasan_slab_free(struct kmem_cache *s, void *object,
		       unsigned long ip, bool init);
static __always_inline bool kasan_slab_free(struct kmem_cache *s,
					    void *object, bool init)
{
	if (kasan_enabled())
		return __kasan_slab_free(s, object, _RET_IP_, init);
	return false;
}

void __kasan_kfree_large(void *ptr, unsigned long ip);
static __always_inline void kasan_kfree_large(void *ptr)
{
	if (kasan_enabled())
		__kasan_kfree_large(ptr, _RET_IP_);
}

void * __must_check __kasan_slab_alloc(struct kmem_cache *s,
				       void *object, gfp_t flags, bool init);
static __always_inline void * __must_check kasan_slab_alloc(
		struct kmem_cache *s, void *object, gfp_t flags, bool init)
{
	if (kasan_enabled())
		return __kasan_slab_alloc(s, object, flags, init);
	return object;
}

void * __must_check __kasan_kmalloc(struct kmem_cache *s, const void *object,
				    size_t size, gfp_t flags);
static __always_inline void * __must_check kasan_kmalloc(struct kmem_cache *s,
		const void *object, size_t size, gfp_t flags)
{
	if (kasan_enabled())
		return __kasan_kmalloc(s, object, size, flags);
	return (void *)object;
}

void * __must_check __kasan_kmalloc_large(const void *ptr,
					  size_t size, gfp_t flags);
static __always_inline void * __must_check kasan_kmalloc_large(const void *ptr,
						size_t size, gfp_t flags)
{
	if (kasan_enabled())
		return __kasan_kmalloc_large(ptr, size, flags);
	return (void *)ptr;
}

void * __must_check __kasan_krealloc(const void *object,
				     size_t new_size, gfp_t flags);
static __always_inline void * __must_check kasan_krealloc(const void *object,
						size_t new_size, gfp_t flags)
{
	if (kasan_enabled())
		return __kasan_krealloc(object, new_size, flags);
	return (void *)object;
}

bool __kasan_mempool_poison_pages(struct page *page, unsigned int order,
				  unsigned long ip);
/**
 * kasan_mempool_poison_pages - Check and poison a mempool page allocation.
 * @page: Pointer to the page allocation.
 * @order: Order of the allocation.
 *
 * This function is intended for kernel subsystems that cache page allocations
 * to reuse them instead of freeing them back to page_alloc (e.g. mempool).
 *
 * This function is similar to kasan_mempool_poison_object() but operates on
 * page allocations.
 *
 * Before the poisoned allocation can be reused, it must be unpoisoned via
 * kasan_mempool_unpoison_pages().
 *
 * Return: true if the allocation can be safely reused; false otherwise.
 */
static __always_inline bool kasan_mempool_poison_pages(struct page *page,
						       unsigned int order)
{
	if (kasan_enabled())
		return __kasan_mempool_poison_pages(page, order, _RET_IP_);
	return true;
}

void __kasan_mempool_unpoison_pages(struct page *page, unsigned int order,
				    unsigned long ip);
/**
 * kasan_mempool_unpoison_pages - Unpoison a mempool page allocation.
 * @page: Pointer to the page allocation.
 * @order: Order of the allocation.
 *
 * This function is intended for kernel subsystems that cache page allocations
 * to reuse them instead of freeing them back to page_alloc (e.g. mempool).
 *
 * This function unpoisons a page allocation that was previously poisoned by
 * kasan_mempool_poison_pages() without zeroing the allocation's memory. For
 * the tag-based modes, this function assigns a new tag to the allocation.
 */
static __always_inline void kasan_mempool_unpoison_pages(struct page *page,
							 unsigned int order)
{
	if (kasan_enabled())
		__kasan_mempool_unpoison_pages(page, order, _RET_IP_);
}

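/*
 * Illustrative sketch, with a hypothetical pool type: pages are poisoned on
 * the way into the cache and unpoisoned on the way out, so parked pages stay
 * inaccessible while cached:
 *
 *	if (kasan_mempool_poison_pages(page, pool->order))
 *		list_add(&page->lru, &pool->pages);
 *	...
 *	page = list_first_entry(&pool->pages, struct page, lru);
 *	list_del(&page->lru);
 *	kasan_mempool_unpoison_pages(page, pool->order);
 */
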
bool __kasan_mempool_poison_object(void *ptr, unsigned long ip);
/**
 * kasan_mempool_poison_object - Check and poison a mempool slab allocation.
 * @ptr: Pointer to the slab allocation.
 *
 * This function is intended for kernel subsystems that cache slab allocations
 * to reuse them instead of freeing them back to the slab allocator (e.g.
 * mempool).
 *
 * This function poisons a slab allocation and saves a free stack trace for it
 * without initializing the allocation's memory and without putting it into the
 * quarantine (for the Generic mode).
 *
 * This function also performs checks to detect double-free and invalid-free
 * bugs and reports them. The caller can use the return value of this function
 * to find out if the allocation is buggy.
 *
 * Before the poisoned allocation can be reused, it must be unpoisoned via
 * kasan_mempool_unpoison_object().
 *
 * This function operates on all slab allocations including large kmalloc
 * allocations (the ones returned by kmalloc_large() or by kmalloc() with the
 * size > KMALLOC_MAX_CACHE_SIZE).
 *
 * Return: true if the allocation can be safely reused; false otherwise.
 */
static __always_inline bool kasan_mempool_poison_object(void *ptr)
{
	if (kasan_enabled())
		return __kasan_mempool_poison_object(ptr, _RET_IP_);
	return true;
}

void __kasan_mempool_unpoison_object(void *ptr, size_t size, unsigned long ip);
/**
 * kasan_mempool_unpoison_object - Unpoison a mempool slab allocation.
 * @ptr: Pointer to the slab allocation.
 * @size: Size to be unpoisoned.
 *
 * This function is intended for kernel subsystems that cache slab allocations
 * to reuse them instead of freeing them back to the slab allocator (e.g.
 * mempool).
 *
 * This function unpoisons a slab allocation that was previously poisoned via
 * kasan_mempool_poison_object() and saves an alloc stack trace for it without
 * initializing the allocation's memory. For the tag-based modes, this function
 * does not assign a new tag to the allocation and instead restores the
 * original tags based on the pointer value.
 *
 * This function operates on all slab allocations including large kmalloc
 * allocations (the ones returned by kmalloc_large() or by kmalloc() with the
 * size > KMALLOC_MAX_CACHE_SIZE).
 */
static __always_inline void kasan_mempool_unpoison_object(void *ptr,
							  size_t size)
{
	if (kasan_enabled())
		__kasan_mempool_unpoison_object(ptr, size, _RET_IP_);
}

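/*
 * Illustrative round trip for a slab-backed element, with hypothetical pool
 * fields:
 *
 *	// element enters the cache:
 *	if (kasan_mempool_poison_object(elem))
 *		pool->elements[pool->nr++] = elem;
 *
 *	// element leaves the cache:
 *	elem = pool->elements[--pool->nr];
 *	kasan_mempool_unpoison_object(elem, pool->elem_size);
 */
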
/*
 * Unlike kasan_check_read/write(), kasan_check_byte() is performed even for
 * the hardware tag-based mode that doesn't rely on compiler instrumentation.
 */
bool __kasan_check_byte(const void *addr, unsigned long ip);
static __always_inline bool kasan_check_byte(const void *addr)
{
	if (kasan_enabled())
		return __kasan_check_byte(addr, _RET_IP_);
	return true;
}

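/*
 * Illustrative sketch: a helper that is about to access memory through an
 * uninstrumented path can pre-check the address and bail out once KASAN has
 * printed a report (this mirrors how ksize() uses the check):
 *
 *	if (!kasan_check_byte(ptr))
 *		return 0;	// bad access already reported
 */
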
#else /* CONFIG_KASAN */

static inline void kasan_unpoison_range(const void *address, size_t size) {}
static inline void kasan_poison_pages(struct page *page, unsigned int order,
				      bool init) {}
static inline bool kasan_unpoison_pages(struct page *page, unsigned int order,
					bool init)
{
	return false;
}
static inline void kasan_poison_slab(struct slab *slab) {}
static inline void kasan_unpoison_new_object(struct kmem_cache *cache,
					     void *object) {}
static inline void kasan_poison_new_object(struct kmem_cache *cache,
					   void *object) {}
static inline void *kasan_init_slab_obj(struct kmem_cache *cache,
					const void *object)
{
	return (void *)object;
}
static inline bool kasan_slab_free(struct kmem_cache *s, void *object, bool init)
{
	return false;
}
static inline void kasan_kfree_large(void *ptr) {}
static inline void *kasan_slab_alloc(struct kmem_cache *s, void *object,
				     gfp_t flags, bool init)
{
	return object;
}
static inline void *kasan_kmalloc(struct kmem_cache *s, const void *object,
				  size_t size, gfp_t flags)
{
	return (void *)object;
}
static inline void *kasan_kmalloc_large(const void *ptr, size_t size, gfp_t flags)
{
	return (void *)ptr;
}
static inline void *kasan_krealloc(const void *object, size_t new_size,
				   gfp_t flags)
{
	return (void *)object;
}
static inline bool kasan_mempool_poison_pages(struct page *page, unsigned int order)
{
	return true;
}
static inline void kasan_mempool_unpoison_pages(struct page *page, unsigned int order) {}
static inline bool kasan_mempool_poison_object(void *ptr)
{
	return true;
}
static inline void kasan_mempool_unpoison_object(void *ptr, size_t size) {}
static inline bool kasan_check_byte(const void *address)
{
	return true;
}

#endif /* CONFIG_KASAN */

#if defined(CONFIG_KASAN) && defined(CONFIG_KASAN_STACK)
void kasan_unpoison_task_stack(struct task_struct *task);
asmlinkage void kasan_unpoison_task_stack_below(const void *watermark);
#else
static inline void kasan_unpoison_task_stack(struct task_struct *task) {}
static inline void kasan_unpoison_task_stack_below(const void *watermark) {}
#endif

#ifdef CONFIG_KASAN_GENERIC

struct kasan_cache {
	int alloc_meta_offset;
	int free_meta_offset;
};

size_t kasan_metadata_size(struct kmem_cache *cache, bool in_object);
void kasan_cache_create(struct kmem_cache *cache, unsigned int *size,
			slab_flags_t *flags);

void kasan_cache_shrink(struct kmem_cache *cache);
void kasan_cache_shutdown(struct kmem_cache *cache);
void kasan_record_aux_stack(void *ptr);
void kasan_record_aux_stack_noalloc(void *ptr);

#else /* CONFIG_KASAN_GENERIC */

/* Tag-based KASAN modes do not use per-object metadata. */
static inline size_t kasan_metadata_size(struct kmem_cache *cache,
					 bool in_object)
{
	return 0;
}
/* And no cache-related metadata initialization is required. */
static inline void kasan_cache_create(struct kmem_cache *cache,
				      unsigned int *size,
				      slab_flags_t *flags) {}

static inline void kasan_cache_shrink(struct kmem_cache *cache) {}
static inline void kasan_cache_shutdown(struct kmem_cache *cache) {}
static inline void kasan_record_aux_stack(void *ptr) {}
static inline void kasan_record_aux_stack_noalloc(void *ptr) {}

#endif /* CONFIG_KASAN_GENERIC */

#if defined(CONFIG_KASAN_SW_TAGS) || defined(CONFIG_KASAN_HW_TAGS)

static inline void *kasan_reset_tag(const void *addr)
{
	return (void *)arch_kasan_reset_tag(addr);
}

/**
 * kasan_report - print a report about a bad memory access detected by KASAN
 * @addr: address of the bad access
 * @size: size of the bad access
 * @is_write: whether the bad access is a write or a read
 * @ip: instruction pointer for the accessibility check or the bad access itself
 */
bool kasan_report(const void *addr, size_t size,
		  bool is_write, unsigned long ip);

#else /* CONFIG_KASAN_SW_TAGS || CONFIG_KASAN_HW_TAGS */

static inline void *kasan_reset_tag(const void *addr)
{
	return (void *)addr;
}

#endif /* CONFIG_KASAN_SW_TAGS || CONFIG_KASAN_HW_TAGS */

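/*
 * Illustrative sketch: in the tag-based modes the top byte of a pointer
 * carries its tag, so two pointers to the same object can compare unequal
 * until the tags are stripped:
 *
 *	if (kasan_reset_tag(p) == kasan_reset_tag(q))
 *		...	// same untagged address
 */
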
#ifdef CONFIG_KASAN_HW_TAGS

void kasan_report_async(void);

#endif /* CONFIG_KASAN_HW_TAGS */

#ifdef CONFIG_KASAN_SW_TAGS
void __init kasan_init_sw_tags(void);
#else
static inline void kasan_init_sw_tags(void) { }
#endif

#ifdef CONFIG_KASAN_HW_TAGS
void kasan_init_hw_tags_cpu(void);
void __init kasan_init_hw_tags(void);
#else
static inline void kasan_init_hw_tags_cpu(void) { }
static inline void kasan_init_hw_tags(void) { }
#endif

#ifdef CONFIG_KASAN_VMALLOC

#if defined(CONFIG_KASAN_GENERIC) || defined(CONFIG_KASAN_SW_TAGS)

void kasan_populate_early_vm_area_shadow(void *start, unsigned long size);
int kasan_populate_vmalloc(unsigned long addr, unsigned long size);
void kasan_release_vmalloc(unsigned long start, unsigned long end,
			   unsigned long free_region_start,
			   unsigned long free_region_end);

#else /* CONFIG_KASAN_GENERIC || CONFIG_KASAN_SW_TAGS */

static inline void kasan_populate_early_vm_area_shadow(void *start,
						       unsigned long size)
{ }
static inline int kasan_populate_vmalloc(unsigned long start,
					 unsigned long size)
{
	return 0;
}
static inline void kasan_release_vmalloc(unsigned long start,
					 unsigned long end,
					 unsigned long free_region_start,
					 unsigned long free_region_end) { }

#endif /* CONFIG_KASAN_GENERIC || CONFIG_KASAN_SW_TAGS */

void *__kasan_unpoison_vmalloc(const void *start, unsigned long size,
			       kasan_vmalloc_flags_t flags);
static __always_inline void *kasan_unpoison_vmalloc(const void *start,
						    unsigned long size,
						    kasan_vmalloc_flags_t flags)
{
	if (kasan_enabled())
		return __kasan_unpoison_vmalloc(start, size, flags);
	return (void *)start;
}

void __kasan_poison_vmalloc(const void *start, unsigned long size);
static __always_inline void kasan_poison_vmalloc(const void *start,
						 unsigned long size)
{
	if (kasan_enabled())
		__kasan_poison_vmalloc(start, size);
}

#else /* CONFIG_KASAN_VMALLOC */

static inline void kasan_populate_early_vm_area_shadow(void *start,
						       unsigned long size) { }
static inline int kasan_populate_vmalloc(unsigned long start,
					 unsigned long size)
{
	return 0;
}
static inline void kasan_release_vmalloc(unsigned long start,
					 unsigned long end,
					 unsigned long free_region_start,
					 unsigned long free_region_end) { }

static inline void *kasan_unpoison_vmalloc(const void *start,
					   unsigned long size,
					   kasan_vmalloc_flags_t flags)
{
	return (void *)start;
}

static inline void kasan_poison_vmalloc(const void *start, unsigned long size)
{ }

#endif /* CONFIG_KASAN_VMALLOC */

#if (defined(CONFIG_KASAN_GENERIC) || defined(CONFIG_KASAN_SW_TAGS)) && \
		!defined(CONFIG_KASAN_VMALLOC)

/*
 * These functions allocate and free shadow memory for kernel modules.
 * They are only required when KASAN_VMALLOC is not supported, as otherwise
 * shadow memory is allocated by the generic vmalloc handlers.
 */
int kasan_alloc_module_shadow(void *addr, size_t size, gfp_t gfp_mask);
void kasan_free_module_shadow(const struct vm_struct *vm);

#else /* (CONFIG_KASAN_GENERIC || CONFIG_KASAN_SW_TAGS) && !CONFIG_KASAN_VMALLOC */

static inline int kasan_alloc_module_shadow(void *addr, size_t size,
					    gfp_t gfp_mask) { return 0; }
static inline void kasan_free_module_shadow(const struct vm_struct *vm) {}

#endif /* (CONFIG_KASAN_GENERIC || CONFIG_KASAN_SW_TAGS) && !CONFIG_KASAN_VMALLOC */

#if defined(CONFIG_KASAN_GENERIC) || defined(CONFIG_KASAN_SW_TAGS)
void kasan_non_canonical_hook(unsigned long addr);
#else /* CONFIG_KASAN_GENERIC || CONFIG_KASAN_SW_TAGS */
static inline void kasan_non_canonical_hook(unsigned long addr) { }
#endif /* CONFIG_KASAN_GENERIC || CONFIG_KASAN_SW_TAGS */

#endif /* _LINUX_KASAN_H */