// SPDX-License-Identifier: GPL-2.0
/*
 * This file contains common generic and tag-based KASAN code.
 *
 * Copyright (c) 2014 Samsung Electronics Co., Ltd.
 * Author: Andrey Ryabinin <ryabinin.a.a@gmail.com>
 *
 * Some code borrowed from https://github.com/xairy/kasan-prototype by
 * Andrey Konovalov <andreyknvl@gmail.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */

#include <linux/export.h>
#include <linux/interrupt.h>
#include <linux/init.h>
#include <linux/kasan.h>
#include <linux/kernel.h>
#include <linux/kmemleak.h>
#include <linux/linkage.h>
#include <linux/memblock.h>
#include <linux/memory.h>
#include <linux/mm.h>
#include <linux/module.h>
#include <linux/printk.h>
#include <linux/sched.h>
#include <linux/sched/task_stack.h>
#include <linux/slab.h>
#include <linux/stacktrace.h>
#include <linux/string.h>
#include <linux/types.h>
#include <linux/vmalloc.h>
#include <linux/bug.h>
#include <linux/uaccess.h>

#include <asm/cacheflush.h>
#include <asm/tlbflush.h>

#include "kasan.h"
#include "../slab.h"

static inline int in_irqentry_text(unsigned long ptr)
{
	return (ptr >= (unsigned long)&__irqentry_text_start &&
		ptr < (unsigned long)&__irqentry_text_end) ||
	       (ptr >= (unsigned long)&__softirqentry_text_start &&
		ptr < (unsigned long)&__softirqentry_text_end);
}

static inline unsigned int filter_irq_stacks(unsigned long *entries,
					     unsigned int nr_entries)
{
	unsigned int i;

	for (i = 0; i < nr_entries; i++) {
		if (in_irqentry_text(entries[i])) {
			/* Include the irqentry function into the stack. */
			return i + 1;
		}
	}
	return nr_entries;
}

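/*
 * Illustrative example (the symbol names are hypothetical): for a trace
 * {alloc_fn, kmalloc, irq_entry_fn, stale_fn, ...} where irq_entry_fn lies
 * in the irqentry/softirqentry sections, filter_irq_stacks() returns a
 * length that cuts the trace right after irq_entry_fn, so frames belonging
 * to the interrupted task don't end up in the saved allocation stack.
 */
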
static inline depot_stack_handle_t save_stack(gfp_t flags)
{
	unsigned long entries[KASAN_STACK_DEPTH];
	unsigned int nr_entries;

	nr_entries = stack_trace_save(entries, ARRAY_SIZE(entries), 0);
	nr_entries = filter_irq_stacks(entries, nr_entries);
	return stack_depot_save(entries, nr_entries, flags);
}

static inline void set_track(struct kasan_track *track, gfp_t flags)
{
	track->pid = current->pid;
	track->stack = save_stack(flags);
}

void kasan_enable_current(void)
{
	current->kasan_depth++;
}

void kasan_disable_current(void)
{
	current->kasan_depth--;
}

bool __kasan_check_read(const volatile void *p, unsigned int size)
{
	return check_memory_region((unsigned long)p, size, false, _RET_IP_);
}
EXPORT_SYMBOL(__kasan_check_read);

bool __kasan_check_write(const volatile void *p, unsigned int size)
{
	return check_memory_region((unsigned long)p, size, true, _RET_IP_);
}
EXPORT_SYMBOL(__kasan_check_write);

#undef memset
void *memset(void *addr, int c, size_t len)
{
	if (!check_memory_region((unsigned long)addr, len, true, _RET_IP_))
		return NULL;

	return __memset(addr, c, len);
}

#ifdef __HAVE_ARCH_MEMMOVE
#undef memmove
void *memmove(void *dest, const void *src, size_t len)
{
	if (!check_memory_region((unsigned long)src, len, false, _RET_IP_) ||
	    !check_memory_region((unsigned long)dest, len, true, _RET_IP_))
		return NULL;

	return __memmove(dest, src, len);
}
#endif

#undef memcpy
void *memcpy(void *dest, const void *src, size_t len)
{
	if (!check_memory_region((unsigned long)src, len, false, _RET_IP_) ||
	    !check_memory_region((unsigned long)dest, len, true, _RET_IP_))
		return NULL;

	return __memcpy(dest, src, len);
}

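/*
 * A sketch of the effect of the interceptors above (not part of the
 * original file): a sequence like
 *
 *	char *p = kmalloc(16, GFP_KERNEL);
 *	memset(p, 0, 32);
 *
 * is validated before any byte is written: check_memory_region() finds
 * that bytes [16, 32) fall into the redzone, reports an out-of-bounds
 * write at the caller's _RET_IP_, and the memset itself is skipped
 * (NULL is returned instead).
 */
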
/*
 * Poisons the shadow memory for 'size' bytes starting from 'addr'.
 * Memory addresses should be aligned to KASAN_SHADOW_SCALE_SIZE.
 */
void kasan_poison_shadow(const void *address, size_t size, u8 value)
{
	void *shadow_start, *shadow_end;

	/*
	 * Perform shadow offset calculation based on untagged address, as
	 * some of the callers (e.g. kasan_poison_object_data) pass tagged
	 * addresses to this function.
	 */
	address = reset_tag(address);

	shadow_start = kasan_mem_to_shadow(address);
	shadow_end = kasan_mem_to_shadow(address + size);

	__memset(shadow_start, value, shadow_end - shadow_start);
}

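/*
 * For reference, a sketch of the mapping this relies on (the real helper
 * lives in kasan.h): with the generic KASAN_SHADOW_SCALE_SHIFT of 3, each
 * shadow byte covers 8 bytes of memory:
 *
 *	shadow = (void *)((unsigned long)addr >> KASAN_SHADOW_SCALE_SHIFT)
 *			+ KASAN_SHADOW_OFFSET;
 *
 * so poisoning [address, address + size) writes one shadow byte per
 * KASAN_SHADOW_SCALE_SIZE bytes of the region.
 */
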
void kasan_unpoison_shadow(const void *address, size_t size)
{
	u8 tag = get_tag(address);

	/*
	 * Perform shadow offset calculation based on untagged address, as
	 * some of the callers (e.g. kasan_unpoison_object_data) pass tagged
	 * addresses to this function.
	 */
	address = reset_tag(address);

	kasan_poison_shadow(address, size, tag);

	if (size & KASAN_SHADOW_MASK) {
		u8 *shadow = (u8 *)kasan_mem_to_shadow(address + size);

		if (IS_ENABLED(CONFIG_KASAN_SW_TAGS))
			*shadow = tag;
		else
			*shadow = size & KASAN_SHADOW_MASK;
	}
}

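/*
 * Worked example (generic mode, KASAN_SHADOW_SCALE_SIZE == 8):
 * unpoisoning 13 bytes writes 0 to the first shadow byte (all 8 bytes of
 * the first granule are addressable) and 13 & KASAN_SHADOW_MASK == 5 to
 * the second, marking only the first 5 bytes of that granule valid.
 */
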
static void __kasan_unpoison_stack(struct task_struct *task, const void *sp)
{
	void *base = task_stack_page(task);
	size_t size = sp - base;

	kasan_unpoison_shadow(base, size);
}

/* Unpoison the entire stack for a task. */
void kasan_unpoison_task_stack(struct task_struct *task)
{
	__kasan_unpoison_stack(task, task_stack_page(task) + THREAD_SIZE);
}

/* Unpoison the stack for the current task beyond a watermark sp value. */
asmlinkage void kasan_unpoison_task_stack_below(const void *watermark)
{
	/*
	 * Calculate the task stack base address. Avoid using 'current'
	 * because this function is called by early resume code which hasn't
	 * yet set up the percpu register (%gs).
	 */
	void *base = (void *)((unsigned long)watermark & ~(THREAD_SIZE - 1));

	kasan_unpoison_shadow(base, watermark - base);
}

/*
 * Clear all poison for the region between the current SP and a provided
 * watermark value, as is sometimes required prior to hand-crafted asm function
 * returns in the middle of functions.
 */
void kasan_unpoison_stack_above_sp_to(const void *watermark)
{
	const void *sp = __builtin_frame_address(0);
	size_t size = watermark - sp;

	if (WARN_ON(sp > watermark))
		return;
	kasan_unpoison_shadow(sp, size);
}

void kasan_alloc_pages(struct page *page, unsigned int order)
{
	u8 tag;
	unsigned long i;

	if (unlikely(PageHighMem(page)))
		return;

	tag = random_tag();
	for (i = 0; i < (1 << order); i++)
		page_kasan_tag_set(page + i, tag);
	kasan_unpoison_shadow(page_address(page), PAGE_SIZE << order);
}

void kasan_free_pages(struct page *page, unsigned int order)
{
	if (likely(!PageHighMem(page)))
		kasan_poison_shadow(page_address(page),
				PAGE_SIZE << order,
				KASAN_FREE_PAGE);
}

/*
 * Adaptive redzone policy taken from the userspace AddressSanitizer runtime.
 * For larger allocations larger redzones are used.
 */
static inline unsigned int optimal_redzone(unsigned int object_size)
{
	if (IS_ENABLED(CONFIG_KASAN_SW_TAGS))
		return 0;

	return
		object_size <= 64        - 16   ? 16 :
		object_size <= 128       - 32   ? 32 :
		object_size <= 512       - 64   ? 64 :
		object_size <= 4096      - 128  ? 128 :
		object_size <= (1 << 14) - 256  ? 256 :
		object_size <= (1 << 15) - 512  ? 512 :
		object_size <= (1 << 16) - 1024 ? 1024 : 2048;
}

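/*
 * For example, a 128-byte object falls through the first two checks
 * (128 > 64 - 16 and 128 > 128 - 32) and satisfies 128 <= 512 - 64,
 * so it gets a 64-byte redzone.
 */
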
void kasan_cache_create(struct kmem_cache *cache, unsigned int *size,
			slab_flags_t *flags)
{
	unsigned int orig_size = *size;
	unsigned int redzone_size;
	int redzone_adjust;

	/* Add alloc meta. */
	cache->kasan_info.alloc_meta_offset = *size;
	*size += sizeof(struct kasan_alloc_meta);

	/* Add free meta. */
	if (IS_ENABLED(CONFIG_KASAN_GENERIC) &&
	    (cache->flags & SLAB_TYPESAFE_BY_RCU || cache->ctor ||
	     cache->object_size < sizeof(struct kasan_free_meta))) {
		cache->kasan_info.free_meta_offset = *size;
		*size += sizeof(struct kasan_free_meta);
	}

	redzone_size = optimal_redzone(cache->object_size);
	redzone_adjust = redzone_size - (*size - cache->object_size);
	if (redzone_adjust > 0)
		*size += redzone_adjust;

	*size = min_t(unsigned int, KMALLOC_MAX_SIZE,
			max(*size, cache->object_size + redzone_size));

	/*
	 * If the metadata doesn't fit, don't enable KASAN at all.
	 */
	if (*size <= cache->kasan_info.alloc_meta_offset ||
	    *size <= cache->kasan_info.free_meta_offset) {
		cache->kasan_info.alloc_meta_offset = 0;
		cache->kasan_info.free_meta_offset = 0;
		*size = orig_size;
		return;
	}

	*flags |= SLAB_KASAN;
}

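/*
 * The resulting object layout, sketched for illustration (the free meta
 * is only present for the caches singled out above):
 *
 *	| object | kasan_alloc_meta | [kasan_free_meta] | redzone |
 *
 * with the two metadata offsets recorded in cache->kasan_info.
 */
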
size_t kasan_metadata_size(struct kmem_cache *cache)
{
	return (cache->kasan_info.alloc_meta_offset ?
		sizeof(struct kasan_alloc_meta) : 0) +
		(cache->kasan_info.free_meta_offset ?
		sizeof(struct kasan_free_meta) : 0);
}

struct kasan_alloc_meta *get_alloc_info(struct kmem_cache *cache,
					const void *object)
{
	return (void *)object + cache->kasan_info.alloc_meta_offset;
}

struct kasan_free_meta *get_free_info(struct kmem_cache *cache,
				      const void *object)
{
	BUILD_BUG_ON(sizeof(struct kasan_free_meta) > 32);
	return (void *)object + cache->kasan_info.free_meta_offset;
}

static void kasan_set_free_info(struct kmem_cache *cache,
				void *object, u8 tag)
{
	struct kasan_alloc_meta *alloc_meta;
	u8 idx = 0;

	alloc_meta = get_alloc_info(cache, object);

#ifdef CONFIG_KASAN_SW_TAGS_IDENTIFY
	idx = alloc_meta->free_track_idx;
	alloc_meta->free_pointer_tag[idx] = tag;
	alloc_meta->free_track_idx = (idx + 1) % KASAN_NR_FREE_STACKS;
#endif

	set_track(&alloc_meta->free_track[idx], GFP_NOWAIT);
}

void kasan_poison_slab(struct page *page)
{
	unsigned long i;

	for (i = 0; i < compound_nr(page); i++)
		page_kasan_tag_reset(page + i);
	kasan_poison_shadow(page_address(page), page_size(page),
			KASAN_KMALLOC_REDZONE);
}

void kasan_unpoison_object_data(struct kmem_cache *cache, void *object)
{
	kasan_unpoison_shadow(object, cache->object_size);
}

void kasan_poison_object_data(struct kmem_cache *cache, void *object)
{
	kasan_poison_shadow(object,
			round_up(cache->object_size, KASAN_SHADOW_SCALE_SIZE),
			KASAN_KMALLOC_REDZONE);
}

/*
 * This function assigns a tag to an object considering the following:
 * 1. A cache might have a constructor, which might save a pointer to a slab
 *    object somewhere (e.g. in the object itself). We preassign a tag for
 *    each object in caches with constructors during slab creation and reuse
 *    the same tag each time a particular object is allocated.
 * 2. A cache might be SLAB_TYPESAFE_BY_RCU, which means objects can be
 *    accessed after being freed. We preassign tags for objects in these
 *    caches as well.
 * 3. For SLAB allocator we can't preassign tags randomly since the freelist
 *    is stored as an array of indexes instead of a linked list. Assign tags
 *    based on objects' indexes, so that objects that are next to each other
 *    get different tags.
 */
static u8 assign_tag(struct kmem_cache *cache, const void *object,
		     bool init, bool keep_tag)
{
	/*
	 * 1. When an object is kmalloc()'ed, two hooks are called:
	 *    kasan_slab_alloc() and kasan_kmalloc(). We assign the
	 *    tag only in the first one.
	 * 2. We reuse the same tag for krealloc'ed objects.
	 */
	if (keep_tag)
		return get_tag(object);

	/*
	 * If the cache neither has a constructor nor has SLAB_TYPESAFE_BY_RCU
	 * set, assign a tag when the object is being allocated (init == false).
	 */
	if (!cache->ctor && !(cache->flags & SLAB_TYPESAFE_BY_RCU))
		return init ? KASAN_TAG_KERNEL : random_tag();

	/* For caches that either have a constructor or SLAB_TYPESAFE_BY_RCU: */
#ifdef CONFIG_SLAB
	/* For SLAB assign tags based on the object index in the freelist. */
	return (u8)obj_to_index(cache, virt_to_page(object), (void *)object);
#else
	/*
	 * For SLUB assign a random tag during slab creation, otherwise reuse
	 * the already assigned tag.
	 */
	return init ? random_tag() : get_tag(object);
#endif
}

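/*
 * A summary of the policy above, for illustration:
 *
 *	keep_tag			-> existing tag
 *	no ctor, no SLAB_TYPESAFE_BY_RCU:
 *		slab creation (init)	-> KASAN_TAG_KERNEL
 *		allocation		-> random tag
 *	ctor or SLAB_TYPESAFE_BY_RCU:
 *		SLAB			-> object index in the slab
 *		SLUB, slab creation	-> random tag
 *		SLUB, allocation	-> preassigned (existing) tag
 */
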
void * __must_check kasan_init_slab_obj(struct kmem_cache *cache,
					const void *object)
{
	struct kasan_alloc_meta *alloc_info;

	if (!(cache->flags & SLAB_KASAN))
		return (void *)object;

	alloc_info = get_alloc_info(cache, object);
	__memset(alloc_info, 0, sizeof(*alloc_info));

	if (IS_ENABLED(CONFIG_KASAN_SW_TAGS))
		object = set_tag(object,
				assign_tag(cache, object, true, false));

	return (void *)object;
}

static inline bool shadow_invalid(u8 tag, s8 shadow_byte)
{
	if (IS_ENABLED(CONFIG_KASAN_GENERIC))
		return shadow_byte < 0 ||
			shadow_byte >= KASAN_SHADOW_SCALE_SIZE;

	/* else CONFIG_KASAN_SW_TAGS: */
	if ((u8)shadow_byte == KASAN_TAG_INVALID)
		return true;
	if ((tag != KASAN_TAG_KERNEL) && (tag != (u8)shadow_byte))
		return true;

	return false;
}

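/*
 * To make the generic-mode check above concrete: a shadow byte of
 * 0..KASAN_SHADOW_SCALE_SIZE-1 encodes how many leading bytes of the
 * granule are addressable, while negative values are poison markers
 * (e.g. KASAN_KMALLOC_FREE), so anything outside [0, scale size) at
 * free time indicates a double-free or invalid-free.
 */
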
static bool __kasan_slab_free(struct kmem_cache *cache, void *object,
			      unsigned long ip, bool quarantine)
{
	s8 shadow_byte;
	u8 tag;
	void *tagged_object;
	unsigned long rounded_up_size;

	tag = get_tag(object);
	tagged_object = object;
	object = reset_tag(object);

	if (unlikely(nearest_obj(cache, virt_to_head_page(object), object) !=
	    object)) {
		kasan_report_invalid_free(tagged_object, ip);
		return true;
	}

	/* RCU slabs could be legally used after free within the RCU period */
	if (unlikely(cache->flags & SLAB_TYPESAFE_BY_RCU))
		return false;

	shadow_byte = READ_ONCE(*(s8 *)kasan_mem_to_shadow(object));
	if (shadow_invalid(tag, shadow_byte)) {
		kasan_report_invalid_free(tagged_object, ip);
		return true;
	}

	rounded_up_size = round_up(cache->object_size, KASAN_SHADOW_SCALE_SIZE);
	kasan_poison_shadow(object, rounded_up_size, KASAN_KMALLOC_FREE);

	if ((IS_ENABLED(CONFIG_KASAN_GENERIC) && !quarantine) ||
	    unlikely(!(cache->flags & SLAB_KASAN)))
		return false;

	kasan_set_free_info(cache, object, tag);

	quarantine_put(get_free_info(cache, object), cache);

	return IS_ENABLED(CONFIG_KASAN_GENERIC);
}

bool kasan_slab_free(struct kmem_cache *cache, void *object, unsigned long ip)
{
	return __kasan_slab_free(cache, object, ip, true);
}

static void *__kasan_kmalloc(struct kmem_cache *cache, const void *object,
				size_t size, gfp_t flags, bool keep_tag)
{
	unsigned long redzone_start;
	unsigned long redzone_end;
	u8 tag = 0xff;

	if (gfpflags_allow_blocking(flags))
		quarantine_reduce();

	if (unlikely(object == NULL))
		return NULL;

	redzone_start = round_up((unsigned long)(object + size),
				KASAN_SHADOW_SCALE_SIZE);
	redzone_end = round_up((unsigned long)object + cache->object_size,
				KASAN_SHADOW_SCALE_SIZE);

	if (IS_ENABLED(CONFIG_KASAN_SW_TAGS))
		tag = assign_tag(cache, object, false, keep_tag);

	/* Tag is ignored in set_tag without CONFIG_KASAN_SW_TAGS */
	kasan_unpoison_shadow(set_tag(object, tag), size);
	kasan_poison_shadow((void *)redzone_start, redzone_end - redzone_start,
		KASAN_KMALLOC_REDZONE);

	if (cache->flags & SLAB_KASAN)
		set_track(&get_alloc_info(cache, object)->alloc_track, flags);

	return set_tag(object, tag);
}

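/*
 * Worked example (generic mode, scale size 8): for kmalloc(100) served
 * from a 128-byte cache, bytes [0, 100) of the object are unpoisoned,
 * the shadow byte covering [96, 104) gets the partial value 4, and
 * [104, 128) is poisoned with KASAN_KMALLOC_REDZONE.
 */
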
void * __must_check kasan_slab_alloc(struct kmem_cache *cache, void *object,
					gfp_t flags)
{
	return __kasan_kmalloc(cache, object, cache->object_size, flags, false);
}

void * __must_check kasan_kmalloc(struct kmem_cache *cache, const void *object,
				size_t size, gfp_t flags)
{
	return __kasan_kmalloc(cache, object, size, flags, true);
}
EXPORT_SYMBOL(kasan_kmalloc);

void * __must_check kasan_kmalloc_large(const void *ptr, size_t size,
					gfp_t flags)
{
	struct page *page;
	unsigned long redzone_start;
	unsigned long redzone_end;

	if (gfpflags_allow_blocking(flags))
		quarantine_reduce();

	if (unlikely(ptr == NULL))
		return NULL;

	page = virt_to_page(ptr);
	redzone_start = round_up((unsigned long)(ptr + size),
				KASAN_SHADOW_SCALE_SIZE);
	redzone_end = (unsigned long)ptr + page_size(page);

	kasan_unpoison_shadow(ptr, size);
	kasan_poison_shadow((void *)redzone_start, redzone_end - redzone_start,
		KASAN_PAGE_REDZONE);

	return (void *)ptr;
}

void * __must_check kasan_krealloc(const void *object, size_t size, gfp_t flags)
{
	struct page *page;

	if (unlikely(object == ZERO_SIZE_PTR))
		return (void *)object;

	page = virt_to_head_page(object);

	if (unlikely(!PageSlab(page)))
		return kasan_kmalloc_large(object, size, flags);
	else
		return __kasan_kmalloc(page->slab_cache, object, size,
					flags, true);
}

void kasan_poison_kfree(void *ptr, unsigned long ip)
{
	struct page *page;

	page = virt_to_head_page(ptr);

	if (unlikely(!PageSlab(page))) {
		if (ptr != page_address(page)) {
			kasan_report_invalid_free(ptr, ip);
			return;
		}
		kasan_poison_shadow(ptr, page_size(page), KASAN_FREE_PAGE);
	} else {
		__kasan_slab_free(page->slab_cache, ptr, ip, false);
	}
}

void kasan_kfree_large(void *ptr, unsigned long ip)
{
	if (ptr != page_address(virt_to_head_page(ptr)))
		kasan_report_invalid_free(ptr, ip);
	/* The object will be poisoned by page_alloc. */
}

#ifndef CONFIG_KASAN_VMALLOC
int kasan_module_alloc(void *addr, size_t size)
{
	void *ret;
	size_t scaled_size;
	size_t shadow_size;
	unsigned long shadow_start;

	shadow_start = (unsigned long)kasan_mem_to_shadow(addr);
	scaled_size = (size + KASAN_SHADOW_MASK) >> KASAN_SHADOW_SCALE_SHIFT;
	shadow_size = round_up(scaled_size, PAGE_SIZE);

	if (WARN_ON(!PAGE_ALIGNED(shadow_start)))
		return -EINVAL;

	ret = __vmalloc_node_range(shadow_size, 1, shadow_start,
			shadow_start + shadow_size,
			GFP_KERNEL,
			PAGE_KERNEL, VM_NO_GUARD, NUMA_NO_NODE,
			__builtin_return_address(0));

	if (ret) {
		__memset(ret, KASAN_SHADOW_INIT, shadow_size);
		find_vm_area(addr)->flags |= VM_KASAN;
		kmemleak_ignore(ret);
		return 0;
	}

	return -ENOMEM;
}

void kasan_free_shadow(const struct vm_struct *vm)
{
	if (vm->flags & VM_KASAN)
		vfree(kasan_mem_to_shadow(vm->addr));
}
#endif

extern void __kasan_report(unsigned long addr, size_t size, bool is_write, unsigned long ip);
extern bool report_enabled(void);

bool kasan_report(unsigned long addr, size_t size, bool is_write, unsigned long ip)
{
	unsigned long flags = user_access_save();
	bool ret = false;

	if (likely(report_enabled())) {
		__kasan_report(addr, size, is_write, ip);
		ret = true;
	}

	user_access_restore(flags);

	return ret;
}

#ifdef CONFIG_MEMORY_HOTPLUG
static bool shadow_mapped(unsigned long addr)
{
	pgd_t *pgd = pgd_offset_k(addr);
	p4d_t *p4d;
	pud_t *pud;
	pmd_t *pmd;
	pte_t *pte;

	if (pgd_none(*pgd))
		return false;
	p4d = p4d_offset(pgd, addr);
	if (p4d_none(*p4d))
		return false;
	pud = pud_offset(p4d, addr);
	if (pud_none(*pud))
		return false;

	/*
	 * We can't use pud_large() or pud_huge(), the first one is
	 * arch-specific, the last one depends on HUGETLB_PAGE. So let's abuse
	 * pud_bad(), if pud is bad then it's bad because it's huge.
	 */
	if (pud_bad(*pud))
		return true;
	pmd = pmd_offset(pud, addr);
	if (pmd_none(*pmd))
		return false;

	if (pmd_bad(*pmd))
		return true;
	pte = pte_offset_kernel(pmd, addr);
	return !pte_none(*pte);
}

static int __meminit kasan_mem_notifier(struct notifier_block *nb,
			unsigned long action, void *data)
{
	struct memory_notify *mem_data = data;
	unsigned long nr_shadow_pages, start_kaddr, shadow_start;
	unsigned long shadow_end, shadow_size;

	nr_shadow_pages = mem_data->nr_pages >> KASAN_SHADOW_SCALE_SHIFT;
	start_kaddr = (unsigned long)pfn_to_kaddr(mem_data->start_pfn);
	shadow_start = (unsigned long)kasan_mem_to_shadow((void *)start_kaddr);
	shadow_size = nr_shadow_pages << PAGE_SHIFT;
	shadow_end = shadow_start + shadow_size;

	if (WARN_ON(mem_data->nr_pages % KASAN_SHADOW_SCALE_SIZE) ||
		WARN_ON(start_kaddr % (KASAN_SHADOW_SCALE_SIZE << PAGE_SHIFT)))
		return NOTIFY_BAD;

	switch (action) {
	case MEM_GOING_ONLINE: {
		void *ret;

		/*
		 * If shadow is mapped already then it must have been mapped
		 * during boot. This could happen if we are onlining
		 * previously offlined memory.
		 */
		if (shadow_mapped(shadow_start))
			return NOTIFY_OK;

		ret = __vmalloc_node_range(shadow_size, PAGE_SIZE, shadow_start,
					shadow_end, GFP_KERNEL,
					PAGE_KERNEL, VM_NO_GUARD,
					pfn_to_nid(mem_data->start_pfn),
					__builtin_return_address(0));
		if (!ret)
			return NOTIFY_BAD;

		kmemleak_ignore(ret);
		return NOTIFY_OK;
	}
	case MEM_CANCEL_ONLINE:
	case MEM_OFFLINE: {
		struct vm_struct *vm;

		/*
		 * shadow_start was either mapped during boot by kasan_init()
		 * or during memory online by __vmalloc_node_range().
		 * In the latter case we can use vfree() to free shadow.
		 * Non-NULL result of the find_vm_area() will tell us if
		 * that was the second case.
		 *
		 * Currently it's not possible to free shadow mapped
		 * during boot by kasan_init(). It's because the code
		 * to do that hasn't been written yet. So we'll just
		 * leak the memory.
		 */
		vm = find_vm_area((void *)shadow_start);
		if (vm)
			vfree((void *)shadow_start);
	}
	}

	return NOTIFY_OK;
}

static int __init kasan_memhotplug_init(void)
{
	hotplug_memory_notifier(kasan_mem_notifier, 0);
	return 0;
}

core_initcall(kasan_memhotplug_init);
#endif

#ifdef CONFIG_KASAN_VMALLOC
static int kasan_populate_vmalloc_pte(pte_t *ptep, unsigned long addr,
				      void *unused)
{
	unsigned long page;
	pte_t pte;

	if (likely(!pte_none(*ptep)))
		return 0;

	page = __get_free_page(GFP_KERNEL);
	if (!page)
		return -ENOMEM;

	memset((void *)page, KASAN_VMALLOC_INVALID, PAGE_SIZE);
	pte = pfn_pte(PFN_DOWN(__pa(page)), PAGE_KERNEL);

	spin_lock(&init_mm.page_table_lock);
	if (likely(pte_none(*ptep))) {
		set_pte_at(&init_mm, addr, ptep, pte);
		page = 0;
	}
	spin_unlock(&init_mm.page_table_lock);
	if (page)
		free_page(page);
	return 0;
}

int kasan_populate_vmalloc(unsigned long addr, unsigned long size)
{
	unsigned long shadow_start, shadow_end;
	int ret;

	if (!is_vmalloc_or_module_addr((void *)addr))
		return 0;

	shadow_start = (unsigned long)kasan_mem_to_shadow((void *)addr);
	shadow_start = ALIGN_DOWN(shadow_start, PAGE_SIZE);
	shadow_end = (unsigned long)kasan_mem_to_shadow((void *)addr + size);
	shadow_end = ALIGN(shadow_end, PAGE_SIZE);

	ret = apply_to_page_range(&init_mm, shadow_start,
				  shadow_end - shadow_start,
				  kasan_populate_vmalloc_pte, NULL);
	if (ret)
		return ret;

	flush_cache_vmap(shadow_start, shadow_end);

	/*
	 * We need to be careful about inter-cpu effects here. Consider:
	 *
	 *   CPU#0				  CPU#1
	 * WRITE_ONCE(p, vmalloc(100));		while (x = READ_ONCE(p)) ;
	 *					p[99] = 1;
	 *
	 * With compiler instrumentation, that ends up looking like this:
	 *
	 *   CPU#0				  CPU#1
	 * // vmalloc() allocates memory
	 * // let a = area->addr
	 * // we reach kasan_populate_vmalloc
	 * // and call kasan_unpoison_shadow:
	 * STORE shadow(a), unpoison_val
	 * ...
	 * STORE shadow(a+99), unpoison_val	x = LOAD p
	 * // rest of vmalloc process		<data dependency>
	 * STORE p, a				LOAD shadow(x+99)
	 *
	 * If there is no barrier between the end of unpoisoning the shadow
	 * and the store of the result to p, the stores could be committed
	 * in a different order by CPU#0, and CPU#1 could erroneously observe
	 * poison in the shadow.
	 *
	 * We need some sort of barrier between the stores.
	 *
	 * In the vmalloc() case, this is provided by a smp_wmb() in
	 * clear_vm_uninitialized_flag(). In the per-cpu allocator and in
	 * get_vm_area() and friends, the caller gets shadow allocated but
	 * doesn't have any pages mapped into the virtual address space that
	 * has been reserved. Mapping those pages in will involve taking and
	 * releasing a page-table lock, which will provide the barrier.
	 */

	return 0;
}

/*
 * Poison the shadow for a vmalloc region. Called as part of the
 * freeing process at the time the region is freed.
 */
void kasan_poison_vmalloc(const void *start, unsigned long size)
{
	if (!is_vmalloc_or_module_addr(start))
		return;

	size = round_up(size, KASAN_SHADOW_SCALE_SIZE);
	kasan_poison_shadow(start, size, KASAN_VMALLOC_INVALID);
}

void kasan_unpoison_vmalloc(const void *start, unsigned long size)
{
	if (!is_vmalloc_or_module_addr(start))
		return;

	kasan_unpoison_shadow(start, size);
}

static int kasan_depopulate_vmalloc_pte(pte_t *ptep, unsigned long addr,
					void *unused)
{
	unsigned long page;

	page = (unsigned long)__va(pte_pfn(*ptep) << PAGE_SHIFT);

	spin_lock(&init_mm.page_table_lock);

	if (likely(!pte_none(*ptep))) {
		pte_clear(&init_mm, addr, ptep);
		free_page(page);
	}
	spin_unlock(&init_mm.page_table_lock);

	return 0;
}

/*
 * Release the backing for the vmalloc region [start, end), which
 * lies within the free region [free_region_start, free_region_end).
 *
 * This can be run lazily, long after the region was freed. It runs
 * under vmap_area_lock, so it's not safe to interact with the vmalloc/vmap
 * infrastructure.
 *
 * How does this work?
 * -------------------
 *
 * We have a region that is page aligned, labelled as A.
 * That might not map onto the shadow in a way that is page-aligned:
 *
 *                    start                     end
 *                    v                         v
 * |????????|????????|AAAAAAAA|AA....AA|AAAAAAAA|????????| < vmalloc
 *  -------- -------- --------          -------- --------
 *      |            |       |                  |
 *      |            |       |        /--------/
 *      \-------\    |/------/        |
 *               \   ||               |
 *  |??AAAAAA|AAAAAAAA|AA??????|      < shadow
 *      (1)      (2)      (3)
 *
 * First we align the start upwards and the end downwards, so that the
 * shadow of the region aligns with shadow page boundaries. In the
 * example, this gives us the shadow page (2). This is the shadow entirely
 * covered by this allocation.
 *
 * Then we have the tricky bits. We want to know if we can free the
 * partially covered shadow pages - (1) and (3) in the example. For this,
 * we are given the start and end of the free region that contains this
 * allocation. Extending our previous example, we could have:
 *
 *                 free_region_start            free_region_end
 *                 v                            v
 * |FFFFFFFF|FFFFFFFF|AAAAAAAA|AA....AA|AAAAAAAA|FFFFFFFF| < vmalloc
 *  -------- -------- --------          -------- --------
 *      |            |       |                  |
 *      |            |       |        /--------/
 *      \-------\    |/------/        |
 *               \   ||               |
 *  |FFAAAAAA|AAAAAAAA|AAF?????|      < shadow
 *      (1)      (2)      (3)
 *
 * Once again, we align the start of the free region up, and the end of
 * the free region down so that the shadow is page aligned. So we can free
 * page (1) - we know no allocation currently uses anything in that page,
 * because all of it is in the vmalloc free region. But we cannot free
 * page (3), because we can't be sure that the rest of it is unused.
 *
 * We only consider pages that contain part of the original region for
 * freeing: we don't try to free other pages from the free region or we'd
 * end up trying to free huge chunks of virtual address space.
 *
 * How do we know that we're not freeing a page that is simultaneously
 * being used for a fresh allocation in kasan_populate_vmalloc(_pte)?
 *
 * We _can_ have kasan_release_vmalloc and kasan_populate_vmalloc running
 * at the same time. While we run under free_vmap_area_lock, the population
 * code runs under init_mm.page_table_lock.
 *
 * free_vmap_area_lock instead operates to ensure that the larger range
 * [free_region_start, free_region_end) is safe: because __alloc_vmap_area and
 * the per-cpu region-finding algorithm both run under free_vmap_area_lock,
 * no space identified as free will become used while we are running. This
 * means that so long as we are careful with alignment and only free shadow
 * pages entirely covered by the free region, we will not run into any
 * trouble - any simultaneous allocations will be for disjoint regions.
 */
void kasan_release_vmalloc(unsigned long start, unsigned long end,
			   unsigned long free_region_start,
			   unsigned long free_region_end)
{
	void *shadow_start, *shadow_end;
	unsigned long region_start, region_end;
	unsigned long size;

	region_start = ALIGN(start, PAGE_SIZE * KASAN_SHADOW_SCALE_SIZE);
	region_end = ALIGN_DOWN(end, PAGE_SIZE * KASAN_SHADOW_SCALE_SIZE);

	free_region_start = ALIGN(free_region_start,
				  PAGE_SIZE * KASAN_SHADOW_SCALE_SIZE);

	if (start != region_start &&
	    free_region_start < region_start)
		region_start -= PAGE_SIZE * KASAN_SHADOW_SCALE_SIZE;

	free_region_end = ALIGN_DOWN(free_region_end,
				     PAGE_SIZE * KASAN_SHADOW_SCALE_SIZE);

	if (end != region_end &&
	    free_region_end > region_end)
		region_end += PAGE_SIZE * KASAN_SHADOW_SCALE_SIZE;

	shadow_start = kasan_mem_to_shadow((void *)region_start);
	shadow_end = kasan_mem_to_shadow((void *)region_end);

	if (shadow_end > shadow_start) {
		size = shadow_end - shadow_start;
		apply_to_existing_page_range(&init_mm,
					     (unsigned long)shadow_start,
					     size, kasan_depopulate_vmalloc_pte,
					     NULL);
		flush_tlb_kernel_range((unsigned long)shadow_start,
				       (unsigned long)shadow_end);
	}
}
#endif