// SPDX-License-Identifier: GPL-2.0

#define DISABLE_BRANCH_PROFILING

#include <linux/kasan.h>
#include <linux/printk.h>
#include <linux/memblock.h>
#include <linux/moduleloader.h>
#include <linux/sched/task.h>
#include <linux/vmalloc.h>
#include <asm/pgalloc.h>
#include <asm/code-patching.h>
#include <mm/mmu_decl.h>
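/*
 * Point every slot of a shadow PTE table at the zero-filled early shadow
 * page, with the requested protection.
 */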
static void kasan_populate_pte(pte_t *ptep, pgprot_t prot)
{
	unsigned long va = (unsigned long)kasan_early_shadow_page;
	phys_addr_t pa = __pa(kasan_early_shadow_page);
	int i;

	for (i = 0; i < PTRS_PER_PTE; i++, ptep++)
		__set_pte_at(&init_mm, va, ptep, pfn_pte(PHYS_PFN(pa), prot), 0);
}
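/*
 * Allocate a dedicated PTE table for every PGD entry covering the shadow
 * range [k_start, k_end) that still points at the early shadow PTE table.
 */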
static int __ref kasan_init_shadow_page_tables(unsigned long k_start, unsigned long k_end)
{
	pmd_t *pmd;
	unsigned long k_cur, k_next;

	pmd = pmd_offset(pud_offset(pgd_offset_k(k_start), k_start), k_start);

	for (k_cur = k_start; k_cur != k_end; k_cur = k_next, pmd++) {
		pte_t *new;

		k_next = pgd_addr_end(k_cur, k_end);
		if ((void *)pmd_page_vaddr(*pmd) != kasan_early_shadow_pte)
			continue;

		if (slab_is_available())
			new = pte_alloc_one_kernel(&init_mm);
		else
			new = memblock_alloc(PTE_FRAG_SIZE, PTE_FRAG_SIZE);
		if (!new)
			return -ENOMEM;

		if (early_mmu_has_feature(MMU_FTR_HPTE_TABLE))
			kasan_populate_pte(new, PAGE_READONLY);
		else
			kasan_populate_pte(new, PAGE_KERNEL_RO);

		smp_wmb(); /* See comment in __pte_alloc */

		spin_lock(&init_mm.page_table_lock);
		/* Has another populated it ? */
		if (likely((void *)pmd_page_vaddr(*pmd) == kasan_early_shadow_pte)) {
			pmd_populate_kernel(&init_mm, pmd, new);
			new = NULL;
		}
		spin_unlock(&init_mm.page_table_lock);

		if (new && slab_is_available())
			pte_free_kernel(&init_mm, new);
	}
	return 0;
}
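/* Get one zeroed page, from the page allocator once slab is up, else from memblock. */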
static void __ref *kasan_get_one_page(void)
{
	if (slab_is_available())
		return (void *)__get_free_page(GFP_KERNEL | __GFP_ZERO);

	return memblock_alloc(PAGE_SIZE, PAGE_SIZE);
}
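/*
 * Back the shadow of one memory region with real, writable pages and
 * install them in the shadow page tables created above.
 */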
static int __ref kasan_init_region(void *start, size_t size)
{
	unsigned long k_start = (unsigned long)kasan_mem_to_shadow(start);
	unsigned long k_end = (unsigned long)kasan_mem_to_shadow(start + size);
	unsigned long k_cur;
	int ret;
	void *block = NULL;

	ret = kasan_init_shadow_page_tables(k_start, k_end);
	if (ret)
		return ret;

	if (!slab_is_available())
		block = memblock_alloc(k_end - k_start, PAGE_SIZE);

	for (k_cur = k_start & PAGE_MASK; k_cur < k_end; k_cur += PAGE_SIZE) {
		pmd_t *pmd = pmd_offset(pud_offset(pgd_offset_k(k_cur), k_cur), k_cur);
		void *va = block ? block + k_cur - k_start : kasan_get_one_page();
		pte_t pte = pfn_pte(PHYS_PFN(__pa(va)), PAGE_KERNEL);

		if (!va)
			return -ENOMEM;

		__set_pte_at(&init_mm, k_cur, pte_offset_kernel(pmd, k_cur), pte, 0);
	}
	flush_tlb_kernel_range(k_start, k_end);
	return 0;
}
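/* Write-protect the early shadow page once the real shadow is in place. */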
static void __init kasan_remap_early_shadow_ro(void)
{
	if (early_mmu_has_feature(MMU_FTR_HPTE_TABLE))
		kasan_populate_pte(kasan_early_shadow_pte, PAGE_READONLY);
	else
		kasan_populate_pte(kasan_early_shadow_pte, PAGE_KERNEL_RO);

	flush_tlb_kernel_range(KASAN_SHADOW_START, KASAN_SHADOW_END);
}
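/*
 * Build the real shadow mapping: shadow page tables, plus a backed shadow
 * for each lowmem memblock region.
 */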
void __init kasan_mmu_init(void)
{
	int ret;
	struct memblock_region *reg;

	if (early_mmu_has_feature(MMU_FTR_HPTE_TABLE)) {
		ret = kasan_init_shadow_page_tables(KASAN_SHADOW_START, KASAN_SHADOW_END);
		if (ret)
			panic("kasan: kasan_init_shadow_page_tables() failed");
	}

	for_each_memblock(memory, reg) {
		phys_addr_t base = reg->base;
		phys_addr_t top = min(base + reg->size, total_lowmem);

		if (base >= top)
			continue;

		ret = kasan_init_region(__va(base), top - base);
		if (ret)
			panic("kasan: kasan_init_region() failed");
	}
}
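/* Final step: make the early shadow read-only and enable KASAN reporting. */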
void __init kasan_init(void)
{
	kasan_remap_early_shadow_ro();

	clear_page(kasan_early_shadow_page);

	/* At this point kasan is fully initialized. Enable error messages */
	init_task.kasan_depth = 0;
	pr_info("KASAN init done\n");
}
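/*
 * Modules live in vmalloc space, so their shadow must be backed with real
 * pages as well; module_alloc() below takes care of that.
 */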
#ifdef CONFIG_MODULES
void *module_alloc(unsigned long size)
{
	void *base;

	base = __vmalloc_node_range(size, MODULE_ALIGN, VMALLOC_START, VMALLOC_END,
				    GFP_KERNEL, PAGE_KERNEL_EXEC, VM_FLUSH_RESET_PERMS,
				    NUMA_NO_NODE, __builtin_return_address(0));
	if (!base)
		return NULL;

	if (!kasan_init_region(base, size))
		return base;

	vfree(base);
	return NULL;
}
#endif
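/*
 * On book3s/32 the hash MMU needs a hash table before the final one is
 * allocated; a static, suitably aligned buffer is patched into the hash
 * handlers for early use.
 */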
#ifdef CONFIG_PPC_BOOK3S_32
u8 __initdata early_hash[256 << 10] __aligned(256 << 10) = {0};

static void __init kasan_early_hash_table(void)
{
	modify_instruction_site(&patch__hash_page_A0, 0xffff, __pa(early_hash) >> 16);
	modify_instruction_site(&patch__flush_hash_A0, 0xffff, __pa(early_hash) >> 16);

	Hash = (struct hash_pte *)early_hash;
}
#else
static void __init kasan_early_hash_table(void) {}
#endif
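/*
 * Early entry point: map the whole shadow range onto the early shadow page
 * so instrumented code can run before the real shadow exists.
 */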
void __init kasan_early_init(void)
{
	unsigned long addr = KASAN_SHADOW_START;
	unsigned long end = KASAN_SHADOW_END;
	unsigned long next;
	pmd_t *pmd = pmd_offset(pud_offset(pgd_offset_k(addr), addr), addr);

	BUILD_BUG_ON(KASAN_SHADOW_START & ~PGDIR_MASK);

	kasan_populate_pte(kasan_early_shadow_pte, PAGE_KERNEL);

	do {
		next = pgd_addr_end(addr, end);
		pmd_populate_kernel(&init_mm, pmd, kasan_early_shadow_pte);
	} while (pmd++, addr = next, addr != end);

	if (early_mmu_has_feature(MMU_FTR_HPTE_TABLE))
		kasan_early_hash_table();
}