// SPDX-License-Identifier: GPL-2.0
/*
 * KASAN for 64-bit Book3S powerpc
 *
 * Copyright 2019-2022, Daniel Axtens, IBM Corporation.
 */

/*
 * ppc64 turns on virtual memory late in boot, after calling into generic code
 * like the device-tree parser, so it uses this in conjunction with a hook in
 * outline mode to avoid invalid access early in boot.
 */
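
/*
 * For reference, the outline-mode hook mentioned above gates every shadow
 * access on the static key defined below, which is flipped at the end of
 * kasan_init(). A minimal sketch (the exact form in asm/kasan.h may differ
 * between kernel versions):
 *
 *	static __always_inline bool kasan_arch_is_ready(void)
 *	{
 *		return static_branch_likely(&powerpc_kasan_enabled_key);
 *	}
 *
 * Until the key is enabled, the outline check routines bail out before
 * touching the (not yet mapped) shadow.
 */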

#define DISABLE_BRANCH_PROFILING

#include <linux/kasan.h>
#include <linux/printk.h>
#include <linux/sched/task.h>
#include <linux/memblock.h>
#include <asm/pgalloc.h>

DEFINE_STATIC_KEY_FALSE(powerpc_kasan_enabled_key);
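
/*
 * Map real shadow memory for a region of physical memory. With generic
 * KASAN, one shadow byte tracks KASAN_SHADOW_SCALE_SIZE (8) bytes, so
 * kasan_mem_to_shadow() works out to roughly:
 *
 *	shadow = (void *)((unsigned long)addr >> 3) + KASAN_SHADOW_OFFSET;
 *
 * meaning a 1 GiB memblock needs 128 MiB of shadow, allocated here from
 * memblock and mapped page by page.
 */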
static void __init kasan_init_phys_region(void *start, void *end)
{
	unsigned long k_start, k_end, k_cur;
	void *va;

	if (start >= end)
		return;

	k_start = ALIGN_DOWN((unsigned long)kasan_mem_to_shadow(start), PAGE_SIZE);
	k_end = ALIGN((unsigned long)kasan_mem_to_shadow(end), PAGE_SIZE);

	va = memblock_alloc(k_end - k_start, PAGE_SIZE);
	for (k_cur = k_start; k_cur < k_end; k_cur += PAGE_SIZE, va += PAGE_SIZE)
		map_kernel_page(k_cur, __pa(va), PAGE_KERNEL);
}

void __init kasan_init(void)
{
	/*
	 * We want to do the following things:
	 *  1) Map real memory into the shadow for all physical memblocks.
	 *     This takes us from c000... to c008...
	 *  2) Leave a hole over the shadow of vmalloc space. KASAN_VMALLOC
	 *     will manage this for us.
	 *     This takes us from c008... to c00a...
	 *  3) Map the 'early shadow'/zero page over iomap and vmemmap space.
	 *     This takes us up to where we start at c00e...
	 */

	void *k_start = kasan_mem_to_shadow((void *)RADIX_VMALLOC_END);
	void *k_end = kasan_mem_to_shadow((void *)RADIX_VMEMMAP_END);
	phys_addr_t start, end;
	u64 i;
	pte_t zero_pte = pfn_pte(virt_to_pfn(kasan_early_shadow_page), PAGE_KERNEL);

	if (!early_radix_enabled()) {
		pr_warn("KASAN not enabled as it requires radix!\n");
		return;
	}
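
	/*
	 * Step 1: allocate and map real shadow pages for every block of
	 * physical memory that memblock knows about.
	 */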
	for_each_mem_range(i, &start, &end)
		kasan_init_phys_region(phys_to_virt(start), phys_to_virt(end));
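
	/*
	 * Prepare the early shadow page tables: every PTE points at the
	 * shared zero page, every PMD at the PTE table, and every PUD at
	 * the PMD table. kasan_populate_early_shadow() below then wires
	 * these over the iomap and vmemmap shadow (step 3 above), while
	 * the vmalloc shadow (step 2) is left for KASAN_VMALLOC to manage.
	 */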
	for (i = 0; i < PTRS_PER_PTE; i++)
		__set_pte_at(&init_mm, (unsigned long)kasan_early_shadow_page,
			     &kasan_early_shadow_pte[i], zero_pte, 0);

	for (i = 0; i < PTRS_PER_PMD; i++)
		pmd_populate_kernel(&init_mm, &kasan_early_shadow_pmd[i],
				    kasan_early_shadow_pte);

	for (i = 0; i < PTRS_PER_PUD; i++)
		pud_populate(&init_mm, &kasan_early_shadow_pud[i],
			     kasan_early_shadow_pmd);

	/* map the early shadow over the iomap and vmemmap space */
	kasan_populate_early_shadow(k_start, k_end);

	/* mark early shadow region as RO and wipe it */
	zero_pte = pfn_pte(virt_to_pfn(kasan_early_shadow_page), PAGE_KERNEL_RO);
	for (i = 0; i < PTRS_PER_PTE; i++)
		__set_pte_at(&init_mm, (unsigned long)kasan_early_shadow_page,
			     &kasan_early_shadow_pte[i], zero_pte, 0);

	/*
	 * clear_page relies on some cache info that hasn't been set up yet.
	 * It ends up looping ~forever and blows up other data.
	 * Use memset instead.
	 */
	memset(kasan_early_shadow_page, 0, PAGE_SIZE);
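
	/*
	 * Flip the readiness key: from here on the outline-mode hook
	 * described at the top of this file sees the key as enabled and
	 * lets checks through to the shadow we just built.
	 */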
	static_branch_inc(&powerpc_kasan_enabled_key);

	/* Enable error messages */
	init_task.kasan_depth = 0;
	pr_info("KASAN init done\n");
}
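
/*
 * powerpc boot code calls these hooks on every platform; for Book3S-64
 * all of the setup happens in kasan_init() above, once translation is
 * on, so there is nothing for them to do here.
 */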
void __init kasan_early_init(void) { }

void __init kasan_late_init(void) { }