powerpc/kasan: Fix shadow area set up for modules.
arch/powerpc/mm/kasan/kasan_init_32.c
// SPDX-License-Identifier: GPL-2.0

#define DISABLE_BRANCH_PROFILING

#include <linux/kasan.h>
#include <linux/printk.h>
#include <linux/memblock.h>
#include <linux/moduleloader.h>
#include <linux/sched/task.h>
#include <linux/vmalloc.h>
#include <asm/pgalloc.h>
#include <asm/code-patching.h>
#include <mm/mmu_decl.h>

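/*
 * Fill an entire PTE table so that every entry maps the zero-filled
 * early shadow page with the given protection.
 */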
static void kasan_populate_pte(pte_t *ptep, pgprot_t prot)
{
	unsigned long va = (unsigned long)kasan_early_shadow_page;
	phys_addr_t pa = __pa(kasan_early_shadow_page);
	int i;

	for (i = 0; i < PTRS_PER_PTE; i++, ptep++)
		__set_pte_at(&init_mm, va, ptep, pfn_pte(PHYS_PFN(pa), prot), 0);
}

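/*
 * Walk the shadow of [k_start, k_end) one PGDIR range at a time and give
 * each PMD entry that still points at the shared early shadow PTE table
 * a dedicated PTE table of its own. This may run both before and after
 * slab is up, hence __ref and the slab_is_available() checks. Each new
 * table is pre-filled read-only with the zero shadow page, and the
 * re-check under init_mm.page_table_lock guards against a concurrent
 * caller having populated the same entry.
 */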
static int __ref kasan_init_shadow_page_tables(unsigned long k_start, unsigned long k_end)
{
	pmd_t *pmd;
	unsigned long k_cur, k_next;

	pmd = pmd_offset(pud_offset(pgd_offset_k(k_start), k_start), k_start);

	for (k_cur = k_start; k_cur != k_end; k_cur = k_next, pmd++) {
		pte_t *new;

		k_next = pgd_addr_end(k_cur, k_end);
		if ((void *)pmd_page_vaddr(*pmd) != kasan_early_shadow_pte)
			continue;

		if (slab_is_available())
			new = pte_alloc_one_kernel(&init_mm);
		else
			new = memblock_alloc(PTE_FRAG_SIZE, PTE_FRAG_SIZE);

		if (!new)
			return -ENOMEM;
		if (early_mmu_has_feature(MMU_FTR_HPTE_TABLE))
			kasan_populate_pte(new, PAGE_READONLY);
		else
			kasan_populate_pte(new, PAGE_KERNEL_RO);

		smp_wmb(); /* See comment in __pte_alloc */

		spin_lock(&init_mm.page_table_lock);
		/* Has another CPU already populated it? */
		if (likely((void *)pmd_page_vaddr(*pmd) == kasan_early_shadow_pte)) {
			pmd_populate_kernel(&init_mm, pmd, new);
			new = NULL;
		}
		spin_unlock(&init_mm.page_table_lock);

		if (new && slab_is_available())
			pte_free_kernel(&init_mm, new);
	}
	return 0;
}

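/* Allocate one zeroed page, from the buddy allocator once slab is up, else from memblock. */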
static void __ref *kasan_get_one_page(void)
{
	if (slab_is_available())
		return (void *)__get_free_page(GFP_KERNEL | __GFP_ZERO);

	return memblock_alloc(PAGE_SIZE, PAGE_SIZE);
}

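/*
 * Back the shadow of [start, start + size) with real writable pages:
 * ensure the shadow page tables exist, then map one zeroed page per
 * shadow page. Before slab is up the whole shadow is carved out of a
 * single memblock chunk; afterwards pages are allocated individually.
 */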
static int __ref kasan_init_region(void *start, size_t size)
{
	unsigned long k_start = (unsigned long)kasan_mem_to_shadow(start);
	unsigned long k_end = (unsigned long)kasan_mem_to_shadow(start + size);
	unsigned long k_cur;
	int ret;
	void *block = NULL;

	ret = kasan_init_shadow_page_tables(k_start, k_end);
	if (ret)
		return ret;

	if (!slab_is_available())
		block = memblock_alloc(k_end - k_start, PAGE_SIZE);

	for (k_cur = k_start & PAGE_MASK; k_cur < k_end; k_cur += PAGE_SIZE) {
		pmd_t *pmd = pmd_offset(pud_offset(pgd_offset_k(k_cur), k_cur), k_cur);
		void *va = block ? block + k_cur - k_start : kasan_get_one_page();
		pte_t pte = pfn_pte(PHYS_PFN(__pa(va)), PAGE_KERNEL);

		if (!va)
			return -ENOMEM;

		__set_pte_at(&init_mm, k_cur, pte_offset_kernel(pmd, k_cur), pte, 0);
	}
	flush_tlb_kernel_range(k_start, k_end);
	return 0;
}

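/*
 * Once every region has real shadow, remap the shared zero shadow page
 * read-only (PAGE_READONLY on hash MMUs, PAGE_KERNEL_RO elsewhere) so
 * nothing can dirty it.
 */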
static void __init kasan_remap_early_shadow_ro(void)
{
	if (early_mmu_has_feature(MMU_FTR_HPTE_TABLE))
		kasan_populate_pte(kasan_early_shadow_pte, PAGE_READONLY);
	else
		kasan_populate_pte(kasan_early_shadow_pte, PAGE_KERNEL_RO);

	flush_tlb_kernel_range(KASAN_SHADOW_START, KASAN_SHADOW_END);
}

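/*
 * Called once the MMU is up: on hash MMUs, first allocate shadow page
 * tables for the whole shadow area, then back the shadow of every lowmem
 * memblock region with real pages. KASAN cannot operate without its
 * shadow, so failures here are fatal.
 */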
void __init kasan_mmu_init(void)
{
	int ret;
	struct memblock_region *reg;

	if (early_mmu_has_feature(MMU_FTR_HPTE_TABLE)) {
		ret = kasan_init_shadow_page_tables(KASAN_SHADOW_START, KASAN_SHADOW_END);

		if (ret)
			panic("kasan: kasan_init_shadow_page_tables() failed");
	}

	for_each_memblock(memory, reg) {
		phys_addr_t base = reg->base;
		phys_addr_t top = min(base + reg->size, total_lowmem);

		if (base >= top)
			continue;

		ret = kasan_init_region(__va(base), top - base);
		if (ret)
			panic("kasan: kasan_init_region() failed");
	}
}

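/*
 * Final step: write-protect the early shadow page, wipe whatever was
 * written to it while it was still writable, then let KASAN report errors.
 */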
void __init kasan_init(void)
{
	kasan_remap_early_shadow_ro();

	clear_page(kasan_early_shadow_page);

	/* At this point kasan is fully initialized. Enable error messages */
	init_task.kasan_depth = 0;
	pr_info("KASAN init done\n");
}

#ifdef CONFIG_MODULES
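/*
 * Modules live in vmalloc space, which has no real shadow by default.
 * Allocate the module area MODULE_ALIGN-aligned, so that its shadow
 * starts on a page boundary, and back that shadow with real pages up
 * front; undo the allocation if the shadow cannot be set up.
 */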
void *module_alloc(unsigned long size)
{
	void *base;

	base = __vmalloc_node_range(size, MODULE_ALIGN, VMALLOC_START, VMALLOC_END,
				    GFP_KERNEL, PAGE_KERNEL_EXEC, VM_FLUSH_RESET_PERMS,
				    NUMA_NO_NODE, __builtin_return_address(0));

	if (!base)
		return NULL;

	if (!kasan_init_region(base, size))
		return base;

	vfree(base);

	return NULL;
}
#endif

#ifdef CONFIG_PPC_BOOK3S_32
u8 __initdata early_hash[256 << 10] __aligned(256 << 10) = {0};

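/*
 * On book3s/32, point the hash-page and flush-hash code at the temporary
 * early_hash table above, so that shadow mappings can be hashed in before
 * the real hash table is allocated.
 */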
static void __init kasan_early_hash_table(void)
{
	modify_instruction_site(&patch__hash_page_A0, 0xffff, __pa(early_hash) >> 16);
	modify_instruction_site(&patch__flush_hash_A0, 0xffff, __pa(early_hash) >> 16);

	Hash = (struct hash_pte *)early_hash;
}
#else
static void __init kasan_early_hash_table(void) {}
#endif

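/*
 * Very first KASAN step: map the whole shadow area to the zero early
 * shadow page (writable at this stage) and, on hash MMUs, install the
 * temporary hash table needed to service those mappings.
 */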
void __init kasan_early_init(void)
{
	unsigned long addr = KASAN_SHADOW_START;
	unsigned long end = KASAN_SHADOW_END;
	unsigned long next;
	pmd_t *pmd = pmd_offset(pud_offset(pgd_offset_k(addr), addr), addr);

	BUILD_BUG_ON(KASAN_SHADOW_START & ~PGDIR_MASK);

	kasan_populate_pte(kasan_early_shadow_pte, PAGE_KERNEL);

	do {
		next = pgd_addr_end(addr, end);
		pmd_populate_kernel(&init_mm, pmd, kasan_early_shadow_pte);
	} while (pmd++, addr = next, addr != end);

	if (early_mmu_has_feature(MMU_FTR_HPTE_TABLE))
		kasan_early_hash_table();
}