arch/powerpc/mm/kasan/kasan_init_32.c
// SPDX-License-Identifier: GPL-2.0

#define DISABLE_BRANCH_PROFILING

#include <linux/kasan.h>
#include <linux/printk.h>
#include <linux/memblock.h>
#include <linux/sched/task.h>
#include <linux/vmalloc.h>
#include <asm/pgalloc.h>
#include <asm/code-patching.h>
#include <mm/mmu_decl.h>

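/*
 * Point every entry of the given PTE table at the zero early shadow
 * page, with the protection passed by the caller.
 */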
static void kasan_populate_pte(pte_t *ptep, pgprot_t prot)
{
        unsigned long va = (unsigned long)kasan_early_shadow_page;
        phys_addr_t pa = __pa(kasan_early_shadow_page);
        int i;

        for (i = 0; i < PTRS_PER_PTE; i++, ptep++)
                __set_pte_at(&init_mm, va, ptep, pfn_pte(PHYS_PFN(pa), prot), 0);
}

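/*
 * Build shadow page tables for [k_start, k_end): every PMD entry still
 * pointing at the shared early shadow PTE table gets its own PTE table,
 * pre-filled with read-only mappings of the zero shadow page.
 * Returns 0 on success, -ENOMEM if a PTE table cannot be allocated.
 */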
static int kasan_init_shadow_page_tables(unsigned long k_start, unsigned long k_end)
{
        pmd_t *pmd;
        unsigned long k_cur, k_next;

        pmd = pmd_offset(pud_offset(pgd_offset_k(k_start), k_start), k_start);

        for (k_cur = k_start; k_cur != k_end; k_cur = k_next, pmd++) {
                pte_t *new;

                k_next = pgd_addr_end(k_cur, k_end);
                if ((void *)pmd_page_vaddr(*pmd) != kasan_early_shadow_pte)
                        continue;

                new = pte_alloc_one_kernel(&init_mm);

                if (!new)
                        return -ENOMEM;
                if (early_mmu_has_feature(MMU_FTR_HPTE_TABLE))
                        kasan_populate_pte(new, PAGE_READONLY);
                else
                        kasan_populate_pte(new, PAGE_KERNEL_RO);
                pmd_populate_kernel(&init_mm, pmd, new);
        }
        return 0;
}

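/*
 * Allocate one zeroed page of shadow memory: from the page allocator once
 * slab is up, from memblock before that (hence __ref).
 */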
static void __ref *kasan_get_one_page(void)
{
        if (slab_is_available())
                return (void *)__get_free_page(GFP_KERNEL | __GFP_ZERO);

        return memblock_alloc(PAGE_SIZE, PAGE_SIZE);
}

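/*
 * Allocate and map real shadow memory covering [start, start + size).
 * During early boot the whole shadow range comes from a single memblock
 * allocation; once slab is available (e.g. for modules) it is allocated
 * page by page.
 */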
static int __ref kasan_init_region(void *start, size_t size)
{
        unsigned long k_start = (unsigned long)kasan_mem_to_shadow(start);
        unsigned long k_end = (unsigned long)kasan_mem_to_shadow(start + size);
        unsigned long k_cur;
        int ret;
        void *block = NULL;

        ret = kasan_init_shadow_page_tables(k_start, k_end);
        if (ret)
                return ret;

        if (!slab_is_available())
                block = memblock_alloc(k_end - k_start, PAGE_SIZE);

        for (k_cur = k_start; k_cur < k_end; k_cur += PAGE_SIZE) {
                pmd_t *pmd = pmd_offset(pud_offset(pgd_offset_k(k_cur), k_cur), k_cur);
                void *va = block ? block + k_cur - k_start : kasan_get_one_page();
                pte_t pte = pfn_pte(PHYS_PFN(__pa(va)), PAGE_KERNEL);

                if (!va)
                        return -ENOMEM;

                __set_pte_at(&init_mm, k_cur, pte_offset_kernel(pmd, k_cur), pte, 0);
        }
        flush_tlb_kernel_range(k_start, k_end);
        return 0;
}

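/*
 * The zero shadow page keeps backing every area without dedicated shadow
 * memory, so remap it read-only once initialisation is complete.
 */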
static void __init kasan_remap_early_shadow_ro(void)
{
        if (early_mmu_has_feature(MMU_FTR_HPTE_TABLE))
                kasan_populate_pte(kasan_early_shadow_pte, PAGE_READONLY);
        else
                kasan_populate_pte(kasan_early_shadow_pte, PAGE_KERNEL_RO);

        flush_tlb_kernel_range(KASAN_SHADOW_START, KASAN_SHADOW_END);
}

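/*
 * Called once the MMU is up: on hash MMUs, give the whole shadow space its
 * own page tables, then map real shadow memory for every lowmem region.
 */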
void __init kasan_mmu_init(void)
{
        int ret;
        struct memblock_region *reg;

        if (early_mmu_has_feature(MMU_FTR_HPTE_TABLE)) {
                ret = kasan_init_shadow_page_tables(KASAN_SHADOW_START, KASAN_SHADOW_END);

                if (ret)
                        panic("kasan: kasan_init_shadow_page_tables() failed");
        }

        for_each_memblock(memory, reg) {
                phys_addr_t base = reg->base;
                phys_addr_t top = min(base + reg->size, total_lowmem);

                if (base >= top)
                        continue;

                ret = kasan_init_region(__va(base), top - base);
                if (ret)
                        panic("kasan: kasan_init_region() failed");
        }
}

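/*
 * Final step: make the early shadow read-only, clear whatever was written
 * to it while it was writable, and enable KASAN error reporting.
 */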
void __init kasan_init(void)
{
        kasan_remap_early_shadow_ro();

        clear_page(kasan_early_shadow_page);

        /* At this point kasan is fully initialized. Enable error messages */
        init_task.kasan_depth = 0;
        pr_info("KASAN init done\n");
}

#ifdef CONFIG_MODULES
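/*
 * Module space is only backed by the zero shadow page, so allocate real
 * shadow memory for each module allocation; drop the allocation if the
 * shadow cannot be set up.
 */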
void *module_alloc(unsigned long size)
{
        void *base = vmalloc_exec(size);

        if (!base)
                return NULL;

        if (!kasan_init_region(base, size))
                return base;

        vfree(base);

        return NULL;
}
#endif

#ifdef CONFIG_PPC_BOOK3S_32
u8 __initdata early_hash[256 << 10] __aligned(256 << 10) = {0};

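/*
 * The final hash table is not allocated yet when kasan_early_init() runs,
 * so install a temporary one: patch the hash table address used by the
 * low-level hash handlers and point Hash at early_hash.
 */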
static void __init kasan_early_hash_table(void)
{
        modify_instruction_site(&patch__hash_page_A0, 0xffff, __pa(early_hash) >> 16);
        modify_instruction_site(&patch__flush_hash_A0, 0xffff, __pa(early_hash) >> 16);

        Hash = (struct hash_pte *)early_hash;
}
#else
static void __init kasan_early_hash_table(void) {}
#endif

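/*
 * Earliest setup: map the whole shadow space to the zero early shadow page
 * (read-write at this stage) so that shadow accesses are valid from the
 * start, and set up the temporary hash table on book3s/32.
 */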
void __init kasan_early_init(void)
{
        unsigned long addr = KASAN_SHADOW_START;
        unsigned long end = KASAN_SHADOW_END;
        unsigned long next;
        pmd_t *pmd = pmd_offset(pud_offset(pgd_offset_k(addr), addr), addr);

        BUILD_BUG_ON(KASAN_SHADOW_START & ~PGDIR_MASK);

        kasan_populate_pte(kasan_early_shadow_pte, PAGE_KERNEL);

        do {
                next = pgd_addr_end(addr, end);
                pmd_populate_kernel(&init_mm, pmd, kasan_early_shadow_pte);
        } while (pmd++, addr = next, addr != end);

        if (early_mmu_has_feature(MMU_FTR_HPTE_TABLE))
                kasan_early_hash_table();
}