arm64: add support for folded p4d page tables
arch/arm64/mm/kasan_init.c
// SPDX-License-Identifier: GPL-2.0-only
/*
 * This file contains kasan initialization code for ARM64.
 *
 * Copyright (c) 2015 Samsung Electronics Co., Ltd.
 * Author: Andrey Ryabinin <ryabinin.a.a@gmail.com>
 */

#define pr_fmt(fmt) "kasan: " fmt
#include <linux/kasan.h>
#include <linux/kernel.h>
#include <linux/sched/task.h>
#include <linux/memblock.h>
#include <linux/start_kernel.h>
#include <linux/mm.h>

#include <asm/mmu_context.h>
#include <asm/kernel-pgtable.h>
#include <asm/page.h>
#include <asm/pgalloc.h>
#include <asm/pgtable.h>
#include <asm/sections.h>
#include <asm/tlbflush.h>

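/*
 * Temporary pgdir used by kasan_init() to keep the early shadow mapped
 * while the shadow entries in swapper_pg_dir are torn down and rebuilt.
 */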
static pgd_t tmp_pg_dir[PTRS_PER_PGD] __initdata __aligned(PGD_SIZE);

/*
 * The p*d_populate functions call virt_to_phys implicitly so they can't be used
 * directly on kernel symbols (bm_p*d). All the early functions are called too
 * early to use lm_alias so __p*d_populate functions must be used to populate
 * with the physical address from __pa_symbol.
 */
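/*
 * For example, the early path below installs the statically allocated
 * shadow PTE table with:
 *
 *	__pmd_populate(pmdp, __pa_symbol(kasan_early_shadow_pte),
 *		       PMD_TYPE_TABLE);
 *
 * rather than pmd_populate_kernel(), which would translate the pointer
 * through virt_to_phys() and the not-yet-usable linear map.
 */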

static phys_addr_t __init kasan_alloc_zeroed_page(int node)
{
	void *p = memblock_alloc_try_nid(PAGE_SIZE, PAGE_SIZE,
					 __pa(MAX_DMA_ADDRESS),
					 MEMBLOCK_ALLOC_KASAN, node);
	if (!p)
		panic("%s: Failed to allocate %lu bytes align=0x%lx nid=%d from=%llx\n",
		      __func__, PAGE_SIZE, PAGE_SIZE, node,
		      __pa(MAX_DMA_ADDRESS));

	return __pa(p);
}

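/*
 * As above, but the page is returned uninitialized: the caller is expected
 * to fill it (kasan_pte_populate() memsets it to KASAN_SHADOW_INIT).
 */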
static phys_addr_t __init kasan_alloc_raw_page(int node)
{
	void *p = memblock_alloc_try_nid_raw(PAGE_SIZE, PAGE_SIZE,
					     __pa(MAX_DMA_ADDRESS),
					     MEMBLOCK_ALLOC_KASAN, node);
	if (!p)
		panic("%s: Failed to allocate %lu bytes align=0x%lx nid=%d from=%llx\n",
		      __func__, PAGE_SIZE, PAGE_SIZE, node,
		      __pa(MAX_DMA_ADDRESS));

	return __pa(p);
}

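/*
 * The kasan_*_offset() helpers below walk one level of the shadow page
 * tables, installing a next-level table first if the entry is empty.  In
 * the early path the preallocated kasan_early_shadow_* tables are hooked
 * in via __pa_symbol(); later calls allocate fresh zeroed pages instead.
 */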
static pte_t *__init kasan_pte_offset(pmd_t *pmdp, unsigned long addr, int node,
				      bool early)
{
	if (pmd_none(READ_ONCE(*pmdp))) {
		phys_addr_t pte_phys = early ?
				__pa_symbol(kasan_early_shadow_pte)
					: kasan_alloc_zeroed_page(node);
		__pmd_populate(pmdp, pte_phys, PMD_TYPE_TABLE);
	}

	return early ? pte_offset_kimg(pmdp, addr)
		     : pte_offset_kernel(pmdp, addr);
}

static pmd_t *__init kasan_pmd_offset(pud_t *pudp, unsigned long addr, int node,
				      bool early)
{
	if (pud_none(READ_ONCE(*pudp))) {
		phys_addr_t pmd_phys = early ?
				__pa_symbol(kasan_early_shadow_pmd)
					: kasan_alloc_zeroed_page(node);
		__pud_populate(pudp, pmd_phys, PMD_TYPE_TABLE);
	}

	return early ? pmd_offset_kimg(pudp, addr) : pmd_offset(pudp, addr);
}

static pud_t *__init kasan_pud_offset(p4d_t *p4dp, unsigned long addr, int node,
				      bool early)
{
	if (p4d_none(READ_ONCE(*p4dp))) {
		phys_addr_t pud_phys = early ?
				__pa_symbol(kasan_early_shadow_pud)
					: kasan_alloc_zeroed_page(node);
		__p4d_populate(p4dp, pud_phys, PMD_TYPE_TABLE);
	}

	return early ? pud_offset_kimg(p4dp, addr) : pud_offset(p4dp, addr);
}

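/*
 * The kasan_*_populate() helpers map shadow memory for [addr, end).  Note
 * that the loops stop early as soon as the next entry is already
 * populated, so ranges set up by earlier calls are not remapped.
 */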
static void __init kasan_pte_populate(pmd_t *pmdp, unsigned long addr,
				      unsigned long end, int node, bool early)
{
	unsigned long next;
	pte_t *ptep = kasan_pte_offset(pmdp, addr, node, early);

	do {
		phys_addr_t page_phys = early ?
				__pa_symbol(kasan_early_shadow_page)
					: kasan_alloc_raw_page(node);
		if (!early)
			memset(__va(page_phys), KASAN_SHADOW_INIT, PAGE_SIZE);
		next = addr + PAGE_SIZE;
		set_pte(ptep, pfn_pte(__phys_to_pfn(page_phys), PAGE_KERNEL));
	} while (ptep++, addr = next, addr != end && pte_none(READ_ONCE(*ptep)));
}

static void __init kasan_pmd_populate(pud_t *pudp, unsigned long addr,
				      unsigned long end, int node, bool early)
{
	unsigned long next;
	pmd_t *pmdp = kasan_pmd_offset(pudp, addr, node, early);

	do {
		next = pmd_addr_end(addr, end);
		kasan_pte_populate(pmdp, addr, next, node, early);
	} while (pmdp++, addr = next, addr != end && pmd_none(READ_ONCE(*pmdp)));
}

static void __init kasan_pud_populate(p4d_t *p4dp, unsigned long addr,
				      unsigned long end, int node, bool early)
{
	unsigned long next;
	pud_t *pudp = kasan_pud_offset(p4dp, addr, node, early);

	do {
		next = pud_addr_end(addr, end);
		kasan_pmd_populate(pudp, addr, next, node, early);
	} while (pudp++, addr = next, addr != end && pud_none(READ_ONCE(*pudp)));
}

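/*
 * On arm64 the p4d level is always folded into the pgd (the architecture
 * uses at most four page table levels), so p4d_offset() simply returns the
 * pgd pointer, p4d_addr_end() evaluates to end, and the loop body below
 * runs exactly once per pgd entry.  The walk is still written in the
 * generic five-level form so it matches the core mm helpers.
 */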
static void __init kasan_p4d_populate(pgd_t *pgdp, unsigned long addr,
				      unsigned long end, int node, bool early)
{
	unsigned long next;
	p4d_t *p4dp = p4d_offset(pgdp, addr);

	do {
		next = p4d_addr_end(addr, end);
		kasan_pud_populate(p4dp, addr, next, node, early);
	} while (p4dp++, addr = next, addr != end);
}

static void __init kasan_pgd_populate(unsigned long addr, unsigned long end,
				      int node, bool early)
{
	unsigned long next;
	pgd_t *pgdp;

	pgdp = pgd_offset_k(addr);
	do {
		next = pgd_addr_end(addr, end);
		kasan_p4d_populate(pgdp, addr, next, node, early);
	} while (pgdp++, addr = next, addr != end);
}

/* The early shadow maps everything to a single page of zeroes */
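/*
 * Generic KASAN maps each (1 << KASAN_SHADOW_SCALE_SHIFT) bytes of memory
 * to one shadow byte:
 *
 *	shadow(addr) = (addr >> KASAN_SHADOW_SCALE_SHIFT) + KASAN_SHADOW_OFFSET
 *
 * so requiring shadow(1UL << 64) == KASAN_SHADOW_END gives exactly the
 * first BUILD_BUG_ON() check below.
 */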
asmlinkage void __init kasan_early_init(void)
{
	BUILD_BUG_ON(KASAN_SHADOW_OFFSET !=
		KASAN_SHADOW_END - (1UL << (64 - KASAN_SHADOW_SCALE_SHIFT)));
	BUILD_BUG_ON(!IS_ALIGNED(_KASAN_SHADOW_START(VA_BITS), PGDIR_SIZE));
	BUILD_BUG_ON(!IS_ALIGNED(_KASAN_SHADOW_START(VA_BITS_MIN), PGDIR_SIZE));
	BUILD_BUG_ON(!IS_ALIGNED(KASAN_SHADOW_END, PGDIR_SIZE));
	kasan_pgd_populate(KASAN_SHADOW_START, KASAN_SHADOW_END, NUMA_NO_NODE,
			   true);
}

/* Set up full kasan mappings, ensuring that the mapped pages are zeroed */
static void __init kasan_map_populate(unsigned long start, unsigned long end,
				      int node)
{
	kasan_pgd_populate(start & PAGE_MASK, PAGE_ALIGN(end), node, false);
}

/*
 * Copy the current shadow region into a new pgdir.
 */
void __init kasan_copy_shadow(pgd_t *pgdir)
{
	pgd_t *pgdp, *pgdp_new, *pgdp_end;

	pgdp = pgd_offset_k(KASAN_SHADOW_START);
	pgdp_end = pgd_offset_k(KASAN_SHADOW_END);
	pgdp_new = pgd_offset_raw(pgdir, KASAN_SHADOW_START);
	do {
		set_pgd(pgdp_new, READ_ONCE(*pgdp));
	} while (pgdp++, pgdp_new++, pgdp != pgdp_end);
}

static void __init clear_pgds(unsigned long start,
			      unsigned long end)
{
	/*
	 * Remove references to the kasan page tables from swapper_pg_dir.
	 * pgd_clear() can't be used here because it is a no-op on 2- and
	 * 3-level page table setups.
	 */
	for (; start < end; start += PGDIR_SIZE)
		set_pgd(pgd_offset_k(start), __pgd(0));
}

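/*
 * Replace the early, single-page shadow with properly allocated shadow
 * memory for the kernel image, the linear map and the module area, then
 * switch back to swapper_pg_dir and enable reports.
 */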
void __init kasan_init(void)
{
	u64 kimg_shadow_start, kimg_shadow_end;
	u64 mod_shadow_start, mod_shadow_end;
	struct memblock_region *reg;
	int i;

	kimg_shadow_start = (u64)kasan_mem_to_shadow(_text) & PAGE_MASK;
	kimg_shadow_end = PAGE_ALIGN((u64)kasan_mem_to_shadow(_end));

	mod_shadow_start = (u64)kasan_mem_to_shadow((void *)MODULES_VADDR);
	mod_shadow_end = (u64)kasan_mem_to_shadow((void *)MODULES_END);

	/*
	 * We are going to perform proper setup of shadow memory.  First we
	 * must unmap the early shadow (the clear_pgds() call below), but
	 * instrumented code can't execute without shadow memory, so
	 * tmp_pg_dir is used to keep the early shadow mapped until the
	 * full shadow setup is finished.
	 */
	memcpy(tmp_pg_dir, swapper_pg_dir, sizeof(tmp_pg_dir));
	dsb(ishst);
	cpu_replace_ttbr1(lm_alias(tmp_pg_dir));

	clear_pgds(KASAN_SHADOW_START, KASAN_SHADOW_END);

	kasan_map_populate(kimg_shadow_start, kimg_shadow_end,
			   early_pfn_to_nid(virt_to_pfn(lm_alias(_text))));

	kasan_populate_early_shadow(kasan_mem_to_shadow((void *)PAGE_END),
				   (void *)mod_shadow_start);
	kasan_populate_early_shadow((void *)kimg_shadow_end,
				   (void *)KASAN_SHADOW_END);

	if (kimg_shadow_start > mod_shadow_end)
		kasan_populate_early_shadow((void *)mod_shadow_end,
					    (void *)kimg_shadow_start);

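	/* Populate shadow for the linear map of each memblock region. */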
	for_each_memblock(memory, reg) {
		void *start = (void *)__phys_to_virt(reg->base);
		void *end = (void *)__phys_to_virt(reg->base + reg->size);

		if (start >= end)
			break;

		kasan_map_populate((unsigned long)kasan_mem_to_shadow(start),
				   (unsigned long)kasan_mem_to_shadow(end),
				   early_pfn_to_nid(virt_to_pfn(start)));
	}

	/*
	 * KAsan may reuse the contents of kasan_early_shadow_pte directly,
	 * so we should make sure that it maps the zero page read-only.
	 */
	for (i = 0; i < PTRS_PER_PTE; i++)
		set_pte(&kasan_early_shadow_pte[i],
			pfn_pte(sym_to_pfn(kasan_early_shadow_page),
				PAGE_KERNEL_RO));

	memset(kasan_early_shadow_page, KASAN_SHADOW_INIT, PAGE_SIZE);
	cpu_replace_ttbr1(lm_alias(swapper_pg_dir));

	/* At this point kasan is fully initialized. Enable error messages */
	init_task.kasan_depth = 0;
	pr_info("KernelAddressSanitizer initialized\n");
}