memblock: introduce saner 'memblock_free_ptr()' interface
linux-2.6-block.git: arch/x86/mm/kasan_init_64.c

// SPDX-License-Identifier: GPL-2.0
#define DISABLE_BRANCH_PROFILING
#define pr_fmt(fmt) "kasan: " fmt

/* cpu_feature_enabled() cannot be used this early */
#define USE_EARLY_PGTABLE_L5

#include <linux/memblock.h>
#include <linux/kasan.h>
#include <linux/kdebug.h>
#include <linux/mm.h>
#include <linux/sched.h>
#include <linux/sched/task.h>
#include <linux/vmalloc.h>

#include <asm/e820/types.h>
#include <asm/pgalloc.h>
#include <asm/tlbflush.h>
#include <asm/sections.h>
#include <asm/cpu_entry_area.h>

extern struct range pfn_mapped[E820_MAX_ENTRIES];

static p4d_t tmp_p4d_table[MAX_PTRS_PER_P4D] __initdata __aligned(PAGE_SIZE);

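/*
 * Allocate naturally aligned, node-local boot memory above MAX_DMA_ADDRESS
 * from memblock. Callers that can fall back to a smaller allocation pass
 * should_panic=false and handle a NULL return themselves.
 */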
static __init void *early_alloc(size_t size, int nid, bool should_panic)
{
	void *ptr = memblock_alloc_try_nid(size, size,
			__pa(MAX_DMA_ADDRESS), MEMBLOCK_ALLOC_ACCESSIBLE, nid);

	if (!ptr && should_panic)
		panic("%pS: Failed to allocate page, nid=%d from=%lx\n",
		      (void *)_RET_IP_, nid, __pa(MAX_DMA_ADDRESS));

	return ptr;
}

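/*
 * Fill the shadow mapping one PMD at a time. If the range covers an entire,
 * aligned PMD and the CPU supports 2M pages, try to back it with a single
 * huge page; when pmd_set_huge() fails, the speculative allocation is
 * returned to memblock via memblock_free_ptr(). Otherwise fall back to a
 * page table of individual 4K PTEs.
 */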
static void __init kasan_populate_pmd(pmd_t *pmd, unsigned long addr,
				      unsigned long end, int nid)
{
	pte_t *pte;

	if (pmd_none(*pmd)) {
		void *p;

		if (boot_cpu_has(X86_FEATURE_PSE) &&
		    ((end - addr) == PMD_SIZE) &&
		    IS_ALIGNED(addr, PMD_SIZE)) {
			p = early_alloc(PMD_SIZE, nid, false);
			if (p && pmd_set_huge(pmd, __pa(p), PAGE_KERNEL))
				return;
			memblock_free_ptr(p, PMD_SIZE);
		}

		p = early_alloc(PAGE_SIZE, nid, true);
		pmd_populate_kernel(&init_mm, pmd, p);
	}

	pte = pte_offset_kernel(pmd, addr);
	do {
		pte_t entry;
		void *p;

		if (!pte_none(*pte))
			continue;

		p = early_alloc(PAGE_SIZE, nid, true);
		entry = pfn_pte(PFN_DOWN(__pa(p)), PAGE_KERNEL);
		set_pte_at(&init_mm, addr, pte, entry);
	} while (pte++, addr += PAGE_SIZE, addr != end);
}

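/*
 * Same idea one level up: try a 1G huge page when gbpages are available and
 * the range spans a full, aligned PUD, otherwise descend into the PMDs.
 */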
static void __init kasan_populate_pud(pud_t *pud, unsigned long addr,
				      unsigned long end, int nid)
{
	pmd_t *pmd;
	unsigned long next;

	if (pud_none(*pud)) {
		void *p;

		if (boot_cpu_has(X86_FEATURE_GBPAGES) &&
		    ((end - addr) == PUD_SIZE) &&
		    IS_ALIGNED(addr, PUD_SIZE)) {
			p = early_alloc(PUD_SIZE, nid, false);
			if (p && pud_set_huge(pud, __pa(p), PAGE_KERNEL))
				return;
			memblock_free_ptr(p, PUD_SIZE);
		}

		p = early_alloc(PAGE_SIZE, nid, true);
		pud_populate(&init_mm, pud, p);
	}

	pmd = pmd_offset(pud, addr);
	do {
		next = pmd_addr_end(addr, end);
		if (!pmd_large(*pmd))
			kasan_populate_pmd(pmd, addr, next, nid);
	} while (pmd++, addr = next, addr != end);
}

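/*
 * The p4d and pgd walkers below only allocate intermediate page tables;
 * huge mappings are attempted at the pud and pmd levels above.
 */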
static void __init kasan_populate_p4d(p4d_t *p4d, unsigned long addr,
				      unsigned long end, int nid)
{
	pud_t *pud;
	unsigned long next;

	if (p4d_none(*p4d)) {
		void *p = early_alloc(PAGE_SIZE, nid, true);

		p4d_populate(&init_mm, p4d, p);
	}

	pud = pud_offset(p4d, addr);
	do {
		next = pud_addr_end(addr, end);
		if (!pud_large(*pud))
			kasan_populate_pud(pud, addr, next, nid);
	} while (pud++, addr = next, addr != end);
}

static void __init kasan_populate_pgd(pgd_t *pgd, unsigned long addr,
				      unsigned long end, int nid)
{
	void *p;
	p4d_t *p4d;
	unsigned long next;

	if (pgd_none(*pgd)) {
		p = early_alloc(PAGE_SIZE, nid, true);
		pgd_populate(&init_mm, pgd, p);
	}

	p4d = p4d_offset(pgd, addr);
	do {
		next = p4d_addr_end(addr, end);
		kasan_populate_p4d(p4d, addr, next, nid);
	} while (p4d++, addr = next, addr != end);
}

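/*
 * Populate writable shadow memory for [addr, end), walking down from the
 * kernel page tables. Both endpoints are widened to page boundaries first.
 */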
static void __init kasan_populate_shadow(unsigned long addr, unsigned long end,
					 int nid)
{
	pgd_t *pgd;
	unsigned long next;

	addr = addr & PAGE_MASK;
	end = round_up(end, PAGE_SIZE);
	pgd = pgd_offset_k(addr);
	do {
		next = pgd_addr_end(addr, end);
		kasan_populate_pgd(pgd, addr, next, nid);
	} while (pgd++, addr = next, addr != end);
}

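/* Populate shadow for one range of directly mapped physical memory. */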
static void __init map_range(struct range *range)
{
	unsigned long start;
	unsigned long end;

	start = (unsigned long)kasan_mem_to_shadow(pfn_to_kaddr(range->start));
	end = (unsigned long)kasan_mem_to_shadow(pfn_to_kaddr(range->end));

	kasan_populate_shadow(start, end, early_pfn_to_nid(range->start));
}

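/*
 * Unmap the early shadow for [start, end): clear whole PGD entries up to the
 * last PGD-aligned boundary, then clear the remaining p4ds in the final PGD
 * entry, which is shared with other mappings when KASAN_SHADOW_END is not
 * PGD-aligned.
 */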
static void __init clear_pgds(unsigned long start,
			unsigned long end)
{
	pgd_t *pgd;
	/* See comment in kasan_init() */
	unsigned long pgd_end = end & PGDIR_MASK;

	for (; start < pgd_end; start += PGDIR_SIZE) {
		pgd = pgd_offset_k(start);
		/*
		 * With folded p4d, pgd_clear() is a nop: use p4d_clear()
		 * instead.
		 */
		if (pgtable_l5_enabled())
			pgd_clear(pgd);
		else
			p4d_clear(p4d_offset(pgd, start));
	}

	pgd = pgd_offset_k(start);
	for (; start < end; start += P4D_SIZE)
		p4d_clear(p4d_offset(pgd, start));
}

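/*
 * Resolve the p4d table through the kernel text mapping
 * (__START_KERNEL_map - phys_base) rather than __va(), since this runs
 * before the direct mapping is usable. With 4-level paging the p4d is
 * folded into the pgd.
 */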
static inline p4d_t *early_p4d_offset(pgd_t *pgd, unsigned long addr)
{
	unsigned long p4d;

	if (!pgtable_l5_enabled())
		return (p4d_t *)pgd;

	p4d = pgd_val(*pgd) & PTE_PFN_MASK;
	p4d += __START_KERNEL_map - phys_base;
	return (p4d_t *)p4d + p4d_index(addr);
}

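/*
 * Hook any empty pgd/p4d entries in the range up to the shared early shadow
 * page tables (kasan_early_shadow_p4d/pud).
 */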
static void __init kasan_early_p4d_populate(pgd_t *pgd,
					unsigned long addr,
					unsigned long end)
{
	pgd_t pgd_entry;
	p4d_t *p4d, p4d_entry;
	unsigned long next;

	if (pgd_none(*pgd)) {
		pgd_entry = __pgd(_KERNPG_TABLE |
					__pa_nodebug(kasan_early_shadow_p4d));
		set_pgd(pgd, pgd_entry);
	}

	p4d = early_p4d_offset(pgd, addr);
	do {
		next = p4d_addr_end(addr, end);

		if (!p4d_none(*p4d))
			continue;

		p4d_entry = __p4d(_KERNPG_TABLE |
					__pa_nodebug(kasan_early_shadow_pud));
		set_p4d(p4d, p4d_entry);
	} while (p4d++, addr = next, addr != end && p4d_none(*p4d));
}

static void __init kasan_map_early_shadow(pgd_t *pgd)
{
	/* See comment in kasan_init() */
	unsigned long addr = KASAN_SHADOW_START & PGDIR_MASK;
	unsigned long end = KASAN_SHADOW_END;
	unsigned long next;

	pgd += pgd_index(addr);
	do {
		next = pgd_addr_end(addr, end);
		kasan_early_p4d_populate(pgd, addr, next);
	} while (pgd++, addr = next, addr != end);
}

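/*
 * Used with CONFIG_KASAN_VMALLOC: only preallocate the top levels of the
 * vmalloc shadow so they get synced into every page table; the lower levels
 * are populated on demand as vmalloc'ed memory is shadowed.
 */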
static void __init kasan_shallow_populate_p4ds(pgd_t *pgd,
					       unsigned long addr,
					       unsigned long end)
{
	p4d_t *p4d;
	unsigned long next;
	void *p;

	p4d = p4d_offset(pgd, addr);
	do {
		next = p4d_addr_end(addr, end);

		if (p4d_none(*p4d)) {
			p = early_alloc(PAGE_SIZE, NUMA_NO_NODE, true);
			p4d_populate(&init_mm, p4d, p);
		}
	} while (p4d++, addr = next, addr != end);
}

static void __init kasan_shallow_populate_pgds(void *start, void *end)
{
	unsigned long addr, next;
	pgd_t *pgd;
	void *p;

	addr = (unsigned long)start;
	pgd = pgd_offset_k(addr);
	do {
		next = pgd_addr_end(addr, (unsigned long)end);

		if (pgd_none(*pgd)) {
			p = early_alloc(PAGE_SIZE, NUMA_NO_NODE, true);
			pgd_populate(&init_mm, pgd, p);
		}

		/*
		 * We need to populate p4ds to be synced when running in
		 * four level mode - see sync_global_pgds_l4().
		 */
		kasan_shallow_populate_p4ds(pgd, addr, next);
	} while (pgd++, addr = next, addr != (unsigned long)end);
}

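/*
 * Runs very early in boot: point the whole shadow range at a single shared
 * page via the kasan_early_shadow_* tables, in both early_top_pgt and
 * init_top_pgt, so KASAN checks can pass before the real shadow exists.
 */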
void __init kasan_early_init(void)
{
	int i;
	pteval_t pte_val = __pa_nodebug(kasan_early_shadow_page) |
				__PAGE_KERNEL | _PAGE_ENC;
	pmdval_t pmd_val = __pa_nodebug(kasan_early_shadow_pte) | _KERNPG_TABLE;
	pudval_t pud_val = __pa_nodebug(kasan_early_shadow_pmd) | _KERNPG_TABLE;
	p4dval_t p4d_val = __pa_nodebug(kasan_early_shadow_pud) | _KERNPG_TABLE;

	/* Mask out unsupported __PAGE_KERNEL bits: */
	pte_val &= __default_kernel_pte_mask;
	pmd_val &= __default_kernel_pte_mask;
	pud_val &= __default_kernel_pte_mask;
	p4d_val &= __default_kernel_pte_mask;

	for (i = 0; i < PTRS_PER_PTE; i++)
		kasan_early_shadow_pte[i] = __pte(pte_val);

	for (i = 0; i < PTRS_PER_PMD; i++)
		kasan_early_shadow_pmd[i] = __pmd(pmd_val);

	for (i = 0; i < PTRS_PER_PUD; i++)
		kasan_early_shadow_pud[i] = __pud(pud_val);

	for (i = 0; pgtable_l5_enabled() && i < PTRS_PER_P4D; i++)
		kasan_early_shadow_p4d[i] = __p4d(p4d_val);

	kasan_map_early_shadow(early_top_pgt);
	kasan_map_early_shadow(init_top_pgt);
}

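/*
 * Build the real shadow mapping: writable shadow for the direct mapping,
 * cpu_entry_area and the kernel image, the shared early shadow everywhere
 * else; then switch back to init_top_pgt and write-protect the early
 * shadow page.
 */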
void __init kasan_init(void)
{
	int i;
	void *shadow_cpu_entry_begin, *shadow_cpu_entry_end;

	memcpy(early_top_pgt, init_top_pgt, sizeof(early_top_pgt));

	/*
	 * We use the same shadow offset for 4- and 5-level paging to
	 * facilitate boot-time switching between paging modes.
	 * As a result, in 5-level paging mode KASAN_SHADOW_START and
	 * KASAN_SHADOW_END are not aligned to the PGD boundary.
	 *
	 * KASAN_SHADOW_START doesn't share a PGD with anything else.
	 * We claim the whole PGD entry to make things easier.
	 *
	 * KASAN_SHADOW_END lands in the last PGD entry and collides with a
	 * bunch of things like kernel code, modules, the EFI mapping, etc.
	 * We need to take extra steps not to overwrite them.
	 */
	if (pgtable_l5_enabled()) {
		void *ptr;

		ptr = (void *)pgd_page_vaddr(*pgd_offset_k(KASAN_SHADOW_END));
		memcpy(tmp_p4d_table, (void *)ptr, sizeof(tmp_p4d_table));
		set_pgd(&early_top_pgt[pgd_index(KASAN_SHADOW_END)],
				__pgd(__pa(tmp_p4d_table) | _KERNPG_TABLE));
	}

	load_cr3(early_top_pgt);
	__flush_tlb_all();

	clear_pgds(KASAN_SHADOW_START & PGDIR_MASK, KASAN_SHADOW_END);

	kasan_populate_early_shadow((void *)(KASAN_SHADOW_START & PGDIR_MASK),
			kasan_mem_to_shadow((void *)PAGE_OFFSET));

	for (i = 0; i < E820_MAX_ENTRIES; i++) {
		if (pfn_mapped[i].end == 0)
			break;

		map_range(&pfn_mapped[i]);
	}

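	/*
	 * The cpu_entry_area shadow bounds are not necessarily page-aligned,
	 * so round them outward to cover the whole area.
	 */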
	shadow_cpu_entry_begin = (void *)CPU_ENTRY_AREA_BASE;
	shadow_cpu_entry_begin = kasan_mem_to_shadow(shadow_cpu_entry_begin);
	shadow_cpu_entry_begin = (void *)round_down(
			(unsigned long)shadow_cpu_entry_begin, PAGE_SIZE);

	shadow_cpu_entry_end = (void *)(CPU_ENTRY_AREA_BASE +
					CPU_ENTRY_AREA_MAP_SIZE);
	shadow_cpu_entry_end = kasan_mem_to_shadow(shadow_cpu_entry_end);
	shadow_cpu_entry_end = (void *)round_up(
			(unsigned long)shadow_cpu_entry_end, PAGE_SIZE);

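	/*
	 * The gaps between the regions populated below stay backed by the
	 * shared early shadow page.
	 */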
	kasan_populate_early_shadow(
		kasan_mem_to_shadow((void *)PAGE_OFFSET + MAXMEM),
		kasan_mem_to_shadow((void *)VMALLOC_START));

	/*
	 * If we're in full vmalloc mode, don't back vmalloc space with early
	 * shadow pages. Instead, prepopulate pgds/p4ds so they are synced to
	 * the global table and we can populate the lower levels on demand.
	 */
	if (IS_ENABLED(CONFIG_KASAN_VMALLOC))
		kasan_shallow_populate_pgds(
			kasan_mem_to_shadow((void *)VMALLOC_START),
			kasan_mem_to_shadow((void *)VMALLOC_END));
	else
		kasan_populate_early_shadow(
			kasan_mem_to_shadow((void *)VMALLOC_START),
			kasan_mem_to_shadow((void *)VMALLOC_END));

	kasan_populate_early_shadow(
		kasan_mem_to_shadow((void *)VMALLOC_END + 1),
		shadow_cpu_entry_begin);

	kasan_populate_shadow((unsigned long)shadow_cpu_entry_begin,
			      (unsigned long)shadow_cpu_entry_end, 0);

	kasan_populate_early_shadow(shadow_cpu_entry_end,
			kasan_mem_to_shadow((void *)__START_KERNEL_map));

	kasan_populate_shadow((unsigned long)kasan_mem_to_shadow(_stext),
			      (unsigned long)kasan_mem_to_shadow(_end),
			      early_pfn_to_nid(__pa(_stext)));

	kasan_populate_early_shadow(kasan_mem_to_shadow((void *)MODULES_END),
			(void *)KASAN_SHADOW_END);

	load_cr3(init_top_pgt);
	__flush_tlb_all();

	/*
	 * kasan_early_shadow_page has been used as early shadow memory, thus
	 * it may contain some garbage. Now we can clear and write-protect it,
	 * since after the TLB flush no one should write to it.
	 */
	memset(kasan_early_shadow_page, 0, PAGE_SIZE);
	for (i = 0; i < PTRS_PER_PTE; i++) {
		pte_t pte;
		pgprot_t prot;

		prot = __pgprot(__PAGE_KERNEL_RO | _PAGE_ENC);
		pgprot_val(prot) &= __default_kernel_pte_mask;

		pte = __pte(__pa(kasan_early_shadow_page) | pgprot_val(prot));
		set_pte(&kasan_early_shadow_pte[i], pte);
	}
	/* Flush TLBs again to be sure that the write protection is applied. */
	__flush_tlb_all();

	init_task.kasan_depth = 0;
	pr_info("KernelAddressSanitizer initialized\n");
}