/*
 * Xtensa KASAN shadow map initialization
 *
 * This file is subject to the terms and conditions of the GNU General Public
 * License. See the file "COPYING" in the main directory of this archive
 * for more details.
 *
 * Copyright (C) 2017 Cadence Design Systems Inc.
 */

#include <linux/bootmem.h>
#include <linux/init_task.h>
#include <linux/kasan.h>
#include <linux/kernel.h>
#include <linux/memblock.h>
#include <asm/initialize_mmu.h>
#include <asm/tlbflush.h>
#include <asm/traps.h>

20 | void __init kasan_early_init(void) | |
21 | { | |
22 | unsigned long vaddr = KASAN_SHADOW_START; | |
23 | pgd_t *pgd = pgd_offset_k(vaddr); | |
24 | pmd_t *pmd = pmd_offset(pgd, vaddr); | |
25 | int i; | |
26 | ||
27 | for (i = 0; i < PTRS_PER_PTE; ++i) | |
28 | set_pte(kasan_zero_pte + i, | |
29 | mk_pte(virt_to_page(kasan_zero_page), PAGE_KERNEL)); | |
30 | ||
31 | for (vaddr = 0; vaddr < KASAN_SHADOW_SIZE; vaddr += PMD_SIZE, ++pmd) { | |
32 | BUG_ON(!pmd_none(*pmd)); | |
33 | set_pmd(pmd, __pmd((unsigned long)kasan_zero_pte)); | |
34 | } | |
35 | early_trap_init(); | |
36 | } | |
37 | ||
38 | static void __init populate(void *start, void *end) | |
39 | { | |
40 | unsigned long n_pages = (end - start) / PAGE_SIZE; | |
41 | unsigned long n_pmds = n_pages / PTRS_PER_PTE; | |
42 | unsigned long i, j; | |
43 | unsigned long vaddr = (unsigned long)start; | |
44 | pgd_t *pgd = pgd_offset_k(vaddr); | |
45 | pmd_t *pmd = pmd_offset(pgd, vaddr); | |
eb31d559 | 46 | pte_t *pte = memblock_alloc(n_pages * sizeof(pte_t), PAGE_SIZE); |
c633544a MF |
47 | |
48 | pr_debug("%s: %p - %p\n", __func__, start, end); | |
49 | ||
50 | for (i = j = 0; i < n_pmds; ++i) { | |
51 | int k; | |
52 | ||
53 | for (k = 0; k < PTRS_PER_PTE; ++k, ++j) { | |
54 | phys_addr_t phys = | |
55 | memblock_alloc_base(PAGE_SIZE, PAGE_SIZE, | |
56 | MEMBLOCK_ALLOC_ANYWHERE); | |
57 | ||
58 | set_pte(pte + j, pfn_pte(PHYS_PFN(phys), PAGE_KERNEL)); | |
59 | } | |
60 | } | |
61 | ||
62 | for (i = 0; i < n_pmds ; ++i, pte += PTRS_PER_PTE) | |
63 | set_pmd(pmd + i, __pmd((unsigned long)pte)); | |
64 | ||
65 | local_flush_tlb_all(); | |
66 | memset(start, 0, end - start); | |
67 | } | |
68 | ||
69 | void __init kasan_init(void) | |
70 | { | |
71 | int i; | |
72 | ||
73 | BUILD_BUG_ON(KASAN_SHADOW_OFFSET != KASAN_SHADOW_START - | |
74 | (KASAN_START_VADDR >> KASAN_SHADOW_SCALE_SHIFT)); | |
75 | BUILD_BUG_ON(VMALLOC_START < KASAN_START_VADDR); | |
76 | ||
77 | /* | |
78 | * Replace shadow map pages that cover addresses from VMALLOC area | |
79 | * start to the end of KSEG with clean writable pages. | |
80 | */ | |
81 | populate(kasan_mem_to_shadow((void *)VMALLOC_START), | |
82 | kasan_mem_to_shadow((void *)XCHAL_KSEG_BYPASS_VADDR)); | |
83 | ||
84 | /* Write protect kasan_zero_page and zero-initialize it again. */ | |
85 | for (i = 0; i < PTRS_PER_PTE; ++i) | |
86 | set_pte(kasan_zero_pte + i, | |
87 | mk_pte(virt_to_page(kasan_zero_page), PAGE_KERNEL_RO)); | |
88 | ||
89 | local_flush_tlb_all(); | |
90 | memset(kasan_zero_page, 0, PAGE_SIZE); | |
91 | ||
92 | /* At this point kasan is fully initialized. Enable error messages. */ | |
93 | current->kasan_depth = 0; | |
94 | pr_info("KernelAddressSanitizer initialized\n"); | |
95 | } |