arch/x86/mm/kasan_init_64.c
// SPDX-License-Identifier: GPL-2.0
#define DISABLE_BRANCH_PROFILING
#define pr_fmt(fmt) "kasan: " fmt
#include <linux/bootmem.h>
#include <linux/kasan.h>
#include <linux/kdebug.h>
#include <linux/mm.h>
#include <linux/sched.h>
#include <linux/sched/task.h>
#include <linux/vmalloc.h>

#include <asm/e820/types.h>
#include <asm/tlbflush.h>
#include <asm/sections.h>
#include <asm/pgtable.h>

extern struct range pfn_mapped[E820_MAX_ENTRIES];

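/*
 * KASAN maps each 8 bytes of kernel address space to one shadow byte:
 * shadow = (addr >> KASAN_SHADOW_SCALE_SHIFT) + KASAN_SHADOW_OFFSET.
 * map_range() allocates real shadow pages for the shadow region that
 * covers one e820 range of mapped physical memory.
 */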
static int __init map_range(struct range *range)
{
	unsigned long start;
	unsigned long end;

	start = (unsigned long)kasan_mem_to_shadow(pfn_to_kaddr(range->start));
	end = (unsigned long)kasan_mem_to_shadow(pfn_to_kaddr(range->end));

	return vmemmap_populate(start, end, NUMA_NO_NODE);
}

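/*
 * Unmap the early shadow: clear every pgd entry covering the shadow
 * address range so kasan_init() can repopulate it with real shadow memory.
 */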
static void __init clear_pgds(unsigned long start,
			unsigned long end)
{
	pgd_t *pgd;

	for (; start < end; start += PGDIR_SIZE) {
		pgd = pgd_offset_k(start);
		/*
		 * With folded p4d, pgd_clear() is nop, use p4d_clear()
		 * instead.
		 */
		if (CONFIG_PGTABLE_LEVELS < 5)
			p4d_clear(p4d_offset(pgd, start));
		else
			pgd_clear(pgd);
	}
}

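/*
 * Point every pgd entry covering [KASAN_SHADOW_START, KASAN_SHADOW_END)
 * at the shared zero page tables, so early shadow reads all return zero
 * ("not poisoned") before the real shadow has been allocated.
 */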
static void __init kasan_map_early_shadow(pgd_t *pgd)
{
	int i;
	unsigned long start = KASAN_SHADOW_START;
	unsigned long end = KASAN_SHADOW_END;

	for (i = pgd_index(start); start < end; i++) {
		switch (CONFIG_PGTABLE_LEVELS) {
		case 4:
			pgd[i] = __pgd(__pa_nodebug(kasan_zero_pud) |
					_KERNPG_TABLE);
			break;
		case 5:
			pgd[i] = __pgd(__pa_nodebug(kasan_zero_p4d) |
					_KERNPG_TABLE);
			break;
		default:
			BUILD_BUG();
		}
		start += PGDIR_SIZE;
	}
}

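/*
 * With inline instrumentation, a wild shadow access shows up as a general
 * protection fault, so hook the die notifier to print a hint about the
 * likely cause before the oops is reported.
 */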
#ifdef CONFIG_KASAN_INLINE
static int kasan_die_handler(struct notifier_block *self,
			     unsigned long val,
			     void *data)
{
	if (val == DIE_GPF) {
		pr_emerg("CONFIG_KASAN_INLINE enabled\n");
		pr_emerg("GPF could be caused by NULL-ptr deref or user memory access\n");
	}
	return NOTIFY_OK;
}

static struct notifier_block kasan_die_notifier = {
	.notifier_call = kasan_die_handler,
};
#endif

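/*
 * Build the early shadow out of a single zero page: every pte/pmd/pud
 * (and p4d, with 5-level paging) entry funnels into kasan_zero_page, so
 * the whole shadow reads as unpoisoned until kasan_init() runs.
 */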
void __init kasan_early_init(void)
{
	int i;
	pteval_t pte_val = __pa_nodebug(kasan_zero_page) | __PAGE_KERNEL | _PAGE_ENC;
	pmdval_t pmd_val = __pa_nodebug(kasan_zero_pte) | _KERNPG_TABLE;
	pudval_t pud_val = __pa_nodebug(kasan_zero_pmd) | _KERNPG_TABLE;
	p4dval_t p4d_val = __pa_nodebug(kasan_zero_pud) | _KERNPG_TABLE;

	for (i = 0; i < PTRS_PER_PTE; i++)
		kasan_zero_pte[i] = __pte(pte_val);

	for (i = 0; i < PTRS_PER_PMD; i++)
		kasan_zero_pmd[i] = __pmd(pmd_val);

	for (i = 0; i < PTRS_PER_PUD; i++)
		kasan_zero_pud[i] = __pud(pud_val);

	for (i = 0; CONFIG_PGTABLE_LEVELS >= 5 && i < PTRS_PER_P4D; i++)
		kasan_zero_p4d[i] = __p4d(p4d_val);

	kasan_map_early_shadow(early_top_pgt);
	kasan_map_early_shadow(init_top_pgt);
}

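/*
 * Replace the zero-page early shadow with the real thing: actual shadow
 * pages for all mapped physical memory and for the kernel image, and the
 * shared zero shadow for everything KASAN does not track.
 */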
void __init kasan_init(void)
{
	int i;

#ifdef CONFIG_KASAN_INLINE
	register_die_notifier(&kasan_die_notifier);
#endif

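	/*
	 * The shadow pgds in init_top_pgt are about to be cleared and
	 * rebuilt. Run on a copy (early_top_pgt) in the meantime so the
	 * live page tables stay consistent.
	 */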
	memcpy(early_top_pgt, init_top_pgt, sizeof(early_top_pgt));
	load_cr3(early_top_pgt);
	__flush_tlb_all();

	clear_pgds(KASAN_SHADOW_START, KASAN_SHADOW_END);

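	/*
	 * The region below the start of the direct mapping contains no
	 * instrumented memory, so its shadow can all point at the shared
	 * zero page.
	 */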
	kasan_populate_zero_shadow((void *)KASAN_SHADOW_START,
			kasan_mem_to_shadow((void *)PAGE_OFFSET));

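	/*
	 * Allocate real shadow memory for every e820 range of mapped
	 * physical memory; these are the addresses KASAN actually checks.
	 */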
	for (i = 0; i < E820_MAX_ENTRIES; i++) {
		if (pfn_mapped[i].end == 0)
			break;

		if (map_range(&pfn_mapped[i]))
			panic("kasan: unable to allocate shadow!");
	}
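
	/*
	 * Addresses between the end of the direct mapping and the kernel
	 * image (vmalloc and vmemmap, which this KASAN setup does not
	 * track) can have their shadow backed by the zero page as well.
	 */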
	kasan_populate_zero_shadow(
		kasan_mem_to_shadow((void *)PAGE_OFFSET + MAXMEM),
		kasan_mem_to_shadow((void *)__START_KERNEL_map));

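	/* The kernel image itself is instrumented: give it real shadow. */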
	vmemmap_populate((unsigned long)kasan_mem_to_shadow(_stext),
			(unsigned long)kasan_mem_to_shadow(_end),
			NUMA_NO_NODE);

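	/*
	 * Module shadow is allocated on demand at module load time;
	 * everything from MODULES_END to the end of the shadow region
	 * can share the zero page.
	 */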
	kasan_populate_zero_shadow(kasan_mem_to_shadow((void *)MODULES_END),
			(void *)KASAN_SHADOW_END);

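	/* The rebuilt init_top_pgt is complete; switch back to it. */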
	load_cr3(init_top_pgt);
	__flush_tlb_all();

	/*
	 * kasan_zero_page has been used as early shadow memory, thus it may
	 * contain some garbage. Now we can clear and write protect it, since
	 * after the TLB flush no one should write to it.
	 */
	memset(kasan_zero_page, 0, PAGE_SIZE);
	for (i = 0; i < PTRS_PER_PTE; i++) {
		pte_t pte = __pte(__pa(kasan_zero_page) | __PAGE_KERNEL_RO | _PAGE_ENC);
		set_pte(&kasan_zero_pte[i], pte);
	}
	/* Flush TLBs again to be sure that write protection applied. */
	__flush_tlb_all();
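
	/*
	 * init_task.kasan_depth was set non-zero early to suppress KASAN
	 * reports during boot; the shadow is now fully initialized, so
	 * reporting can be switched on.
	 */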
	init_task.kasan_depth = 0;
	pr_info("KernelAddressSanitizer initialized\n");
}