// SPDX-License-Identifier: GPL-2.0
// Copyright (C) 2019 Andes Technology Corporation
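
/*
 * KASAN shadow setup for RISC-V.
 *
 * Generic KASAN maps each 8 bytes of kernel address space to one shadow
 * byte.  kasan_early_init() points the whole shadow region at a single
 * zero page so instrumented code can run from early boot; kasan_init()
 * then backs the shadow of the regions actually used with real memory.
 */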
#include <linux/pfn.h>
#include <linux/init_task.h>
#include <linux/kasan.h>
#include <linux/kernel.h>
#include <linux/memblock.h>
#include <linux/pgtable.h>
#include <asm/tlbflush.h>
#include <asm/fixmap.h>
#include <asm/pgalloc.h>
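
/*
 * Allocate naturally aligned memory from memblock, panicking on failure:
 * there is no way to recover if shadow memory cannot be allocated.
 */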
static __init void *early_alloc(size_t size, int node)
{
	void *ptr = memblock_alloc_try_nid(size, size,
		__pa(MAX_DMA_ADDRESS), MEMBLOCK_ALLOC_ACCESSIBLE, node);

	if (!ptr)
		panic("%pS: Failed to allocate %zu bytes align=%zx nid=%d from=%llx\n",
		      __func__, size, size, node, (u64)__pa(MAX_DMA_ADDRESS));
	return ptr;
}
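
/*
 * Called from assembly during early boot (hence asmlinkage), before the
 * final page tables are live.  Every PGD entry of the shadow region is
 * pointed at one shared PMD, every PMD entry at one shared PTE table,
 * and every PTE at kasan_early_shadow_page, so all shadow reads return
 * zero (unpoisoned) until real shadow memory is populated.
 */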
extern pgd_t early_pg_dir[PTRS_PER_PGD];
asmlinkage void __init kasan_early_init(void)
{
	uintptr_t i;
	pgd_t *pgd = early_pg_dir + pgd_index(KASAN_SHADOW_START);

	/* Point every PTE of the shared PTE table at the zero shadow page. */
	for (i = 0; i < PTRS_PER_PTE; ++i)
		set_pte(kasan_early_shadow_pte + i,
			mk_pte(virt_to_page(kasan_early_shadow_page),
			       PAGE_KERNEL));

	/* Point every entry of the shared PMD at that PTE table. */
	for (i = 0; i < PTRS_PER_PMD; ++i)
		set_pmd(kasan_early_shadow_pmd + i,
			pfn_pmd(PFN_DOWN(__pa((uintptr_t)kasan_early_shadow_pte)),
				__pgprot(_PAGE_TABLE)));

	/* Map the whole shadow region through the shared PMD in early_pg_dir. */
	for (i = KASAN_SHADOW_START; i < KASAN_SHADOW_END;
	     i += PGDIR_SIZE, ++pgd)
		set_pgd(pgd,
			pfn_pgd(PFN_DOWN(__pa((uintptr_t)kasan_early_shadow_pmd)),
				__pgprot(_PAGE_TABLE)));

	/* Repeat the mapping in swapper_pg_dir. */
	pgd = pgd_offset_k(KASAN_SHADOW_START);

	for (i = KASAN_SHADOW_START; i < KASAN_SHADOW_END;
	     i += PGDIR_SIZE, ++pgd)
		set_pgd(pgd,
			pfn_pgd(PFN_DOWN(__pa((uintptr_t)kasan_early_shadow_pmd)),
				__pgprot(_PAGE_TABLE)));

	local_flush_tlb_all();
}
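
/*
 * Populate one PMD's worth of shadow PTEs with freshly allocated pages,
 * reusing the PTE table the PMD already points at when there is one.
 */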
static void kasan_populate_pte(pmd_t *pmd, unsigned long vaddr, unsigned long end)
{
	phys_addr_t phys_addr;
	pte_t *ptep, *base_pte;

	if (pmd_none(*pmd))
		base_pte = memblock_alloc(PTRS_PER_PTE * sizeof(pte_t), PAGE_SIZE);
	else
		base_pte = (pte_t *)pmd_page_vaddr(*pmd);

	ptep = base_pte + pte_index(vaddr);

	do {
		if (pte_none(*ptep)) {
			phys_addr = memblock_phys_alloc(PAGE_SIZE, PAGE_SIZE);
			set_pte(ptep, pfn_pte(PFN_DOWN(phys_addr), PAGE_KERNEL));
		}
	} while (ptep++, vaddr += PAGE_SIZE, vaddr != end);

	set_pmd(pmd, pfn_pmd(PFN_DOWN(__pa(base_pte)), PAGE_TABLE));
}
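
/*
 * Populate shadow at the PMD level, mapping PMD-sized hugepages where
 * the range is aligned and large enough and memblock can satisfy the
 * allocation, and falling back to kasan_populate_pte() otherwise.
 */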
static void kasan_populate_pmd(pgd_t *pgd, unsigned long vaddr, unsigned long end)
{
	phys_addr_t phys_addr;
	pmd_t *pmdp, *base_pmd;
	unsigned long next;

	base_pmd = (pmd_t *)pgd_page_vaddr(*pgd);
	if (base_pmd == lm_alias(kasan_early_shadow_pmd))
		base_pmd = memblock_alloc(PTRS_PER_PMD * sizeof(pmd_t), PAGE_SIZE);

	pmdp = base_pmd + pmd_index(vaddr);

	do {
		next = pmd_addr_end(vaddr, end);
		if (pmd_none(*pmdp) && IS_ALIGNED(vaddr, PMD_SIZE) && (next - vaddr) >= PMD_SIZE) {
			phys_addr = memblock_phys_alloc(PMD_SIZE, PMD_SIZE);
			if (phys_addr) {
				set_pmd(pmdp, pfn_pmd(PFN_DOWN(phys_addr), PAGE_KERNEL));
				continue;
			}
		}

		kasan_populate_pte(pmdp, vaddr, next);
	} while (pmdp++, vaddr = next, vaddr != end);

	/*
	 * Wait for the whole PGD to be populated before setting the PGD in
	 * the page table, otherwise, if we did set the PGD before populating
	 * it entirely, memblock could allocate a page at a physical address
	 * where KASAN is not populated yet and then we'd get a page fault.
	 */
	set_pgd(pgd, pfn_pgd(PFN_DOWN(__pa(base_pmd)), PAGE_TABLE));
}
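
/*
 * Walk the kernel PGD over [vaddr, end), replacing the early shadow
 * mapping with real shadow memory, as PGDIR-sized hugepages when
 * alignment and memblock allow it.
 */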
static void kasan_populate_pgd(unsigned long vaddr, unsigned long end)
{
	phys_addr_t phys_addr;
	pgd_t *pgdp = pgd_offset_k(vaddr);
	unsigned long next;

	do {
		next = pgd_addr_end(vaddr, end);
		/*
		 * pgdp can't be none since kasan_early_init() initialized the
		 * whole KASAN shadow region with kasan_early_shadow_pmd: if
		 * this is still the case, we can try to allocate a hugepage
		 * as a replacement.
		 */
		if (pgd_page_vaddr(*pgdp) == (unsigned long)lm_alias(kasan_early_shadow_pmd) &&
		    IS_ALIGNED(vaddr, PGDIR_SIZE) && (next - vaddr) >= PGDIR_SIZE) {
			phys_addr = memblock_phys_alloc(PGDIR_SIZE, PGDIR_SIZE);
			if (phys_addr) {
				set_pgd(pgdp, pfn_pgd(PFN_DOWN(phys_addr), PAGE_KERNEL));
				continue;
			}
		}

		kasan_populate_pmd(pgdp, vaddr, next);
	} while (pgdp++, vaddr = next, vaddr != end);
}
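
/* Populate real shadow memory for [start, end) and mark it unpoisoned. */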
static void __init kasan_populate(void *start, void *end)
{
	unsigned long vaddr = (unsigned long)start & PAGE_MASK;
	unsigned long vend = PAGE_ALIGN((unsigned long)end);

	kasan_populate_pgd(vaddr, vend);

	local_flush_tlb_all();
	memset(start, KASAN_SHADOW_INIT, end - start);
}
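
/*
 * With CONFIG_KASAN_VMALLOC, the vmalloc shadow is only populated down
 * to the PUD level here; individual shadow pages are expected to be
 * provided on demand by the core KASAN vmalloc support as mappings are
 * created.
 */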
void __init kasan_shallow_populate(void *start, void *end)
{
	unsigned long vaddr = (unsigned long)start & PAGE_MASK;
	unsigned long vend = PAGE_ALIGN((unsigned long)end);
	unsigned long pfn;
	int index;
	void *p;
	pud_t *pud_dir, *pud_k;
	pgd_t *pgd_dir, *pgd_k;
	p4d_t *p4d_dir, *p4d_k;

	while (vaddr < vend) {
		index = pgd_index(vaddr);
		pfn = csr_read(CSR_SATP) & SATP_PPN;
		pgd_dir = (pgd_t *)pfn_to_virt(pfn) + index;
		pgd_k = init_mm.pgd + index;
		pgd_dir = pgd_offset_k(vaddr);
		set_pgd(pgd_dir, *pgd_k);

		p4d_dir = p4d_offset(pgd_dir, vaddr);
		p4d_k = p4d_offset(pgd_k, vaddr);

		vaddr = (vaddr + PUD_SIZE) & PUD_MASK;
		pud_dir = pud_offset(p4d_dir, vaddr);
		pud_k = pud_offset(p4d_k, vaddr);

		if (pud_present(*pud_dir)) {
			p = early_alloc(PAGE_SIZE, NUMA_NO_NODE);
			pud_populate(&init_mm, pud_dir, p);
		}
		vaddr += PAGE_SIZE;
	}
}
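
/*
 * Main KASAN setup, run once memblock knows the physical memory layout.
 * kasan_mem_to_shadow(addr) is (addr >> KASAN_SHADOW_SCALE_SHIFT) +
 * KASAN_SHADOW_OFFSET, so one shadow byte covers an 8-byte granule.
 */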
void __init kasan_init(void)
{
	phys_addr_t _start, _end;
	u64 i;

	kasan_populate_early_shadow((void *)KASAN_SHADOW_START,
				    (void *)kasan_mem_to_shadow((void *)VMALLOC_END));
	if (IS_ENABLED(CONFIG_KASAN_VMALLOC))
		kasan_shallow_populate(
			(void *)kasan_mem_to_shadow((void *)VMALLOC_START),
			(void *)kasan_mem_to_shadow((void *)VMALLOC_END));
	else
		kasan_populate_early_shadow(
			(void *)kasan_mem_to_shadow((void *)VMALLOC_START),
			(void *)kasan_mem_to_shadow((void *)VMALLOC_END));

	/* Populate real shadow for the linear mapping of each memory region. */
	for_each_mem_range(i, &_start, &_end) {
		void *start = (void *)__va(_start);
		void *end = (void *)__va(_end);

		if (start >= end)
			break;

		kasan_populate(kasan_mem_to_shadow(start), kasan_mem_to_shadow(end));
	}

	/* Remap the early shadow page read-only: it must never be written to. */
	for (i = 0; i < PTRS_PER_PTE; i++)
		set_pte(&kasan_early_shadow_pte[i],
			mk_pte(virt_to_page(kasan_early_shadow_page),
			       __pgprot(_PAGE_PRESENT | _PAGE_READ |
					_PAGE_ACCESSED)));

	/* Unpoison the zero shadow page and let KASAN report errors. */
	memset(kasan_early_shadow_page, KASAN_SHADOW_INIT, PAGE_SIZE);
	init_task.kasan_depth = 0;
}