riscv/kasan: add KASAN_VMALLOC support
author     Nylon Chen <nylon7@andestech.com>
           Sat, 16 Jan 2021 05:58:35 +0000 (13:58 +0800)
committer  Palmer Dabbelt <palmerdabbelt@google.com>
           Fri, 19 Feb 2021 07:17:59 +0000 (23:17 -0800)
This follows the approach taken by the x86 and s390 architectures.

The early shadow page is not mapped to cover the VMALLOC space.

Instead, the top-level page table is prepopulated for the range that
would otherwise be left empty; the lower levels are filled in
dynamically upon memory allocation while booting.

Signed-off-by: Nylon Chen <nylon7@andestech.com>
Signed-off-by: Nick Hu <nickhu@andestech.com>
Signed-off-by: Palmer Dabbelt <palmerdabbelt@google.com>
arch/riscv/Kconfig
arch/riscv/mm/kasan_init.c
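
The core idea of the patch, as a minimal sketch (illustration only, not the
code from the hunks below, which walk pgd/p4d/pud explicitly): make sure every
top-level entry covering the vmalloc shadow points at a next-level table, using
the early_alloc() helper added by this patch plus standard RISC-V page-table
helpers, so the generic KASAN_VMALLOC code only has to fill in the lower levels
later.

/*
 * Sketch only: shallow-populate the shadow of the vmalloc area at
 * top-level (PGD) granularity; the lower levels stay empty and are
 * filled on demand once vmalloc() maps real shadow pages.
 */
static void __init shallow_populate_sketch(unsigned long vaddr, unsigned long vend)
{
	while (vaddr < vend) {
		pgd_t *pgd = pgd_offset_k(vaddr);

		if (pgd_none(*pgd)) {
			/* one zeroed page for the next-level table, never freed */
			void *p = early_alloc(PAGE_SIZE, NUMA_NO_NODE);

			set_pgd(pgd, pfn_pgd(PFN_DOWN(__pa(p)), PAGE_TABLE));
		}
		vaddr += PGDIR_SIZE;
	}
}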

diff --git a/arch/riscv/Kconfig b/arch/riscv/Kconfig
index 8eadd1cbd524ebd90cb92c8de5e3bd797296c5ca..3832a537c5d64931d4570375ca7e6d9887fea7be 100644
--- a/arch/riscv/Kconfig
+++ b/arch/riscv/Kconfig
@@ -57,6 +57,7 @@ config RISCV
        select HAVE_ARCH_JUMP_LABEL
        select HAVE_ARCH_JUMP_LABEL_RELATIVE
        select HAVE_ARCH_KASAN if MMU && 64BIT
+       select HAVE_ARCH_KASAN_VMALLOC if MMU && 64BIT
        select HAVE_ARCH_KGDB
        select HAVE_ARCH_KGDB_QXFER_PKT
        select HAVE_ARCH_MMAP_RND_BITS if MMU
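
For context (not part of this patch): HAVE_ARCH_KASAN_VMALLOC is only a
capability flag; the user-visible option lives in lib/Kconfig.kasan and, around
this kernel version, looks roughly like the fragment below. Quoted from memory,
so treat the exact wording and dependencies as approximate.

config KASAN_VMALLOC
	bool "Back mappings in vmalloc space with real shadow memory"
	depends on KASAN_GENERIC && HAVE_ARCH_KASAN_VMALLOC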
diff --git a/arch/riscv/mm/kasan_init.c b/arch/riscv/mm/kasan_init.c
index 12ddd1f6bf70c8e96ac92b02e0b37c8e22c51043..4b9149f963d365d0900ef6fee883d62e80b91d97 100644
--- a/arch/riscv/mm/kasan_init.c
+++ b/arch/riscv/mm/kasan_init.c
@@ -9,6 +9,19 @@
 #include <linux/pgtable.h>
 #include <asm/tlbflush.h>
 #include <asm/fixmap.h>
+#include <asm/pgalloc.h>
+
+static __init void *early_alloc(size_t size, int node)
+{
+       void *ptr = memblock_alloc_try_nid(size, size,
+               __pa(MAX_DMA_ADDRESS), MEMBLOCK_ALLOC_ACCESSIBLE, node);
+
+       if (!ptr)
+               panic("%pS: Failed to allocate %zu bytes align=%zx nid=%d from=%llx\n",
+                       __func__, size, size, node, (u64)__pa(MAX_DMA_ADDRESS));
+
+       return ptr;
+}
 
 extern pgd_t early_pg_dir[PTRS_PER_PGD];
 asmlinkage void __init kasan_early_init(void)
@@ -83,6 +96,40 @@ static void __init populate(void *start, void *end)
        memset(start, 0, end - start);
 }
 
+void __init kasan_shallow_populate(void *start, void *end)
+{
+       unsigned long vaddr = (unsigned long)start & PAGE_MASK;
+       unsigned long vend = PAGE_ALIGN((unsigned long)end);
+       unsigned long pfn;
+       int index;
+       void *p;
+       pud_t *pud_dir, *pud_k;
+       pgd_t *pgd_dir, *pgd_k;
+       p4d_t *p4d_dir, *p4d_k;
+
+       while (vaddr < vend) {
+               index = pgd_index(vaddr);
+               pfn = csr_read(CSR_SATP) & SATP_PPN;
+               pgd_dir = (pgd_t *)pfn_to_virt(pfn) + index;
+               pgd_k = init_mm.pgd + index;
+               pgd_dir = pgd_offset_k(vaddr);
+               set_pgd(pgd_dir, *pgd_k);
+
+               p4d_dir = p4d_offset(pgd_dir, vaddr);
+               p4d_k  = p4d_offset(pgd_k, vaddr);
+
+               vaddr = (vaddr + PUD_SIZE) & PUD_MASK;
+               pud_dir = pud_offset(p4d_dir, vaddr);
+               pud_k = pud_offset(p4d_k, vaddr);
+
+               if (pud_present(*pud_dir)) {
+                       p = early_alloc(PAGE_SIZE, NUMA_NO_NODE);
+                       pud_populate(&init_mm, pud_dir, p);
+               }
+               vaddr += PAGE_SIZE;
+       }
+}
+
 void __init kasan_init(void)
 {
        phys_addr_t _start, _end;
        phys_addr_t _start, _end;
@@ -90,7 +137,15 @@ void __init kasan_init(void)
 
        kasan_populate_early_shadow((void *)KASAN_SHADOW_START,
                                    (void *)kasan_mem_to_shadow((void *)
-                                                               VMALLOC_END));
+                                                               VMEMMAP_END));
+       if (IS_ENABLED(CONFIG_KASAN_VMALLOC))
+               kasan_shallow_populate(
+                       (void *)kasan_mem_to_shadow((void *)VMALLOC_START),
+                       (void *)kasan_mem_to_shadow((void *)VMALLOC_END));
+       else
+               kasan_populate_early_shadow(
+                       (void *)kasan_mem_to_shadow((void *)VMALLOC_START),
+                       (void *)kasan_mem_to_shadow((void *)VMALLOC_END));
 
        for_each_mem_range(i, &_start, &_end) {
                void *start = (void *)_start;
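
The practical effect of the CONFIG_KASAN_VMALLOC branch above: vmalloc mappings
now get real, page-granular shadow, so out-of-bounds accesses inside a
vmalloc'ed page can be reported. A hypothetical smoke test, modeled on the
vmalloc_oob case in lib/test_kasan.c (the module wrapper and names here are
illustrative, not from this patch):

#include <linux/module.h>
#include <linux/vmalloc.h>

static int __init kasan_vmalloc_smoke_init(void)
{
	char *p = vmalloc(3000);	/* rounded up to one full page */

	if (!p)
		return -ENOMEM;

	/*
	 * Past the requested 3000 bytes but still inside the mapped page:
	 * with CONFIG_KASAN_VMALLOC=y this should produce a KASAN report.
	 */
	((volatile char *)p)[3100];

	vfree(p);
	return 0;
}
module_init(kasan_vmalloc_smoke_init);
MODULE_LICENSE("GPL");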