KVM: arm64: Reserve memory for host stage 2
author     Quentin Perret <qperret@google.com>    Fri, 19 Mar 2021 10:01:34 +0000
committer  Marc Zyngier <maz@kernel.org>          Fri, 19 Mar 2021 12:01:22 +0000

Extend the memory pool allocated for the hypervisor to include enough
pages to map all of memory at page granularity for the host stage 2.
While at it, also reserve some memory for device mappings.

Acked-by: Will Deacon <will@kernel.org>
Signed-off-by: Quentin Perret <qperret@google.com>
Signed-off-by: Marc Zyngier <maz@kernel.org>
Link: https://lore.kernel.org/r/20210319100146.1149909-27-qperret@google.com
arch/arm64/kvm/hyp/include/nvhe/mm.h
arch/arm64/kvm/hyp/nvhe/setup.c
arch/arm64/kvm/hyp/reserved_mem.c
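
For context, the sizing helpers touched below all lean on __hyp_pgtable_max_pages(), which upper-bounds the number of page-table pages needed to map a range at page granularity by summing one level of tables at a time. A minimal standalone sketch of that calculation (constant values assume 4 KiB pages and 8-byte descriptors; the macro definitions here are illustrative, the real ones come from kernel headers):

/*
 * Sketch of the worst-case page-table sizing logic. With 4 KiB pages,
 * each 512-entry table page covers 512 entries of the level below it,
 * so each level needs DIV_ROUND_UP(nr_pages, 512) table pages.
 */
#define PTRS_PER_PTE		512	/* illustrative: 4 KiB / 8-byte PTEs */
#define KVM_PGTABLE_MAX_LEVELS	4
#define DIV_ROUND_UP(n, d)	(((n) + (d) - 1) / (d))

static unsigned long pgtable_max_pages(unsigned long nr_pages)
{
	unsigned long total = 0, i;

	/* Provision for the worst case: every level fully populated. */
	for (i = 0; i < KVM_PGTABLE_MAX_LEVELS; i++) {
		nr_pages = DIV_ROUND_UP(nr_pages, PTRS_PER_PTE);
		total += nr_pages;
	}

	return total;
}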

diff --git a/arch/arm64/kvm/hyp/include/nvhe/mm.h b/arch/arm64/kvm/hyp/include/nvhe/mm.h
index ac0f7fcffd08144ac826a6fc4bbb2a8574a627c9..0095f62897429ece1ae2d4221a8e898c0e30b98c 100644
--- a/arch/arm64/kvm/hyp/include/nvhe/mm.h
+++ b/arch/arm64/kvm/hyp/include/nvhe/mm.h
@@ -53,7 +53,7 @@ static inline unsigned long __hyp_pgtable_max_pages(unsigned long nr_pages)
        return total;
 }
 
-static inline unsigned long hyp_s1_pgtable_pages(void)
+static inline unsigned long __hyp_pgtable_total_pages(void)
 {
        unsigned long res = 0, i;
 
@@ -63,9 +63,34 @@ static inline unsigned long hyp_s1_pgtable_pages(void)
                res += __hyp_pgtable_max_pages(reg->size >> PAGE_SHIFT);
        }
 
+       return res;
+}
+
+static inline unsigned long hyp_s1_pgtable_pages(void)
+{
+       unsigned long res;
+
+       res = __hyp_pgtable_total_pages();
+
        /* Allow 1 GiB for private mappings */
        res += __hyp_pgtable_max_pages(SZ_1G >> PAGE_SHIFT);
 
        return res;
 }
+
+static inline unsigned long host_s2_mem_pgtable_pages(void)
+{
+       /*
+        * Include an extra 16 pages to safely upper-bound the worst case of
+        * concatenated pgds.
+        */
+       return __hyp_pgtable_total_pages() + 16;
+}
+
+static inline unsigned long host_s2_dev_pgtable_pages(void)
+{
+       /* Allow 1 GiB for MMIO mappings */
+       return __hyp_pgtable_max_pages(SZ_1G >> PAGE_SHIFT);
+}
+
 #endif /* __KVM_HYP_MM_H */
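
The extra 16 pages in host_s2_mem_pgtable_pages() cover the architectural worst case for the stage-2 start level: ARMv8 permits up to 16 initial-level translation tables to be concatenated, so the bound simply adds 16 pages on top of the per-level sum. A worked example with illustrative numbers:

/*
 * Illustrative worst case: mapping 2 GiB (524288 4 KiB pages) at page
 * granularity needs at most
 *
 *   level 3: DIV_ROUND_UP(524288, 512) = 1024 table pages
 *   level 2: DIV_ROUND_UP(1024,   512) =    2 table pages
 *   level 1: DIV_ROUND_UP(2,      512) =    1 table page
 *   level 0: DIV_ROUND_UP(1,      512) =    1 table page
 *
 * i.e. 1028 pages, plus the 16 extra pages reserved above in case the
 * host stage 2 concatenates up to 16 tables at its start level.
 */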
diff --git a/arch/arm64/kvm/hyp/nvhe/setup.c b/arch/arm64/kvm/hyp/nvhe/setup.c
index 1e8bcd8b0299acfaf2b7dc69ed61e09e951e260a..c1a3e7e0ebbc0964b9a7bce103ca6ea0cf6b2301 100644
--- a/arch/arm64/kvm/hyp/nvhe/setup.c
+++ b/arch/arm64/kvm/hyp/nvhe/setup.c
@@ -24,6 +24,8 @@ unsigned long hyp_nr_cpus;
 
 static void *vmemmap_base;
 static void *hyp_pgt_base;
+static void *host_s2_mem_pgt_base;
+static void *host_s2_dev_pgt_base;
 
 static int divide_memory_pool(void *virt, unsigned long size)
 {
@@ -42,6 +44,16 @@ static int divide_memory_pool(void *virt, unsigned long size)
        if (!hyp_pgt_base)
                return -ENOMEM;
 
+       nr_pages = host_s2_mem_pgtable_pages();
+       host_s2_mem_pgt_base = hyp_early_alloc_contig(nr_pages);
+       if (!host_s2_mem_pgt_base)
+               return -ENOMEM;
+
+       nr_pages = host_s2_dev_pgtable_pages();
+       host_s2_dev_pgt_base = hyp_early_alloc_contig(nr_pages);
+       if (!host_s2_dev_pgt_base)
+               return -ENOMEM;
+
        return 0;
 }
 
diff --git a/arch/arm64/kvm/hyp/reserved_mem.c b/arch/arm64/kvm/hyp/reserved_mem.c
index 9bc6a6d27904168bfe4aadc0efd909f973daa89c..fd42705a3c2628e48694305c441b6baef44a193d 100644
--- a/arch/arm64/kvm/hyp/reserved_mem.c
+++ b/arch/arm64/kvm/hyp/reserved_mem.c
@@ -52,6 +52,8 @@ void __init kvm_hyp_reserve(void)
        }
 
        hyp_mem_pages += hyp_s1_pgtable_pages();
+       hyp_mem_pages += host_s2_mem_pgtable_pages();
+       hyp_mem_pages += host_s2_dev_pgtable_pages();
 
        /*
         * The hyp_vmemmap needs to be backed by pages, but these pages
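
Putting rough numbers on the new reservation: for a machine with a single 2 GiB memblock region and 4 KiB pages, the three terms above work out as follows (illustrative arithmetic; the per-CPU and vmemmap contributions handled elsewhere in kvm_hyp_reserve() are not shown):

/*
 * hyp_s1_pgtable_pages()      = 1028 + 515 = 1543 (memory + 1 GiB private)
 * host_s2_mem_pgtable_pages() = 1028 + 16  = 1044
 * host_s2_dev_pgtable_pages() =        515        (1 GiB of MMIO)
 *
 * Total: 3102 pages, i.e. roughly 12 MiB reserved from bootmem, of
 * which the two host stage-2 terms added here account for ~6 MiB.
 */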