KVM: arm64: Unmap 'kvm_arm_hyp_percpu_base' from the host
author     Quentin Perret <qperret@google.com>   Thu, 10 Nov 2022 19:02:54 +0000
committer  Marc Zyngier <maz@kernel.org>         Fri, 11 Nov 2022 17:19:35 +0000
When pKVM is enabled, the hypervisor at EL2 does not trust the host at
EL1 and must therefore prevent it from having unrestricted access to
internal hypervisor state.

The 'kvm_arm_hyp_percpu_base' array holds the base addresses of the
hypervisor's per-cpu allocations, so move it into the nVHE code where it
cannot be modified by the untrusted host at EL1.
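
To make the pointer arithmetic concrete, here is a minimal, standalone
sketch of the base-plus-offset scheme that the per_cpu_ptr_nvhe_sym()
macro (first hunk below) implements. All names and sizes here are
hypothetical stand-ins, not kernel code:

#include <stdio.h>

/*
 * Stand-ins: 'section' plays the nVHE per-cpu ELF section beginning at
 * __per_cpu_start, 'hyp_sym' is a variable placed inside it, and
 * 'percpu_base[]' mimics kvm_arm_hyp_percpu_base[], holding one
 * allocated copy of the section per CPU.
 */
static char section[64];
static char *hyp_sym = &section[16];
static char cpu_copies[2][64];
static unsigned long percpu_base[2];

/* Same arithmetic as per_cpu_ptr_nvhe_sym(): the CPU's base plus the
 * symbol's offset from the section start, or NULL before allocation. */
static void *per_cpu_ptr_sketch(int cpu)
{
	unsigned long base = percpu_base[cpu];
	unsigned long off = (unsigned long)hyp_sym - (unsigned long)section;

	return base ? (void *)(base + off) : NULL;
}

int main(void)
{
	percpu_base[0] = (unsigned long)cpu_copies[0];
	percpu_base[1] = (unsigned long)cpu_copies[1];

	for (int cpu = 0; cpu < 2; cpu++)
		printf("cpu%d: %p\n", cpu, per_cpu_ptr_sketch(cpu));
	return 0;
}

During init_hyp_mode() the host still populates the array (last two
arm.c hunks below); the point of this patch is that the array itself now
lives in hypervisor-owned memory rather than in a kernel data structure
the host could rewrite at any time.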

Tested-by: Vincent Donnefort <vdonnefort@google.com>
Signed-off-by: Quentin Perret <qperret@google.com>
Signed-off-by: Will Deacon <will@kernel.org>
Signed-off-by: Marc Zyngier <maz@kernel.org>
Link: https://lore.kernel.org/r/20221110190259.26861-22-will@kernel.org
arch/arm64/include/asm/kvm_asm.h
arch/arm64/kernel/image-vars.h
arch/arm64/kvm/arm.c
arch/arm64/kvm/hyp/nvhe/hyp-smp.c

diff --git a/arch/arm64/include/asm/kvm_asm.h b/arch/arm64/include/asm/kvm_asm.h
index de52ba775d48f77d57b681339bf1767f7ef79d23..43c3bc0f9544d9a3f477427315d07e6d17fafb94 100644
--- a/arch/arm64/include/asm/kvm_asm.h
+++ b/arch/arm64/include/asm/kvm_asm.h
@@ -109,7 +109,7 @@ enum __kvm_host_smccc_func {
 #define per_cpu_ptr_nvhe_sym(sym, cpu)                                         \
        ({                                                                      \
                unsigned long base, off;                                        \
-               base = kvm_arm_hyp_percpu_base[cpu];                            \
+               base = kvm_nvhe_sym(kvm_arm_hyp_percpu_base)[cpu];              \
                off = (unsigned long)&CHOOSE_NVHE_SYM(sym) -                    \
                      (unsigned long)&CHOOSE_NVHE_SYM(__per_cpu_start);         \
                base ? (typeof(CHOOSE_NVHE_SYM(sym))*)(base + off) : NULL;      \
@@ -214,7 +214,7 @@ DECLARE_KVM_HYP_SYM(__kvm_hyp_vector);
 #define __kvm_hyp_init         CHOOSE_NVHE_SYM(__kvm_hyp_init)
 #define __kvm_hyp_vector       CHOOSE_HYP_SYM(__kvm_hyp_vector)
 
-extern unsigned long kvm_arm_hyp_percpu_base[NR_CPUS];
+extern unsigned long kvm_nvhe_sym(kvm_arm_hyp_percpu_base)[];
 DECLARE_KVM_NVHE_SYM(__per_cpu_start);
 DECLARE_KVM_NVHE_SYM(__per_cpu_end);
 
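For reference, kvm_nvhe_sym() is the existing name-mangling helper
defined earlier in this same header: nVHE objects are compiled with
every symbol prefixed, so the new extern refers to the EL2 object's copy
of the array rather than a kernel-proper symbol of the same name.

/* From arch/arm64/include/asm/kvm_asm.h */
#define kvm_nvhe_sym(sym)	__kvm_nvhe_##sym

/* The declaration above therefore expands to: */
extern unsigned long __kvm_nvhe_kvm_arm_hyp_percpu_base[];
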
diff --git a/arch/arm64/kernel/image-vars.h b/arch/arm64/kernel/image-vars.h
index 7f4e43bfaade740523e7e0c0c4bceeb254919b01..ae8f37f4aa8c63067e91e6786265455cca5fff82 100644
--- a/arch/arm64/kernel/image-vars.h
+++ b/arch/arm64/kernel/image-vars.h
@@ -89,9 +89,6 @@ KVM_NVHE_ALIAS(gic_nonsecure_priorities);
 KVM_NVHE_ALIAS(__start___kvm_ex_table);
 KVM_NVHE_ALIAS(__stop___kvm_ex_table);
 
-/* Array containing bases of nVHE per-CPU memory regions. */
-KVM_NVHE_ALIAS(kvm_arm_hyp_percpu_base);
-
 /* PMU available static key */
 #ifdef CONFIG_HW_PERF_EVENTS
 KVM_NVHE_ALIAS(kvm_arm_pmu_available);
diff --git a/arch/arm64/kvm/arm.c b/arch/arm64/kvm/arm.c
index f78eefa02f6bdd7ba7dd403885fde44612a1dd71..25467f24803ddb1c78dba833482afdb8ea4c090f 100644
--- a/arch/arm64/kvm/arm.c
+++ b/arch/arm64/kvm/arm.c
@@ -51,7 +51,6 @@ DEFINE_STATIC_KEY_FALSE(kvm_protected_mode_initialized);
 DECLARE_KVM_HYP_PER_CPU(unsigned long, kvm_hyp_vector);
 
 DEFINE_PER_CPU(unsigned long, kvm_arm_hyp_stack_page);
-unsigned long kvm_arm_hyp_percpu_base[NR_CPUS];
 DECLARE_KVM_NVHE_PER_CPU(struct kvm_nvhe_init_params, kvm_init_params);
 
 static bool vgic_present;
@@ -1857,13 +1856,13 @@ static void teardown_hyp_mode(void)
        free_hyp_pgds();
        for_each_possible_cpu(cpu) {
                free_page(per_cpu(kvm_arm_hyp_stack_page, cpu));
-               free_pages(kvm_arm_hyp_percpu_base[cpu], nvhe_percpu_order());
+               free_pages(kvm_nvhe_sym(kvm_arm_hyp_percpu_base)[cpu], nvhe_percpu_order());
        }
 }
 
 static int do_pkvm_init(u32 hyp_va_bits)
 {
-       void *per_cpu_base = kvm_ksym_ref(kvm_arm_hyp_percpu_base);
+       void *per_cpu_base = kvm_ksym_ref(kvm_nvhe_sym(kvm_arm_hyp_percpu_base));
        int ret;
 
        preempt_disable();
@@ -1967,7 +1966,7 @@ static int init_hyp_mode(void)
 
                page_addr = page_address(page);
                memcpy(page_addr, CHOOSE_NVHE_SYM(__per_cpu_start), nvhe_percpu_size());
-               kvm_arm_hyp_percpu_base[cpu] = (unsigned long)page_addr;
+               kvm_nvhe_sym(kvm_arm_hyp_percpu_base)[cpu] = (unsigned long)page_addr;
        }
 
        /*
@@ -2060,7 +2059,7 @@ static int init_hyp_mode(void)
        }
 
        for_each_possible_cpu(cpu) {
-               char *percpu_begin = (char *)kvm_arm_hyp_percpu_base[cpu];
+               char *percpu_begin = (char *)kvm_nvhe_sym(kvm_arm_hyp_percpu_base)[cpu];
                char *percpu_end = percpu_begin + nvhe_percpu_size();
 
                /* Map Hyp percpu pages */
diff --git a/arch/arm64/kvm/hyp/nvhe/hyp-smp.c b/arch/arm64/kvm/hyp/nvhe/hyp-smp.c
index 9f54833af400972670ceddec0a7904d190a005e5..04d194583f1e47a42079726276bc941cdc146641 100644
--- a/arch/arm64/kvm/hyp/nvhe/hyp-smp.c
+++ b/arch/arm64/kvm/hyp/nvhe/hyp-smp.c
@@ -23,6 +23,8 @@ u64 cpu_logical_map(unsigned int cpu)
        return hyp_cpu_logical_map[cpu];
 }
 
+unsigned long __ro_after_init kvm_arm_hyp_percpu_base[NR_CPUS];
+
 unsigned long __hyp_per_cpu_offset(unsigned int cpu)
 {
        unsigned long *cpu_base_array;
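
The hunk ends at the context boundary above; for completeness, the EL2
consumer of the array (unchanged by this patch) computes each CPU's
per-cpu offset roughly as follows, a paraphrase of __hyp_per_cpu_offset()
from this file rather than a verbatim quote:

unsigned long __hyp_per_cpu_offset(unsigned int cpu)
{
	unsigned long this_cpu_base, elf_base;

	BUG_ON(cpu >= ARRAY_SIZE(kvm_arm_hyp_percpu_base));

	/* Translate the kernel VA stored by init_hyp_mode() into a
	 * hypervisor VA, then return its distance from the section's
	 * link-time base address. */
	this_cpu_base = kern_hyp_va(kvm_arm_hyp_percpu_base[cpu]);
	elf_base = (unsigned long)&__per_cpu_start;
	return this_cpu_base - elf_base;
}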