KVM: arm64: Unify stage-2 programming behind __load_stage2()
author     Marc Zyngier <maz@kernel.org>
Fri, 6 Aug 2021 11:31:07 +0000 (12:31 +0100)
committer  Marc Zyngier <maz@kernel.org>
Fri, 20 Aug 2021 08:11:28 +0000 (09:11 +0100)
Protected mode relies on a separate helper to load the
S2 context. Move over to the __load_guest_stage2() helper
instead, and rename it to __load_stage2() to present a unified
interface.
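
As a rough sketch of the interface this leaves behind (user-space stub
types and a stand-in for write_sysreg(); illustration only, not the
kernel code), the unified helper looks like this:

  #include <stdint.h>
  #include <stdio.h>

  struct kvm_s2_mmu { uint64_t vttbr; };
  struct kvm_arch   { uint64_t vtcr; struct kvm_s2_mmu mmu; };

  static void write_sysreg_stub(const char *reg, uint64_t val)
  {
          printf("%s <- %#llx\n", reg, (unsigned long long)val);
  }

  /*
   * One helper for every stage-2 consumer: it takes the kvm_arch
   * pointer and fetches VTCR itself, so the old __load_guest_stage2()
   * wrapper becomes unnecessary.
   */
  static inline void __load_stage2(struct kvm_s2_mmu *mmu, struct kvm_arch *arch)
  {
          write_sysreg_stub("vtcr_el2", arch->vtcr);
          write_sysreg_stub("vttbr_el2", mmu->vttbr);
  }

  int main(void)
  {
          /* Arbitrary illustrative values. */
          struct kvm_arch host = { .vtcr = 0x80000000, .mmu = { .vttbr = 0x1 } };

          /* Host (protected mode) and guest paths now make the same call. */
          __load_stage2(&host.mmu, &host);
          return 0;
  }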

Cc: Catalin Marinas <catalin.marinas@arm.com>
Cc: Jade Alglave <jade.alglave@arm.com>
Cc: Shameer Kolothum <shameerali.kolothum.thodi@huawei.com>
Signed-off-by: Marc Zyngier <maz@kernel.org>
Signed-off-by: Will Deacon <will@kernel.org>
Link: https://lore.kernel.org/r/20210806113109.2475-5-will@kernel.org
arch/arm64/include/asm/kvm_mmu.h
arch/arm64/kvm/hyp/include/nvhe/mem_protect.h
arch/arm64/kvm/hyp/nvhe/mem_protect.c
arch/arm64/kvm/hyp/nvhe/switch.c
arch/arm64/kvm/hyp/nvhe/tlb.c
arch/arm64/kvm/hyp/vhe/switch.c
arch/arm64/kvm/hyp/vhe/tlb.c

diff --git a/arch/arm64/include/asm/kvm_mmu.h b/arch/arm64/include/asm/kvm_mmu.h
index 05e089653a1a78b126d4aa690ef44e12a97c4cd3..08bc81f6944be0d9ee3ee3e4d0152bbf073f0c20 100644
@@ -267,9 +267,10 @@ static __always_inline u64 kvm_get_vttbr(struct kvm_s2_mmu *mmu)
  * Must be called from hyp code running at EL2 with an updated VTTBR
  * and interrupts disabled.
  */
-static __always_inline void __load_stage2(struct kvm_s2_mmu *mmu, unsigned long vtcr)
+static __always_inline void __load_stage2(struct kvm_s2_mmu *mmu,
+                                         struct kvm_arch *arch)
 {
-       write_sysreg(vtcr, vtcr_el2);
+       write_sysreg(arch->vtcr, vtcr_el2);
        write_sysreg(kvm_get_vttbr(mmu), vttbr_el2);
 
        /*
@@ -280,12 +281,6 @@ static __always_inline void __load_stage2(struct kvm_s2_mmu *mmu, unsigned long
        asm(ALTERNATIVE("nop", "isb", ARM64_WORKAROUND_SPECULATIVE_AT));
 }
 
-static __always_inline void __load_guest_stage2(struct kvm_s2_mmu *mmu,
-                                               struct kvm_arch *arch)
-{
-       __load_stage2(mmu, arch->vtcr);
-}
-
 static inline struct kvm *kvm_s2_mmu_to_kvm(struct kvm_s2_mmu *mmu)
 {
        return container_of(mmu->arch, struct kvm, arch);
diff --git a/arch/arm64/kvm/hyp/include/nvhe/mem_protect.h b/arch/arm64/kvm/hyp/include/nvhe/mem_protect.h
index 9c227d87c36d3c23a04d3ab19c9ed0a66c7fcf74..8901dc95d7dea4fec722ff8abf6b381cf400a511 100644
@@ -29,7 +29,7 @@ void handle_host_mem_abort(struct kvm_cpu_context *host_ctxt);
 static __always_inline void __load_host_stage2(void)
 {
        if (static_branch_likely(&kvm_protected_mode_initialized))
-               __load_stage2(&host_kvm.arch.mmu, host_kvm.arch.vtcr);
+               __load_stage2(&host_kvm.arch.mmu, &host_kvm.arch);
        else
                write_sysreg(0, vttbr_el2);
 }
diff --git a/arch/arm64/kvm/hyp/nvhe/mem_protect.c b/arch/arm64/kvm/hyp/nvhe/mem_protect.c
index d938ce95d3bdf255251cc0c69722c89275832749..36aea13c9e5a49b7fb27c0bedd890ef332930eac 100644
@@ -126,7 +126,7 @@ int __pkvm_prot_finalize(void)
        kvm_flush_dcache_to_poc(params, sizeof(*params));
 
        write_sysreg(params->hcr_el2, hcr_el2);
-       __load_stage2(&host_kvm.arch.mmu, host_kvm.arch.vtcr);
+       __load_stage2(&host_kvm.arch.mmu, &host_kvm.arch);
 
        /*
         * Make sure to have an ISB before the TLB maintenance below but only
diff --git a/arch/arm64/kvm/hyp/nvhe/switch.c b/arch/arm64/kvm/hyp/nvhe/switch.c
index e50a490829234c5646d28ec688e78b5c72a72f2d..3e7ad32b3f0da8a5079d077c300eb3417e2ce44d 100644
@@ -215,7 +215,7 @@ int __kvm_vcpu_run(struct kvm_vcpu *vcpu)
        __sysreg_restore_state_nvhe(guest_ctxt);
 
        mmu = kern_hyp_va(vcpu->arch.hw_mmu);
-       __load_guest_stage2(mmu, kern_hyp_va(mmu->arch));
+       __load_stage2(mmu, kern_hyp_va(mmu->arch));
        __activate_traps(vcpu);
 
        __hyp_vgic_restore_state(vcpu);
diff --git a/arch/arm64/kvm/hyp/nvhe/tlb.c b/arch/arm64/kvm/hyp/nvhe/tlb.c
index 76229407d8f01854470ae0b0afc90ee933fa0d26..d296d617f589633924fa11eddfb75ace470d703b 100644
@@ -34,12 +34,12 @@ static void __tlb_switch_to_guest(struct kvm_s2_mmu *mmu,
        }
 
        /*
-        * __load_guest_stage2() includes an ISB only when the AT
+        * __load_stage2() includes an ISB only when the AT
         * workaround is applied. Take care of the opposite condition,
         * ensuring that we always have an ISB, but not two ISBs back
         * to back.
         */
-       __load_guest_stage2(mmu, kern_hyp_va(mmu->arch));
+       __load_stage2(mmu, kern_hyp_va(mmu->arch));
        asm(ALTERNATIVE("isb", "nop", ARM64_WORKAROUND_SPECULATIVE_AT));
 }
 
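The comment above describes a pairing that can be modelled in a few lines
(a plain boolean stands in for the ARM64_WORKAROUND_SPECULATIVE_AT
capability; the kernel uses ALTERNATIVE() patching, not a runtime check):

  #include <stdbool.h>
  #include <stdio.h>

  static int isb_count;

  static void isb(void) { isb_count++; }

  /* Tail of __load_stage2(): ALTERNATIVE("nop", "isb", workaround). */
  static void load_stage2_barrier(bool workaround)
  {
          if (workaround)
                  isb();
  }

  /* In __tlb_switch_to_guest(): ALTERNATIVE("isb", "nop", workaround). */
  static void tlb_switch_barrier(bool workaround)
  {
          if (!workaround)
                  isb();
  }

  int main(void)
  {
          for (int w = 0; w < 2; w++) {
                  isb_count = 0;
                  load_stage2_barrier(w);
                  tlb_switch_barrier(w);
                  /* Exactly one ISB either way, never two back to back. */
                  printf("workaround=%d -> %d isb\n", w, isb_count);
          }
          return 0;
  }
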
diff --git a/arch/arm64/kvm/hyp/vhe/switch.c b/arch/arm64/kvm/hyp/vhe/switch.c
index 0cb7523a501a8941239c2f3b43bfebcaee4c304d..709f6438283ea62bc97b0ec9f9fc7f314114deb0 100644
@@ -124,11 +124,11 @@ static int __kvm_vcpu_run_vhe(struct kvm_vcpu *vcpu)
         *
         * We have already configured the guest's stage 1 translation in
         * kvm_vcpu_load_sysregs_vhe above.  We must now call
-        * __load_guest_stage2 before __activate_traps, because
-        * __load_guest_stage2 configures stage 2 translation, and
+        * __load_stage2 before __activate_traps, because
+        * __load_stage2 configures stage 2 translation, and
         * __activate_traps clear HCR_EL2.TGE (among other things).
         */
-       __load_guest_stage2(vcpu->arch.hw_mmu, vcpu->arch.hw_mmu->arch);
+       __load_stage2(vcpu->arch.hw_mmu, vcpu->arch.hw_mmu->arch);
        __activate_traps(vcpu);
 
        __kvm_adjust_pc(vcpu);
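
The ordering requirement spelled out in the comment above can be sketched
as follows (user-space stand-ins only; HCR_TGE is bit 27 of HCR_EL2, and
the assert merely illustrates why __load_stage2() must come first):

  #include <assert.h>
  #include <stdbool.h>

  #define HCR_TGE (1UL << 27)

  static bool stage1_configured, stage2_configured;
  static unsigned long hcr_el2 = HCR_TGE;

  static void load_stage2(void)
  {
          stage2_configured = true;
  }

  static void activate_traps(void)
  {
          /* Erratum 1165522: only safe once both stages are in place. */
          assert(stage1_configured && stage2_configured);
          hcr_el2 &= ~HCR_TGE;            /* clears TGE, among other things */
  }

  int main(void)
  {
          stage1_configured = true;       /* kvm_vcpu_load_sysregs_vhe() earlier */
          load_stage2();                  /* stage 2 in place ...                */
          activate_traps();               /* ... before TGE is cleared           */
          return 0;
  }
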
diff --git a/arch/arm64/kvm/hyp/vhe/tlb.c b/arch/arm64/kvm/hyp/vhe/tlb.c
index 5e9fb3989e0bb775c60fa0a915bdbfa7400bf086..24cef9b87f9e9cee40e7e85e38236931a9d5a29a 100644
@@ -50,10 +50,10 @@ static void __tlb_switch_to_guest(struct kvm_s2_mmu *mmu,
         *
         * ARM erratum 1165522 requires some special handling (again),
         * as we need to make sure both stages of translation are in
-        * place before clearing TGE. __load_guest_stage2() already
+        * place before clearing TGE. __load_stage2() already
         * has an ISB in order to deal with this.
         */
-       __load_guest_stage2(mmu, mmu->arch);
+       __load_stage2(mmu, mmu->arch);
        val = read_sysreg(hcr_el2);
        val &= ~HCR_TGE;
        write_sysreg(val, hcr_el2);