KVM: arm64: nv: Load guest FP state for ZCR_EL2 trap
author Oliver Upton <oliver.upton@linux.dev>
Thu, 20 Jun 2024 16:46:47 +0000 (16:46 +0000)
committer Oliver Upton <oliver.upton@linux.dev>
Thu, 20 Jun 2024 19:04:49 +0000 (19:04 +0000)
Round out the ZCR_EL2 gymnastics by loading the guest's SVE state in the
fast path when the guest hypervisor tries to access ZCR_EL2.

Reviewed-by: Marc Zyngier <maz@kernel.org>
Link: https://lore.kernel.org/r/20240620164653.1130714-11-oliver.upton@linux.dev
Signed-off-by: Oliver Upton <oliver.upton@linux.dev>
arch/arm64/kvm/hyp/include/hyp/switch.h
arch/arm64/kvm/hyp/vhe/switch.c
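
Before the diffs, a brief sketch of the flow this change completes: a ZCR_EL2
access from the guest hypervisor traps as an ESR_ELx_EC_SYS64 exit, the fast
path loads the guest's FP/SVE state, and the exit is then forwarded unchanged
to the slow-path sysreg emulation. The following is a stand-alone user-space
model of that decision, not kernel code; struct vcpu_model, load_guest_fp()
and handle_zcr_el2_fast() are illustrative stand-ins for the helpers shown in
the hunks below.

#include <stdbool.h>
#include <stdio.h>

struct vcpu_model {
	bool has_nv;		/* nested virt enabled for this vCPU */
	bool is_hyp_ctxt;	/* the trap came from the guest hypervisor */
	bool guest_owns_fp;	/* guest FP/SVE regs already live on the CPU */
};

/* Stand-in for kvm_hyp_handle_fpsimd() reached via the SYS64 case. */
static bool load_guest_fp(struct vcpu_model *vcpu)
{
	/* Mirrors the WARN_ON_ONCE(!is_hyp_ctxt()) sanity check. */
	if (!vcpu->is_hyp_ctxt)
		return false;

	vcpu->guest_owns_fp = true;
	return true;
}

/*
 * Mirrors the shape of kvm_hyp_handle_zcr_el2(): never handle the
 * access here, only make sure FP state is resident before deferring.
 */
static bool handle_zcr_el2_fast(struct vcpu_model *vcpu, bool trapped_zcr_el2)
{
	if (!vcpu->has_nv || !trapped_zcr_el2)
		return false;

	if (vcpu->guest_owns_fp)
		return false;

	load_guest_fp(vcpu);
	return false;		/* i.e. fall through to the slow path */
}

int main(void)
{
	struct vcpu_model vcpu = { .has_nv = true, .is_hyp_ctxt = true };
	bool handled = handle_zcr_el2_fast(&vcpu, true);

	printf("fast path handled: %s, FP state loaded: %s\n",
	       handled ? "yes" : "no", vcpu.guest_owns_fp ? "yes" : "no");
	return 0;
}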

arch/arm64/kvm/hyp/include/hyp/switch.h
index ab70e6e6bb0cfea5a518cf76cbc2f7055f6f5233..aa768d97ddd60383d884ac35c34848ca1b3653dd 100644 (file)
@@ -371,6 +371,10 @@ static bool kvm_hyp_handle_fpsimd(struct kvm_vcpu *vcpu, u64 *exit_code)
                if (guest_hyp_fpsimd_traps_enabled(vcpu))
                        return false;
                break;
+       case ESR_ELx_EC_SYS64:
+               if (WARN_ON_ONCE(!is_hyp_ctxt(vcpu)))
+                       return false;
+               fallthrough;
        case ESR_ELx_EC_SVE:
                if (!sve_guest)
                        return false;
arch/arm64/kvm/hyp/vhe/switch.c
index 3d51789e7d7704af6632f634a7f5c3e61e7b4f32..f4ce892edcd6ca5868574bafbead6692fb3e352a 100644 (file)
@@ -288,11 +288,38 @@ static bool kvm_hyp_handle_cpacr_el1(struct kvm_vcpu *vcpu, u64 *exit_code)
        return true;
 }
 
+static bool kvm_hyp_handle_zcr_el2(struct kvm_vcpu *vcpu, u64 *exit_code)
+{
+       u32 sysreg = esr_sys64_to_sysreg(kvm_vcpu_get_esr(vcpu));
+
+       if (!vcpu_has_nv(vcpu))
+               return false;
+
+       if (sysreg != SYS_ZCR_EL2)
+               return false;
+
+       if (guest_owns_fp_regs())
+               return false;
+
+       /*
+        * ZCR_EL2 traps are handled in the slow path, with the expectation
+        * that the guest's FP context has already been loaded onto the CPU.
+        *
+        * Load the guest's FP context and unconditionally forward to the
+        * slow path for handling (i.e. return false).
+        */
+       kvm_hyp_handle_fpsimd(vcpu, exit_code);
+       return false;
+}
+
 static bool kvm_hyp_handle_sysreg_vhe(struct kvm_vcpu *vcpu, u64 *exit_code)
 {
        if (kvm_hyp_handle_cpacr_el1(vcpu, exit_code))
                return true;
 
+       if (kvm_hyp_handle_zcr_el2(vcpu, exit_code))
+               return true;
+
        return kvm_hyp_handle_sysreg(vcpu, exit_code);
 }
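
As the comment in kvm_hyp_handle_zcr_el2() states, the fast path never
completes the ZCR_EL2 access itself; it only upholds the slow path's
assumption that the guest's FP context is already on the CPU when the
register is emulated. A minimal stand-alone sketch of that expectation, with
struct guest_sve and emulate_zcr_el2_write() as hypothetical stand-ins rather
than the kernel's actual emulation code:

#include <stdbool.h>
#include <stdio.h>

struct guest_sve {
	bool loaded;		/* set by the fast path before forwarding */
	unsigned int vq;	/* current vector length, in quadwords */
};

/* Hypothetical stand-in for the slow-path ZCR_EL2 write emulation. */
static int emulate_zcr_el2_write(struct guest_sve *sve, unsigned int new_vq)
{
	if (!sve->loaded)
		return -1;	/* precondition violated: fast path didn't run */

	sve->vq = new_vq;
	return 0;
}

int main(void)
{
	struct guest_sve sve = { .loaded = true, .vq = 4 };

	if (!emulate_zcr_el2_write(&sve, 2))
		printf("vq now %u\n", sve.vq);
	return 0;
}

Returning false unconditionally from the fast-path handler keeps the
emulation in one place: the fast path only makes the slow path's precondition
hold, it never becomes a second implementation of the register access.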