KVM: TDX: Handle TDX PV rdmsr/wrmsr hypercall
author Isaku Yamahata <isaku.yamahata@intel.com>
Thu, 27 Feb 2025 01:20:10 +0000 (09:20 +0800)
committer Paolo Bonzini <pbonzini@redhat.com>
Fri, 14 Mar 2025 18:20:57 +0000 (14:20 -0400)
Morph the TDX PV RDMSR/WRMSR hypercalls into EXIT_REASON_MSR_{READ,WRITE}
and wire up the KVM backend functions.

For the complete_emulated_msr() callback, instead of injecting a #GP on
error, implement tdx_complete_emulated_msr() to set the hypercall return
code on error.  On an MSR read, also set the hypercall return value from
the values in the KVM x86 register cache.
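
Per the GHCI spec, the guest passes the hypercall leaf in r11 (31 for
RDMSR and 32 for WRMSR, matching the VMX exit reason numbers), the MSR
index in r12 and, for WRMSR, the 64-bit value to write in r13; the
status is returned to the guest in r10 and, for RDMSR, the read value
in r11.  As a minimal sketch (mirroring the diff below), the WRMSR path
stages the registers the common emulator expects:

	/* MSR index in ecx, value split into edx:eax. */
	kvm_rcx_write(vcpu, tdx->vp_enter_args.r12);
	kvm_rax_write(vcpu, tdx->vp_enter_args.r13 & -1u);  /* low 32 bits */
	kvm_rdx_write(vcpu, tdx->vp_enter_args.r13 >> 32);  /* high 32 bits */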

Suggested-by: Sean Christopherson <seanjc@google.com>
Signed-off-by: Isaku Yamahata <isaku.yamahata@intel.com>
Signed-off-by: Binbin Wu <binbin.wu@linux.intel.com>
Reviewed-by: Paolo Bonzini <pbonzini@redhat.com>
Message-ID: <20250227012021.1778144-10-binbin.wu@linux.intel.com>
Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
arch/x86/kvm/vmx/main.c
arch/x86/kvm/vmx/tdx.c
arch/x86/kvm/vmx/tdx.h

diff --git a/arch/x86/kvm/vmx/main.c b/arch/x86/kvm/vmx/main.c
index de0db4751172193e2e0b6f7e8dcb90ecfc2b5b78..713426198930d7194b78d5f3fc815cd71164ecc2 100644
--- a/arch/x86/kvm/vmx/main.c
+++ b/arch/x86/kvm/vmx/main.c
@@ -235,6 +235,14 @@ static void vt_msr_filter_changed(struct kvm_vcpu *vcpu)
        vmx_msr_filter_changed(vcpu);
 }
 
+static int vt_complete_emulated_msr(struct kvm_vcpu *vcpu, int err)
+{
+       if (is_td_vcpu(vcpu))
+               return tdx_complete_emulated_msr(vcpu, err);
+
+       return kvm_complete_insn_gp(vcpu, err);
+}
+
 #ifdef CONFIG_KVM_SMM
 static int vt_smi_allowed(struct kvm_vcpu *vcpu, bool for_injection)
 {
@@ -686,7 +694,7 @@ struct kvm_x86_ops vt_x86_ops __initdata = {
        .migrate_timers = vmx_migrate_timers,
 
        .msr_filter_changed = vt_msr_filter_changed,
-       .complete_emulated_msr = kvm_complete_insn_gp,
+       .complete_emulated_msr = vt_complete_emulated_msr,
 
        .vcpu_deliver_sipi_vector = kvm_vcpu_deliver_sipi_vector,
 
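
The callback fires at the tail of KVM's common MSR emulation, which is
why tdx_complete_emulated_msr() below can pick the read value back out
of the register cache.  Roughly, paraphrasing kvm_emulate_rdmsr() in
arch/x86/kvm/x86.c (simplified sketch; the real function can also defer
the access to userspace):

	int kvm_emulate_rdmsr(struct kvm_vcpu *vcpu)
	{
		u32 msr = kvm_rcx_read(vcpu);	/* index staged from r12 */
		u64 data;
		int r;

		r = kvm_get_msr_with_filter(vcpu, msr, &data);
		if (!r) {
			/* On success the value lands in edx:eax... */
			kvm_rax_write(vcpu, data & -1u);
			kvm_rdx_write(vcpu, (data >> 32) & -1u);
		}

		/* ...which the vendor completion callback consumes. */
		return kvm_x86_call(complete_emulated_msr)(vcpu, r);
	}
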
diff --git a/arch/x86/kvm/vmx/tdx.c b/arch/x86/kvm/vmx/tdx.c
index 980f3dc01854ced8b68516f28e33ecf76403b49c..6ce20082a519c416d9e09ffa7712dc22a93650e6 100644
--- a/arch/x86/kvm/vmx/tdx.c
+++ b/arch/x86/kvm/vmx/tdx.c
@@ -877,6 +877,8 @@ static __always_inline u32 tdcall_to_vmx_exit_reason(struct kvm_vcpu *vcpu)
        case EXIT_REASON_CPUID:
        case EXIT_REASON_HLT:
        case EXIT_REASON_IO_INSTRUCTION:
+       case EXIT_REASON_MSR_READ:
+       case EXIT_REASON_MSR_WRITE:
                return tdvmcall_leaf(vcpu);
        case EXIT_REASON_EPT_VIOLATION:
                return EXIT_REASON_EPT_MISCONFIG;
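
For these "standard" leaves the GHCI defines the TDVMCALL sub-function
number to be the corresponding VMX exit reason, so the morphing is the
identity mapping.  An assumed sketch of the helper (the real
tdvmcall_leaf() may differ in detail):

	/* Assumption: the leaf is whatever the guest put in r11. */
	static __always_inline u32 tdvmcall_leaf(struct kvm_vcpu *vcpu)
	{
		return to_tdx(vcpu)->vp_enter_args.r11;
	}
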
@@ -1906,6 +1908,19 @@ static int tdx_handle_ept_violation(struct kvm_vcpu *vcpu)
        return ret;
 }
 
+int tdx_complete_emulated_msr(struct kvm_vcpu *vcpu, int err)
+{
+       if (err) {
+               tdvmcall_set_return_code(vcpu, TDVMCALL_STATUS_INVALID_OPERAND);
+               return 1;
+       }
+
+       if (vmx_get_exit_reason(vcpu).basic == EXIT_REASON_MSR_READ)
+               tdvmcall_set_return_val(vcpu, kvm_read_edx_eax(vcpu));
+
+       return 1;
+}
+
 int tdx_handle_exit(struct kvm_vcpu *vcpu, fastpath_t fastpath)
 {
        struct vcpu_tdx *tdx = to_tdx(vcpu);
@@ -1971,6 +1987,14 @@ int tdx_handle_exit(struct kvm_vcpu *vcpu, fastpath_t fastpath)
                return tdx_emulate_vmcall(vcpu);
        case EXIT_REASON_IO_INSTRUCTION:
                return tdx_emulate_io(vcpu);
+       case EXIT_REASON_MSR_READ:
+               kvm_rcx_write(vcpu, tdx->vp_enter_args.r12);
+               return kvm_emulate_rdmsr(vcpu);
+       case EXIT_REASON_MSR_WRITE:
+               kvm_rcx_write(vcpu, tdx->vp_enter_args.r12);
+               kvm_rax_write(vcpu, tdx->vp_enter_args.r13 & -1u);
+               kvm_rdx_write(vcpu, tdx->vp_enter_args.r13 >> 32);
+               return kvm_emulate_wrmsr(vcpu);
        case EXIT_REASON_EPT_MISCONFIG:
                return tdx_emulate_mmio(vcpu);
        case EXIT_REASON_EPT_VIOLATION:
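
The "& -1u" in the WRMSR case keeps the low 32 bits of the guest's r13
(-1u is 0xffffffff and zero-extends against the u64), reproducing the
edx:eax split of a native WRMSR; tdx_complete_emulated_msr() performs
the inverse merge via kvm_read_edx_eax().  A standalone illustration of
the round trip:

	#include <stdint.h>
	#include <stdio.h>

	int main(void)
	{
		uint64_t r13 = 0x1122334455667788ULL;	/* guest r13 */
		uint32_t eax = r13 & -1u;		/* 0x55667788 */
		uint32_t edx = r13 >> 32;		/* 0x11223344 */
		/* Equivalent of kvm_read_edx_eax(): */
		uint64_t merged = ((uint64_t)edx << 32) | eax;

		printf("eax=%#x edx=%#x merged=%#llx\n",
		       eax, edx, (unsigned long long)merged);
		return 0;
	}
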
diff --git a/arch/x86/kvm/vmx/tdx.h b/arch/x86/kvm/vmx/tdx.h
index 51983743b37556ca4129b4a73bffdd9f7430c7ad..51f98443e8a255ef51a20d79e37fa28c899aac55 100644
--- a/arch/x86/kvm/vmx/tdx.h
+++ b/arch/x86/kvm/vmx/tdx.h
@@ -173,6 +173,7 @@ static __always_inline void td_##lclass##_clearbit##bits(struct vcpu_tdx *tdx,      \
 
 
 bool tdx_interrupt_allowed(struct kvm_vcpu *vcpu);
+int tdx_complete_emulated_msr(struct kvm_vcpu *vcpu, int err);
 
 TDX_BUILD_TDVPS_ACCESSORS(16, VMCS, vmcs);
 TDX_BUILD_TDVPS_ACCESSORS(32, VMCS, vmcs);
@@ -196,6 +197,7 @@ struct vcpu_tdx {
 };
 
 static inline bool tdx_interrupt_allowed(struct kvm_vcpu *vcpu) { return false; }
+static inline int tdx_complete_emulated_msr(struct kvm_vcpu *vcpu, int err) { return 0; }
 
 #endif
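
For reference, a TDX guest reaches this handler through its paravirt
MSR hooks.  A loose sketch of the read side, modeled on
arch/x86/coco/tdx/tdx.c (structure and helper names approximated, not
verbatim, and subject to change across kernel versions):

	/* Approximation of the TDX guest's paravirt RDMSR hook. */
	static int tdx_read_msr(unsigned int msr, u64 *val)
	{
		struct tdx_module_args args = {
			.r10 = TDX_HYPERCALL_STANDARD,	/* "standard" TDVMCALL */
			.r11 = EXIT_REASON_MSR_READ,	/* leaf 31 */
			.r12 = msr,			/* MSR index */
		};

		/* TDG.VP.VMCALL<Instruction.RDMSR>; status comes back in r10. */
		if (__tdx_hypercall(&args))
			return -EIO;

		*val = args.r11;			/* read value in r11 */
		return 0;
	}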