KVM: TDX: vcpu_run: save/restore host state(host kernel gs)
authorIsaku Yamahata <isaku.yamahata@intel.com>
Wed, 29 Jan 2025 09:58:55 +0000 (11:58 +0200)
committerPaolo Bonzini <pbonzini@redhat.com>
Fri, 14 Mar 2025 18:20:54 +0000 (14:20 -0400)
On entering/exiting TDX vcpu, preserved or clobbered CPU state is different
from the VMX case. Add TDX hooks to save/restore host/guest CPU state.
Save/restore kernel GS base MSR.

Signed-off-by: Isaku Yamahata <isaku.yamahata@intel.com>
Signed-off-by: Adrian Hunter <adrian.hunter@intel.com>
Reviewed-by: Paolo Bonzini <pbonzini@redhat.com>
Message-ID: <20250129095902.16391-7-adrian.hunter@intel.com>
Reviewed-by: Xiaoyao Li <xiaoyao.li@intel.com>
Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
arch/x86/kvm/vmx/main.c
arch/x86/kvm/vmx/tdx.c
arch/x86/kvm/vmx/x86_ops.h

index 037590fc05e91c7bf3d7df405a275f52590de96b..c0497ed0c9be00989082c3f5b1f63ef1d156869c 100644 (file)
@@ -145,6 +145,26 @@ static void vt_update_cpu_dirty_logging(struct kvm_vcpu *vcpu)
        vmx_update_cpu_dirty_logging(vcpu);
 }
 
+/*
+ * Dispatch wrapper: TDX vCPUs need only minimal host-state saving (see
+ * tdx_prepare_switch_to_guest()); everything else goes to the VMX path.
+ */
+static void vt_prepare_switch_to_guest(struct kvm_vcpu *vcpu)
+{
+       if (is_td_vcpu(vcpu)) {
+               tdx_prepare_switch_to_guest(vcpu);
+               return;
+       }
+
+       vmx_prepare_switch_to_guest(vcpu);
+}
+
+/*
+ * Dispatch wrapper: route vcpu_put to the TDX handler for TD vCPUs
+ * (restores host MSR_KERNEL_GS_BASE), otherwise to the VMX handler.
+ */
+static void vt_vcpu_put(struct kvm_vcpu *vcpu)
+{
+       if (is_td_vcpu(vcpu)) {
+               tdx_vcpu_put(vcpu);
+               return;
+       }
+
+       vmx_vcpu_put(vcpu);
+}
+
 static int vt_vcpu_pre_run(struct kvm_vcpu *vcpu)
 {
        if (is_td_vcpu(vcpu))
@@ -265,9 +285,9 @@ struct kvm_x86_ops vt_x86_ops __initdata = {
        .vcpu_free = vt_vcpu_free,
        .vcpu_reset = vt_vcpu_reset,
 
-       .prepare_switch_to_guest = vmx_prepare_switch_to_guest,
+       .prepare_switch_to_guest = vt_prepare_switch_to_guest,
        .vcpu_load = vt_vcpu_load,
-       .vcpu_put = vmx_vcpu_put,
+       .vcpu_put = vt_vcpu_put,
 
        .update_exception_bitmap = vmx_update_exception_bitmap,
        .get_feature_msr = vmx_get_feature_msr,
index bd299606b2bad1ca257a0888c447290e2a14a6d0..eac5fa794b568656ec8e7b41e5568178528cfd89 100644 (file)
@@ -3,6 +3,7 @@
 #include <linux/cpu.h>
 #include <asm/cpufeature.h>
 #include <linux/misc_cgroup.h>
+#include <linux/mmu_context.h>
 #include <asm/tdx.h>
 #include "capabilities.h"
 #include "mmu.h"
@@ -12,6 +13,7 @@
 #include "vmx.h"
 #include "mmu/spte.h"
 #include "common.h"
+#include "posted_intr.h"
 #include <trace/events/kvm.h>
 #include "trace.h"
 
@@ -629,6 +631,44 @@ void tdx_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
        local_irq_enable();
 }
 
+/*
+ * Compared to vmx_prepare_switch_to_guest(), there is not much to do
+ * as SEAMCALL/SEAMRET calls take care of most of save and restore.
+ */
+void tdx_prepare_switch_to_guest(struct kvm_vcpu *vcpu)
+{
+       struct vcpu_vt *vt = to_vt(vcpu);
+
+       /* Nothing to do if host state was already saved for this vCPU. */
+       if (vt->guest_state_loaded)
+               return;
+
+       /*
+        * MSR_KERNEL_GS_BASE is the only host state saved here.  For a
+        * 64-bit mm, use the kernel's cached copy in thread.gsbase;
+        * otherwise fall back to reading the MSR directly (presumably to
+        * avoid the MSR read in the common case -- mirrors the VMX path).
+        */
+       if (likely(is_64bit_mm(current->mm)))
+               vt->msr_host_kernel_gs_base = current->thread.gsbase;
+       else
+               vt->msr_host_kernel_gs_base = read_msr(MSR_KERNEL_GS_BASE);
+
+       vt->guest_state_loaded = true;
+}
+
+/*
+ * Counterpart of tdx_prepare_switch_to_guest(): restore the host's
+ * kernel GS base MSR saved there.  No-op unless guest state is loaded.
+ */
+static void tdx_prepare_switch_to_host(struct kvm_vcpu *vcpu)
+{
+       struct vcpu_vt *vt = to_vt(vcpu);
+
+       if (!vt->guest_state_loaded)
+               return;
+
+       ++vcpu->stat.host_state_reload;
+       /* Write back the value saved in tdx_prepare_switch_to_guest(). */
+       wrmsrl(MSR_KERNEL_GS_BASE, vt->msr_host_kernel_gs_base);
+
+       vt->guest_state_loaded = false;
+}
+
+/*
+ * vcpu_put hook for TD vCPUs: posted-interrupt bookkeeping first,
+ * then restore the host kernel GS base MSR.
+ */
+void tdx_vcpu_put(struct kvm_vcpu *vcpu)
+{
+       vmx_vcpu_pi_put(vcpu);
+       tdx_prepare_switch_to_host(vcpu);
+}
+
 void tdx_vcpu_free(struct kvm_vcpu *vcpu)
 {
        struct kvm_tdx *kvm_tdx = to_kvm_tdx(vcpu->kvm);
index 578c26d3aec47f44701b8821b7b5cb59cf8d5cce..cd18e9b1e1243ed105c49d5f46585aae288d67c0 100644 (file)
@@ -133,6 +133,8 @@ void tdx_vcpu_free(struct kvm_vcpu *vcpu);
 void tdx_vcpu_load(struct kvm_vcpu *vcpu, int cpu);
 int tdx_vcpu_pre_run(struct kvm_vcpu *vcpu);
 fastpath_t tdx_vcpu_run(struct kvm_vcpu *vcpu, bool force_immediate_exit);
+void tdx_prepare_switch_to_guest(struct kvm_vcpu *vcpu);
+void tdx_vcpu_put(struct kvm_vcpu *vcpu);
 
 int tdx_vcpu_ioctl(struct kvm_vcpu *vcpu, void __user *argp);
 
@@ -164,6 +166,8 @@ static inline fastpath_t tdx_vcpu_run(struct kvm_vcpu *vcpu, bool force_immediat
 {
        return EXIT_FASTPATH_NONE;
 }
+static inline void tdx_prepare_switch_to_guest(struct kvm_vcpu *vcpu) {}
+static inline void tdx_vcpu_put(struct kvm_vcpu *vcpu) {}
 
 static inline int tdx_vcpu_ioctl(struct kvm_vcpu *vcpu, void __user *argp) { return -EOPNOTSUPP; }