KVM: VMX: Move emulation_required to struct vcpu_vt
Author:     Binbin Wu <binbin.wu@linux.intel.com>
AuthorDate: Sat, 22 Feb 2025 01:47:54 +0000 (09:47 +0800)
Commit:     Paolo Bonzini <pbonzini@redhat.com>
CommitDate: Fri, 14 Mar 2025 18:20:56 +0000 (14:20 -0400)
Move emulation_required from struct vcpu_vmx to struct vcpu_vt so that
vmx_handle_exit_irqoff() can be reused by TDX code.

No functional change intended.

Signed-off-by: Binbin Wu <binbin.wu@linux.intel.com>
Message-ID: <20250222014757.897978-14-binbin.wu@linux.intel.com>
Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
arch/x86/kvm/vmx/common.h
arch/x86/kvm/vmx/nested.c
arch/x86/kvm/vmx/vmx.c
arch/x86/kvm/vmx/vmx.h
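
The move works because the VMX and TDX vcpu containers both embed the
common struct vcpu_vt, so shared code can reach emulation_required
through to_vt() without knowing which vendor struct wraps the vcpu.
A minimal illustrative sketch of that layout convention (not copied
from the kernel; everything except emulation_required is stubbed):

struct kvm_vcpu { /* arch-independent vcpu state (stubbed) */ };

struct vcpu_vt {
        bool emulation_required;        /* the field this patch moves */
        /* ... other state shared by VMX and TDX ... */
};

struct vcpu_vmx {
        struct kvm_vcpu vcpu;
        struct vcpu_vt  vt;
        /* ... VMX-only state ... */
};

struct vcpu_tdx {
        struct kvm_vcpu vcpu;
        struct vcpu_vt  vt;
        /* ... TDX-only state ... */
};

/*
 * Uses the kernel's container_of(); valid for TDX vcpus too, as long
 * as both containers keep vt at the same offset, which the real code
 * can enforce with a build-time assertion.
 */
static inline struct vcpu_vt *to_vt(struct kvm_vcpu *vcpu)
{
        return &container_of(vcpu, struct vcpu_vmx, vcpu)->vt;
}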

diff --git a/arch/x86/kvm/vmx/common.h b/arch/x86/kvm/vmx/common.h
index 8f30de03adabd6bf78955eca1c2fd3f585b1168c..1f2c2b4ee1da306a90889b18be6e591bd0bceebc 100644
@@ -48,6 +48,7 @@ struct vcpu_vt {
         * hardware.
         */
        bool            guest_state_loaded;
+       bool            emulation_required;
 
 #ifdef CONFIG_X86_64
        u64             msr_host_kernel_gs_base;
diff --git a/arch/x86/kvm/vmx/nested.c b/arch/x86/kvm/vmx/nested.c
index 99f02972cd7452c98a07ae9dc462849b075683a5..5b5d6017cb3dbfbab646d91e327a2be2787165ba 100644
@@ -4794,7 +4794,7 @@ static void load_vmcs12_host_state(struct kvm_vcpu *vcpu,
                                vmcs12->vm_exit_msr_load_count))
                nested_vmx_abort(vcpu, VMX_ABORT_LOAD_HOST_MSR_FAIL);
 
-       to_vmx(vcpu)->emulation_required = vmx_emulation_required(vcpu);
+       to_vt(vcpu)->emulation_required = vmx_emulation_required(vcpu);
 }
 
 static inline u64 nested_vmx_get_vmcs01_guest_efer(struct vcpu_vmx *vmx)
diff --git a/arch/x86/kvm/vmx/vmx.c b/arch/x86/kvm/vmx/vmx.c
index 1303b4868a945b2cd5ad059b7f45c2d627649dce..279ce9be8995919f0023d18523221bd1b5e479ea 100644
@@ -1580,7 +1580,7 @@ void vmx_set_rflags(struct kvm_vcpu *vcpu, unsigned long rflags)
        vmcs_writel(GUEST_RFLAGS, rflags);
 
        if ((old_rflags ^ vmx->rflags) & X86_EFLAGS_VM)
-               vmx->emulation_required = vmx_emulation_required(vcpu);
+               vmx->vt.emulation_required = vmx_emulation_required(vcpu);
 }
 
 bool vmx_get_if_flag(struct kvm_vcpu *vcpu)
@@ -1862,7 +1862,7 @@ void vmx_inject_exception(struct kvm_vcpu *vcpu)
                return;
        }
 
-       WARN_ON_ONCE(vmx->emulation_required);
+       WARN_ON_ONCE(vmx->vt.emulation_required);
 
        if (kvm_exception_is_soft(ex->vector)) {
                vmcs_write32(VM_ENTRY_INSTRUCTION_LEN,
@@ -3391,7 +3391,7 @@ void vmx_set_cr0(struct kvm_vcpu *vcpu, unsigned long cr0)
        }
 
        /* depends on vcpu->arch.cr0 to be set to a new value */
-       vmx->emulation_required = vmx_emulation_required(vcpu);
+       vmx->vt.emulation_required = vmx_emulation_required(vcpu);
 }
 
 static int vmx_get_max_ept_level(void)
@@ -3654,7 +3654,7 @@ void vmx_set_segment(struct kvm_vcpu *vcpu, struct kvm_segment *var, int seg)
 {
        __vmx_set_segment(vcpu, var, seg);
 
-       to_vmx(vcpu)->emulation_required = vmx_emulation_required(vcpu);
+       to_vmx(vcpu)->vt.emulation_required = vmx_emulation_required(vcpu);
 }
 
 void vmx_get_cs_db_l_bits(struct kvm_vcpu *vcpu, int *db, int *l)
@@ -5800,7 +5800,7 @@ static bool vmx_emulation_required_with_pending_exception(struct kvm_vcpu *vcpu)
 {
        struct vcpu_vmx *vmx = to_vmx(vcpu);
 
-       return vmx->emulation_required && !vmx->rmode.vm86_active &&
+       return vmx->vt.emulation_required && !vmx->rmode.vm86_active &&
               (kvm_is_exception_pending(vcpu) || vcpu->arch.exception.injected);
 }
 
@@ -5813,7 +5813,7 @@ static int handle_invalid_guest_state(struct kvm_vcpu *vcpu)
        intr_window_requested = exec_controls_get(vmx) &
                                CPU_BASED_INTR_WINDOW_EXITING;
 
-       while (vmx->emulation_required && count-- != 0) {
+       while (vmx->vt.emulation_required && count-- != 0) {
                if (intr_window_requested && !vmx_interrupt_blocked(vcpu))
                        return handle_interrupt_window(&vmx->vcpu);
 
@@ -6460,7 +6460,7 @@ static int __vmx_handle_exit(struct kvm_vcpu *vcpu, fastpath_t exit_fastpath)
                 * the least awful solution for the userspace case without
                 * risking false positives.
                 */
-               if (vmx->emulation_required) {
+               if (vmx->vt.emulation_required) {
                        nested_vmx_vmexit(vcpu, EXIT_REASON_TRIPLE_FAULT, 0, 0);
                        return 1;
                }
@@ -6470,7 +6470,7 @@ static int __vmx_handle_exit(struct kvm_vcpu *vcpu, fastpath_t exit_fastpath)
        }
 
        /* If guest state is invalid, start emulating.  L2 is handled above. */
-       if (vmx->emulation_required)
+       if (vmx->vt.emulation_required)
                return handle_invalid_guest_state(vcpu);
 
        if (exit_reason.failed_vmentry) {
@@ -6963,7 +6963,7 @@ void vmx_handle_exit_irqoff(struct kvm_vcpu *vcpu)
 {
        struct vcpu_vmx *vmx = to_vmx(vcpu);
 
-       if (vmx->emulation_required)
+       if (vmx->vt.emulation_required)
                return;
 
        if (vmx_get_exit_reason(vcpu).basic == EXIT_REASON_EXTERNAL_INTERRUPT)
@@ -7286,7 +7286,7 @@ fastpath_t vmx_vcpu_run(struct kvm_vcpu *vcpu, bool force_immediate_exit)
         * start emulation until we arrive back to a valid state.  Synthesize a
         * consistency check VM-Exit due to invalid guest state and bail.
         */
-       if (unlikely(vmx->emulation_required)) {
+       if (unlikely(vmx->vt.emulation_required)) {
                vmx->fail = 0;
 
                vmx->vt.exit_reason.full = EXIT_REASON_INVALID_STATE;
diff --git a/arch/x86/kvm/vmx/vmx.h b/arch/x86/kvm/vmx/vmx.h
index e635199901e2daf8e7f7481fc2aac4cacbb5dfd3..6d1e40ecc024137198318b854349d5b0d5ad59b3 100644
@@ -263,7 +263,6 @@ struct vcpu_vmx {
                } seg[8];
        } segment_cache;
        int vpid;
-       bool emulation_required;
 
        /* Support for a guest hypervisor (nested VMX) */
        struct nested_vmx nested;
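
With the flag in the shared struct, exit paths used by both VMX and TDX
can test it through to_vt() alone. A hedged sketch of the pattern this
enables (the wrapper name vt_handle_exit_irqoff is hypothetical, not
part of this patch):

/* Sketch: a shared IRQ-off exit hook that no longer needs to_vmx(). */
void vt_handle_exit_irqoff(struct kvm_vcpu *vcpu)
{
        if (to_vt(vcpu)->emulation_required)
                return;

        /* ... common external-interrupt/NMI handling ... */
}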