KVM: X86: Introduce kvm_vcpu_exit_request() helper
authorWanpeng Li <wanpengli@tencent.com>
Tue, 28 Apr 2020 06:23:26 +0000 (14:23 +0800)
committerPaolo Bonzini <pbonzini@redhat.com>
Fri, 15 May 2020 16:26:19 +0000 (12:26 -0400)
Introduce the kvm_vcpu_exit_request() helper. Before re-entering the guest
immediately, we need to check a few conditions; if the fastpath completes
but something prevents immediate re-entry, we skip invoking the exit handler
and go through the full run loop instead.

Tested-by: Haiwei Li <lihaiwei@tencent.com>
Cc: Haiwei Li <lihaiwei@tencent.com>
Signed-off-by: Wanpeng Li <wanpengli@tencent.com>
Message-Id: <1588055009-12677-5-git-send-email-wanpengli@tencent.com>
Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
arch/x86/kvm/x86.c
arch/x86/kvm/x86.h

index 370288fdedba85210fc940f13d19bdd04f3863ac..29a41aa98929de0eb9bfd126c1cb31e505a35dd7 100644 (file)
@@ -1573,6 +1573,13 @@ int kvm_emulate_wrmsr(struct kvm_vcpu *vcpu)
 }
 EXPORT_SYMBOL_GPL(kvm_emulate_wrmsr);
 
+bool kvm_vcpu_exit_request(struct kvm_vcpu *vcpu)
+{
+       return vcpu->mode == EXITING_GUEST_MODE || kvm_request_pending(vcpu) ||
+               need_resched() || signal_pending(current);
+}
+EXPORT_SYMBOL_GPL(kvm_vcpu_exit_request);
+
 /*
  * The fast path for frequent and performance sensitive wrmsr emulation,
  * i.e. the sending of IPI, sending IPI early in the VM-Exit flow reduces
@@ -8396,8 +8403,7 @@ static int vcpu_enter_guest(struct kvm_vcpu *vcpu)
        if (kvm_lapic_enabled(vcpu) && vcpu->arch.apicv_active)
                kvm_x86_ops.sync_pir_to_irr(vcpu);
 
-       if (vcpu->mode == EXITING_GUEST_MODE || kvm_request_pending(vcpu)
-           || need_resched() || signal_pending(current)) {
+       if (kvm_vcpu_exit_request(vcpu)) {
                vcpu->mode = OUTSIDE_GUEST_MODE;
                smp_wmb();
                local_irq_enable();
index 7b5ed8ed628e281b7b2f0595e96a13bd4f1519cc..e02fe28254b6ad8d1dac115fa191fda284df7b58 100644 (file)
@@ -364,5 +364,6 @@ static inline bool kvm_dr7_valid(u64 data)
 void kvm_load_guest_xsave_state(struct kvm_vcpu *vcpu);
 void kvm_load_host_xsave_state(struct kvm_vcpu *vcpu);
 u64 kvm_spec_ctrl_valid_bits(struct kvm_vcpu *vcpu);
+bool kvm_vcpu_exit_request(struct kvm_vcpu *vcpu);
 
 #endif