KVM: x86: check_nested_events is never NULL
Author:     Paolo Bonzini <pbonzini@redhat.com>
AuthorDate: Fri, 17 Apr 2020 14:32:53 +0000 (10:32 -0400)
Commit:     Paolo Bonzini <pbonzini@redhat.com>
CommitDate: Thu, 23 Apr 2020 13:04:56 +0000 (09:04 -0400)
Both the Intel (VMX) and AMD (SVM) backends now implement the
check_nested_events callback, so there is no need to check whether it
is implemented before calling it.
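
As a minimal sketch of the pattern this change relies on (a standalone
illustration, not kernel code; the backend names and the main() harness
below are hypothetical): once every backend installs the hook, call
sites can invoke it unconditionally instead of guarding with a NULL
check first.

	#include <stdio.h>

	struct ops {
		/* every backend must install this hook */
		int (*check_nested_events)(void);
	};

	static int intel_check_nested_events(void) { return 0; }
	static int amd_check_nested_events(void)   { return 0; }

	/* both backends fill in the callback, so it is never NULL */
	static const struct ops intel_ops = {
		.check_nested_events = intel_check_nested_events,
	};
	static const struct ops amd_ops = {
		.check_nested_events = amd_check_nested_events,
	};

	static struct ops ops;	/* the active backend, chosen at init */

	int main(int argc, char **argv)
	{
		/* pretend hardware setup picked one backend */
		ops = (argc > 1) ? amd_ops : intel_ops;

		/* no "if (ops.check_nested_events)" guard is needed */
		int r = ops.check_nested_events();
		printf("check_nested_events() = %d\n", r);
		return 0;
	}

In KVM itself the guarantee comes from both the VMX and SVM code
setting check_nested_events in kvm_x86_ops, which is what lets the
three call sites in the diff below drop the guard.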

Reviewed-by: Vitaly Kuznetsov <vkuznets@redhat.com>
Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c
index 59958ce2b6816de16e9d13fa0e2241de509d6fcd..0492baeb78ab898335034d2ba6a7661d187604d4 100644
--- a/arch/x86/kvm/x86.c
+++ b/arch/x86/kvm/x86.c
@@ -7699,7 +7699,7 @@ static int inject_pending_event(struct kvm_vcpu *vcpu)
         * from L2 to L1 due to pending L1 events which require exit
         * from L2 to L1.
         */
-       if (is_guest_mode(vcpu) && kvm_x86_ops.check_nested_events) {
+       if (is_guest_mode(vcpu)) {
                r = kvm_x86_ops.check_nested_events(vcpu);
                if (r != 0)
                        return r;
@@ -7761,7 +7761,7 @@ static int inject_pending_event(struct kvm_vcpu *vcpu)
                 * proposal and current concerns.  Perhaps we should be setting
                 * KVM_REQ_EVENT only on certain events and not unconditionally?
                 */
-               if (is_guest_mode(vcpu) && kvm_x86_ops.check_nested_events) {
+               if (is_guest_mode(vcpu)) {
                        r = kvm_x86_ops.check_nested_events(vcpu);
                        if (r != 0)
                                return r;
@@ -8527,7 +8527,7 @@ static inline int vcpu_block(struct kvm *kvm, struct kvm_vcpu *vcpu)
 
 static inline bool kvm_vcpu_running(struct kvm_vcpu *vcpu)
 {
-       if (is_guest_mode(vcpu) && kvm_x86_ops.check_nested_events)
+       if (is_guest_mode(vcpu))
                kvm_x86_ops.check_nested_events(vcpu);
 
        return (vcpu->arch.mp_state == KVM_MP_STATE_RUNNABLE &&