KVM: x86: add kvm_leave_nested
author	Maxim Levitsky <mlevitsk@redhat.com>
	Thu, 3 Nov 2022 14:13:45 +0000 (16:13 +0200)
committer	Paolo Bonzini <pbonzini@redhat.com>
	Thu, 17 Nov 2022 16:39:56 +0000 (11:39 -0500)
Add kvm_leave_nested(), which wraps the call to nested_ops->leave_nested()
in a single helper function.
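
For context: the helper is a thin wrapper over the per-vendor nested_ops
table, giving the common x86 code a single choke point for forcing a vCPU
out of nested mode. Below is a minimal self-contained sketch of that shape;
apart from the kvm_leave_nested()/leave_nested() names, everything here
(the toy kvm_vcpu, struct nested_ops, vendor_leave_nested) is hypothetical
stand-in code, not the kernel's:

	#include <stdio.h>

	struct kvm_vcpu { int guest_mode; };	/* toy stand-in */

	/* Simplified stand-in for the per-vendor nested-ops table. */
	struct nested_ops {
		void (*leave_nested)(struct kvm_vcpu *vcpu);
	};

	/* Vendor (SVM/VMX) implementation does the real teardown. */
	static void vendor_leave_nested(struct kvm_vcpu *vcpu)
	{
		vcpu->guest_mode = 0;
	}

	static const struct nested_ops nested_ops = {
		.leave_nested = vendor_leave_nested,
	};

	/* The wrapper: one place to hang bookkeeping common to all callers. */
	static void kvm_leave_nested(struct kvm_vcpu *vcpu)
	{
		nested_ops.leave_nested(vcpu);
	}

	int main(void)
	{
		struct kvm_vcpu vcpu = { .guest_mode = 1 };
		kvm_leave_nested(&vcpu);
		printf("guest_mode=%d\n", vcpu.guest_mode);
		return 0;
	}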

Cc: stable@vger.kernel.org
Signed-off-by: Maxim Levitsky <mlevitsk@redhat.com>
Message-Id: <20221103141351.50662-4-mlevitsk@redhat.com>
Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
arch/x86/kvm/svm/nested.c
arch/x86/kvm/vmx/nested.c
arch/x86/kvm/x86.c

diff --git a/arch/x86/kvm/svm/nested.c b/arch/x86/kvm/svm/nested.c
index b02a3a1792f194a8ef2156c23ce9a998a334ac5d..7354f0035a691dc09444acdf726ccb3442ffe833 100644
--- a/arch/x86/kvm/svm/nested.c
+++ b/arch/x86/kvm/svm/nested.c
@@ -1146,9 +1146,6 @@ void svm_free_nested(struct vcpu_svm *svm)
        svm->nested.initialized = false;
 }
 
-/*
- * Forcibly leave nested mode in order to be able to reset the VCPU later on.
- */
 void svm_leave_nested(struct kvm_vcpu *vcpu)
 {
        struct vcpu_svm *svm = to_svm(vcpu);
diff --git a/arch/x86/kvm/vmx/nested.c b/arch/x86/kvm/vmx/nested.c
index 0c62352dda6abc9bf72dfaaaa760cc5bb78bbcbf..f7333b9cdfbc7f11f3dacb08456cace5188d9436 100644
--- a/arch/x86/kvm/vmx/nested.c
+++ b/arch/x86/kvm/vmx/nested.c
@@ -6440,9 +6440,6 @@ out:
        return kvm_state.size;
 }
 
-/*
- * Forcibly leave nested mode in order to be able to reset the VCPU later on.
- */
 void vmx_leave_nested(struct kvm_vcpu *vcpu)
 {
        if (is_guest_mode(vcpu)) {
diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c
index ecea83f0da498fbba6e4f1887012cbd59fa95390..ff5be71892373314912a05eb39e912fadddc7ef8 100644
--- a/arch/x86/kvm/x86.c
+++ b/arch/x86/kvm/x86.c
@@ -628,6 +628,12 @@ static void kvm_queue_exception_vmexit(struct kvm_vcpu *vcpu, unsigned int vecto
        ex->payload = payload;
 }
 
+/* Forcibly leave the nested mode in cases like a vCPU reset */
+static void kvm_leave_nested(struct kvm_vcpu *vcpu)
+{
+       kvm_x86_ops.nested_ops->leave_nested(vcpu);
+}
+
 static void kvm_multiple_exception(struct kvm_vcpu *vcpu,
                unsigned nr, bool has_error, u32 error_code,
                bool has_payload, unsigned long payload, bool reinject)
@@ -5195,7 +5201,7 @@ static int kvm_vcpu_ioctl_x86_set_vcpu_events(struct kvm_vcpu *vcpu,
 
        if (events->flags & KVM_VCPUEVENT_VALID_SMM) {
                if (!!(vcpu->arch.hflags & HF_SMM_MASK) != events->smi.smm) {
-                       kvm_x86_ops.nested_ops->leave_nested(vcpu);
+                       kvm_leave_nested(vcpu);
                        kvm_smm_changed(vcpu, events->smi.smm);
                }