KVM: x86: nSVM: refactor svm_leave_smm and svm_enter_smm
author		Maxim Levitsky <mlevitsk@redhat.com>
		Wed, 22 Sep 2021 14:28:43 +0000 (10:28 -0400)
committer	Paolo Bonzini <pbonzini@redhat.com>
		Wed, 22 Sep 2021 14:47:43 +0000 (10:47 -0400)
Use early returns instead of nested ifs, and fix the error paths to
unmap all of the maps that were taken.
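
For background, the rewritten error handling follows the usual kernel
goto-unwind idiom: take resources in order, jump to a label that
releases everything acquired so far, and let the success path fall
through the same labels. A minimal, self-contained sketch of the shape
(the resource type and helpers are made up for illustration; they are
not KVM APIs):

	struct resource { int held; };

	static int acquire(struct resource *r) { r->held = 1; return 0; }
	static void release(struct resource *r) { r->held = 0; }

	static int do_work(struct resource *a, struct resource *b)
	{
		int ret = 1;			/* pessimistic default */

		if (acquire(a))
			return 1;		/* nothing held yet */

		if (acquire(b))
			goto release_a;		/* only 'a' needs unwinding */

		ret = 0;			/* real work would go here */

		release(b);			/* success path, then fall through */
	release_a:
		release(a);
		return ret;
	}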

Suggested-by: Sean Christopherson <seanjc@google.com>
Signed-off-by: Maxim Levitsky <mlevitsk@redhat.com>
Message-Id: <20210913140954.165665-2-mlevitsk@redhat.com>
Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
arch/x86/kvm/svm/svm.c

index ffdde862a5f62281750c0121d52c69c6e4f79cc1..196bb3d22383dd247525e6fffc61ce92055003a1 100644
@@ -4285,43 +4285,44 @@ static int svm_enter_smm(struct kvm_vcpu *vcpu, char *smstate)
        struct kvm_host_map map_save;
        int ret;
 
-       if (is_guest_mode(vcpu)) {
-               /* FED8h - SVM Guest */
-               put_smstate(u64, smstate, 0x7ed8, 1);
-               /* FEE0h - SVM Guest VMCB Physical Address */
-               put_smstate(u64, smstate, 0x7ee0, svm->nested.vmcb12_gpa);
+       if (!is_guest_mode(vcpu))
+               return 0;
 
-               svm->vmcb->save.rax = vcpu->arch.regs[VCPU_REGS_RAX];
-               svm->vmcb->save.rsp = vcpu->arch.regs[VCPU_REGS_RSP];
-               svm->vmcb->save.rip = vcpu->arch.regs[VCPU_REGS_RIP];
+       /* FED8h - SVM Guest */
+       put_smstate(u64, smstate, 0x7ed8, 1);
+       /* FEE0h - SVM Guest VMCB Physical Address */
+       put_smstate(u64, smstate, 0x7ee0, svm->nested.vmcb12_gpa);
 
-               ret = nested_svm_vmexit(svm);
-               if (ret)
-                       return ret;
+       svm->vmcb->save.rax = vcpu->arch.regs[VCPU_REGS_RAX];
+       svm->vmcb->save.rsp = vcpu->arch.regs[VCPU_REGS_RSP];
+       svm->vmcb->save.rip = vcpu->arch.regs[VCPU_REGS_RIP];
 
-               /*
-                * KVM uses VMCB01 to store L1 host state while L2 runs, but
-                * VMCB01 is going to be used during SMM and thus the state will
-                * be lost. Temporarily save the non-VMLOAD/VMSAVE state to the
-                * host save area pointed to by MSR_VM_HSAVE_PA. The APM
-                * guarantees that the format of the area is identical to the
-                * guest save area, offset by 0x400 (matching the offset of
-                * 'struct vmcb_save_area' within 'struct vmcb'). Note: the
-                * HSAVE area may also be used by the L1 hypervisor to save
-                * additional host context (e.g. KVM does that, see
-                * svm_prepare_guest_switch()), which must be preserved.
-                */
-               if (kvm_vcpu_map(vcpu, gpa_to_gfn(svm->nested.hsave_msr),
-                                &map_save) == -EINVAL)
-                       return 1;
+       ret = nested_svm_vmexit(svm);
+       if (ret)
+               return ret;
+
+       /*
+        * KVM uses VMCB01 to store L1 host state while L2 runs, but
+        * VMCB01 is going to be used during SMM and thus the state will
+        * be lost. Temporarily save the non-VMLOAD/VMSAVE state to the
+        * host save area pointed to by MSR_VM_HSAVE_PA. The APM
+        * guarantees that the format of the area is identical to the
+        * guest save area, offset by 0x400 (matching the offset of
+        * 'struct vmcb_save_area' within 'struct vmcb'). Note: the
+        * HSAVE area may also be used by the L1 hypervisor to save
+        * additional host context (e.g. KVM does that, see
+        * svm_prepare_guest_switch()), which must be preserved.
+        */
+       if (kvm_vcpu_map(vcpu, gpa_to_gfn(svm->nested.hsave_msr),
+                        &map_save) == -EINVAL)
+               return 1;
 
-               BUILD_BUG_ON(offsetof(struct vmcb, save) != 0x400);
+       BUILD_BUG_ON(offsetof(struct vmcb, save) != 0x400);
 
-               svm_copy_vmrun_state(map_save.hva + 0x400,
-                                    &svm->vmcb01.ptr->save);
+       svm_copy_vmrun_state(map_save.hva + 0x400,
+                            &svm->vmcb01.ptr->save);
 
-               kvm_vcpu_unmap(vcpu, &map_save, true);
-       }
+       kvm_vcpu_unmap(vcpu, &map_save, true);
        return 0;
 }
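
A note on the "map_save.hva + 0x400" arithmetic kept above: per the
comment, the APM defines the host save area to use the VMCB layout, in
which the save area starts at offset 0x400, and the BUILD_BUG_ON() pins
that assumption at build time. A self-contained sketch of the same
invariant (stub structs for illustration only; the real definitions
live in arch/x86/include/asm/svm.h):

	#include <stddef.h>

	/* Stand-ins: the control area (plus padding) occupies 0x400 bytes. */
	struct vmcb_control_area_stub { unsigned char bytes[0x400]; };
	struct vmcb_save_area_stub    { unsigned long rax, rsp, rip; /* ... */ };

	struct vmcb_stub {
		struct vmcb_control_area_stub control;	/* 0x000..0x3ff */
		struct vmcb_save_area_stub save;	/* starts at 0x400 */
	};

	/* Same check as the BUILD_BUG_ON() in svm_enter_smm(). */
	_Static_assert(offsetof(struct vmcb_stub, save) == 0x400,
		       "save area must start at 0x400 within the VMCB");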
 
@@ -4329,52 +4330,54 @@ static int svm_leave_smm(struct kvm_vcpu *vcpu, const char *smstate)
 {
        struct vcpu_svm *svm = to_svm(vcpu);
        struct kvm_host_map map, map_save;
-       int ret = 0;
+       u64 saved_efer, vmcb12_gpa;
+       struct vmcb *vmcb12;
+       int ret;
 
-       if (guest_cpuid_has(vcpu, X86_FEATURE_LM)) {
-               u64 saved_efer = GET_SMSTATE(u64, smstate, 0x7ed0);
-               u64 guest = GET_SMSTATE(u64, smstate, 0x7ed8);
-               u64 vmcb12_gpa = GET_SMSTATE(u64, smstate, 0x7ee0);
-               struct vmcb *vmcb12;
+       if (!guest_cpuid_has(vcpu, X86_FEATURE_LM))
+               return 0;
 
-               if (guest) {
-                       if (!guest_cpuid_has(vcpu, X86_FEATURE_SVM))
-                               return 1;
+       /* Non-zero if SMI arrived while vCPU was in guest mode. */
+       if (!GET_SMSTATE(u64, smstate, 0x7ed8))
+               return 0;
 
-                       if (!(saved_efer & EFER_SVME))
-                               return 1;
+       if (!guest_cpuid_has(vcpu, X86_FEATURE_SVM))
+               return 1;
 
-                       if (kvm_vcpu_map(vcpu,
-                                        gpa_to_gfn(vmcb12_gpa), &map) == -EINVAL)
-                               return 1;
+       saved_efer = GET_SMSTATE(u64, smstate, 0x7ed0);
+       if (!(saved_efer & EFER_SVME))
+               return 1;
 
-                       if (svm_allocate_nested(svm))
-                               return 1;
+       vmcb12_gpa = GET_SMSTATE(u64, smstate, 0x7ee0);
+       if (kvm_vcpu_map(vcpu, gpa_to_gfn(vmcb12_gpa), &map) == -EINVAL)
+               return 1;
 
-                       kvm_vcpu_unmap(vcpu, &map, true);
+       ret = 1;
+       if (kvm_vcpu_map(vcpu, gpa_to_gfn(svm->nested.hsave_msr), &map_save) == -EINVAL)
+               goto unmap_map;
 
-                       /*
-                        * Restore L1 host state from L1 HSAVE area as VMCB01 was
-                        * used during SMM (see svm_enter_smm())
-                        */
-                       if (kvm_vcpu_map(vcpu, gpa_to_gfn(svm->nested.hsave_msr),
-                                        &map_save) == -EINVAL)
-                               return 1;
+       if (svm_allocate_nested(svm))
+               goto unmap_save;
 
-                       svm_copy_vmrun_state(&svm->vmcb01.ptr->save,
-                                            map_save.hva + 0x400);
+       /*
+        * Restore L1 host state from L1 HSAVE area as VMCB01 was
+        * used during SMM (see svm_enter_smm())
+        */
 
-                       /*
-                        * Enter the nested guest now
-                        */
-                       vmcb12 = map.hva;
-                       nested_load_control_from_vmcb12(svm, &vmcb12->control);
-                       ret = enter_svm_guest_mode(vcpu, vmcb12_gpa, vmcb12, false);
+       svm_copy_vmrun_state(&svm->vmcb01.ptr->save, map_save.hva + 0x400);
 
-                       kvm_vcpu_unmap(vcpu, &map_save, true);
-               }
-       }
+       /*
+        * Enter the nested guest now
+        */
 
+       vmcb12 = map.hva;
+       nested_load_control_from_vmcb12(svm, &vmcb12->control);
+       ret = enter_svm_guest_mode(vcpu, vmcb12_gpa, vmcb12, false);
+
+unmap_save:
+       kvm_vcpu_unmap(vcpu, &map_save, true);
+unmap_map:
+       kvm_vcpu_unmap(vcpu, &map, true);
        return ret;
 }
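
For quick reference, the three 64-bit SMRAM save-state fields this
patch touches, as identified by the comments in the code (the macro
names below are illustrative, not kernel identifiers):

	#define SMRAM64_EFER		0x7ed0	/* saved EFER; must have EFER_SVME set */
	#define SMRAM64_SVM_GUEST	0x7ed8	/* FED8h: non-zero if SMI arrived in guest mode */
	#define SMRAM64_SVM_VMCB_GPA	0x7ee0	/* FEE0h: VMCB12 guest-physical address */

svm_enter_smm() writes the latter two; svm_leave_smm() reads all three.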