Merge branch 'x86/hyperv' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip
diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c
index cd3b3bc67c5a68b2d6a6dc868c4fa0ac9fedc017..0e27ee573bd52dec1b3801155c2f73289d5e42c6 100644
@@ -179,7 +179,6 @@ struct kvm_stats_debugfs_item debugfs_entries[] = {
        { "request_irq", VCPU_STAT(request_irq_exits) },
        { "irq_exits", VCPU_STAT(irq_exits) },
        { "host_state_reload", VCPU_STAT(host_state_reload) },
-       { "efer_reload", VCPU_STAT(efer_reload) },
        { "fpu_reload", VCPU_STAT(fpu_reload) },
        { "insn_emulation", VCPU_STAT(insn_emulation) },
        { "insn_emulation_fail", VCPU_STAT(insn_emulation_fail) },
@@ -704,7 +703,8 @@ static void kvm_load_guest_xcr0(struct kvm_vcpu *vcpu)
        if (kvm_read_cr4_bits(vcpu, X86_CR4_OSXSAVE) &&
                        !vcpu->guest_xcr0_loaded) {
                /* kvm_set_xcr() also depends on this */
-               xsetbv(XCR_XFEATURE_ENABLED_MASK, vcpu->arch.xcr0);
+               if (vcpu->arch.xcr0 != host_xcr0)
+                       xsetbv(XCR_XFEATURE_ENABLED_MASK, vcpu->arch.xcr0);
                vcpu->guest_xcr0_loaded = 1;
        }
 }
@@ -796,6 +796,9 @@ int kvm_set_cr4(struct kvm_vcpu *vcpu, unsigned long cr4)
        if (!guest_cpuid_has(vcpu, X86_FEATURE_LA57) && (cr4 & X86_CR4_LA57))
                return 1;
 
+       if (!guest_cpuid_has(vcpu, X86_FEATURE_UMIP) && (cr4 & X86_CR4_UMIP))
+               return 1;
+
        if (is_long_mode(vcpu)) {
                if (!(cr4 & X86_CR4_PAE))
                        return 1;
@@ -1038,6 +1041,7 @@ static u32 emulated_msrs[] = {
        MSR_IA32_MCG_CTL,
        MSR_IA32_MCG_EXT_CTL,
        MSR_IA32_SMBASE,
+       MSR_SMI_COUNT,
        MSR_PLATFORM_INFO,
        MSR_MISC_FEATURES_ENABLES,
 };
@@ -2162,6 +2166,12 @@ static void kvmclock_reset(struct kvm_vcpu *vcpu)
        vcpu->arch.pv_time_enabled = false;
 }
 
+static void kvm_vcpu_flush_tlb(struct kvm_vcpu *vcpu, bool invalidate_gpa)
+{
+       ++vcpu->stat.tlb_flush;
+       kvm_x86_ops->tlb_flush(vcpu, invalidate_gpa);
+}
+
 static void record_steal_time(struct kvm_vcpu *vcpu)
 {
        if (!(vcpu->arch.st.msr_val & KVM_MSR_ENABLED))
@@ -2171,7 +2181,12 @@ static void record_steal_time(struct kvm_vcpu *vcpu)
                &vcpu->arch.st.steal, sizeof(struct kvm_steal_time))))
                return;
 
-       vcpu->arch.st.steal.preempted = 0;
+       /*
+        * Doing a TLB flush here, on the guest's behalf, can avoid
+        * expensive IPIs.
+        */
+       if (xchg(&vcpu->arch.st.steal.preempted, 0) & KVM_VCPU_FLUSH_TLB)
+               kvm_vcpu_flush_tlb(vcpu, false);
 
        if (vcpu->arch.st.steal.version & 1)
                vcpu->arch.st.steal.version += 1;  /* first time write, random junk */
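
The host side above only consumes KVM_VCPU_FLUSH_TLB; the flag is meant to be set by the guest when it wants a TLB shootdown on a vCPU it can see is preempted, so the flush is folded into the next steal-time update instead of costing an IPI. A rough guest-side sketch (illustrative only; the hook name and the per-cpu mask are placeholders, not part of this patch):

        static void pv_flush_tlb_others(const struct cpumask *cpus,
                                        const struct flush_tlb_info *info)
        {
                /* pv_flush_mask is a placeholder per-cpu scratch cpumask */
                struct cpumask *todo = this_cpu_cpumask_var_ptr(pv_flush_mask);
                int cpu;

                cpumask_copy(todo, cpus);
                for_each_cpu(cpu, todo) {
                        struct kvm_steal_time *st = &per_cpu(steal_time, cpu);
                        u8 state = READ_ONCE(st->preempted);

                        /* Target vCPU is scheduled out: ask the host to flush
                         * on its behalf at the next entry instead of IPIing. */
                        if ((state & KVM_VCPU_PREEMPTED) &&
                            cmpxchg(&st->preempted, state,
                                    state | KVM_VCPU_FLUSH_TLB) == state)
                                __cpumask_clear_cpu(cpu, todo);
                }
                native_flush_tlb_others(todo, info);
        }
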
@@ -2272,6 +2287,11 @@ int kvm_set_msr_common(struct kvm_vcpu *vcpu, struct msr_data *msr_info)
                        return 1;
                vcpu->arch.smbase = data;
                break;
+       case MSR_SMI_COUNT:
+               if (!msr_info->host_initiated)
+                       return 1;
+               vcpu->arch.smi_count = data;
+               break;
        case MSR_KVM_WALL_CLOCK_NEW:
        case MSR_KVM_WALL_CLOCK:
                vcpu->kvm->arch.wall_clock = data;
@@ -2546,6 +2566,9 @@ int kvm_get_msr_common(struct kvm_vcpu *vcpu, struct msr_data *msr_info)
                        return 1;
                msr_info->data = vcpu->arch.smbase;
                break;
+       case MSR_SMI_COUNT:
+               msr_info->data = vcpu->arch.smi_count;
+               break;
        case MSR_IA32_PERF_STATUS:
                /* TSC increment by tick */
                msr_info->data = 1000ULL;
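
With MSR_SMI_COUNT listed in emulated_msrs, userspace can save and restore it like any other emulated MSR (writes are rejected unless host_initiated, as seen in the set_msr hunk above). A minimal userspace read, assuming an already-created vcpu_fd:

        struct {
                struct kvm_msrs hdr;
                struct kvm_msr_entry entry;
        } msrs = {
                .hdr.nmsrs   = 1,
                .entry.index = MSR_SMI_COUNT,   /* 0x34 */
        };

        if (ioctl(vcpu_fd, KVM_GET_MSRS, &msrs) == 1)   /* returns #MSRs read */
                printf("SMIs taken: %llu\n",
                       (unsigned long long)msrs.entry.data);
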
@@ -2948,7 +2971,7 @@ static void kvm_steal_time_set_preempted(struct kvm_vcpu *vcpu)
        if (!(vcpu->arch.st.msr_val & KVM_MSR_ENABLED))
                return;
 
-       vcpu->arch.st.steal.preempted = 1;
+       vcpu->arch.st.steal.preempted = KVM_VCPU_PREEMPTED;
 
        kvm_write_guest_offset_cached(vcpu->kvm, &vcpu->arch.st.stime,
                        &vcpu->arch.st.steal.preempted,
@@ -2982,12 +3005,18 @@ void kvm_arch_vcpu_put(struct kvm_vcpu *vcpu)
        pagefault_enable();
        kvm_x86_ops->vcpu_put(vcpu);
        vcpu->arch.last_host_tsc = rdtsc();
+       /*
+        * If userspace has set any breakpoints or watchpoints, dr6 is restored
+        * on every vmexit, but if not, we might have a stale dr6 from the
+        * guest. do_debug expects dr6 to be cleared after it runs, do the same.
+        */
+       set_debugreg(0, 6);
 }
 
 static int kvm_vcpu_ioctl_get_lapic(struct kvm_vcpu *vcpu,
                                    struct kvm_lapic_state *s)
 {
-       if (kvm_x86_ops->sync_pir_to_irr && vcpu->arch.apicv_active)
+       if (vcpu->arch.apicv_active)
                kvm_x86_ops->sync_pir_to_irr(vcpu);
 
        return kvm_apic_get_state(vcpu, s);
@@ -3516,6 +3545,8 @@ long kvm_arch_vcpu_ioctl(struct file *filp,
                void *buffer;
        } u;
 
+       vcpu_load(vcpu);
+
        u.buffer = NULL;
        switch (ioctl) {
        case KVM_GET_LAPIC: {
@@ -3541,8 +3572,10 @@ long kvm_arch_vcpu_ioctl(struct file *filp,
                if (!lapic_in_kernel(vcpu))
                        goto out;
                u.lapic = memdup_user(argp, sizeof(*u.lapic));
-               if (IS_ERR(u.lapic))
-                       return PTR_ERR(u.lapic);
+               if (IS_ERR(u.lapic)) {
+                       r = PTR_ERR(u.lapic);
+                       goto out_nofree;
+               }
 
                r = kvm_vcpu_ioctl_set_lapic(vcpu, u.lapic);
                break;
@@ -3716,8 +3749,10 @@ long kvm_arch_vcpu_ioctl(struct file *filp,
        }
        case KVM_SET_XSAVE: {
                u.xsave = memdup_user(argp, sizeof(*u.xsave));
-               if (IS_ERR(u.xsave))
-                       return PTR_ERR(u.xsave);
+               if (IS_ERR(u.xsave)) {
+                       r = PTR_ERR(u.xsave);
+                       goto out_nofree;
+               }
 
                r = kvm_vcpu_ioctl_x86_set_xsave(vcpu, u.xsave);
                break;
@@ -3739,8 +3774,10 @@ long kvm_arch_vcpu_ioctl(struct file *filp,
        }
        case KVM_SET_XCRS: {
                u.xcrs = memdup_user(argp, sizeof(*u.xcrs));
-               if (IS_ERR(u.xcrs))
-                       return PTR_ERR(u.xcrs);
+               if (IS_ERR(u.xcrs)) {
+                       r = PTR_ERR(u.xcrs);
+                       goto out_nofree;
+               }
 
                r = kvm_vcpu_ioctl_x86_set_xcrs(vcpu, u.xcrs);
                break;
@@ -3784,6 +3821,8 @@ long kvm_arch_vcpu_ioctl(struct file *filp,
        }
 out:
        kfree(u.buffer);
+out_nofree:
+       vcpu_put(vcpu);
        return r;
 }
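
The new out_nofree label exists because this series moves vcpu_load()/vcpu_put() from the generic kvm_vcpu_ioctl() into the arch handlers, and vcpu_load() can no longer fail: once the vCPU is loaded at the top of the function, every exit path has to go through vcpu_put(), including the memdup_user() failures that used to return directly. The resulting shape, as a sketch (vcpu_do_something is a placeholder, not a real helper):

        int kvm_arch_vcpu_ioctl_example(struct kvm_vcpu *vcpu, void __user *argp)
        {
                int r;

                vcpu_load(vcpu);        /* now returns void; cannot fail */
                r = vcpu_do_something(vcpu, argp);
                vcpu_put(vcpu);
                return r;
        }
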
 
@@ -4339,6 +4378,36 @@ set_identity_unlock:
                r = kvm_vm_ioctl_enable_cap(kvm, &cap);
                break;
        }
+       case KVM_MEMORY_ENCRYPT_OP: {
+               r = -ENOTTY;
+               if (kvm_x86_ops->mem_enc_op)
+                       r = kvm_x86_ops->mem_enc_op(kvm, argp);
+               break;
+       }
+       case KVM_MEMORY_ENCRYPT_REG_REGION: {
+               struct kvm_enc_region region;
+
+               r = -EFAULT;
+               if (copy_from_user(&region, argp, sizeof(region)))
+                       goto out;
+
+               r = -ENOTTY;
+               if (kvm_x86_ops->mem_enc_reg_region)
+                       r = kvm_x86_ops->mem_enc_reg_region(kvm, &region);
+               break;
+       }
+       case KVM_MEMORY_ENCRYPT_UNREG_REGION: {
+               struct kvm_enc_region region;
+
+               r = -EFAULT;
+               if (copy_from_user(&region, argp, sizeof(region)))
+                       goto out;
+
+               r = -ENOTTY;
+               if (kvm_x86_ops->mem_enc_unreg_region)
+                       r = kvm_x86_ops->mem_enc_unreg_region(kvm, &region);
+               break;
+       }
        default:
                r = -ENOTTY;
        }
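
The three new VM ioctls just forward to optional kvm_x86_ops hooks, so they keep returning -ENOTTY unless the vendor module (e.g. SEV-capable SVM) implements them. A hypothetical userspace registration of encrypted guest RAM (vm_fd, guest_ram and guest_ram_size are assumptions, not from this patch):

        struct kvm_enc_region region = {
                .addr = (__u64)(unsigned long)guest_ram, /* userspace VA backing guest memory */
                .size = guest_ram_size,
        };

        if (ioctl(vm_fd, KVM_MEMORY_ENCRYPT_REG_REGION, &region))
                perror("KVM_MEMORY_ENCRYPT_REG_REGION");
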
@@ -5747,7 +5816,8 @@ int x86_emulate_instruction(struct kvm_vcpu *vcpu,
                 * handle watchpoints yet, those would be handled in
                 * the emulate_ops.
                 */
-               if (kvm_vcpu_check_breakpoint(vcpu, &r))
+               if (!(emulation_type & EMULTYPE_SKIP) &&
+                   kvm_vcpu_check_breakpoint(vcpu, &r))
                        return r;
 
                ctxt->interruptibility = 0;
@@ -6536,6 +6606,7 @@ static int inject_pending_event(struct kvm_vcpu *vcpu, bool req_int_win)
                kvm_x86_ops->queue_exception(vcpu);
        } else if (vcpu->arch.smi_pending && !is_smm(vcpu) && kvm_x86_ops->smi_allowed(vcpu)) {
                vcpu->arch.smi_pending = false;
+               ++vcpu->arch.smi_count;
                enter_smm(vcpu);
        } else if (vcpu->arch.nmi_pending && kvm_x86_ops->nmi_allowed(vcpu)) {
                --vcpu->arch.nmi_pending;
@@ -6837,7 +6908,7 @@ static void vcpu_scan_ioapic(struct kvm_vcpu *vcpu)
        if (irqchip_split(vcpu->kvm))
                kvm_scan_ioapic_routes(vcpu, vcpu->arch.ioapic_handled_vectors);
        else {
-               if (kvm_x86_ops->sync_pir_to_irr && vcpu->arch.apicv_active)
+               if (vcpu->arch.apicv_active)
                        kvm_x86_ops->sync_pir_to_irr(vcpu);
                kvm_ioapic_scan_entry(vcpu, vcpu->arch.ioapic_handled_vectors);
        }
@@ -6846,12 +6917,6 @@ static void vcpu_scan_ioapic(struct kvm_vcpu *vcpu)
        kvm_x86_ops->load_eoi_exitmap(vcpu, eoi_exit_bitmap);
 }
 
-static void kvm_vcpu_flush_tlb(struct kvm_vcpu *vcpu)
-{
-       ++vcpu->stat.tlb_flush;
-       kvm_x86_ops->tlb_flush(vcpu);
-}
-
 void kvm_arch_mmu_notifier_invalidate_range(struct kvm *kvm,
                unsigned long start, unsigned long end)
 {
@@ -6920,7 +6985,7 @@ static int vcpu_enter_guest(struct kvm_vcpu *vcpu)
                if (kvm_check_request(KVM_REQ_MMU_SYNC, vcpu))
                        kvm_mmu_sync_roots(vcpu);
                if (kvm_check_request(KVM_REQ_TLB_FLUSH, vcpu))
-                       kvm_vcpu_flush_tlb(vcpu);
+                       kvm_vcpu_flush_tlb(vcpu, true);
                if (kvm_check_request(KVM_REQ_REPORT_TPR_ACCESS, vcpu)) {
                        vcpu->run->exit_reason = KVM_EXIT_TPR_ACCESS;
                        r = 0;
@@ -7069,10 +7134,8 @@ static int vcpu_enter_guest(struct kvm_vcpu *vcpu)
         * This handles the case where a posted interrupt was
         * notified with kvm_vcpu_kick.
         */
-       if (kvm_lapic_enabled(vcpu)) {
-               if (kvm_x86_ops->sync_pir_to_irr && vcpu->arch.apicv_active)
-                       kvm_x86_ops->sync_pir_to_irr(vcpu);
-       }
+       if (kvm_lapic_enabled(vcpu) && vcpu->arch.apicv_active)
+               kvm_x86_ops->sync_pir_to_irr(vcpu);
 
        if (vcpu->mode == EXITING_GUEST_MODE || kvm_request_pending(vcpu)
            || need_resched() || signal_pending(current)) {
@@ -7093,7 +7156,8 @@ static int vcpu_enter_guest(struct kvm_vcpu *vcpu)
        }
 
        trace_kvm_entry(vcpu->vcpu_id);
-       wait_lapic_expire(vcpu);
+       if (lapic_timer_advance_ns)
+               wait_lapic_expire(vcpu);
        guest_enter_irqoff();
 
        if (unlikely(vcpu->arch.switch_db_regs)) {
@@ -7354,8 +7418,8 @@ int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
 {
        int r;
 
+       vcpu_load(vcpu);
        kvm_sigset_activate(vcpu);
-
        kvm_load_guest_fpu(vcpu);
 
        if (unlikely(vcpu->arch.mp_state == KVM_MP_STATE_UNINITIALIZED)) {
@@ -7402,11 +7466,14 @@ out:
        post_kvm_run_save(vcpu);
        kvm_sigset_deactivate(vcpu);
 
+       vcpu_put(vcpu);
        return r;
 }
 
 int kvm_arch_vcpu_ioctl_get_regs(struct kvm_vcpu *vcpu, struct kvm_regs *regs)
 {
+       vcpu_load(vcpu);
+
        if (vcpu->arch.emulate_regs_need_sync_to_vcpu) {
                /*
                 * We are here if userspace calls get_regs() in the middle of
@@ -7440,11 +7507,14 @@ int kvm_arch_vcpu_ioctl_get_regs(struct kvm_vcpu *vcpu, struct kvm_regs *regs)
        regs->rip = kvm_rip_read(vcpu);
        regs->rflags = kvm_get_rflags(vcpu);
 
+       vcpu_put(vcpu);
        return 0;
 }
 
 int kvm_arch_vcpu_ioctl_set_regs(struct kvm_vcpu *vcpu, struct kvm_regs *regs)
 {
+       vcpu_load(vcpu);
+
        vcpu->arch.emulate_regs_need_sync_from_vcpu = true;
        vcpu->arch.emulate_regs_need_sync_to_vcpu = false;
 
@@ -7474,6 +7544,7 @@ int kvm_arch_vcpu_ioctl_set_regs(struct kvm_vcpu *vcpu, struct kvm_regs *regs)
 
        kvm_make_request(KVM_REQ_EVENT, vcpu);
 
+       vcpu_put(vcpu);
        return 0;
 }
 
@@ -7492,6 +7563,8 @@ int kvm_arch_vcpu_ioctl_get_sregs(struct kvm_vcpu *vcpu,
 {
        struct desc_ptr dt;
 
+       vcpu_load(vcpu);
+
        kvm_get_segment(vcpu, &sregs->cs, VCPU_SREG_CS);
        kvm_get_segment(vcpu, &sregs->ds, VCPU_SREG_DS);
        kvm_get_segment(vcpu, &sregs->es, VCPU_SREG_ES);
@@ -7523,12 +7596,15 @@ int kvm_arch_vcpu_ioctl_get_sregs(struct kvm_vcpu *vcpu,
                set_bit(vcpu->arch.interrupt.nr,
                        (unsigned long *)sregs->interrupt_bitmap);
 
+       vcpu_put(vcpu);
        return 0;
 }
 
 int kvm_arch_vcpu_ioctl_get_mpstate(struct kvm_vcpu *vcpu,
                                    struct kvm_mp_state *mp_state)
 {
+       vcpu_load(vcpu);
+
        kvm_apic_accept_events(vcpu);
        if (vcpu->arch.mp_state == KVM_MP_STATE_HALTED &&
                                        vcpu->arch.pv.pv_unhalted)
@@ -7536,21 +7612,26 @@ int kvm_arch_vcpu_ioctl_get_mpstate(struct kvm_vcpu *vcpu,
        else
                mp_state->mp_state = vcpu->arch.mp_state;
 
+       vcpu_put(vcpu);
        return 0;
 }
 
 int kvm_arch_vcpu_ioctl_set_mpstate(struct kvm_vcpu *vcpu,
                                    struct kvm_mp_state *mp_state)
 {
+       int ret = -EINVAL;
+
+       vcpu_load(vcpu);
+
        if (!lapic_in_kernel(vcpu) &&
            mp_state->mp_state != KVM_MP_STATE_RUNNABLE)
-               return -EINVAL;
+               goto out;
 
        /* INITs are latched while in SMM */
        if ((is_smm(vcpu) || vcpu->arch.smi_pending) &&
            (mp_state->mp_state == KVM_MP_STATE_SIPI_RECEIVED ||
             mp_state->mp_state == KVM_MP_STATE_INIT_RECEIVED))
-               return -EINVAL;
+               goto out;
 
        if (mp_state->mp_state == KVM_MP_STATE_SIPI_RECEIVED) {
                vcpu->arch.mp_state = KVM_MP_STATE_INIT_RECEIVED;
@@ -7558,7 +7639,11 @@ int kvm_arch_vcpu_ioctl_set_mpstate(struct kvm_vcpu *vcpu,
        } else
                vcpu->arch.mp_state = mp_state->mp_state;
        kvm_make_request(KVM_REQ_EVENT, vcpu);
-       return 0;
+
+       ret = 0;
+out:
+       vcpu_put(vcpu);
+       return ret;
 }
 
 int kvm_task_switch(struct kvm_vcpu *vcpu, u16 tss_selector, int idt_index,
@@ -7612,10 +7697,13 @@ int kvm_arch_vcpu_ioctl_set_sregs(struct kvm_vcpu *vcpu,
        int mmu_reset_needed = 0;
        int pending_vec, max_bits, idx;
        struct desc_ptr dt;
+       int ret = -EINVAL;
+
+       vcpu_load(vcpu);
 
        if (!guest_cpuid_has(vcpu, X86_FEATURE_XSAVE) &&
                        (sregs->cr4 & X86_CR4_OSXSAVE))
-               return -EINVAL;
+               goto out;
 
-       if (kvm_valid_sregs(vcpu, sregs))
-               return -EINVAL;
+       if (kvm_valid_sregs(vcpu, sregs))
+               goto out;
@@ -7623,7 +7711,7 @@ int kvm_arch_vcpu_ioctl_set_sregs(struct kvm_vcpu *vcpu,
        apic_base_msr.data = sregs->apic_base;
        apic_base_msr.host_initiated = true;
        if (kvm_set_apic_base(vcpu, &apic_base_msr))
-               return -EINVAL;
+               goto out;
 
        dt.size = sregs->idt.limit;
        dt.address = sregs->idt.base;
@@ -7689,7 +7777,10 @@ int kvm_arch_vcpu_ioctl_set_sregs(struct kvm_vcpu *vcpu,
 
        kvm_make_request(KVM_REQ_EVENT, vcpu);
 
-       return 0;
+       ret = 0;
+out:
+       vcpu_put(vcpu);
+       return ret;
 }
 
 int kvm_arch_vcpu_ioctl_set_guest_debug(struct kvm_vcpu *vcpu,
@@ -7698,6 +7789,8 @@ int kvm_arch_vcpu_ioctl_set_guest_debug(struct kvm_vcpu *vcpu,
        unsigned long rflags;
        int i, r;
 
+       vcpu_load(vcpu);
+
        if (dbg->control & (KVM_GUESTDBG_INJECT_DB | KVM_GUESTDBG_INJECT_BP)) {
                r = -EBUSY;
                if (vcpu->arch.exception.pending)
@@ -7743,7 +7836,7 @@ int kvm_arch_vcpu_ioctl_set_guest_debug(struct kvm_vcpu *vcpu,
        r = 0;
 
 out:
-
+       vcpu_put(vcpu);
        return r;
 }
 
@@ -7757,6 +7850,8 @@ int kvm_arch_vcpu_ioctl_translate(struct kvm_vcpu *vcpu,
        gpa_t gpa;
        int idx;
 
+       vcpu_load(vcpu);
+
        idx = srcu_read_lock(&vcpu->kvm->srcu);
        gpa = kvm_mmu_gva_to_gpa_system(vcpu, vaddr, NULL);
        srcu_read_unlock(&vcpu->kvm->srcu, idx);
@@ -7765,14 +7860,17 @@ int kvm_arch_vcpu_ioctl_translate(struct kvm_vcpu *vcpu,
        tr->writeable = 1;
        tr->usermode = 0;
 
+       vcpu_put(vcpu);
        return 0;
 }
 
 int kvm_arch_vcpu_ioctl_get_fpu(struct kvm_vcpu *vcpu, struct kvm_fpu *fpu)
 {
-       struct fxregs_state *fxsave =
-                       &vcpu->arch.guest_fpu.state.fxsave;
+       struct fxregs_state *fxsave;
+
+       vcpu_load(vcpu);
 
+       fxsave = &vcpu->arch.guest_fpu.state.fxsave;
        memcpy(fpu->fpr, fxsave->st_space, 128);
        fpu->fcw = fxsave->cwd;
        fpu->fsw = fxsave->swd;
@@ -7782,13 +7880,17 @@ int kvm_arch_vcpu_ioctl_get_fpu(struct kvm_vcpu *vcpu, struct kvm_fpu *fpu)
        fpu->last_dp = fxsave->rdp;
        memcpy(fpu->xmm, fxsave->xmm_space, sizeof fxsave->xmm_space);
 
+       vcpu_put(vcpu);
        return 0;
 }
 
 int kvm_arch_vcpu_ioctl_set_fpu(struct kvm_vcpu *vcpu, struct kvm_fpu *fpu)
 {
-       struct fxregs_state *fxsave =
-                       &vcpu->arch.guest_fpu.state.fxsave;
+       struct fxregs_state *fxsave;
+
+       vcpu_load(vcpu);
+
+       fxsave = &vcpu->arch.guest_fpu.state.fxsave;
 
        memcpy(fxsave->st_space, fpu->fpr, 128);
        fxsave->cwd = fpu->fcw;
@@ -7799,6 +7901,7 @@ int kvm_arch_vcpu_ioctl_set_fpu(struct kvm_vcpu *vcpu, struct kvm_fpu *fpu)
        fxsave->rdp = fpu->last_dp;
        memcpy(fxsave->xmm_space, fpu->xmm, sizeof fxsave->xmm_space);
 
+       vcpu_put(vcpu);
        return 0;
 }
 
@@ -7867,16 +7970,12 @@ struct kvm_vcpu *kvm_arch_vcpu_create(struct kvm *kvm,
 
 int kvm_arch_vcpu_setup(struct kvm_vcpu *vcpu)
 {
-       int r;
-
        kvm_vcpu_mtrr_init(vcpu);
-       r = vcpu_load(vcpu);
-       if (r)
-               return r;
+       vcpu_load(vcpu);
        kvm_vcpu_reset(vcpu, false);
        kvm_mmu_setup(vcpu);
        vcpu_put(vcpu);
-       return r;
+       return 0;
 }
 
 void kvm_arch_vcpu_postcreate(struct kvm_vcpu *vcpu)
@@ -7886,13 +7985,15 @@ void kvm_arch_vcpu_postcreate(struct kvm_vcpu *vcpu)
 
        kvm_hv_vcpu_postcreate(vcpu);
 
-       if (vcpu_load(vcpu))
+       if (mutex_lock_killable(&vcpu->mutex))
                return;
+       vcpu_load(vcpu);
        msr.data = 0x0;
        msr.index = MSR_IA32_TSC;
        msr.host_initiated = true;
        kvm_write_tsc(vcpu, &msr);
        vcpu_put(vcpu);
+       mutex_unlock(&vcpu->mutex);
 
        if (!kvmclock_periodic_sync)
                return;
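
The explicit mutex_lock_killable() here follows from the same rework: vcpu_load() no longer takes vcpu->mutex itself (the generic kvm_vcpu_ioctl() path holds it around the arch handlers), so a caller outside any ioctl, such as postcreate, has to pair the two explicitly. As a pattern:

        /* Sketch: touching loaded vCPU state outside the vcpu ioctl path. */
        if (mutex_lock_killable(&vcpu->mutex))
                return;
        vcpu_load(vcpu);
        /* ... operate on the loaded vCPU ... */
        vcpu_put(vcpu);
        mutex_unlock(&vcpu->mutex);
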
@@ -7903,11 +8004,9 @@ void kvm_arch_vcpu_postcreate(struct kvm_vcpu *vcpu)
 
 void kvm_arch_vcpu_destroy(struct kvm_vcpu *vcpu)
 {
-       int r;
        vcpu->arch.apf.msr_val = 0;
 
-       r = vcpu_load(vcpu);
-       BUG_ON(r);
+       vcpu_load(vcpu);
        kvm_mmu_unload(vcpu);
        vcpu_put(vcpu);
 
@@ -7919,6 +8018,7 @@ void kvm_vcpu_reset(struct kvm_vcpu *vcpu, bool init_event)
        vcpu->arch.hflags = 0;
 
        vcpu->arch.smi_pending = 0;
+       vcpu->arch.smi_count = 0;
        atomic_set(&vcpu->arch.nmi_queued, 0);
        vcpu->arch.nmi_pending = 0;
        vcpu->arch.nmi_injected = false;
@@ -8278,9 +8378,7 @@ int kvm_arch_init_vm(struct kvm *kvm, unsigned long type)
 
 static void kvm_unload_vcpu_mmu(struct kvm_vcpu *vcpu)
 {
-       int r;
-       r = vcpu_load(vcpu);
-       BUG_ON(r);
+       vcpu_load(vcpu);
        kvm_mmu_unload(vcpu);
        vcpu_put(vcpu);
 }