KVM: Use eoi to track RTC interrupt delivery status
diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c
index f71500af1f813245bb12092665ac7dea3ba5f24f..ae9744d03c8329818b2018e95210bfa385144e11 100644
--- a/arch/x86/kvm/x86.c
+++ b/arch/x86/kvm/x86.c
@@ -162,8 +162,6 @@ u64 __read_mostly host_xcr0;
 
 static int emulator_fix_hypercall(struct x86_emulate_ctxt *ctxt);
 
-static int kvm_vcpu_reset(struct kvm_vcpu *vcpu);
-
 static inline void kvm_async_pf_hash_reset(struct kvm_vcpu *vcpu)
 {
        int i;
@@ -263,6 +261,13 @@ void kvm_set_apic_base(struct kvm_vcpu *vcpu, u64 data)
 }
 EXPORT_SYMBOL_GPL(kvm_set_apic_base);
 
+asmlinkage void kvm_spurious_fault(void)
+{
+       /* Fault while not rebooting.  We want the trace. */
+       BUG();
+}
+EXPORT_SYMBOL_GPL(kvm_spurious_fault);
+
 #define EXCPT_BENIGN           0
 #define EXCPT_CONTRIBUTORY     1
 #define EXCPT_PF               2
@@ -1079,6 +1084,10 @@ static void kvm_set_tsc_khz(struct kvm_vcpu *vcpu, u32 this_tsc_khz)
        u32 thresh_lo, thresh_hi;
        int use_scaling = 0;
 
+       /* tsc_khz can be zero if TSC calibration fails */
+       if (this_tsc_khz == 0)
+               return;
+
        /* Compute a scale to convert nanoseconds in TSC cycles */
        kvm_get_time_scale(this_tsc_khz, NSEC_PER_SEC / 1000,
                           &vcpu->arch.virtual_tsc_shift,
@@ -1156,20 +1165,23 @@ void kvm_write_tsc(struct kvm_vcpu *vcpu, struct msr_data *msr)
        ns = get_kernel_ns();
        elapsed = ns - kvm->arch.last_tsc_nsec;
 
-       /* n.b - signed multiplication and division required */
-       usdiff = data - kvm->arch.last_tsc_write;
+       if (vcpu->arch.virtual_tsc_khz) {
+               /* n.b - signed multiplication and division required */
+               usdiff = data - kvm->arch.last_tsc_write;
 #ifdef CONFIG_X86_64
-       usdiff = (usdiff * 1000) / vcpu->arch.virtual_tsc_khz;
+               usdiff = (usdiff * 1000) / vcpu->arch.virtual_tsc_khz;
 #else
-       /* do_div() only does unsigned */
-       asm("idivl %2; xor %%edx, %%edx"
-           : "=A"(usdiff)
-           : "A"(usdiff * 1000), "rm"(vcpu->arch.virtual_tsc_khz));
+               /* do_div() only does unsigned */
+               asm("idivl %2; xor %%edx, %%edx"
+               : "=A"(usdiff)
+               : "A"(usdiff * 1000), "rm"(vcpu->arch.virtual_tsc_khz));
 #endif
-       do_div(elapsed, 1000);
-       usdiff -= elapsed;
-       if (usdiff < 0)
-               usdiff = -usdiff;
+               do_div(elapsed, 1000);
+               usdiff -= elapsed;
+               if (usdiff < 0)
+                       usdiff = -usdiff;
+       } else
+               usdiff = USEC_PER_SEC; /* disable TSC match window below */
 
        /*
         * Special case: TSC write with a small delta (1 second) of virtual
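
Note on the hunk above: if virtual_tsc_khz is zero (TSC calibration failed, see the kvm_set_tsc_khz guard earlier in this diff), the elapsed-time math is skipped and usdiff is forced to USEC_PER_SEC so the TSC match window below never triggers. The signed arithmetic matters because the guest may write a TSC value slightly behind the previous one, and do_div() only handles unsigned operands. A standalone user-space sketch of the same microsecond-delta computation, with made-up example values (not kernel code):

/* Standalone sketch of the usdiff computation above; values are made up,
 * it only shows why the delta must stay signed. */
#include <stdio.h>
#include <stdint.h>

int main(void)
{
	uint32_t virtual_tsc_khz = 2000000;        /* assumed 2 GHz guest TSC */
	int64_t last_tsc_write   = 10000000000LL;  /* previous guest TSC write */
	int64_t data             = 9999990000LL;   /* new write, slightly behind */
	int64_t elapsed_ns       = 4000;           /* host ns since the last write */

	/* "n.b - signed multiplication and division required" */
	int64_t usdiff = data - last_tsc_write;           /* may be negative */
	usdiff = (usdiff * 1000) / virtual_tsc_khz;       /* TSC ticks -> usecs */
	usdiff -= elapsed_ns / 1000;                      /* minus elapsed usecs */
	if (usdiff < 0)
		usdiff = -usdiff;

	/* |usdiff| below USEC_PER_SEC falls inside the TSC match window */
	printf("usdiff = %lld us\n", (long long)usdiff);
	return 0;
}
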
@@ -1406,25 +1418,15 @@ static int kvm_guest_time_update(struct kvm_vcpu *v)
        unsigned long flags, this_tsc_khz;
        struct kvm_vcpu_arch *vcpu = &v->arch;
        struct kvm_arch *ka = &v->kvm->arch;
-       void *shared_kaddr;
        s64 kernel_ns, max_kernel_ns;
        u64 tsc_timestamp, host_tsc;
-       struct pvclock_vcpu_time_info *guest_hv_clock;
+       struct pvclock_vcpu_time_info guest_hv_clock;
        u8 pvclock_flags;
        bool use_master_clock;
 
        kernel_ns = 0;
        host_tsc = 0;
 
-       /* Keep irq disabled to prevent changes to the clock */
-       local_irq_save(flags);
-       this_tsc_khz = __get_cpu_var(cpu_tsc_khz);
-       if (unlikely(this_tsc_khz == 0)) {
-               local_irq_restore(flags);
-               kvm_make_request(KVM_REQ_CLOCK_UPDATE, v);
-               return 1;
-       }
-
        /*
         * If the host uses TSC clock, then passthrough TSC as stable
         * to the guest.
@@ -1436,6 +1438,15 @@ static int kvm_guest_time_update(struct kvm_vcpu *v)
                kernel_ns = ka->master_kernel_ns;
        }
        spin_unlock(&ka->pvclock_gtod_sync_lock);
+
+       /* Keep irq disabled to prevent changes to the clock */
+       local_irq_save(flags);
+       this_tsc_khz = __get_cpu_var(cpu_tsc_khz);
+       if (unlikely(this_tsc_khz == 0)) {
+               local_irq_restore(flags);
+               kvm_make_request(KVM_REQ_CLOCK_UPDATE, v);
+               return 1;
+       }
        if (!use_master_clock) {
                host_tsc = native_read_tsc();
                kernel_ns = get_kernel_ns();
@@ -1463,7 +1474,7 @@ static int kvm_guest_time_update(struct kvm_vcpu *v)
 
        local_irq_restore(flags);
 
-       if (!vcpu->time_page)
+       if (!vcpu->pv_time_enabled)
                return 0;
 
        /*
@@ -1525,12 +1536,12 @@ static int kvm_guest_time_update(struct kvm_vcpu *v)
         */
        vcpu->hv_clock.version += 2;
 
-       shared_kaddr = kmap_atomic(vcpu->time_page);
-
-       guest_hv_clock = shared_kaddr + vcpu->time_offset;
+       if (unlikely(kvm_read_guest_cached(v->kvm, &vcpu->pv_time,
+               &guest_hv_clock, sizeof(guest_hv_clock))))
+               return 0;
 
        /* retain PVCLOCK_GUEST_STOPPED if set in guest copy */
-       pvclock_flags = (guest_hv_clock->flags & PVCLOCK_GUEST_STOPPED);
+       pvclock_flags = (guest_hv_clock.flags & PVCLOCK_GUEST_STOPPED);
 
        if (vcpu->pvclock_set_guest_stopped_request) {
                pvclock_flags |= PVCLOCK_GUEST_STOPPED;
@@ -1543,12 +1554,9 @@ static int kvm_guest_time_update(struct kvm_vcpu *v)
 
        vcpu->hv_clock.flags = pvclock_flags;
 
-       memcpy(shared_kaddr + vcpu->time_offset, &vcpu->hv_clock,
-              sizeof(vcpu->hv_clock));
-
-       kunmap_atomic(shared_kaddr);
-
-       mark_page_dirty(v->kvm, vcpu->time >> PAGE_SHIFT);
+       kvm_write_guest_cached(v->kvm, &vcpu->pv_time,
+                               &vcpu->hv_clock,
+                               sizeof(vcpu->hv_clock));
        return 0;
 }
 
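
Note on the hunk above: the pinned time_page plus kmap_atomic() is replaced by a gfn_to_hva_cache, read with kvm_read_guest_cached() to preserve the guest-visible PVCLOCK_GUEST_STOPPED flag and written back with kvm_write_guest_cached(). The hv_clock.version += 2 update earlier in this function is part of the pvclock ABI, where version behaves like a seqcount: a guest reader retries while the value is odd or changes across the data read. A minimal user-space sketch of that reader protocol, using a simplified stand-in structure rather than the real pvclock_vcpu_time_info:

/* User-space sketch of the guest-side pvclock "version" reader protocol; the
 * structure is a simplified stand-in, not the real pvclock_vcpu_time_info. */
#include <stdio.h>
#include <stdint.h>

struct fake_hv_clock {
	volatile uint32_t version;      /* odd while an update is in progress */
	volatile uint64_t tsc_timestamp;
	volatile uint64_t system_time;
};

static uint64_t read_system_time(const struct fake_hv_clock *c)
{
	uint32_t v;
	uint64_t t;

	do {
		do {
			v = c->version;
		} while (v & 1);        /* odd version: writer active, spin */
		t = c->system_time;     /* a real guest also uses barriers here */
	} while (c->version != v);      /* changed underneath us: retry */

	return t;
}

int main(void)
{
	struct fake_hv_clock c = { .version = 2, .system_time = 123456789 };

	printf("system_time = %llu\n", (unsigned long long)read_system_time(&c));
	return 0;
}
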
@@ -1837,10 +1845,7 @@ static int kvm_pv_enable_async_pf(struct kvm_vcpu *vcpu, u64 data)
 
 static void kvmclock_reset(struct kvm_vcpu *vcpu)
 {
-       if (vcpu->arch.time_page) {
-               kvm_release_page_dirty(vcpu->arch.time_page);
-               vcpu->arch.time_page = NULL;
-       }
+       vcpu->arch.pv_time_enabled = false;
 }
 
 static void accumulate_steal_time(struct kvm_vcpu *vcpu)
@@ -1947,6 +1952,7 @@ int kvm_set_msr_common(struct kvm_vcpu *vcpu, struct msr_data *msr_info)
                break;
        case MSR_KVM_SYSTEM_TIME_NEW:
        case MSR_KVM_SYSTEM_TIME: {
+               u64 gpa_offset;
                kvmclock_reset(vcpu);
 
                vcpu->arch.time = data;
@@ -1956,14 +1962,17 @@ int kvm_set_msr_common(struct kvm_vcpu *vcpu, struct msr_data *msr_info)
                if (!(data & 1))
                        break;
 
-               /* ...but clean it before doing the actual write */
-               vcpu->arch.time_offset = data & ~(PAGE_MASK | 1);
+               gpa_offset = data & ~(PAGE_MASK | 1);
 
-               vcpu->arch.time_page =
-                               gfn_to_page(vcpu->kvm, data >> PAGE_SHIFT);
+               /* Check that the address is 32-byte aligned. */
+               if (gpa_offset & (sizeof(struct pvclock_vcpu_time_info) - 1))
+                       break;
 
-               if (is_error_page(vcpu->arch.time_page))
-                       vcpu->arch.time_page = NULL;
+               if (kvm_gfn_to_hva_cache_init(vcpu->kvm,
+                    &vcpu->arch.pv_time, data & ~1ULL))
+                       vcpu->arch.pv_time_enabled = false;
+               else
+                       vcpu->arch.pv_time_enabled = true;
 
                break;
        }
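
Note on the hunk above: sizeof(struct pvclock_vcpu_time_info) is 32 bytes, a power of two, so gpa_offset & (sizeof(...) - 1) is non-zero exactly when the guest-supplied address is not 32-byte aligned; a 32-byte-aligned 32-byte record also can never straddle a 4 KiB page boundary, which keeps it coverable by the single-gfn hva cache set up just below. A standalone illustration of the alignment idiom (example offsets only):

/* The "& (size - 1)" test above works because the pvclock record size is a
 * power of two (32 bytes); the example offsets below are arbitrary. */
#include <stdio.h>
#include <stdint.h>

#define RECORD_SIZE 32u    /* sizeof(struct pvclock_vcpu_time_info) */

static int misaligned(uint64_t gpa_offset)
{
	return (gpa_offset & (RECORD_SIZE - 1)) != 0;
}

int main(void)
{
	printf("offset 0x040 misaligned: %d\n", misaligned(0x040));  /* 0, accepted */
	printf("offset 0x044 misaligned: %d\n", misaligned(0x044));  /* 1, rejected */
	/* A 32-byte-aligned 32-byte record also never crosses a 4 KiB boundary. */
	return 0;
}
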
@@ -2038,7 +2047,7 @@ int kvm_set_msr_common(struct kvm_vcpu *vcpu, struct msr_data *msr_info)
        case MSR_P6_EVNTSEL0:
        case MSR_P6_EVNTSEL1:
                if (kvm_pmu_msr(vcpu, msr))
-                       return kvm_pmu_set_msr(vcpu, msr, data);
+                       return kvm_pmu_set_msr(vcpu, msr_info);
 
                if (pr || data != 0)
                        vcpu_unimpl(vcpu, "disabled perfctr wrmsr: "
@@ -2084,7 +2093,7 @@ int kvm_set_msr_common(struct kvm_vcpu *vcpu, struct msr_data *msr_info)
                if (msr && (msr == vcpu->kvm->arch.xen_hvm_config.msr))
                        return xen_hvm_config(vcpu, data);
                if (kvm_pmu_msr(vcpu, msr))
-                       return kvm_pmu_set_msr(vcpu, msr, data);
+                       return kvm_pmu_set_msr(vcpu, msr_info);
                if (!ignore_msrs) {
                        vcpu_unimpl(vcpu, "unhandled wrmsr: 0x%x data %llx\n",
                                    msr, data);
@@ -2700,7 +2709,7 @@ static int kvm_vcpu_ioctl_set_lapic(struct kvm_vcpu *vcpu,
 static int kvm_vcpu_ioctl_interrupt(struct kvm_vcpu *vcpu,
                                    struct kvm_interrupt *irq)
 {
-       if (irq->irq < 0 || irq->irq >= KVM_NR_INTERRUPTS)
+       if (irq->irq >= KVM_NR_INTERRUPTS)
                return -EINVAL;
        if (irqchip_in_kernel(vcpu->kvm))
                return -ENXIO;
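
Note on the hunk above: the dropped irq->irq < 0 test was dead code, since irq in struct kvm_interrupt is a __u32 and can never be negative; the remaining >= KVM_NR_INTERRUPTS bound is sufficient. A tiny illustration (KVM_NR_INTERRUPTS shown with its x86 value):

/* Why "irq < 0" was dead code: kvm_interrupt.irq is a __u32, so a negative
 * userspace value wraps to a large unsigned one that the upper bound rejects. */
#include <stdio.h>
#include <stdint.h>

#define KVM_NR_INTERRUPTS 256    /* x86 value */

int main(void)
{
	uint32_t irq = (uint32_t)-1;    /* what a "negative" irq becomes */

	printf("irq < 0: %d\n", irq < 0u);                                   /* always 0 */
	printf("irq >= KVM_NR_INTERRUPTS: %d\n", irq >= KVM_NR_INTERRUPTS);  /* 1 */
	return 0;
}
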
@@ -2823,10 +2832,9 @@ static void kvm_vcpu_ioctl_x86_get_vcpu_events(struct kvm_vcpu *vcpu,
        events->nmi.masked = kvm_x86_ops->get_nmi_mask(vcpu);
        events->nmi.pad = 0;
 
-       events->sipi_vector = vcpu->arch.sipi_vector;
+       events->sipi_vector = 0; /* never valid when reporting to user space */
 
        events->flags = (KVM_VCPUEVENT_VALID_NMI_PENDING
-                        | KVM_VCPUEVENT_VALID_SIPI_VECTOR
                         | KVM_VCPUEVENT_VALID_SHADOW);
        memset(&events->reserved, 0, sizeof(events->reserved));
 }
@@ -2857,8 +2865,9 @@ static int kvm_vcpu_ioctl_x86_set_vcpu_events(struct kvm_vcpu *vcpu,
                vcpu->arch.nmi_pending = events->nmi.pending;
        kvm_x86_ops->set_nmi_mask(vcpu, events->nmi.masked);
 
-       if (events->flags & KVM_VCPUEVENT_VALID_SIPI_VECTOR)
-               vcpu->arch.sipi_vector = events->sipi_vector;
+       if (events->flags & KVM_VCPUEVENT_VALID_SIPI_VECTOR &&
+           kvm_vcpu_has_lapic(vcpu))
+               vcpu->arch.apic->sipi_vector = events->sipi_vector;
 
        kvm_make_request(KVM_REQ_EVENT, vcpu);
 
@@ -2967,7 +2976,7 @@ static int kvm_vcpu_ioctl_x86_set_xcrs(struct kvm_vcpu *vcpu,
  */
 static int kvm_set_guest_paused(struct kvm_vcpu *vcpu)
 {
-       if (!vcpu->arch.time_page)
+       if (!vcpu->arch.pv_time_enabled)
                return -EINVAL;
        vcpu->arch.pvclock_set_guest_stopped_request = true;
        kvm_make_request(KVM_REQ_CLOCK_UPDATE, vcpu);
@@ -3482,13 +3491,15 @@ out:
        return r;
 }
 
-int kvm_vm_ioctl_irq_line(struct kvm *kvm, struct kvm_irq_level *irq_event)
+int kvm_vm_ioctl_irq_line(struct kvm *kvm, struct kvm_irq_level *irq_event,
+                       bool line_status)
 {
        if (!irqchip_in_kernel(kvm))
                return -ENXIO;
 
        irq_event->status = kvm_set_irq(kvm, KVM_USERSPACE_IRQ_SOURCE_ID,
-                                       irq_event->irq, irq_event->level);
+                                       irq_event->irq, irq_event->level,
+                                       line_status);
        return 0;
 }
 
@@ -4756,11 +4767,15 @@ static int handle_emulation_failure(struct kvm_vcpu *vcpu)
 }
 
 static bool reexecute_instruction(struct kvm_vcpu *vcpu, gva_t cr2,
-                                 bool write_fault_to_shadow_pgtable)
+                                 bool write_fault_to_shadow_pgtable,
+                                 int emulation_type)
 {
        gpa_t gpa = cr2;
        pfn_t pfn;
 
+       if (emulation_type & EMULTYPE_NO_REEXECUTE)
+               return false;
+
        if (!vcpu->arch.mmu.direct_map) {
                /*
                 * Write permission should be allowed since only
@@ -4903,8 +4918,8 @@ int x86_emulate_instruction(struct kvm_vcpu *vcpu,
                if (r != EMULATION_OK)  {
                        if (emulation_type & EMULTYPE_TRAP_UD)
                                return EMULATE_FAIL;
-                       if (reexecute_instruction(vcpu, cr2,
-                                                 write_fault_to_spt))
+                       if (reexecute_instruction(vcpu, cr2, write_fault_to_spt,
+                                               emulation_type))
                                return EMULATE_DONE;
                        if (emulation_type & EMULTYPE_SKIP)
                                return EMULATE_FAIL;
@@ -4934,7 +4949,8 @@ restart:
                return EMULATE_DONE;
 
        if (r == EMULATION_FAILED) {
-               if (reexecute_instruction(vcpu, cr2, write_fault_to_spt))
+               if (reexecute_instruction(vcpu, cr2, write_fault_to_spt,
+                                       emulation_type))
                        return EMULATE_DONE;
 
                return handle_emulation_failure(vcpu);
@@ -5713,6 +5729,12 @@ static int vcpu_enter_guest(struct kvm_vcpu *vcpu)
        }
 
        if (kvm_check_request(KVM_REQ_EVENT, vcpu) || req_int_win) {
+               kvm_apic_accept_events(vcpu);
+               if (vcpu->arch.mp_state == KVM_MP_STATE_INIT_RECEIVED) {
+                       r = 1;
+                       goto out;
+               }
+
                inject_pending_event(vcpu);
 
                /* enable NMI/IRQ window open exits if needed */
@@ -5847,16 +5869,6 @@ static int __vcpu_run(struct kvm_vcpu *vcpu)
        int r;
        struct kvm *kvm = vcpu->kvm;
 
-       if (unlikely(vcpu->arch.mp_state == KVM_MP_STATE_SIPI_RECEIVED)) {
-               pr_debug("vcpu %d received sipi with vector # %x\n",
-                        vcpu->vcpu_id, vcpu->arch.sipi_vector);
-               kvm_lapic_reset(vcpu);
-               r = kvm_vcpu_reset(vcpu);
-               if (r)
-                       return r;
-               vcpu->arch.mp_state = KVM_MP_STATE_RUNNABLE;
-       }
-
        vcpu->srcu_idx = srcu_read_lock(&kvm->srcu);
        r = vapic_enter(vcpu);
        if (r) {
@@ -5873,8 +5885,8 @@ static int __vcpu_run(struct kvm_vcpu *vcpu)
                        srcu_read_unlock(&kvm->srcu, vcpu->srcu_idx);
                        kvm_vcpu_block(vcpu);
                        vcpu->srcu_idx = srcu_read_lock(&kvm->srcu);
-                       if (kvm_check_request(KVM_REQ_UNHALT, vcpu))
-                       {
+                       if (kvm_check_request(KVM_REQ_UNHALT, vcpu)) {
+                               kvm_apic_accept_events(vcpu);
                                switch(vcpu->arch.mp_state) {
                                case KVM_MP_STATE_HALTED:
                                        vcpu->arch.mp_state =
@@ -5882,7 +5894,8 @@ static int __vcpu_run(struct kvm_vcpu *vcpu)
                                case KVM_MP_STATE_RUNNABLE:
                                        vcpu->arch.apf.halted = false;
                                        break;
-                               case KVM_MP_STATE_SIPI_RECEIVED:
+                               case KVM_MP_STATE_INIT_RECEIVED:
+                                       break;
                                default:
                                        r = -EINTR;
                                        break;
@@ -6017,6 +6030,7 @@ int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
 
        if (unlikely(vcpu->arch.mp_state == KVM_MP_STATE_UNINITIALIZED)) {
                kvm_vcpu_block(vcpu);
+               kvm_apic_accept_events(vcpu);
                clear_bit(KVM_REQ_UNHALT, &vcpu->requests);
                r = -EAGAIN;
                goto out;
@@ -6173,6 +6187,7 @@ int kvm_arch_vcpu_ioctl_get_sregs(struct kvm_vcpu *vcpu,
 int kvm_arch_vcpu_ioctl_get_mpstate(struct kvm_vcpu *vcpu,
                                    struct kvm_mp_state *mp_state)
 {
+       kvm_apic_accept_events(vcpu);
        mp_state->mp_state = vcpu->arch.mp_state;
        return 0;
 }
@@ -6180,7 +6195,15 @@ int kvm_arch_vcpu_ioctl_get_mpstate(struct kvm_vcpu *vcpu,
 int kvm_arch_vcpu_ioctl_set_mpstate(struct kvm_vcpu *vcpu,
                                    struct kvm_mp_state *mp_state)
 {
-       vcpu->arch.mp_state = mp_state->mp_state;
+       if (!kvm_vcpu_has_lapic(vcpu) &&
+           mp_state->mp_state != KVM_MP_STATE_RUNNABLE)
+               return -EINVAL;
+
+       if (mp_state->mp_state == KVM_MP_STATE_SIPI_RECEIVED) {
+               vcpu->arch.mp_state = KVM_MP_STATE_INIT_RECEIVED;
+               set_bit(KVM_APIC_SIPI, &vcpu->arch.apic->pending_events);
+       } else
+               vcpu->arch.mp_state = mp_state->mp_state;
        kvm_make_request(KVM_REQ_EVENT, vcpu);
        return 0;
 }
@@ -6479,9 +6502,8 @@ int kvm_arch_vcpu_setup(struct kvm_vcpu *vcpu)
        r = vcpu_load(vcpu);
        if (r)
                return r;
-       r = kvm_vcpu_reset(vcpu);
-       if (r == 0)
-               r = kvm_mmu_setup(vcpu);
+       kvm_vcpu_reset(vcpu);
+       r = kvm_mmu_setup(vcpu);
        vcpu_put(vcpu);
 
        return r;
@@ -6518,7 +6540,7 @@ void kvm_arch_vcpu_destroy(struct kvm_vcpu *vcpu)
        kvm_x86_ops->vcpu_free(vcpu);
 }
 
-static int kvm_vcpu_reset(struct kvm_vcpu *vcpu)
+void kvm_vcpu_reset(struct kvm_vcpu *vcpu)
 {
        atomic_set(&vcpu->arch.nmi_queued, 0);
        vcpu->arch.nmi_pending = 0;
@@ -6545,7 +6567,18 @@ static int kvm_vcpu_reset(struct kvm_vcpu *vcpu)
        vcpu->arch.regs_avail = ~0;
        vcpu->arch.regs_dirty = ~0;
 
-       return kvm_x86_ops->vcpu_reset(vcpu);
+       kvm_x86_ops->vcpu_reset(vcpu);
+}
+
+void kvm_vcpu_deliver_sipi_vector(struct kvm_vcpu *vcpu, unsigned int vector)
+{
+       struct kvm_segment cs;
+
+       kvm_get_segment(vcpu, &cs, VCPU_SREG_CS);
+       cs.selector = vector << 8;
+       cs.base = vector << 12;
+       kvm_set_segment(vcpu, &cs, VCPU_SREG_CS);
+       kvm_rip_write(vcpu, 0);
 }
 
 int kvm_arch_hardware_enable(void *garbage)
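
Note on the hunk above: kvm_vcpu_deliver_sipi_vector() implements the architectural startup-IPI entry state, real mode with CS.selector = vector << 8, CS.base = vector << 12 and IP = 0, so the AP begins executing at physical address vector << 12 (a 4 KiB-aligned trampoline). A standalone check of that arithmetic with an example vector:

/* Real-mode entry state implied by a SIPI vector, matching the CS setup in
 * kvm_vcpu_deliver_sipi_vector() above (purely illustrative, example vector). */
#include <stdio.h>
#include <stdint.h>

int main(void)
{
	unsigned int vector  = 0x9a;            /* example SIPI vector */
	uint16_t cs_selector = vector << 8;     /* 0x9a00 */
	uint32_t cs_base     = vector << 12;    /* 0x0009a000 */
	uint32_t ip          = 0;

	/* In real mode base == selector * 16, so the AP starts at vector << 12. */
	printf("selector*16 = 0x%x, base = 0x%x, start = 0x%x\n",
	       (unsigned)cs_selector * 16, cs_base, cs_base + ip);
	return 0;
}
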
@@ -6718,6 +6751,7 @@ int kvm_arch_vcpu_init(struct kvm_vcpu *vcpu)
                goto fail_free_wbinvd_dirty_mask;
 
        vcpu->arch.ia32_tsc_adjust_msr = 0x0;
+       vcpu->arch.pv_time_enabled = false;
        kvm_async_pf_hash_reset(vcpu);
        kvm_pmu_init(vcpu);
 
@@ -6906,24 +6940,21 @@ out_free:
 
 int kvm_arch_prepare_memory_region(struct kvm *kvm,
                                struct kvm_memory_slot *memslot,
-                               struct kvm_memory_slot old,
                                struct kvm_userspace_memory_region *mem,
-                               bool user_alloc)
+                               enum kvm_mr_change change)
 {
-       int npages = memslot->npages;
-
        /*
         * Only private memory slots need to be mapped here since
         * KVM_SET_MEMORY_REGION ioctl is no longer supported.
         */
-       if ((memslot->id >= KVM_USER_MEM_SLOTS) && npages && !old.npages) {
+       if ((memslot->id >= KVM_USER_MEM_SLOTS) && (change == KVM_MR_CREATE)) {
                unsigned long userspace_addr;
 
                /*
                 * MAP_SHARED to prevent internal slot pages from being moved
                 * by fork()/COW.
                 */
-               userspace_addr = vm_mmap(NULL, 0, npages * PAGE_SIZE,
+               userspace_addr = vm_mmap(NULL, 0, memslot->npages * PAGE_SIZE,
                                         PROT_READ | PROT_WRITE,
                                         MAP_SHARED | MAP_ANONYMOUS, 0);
 
@@ -6938,17 +6969,17 @@ int kvm_arch_prepare_memory_region(struct kvm *kvm,
 
 void kvm_arch_commit_memory_region(struct kvm *kvm,
                                struct kvm_userspace_memory_region *mem,
-                               struct kvm_memory_slot old,
-                               bool user_alloc)
+                               const struct kvm_memory_slot *old,
+                               enum kvm_mr_change change)
 {
 
-       int nr_mmu_pages = 0, npages = mem->memory_size >> PAGE_SHIFT;
+       int nr_mmu_pages = 0;
 
-       if ((mem->slot >= KVM_USER_MEM_SLOTS) && old.npages && !npages) {
+       if ((mem->slot >= KVM_USER_MEM_SLOTS) && (change == KVM_MR_DELETE)) {
                int ret;
 
-               ret = vm_munmap(old.userspace_addr,
-                               old.npages * PAGE_SIZE);
+               ret = vm_munmap(old->userspace_addr,
+                               old->npages * PAGE_SIZE);
                if (ret < 0)
                        printk(KERN_WARNING
                               "kvm_vm_ioctl_set_memory_region: "
@@ -6965,14 +6996,14 @@ void kvm_arch_commit_memory_region(struct kvm *kvm,
         * Existing largepage mappings are destroyed here and new ones will
         * not be created until the end of the logging.
         */
-       if (npages && (mem->flags & KVM_MEM_LOG_DIRTY_PAGES))
+       if ((change != KVM_MR_DELETE) && (mem->flags & KVM_MEM_LOG_DIRTY_PAGES))
                kvm_mmu_slot_remove_write_access(kvm, mem->slot);
        /*
         * If memory slot is created, or moved, we need to clear all
         * mmio sptes.
         */
-       if (npages && old.base_gfn != mem->guest_phys_addr >> PAGE_SHIFT) {
-               kvm_mmu_zap_all(kvm);
+       if ((change == KVM_MR_CREATE) || (change == KVM_MR_MOVE)) {
+               kvm_mmu_zap_mmio_sptes(kvm);
                kvm_reload_remote_mmus(kvm);
        }
 }
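
Note on the two memory-slot hunks above: the arch hooks now receive an enum kvm_mr_change (KVM_MR_CREATE, KVM_MR_DELETE, KVM_MR_MOVE, KVM_MR_FLAGS_ONLY) computed by generic code, instead of re-deriving the kind of change from old/new npages and base_gfn. A hedged sketch of how such a classification can be derived from the old and new slot contents (illustrative only, not a copy of the in-tree logic):

/* Hedged sketch: deriving a kvm_mr_change-style classification from the old
 * and new slot, mirroring what generic KVM code now passes to the arch hooks
 * above (illustrative only, not the in-tree implementation). */
#include <stdio.h>
#include <stdint.h>

enum mr_change { MR_CREATE, MR_DELETE, MR_MOVE, MR_FLAGS_ONLY };

struct slot {
	uint64_t base_gfn;
	uint64_t npages;
	uint32_t flags;
};

static enum mr_change classify(const struct slot *old, const struct slot *new)
{
	if (!old->npages)
		return MR_CREATE;          /* slot did not exist before */
	if (!new->npages)
		return MR_DELETE;          /* slot is being removed */
	if (new->base_gfn != old->base_gfn)
		return MR_MOVE;            /* same slot, new guest address */
	return MR_FLAGS_ONLY;              /* e.g. dirty logging toggled */
}

int main(void)
{
	struct slot old = { .base_gfn = 0x100, .npages = 0,  .flags = 0 };
	struct slot new = { .base_gfn = 0x100, .npages = 16, .flags = 0 };

	printf("change = %d (0 == MR_CREATE)\n", classify(&old, &new));
	return 0;
}
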
@@ -6994,7 +7025,7 @@ int kvm_arch_vcpu_runnable(struct kvm_vcpu *vcpu)
        return (vcpu->arch.mp_state == KVM_MP_STATE_RUNNABLE &&
                !vcpu->arch.apf.halted)
                || !list_empty_careful(&vcpu->async_pf.done)
-               || vcpu->arch.mp_state == KVM_MP_STATE_SIPI_RECEIVED
+               || kvm_apic_has_events(vcpu)
                || atomic_read(&vcpu->arch.nmi_queued) ||
                (kvm_arch_interrupt_allowed(vcpu) &&
                 kvm_cpu_has_interrupt(vcpu));