Merge branch 'kvm-async-pf-int' into HEAD
[linux-block.git] / arch/x86/kernel/kvm.c
index 3a0115e8d880617becad69ba5312152b1a5d3c5c..681bc4090e91c58099c4f9fed749c6eb5627ef2c 100644
@@ -9,6 +9,7 @@
 
 #include <linux/context_tracking.h>
 #include <linux/init.h>
+#include <linux/irq.h>
 #include <linux/kernel.h>
 #include <linux/kvm_para.h>
 #include <linux/cpu.h>
@@ -217,7 +218,7 @@ again:
 }
 EXPORT_SYMBOL_GPL(kvm_async_pf_task_wake);
 
-u32 kvm_read_and_reset_apf_flags(void)
+noinstr u32 kvm_read_and_reset_apf_flags(void)
 {
        u32 flags = 0;
 
@@ -229,15 +230,18 @@ u32 kvm_read_and_reset_apf_flags(void)
        return flags;
 }
 EXPORT_SYMBOL_GPL(kvm_read_and_reset_apf_flags);
-NOKPROBE_SYMBOL(kvm_read_and_reset_apf_flags);
 
-bool __kvm_handle_async_pf(struct pt_regs *regs, u32 token)
+noinstr bool __kvm_handle_async_pf(struct pt_regs *regs, u32 token)
 {
        u32 flags = kvm_read_and_reset_apf_flags();
+       bool rcu_exit;
 
        if (!flags)
                return false;
 
+       rcu_exit = idtentry_enter_cond_rcu(regs);
+       instrumentation_begin();
+
        /*
         * If the host managed to inject an async #PF into an interrupt
         * disabled region, then die hard as this is not going to end well
@@ -251,32 +255,34 @@ bool __kvm_handle_async_pf(struct pt_regs *regs, u32 token)
                        panic("Host injected async #PF in kernel mode\n");
                /* Page is swapped out by the host. */
                kvm_async_pf_task_wait_schedule(token);
-               return true;
+       } else {
+               WARN_ONCE(1, "Unexpected async PF flags: %x\n", flags);
        }
 
-       WARN_ONCE(1, "Unexpected async PF flags: %x\n", flags);
+       instrumentation_end();
+       idtentry_exit_cond_rcu(regs, rcu_exit);
        return true;
 }
-NOKPROBE_SYMBOL(__kvm_handle_async_pf);
 
-__visible void __irq_entry kvm_async_pf_intr(struct pt_regs *regs)
+DEFINE_IDTENTRY_SYSVEC(sysvec_kvm_asyncpf_interrupt)
 {
+       struct pt_regs *old_regs = set_irq_regs(regs);
        u32 token;
+       bool rcu_exit;
 
-       entering_ack_irq();
+       rcu_exit = idtentry_enter_cond_rcu(regs);
 
        inc_irq_stat(irq_hv_callback_count);
 
        if (__this_cpu_read(apf_reason.enabled)) {
                token = __this_cpu_read(apf_reason.token);
-               rcu_irq_enter();
                kvm_async_pf_task_wake(token);
-               rcu_irq_exit();
                __this_cpu_write(apf_reason.token, 0);
                wrmsrl(MSR_KVM_ASYNC_PF_ACK, 1);
        }
 
-       exiting_irq();
+       idtentry_exit_cond_rcu(regs, rcu_exit);
+       set_irq_regs(old_regs);
 }
 
 static void __init paravirt_ops_setup(void)
@@ -661,7 +667,7 @@ static void __init kvm_guest_init(void)
 
        if (kvm_para_has_feature(KVM_FEATURE_ASYNC_PF_INT) && kvmapf) {
                static_branch_enable(&kvm_async_pf_enabled);
-               alloc_intr_gate(HYPERVISOR_CALLBACK_VECTOR, kvm_async_pf_vector);
+               alloc_intr_gate(HYPERVISOR_CALLBACK_VECTOR, asm_sysvec_kvm_asyncpf_interrupt);
        }
 
 #ifdef CONFIG_SMP