KVM: X86: Introduce more exit_fastpath_completion enum values
author Wanpeng Li <wanpengli@tencent.com>
Tue, 28 Apr 2020 06:23:25 +0000 (14:23 +0800)
committer Paolo Bonzini <pbonzini@redhat.com>
Fri, 15 May 2020 16:26:19 +0000 (12:26 -0400)
Add a fastpath_t typedef, since the enum name makes lines a bit long, and replace
EXIT_FASTPATH_SKIP_EMUL_INS with two new exit_fastpath_completion enum values
(a standalone sketch of the resulting flow follows the list):

- EXIT_FASTPATH_EXIT_HANDLED  KVM will still go through its full run loop,
                              but it will skip invoking the exit handler.

- EXIT_FASTPATH_REENTER_GUEST the fastpath handled the exit completely; the
                              guest can be re-entered without invoking the
                              exit handler or going back to vcpu_run.
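
As a rough illustration of the intended flow, here is a standalone sketch, not
kernel code: fake_vmentry(), fake_fastpath(), fake_exit_request(),
fake_vcpu_run() and fake_handle_exit() are made-up stand-ins for the real
vendor hooks and the generic run loop, used only to show how the two new enum
values are meant to be consumed.

	/* Standalone sketch of the two completion modes; not kernel code. */
	#include <stdbool.h>
	#include <stdio.h>

	typedef enum exit_fastpath_completion {
		EXIT_FASTPATH_NONE,
		EXIT_FASTPATH_REENTER_GUEST,
		EXIT_FASTPATH_EXIT_HANDLED,
	} fastpath_t;

	/* Hypothetical stand-ins for VM entry, the fastpath handler and
	 * the "does anything need attention?" check. */
	static void fake_vmentry(void) { }
	static fastpath_t fake_fastpath(void) { return EXIT_FASTPATH_REENTER_GUEST; }
	static bool fake_exit_request(void) { return true; }

	static fastpath_t fake_vcpu_run(void)
	{
		fastpath_t exit_fastpath;

	reenter_guest:
		fake_vmentry();
		exit_fastpath = fake_fastpath();
		if (exit_fastpath == EXIT_FASTPATH_REENTER_GUEST) {
			/* Nothing else pending: re-enter the guest directly. */
			if (!fake_exit_request())
				goto reenter_guest;
			/* Something needs attention: fall back to the run
			 * loop, but the exit itself is already handled. */
			exit_fastpath = EXIT_FASTPATH_EXIT_HANDLED;
		}
		return exit_fastpath;
	}

	static int fake_handle_exit(fastpath_t exit_fastpath)
	{
		/* Any non-NONE value means: skip the exit handler, resume. */
		if (exit_fastpath != EXIT_FASTPATH_NONE)
			return 1;
		/* ... otherwise dispatch to the real exit handler ... */
		return 1;
	}

	int main(void)
	{
		printf("resume guest: %d\n", fake_handle_exit(fake_vcpu_run()));
		return 0;
	}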

Tested-by: Haiwei Li <lihaiwei@tencent.com>
Cc: Haiwei Li <lihaiwei@tencent.com>
Signed-off-by: Wanpeng Li <wanpengli@tencent.com>
Message-Id: <1588055009-12677-4-git-send-email-wanpengli@tencent.com>
Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
arch/x86/include/asm/kvm_host.h
arch/x86/kvm/svm/svm.c
arch/x86/kvm/vmx/vmx.c
arch/x86/kvm/x86.c
arch/x86/kvm/x86.h

index 04f44961858b275b8789745cc5066f1eb828dd94..c3906fb2b93f7eeb625aefe17e20f9c9012a50b3 100644
@@ -182,8 +182,10 @@ enum {
 
 enum exit_fastpath_completion {
        EXIT_FASTPATH_NONE,
-       EXIT_FASTPATH_SKIP_EMUL_INS,
+       EXIT_FASTPATH_REENTER_GUEST,
+       EXIT_FASTPATH_EXIT_HANDLED,
 };
+typedef enum exit_fastpath_completion fastpath_t;
 
 struct x86_emulate_ctxt;
 struct x86_exception;
index 4c808cc059f1056af2949dce14d615b5f70bb763..29e7f7bc284d36431e11d8dce9b140bf72cc0dea 100644
@@ -2893,8 +2893,7 @@ static void svm_get_exit_info(struct kvm_vcpu *vcpu, u64 *info1, u64 *info2)
        *info2 = control->exit_info_2;
 }
 
-static int handle_exit(struct kvm_vcpu *vcpu,
-       enum exit_fastpath_completion exit_fastpath)
+static int handle_exit(struct kvm_vcpu *vcpu, fastpath_t exit_fastpath)
 {
        struct vcpu_svm *svm = to_svm(vcpu);
        struct kvm_run *kvm_run = vcpu->run;
@@ -2952,10 +2951,10 @@ static int handle_exit(struct kvm_vcpu *vcpu,
                       __func__, svm->vmcb->control.exit_int_info,
                       exit_code);
 
-       if (exit_fastpath == EXIT_FASTPATH_SKIP_EMUL_INS) {
-               kvm_skip_emulated_instruction(vcpu);
+       if (exit_fastpath != EXIT_FASTPATH_NONE)
                return 1;
-       } else if (exit_code >= ARRAY_SIZE(svm_exit_handlers)
+
+       if (exit_code >= ARRAY_SIZE(svm_exit_handlers)
            || !svm_exit_handlers[exit_code]) {
                vcpu_unimpl(vcpu, "svm: unexpected exit reason 0x%x\n", exit_code);
                dump_vmcb(vcpu);
@@ -3324,7 +3323,7 @@ static void svm_cancel_injection(struct kvm_vcpu *vcpu)
        svm_complete_interrupts(svm);
 }
 
-static enum exit_fastpath_completion svm_exit_handlers_fastpath(struct kvm_vcpu *vcpu)
+static fastpath_t svm_exit_handlers_fastpath(struct kvm_vcpu *vcpu)
 {
        if (!is_guest_mode(vcpu) &&
            to_svm(vcpu)->vmcb->control.exit_code == SVM_EXIT_MSR &&
@@ -3336,9 +3335,9 @@ static enum exit_fastpath_completion svm_exit_handlers_fastpath(struct kvm_vcpu
 
 void __svm_vcpu_run(unsigned long vmcb_pa, unsigned long *regs);
 
-static enum exit_fastpath_completion svm_vcpu_run(struct kvm_vcpu *vcpu)
+static fastpath_t svm_vcpu_run(struct kvm_vcpu *vcpu)
 {
-       enum exit_fastpath_completion exit_fastpath;
+       fastpath_t exit_fastpath;
        struct vcpu_svm *svm = to_svm(vcpu);
 
        svm->vmcb->save.rax = vcpu->arch.regs[VCPU_REGS_RAX];
index d883fbb63566f94e67dffea6cd3f23cdd7b6120c..c7730d3aa7065d7cde190fe2701c269fc76dc47a 100644
@@ -5926,8 +5926,7 @@ void dump_vmcs(void)
  * The guest has exited.  See if we can fix it or if we need userspace
  * assistance.
  */
-static int vmx_handle_exit(struct kvm_vcpu *vcpu,
-       enum exit_fastpath_completion exit_fastpath)
+static int vmx_handle_exit(struct kvm_vcpu *vcpu, fastpath_t exit_fastpath)
 {
        struct vcpu_vmx *vmx = to_vmx(vcpu);
        u32 exit_reason = vmx->exit_reason;
@@ -6034,10 +6033,8 @@ static int vmx_handle_exit(struct kvm_vcpu *vcpu,
                }
        }
 
-       if (exit_fastpath == EXIT_FASTPATH_SKIP_EMUL_INS) {
-               kvm_skip_emulated_instruction(vcpu);
+       if (exit_fastpath != EXIT_FASTPATH_NONE)
                return 1;
-       }
 
        if (exit_reason >= kvm_vmx_max_exit_handlers)
                goto unexpected_vmexit;
@@ -6628,7 +6625,7 @@ void vmx_update_host_rsp(struct vcpu_vmx *vmx, unsigned long host_rsp)
        }
 }
 
-static enum exit_fastpath_completion vmx_exit_handlers_fastpath(struct kvm_vcpu *vcpu)
+static fastpath_t vmx_exit_handlers_fastpath(struct kvm_vcpu *vcpu)
 {
        switch (to_vmx(vcpu)->exit_reason) {
        case EXIT_REASON_MSR_WRITE:
@@ -6640,12 +6637,13 @@ static enum exit_fastpath_completion vmx_exit_handlers_fastpath(struct kvm_vcpu
 
 bool __vmx_vcpu_run(struct vcpu_vmx *vmx, unsigned long *regs, bool launched);
 
-static enum exit_fastpath_completion vmx_vcpu_run(struct kvm_vcpu *vcpu)
+static fastpath_t vmx_vcpu_run(struct kvm_vcpu *vcpu)
 {
-       enum exit_fastpath_completion exit_fastpath;
+       fastpath_t exit_fastpath;
        struct vcpu_vmx *vmx = to_vmx(vcpu);
        unsigned long cr3, cr4;
 
+reenter_guest:
        /* Record the guest's net vcpu time for enforced NMI injections. */
        if (unlikely(!enable_vnmi &&
                     vmx->loaded_vmcs->soft_vnmi_blocked))
@@ -6807,6 +6805,18 @@ static enum exit_fastpath_completion vmx_vcpu_run(struct kvm_vcpu *vcpu)
                return EXIT_FASTPATH_NONE;
 
        exit_fastpath = vmx_exit_handlers_fastpath(vcpu);
+       if (exit_fastpath == EXIT_FASTPATH_REENTER_GUEST) {
+               if (!kvm_vcpu_exit_request(vcpu)) {
+                       /*
+                        * FIXME: this goto should be a loop in vcpu_enter_guest,
+                        * but it would incur the cost of a retpoline for now.
+                        * Revisit once static calls are available.
+                        */
+                       goto reenter_guest;
+               }
+               exit_fastpath = EXIT_FASTPATH_EXIT_HANDLED;
+       }
+
        return exit_fastpath;
 }
 
index 29a41aa98929de0eb9bfd126c1cb31e505a35dd7..71749fcb229e259845efbfd6aa8a883cc684cd2e 100644
@@ -1608,27 +1608,28 @@ static int handle_fastpath_set_x2apic_icr_irqoff(struct kvm_vcpu *vcpu, u64 data
        return 1;
 }
 
-enum exit_fastpath_completion handle_fastpath_set_msr_irqoff(struct kvm_vcpu *vcpu)
+fastpath_t handle_fastpath_set_msr_irqoff(struct kvm_vcpu *vcpu)
 {
        u32 msr = kvm_rcx_read(vcpu);
        u64 data;
-       int ret = 0;
+       fastpath_t ret = EXIT_FASTPATH_NONE;
 
        switch (msr) {
        case APIC_BASE_MSR + (APIC_ICR >> 4):
                data = kvm_read_edx_eax(vcpu);
-               ret = handle_fastpath_set_x2apic_icr_irqoff(vcpu, data);
+               if (!handle_fastpath_set_x2apic_icr_irqoff(vcpu, data)) {
+                       kvm_skip_emulated_instruction(vcpu);
+                       ret = EXIT_FASTPATH_EXIT_HANDLED;
+               }
                break;
        default:
-               return EXIT_FASTPATH_NONE;
+               break;
        }
 
-       if (!ret) {
+       if (ret != EXIT_FASTPATH_NONE)
                trace_kvm_msr_write(msr, data);
-               return EXIT_FASTPATH_SKIP_EMUL_INS;
-       }
 
-       return EXIT_FASTPATH_NONE;
+       return ret;
 }
 EXPORT_SYMBOL_GPL(handle_fastpath_set_msr_irqoff);
 
@@ -8205,7 +8206,7 @@ static int vcpu_enter_guest(struct kvm_vcpu *vcpu)
        bool req_int_win =
                dm_request_for_irq_injection(vcpu) &&
                kvm_cpu_accept_dm_intr(vcpu);
-       enum exit_fastpath_completion exit_fastpath;
+       fastpath_t exit_fastpath;
 
        bool req_immediate_exit = false;
 
index e02fe28254b6ad8d1dac115fa191fda284df7b58..6eb62e97e59faab3a81c901b3e2a6a0ed50947fa 100644
@@ -274,7 +274,7 @@ bool kvm_mtrr_check_gfn_range_consistency(struct kvm_vcpu *vcpu, gfn_t gfn,
 bool kvm_vector_hashing_enabled(void);
 int x86_emulate_instruction(struct kvm_vcpu *vcpu, gpa_t cr2_or_gpa,
                            int emulation_type, void *insn, int insn_len);
-enum exit_fastpath_completion handle_fastpath_set_msr_irqoff(struct kvm_vcpu *vcpu);
+fastpath_t handle_fastpath_set_msr_irqoff(struct kvm_vcpu *vcpu);
 
 extern u64 host_xcr0;
 extern u64 supported_xcr0;