KVM: X86: implement the logic for spinlock optimization
authorLongpeng(Mike) <longpeng2@huawei.com>
Tue, 8 Aug 2017 04:05:33 +0000 (12:05 +0800)
committerPaolo Bonzini <pbonzini@redhat.com>
Tue, 8 Aug 2017 08:57:43 +0000 (10:57 +0200)
get_cpl() requires vcpu_load(), so we must cache the result (i.e. whether
the vcpu was preempted while it was running in kernel mode, cpl = 0) in
kvm_vcpu_arch.
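
For context, this cached flag is what the generic directed-yield path reads
for vcpus that are not currently loaded: kvm_vcpu_on_spin() consults
kvm_arch_vcpu_in_kernel() for every yield candidate, which is exactly why
the CPL cannot be fetched on demand. A simplified sketch of that consumer
(virt/kvm/kvm_main.c; the real loop has additional eligibility checks and
boost bookkeeping):

    void kvm_vcpu_on_spin(struct kvm_vcpu *me, bool yield_to_kernel_mode)
    {
            struct kvm_vcpu *vcpu;
            int i;

            kvm_for_each_vcpu(i, vcpu, me->kvm) {
                    if (vcpu == me)
                            continue;
                    /* Only consider vcpus that were actually preempted. */
                    if (!READ_ONCE(vcpu->preempted))
                            continue;
                    /* With the new hint, prefer vcpus that were preempted
                     * in kernel mode and may be holding a spinlock. */
                    if (yield_to_kernel_mode && !kvm_arch_vcpu_in_kernel(vcpu))
                            continue;
                    if (kvm_vcpu_yield_to(vcpu) > 0)
                            break;
            }
    }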

Signed-off-by: Longpeng(Mike) <longpeng2@huawei.com>
Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
arch/x86/include/asm/kvm_host.h
arch/x86/kvm/hyperv.c
arch/x86/kvm/svm.c
arch/x86/kvm/vmx.c
arch/x86/kvm/x86.c

index 87ac4fba6d8e12f07e8a9f191bdb028a1c3e6234..1679aabcabe5011ebbefc8c74013378c56b1b27b 100644 (file)
@@ -688,6 +688,9 @@ struct kvm_vcpu_arch {
 
        /* GPA available (AMD only) */
        bool gpa_available;
+
+       /* true if the vcpu was preempted while in kernel mode (cpl = 0) */
+       bool preempted_in_kernel;
 };
 
 struct kvm_lpage_info {
index 5243d54f73abaeeda43db011a1e5dbb498431809..dc97f2544b6f8e840156725be640477cff3b6426 100644 (file)
@@ -1274,7 +1274,7 @@ int kvm_hv_hypercall(struct kvm_vcpu *vcpu)
 
        switch (code) {
        case HVCALL_NOTIFY_LONG_SPIN_WAIT:
-               kvm_vcpu_on_spin(vcpu, false);
+               kvm_vcpu_on_spin(vcpu, true);
                break;
        case HVCALL_POST_MESSAGE:
        case HVCALL_SIGNAL_EVENT:
index 0cc486fd98713626dea2e993832b41b1bcc34663..1fa9ee5660f45a0d8d08b6eeebf0a12e3bdb3375 100644 (file)
@@ -3749,7 +3749,10 @@ static int interrupt_window_interception(struct vcpu_svm *svm)
 
 static int pause_interception(struct vcpu_svm *svm)
 {
-       kvm_vcpu_on_spin(&svm->vcpu, false);
+       struct kvm_vcpu *vcpu = &svm->vcpu;
+       bool in_kernel = (svm_get_cpl(vcpu) == 0);
+
+       kvm_vcpu_on_spin(vcpu, in_kernel);
        return 1;
 }
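
Unlike the VMX case below, a PAUSE intercept on SVM is not restricted to
CPL 0, so the handler has to query the privilege level itself. That is
cheap here because the vcpu is loaded at exit time: svm_get_cpl() simply
returns the CPL field that hardware saved into the VMCB state-save area.
A sketch of that existing helper, for reference:

    static int svm_get_cpl(struct kvm_vcpu *vcpu)
    {
            struct vmcb_save_area *save = &to_svm(vcpu)->vmcb->save;

            return save->cpl;
    }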
 
index fef784c22190bd61ef422db58f77a0b627dde85d..46d08b389e36c26b6708f5494fcdab2826814609 100644 (file)
@@ -6781,7 +6781,13 @@ static int handle_pause(struct kvm_vcpu *vcpu)
        if (ple_gap)
                grow_ple_window(vcpu);
 
-       kvm_vcpu_on_spin(vcpu, false);
+       /*
+        * The Intel SDM, Vol. 3, Section 25.1.3, says the "PAUSE-loop
+        * exiting" VM-execution control is ignored if CPL > 0. KVM
+        * never sets PAUSE_EXITING and only enables PLE when it is
+        * supported, so a vcpu that gets a PAUSE exit must be at CPL 0.
+        */
+       kvm_vcpu_on_spin(vcpu, true);
        return kvm_skip_emulated_instruction(vcpu);
 }
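
Passing true unconditionally relies on how the control is armed in the
first place: KVM only enables the CPL-0-only "PAUSE-loop exiting" feature
(gated on the ple_gap module parameter) and never the unconditional
"PAUSE exiting" control. Roughly, the relevant setup in vmx.c looks like
the following sketch (simplified, not verbatim):

    /* Secondary execution controls: drop PLE when it is disabled. */
    if (!ple_gap)
            exec_control &= ~SECONDARY_EXEC_PAUSE_LOOP_EXITING;

    /* Per-vcpu VMCS setup: program the PLE gap and window. */
    if (ple_gap) {
            vmcs_write32(PLE_GAP, ple_gap);
            vmcs_write32(PLE_WINDOW, ple_window);
    }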
 
index 6125e1743b69580c1e244653fcf805379905d6b0..69b72c9e1f12711ec80ce0252c9c5b4fadef1012 100644 (file)
@@ -2873,6 +2873,10 @@ static void kvm_steal_time_set_preempted(struct kvm_vcpu *vcpu)
 void kvm_arch_vcpu_put(struct kvm_vcpu *vcpu)
 {
        int idx;
+
+       if (vcpu->preempted)
+               vcpu->arch.preempted_in_kernel = !kvm_x86_ops->get_cpl(vcpu);
+
        /*
         * Disable page faults because we're in atomic context here.
         * kvm_write_guest_offset_cached() would call might_fault()
@@ -7985,6 +7989,7 @@ int kvm_arch_vcpu_init(struct kvm_vcpu *vcpu)
        kvm_pmu_init(vcpu);
 
        vcpu->arch.pending_external_vector = -1;
+       vcpu->arch.preempted_in_kernel = false;
 
        kvm_hv_vcpu_init(vcpu);
 
@@ -8434,7 +8439,7 @@ int kvm_arch_vcpu_runnable(struct kvm_vcpu *vcpu)
 
 bool kvm_arch_vcpu_in_kernel(struct kvm_vcpu *vcpu)
 {
-       return false;
+       return vcpu->arch.preempted_in_kernel;
 }
 
 int kvm_arch_vcpu_should_kick(struct kvm_vcpu *vcpu)
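
For reference, vcpu->preempted, which now gates the caching in
kvm_arch_vcpu_put() above, is set by the generic preempt notifier when the
vcpu thread is scheduled out while still runnable; a simplified sketch of
that path in virt/kvm/kvm_main.c:

    static void kvm_sched_out(struct preempt_notifier *pn,
                              struct task_struct *next)
    {
            struct kvm_vcpu *vcpu = preempt_notifier_to_vcpu(pn);

            /* Still runnable but losing the CPU: a real preemption. */
            if (current->state == TASK_RUNNING)
                    vcpu->preempted = true;
            kvm_arch_vcpu_put(vcpu);
    }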