KVM: x86: Trace changes to active TSC offset regardless if vCPU in guest-mode
author		Paolo Bonzini <pbonzini@redhat.com>
		Sun, 25 Nov 2018 17:45:35 +0000 (18:45 +0100)
committer	Paolo Bonzini <pbonzini@redhat.com>
		Tue, 27 Nov 2018 11:53:43 +0000 (12:53 +0100)
For some reason, kvm_x86_ops->write_l1_tsc_offset() skipped tracing the
change to the active TSC offset when the vCPU is in guest-mode.
This patch changes write_l1_tsc_offset() to trace any change to the
active TSC offset, to aid debugging.  The VMX code is changed to
look more similar to SVM, which is in my opinion nicer.

Based on a patch by Liran Alon.

Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
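
The pattern both backends now share: derive the L2 component of the
currently active offset (g_tsc_offset, zero outside guest-mode), trace
the transition of L1's offset (active - g_tsc_offset -> new offset), and
program the hardware with new offset + g_tsc_offset.  A minimal
user-space sketch of that arithmetic follows; struct vcpu_tsc,
trace_write() and the write_l1_tsc_offset() shown here are simplified,
hypothetical stand-ins for illustration, not the kernel's types or
helpers:

	#include <stdint.h>
	#include <stdio.h>

	/* Hypothetical stand-in for the per-vCPU TSC state involved here. */
	struct vcpu_tsc {
		uint64_t active_offset;	/* offset in hardware: L1 + L2 while nested */
		uint64_t l2_offset;	/* extra offset L1 set for L2; 0 otherwise */
	};

	/* Stand-in for trace_kvm_write_tsc_offset(): old and new L1 offsets. */
	static void trace_write(uint64_t prev_l1, uint64_t next_l1)
	{
		printf("write_tsc_offset: prev=%llu next=%llu\n",
		       (unsigned long long)prev_l1, (unsigned long long)next_l1);
	}

	/* Mirrors the new write_l1_tsc_offset() flow: trace L1's offset change
	 * unconditionally, then keep L2's component on top of the new offset. */
	static void write_l1_tsc_offset(struct vcpu_tsc *v, uint64_t offset)
	{
		trace_write(v->active_offset - v->l2_offset, offset);
		v->active_offset = offset + v->l2_offset;
	}

	int main(void)
	{
		/* L1 offset 100, L2 adds 30 on top: the active offset is 130. */
		struct vcpu_tsc v = { .active_offset = 130, .l2_offset = 30 };

		write_l1_tsc_offset(&v, 250);	/* prev=100 next=250; active -> 280 */
		return 0;
	}
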
arch/x86/kvm/svm.c
arch/x86/kvm/vmx.c

diff --git a/arch/x86/kvm/svm.c b/arch/x86/kvm/svm.c
index cc6467b35a85f6cec9300011cfa0c464574ed5d3..d7fec9f4af43c997dbe4802d4edb75e282bbf942 100644
--- a/arch/x86/kvm/svm.c
+++ b/arch/x86/kvm/svm.c
@@ -1456,10 +1456,11 @@ static u64 svm_write_l1_tsc_offset(struct kvm_vcpu *vcpu, u64 offset)
                g_tsc_offset = svm->vmcb->control.tsc_offset -
                               svm->nested.hsave->control.tsc_offset;
                svm->nested.hsave->control.tsc_offset = offset;
-       } else
-               trace_kvm_write_tsc_offset(vcpu->vcpu_id,
-                                          svm->vmcb->control.tsc_offset,
-                                          offset);
+       }
+
+       trace_kvm_write_tsc_offset(vcpu->vcpu_id,
+                                  svm->vmcb->control.tsc_offset - g_tsc_offset,
+                                  offset);
 
        svm->vmcb->control.tsc_offset = offset + g_tsc_offset;
 
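
Note on the SVM hunk above: while in guest-mode, svm->vmcb->control.tsc_offset
holds the active (L2) offset and svm->nested.hsave->control.tsc_offset holds
L1's, so g_tsc_offset is their difference and the vmcb tsc_offset minus
g_tsc_offset recovers L1's current offset.  For example, with an L1 offset of
100 and an active offset of 130, g_tsc_offset = 130 - 100 = 30 and the trace
reports prev = 130 - 30 = 100; outside guest-mode g_tsc_offset stays 0 and the
trace reports the active offset directly, exactly as the removed else-branch
did.
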
diff --git a/arch/x86/kvm/vmx.c b/arch/x86/kvm/vmx.c
index 02edd9960e9d94cf8cbac80ea1bfccc5673f3089..9e2438e3c6467662d0e4d165b3bba4a02559dcea 100644
--- a/arch/x86/kvm/vmx.c
+++ b/arch/x86/kvm/vmx.c
@@ -3466,24 +3466,24 @@ static u64 vmx_read_l1_tsc_offset(struct kvm_vcpu *vcpu)
 
 static u64 vmx_write_l1_tsc_offset(struct kvm_vcpu *vcpu, u64 offset)
 {
-       u64 active_offset = offset;
-       if (is_guest_mode(vcpu)) {
-               /*
-                * We're here if L1 chose not to trap WRMSR to TSC. According
-                * to the spec, this should set L1's TSC; The offset that L1
-                * set for L2 remains unchanged, and still needs to be added
-                * to the newly set TSC to get L2's TSC.
-                */
-               struct vmcs12 *vmcs12 = get_vmcs12(vcpu);
-               if (nested_cpu_has(vmcs12, CPU_BASED_USE_TSC_OFFSETING))
-                       active_offset += vmcs12->tsc_offset;
-       } else {
-               trace_kvm_write_tsc_offset(vcpu->vcpu_id,
-                                          vmcs_read64(TSC_OFFSET), offset);
-       }
+       struct vmcs12 *vmcs12 = get_vmcs12(vcpu);
+       u64 g_tsc_offset = 0;
+
+       /*
+        * We're here if L1 chose not to trap WRMSR to TSC. According
+        * to the spec, this should set L1's TSC; The offset that L1
+        * set for L2 remains unchanged, and still needs to be added
+        * to the newly set TSC to get L2's TSC.
+        */
+       if (is_guest_mode(vcpu) &&
+           (vmcs12->cpu_based_vm_exec_control & CPU_BASED_USE_TSC_OFFSETING))
+               g_tsc_offset = vmcs12->tsc_offset;
 
-       vmcs_write64(TSC_OFFSET, active_offset);
-       return active_offset;
+       trace_kvm_write_tsc_offset(vcpu->vcpu_id,
+                                  vcpu->arch.tsc_offset - g_tsc_offset,
+                                  offset);
+       vmcs_write64(TSC_OFFSET, offset + g_tsc_offset);
+       return offset + g_tsc_offset;
 }
 
 /*
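
Note on the VMX hunk above: in addition to moving the trace out of the
else-branch, the previous value is now computed from the cached
vcpu->arch.tsc_offset (the currently active offset) instead of a VMREAD of
TSC_OFFSET, and subtracting g_tsc_offset recovers L1's offset just as on SVM.
The resulting transitions can be observed via the existing
kvm_write_tsc_offset tracepoint (events/kvm/kvm_write_tsc_offset under
tracefs, typically /sys/kernel/debug/tracing), which is what makes the
unconditional trace useful when debugging TSC issues in nested guests.
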