KVM: X86: Move write_l1_tsc_offset() logic to common code and rename it
author Ilias Stamatis <ilstam@amazon.com>
Wed, 26 May 2021 18:44:15 +0000 (19:44 +0100)
committer Paolo Bonzini <pbonzini@redhat.com>
Thu, 17 Jun 2021 17:09:29 +0000 (13:09 -0400)
The write_l1_tsc_offset() callback has a misleading name. It does not
set L1's TSC offset; rather, it updates the current TSC offset, which
might differ from L1's when a nested guest is executing. Additionally,
both the vmx and svm implementations use the same logic for calculating
the current TSC offset before writing it to hardware.

Rename the function to write_tsc_offset() and move the common logic to
the caller. The vmx/svm specific code now merely writes the given offset
into the corresponding hardware structure (the VMCS or VMCB).
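
For illustration only, and not part of this patch: a minimal standalone
C sketch of the computation that the common caller now performs. The
names FRAC_BITS, DEFAULT_RATIO, scale() and nested_tsc_offset(), as well
as the sample values, are assumptions made for the example; the kernel
itself uses kvm_calc_nested_tsc_offset(), mul_s64_u64_shr(),
kvm_tsc_scaling_ratio_frac_bits and kvm_default_tsc_scaling_ratio.

	/*
	 * Standalone userspace sketch (not kernel code) of how the common
	 * caller derives the TSC offset that is written to hardware.
	 */
	#include <stdint.h>
	#include <stdio.h>

	#define FRAC_BITS     48ULL                 /* assumed fraction width */
	#define DEFAULT_RATIO (1ULL << FRAC_BITS)   /* scaling ratio of 1.0 */

	/* (value * multiplier) >> FRAC_BITS, in the spirit of mul_s64_u64_shr(). */
	static uint64_t scale(int64_t value, uint64_t multiplier)
	{
		return (uint64_t)(((__int128)value * multiplier) >> FRAC_BITS);
	}

	/*
	 * Effective offset while L2 runs: L1's offset scaled by the L2
	 * multiplier, plus the offset L1 programmed for L2.
	 */
	static uint64_t nested_tsc_offset(uint64_t l1_offset, uint64_t l2_offset,
					  uint64_t l2_multiplier)
	{
		uint64_t offset = l1_offset;

		if (l2_multiplier != DEFAULT_RATIO)
			offset = scale((int64_t)l1_offset, l2_multiplier);

		return offset + l2_offset;
	}

	int main(void)
	{
		uint64_t l1_offset = (uint64_t)-1000000; /* L1 runs 1M cycles behind */
		uint64_t l2_offset = 5000000;            /* L1 gave L2 a +5M offset */
		uint64_t ratio_2x  = 2ULL << FRAC_BITS;  /* L2 counts at 2x L1's rate */

		/* No nested guest running: hardware gets L1's offset unchanged. */
		printf("L1 active: %lld\n", (long long)l1_offset);

		/* Nested guest running: hardware gets the combined offset. */
		printf("L2 active: %lld\n",
		       (long long)nested_tsc_offset(l1_offset, l2_offset, ratio_2x));
		return 0;
	}

When no nested guest is running the caller passes L1's offset straight
through, which is why the vendor callbacks can shrink to a single write
of the value they are given.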

Signed-off-by: Ilias Stamatis <ilstam@amazon.com>
Reviewed-by: Maxim Levitsky <mlevitsk@redhat.com>
Message-Id: <20210526184418.28881-9-ilstam@amazon.com>
Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
arch/x86/include/asm/kvm-x86-ops.h
arch/x86/include/asm/kvm_host.h
arch/x86/kvm/svm/svm.c
arch/x86/kvm/vmx/vmx.c
arch/x86/kvm/x86.c

diff --git a/arch/x86/include/asm/kvm-x86-ops.h b/arch/x86/include/asm/kvm-x86-ops.h
index c4906f73603d90845dce32c455103714d05af869..026ca50ef73eeb20ba96325caf7d3f29805ba6da 100644
@@ -89,7 +89,7 @@ KVM_X86_OP(load_mmu_pgd)
 KVM_X86_OP_NULL(has_wbinvd_exit)
 KVM_X86_OP(get_l2_tsc_offset)
 KVM_X86_OP(get_l2_tsc_multiplier)
-KVM_X86_OP(write_l1_tsc_offset)
+KVM_X86_OP(write_tsc_offset)
 KVM_X86_OP(get_exit_info)
 KVM_X86_OP(check_intercept)
 KVM_X86_OP(handle_exit_irqoff)
diff --git a/arch/x86/include/asm/kvm_host.h b/arch/x86/include/asm/kvm_host.h
index 14546c30bc6357f89aa1d9c74ec2bd1c116d7fc9..08773980393d4680bc3b3ca4d40efaa57b2bd4fe 100644
@@ -1313,8 +1313,7 @@ struct kvm_x86_ops {
 
        u64 (*get_l2_tsc_offset)(struct kvm_vcpu *vcpu);
        u64 (*get_l2_tsc_multiplier)(struct kvm_vcpu *vcpu);
-       /* Returns actual tsc_offset set in active VMCS */
-       u64 (*write_l1_tsc_offset)(struct kvm_vcpu *vcpu, u64 offset);
+       void (*write_tsc_offset)(struct kvm_vcpu *vcpu, u64 offset);
 
        /*
         * Retrieve somewhat arbitrary exit information.  Intended to be used
diff --git a/arch/x86/kvm/svm/svm.c b/arch/x86/kvm/svm/svm.c
index 95ae2734760ebed5ffd1c84e524154ee50f9beaf..623f3c4b795a47f24371829c3f7dc712e306393a 100644
@@ -1092,26 +1092,13 @@ static u64 svm_get_l2_tsc_multiplier(struct kvm_vcpu *vcpu)
        return kvm_default_tsc_scaling_ratio;
 }
 
-static u64 svm_write_l1_tsc_offset(struct kvm_vcpu *vcpu, u64 offset)
+static void svm_write_tsc_offset(struct kvm_vcpu *vcpu, u64 offset)
 {
        struct vcpu_svm *svm = to_svm(vcpu);
-       u64 g_tsc_offset = 0;
-
-       if (is_guest_mode(vcpu)) {
-               /* Write L1's TSC offset.  */
-               g_tsc_offset = svm->vmcb->control.tsc_offset -
-                              svm->vmcb01.ptr->control.tsc_offset;
-               svm->vmcb01.ptr->control.tsc_offset = offset;
-       }
-
-       trace_kvm_write_tsc_offset(vcpu->vcpu_id,
-                                  svm->vmcb->control.tsc_offset - g_tsc_offset,
-                                  offset);
-
-       svm->vmcb->control.tsc_offset = offset + g_tsc_offset;
 
+       svm->vmcb01.ptr->control.tsc_offset = vcpu->arch.l1_tsc_offset;
+       svm->vmcb->control.tsc_offset = offset;
        vmcb_mark_dirty(svm->vmcb, VMCB_INTERCEPTS);
-       return svm->vmcb->control.tsc_offset;
 }
 
 /* Evaluate instruction intercepts that depend on guest CPUID features. */
@@ -4538,7 +4525,7 @@ static struct kvm_x86_ops svm_x86_ops __initdata = {
 
        .get_l2_tsc_offset = svm_get_l2_tsc_offset,
        .get_l2_tsc_multiplier = svm_get_l2_tsc_multiplier,
-       .write_l1_tsc_offset = svm_write_l1_tsc_offset,
+       .write_tsc_offset = svm_write_tsc_offset,
 
        .load_mmu_pgd = svm_load_mmu_pgd,
 
diff --git a/arch/x86/kvm/vmx/vmx.c b/arch/x86/kvm/vmx/vmx.c
index 2ce2c73645bf9bcaa235ad5e5972ca21ddd719ea..54d08bebf9c62cab168887f6f92f0984a83b9467 100644
@@ -1808,26 +1808,9 @@ u64 vmx_get_l2_tsc_multiplier(struct kvm_vcpu *vcpu)
        return kvm_default_tsc_scaling_ratio;
 }
 
-static u64 vmx_write_l1_tsc_offset(struct kvm_vcpu *vcpu, u64 offset)
+static void vmx_write_tsc_offset(struct kvm_vcpu *vcpu, u64 offset)
 {
-       struct vmcs12 *vmcs12 = get_vmcs12(vcpu);
-       u64 g_tsc_offset = 0;
-
-       /*
-        * We're here if L1 chose not to trap WRMSR to TSC. According
-        * to the spec, this should set L1's TSC; The offset that L1
-        * set for L2 remains unchanged, and still needs to be added
-        * to the newly set TSC to get L2's TSC.
-        */
-       if (is_guest_mode(vcpu) &&
-           (vmcs12->cpu_based_vm_exec_control & CPU_BASED_USE_TSC_OFFSETTING))
-               g_tsc_offset = vmcs12->tsc_offset;
-
-       trace_kvm_write_tsc_offset(vcpu->vcpu_id,
-                                  vcpu->arch.tsc_offset - g_tsc_offset,
-                                  offset);
-       vmcs_write64(TSC_OFFSET, offset + g_tsc_offset);
-       return offset + g_tsc_offset;
+       vmcs_write64(TSC_OFFSET, offset);
 }
 
 /*
@@ -7723,7 +7706,7 @@ static struct kvm_x86_ops vmx_x86_ops __initdata = {
 
        .get_l2_tsc_offset = vmx_get_l2_tsc_offset,
        .get_l2_tsc_multiplier = vmx_get_l2_tsc_multiplier,
-       .write_l1_tsc_offset = vmx_write_l1_tsc_offset,
+       .write_tsc_offset = vmx_write_tsc_offset,
 
        .load_mmu_pgd = vmx_load_mmu_pgd,
 
diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c
index 61024ee9e85f4d843a4ff0cec83a9cc5cb14353b..b42f6c8674e6345acedf0a26288333016cc9cb21 100644
@@ -2359,10 +2359,28 @@ u64 kvm_calc_nested_tsc_multiplier(u64 l1_multiplier, u64 l2_multiplier)
 }
 EXPORT_SYMBOL_GPL(kvm_calc_nested_tsc_multiplier);
 
-static void kvm_vcpu_write_tsc_offset(struct kvm_vcpu *vcpu, u64 offset)
+static void kvm_vcpu_write_tsc_offset(struct kvm_vcpu *vcpu, u64 l1_offset)
 {
-       vcpu->arch.l1_tsc_offset = offset;
-       vcpu->arch.tsc_offset = static_call(kvm_x86_write_l1_tsc_offset)(vcpu, offset);
+       trace_kvm_write_tsc_offset(vcpu->vcpu_id,
+                                  vcpu->arch.l1_tsc_offset,
+                                  l1_offset);
+
+       vcpu->arch.l1_tsc_offset = l1_offset;
+
+       /*
+        * If we are here because L1 chose not to trap WRMSR to TSC then
+        * according to the spec this should set L1's TSC (as opposed to
+        * setting L1's offset for L2).
+        */
+       if (is_guest_mode(vcpu))
+               vcpu->arch.tsc_offset = kvm_calc_nested_tsc_offset(
+                       l1_offset,
+                       static_call(kvm_x86_get_l2_tsc_offset)(vcpu),
+                       static_call(kvm_x86_get_l2_tsc_multiplier)(vcpu));
+       else
+               vcpu->arch.tsc_offset = l1_offset;
+
+       static_call(kvm_x86_write_tsc_offset)(vcpu, vcpu->arch.tsc_offset);
 }
 
 static inline bool kvm_check_tsc_unstable(void)