KVM: x86: Fold kvm_arch_sched_in() into kvm_arch_vcpu_load()
author Sean Christopherson <seanjc@google.com>
Wed, 22 May 2024 01:40:10 +0000 (18:40 -0700)
committer Sean Christopherson <seanjc@google.com>
Tue, 11 Jun 2024 21:18:44 +0000 (14:18 -0700)
Fold the guts of kvm_arch_sched_in() into kvm_arch_vcpu_load(), keying
off the recently added kvm_vcpu.scheduled_out as appropriate.

Note, there is a very slight functional change: PLE shrink updates will
now happen after blasting WBINVD, but that is uninteresting as the two
operations do not interact in any way.
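
For context, a rough sketch (not the exact upstream code) of the generic
preempt-notifier hooks in virt/kvm/kvm_main.c that maintain
kvm_vcpu.scheduled_out: the flag is set before kvm_arch_vcpu_put() on
sched_out and cleared only after kvm_arch_vcpu_load() on sched_in, which is
what lets the x86 vcpu_load path tell "scheduled back in" apart from a
fresh load (e.g. via a vCPU ioctl).

  static void kvm_sched_in(struct preempt_notifier *pn, int cpu)
  {
          struct kvm_vcpu *vcpu = preempt_notifier_to_vcpu(pn);

          /* preempted/ready bookkeeping elided */

          __this_cpu_write(kvm_running_vcpu, vcpu);
          kvm_arch_sched_in(vcpu, cpu);   /* empty stub on x86 after this patch */
          kvm_arch_vcpu_load(vcpu, cpu);  /* sees vcpu->scheduled_out == true */

          WRITE_ONCE(vcpu->scheduled_out, false);
  }

  static void kvm_sched_out(struct preempt_notifier *pn,
                            struct task_struct *next)
  {
          struct kvm_vcpu *vcpu = preempt_notifier_to_vcpu(pn);

          WRITE_ONCE(vcpu->scheduled_out, true);

          /* preempted/ready bookkeeping elided */

          kvm_arch_vcpu_put(vcpu);
          __this_cpu_write(kvm_running_vcpu, NULL);
  }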

Acked-by: Kai Huang <kai.huang@intel.com>
Link: https://lore.kernel.org/r/20240522014013.1672962-4-seanjc@google.com
Signed-off-by: Sean Christopherson <seanjc@google.com>
arch/x86/include/asm/kvm-x86-ops.h
arch/x86/include/asm/kvm_host.h
arch/x86/kvm/svm/svm.c
arch/x86/kvm/vmx/main.c
arch/x86/kvm/vmx/vmx.c
arch/x86/kvm/vmx/x86_ops.h
arch/x86/kvm/x86.c

diff --git a/arch/x86/include/asm/kvm-x86-ops.h b/arch/x86/include/asm/kvm-x86-ops.h
index 5187fcf4b610b99f50a792092da6b354b3c3a83e..910d06cdb86ba3ab5dbe2b310da22de685f465f0 100644
--- a/arch/x86/include/asm/kvm-x86-ops.h
+++ b/arch/x86/include/asm/kvm-x86-ops.h
@@ -103,7 +103,6 @@ KVM_X86_OP(write_tsc_multiplier)
 KVM_X86_OP(get_exit_info)
 KVM_X86_OP(check_intercept)
 KVM_X86_OP(handle_exit_irqoff)
-KVM_X86_OP(sched_in)
 KVM_X86_OP_OPTIONAL(update_cpu_dirty_logging)
 KVM_X86_OP_OPTIONAL(vcpu_blocking)
 KVM_X86_OP_OPTIONAL(vcpu_unblocking)
diff --git a/arch/x86/include/asm/kvm_host.h b/arch/x86/include/asm/kvm_host.h
index ece45b3f6f2073ea81ad65b139173411c77b3d73..d7d84fc623bba463e5cbf0eb29a2ee5aa8fbd46f 100644
--- a/arch/x86/include/asm/kvm_host.h
+++ b/arch/x86/include/asm/kvm_host.h
@@ -1749,8 +1749,6 @@ struct kvm_x86_ops {
                               struct x86_exception *exception);
        void (*handle_exit_irqoff)(struct kvm_vcpu *vcpu);
 
-       void (*sched_in)(struct kvm_vcpu *vcpu, int cpu);
-
        /*
         * Size of the CPU's dirty log buffer, i.e. VMX's PML buffer.  A zero
         * value indicates CPU dirty logging is unsupported or disabled.
diff --git a/arch/x86/kvm/svm/svm.c b/arch/x86/kvm/svm/svm.c
index c8dc25886c16581c2e89e94b685fa6c95ad4fc7a..a2667cf0c963fce8446f1751591fbd45c0ab7ffd 100644
--- a/arch/x86/kvm/svm/svm.c
+++ b/arch/x86/kvm/svm/svm.c
@@ -1545,6 +1545,9 @@ static void svm_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
        struct vcpu_svm *svm = to_svm(vcpu);
        struct svm_cpu_data *sd = per_cpu_ptr(&svm_data, cpu);
 
+       if (vcpu->scheduled_out && !kvm_pause_in_guest(vcpu->kvm))
+               shrink_ple_window(vcpu);
+
        if (sd->current_vmcb != svm->vmcb) {
                sd->current_vmcb = svm->vmcb;
 
@@ -4560,12 +4563,6 @@ static void svm_handle_exit_irqoff(struct kvm_vcpu *vcpu)
                vcpu->arch.at_instruction_boundary = true;
 }
 
-static void svm_sched_in(struct kvm_vcpu *vcpu, int cpu)
-{
-       if (!kvm_pause_in_guest(vcpu->kvm))
-               shrink_ple_window(vcpu);
-}
-
 static void svm_setup_mce(struct kvm_vcpu *vcpu)
 {
        /* [63:9] are reserved. */
@@ -5025,8 +5022,6 @@ static struct kvm_x86_ops svm_x86_ops __initdata = {
        .check_intercept = svm_check_intercept,
        .handle_exit_irqoff = svm_handle_exit_irqoff,
 
-       .sched_in = svm_sched_in,
-
        .nested_ops = &svm_nested_ops,
 
        .deliver_interrupt = svm_deliver_interrupt,
diff --git a/arch/x86/kvm/vmx/main.c b/arch/x86/kvm/vmx/main.c
index d4ed681785fd649d0dcbcfa4a2c1064546a4dca1..c7a86be0f30e55201e3d84285066b35853acb5d4 100644
--- a/arch/x86/kvm/vmx/main.c
+++ b/arch/x86/kvm/vmx/main.c
@@ -122,8 +122,6 @@ struct kvm_x86_ops vt_x86_ops __initdata = {
        .check_intercept = vmx_check_intercept,
        .handle_exit_irqoff = vmx_handle_exit_irqoff,
 
-       .sched_in = vmx_sched_in,
-
        .cpu_dirty_log_size = PML_ENTITY_NUM,
        .update_cpu_dirty_logging = vmx_update_cpu_dirty_logging,
 
diff --git a/arch/x86/kvm/vmx/vmx.c b/arch/x86/kvm/vmx/vmx.c
index 0f535a952ab13cafe7dc5c81fd9248f8ca4598bd..9c9c25e2b1b9ea2d9d7e25708b6175e343d716e2 100644
--- a/arch/x86/kvm/vmx/vmx.c
+++ b/arch/x86/kvm/vmx/vmx.c
@@ -1518,6 +1518,9 @@ void vmx_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
 {
        struct vcpu_vmx *vmx = to_vmx(vcpu);
 
+       if (vcpu->scheduled_out && !kvm_pause_in_guest(vcpu->kvm))
+               shrink_ple_window(vcpu);
+
        vmx_vcpu_load_vmcs(vcpu, cpu, NULL);
 
        vmx_vcpu_pi_load(vcpu, cpu);
@@ -8172,12 +8175,6 @@ void vmx_cancel_hv_timer(struct kvm_vcpu *vcpu)
 }
 #endif
 
-void vmx_sched_in(struct kvm_vcpu *vcpu, int cpu)
-{
-       if (!kvm_pause_in_guest(vcpu->kvm))
-               shrink_ple_window(vcpu);
-}
-
 void vmx_update_cpu_dirty_logging(struct kvm_vcpu *vcpu)
 {
        struct vcpu_vmx *vmx = to_vmx(vcpu);
diff --git a/arch/x86/kvm/vmx/x86_ops.h b/arch/x86/kvm/vmx/x86_ops.h
index 502704596c8324b9b909ceb7cb3432ec94c7642c..3cb0be94e77926e04a24ddfa45767ba3cf351b76 100644
--- a/arch/x86/kvm/vmx/x86_ops.h
+++ b/arch/x86/kvm/vmx/x86_ops.h
@@ -112,7 +112,6 @@ u64 vmx_get_l2_tsc_multiplier(struct kvm_vcpu *vcpu);
 void vmx_write_tsc_offset(struct kvm_vcpu *vcpu);
 void vmx_write_tsc_multiplier(struct kvm_vcpu *vcpu);
 void vmx_request_immediate_exit(struct kvm_vcpu *vcpu);
-void vmx_sched_in(struct kvm_vcpu *vcpu, int cpu);
 void vmx_update_cpu_dirty_logging(struct kvm_vcpu *vcpu);
 #ifdef CONFIG_X86_64
 int vmx_set_hv_timer(struct kvm_vcpu *vcpu, u64 guest_deadline_tsc,
diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c
index a5c8caed76b1a74b83b0f4cfd53cbc158f2e9330..75d413ffcd5f56305ff1dbd4e57054dbbac8e5f6 100644
--- a/arch/x86/kvm/x86.c
+++ b/arch/x86/kvm/x86.c
@@ -5004,6 +5004,16 @@ static bool need_emulate_wbinvd(struct kvm_vcpu *vcpu)
 
 void kvm_arch_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
 {
+       struct kvm_pmu *pmu = vcpu_to_pmu(vcpu);
+
+       if (vcpu->scheduled_out) {
+               vcpu->arch.l1tf_flush_l1d = true;
+               if (pmu->version && unlikely(pmu->event_count)) {
+                       pmu->need_cleanup = true;
+                       kvm_make_request(KVM_REQ_PMU, vcpu);
+               }
+       }
+
        /* Address WBINVD may be executed by guest */
        if (need_emulate_wbinvd(vcpu)) {
                if (static_call(kvm_x86_has_wbinvd_exit)())
@@ -12567,14 +12577,7 @@ bool kvm_vcpu_is_bsp(struct kvm_vcpu *vcpu)
 
 void kvm_arch_sched_in(struct kvm_vcpu *vcpu, int cpu)
 {
-       struct kvm_pmu *pmu = vcpu_to_pmu(vcpu);
 
-       vcpu->arch.l1tf_flush_l1d = true;
-       if (pmu->version && unlikely(pmu->event_count)) {
-               pmu->need_cleanup = true;
-               kvm_make_request(KVM_REQ_PMU, vcpu);
-       }
-       static_call(kvm_x86_sched_in)(vcpu, cpu);
 }
 
 void kvm_arch_free_vm(struct kvm *kvm)