KVM: x86/pmu: Move PMU reset logic to common x86 code
authorSean Christopherson <seanjc@google.com>
Fri, 3 Nov 2023 23:05:36 +0000 (16:05 -0700)
committerSean Christopherson <seanjc@google.com>
Thu, 30 Nov 2023 20:52:54 +0000 (12:52 -0800)
Move the common (or at least "ignored") aspects of resetting the vPMU to
common x86 code, along with the stop/release helpers that are now used only
by the common pmu.c.

There is no need to manually handle fixed counters as all_valid_pmc_idx
tracks both fixed and general purpose counters, and resetting the vPMU is
far from a hot path, i.e. the extra bit of overhead to resolve the PMC from the
index is a non-issue.

Zero fixed_ctr_ctrl in common code even though it's Intel specific.
Ensuring it's zero doesn't harm AMD/SVM in any way, and stopping the fixed
counters via all_valid_pmc_idx, but not clearing the associated control
bits, would be odd/confusing.

Make the .reset() hook optional as SVM no longer needs vendor specific
handling.

Cc: stable@vger.kernel.org
Reviewed-by: Dapeng Mi <dapeng1.mi@linux.intel.com>
Link: https://lore.kernel.org/r/20231103230541.352265-2-seanjc@google.com
Signed-off-by: Sean Christopherson <seanjc@google.com>
arch/x86/include/asm/kvm-x86-pmu-ops.h
arch/x86/kvm/pmu.c
arch/x86/kvm/pmu.h
arch/x86/kvm/svm/pmu.c
arch/x86/kvm/vmx/pmu_intel.c

index 6c98f4bb4228ba5a05acb55f30fa965020c206c4..058bc636356a1133ad151457d8bf0b56528e7f39 100644 (file)
@@ -22,7 +22,7 @@ KVM_X86_PMU_OP(get_msr)
 KVM_X86_PMU_OP(set_msr)
 KVM_X86_PMU_OP(refresh)
 KVM_X86_PMU_OP(init)
-KVM_X86_PMU_OP(reset)
+KVM_X86_PMU_OP_OPTIONAL(reset)
 KVM_X86_PMU_OP_OPTIONAL(deliver_pmi)
 KVM_X86_PMU_OP_OPTIONAL(cleanup)
 
index 9ae07db6f0f6481e060c260834050e5c5abfe14a..027e9c3c2b93e1fce76c69cee2f2cc7494fa018a 100644 (file)
@@ -250,6 +250,24 @@ static bool pmc_resume_counter(struct kvm_pmc *pmc)
        return true;
 }
 
+static void pmc_release_perf_event(struct kvm_pmc *pmc)
+{
+       if (pmc->perf_event) {
+               perf_event_release_kernel(pmc->perf_event);
+               pmc->perf_event = NULL;
+               pmc->current_config = 0;
+               pmc_to_pmu(pmc)->event_count--;
+       }
+}
+
+static void pmc_stop_counter(struct kvm_pmc *pmc)
+{
+       if (pmc->perf_event) {
+               pmc->counter = pmc_read_counter(pmc);
+               pmc_release_perf_event(pmc);
+       }
+}
+
 static int filter_cmp(const void *pa, const void *pb, u64 mask)
 {
        u64 a = *(u64 *)pa & mask;
@@ -654,7 +672,27 @@ void kvm_pmu_refresh(struct kvm_vcpu *vcpu)
 
 void kvm_pmu_reset(struct kvm_vcpu *vcpu)
 {
-       static_call(kvm_x86_pmu_reset)(vcpu);
+       struct kvm_pmu *pmu = vcpu_to_pmu(vcpu);
+       struct kvm_pmc *pmc;
+       int i;
+
+       bitmap_zero(pmu->reprogram_pmi, X86_PMC_IDX_MAX);
+
+       for_each_set_bit(i, pmu->all_valid_pmc_idx, X86_PMC_IDX_MAX) {
+               pmc = static_call(kvm_x86_pmu_pmc_idx_to_pmc)(pmu, i);
+               if (!pmc)
+                       continue;
+
+               pmc_stop_counter(pmc);
+               pmc->counter = 0;
+
+               if (pmc_is_gp(pmc))
+                       pmc->eventsel = 0;
+       }
+
+       pmu->fixed_ctr_ctrl = pmu->global_ctrl = pmu->global_status = 0;
+
+       static_call_cond(kvm_x86_pmu_reset)(vcpu);
 }
 
 void kvm_pmu_init(struct kvm_vcpu *vcpu)
index 1d64113de4883ec77d4aa8c7c83f27a8c01a667d..a46aa9b25150f5a034839a7b99123c5e96b97f9a 100644 (file)
@@ -80,24 +80,6 @@ static inline void pmc_write_counter(struct kvm_pmc *pmc, u64 val)
        pmc->counter &= pmc_bitmask(pmc);
 }
 
-static inline void pmc_release_perf_event(struct kvm_pmc *pmc)
-{
-       if (pmc->perf_event) {
-               perf_event_release_kernel(pmc->perf_event);
-               pmc->perf_event = NULL;
-               pmc->current_config = 0;
-               pmc_to_pmu(pmc)->event_count--;
-       }
-}
-
-static inline void pmc_stop_counter(struct kvm_pmc *pmc)
-{
-       if (pmc->perf_event) {
-               pmc->counter = pmc_read_counter(pmc);
-               pmc_release_perf_event(pmc);
-       }
-}
-
 static inline bool pmc_is_gp(struct kvm_pmc *pmc)
 {
        return pmc->type == KVM_PMC_GP;
index 373ff6a6687b3a7fcb82cc75e1d8e96994161a4b..3fd47de14b38a3ca33ce2212c6d289decc5d7d5e 100644 (file)
@@ -233,21 +233,6 @@ static void amd_pmu_init(struct kvm_vcpu *vcpu)
        }
 }
 
-static void amd_pmu_reset(struct kvm_vcpu *vcpu)
-{
-       struct kvm_pmu *pmu = vcpu_to_pmu(vcpu);
-       int i;
-
-       for (i = 0; i < KVM_AMD_PMC_MAX_GENERIC; i++) {
-               struct kvm_pmc *pmc = &pmu->gp_counters[i];
-
-               pmc_stop_counter(pmc);
-               pmc->counter = pmc->prev_counter = pmc->eventsel = 0;
-       }
-
-       pmu->global_ctrl = pmu->global_status = 0;
-}
-
 struct kvm_pmu_ops amd_pmu_ops __initdata = {
        .hw_event_available = amd_hw_event_available,
        .pmc_idx_to_pmc = amd_pmc_idx_to_pmc,
@@ -259,7 +244,6 @@ struct kvm_pmu_ops amd_pmu_ops __initdata = {
        .set_msr = amd_pmu_set_msr,
        .refresh = amd_pmu_refresh,
        .init = amd_pmu_init,
-       .reset = amd_pmu_reset,
        .EVENTSEL_EVENT = AMD64_EVENTSEL_EVENT,
        .MAX_NR_GP_COUNTERS = KVM_AMD_PMC_MAX_GENERIC,
        .MIN_NR_GP_COUNTERS = AMD64_NUM_COUNTERS,
index 820d3e1f6b4f825fc653e745a4bb839ca2f57416..90c1f7f07e53b07f0c6d9cb92e8f00a6913032c8 100644 (file)
@@ -632,26 +632,6 @@ static void intel_pmu_init(struct kvm_vcpu *vcpu)
 
 static void intel_pmu_reset(struct kvm_vcpu *vcpu)
 {
-       struct kvm_pmu *pmu = vcpu_to_pmu(vcpu);
-       struct kvm_pmc *pmc = NULL;
-       int i;
-
-       for (i = 0; i < KVM_INTEL_PMC_MAX_GENERIC; i++) {
-               pmc = &pmu->gp_counters[i];
-
-               pmc_stop_counter(pmc);
-               pmc->counter = pmc->prev_counter = pmc->eventsel = 0;
-       }
-
-       for (i = 0; i < KVM_PMC_MAX_FIXED; i++) {
-               pmc = &pmu->fixed_counters[i];
-
-               pmc_stop_counter(pmc);
-               pmc->counter = pmc->prev_counter = 0;
-       }
-
-       pmu->fixed_ctr_ctrl = pmu->global_ctrl = pmu->global_status = 0;
-
        intel_pmu_release_guest_lbr_event(vcpu);
 }