KVM: x86/pmu: Move pmc_idx => pmc translation helper to common code
author	Sean Christopherson <seanjc@google.com>
	Fri, 10 Nov 2023 02:28:50 +0000 (18:28 -0800)
committer	Sean Christopherson <seanjc@google.com>
	Thu, 1 Feb 2024 17:35:47 +0000 (09:35 -0800)
Add a common helper for *internal* PMC lookups, and delete the ops hook
and Intel's implementation.  Keep AMD's implementation, but rename it to
amd_pmu_get_pmc() to make it somewhat more obvious that it's suited for
both KVM-internal and guest-initiated lookups.

Because KVM tracks all counters in a single bitmap, getting a counter
when iterating over a bitmap, e.g. of all valid PMCs, requires a small
amount of math that, while simple, isn't super obvious and doesn't use
the same semantics as PMC lookups from RDPMC!  Although AMD doesn't
support fixed counters, the common PMU code still behaves as if there
is a split, the high half of which just happens to always be empty.
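
For illustration only (not part of the patch), the index math boils down
to the following, where KVM_FIXED_PMC_BASE_IDX is INTEL_PMC_IDX_FIXED
(i.e. 32) and the helper names are made up purely for this sketch:

	/* Hypothetical helpers, illustrative only. */
	static inline int fixed_pmc_to_kvm_idx(int fixed_ctr)
	{
		/* Fixed counter N lives at bit 32 + N in KVM's bitmaps. */
		return KVM_FIXED_PMC_BASE_IDX + fixed_ctr;
	}

	static inline u32 fixed_pmc_to_rdpmc_ecx(int fixed_ctr)
	{
		/* Guest RDPMC instead selects fixed counter N via ECX bit 30. */
		return BIT(30) | (u32)fixed_ctr;
	}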

Opportunistically add a comment to explain both what is going on and why
KVM uses a single bitmap, e.g. the boilerplate for iterating over separate
bitmaps could be done via macros, so it's not (just) about deduplicating
code.

Link: https://lore.kernel.org/r/20231110022857.1273836-4-seanjc@google.com
Signed-off-by: Sean Christopherson <seanjc@google.com>
arch/x86/include/asm/kvm-x86-pmu-ops.h
arch/x86/kvm/pmu.c
arch/x86/kvm/pmu.h
arch/x86/kvm/svm/pmu.c
arch/x86/kvm/vmx/pmu_intel.c

index f0cd482221330c2f1631f3286dedb189ece6be8c..f852b13aeefea7a15f811c62a7035d32527b8740 100644 (file)
@@ -12,7 +12,6 @@ BUILD_BUG_ON(1)
  * a NULL definition, for example if "static_call_cond()" will be used
  * at the call sites.
  */
-KVM_X86_PMU_OP(pmc_idx_to_pmc)
 KVM_X86_PMU_OP(rdpmc_ecx_to_pmc)
 KVM_X86_PMU_OP(msr_idx_to_pmc)
 KVM_X86_PMU_OP_OPTIONAL(check_rdpmc_early)
index 67d589ac9363d4c9be7d41d0dda5d47051b2048a..0873937c90bc5c5592d32aed52eae2a0a3d2a423 100644 (file)
@@ -505,7 +505,7 @@ void kvm_pmu_handle_event(struct kvm_vcpu *vcpu)
        int bit;
 
        for_each_set_bit(bit, pmu->reprogram_pmi, X86_PMC_IDX_MAX) {
-               struct kvm_pmc *pmc = static_call(kvm_x86_pmu_pmc_idx_to_pmc)(pmu, bit);
+               struct kvm_pmc *pmc = kvm_pmc_idx_to_pmc(pmu, bit);
 
                if (unlikely(!pmc)) {
                        clear_bit(bit, pmu->reprogram_pmi);
@@ -725,7 +725,7 @@ static void kvm_pmu_reset(struct kvm_vcpu *vcpu)
        bitmap_zero(pmu->reprogram_pmi, X86_PMC_IDX_MAX);
 
        for_each_set_bit(i, pmu->all_valid_pmc_idx, X86_PMC_IDX_MAX) {
-               pmc = static_call(kvm_x86_pmu_pmc_idx_to_pmc)(pmu, i);
+               pmc = kvm_pmc_idx_to_pmc(pmu, i);
                if (!pmc)
                        continue;
 
@@ -801,7 +801,7 @@ void kvm_pmu_cleanup(struct kvm_vcpu *vcpu)
                      pmu->pmc_in_use, X86_PMC_IDX_MAX);
 
        for_each_set_bit(i, bitmask, X86_PMC_IDX_MAX) {
-               pmc = static_call(kvm_x86_pmu_pmc_idx_to_pmc)(pmu, i);
+               pmc = kvm_pmc_idx_to_pmc(pmu, i);
 
                if (pmc && pmc->perf_event && !pmc_speculative_in_use(pmc))
                        pmc_stop_counter(pmc);
@@ -856,7 +856,7 @@ void kvm_pmu_trigger_event(struct kvm_vcpu *vcpu, u64 perf_hw_id)
        int i;
 
        for_each_set_bit(i, pmu->all_valid_pmc_idx, X86_PMC_IDX_MAX) {
-               pmc = static_call(kvm_x86_pmu_pmc_idx_to_pmc)(pmu, i);
+               pmc = kvm_pmc_idx_to_pmc(pmu, i);
 
                if (!pmc || !pmc_event_is_allowed(pmc))
                        continue;
index e8c6a1f4b8e8715551e752849a8f0033432b5db5..56e8e665e1aff78f32fc56583b5fd6e5224e4011 100644 (file)
@@ -4,6 +4,8 @@
 
 #include <linux/nospec.h>
 
+#include <asm/kvm_host.h>
+
 #define vcpu_to_pmu(vcpu) (&(vcpu)->arch.pmu)
 #define pmu_to_vcpu(pmu)  (container_of((pmu), struct kvm_vcpu, arch.pmu))
 #define pmc_to_pmu(pmc)   (&(pmc)->vcpu->arch.pmu)
@@ -21,7 +23,6 @@
 #define KVM_FIXED_PMC_BASE_IDX INTEL_PMC_IDX_FIXED
 
 struct kvm_pmu_ops {
-       struct kvm_pmc *(*pmc_idx_to_pmc)(struct kvm_pmu *pmu, int pmc_idx);
        struct kvm_pmc *(*rdpmc_ecx_to_pmc)(struct kvm_vcpu *vcpu,
                unsigned int idx, u64 *mask);
        struct kvm_pmc *(*msr_idx_to_pmc)(struct kvm_vcpu *vcpu, u32 msr);
@@ -56,6 +57,32 @@ static inline bool kvm_pmu_has_perf_global_ctrl(struct kvm_pmu *pmu)
        return pmu->version > 1;
 }
 
+/*
+ * KVM tracks all counters in 64-bit bitmaps, with general purpose counters
+ * mapped to bits 31:0 and fixed counters mapped to 63:32, e.g. fixed counter 0
+ * is tracked internally via index 32.  On Intel (AMD doesn't support fixed
+ * counters), this mirrors how fixed counters are mapped to PERF_GLOBAL_CTRL
+ * and similar MSRs, i.e. tracking fixed counters at base index 32 reduces the
+ * amount of boilerplate needed to iterate over PMCs *and* simplifies common
+ * enable/disable/reset operations.
+ *
+ * WARNING!  This helper is only for lookups that are initiated by KVM, it is
+ * NOT safe for guest lookups, e.g. will do the wrong thing if passed a raw
+ * ECX value from RDPMC (fixed counters are accessed by setting bit 30 in ECX
+ * for RDPMC, not by adding 32 to the fixed counter index).
+ */
+static inline struct kvm_pmc *kvm_pmc_idx_to_pmc(struct kvm_pmu *pmu, int idx)
+{
+       if (idx < pmu->nr_arch_gp_counters)
+               return &pmu->gp_counters[idx];
+
+       idx -= KVM_FIXED_PMC_BASE_IDX;
+       if (idx >= 0 && idx < pmu->nr_arch_fixed_counters)
+               return &pmu->fixed_counters[idx];
+
+       return NULL;
+}
+
 static inline u64 pmc_bitmask(struct kvm_pmc *pmc)
 {
        struct kvm_pmu *pmu = pmc_to_pmu(pmc);
index e886300f0f974c78d54d931a1b11ac11f6ca8d6f..dfcc38bd97d34f4c618bc88f202cc4dd627f00ea 100644 (file)
@@ -25,7 +25,7 @@ enum pmu_type {
        PMU_TYPE_EVNTSEL,
 };
 
-static struct kvm_pmc *amd_pmc_idx_to_pmc(struct kvm_pmu *pmu, int pmc_idx)
+static struct kvm_pmc *amd_pmu_get_pmc(struct kvm_pmu *pmu, int pmc_idx)
 {
        unsigned int num_counters = pmu->nr_arch_gp_counters;
 
@@ -70,7 +70,7 @@ static inline struct kvm_pmc *get_gp_pmc_amd(struct kvm_pmu *pmu, u32 msr,
                return NULL;
        }
 
-       return amd_pmc_idx_to_pmc(pmu, idx);
+       return amd_pmu_get_pmc(pmu, idx);
 }
 
 static int amd_check_rdpmc_early(struct kvm_vcpu *vcpu, unsigned int idx)
@@ -87,7 +87,7 @@ static int amd_check_rdpmc_early(struct kvm_vcpu *vcpu, unsigned int idx)
 static struct kvm_pmc *amd_rdpmc_ecx_to_pmc(struct kvm_vcpu *vcpu,
        unsigned int idx, u64 *mask)
 {
-       return amd_pmc_idx_to_pmc(vcpu_to_pmu(vcpu), idx);
+       return amd_pmu_get_pmc(vcpu_to_pmu(vcpu), idx);
 }
 
 static struct kvm_pmc *amd_msr_idx_to_pmc(struct kvm_vcpu *vcpu, u32 msr)
@@ -229,7 +229,6 @@ static void amd_pmu_init(struct kvm_vcpu *vcpu)
 }
 
 struct kvm_pmu_ops amd_pmu_ops __initdata = {
-       .pmc_idx_to_pmc = amd_pmc_idx_to_pmc,
        .rdpmc_ecx_to_pmc = amd_rdpmc_ecx_to_pmc,
        .msr_idx_to_pmc = amd_msr_idx_to_pmc,
        .check_rdpmc_early = amd_check_rdpmc_early,
index fe7a2ba51e1be3223b1beef760a51a4e1b2d431a..845a964f22a622ed582b7e05f690d39e6594c618 100644 (file)
@@ -55,18 +55,6 @@ static void reprogram_fixed_counters(struct kvm_pmu *pmu, u64 data)
        }
 }
 
-static struct kvm_pmc *intel_pmc_idx_to_pmc(struct kvm_pmu *pmu, int pmc_idx)
-{
-       if (pmc_idx < KVM_FIXED_PMC_BASE_IDX) {
-               return get_gp_pmc(pmu, MSR_P6_EVNTSEL0 + pmc_idx,
-                                 MSR_P6_EVNTSEL0);
-       } else {
-               u32 idx = pmc_idx - KVM_FIXED_PMC_BASE_IDX;
-
-               return get_fixed_pmc(pmu, idx + MSR_CORE_PERF_FIXED_CTR0);
-       }
-}
-
 static struct kvm_pmc *intel_rdpmc_ecx_to_pmc(struct kvm_vcpu *vcpu,
                                            unsigned int idx, u64 *mask)
 {
@@ -718,7 +706,7 @@ void intel_pmu_cross_mapped_check(struct kvm_pmu *pmu)
 
        for_each_set_bit(bit, (unsigned long *)&pmu->global_ctrl,
                         X86_PMC_IDX_MAX) {
-               pmc = intel_pmc_idx_to_pmc(pmu, bit);
+               pmc = kvm_pmc_idx_to_pmc(pmu, bit);
 
                if (!pmc || !pmc_speculative_in_use(pmc) ||
                    !pmc_is_globally_enabled(pmc) || !pmc->perf_event)
@@ -735,7 +723,6 @@ void intel_pmu_cross_mapped_check(struct kvm_pmu *pmu)
 }
 
 struct kvm_pmu_ops intel_pmu_ops __initdata = {
-       .pmc_idx_to_pmc = intel_pmc_idx_to_pmc,
        .rdpmc_ecx_to_pmc = intel_rdpmc_ecx_to_pmc,
        .msr_idx_to_pmc = intel_msr_idx_to_pmc,
        .is_valid_msr = intel_is_valid_msr,