// SPDX-License-Identifier: GPL-2.0-only
/*
 * KVM PMU support for AMD
 *
 * Copyright 2015, Red Hat, Inc. and/or its affiliates.
 *
 * Author:
 *   Wei Huang <wei@redhat.com>
 *
 * Implementation is based on pmu_intel.c file
 */
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/types.h>
#include <linux/kvm_host.h>
#include <linux/perf_event.h>
#include "x86.h"
#include "cpuid.h"
#include "lapic.h"
#include "pmu.h"
#include "svm.h"

enum pmu_type {
	PMU_TYPE_COUNTER = 0,
	PMU_TYPE_EVNTSEL,
};

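/*
 * Translate a raw counter index into the matching GP counter.
 * array_index_nospec() clamps the index under speculative execution so a
 * mispredicted bounds check can't be used to read beyond the array.
 */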
static struct kvm_pmc *amd_pmc_idx_to_pmc(struct kvm_pmu *pmu, int pmc_idx)
{
	unsigned int num_counters = pmu->nr_arch_gp_counters;

	if (pmc_idx >= num_counters)
		return NULL;

	return &pmu->gp_counters[array_index_nospec(pmc_idx, num_counters)];
}

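/*
 * Map a PMU MSR to its kvm_pmc. Handles both the legacy K7 MSRs
 * (MSR_K7_EVNTSELn/MSR_K7_PERFCTRn) and the interleaved PERFCTR_CORE
 * layout (MSR_F15H_PERF_CTLn/CTRn).
 */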
static inline struct kvm_pmc *get_gp_pmc_amd(struct kvm_pmu *pmu, u32 msr,
					     enum pmu_type type)
{
	struct kvm_vcpu *vcpu = pmu_to_vcpu(pmu);
	unsigned int idx;

	if (!vcpu->kvm->arch.enable_pmu)
		return NULL;

	switch (msr) {
	case MSR_F15H_PERF_CTL0 ... MSR_F15H_PERF_CTR5:
		if (!guest_cpuid_has(vcpu, X86_FEATURE_PERFCTR_CORE))
			return NULL;
		/*
		 * Each PMU counter has a pair of CTL and CTR MSRs. CTLn
		 * MSRs (accessed via EVNTSEL) are even, CTRn MSRs are odd.
		 */
		idx = (unsigned int)((msr - MSR_F15H_PERF_CTL0) / 2);
		if (!(msr & 0x1) != (type == PMU_TYPE_EVNTSEL))
			return NULL;
		break;
	case MSR_K7_EVNTSEL0 ... MSR_K7_EVNTSEL3:
		if (type != PMU_TYPE_EVNTSEL)
			return NULL;
		idx = msr - MSR_K7_EVNTSEL0;
		break;
	case MSR_K7_PERFCTR0 ... MSR_K7_PERFCTR3:
		if (type != PMU_TYPE_COUNTER)
			return NULL;
		idx = msr - MSR_K7_PERFCTR0;
		break;
	default:
		return NULL;
	}

	return amd_pmc_idx_to_pmc(pmu, idx);
}

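/*
 * AMD provides no architectural enumeration of supported events (unlike
 * Intel's CPUID 0xA event-availability bits), so every hardware event is
 * reported as available.
 */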
static bool amd_hw_event_available(struct kvm_pmc *pmc)
{
	return true;
}

/*
 * Check if a PMC is enabled by comparing it against global_ctrl bits. Because
 * AMD CPUs don't have a global_ctrl MSR, all PMCs are enabled (return TRUE).
 */
static bool amd_pmc_is_enabled(struct kvm_pmc *pmc)
{
	return true;
}

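/*
 * Validate the ECX value a guest passed to RDPMC. Bits 31:30 are flag bits
 * (e.g. the "fast read" bit), not part of the counter index, so they are
 * masked off before the range check.
 */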
static bool amd_is_valid_rdpmc_ecx(struct kvm_vcpu *vcpu, unsigned int idx)
{
	struct kvm_pmu *pmu = vcpu_to_pmu(vcpu);

	idx &= ~(3u << 30);

	return idx < pmu->nr_arch_gp_counters;
}

/* idx is the ECX register of RDPMC instruction */
static struct kvm_pmc *amd_rdpmc_ecx_to_pmc(struct kvm_vcpu *vcpu,
	unsigned int idx, u64 *mask)
{
	return amd_pmc_idx_to_pmc(vcpu_to_pmu(vcpu), idx & ~(3u << 30));
}

static bool amd_is_valid_msr(struct kvm_vcpu *vcpu, u32 msr)
{
	/* All MSRs refer to exactly one PMC, so msr_idx_to_pmc is enough. */
	return false;
}

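/*
 * Try the MSR first as a counter, then as an event select; at most one
 * lookup can succeed for any given MSR, so the order is purely convention.
 */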
static struct kvm_pmc *amd_msr_idx_to_pmc(struct kvm_vcpu *vcpu, u32 msr)
{
	struct kvm_pmu *pmu = vcpu_to_pmu(vcpu);
	struct kvm_pmc *pmc;

	pmc = get_gp_pmc_amd(pmu, msr, PMU_TYPE_COUNTER);
	pmc = pmc ? pmc : get_gp_pmc_amd(pmu, msr, PMU_TYPE_EVNTSEL);

	return pmc;
}

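/*
 * Read a PMU MSR on behalf of the guest: counters return the current
 * emulated count, event selects return the last value written.
 */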
static int amd_pmu_get_msr(struct kvm_vcpu *vcpu, struct msr_data *msr_info)
{
	struct kvm_pmu *pmu = vcpu_to_pmu(vcpu);
	struct kvm_pmc *pmc;
	u32 msr = msr_info->index;

	/* MSR_PERFCTRn */
	pmc = get_gp_pmc_amd(pmu, msr, PMU_TYPE_COUNTER);
	if (pmc) {
		msr_info->data = pmc_read_counter(pmc);
		return 0;
	}
	/* MSR_EVNTSELn */
	pmc = get_gp_pmc_amd(pmu, msr, PMU_TYPE_EVNTSEL);
	if (pmc) {
		msr_info->data = pmc->eventsel;
		return 0;
	}

	return 1;
}

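/*
 * Write a PMU MSR on behalf of the guest. A counter write adjusts the
 * emulated count by the delta (data - current value) so that any in-flight
 * perf event keeps counting; an event-select write has the reserved bits
 * stripped and triggers reprogramming only when the value changes.
 */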
static int amd_pmu_set_msr(struct kvm_vcpu *vcpu, struct msr_data *msr_info)
{
	struct kvm_pmu *pmu = vcpu_to_pmu(vcpu);
	struct kvm_pmc *pmc;
	u32 msr = msr_info->index;
	u64 data = msr_info->data;

	/* MSR_PERFCTRn */
	pmc = get_gp_pmc_amd(pmu, msr, PMU_TYPE_COUNTER);
	if (pmc) {
		pmc->counter += data - pmc_read_counter(pmc);
		pmc_update_sample_period(pmc);
		return 0;
	}
	/* MSR_EVNTSELn */
	pmc = get_gp_pmc_amd(pmu, msr, PMU_TYPE_EVNTSEL);
	if (pmc) {
		data &= ~pmu->reserved_bits;
		if (data != pmc->eventsel) {
			pmc->eventsel = data;
			kvm_pmu_request_counter_reprogram(pmc);
		}
		return 0;
	}

	return 1;
}

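/*
 * Refresh the PMU model after the guest's CPUID changes: PERFCTR_CORE
 * advertises six counters instead of the four legacy ones, and AMD GP
 * counters are 48 bits wide.
 */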
static void amd_pmu_refresh(struct kvm_vcpu *vcpu)
{
	struct kvm_pmu *pmu = vcpu_to_pmu(vcpu);

	if (guest_cpuid_has(vcpu, X86_FEATURE_PERFCTR_CORE))
		pmu->nr_arch_gp_counters = AMD64_NUM_COUNTERS_CORE;
	else
		pmu->nr_arch_gp_counters = AMD64_NUM_COUNTERS;

	pmu->counter_bitmask[KVM_PMC_GP] = ((u64)1 << 48) - 1;
	pmu->reserved_bits = 0xfffffff000280000ull;
	pmu->raw_event_mask = AMD64_RAW_EVENT_MASK;
	/* not applicable to AMD; but clean them to prevent any fallout */
	pmu->counter_bitmask[KVM_PMC_FIXED] = 0;
	pmu->nr_arch_fixed_counters = 0;
	pmu->global_status = 0;
	bitmap_set(pmu->all_valid_pmc_idx, 0, pmu->nr_arch_gp_counters);
}

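/*
 * One-time per-vCPU setup of the counter array. The BUILD_BUG_ONs guard
 * the assumption that the shared kvm_pmu gp_counters array can hold every
 * AMD counter.
 */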
static void amd_pmu_init(struct kvm_vcpu *vcpu)
{
	struct kvm_pmu *pmu = vcpu_to_pmu(vcpu);
	int i;

	BUILD_BUG_ON(KVM_AMD_PMC_MAX_GENERIC > AMD64_NUM_COUNTERS_CORE);
	BUILD_BUG_ON(KVM_AMD_PMC_MAX_GENERIC > INTEL_PMC_MAX_GENERIC);

	for (i = 0; i < KVM_AMD_PMC_MAX_GENERIC; i++) {
		pmu->gp_counters[i].type = KVM_PMC_GP;
		pmu->gp_counters[i].vcpu = vcpu;
		pmu->gp_counters[i].idx = i;
		pmu->gp_counters[i].current_config = 0;
	}
}

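/* Stop all counters and wipe their state on vCPU RESET/INIT. */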
static void amd_pmu_reset(struct kvm_vcpu *vcpu)
{
	struct kvm_pmu *pmu = vcpu_to_pmu(vcpu);
	int i;

	for (i = 0; i < KVM_AMD_PMC_MAX_GENERIC; i++) {
		struct kvm_pmc *pmc = &pmu->gp_counters[i];

		pmc_stop_counter(pmc);
		pmc->counter = pmc->prev_counter = pmc->eventsel = 0;
	}
}

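/*
 * Callback table consumed by the common x86 PMU code. __initdata is fine
 * because the ops are snapshotted (into static calls) during module init
 * rather than dereferenced at runtime.
 */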
struct kvm_pmu_ops amd_pmu_ops __initdata = {
	.hw_event_available = amd_hw_event_available,
	.pmc_is_enabled = amd_pmc_is_enabled,
	.pmc_idx_to_pmc = amd_pmc_idx_to_pmc,
	.rdpmc_ecx_to_pmc = amd_rdpmc_ecx_to_pmc,
	.msr_idx_to_pmc = amd_msr_idx_to_pmc,
	.is_valid_rdpmc_ecx = amd_is_valid_rdpmc_ecx,
	.is_valid_msr = amd_is_valid_msr,
	.get_msr = amd_pmu_get_msr,
	.set_msr = amd_pmu_set_msr,
	.refresh = amd_pmu_refresh,
	.init = amd_pmu_init,
	.reset = amd_pmu_reset,
};