/* SPDX-License-Identifier: GPL-2.0 */
#ifndef __KVM_X86_PMU_H
#define __KVM_X86_PMU_H

#include <linux/nospec.h>

#include <asm/kvm_host.h>

#define vcpu_to_pmu(vcpu) (&(vcpu)->arch.pmu)
#define pmu_to_vcpu(pmu)  (container_of((pmu), struct kvm_vcpu, arch.pmu))
#define pmc_to_pmu(pmc)   (&(pmc)->vcpu->arch.pmu)

#define MSR_IA32_MISC_ENABLE_PMU_RO_MASK (MSR_IA32_MISC_ENABLE_PEBS_UNAVAIL |	\
					  MSR_IA32_MISC_ENABLE_BTS_UNAVAIL)

/* retrieve the 4 bits for EN and PMI out of IA32_FIXED_CTR_CTRL */
#define fixed_ctrl_field(ctrl_reg, idx) (((ctrl_reg) >> ((idx)*4)) & 0xf)

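/*
 * Illustrative sketch (not part of the original header): each fixed counter
 * owns a 4-bit field in IA32_FIXED_CTR_CTRL, with bits 1:0 as the ring 0/3
 * enables and bit 3 as the PMI request.  The helper name below is
 * hypothetical; it only shows how fixed_ctrl_field() is meant to be used.
 */
static inline bool example_fixed_pmi_enabled(u64 fixed_ctr_ctrl, int idx)
{
	/* e.g. fixed_ctr_ctrl == 0xb0 yields 0xb for idx 1 and 0x0 for idx 0 */
	return fixed_ctrl_field(fixed_ctr_ctrl, idx) & 0x8;
}
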
#define VMWARE_BACKDOOR_PMC_HOST_TSC		0x10000
#define VMWARE_BACKDOOR_PMC_REAL_TIME		0x10001
#define VMWARE_BACKDOOR_PMC_APPARENT_TIME	0x10002

#define KVM_FIXED_PMC_BASE_IDX INTEL_PMC_IDX_FIXED

struct kvm_pmu_emulated_event_selectors {
	u64 INSTRUCTIONS_RETIRED;
	u64 BRANCH_INSTRUCTIONS_RETIRED;
};

struct kvm_pmu_ops {
	struct kvm_pmc *(*rdpmc_ecx_to_pmc)(struct kvm_vcpu *vcpu,
		unsigned int idx, u64 *mask);
	struct kvm_pmc *(*msr_idx_to_pmc)(struct kvm_vcpu *vcpu, u32 msr);
	int (*check_rdpmc_early)(struct kvm_vcpu *vcpu, unsigned int idx);
	bool (*is_valid_msr)(struct kvm_vcpu *vcpu, u32 msr);
	int (*get_msr)(struct kvm_vcpu *vcpu, struct msr_data *msr_info);
	int (*set_msr)(struct kvm_vcpu *vcpu, struct msr_data *msr_info);
	void (*refresh)(struct kvm_vcpu *vcpu);
	void (*init)(struct kvm_vcpu *vcpu);
	void (*reset)(struct kvm_vcpu *vcpu);
	void (*deliver_pmi)(struct kvm_vcpu *vcpu);
	void (*cleanup)(struct kvm_vcpu *vcpu);

	const u64 EVENTSEL_EVENT;
	const int MAX_NR_GP_COUNTERS;
	const int MIN_NR_GP_COUNTERS;
};

void kvm_pmu_ops_update(const struct kvm_pmu_ops *pmu_ops);

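/*
 * Illustrative sketch (not part of the original header): each vendor module
 * provides one kvm_pmu_ops instance (see intel_pmu_ops/amd_pmu_ops at the
 * bottom of this header) and common x86 code hands it to
 * kvm_pmu_ops_update() during hardware setup.  The initializer below is a
 * hypothetical, abridged example; real implementations fill in every
 * callback.
 */
#if 0
static const struct kvm_pmu_ops example_pmu_ops = {
	.rdpmc_ecx_to_pmc	= example_rdpmc_ecx_to_pmc,	/* hypothetical */
	.msr_idx_to_pmc		= example_msr_idx_to_pmc,	/* hypothetical */
	.refresh		= example_pmu_refresh,		/* hypothetical */
	.EVENTSEL_EVENT		= ARCH_PERFMON_EVENTSEL_EVENT,
	.MAX_NR_GP_COUNTERS	= 8,				/* vendor limit */
	.MIN_NR_GP_COUNTERS	= 1,
};
#endif
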
static inline bool kvm_pmu_has_perf_global_ctrl(struct kvm_pmu *pmu)
{
	/*
	 * Architecturally, Intel's SDM states that IA32_PERF_GLOBAL_CTRL is
	 * supported if "CPUID.0AH: EAX[7:0] > 0", i.e. if the PMU version is
	 * greater than zero.  However, KVM only exposes and emulates the MSR
	 * to/for the guest if the guest PMU supports at least "Architectural
	 * Performance Monitoring Version 2".
	 *
	 * AMD's version of PERF_GLOBAL_CTRL conveniently shows up with v2.
	 */
	return pmu->version > 1;
}

/*
 * KVM tracks all counters in 64-bit bitmaps, with general purpose counters
 * mapped to bits 31:0 and fixed counters mapped to 63:32, e.g. fixed counter 0
 * is tracked internally via index 32.  On Intel (AMD doesn't support fixed
 * counters), this mirrors how fixed counters are mapped to PERF_GLOBAL_CTRL
 * and similar MSRs, i.e. tracking fixed counters at base index 32 reduces the
 * amount of boilerplate needed to iterate over PMCs *and* simplifies common
 * enable/disable/reset operations.
 *
 * WARNING!  This helper is only for lookups that are initiated by KVM, it is
 * NOT safe for guest lookups, e.g. it will do the wrong thing if passed a raw
 * ECX value from RDPMC (fixed counters are accessed by setting bit 30 in ECX
 * for RDPMC, not by adding 32 to the fixed counter index).
 */
static inline struct kvm_pmc *kvm_pmc_idx_to_pmc(struct kvm_pmu *pmu, int idx)
{
	if (idx < pmu->nr_arch_gp_counters)
		return &pmu->gp_counters[idx];

	idx -= KVM_FIXED_PMC_BASE_IDX;
	if (idx >= 0 && idx < pmu->nr_arch_fixed_counters)
		return &pmu->fixed_counters[idx];

	return NULL;
}

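/*
 * Illustrative sketch (not part of the original header): pmc->idx stores the
 * same internal index, so GP counter N is simply index N while fixed counter
 * N is index KVM_FIXED_PMC_BASE_IDX + N, e.g. on a vPMU with 8 GP and 3 fixed
 * counters, index 3 resolves to gp_counters[3] and index 33 to
 * fixed_counters[1].  The helper name below is hypothetical.
 */
static inline struct kvm_pmc *example_fixed_idx_to_pmc(struct kvm_pmu *pmu,
						       int fixed_idx)
{
	/* KVM-internal lookup only; not safe for a raw RDPMC ECX value. */
	return kvm_pmc_idx_to_pmc(pmu, KVM_FIXED_PMC_BASE_IDX + fixed_idx);
}
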
#define kvm_for_each_pmc(pmu, pmc, i, bitmap)			\
	for_each_set_bit(i, bitmap, X86_PMC_IDX_MAX)		\
		if (!(pmc = kvm_pmc_idx_to_pmc(pmu, i)))	\
			continue;				\
		else						\

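/*
 * Illustrative sketch (not part of the original header): the dangling "else"
 * above binds the caller's statement to a successful lookup, so the macro
 * visits only bits that map to an existing PMC.  The helper name and the
 * choice of the reprogram_pmi bitmap here are just an example.
 */
static inline void example_zero_flagged_counters(struct kvm_pmu *pmu)
{
	struct kvm_pmc *pmc;
	int i;

	kvm_for_each_pmc(pmu, pmc, i, pmu->reprogram_pmi)
		pmc->counter = 0;
}
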
static inline u64 pmc_bitmask(struct kvm_pmc *pmc)
{
	struct kvm_pmu *pmu = pmc_to_pmu(pmc);

	return pmu->counter_bitmask[pmc->type];
}

static inline u64 pmc_read_counter(struct kvm_pmc *pmc)
{
	u64 counter, enabled, running;

	counter = pmc->counter + pmc->emulated_counter;

	if (pmc->perf_event && !pmc->is_paused)
		counter += perf_event_read_value(pmc->perf_event,
						 &enabled, &running);
	/* FIXME: Scaling needed? */
	return counter & pmc_bitmask(pmc);
}

void pmc_write_counter(struct kvm_pmc *pmc, u64 val);

static inline bool pmc_is_gp(struct kvm_pmc *pmc)
{
	return pmc->type == KVM_PMC_GP;
}

static inline bool pmc_is_fixed(struct kvm_pmc *pmc)
{
	return pmc->type == KVM_PMC_FIXED;
}

static inline bool kvm_valid_perf_global_ctrl(struct kvm_pmu *pmu,
					      u64 data)
{
	return !(pmu->global_ctrl_mask & data);
}

/* returns the general purpose PMC with the specified MSR.  Note that it can
 * be used for both PERFCTRn and EVNTSELn; that is why it accepts base as a
 * parameter to tell them apart.
 */
static inline struct kvm_pmc *get_gp_pmc(struct kvm_pmu *pmu, u32 msr,
					 u32 base)
{
	if (msr >= base && msr < base + pmu->nr_arch_gp_counters) {
		u32 index = array_index_nospec(msr - base,
					       pmu->nr_arch_gp_counters);

		return &pmu->gp_counters[index];
	}

	return NULL;
}

/* returns the fixed PMC with the specified MSR */
static inline struct kvm_pmc *get_fixed_pmc(struct kvm_pmu *pmu, u32 msr)
{
	int base = MSR_CORE_PERF_FIXED_CTR0;

	if (msr >= base && msr < base + pmu->nr_arch_fixed_counters) {
		u32 index = array_index_nospec(msr - base,
					       pmu->nr_arch_fixed_counters);

		return &pmu->fixed_counters[index];
	}

	return NULL;
}

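/*
 * Illustrative sketch (not part of the original header): the same helper
 * resolves both the counter and the event-select MSR ranges, distinguished
 * only by the base passed in.  The bases below are Intel's architectural
 * MSRs from msr-index.h; the helper name is hypothetical.
 */
static inline struct kvm_pmc *example_intel_gp_msr_to_pmc(struct kvm_pmu *pmu,
							   u32 msr)
{
	struct kvm_pmc *pmc = get_gp_pmc(pmu, msr, MSR_IA32_PERFCTR0);

	return pmc ? pmc : get_gp_pmc(pmu, msr, MSR_P6_EVNTSEL0);
}
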
static inline bool pmc_speculative_in_use(struct kvm_pmc *pmc)
{
	struct kvm_pmu *pmu = pmc_to_pmu(pmc);

	if (pmc_is_fixed(pmc))
		return fixed_ctrl_field(pmu->fixed_ctr_ctrl,
					pmc->idx - KVM_FIXED_PMC_BASE_IDX) & 0x3;

	return pmc->eventsel & ARCH_PERFMON_EVENTSEL_ENABLE;
}

extern struct x86_pmu_capability kvm_pmu_cap;
extern struct kvm_pmu_emulated_event_selectors kvm_pmu_eventsel;

static inline void kvm_init_pmu_capability(const struct kvm_pmu_ops *pmu_ops)
{
	bool is_intel = boot_cpu_data.x86_vendor == X86_VENDOR_INTEL;
	int min_nr_gp_ctrs = pmu_ops->MIN_NR_GP_COUNTERS;

	/*
	 * Hybrid PMUs don't play nice with virtualization without careful
	 * configuration by userspace, and KVM's APIs for reporting supported
	 * vPMU features do not account for hybrid PMUs.  Disable vPMU support
	 * for hybrid PMUs until KVM gains a way to let userspace opt-in.
	 */
	if (cpu_feature_enabled(X86_FEATURE_HYBRID_CPU))
		enable_pmu = false;

	if (enable_pmu) {
		perf_get_x86_pmu_capability(&kvm_pmu_cap);

		/*
		 * WARN if perf did NOT disable the hardware PMU even though
		 * the architecturally required number of GP counters isn't
		 * present, i.e. if there is a non-zero number of counters,
		 * but fewer than the architectural minimum.
		 */
		if (!kvm_pmu_cap.num_counters_gp ||
		    WARN_ON_ONCE(kvm_pmu_cap.num_counters_gp < min_nr_gp_ctrs))
			enable_pmu = false;
		else if (is_intel && !kvm_pmu_cap.version)
			enable_pmu = false;
	}

	if (!enable_pmu) {
		memset(&kvm_pmu_cap, 0, sizeof(kvm_pmu_cap));
		return;
	}

	kvm_pmu_cap.version = min(kvm_pmu_cap.version, 2);
	kvm_pmu_cap.num_counters_gp = min(kvm_pmu_cap.num_counters_gp,
					  pmu_ops->MAX_NR_GP_COUNTERS);
	kvm_pmu_cap.num_counters_fixed = min(kvm_pmu_cap.num_counters_fixed,
					     KVM_PMC_MAX_FIXED);

	kvm_pmu_eventsel.INSTRUCTIONS_RETIRED =
		perf_get_hw_event_config(PERF_COUNT_HW_INSTRUCTIONS);
	kvm_pmu_eventsel.BRANCH_INSTRUCTIONS_RETIRED =
		perf_get_hw_event_config(PERF_COUNT_HW_BRANCH_INSTRUCTIONS);
}

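/*
 * Illustrative sketch (not part of the original header): vendor hardware
 * setup is expected to call kvm_init_pmu_capability() once with its
 * kvm_pmu_ops, after which kvm_pmu_cap holds the host PMU capabilities
 * clamped to what KVM virtualizes (version at most 2, counter counts
 * bounded by the vendor limits).  The helper name below is hypothetical.
 */
static inline bool example_vpmu_usable(void)
{
	/* Zeroed capabilities mean the vPMU was disabled during setup. */
	return enable_pmu && kvm_pmu_cap.num_counters_gp;
}
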
static inline void kvm_pmu_request_counter_reprogram(struct kvm_pmc *pmc)
{
	set_bit(pmc->idx, pmc_to_pmu(pmc)->reprogram_pmi);
	kvm_make_request(KVM_REQ_PMU, pmc->vcpu);
}

static inline void reprogram_counters(struct kvm_pmu *pmu, u64 diff)
{
	int bit;

	if (!diff)
		return;

	for_each_set_bit(bit, (unsigned long *)&diff, X86_PMC_IDX_MAX)
		set_bit(bit, pmu->reprogram_pmi);
	kvm_make_request(KVM_REQ_PMU, pmu_to_vcpu(pmu));
}

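/*
 * Illustrative sketch (not part of the original header): callers typically
 * pass the XOR of the old and new enable bits so that only counters whose
 * state actually changed are flagged for reprogramming, e.g. when emulating
 * a guest write to PERF_GLOBAL_CTRL.  The helper name below is hypothetical.
 */
static inline void example_global_ctrl_write(struct kvm_pmu *pmu, u64 data)
{
	u64 diff = pmu->global_ctrl ^ data;	/* bits that flipped */

	pmu->global_ctrl = data;
	reprogram_counters(pmu, diff);
}
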
/*
 * Check if a PMC is enabled by comparing it against the global_ctrl bits.
 *
 * If the vPMU doesn't have a global_ctrl MSR, all vPMCs are enabled.
 */
static inline bool pmc_is_globally_enabled(struct kvm_pmc *pmc)
{
	struct kvm_pmu *pmu = pmc_to_pmu(pmc);

	if (!kvm_pmu_has_perf_global_ctrl(pmu))
		return true;

	return test_bit(pmc->idx, (unsigned long *)&pmu->global_ctrl);
}

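/*
 * Illustrative sketch (not part of the original header): a PMC actually
 * counts only when both the global enable (PERF_GLOBAL_CTRL, if present)
 * and its own enable bits (eventsel or fixed ctrl) are set.  The helper
 * name below is hypothetical.
 */
static inline bool example_pmc_is_counting(struct kvm_pmc *pmc)
{
	return pmc_is_globally_enabled(pmc) && pmc_speculative_in_use(pmc);
}
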
void kvm_pmu_deliver_pmi(struct kvm_vcpu *vcpu);
void kvm_pmu_handle_event(struct kvm_vcpu *vcpu);
int kvm_pmu_rdpmc(struct kvm_vcpu *vcpu, unsigned pmc, u64 *data);
int kvm_pmu_check_rdpmc_early(struct kvm_vcpu *vcpu, unsigned int idx);
bool kvm_pmu_is_valid_msr(struct kvm_vcpu *vcpu, u32 msr);
int kvm_pmu_get_msr(struct kvm_vcpu *vcpu, struct msr_data *msr_info);
int kvm_pmu_set_msr(struct kvm_vcpu *vcpu, struct msr_data *msr_info);
void kvm_pmu_refresh(struct kvm_vcpu *vcpu);
void kvm_pmu_init(struct kvm_vcpu *vcpu);
void kvm_pmu_cleanup(struct kvm_vcpu *vcpu);
void kvm_pmu_destroy(struct kvm_vcpu *vcpu);
int kvm_vm_ioctl_set_pmu_event_filter(struct kvm *kvm, void __user *argp);
void kvm_pmu_trigger_event(struct kvm_vcpu *vcpu, u64 eventsel);

bool is_vmware_backdoor_pmc(u32 pmc_idx);

extern struct kvm_pmu_ops intel_pmu_ops;
extern struct kvm_pmu_ops amd_pmu_ops;
#endif /* __KVM_X86_PMU_H */