/* SPDX-License-Identifier: GPL-2.0 */
#ifndef __KVM_X86_PMU_H
#define __KVM_X86_PMU_H

#include <linux/nospec.h>

#include <asm/kvm_host.h>

#define vcpu_to_pmu(vcpu) (&(vcpu)->arch.pmu)
#define pmu_to_vcpu(pmu)  (container_of((pmu), struct kvm_vcpu, arch.pmu))
#define pmc_to_pmu(pmc)   (&(pmc)->vcpu->arch.pmu)

#define MSR_IA32_MISC_ENABLE_PMU_RO_MASK (MSR_IA32_MISC_ENABLE_PEBS_UNAVAIL |   \
                                          MSR_IA32_MISC_ENABLE_BTS_UNAVAIL)

/* retrieve the 4 bits for EN and PMI out of IA32_FIXED_CTR_CTRL */
#define fixed_ctrl_field(ctrl_reg, idx) (((ctrl_reg) >> ((idx)*4)) & 0xf)
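/*
 * Illustrative example (not in the original source): for fixed counter 1,
 * fixed_ctrl_field(ctrl, 1) extracts bits 7:4 of IA32_FIXED_CTR_CTRL, i.e.
 * that counter's OS/USR enable, AnyThread, and PMI control bits.
 */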

#define VMWARE_BACKDOOR_PMC_HOST_TSC            0x10000
#define VMWARE_BACKDOOR_PMC_REAL_TIME           0x10001
#define VMWARE_BACKDOOR_PMC_APPARENT_TIME       0x10002

#define KVM_FIXED_PMC_BASE_IDX INTEL_PMC_IDX_FIXED

struct kvm_pmu_ops {
        struct kvm_pmc *(*rdpmc_ecx_to_pmc)(struct kvm_vcpu *vcpu,
                unsigned int idx, u64 *mask);
        struct kvm_pmc *(*msr_idx_to_pmc)(struct kvm_vcpu *vcpu, u32 msr);
        int (*check_rdpmc_early)(struct kvm_vcpu *vcpu, unsigned int idx);
        bool (*is_valid_msr)(struct kvm_vcpu *vcpu, u32 msr);
        int (*get_msr)(struct kvm_vcpu *vcpu, struct msr_data *msr_info);
        int (*set_msr)(struct kvm_vcpu *vcpu, struct msr_data *msr_info);
        void (*refresh)(struct kvm_vcpu *vcpu);
        void (*init)(struct kvm_vcpu *vcpu);
        void (*reset)(struct kvm_vcpu *vcpu);
        void (*deliver_pmi)(struct kvm_vcpu *vcpu);
        void (*cleanup)(struct kvm_vcpu *vcpu);

        const u64 EVENTSEL_EVENT;
        const int MAX_NR_GP_COUNTERS;
        const int MIN_NR_GP_COUNTERS;
};

void kvm_pmu_ops_update(const struct kvm_pmu_ops *pmu_ops);

static inline bool kvm_pmu_has_perf_global_ctrl(struct kvm_pmu *pmu)
{
        /*
         * Architecturally, Intel's SDM states that IA32_PERF_GLOBAL_CTRL is
         * supported if "CPUID.0AH: EAX[7:0] > 0", i.e. if the PMU version is
         * greater than zero.  However, KVM only exposes and emulates the MSR
         * to/for the guest if the guest PMU supports at least "Architectural
         * Performance Monitoring Version 2".
         *
         * AMD's version of PERF_GLOBAL_CTRL conveniently shows up with v2.
         */
        return pmu->version > 1;
}

/*
 * KVM tracks all counters in 64-bit bitmaps, with general purpose counters
 * mapped to bits 31:0 and fixed counters mapped to bits 63:32, e.g. fixed
 * counter 0 is tracked internally via index 32.  On Intel (AMD doesn't
 * support fixed counters), this mirrors how fixed counters are mapped to
 * PERF_GLOBAL_CTRL and similar MSRs, i.e. tracking fixed counters at base
 * index 32 reduces the amount of boilerplate needed to iterate over PMCs
 * *and* simplifies common enable/disable/reset operations.
 *
 * WARNING!  This helper is only for lookups that are initiated by KVM, it is
 * NOT safe for guest lookups, e.g. will do the wrong thing if passed a raw
 * ECX value from RDPMC (fixed counters are accessed by setting bit 30 in ECX
 * for RDPMC, not by adding 32 to the fixed counter index).
 */
static inline struct kvm_pmc *kvm_pmc_idx_to_pmc(struct kvm_pmu *pmu, int idx)
{
        if (idx < pmu->nr_arch_gp_counters)
                return &pmu->gp_counters[idx];

        idx -= KVM_FIXED_PMC_BASE_IDX;
        if (idx >= 0 && idx < pmu->nr_arch_fixed_counters)
                return &pmu->fixed_counters[idx];

        return NULL;
}

#define kvm_for_each_pmc(pmu, pmc, i, bitmap)                   \
        for_each_set_bit(i, bitmap, X86_PMC_IDX_MAX)            \
                if (!(pmc = kvm_pmc_idx_to_pmc(pmu, i)))        \
                        continue;                               \
                else                                            \

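/*
 * Hedged usage sketch (not part of this header): iterate over every PMC
 * whose bit is set in a 64-bit bitmap, e.g. the reprogram_pmi bitmap used
 * elsewhere in this file; "handle_pmc" is a hypothetical callback.
 *
 *      struct kvm_pmc *pmc;
 *      int bit;
 *
 *      kvm_for_each_pmc(pmu, pmc, bit, pmu->reprogram_pmi)
 *              handle_pmc(pmc);
 *
 * The macro's trailing "else" makes the caller's loop body the else-arm of
 * the NULL check, so a failed lookup hits "continue" instead of silently
 * binding to a subsequent "else" in the caller.
 */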
static inline u64 pmc_bitmask(struct kvm_pmc *pmc)
{
        struct kvm_pmu *pmu = pmc_to_pmu(pmc);

        return pmu->counter_bitmask[pmc->type];
}

static inline u64 pmc_read_counter(struct kvm_pmc *pmc)
{
        u64 counter, enabled, running;

        counter = pmc->counter + pmc->emulated_counter;

        if (pmc->perf_event && !pmc->is_paused)
                counter += perf_event_read_value(pmc->perf_event,
                                                 &enabled, &running);
        /* FIXME: Scaling needed? */
        return counter & pmc_bitmask(pmc);
}
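/*
 * Illustrative arithmetic (assumes 48-bit counters, i.e. a counter_bitmask
 * of (1ULL << 48) - 1): if pmc->counter holds 0xffffffffffff and the live
 * perf event has counted two more events, the masked sum wraps to 1,
 * mimicking hardware counter rollover.
 */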

void pmc_write_counter(struct kvm_pmc *pmc, u64 val);

static inline bool pmc_is_gp(struct kvm_pmc *pmc)
{
        return pmc->type == KVM_PMC_GP;
}

static inline bool pmc_is_fixed(struct kvm_pmc *pmc)
{
        return pmc->type == KVM_PMC_FIXED;
}

static inline bool kvm_valid_perf_global_ctrl(struct kvm_pmu *pmu,
                                                 u64 data)
{
        return !(pmu->global_ctrl_mask & data);
}

/* returns general purpose PMC with the specified MSR. Note that it can be
 * used for both PERFCTRn and EVNTSELn; that is why it accepts base as a
 * parameter to tell them apart.
 */
static inline struct kvm_pmc *get_gp_pmc(struct kvm_pmu *pmu, u32 msr,
                                         u32 base)
{
        if (msr >= base && msr < base + pmu->nr_arch_gp_counters) {
                u32 index = array_index_nospec(msr - base,
                                               pmu->nr_arch_gp_counters);

                return &pmu->gp_counters[index];
        }

        return NULL;
}
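/*
 * Hedged usage sketch: the base argument disambiguates counter accesses
 * from event select accesses for the same counter number, e.g. on AMD
 * (MSR names from asm/msr-index.h):
 *
 *      pmc = get_gp_pmc(pmu, msr, MSR_K7_PERFCTR0);    // PERFCTRn access
 *      pmc = get_gp_pmc(pmu, msr, MSR_K7_EVNTSEL0);    // EVNTSELn access
 */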

/* returns fixed PMC with the specified MSR */
static inline struct kvm_pmc *get_fixed_pmc(struct kvm_pmu *pmu, u32 msr)
{
        int base = MSR_CORE_PERF_FIXED_CTR0;

        if (msr >= base && msr < base + pmu->nr_arch_fixed_counters) {
                u32 index = array_index_nospec(msr - base,
                                               pmu->nr_arch_fixed_counters);

                return &pmu->fixed_counters[index];
        }

        return NULL;
}

static inline bool pmc_speculative_in_use(struct kvm_pmc *pmc)
{
        struct kvm_pmu *pmu = pmc_to_pmu(pmc);

        if (pmc_is_fixed(pmc))
                return fixed_ctrl_field(pmu->fixed_ctr_ctrl,
                                        pmc->idx - KVM_FIXED_PMC_BASE_IDX) & 0x3;

        return pmc->eventsel & ARCH_PERFMON_EVENTSEL_ENABLE;
}
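/*
 * Note (illustrative): for fixed counters, the low two control bits tested
 * above (0x3) are the OS and USR enable bits, so a nonzero result means the
 * guest has enabled the counter in at least one privilege mode.
 */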

extern struct x86_pmu_capability kvm_pmu_cap;

static inline void kvm_init_pmu_capability(const struct kvm_pmu_ops *pmu_ops)
{
        bool is_intel = boot_cpu_data.x86_vendor == X86_VENDOR_INTEL;
        int min_nr_gp_ctrs = pmu_ops->MIN_NR_GP_COUNTERS;

        /*
         * Hybrid PMUs don't play nice with virtualization without careful
         * configuration by userspace, and KVM's APIs for reporting supported
         * vPMU features do not account for hybrid PMUs.  Disable vPMU support
         * for hybrid PMUs until KVM gains a way to let userspace opt-in.
         */
        if (cpu_feature_enabled(X86_FEATURE_HYBRID_CPU))
                enable_pmu = false;

        if (enable_pmu) {
                perf_get_x86_pmu_capability(&kvm_pmu_cap);

                /*
                 * WARN if perf did NOT disable the hardware PMU even though
                 * fewer than the architecturally required number of GP
                 * counters are present, i.e. if there are a non-zero number
                 * of counters, but fewer than what is architecturally
                 * required.
                 */
                if (!kvm_pmu_cap.num_counters_gp ||
                    WARN_ON_ONCE(kvm_pmu_cap.num_counters_gp < min_nr_gp_ctrs))
                        enable_pmu = false;
                else if (is_intel && !kvm_pmu_cap.version)
                        enable_pmu = false;
        }

        if (!enable_pmu) {
                memset(&kvm_pmu_cap, 0, sizeof(kvm_pmu_cap));
                return;
        }

        kvm_pmu_cap.version = min(kvm_pmu_cap.version, 2);
        kvm_pmu_cap.num_counters_gp = min(kvm_pmu_cap.num_counters_gp,
                                          pmu_ops->MAX_NR_GP_COUNTERS);
        kvm_pmu_cap.num_counters_fixed = min(kvm_pmu_cap.num_counters_fixed,
                                             KVM_PMC_MAX_FIXED);
}
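/*
 * Illustrative example: on a host whose PMU reports version 5 with eight GP
 * counters, the clamps above leave num_counters_gp at min(8,
 * MAX_NR_GP_COUNTERS) but cap the version advertised via this capability
 * at 2.
 */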

static inline void kvm_pmu_request_counter_reprogram(struct kvm_pmc *pmc)
{
        set_bit(pmc->idx, pmc_to_pmu(pmc)->reprogram_pmi);
        kvm_make_request(KVM_REQ_PMU, pmc->vcpu);
}

static inline void reprogram_counters(struct kvm_pmu *pmu, u64 diff)
{
        int bit;

        if (!diff)
                return;

        for_each_set_bit(bit, (unsigned long *)&diff, X86_PMC_IDX_MAX)
                set_bit(bit, pmu->reprogram_pmi);
        kvm_make_request(KVM_REQ_PMU, pmu_to_vcpu(pmu));
}
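/*
 * Hedged usage sketch (mirrors how a global_ctrl write could be handled;
 * "data" is the hypothetical incoming MSR value): reprogram only the
 * counters whose enable bit actually changed.
 *
 *      u64 diff = pmu->global_ctrl ^ data;
 *
 *      pmu->global_ctrl = data;
 *      reprogram_counters(pmu, diff);
 */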

/*
 * Check if a PMC is enabled by comparing it against global_ctrl bits.
 *
 * If the vPMU doesn't have a global_ctrl MSR, all vPMCs are enabled.
 */
static inline bool pmc_is_globally_enabled(struct kvm_pmc *pmc)
{
        struct kvm_pmu *pmu = pmc_to_pmu(pmc);

        if (!kvm_pmu_has_perf_global_ctrl(pmu))
                return true;

        return test_bit(pmc->idx, (unsigned long *)&pmu->global_ctrl);
}

void kvm_pmu_deliver_pmi(struct kvm_vcpu *vcpu);
void kvm_pmu_handle_event(struct kvm_vcpu *vcpu);
int kvm_pmu_rdpmc(struct kvm_vcpu *vcpu, unsigned pmc, u64 *data);
int kvm_pmu_check_rdpmc_early(struct kvm_vcpu *vcpu, unsigned int idx);
bool kvm_pmu_is_valid_msr(struct kvm_vcpu *vcpu, u32 msr);
int kvm_pmu_get_msr(struct kvm_vcpu *vcpu, struct msr_data *msr_info);
int kvm_pmu_set_msr(struct kvm_vcpu *vcpu, struct msr_data *msr_info);
void kvm_pmu_refresh(struct kvm_vcpu *vcpu);
void kvm_pmu_init(struct kvm_vcpu *vcpu);
void kvm_pmu_cleanup(struct kvm_vcpu *vcpu);
void kvm_pmu_destroy(struct kvm_vcpu *vcpu);
int kvm_vm_ioctl_set_pmu_event_filter(struct kvm *kvm, void __user *argp);
void kvm_pmu_trigger_event(struct kvm_vcpu *vcpu, u64 perf_hw_id);

bool is_vmware_backdoor_pmc(u32 pmc_idx);

extern struct kvm_pmu_ops intel_pmu_ops;
extern struct kvm_pmu_ops amd_pmu_ops;
#endif /* __KVM_X86_PMU_H */