/* SPDX-License-Identifier: GPL-2.0 */
#ifndef __KVM_X86_PMU_H
#define __KVM_X86_PMU_H

#include <linux/nospec.h>

#define vcpu_to_pmu(vcpu) (&(vcpu)->arch.pmu)
#define pmu_to_vcpu(pmu)  (container_of((pmu), struct kvm_vcpu, arch.pmu))
#define pmc_to_pmu(pmc)   (&(pmc)->vcpu->arch.pmu)

#define MSR_IA32_MISC_ENABLE_PMU_RO_MASK (MSR_IA32_MISC_ENABLE_PEBS_UNAVAIL | \
					  MSR_IA32_MISC_ENABLE_BTS_UNAVAIL)

/* retrieve the 4 bits for EN and PMI out of IA32_FIXED_CTR_CTRL */
#define fixed_ctrl_field(ctrl_reg, idx) (((ctrl_reg) >> ((idx)*4)) & 0xf)

#define VMWARE_BACKDOOR_PMC_HOST_TSC		0x10000
#define VMWARE_BACKDOOR_PMC_REAL_TIME		0x10001
#define VMWARE_BACKDOOR_PMC_APPARENT_TIME	0x10002

struct kvm_event_hw_type_mapping {
	u8 eventsel;
	u8 unit_mask;
	unsigned event_type;
};

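/*
 * Vendor-specific PMU callbacks, filled in by intel_pmu_ops and amd_pmu_ops
 * (declared at the bottom of this header) and installed via
 * kvm_pmu_ops_update().
 */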
struct kvm_pmu_ops {
	bool (*hw_event_available)(struct kvm_pmc *pmc);
	bool (*pmc_is_enabled)(struct kvm_pmc *pmc);
	struct kvm_pmc *(*pmc_idx_to_pmc)(struct kvm_pmu *pmu, int pmc_idx);
	struct kvm_pmc *(*rdpmc_ecx_to_pmc)(struct kvm_vcpu *vcpu,
					    unsigned int idx, u64 *mask);
	struct kvm_pmc *(*msr_idx_to_pmc)(struct kvm_vcpu *vcpu, u32 msr);
	bool (*is_valid_rdpmc_ecx)(struct kvm_vcpu *vcpu, unsigned int idx);
	bool (*is_valid_msr)(struct kvm_vcpu *vcpu, u32 msr);
	int (*get_msr)(struct kvm_vcpu *vcpu, struct msr_data *msr_info);
	int (*set_msr)(struct kvm_vcpu *vcpu, struct msr_data *msr_info);
	void (*refresh)(struct kvm_vcpu *vcpu);
	void (*init)(struct kvm_vcpu *vcpu);
	void (*reset)(struct kvm_vcpu *vcpu);
	void (*deliver_pmi)(struct kvm_vcpu *vcpu);
	void (*cleanup)(struct kvm_vcpu *vcpu);
};

void kvm_pmu_ops_update(const struct kvm_pmu_ops *pmu_ops);

static inline u64 pmc_bitmask(struct kvm_pmc *pmc)
{
	struct kvm_pmu *pmu = pmc_to_pmu(pmc);

	return pmu->counter_bitmask[pmc->type];
}

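/*
 * Read the up-to-date value of a counter: the last saved value plus the
 * delta accumulated in the backing perf_event (if any), truncated to the
 * counter's width.
 */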
static inline u64 pmc_read_counter(struct kvm_pmc *pmc)
{
	u64 counter, enabled, running;

	counter = pmc->counter;
	if (pmc->perf_event && !pmc->is_paused)
		counter += perf_event_read_value(pmc->perf_event,
						 &enabled, &running);
	/* FIXME: Scaling needed? */
	return counter & pmc_bitmask(pmc);
}

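/*
 * Release the perf_event backing the PMC and forget its cached configuration
 * so that the next reprogramming creates a fresh event.
 */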
static inline void pmc_release_perf_event(struct kvm_pmc *pmc)
{
	if (pmc->perf_event) {
		perf_event_release_kernel(pmc->perf_event);
		pmc->perf_event = NULL;
		pmc->current_config = 0;
		pmc_to_pmu(pmc)->event_count--;
	}
}

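/*
 * Stop counting: fold the perf_event delta into pmc->counter, then drop the
 * perf_event so the saved value is all that remains.
 */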
static inline void pmc_stop_counter(struct kvm_pmc *pmc)
{
	if (pmc->perf_event) {
		pmc->counter = pmc_read_counter(pmc);
		pmc_release_perf_event(pmc);
	}
}

static inline bool pmc_is_gp(struct kvm_pmc *pmc)
{
	return pmc->type == KVM_PMC_GP;
}

static inline bool pmc_is_fixed(struct kvm_pmc *pmc)
{
	return pmc->type == KVM_PMC_FIXED;
}

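/*
 * A value written to IA32_PERF_GLOBAL_CTRL is valid iff it sets none of the
 * bits that are reserved for this vCPU's PMU (i.e. no bits in
 * global_ctrl_mask).
 */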
static inline bool kvm_valid_perf_global_ctrl(struct kvm_pmu *pmu,
					      u64 data)
{
	return !(pmu->global_ctrl_mask & data);
}

/*
 * Returns the general purpose PMC with the specified MSR.  Note that it can
 * be used for both PERFCTRn and EVNTSELn; that is why it accepts the base
 * MSR as a parameter, to tell them apart.
 */
static inline struct kvm_pmc *get_gp_pmc(struct kvm_pmu *pmu, u32 msr,
					 u32 base)
{
	if (msr >= base && msr < base + pmu->nr_arch_gp_counters) {
		u32 index = array_index_nospec(msr - base,
					       pmu->nr_arch_gp_counters);

		return &pmu->gp_counters[index];
	}

	return NULL;
}

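/*
 * Illustrative use of get_gp_pmc() (callers live in the vendor code, not in
 * this header): the Intel implementation is expected to look up counters as
 * get_gp_pmc(pmu, msr, MSR_IA32_PERFCTR0) and event selectors as
 * get_gp_pmc(pmu, msr, MSR_P6_EVNTSEL0).
 */
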
/* returns fixed PMC with the specified MSR */
static inline struct kvm_pmc *get_fixed_pmc(struct kvm_pmu *pmu, u32 msr)
{
	int base = MSR_CORE_PERF_FIXED_CTR0;

	if (msr >= base && msr < base + pmu->nr_arch_fixed_counters) {
		u32 index = array_index_nospec(msr - base,
					       pmu->nr_arch_fixed_counters);

		return &pmu->fixed_counters[index];
	}

	return NULL;
}

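/*
 * Hardware counters count up and interrupt on overflow, so the perf sample
 * period is the distance from the current counter value to the overflow
 * point.  E.g. a counter_value of 0 yields a full-width period of
 * pmc_bitmask(pmc) + 1.
 */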
static inline u64 get_sample_period(struct kvm_pmc *pmc, u64 counter_value)
{
	u64 sample_period = (-counter_value) & pmc_bitmask(pmc);

	if (!sample_period)
		sample_period = pmc_bitmask(pmc) + 1;
	return sample_period;
}

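/*
 * Resynchronize the backing perf_event's sample period with the current
 * counter value, e.g. after the guest writes the counter.  Paused and
 * non-sampling events have no period to update.
 */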
static inline void pmc_update_sample_period(struct kvm_pmc *pmc)
{
	if (!pmc->perf_event || pmc->is_paused ||
	    !is_sampling_event(pmc->perf_event))
		return;

	perf_event_period(pmc->perf_event,
			  get_sample_period(pmc, pmc->counter));
}

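/*
 * True if the guest has enabled the counter in its control MSR (the fixed
 * counter control field or the event selector's enable bit), regardless of
 * whether KVM has actually (re)programmed a perf_event for it yet.
 */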
static inline bool pmc_speculative_in_use(struct kvm_pmc *pmc)
{
	struct kvm_pmu *pmu = pmc_to_pmu(pmc);

	if (pmc_is_fixed(pmc))
		return fixed_ctrl_field(pmu->fixed_ctr_ctrl,
					pmc->idx - INTEL_PMC_IDX_FIXED) & 0x3;

	return pmc->eventsel & ARCH_PERFMON_EVENTSEL_ENABLE;
}

extern struct x86_pmu_capability kvm_pmu_cap;

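/*
 * Snapshot the host PMU capabilities and clamp them to what KVM can
 * virtualize; may clear enable_pmu entirely (e.g. on hybrid CPUs).
 */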
static inline void kvm_init_pmu_capability(void)
{
	bool is_intel = boot_cpu_data.x86_vendor == X86_VENDOR_INTEL;

	/*
	 * Hybrid PMUs don't play nice with virtualization without careful
	 * configuration by userspace, and KVM's APIs for reporting supported
	 * vPMU features do not account for hybrid PMUs.  Disable vPMU support
	 * for hybrid PMUs until KVM gains a way to let userspace opt-in.
	 */
	if (cpu_feature_enabled(X86_FEATURE_HYBRID_CPU))
		enable_pmu = false;

	if (enable_pmu) {
		perf_get_x86_pmu_capability(&kvm_pmu_cap);

		/*
		 * For Intel, only support a guest architectural PMU on a host
		 * that itself has an architectural PMU.
		 */
		if ((is_intel && !kvm_pmu_cap.version) ||
		    !kvm_pmu_cap.num_counters_gp)
			enable_pmu = false;
	}

	if (!enable_pmu) {
		memset(&kvm_pmu_cap, 0, sizeof(kvm_pmu_cap));
		return;
	}

	kvm_pmu_cap.version = min(kvm_pmu_cap.version, 2);
	kvm_pmu_cap.num_counters_fixed = min(kvm_pmu_cap.num_counters_fixed,
					     KVM_PMC_MAX_FIXED);
}

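/*
 * Mark the PMC as needing reprogramming and kick the vCPU with KVM_REQ_PMU so
 * that the pending work is handled before the next guest entry.
 */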
static inline void kvm_pmu_request_counter_reprogam(struct kvm_pmc *pmc)
{
	set_bit(pmc->idx, pmc_to_pmu(pmc)->reprogram_pmi);
	kvm_make_request(KVM_REQ_PMU, pmc->vcpu);
}

void kvm_pmu_deliver_pmi(struct kvm_vcpu *vcpu);
void kvm_pmu_handle_event(struct kvm_vcpu *vcpu);
int kvm_pmu_rdpmc(struct kvm_vcpu *vcpu, unsigned pmc, u64 *data);
bool kvm_pmu_is_valid_rdpmc_ecx(struct kvm_vcpu *vcpu, unsigned int idx);
bool kvm_pmu_is_valid_msr(struct kvm_vcpu *vcpu, u32 msr);
int kvm_pmu_get_msr(struct kvm_vcpu *vcpu, struct msr_data *msr_info);
int kvm_pmu_set_msr(struct kvm_vcpu *vcpu, struct msr_data *msr_info);
void kvm_pmu_refresh(struct kvm_vcpu *vcpu);
void kvm_pmu_reset(struct kvm_vcpu *vcpu);
void kvm_pmu_init(struct kvm_vcpu *vcpu);
void kvm_pmu_cleanup(struct kvm_vcpu *vcpu);
void kvm_pmu_destroy(struct kvm_vcpu *vcpu);
int kvm_vm_ioctl_set_pmu_event_filter(struct kvm *kvm, void __user *argp);
void kvm_pmu_trigger_event(struct kvm_vcpu *vcpu, u64 perf_hw_id);

bool is_vmware_backdoor_pmc(u32 pmc_idx);

extern struct kvm_pmu_ops intel_pmu_ops;
extern struct kvm_pmu_ops amd_pmu_ops;
#endif /* __KVM_X86_PMU_H */