// SPDX-License-Identifier: GPL-2.0-only
/*
 * Kernel-based Virtual Machine -- Performance Monitoring Unit support
 *
 * Copyright 2015 Red Hat, Inc. and/or its affiliates.
 *
 * Authors:
 *   Avi Kivity   <avi@redhat.com>
 *   Gleb Natapov <gleb@redhat.com>
 *   Wei Huang    <wei@redhat.com>
 */

#include <linux/types.h>
#include <linux/kvm_host.h>
#include <linux/perf_event.h>
#include <asm/perf_event.h>
#include "x86.h"
#include "cpuid.h"
#include "lapic.h"
#include "pmu.h"

/* This keeps the total size of the filter under 4k. */
#define KVM_PMU_EVENT_FILTER_MAX_EVENTS 63

/* NOTE:
 * - Each perf counter is defined as "struct kvm_pmc";
 * - There are two types of perf counters: general purpose (gp) and fixed.
 *   gp counters are stored in gp_counters[] and fixed counters are stored
 *   in fixed_counters[] respectively. Both of them are part of "struct
 *   kvm_pmu";
 * - pmu.c understands the difference between gp counters and fixed counters.
 *   However AMD doesn't support fixed counters;
 * - There are three types of index to access perf counters (PMC):
 *   1. MSR (named msr): For example Intel has MSR_IA32_PERFCTRn and AMD
 *      has MSR_K7_PERFCTRn.
 *   2. MSR Index (named idx): This is normally used by the RDPMC
 *      instruction. For instance the AMD RDPMC instruction uses 0000_0003h
 *      in ECX to access C001_0007h (MSR_K7_PERFCTR3). Intel has a similar
 *      mechanism, except that it also supports fixed counters. idx can be
 *      used as an index into the gp and fixed counters.
 *   3. Global PMC Index (named pmc): pmc is an index specific to PMU
 *      code. Each pmc, stored in the kvm_pmc.idx field, is unique across
 *      all perf counters (both gp and fixed). The mapping between pmc and
 *      the perf counters is as follows:
 *      * Intel: [0 .. INTEL_PMC_MAX_GENERIC-1] <=> gp counters
 *               [INTEL_PMC_IDX_FIXED .. INTEL_PMC_IDX_FIXED + 2] <=> fixed
 *      * AMD:   [0 .. AMD64_NUM_COUNTERS-1] <=> gp counters
 */
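
/*
 * Illustration of the mapping above (for orientation only): on Intel,
 * gp counter 0 is reached via MSR_IA32_PERFCTR0 (msr), RDPMC with ECX = 0
 * (idx), and pmc->idx == 0; fixed counter 0 is reached via RDPMC with
 * ECX = 0x40000000 (bit 30 selects the fixed-counter space) and
 * pmc->idx == INTEL_PMC_IDX_FIXED.
 */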

static void kvm_pmi_trigger_fn(struct irq_work *irq_work)
{
	struct kvm_pmu *pmu = container_of(irq_work, struct kvm_pmu, irq_work);
	struct kvm_vcpu *vcpu = pmu_to_vcpu(pmu);

	kvm_pmu_deliver_pmi(vcpu);
}

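/*
 * Overflow handler for a counter programmed without PMI delivery: record
 * the overflow in global_status, mark the counter for reprogramming, and
 * let vcpu context pick it up via KVM_REQ_PMU (see kvm_pmu_handle_event()).
 */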
static void kvm_perf_overflow(struct perf_event *perf_event,
			      struct perf_sample_data *data,
			      struct pt_regs *regs)
{
	struct kvm_pmc *pmc = perf_event->overflow_handler_context;
	struct kvm_pmu *pmu = pmc_to_pmu(pmc);

	if (!test_and_set_bit(pmc->idx,
			      (unsigned long *)&pmu->reprogram_pmi)) {
		__set_bit(pmc->idx, (unsigned long *)&pmu->global_status);
		kvm_make_request(KVM_REQ_PMU, pmc->vcpu);
	}
}

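/*
 * Overflow handler for a counter programmed with PMI delivery: in addition
 * to the bookkeeping done by kvm_perf_overflow(), inject a PMI into the
 * guest (indirectly, via irq_work, when running in NMI context).
 */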
static void kvm_perf_overflow_intr(struct perf_event *perf_event,
				   struct perf_sample_data *data,
				   struct pt_regs *regs)
{
	struct kvm_pmc *pmc = perf_event->overflow_handler_context;
	struct kvm_pmu *pmu = pmc_to_pmu(pmc);

	if (!test_and_set_bit(pmc->idx,
			      (unsigned long *)&pmu->reprogram_pmi)) {
		__set_bit(pmc->idx, (unsigned long *)&pmu->global_status);
		kvm_make_request(KVM_REQ_PMU, pmc->vcpu);

		/*
		 * Inject PMI. If the vcpu was in guest mode during the NMI,
		 * the PMI can be injected on guest mode re-entry. Otherwise
		 * we can't be sure that the vcpu wasn't executing the hlt
		 * instruction at the time of vmexit and is not going to
		 * re-enter guest mode until woken up. So we should wake it,
		 * but that is impossible from NMI context. Do it from irq
		 * work instead.
		 */
		if (!kvm_is_in_guest())
			irq_work_queue(&pmc_to_pmu(pmc)->irq_work);
		else
			kvm_make_request(KVM_REQ_PMI, pmc->vcpu);
	}
}

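/*
 * Create the host perf_event that backs a guest counter. The sample period
 * is the distance from the current guest counter value to its overflow
 * point, so the host event fires exactly when the guest counter would
 * wrap: e.g. a 48-bit counter holding 0xfffffffffff0 gives
 * (-0xfffffffffff0ULL) & ((1ULL << 48) - 1) == 0x10.
 */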
static void pmc_reprogram_counter(struct kvm_pmc *pmc, u32 type,
				  unsigned config, bool exclude_user,
				  bool exclude_kernel, bool intr,
				  bool in_tx, bool in_tx_cp)
{
	struct perf_event *event;
	struct perf_event_attr attr = {
		.type = type,
		.size = sizeof(attr),
		.pinned = true,
		.exclude_idle = true,
		.exclude_host = 1,
		.exclude_user = exclude_user,
		.exclude_kernel = exclude_kernel,
		.config = config,
	};

	attr.sample_period = (-pmc->counter) & pmc_bitmask(pmc);

	if (in_tx)
		attr.config |= HSW_IN_TX;
	if (in_tx_cp) {
		/*
		 * HSW_IN_TX_CHECKPOINTED is not supported with nonzero
		 * period. Just clear the sample period so at least
		 * allocating the counter doesn't fail.
		 */
		attr.sample_period = 0;
		attr.config |= HSW_IN_TX_CHECKPOINTED;
	}

	event = perf_event_create_kernel_counter(&attr, -1, current,
						 intr ? kvm_perf_overflow_intr :
						 kvm_perf_overflow, pmc);
	if (IS_ERR(event)) {
		pr_debug_ratelimited("kvm_pmu: event creation failed %ld for pmc->idx = %d\n",
				     PTR_ERR(event), pmc->idx);
		return;
	}

	pmc->perf_event = event;
	clear_bit(pmc->idx, (unsigned long *)&pmc_to_pmu(pmc)->reprogram_pmi);
}

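/*
 * Reprogram a gp counter from the guest's event-select MSR value: stop the
 * old host event, consult the (optional) per-VM event filter, and map the
 * event to a generic hardware event when no modifier bits are set, falling
 * back to a raw event otherwise.
 */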
void reprogram_gp_counter(struct kvm_pmc *pmc, u64 eventsel)
{
	unsigned config, type = PERF_TYPE_RAW;
	u8 event_select, unit_mask;
	struct kvm *kvm = pmc->vcpu->kvm;
	struct kvm_pmu_event_filter *filter;
	int i;
	bool allow_event = true;

	if (eventsel & ARCH_PERFMON_EVENTSEL_PIN_CONTROL)
		printk_once("kvm pmu: pin control bit is ignored\n");

	pmc->eventsel = eventsel;

	pmc_stop_counter(pmc);

	if (!(eventsel & ARCH_PERFMON_EVENTSEL_ENABLE) || !pmc_is_enabled(pmc))
		return;

	filter = srcu_dereference(kvm->arch.pmu_event_filter, &kvm->srcu);
	if (filter) {
		for (i = 0; i < filter->nevents; i++)
			if (filter->events[i] ==
			    (eventsel & AMD64_RAW_EVENT_MASK_NB))
				break;
		if (filter->action == KVM_PMU_EVENT_ALLOW &&
		    i == filter->nevents)
			allow_event = false;
		if (filter->action == KVM_PMU_EVENT_DENY &&
		    i < filter->nevents)
			allow_event = false;
	}
	if (!allow_event)
		return;

	event_select = eventsel & ARCH_PERFMON_EVENTSEL_EVENT;
	unit_mask = (eventsel & ARCH_PERFMON_EVENTSEL_UMASK) >> 8;

	if (!(eventsel & (ARCH_PERFMON_EVENTSEL_EDGE |
			  ARCH_PERFMON_EVENTSEL_INV |
			  ARCH_PERFMON_EVENTSEL_CMASK |
			  HSW_IN_TX |
			  HSW_IN_TX_CHECKPOINTED))) {
		config = kvm_x86_ops->pmu_ops->find_arch_event(pmc_to_pmu(pmc),
							       event_select,
							       unit_mask);
		if (config != PERF_COUNT_HW_MAX)
			type = PERF_TYPE_HARDWARE;
	}

	if (type == PERF_TYPE_RAW)
		config = eventsel & X86_RAW_EVENT_MASK;

	pmc_reprogram_counter(pmc, type, config,
			      !(eventsel & ARCH_PERFMON_EVENTSEL_USR),
			      !(eventsel & ARCH_PERFMON_EVENTSEL_OS),
			      eventsel & ARCH_PERFMON_EVENTSEL_INT,
			      (eventsel & HSW_IN_TX),
			      (eventsel & HSW_IN_TX_CHECKPOINTED));
}
EXPORT_SYMBOL_GPL(reprogram_gp_counter);

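/*
 * Reprogram a fixed counter from its 4-bit field in
 * MSR_CORE_PERF_FIXED_CTR_CTRL: bit 0 enables ring-0 counting, bit 1
 * ring-3 counting, and bit 3 asks for a PMI on overflow; e.g. ctrl == 0xb
 * means "count in both rings and raise a PMI when the counter overflows".
 */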
void reprogram_fixed_counter(struct kvm_pmc *pmc, u8 ctrl, int idx)
{
	unsigned en_field = ctrl & 0x3;
	bool pmi = ctrl & 0x8;

	pmc_stop_counter(pmc);

	if (!en_field || !pmc_is_enabled(pmc))
		return;

	pmc_reprogram_counter(pmc, PERF_TYPE_HARDWARE,
			      kvm_x86_ops->pmu_ops->find_fixed_event(idx),
			      !(en_field & 0x2), /* exclude user */
			      !(en_field & 0x1), /* exclude kernel */
			      pmi, false, false);
}
EXPORT_SYMBOL_GPL(reprogram_fixed_counter);

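/*
 * Reprogram the counter identified by a global pmc index: gp counters are
 * reprogrammed from their event-select MSR, fixed counters from their
 * field in the fixed-counter control MSR.
 */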
void reprogram_counter(struct kvm_pmu *pmu, int pmc_idx)
{
	struct kvm_pmc *pmc = kvm_x86_ops->pmu_ops->pmc_idx_to_pmc(pmu, pmc_idx);

	if (!pmc)
		return;

	if (pmc_is_gp(pmc))
		reprogram_gp_counter(pmc, pmc->eventsel);
	else {
		int idx = pmc_idx - INTEL_PMC_IDX_FIXED;
		u8 ctrl = fixed_ctrl_field(pmu->fixed_ctr_ctrl, idx);

		reprogram_fixed_counter(pmc, ctrl, idx);
	}
}
EXPORT_SYMBOL_GPL(reprogram_counter);

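/*
 * KVM_REQ_PMU handler: reprogram every counter whose bit the overflow
 * handlers left set in reprogram_pmi, now that we are back in vcpu
 * context where the backing perf event can safely be recreated.
 */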
void kvm_pmu_handle_event(struct kvm_vcpu *vcpu)
{
	struct kvm_pmu *pmu = vcpu_to_pmu(vcpu);
	u64 bitmask;
	int bit;

	bitmask = pmu->reprogram_pmi;

	for_each_set_bit(bit, (unsigned long *)&bitmask, X86_PMC_IDX_MAX) {
		struct kvm_pmc *pmc = kvm_x86_ops->pmu_ops->pmc_idx_to_pmc(pmu, bit);

		if (unlikely(!pmc || !pmc->perf_event)) {
			clear_bit(bit, (unsigned long *)&pmu->reprogram_pmi);
			continue;
		}

		reprogram_counter(pmu, bit);
	}
}

/* check if idx is a valid index to access PMU */
int kvm_pmu_is_valid_msr_idx(struct kvm_vcpu *vcpu, unsigned idx)
{
	return kvm_x86_ops->pmu_ops->is_valid_msr_idx(vcpu, idx);
}

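/*
 * VMware exposes three pseudo-PMCs through RDPMC; guests relying on that
 * backdoor read the host TSC, elapsed real time, and elapsed apparent
 * time instead of a hardware counter.
 */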
bool is_vmware_backdoor_pmc(u32 pmc_idx)
{
	switch (pmc_idx) {
	case VMWARE_BACKDOOR_PMC_HOST_TSC:
	case VMWARE_BACKDOOR_PMC_REAL_TIME:
	case VMWARE_BACKDOOR_PMC_APPARENT_TIME:
		return true;
	}
	return false;
}

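/* Service an RDPMC of one of the VMware pseudo-PMCs listed above. */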
static int kvm_pmu_rdpmc_vmware(struct kvm_vcpu *vcpu, unsigned idx, u64 *data)
{
	u64 ctr_val;

	switch (idx) {
	case VMWARE_BACKDOOR_PMC_HOST_TSC:
		ctr_val = rdtsc();
		break;
	case VMWARE_BACKDOOR_PMC_REAL_TIME:
		ctr_val = ktime_get_boottime_ns();
		break;
	case VMWARE_BACKDOOR_PMC_APPARENT_TIME:
		ctr_val = ktime_get_boottime_ns() +
			vcpu->kvm->arch.kvmclock_offset;
		break;
	default:
		return 1;
	}

	*data = ctr_val;
	return 0;
}

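/*
 * Emulate RDPMC. Bit 31 of the guest's ECX requests "fast" mode, which
 * truncates the result to the low 32 bits; the vendor pmu_ops translate
 * the remaining idx bits into a kvm_pmc and may narrow the mask further.
 */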
int kvm_pmu_rdpmc(struct kvm_vcpu *vcpu, unsigned idx, u64 *data)
{
	bool fast_mode = idx & (1u << 31);
	struct kvm_pmu *pmu = vcpu_to_pmu(vcpu);
	struct kvm_pmc *pmc;
	u64 mask = fast_mode ? ~0u : ~0ull;

	if (!pmu->version)
		return 1;

	if (is_vmware_backdoor_pmc(idx))
		return kvm_pmu_rdpmc_vmware(vcpu, idx, data);

	pmc = kvm_x86_ops->pmu_ops->msr_idx_to_pmc(vcpu, idx, &mask);
	if (!pmc)
		return 1;

	*data = pmc_read_counter(pmc) & mask;
	return 0;
}

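/*
 * Deliver a PMI to the guest via the local APIC's LVTPC entry (only when
 * the APIC is emulated in the kernel).
 */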
void kvm_pmu_deliver_pmi(struct kvm_vcpu *vcpu)
{
	if (lapic_in_kernel(vcpu))
		kvm_apic_local_deliver(vcpu->arch.apic, APIC_LVTPC);
}

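/*
 * The PMU MSR helpers below simply dispatch to the vendor implementation
 * (Intel or AMD) behind kvm_x86_ops->pmu_ops.
 */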
bool kvm_pmu_is_valid_msr(struct kvm_vcpu *vcpu, u32 msr)
{
	return kvm_x86_ops->pmu_ops->is_valid_msr(vcpu, msr);
}

int kvm_pmu_get_msr(struct kvm_vcpu *vcpu, u32 msr, u64 *data)
{
	return kvm_x86_ops->pmu_ops->get_msr(vcpu, msr, data);
}

int kvm_pmu_set_msr(struct kvm_vcpu *vcpu, struct msr_data *msr_info)
{
	return kvm_x86_ops->pmu_ops->set_msr(vcpu, msr_info);
}

/*
 * Refresh PMU settings. This function is generally called when the
 * underlying settings change (such as the guest's PMU CPUID being
 * updated), which should rarely happen.
 */
void kvm_pmu_refresh(struct kvm_vcpu *vcpu)
{
	kvm_x86_ops->pmu_ops->refresh(vcpu);
}

void kvm_pmu_reset(struct kvm_vcpu *vcpu)
{
	struct kvm_pmu *pmu = vcpu_to_pmu(vcpu);

	irq_work_sync(&pmu->irq_work);
	kvm_x86_ops->pmu_ops->reset(vcpu);
}

void kvm_pmu_init(struct kvm_vcpu *vcpu)
{
	struct kvm_pmu *pmu = vcpu_to_pmu(vcpu);

	memset(pmu, 0, sizeof(*pmu));
	kvm_x86_ops->pmu_ops->init(vcpu);
	init_irq_work(&pmu->irq_work, kvm_pmi_trigger_fn);
	kvm_pmu_refresh(vcpu);
}

void kvm_pmu_destroy(struct kvm_vcpu *vcpu)
{
	kvm_pmu_reset(vcpu);
}

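/*
 * Install or replace this VM's PMU event filter (KVM_SET_PMU_EVENT_FILTER).
 * The header is copied from userspace twice and the trusted copy written
 * back over the flexible-array allocation, so userspace cannot change
 * nevents between the two copies; synchronize_srcu_expedited() ensures no
 * vcpu is still using the old filter before it is freed.
 */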
int kvm_vm_ioctl_set_pmu_event_filter(struct kvm *kvm, void __user *argp)
{
	struct kvm_pmu_event_filter tmp, *filter;
	size_t size;
	int r;

	if (copy_from_user(&tmp, argp, sizeof(tmp)))
		return -EFAULT;

	if (tmp.action != KVM_PMU_EVENT_ALLOW &&
	    tmp.action != KVM_PMU_EVENT_DENY)
		return -EINVAL;

	if (tmp.nevents > KVM_PMU_EVENT_FILTER_MAX_EVENTS)
		return -E2BIG;

	size = struct_size(filter, events, tmp.nevents);
	filter = kmalloc(size, GFP_KERNEL_ACCOUNT);
	if (!filter)
		return -ENOMEM;

	r = -EFAULT;
	if (copy_from_user(filter, argp, size))
		goto cleanup;

	/* Ensure nevents can't be changed between the user copies. */
	*filter = tmp;

	mutex_lock(&kvm->lock);
	rcu_swap_protected(kvm->arch.pmu_event_filter, filter,
			   mutex_is_locked(&kvm->lock));
	mutex_unlock(&kvm->lock);

	synchronize_srcu_expedited(&kvm->srcu);
	r = 0;
cleanup:
	kfree(filter);
	return r;
}