// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (C) 2015 Linaro Ltd.
 * Author: Shannon Zhao <shannon.zhao@linaro.org>
 */

#include <linux/cpu.h>
#include <linux/kvm.h>
#include <linux/kvm_host.h>
#include <linux/list.h>
#include <linux/perf_event.h>
#include <linux/perf/arm_pmu.h>
#include <linux/uaccess.h>
#include <asm/kvm_emulate.h>
#include <kvm/arm_pmu.h>
#include <kvm/arm_vgic.h>

#define PERF_ATTR_CFG1_COUNTER_64BIT	BIT(0)

DEFINE_STATIC_KEY_FALSE(kvm_arm_pmu_available);

static LIST_HEAD(arm_pmus);
static DEFINE_MUTEX(arm_pmus_lock);

static void kvm_pmu_create_perf_event(struct kvm_vcpu *vcpu, u64 select_idx);

static u32 kvm_pmu_event_mask(struct kvm *kvm)
{
        unsigned int pmuver;

        pmuver = kvm->arch.arm_pmu->pmuver;

        switch (pmuver) {
        case ID_AA64DFR0_EL1_PMUVer_IMP:
                return GENMASK(9, 0);
        case ID_AA64DFR0_EL1_PMUVer_V3P1:
        case ID_AA64DFR0_EL1_PMUVer_V3P4:
        case ID_AA64DFR0_EL1_PMUVer_V3P5:
        case ID_AA64DFR0_EL1_PMUVer_V3P7:
                return GENMASK(15, 0);
        default:	/* Shouldn't be here, just for sanity */
                WARN_ONCE(1, "Unknown PMU version %d\n", pmuver);
                return 0;
        }
}
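
/*
 * Note on the masks above: the architected event number field
 * (PMEVTYPER<n>_EL0.evtCount) is 10 bits wide in the original PMUv3,
 * and FEAT_PMUv3p1 onwards extends it to 16 bits, hence GENMASK(9, 0)
 * vs GENMASK(15, 0).
 */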

/**
 * kvm_pmu_idx_is_64bit - determine if select_idx is a 64bit counter
 * @vcpu: The vcpu pointer
 * @select_idx: The counter index
 */
static bool kvm_pmu_idx_is_64bit(struct kvm_vcpu *vcpu, u64 select_idx)
{
        return (select_idx == ARMV8_PMU_CYCLE_IDX);
}

static bool kvm_pmu_idx_has_64bit_overflow(struct kvm_vcpu *vcpu, u64 select_idx)
{
        return (select_idx == ARMV8_PMU_CYCLE_IDX &&
                __vcpu_sys_reg(vcpu, PMCR_EL0) & ARMV8_PMU_PMCR_LC);
}

static bool kvm_pmu_counter_can_chain(struct kvm_vcpu *vcpu, u64 idx)
{
        return (!(idx & 1) && (idx + 1) < ARMV8_PMU_CYCLE_IDX &&
                !kvm_pmu_idx_has_64bit_overflow(vcpu, idx));
}
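
/*
 * Chaining, roughly: an even-numbered counter that still overflows at
 * 32 bits can feed a CHAIN event programmed on the odd counter right
 * above it, so the pair behaves like a single 64-bit counter.
 */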

static struct kvm_vcpu *kvm_pmc_to_vcpu(struct kvm_pmc *pmc)
{
        struct kvm_pmu *pmu;
        struct kvm_vcpu_arch *vcpu_arch;

        pmc -= pmc->idx;
        pmu = container_of(pmc, struct kvm_pmu, pmc[0]);
        vcpu_arch = container_of(pmu, struct kvm_vcpu_arch, pmu);
        return container_of(vcpu_arch, struct kvm_vcpu, arch);
}
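
/*
 * The walk above relies on pmc being an element of the pmc[] array
 * embedded in struct kvm_pmu: stepping back pmc->idx entries lands on
 * pmc[0], from which container_of() climbs out to the enclosing pmu,
 * the vcpu_arch, and finally the vcpu itself.
 */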

static u32 counter_index_to_reg(u64 idx)
{
        return (idx == ARMV8_PMU_CYCLE_IDX) ? PMCCNTR_EL0 : PMEVCNTR0_EL0 + idx;
}

static u32 counter_index_to_evtreg(u64 idx)
{
        return (idx == ARMV8_PMU_CYCLE_IDX) ? PMCCFILTR_EL0 : PMEVTYPER0_EL0 + idx;
}
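
/*
 * e.g. counter_index_to_reg(3) is PMEVCNTR3_EL0, while the cycle
 * counter (index ARMV8_PMU_CYCLE_IDX, i.e. 31) maps to PMCCNTR_EL0 and
 * its event register to PMCCFILTR_EL0.
 */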

/**
 * kvm_pmu_get_counter_value - get PMU counter value
 * @vcpu: The vcpu pointer
 * @select_idx: The counter index
 */
u64 kvm_pmu_get_counter_value(struct kvm_vcpu *vcpu, u64 select_idx)
{
        u64 counter, reg, enabled, running;
        struct kvm_pmu *pmu = &vcpu->arch.pmu;
        struct kvm_pmc *pmc = &pmu->pmc[select_idx];

        if (!kvm_vcpu_has_pmu(vcpu))
                return 0;

        reg = counter_index_to_reg(select_idx);
        counter = __vcpu_sys_reg(vcpu, reg);

        /*
         * The real counter value is equal to the value of the counter
         * register plus the value the perf event has counted.
         */
        if (pmc->perf_event)
                counter += perf_event_read_value(pmc->perf_event, &enabled,
                                                 &running);

        if (!kvm_pmu_idx_is_64bit(vcpu, select_idx))
                counter = lower_32_bits(counter);

        return counter;
}

/**
 * kvm_pmu_set_counter_value - set PMU counter value
 * @vcpu: The vcpu pointer
 * @select_idx: The counter index
 * @val: The counter value
 */
void kvm_pmu_set_counter_value(struct kvm_vcpu *vcpu, u64 select_idx, u64 val)
{
        u64 reg;

        if (!kvm_vcpu_has_pmu(vcpu))
                return;

        reg = counter_index_to_reg(select_idx);
        __vcpu_sys_reg(vcpu, reg) += (s64)val - kvm_pmu_get_counter_value(vcpu, select_idx);

        /* Recreate the perf event to reflect the updated sample_period */
        kvm_pmu_create_perf_event(vcpu, select_idx);
}
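
/*
 * The += above adjusts the saved register by the delta between the
 * requested value and the current logical value, so that saved state
 * plus whatever the perf event has counted reads back as exactly @val.
 */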

/**
 * kvm_pmu_release_perf_event - remove the perf event
 * @pmc: The PMU counter pointer
 */
static void kvm_pmu_release_perf_event(struct kvm_pmc *pmc)
{
        if (pmc->perf_event) {
                perf_event_disable(pmc->perf_event);
                perf_event_release_kernel(pmc->perf_event);
                pmc->perf_event = NULL;
        }
}

/**
 * kvm_pmu_stop_counter - stop PMU counter
 * @vcpu: The vcpu pointer
 * @pmc: The PMU counter pointer
 *
 * If this counter has been configured to monitor some event, release it here.
 */
static void kvm_pmu_stop_counter(struct kvm_vcpu *vcpu, struct kvm_pmc *pmc)
{
        u64 reg, val;

        if (!pmc->perf_event)
                return;

        val = kvm_pmu_get_counter_value(vcpu, pmc->idx);

        reg = counter_index_to_reg(pmc->idx);

        __vcpu_sys_reg(vcpu, reg) = val;

        kvm_pmu_release_perf_event(pmc);
}

/**
 * kvm_pmu_vcpu_init - assign pmu counter indices for this vcpu
 * @vcpu: The vcpu pointer
 */
void kvm_pmu_vcpu_init(struct kvm_vcpu *vcpu)
{
        int i;
        struct kvm_pmu *pmu = &vcpu->arch.pmu;

        for (i = 0; i < ARMV8_PMU_MAX_COUNTERS; i++)
                pmu->pmc[i].idx = i;
}

/**
 * kvm_pmu_vcpu_reset - reset pmu state for this vcpu
 * @vcpu: The vcpu pointer
 */
void kvm_pmu_vcpu_reset(struct kvm_vcpu *vcpu)
{
        unsigned long mask = kvm_pmu_valid_counter_mask(vcpu);
        struct kvm_pmu *pmu = &vcpu->arch.pmu;
        int i;

        for_each_set_bit(i, &mask, 32)
                kvm_pmu_stop_counter(vcpu, &pmu->pmc[i]);
}

/**
 * kvm_pmu_vcpu_destroy - free the perf events of the PMU for this vcpu
 * @vcpu: The vcpu pointer
 */
void kvm_pmu_vcpu_destroy(struct kvm_vcpu *vcpu)
{
        int i;
        struct kvm_pmu *pmu = &vcpu->arch.pmu;

        for (i = 0; i < ARMV8_PMU_MAX_COUNTERS; i++)
                kvm_pmu_release_perf_event(&pmu->pmc[i]);
        irq_work_sync(&vcpu->arch.pmu.overflow_work);
}

u64 kvm_pmu_valid_counter_mask(struct kvm_vcpu *vcpu)
{
        u64 val = __vcpu_sys_reg(vcpu, PMCR_EL0) >> ARMV8_PMU_PMCR_N_SHIFT;

        val &= ARMV8_PMU_PMCR_N_MASK;
        if (val == 0)
                return BIT(ARMV8_PMU_CYCLE_IDX);
        else
                return GENMASK(val - 1, 0) | BIT(ARMV8_PMU_CYCLE_IDX);
}
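
/*
 * Example: with PMCR_EL0.N == 6, the valid mask is GENMASK(5, 0) |
 * BIT(31), i.e. event counters 0-5 plus the cycle counter; with N == 0
 * only the cycle counter is exposed.
 */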

/**
 * kvm_pmu_enable_counter_mask - enable selected PMU counters
 * @vcpu: The vcpu pointer
 * @val: the value the guest writes to the PMCNTENSET register
 *
 * Call perf_event_enable to start counting the perf event
 */
void kvm_pmu_enable_counter_mask(struct kvm_vcpu *vcpu, u64 val)
{
        int i;
        struct kvm_pmu *pmu = &vcpu->arch.pmu;
        struct kvm_pmc *pmc;

        if (!kvm_vcpu_has_pmu(vcpu))
                return;

        if (!(__vcpu_sys_reg(vcpu, PMCR_EL0) & ARMV8_PMU_PMCR_E) || !val)
                return;

        for (i = 0; i < ARMV8_PMU_MAX_COUNTERS; i++) {
                if (!(val & BIT(i)))
                        continue;

                pmc = &pmu->pmc[i];

                if (!pmc->perf_event) {
                        kvm_pmu_create_perf_event(vcpu, i);
                } else {
                        perf_event_enable(pmc->perf_event);
                        if (pmc->perf_event->state != PERF_EVENT_STATE_ACTIVE)
                                kvm_debug("failed to enable perf event\n");
                }
        }
}

/**
 * kvm_pmu_disable_counter_mask - disable selected PMU counters
 * @vcpu: The vcpu pointer
 * @val: the value the guest writes to the PMCNTENCLR register
 *
 * Call perf_event_disable to stop counting the perf event
 */
void kvm_pmu_disable_counter_mask(struct kvm_vcpu *vcpu, u64 val)
{
        int i;
        struct kvm_pmu *pmu = &vcpu->arch.pmu;
        struct kvm_pmc *pmc;

        if (!kvm_vcpu_has_pmu(vcpu) || !val)
                return;

        for (i = 0; i < ARMV8_PMU_MAX_COUNTERS; i++) {
                if (!(val & BIT(i)))
                        continue;

                pmc = &pmu->pmc[i];

                if (pmc->perf_event)
                        perf_event_disable(pmc->perf_event);
        }
}

static u64 kvm_pmu_overflow_status(struct kvm_vcpu *vcpu)
{
        u64 reg = 0;

        if ((__vcpu_sys_reg(vcpu, PMCR_EL0) & ARMV8_PMU_PMCR_E)) {
                reg = __vcpu_sys_reg(vcpu, PMOVSSET_EL0);
                reg &= __vcpu_sys_reg(vcpu, PMCNTENSET_EL0);
                reg &= __vcpu_sys_reg(vcpu, PMINTENSET_EL1);
        }

        return reg;
}

static void kvm_pmu_update_state(struct kvm_vcpu *vcpu)
{
        struct kvm_pmu *pmu = &vcpu->arch.pmu;
        bool overflow;

        if (!kvm_vcpu_has_pmu(vcpu))
                return;

        overflow = !!kvm_pmu_overflow_status(vcpu);
        if (pmu->irq_level == overflow)
                return;

        pmu->irq_level = overflow;

        if (likely(irqchip_in_kernel(vcpu->kvm))) {
                int ret = kvm_vgic_inject_irq(vcpu->kvm, vcpu->vcpu_id,
                                              pmu->irq_num, overflow, pmu);
                WARN_ON(ret);
        }
}

bool kvm_pmu_should_notify_user(struct kvm_vcpu *vcpu)
{
        struct kvm_pmu *pmu = &vcpu->arch.pmu;
        struct kvm_sync_regs *sregs = &vcpu->run->s.regs;
        bool run_level = sregs->device_irq_level & KVM_ARM_DEV_PMU;

        if (likely(irqchip_in_kernel(vcpu->kvm)))
                return false;

        return pmu->irq_level != run_level;
}

/*
 * Reflect the PMU overflow interrupt output level into the kvm_run structure
 */
void kvm_pmu_update_run(struct kvm_vcpu *vcpu)
{
        struct kvm_sync_regs *regs = &vcpu->run->s.regs;

        /* Populate the PMU overflow bit for user space */
        regs->device_irq_level &= ~KVM_ARM_DEV_PMU;
        if (vcpu->arch.pmu.irq_level)
                regs->device_irq_level |= KVM_ARM_DEV_PMU;
}

/**
 * kvm_pmu_flush_hwstate - flush pmu state to cpu
 * @vcpu: The vcpu pointer
 *
 * Check if the PMU has overflowed while we were running in the host, and
 * inject an interrupt if that was the case.
 */
void kvm_pmu_flush_hwstate(struct kvm_vcpu *vcpu)
{
        kvm_pmu_update_state(vcpu);
}

/**
 * kvm_pmu_sync_hwstate - sync pmu state from cpu
 * @vcpu: The vcpu pointer
 *
 * Check if the PMU has overflowed while we were running in the guest, and
 * inject an interrupt if that was the case.
 */
void kvm_pmu_sync_hwstate(struct kvm_vcpu *vcpu)
{
        kvm_pmu_update_state(vcpu);
}

/*
 * When the perf interrupt is an NMI, we cannot safely notify the vcpu
 * corresponding to the event. This is why we need a callback to do it
 * once outside of the NMI context.
 */
static void kvm_pmu_perf_overflow_notify_vcpu(struct irq_work *work)
{
        struct kvm_vcpu *vcpu;
        struct kvm_pmu *pmu;

        pmu = container_of(work, struct kvm_pmu, overflow_work);
        vcpu = kvm_pmc_to_vcpu(pmu->pmc);

        kvm_vcpu_kick(vcpu);
}
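
/*
 * kvm_vcpu_kick() is not NMI-safe (it may IPI the CPU the vcpu runs
 * on), so when the PMU interrupt is taken as an NMI the overflow
 * handler below queues this irq_work instead of kicking directly.
 */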

/*
 * Perform an increment on any of the counters described in @mask,
 * generating the overflow if required, and propagate it as a chained
 * event if possible.
 */
static void kvm_pmu_counter_increment(struct kvm_vcpu *vcpu,
                                      unsigned long mask, u32 event)
{
        int i;

        if (!(__vcpu_sys_reg(vcpu, PMCR_EL0) & ARMV8_PMU_PMCR_E))
                return;

        /* Weed out disabled counters */
        mask &= __vcpu_sys_reg(vcpu, PMCNTENSET_EL0);

        for_each_set_bit(i, &mask, ARMV8_PMU_CYCLE_IDX) {
                u64 type, reg;

                /* Filter on event type */
                type = __vcpu_sys_reg(vcpu, counter_index_to_evtreg(i));
                type &= kvm_pmu_event_mask(vcpu->kvm);
                if (type != event)
                        continue;

                /* Increment this counter */
                reg = __vcpu_sys_reg(vcpu, counter_index_to_reg(i)) + 1;
                if (!kvm_pmu_idx_is_64bit(vcpu, i))
                        reg = lower_32_bits(reg);
                __vcpu_sys_reg(vcpu, counter_index_to_reg(i)) = reg;

                /* No overflow? move on */
                if (kvm_pmu_idx_has_64bit_overflow(vcpu, i) ? reg : lower_32_bits(reg))
                        continue;

                /* Mark overflow */
                __vcpu_sys_reg(vcpu, PMOVSSET_EL0) |= BIT(i);

                if (kvm_pmu_counter_can_chain(vcpu, i))
                        kvm_pmu_counter_increment(vcpu, BIT(i + 1),
                                                  ARMV8_PMUV3_PERFCTR_CHAIN);
        }
}

/* Compute the sample period for a given counter value */
static u64 compute_period(struct kvm_vcpu *vcpu, u64 select_idx, u64 counter)
{
        u64 val;

        if (kvm_pmu_idx_is_64bit(vcpu, select_idx)) {
                if (!kvm_pmu_idx_has_64bit_overflow(vcpu, select_idx))
                        val = -(counter & GENMASK(31, 0));
                else
                        val = (-counter) & GENMASK(63, 0);
        } else {
                val = (-counter) & GENMASK(31, 0);
        }

        return val;
}
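
/*
 * Worked example: a 32-bit counter currently at 0xfffffff0 yields a
 * period of (-0xfffffff0) & GENMASK(31, 0) == 0x10, i.e. the perf
 * event fires after the 16 increments it takes to reach the 32-bit
 * overflow.
 */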

/*
 * When the perf event overflows, set the overflow status and inform the vcpu.
 */
static void kvm_pmu_perf_overflow(struct perf_event *perf_event,
                                  struct perf_sample_data *data,
                                  struct pt_regs *regs)
{
        struct kvm_pmc *pmc = perf_event->overflow_handler_context;
        struct arm_pmu *cpu_pmu = to_arm_pmu(perf_event->pmu);
        struct kvm_vcpu *vcpu = kvm_pmc_to_vcpu(pmc);
        int idx = pmc->idx;
        u64 period;

        cpu_pmu->pmu.stop(perf_event, PERF_EF_UPDATE);

        /*
         * Reset the sample period to the architectural limit,
         * i.e. the point where the counter overflows.
         */
        period = compute_period(vcpu, idx, local64_read(&perf_event->count));

        local64_set(&perf_event->hw.period_left, 0);
        perf_event->attr.sample_period = period;
        perf_event->hw.sample_period = period;

        __vcpu_sys_reg(vcpu, PMOVSSET_EL0) |= BIT(idx);

        if (kvm_pmu_counter_can_chain(vcpu, idx))
                kvm_pmu_counter_increment(vcpu, BIT(idx + 1),
                                          ARMV8_PMUV3_PERFCTR_CHAIN);

        if (kvm_pmu_overflow_status(vcpu)) {
                kvm_make_request(KVM_REQ_IRQ_PENDING, vcpu);

                if (!in_nmi())
                        kvm_vcpu_kick(vcpu);
                else
                        irq_work_queue(&vcpu->arch.pmu.overflow_work);
        }

        cpu_pmu->pmu.start(perf_event, PERF_EF_RELOAD);
}

/**
 * kvm_pmu_software_increment - do software increment
 * @vcpu: The vcpu pointer
 * @val: the value the guest writes to the PMSWINC register
 */
void kvm_pmu_software_increment(struct kvm_vcpu *vcpu, u64 val)
{
        kvm_pmu_counter_increment(vcpu, val, ARMV8_PMUV3_PERFCTR_SW_INCR);
}

/**
 * kvm_pmu_handle_pmcr - handle PMCR register
 * @vcpu: The vcpu pointer
 * @val: the value the guest writes to the PMCR register
 */
void kvm_pmu_handle_pmcr(struct kvm_vcpu *vcpu, u64 val)
{
        int i;

        if (!kvm_vcpu_has_pmu(vcpu))
                return;

        if (val & ARMV8_PMU_PMCR_E) {
                kvm_pmu_enable_counter_mask(vcpu,
                       __vcpu_sys_reg(vcpu, PMCNTENSET_EL0));
        } else {
                kvm_pmu_disable_counter_mask(vcpu,
                       __vcpu_sys_reg(vcpu, PMCNTENSET_EL0));
        }

        if (val & ARMV8_PMU_PMCR_C)
                kvm_pmu_set_counter_value(vcpu, ARMV8_PMU_CYCLE_IDX, 0);

        if (val & ARMV8_PMU_PMCR_P) {
                unsigned long mask = kvm_pmu_valid_counter_mask(vcpu);

                mask &= ~BIT(ARMV8_PMU_CYCLE_IDX);
                for_each_set_bit(i, &mask, 32)
                        kvm_pmu_set_counter_value(vcpu, i, 0);
        }
}
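
/*
 * Note the asymmetry above: PMCR_EL0.C resets only the cycle counter,
 * while PMCR_EL0.P resets the event counters but deliberately leaves
 * the cycle counter alone, matching the architected behaviour.
 */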

static bool kvm_pmu_counter_is_enabled(struct kvm_vcpu *vcpu, u64 select_idx)
{
        return (__vcpu_sys_reg(vcpu, PMCR_EL0) & ARMV8_PMU_PMCR_E) &&
               (__vcpu_sys_reg(vcpu, PMCNTENSET_EL0) & BIT(select_idx));
}

/**
 * kvm_pmu_create_perf_event - create a perf event for a counter
 * @vcpu: The vcpu pointer
 * @select_idx: The index of the selected counter
 */
static void kvm_pmu_create_perf_event(struct kvm_vcpu *vcpu, u64 select_idx)
{
        struct arm_pmu *arm_pmu = vcpu->kvm->arch.arm_pmu;
        struct kvm_pmu *pmu = &vcpu->arch.pmu;
        struct kvm_pmc *pmc = &pmu->pmc[select_idx];
        struct perf_event *event;
        struct perf_event_attr attr;
        u64 eventsel, counter, reg, data;

        reg = counter_index_to_evtreg(select_idx);
        data = __vcpu_sys_reg(vcpu, reg);

        kvm_pmu_stop_counter(vcpu, pmc);
        if (pmc->idx == ARMV8_PMU_CYCLE_IDX)
                eventsel = ARMV8_PMUV3_PERFCTR_CPU_CYCLES;
        else
                eventsel = data & kvm_pmu_event_mask(vcpu->kvm);

        /*
         * Neither SW increment nor chained events need to be backed
         * by a perf event.
         */
        if (eventsel == ARMV8_PMUV3_PERFCTR_SW_INCR ||
            eventsel == ARMV8_PMUV3_PERFCTR_CHAIN)
                return;

        /*
         * If we have a filter in place and the event isn't allowed, do
         * not install a perf event either.
         */
        if (vcpu->kvm->arch.pmu_filter &&
            !test_bit(eventsel, vcpu->kvm->arch.pmu_filter))
                return;

        memset(&attr, 0, sizeof(struct perf_event_attr));
        attr.type = arm_pmu->pmu.type;
        attr.size = sizeof(attr);
        attr.pinned = 1;
        attr.disabled = !kvm_pmu_counter_is_enabled(vcpu, pmc->idx);
        attr.exclude_user = data & ARMV8_PMU_EXCLUDE_EL0 ? 1 : 0;
        attr.exclude_kernel = data & ARMV8_PMU_EXCLUDE_EL1 ? 1 : 0;
        attr.exclude_hv = 1; /* Don't count EL2 events */
        attr.exclude_host = 1; /* Don't count host events */
        attr.config = eventsel;

        counter = kvm_pmu_get_counter_value(vcpu, select_idx);

        /*
         * If counting with a 64bit counter, advertise it to the perf
         * code, carefully dealing with the initial sample period
         * which also depends on the overflow.
         */
        if (kvm_pmu_idx_is_64bit(vcpu, select_idx))
                attr.config1 |= PERF_ATTR_CFG1_COUNTER_64BIT;

        attr.sample_period = compute_period(vcpu, select_idx, counter);

        event = perf_event_create_kernel_counter(&attr, -1, current,
                                                 kvm_pmu_perf_overflow, pmc);

        if (IS_ERR(event)) {
                pr_err_once("kvm: pmu event creation failed %ld\n",
                            PTR_ERR(event));
                return;
        }

        pmc->perf_event = event;
}

/**
 * kvm_pmu_set_counter_event_type - set selected counter to monitor some event
 * @vcpu: The vcpu pointer
 * @data: The data guest writes to PMXEVTYPER_EL0
 * @select_idx: The index of the selected counter
 *
 * When the guest OS accesses PMXEVTYPER_EL0, it wants to set a PMC to count an
 * event with the given hardware event number. Here we call the perf_event API
 * to emulate this action and create a kernel perf event for it.
 */
void kvm_pmu_set_counter_event_type(struct kvm_vcpu *vcpu, u64 data,
                                    u64 select_idx)
{
        u64 reg, mask;

        if (!kvm_vcpu_has_pmu(vcpu))
                return;

        mask = ARMV8_PMU_EVTYPE_MASK;
        mask &= ~ARMV8_PMU_EVTYPE_EVENT;
        mask |= kvm_pmu_event_mask(vcpu->kvm);

        reg = counter_index_to_evtreg(select_idx);

        __vcpu_sys_reg(vcpu, reg) = data & mask;

        kvm_pmu_create_perf_event(vcpu, select_idx);
}

void kvm_host_pmu_init(struct arm_pmu *pmu)
{
        struct arm_pmu_entry *entry;

        if (pmu->pmuver == 0 || pmu->pmuver == ID_AA64DFR0_EL1_PMUVer_IMP_DEF)
                return;

        mutex_lock(&arm_pmus_lock);

        entry = kmalloc(sizeof(*entry), GFP_KERNEL);
        if (!entry)
                goto out_unlock;

        entry->arm_pmu = pmu;
        list_add_tail(&entry->entry, &arm_pmus);

        if (list_is_singular(&arm_pmus))
                static_branch_enable(&kvm_arm_pmu_available);

out_unlock:
        mutex_unlock(&arm_pmus_lock);
}

static struct arm_pmu *kvm_pmu_probe_armpmu(void)
{
        struct perf_event_attr attr = { };
        struct perf_event *event;
        struct arm_pmu *pmu = NULL;

        /*
         * Create a dummy event that only counts user cycles. As we'll never
         * leave this function with the event being live, it will never
         * count anything. But it allows us to probe some of the PMU
         * details. Yes, this is terrible.
         */
        attr.type = PERF_TYPE_RAW;
        attr.size = sizeof(attr);
        attr.pinned = 1;
        attr.disabled = 0;
        attr.exclude_user = 0;
        attr.exclude_kernel = 1;
        attr.exclude_hv = 1;
        attr.exclude_host = 1;
        attr.config = ARMV8_PMUV3_PERFCTR_CPU_CYCLES;
        attr.sample_period = GENMASK(63, 0);

        event = perf_event_create_kernel_counter(&attr, -1, current,
                                                 kvm_pmu_perf_overflow, &attr);

        if (IS_ERR(event)) {
                pr_err_once("kvm: pmu event creation failed %ld\n",
                            PTR_ERR(event));
                return NULL;
        }

        if (event->pmu) {
                pmu = to_arm_pmu(event->pmu);
                if (pmu->pmuver == 0 ||
                    pmu->pmuver == ID_AA64DFR0_EL1_PMUVer_IMP_DEF)
                        pmu = NULL;
        }

        perf_event_disable(event);
        perf_event_release_kernel(event);

        return pmu;
}

u64 kvm_pmu_get_pmceid(struct kvm_vcpu *vcpu, bool pmceid1)
{
        unsigned long *bmap = vcpu->kvm->arch.pmu_filter;
        u64 val, mask = 0;
        int base, i, nr_events;

        if (!kvm_vcpu_has_pmu(vcpu))
                return 0;

        if (!pmceid1) {
                val = read_sysreg(pmceid0_el0);
                /* always support CHAIN */
                val |= BIT(ARMV8_PMUV3_PERFCTR_CHAIN);
                base = 0;
        } else {
                val = read_sysreg(pmceid1_el0);
                /*
                 * Don't advertise STALL_SLOT, as PMMIR_EL0 is handled
                 * as RAZ
                 */
                if (vcpu->kvm->arch.arm_pmu->pmuver >= ID_AA64DFR0_EL1_PMUVer_V3P4)
                        val &= ~BIT_ULL(ARMV8_PMUV3_PERFCTR_STALL_SLOT - 32);
                base = 32;
        }

        if (!bmap)
                return val;

        nr_events = kvm_pmu_event_mask(vcpu->kvm) + 1;

        for (i = 0; i < 32; i += 8) {
                u64 byte;

                byte = bitmap_get_value8(bmap, base + i);
                mask |= byte << i;
                if (nr_events >= (0x4000 + base + 32)) {
                        byte = bitmap_get_value8(bmap, 0x4000 + base + i);
                        mask |= byte << (32 + i);
                }
        }

        return val & mask;
}
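
/*
 * The 0x4000 offset above reflects the extended event space: the top
 * halves of PMCEID0_EL0/PMCEID1_EL0 advertise the common events from
 * 0x4000 upwards, which can only exist once the event number field is
 * 16 bits wide.
 */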

int kvm_arm_pmu_v3_enable(struct kvm_vcpu *vcpu)
{
        if (!kvm_vcpu_has_pmu(vcpu))
                return 0;

        if (!vcpu->arch.pmu.created)
                return -EINVAL;

        /*
         * A valid interrupt configuration for the PMU is either to have a
         * properly configured interrupt number and using an in-kernel
         * irqchip, or to not have an in-kernel GIC and not set an IRQ.
         */
        if (irqchip_in_kernel(vcpu->kvm)) {
                int irq = vcpu->arch.pmu.irq_num;
                /*
                 * If we are using an in-kernel vgic, at this point we know
                 * the vgic will be initialized, so we can check the PMU irq
                 * number against the dimensions of the vgic and make sure
                 * it's valid.
                 */
                if (!irq_is_ppi(irq) && !vgic_valid_spi(vcpu->kvm, irq))
                        return -EINVAL;
        } else if (kvm_arm_pmu_irq_initialized(vcpu)) {
                return -EINVAL;
        }

        /* One-off reload of the PMU on first run */
        kvm_make_request(KVM_REQ_RELOAD_PMU, vcpu);

        return 0;
}

static int kvm_arm_pmu_v3_init(struct kvm_vcpu *vcpu)
{
        if (irqchip_in_kernel(vcpu->kvm)) {
                int ret;

                /*
                 * If using the PMU with an in-kernel virtual GIC
                 * implementation, we require the GIC to be already
                 * initialized when initializing the PMU.
                 */
                if (!vgic_initialized(vcpu->kvm))
                        return -ENODEV;

                if (!kvm_arm_pmu_irq_initialized(vcpu))
                        return -ENXIO;

                ret = kvm_vgic_set_owner(vcpu, vcpu->arch.pmu.irq_num,
                                         &vcpu->arch.pmu);
                if (ret)
                        return ret;
        }

        init_irq_work(&vcpu->arch.pmu.overflow_work,
                      kvm_pmu_perf_overflow_notify_vcpu);

        vcpu->arch.pmu.created = true;
        return 0;
}

/*
 * For one VM the interrupt type must be the same for each vcpu.
 * As a PPI, the interrupt number is the same for all vcpus,
 * while as an SPI it must be a separate number per vcpu.
 */
static bool pmu_irq_is_valid(struct kvm *kvm, int irq)
{
        unsigned long i;
        struct kvm_vcpu *vcpu;

        kvm_for_each_vcpu(i, vcpu, kvm) {
                if (!kvm_arm_pmu_irq_initialized(vcpu))
                        continue;

                if (irq_is_ppi(irq)) {
                        if (vcpu->arch.pmu.irq_num != irq)
                                return false;
                } else {
                        if (vcpu->arch.pmu.irq_num == irq)
                                return false;
                }
        }

        return true;
}
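
/*
 * Example: PPI 23 configured on every vcpu is valid (each CPU interface
 * has its own private copy of the line), whereas an SPI is a shared
 * line and so may be assigned to at most one vcpu.
 */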

static int kvm_arm_pmu_v3_set_pmu(struct kvm_vcpu *vcpu, int pmu_id)
{
        struct kvm *kvm = vcpu->kvm;
        struct arm_pmu_entry *entry;
        struct arm_pmu *arm_pmu;
        int ret = -ENXIO;

        mutex_lock(&kvm->lock);
        mutex_lock(&arm_pmus_lock);

        list_for_each_entry(entry, &arm_pmus, entry) {
                arm_pmu = entry->arm_pmu;
                if (arm_pmu->pmu.type == pmu_id) {
                        if (test_bit(KVM_ARCH_FLAG_HAS_RAN_ONCE, &kvm->arch.flags) ||
                            (kvm->arch.pmu_filter && kvm->arch.arm_pmu != arm_pmu)) {
                                ret = -EBUSY;
                                break;
                        }

                        kvm->arch.arm_pmu = arm_pmu;
                        cpumask_copy(kvm->arch.supported_cpus, &arm_pmu->supported_cpus);
                        ret = 0;
                        break;
                }
        }

        mutex_unlock(&arm_pmus_lock);
        mutex_unlock(&kvm->lock);
        return ret;
}

int kvm_arm_pmu_v3_set_attr(struct kvm_vcpu *vcpu, struct kvm_device_attr *attr)
{
        struct kvm *kvm = vcpu->kvm;

        if (!kvm_vcpu_has_pmu(vcpu))
                return -ENODEV;

        if (vcpu->arch.pmu.created)
                return -EBUSY;

        mutex_lock(&kvm->lock);
        if (!kvm->arch.arm_pmu) {
                /* No PMU set, get the default one */
                kvm->arch.arm_pmu = kvm_pmu_probe_armpmu();
                if (!kvm->arch.arm_pmu) {
                        mutex_unlock(&kvm->lock);
                        return -ENODEV;
                }
        }
        mutex_unlock(&kvm->lock);

        switch (attr->attr) {
        case KVM_ARM_VCPU_PMU_V3_IRQ: {
                int __user *uaddr = (int __user *)(long)attr->addr;
                int irq;

                if (!irqchip_in_kernel(kvm))
                        return -EINVAL;

                if (get_user(irq, uaddr))
                        return -EFAULT;

                /* The PMU overflow interrupt can be a PPI or a valid SPI. */
                if (!(irq_is_ppi(irq) || irq_is_spi(irq)))
                        return -EINVAL;

                if (!pmu_irq_is_valid(kvm, irq))
                        return -EINVAL;

                if (kvm_arm_pmu_irq_initialized(vcpu))
                        return -EBUSY;

                kvm_debug("Set kvm ARM PMU irq: %d\n", irq);
                vcpu->arch.pmu.irq_num = irq;
                return 0;
        }
        case KVM_ARM_VCPU_PMU_V3_FILTER: {
                struct kvm_pmu_event_filter __user *uaddr;
                struct kvm_pmu_event_filter filter;
                int nr_events;

                nr_events = kvm_pmu_event_mask(kvm) + 1;

                uaddr = (struct kvm_pmu_event_filter __user *)(long)attr->addr;

                if (copy_from_user(&filter, uaddr, sizeof(filter)))
                        return -EFAULT;

                if (((u32)filter.base_event + filter.nevents) > nr_events ||
                    (filter.action != KVM_PMU_EVENT_ALLOW &&
                     filter.action != KVM_PMU_EVENT_DENY))
                        return -EINVAL;

                mutex_lock(&kvm->lock);

                if (test_bit(KVM_ARCH_FLAG_HAS_RAN_ONCE, &kvm->arch.flags)) {
                        mutex_unlock(&kvm->lock);
                        return -EBUSY;
                }

                if (!kvm->arch.pmu_filter) {
                        kvm->arch.pmu_filter = bitmap_alloc(nr_events, GFP_KERNEL_ACCOUNT);
                        if (!kvm->arch.pmu_filter) {
                                mutex_unlock(&kvm->lock);
                                return -ENOMEM;
                        }

                        /*
                         * The default depends on the first applied filter.
                         * If it allows events, the default is to deny.
                         * Conversely, if the first filter denies a set of
                         * events, the default is to allow.
                         */
                        if (filter.action == KVM_PMU_EVENT_ALLOW)
                                bitmap_zero(kvm->arch.pmu_filter, nr_events);
                        else
                                bitmap_fill(kvm->arch.pmu_filter, nr_events);
                }

                if (filter.action == KVM_PMU_EVENT_ALLOW)
                        bitmap_set(kvm->arch.pmu_filter, filter.base_event, filter.nevents);
                else
                        bitmap_clear(kvm->arch.pmu_filter, filter.base_event, filter.nevents);

                mutex_unlock(&kvm->lock);

                return 0;
        }
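        /*
         * Hypothetical userspace sketch for the filter attribute above
         * (illustrative values; assumes the usual KVM uapi headers and
         * a vcpu fd):
         *
         *	struct kvm_pmu_event_filter filter = {
         *		.base_event = 0x0,
         *		.nevents    = 0x40,
         *		.action     = KVM_PMU_EVENT_ALLOW,
         *	};
         *	struct kvm_device_attr attr = {
         *		.group = KVM_ARM_VCPU_PMU_V3_CTRL,
         *		.attr  = KVM_ARM_VCPU_PMU_V3_FILTER,
         *		.addr  = (__u64)&filter,
         *	};
         *	ioctl(vcpu_fd, KVM_SET_DEVICE_ATTR, &attr);
         *
         * A first ALLOW filter makes deny the default for all other
         * events, as described in the comment above.
         */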
        case KVM_ARM_VCPU_PMU_V3_SET_PMU: {
                int __user *uaddr = (int __user *)(long)attr->addr;
                int pmu_id;

                if (get_user(pmu_id, uaddr))
                        return -EFAULT;

                return kvm_arm_pmu_v3_set_pmu(vcpu, pmu_id);
        }
        case KVM_ARM_VCPU_PMU_V3_INIT:
                return kvm_arm_pmu_v3_init(vcpu);
        }

        return -ENXIO;
}

int kvm_arm_pmu_v3_get_attr(struct kvm_vcpu *vcpu, struct kvm_device_attr *attr)
{
        switch (attr->attr) {
        case KVM_ARM_VCPU_PMU_V3_IRQ: {
                int __user *uaddr = (int __user *)(long)attr->addr;
                int irq;

                if (!irqchip_in_kernel(vcpu->kvm))
                        return -EINVAL;

                if (!kvm_vcpu_has_pmu(vcpu))
                        return -ENODEV;

                if (!kvm_arm_pmu_irq_initialized(vcpu))
                        return -ENXIO;

                irq = vcpu->arch.pmu.irq_num;
                return put_user(irq, uaddr);
        }
        }

        return -ENXIO;
}

int kvm_arm_pmu_v3_has_attr(struct kvm_vcpu *vcpu, struct kvm_device_attr *attr)
{
        switch (attr->attr) {
        case KVM_ARM_VCPU_PMU_V3_IRQ:
        case KVM_ARM_VCPU_PMU_V3_INIT:
        case KVM_ARM_VCPU_PMU_V3_FILTER:
        case KVM_ARM_VCPU_PMU_V3_SET_PMU:
                if (kvm_vcpu_has_pmu(vcpu))
                        return 0;
        }

        return -ENXIO;
}