// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (C) 2015 Linaro Ltd.
 * Author: Shannon Zhao <shannon.zhao@linaro.org>
 */

#include <linux/cpu.h>
#include <linux/kvm.h>
#include <linux/kvm_host.h>
#include <linux/list.h>
#include <linux/perf_event.h>
#include <linux/perf/arm_pmu.h>
#include <linux/uaccess.h>
#include <asm/kvm_emulate.h>
#include <kvm/arm_pmu.h>
#include <kvm/arm_vgic.h>

#define PERF_ATTR_CFG1_COUNTER_64BIT	BIT(0)

DEFINE_STATIC_KEY_FALSE(kvm_arm_pmu_available);

static LIST_HEAD(arm_pmus);
static DEFINE_MUTEX(arm_pmus_lock);

static void kvm_pmu_create_perf_event(struct kvm_vcpu *vcpu, u64 select_idx);

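/*
 * Width of the event number field honoured for this guest: a baseline
 * PMUv3 implementation has a 10-bit evtCount field, which grows to 16
 * bits from PMUv3 for ARMv8.1 onwards.
 */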
static u32 kvm_pmu_event_mask(struct kvm *kvm)
{
	unsigned int pmuver;

	pmuver = kvm->arch.arm_pmu->pmuver;

	switch (pmuver) {
	case ID_AA64DFR0_EL1_PMUVer_IMP:
		return GENMASK(9, 0);
	case ID_AA64DFR0_EL1_PMUVer_V3P1:
	case ID_AA64DFR0_EL1_PMUVer_V3P4:
	case ID_AA64DFR0_EL1_PMUVer_V3P5:
	case ID_AA64DFR0_EL1_PMUVer_V3P7:
		return GENMASK(15, 0);
	default:		/* Shouldn't be here, just for sanity */
		WARN_ONCE(1, "Unknown PMU version %d\n", pmuver);
		return 0;
	}
}

/**
 * kvm_pmu_idx_is_64bit - determine if @select_idx is a 64-bit counter
 * @vcpu: The vcpu pointer
 * @select_idx: The counter index
 */
static bool kvm_pmu_idx_is_64bit(struct kvm_vcpu *vcpu, u64 select_idx)
{
	return (select_idx == ARMV8_PMU_CYCLE_IDX &&
		__vcpu_sys_reg(vcpu, PMCR_EL0) & ARMV8_PMU_PMCR_LC);
}

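/*
 * Only an even-numbered event counter can be chained with its
 * odd-numbered successor; the cycle counter never takes part in
 * chaining.
 */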
static bool kvm_pmu_counter_can_chain(struct kvm_vcpu *vcpu, u64 idx)
{
	return (!(idx & 1) && (idx + 1) < ARMV8_PMU_CYCLE_IDX);
}

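/*
 * Walk back from a counter to its vcpu: pmc->idx mirrors the counter's
 * position in the pmc[] array, so rewinding the pointer by idx lands on
 * pmc[0], from which container_of() recovers the enclosing kvm_pmu,
 * kvm_vcpu_arch and finally the kvm_vcpu.
 */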
static struct kvm_vcpu *kvm_pmc_to_vcpu(struct kvm_pmc *pmc)
{
	struct kvm_pmu *pmu;
	struct kvm_vcpu_arch *vcpu_arch;

	pmc -= pmc->idx;
	pmu = container_of(pmc, struct kvm_pmu, pmc[0]);
	vcpu_arch = container_of(pmu, struct kvm_vcpu_arch, pmu);
	return container_of(vcpu_arch, struct kvm_vcpu, arch);
}

/**
 * kvm_pmu_get_counter_value - get PMU counter value
 * @vcpu: The vcpu pointer
 * @select_idx: The counter index
 */
u64 kvm_pmu_get_counter_value(struct kvm_vcpu *vcpu, u64 select_idx)
{
	u64 counter, reg, enabled, running;
	struct kvm_pmu *pmu = &vcpu->arch.pmu;
	struct kvm_pmc *pmc = &pmu->pmc[select_idx];

	if (!kvm_vcpu_has_pmu(vcpu))
		return 0;

	reg = (pmc->idx == ARMV8_PMU_CYCLE_IDX)
	      ? PMCCNTR_EL0 : PMEVCNTR0_EL0 + pmc->idx;
	counter = __vcpu_sys_reg(vcpu, reg);

	/*
	 * The real counter value is equal to the value of the counter
	 * register plus the value the perf event has counted.
	 */
	if (pmc->perf_event)
		counter += perf_event_read_value(pmc->perf_event, &enabled,
						 &running);

	if (select_idx != ARMV8_PMU_CYCLE_IDX)
		counter = lower_32_bits(counter);

	return counter;
}

/**
 * kvm_pmu_set_counter_value - set PMU counter value
 * @vcpu: The vcpu pointer
 * @select_idx: The counter index
 * @val: The counter value
 */
void kvm_pmu_set_counter_value(struct kvm_vcpu *vcpu, u64 select_idx, u64 val)
{
	u64 reg;

	if (!kvm_vcpu_has_pmu(vcpu))
		return;

	reg = (select_idx == ARMV8_PMU_CYCLE_IDX)
	      ? PMCCNTR_EL0 : PMEVCNTR0_EL0 + select_idx;
	__vcpu_sys_reg(vcpu, reg) += (s64)val - kvm_pmu_get_counter_value(vcpu, select_idx);

	/* Recreate the perf event to reflect the updated sample_period */
	kvm_pmu_create_perf_event(vcpu, select_idx);
}

/**
 * kvm_pmu_release_perf_event - remove the perf event
 * @pmc: The PMU counter pointer
 */
static void kvm_pmu_release_perf_event(struct kvm_pmc *pmc)
{
	if (pmc->perf_event) {
		perf_event_disable(pmc->perf_event);
		perf_event_release_kernel(pmc->perf_event);
		pmc->perf_event = NULL;
	}
}

/**
 * kvm_pmu_stop_counter - stop PMU counter
 * @vcpu: The vcpu pointer
 * @pmc: The PMU counter pointer
 *
 * If this counter has been configured to monitor some event, release it here.
 */
static void kvm_pmu_stop_counter(struct kvm_vcpu *vcpu, struct kvm_pmc *pmc)
{
	u64 counter, reg, val;

	if (!pmc->perf_event)
		return;

	counter = kvm_pmu_get_counter_value(vcpu, pmc->idx);

	if (pmc->idx == ARMV8_PMU_CYCLE_IDX) {
		reg = PMCCNTR_EL0;
		val = counter;
	} else {
		reg = PMEVCNTR0_EL0 + pmc->idx;
		val = lower_32_bits(counter);
	}

	__vcpu_sys_reg(vcpu, reg) = val;

	kvm_pmu_release_perf_event(pmc);
}

/**
 * kvm_pmu_vcpu_init - assign PMU counter indices for the vcpu
 * @vcpu: The vcpu pointer
 */
void kvm_pmu_vcpu_init(struct kvm_vcpu *vcpu)
{
	int i;
	struct kvm_pmu *pmu = &vcpu->arch.pmu;

	for (i = 0; i < ARMV8_PMU_MAX_COUNTERS; i++)
		pmu->pmc[i].idx = i;
}

/**
 * kvm_pmu_vcpu_reset - reset PMU state for the vcpu
 * @vcpu: The vcpu pointer
 */
void kvm_pmu_vcpu_reset(struct kvm_vcpu *vcpu)
{
	unsigned long mask = kvm_pmu_valid_counter_mask(vcpu);
	struct kvm_pmu *pmu = &vcpu->arch.pmu;
	int i;

	for_each_set_bit(i, &mask, 32)
		kvm_pmu_stop_counter(vcpu, &pmu->pmc[i]);
}

/**
 * kvm_pmu_vcpu_destroy - free the perf events of the PMU for the vcpu
 * @vcpu: The vcpu pointer
 */
void kvm_pmu_vcpu_destroy(struct kvm_vcpu *vcpu)
{
	int i;
	struct kvm_pmu *pmu = &vcpu->arch.pmu;

	for (i = 0; i < ARMV8_PMU_MAX_COUNTERS; i++)
		kvm_pmu_release_perf_event(&pmu->pmc[i]);
	irq_work_sync(&vcpu->arch.pmu.overflow_work);
}

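/*
 * The valid counter mask is made of the N event counters advertised by
 * PMCR_EL0.N, plus the cycle counter, which is always implemented.
 */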
u64 kvm_pmu_valid_counter_mask(struct kvm_vcpu *vcpu)
{
	u64 val = __vcpu_sys_reg(vcpu, PMCR_EL0) >> ARMV8_PMU_PMCR_N_SHIFT;

	val &= ARMV8_PMU_PMCR_N_MASK;
	if (val == 0)
		return BIT(ARMV8_PMU_CYCLE_IDX);
	else
		return GENMASK(val - 1, 0) | BIT(ARMV8_PMU_CYCLE_IDX);
}

/**
 * kvm_pmu_enable_counter_mask - enable selected PMU counters
 * @vcpu: The vcpu pointer
 * @val: the value the guest writes to the PMCNTENSET register
 *
 * Call perf_event_enable to start counting the perf event
 */
void kvm_pmu_enable_counter_mask(struct kvm_vcpu *vcpu, u64 val)
{
	int i;
	struct kvm_pmu *pmu = &vcpu->arch.pmu;
	struct kvm_pmc *pmc;

	if (!kvm_vcpu_has_pmu(vcpu))
		return;

	if (!(__vcpu_sys_reg(vcpu, PMCR_EL0) & ARMV8_PMU_PMCR_E) || !val)
		return;

	for (i = 0; i < ARMV8_PMU_MAX_COUNTERS; i++) {
		if (!(val & BIT(i)))
			continue;

		pmc = &pmu->pmc[i];

		if (!pmc->perf_event) {
			kvm_pmu_create_perf_event(vcpu, i);
		} else {
			perf_event_enable(pmc->perf_event);
			if (pmc->perf_event->state != PERF_EVENT_STATE_ACTIVE)
				kvm_debug("failed to enable perf event\n");
		}
	}
}

/**
 * kvm_pmu_disable_counter_mask - disable selected PMU counters
 * @vcpu: The vcpu pointer
 * @val: the value the guest writes to the PMCNTENCLR register
 *
 * Call perf_event_disable to stop counting the perf event
 */
void kvm_pmu_disable_counter_mask(struct kvm_vcpu *vcpu, u64 val)
{
	int i;
	struct kvm_pmu *pmu = &vcpu->arch.pmu;
	struct kvm_pmc *pmc;

	if (!kvm_vcpu_has_pmu(vcpu) || !val)
		return;

	for (i = 0; i < ARMV8_PMU_MAX_COUNTERS; i++) {
		if (!(val & BIT(i)))
			continue;

		pmc = &pmu->pmc[i];

		if (pmc->perf_event)
			perf_event_disable(pmc->perf_event);
	}
}

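/*
 * Compute the set of counters currently asserting an overflow
 * interrupt: those that have overflowed, are enabled, and have their
 * interrupt enabled, provided the PMU as a whole is enabled
 * (PMCR_EL0.E).
 */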
static u64 kvm_pmu_overflow_status(struct kvm_vcpu *vcpu)
{
	u64 reg = 0;

	if ((__vcpu_sys_reg(vcpu, PMCR_EL0) & ARMV8_PMU_PMCR_E)) {
		reg = __vcpu_sys_reg(vcpu, PMOVSSET_EL0);
		reg &= __vcpu_sys_reg(vcpu, PMCNTENSET_EL0);
		reg &= __vcpu_sys_reg(vcpu, PMINTENSET_EL1);
	}

	return reg;
}

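/*
 * Recompute the level of the PMU overflow interrupt line and, when an
 * in-kernel irqchip is used, propagate any change to the vgic.
 */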
static void kvm_pmu_update_state(struct kvm_vcpu *vcpu)
{
	struct kvm_pmu *pmu = &vcpu->arch.pmu;
	bool overflow;

	if (!kvm_vcpu_has_pmu(vcpu))
		return;

	overflow = !!kvm_pmu_overflow_status(vcpu);
	if (pmu->irq_level == overflow)
		return;

	pmu->irq_level = overflow;

	if (likely(irqchip_in_kernel(vcpu->kvm))) {
		int ret = kvm_vgic_inject_irq(vcpu->kvm, vcpu->vcpu_id,
					      pmu->irq_num, overflow, pmu);
		WARN_ON(ret);
	}
}

bool kvm_pmu_should_notify_user(struct kvm_vcpu *vcpu)
{
	struct kvm_pmu *pmu = &vcpu->arch.pmu;
	struct kvm_sync_regs *sregs = &vcpu->run->s.regs;
	bool run_level = sregs->device_irq_level & KVM_ARM_DEV_PMU;

	if (likely(irqchip_in_kernel(vcpu->kvm)))
		return false;

	return pmu->irq_level != run_level;
}

/*
 * Reflect the PMU overflow interrupt output level into the kvm_run structure
 */
void kvm_pmu_update_run(struct kvm_vcpu *vcpu)
{
	struct kvm_sync_regs *regs = &vcpu->run->s.regs;

	/* Populate the PMU bit of the device irq bitmap for user space */
	regs->device_irq_level &= ~KVM_ARM_DEV_PMU;
	if (vcpu->arch.pmu.irq_level)
		regs->device_irq_level |= KVM_ARM_DEV_PMU;
}

/**
 * kvm_pmu_flush_hwstate - flush pmu state to cpu
 * @vcpu: The vcpu pointer
 *
 * Check if the PMU has overflowed while we were running in the host, and
 * inject an interrupt if that was the case.
 */
void kvm_pmu_flush_hwstate(struct kvm_vcpu *vcpu)
{
	kvm_pmu_update_state(vcpu);
}

/**
 * kvm_pmu_sync_hwstate - sync pmu state from cpu
 * @vcpu: The vcpu pointer
 *
 * Check if the PMU has overflowed while we were running in the guest, and
 * inject an interrupt if that was the case.
 */
void kvm_pmu_sync_hwstate(struct kvm_vcpu *vcpu)
{
	kvm_pmu_update_state(vcpu);
}

/*
 * When the perf interrupt is an NMI, we cannot safely notify the vcpu
 * corresponding to the event. This is why we need a callback to do it
 * once outside of the NMI context.
 */
static void kvm_pmu_perf_overflow_notify_vcpu(struct irq_work *work)
{
	struct kvm_vcpu *vcpu;
	struct kvm_pmu *pmu;

	pmu = container_of(work, struct kvm_pmu, overflow_work);
	vcpu = kvm_pmc_to_vcpu(pmu->pmc);

	kvm_vcpu_kick(vcpu);
}

/*
 * Perform an increment on any of the counters described in @mask,
 * generating the overflow if required, and propagate it as a chained
 * event if possible.
 */
static void kvm_pmu_counter_increment(struct kvm_vcpu *vcpu,
				      unsigned long mask, u32 event)
{
	int i;

	if (!(__vcpu_sys_reg(vcpu, PMCR_EL0) & ARMV8_PMU_PMCR_E))
		return;

	/* Weed out disabled counters */
	mask &= __vcpu_sys_reg(vcpu, PMCNTENSET_EL0);

	for_each_set_bit(i, &mask, ARMV8_PMU_CYCLE_IDX) {
		u64 type, reg;

		/* Filter on event type */
		type = __vcpu_sys_reg(vcpu, PMEVTYPER0_EL0 + i);
		type &= kvm_pmu_event_mask(vcpu->kvm);
		if (type != event)
			continue;

		/* Increment this counter */
		reg = __vcpu_sys_reg(vcpu, PMEVCNTR0_EL0 + i) + 1;
		reg = lower_32_bits(reg);
		__vcpu_sys_reg(vcpu, PMEVCNTR0_EL0 + i) = reg;

		if (reg) /* No overflow? move on */
			continue;

		/* Mark overflow */
		__vcpu_sys_reg(vcpu, PMOVSSET_EL0) |= BIT(i);

		if (kvm_pmu_counter_can_chain(vcpu, i))
			kvm_pmu_counter_increment(vcpu, BIT(i + 1),
						  ARMV8_PMUV3_PERFCTR_CHAIN);
	}
}

/*
 * When the perf event overflows, set the overflow status and inform the vcpu.
 */
static void kvm_pmu_perf_overflow(struct perf_event *perf_event,
				  struct perf_sample_data *data,
				  struct pt_regs *regs)
{
	struct kvm_pmc *pmc = perf_event->overflow_handler_context;
	struct arm_pmu *cpu_pmu = to_arm_pmu(perf_event->pmu);
	struct kvm_vcpu *vcpu = kvm_pmc_to_vcpu(pmc);
	int idx = pmc->idx;
	u64 period;

	cpu_pmu->pmu.stop(perf_event, PERF_EF_UPDATE);

	/*
	 * Reset the sample period to the architectural limit,
	 * i.e. the point where the counter overflows.
	 */
	period = -(local64_read(&perf_event->count));

	if (!kvm_pmu_idx_is_64bit(vcpu, pmc->idx))
		period &= GENMASK(31, 0);

	local64_set(&perf_event->hw.period_left, 0);
	perf_event->attr.sample_period = period;
	perf_event->hw.sample_period = period;

	__vcpu_sys_reg(vcpu, PMOVSSET_EL0) |= BIT(idx);

	if (kvm_pmu_counter_can_chain(vcpu, idx))
		kvm_pmu_counter_increment(vcpu, BIT(idx + 1),
					  ARMV8_PMUV3_PERFCTR_CHAIN);

	if (kvm_pmu_overflow_status(vcpu)) {
		kvm_make_request(KVM_REQ_IRQ_PENDING, vcpu);

		if (!in_nmi())
			kvm_vcpu_kick(vcpu);
		else
			irq_work_queue(&vcpu->arch.pmu.overflow_work);
	}

	cpu_pmu->pmu.start(perf_event, PERF_EF_RELOAD);
}

/**
 * kvm_pmu_software_increment - do software increment
 * @vcpu: The vcpu pointer
 * @val: the value the guest writes to the PMSWINC register
 */
void kvm_pmu_software_increment(struct kvm_vcpu *vcpu, u64 val)
{
	kvm_pmu_counter_increment(vcpu, val, ARMV8_PMUV3_PERFCTR_SW_INCR);
}

/**
 * kvm_pmu_handle_pmcr - handle PMCR register
 * @vcpu: The vcpu pointer
 * @val: the value the guest writes to the PMCR register
 */
void kvm_pmu_handle_pmcr(struct kvm_vcpu *vcpu, u64 val)
{
	int i;

	if (!kvm_vcpu_has_pmu(vcpu))
		return;

	if (val & ARMV8_PMU_PMCR_E) {
		kvm_pmu_enable_counter_mask(vcpu,
		       __vcpu_sys_reg(vcpu, PMCNTENSET_EL0));
	} else {
		kvm_pmu_disable_counter_mask(vcpu,
		       __vcpu_sys_reg(vcpu, PMCNTENSET_EL0));
	}

	if (val & ARMV8_PMU_PMCR_C)
		kvm_pmu_set_counter_value(vcpu, ARMV8_PMU_CYCLE_IDX, 0);

	if (val & ARMV8_PMU_PMCR_P) {
		unsigned long mask = kvm_pmu_valid_counter_mask(vcpu);

		mask &= ~BIT(ARMV8_PMU_CYCLE_IDX);
		for_each_set_bit(i, &mask, 32)
			kvm_pmu_set_counter_value(vcpu, i, 0);
	}
}

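/*
 * A counter counts only if the PMU is globally enabled (PMCR_EL0.E) and
 * the counter's own enable bit is set in PMCNTENSET_EL0.
 */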
static bool kvm_pmu_counter_is_enabled(struct kvm_vcpu *vcpu, u64 select_idx)
{
	return (__vcpu_sys_reg(vcpu, PMCR_EL0) & ARMV8_PMU_PMCR_E) &&
	       (__vcpu_sys_reg(vcpu, PMCNTENSET_EL0) & BIT(select_idx));
}

/**
 * kvm_pmu_create_perf_event - create a perf event for a counter
 * @vcpu: The vcpu pointer
 * @select_idx: The index of the selected counter
 */
static void kvm_pmu_create_perf_event(struct kvm_vcpu *vcpu, u64 select_idx)
{
	struct arm_pmu *arm_pmu = vcpu->kvm->arch.arm_pmu;
	struct kvm_pmu *pmu = &vcpu->arch.pmu;
	struct kvm_pmc *pmc = &pmu->pmc[select_idx];
	struct perf_event *event;
	struct perf_event_attr attr;
	u64 eventsel, counter, reg, data;

	reg = (pmc->idx == ARMV8_PMU_CYCLE_IDX)
	      ? PMCCFILTR_EL0 : PMEVTYPER0_EL0 + pmc->idx;
	data = __vcpu_sys_reg(vcpu, reg);

	kvm_pmu_stop_counter(vcpu, pmc);
	if (pmc->idx == ARMV8_PMU_CYCLE_IDX)
		eventsel = ARMV8_PMUV3_PERFCTR_CPU_CYCLES;
	else
		eventsel = data & kvm_pmu_event_mask(vcpu->kvm);

	/*
	 * Neither SW increment nor chained events need to be backed
	 * by a perf event.
	 */
	if (eventsel == ARMV8_PMUV3_PERFCTR_SW_INCR ||
	    eventsel == ARMV8_PMUV3_PERFCTR_CHAIN)
		return;

	/*
	 * If we have a filter in place and the event isn't allowed, do
	 * not install a perf event either.
	 */
	if (vcpu->kvm->arch.pmu_filter &&
	    !test_bit(eventsel, vcpu->kvm->arch.pmu_filter))
		return;

	memset(&attr, 0, sizeof(struct perf_event_attr));
	attr.type = arm_pmu->pmu.type;
	attr.size = sizeof(attr);
	attr.pinned = 1;
	attr.disabled = !kvm_pmu_counter_is_enabled(vcpu, pmc->idx);
	attr.exclude_user = data & ARMV8_PMU_EXCLUDE_EL0 ? 1 : 0;
	attr.exclude_kernel = data & ARMV8_PMU_EXCLUDE_EL1 ? 1 : 0;
	attr.exclude_hv = 1; /* Don't count EL2 events */
	attr.exclude_host = 1; /* Don't count host events */
	attr.config = eventsel;

	counter = kvm_pmu_get_counter_value(vcpu, select_idx);

	/*
	 * If counting with a 64bit counter, advertise it to the perf
	 * code, carefully dealing with the initial sample period.
	 */
	if (kvm_pmu_idx_is_64bit(vcpu, select_idx)) {
		attr.config1 |= PERF_ATTR_CFG1_COUNTER_64BIT;
		attr.sample_period = (-counter) & GENMASK(63, 0);
	} else {
		attr.sample_period = (-counter) & GENMASK(31, 0);
	}

	event = perf_event_create_kernel_counter(&attr, -1, current,
						 kvm_pmu_perf_overflow, pmc);

	if (IS_ERR(event)) {
		pr_err_once("kvm: pmu event creation failed %ld\n",
			    PTR_ERR(event));
		return;
	}

	pmc->perf_event = event;
}

/**
 * kvm_pmu_set_counter_event_type - set selected counter to monitor some event
 * @vcpu: The vcpu pointer
 * @data: The data the guest writes to PMXEVTYPER_EL0
 * @select_idx: The index of the selected counter
 *
 * When the guest OS accesses PMXEVTYPER_EL0, it wants to set a PMC to count an
 * event with the given hardware event number. Here we call the perf_event API
 * to emulate this action and create a kernel perf event for it.
 */
void kvm_pmu_set_counter_event_type(struct kvm_vcpu *vcpu, u64 data,
				    u64 select_idx)
{
	u64 reg, mask;

	if (!kvm_vcpu_has_pmu(vcpu))
		return;

	mask = ARMV8_PMU_EVTYPE_MASK;
	mask &= ~ARMV8_PMU_EVTYPE_EVENT;
	mask |= kvm_pmu_event_mask(vcpu->kvm);

	reg = (select_idx == ARMV8_PMU_CYCLE_IDX)
	      ? PMCCFILTR_EL0 : PMEVTYPER0_EL0 + select_idx;

	__vcpu_sys_reg(vcpu, reg) = data & mask;

	kvm_pmu_create_perf_event(vcpu, select_idx);
}

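/*
 * Record a host PMU in arm_pmus so that it can later be selected via
 * KVM_ARM_VCPU_PMU_V3_SET_PMU; PMUs that are absent or IMPLEMENTATION
 * DEFINED are ignored. PMU availability is advertised once the first
 * usable PMU is registered.
 */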
void kvm_host_pmu_init(struct arm_pmu *pmu)
{
	struct arm_pmu_entry *entry;

	if (pmu->pmuver == 0 || pmu->pmuver == ID_AA64DFR0_EL1_PMUVer_IMP_DEF)
		return;

	mutex_lock(&arm_pmus_lock);

	entry = kmalloc(sizeof(*entry), GFP_KERNEL);
	if (!entry)
		goto out_unlock;

	entry->arm_pmu = pmu;
	list_add_tail(&entry->entry, &arm_pmus);

	if (list_is_singular(&arm_pmus))
		static_branch_enable(&kvm_arm_pmu_available);

out_unlock:
	mutex_unlock(&arm_pmus_lock);
}

static struct arm_pmu *kvm_pmu_probe_armpmu(void)
{
	struct perf_event_attr attr = { };
	struct perf_event *event;
	struct arm_pmu *pmu = NULL;

	/*
	 * Create a dummy event that only counts user cycles. As we'll never
	 * leave this function with the event being live, it will never
	 * count anything. But it allows us to probe some of the PMU
	 * details. Yes, this is terrible.
	 */
	attr.type = PERF_TYPE_RAW;
	attr.size = sizeof(attr);
	attr.pinned = 1;
	attr.disabled = 0;
	attr.exclude_user = 0;
	attr.exclude_kernel = 1;
	attr.exclude_hv = 1;
	attr.exclude_host = 1;
	attr.config = ARMV8_PMUV3_PERFCTR_CPU_CYCLES;
	attr.sample_period = GENMASK(63, 0);

	event = perf_event_create_kernel_counter(&attr, -1, current,
						 kvm_pmu_perf_overflow, &attr);

	if (IS_ERR(event)) {
		pr_err_once("kvm: pmu event creation failed %ld\n",
			    PTR_ERR(event));
		return NULL;
	}

	if (event->pmu) {
		pmu = to_arm_pmu(event->pmu);
		if (pmu->pmuver == 0 ||
		    pmu->pmuver == ID_AA64DFR0_EL1_PMUVer_IMP_DEF)
			pmu = NULL;
	}

	perf_event_disable(event);
	perf_event_release_kernel(event);

	return pmu;
}

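/*
 * Compute the guest-visible PMCEID{0,1}_EL0 value: start from the host
 * events, adjust the CHAIN and STALL_SLOT bits, then hide any event
 * denied by the PMU event filter.
 */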
u64 kvm_pmu_get_pmceid(struct kvm_vcpu *vcpu, bool pmceid1)
{
	unsigned long *bmap = vcpu->kvm->arch.pmu_filter;
	u64 val, mask = 0;
	int base, i, nr_events;

	if (!kvm_vcpu_has_pmu(vcpu))
		return 0;

	if (!pmceid1) {
		val = read_sysreg(pmceid0_el0);
		/* always support CHAIN */
		val |= BIT(ARMV8_PMUV3_PERFCTR_CHAIN);
		base = 0;
	} else {
		val = read_sysreg(pmceid1_el0);
		/*
		 * Don't advertise STALL_SLOT, as PMMIR_EL0 is handled
		 * as RAZ
		 */
		if (vcpu->kvm->arch.arm_pmu->pmuver >= ID_AA64DFR0_EL1_PMUVer_V3P4)
			val &= ~BIT_ULL(ARMV8_PMUV3_PERFCTR_STALL_SLOT - 32);
		base = 32;
	}

	if (!bmap)
		return val;

	nr_events = kvm_pmu_event_mask(vcpu->kvm) + 1;

	for (i = 0; i < 32; i += 8) {
		u64 byte;

		byte = bitmap_get_value8(bmap, base + i);
		mask |= byte << i;
		if (nr_events >= (0x4000 + base + 32)) {
			byte = bitmap_get_value8(bmap, 0x4000 + base + i);
			mask |= byte << (32 + i);
		}
	}

	return val & mask;
}

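/*
 * Final PMU checks before the vcpu is allowed to run: the PMU must have
 * been created, and its interrupt configuration must be consistent with
 * the irqchip in use.
 */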
int kvm_arm_pmu_v3_enable(struct kvm_vcpu *vcpu)
{
	if (!kvm_vcpu_has_pmu(vcpu))
		return 0;

	if (!vcpu->arch.pmu.created)
		return -EINVAL;

	/*
	 * A valid interrupt configuration for the PMU is either to have a
	 * properly configured interrupt number and using an in-kernel
	 * irqchip, or to not have an in-kernel GIC and not set an IRQ.
	 */
	if (irqchip_in_kernel(vcpu->kvm)) {
		int irq = vcpu->arch.pmu.irq_num;

		/*
		 * If we are using an in-kernel vgic, at this point we know
		 * the vgic will be initialized, so we can check the PMU irq
		 * number against the dimensions of the vgic and make sure
		 * it's valid.
		 */
		if (!irq_is_ppi(irq) && !vgic_valid_spi(vcpu->kvm, irq))
			return -EINVAL;
	} else if (kvm_arm_pmu_irq_initialized(vcpu)) {
		return -EINVAL;
	}

	/* One-off reload of the PMU on first run */
	kvm_make_request(KVM_REQ_RELOAD_PMU, vcpu);

	return 0;
}

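/*
 * Complete the PMU setup for this vcpu: claim the overflow interrupt
 * from the vgic when an in-kernel irqchip is used, and initialize the
 * irq_work used to kick the vcpu from NMI context.
 */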
static int kvm_arm_pmu_v3_init(struct kvm_vcpu *vcpu)
{
	if (irqchip_in_kernel(vcpu->kvm)) {
		int ret;

		/*
		 * If using the PMU with an in-kernel virtual GIC
		 * implementation, we require the GIC to be already
		 * initialized when initializing the PMU.
		 */
		if (!vgic_initialized(vcpu->kvm))
			return -ENODEV;

		if (!kvm_arm_pmu_irq_initialized(vcpu))
			return -ENXIO;

		ret = kvm_vgic_set_owner(vcpu, vcpu->arch.pmu.irq_num,
					 &vcpu->arch.pmu);
		if (ret)
			return ret;
	}

	init_irq_work(&vcpu->arch.pmu.overflow_work,
		      kvm_pmu_perf_overflow_notify_vcpu);

	vcpu->arch.pmu.created = true;
	return 0;
}

/*
 * For one VM the interrupt type must be the same for each vcpu.
 * As a PPI, the interrupt number is the same for all vcpus,
 * while as an SPI it must be a separate number per vcpu.
 */
static bool pmu_irq_is_valid(struct kvm *kvm, int irq)
{
	unsigned long i;
	struct kvm_vcpu *vcpu;

	kvm_for_each_vcpu(i, vcpu, kvm) {
		if (!kvm_arm_pmu_irq_initialized(vcpu))
			continue;

		if (irq_is_ppi(irq)) {
			if (vcpu->arch.pmu.irq_num != irq)
				return false;
		} else {
			if (vcpu->arch.pmu.irq_num == irq)
				return false;
		}
	}

	return true;
}

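/*
 * Bind the VM to the PMU whose perf type matches @pmu_id, looked up in
 * arm_pmus. This fails once any vcpu has run, or when an event filter
 * has already been set up against a different PMU.
 */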
static int kvm_arm_pmu_v3_set_pmu(struct kvm_vcpu *vcpu, int pmu_id)
{
	struct kvm *kvm = vcpu->kvm;
	struct arm_pmu_entry *entry;
	struct arm_pmu *arm_pmu;
	int ret = -ENXIO;

	mutex_lock(&kvm->lock);
	mutex_lock(&arm_pmus_lock);

	list_for_each_entry(entry, &arm_pmus, entry) {
		arm_pmu = entry->arm_pmu;
		if (arm_pmu->pmu.type == pmu_id) {
			if (test_bit(KVM_ARCH_FLAG_HAS_RAN_ONCE, &kvm->arch.flags) ||
			    (kvm->arch.pmu_filter && kvm->arch.arm_pmu != arm_pmu)) {
				ret = -EBUSY;
				break;
			}

			kvm->arch.arm_pmu = arm_pmu;
			cpumask_copy(kvm->arch.supported_cpus, &arm_pmu->supported_cpus);
			ret = 0;
			break;
		}
	}

	mutex_unlock(&arm_pmus_lock);
	mutex_unlock(&kvm->lock);
	return ret;
}

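/*
 * Handle the vcpu device attributes controlling the PMU: the overflow
 * interrupt number, the event filter, the PMU selection, and the final
 * init. A default PMU is probed on first use if none was set.
 */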
int kvm_arm_pmu_v3_set_attr(struct kvm_vcpu *vcpu, struct kvm_device_attr *attr)
{
	struct kvm *kvm = vcpu->kvm;

	if (!kvm_vcpu_has_pmu(vcpu))
		return -ENODEV;

	if (vcpu->arch.pmu.created)
		return -EBUSY;

	mutex_lock(&kvm->lock);
	if (!kvm->arch.arm_pmu) {
		/* No PMU set, get the default one */
		kvm->arch.arm_pmu = kvm_pmu_probe_armpmu();
		if (!kvm->arch.arm_pmu) {
			mutex_unlock(&kvm->lock);
			return -ENODEV;
		}
	}
	mutex_unlock(&kvm->lock);

	switch (attr->attr) {
	case KVM_ARM_VCPU_PMU_V3_IRQ: {
		int __user *uaddr = (int __user *)(long)attr->addr;
		int irq;

		if (!irqchip_in_kernel(kvm))
			return -EINVAL;

		if (get_user(irq, uaddr))
			return -EFAULT;

		/* The PMU overflow interrupt can be a PPI or a valid SPI. */
		if (!(irq_is_ppi(irq) || irq_is_spi(irq)))
			return -EINVAL;

		if (!pmu_irq_is_valid(kvm, irq))
			return -EINVAL;

		if (kvm_arm_pmu_irq_initialized(vcpu))
			return -EBUSY;

		kvm_debug("Set kvm ARM PMU irq: %d\n", irq);
		vcpu->arch.pmu.irq_num = irq;
		return 0;
	}
	case KVM_ARM_VCPU_PMU_V3_FILTER: {
		struct kvm_pmu_event_filter __user *uaddr;
		struct kvm_pmu_event_filter filter;
		int nr_events;

		nr_events = kvm_pmu_event_mask(kvm) + 1;

		uaddr = (struct kvm_pmu_event_filter __user *)(long)attr->addr;

		if (copy_from_user(&filter, uaddr, sizeof(filter)))
			return -EFAULT;

		if (((u32)filter.base_event + filter.nevents) > nr_events ||
		    (filter.action != KVM_PMU_EVENT_ALLOW &&
		     filter.action != KVM_PMU_EVENT_DENY))
			return -EINVAL;

		mutex_lock(&kvm->lock);

		if (test_bit(KVM_ARCH_FLAG_HAS_RAN_ONCE, &kvm->arch.flags)) {
			mutex_unlock(&kvm->lock);
			return -EBUSY;
		}

		if (!kvm->arch.pmu_filter) {
			kvm->arch.pmu_filter = bitmap_alloc(nr_events, GFP_KERNEL_ACCOUNT);
			if (!kvm->arch.pmu_filter) {
				mutex_unlock(&kvm->lock);
				return -ENOMEM;
			}

			/*
			 * The default depends on the first applied filter.
			 * If it allows events, the default is to deny.
			 * Conversely, if the first filter denies a set of
			 * events, the default is to allow.
			 */
			if (filter.action == KVM_PMU_EVENT_ALLOW)
				bitmap_zero(kvm->arch.pmu_filter, nr_events);
			else
				bitmap_fill(kvm->arch.pmu_filter, nr_events);
		}

		if (filter.action == KVM_PMU_EVENT_ALLOW)
			bitmap_set(kvm->arch.pmu_filter, filter.base_event, filter.nevents);
		else
			bitmap_clear(kvm->arch.pmu_filter, filter.base_event, filter.nevents);

		mutex_unlock(&kvm->lock);

		return 0;
	}
	case KVM_ARM_VCPU_PMU_V3_SET_PMU: {
		int __user *uaddr = (int __user *)(long)attr->addr;
		int pmu_id;

		if (get_user(pmu_id, uaddr))
			return -EFAULT;

		return kvm_arm_pmu_v3_set_pmu(vcpu, pmu_id);
	}
	case KVM_ARM_VCPU_PMU_V3_INIT:
		return kvm_arm_pmu_v3_init(vcpu);
	}

	return -ENXIO;
}

int kvm_arm_pmu_v3_get_attr(struct kvm_vcpu *vcpu, struct kvm_device_attr *attr)
{
	switch (attr->attr) {
	case KVM_ARM_VCPU_PMU_V3_IRQ: {
		int __user *uaddr = (int __user *)(long)attr->addr;
		int irq;

		if (!irqchip_in_kernel(vcpu->kvm))
			return -EINVAL;

		if (!kvm_vcpu_has_pmu(vcpu))
			return -ENODEV;

		if (!kvm_arm_pmu_irq_initialized(vcpu))
			return -ENXIO;

		irq = vcpu->arch.pmu.irq_num;
		return put_user(irq, uaddr);
	}
	}

	return -ENXIO;
}

int kvm_arm_pmu_v3_has_attr(struct kvm_vcpu *vcpu, struct kvm_device_attr *attr)
{
	switch (attr->attr) {
	case KVM_ARM_VCPU_PMU_V3_IRQ:
	case KVM_ARM_VCPU_PMU_V3_INIT:
	case KVM_ARM_VCPU_PMU_V3_FILTER:
	case KVM_ARM_VCPU_PMU_V3_SET_PMU:
		if (kvm_vcpu_has_pmu(vcpu))
			return 0;
	}

	return -ENXIO;
}