// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (C) 2015 Linaro Ltd.
 * Author: Shannon Zhao <shannon.zhao@linaro.org>
 */

#include <linux/cpu.h>
#include <linux/kvm.h>
#include <linux/kvm_host.h>
#include <linux/list.h>
#include <linux/perf_event.h>
#include <linux/perf/arm_pmu.h>
#include <linux/uaccess.h>
#include <asm/kvm_emulate.h>
#include <kvm/arm_pmu.h>
#include <kvm/arm_vgic.h>

#define PERF_ATTR_CFG1_COUNTER_64BIT    BIT(0)

DEFINE_STATIC_KEY_FALSE(kvm_arm_pmu_available);

static LIST_HEAD(arm_pmus);
static DEFINE_MUTEX(arm_pmus_lock);

static void kvm_pmu_create_perf_event(struct kvm_vcpu *vcpu, u64 select_idx);
static void kvm_pmu_release_perf_event(struct kvm_pmc *pmc);

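/*
 * Per the Arm PMUv3 architecture, a baseline ARMv8.0 PMU encodes event
 * numbers in 10 bits (0x000-0x3ff), while ARMv8.1 and later widen the
 * event space to 16 bits (0x0000-0xffff); the two GENMASK() widths
 * below reflect that split.
 */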
static u32 kvm_pmu_event_mask(struct kvm *kvm)
{
        unsigned int pmuver;

        pmuver = kvm->arch.arm_pmu->pmuver;

        switch (pmuver) {
        case ID_AA64DFR0_EL1_PMUVer_IMP:
                return GENMASK(9, 0);
        case ID_AA64DFR0_EL1_PMUVer_V3P1:
        case ID_AA64DFR0_EL1_PMUVer_V3P4:
        case ID_AA64DFR0_EL1_PMUVer_V3P5:
        case ID_AA64DFR0_EL1_PMUVer_V3P7:
                return GENMASK(15, 0);
        default:                /* Shouldn't be here, just for sanity */
                WARN_ONCE(1, "Unknown PMU version %d\n", pmuver);
                return 0;
        }
}

/**
 * kvm_pmu_idx_is_64bit - determine if select_idx is a 64bit counter
 * @vcpu: The vcpu pointer
 * @select_idx: The counter index
 */
static bool kvm_pmu_idx_is_64bit(struct kvm_vcpu *vcpu, u64 select_idx)
{
        return (select_idx == ARMV8_PMU_CYCLE_IDX);
}

static bool kvm_pmu_idx_has_64bit_overflow(struct kvm_vcpu *vcpu, u64 select_idx)
{
        return (select_idx == ARMV8_PMU_CYCLE_IDX &&
                __vcpu_sys_reg(vcpu, PMCR_EL0) & ARMV8_PMU_PMCR_LC);
}

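/*
 * Chaining, in brief: an even-numbered event counter can pair with its
 * odd neighbour to emulate a 64-bit counter, with the odd counter
 * programmed to count the CHAIN event. An overflow of counter 2n then
 * increments counter 2n + 1. The cycle counter never chains.
 */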
static bool kvm_pmu_counter_can_chain(struct kvm_vcpu *vcpu, u64 idx)
{
        return (!(idx & 1) && (idx + 1) < ARMV8_PMU_CYCLE_IDX &&
                !kvm_pmu_idx_has_64bit_overflow(vcpu, idx));
}

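/*
 * Walk back from an individual counter to its vcpu: pmc->idx is the
 * counter's position in the owning kvm_pmu's pmc[] array, so stepping
 * back idx elements lands on pmc[0], from which container_of() can
 * recover the kvm_pmu, the kvm_vcpu_arch and finally the vcpu itself.
 */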
static struct kvm_vcpu *kvm_pmc_to_vcpu(struct kvm_pmc *pmc)
{
        struct kvm_pmu *pmu;
        struct kvm_vcpu_arch *vcpu_arch;

        pmc -= pmc->idx;
        pmu = container_of(pmc, struct kvm_pmu, pmc[0]);
        vcpu_arch = container_of(pmu, struct kvm_vcpu_arch, pmu);
        return container_of(vcpu_arch, struct kvm_vcpu, arch);
}

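/*
 * Map a counter index onto the vcpu's shadow sysreg file: the cycle
 * counter lives in PMCCNTR_EL0/PMCCFILTR_EL0, while event counter i
 * lives in PMEVCNTR<i>_EL0/PMEVTYPER<i>_EL0.
 */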
static u32 counter_index_to_reg(u64 idx)
{
        return (idx == ARMV8_PMU_CYCLE_IDX) ? PMCCNTR_EL0 : PMEVCNTR0_EL0 + idx;
}

static u32 counter_index_to_evtreg(u64 idx)
{
        return (idx == ARMV8_PMU_CYCLE_IDX) ? PMCCFILTR_EL0 : PMEVTYPER0_EL0 + idx;
}

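/*
 * Note that reading a counter is a two-part sum: the shadow register
 * holds the value snapshotted when the backing perf event was last
 * stopped or reprogrammed, and the live perf event contributes whatever
 * it has counted since.
 */
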
/**
 * kvm_pmu_get_counter_value - get PMU counter value
 * @vcpu: The vcpu pointer
 * @select_idx: The counter index
 */
u64 kvm_pmu_get_counter_value(struct kvm_vcpu *vcpu, u64 select_idx)
{
        u64 counter, reg, enabled, running;
        struct kvm_pmu *pmu = &vcpu->arch.pmu;
        struct kvm_pmc *pmc = &pmu->pmc[select_idx];

        if (!kvm_vcpu_has_pmu(vcpu))
                return 0;

        reg = counter_index_to_reg(select_idx);
        counter = __vcpu_sys_reg(vcpu, reg);

        /*
         * The real counter value is equal to the value of the counter
         * register plus the value the perf event has counted.
         */
        if (pmc->perf_event)
                counter += perf_event_read_value(pmc->perf_event, &enabled,
                                                 &running);

        if (!kvm_pmu_idx_is_64bit(vcpu, select_idx))
                counter = lower_32_bits(counter);

        return counter;
}

/**
 * kvm_pmu_set_counter_value - set PMU counter value
 * @vcpu: The vcpu pointer
 * @select_idx: The counter index
 * @val: The counter value
 */
void kvm_pmu_set_counter_value(struct kvm_vcpu *vcpu, u64 select_idx, u64 val)
{
        u64 reg;

        if (!kvm_vcpu_has_pmu(vcpu))
                return;

        kvm_pmu_release_perf_event(&vcpu->arch.pmu.pmc[select_idx]);

        reg = counter_index_to_reg(select_idx);
        __vcpu_sys_reg(vcpu, reg) = val;

        /* Recreate the perf event to reflect the updated sample_period */
        kvm_pmu_create_perf_event(vcpu, select_idx);
}

/**
 * kvm_pmu_release_perf_event - remove the perf event
 * @pmc: The PMU counter pointer
 */
static void kvm_pmu_release_perf_event(struct kvm_pmc *pmc)
{
        if (pmc->perf_event) {
                perf_event_disable(pmc->perf_event);
                perf_event_release_kernel(pmc->perf_event);
                pmc->perf_event = NULL;
        }
}

/**
 * kvm_pmu_stop_counter - stop PMU counter
 * @vcpu: The vcpu pointer
 * @pmc: The PMU counter pointer
 *
 * If this counter has been configured to monitor some event, release it here.
 */
static void kvm_pmu_stop_counter(struct kvm_vcpu *vcpu, struct kvm_pmc *pmc)
{
        u64 reg, val;

        if (!pmc->perf_event)
                return;

        val = kvm_pmu_get_counter_value(vcpu, pmc->idx);

        reg = counter_index_to_reg(pmc->idx);

        __vcpu_sys_reg(vcpu, reg) = val;

        kvm_pmu_release_perf_event(pmc);
}

/**
 * kvm_pmu_vcpu_init - assign PMU counter indices for a vcpu
 * @vcpu: The vcpu pointer
 */
void kvm_pmu_vcpu_init(struct kvm_vcpu *vcpu)
{
        int i;
        struct kvm_pmu *pmu = &vcpu->arch.pmu;

        for (i = 0; i < ARMV8_PMU_MAX_COUNTERS; i++)
                pmu->pmc[i].idx = i;
}

/**
 * kvm_pmu_vcpu_reset - reset PMU state for the vcpu
 * @vcpu: The vcpu pointer
 */
void kvm_pmu_vcpu_reset(struct kvm_vcpu *vcpu)
{
        unsigned long mask = kvm_pmu_valid_counter_mask(vcpu);
        struct kvm_pmu *pmu = &vcpu->arch.pmu;
        int i;

        for_each_set_bit(i, &mask, 32)
                kvm_pmu_stop_counter(vcpu, &pmu->pmc[i]);
}

/**
 * kvm_pmu_vcpu_destroy - free the perf events of the PMU for a vcpu
 * @vcpu: The vcpu pointer
 */
void kvm_pmu_vcpu_destroy(struct kvm_vcpu *vcpu)
{
        int i;
        struct kvm_pmu *pmu = &vcpu->arch.pmu;

        for (i = 0; i < ARMV8_PMU_MAX_COUNTERS; i++)
                kvm_pmu_release_perf_event(&pmu->pmc[i]);
        irq_work_sync(&vcpu->arch.pmu.overflow_work);
}

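/*
 * PMCR_EL0.N is the number of implemented event counters. Example: with
 * N == 6, the valid mask is GENMASK(5, 0) | BIT(ARMV8_PMU_CYCLE_IDX),
 * i.e. event counters 0-5 plus the cycle counter.
 */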
u64 kvm_pmu_valid_counter_mask(struct kvm_vcpu *vcpu)
{
        u64 val = __vcpu_sys_reg(vcpu, PMCR_EL0) >> ARMV8_PMU_PMCR_N_SHIFT;

        val &= ARMV8_PMU_PMCR_N_MASK;
        if (val == 0)
                return BIT(ARMV8_PMU_CYCLE_IDX);
        else
                return GENMASK(val - 1, 0) | BIT(ARMV8_PMU_CYCLE_IDX);
}

/**
 * kvm_pmu_enable_counter_mask - enable selected PMU counters
 * @vcpu: The vcpu pointer
 * @val: the value guest writes to PMCNTENSET register
 *
 * Call perf_event_enable to start counting the perf event
 */
void kvm_pmu_enable_counter_mask(struct kvm_vcpu *vcpu, u64 val)
{
        int i;
        struct kvm_pmu *pmu = &vcpu->arch.pmu;
        struct kvm_pmc *pmc;

        if (!kvm_vcpu_has_pmu(vcpu))
                return;

        if (!(__vcpu_sys_reg(vcpu, PMCR_EL0) & ARMV8_PMU_PMCR_E) || !val)
                return;

        for (i = 0; i < ARMV8_PMU_MAX_COUNTERS; i++) {
                if (!(val & BIT(i)))
                        continue;

                pmc = &pmu->pmc[i];

                if (!pmc->perf_event) {
                        kvm_pmu_create_perf_event(vcpu, i);
                } else {
                        perf_event_enable(pmc->perf_event);
                        if (pmc->perf_event->state != PERF_EVENT_STATE_ACTIVE)
                                kvm_debug("fail to enable perf event\n");
                }
        }
}

/**
 * kvm_pmu_disable_counter_mask - disable selected PMU counters
 * @vcpu: The vcpu pointer
 * @val: the value guest writes to PMCNTENCLR register
 *
 * Call perf_event_disable to stop counting the perf event
 */
void kvm_pmu_disable_counter_mask(struct kvm_vcpu *vcpu, u64 val)
{
        int i;
        struct kvm_pmu *pmu = &vcpu->arch.pmu;
        struct kvm_pmc *pmc;

        if (!kvm_vcpu_has_pmu(vcpu) || !val)
                return;

        for (i = 0; i < ARMV8_PMU_MAX_COUNTERS; i++) {
                if (!(val & BIT(i)))
                        continue;

                pmc = &pmu->pmc[i];

                if (pmc->perf_event)
                        perf_event_disable(pmc->perf_event);
        }
}

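/*
 * A counter only contributes to the overflow interrupt line when three
 * bits agree for it: the overflow flag (PMOVSSET), the counter enable
 * (PMCNTENSET) and the interrupt enable (PMINTENSET), all gated by the
 * global PMCR_EL0.E enable bit.
 */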
static u64 kvm_pmu_overflow_status(struct kvm_vcpu *vcpu)
{
        u64 reg = 0;

        if ((__vcpu_sys_reg(vcpu, PMCR_EL0) & ARMV8_PMU_PMCR_E)) {
                reg = __vcpu_sys_reg(vcpu, PMOVSSET_EL0);
                reg &= __vcpu_sys_reg(vcpu, PMCNTENSET_EL0);
                reg &= __vcpu_sys_reg(vcpu, PMINTENSET_EL1);
        }

        return reg;
}

static void kvm_pmu_update_state(struct kvm_vcpu *vcpu)
{
        struct kvm_pmu *pmu = &vcpu->arch.pmu;
        bool overflow;

        if (!kvm_vcpu_has_pmu(vcpu))
                return;

        overflow = !!kvm_pmu_overflow_status(vcpu);
        if (pmu->irq_level == overflow)
                return;

        pmu->irq_level = overflow;

        if (likely(irqchip_in_kernel(vcpu->kvm))) {
                int ret = kvm_vgic_inject_irq(vcpu->kvm, vcpu->vcpu_id,
                                              pmu->irq_num, overflow, pmu);
                WARN_ON(ret);
        }
}

bool kvm_pmu_should_notify_user(struct kvm_vcpu *vcpu)
{
        struct kvm_pmu *pmu = &vcpu->arch.pmu;
        struct kvm_sync_regs *sregs = &vcpu->run->s.regs;
        bool run_level = sregs->device_irq_level & KVM_ARM_DEV_PMU;

        if (likely(irqchip_in_kernel(vcpu->kvm)))
                return false;

        return pmu->irq_level != run_level;
}

/*
 * Reflect the PMU overflow interrupt output level into the kvm_run structure
 */
void kvm_pmu_update_run(struct kvm_vcpu *vcpu)
{
        struct kvm_sync_regs *regs = &vcpu->run->s.regs;

        /* Populate the PMU bit of the device IRQ bitmap for user space */
        regs->device_irq_level &= ~KVM_ARM_DEV_PMU;
        if (vcpu->arch.pmu.irq_level)
                regs->device_irq_level |= KVM_ARM_DEV_PMU;
}

/**
 * kvm_pmu_flush_hwstate - flush pmu state to cpu
 * @vcpu: The vcpu pointer
 *
 * Check if the PMU has overflowed while we were running in the host, and inject
 * an interrupt if that was the case.
 */
void kvm_pmu_flush_hwstate(struct kvm_vcpu *vcpu)
{
        kvm_pmu_update_state(vcpu);
}

/**
 * kvm_pmu_sync_hwstate - sync pmu state from cpu
 * @vcpu: The vcpu pointer
 *
 * Check if the PMU has overflowed while we were running in the guest, and
 * inject an interrupt if that was the case.
 */
void kvm_pmu_sync_hwstate(struct kvm_vcpu *vcpu)
{
        kvm_pmu_update_state(vcpu);
}

/*
 * When the perf interrupt is an NMI, we cannot safely notify the vcpu
 * corresponding to the event. This irq_work callback performs the
 * notification once we are back outside of NMI context.
 */
static void kvm_pmu_perf_overflow_notify_vcpu(struct irq_work *work)
{
        struct kvm_vcpu *vcpu;
        struct kvm_pmu *pmu;

        pmu = container_of(work, struct kvm_pmu, overflow_work);
        vcpu = kvm_pmc_to_vcpu(pmu->pmc);

        kvm_vcpu_kick(vcpu);
}

/*
 * Perform an increment on any of the counters described in @mask,
 * generating the overflow if required, and propagate it as a chained
 * event if possible.
 */
static void kvm_pmu_counter_increment(struct kvm_vcpu *vcpu,
                                      unsigned long mask, u32 event)
{
        int i;

        if (!(__vcpu_sys_reg(vcpu, PMCR_EL0) & ARMV8_PMU_PMCR_E))
                return;

        /* Weed out disabled counters */
        mask &= __vcpu_sys_reg(vcpu, PMCNTENSET_EL0);

        for_each_set_bit(i, &mask, ARMV8_PMU_CYCLE_IDX) {
                u64 type, reg;

                /* Filter on event type */
                type = __vcpu_sys_reg(vcpu, counter_index_to_evtreg(i));
                type &= kvm_pmu_event_mask(vcpu->kvm);
                if (type != event)
                        continue;

                /* Increment this counter */
                reg = __vcpu_sys_reg(vcpu, counter_index_to_reg(i)) + 1;
                if (!kvm_pmu_idx_is_64bit(vcpu, i))
                        reg = lower_32_bits(reg);
                __vcpu_sys_reg(vcpu, counter_index_to_reg(i)) = reg;

                /* No overflow? move on */
                if (kvm_pmu_idx_has_64bit_overflow(vcpu, i) ? reg : lower_32_bits(reg))
                        continue;

                /* Mark overflow */
                __vcpu_sys_reg(vcpu, PMOVSSET_EL0) |= BIT(i);

                if (kvm_pmu_counter_can_chain(vcpu, i))
                        kvm_pmu_counter_increment(vcpu, BIT(i + 1),
                                                  ARMV8_PMUV3_PERFCTR_CHAIN);
        }
}

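/*
 * Worked example for the helper below: a counter with a 32-bit overflow
 * point holding 0xfffffff0 is sixteen increments away from wrapping;
 * (-counter) & GENMASK(31, 0) yields 0x10, so the backing perf event is
 * programmed to fire exactly when the architectural counter overflows.
 */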
/* Compute the sample period for a given counter value */
static u64 compute_period(struct kvm_vcpu *vcpu, u64 select_idx, u64 counter)
{
        u64 val;

        if (kvm_pmu_idx_is_64bit(vcpu, select_idx)) {
                if (!kvm_pmu_idx_has_64bit_overflow(vcpu, select_idx))
                        val = -(counter & GENMASK(31, 0));
                else
                        val = (-counter) & GENMASK(63, 0);
        } else {
                val = (-counter) & GENMASK(31, 0);
        }

        return val;
}

/*
 * When the perf event overflows, set the overflow status and inform the vcpu.
 */
static void kvm_pmu_perf_overflow(struct perf_event *perf_event,
                                  struct perf_sample_data *data,
                                  struct pt_regs *regs)
{
        struct kvm_pmc *pmc = perf_event->overflow_handler_context;
        struct arm_pmu *cpu_pmu = to_arm_pmu(perf_event->pmu);
        struct kvm_vcpu *vcpu = kvm_pmc_to_vcpu(pmc);
        int idx = pmc->idx;
        u64 period;

        cpu_pmu->pmu.stop(perf_event, PERF_EF_UPDATE);

        /*
         * Reset the sample period to the architectural limit,
         * i.e. the point where the counter overflows.
         */
        period = compute_period(vcpu, idx, local64_read(&perf_event->count));

        local64_set(&perf_event->hw.period_left, 0);
        perf_event->attr.sample_period = period;
        perf_event->hw.sample_period = period;

        __vcpu_sys_reg(vcpu, PMOVSSET_EL0) |= BIT(idx);

        if (kvm_pmu_counter_can_chain(vcpu, idx))
                kvm_pmu_counter_increment(vcpu, BIT(idx + 1),
                                          ARMV8_PMUV3_PERFCTR_CHAIN);

        if (kvm_pmu_overflow_status(vcpu)) {
                kvm_make_request(KVM_REQ_IRQ_PENDING, vcpu);

                if (!in_nmi())
                        kvm_vcpu_kick(vcpu);
                else
                        irq_work_queue(&vcpu->arch.pmu.overflow_work);
        }

        cpu_pmu->pmu.start(perf_event, PERF_EF_RELOAD);
}

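/*
 * Example flow for the helper below: a guest write of BIT(2) to
 * PMSWINC_EL0 bumps counter 2, provided that counter is enabled and its
 * event type register selects the SW_INCR event.
 */
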
/**
 * kvm_pmu_software_increment - do software increment
 * @vcpu: The vcpu pointer
 * @val: the value guest writes to PMSWINC register
 */
void kvm_pmu_software_increment(struct kvm_vcpu *vcpu, u64 val)
{
        kvm_pmu_counter_increment(vcpu, val, ARMV8_PMUV3_PERFCTR_SW_INCR);
}

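/*
 * Architectural side effects handled below: PMCR_EL0.E gates every
 * counter enabled in PMCNTENSET_EL0, PMCR_EL0.C zeroes the cycle
 * counter, and PMCR_EL0.P zeroes the event counters while leaving the
 * cycle counter untouched.
 */
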
/**
 * kvm_pmu_handle_pmcr - handle PMCR register
 * @vcpu: The vcpu pointer
 * @val: the value guest writes to PMCR register
 */
void kvm_pmu_handle_pmcr(struct kvm_vcpu *vcpu, u64 val)
{
        int i;

        if (!kvm_vcpu_has_pmu(vcpu))
                return;

        if (val & ARMV8_PMU_PMCR_E) {
                kvm_pmu_enable_counter_mask(vcpu,
                        __vcpu_sys_reg(vcpu, PMCNTENSET_EL0));
        } else {
                kvm_pmu_disable_counter_mask(vcpu,
                        __vcpu_sys_reg(vcpu, PMCNTENSET_EL0));
        }

        if (val & ARMV8_PMU_PMCR_C)
                kvm_pmu_set_counter_value(vcpu, ARMV8_PMU_CYCLE_IDX, 0);

        if (val & ARMV8_PMU_PMCR_P) {
                unsigned long mask = kvm_pmu_valid_counter_mask(vcpu);
                mask &= ~BIT(ARMV8_PMU_CYCLE_IDX);
                for_each_set_bit(i, &mask, 32)
                        kvm_pmu_set_counter_value(vcpu, i, 0);
        }
}

static bool kvm_pmu_counter_is_enabled(struct kvm_vcpu *vcpu, u64 select_idx)
{
        return (__vcpu_sys_reg(vcpu, PMCR_EL0) & ARMV8_PMU_PMCR_E) &&
               (__vcpu_sys_reg(vcpu, PMCNTENSET_EL0) & BIT(select_idx));
}

/**
 * kvm_pmu_create_perf_event - create a perf event for a counter
 * @vcpu: The vcpu pointer
 * @select_idx: The number of the selected counter
 */
static void kvm_pmu_create_perf_event(struct kvm_vcpu *vcpu, u64 select_idx)
{
        struct arm_pmu *arm_pmu = vcpu->kvm->arch.arm_pmu;
        struct kvm_pmu *pmu = &vcpu->arch.pmu;
        struct kvm_pmc *pmc = &pmu->pmc[select_idx];
        struct perf_event *event;
        struct perf_event_attr attr;
        u64 eventsel, counter, reg, data;

        reg = counter_index_to_evtreg(select_idx);
        data = __vcpu_sys_reg(vcpu, reg);

        kvm_pmu_stop_counter(vcpu, pmc);
        if (pmc->idx == ARMV8_PMU_CYCLE_IDX)
                eventsel = ARMV8_PMUV3_PERFCTR_CPU_CYCLES;
        else
                eventsel = data & kvm_pmu_event_mask(vcpu->kvm);

        /*
         * Neither SW increment nor chained events need to be backed
         * by a perf event.
         */
        if (eventsel == ARMV8_PMUV3_PERFCTR_SW_INCR ||
            eventsel == ARMV8_PMUV3_PERFCTR_CHAIN)
                return;

        /*
         * If we have a filter in place and the event isn't allowed, do
         * not install a perf event either.
         */
        if (vcpu->kvm->arch.pmu_filter &&
            !test_bit(eventsel, vcpu->kvm->arch.pmu_filter))
                return;

        memset(&attr, 0, sizeof(struct perf_event_attr));
        attr.type = arm_pmu->pmu.type;
        attr.size = sizeof(attr);
        attr.pinned = 1;
        attr.disabled = !kvm_pmu_counter_is_enabled(vcpu, pmc->idx);
        attr.exclude_user = data & ARMV8_PMU_EXCLUDE_EL0 ? 1 : 0;
        attr.exclude_kernel = data & ARMV8_PMU_EXCLUDE_EL1 ? 1 : 0;
        attr.exclude_hv = 1; /* Don't count EL2 events */
        attr.exclude_host = 1; /* Don't count host events */
        attr.config = eventsel;

        counter = kvm_pmu_get_counter_value(vcpu, select_idx);

        /*
         * If counting with a 64bit counter, advertise it to the perf
         * code, carefully dealing with the initial sample period
         * which also depends on the overflow.
         */
        if (kvm_pmu_idx_is_64bit(vcpu, select_idx))
                attr.config1 |= PERF_ATTR_CFG1_COUNTER_64BIT;

        attr.sample_period = compute_period(vcpu, select_idx, counter);

        event = perf_event_create_kernel_counter(&attr, -1, current,
                                                 kvm_pmu_perf_overflow, pmc);

        if (IS_ERR(event)) {
                pr_err_once("kvm: pmu event creation failed %ld\n",
                            PTR_ERR(event));
                return;
        }

        pmc->perf_event = event;
}

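/*
 * The mask built below keeps the filtering bits of the event type
 * register (those outside ARMV8_PMU_EVTYPE_EVENT) while clamping the
 * event number to the width this PMU version implements, so unsupported
 * event bits written by the guest are silently dropped.
 */
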
/**
 * kvm_pmu_set_counter_event_type - set selected counter to monitor some event
 * @vcpu: The vcpu pointer
 * @data: The data guest writes to PMXEVTYPER_EL0
 * @select_idx: The number of the selected counter
 *
 * When the guest OS accesses PMXEVTYPER_EL0, it wants to set a PMC to count an
 * event with the given hardware event number. Here we call the perf_event API
 * to emulate this action and create a kernel perf event for it.
 */
void kvm_pmu_set_counter_event_type(struct kvm_vcpu *vcpu, u64 data,
                                    u64 select_idx)
{
        u64 reg, mask;

        if (!kvm_vcpu_has_pmu(vcpu))
                return;

        mask  =  ARMV8_PMU_EVTYPE_MASK;
        mask &= ~ARMV8_PMU_EVTYPE_EVENT;
        mask |= kvm_pmu_event_mask(vcpu->kvm);

        reg = counter_index_to_evtreg(select_idx);

        __vcpu_sys_reg(vcpu, reg) = data & mask;

        kvm_pmu_create_perf_event(vcpu, select_idx);
}

void kvm_host_pmu_init(struct arm_pmu *pmu)
{
        struct arm_pmu_entry *entry;

        if (pmu->pmuver == 0 || pmu->pmuver == ID_AA64DFR0_EL1_PMUVer_IMP_DEF)
                return;

        mutex_lock(&arm_pmus_lock);

        entry = kmalloc(sizeof(*entry), GFP_KERNEL);
        if (!entry)
                goto out_unlock;

        entry->arm_pmu = pmu;
        list_add_tail(&entry->entry, &arm_pmus);

        if (list_is_singular(&arm_pmus))
                static_branch_enable(&kvm_arm_pmu_available);

out_unlock:
        mutex_unlock(&arm_pmus_lock);
}

static struct arm_pmu *kvm_pmu_probe_armpmu(void)
{
        struct perf_event_attr attr = { };
        struct perf_event *event;
        struct arm_pmu *pmu = NULL;

        /*
         * Create a dummy event that only counts user cycles. As we'll never
         * leave this function with the event being live, it will never
         * count anything. But it allows us to probe some of the PMU
         * details. Yes, this is terrible.
         */
        attr.type = PERF_TYPE_RAW;
        attr.size = sizeof(attr);
        attr.pinned = 1;
        attr.disabled = 0;
        attr.exclude_user = 0;
        attr.exclude_kernel = 1;
        attr.exclude_hv = 1;
        attr.exclude_host = 1;
        attr.config = ARMV8_PMUV3_PERFCTR_CPU_CYCLES;
        attr.sample_period = GENMASK(63, 0);

        event = perf_event_create_kernel_counter(&attr, -1, current,
                                                 kvm_pmu_perf_overflow, &attr);

        if (IS_ERR(event)) {
                pr_err_once("kvm: pmu event creation failed %ld\n",
                            PTR_ERR(event));
                return NULL;
        }

        if (event->pmu) {
                pmu = to_arm_pmu(event->pmu);
                if (pmu->pmuver == 0 ||
                    pmu->pmuver == ID_AA64DFR0_EL1_PMUVer_IMP_DEF)
                        pmu = NULL;
        }

        perf_event_disable(event);
        perf_event_release_kernel(event);

        return pmu;
}

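/*
 * Each PMCEID register describes 64 events: its low 32 bits cover the
 * common events base..base+31 and its high 32 bits the extended event
 * space starting at 0x4000 + base. The filter folding below mirrors
 * that layout, eight bits at a time.
 */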
u64 kvm_pmu_get_pmceid(struct kvm_vcpu *vcpu, bool pmceid1)
{
        unsigned long *bmap = vcpu->kvm->arch.pmu_filter;
        u64 val, mask = 0;
        int base, i, nr_events;

        if (!kvm_vcpu_has_pmu(vcpu))
                return 0;

        if (!pmceid1) {
                val = read_sysreg(pmceid0_el0);
                /* always support CHAIN */
                val |= BIT(ARMV8_PMUV3_PERFCTR_CHAIN);
                base = 0;
        } else {
                val = read_sysreg(pmceid1_el0);
                /*
                 * Don't advertise STALL_SLOT, as PMMIR_EL0 is handled
                 * as RAZ
                 */
                if (vcpu->kvm->arch.arm_pmu->pmuver >= ID_AA64DFR0_EL1_PMUVer_V3P4)
                        val &= ~BIT_ULL(ARMV8_PMUV3_PERFCTR_STALL_SLOT - 32);
                base = 32;
        }

        if (!bmap)
                return val;

        nr_events = kvm_pmu_event_mask(vcpu->kvm) + 1;

        for (i = 0; i < 32; i += 8) {
                u64 byte;

                byte = bitmap_get_value8(bmap, base + i);
                mask |= byte << i;
                if (nr_events >= (0x4000 + base + 32)) {
                        byte = bitmap_get_value8(bmap, 0x4000 + base + i);
                        mask |= byte << (32 + i);
                }
        }

        return val & mask;
}

int kvm_arm_pmu_v3_enable(struct kvm_vcpu *vcpu)
{
        if (!kvm_vcpu_has_pmu(vcpu))
                return 0;

        if (!vcpu->arch.pmu.created)
                return -EINVAL;

        /*
         * A valid interrupt configuration for the PMU is either to have a
         * properly configured interrupt number and use an in-kernel
         * irqchip, or to not have an in-kernel GIC and not set an IRQ.
         */
        if (irqchip_in_kernel(vcpu->kvm)) {
                int irq = vcpu->arch.pmu.irq_num;
                /*
                 * If we are using an in-kernel vgic, at this point we know
                 * the vgic will be initialized, so we can check the PMU irq
                 * number against the dimensions of the vgic and make sure
                 * it's valid.
                 */
                if (!irq_is_ppi(irq) && !vgic_valid_spi(vcpu->kvm, irq))
                        return -EINVAL;
        } else if (kvm_arm_pmu_irq_initialized(vcpu)) {
                return -EINVAL;
        }

        /* One-off reload of the PMU on first run */
        kvm_make_request(KVM_REQ_RELOAD_PMU, vcpu);

        return 0;
}

static int kvm_arm_pmu_v3_init(struct kvm_vcpu *vcpu)
{
        if (irqchip_in_kernel(vcpu->kvm)) {
                int ret;

                /*
                 * If using the PMU with an in-kernel virtual GIC
                 * implementation, we require the GIC to be already
                 * initialized when initializing the PMU.
                 */
                if (!vgic_initialized(vcpu->kvm))
                        return -ENODEV;

                if (!kvm_arm_pmu_irq_initialized(vcpu))
                        return -ENXIO;

                ret = kvm_vgic_set_owner(vcpu, vcpu->arch.pmu.irq_num,
                                         &vcpu->arch.pmu);
                if (ret)
                        return ret;
        }

        init_irq_work(&vcpu->arch.pmu.overflow_work,
                      kvm_pmu_perf_overflow_notify_vcpu);

        vcpu->arch.pmu.created = true;
        return 0;
}

/*
 * For one VM the interrupt type must be the same for each vcpu.
 * As a PPI, the interrupt number is the same for all vcpus,
 * while as an SPI it must be a separate number per vcpu.
 */
static bool pmu_irq_is_valid(struct kvm *kvm, int irq)
{
        unsigned long i;
        struct kvm_vcpu *vcpu;

        kvm_for_each_vcpu(i, vcpu, kvm) {
                if (!kvm_arm_pmu_irq_initialized(vcpu))
                        continue;

                if (irq_is_ppi(irq)) {
                        if (vcpu->arch.pmu.irq_num != irq)
                                return false;
                } else {
                        if (vcpu->arch.pmu.irq_num == irq)
                                return false;
                }
        }

        return true;
}

static int kvm_arm_pmu_v3_set_pmu(struct kvm_vcpu *vcpu, int pmu_id)
{
        struct kvm *kvm = vcpu->kvm;
        struct arm_pmu_entry *entry;
        struct arm_pmu *arm_pmu;
        int ret = -ENXIO;

        mutex_lock(&kvm->lock);
        mutex_lock(&arm_pmus_lock);

        list_for_each_entry(entry, &arm_pmus, entry) {
                arm_pmu = entry->arm_pmu;
                if (arm_pmu->pmu.type == pmu_id) {
                        if (test_bit(KVM_ARCH_FLAG_HAS_RAN_ONCE, &kvm->arch.flags) ||
                            (kvm->arch.pmu_filter && kvm->arch.arm_pmu != arm_pmu)) {
                                ret = -EBUSY;
                                break;
                        }

                        kvm->arch.arm_pmu = arm_pmu;
                        cpumask_copy(kvm->arch.supported_cpus, &arm_pmu->supported_cpus);
                        ret = 0;
                        break;
                }
        }

        mutex_unlock(&arm_pmus_lock);
        mutex_unlock(&kvm->lock);
        return ret;
}

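/*
 * Userspace drives the functions below through the vcpu device-attr
 * ioctls: typically an IRQ number first (when an in-kernel irqchip is
 * used), optionally a filter or an explicit PMU choice, and finally
 * KVM_ARM_VCPU_PMU_V3_INIT. Once the PMU has been created, further
 * configuration is refused with -EBUSY.
 */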
int kvm_arm_pmu_v3_set_attr(struct kvm_vcpu *vcpu, struct kvm_device_attr *attr)
{
        struct kvm *kvm = vcpu->kvm;

        if (!kvm_vcpu_has_pmu(vcpu))
                return -ENODEV;

        if (vcpu->arch.pmu.created)
                return -EBUSY;

        mutex_lock(&kvm->lock);
        if (!kvm->arch.arm_pmu) {
                /* No PMU set, get the default one */
                kvm->arch.arm_pmu = kvm_pmu_probe_armpmu();
                if (!kvm->arch.arm_pmu) {
                        mutex_unlock(&kvm->lock);
                        return -ENODEV;
                }
        }
        mutex_unlock(&kvm->lock);

        switch (attr->attr) {
        case KVM_ARM_VCPU_PMU_V3_IRQ: {
                int __user *uaddr = (int __user *)(long)attr->addr;
                int irq;

                if (!irqchip_in_kernel(kvm))
                        return -EINVAL;

                if (get_user(irq, uaddr))
                        return -EFAULT;

                /* The PMU overflow interrupt can be a PPI or a valid SPI. */
                if (!(irq_is_ppi(irq) || irq_is_spi(irq)))
                        return -EINVAL;

                if (!pmu_irq_is_valid(kvm, irq))
                        return -EINVAL;

                if (kvm_arm_pmu_irq_initialized(vcpu))
                        return -EBUSY;

                kvm_debug("Set kvm ARM PMU irq: %d\n", irq);
                vcpu->arch.pmu.irq_num = irq;
                return 0;
        }
        case KVM_ARM_VCPU_PMU_V3_FILTER: {
                struct kvm_pmu_event_filter __user *uaddr;
                struct kvm_pmu_event_filter filter;
                int nr_events;

                nr_events = kvm_pmu_event_mask(kvm) + 1;

                uaddr = (struct kvm_pmu_event_filter __user *)(long)attr->addr;

                if (copy_from_user(&filter, uaddr, sizeof(filter)))
                        return -EFAULT;

                if (((u32)filter.base_event + filter.nevents) > nr_events ||
                    (filter.action != KVM_PMU_EVENT_ALLOW &&
                     filter.action != KVM_PMU_EVENT_DENY))
                        return -EINVAL;

                mutex_lock(&kvm->lock);

                if (test_bit(KVM_ARCH_FLAG_HAS_RAN_ONCE, &kvm->arch.flags)) {
                        mutex_unlock(&kvm->lock);
                        return -EBUSY;
                }

                if (!kvm->arch.pmu_filter) {
                        kvm->arch.pmu_filter = bitmap_alloc(nr_events, GFP_KERNEL_ACCOUNT);
                        if (!kvm->arch.pmu_filter) {
                                mutex_unlock(&kvm->lock);
                                return -ENOMEM;
                        }

                        /*
                         * The default depends on the first applied filter.
                         * If it allows events, the default is to deny.
                         * Conversely, if the first filter denies a set of
                         * events, the default is to allow.
                         */
                        if (filter.action == KVM_PMU_EVENT_ALLOW)
                                bitmap_zero(kvm->arch.pmu_filter, nr_events);
                        else
                                bitmap_fill(kvm->arch.pmu_filter, nr_events);
                }

                if (filter.action == KVM_PMU_EVENT_ALLOW)
                        bitmap_set(kvm->arch.pmu_filter, filter.base_event, filter.nevents);
                else
                        bitmap_clear(kvm->arch.pmu_filter, filter.base_event, filter.nevents);

                mutex_unlock(&kvm->lock);

                return 0;
        }
        case KVM_ARM_VCPU_PMU_V3_SET_PMU: {
                int __user *uaddr = (int __user *)(long)attr->addr;
                int pmu_id;

                if (get_user(pmu_id, uaddr))
                        return -EFAULT;

                return kvm_arm_pmu_v3_set_pmu(vcpu, pmu_id);
        }
        case KVM_ARM_VCPU_PMU_V3_INIT:
                return kvm_arm_pmu_v3_init(vcpu);
        }

        return -ENXIO;
}

int kvm_arm_pmu_v3_get_attr(struct kvm_vcpu *vcpu, struct kvm_device_attr *attr)
{
        switch (attr->attr) {
        case KVM_ARM_VCPU_PMU_V3_IRQ: {
                int __user *uaddr = (int __user *)(long)attr->addr;
                int irq;

                if (!irqchip_in_kernel(vcpu->kvm))
                        return -EINVAL;

                if (!kvm_vcpu_has_pmu(vcpu))
                        return -ENODEV;

                if (!kvm_arm_pmu_irq_initialized(vcpu))
                        return -ENXIO;

                irq = vcpu->arch.pmu.irq_num;
                return put_user(irq, uaddr);
        }
        }

        return -ENXIO;
}

int kvm_arm_pmu_v3_has_attr(struct kvm_vcpu *vcpu, struct kvm_device_attr *attr)
{
        switch (attr->attr) {
        case KVM_ARM_VCPU_PMU_V3_IRQ:
        case KVM_ARM_VCPU_PMU_V3_INIT:
        case KVM_ARM_VCPU_PMU_V3_FILTER:
        case KVM_ARM_VCPU_PMU_V3_SET_PMU:
                if (kvm_vcpu_has_pmu(vcpu))
                        return 0;
        }

        return -ENXIO;
}