/* SPDX-License-Identifier: GPL-2.0-only */
/*
 * Copyright (C) 2015 Linaro Ltd.
 * Author: Shannon Zhao <shannon.zhao@linaro.org>
 */

#ifndef __ASM_ARM_KVM_PMU_H
#define __ASM_ARM_KVM_PMU_H

#include <linux/perf_event.h>
#include <linux/perf/arm_pmuv3.h>

#define ARMV8_PMU_CYCLE_IDX		(ARMV8_PMU_MAX_COUNTERS - 1)

#ifdef CONFIG_HW_PERF_EVENTS

struct kvm_pmc {
	u8 idx;	/* index into the pmu->pmc array */
	struct perf_event *perf_event;
};

struct kvm_pmu_events {
	u32 events_host;
	u32 events_guest;
};

struct kvm_pmu {
	struct irq_work overflow_work;
	struct kvm_pmu_events events;
	struct kvm_pmc pmc[ARMV8_PMU_MAX_COUNTERS];
	int irq_num;
	bool created;
	bool irq_level;
};

struct arm_pmu_entry {
	struct list_head entry;
	struct arm_pmu *arm_pmu;
};

DECLARE_STATIC_KEY_FALSE(kvm_arm_pmu_available);

static __always_inline bool kvm_arm_support_pmu_v3(void)
{
	return static_branch_likely(&kvm_arm_pmu_available);
}

#define kvm_arm_pmu_irq_initialized(v)	((v)->arch.pmu.irq_num >= VGIC_NR_SGIS)
u64 kvm_pmu_get_counter_value(struct kvm_vcpu *vcpu, u64 select_idx);
void kvm_pmu_set_counter_value(struct kvm_vcpu *vcpu, u64 select_idx, u64 val);
u64 kvm_pmu_valid_counter_mask(struct kvm_vcpu *vcpu);
u64 kvm_pmu_get_pmceid(struct kvm_vcpu *vcpu, bool pmceid1);
void kvm_pmu_vcpu_init(struct kvm_vcpu *vcpu);
void kvm_pmu_vcpu_reset(struct kvm_vcpu *vcpu);
void kvm_pmu_vcpu_destroy(struct kvm_vcpu *vcpu);
void kvm_pmu_disable_counter_mask(struct kvm_vcpu *vcpu, u64 val);
void kvm_pmu_enable_counter_mask(struct kvm_vcpu *vcpu, u64 val);
void kvm_pmu_flush_hwstate(struct kvm_vcpu *vcpu);
void kvm_pmu_sync_hwstate(struct kvm_vcpu *vcpu);
bool kvm_pmu_should_notify_user(struct kvm_vcpu *vcpu);
void kvm_pmu_update_run(struct kvm_vcpu *vcpu);
void kvm_pmu_software_increment(struct kvm_vcpu *vcpu, u64 val);
void kvm_pmu_handle_pmcr(struct kvm_vcpu *vcpu, u64 val);
void kvm_pmu_set_counter_event_type(struct kvm_vcpu *vcpu, u64 data,
				    u64 select_idx);
int kvm_arm_pmu_v3_set_attr(struct kvm_vcpu *vcpu,
			    struct kvm_device_attr *attr);
int kvm_arm_pmu_v3_get_attr(struct kvm_vcpu *vcpu,
			    struct kvm_device_attr *attr);
int kvm_arm_pmu_v3_has_attr(struct kvm_vcpu *vcpu,
			    struct kvm_device_attr *attr);
int kvm_arm_pmu_v3_enable(struct kvm_vcpu *vcpu);

struct kvm_pmu_events *kvm_get_pmu_events(void);
void kvm_vcpu_pmu_restore_guest(struct kvm_vcpu *vcpu);
void kvm_vcpu_pmu_restore_host(struct kvm_vcpu *vcpu);

#define kvm_vcpu_has_pmu(vcpu)					\
	(test_bit(KVM_ARM_VCPU_PMU_V3, (vcpu)->arch.features))

/*
 * Updates the vcpu's view of the pmu events for this cpu.
 * Must be called before every vcpu run after disabling interrupts, to ensure
 * that an interrupt cannot fire and update the structure.
 */
#define kvm_pmu_update_vcpu_events(vcpu)				\
	do {								\
		if (!has_vhe() && kvm_vcpu_has_pmu(vcpu))		\
			vcpu->arch.pmu.events = *kvm_get_pmu_events();	\
	} while (0)

/*
 * Evaluates as true when emulating PMUv3p5, and false otherwise.
 */
#define kvm_pmu_is_3p5(vcpu)						\
	(vcpu->kvm->arch.dfr0_pmuver.imp >= ID_AA64DFR0_EL1_PMUVer_V3P5)

u8 kvm_arm_pmu_get_pmuver_limit(void);

#else
struct kvm_pmu {
};

static inline bool kvm_arm_support_pmu_v3(void)
{
	return false;
}

#define kvm_arm_pmu_irq_initialized(v)	(false)
static inline u64 kvm_pmu_get_counter_value(struct kvm_vcpu *vcpu,
					    u64 select_idx)
{
	return 0;
}
static inline void kvm_pmu_set_counter_value(struct kvm_vcpu *vcpu,
					     u64 select_idx, u64 val) {}
static inline u64 kvm_pmu_valid_counter_mask(struct kvm_vcpu *vcpu)
{
	return 0;
}
static inline void kvm_pmu_vcpu_init(struct kvm_vcpu *vcpu) {}
static inline void kvm_pmu_vcpu_reset(struct kvm_vcpu *vcpu) {}
static inline void kvm_pmu_vcpu_destroy(struct kvm_vcpu *vcpu) {}
static inline void kvm_pmu_disable_counter_mask(struct kvm_vcpu *vcpu, u64 val) {}
static inline void kvm_pmu_enable_counter_mask(struct kvm_vcpu *vcpu, u64 val) {}
static inline void kvm_pmu_flush_hwstate(struct kvm_vcpu *vcpu) {}
static inline void kvm_pmu_sync_hwstate(struct kvm_vcpu *vcpu) {}
static inline bool kvm_pmu_should_notify_user(struct kvm_vcpu *vcpu)
{
	return false;
}
static inline void kvm_pmu_update_run(struct kvm_vcpu *vcpu) {}
static inline void kvm_pmu_software_increment(struct kvm_vcpu *vcpu, u64 val) {}
static inline void kvm_pmu_handle_pmcr(struct kvm_vcpu *vcpu, u64 val) {}
static inline void kvm_pmu_set_counter_event_type(struct kvm_vcpu *vcpu,
						  u64 data, u64 select_idx) {}
static inline int kvm_arm_pmu_v3_set_attr(struct kvm_vcpu *vcpu,
					  struct kvm_device_attr *attr)
{
	return -ENXIO;
}
static inline int kvm_arm_pmu_v3_get_attr(struct kvm_vcpu *vcpu,
					  struct kvm_device_attr *attr)
{
	return -ENXIO;
}
static inline int kvm_arm_pmu_v3_has_attr(struct kvm_vcpu *vcpu,
					  struct kvm_device_attr *attr)
{
	return -ENXIO;
}
static inline int kvm_arm_pmu_v3_enable(struct kvm_vcpu *vcpu)
{
	return 0;
}
static inline u64 kvm_pmu_get_pmceid(struct kvm_vcpu *vcpu, bool pmceid1)
{
	return 0;
}

#define kvm_vcpu_has_pmu(vcpu)		({ false; })
#define kvm_pmu_is_3p5(vcpu)		({ false; })
static inline void kvm_pmu_update_vcpu_events(struct kvm_vcpu *vcpu) {}
static inline void kvm_vcpu_pmu_restore_guest(struct kvm_vcpu *vcpu) {}
static inline void kvm_vcpu_pmu_restore_host(struct kvm_vcpu *vcpu) {}
static inline u8 kvm_arm_pmu_get_pmuver_limit(void)
{
	return 0;
}

#endif

#endif