// SPDX-License-Identifier: GPL-2.0
// CCI Cache Coherent Interconnect PMU driver
// Copyright (C) 2013-2018 Arm Ltd.
// Author: Punit Agrawal <punit.agrawal@arm.com>, Suzuki Poulose <suzuki.poulose@arm.com>

#include <linux/arm-cci.h>
#include <linux/io.h>
#include <linux/interrupt.h>
#include <linux/module.h>
#include <linux/of.h>
#include <linux/perf_event.h>
#include <linux/platform_device.h>
#include <linux/slab.h>
#include <linux/spinlock.h>

#define DRIVER_NAME		"ARM-CCI PMU"

#define CCI_PMCR		0x0100
#define CCI_PID2		0x0fe8

#define CCI_PMCR_CEN		0x00000001
#define CCI_PMCR_NCNT_MASK	0x0000f800
#define CCI_PMCR_NCNT_SHIFT	11

#define CCI_PID2_REV_MASK	0xf0
#define CCI_PID2_REV_SHIFT	4

#define CCI_PMU_EVT_SEL		0x000
#define CCI_PMU_CNTR		0x004
#define CCI_PMU_CNTR_CTRL	0x008
#define CCI_PMU_OVRFLW		0x00c

#define CCI_PMU_OVRFLW_FLAG	1

#define CCI_PMU_CNTR_SIZE(model)	((model)->cntr_size)
#define CCI_PMU_CNTR_BASE(model, idx)	((idx) * CCI_PMU_CNTR_SIZE(model))
#define CCI_PMU_CNTR_MASK		((1ULL << 32) - 1)
#define CCI_PMU_CNTR_LAST(cci_pmu)	(cci_pmu->num_cntrs - 1)

#define CCI_PMU_MAX_HW_CNTRS(model) \
	((model)->num_hw_cntrs + (model)->fixed_hw_cntrs)

/* Types of interfaces that can generate events */
enum {
	CCI_IF_SLAVE,
	CCI_IF_MASTER,
#ifdef CONFIG_ARM_CCI5xx_PMU
	CCI_IF_GLOBAL,
#endif
	CCI_IF_MAX,
};

#define NUM_HW_CNTRS_CII_4XX	4
#define NUM_HW_CNTRS_CII_5XX	8
#define NUM_HW_CNTRS_MAX	NUM_HW_CNTRS_CII_5XX

#define FIXED_HW_CNTRS_CII_4XX	1
#define FIXED_HW_CNTRS_CII_5XX	0
#define FIXED_HW_CNTRS_MAX	FIXED_HW_CNTRS_CII_4XX

#define HW_CNTRS_MAX		(NUM_HW_CNTRS_MAX + FIXED_HW_CNTRS_MAX)
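/*
 * HW_CNTRS_MAX above bounds the combined fixed + programmable counters
 * across all supported models; it sizes the on-stack counter bitmaps
 * used by cci_pmu_sync_counters() and validate_group() below.
 */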

struct event_range {
	u32 min;
	u32 max;
};

struct cci_pmu_hw_events {
	struct perf_event **events;
	unsigned long *used_mask;
	raw_spinlock_t pmu_lock;
};

struct cci_pmu;
/*
 * struct cci_pmu_model:
 * @fixed_hw_cntrs - Number of fixed event counters
 * @num_hw_cntrs - Maximum number of programmable event counters
 * @cntr_size - Size of an event counter mapping
 */
struct cci_pmu_model {
	char *name;
	u32 fixed_hw_cntrs;
	u32 num_hw_cntrs;
	u32 cntr_size;
	struct attribute **format_attrs;
	struct attribute **event_attrs;
	struct event_range event_ranges[CCI_IF_MAX];
	int (*validate_hw_event)(struct cci_pmu *, unsigned long);
	int (*get_event_idx)(struct cci_pmu *, struct cci_pmu_hw_events *, unsigned long);
	void (*write_counters)(struct cci_pmu *, unsigned long *);
};

static struct cci_pmu_model cci_pmu_models[];

struct cci_pmu {
	void __iomem *base;
	void __iomem *ctrl_base;
	struct pmu pmu;
	int cpu;
	int nr_irqs;
	int *irqs;
	unsigned long active_irqs;
	const struct cci_pmu_model *model;
	struct cci_pmu_hw_events hw_events;
	struct platform_device *plat_device;
	int num_cntrs;
	atomic_t active_events;
	struct mutex reserve_mutex;
};

#define to_cci_pmu(c)	(container_of(c, struct cci_pmu, pmu))

static struct cci_pmu *g_cci_pmu;

enum cci_models {
#ifdef CONFIG_ARM_CCI400_PMU
	CCI400_R0,
	CCI400_R1,
#endif
#ifdef CONFIG_ARM_CCI5xx_PMU
	CCI500_R0,
	CCI550_R0,
#endif
	CCI_MODEL_MAX
};

static void pmu_write_counters(struct cci_pmu *cci_pmu,
			       unsigned long *mask);
static ssize_t __maybe_unused cci_pmu_format_show(struct device *dev,
			struct device_attribute *attr, char *buf);
static ssize_t __maybe_unused cci_pmu_event_show(struct device *dev,
			struct device_attribute *attr, char *buf);

#define CCI_EXT_ATTR_ENTRY(_name, _func, _config)			\
	&((struct dev_ext_attribute[]) {				\
		{ __ATTR(_name, S_IRUGO, _func, NULL), (void *)_config } \
	})[0].attr.attr
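/*
 * Note on the macro above: the compound literal defines an anonymous,
 * one-element array of struct dev_ext_attribute and evaluates to the
 * address of its embedded struct attribute, while stashing _config in
 * ->var for the show() callbacks to retrieve via container_of().
 */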

#define CCI_FORMAT_EXT_ATTR_ENTRY(_name, _config) \
	CCI_EXT_ATTR_ENTRY(_name, cci_pmu_format_show, (char *)_config)
#define CCI_EVENT_EXT_ATTR_ENTRY(_name, _config) \
	CCI_EXT_ATTR_ENTRY(_name, cci_pmu_event_show, (unsigned long)_config)

/* CCI400 PMU Specific definitions */

#ifdef CONFIG_ARM_CCI400_PMU

/* Port ids */
#define CCI400_PORT_S0		0
#define CCI400_PORT_S1		1
#define CCI400_PORT_S2		2
#define CCI400_PORT_S3		3
#define CCI400_PORT_S4		4
#define CCI400_PORT_M0		5
#define CCI400_PORT_M1		6
#define CCI400_PORT_M2		7

#define CCI400_R1_PX		5

/*
 * Instead of an event id to monitor CCI cycles, a dedicated counter is
 * provided. Use 0xff to represent CCI cycles and hope that no future revisions
 * make use of this event in hardware.
 */
enum cci400_perf_events {
	CCI400_PMU_CYCLES = 0xff
};

#define CCI400_PMU_CYCLE_CNTR_IDX	0
#define CCI400_PMU_CNTR0_IDX		1

/*
 * CCI PMU event id is an 8-bit value made of two parts - bits 7:5 for one of 8
 * ports and bits 4:0 are event codes. There are different event codes
 * associated with each port type.
 *
 * Additionally, the range of events associated with the port types changed
 * between Rev0 and Rev1.
 *
 * The constants below define the range of valid codes for each port type for
 * the different revisions and are used to validate the event to be monitored.
 */

#define CCI400_PMU_EVENT_MASK		0xffUL
#define CCI400_PMU_EVENT_SOURCE_SHIFT	5
#define CCI400_PMU_EVENT_SOURCE_MASK	0x7
#define CCI400_PMU_EVENT_CODE_SHIFT	0
#define CCI400_PMU_EVENT_CODE_MASK	0x1f
#define CCI400_PMU_EVENT_SOURCE(event) \
	((event >> CCI400_PMU_EVENT_SOURCE_SHIFT) & \
			CCI400_PMU_EVENT_SOURCE_MASK)
#define CCI400_PMU_EVENT_CODE(event) \
	((event >> CCI400_PMU_EVENT_CODE_SHIFT) & CCI400_PMU_EVENT_CODE_MASK)
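/*
 * Worked example (illustrative): event id 0x64 decodes to source 0x3
 * (slave interface S3) and event code 0x4 (si_rrq_hs_cache_maintenance).
 */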

#define CCI400_R0_SLAVE_PORT_MIN_EV	0x00
#define CCI400_R0_SLAVE_PORT_MAX_EV	0x13
#define CCI400_R0_MASTER_PORT_MIN_EV	0x14
#define CCI400_R0_MASTER_PORT_MAX_EV	0x1a

#define CCI400_R1_SLAVE_PORT_MIN_EV	0x00
#define CCI400_R1_SLAVE_PORT_MAX_EV	0x14
#define CCI400_R1_MASTER_PORT_MIN_EV	0x00
#define CCI400_R1_MASTER_PORT_MAX_EV	0x11

#define CCI400_CYCLE_EVENT_EXT_ATTR_ENTRY(_name, _config) \
	CCI_EXT_ATTR_ENTRY(_name, cci400_pmu_cycle_event_show, \
					(unsigned long)_config)

static ssize_t cci400_pmu_cycle_event_show(struct device *dev,
			struct device_attribute *attr, char *buf);

static struct attribute *cci400_pmu_format_attrs[] = {
	CCI_FORMAT_EXT_ATTR_ENTRY(event, "config:0-4"),
	CCI_FORMAT_EXT_ATTR_ENTRY(source, "config:5-7"),
	NULL
};
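/*
 * Illustrative usage, with an event taken from the r0 table below:
 *   perf stat -a -e CCI_400/source=0x5,event=0x14/
 * counts mi_retry_speculative_fetch on master interface M0.
 */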

static struct attribute *cci400_r0_pmu_event_attrs[] = {
	/* Slave events */
	CCI_EVENT_EXT_ATTR_ENTRY(si_rrq_hs_any, 0x0),
	CCI_EVENT_EXT_ATTR_ENTRY(si_rrq_hs_device, 0x01),
	CCI_EVENT_EXT_ATTR_ENTRY(si_rrq_hs_normal_or_nonshareable, 0x2),
	CCI_EVENT_EXT_ATTR_ENTRY(si_rrq_hs_inner_or_outershareable, 0x3),
	CCI_EVENT_EXT_ATTR_ENTRY(si_rrq_hs_cache_maintenance, 0x4),
	CCI_EVENT_EXT_ATTR_ENTRY(si_rrq_hs_mem_barrier, 0x5),
	CCI_EVENT_EXT_ATTR_ENTRY(si_rrq_hs_sync_barrier, 0x6),
	CCI_EVENT_EXT_ATTR_ENTRY(si_rrq_hs_dvm_msg, 0x7),
	CCI_EVENT_EXT_ATTR_ENTRY(si_rrq_hs_dvm_msg_sync, 0x8),
	CCI_EVENT_EXT_ATTR_ENTRY(si_rrq_stall_tt_full, 0x9),
	CCI_EVENT_EXT_ATTR_ENTRY(si_r_data_last_hs_snoop, 0xA),
	CCI_EVENT_EXT_ATTR_ENTRY(si_r_data_stall_rvalids_h_rready_l, 0xB),
	CCI_EVENT_EXT_ATTR_ENTRY(si_wrq_hs_any, 0xC),
	CCI_EVENT_EXT_ATTR_ENTRY(si_wrq_hs_device, 0xD),
	CCI_EVENT_EXT_ATTR_ENTRY(si_wrq_hs_normal_or_nonshareable, 0xE),
	CCI_EVENT_EXT_ATTR_ENTRY(si_wrq_hs_inner_or_outershare_wback_wclean, 0xF),
	CCI_EVENT_EXT_ATTR_ENTRY(si_wrq_hs_write_unique, 0x10),
	CCI_EVENT_EXT_ATTR_ENTRY(si_wrq_hs_write_line_unique, 0x11),
	CCI_EVENT_EXT_ATTR_ENTRY(si_wrq_hs_evict, 0x12),
	CCI_EVENT_EXT_ATTR_ENTRY(si_wrq_stall_tt_full, 0x13),
	/* Master events */
	CCI_EVENT_EXT_ATTR_ENTRY(mi_retry_speculative_fetch, 0x14),
	CCI_EVENT_EXT_ATTR_ENTRY(mi_rrq_stall_addr_hazard, 0x15),
	CCI_EVENT_EXT_ATTR_ENTRY(mi_rrq_stall_id_hazard, 0x16),
	CCI_EVENT_EXT_ATTR_ENTRY(mi_rrq_stall_tt_full, 0x17),
	CCI_EVENT_EXT_ATTR_ENTRY(mi_rrq_stall_barrier_hazard, 0x18),
	CCI_EVENT_EXT_ATTR_ENTRY(mi_wrq_stall_barrier_hazard, 0x19),
	CCI_EVENT_EXT_ATTR_ENTRY(mi_wrq_stall_tt_full, 0x1A),
	/* Special event for cycles counter */
	CCI400_CYCLE_EVENT_EXT_ATTR_ENTRY(cycles, 0xff),
	NULL
};

static struct attribute *cci400_r1_pmu_event_attrs[] = {
	/* Slave events */
	CCI_EVENT_EXT_ATTR_ENTRY(si_rrq_hs_any, 0x0),
	CCI_EVENT_EXT_ATTR_ENTRY(si_rrq_hs_device, 0x01),
	CCI_EVENT_EXT_ATTR_ENTRY(si_rrq_hs_normal_or_nonshareable, 0x2),
	CCI_EVENT_EXT_ATTR_ENTRY(si_rrq_hs_inner_or_outershareable, 0x3),
	CCI_EVENT_EXT_ATTR_ENTRY(si_rrq_hs_cache_maintenance, 0x4),
	CCI_EVENT_EXT_ATTR_ENTRY(si_rrq_hs_mem_barrier, 0x5),
	CCI_EVENT_EXT_ATTR_ENTRY(si_rrq_hs_sync_barrier, 0x6),
	CCI_EVENT_EXT_ATTR_ENTRY(si_rrq_hs_dvm_msg, 0x7),
	CCI_EVENT_EXT_ATTR_ENTRY(si_rrq_hs_dvm_msg_sync, 0x8),
	CCI_EVENT_EXT_ATTR_ENTRY(si_rrq_stall_tt_full, 0x9),
	CCI_EVENT_EXT_ATTR_ENTRY(si_r_data_last_hs_snoop, 0xA),
	CCI_EVENT_EXT_ATTR_ENTRY(si_r_data_stall_rvalids_h_rready_l, 0xB),
	CCI_EVENT_EXT_ATTR_ENTRY(si_wrq_hs_any, 0xC),
	CCI_EVENT_EXT_ATTR_ENTRY(si_wrq_hs_device, 0xD),
	CCI_EVENT_EXT_ATTR_ENTRY(si_wrq_hs_normal_or_nonshareable, 0xE),
	CCI_EVENT_EXT_ATTR_ENTRY(si_wrq_hs_inner_or_outershare_wback_wclean, 0xF),
	CCI_EVENT_EXT_ATTR_ENTRY(si_wrq_hs_write_unique, 0x10),
	CCI_EVENT_EXT_ATTR_ENTRY(si_wrq_hs_write_line_unique, 0x11),
	CCI_EVENT_EXT_ATTR_ENTRY(si_wrq_hs_evict, 0x12),
	CCI_EVENT_EXT_ATTR_ENTRY(si_wrq_stall_tt_full, 0x13),
	CCI_EVENT_EXT_ATTR_ENTRY(si_rrq_stall_slave_id_hazard, 0x14),
	/* Master events */
	CCI_EVENT_EXT_ATTR_ENTRY(mi_retry_speculative_fetch, 0x0),
	CCI_EVENT_EXT_ATTR_ENTRY(mi_stall_cycle_addr_hazard, 0x1),
	CCI_EVENT_EXT_ATTR_ENTRY(mi_rrq_stall_master_id_hazard, 0x2),
	CCI_EVENT_EXT_ATTR_ENTRY(mi_rrq_stall_hi_prio_rtq_full, 0x3),
	CCI_EVENT_EXT_ATTR_ENTRY(mi_rrq_stall_barrier_hazard, 0x4),
	CCI_EVENT_EXT_ATTR_ENTRY(mi_wrq_stall_barrier_hazard, 0x5),
	CCI_EVENT_EXT_ATTR_ENTRY(mi_wrq_stall_wtq_full, 0x6),
	CCI_EVENT_EXT_ATTR_ENTRY(mi_rrq_stall_low_prio_rtq_full, 0x7),
	CCI_EVENT_EXT_ATTR_ENTRY(mi_rrq_stall_mid_prio_rtq_full, 0x8),
	CCI_EVENT_EXT_ATTR_ENTRY(mi_rrq_stall_qvn_vn0, 0x9),
	CCI_EVENT_EXT_ATTR_ENTRY(mi_rrq_stall_qvn_vn1, 0xA),
	CCI_EVENT_EXT_ATTR_ENTRY(mi_rrq_stall_qvn_vn2, 0xB),
	CCI_EVENT_EXT_ATTR_ENTRY(mi_rrq_stall_qvn_vn3, 0xC),
	CCI_EVENT_EXT_ATTR_ENTRY(mi_wrq_stall_qvn_vn0, 0xD),
	CCI_EVENT_EXT_ATTR_ENTRY(mi_wrq_stall_qvn_vn1, 0xE),
	CCI_EVENT_EXT_ATTR_ENTRY(mi_wrq_stall_qvn_vn2, 0xF),
	CCI_EVENT_EXT_ATTR_ENTRY(mi_wrq_stall_qvn_vn3, 0x10),
	CCI_EVENT_EXT_ATTR_ENTRY(mi_wrq_unique_or_line_unique_addr_hazard, 0x11),
	/* Special event for cycles counter */
	CCI400_CYCLE_EVENT_EXT_ATTR_ENTRY(cycles, 0xff),
	NULL
};

static ssize_t cci400_pmu_cycle_event_show(struct device *dev,
			struct device_attribute *attr, char *buf)
{
	struct dev_ext_attribute *eattr = container_of(attr,
				struct dev_ext_attribute, attr);
	return sysfs_emit(buf, "config=0x%lx\n", (unsigned long)eattr->var);
}

static int cci400_get_event_idx(struct cci_pmu *cci_pmu,
				struct cci_pmu_hw_events *hw,
				unsigned long cci_event)
{
	int idx;

	/* cycles event idx is fixed */
	if (cci_event == CCI400_PMU_CYCLES) {
		if (test_and_set_bit(CCI400_PMU_CYCLE_CNTR_IDX, hw->used_mask))
			return -EAGAIN;

		return CCI400_PMU_CYCLE_CNTR_IDX;
	}

	for (idx = CCI400_PMU_CNTR0_IDX; idx <= CCI_PMU_CNTR_LAST(cci_pmu); ++idx)
		if (!test_and_set_bit(idx, hw->used_mask))
			return idx;

	/* No counters available */
	return -EAGAIN;
}

static int cci400_validate_hw_event(struct cci_pmu *cci_pmu, unsigned long hw_event)
{
	u8 ev_source = CCI400_PMU_EVENT_SOURCE(hw_event);
	u8 ev_code = CCI400_PMU_EVENT_CODE(hw_event);
	int if_type;

	if (hw_event & ~CCI400_PMU_EVENT_MASK)
		return -ENOENT;

	if (hw_event == CCI400_PMU_CYCLES)
		return hw_event;

	switch (ev_source) {
	case CCI400_PORT_S0:
	case CCI400_PORT_S1:
	case CCI400_PORT_S2:
	case CCI400_PORT_S3:
	case CCI400_PORT_S4:
		/* Slave Interface */
		if_type = CCI_IF_SLAVE;
		break;
	case CCI400_PORT_M0:
	case CCI400_PORT_M1:
	case CCI400_PORT_M2:
		/* Master Interface */
		if_type = CCI_IF_MASTER;
		break;
	default:
		return -ENOENT;
	}

	if (ev_code >= cci_pmu->model->event_ranges[if_type].min &&
	    ev_code <= cci_pmu->model->event_ranges[if_type].max)
		return hw_event;

	return -ENOENT;
}

static int probe_cci400_revision(struct cci_pmu *cci_pmu)
{
	int rev;
	rev = readl_relaxed(cci_pmu->ctrl_base + CCI_PID2) & CCI_PID2_REV_MASK;
	rev >>= CCI_PID2_REV_SHIFT;

	if (rev < CCI400_R1_PX)
		return CCI400_R0;
	else
		return CCI400_R1;
}

static const struct cci_pmu_model *probe_cci_model(struct cci_pmu *cci_pmu)
{
	if (platform_has_secure_cci_access())
		return &cci_pmu_models[probe_cci400_revision(cci_pmu)];
	return NULL;
}
#else	/* !CONFIG_ARM_CCI400_PMU */
static inline struct cci_pmu_model *probe_cci_model(struct cci_pmu *cci_pmu)
{
	return NULL;
}
#endif	/* CONFIG_ARM_CCI400_PMU */

#ifdef CONFIG_ARM_CCI5xx_PMU

/*
 * CCI5xx PMU event id is a 9-bit value made of two parts.
 *	bits [8:5] - Source for the event
 *	bits [4:0] - Event code (specific to type of interface)
 */

/* Port ids */
#define CCI5xx_PORT_S0			0x0
#define CCI5xx_PORT_S1			0x1
#define CCI5xx_PORT_S2			0x2
#define CCI5xx_PORT_S3			0x3
#define CCI5xx_PORT_S4			0x4
#define CCI5xx_PORT_S5			0x5
#define CCI5xx_PORT_S6			0x6

#define CCI5xx_PORT_M0			0x8
#define CCI5xx_PORT_M1			0x9
#define CCI5xx_PORT_M2			0xa
#define CCI5xx_PORT_M3			0xb
#define CCI5xx_PORT_M4			0xc
#define CCI5xx_PORT_M5			0xd
#define CCI5xx_PORT_M6			0xe

#define CCI5xx_PORT_GLOBAL		0xf

#define CCI5xx_PMU_EVENT_MASK		0x1ffUL
#define CCI5xx_PMU_EVENT_SOURCE_SHIFT	0x5
#define CCI5xx_PMU_EVENT_SOURCE_MASK	0xf
#define CCI5xx_PMU_EVENT_CODE_SHIFT	0x0
#define CCI5xx_PMU_EVENT_CODE_MASK	0x1f

#define CCI5xx_PMU_EVENT_SOURCE(event) \
	((event >> CCI5xx_PMU_EVENT_SOURCE_SHIFT) & CCI5xx_PMU_EVENT_SOURCE_MASK)
#define CCI5xx_PMU_EVENT_CODE(event) \
	((event >> CCI5xx_PMU_EVENT_CODE_SHIFT) & CCI5xx_PMU_EVENT_CODE_MASK)
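/*
 * Worked example (illustrative): event id 0x1e2 decodes to source 0xf
 * (the global interface) and event code 0x2.
 */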

#define CCI5xx_SLAVE_PORT_MIN_EV	0x00
#define CCI5xx_SLAVE_PORT_MAX_EV	0x1f
#define CCI5xx_MASTER_PORT_MIN_EV	0x00
#define CCI5xx_MASTER_PORT_MAX_EV	0x06
#define CCI5xx_GLOBAL_PORT_MIN_EV	0x00
#define CCI5xx_GLOBAL_PORT_MAX_EV	0x0f

#define CCI5xx_GLOBAL_EVENT_EXT_ATTR_ENTRY(_name, _config) \
	CCI_EXT_ATTR_ENTRY(_name, cci5xx_pmu_global_event_show, \
					(unsigned long)_config)

static ssize_t cci5xx_pmu_global_event_show(struct device *dev,
				struct device_attribute *attr, char *buf);

static struct attribute *cci5xx_pmu_format_attrs[] = {
	CCI_FORMAT_EXT_ATTR_ENTRY(event, "config:0-4"),
	CCI_FORMAT_EXT_ATTR_ENTRY(source, "config:5-8"),
	NULL,
};

static struct attribute *cci5xx_pmu_event_attrs[] = {
	/* Slave events */
	CCI_EVENT_EXT_ATTR_ENTRY(si_rrq_hs_arvalid, 0x0),
	CCI_EVENT_EXT_ATTR_ENTRY(si_rrq_dev, 0x1),
	CCI_EVENT_EXT_ATTR_ENTRY(si_rrq_hs_nonshareable, 0x2),
	CCI_EVENT_EXT_ATTR_ENTRY(si_rrq_hs_shareable_non_alloc, 0x3),
	CCI_EVENT_EXT_ATTR_ENTRY(si_rrq_hs_shareable_alloc, 0x4),
	CCI_EVENT_EXT_ATTR_ENTRY(si_rrq_hs_invalidate, 0x5),
	CCI_EVENT_EXT_ATTR_ENTRY(si_rrq_hs_cache_maint, 0x6),
	CCI_EVENT_EXT_ATTR_ENTRY(si_rrq_hs_dvm_msg, 0x7),
	CCI_EVENT_EXT_ATTR_ENTRY(si_rrq_hs_rval, 0x8),
	CCI_EVENT_EXT_ATTR_ENTRY(si_rrq_hs_rlast_snoop, 0x9),
	CCI_EVENT_EXT_ATTR_ENTRY(si_wrq_hs_awalid, 0xA),
	CCI_EVENT_EXT_ATTR_ENTRY(si_wrq_dev, 0xB),
	CCI_EVENT_EXT_ATTR_ENTRY(si_wrq_non_shareable, 0xC),
	CCI_EVENT_EXT_ATTR_ENTRY(si_wrq_share_wb, 0xD),
	CCI_EVENT_EXT_ATTR_ENTRY(si_wrq_share_wlu, 0xE),
	CCI_EVENT_EXT_ATTR_ENTRY(si_wrq_share_wunique, 0xF),
	CCI_EVENT_EXT_ATTR_ENTRY(si_wrq_evict, 0x10),
	CCI_EVENT_EXT_ATTR_ENTRY(si_wrq_wrevict, 0x11),
	CCI_EVENT_EXT_ATTR_ENTRY(si_w_data_beat, 0x12),
	CCI_EVENT_EXT_ATTR_ENTRY(si_srq_acvalid, 0x13),
	CCI_EVENT_EXT_ATTR_ENTRY(si_srq_read, 0x14),
	CCI_EVENT_EXT_ATTR_ENTRY(si_srq_clean, 0x15),
	CCI_EVENT_EXT_ATTR_ENTRY(si_srq_data_transfer_low, 0x16),
	CCI_EVENT_EXT_ATTR_ENTRY(si_rrq_stall_arvalid, 0x17),
	CCI_EVENT_EXT_ATTR_ENTRY(si_r_data_stall, 0x18),
	CCI_EVENT_EXT_ATTR_ENTRY(si_wrq_stall, 0x19),
	CCI_EVENT_EXT_ATTR_ENTRY(si_w_data_stall, 0x1A),
	CCI_EVENT_EXT_ATTR_ENTRY(si_w_resp_stall, 0x1B),
	CCI_EVENT_EXT_ATTR_ENTRY(si_srq_stall, 0x1C),
	CCI_EVENT_EXT_ATTR_ENTRY(si_s_data_stall, 0x1D),
	CCI_EVENT_EXT_ATTR_ENTRY(si_rq_stall_ot_limit, 0x1E),
	CCI_EVENT_EXT_ATTR_ENTRY(si_r_stall_arbit, 0x1F),

	/* Master events */
	CCI_EVENT_EXT_ATTR_ENTRY(mi_r_data_beat_any, 0x0),
	CCI_EVENT_EXT_ATTR_ENTRY(mi_w_data_beat_any, 0x1),
	CCI_EVENT_EXT_ATTR_ENTRY(mi_rrq_stall, 0x2),
	CCI_EVENT_EXT_ATTR_ENTRY(mi_r_data_stall, 0x3),
	CCI_EVENT_EXT_ATTR_ENTRY(mi_wrq_stall, 0x4),
	CCI_EVENT_EXT_ATTR_ENTRY(mi_w_data_stall, 0x5),
	CCI_EVENT_EXT_ATTR_ENTRY(mi_w_resp_stall, 0x6),

	/* Global events */
	CCI5xx_GLOBAL_EVENT_EXT_ATTR_ENTRY(cci_snoop_access_filter_bank_0_1, 0x0),
	CCI5xx_GLOBAL_EVENT_EXT_ATTR_ENTRY(cci_snoop_access_filter_bank_2_3, 0x1),
	CCI5xx_GLOBAL_EVENT_EXT_ATTR_ENTRY(cci_snoop_access_filter_bank_4_5, 0x2),
	CCI5xx_GLOBAL_EVENT_EXT_ATTR_ENTRY(cci_snoop_access_filter_bank_6_7, 0x3),
	CCI5xx_GLOBAL_EVENT_EXT_ATTR_ENTRY(cci_snoop_access_miss_filter_bank_0_1, 0x4),
	CCI5xx_GLOBAL_EVENT_EXT_ATTR_ENTRY(cci_snoop_access_miss_filter_bank_2_3, 0x5),
	CCI5xx_GLOBAL_EVENT_EXT_ATTR_ENTRY(cci_snoop_access_miss_filter_bank_4_5, 0x6),
	CCI5xx_GLOBAL_EVENT_EXT_ATTR_ENTRY(cci_snoop_access_miss_filter_bank_6_7, 0x7),
	CCI5xx_GLOBAL_EVENT_EXT_ATTR_ENTRY(cci_snoop_back_invalidation, 0x8),
	CCI5xx_GLOBAL_EVENT_EXT_ATTR_ENTRY(cci_snoop_stall_alloc_busy, 0x9),
	CCI5xx_GLOBAL_EVENT_EXT_ATTR_ENTRY(cci_snoop_stall_tt_full, 0xA),
	CCI5xx_GLOBAL_EVENT_EXT_ATTR_ENTRY(cci_wrq, 0xB),
	CCI5xx_GLOBAL_EVENT_EXT_ATTR_ENTRY(cci_snoop_cd_hs, 0xC),
	CCI5xx_GLOBAL_EVENT_EXT_ATTR_ENTRY(cci_rq_stall_addr_hazard, 0xD),
	CCI5xx_GLOBAL_EVENT_EXT_ATTR_ENTRY(cci_snoop_rq_stall_tt_full, 0xE),
	CCI5xx_GLOBAL_EVENT_EXT_ATTR_ENTRY(cci_snoop_rq_tzmp1_prot, 0xF),
	NULL
};

static ssize_t cci5xx_pmu_global_event_show(struct device *dev,
				struct device_attribute *attr, char *buf)
{
	struct dev_ext_attribute *eattr = container_of(attr,
					struct dev_ext_attribute, attr);
	/* Global events have single fixed source code */
	return sysfs_emit(buf, "event=0x%lx,source=0x%x\n",
			  (unsigned long)eattr->var, CCI5xx_PORT_GLOBAL);
}
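
/*
 * Illustrative usage: perf stat -a -e CCI_500/source=0xf,event=0x0/
 * counts cci_snoop_access_filter_bank_0_1 on the global interface.
 */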

/*
 * CCI500 provides 8 independent event counters that can count
 * any of the events available.
 * CCI500 PMU event source ids
 *	0x0-0x6 - Slave interfaces
 *	0x8-0xD - Master interfaces
 *	0xf     - Global Events
 *	0x7,0xe - Reserved
 */
static int cci500_validate_hw_event(struct cci_pmu *cci_pmu,
				    unsigned long hw_event)
{
	u32 ev_source = CCI5xx_PMU_EVENT_SOURCE(hw_event);
	u32 ev_code = CCI5xx_PMU_EVENT_CODE(hw_event);
	int if_type;

	if (hw_event & ~CCI5xx_PMU_EVENT_MASK)
		return -ENOENT;

	switch (ev_source) {
	case CCI5xx_PORT_S0:
	case CCI5xx_PORT_S1:
	case CCI5xx_PORT_S2:
	case CCI5xx_PORT_S3:
	case CCI5xx_PORT_S4:
	case CCI5xx_PORT_S5:
	case CCI5xx_PORT_S6:
		if_type = CCI_IF_SLAVE;
		break;
	case CCI5xx_PORT_M0:
	case CCI5xx_PORT_M1:
	case CCI5xx_PORT_M2:
	case CCI5xx_PORT_M3:
	case CCI5xx_PORT_M4:
	case CCI5xx_PORT_M5:
		if_type = CCI_IF_MASTER;
		break;
	case CCI5xx_PORT_GLOBAL:
		if_type = CCI_IF_GLOBAL;
		break;
	default:
		return -ENOENT;
	}

	if (ev_code >= cci_pmu->model->event_ranges[if_type].min &&
	    ev_code <= cci_pmu->model->event_ranges[if_type].max)
		return hw_event;

	return -ENOENT;
}

/*
 * CCI550 provides 8 independent event counters that can count
 * any of the events available.
 * CCI550 PMU event source ids
 *	0x0-0x6 - Slave interfaces
 *	0x8-0xe - Master interfaces
 *	0xf     - Global Events
 *	0x7     - Reserved
 */
static int cci550_validate_hw_event(struct cci_pmu *cci_pmu,
				    unsigned long hw_event)
{
	u32 ev_source = CCI5xx_PMU_EVENT_SOURCE(hw_event);
	u32 ev_code = CCI5xx_PMU_EVENT_CODE(hw_event);
	int if_type;

	if (hw_event & ~CCI5xx_PMU_EVENT_MASK)
		return -ENOENT;

	switch (ev_source) {
	case CCI5xx_PORT_S0:
	case CCI5xx_PORT_S1:
	case CCI5xx_PORT_S2:
	case CCI5xx_PORT_S3:
	case CCI5xx_PORT_S4:
	case CCI5xx_PORT_S5:
	case CCI5xx_PORT_S6:
		if_type = CCI_IF_SLAVE;
		break;
	case CCI5xx_PORT_M0:
	case CCI5xx_PORT_M1:
	case CCI5xx_PORT_M2:
	case CCI5xx_PORT_M3:
	case CCI5xx_PORT_M4:
	case CCI5xx_PORT_M5:
	case CCI5xx_PORT_M6:
		if_type = CCI_IF_MASTER;
		break;
	case CCI5xx_PORT_GLOBAL:
		if_type = CCI_IF_GLOBAL;
		break;
	default:
		return -ENOENT;
	}

	if (ev_code >= cci_pmu->model->event_ranges[if_type].min &&
	    ev_code <= cci_pmu->model->event_ranges[if_type].max)
		return hw_event;

	return -ENOENT;
}

#endif	/* CONFIG_ARM_CCI5xx_PMU */

/*
 * Program the CCI PMU counters which have PERF_HES_ARCH set
 * with the event period and mark them ready before we enable
 * the PMU.
 */
static void cci_pmu_sync_counters(struct cci_pmu *cci_pmu)
{
	int i;
	struct cci_pmu_hw_events *cci_hw = &cci_pmu->hw_events;
	DECLARE_BITMAP(mask, HW_CNTRS_MAX);

	bitmap_zero(mask, HW_CNTRS_MAX);
	for_each_set_bit(i, cci_pmu->hw_events.used_mask, cci_pmu->num_cntrs) {
		struct perf_event *event = cci_hw->events[i];

		if (WARN_ON(!event))
			continue;

		/* Leave the events which are not counting */
		if (event->hw.state & PERF_HES_STOPPED)
			continue;
		if (event->hw.state & PERF_HES_ARCH) {
			__set_bit(i, mask);
			event->hw.state &= ~PERF_HES_ARCH;
		}
	}

	pmu_write_counters(cci_pmu, mask);
}

/* Should be called with cci_pmu->hw_events->pmu_lock held */
static void __cci_pmu_enable_nosync(struct cci_pmu *cci_pmu)
{
	u32 val;

	/* Enable all the PMU counters. */
	val = readl_relaxed(cci_pmu->ctrl_base + CCI_PMCR) | CCI_PMCR_CEN;
	writel(val, cci_pmu->ctrl_base + CCI_PMCR);
}

/* Should be called with cci_pmu->hw_events->pmu_lock held */
static void __cci_pmu_enable_sync(struct cci_pmu *cci_pmu)
{
	cci_pmu_sync_counters(cci_pmu);
	__cci_pmu_enable_nosync(cci_pmu);
}

/* Should be called with cci_pmu->hw_events->pmu_lock held */
static void __cci_pmu_disable(struct cci_pmu *cci_pmu)
{
	u32 val;

	/* Disable all the PMU counters. */
	val = readl_relaxed(cci_pmu->ctrl_base + CCI_PMCR) & ~CCI_PMCR_CEN;
	writel(val, cci_pmu->ctrl_base + CCI_PMCR);
}

static ssize_t cci_pmu_format_show(struct device *dev,
			struct device_attribute *attr, char *buf)
{
	struct dev_ext_attribute *eattr = container_of(attr,
				struct dev_ext_attribute, attr);
	return sysfs_emit(buf, "%s\n", (char *)eattr->var);
}

static ssize_t cci_pmu_event_show(struct device *dev,
			struct device_attribute *attr, char *buf)
{
	struct dev_ext_attribute *eattr = container_of(attr,
				struct dev_ext_attribute, attr);
	/* source parameter is mandatory for normal PMU events */
	return sysfs_emit(buf, "source=?,event=0x%lx\n",
			  (unsigned long)eattr->var);
}

static int pmu_is_valid_counter(struct cci_pmu *cci_pmu, int idx)
{
	return 0 <= idx && idx <= CCI_PMU_CNTR_LAST(cci_pmu);
}

static u32 pmu_read_register(struct cci_pmu *cci_pmu, int idx, unsigned int offset)
{
	return readl_relaxed(cci_pmu->base +
			     CCI_PMU_CNTR_BASE(cci_pmu->model, idx) + offset);
}

static void pmu_write_register(struct cci_pmu *cci_pmu, u32 value,
			       int idx, unsigned int offset)
{
	writel_relaxed(value, cci_pmu->base +
		       CCI_PMU_CNTR_BASE(cci_pmu->model, idx) + offset);
}

static void pmu_disable_counter(struct cci_pmu *cci_pmu, int idx)
{
	pmu_write_register(cci_pmu, 0, idx, CCI_PMU_CNTR_CTRL);
}

static void pmu_enable_counter(struct cci_pmu *cci_pmu, int idx)
{
	pmu_write_register(cci_pmu, 1, idx, CCI_PMU_CNTR_CTRL);
}

static bool __maybe_unused
pmu_counter_is_enabled(struct cci_pmu *cci_pmu, int idx)
{
	return (pmu_read_register(cci_pmu, idx, CCI_PMU_CNTR_CTRL) & 0x1) != 0;
}

static void pmu_set_event(struct cci_pmu *cci_pmu, int idx, unsigned long event)
{
	pmu_write_register(cci_pmu, event, idx, CCI_PMU_EVT_SEL);
}

/*
 * For all counters on the CCI-PMU, disable any 'enabled' counters,
 * saving the changed counters in the mask, so that we can restore
 * it later using pmu_restore_counters. The mask is private to the
 * caller. We cannot rely on the used_mask maintained by the CCI_PMU
 * as it only tells us if the counter is assigned to a perf_event or not.
 * The state of the perf_event cannot be locked by the PMU layer, hence
 * we check the individual counter status (which can be locked by
 * cci_pmu->hw_events->pmu_lock).
 *
 * @mask should be initialised to empty by the caller.
 */
static void __maybe_unused
pmu_save_counters(struct cci_pmu *cci_pmu, unsigned long *mask)
{
	int i;

	for (i = 0; i < cci_pmu->num_cntrs; i++) {
		if (pmu_counter_is_enabled(cci_pmu, i)) {
			set_bit(i, mask);
			pmu_disable_counter(cci_pmu, i);
		}
	}
}

/*
 * Restore the status of the counters. Reversal of the pmu_save_counters().
 * For each counter set in the mask, enable the counter back.
 */
static void __maybe_unused
pmu_restore_counters(struct cci_pmu *cci_pmu, unsigned long *mask)
{
	int i;

	for_each_set_bit(i, mask, cci_pmu->num_cntrs)
		pmu_enable_counter(cci_pmu, i);
}

/*
 * Returns the number of programmable counters actually implemented
 * by the CCI.
 */
static u32 pmu_get_max_counters(struct cci_pmu *cci_pmu)
{
	return (readl_relaxed(cci_pmu->ctrl_base + CCI_PMCR) &
		CCI_PMCR_NCNT_MASK) >> CCI_PMCR_NCNT_SHIFT;
}

static int pmu_get_event_idx(struct cci_pmu_hw_events *hw, struct perf_event *event)
{
	struct cci_pmu *cci_pmu = to_cci_pmu(event->pmu);
	unsigned long cci_event = event->hw.config_base;
	int idx;

	if (cci_pmu->model->get_event_idx)
		return cci_pmu->model->get_event_idx(cci_pmu, hw, cci_event);

	/* Generic code to find an unused idx from the mask */
	for (idx = 0; idx <= CCI_PMU_CNTR_LAST(cci_pmu); idx++)
		if (!test_and_set_bit(idx, hw->used_mask))
			return idx;

	/* No counters available */
	return -EAGAIN;
}

static int pmu_map_event(struct perf_event *event)
{
	struct cci_pmu *cci_pmu = to_cci_pmu(event->pmu);

	if (event->attr.type < PERF_TYPE_MAX ||
	    !cci_pmu->model->validate_hw_event)
		return -ENOENT;

	return cci_pmu->model->validate_hw_event(cci_pmu, event->attr.config);
}

static int pmu_request_irq(struct cci_pmu *cci_pmu, irq_handler_t handler)
{
	int i;
	struct platform_device *pmu_device = cci_pmu->plat_device;

	if (unlikely(!pmu_device))
		return -ENODEV;

	if (cci_pmu->nr_irqs < 1) {
		dev_err(&pmu_device->dev, "no irqs for CCI PMUs defined\n");
		return -ENODEV;
	}

	/*
	 * Register all available CCI PMU interrupts. In the interrupt handler
	 * we iterate over the counters checking for interrupt source (the
	 * overflowing counter) and clear it.
	 *
	 * This should allow handling of non-unique interrupt for the counters.
	 */
	for (i = 0; i < cci_pmu->nr_irqs; i++) {
		int err = request_irq(cci_pmu->irqs[i], handler, IRQF_SHARED,
				"arm-cci-pmu", cci_pmu);
		if (err) {
			dev_err(&pmu_device->dev, "unable to request IRQ%d for ARM CCI PMU counters\n",
				cci_pmu->irqs[i]);
			return err;
		}

		set_bit(i, &cci_pmu->active_irqs);
	}

	return 0;
}

static void pmu_free_irq(struct cci_pmu *cci_pmu)
{
	int i;

	for (i = 0; i < cci_pmu->nr_irqs; i++) {
		if (!test_and_clear_bit(i, &cci_pmu->active_irqs))
			continue;

		free_irq(cci_pmu->irqs[i], cci_pmu);
	}
}

static u32 pmu_read_counter(struct perf_event *event)
{
	struct cci_pmu *cci_pmu = to_cci_pmu(event->pmu);
	struct hw_perf_event *hw_counter = &event->hw;
	int idx = hw_counter->idx;
	u32 value;

	if (unlikely(!pmu_is_valid_counter(cci_pmu, idx))) {
		dev_err(&cci_pmu->plat_device->dev, "Invalid CCI PMU counter %d\n", idx);
		return 0;
	}
	value = pmu_read_register(cci_pmu, idx, CCI_PMU_CNTR);

	return value;
}

static void pmu_write_counter(struct cci_pmu *cci_pmu, u32 value, int idx)
{
	pmu_write_register(cci_pmu, value, idx, CCI_PMU_CNTR);
}

static void __pmu_write_counters(struct cci_pmu *cci_pmu, unsigned long *mask)
{
	int i;
	struct cci_pmu_hw_events *cci_hw = &cci_pmu->hw_events;

	for_each_set_bit(i, mask, cci_pmu->num_cntrs) {
		struct perf_event *event = cci_hw->events[i];

		if (WARN_ON(!event))
			continue;
		pmu_write_counter(cci_pmu, local64_read(&event->hw.prev_count), i);
	}
}

static void pmu_write_counters(struct cci_pmu *cci_pmu, unsigned long *mask)
{
	if (cci_pmu->model->write_counters)
		cci_pmu->model->write_counters(cci_pmu, mask);
	else
		__pmu_write_counters(cci_pmu, mask);
}

#ifdef CONFIG_ARM_CCI5xx_PMU

/*
 * CCI-500/CCI-550 has advanced power saving policies, which could gate the
 * clocks to the PMU counters, which makes the writes to them ineffective.
 * The only way to write to those counters is when the global counters
 * are enabled and the particular counter is enabled.
 *
 * So we do the following:
 *
 * 1) Disable all the PMU counters, saving their current state
 * 2) Enable the global PMU profiling, now that all counters are
 *    disabled.
 *
 * For each counter to be programmed, repeat steps 3-7:
 *
 * 3) Write an invalid event code to the event control register for the
 *    counter, so that the counters are not modified.
 * 4) Enable the counter control for the counter.
 * 5) Set the counter value
 * 6) Disable the counter
 * 7) Restore the event in the target counter
 *
 * 8) Disable the global PMU.
 * 9) Restore the status of the rest of the counters.
 *
 * We choose an event which for CCI-5xx is guaranteed not to count.
 * We use the highest possible event code (0x1f) for the master interface 0.
 */
#define CCI5xx_INVALID_EVENT	((CCI5xx_PORT_M0 << CCI5xx_PMU_EVENT_SOURCE_SHIFT) | \
				 (CCI5xx_PMU_EVENT_CODE_MASK << CCI5xx_PMU_EVENT_CODE_SHIFT))
static void cci5xx_pmu_write_counters(struct cci_pmu *cci_pmu, unsigned long *mask)
{
	int i;
	DECLARE_BITMAP(saved_mask, HW_CNTRS_MAX);

	bitmap_zero(saved_mask, cci_pmu->num_cntrs);
	pmu_save_counters(cci_pmu, saved_mask);

	/*
	 * Now that all the counters are disabled, we can safely turn the PMU on,
	 * without syncing the status of the counters
	 */
	__cci_pmu_enable_nosync(cci_pmu);

	for_each_set_bit(i, mask, cci_pmu->num_cntrs) {
		struct perf_event *event = cci_pmu->hw_events.events[i];

		if (WARN_ON(!event))
			continue;

		pmu_set_event(cci_pmu, i, CCI5xx_INVALID_EVENT);
		pmu_enable_counter(cci_pmu, i);
		pmu_write_counter(cci_pmu, local64_read(&event->hw.prev_count), i);
		pmu_disable_counter(cci_pmu, i);
		pmu_set_event(cci_pmu, i, event->hw.config_base);
	}

	__cci_pmu_disable(cci_pmu);

	pmu_restore_counters(cci_pmu, saved_mask);
}

#endif	/* CONFIG_ARM_CCI5xx_PMU */

static u64 pmu_event_update(struct perf_event *event)
{
	struct hw_perf_event *hwc = &event->hw;
	u64 delta, prev_raw_count, new_raw_count;

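	/*
	 * Read the hardware counter and fold the delta into the event
	 * count. If the overflow interrupt updates prev_count between
	 * our read and the cmpxchg, the cmpxchg fails and we retry.
	 */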
	do {
		prev_raw_count = local64_read(&hwc->prev_count);
		new_raw_count = pmu_read_counter(event);
	} while (local64_cmpxchg(&hwc->prev_count, prev_raw_count,
		 new_raw_count) != prev_raw_count);

	delta = (new_raw_count - prev_raw_count) & CCI_PMU_CNTR_MASK;

	local64_add(delta, &event->count);

	return new_raw_count;
}

static void pmu_read(struct perf_event *event)
{
	pmu_event_update(event);
}

static void pmu_event_set_period(struct perf_event *event)
{
	struct hw_perf_event *hwc = &event->hw;
	/*
	 * The CCI PMU counters have a period of 2^32. To account for the
	 * possibility of extreme interrupt latency we program for a period of
	 * half that. Hopefully we can handle the interrupt before another 2^31
	 * events occur and the counter overtakes its previous value.
	 */
	u64 val = 1ULL << 31;
	local64_set(&hwc->prev_count, val);

	/*
	 * The CCI PMU uses PERF_HES_ARCH to keep track of the counters whose
	 * values need to be synced with the s/w state before the PMU is
	 * enabled.
	 * Mark this counter for sync.
	 */
	hwc->state |= PERF_HES_ARCH;
}

static irqreturn_t pmu_handle_irq(int irq_num, void *dev)
{
	struct cci_pmu *cci_pmu = dev;
	struct cci_pmu_hw_events *events = &cci_pmu->hw_events;
	int idx, handled = IRQ_NONE;

	raw_spin_lock(&events->pmu_lock);

	/* Disable the PMU while we walk through the counters */
	__cci_pmu_disable(cci_pmu);
	/*
	 * Iterate over counters and update the corresponding perf events.
	 * This should work regardless of whether we have per-counter overflow
	 * interrupt or a combined overflow interrupt.
	 */
	for (idx = 0; idx <= CCI_PMU_CNTR_LAST(cci_pmu); idx++) {
		struct perf_event *event = events->events[idx];

		if (!event)
			continue;

		/* Did this counter overflow? */
		if (!(pmu_read_register(cci_pmu, idx, CCI_PMU_OVRFLW) &
		      CCI_PMU_OVRFLW_FLAG))
			continue;

		pmu_write_register(cci_pmu, CCI_PMU_OVRFLW_FLAG, idx,
				   CCI_PMU_OVRFLW);

		pmu_event_update(event);
		pmu_event_set_period(event);
		handled = IRQ_HANDLED;
	}

	/* Enable the PMU and sync possibly overflowed counters */
	__cci_pmu_enable_sync(cci_pmu);
	raw_spin_unlock(&events->pmu_lock);

	return IRQ_RETVAL(handled);
}

static int cci_pmu_get_hw(struct cci_pmu *cci_pmu)
{
	int ret = pmu_request_irq(cci_pmu, pmu_handle_irq);
	if (ret) {
		pmu_free_irq(cci_pmu);
		return ret;
	}
	return 0;
}

static void cci_pmu_put_hw(struct cci_pmu *cci_pmu)
{
	pmu_free_irq(cci_pmu);
}

static void hw_perf_event_destroy(struct perf_event *event)
{
	struct cci_pmu *cci_pmu = to_cci_pmu(event->pmu);
	atomic_t *active_events = &cci_pmu->active_events;
	struct mutex *reserve_mutex = &cci_pmu->reserve_mutex;

	if (atomic_dec_and_mutex_lock(active_events, reserve_mutex)) {
		cci_pmu_put_hw(cci_pmu);
		mutex_unlock(reserve_mutex);
	}
}

static void cci_pmu_enable(struct pmu *pmu)
{
	struct cci_pmu *cci_pmu = to_cci_pmu(pmu);
	struct cci_pmu_hw_events *hw_events = &cci_pmu->hw_events;
	bool enabled = !bitmap_empty(hw_events->used_mask, cci_pmu->num_cntrs);
	unsigned long flags;

	if (!enabled)
		return;

	raw_spin_lock_irqsave(&hw_events->pmu_lock, flags);
	__cci_pmu_enable_sync(cci_pmu);
	raw_spin_unlock_irqrestore(&hw_events->pmu_lock, flags);
}

static void cci_pmu_disable(struct pmu *pmu)
{
	struct cci_pmu *cci_pmu = to_cci_pmu(pmu);
	struct cci_pmu_hw_events *hw_events = &cci_pmu->hw_events;
	unsigned long flags;

	raw_spin_lock_irqsave(&hw_events->pmu_lock, flags);
	__cci_pmu_disable(cci_pmu);
	raw_spin_unlock_irqrestore(&hw_events->pmu_lock, flags);
}

/*
 * Check if the idx represents a non-programmable counter.
 * All the fixed event counters are mapped before the programmable
 * counters.
 */
static bool pmu_fixed_hw_idx(struct cci_pmu *cci_pmu, int idx)
{
	return (idx >= 0) && (idx < cci_pmu->model->fixed_hw_cntrs);
}

static void cci_pmu_start(struct perf_event *event, int pmu_flags)
{
	struct cci_pmu *cci_pmu = to_cci_pmu(event->pmu);
	struct cci_pmu_hw_events *hw_events = &cci_pmu->hw_events;
	struct hw_perf_event *hwc = &event->hw;
	int idx = hwc->idx;
	unsigned long flags;

	/*
	 * To handle interrupt latency, we always reprogram the period
	 * regardless of PERF_EF_RELOAD.
	 */
	if (pmu_flags & PERF_EF_RELOAD)
		WARN_ON_ONCE(!(hwc->state & PERF_HES_UPTODATE));

	hwc->state = 0;

	if (unlikely(!pmu_is_valid_counter(cci_pmu, idx))) {
		dev_err(&cci_pmu->plat_device->dev, "Invalid CCI PMU counter %d\n", idx);
		return;
	}

	raw_spin_lock_irqsave(&hw_events->pmu_lock, flags);

	/* Configure the counter unless you are counting a fixed event */
	if (!pmu_fixed_hw_idx(cci_pmu, idx))
		pmu_set_event(cci_pmu, idx, hwc->config_base);

	pmu_event_set_period(event);
	pmu_enable_counter(cci_pmu, idx);

	raw_spin_unlock_irqrestore(&hw_events->pmu_lock, flags);
}

static void cci_pmu_stop(struct perf_event *event, int pmu_flags)
{
	struct cci_pmu *cci_pmu = to_cci_pmu(event->pmu);
	struct hw_perf_event *hwc = &event->hw;
	int idx = hwc->idx;

	if (hwc->state & PERF_HES_STOPPED)
		return;

	if (unlikely(!pmu_is_valid_counter(cci_pmu, idx))) {
		dev_err(&cci_pmu->plat_device->dev, "Invalid CCI PMU counter %d\n", idx);
		return;
	}

	/*
	 * We always reprogram the counter, so ignore PERF_EF_UPDATE. See
	 * cci_pmu_start()
	 */
	pmu_disable_counter(cci_pmu, idx);
	pmu_event_update(event);
	hwc->state |= PERF_HES_STOPPED | PERF_HES_UPTODATE;
}

static int cci_pmu_add(struct perf_event *event, int flags)
{
	struct cci_pmu *cci_pmu = to_cci_pmu(event->pmu);
	struct cci_pmu_hw_events *hw_events = &cci_pmu->hw_events;
	struct hw_perf_event *hwc = &event->hw;
	int idx;

	/* If we don't have a space for the counter then finish early. */
	idx = pmu_get_event_idx(hw_events, event);
	if (idx < 0)
		return idx;

	event->hw.idx = idx;
	hw_events->events[idx] = event;

	hwc->state = PERF_HES_STOPPED | PERF_HES_UPTODATE;
	if (flags & PERF_EF_START)
		cci_pmu_start(event, PERF_EF_RELOAD);

	/* Propagate our changes to the userspace mapping. */
	perf_event_update_userpage(event);

	return 0;
}

static void cci_pmu_del(struct perf_event *event, int flags)
{
	struct cci_pmu *cci_pmu = to_cci_pmu(event->pmu);
	struct cci_pmu_hw_events *hw_events = &cci_pmu->hw_events;
	struct hw_perf_event *hwc = &event->hw;
	int idx = hwc->idx;

	cci_pmu_stop(event, PERF_EF_UPDATE);
	hw_events->events[idx] = NULL;
	clear_bit(idx, hw_events->used_mask);

	perf_event_update_userpage(event);
}

static int validate_event(struct pmu *cci_pmu,
			  struct cci_pmu_hw_events *hw_events,
			  struct perf_event *event)
{
	if (is_software_event(event))
		return 1;

	/*
	 * Reject groups spanning multiple HW PMUs (e.g. CPU + CCI). The
	 * core perf code won't check that the pmu->ctx == leader->ctx
	 * until after pmu->event_init(event).
	 */
	if (event->pmu != cci_pmu)
		return 0;

	if (event->state < PERF_EVENT_STATE_OFF)
		return 1;

	if (event->state == PERF_EVENT_STATE_OFF && !event->attr.enable_on_exec)
		return 1;

	return pmu_get_event_idx(hw_events, event) >= 0;
}

static int validate_group(struct perf_event *event)
{
	struct perf_event *sibling, *leader = event->group_leader;
	struct cci_pmu *cci_pmu = to_cci_pmu(event->pmu);
	unsigned long mask[BITS_TO_LONGS(HW_CNTRS_MAX)];
	struct cci_pmu_hw_events fake_pmu = {
		/*
		 * Initialise the fake PMU. We only need to populate the
		 * used_mask for the purposes of validation.
		 */
		.used_mask = mask,
	};
	bitmap_zero(mask, cci_pmu->num_cntrs);

	if (!validate_event(event->pmu, &fake_pmu, leader))
		return -EINVAL;

	for_each_sibling_event(sibling, leader) {
		if (!validate_event(event->pmu, &fake_pmu, sibling))
			return -EINVAL;
	}

	if (!validate_event(event->pmu, &fake_pmu, event))
		return -EINVAL;

	return 0;
}

static int __hw_perf_event_init(struct perf_event *event)
{
	struct hw_perf_event *hwc = &event->hw;
	int mapping;

	mapping = pmu_map_event(event);

	if (mapping < 0) {
		pr_debug("event %x:%llx not supported\n", event->attr.type,
			 event->attr.config);
		return mapping;
	}

	/*
	 * We don't assign an index until we actually place the event onto
	 * hardware. Use -1 to signify that we haven't decided where to put it
	 * yet.
	 */
	hwc->idx = -1;
	hwc->config_base = 0;
	hwc->config = 0;
	hwc->event_base = 0;

	/*
	 * Store the event encoding into the config_base field.
	 */
	hwc->config_base |= (unsigned long)mapping;

	if (event->group_leader != event) {
		if (validate_group(event) != 0)
			return -EINVAL;
	}

	return 0;
}

static int cci_pmu_event_init(struct perf_event *event)
{
	struct cci_pmu *cci_pmu = to_cci_pmu(event->pmu);
	atomic_t *active_events = &cci_pmu->active_events;
	int err = 0;

	if (event->attr.type != event->pmu->type)
		return -ENOENT;

	/* Shared by all CPUs, no meaningful state to sample */
	if (is_sampling_event(event) || event->attach_state & PERF_ATTACH_TASK)
		return -EOPNOTSUPP;

	/*
	 * Following the example set by other "uncore" PMUs, we accept any CPU
	 * and rewrite its affinity dynamically rather than having perf core
	 * handle cpu == -1 and pid == -1 for this case.
	 *
	 * The perf core will pin online CPUs for the duration of this call and
	 * the event being installed into its context, so the PMU's CPU can't
	 * change under our feet.
	 */
	if (event->cpu < 0)
		return -EINVAL;
	event->cpu = cci_pmu->cpu;

	event->destroy = hw_perf_event_destroy;
	if (!atomic_inc_not_zero(active_events)) {
		mutex_lock(&cci_pmu->reserve_mutex);
		if (atomic_read(active_events) == 0)
			err = cci_pmu_get_hw(cci_pmu);
		if (!err)
			atomic_inc(active_events);
		mutex_unlock(&cci_pmu->reserve_mutex);
	}
	if (err)
		return err;

	err = __hw_perf_event_init(event);
	if (err)
		hw_perf_event_destroy(event);

	return err;
}

static ssize_t pmu_cpumask_attr_show(struct device *dev,
				     struct device_attribute *attr, char *buf)
{
	struct pmu *pmu = dev_get_drvdata(dev);
	struct cci_pmu *cci_pmu = to_cci_pmu(pmu);

	return cpumap_print_to_pagebuf(true, buf, cpumask_of(cci_pmu->cpu));
}

static struct device_attribute pmu_cpumask_attr =
	__ATTR(cpumask, S_IRUGO, pmu_cpumask_attr_show, NULL);

static struct attribute *pmu_attrs[] = {
	&pmu_cpumask_attr.attr,
	NULL,
};

static const struct attribute_group pmu_attr_group = {
	.attrs = pmu_attrs,
};

static struct attribute_group pmu_format_attr_group = {
	.name = "format",
	.attrs = NULL,		/* Filled in cci_pmu_init_attrs */
};

static struct attribute_group pmu_event_attr_group = {
	.name = "events",
	.attrs = NULL,		/* Filled in cci_pmu_init_attrs */
};

static const struct attribute_group *pmu_attr_groups[] = {
	&pmu_attr_group,
	&pmu_format_attr_group,
	&pmu_event_attr_group,
	NULL
};

static int cci_pmu_init(struct cci_pmu *cci_pmu, struct platform_device *pdev)
{
	const struct cci_pmu_model *model = cci_pmu->model;
	char *name = model->name;
	u32 num_cntrs;

	if (WARN_ON(model->num_hw_cntrs > NUM_HW_CNTRS_MAX))
		return -EINVAL;
	if (WARN_ON(model->fixed_hw_cntrs > FIXED_HW_CNTRS_MAX))
		return -EINVAL;

	pmu_event_attr_group.attrs = model->event_attrs;
	pmu_format_attr_group.attrs = model->format_attrs;

	cci_pmu->pmu = (struct pmu) {
		.module		= THIS_MODULE,
		.name		= cci_pmu->model->name,
		.task_ctx_nr	= perf_invalid_context,
		.pmu_enable	= cci_pmu_enable,
		.pmu_disable	= cci_pmu_disable,
		.event_init	= cci_pmu_event_init,
		.add		= cci_pmu_add,
		.del		= cci_pmu_del,
		.start		= cci_pmu_start,
		.stop		= cci_pmu_stop,
		.read		= pmu_read,
		.attr_groups	= pmu_attr_groups,
		.capabilities	= PERF_PMU_CAP_NO_EXCLUDE,
	};

	cci_pmu->plat_device = pdev;
	num_cntrs = pmu_get_max_counters(cci_pmu);
	if (num_cntrs > cci_pmu->model->num_hw_cntrs) {
		dev_warn(&pdev->dev,
			"PMU implements more counters(%d) than supported by"
			" the model(%d), truncated.",
			num_cntrs, cci_pmu->model->num_hw_cntrs);
		num_cntrs = cci_pmu->model->num_hw_cntrs;
	}
	cci_pmu->num_cntrs = num_cntrs + cci_pmu->model->fixed_hw_cntrs;

	return perf_pmu_register(&cci_pmu->pmu, name, -1);
}

static int cci_pmu_offline_cpu(unsigned int cpu)
{
	int target;

	if (!g_cci_pmu || cpu != g_cci_pmu->cpu)
		return 0;

	target = cpumask_any_but(cpu_online_mask, cpu);
	if (target >= nr_cpu_ids)
		return 0;

	perf_pmu_migrate_context(&g_cci_pmu->pmu, cpu, target);
	g_cci_pmu->cpu = target;
	return 0;
}

984e9cf1 | 1456 | static __maybe_unused struct cci_pmu_model cci_pmu_models[] = { |
3de6be7a RM |
1457 | #ifdef CONFIG_ARM_CCI400_PMU |
1458 | [CCI400_R0] = { | |
1459 | .name = "CCI_400", | |
1201a5a2 KC |
1460 | .fixed_hw_cntrs = FIXED_HW_CNTRS_CII_4XX, /* Cycle counter */ |
1461 | .num_hw_cntrs = NUM_HW_CNTRS_CII_4XX, | |
3de6be7a RM |
1462 | .cntr_size = SZ_4K, |
1463 | .format_attrs = cci400_pmu_format_attrs, | |
1464 | .event_attrs = cci400_r0_pmu_event_attrs, | |
1465 | .event_ranges = { | |
1466 | [CCI_IF_SLAVE] = { | |
1467 | CCI400_R0_SLAVE_PORT_MIN_EV, | |
1468 | CCI400_R0_SLAVE_PORT_MAX_EV, | |
1469 | }, | |
1470 | [CCI_IF_MASTER] = { | |
1471 | CCI400_R0_MASTER_PORT_MIN_EV, | |
1472 | CCI400_R0_MASTER_PORT_MAX_EV, | |
1473 | }, | |
1474 | }, | |
1475 | .validate_hw_event = cci400_validate_hw_event, | |
1476 | .get_event_idx = cci400_get_event_idx, | |
1477 | }, | |
1478 | [CCI400_R1] = { | |
1479 | .name = "CCI_400_r1", | |
1201a5a2 KC |
1480 | .fixed_hw_cntrs = FIXED_HW_CNTRS_CII_4XX, /* Cycle counter */ |
1481 | .num_hw_cntrs = NUM_HW_CNTRS_CII_4XX, | |
3de6be7a RM |
1482 | .cntr_size = SZ_4K, |
1483 | .format_attrs = cci400_pmu_format_attrs, | |
1484 | .event_attrs = cci400_r1_pmu_event_attrs, | |
1485 | .event_ranges = { | |
1486 | [CCI_IF_SLAVE] = { | |
1487 | CCI400_R1_SLAVE_PORT_MIN_EV, | |
1488 | CCI400_R1_SLAVE_PORT_MAX_EV, | |
1489 | }, | |
1490 | [CCI_IF_MASTER] = { | |
1491 | CCI400_R1_MASTER_PORT_MIN_EV, | |
1492 | CCI400_R1_MASTER_PORT_MAX_EV, | |
1493 | }, | |
1494 | }, | |
1495 | .validate_hw_event = cci400_validate_hw_event, | |
1496 | .get_event_idx = cci400_get_event_idx, | |
1497 | }, | |
1498 | #endif | |
1499 | #ifdef CONFIG_ARM_CCI5xx_PMU | |
1500 | [CCI500_R0] = { | |
1501 | .name = "CCI_500", | |
1201a5a2 KC |
1502 | .fixed_hw_cntrs = FIXED_HW_CNTRS_CII_5XX, |
1503 | .num_hw_cntrs = NUM_HW_CNTRS_CII_5XX, | |
3de6be7a RM |
1504 | .cntr_size = SZ_64K, |
1505 | .format_attrs = cci5xx_pmu_format_attrs, | |
1506 | .event_attrs = cci5xx_pmu_event_attrs, | |
1507 | .event_ranges = { | |
1508 | [CCI_IF_SLAVE] = { | |
1509 | CCI5xx_SLAVE_PORT_MIN_EV, | |
1510 | CCI5xx_SLAVE_PORT_MAX_EV, | |
1511 | }, | |
1512 | [CCI_IF_MASTER] = { | |
1513 | CCI5xx_MASTER_PORT_MIN_EV, | |
1514 | CCI5xx_MASTER_PORT_MAX_EV, | |
1515 | }, | |
1516 | [CCI_IF_GLOBAL] = { | |
1517 | CCI5xx_GLOBAL_PORT_MIN_EV, | |
1518 | CCI5xx_GLOBAL_PORT_MAX_EV, | |
1519 | }, | |
1520 | }, | |
1521 | .validate_hw_event = cci500_validate_hw_event, | |
1522 | .write_counters = cci5xx_pmu_write_counters, | |
1523 | }, | |
1524 | [CCI550_R0] = { | |
1525 | .name = "CCI_550", | |
1201a5a2 KC |
1526 | .fixed_hw_cntrs = FIXED_HW_CNTRS_CII_5XX, |
1527 | .num_hw_cntrs = NUM_HW_CNTRS_CII_5XX, | |
3de6be7a RM |
1528 | .cntr_size = SZ_64K, |
1529 | .format_attrs = cci5xx_pmu_format_attrs, | |
1530 | .event_attrs = cci5xx_pmu_event_attrs, | |
1531 | .event_ranges = { | |
1532 | [CCI_IF_SLAVE] = { | |
1533 | CCI5xx_SLAVE_PORT_MIN_EV, | |
1534 | CCI5xx_SLAVE_PORT_MAX_EV, | |
1535 | }, | |
1536 | [CCI_IF_MASTER] = { | |
1537 | CCI5xx_MASTER_PORT_MIN_EV, | |
1538 | CCI5xx_MASTER_PORT_MAX_EV, | |
1539 | }, | |
1540 | [CCI_IF_GLOBAL] = { | |
1541 | CCI5xx_GLOBAL_PORT_MIN_EV, | |
1542 | CCI5xx_GLOBAL_PORT_MAX_EV, | |
1543 | }, | |
1544 | }, | |
1545 | .validate_hw_event = cci550_validate_hw_event, | |
1546 | .write_counters = cci5xx_pmu_write_counters, | |
1547 | }, | |
1548 | #endif | |
1549 | }; | |
1550 | ||
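/*
 * The bare "arm,cci-400-pmu" compatible below deliberately carries no
 * model data; cci_pmu_alloc() then falls back to probing the hardware
 * revision at run time, which requires secure access to the CCI
 * control registers (hence the deprecation warning there).
 */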
1551 | static const struct of_device_id arm_cci_pmu_matches[] = { | |
1552 | #ifdef CONFIG_ARM_CCI400_PMU | |
1553 | { | |
1554 | .compatible = "arm,cci-400-pmu", | |
1555 | .data = NULL, | |
1556 | }, | |
1557 | { | |
1558 | .compatible = "arm,cci-400-pmu,r0", | |
1559 | .data = &cci_pmu_models[CCI400_R0], | |
1560 | }, | |
1561 | { | |
1562 | .compatible = "arm,cci-400-pmu,r1", | |
1563 | .data = &cci_pmu_models[CCI400_R1], | |
1564 | }, | |
1565 | #endif | |
1566 | #ifdef CONFIG_ARM_CCI5xx_PMU | |
1567 | { | |
1568 | .compatible = "arm,cci-500-pmu,r0", | |
1569 | .data = &cci_pmu_models[CCI500_R0], | |
1570 | }, | |
1571 | { | |
1572 | .compatible = "arm,cci-550-pmu,r0", | |
1573 | .data = &cci_pmu_models[CCI550_R0], | |
1574 | }, | |
1575 | #endif | |
1576 | {}, | |
1577 | }; | |
8b0c93c2 | 1578 | MODULE_DEVICE_TABLE(of, arm_cci_pmu_matches); |
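/*
 * An illustrative device-tree fragment matching this table (the unit
 * addresses and interrupt specifiers are assumed example values):
 *
 *	cci@2c090000 {
 *		compatible = "arm,cci-400";
 *		...
 *		pmu@9000 {
 *			compatible = "arm,cci-400-pmu,r0";
 *			reg = <0x9000 0x5000>;
 *			interrupts = <0 101 4>, <0 102 4>, <0 103 4>,
 *				     <0 104 4>, <0 105 4>;
 *		};
 *	};
 *
 * Five interrupts: one per hardware counter (the CCI-400's cycle
 * counter plus four programmable counters), as enforced in
 * cci_pmu_probe().
 */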
3de6be7a | 1579 | |
3de6be7a RM |
1580 | static bool is_duplicate_irq(int irq, int *irqs, int nr_irqs) |
1581 | { | |
1582 | int i; | |
1583 | ||
1584 | for (i = 0; i < nr_irqs; i++) | |
1585 | if (irq == irqs[i]) | |
1586 | return true; | |
1587 | ||
1588 | return false; | |
1589 | } | |
1590 | ||
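/*
 * Allocate the PMU instance and its per-counter bookkeeping (irqs,
 * event pointers, used-counter bitmap), sized for the model's maximum
 * counter count, and resolve the model from OF match data or by
 * probing the hardware.
 */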
32837954 | 1591 | static struct cci_pmu *cci_pmu_alloc(struct device *dev) |
3de6be7a RM |
1592 | { |
1593 | struct cci_pmu *cci_pmu; | |
1594 | const struct cci_pmu_model *model; | |
1595 | ||
1596 | /* | |
1597 | * All allocations are devm_*, so we don't have to free them | |
1598 | * explicitly on error; they are released automatically when the | |
1599 | * driver detaches. | |
1600 | */ | |
e9c112c9 RM |
1601 | cci_pmu = devm_kzalloc(dev, sizeof(*cci_pmu), GFP_KERNEL); |
1602 | if (!cci_pmu) | |
1603 | return ERR_PTR(-ENOMEM); | |
1604 | ||
1605 | cci_pmu->ctrl_base = *(void __iomem **)dev->platform_data; | |
1606 | ||
32837954 RM |
1607 | model = of_device_get_match_data(dev); |
1608 | if (!model) { | |
1609 | dev_warn(dev, | |
1610 | "DEPRECATED compatible property, requires secure access to CCI registers"); | |
e9c112c9 | 1611 | model = probe_cci_model(cci_pmu); |
32837954 | 1612 | } |
3de6be7a | 1613 | if (!model) { |
32837954 | 1614 | dev_warn(dev, "CCI PMU version not supported\n"); |
3de6be7a RM |
1615 | return ERR_PTR(-ENODEV); |
1616 | } | |
1617 | ||
3de6be7a | 1618 | cci_pmu->model = model; |
32837954 | 1619 | cci_pmu->irqs = devm_kcalloc(dev, CCI_PMU_MAX_HW_CNTRS(model), |
3de6be7a RM |
1620 | sizeof(*cci_pmu->irqs), GFP_KERNEL); |
1621 | if (!cci_pmu->irqs) | |
1622 | return ERR_PTR(-ENOMEM); | |
32837954 | 1623 | cci_pmu->hw_events.events = devm_kcalloc(dev, |
3de6be7a RM |
1624 | CCI_PMU_MAX_HW_CNTRS(model), |
1625 | sizeof(*cci_pmu->hw_events.events), | |
1626 | GFP_KERNEL); | |
1627 | if (!cci_pmu->hw_events.events) | |
1628 | return ERR_PTR(-ENOMEM); | |
0e35850b CJ |
1629 | cci_pmu->hw_events.used_mask = devm_bitmap_zalloc(dev, |
1630 | CCI_PMU_MAX_HW_CNTRS(model), | |
1631 | GFP_KERNEL); | |
3de6be7a RM |
1632 | if (!cci_pmu->hw_events.used_mask) |
1633 | return ERR_PTR(-ENOMEM); | |
1634 | ||
1635 | return cci_pmu; | |
1636 | } | |
1637 | ||
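/*
 * Probe: map the PMU registers, collect one overflow interrupt per
 * counter (de-duplicating lines shared between counters), nominate
 * the current CPU for counting, install the hotplug callback and
 * register the PMU with the perf core.
 */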
1638 | static int cci_pmu_probe(struct platform_device *pdev) | |
1639 | { | |
3de6be7a RM |
1640 | struct cci_pmu *cci_pmu; |
1641 | int i, ret, irq; | |
1642 | ||
32837954 | 1643 | cci_pmu = cci_pmu_alloc(&pdev->dev); |
3de6be7a RM |
1644 | if (IS_ERR(cci_pmu)) |
1645 | return PTR_ERR(cci_pmu); | |
1646 | ||
504db0f8 | 1647 | cci_pmu->base = devm_platform_ioremap_resource(pdev, 0); |
3de6be7a RM |
1648 | if (IS_ERR(cci_pmu->base)) |
1649 | 		return PTR_ERR(cci_pmu->base); | |
1650 | ||
1651 | /* | |
1652 | * The CCI PMU has one overflow interrupt per counter, but several | |
1653 | * counters may be tied together to a common interrupt line. | |
1654 | */ | |
1655 | cci_pmu->nr_irqs = 0; | |
1656 | for (i = 0; i < CCI_PMU_MAX_HW_CNTRS(cci_pmu->model); i++) { | |
1657 | irq = platform_get_irq(pdev, i); | |
1658 | if (irq < 0) | |
1659 | break; | |
1660 | ||
1661 | if (is_duplicate_irq(irq, cci_pmu->irqs, cci_pmu->nr_irqs)) | |
1662 | continue; | |
1663 | ||
1664 | cci_pmu->irqs[cci_pmu->nr_irqs++] = irq; | |
1665 | } | |
1666 | ||
1667 | /* | |
1668 | * Ensure that the device tree has as many interrupts as the number | |
1669 | * of counters. | |
1670 | */ | |
1671 | if (i < CCI_PMU_MAX_HW_CNTRS(cci_pmu->model)) { | |
1672 | dev_warn(&pdev->dev, "Incorrect number of interrupts: %d, should be %d\n", | |
1673 | i, CCI_PMU_MAX_HW_CNTRS(cci_pmu->model)); | |
1674 | return -EINVAL; | |
1675 | } | |
1676 | ||
1677 | raw_spin_lock_init(&cci_pmu->hw_events.pmu_lock); | |
1678 | mutex_init(&cci_pmu->reserve_mutex); | |
1679 | atomic_set(&cci_pmu->active_events, 0); | |
3de6be7a | 1680 | |
0d2e2a82 RM |
1681 | cci_pmu->cpu = raw_smp_processor_id(); |
1682 | g_cci_pmu = cci_pmu; | |
03057f26 RM |
1683 | cpuhp_setup_state_nocalls(CPUHP_AP_PERF_ARM_CCI_ONLINE, |
1684 | "perf/arm/cci:online", NULL, | |
1685 | cci_pmu_offline_cpu); | |
0d2e2a82 RM |
1686 | |
1687 | ret = cci_pmu_init(cci_pmu, pdev); | |
1688 | if (ret) | |
1689 | goto error_pmu_init; | |
1690 | ||
3de6be7a RM |
1691 | pr_info("ARM %s PMU driver probed\n", cci_pmu->model->name); | |
1692 | return 0; | |
0d2e2a82 RM |
1693 | |
1694 | error_pmu_init: | |
1695 | cpuhp_remove_state(CPUHP_AP_PERF_ARM_CCI_ONLINE); | |
1696 | g_cci_pmu = NULL; | |
1697 | return ret; | |
3de6be7a RM |
1698 | } |
1699 | ||
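/*
 * Tear down in reverse order of probe: drop the hotplug state, then
 * unregister the PMU. All memory is devm-managed, so there is nothing
 * to free explicitly here.
 */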
8b0c93c2 RM |
1700 | static int cci_pmu_remove(struct platform_device *pdev) |
1701 | { | |
1702 | if (!g_cci_pmu) | |
1703 | return 0; | |
1704 | ||
1705 | cpuhp_remove_state(CPUHP_AP_PERF_ARM_CCI_ONLINE); | |
1706 | perf_pmu_unregister(&g_cci_pmu->pmu); | |
1707 | g_cci_pmu = NULL; | |
1708 | ||
1709 | return 0; | |
1710 | } | |
1711 | ||
3de6be7a RM |
1712 | static struct platform_driver cci_pmu_driver = { |
1713 | .driver = { | |
1714 | .name = DRIVER_NAME, | |
1715 | .of_match_table = arm_cci_pmu_matches, | |
f32ed8eb | 1716 | .suppress_bind_attrs = true, |
3de6be7a RM |
1717 | }, |
1718 | .probe = cci_pmu_probe, | |
8b0c93c2 | 1719 | .remove = cci_pmu_remove, |
3de6be7a RM |
1720 | }; |
1721 | ||
8b0c93c2 | 1722 | module_platform_driver(cci_pmu_driver); |
75dc3441 | 1723 | MODULE_LICENSE("GPL v2"); |
3de6be7a | 1724 | MODULE_DESCRIPTION("ARM CCI PMU support"); |