// SPDX-License-Identifier: GPL-2.0
// CCI Cache Coherent Interconnect PMU driver
// Copyright (C) 2013-2018 Arm Ltd.
// Author: Punit Agrawal <punit.agrawal@arm.com>, Suzuki Poulose <suzuki.poulose@arm.com>

#include <linux/arm-cci.h>
#include <linux/io.h>
#include <linux/interrupt.h>
#include <linux/module.h>
#include <linux/of_address.h>
#include <linux/of_device.h>
#include <linux/of_irq.h>
#include <linux/of_platform.h>
#include <linux/perf_event.h>
#include <linux/platform_device.h>
#include <linux/slab.h>
#include <linux/spinlock.h>

#define DRIVER_NAME		"ARM-CCI PMU"

#define CCI_PMCR		0x0100
#define CCI_PID2		0x0fe8

#define CCI_PMCR_CEN		0x00000001
#define CCI_PMCR_NCNT_MASK	0x0000f800
#define CCI_PMCR_NCNT_SHIFT	11

#define CCI_PID2_REV_MASK	0xf0
#define CCI_PID2_REV_SHIFT	4

#define CCI_PMU_EVT_SEL		0x000
#define CCI_PMU_CNTR		0x004
#define CCI_PMU_CNTR_CTRL	0x008
#define CCI_PMU_OVRFLW		0x00c

#define CCI_PMU_OVRFLW_FLAG	1

#define CCI_PMU_CNTR_SIZE(model)	((model)->cntr_size)
#define CCI_PMU_CNTR_BASE(model, idx)	((idx) * CCI_PMU_CNTR_SIZE(model))
#define CCI_PMU_CNTR_MASK		((1ULL << 32) - 1)
#define CCI_PMU_CNTR_LAST(cci_pmu)	(cci_pmu->num_cntrs - 1)

#define CCI_PMU_MAX_HW_CNTRS(model) \
	((model)->num_hw_cntrs + (model)->fixed_hw_cntrs)

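/*
 * Illustrative note (added; not in the original driver): each counter
 * gets its own cntr_size-d window of registers. On CCI-400, where
 * cntr_size is SZ_4K, counter 2's window starts at base + 0x2000, so
 * its current-value register sits at base + 0x2000 + CCI_PMU_CNTR,
 * i.e. base + 0x2004.
 */
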
/* Types of interfaces that can generate events */
enum {
	CCI_IF_SLAVE,
	CCI_IF_MASTER,
#ifdef CONFIG_ARM_CCI5xx_PMU
	CCI_IF_GLOBAL,
#endif
	CCI_IF_MAX,
};

#define NUM_HW_CNTRS_CII_4XX	4
#define NUM_HW_CNTRS_CII_5XX	8
#define NUM_HW_CNTRS_MAX	NUM_HW_CNTRS_CII_5XX

#define FIXED_HW_CNTRS_CII_4XX	1
#define FIXED_HW_CNTRS_CII_5XX	0
#define FIXED_HW_CNTRS_MAX	FIXED_HW_CNTRS_CII_4XX

#define HW_CNTRS_MAX		(NUM_HW_CNTRS_MAX + FIXED_HW_CNTRS_MAX)

struct event_range {
	u32 min;
	u32 max;
};

struct cci_pmu_hw_events {
	struct perf_event **events;
	unsigned long *used_mask;
	raw_spinlock_t pmu_lock;
};

struct cci_pmu;
/*
 * struct cci_pmu_model:
 * @fixed_hw_cntrs - Number of fixed event counters
 * @num_hw_cntrs - Maximum number of programmable event counters
 * @cntr_size - Size of an event counter mapping
 */
struct cci_pmu_model {
	char *name;
	u32 fixed_hw_cntrs;
	u32 num_hw_cntrs;
	u32 cntr_size;
	struct attribute **format_attrs;
	struct attribute **event_attrs;
	struct event_range event_ranges[CCI_IF_MAX];
	int (*validate_hw_event)(struct cci_pmu *, unsigned long);
	int (*get_event_idx)(struct cci_pmu *, struct cci_pmu_hw_events *, unsigned long);
	void (*write_counters)(struct cci_pmu *, unsigned long *);
};

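/*
 * Note (added for clarity; not in the original driver): the counters a
 * model exposes total fixed_hw_cntrs + num_hw_cntrs. For CCI-400 that
 * is 1 fixed cycle counter plus 4 programmable counters; CCI-5xx has
 * no fixed counters and 8 programmable ones.
 */
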
static struct cci_pmu_model cci_pmu_models[];

struct cci_pmu {
	void __iomem *base;
	void __iomem *ctrl_base;
	struct pmu pmu;
	int cpu;
	int nr_irqs;
	int *irqs;
	unsigned long active_irqs;
	const struct cci_pmu_model *model;
	struct cci_pmu_hw_events hw_events;
	struct platform_device *plat_device;
	int num_cntrs;
	atomic_t active_events;
	struct mutex reserve_mutex;
};

#define to_cci_pmu(c)	(container_of(c, struct cci_pmu, pmu))

static struct cci_pmu *g_cci_pmu;

enum cci_models {
#ifdef CONFIG_ARM_CCI400_PMU
	CCI400_R0,
	CCI400_R1,
#endif
#ifdef CONFIG_ARM_CCI5xx_PMU
	CCI500_R0,
	CCI550_R0,
#endif
	CCI_MODEL_MAX
};

static void pmu_write_counters(struct cci_pmu *cci_pmu,
			       unsigned long *mask);
static ssize_t __maybe_unused cci_pmu_format_show(struct device *dev,
			struct device_attribute *attr, char *buf);
static ssize_t __maybe_unused cci_pmu_event_show(struct device *dev,
			struct device_attribute *attr, char *buf);

#define CCI_EXT_ATTR_ENTRY(_name, _func, _config)			\
	&((struct dev_ext_attribute[]) {				\
		{ __ATTR(_name, S_IRUGO, _func, NULL), (void *)_config } \
	})[0].attr.attr

#define CCI_FORMAT_EXT_ATTR_ENTRY(_name, _config) \
	CCI_EXT_ATTR_ENTRY(_name, cci_pmu_format_show, (char *)_config)
#define CCI_EVENT_EXT_ATTR_ENTRY(_name, _config) \
	CCI_EXT_ATTR_ENTRY(_name, cci_pmu_event_show, (unsigned long)_config)

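/*
 * Illustrative note (added; not in the original driver): attributes
 * built with these macros back the standard perf sysfs layout, e.g.
 *   /sys/bus/event_source/devices/CCI_400/format/event
 *   /sys/bus/event_source/devices/CCI_400/events/cycles
 * which is how the perf tool resolves symbolic event names and the
 * config bit layout for this PMU.
 */
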
/* CCI400 PMU Specific definitions */

#ifdef CONFIG_ARM_CCI400_PMU

/* Port ids */
#define CCI400_PORT_S0		0
#define CCI400_PORT_S1		1
#define CCI400_PORT_S2		2
#define CCI400_PORT_S3		3
#define CCI400_PORT_S4		4
#define CCI400_PORT_M0		5
#define CCI400_PORT_M1		6
#define CCI400_PORT_M2		7

#define CCI400_R1_PX		5

/*
 * Instead of an event id to monitor CCI cycles, a dedicated counter is
 * provided. Use 0xff to represent CCI cycles and hope that no future revisions
 * make use of this event in hardware.
 */
enum cci400_perf_events {
	CCI400_PMU_CYCLES = 0xff
};

#define CCI400_PMU_CYCLE_CNTR_IDX	0
#define CCI400_PMU_CNTR0_IDX		1

/*
 * CCI PMU event id is an 8-bit value made of two parts - bits 7:5 for one of 8
 * ports and bits 4:0 are event codes. There are different event codes
 * associated with each port type.
 *
 * Additionally, the range of events associated with the port types changed
 * between Rev0 and Rev1.
 *
 * The constants below define the range of valid codes for each port type for
 * the different revisions and are used to validate the event to be monitored.
 */

#define CCI400_PMU_EVENT_MASK		0xffUL
#define CCI400_PMU_EVENT_SOURCE_SHIFT	5
#define CCI400_PMU_EVENT_SOURCE_MASK	0x7
#define CCI400_PMU_EVENT_CODE_SHIFT	0
#define CCI400_PMU_EVENT_CODE_MASK	0x1f
#define CCI400_PMU_EVENT_SOURCE(event) \
	((event >> CCI400_PMU_EVENT_SOURCE_SHIFT) & \
			CCI400_PMU_EVENT_SOURCE_MASK)
#define CCI400_PMU_EVENT_CODE(event) \
	((event >> CCI400_PMU_EVENT_CODE_SHIFT) & CCI400_PMU_EVENT_CODE_MASK)

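/*
 * Worked example (added; not in the original driver): event id 0x63
 * decodes as source = (0x63 >> 5) & 0x7 = 0x3 (slave port S3) and
 * code = 0x63 & 0x1f = 0x3, i.e. the S3 instance of the Rev0 event
 * si_rrq_hs_inner_or_outershareable.
 */
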
#define CCI400_R0_SLAVE_PORT_MIN_EV	0x00
#define CCI400_R0_SLAVE_PORT_MAX_EV	0x13
#define CCI400_R0_MASTER_PORT_MIN_EV	0x14
#define CCI400_R0_MASTER_PORT_MAX_EV	0x1a

#define CCI400_R1_SLAVE_PORT_MIN_EV	0x00
#define CCI400_R1_SLAVE_PORT_MAX_EV	0x14
#define CCI400_R1_MASTER_PORT_MIN_EV	0x00
#define CCI400_R1_MASTER_PORT_MAX_EV	0x11

#define CCI400_CYCLE_EVENT_EXT_ATTR_ENTRY(_name, _config) \
	CCI_EXT_ATTR_ENTRY(_name, cci400_pmu_cycle_event_show, \
					(unsigned long)_config)

static ssize_t cci400_pmu_cycle_event_show(struct device *dev,
			struct device_attribute *attr, char *buf);

static struct attribute *cci400_pmu_format_attrs[] = {
	CCI_FORMAT_EXT_ATTR_ENTRY(event, "config:0-4"),
	CCI_FORMAT_EXT_ATTR_ENTRY(source, "config:5-7"),
	NULL
};

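/*
 * Illustrative usage (added; not in the original driver): with the
 * format above, the raw config bits can be supplied directly, e.g.
 *   perf stat -a -e CCI_400/source=0x3,event=0x3/ sleep 1
 * (assuming the perf tool and a CCI-400 system), which selects event
 * code 0x3 on slave interface S3.
 */
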
static struct attribute *cci400_r0_pmu_event_attrs[] = {
	/* Slave events */
	CCI_EVENT_EXT_ATTR_ENTRY(si_rrq_hs_any, 0x0),
	CCI_EVENT_EXT_ATTR_ENTRY(si_rrq_hs_device, 0x01),
	CCI_EVENT_EXT_ATTR_ENTRY(si_rrq_hs_normal_or_nonshareable, 0x2),
	CCI_EVENT_EXT_ATTR_ENTRY(si_rrq_hs_inner_or_outershareable, 0x3),
	CCI_EVENT_EXT_ATTR_ENTRY(si_rrq_hs_cache_maintenance, 0x4),
	CCI_EVENT_EXT_ATTR_ENTRY(si_rrq_hs_mem_barrier, 0x5),
	CCI_EVENT_EXT_ATTR_ENTRY(si_rrq_hs_sync_barrier, 0x6),
	CCI_EVENT_EXT_ATTR_ENTRY(si_rrq_hs_dvm_msg, 0x7),
	CCI_EVENT_EXT_ATTR_ENTRY(si_rrq_hs_dvm_msg_sync, 0x8),
	CCI_EVENT_EXT_ATTR_ENTRY(si_rrq_stall_tt_full, 0x9),
	CCI_EVENT_EXT_ATTR_ENTRY(si_r_data_last_hs_snoop, 0xA),
	CCI_EVENT_EXT_ATTR_ENTRY(si_r_data_stall_rvalids_h_rready_l, 0xB),
	CCI_EVENT_EXT_ATTR_ENTRY(si_wrq_hs_any, 0xC),
	CCI_EVENT_EXT_ATTR_ENTRY(si_wrq_hs_device, 0xD),
	CCI_EVENT_EXT_ATTR_ENTRY(si_wrq_hs_normal_or_nonshareable, 0xE),
	CCI_EVENT_EXT_ATTR_ENTRY(si_wrq_hs_inner_or_outershare_wback_wclean, 0xF),
	CCI_EVENT_EXT_ATTR_ENTRY(si_wrq_hs_write_unique, 0x10),
	CCI_EVENT_EXT_ATTR_ENTRY(si_wrq_hs_write_line_unique, 0x11),
	CCI_EVENT_EXT_ATTR_ENTRY(si_wrq_hs_evict, 0x12),
	CCI_EVENT_EXT_ATTR_ENTRY(si_wrq_stall_tt_full, 0x13),
	/* Master events */
	CCI_EVENT_EXT_ATTR_ENTRY(mi_retry_speculative_fetch, 0x14),
	CCI_EVENT_EXT_ATTR_ENTRY(mi_rrq_stall_addr_hazard, 0x15),
	CCI_EVENT_EXT_ATTR_ENTRY(mi_rrq_stall_id_hazard, 0x16),
	CCI_EVENT_EXT_ATTR_ENTRY(mi_rrq_stall_tt_full, 0x17),
	CCI_EVENT_EXT_ATTR_ENTRY(mi_rrq_stall_barrier_hazard, 0x18),
	CCI_EVENT_EXT_ATTR_ENTRY(mi_wrq_stall_barrier_hazard, 0x19),
	CCI_EVENT_EXT_ATTR_ENTRY(mi_wrq_stall_tt_full, 0x1A),
	/* Special event for cycles counter */
	CCI400_CYCLE_EVENT_EXT_ATTR_ENTRY(cycles, 0xff),
	NULL
};

static struct attribute *cci400_r1_pmu_event_attrs[] = {
	/* Slave events */
	CCI_EVENT_EXT_ATTR_ENTRY(si_rrq_hs_any, 0x0),
	CCI_EVENT_EXT_ATTR_ENTRY(si_rrq_hs_device, 0x01),
	CCI_EVENT_EXT_ATTR_ENTRY(si_rrq_hs_normal_or_nonshareable, 0x2),
	CCI_EVENT_EXT_ATTR_ENTRY(si_rrq_hs_inner_or_outershareable, 0x3),
	CCI_EVENT_EXT_ATTR_ENTRY(si_rrq_hs_cache_maintenance, 0x4),
	CCI_EVENT_EXT_ATTR_ENTRY(si_rrq_hs_mem_barrier, 0x5),
	CCI_EVENT_EXT_ATTR_ENTRY(si_rrq_hs_sync_barrier, 0x6),
	CCI_EVENT_EXT_ATTR_ENTRY(si_rrq_hs_dvm_msg, 0x7),
	CCI_EVENT_EXT_ATTR_ENTRY(si_rrq_hs_dvm_msg_sync, 0x8),
	CCI_EVENT_EXT_ATTR_ENTRY(si_rrq_stall_tt_full, 0x9),
	CCI_EVENT_EXT_ATTR_ENTRY(si_r_data_last_hs_snoop, 0xA),
	CCI_EVENT_EXT_ATTR_ENTRY(si_r_data_stall_rvalids_h_rready_l, 0xB),
	CCI_EVENT_EXT_ATTR_ENTRY(si_wrq_hs_any, 0xC),
	CCI_EVENT_EXT_ATTR_ENTRY(si_wrq_hs_device, 0xD),
	CCI_EVENT_EXT_ATTR_ENTRY(si_wrq_hs_normal_or_nonshareable, 0xE),
	CCI_EVENT_EXT_ATTR_ENTRY(si_wrq_hs_inner_or_outershare_wback_wclean, 0xF),
	CCI_EVENT_EXT_ATTR_ENTRY(si_wrq_hs_write_unique, 0x10),
	CCI_EVENT_EXT_ATTR_ENTRY(si_wrq_hs_write_line_unique, 0x11),
	CCI_EVENT_EXT_ATTR_ENTRY(si_wrq_hs_evict, 0x12),
	CCI_EVENT_EXT_ATTR_ENTRY(si_wrq_stall_tt_full, 0x13),
	CCI_EVENT_EXT_ATTR_ENTRY(si_rrq_stall_slave_id_hazard, 0x14),
	/* Master events */
	CCI_EVENT_EXT_ATTR_ENTRY(mi_retry_speculative_fetch, 0x0),
	CCI_EVENT_EXT_ATTR_ENTRY(mi_stall_cycle_addr_hazard, 0x1),
	CCI_EVENT_EXT_ATTR_ENTRY(mi_rrq_stall_master_id_hazard, 0x2),
	CCI_EVENT_EXT_ATTR_ENTRY(mi_rrq_stall_hi_prio_rtq_full, 0x3),
	CCI_EVENT_EXT_ATTR_ENTRY(mi_rrq_stall_barrier_hazard, 0x4),
	CCI_EVENT_EXT_ATTR_ENTRY(mi_wrq_stall_barrier_hazard, 0x5),
	CCI_EVENT_EXT_ATTR_ENTRY(mi_wrq_stall_wtq_full, 0x6),
	CCI_EVENT_EXT_ATTR_ENTRY(mi_rrq_stall_low_prio_rtq_full, 0x7),
	CCI_EVENT_EXT_ATTR_ENTRY(mi_rrq_stall_mid_prio_rtq_full, 0x8),
	CCI_EVENT_EXT_ATTR_ENTRY(mi_rrq_stall_qvn_vn0, 0x9),
	CCI_EVENT_EXT_ATTR_ENTRY(mi_rrq_stall_qvn_vn1, 0xA),
	CCI_EVENT_EXT_ATTR_ENTRY(mi_rrq_stall_qvn_vn2, 0xB),
	CCI_EVENT_EXT_ATTR_ENTRY(mi_rrq_stall_qvn_vn3, 0xC),
	CCI_EVENT_EXT_ATTR_ENTRY(mi_wrq_stall_qvn_vn0, 0xD),
	CCI_EVENT_EXT_ATTR_ENTRY(mi_wrq_stall_qvn_vn1, 0xE),
	CCI_EVENT_EXT_ATTR_ENTRY(mi_wrq_stall_qvn_vn2, 0xF),
	CCI_EVENT_EXT_ATTR_ENTRY(mi_wrq_stall_qvn_vn3, 0x10),
	CCI_EVENT_EXT_ATTR_ENTRY(mi_wrq_unique_or_line_unique_addr_hazard, 0x11),
	/* Special event for cycles counter */
	CCI400_CYCLE_EVENT_EXT_ATTR_ENTRY(cycles, 0xff),
	NULL
};

static ssize_t cci400_pmu_cycle_event_show(struct device *dev,
			struct device_attribute *attr, char *buf)
{
	struct dev_ext_attribute *eattr = container_of(attr,
				struct dev_ext_attribute, attr);
	return snprintf(buf, PAGE_SIZE, "config=0x%lx\n", (unsigned long)eattr->var);
}

static int cci400_get_event_idx(struct cci_pmu *cci_pmu,
				struct cci_pmu_hw_events *hw,
				unsigned long cci_event)
{
	int idx;

	/* cycles event idx is fixed */
	if (cci_event == CCI400_PMU_CYCLES) {
		if (test_and_set_bit(CCI400_PMU_CYCLE_CNTR_IDX, hw->used_mask))
			return -EAGAIN;

		return CCI400_PMU_CYCLE_CNTR_IDX;
	}

	for (idx = CCI400_PMU_CNTR0_IDX; idx <= CCI_PMU_CNTR_LAST(cci_pmu); ++idx)
		if (!test_and_set_bit(idx, hw->used_mask))
			return idx;

	/* No counters available */
	return -EAGAIN;
}

static int cci400_validate_hw_event(struct cci_pmu *cci_pmu, unsigned long hw_event)
{
	u8 ev_source = CCI400_PMU_EVENT_SOURCE(hw_event);
	u8 ev_code = CCI400_PMU_EVENT_CODE(hw_event);
	int if_type;

	if (hw_event & ~CCI400_PMU_EVENT_MASK)
		return -ENOENT;

	if (hw_event == CCI400_PMU_CYCLES)
		return hw_event;

	switch (ev_source) {
	case CCI400_PORT_S0:
	case CCI400_PORT_S1:
	case CCI400_PORT_S2:
	case CCI400_PORT_S3:
	case CCI400_PORT_S4:
		/* Slave Interface */
		if_type = CCI_IF_SLAVE;
		break;
	case CCI400_PORT_M0:
	case CCI400_PORT_M1:
	case CCI400_PORT_M2:
		/* Master Interface */
		if_type = CCI_IF_MASTER;
		break;
	default:
		return -ENOENT;
	}

	if (ev_code >= cci_pmu->model->event_ranges[if_type].min &&
		ev_code <= cci_pmu->model->event_ranges[if_type].max)
		return hw_event;

	return -ENOENT;
}

static int probe_cci400_revision(struct cci_pmu *cci_pmu)
{
	int rev;
	rev = readl_relaxed(cci_pmu->ctrl_base + CCI_PID2) & CCI_PID2_REV_MASK;
	rev >>= CCI_PID2_REV_SHIFT;

	if (rev < CCI400_R1_PX)
		return CCI400_R0;
	else
		return CCI400_R1;
}

static const struct cci_pmu_model *probe_cci_model(struct cci_pmu *cci_pmu)
{
	if (platform_has_secure_cci_access())
		return &cci_pmu_models[probe_cci400_revision(cci_pmu)];
	return NULL;
}
#else	/* !CONFIG_ARM_CCI400_PMU */
static inline struct cci_pmu_model *probe_cci_model(struct cci_pmu *cci_pmu)
{
	return NULL;
}
#endif	/* CONFIG_ARM_CCI400_PMU */

#ifdef CONFIG_ARM_CCI5xx_PMU

/*
 * CCI5xx PMU event id is a 9-bit value made of two parts.
 *	bits [8:5] - Source for the event
 *	bits [4:0] - Event code (specific to type of interface)
 */

/* Port ids */
#define CCI5xx_PORT_S0			0x0
#define CCI5xx_PORT_S1			0x1
#define CCI5xx_PORT_S2			0x2
#define CCI5xx_PORT_S3			0x3
#define CCI5xx_PORT_S4			0x4
#define CCI5xx_PORT_S5			0x5
#define CCI5xx_PORT_S6			0x6

#define CCI5xx_PORT_M0			0x8
#define CCI5xx_PORT_M1			0x9
#define CCI5xx_PORT_M2			0xa
#define CCI5xx_PORT_M3			0xb
#define CCI5xx_PORT_M4			0xc
#define CCI5xx_PORT_M5			0xd
#define CCI5xx_PORT_M6			0xe

#define CCI5xx_PORT_GLOBAL		0xf

#define CCI5xx_PMU_EVENT_MASK		0x1ffUL
#define CCI5xx_PMU_EVENT_SOURCE_SHIFT	0x5
#define CCI5xx_PMU_EVENT_SOURCE_MASK	0xf
#define CCI5xx_PMU_EVENT_CODE_SHIFT	0x0
#define CCI5xx_PMU_EVENT_CODE_MASK	0x1f

#define CCI5xx_PMU_EVENT_SOURCE(event) \
	((event >> CCI5xx_PMU_EVENT_SOURCE_SHIFT) & CCI5xx_PMU_EVENT_SOURCE_MASK)
#define CCI5xx_PMU_EVENT_CODE(event) \
	((event >> CCI5xx_PMU_EVENT_CODE_SHIFT) & CCI5xx_PMU_EVENT_CODE_MASK)

#define CCI5xx_SLAVE_PORT_MIN_EV	0x00
#define CCI5xx_SLAVE_PORT_MAX_EV	0x1f
#define CCI5xx_MASTER_PORT_MIN_EV	0x00
#define CCI5xx_MASTER_PORT_MAX_EV	0x06
#define CCI5xx_GLOBAL_PORT_MIN_EV	0x00
#define CCI5xx_GLOBAL_PORT_MAX_EV	0x0f


#define CCI5xx_GLOBAL_EVENT_EXT_ATTR_ENTRY(_name, _config) \
	CCI_EXT_ATTR_ENTRY(_name, cci5xx_pmu_global_event_show, \
					(unsigned long) _config)

static ssize_t cci5xx_pmu_global_event_show(struct device *dev,
				struct device_attribute *attr, char *buf);

static struct attribute *cci5xx_pmu_format_attrs[] = {
	CCI_FORMAT_EXT_ATTR_ENTRY(event, "config:0-4"),
	CCI_FORMAT_EXT_ATTR_ENTRY(source, "config:5-8"),
	NULL,
};

static struct attribute *cci5xx_pmu_event_attrs[] = {
	/* Slave events */
	CCI_EVENT_EXT_ATTR_ENTRY(si_rrq_hs_arvalid, 0x0),
	CCI_EVENT_EXT_ATTR_ENTRY(si_rrq_dev, 0x1),
	CCI_EVENT_EXT_ATTR_ENTRY(si_rrq_hs_nonshareable, 0x2),
	CCI_EVENT_EXT_ATTR_ENTRY(si_rrq_hs_shareable_non_alloc, 0x3),
	CCI_EVENT_EXT_ATTR_ENTRY(si_rrq_hs_shareable_alloc, 0x4),
	CCI_EVENT_EXT_ATTR_ENTRY(si_rrq_hs_invalidate, 0x5),
	CCI_EVENT_EXT_ATTR_ENTRY(si_rrq_hs_cache_maint, 0x6),
	CCI_EVENT_EXT_ATTR_ENTRY(si_rrq_hs_dvm_msg, 0x7),
	CCI_EVENT_EXT_ATTR_ENTRY(si_rrq_hs_rval, 0x8),
	CCI_EVENT_EXT_ATTR_ENTRY(si_rrq_hs_rlast_snoop, 0x9),
	CCI_EVENT_EXT_ATTR_ENTRY(si_wrq_hs_awalid, 0xA),
	CCI_EVENT_EXT_ATTR_ENTRY(si_wrq_dev, 0xB),
	CCI_EVENT_EXT_ATTR_ENTRY(si_wrq_non_shareable, 0xC),
	CCI_EVENT_EXT_ATTR_ENTRY(si_wrq_share_wb, 0xD),
	CCI_EVENT_EXT_ATTR_ENTRY(si_wrq_share_wlu, 0xE),
	CCI_EVENT_EXT_ATTR_ENTRY(si_wrq_share_wunique, 0xF),
	CCI_EVENT_EXT_ATTR_ENTRY(si_wrq_evict, 0x10),
	CCI_EVENT_EXT_ATTR_ENTRY(si_wrq_wrevict, 0x11),
	CCI_EVENT_EXT_ATTR_ENTRY(si_w_data_beat, 0x12),
	CCI_EVENT_EXT_ATTR_ENTRY(si_srq_acvalid, 0x13),
	CCI_EVENT_EXT_ATTR_ENTRY(si_srq_read, 0x14),
	CCI_EVENT_EXT_ATTR_ENTRY(si_srq_clean, 0x15),
	CCI_EVENT_EXT_ATTR_ENTRY(si_srq_data_transfer_low, 0x16),
	CCI_EVENT_EXT_ATTR_ENTRY(si_rrq_stall_arvalid, 0x17),
	CCI_EVENT_EXT_ATTR_ENTRY(si_r_data_stall, 0x18),
	CCI_EVENT_EXT_ATTR_ENTRY(si_wrq_stall, 0x19),
	CCI_EVENT_EXT_ATTR_ENTRY(si_w_data_stall, 0x1A),
	CCI_EVENT_EXT_ATTR_ENTRY(si_w_resp_stall, 0x1B),
	CCI_EVENT_EXT_ATTR_ENTRY(si_srq_stall, 0x1C),
	CCI_EVENT_EXT_ATTR_ENTRY(si_s_data_stall, 0x1D),
	CCI_EVENT_EXT_ATTR_ENTRY(si_rq_stall_ot_limit, 0x1E),
	CCI_EVENT_EXT_ATTR_ENTRY(si_r_stall_arbit, 0x1F),

	/* Master events */
	CCI_EVENT_EXT_ATTR_ENTRY(mi_r_data_beat_any, 0x0),
	CCI_EVENT_EXT_ATTR_ENTRY(mi_w_data_beat_any, 0x1),
	CCI_EVENT_EXT_ATTR_ENTRY(mi_rrq_stall, 0x2),
	CCI_EVENT_EXT_ATTR_ENTRY(mi_r_data_stall, 0x3),
	CCI_EVENT_EXT_ATTR_ENTRY(mi_wrq_stall, 0x4),
	CCI_EVENT_EXT_ATTR_ENTRY(mi_w_data_stall, 0x5),
	CCI_EVENT_EXT_ATTR_ENTRY(mi_w_resp_stall, 0x6),

	/* Global events */
	CCI5xx_GLOBAL_EVENT_EXT_ATTR_ENTRY(cci_snoop_access_filter_bank_0_1, 0x0),
	CCI5xx_GLOBAL_EVENT_EXT_ATTR_ENTRY(cci_snoop_access_filter_bank_2_3, 0x1),
	CCI5xx_GLOBAL_EVENT_EXT_ATTR_ENTRY(cci_snoop_access_filter_bank_4_5, 0x2),
	CCI5xx_GLOBAL_EVENT_EXT_ATTR_ENTRY(cci_snoop_access_filter_bank_6_7, 0x3),
	CCI5xx_GLOBAL_EVENT_EXT_ATTR_ENTRY(cci_snoop_access_miss_filter_bank_0_1, 0x4),
	CCI5xx_GLOBAL_EVENT_EXT_ATTR_ENTRY(cci_snoop_access_miss_filter_bank_2_3, 0x5),
	CCI5xx_GLOBAL_EVENT_EXT_ATTR_ENTRY(cci_snoop_access_miss_filter_bank_4_5, 0x6),
	CCI5xx_GLOBAL_EVENT_EXT_ATTR_ENTRY(cci_snoop_access_miss_filter_bank_6_7, 0x7),
	CCI5xx_GLOBAL_EVENT_EXT_ATTR_ENTRY(cci_snoop_back_invalidation, 0x8),
	CCI5xx_GLOBAL_EVENT_EXT_ATTR_ENTRY(cci_snoop_stall_alloc_busy, 0x9),
	CCI5xx_GLOBAL_EVENT_EXT_ATTR_ENTRY(cci_snoop_stall_tt_full, 0xA),
	CCI5xx_GLOBAL_EVENT_EXT_ATTR_ENTRY(cci_wrq, 0xB),
	CCI5xx_GLOBAL_EVENT_EXT_ATTR_ENTRY(cci_snoop_cd_hs, 0xC),
	CCI5xx_GLOBAL_EVENT_EXT_ATTR_ENTRY(cci_rq_stall_addr_hazard, 0xD),
	CCI5xx_GLOBAL_EVENT_EXT_ATTR_ENTRY(cci_snoop_rq_stall_tt_full, 0xE),
	CCI5xx_GLOBAL_EVENT_EXT_ATTR_ENTRY(cci_snoop_rq_tzmp1_prot, 0xF),
	NULL
};

static ssize_t cci5xx_pmu_global_event_show(struct device *dev,
				struct device_attribute *attr, char *buf)
{
	struct dev_ext_attribute *eattr = container_of(attr,
					struct dev_ext_attribute, attr);
	/* Global events have single fixed source code */
	return snprintf(buf, PAGE_SIZE, "event=0x%lx,source=0x%x\n",
				(unsigned long)eattr->var, CCI5xx_PORT_GLOBAL);
}

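/*
 * Worked example (added; not in the original driver): the sysfs entry
 * for cci_snoop_back_invalidation prints "event=0x8,source=0xf", which
 * the perf tool assembles into config = (0xf << 5) | 0x8 = 0x1e8 using
 * the source/event format fields above.
 */
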
/*
 * CCI500 provides 8 independent event counters that can count
 * any of the events available.
 * CCI500 PMU event source ids
 *	0x0-0x6 - Slave interfaces
 *	0x8-0xD - Master interfaces
 *	0xf     - Global Events
 *	0x7,0xe - Reserved
 */
static int cci500_validate_hw_event(struct cci_pmu *cci_pmu,
					unsigned long hw_event)
{
	u32 ev_source = CCI5xx_PMU_EVENT_SOURCE(hw_event);
	u32 ev_code = CCI5xx_PMU_EVENT_CODE(hw_event);
	int if_type;

	if (hw_event & ~CCI5xx_PMU_EVENT_MASK)
		return -ENOENT;

	switch (ev_source) {
	case CCI5xx_PORT_S0:
	case CCI5xx_PORT_S1:
	case CCI5xx_PORT_S2:
	case CCI5xx_PORT_S3:
	case CCI5xx_PORT_S4:
	case CCI5xx_PORT_S5:
	case CCI5xx_PORT_S6:
		if_type = CCI_IF_SLAVE;
		break;
	case CCI5xx_PORT_M0:
	case CCI5xx_PORT_M1:
	case CCI5xx_PORT_M2:
	case CCI5xx_PORT_M3:
	case CCI5xx_PORT_M4:
	case CCI5xx_PORT_M5:
		if_type = CCI_IF_MASTER;
		break;
	case CCI5xx_PORT_GLOBAL:
		if_type = CCI_IF_GLOBAL;
		break;
	default:
		return -ENOENT;
	}

	if (ev_code >= cci_pmu->model->event_ranges[if_type].min &&
		ev_code <= cci_pmu->model->event_ranges[if_type].max)
		return hw_event;

	return -ENOENT;
}

/*
 * CCI550 provides 8 independent event counters that can count
 * any of the events available.
 * CCI550 PMU event source ids
 *	0x0-0x6 - Slave interfaces
 *	0x8-0xe - Master interfaces
 *	0xf     - Global Events
 *	0x7     - Reserved
 */
static int cci550_validate_hw_event(struct cci_pmu *cci_pmu,
					unsigned long hw_event)
{
	u32 ev_source = CCI5xx_PMU_EVENT_SOURCE(hw_event);
	u32 ev_code = CCI5xx_PMU_EVENT_CODE(hw_event);
	int if_type;

	if (hw_event & ~CCI5xx_PMU_EVENT_MASK)
		return -ENOENT;

	switch (ev_source) {
	case CCI5xx_PORT_S0:
	case CCI5xx_PORT_S1:
	case CCI5xx_PORT_S2:
	case CCI5xx_PORT_S3:
	case CCI5xx_PORT_S4:
	case CCI5xx_PORT_S5:
	case CCI5xx_PORT_S6:
		if_type = CCI_IF_SLAVE;
		break;
	case CCI5xx_PORT_M0:
	case CCI5xx_PORT_M1:
	case CCI5xx_PORT_M2:
	case CCI5xx_PORT_M3:
	case CCI5xx_PORT_M4:
	case CCI5xx_PORT_M5:
	case CCI5xx_PORT_M6:
		if_type = CCI_IF_MASTER;
		break;
	case CCI5xx_PORT_GLOBAL:
		if_type = CCI_IF_GLOBAL;
		break;
	default:
		return -ENOENT;
	}

	if (ev_code >= cci_pmu->model->event_ranges[if_type].min &&
		ev_code <= cci_pmu->model->event_ranges[if_type].max)
		return hw_event;

	return -ENOENT;
}

#endif	/* CONFIG_ARM_CCI5xx_PMU */

/*
 * Program the CCI PMU counters which have PERF_HES_ARCH set
 * with the event period and mark them ready before we enable
 * the PMU.
 */
static void cci_pmu_sync_counters(struct cci_pmu *cci_pmu)
{
	int i;
	struct cci_pmu_hw_events *cci_hw = &cci_pmu->hw_events;
	DECLARE_BITMAP(mask, HW_CNTRS_MAX);

	bitmap_zero(mask, cci_pmu->num_cntrs);
	for_each_set_bit(i, cci_pmu->hw_events.used_mask, cci_pmu->num_cntrs) {
		struct perf_event *event = cci_hw->events[i];

		if (WARN_ON(!event))
			continue;

		/* Leave the events which are not counting */
		if (event->hw.state & PERF_HES_STOPPED)
			continue;
		if (event->hw.state & PERF_HES_ARCH) {
			set_bit(i, mask);
			event->hw.state &= ~PERF_HES_ARCH;
		}
	}

	pmu_write_counters(cci_pmu, mask);
}

/* Should be called with cci_pmu->hw_events->pmu_lock held */
static void __cci_pmu_enable_nosync(struct cci_pmu *cci_pmu)
{
	u32 val;

	/* Enable all the PMU counters. */
	val = readl_relaxed(cci_pmu->ctrl_base + CCI_PMCR) | CCI_PMCR_CEN;
	writel(val, cci_pmu->ctrl_base + CCI_PMCR);
}

/* Should be called with cci_pmu->hw_events->pmu_lock held */
static void __cci_pmu_enable_sync(struct cci_pmu *cci_pmu)
{
	cci_pmu_sync_counters(cci_pmu);
	__cci_pmu_enable_nosync(cci_pmu);
}

/* Should be called with cci_pmu->hw_events->pmu_lock held */
static void __cci_pmu_disable(struct cci_pmu *cci_pmu)
{
	u32 val;

	/* Disable all the PMU counters. */
	val = readl_relaxed(cci_pmu->ctrl_base + CCI_PMCR) & ~CCI_PMCR_CEN;
	writel(val, cci_pmu->ctrl_base + CCI_PMCR);
}

static ssize_t cci_pmu_format_show(struct device *dev,
			struct device_attribute *attr, char *buf)
{
	struct dev_ext_attribute *eattr = container_of(attr,
				struct dev_ext_attribute, attr);
	return snprintf(buf, PAGE_SIZE, "%s\n", (char *)eattr->var);
}

static ssize_t cci_pmu_event_show(struct device *dev,
			struct device_attribute *attr, char *buf)
{
	struct dev_ext_attribute *eattr = container_of(attr,
				struct dev_ext_attribute, attr);
	/* source parameter is mandatory for normal PMU events */
	return snprintf(buf, PAGE_SIZE, "source=?,event=0x%lx\n",
				(unsigned long)eattr->var);
}

static int pmu_is_valid_counter(struct cci_pmu *cci_pmu, int idx)
{
	return 0 <= idx && idx <= CCI_PMU_CNTR_LAST(cci_pmu);
}

static u32 pmu_read_register(struct cci_pmu *cci_pmu, int idx, unsigned int offset)
{
	return readl_relaxed(cci_pmu->base +
			     CCI_PMU_CNTR_BASE(cci_pmu->model, idx) + offset);
}

static void pmu_write_register(struct cci_pmu *cci_pmu, u32 value,
			       int idx, unsigned int offset)
{
	writel_relaxed(value, cci_pmu->base +
		       CCI_PMU_CNTR_BASE(cci_pmu->model, idx) + offset);
}

static void pmu_disable_counter(struct cci_pmu *cci_pmu, int idx)
{
	pmu_write_register(cci_pmu, 0, idx, CCI_PMU_CNTR_CTRL);
}

static void pmu_enable_counter(struct cci_pmu *cci_pmu, int idx)
{
	pmu_write_register(cci_pmu, 1, idx, CCI_PMU_CNTR_CTRL);
}

static bool __maybe_unused
pmu_counter_is_enabled(struct cci_pmu *cci_pmu, int idx)
{
	return (pmu_read_register(cci_pmu, idx, CCI_PMU_CNTR_CTRL) & 0x1) != 0;
}

static void pmu_set_event(struct cci_pmu *cci_pmu, int idx, unsigned long event)
{
	pmu_write_register(cci_pmu, event, idx, CCI_PMU_EVT_SEL);
}

/*
 * For all counters on the CCI-PMU, disable any 'enabled' counters,
 * saving the changed counters in the mask, so that we can restore
 * it later using pmu_restore_counters. The mask is private to the
 * caller. We cannot rely on the used_mask maintained by the CCI_PMU
 * as it only tells us if the counter is assigned to a perf_event or not.
 * The state of the perf_event cannot be locked by the PMU layer, hence
 * we check the individual counter status (which can be locked by
 * cci_pmu->hw_events->pmu_lock).
 *
 * @mask should be initialised to empty by the caller.
 */
static void __maybe_unused
pmu_save_counters(struct cci_pmu *cci_pmu, unsigned long *mask)
{
	int i;

	for (i = 0; i < cci_pmu->num_cntrs; i++) {
		if (pmu_counter_is_enabled(cci_pmu, i)) {
			set_bit(i, mask);
			pmu_disable_counter(cci_pmu, i);
		}
	}
}

/*
 * Restore the status of the counters. Reversal of the pmu_save_counters().
 * For each counter set in the mask, enable the counter back.
 */
static void __maybe_unused
pmu_restore_counters(struct cci_pmu *cci_pmu, unsigned long *mask)
{
	int i;

	for_each_set_bit(i, mask, cci_pmu->num_cntrs)
		pmu_enable_counter(cci_pmu, i);
}

/*
 * Returns the number of programmable counters actually implemented
 * by the CCI.
 */
static u32 pmu_get_max_counters(struct cci_pmu *cci_pmu)
{
	return (readl_relaxed(cci_pmu->ctrl_base + CCI_PMCR) &
		CCI_PMCR_NCNT_MASK) >> CCI_PMCR_NCNT_SHIFT;
}

static int pmu_get_event_idx(struct cci_pmu_hw_events *hw, struct perf_event *event)
{
	struct cci_pmu *cci_pmu = to_cci_pmu(event->pmu);
	unsigned long cci_event = event->hw.config_base;
	int idx;

	if (cci_pmu->model->get_event_idx)
		return cci_pmu->model->get_event_idx(cci_pmu, hw, cci_event);

	/* Generic code to find an unused idx from the mask */
	for (idx = 0; idx <= CCI_PMU_CNTR_LAST(cci_pmu); idx++)
		if (!test_and_set_bit(idx, hw->used_mask))
			return idx;

	/* No counters available */
	return -EAGAIN;
}

static int pmu_map_event(struct perf_event *event)
{
	struct cci_pmu *cci_pmu = to_cci_pmu(event->pmu);

	if (event->attr.type < PERF_TYPE_MAX ||
			!cci_pmu->model->validate_hw_event)
		return -ENOENT;

	return cci_pmu->model->validate_hw_event(cci_pmu, event->attr.config);
}

static int pmu_request_irq(struct cci_pmu *cci_pmu, irq_handler_t handler)
{
	int i;
	struct platform_device *pmu_device = cci_pmu->plat_device;

	if (unlikely(!pmu_device))
		return -ENODEV;

	if (cci_pmu->nr_irqs < 1) {
		dev_err(&pmu_device->dev, "no irqs for CCI PMUs defined\n");
		return -ENODEV;
	}

	/*
	 * Register all available CCI PMU interrupts. In the interrupt handler
	 * we iterate over the counters checking for interrupt source (the
	 * overflowing counter) and clear it.
	 *
	 * This should allow handling of non-unique interrupt for the counters.
	 */
	for (i = 0; i < cci_pmu->nr_irqs; i++) {
		int err = request_irq(cci_pmu->irqs[i], handler, IRQF_SHARED,
				"arm-cci-pmu", cci_pmu);
		if (err) {
			dev_err(&pmu_device->dev, "unable to request IRQ%d for ARM CCI PMU counters\n",
				cci_pmu->irqs[i]);
			return err;
		}

		set_bit(i, &cci_pmu->active_irqs);
	}

	return 0;
}

static void pmu_free_irq(struct cci_pmu *cci_pmu)
{
	int i;

	for (i = 0; i < cci_pmu->nr_irqs; i++) {
		if (!test_and_clear_bit(i, &cci_pmu->active_irqs))
			continue;

		free_irq(cci_pmu->irqs[i], cci_pmu);
	}
}

static u32 pmu_read_counter(struct perf_event *event)
{
	struct cci_pmu *cci_pmu = to_cci_pmu(event->pmu);
	struct hw_perf_event *hw_counter = &event->hw;
	int idx = hw_counter->idx;
	u32 value;

	if (unlikely(!pmu_is_valid_counter(cci_pmu, idx))) {
		dev_err(&cci_pmu->plat_device->dev, "Invalid CCI PMU counter %d\n", idx);
		return 0;
	}
	value = pmu_read_register(cci_pmu, idx, CCI_PMU_CNTR);

	return value;
}

static void pmu_write_counter(struct cci_pmu *cci_pmu, u32 value, int idx)
{
	pmu_write_register(cci_pmu, value, idx, CCI_PMU_CNTR);
}

static void __pmu_write_counters(struct cci_pmu *cci_pmu, unsigned long *mask)
{
	int i;
	struct cci_pmu_hw_events *cci_hw = &cci_pmu->hw_events;

	for_each_set_bit(i, mask, cci_pmu->num_cntrs) {
		struct perf_event *event = cci_hw->events[i];

		if (WARN_ON(!event))
			continue;
		pmu_write_counter(cci_pmu, local64_read(&event->hw.prev_count), i);
	}
}

static void pmu_write_counters(struct cci_pmu *cci_pmu, unsigned long *mask)
{
	if (cci_pmu->model->write_counters)
		cci_pmu->model->write_counters(cci_pmu, mask);
	else
		__pmu_write_counters(cci_pmu, mask);
}

#ifdef CONFIG_ARM_CCI5xx_PMU

/*
 * CCI-500/CCI-550 has advanced power saving policies, which could gate the
 * clocks to the PMU counters, which makes the writes to them ineffective.
 * The only way to write to those counters is when the global counters
 * are enabled and the particular counter is enabled.
 *
 * So we do the following :
 *
 * 1) Disable all the PMU counters, saving their current state
 * 2) Enable the global PMU profiling, now that all counters are
 *    disabled.
 *
 * For each counter to be programmed, repeat steps 3-7:
 *
 * 3) Write an invalid event code to the event control register for the
 *    counter, so that the counters are not modified.
 * 4) Enable the counter control for the counter.
 * 5) Set the counter value
 * 6) Disable the counter
 * 7) Restore the event in the target counter
 *
 * 8) Disable the global PMU.
 * 9) Restore the status of the rest of the counters.
 *
 * We choose an event which for CCI-5xx is guaranteed not to count.
 * We use the highest possible event code (0x1f) for the master interface 0.
 */
#define CCI5xx_INVALID_EVENT	((CCI5xx_PORT_M0 << CCI5xx_PMU_EVENT_SOURCE_SHIFT) | \
				 (CCI5xx_PMU_EVENT_CODE_MASK << CCI5xx_PMU_EVENT_CODE_SHIFT))
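/*
 * Worked value (added; not in the original driver): CCI5xx_INVALID_EVENT
 * expands to (0x8 << 5) | 0x1f = 0x11f. Master interface event codes only
 * go up to CCI5xx_MASTER_PORT_MAX_EV (0x06), so code 0x1f on M0 is
 * guaranteed never to count.
 */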
static void cci5xx_pmu_write_counters(struct cci_pmu *cci_pmu, unsigned long *mask)
{
	int i;
	DECLARE_BITMAP(saved_mask, HW_CNTRS_MAX);

	bitmap_zero(saved_mask, cci_pmu->num_cntrs);
	pmu_save_counters(cci_pmu, saved_mask);

	/*
	 * Now that all the counters are disabled, we can safely turn the PMU on,
	 * without syncing the status of the counters
	 */
	__cci_pmu_enable_nosync(cci_pmu);

	for_each_set_bit(i, mask, cci_pmu->num_cntrs) {
		struct perf_event *event = cci_pmu->hw_events.events[i];

		if (WARN_ON(!event))
			continue;

		pmu_set_event(cci_pmu, i, CCI5xx_INVALID_EVENT);
		pmu_enable_counter(cci_pmu, i);
		pmu_write_counter(cci_pmu, local64_read(&event->hw.prev_count), i);
		pmu_disable_counter(cci_pmu, i);
		pmu_set_event(cci_pmu, i, event->hw.config_base);
	}

	__cci_pmu_disable(cci_pmu);

	pmu_restore_counters(cci_pmu, saved_mask);
}

#endif	/* CONFIG_ARM_CCI5xx_PMU */

static u64 pmu_event_update(struct perf_event *event)
{
	struct hw_perf_event *hwc = &event->hw;
	u64 delta, prev_raw_count, new_raw_count;

	do {
		prev_raw_count = local64_read(&hwc->prev_count);
		new_raw_count = pmu_read_counter(event);
	} while (local64_cmpxchg(&hwc->prev_count, prev_raw_count,
		 new_raw_count) != prev_raw_count);

	delta = (new_raw_count - prev_raw_count) & CCI_PMU_CNTR_MASK;

	local64_add(delta, &event->count);

	return new_raw_count;
}

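/*
 * Worked example (added; not in the original driver): the mask makes the
 * delta wrap-safe for the 32-bit counters. If prev_count is 0xfffffff0
 * and the counter wraps to 0x10, then (0x10 - 0xfffffff0) &
 * CCI_PMU_CNTR_MASK = 0x20, i.e. 32 events, the correct distance.
 */
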
static void pmu_read(struct perf_event *event)
{
	pmu_event_update(event);
}

static void pmu_event_set_period(struct perf_event *event)
{
	struct hw_perf_event *hwc = &event->hw;
	/*
	 * The CCI PMU counters have a period of 2^32. To account for the
	 * possibility of extreme interrupt latency we program for a period of
	 * half that. Hopefully we can handle the interrupt before another 2^31
	 * events occur and the counter overtakes its previous value.
	 */
	u64 val = 1ULL << 31;
	local64_set(&hwc->prev_count, val);

	/*
	 * CCI PMU uses PERF_HES_ARCH to keep track of the counters, whose
	 * values need to be synced with the s/w state before the PMU is
	 * enabled.
	 * Mark this counter for sync.
	 */
	hwc->state |= PERF_HES_ARCH;
}

static irqreturn_t pmu_handle_irq(int irq_num, void *dev)
{
	unsigned long flags;
	struct cci_pmu *cci_pmu = dev;
	struct cci_pmu_hw_events *events = &cci_pmu->hw_events;
	int idx, handled = IRQ_NONE;

	raw_spin_lock_irqsave(&events->pmu_lock, flags);

	/* Disable the PMU while we walk through the counters */
	__cci_pmu_disable(cci_pmu);
	/*
	 * Iterate over counters and update the corresponding perf events.
	 * This should work regardless of whether we have per-counter overflow
	 * interrupt or a combined overflow interrupt.
	 */
	for (idx = 0; idx <= CCI_PMU_CNTR_LAST(cci_pmu); idx++) {
		struct perf_event *event = events->events[idx];

		if (!event)
			continue;

		/* Did this counter overflow? */
		if (!(pmu_read_register(cci_pmu, idx, CCI_PMU_OVRFLW) &
		      CCI_PMU_OVRFLW_FLAG))
			continue;

		pmu_write_register(cci_pmu, CCI_PMU_OVRFLW_FLAG, idx,
				   CCI_PMU_OVRFLW);

		pmu_event_update(event);
		pmu_event_set_period(event);
		handled = IRQ_HANDLED;
	}

	/* Enable the PMU and sync possibly overflowed counters */
	__cci_pmu_enable_sync(cci_pmu);
	raw_spin_unlock_irqrestore(&events->pmu_lock, flags);

	return IRQ_RETVAL(handled);
}

static int cci_pmu_get_hw(struct cci_pmu *cci_pmu)
{
	int ret = pmu_request_irq(cci_pmu, pmu_handle_irq);
	if (ret) {
		pmu_free_irq(cci_pmu);
		return ret;
	}
	return 0;
}

static void cci_pmu_put_hw(struct cci_pmu *cci_pmu)
{
	pmu_free_irq(cci_pmu);
}

static void hw_perf_event_destroy(struct perf_event *event)
{
	struct cci_pmu *cci_pmu = to_cci_pmu(event->pmu);
	atomic_t *active_events = &cci_pmu->active_events;
	struct mutex *reserve_mutex = &cci_pmu->reserve_mutex;

	if (atomic_dec_and_mutex_lock(active_events, reserve_mutex)) {
		cci_pmu_put_hw(cci_pmu);
		mutex_unlock(reserve_mutex);
	}
}

static void cci_pmu_enable(struct pmu *pmu)
{
	struct cci_pmu *cci_pmu = to_cci_pmu(pmu);
	struct cci_pmu_hw_events *hw_events = &cci_pmu->hw_events;
	int enabled = bitmap_weight(hw_events->used_mask, cci_pmu->num_cntrs);
	unsigned long flags;

	if (!enabled)
		return;

	raw_spin_lock_irqsave(&hw_events->pmu_lock, flags);
	__cci_pmu_enable_sync(cci_pmu);
	raw_spin_unlock_irqrestore(&hw_events->pmu_lock, flags);
}

static void cci_pmu_disable(struct pmu *pmu)
{
	struct cci_pmu *cci_pmu = to_cci_pmu(pmu);
	struct cci_pmu_hw_events *hw_events = &cci_pmu->hw_events;
	unsigned long flags;

	raw_spin_lock_irqsave(&hw_events->pmu_lock, flags);
	__cci_pmu_disable(cci_pmu);
	raw_spin_unlock_irqrestore(&hw_events->pmu_lock, flags);
}

/*
 * Check if the idx represents a non-programmable counter.
 * All the fixed event counters are mapped before the programmable
 * counters.
 */
static bool pmu_fixed_hw_idx(struct cci_pmu *cci_pmu, int idx)
{
	return (idx >= 0) && (idx < cci_pmu->model->fixed_hw_cntrs);
}
1133 | static void cci_pmu_start(struct perf_event *event, int pmu_flags) | |
1134 | { | |
1135 | struct cci_pmu *cci_pmu = to_cci_pmu(event->pmu); | |
1136 | struct cci_pmu_hw_events *hw_events = &cci_pmu->hw_events; | |
1137 | struct hw_perf_event *hwc = &event->hw; | |
1138 | int idx = hwc->idx; | |
1139 | unsigned long flags; | |
1140 | ||
1141 | /* | |
1142 | * To handle interrupt latency, we always reprogram the period | |
1143 | * regardlesss of PERF_EF_RELOAD. | |
1144 | */ | |
1145 | if (pmu_flags & PERF_EF_RELOAD) | |
1146 | WARN_ON_ONCE(!(hwc->state & PERF_HES_UPTODATE)); | |
1147 | ||
1148 | hwc->state = 0; | |
1149 | ||
1150 | if (unlikely(!pmu_is_valid_counter(cci_pmu, idx))) { | |
1151 | dev_err(&cci_pmu->plat_device->dev, "Invalid CCI PMU counter %d\n", idx); | |
1152 | return; | |
1153 | } | |
1154 | ||
1155 | raw_spin_lock_irqsave(&hw_events->pmu_lock, flags); | |
1156 | ||
1157 | /* Configure the counter unless you are counting a fixed event */ | |
1158 | if (!pmu_fixed_hw_idx(cci_pmu, idx)) | |
1159 | pmu_set_event(cci_pmu, idx, hwc->config_base); | |
1160 | ||
1161 | pmu_event_set_period(event); | |
1162 | pmu_enable_counter(cci_pmu, idx); | |
1163 | ||
1164 | raw_spin_unlock_irqrestore(&hw_events->pmu_lock, flags); | |
1165 | } | |
1166 | ||
1167 | static void cci_pmu_stop(struct perf_event *event, int pmu_flags) | |
1168 | { | |
1169 | struct cci_pmu *cci_pmu = to_cci_pmu(event->pmu); | |
1170 | struct hw_perf_event *hwc = &event->hw; | |
1171 | int idx = hwc->idx; | |
1172 | ||
1173 | if (hwc->state & PERF_HES_STOPPED) | |
1174 | return; | |
1175 | ||
1176 | if (unlikely(!pmu_is_valid_counter(cci_pmu, idx))) { | |
1177 | dev_err(&cci_pmu->plat_device->dev, "Invalid CCI PMU counter %d\n", idx); | |
1178 | return; | |
1179 | } | |
1180 | ||
1181 | /* | |
1182 | * We always reprogram the counter, so ignore PERF_EF_UPDATE. See | |
1183 | * cci_pmu_start() | |
1184 | */ | |
1185 | pmu_disable_counter(cci_pmu, idx); | |
1186 | pmu_event_update(event); | |
1187 | hwc->state |= PERF_HES_STOPPED | PERF_HES_UPTODATE; | |
1188 | } | |

static int cci_pmu_add(struct perf_event *event, int flags)
{
	struct cci_pmu *cci_pmu = to_cci_pmu(event->pmu);
	struct cci_pmu_hw_events *hw_events = &cci_pmu->hw_events;
	struct hw_perf_event *hwc = &event->hw;
	int idx;

	/* If we don't have a space for the counter then finish early. */
	idx = pmu_get_event_idx(hw_events, event);
	if (idx < 0)
		return idx;

	event->hw.idx = idx;
	hw_events->events[idx] = event;

	hwc->state = PERF_HES_STOPPED | PERF_HES_UPTODATE;
	if (flags & PERF_EF_START)
		cci_pmu_start(event, PERF_EF_RELOAD);

	/* Propagate our changes to the userspace mapping. */
	perf_event_update_userpage(event);

	return 0;
}

static void cci_pmu_del(struct perf_event *event, int flags)
{
	struct cci_pmu *cci_pmu = to_cci_pmu(event->pmu);
	struct cci_pmu_hw_events *hw_events = &cci_pmu->hw_events;
	struct hw_perf_event *hwc = &event->hw;
	int idx = hwc->idx;

	cci_pmu_stop(event, PERF_EF_UPDATE);
	hw_events->events[idx] = NULL;
	clear_bit(idx, hw_events->used_mask);

	perf_event_update_userpage(event);
}

static int validate_event(struct pmu *cci_pmu,
			  struct cci_pmu_hw_events *hw_events,
			  struct perf_event *event)
{
	if (is_software_event(event))
		return 1;

	/*
	 * Reject groups spanning multiple HW PMUs (e.g. CPU + CCI). The
	 * core perf code won't check that the pmu->ctx == leader->ctx
	 * until after pmu->event_init(event).
	 */
	if (event->pmu != cci_pmu)
		return 0;

	if (event->state < PERF_EVENT_STATE_OFF)
		return 1;

	if (event->state == PERF_EVENT_STATE_OFF && !event->attr.enable_on_exec)
		return 1;

	return pmu_get_event_idx(hw_events, event) >= 0;
}

static int validate_group(struct perf_event *event)
{
	struct perf_event *sibling, *leader = event->group_leader;
	struct cci_pmu *cci_pmu = to_cci_pmu(event->pmu);
	unsigned long mask[BITS_TO_LONGS(HW_CNTRS_MAX)];
	struct cci_pmu_hw_events fake_pmu = {
		/*
		 * Initialise the fake PMU. We only need to populate the
		 * used_mask for the purposes of validation.
		 */
		.used_mask = mask,
	};
	memset(mask, 0, BITS_TO_LONGS(cci_pmu->num_cntrs) * sizeof(unsigned long));

	if (!validate_event(event->pmu, &fake_pmu, leader))
		return -EINVAL;

	for_each_sibling_event(sibling, leader) {
		if (!validate_event(event->pmu, &fake_pmu, sibling))
			return -EINVAL;
	}

	if (!validate_event(event->pmu, &fake_pmu, event))
		return -EINVAL;

	return 0;
}
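
/*
 * Note (added for clarity; not in the original driver): validate_group()
 * dry-runs counter allocation against fake_pmu.used_mask, mirroring what
 * cci_pmu_add() will later do for each group member, so a group that can
 * never be scheduled together on the hardware is rejected at
 * event_init() time.
 */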

static int __hw_perf_event_init(struct perf_event *event)
{
	struct hw_perf_event *hwc = &event->hw;
	int mapping;

	mapping = pmu_map_event(event);

	if (mapping < 0) {
		pr_debug("event %x:%llx not supported\n", event->attr.type,
			 event->attr.config);
		return mapping;
	}

	/*
	 * We don't assign an index until we actually place the event onto
	 * hardware. Use -1 to signify that we haven't decided where to put it
	 * yet.
	 */
	hwc->idx = -1;
	hwc->config_base = 0;
	hwc->config = 0;
	hwc->event_base = 0;

	/*
	 * Store the event encoding into the config_base field.
	 */
	hwc->config_base |= (unsigned long)mapping;

	if (event->group_leader != event) {
		if (validate_group(event) != 0)
			return -EINVAL;
	}

	return 0;
}

static int cci_pmu_event_init(struct perf_event *event)
{
	struct cci_pmu *cci_pmu = to_cci_pmu(event->pmu);
	atomic_t *active_events = &cci_pmu->active_events;
	int err = 0;

	if (event->attr.type != event->pmu->type)
		return -ENOENT;

	/* Shared by all CPUs, no meaningful state to sample */
	if (is_sampling_event(event) || event->attach_state & PERF_ATTACH_TASK)
		return -EOPNOTSUPP;

	/*
	 * Following the example set by other "uncore" PMUs, we accept any CPU
	 * and rewrite its affinity dynamically rather than having perf core
	 * handle cpu == -1 and pid == -1 for this case.
	 *
	 * The perf core will pin online CPUs for the duration of this call and
	 * the event being installed into its context, so the PMU's CPU can't
	 * change under our feet.
	 */
	if (event->cpu < 0)
		return -EINVAL;
	event->cpu = cci_pmu->cpu;

	event->destroy = hw_perf_event_destroy;
	if (!atomic_inc_not_zero(active_events)) {
		mutex_lock(&cci_pmu->reserve_mutex);
		if (atomic_read(active_events) == 0)
			err = cci_pmu_get_hw(cci_pmu);
		if (!err)
			atomic_inc(active_events);
		mutex_unlock(&cci_pmu->reserve_mutex);
	}
	if (err)
		return err;

	err = __hw_perf_event_init(event);
	if (err)
		hw_perf_event_destroy(event);

	return err;
}

static ssize_t pmu_cpumask_attr_show(struct device *dev,
				     struct device_attribute *attr, char *buf)
{
	struct pmu *pmu = dev_get_drvdata(dev);
	struct cci_pmu *cci_pmu = to_cci_pmu(pmu);

	return cpumap_print_to_pagebuf(true, buf, cpumask_of(cci_pmu->cpu));
}

static struct device_attribute pmu_cpumask_attr =
	__ATTR(cpumask, S_IRUGO, pmu_cpumask_attr_show, NULL);

static struct attribute *pmu_attrs[] = {
	&pmu_cpumask_attr.attr,
	NULL,
};

static struct attribute_group pmu_attr_group = {
	.attrs = pmu_attrs,
};

static struct attribute_group pmu_format_attr_group = {
	.name = "format",
	.attrs = NULL,		/* Filled in cci_pmu_init_attrs */
};

static struct attribute_group pmu_event_attr_group = {
	.name = "events",
	.attrs = NULL,		/* Filled in cci_pmu_init_attrs */
};

static const struct attribute_group *pmu_attr_groups[] = {
	&pmu_attr_group,
	&pmu_format_attr_group,
	&pmu_event_attr_group,
	NULL
};

static int cci_pmu_init(struct cci_pmu *cci_pmu, struct platform_device *pdev)
{
	const struct cci_pmu_model *model = cci_pmu->model;
	char *name = model->name;
	u32 num_cntrs;

	if (WARN_ON(model->num_hw_cntrs > NUM_HW_CNTRS_MAX))
		return -EINVAL;
	if (WARN_ON(model->fixed_hw_cntrs > FIXED_HW_CNTRS_MAX))
		return -EINVAL;

	pmu_event_attr_group.attrs = model->event_attrs;
	pmu_format_attr_group.attrs = model->format_attrs;

	cci_pmu->pmu = (struct pmu) {
		.module		= THIS_MODULE,
		.name		= cci_pmu->model->name,
		.task_ctx_nr	= perf_invalid_context,
		.pmu_enable	= cci_pmu_enable,
		.pmu_disable	= cci_pmu_disable,
		.event_init	= cci_pmu_event_init,
		.add		= cci_pmu_add,
		.del		= cci_pmu_del,
		.start		= cci_pmu_start,
		.stop		= cci_pmu_stop,
		.read		= pmu_read,
		.attr_groups	= pmu_attr_groups,
		.capabilities	= PERF_PMU_CAP_NO_EXCLUDE,
	};

	cci_pmu->plat_device = pdev;
	num_cntrs = pmu_get_max_counters(cci_pmu);
	if (num_cntrs > cci_pmu->model->num_hw_cntrs) {
		dev_warn(&pdev->dev,
			"PMU implements more counters(%d) than supported by"
			" the model(%d), truncated.",
			num_cntrs, cci_pmu->model->num_hw_cntrs);
		num_cntrs = cci_pmu->model->num_hw_cntrs;
	}
	cci_pmu->num_cntrs = num_cntrs + cci_pmu->model->fixed_hw_cntrs;

	return perf_pmu_register(&cci_pmu->pmu, name, -1);
}

static int cci_pmu_offline_cpu(unsigned int cpu)
{
	int target;

	if (!g_cci_pmu || cpu != g_cci_pmu->cpu)
		return 0;

	target = cpumask_any_but(cpu_online_mask, cpu);
	if (target >= nr_cpu_ids)
		return 0;

	perf_pmu_migrate_context(&g_cci_pmu->pmu, cpu, target);
	g_cci_pmu->cpu = target;
	return 0;
}
1459 | ||
984e9cf1 | 1460 | static __maybe_unused struct cci_pmu_model cci_pmu_models[] = { |
3de6be7a RM |
1461 | #ifdef CONFIG_ARM_CCI400_PMU |
1462 | [CCI400_R0] = { | |
1463 | .name = "CCI_400", | |
1201a5a2 KC |
1464 | .fixed_hw_cntrs = FIXED_HW_CNTRS_CII_4XX, /* Cycle counter */ |
1465 | .num_hw_cntrs = NUM_HW_CNTRS_CII_4XX, | |
3de6be7a RM |
1466 | .cntr_size = SZ_4K, |
1467 | .format_attrs = cci400_pmu_format_attrs, | |
1468 | .event_attrs = cci400_r0_pmu_event_attrs, | |
1469 | .event_ranges = { | |
1470 | [CCI_IF_SLAVE] = { | |
1471 | CCI400_R0_SLAVE_PORT_MIN_EV, | |
1472 | CCI400_R0_SLAVE_PORT_MAX_EV, | |
1473 | }, | |
1474 | [CCI_IF_MASTER] = { | |
1475 | CCI400_R0_MASTER_PORT_MIN_EV, | |
1476 | CCI400_R0_MASTER_PORT_MAX_EV, | |
1477 | }, | |
1478 | }, | |
1479 | .validate_hw_event = cci400_validate_hw_event, | |
1480 | .get_event_idx = cci400_get_event_idx, | |
1481 | }, | |
1482 | [CCI400_R1] = { | |
1483 | .name = "CCI_400_r1", | |
1201a5a2 KC |
1484 | .fixed_hw_cntrs = FIXED_HW_CNTRS_CII_4XX, /* Cycle counter */ |
1485 | .num_hw_cntrs = NUM_HW_CNTRS_CII_4XX, | |
3de6be7a RM |
1486 | .cntr_size = SZ_4K, |
1487 | .format_attrs = cci400_pmu_format_attrs, | |
1488 | .event_attrs = cci400_r1_pmu_event_attrs, | |
1489 | .event_ranges = { | |
1490 | [CCI_IF_SLAVE] = { | |
1491 | CCI400_R1_SLAVE_PORT_MIN_EV, | |
1492 | CCI400_R1_SLAVE_PORT_MAX_EV, | |
1493 | }, | |
1494 | [CCI_IF_MASTER] = { | |
1495 | CCI400_R1_MASTER_PORT_MIN_EV, | |
1496 | CCI400_R1_MASTER_PORT_MAX_EV, | |
1497 | }, | |
1498 | }, | |
1499 | .validate_hw_event = cci400_validate_hw_event, | |
1500 | .get_event_idx = cci400_get_event_idx, | |
1501 | }, | |
1502 | #endif | |
1503 | #ifdef CONFIG_ARM_CCI5xx_PMU | |
1504 | [CCI500_R0] = { | |
1505 | .name = "CCI_500", | |
1201a5a2 KC |
1506 | .fixed_hw_cntrs = FIXED_HW_CNTRS_CII_5XX, |
1507 | .num_hw_cntrs = NUM_HW_CNTRS_CII_5XX, | |
3de6be7a RM |
1508 | .cntr_size = SZ_64K, |
1509 | .format_attrs = cci5xx_pmu_format_attrs, | |
1510 | .event_attrs = cci5xx_pmu_event_attrs, | |
1511 | .event_ranges = { | |
1512 | [CCI_IF_SLAVE] = { | |
1513 | CCI5xx_SLAVE_PORT_MIN_EV, | |
1514 | CCI5xx_SLAVE_PORT_MAX_EV, | |
1515 | }, | |
1516 | [CCI_IF_MASTER] = { | |
1517 | CCI5xx_MASTER_PORT_MIN_EV, | |
1518 | CCI5xx_MASTER_PORT_MAX_EV, | |
1519 | }, | |
1520 | [CCI_IF_GLOBAL] = { | |
1521 | CCI5xx_GLOBAL_PORT_MIN_EV, | |
1522 | CCI5xx_GLOBAL_PORT_MAX_EV, | |
1523 | }, | |
1524 | }, | |
1525 | .validate_hw_event = cci500_validate_hw_event, | |
1526 | .write_counters = cci5xx_pmu_write_counters, | |
1527 | }, | |
1528 | [CCI550_R0] = { | |
1529 | .name = "CCI_550", | |
1201a5a2 KC |
1530 | .fixed_hw_cntrs = FIXED_HW_CNTRS_CII_5XX, |
1531 | .num_hw_cntrs = NUM_HW_CNTRS_CII_5XX, | |
3de6be7a RM |
1532 | .cntr_size = SZ_64K, |
1533 | .format_attrs = cci5xx_pmu_format_attrs, | |
1534 | .event_attrs = cci5xx_pmu_event_attrs, | |
1535 | .event_ranges = { | |
1536 | [CCI_IF_SLAVE] = { | |
1537 | CCI5xx_SLAVE_PORT_MIN_EV, | |
1538 | CCI5xx_SLAVE_PORT_MAX_EV, | |
1539 | }, | |
1540 | [CCI_IF_MASTER] = { | |
1541 | CCI5xx_MASTER_PORT_MIN_EV, | |
1542 | CCI5xx_MASTER_PORT_MAX_EV, | |
1543 | }, | |
1544 | [CCI_IF_GLOBAL] = { | |
1545 | CCI5xx_GLOBAL_PORT_MIN_EV, | |
1546 | CCI5xx_GLOBAL_PORT_MAX_EV, | |
1547 | }, | |
1548 | }, | |
1549 | .validate_hw_event = cci550_validate_hw_event, | |
1550 | .write_counters = cci5xx_pmu_write_counters, | |
1551 | }, | |
1552 | #endif | |
1553 | }; | |
1554 | ||
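| /* | |
| * DT match table. The revision-less "arm,cci-400-pmu" entry is | |
| * deprecated: its .data is NULL, so the revision must be probed | |
| * from the CCI ID registers, which requires secure access (see | |
| * cci_pmu_alloc()). | |
| */ | |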
1555 | static const struct of_device_id arm_cci_pmu_matches[] = { | |
1556 | #ifdef CONFIG_ARM_CCI400_PMU | |
1557 | { | |
1558 | .compatible = "arm,cci-400-pmu", | |
1559 | .data = NULL, | |
1560 | }, | |
1561 | { | |
1562 | .compatible = "arm,cci-400-pmu,r0", | |
1563 | .data = &cci_pmu_models[CCI400_R0], | |
1564 | }, | |
1565 | { | |
1566 | .compatible = "arm,cci-400-pmu,r1", | |
1567 | .data = &cci_pmu_models[CCI400_R1], | |
1568 | }, | |
1569 | #endif | |
1570 | #ifdef CONFIG_ARM_CCI5xx_PMU | |
1571 | { | |
1572 | .compatible = "arm,cci-500-pmu,r0", | |
1573 | .data = &cci_pmu_models[CCI500_R0], | |
1574 | }, | |
1575 | { | |
1576 | .compatible = "arm,cci-550-pmu,r0", | |
1577 | .data = &cci_pmu_models[CCI550_R0], | |
1578 | }, | |
1579 | #endif | |
1580 | {}, | |
1581 | }; | |
8b0c93c2 | 1582 | MODULE_DEVICE_TABLE(of, arm_cci_pmu_matches); |
3de6be7a | 1583 | |
3de6be7a RM |
1584 | static bool is_duplicate_irq(int irq, int *irqs, int nr_irqs) |
1585 | { | |
1586 | int i; | |
1587 | ||
1588 | for (i = 0; i < nr_irqs; i++) | |
1589 | if (irq == irqs[i]) | |
1590 | return true; | |
1591 | ||
1592 | return false; | |
1593 | } | |
1594 | ||
32837954 | 1595 | static struct cci_pmu *cci_pmu_alloc(struct device *dev) |
3de6be7a RM |
1596 | { |
1597 | struct cci_pmu *cci_pmu; | |
1598 | const struct cci_pmu_model *model; | |
1599 | ||
1600 | /* | |
1601 | * All allocations are devm_*, so we don't have to free them | |
1602 | * explicitly on error; they are released automatically when | |
1603 | * the driver detaches. | |
1604 | */ | |
e9c112c9 RM |
1605 | cci_pmu = devm_kzalloc(dev, sizeof(*cci_pmu), GFP_KERNEL); |
1606 | if (!cci_pmu) | |
1607 | return ERR_PTR(-ENOMEM); | |
1608 | ||
1609 | cci_pmu->ctrl_base = *(void __iomem **)dev->platform_data; | |
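| /* | |
| * The parent CCI bus driver hands us its ioremapped control | |
| * register base via platform_data. | |
| */ | |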
1610 | ||
32837954 RM |
1611 | model = of_device_get_match_data(dev); |
1612 | if (!model) { | |
1613 | dev_warn(dev, | |
1614 | "DEPRECATED compatible property, requires secure access to CCI registers"); | |
e9c112c9 | 1615 | model = probe_cci_model(cci_pmu); |
32837954 | 1616 | } |
3de6be7a | 1617 | if (!model) { |
32837954 | 1618 | dev_warn(dev, "CCI PMU version not supported\n"); |
3de6be7a RM |
1619 | return ERR_PTR(-ENODEV); |
1620 | } | |
1621 | ||
3de6be7a | 1622 | cci_pmu->model = model; |
32837954 | 1623 | cci_pmu->irqs = devm_kcalloc(dev, CCI_PMU_MAX_HW_CNTRS(model), |
3de6be7a RM |
1624 | sizeof(*cci_pmu->irqs), GFP_KERNEL); |
1625 | if (!cci_pmu->irqs) | |
1626 | return ERR_PTR(-ENOMEM); | |
32837954 | 1627 | cci_pmu->hw_events.events = devm_kcalloc(dev, |
3de6be7a RM |
1628 | CCI_PMU_MAX_HW_CNTRS(model), |
1629 | sizeof(*cci_pmu->hw_events.events), | |
1630 | GFP_KERNEL); | |
1631 | if (!cci_pmu->hw_events.events) | |
1632 | return ERR_PTR(-ENOMEM); | |
32837954 | 1633 | cci_pmu->hw_events.used_mask = devm_kcalloc(dev, |
3de6be7a RM |
1634 | BITS_TO_LONGS(CCI_PMU_MAX_HW_CNTRS(model)), |
1635 | sizeof(*cci_pmu->hw_events.used_mask), | |
1636 | GFP_KERNEL); | |
1637 | if (!cci_pmu->hw_events.used_mask) | |
1638 | return ERR_PTR(-ENOMEM); | |
1639 | ||
1640 | return cci_pmu; | |
1641 | } | |
1642 | ||
1643 | static int cci_pmu_probe(struct platform_device *pdev) | |
1644 | { | |
3de6be7a RM |
1645 | struct cci_pmu *cci_pmu; |
1646 | int i, ret, irq; | |
1647 | ||
32837954 | 1648 | cci_pmu = cci_pmu_alloc(&pdev->dev); |
3de6be7a RM |
1649 | if (IS_ERR(cci_pmu)) |
1650 | return PTR_ERR(cci_pmu); | |
1651 | ||
504db0f8 | 1652 | cci_pmu->base = devm_platform_ioremap_resource(pdev, 0); |
3de6be7a RM |
1653 | if (IS_ERR(cci_pmu->base)) |
1654 | return PTR_ERR(cci_pmu->base); | |
1655 | ||
1656 | /* | |
1657 | * The CCI PMU has one overflow interrupt per counter, but several | |
1658 | * counters may be tied together to a common interrupt line. | |
1659 | */ | |
1660 | cci_pmu->nr_irqs = 0; | |
1661 | for (i = 0; i < CCI_PMU_MAX_HW_CNTRS(cci_pmu->model); i++) { | |
1662 | irq = platform_get_irq(pdev, i); | |
1663 | if (irq < 0) | |
1664 | break; | |
1665 | ||
1666 | if (is_duplicate_irq(irq, cci_pmu->irqs, cci_pmu->nr_irqs)) | |
1667 | continue; | |
1668 | ||
1669 | cci_pmu->irqs[cci_pmu->nr_irqs++] = irq; | |
1670 | } | |
1671 | ||
1672 | /* | |
1673 | * Ensure that the device tree describes as many interrupts as | |
1674 | * there are counters. | |
1675 | */ | |
1676 | if (i < CCI_PMU_MAX_HW_CNTRS(cci_pmu->model)) { | |
1677 | dev_warn(&pdev->dev, "In-correct number of interrupts: %d, should be %d\n", | |
1678 | i, CCI_PMU_MAX_HW_CNTRS(cci_pmu->model)); | |
1679 | return -EINVAL; | |
1680 | } | |
1681 | ||
1682 | raw_spin_lock_init(&cci_pmu->hw_events.pmu_lock); | |
1683 | mutex_init(&cci_pmu->reserve_mutex); | |
1684 | atomic_set(&cci_pmu->active_events, 0); | |
3de6be7a | 1685 | |
0d2e2a82 RM |
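| /* | |
| * Bind the PMU to the probing CPU and install the hotplug callback | |
| * so the perf context follows CPU offlining. | |
| */ | |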
1686 | cci_pmu->cpu = raw_smp_processor_id(); |
1687 | g_cci_pmu = cci_pmu; | |
03057f26 RM |
1688 | cpuhp_setup_state_nocalls(CPUHP_AP_PERF_ARM_CCI_ONLINE, |
1689 | "perf/arm/cci:online", NULL, | |
1690 | cci_pmu_offline_cpu); | |
0d2e2a82 RM |
1691 | |
1692 | ret = cci_pmu_init(cci_pmu, pdev); | |
1693 | if (ret) | |
1694 | goto error_pmu_init; | |
1695 | ||
3de6be7a RM |
1696 | pr_info("ARM %s PMU driver probed", cci_pmu->model->name); |
1697 | return 0; | |
0d2e2a82 RM |
1698 | |
1699 | error_pmu_init: | |
1700 | cpuhp_remove_state(CPUHP_AP_PERF_ARM_CCI_ONLINE); | |
1701 | g_cci_pmu = NULL; | |
1702 | return ret; | |
3de6be7a RM |
1703 | } |
1704 | ||
8b0c93c2 RM |
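| /* Undo probe: drop the hotplug state and unregister the PMU. */ | |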
1705 | static int cci_pmu_remove(struct platform_device *pdev) |
1706 | { | |
1707 | if (!g_cci_pmu) | |
1708 | return 0; | |
1709 | ||
1710 | cpuhp_remove_state(CPUHP_AP_PERF_ARM_CCI_ONLINE); | |
1711 | perf_pmu_unregister(&g_cci_pmu->pmu); | |
1712 | g_cci_pmu = NULL; | |
1713 | ||
1714 | return 0; | |
1715 | } | |
1716 | ||
3de6be7a RM |
1717 | static struct platform_driver cci_pmu_driver = { |
1718 | .driver = { | |
1719 | .name = DRIVER_NAME, | |
1720 | .of_match_table = arm_cci_pmu_matches, | |
1721 | }, | |
1722 | .probe = cci_pmu_probe, | |
8b0c93c2 | 1723 | .remove = cci_pmu_remove, |
3de6be7a RM |
1724 | }; |
1725 | ||
8b0c93c2 | 1726 | module_platform_driver(cci_pmu_driver); |
75dc3441 | 1727 | MODULE_LICENSE("GPL v2"); |
3de6be7a | 1728 | MODULE_DESCRIPTION("ARM CCI PMU support"); |