// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright 2017 NXP
 * Copyright 2016 Freescale Semiconductor, Inc.
 */

#include <linux/bitfield.h>
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/module.h>
#include <linux/of.h>
#include <linux/of_address.h>
#include <linux/of_device.h>
#include <linux/of_irq.h>
#include <linux/perf_event.h>
#include <linux/slab.h>

#define COUNTER_CNTL            0x0
#define COUNTER_READ            0x20

#define COUNTER_DPCR1           0x30

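/*
 * Counter registers are laid out with a 4-byte stride: counter n's
 * control register is at COUNTER_CNTL + n * 4 and its current value is
 * read from COUNTER_READ + n * 4 (see ddr_perf_counter_enable() and
 * ddr_perf_read_counter() below).
 */
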
#define CNTL_OVER               0x1
#define CNTL_CLEAR              0x2
#define CNTL_EN                 0x4
#define CNTL_EN_MASK            0xFFFFFFFB
#define CNTL_CLEAR_MASK         0xFFFFFFFD
#define CNTL_OVER_MASK          0xFFFFFFFE

#define CNTL_CSV_SHIFT          24
#define CNTL_CSV_MASK           (0xFF << CNTL_CSV_SHIFT)

#define EVENT_CYCLES_ID         0
#define EVENT_CYCLES_COUNTER    0
#define NUM_COUNTERS            4

#define AXI_MASKING_REVERT      0xffff0000      /* AXI_MASKING(MSB 16bits) + AXI_ID(LSB 16bits) */

#define to_ddr_pmu(p)           container_of(p, struct ddr_pmu, pmu)

#define DDR_PERF_DEV_NAME       "imx8_ddr"
#define DDR_CPUHP_CB_NAME       DDR_PERF_DEV_NAME "_perf_pmu"

static DEFINE_IDA(ddr_ida);

/* DDR Perf hardware feature */
#define DDR_CAP_AXI_ID_FILTER           0x1     /* support AXI ID filter */
#define DDR_CAP_AXI_ID_FILTER_ENHANCED  0x3     /* support enhanced AXI ID filter */

struct fsl_ddr_devtype_data {
        unsigned int quirks;    /* quirks needed for different DDR Perf core */
        const char *identifier; /* system PMU identifier for userspace */
};

static const struct fsl_ddr_devtype_data imx8_devtype_data;

static const struct fsl_ddr_devtype_data imx8m_devtype_data = {
        .quirks = DDR_CAP_AXI_ID_FILTER,
};

static const struct fsl_ddr_devtype_data imx8mq_devtype_data = {
        .quirks = DDR_CAP_AXI_ID_FILTER,
        .identifier = "i.MX8MQ",
};

static const struct fsl_ddr_devtype_data imx8mm_devtype_data = {
        .quirks = DDR_CAP_AXI_ID_FILTER,
        .identifier = "i.MX8MM",
};

static const struct fsl_ddr_devtype_data imx8mn_devtype_data = {
        .quirks = DDR_CAP_AXI_ID_FILTER,
        .identifier = "i.MX8MN",
};

static const struct fsl_ddr_devtype_data imx8mp_devtype_data = {
        .quirks = DDR_CAP_AXI_ID_FILTER_ENHANCED,
        .identifier = "i.MX8MP",
};

static const struct of_device_id imx_ddr_pmu_dt_ids[] = {
        { .compatible = "fsl,imx8-ddr-pmu", .data = &imx8_devtype_data},
        { .compatible = "fsl,imx8m-ddr-pmu", .data = &imx8m_devtype_data},
        { .compatible = "fsl,imx8mq-ddr-pmu", .data = &imx8mq_devtype_data},
        { .compatible = "fsl,imx8mm-ddr-pmu", .data = &imx8mm_devtype_data},
        { .compatible = "fsl,imx8mn-ddr-pmu", .data = &imx8mn_devtype_data},
        { .compatible = "fsl,imx8mp-ddr-pmu", .data = &imx8mp_devtype_data},
        { /* sentinel */ }
};
MODULE_DEVICE_TABLE(of, imx_ddr_pmu_dt_ids);

struct ddr_pmu {
        struct pmu pmu;
        void __iomem *base;
        unsigned int cpu;
        struct hlist_node node;
        struct device *dev;
        struct perf_event *events[NUM_COUNTERS];
        int active_events;
        enum cpuhp_state cpuhp_state;
        const struct fsl_ddr_devtype_data *devtype_data;
        int irq;
        int id;
};

static ssize_t ddr_perf_identifier_show(struct device *dev,
                                        struct device_attribute *attr,
                                        char *page)
{
        struct ddr_pmu *pmu = dev_get_drvdata(dev);

        return sysfs_emit(page, "%s\n", pmu->devtype_data->identifier);
}

static umode_t ddr_perf_identifier_attr_visible(struct kobject *kobj,
                                                struct attribute *attr,
                                                int n)
{
        struct device *dev = kobj_to_dev(kobj);
        struct ddr_pmu *pmu = dev_get_drvdata(dev);

        if (!pmu->devtype_data->identifier)
                return 0;
        return attr->mode;
}

static struct device_attribute ddr_perf_identifier_attr =
        __ATTR(identifier, 0444, ddr_perf_identifier_show, NULL);

static struct attribute *ddr_perf_identifier_attrs[] = {
        &ddr_perf_identifier_attr.attr,
        NULL,
};

static const struct attribute_group ddr_perf_identifier_attr_group = {
        .attrs = ddr_perf_identifier_attrs,
        .is_visible = ddr_perf_identifier_attr_visible,
};

enum ddr_perf_filter_capabilities {
        PERF_CAP_AXI_ID_FILTER = 0,
        PERF_CAP_AXI_ID_FILTER_ENHANCED,
        PERF_CAP_AXI_ID_FEAT_MAX,
};

static u32 ddr_perf_filter_cap_get(struct ddr_pmu *pmu, int cap)
{
        u32 quirks = pmu->devtype_data->quirks;

        switch (cap) {
        case PERF_CAP_AXI_ID_FILTER:
                return !!(quirks & DDR_CAP_AXI_ID_FILTER);
        case PERF_CAP_AXI_ID_FILTER_ENHANCED:
                quirks &= DDR_CAP_AXI_ID_FILTER_ENHANCED;
                return quirks == DDR_CAP_AXI_ID_FILTER_ENHANCED;
        default:
                WARN(1, "unknown filter cap %d\n", cap);
        }

        return 0;
}

static ssize_t ddr_perf_filter_cap_show(struct device *dev,
                                        struct device_attribute *attr,
                                        char *buf)
{
        struct ddr_pmu *pmu = dev_get_drvdata(dev);
        struct dev_ext_attribute *ea =
                container_of(attr, struct dev_ext_attribute, attr);
        int cap = (long)ea->var;

        return sysfs_emit(buf, "%u\n", ddr_perf_filter_cap_get(pmu, cap));
}

#define PERF_EXT_ATTR_ENTRY(_name, _func, _var)                        \
        (&((struct dev_ext_attribute) {                                 \
                __ATTR(_name, 0444, _func, NULL), (void *)_var          \
        }).attr.attr)

#define PERF_FILTER_EXT_ATTR_ENTRY(_name, _var)                        \
        PERF_EXT_ATTR_ENTRY(_name, ddr_perf_filter_cap_show, _var)

static struct attribute *ddr_perf_filter_cap_attr[] = {
        PERF_FILTER_EXT_ATTR_ENTRY(filter, PERF_CAP_AXI_ID_FILTER),
        PERF_FILTER_EXT_ATTR_ENTRY(enhanced_filter, PERF_CAP_AXI_ID_FILTER_ENHANCED),
        NULL,
};

static const struct attribute_group ddr_perf_filter_cap_attr_group = {
        .name = "caps",
        .attrs = ddr_perf_filter_cap_attr,
};

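/*
 * These capability flags are exported to userspace; assuming the
 * standard perf sysfs layout and instance 0 of this PMU, they appear as:
 *
 *   /sys/bus/event_source/devices/imx8_ddr0/caps/filter
 *   /sys/bus/event_source/devices/imx8_ddr0/caps/enhanced_filter
 */
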
static ssize_t ddr_perf_cpumask_show(struct device *dev,
                                struct device_attribute *attr, char *buf)
{
        struct ddr_pmu *pmu = dev_get_drvdata(dev);

        return cpumap_print_to_pagebuf(true, buf, cpumask_of(pmu->cpu));
}

static struct device_attribute ddr_perf_cpumask_attr =
        __ATTR(cpumask, 0444, ddr_perf_cpumask_show, NULL);

static struct attribute *ddr_perf_cpumask_attrs[] = {
        &ddr_perf_cpumask_attr.attr,
        NULL,
};

static const struct attribute_group ddr_perf_cpumask_attr_group = {
        .attrs = ddr_perf_cpumask_attrs,
};

static ssize_t
ddr_pmu_event_show(struct device *dev, struct device_attribute *attr,
                   char *page)
{
        struct perf_pmu_events_attr *pmu_attr;

        pmu_attr = container_of(attr, struct perf_pmu_events_attr, attr);
        return sysfs_emit(page, "event=0x%02llx\n", pmu_attr->id);
}

#define IMX8_DDR_PMU_EVENT_ATTR(_name, _id)                             \
        PMU_EVENT_ATTR_ID(_name, ddr_pmu_event_show, _id)

static struct attribute *ddr_perf_events_attrs[] = {
        IMX8_DDR_PMU_EVENT_ATTR(cycles, EVENT_CYCLES_ID),
        IMX8_DDR_PMU_EVENT_ATTR(selfresh, 0x01),
        IMX8_DDR_PMU_EVENT_ATTR(read-accesses, 0x04),
        IMX8_DDR_PMU_EVENT_ATTR(write-accesses, 0x05),
        IMX8_DDR_PMU_EVENT_ATTR(read-queue-depth, 0x08),
        IMX8_DDR_PMU_EVENT_ATTR(write-queue-depth, 0x09),
        IMX8_DDR_PMU_EVENT_ATTR(lp-read-credit-cnt, 0x10),
        IMX8_DDR_PMU_EVENT_ATTR(hp-read-credit-cnt, 0x11),
        IMX8_DDR_PMU_EVENT_ATTR(write-credit-cnt, 0x12),
        IMX8_DDR_PMU_EVENT_ATTR(read-command, 0x20),
        IMX8_DDR_PMU_EVENT_ATTR(write-command, 0x21),
        IMX8_DDR_PMU_EVENT_ATTR(read-modify-write-command, 0x22),
        IMX8_DDR_PMU_EVENT_ATTR(hp-read, 0x23),
        IMX8_DDR_PMU_EVENT_ATTR(hp-req-nocredit, 0x24),
        IMX8_DDR_PMU_EVENT_ATTR(hp-xact-credit, 0x25),
        IMX8_DDR_PMU_EVENT_ATTR(lp-req-nocredit, 0x26),
        IMX8_DDR_PMU_EVENT_ATTR(lp-xact-credit, 0x27),
        IMX8_DDR_PMU_EVENT_ATTR(wr-xact-credit, 0x29),
        IMX8_DDR_PMU_EVENT_ATTR(read-cycles, 0x2a),
        IMX8_DDR_PMU_EVENT_ATTR(write-cycles, 0x2b),
        IMX8_DDR_PMU_EVENT_ATTR(read-write-transition, 0x30),
        IMX8_DDR_PMU_EVENT_ATTR(precharge, 0x31),
        IMX8_DDR_PMU_EVENT_ATTR(activate, 0x32),
        IMX8_DDR_PMU_EVENT_ATTR(load-mode, 0x33),
        IMX8_DDR_PMU_EVENT_ATTR(perf-mwr, 0x34),
        IMX8_DDR_PMU_EVENT_ATTR(read, 0x35),
        IMX8_DDR_PMU_EVENT_ATTR(read-activate, 0x36),
        IMX8_DDR_PMU_EVENT_ATTR(refresh, 0x37),
        IMX8_DDR_PMU_EVENT_ATTR(write, 0x38),
        IMX8_DDR_PMU_EVENT_ATTR(raw-hazard, 0x39),
        IMX8_DDR_PMU_EVENT_ATTR(axid-read, 0x41),
        IMX8_DDR_PMU_EVENT_ATTR(axid-write, 0x42),
        NULL,
};

static const struct attribute_group ddr_perf_events_attr_group = {
        .name = "events",
        .attrs = ddr_perf_events_attrs,
};

PMU_FORMAT_ATTR(event, "config:0-7");
PMU_FORMAT_ATTR(axi_id, "config1:0-15");
PMU_FORMAT_ATTR(axi_mask, "config1:16-31");

static struct attribute *ddr_perf_format_attrs[] = {
        &format_attr_event.attr,
        &format_attr_axi_id.attr,
        &format_attr_axi_mask.attr,
        NULL,
};

static const struct attribute_group ddr_perf_format_attr_group = {
        .name = "format",
        .attrs = ddr_perf_format_attrs,
};

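/*
 * Example usage from userspace, assuming this instance registered as
 * imx8_ddr0 (the suffix comes from the IDA in ddr_perf_init()). The
 * axi_id value below is illustrative only; see
 * Documentation/admin-guide/perf/imx-ddr.rst for the exact filter
 * semantics:
 *
 *   perf stat -a -e imx8_ddr0/read-cycles/ sleep 1
 *   perf stat -a -e imx8_ddr0/axid-read,axi_id=0x12/ sleep 1
 */
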
static const struct attribute_group *attr_groups[] = {
        &ddr_perf_events_attr_group,
        &ddr_perf_format_attr_group,
        &ddr_perf_cpumask_attr_group,
        &ddr_perf_filter_cap_attr_group,
        &ddr_perf_identifier_attr_group,
        NULL,
};

static bool ddr_perf_is_filtered(struct perf_event *event)
{
        return event->attr.config == 0x41 || event->attr.config == 0x42;
}

static u32 ddr_perf_filter_val(struct perf_event *event)
{
        return event->attr.config1;
}

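/*
 * There is a single AXI ID filter register (COUNTER_DPCR1) shared by all
 * counters, so two filtered events can only be scheduled together when
 * they use the same axi_id/axi_mask value.
 */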
static bool ddr_perf_filters_compatible(struct perf_event *a,
                                        struct perf_event *b)
{
        if (!ddr_perf_is_filtered(a))
                return true;
        if (!ddr_perf_is_filtered(b))
                return true;
        return ddr_perf_filter_val(a) == ddr_perf_filter_val(b);
}

static bool ddr_perf_is_enhanced_filtered(struct perf_event *event)
{
        unsigned int filt;
        struct ddr_pmu *pmu = to_ddr_pmu(event->pmu);

        filt = pmu->devtype_data->quirks & DDR_CAP_AXI_ID_FILTER_ENHANCED;
        return (filt == DDR_CAP_AXI_ID_FILTER_ENHANCED) &&
                ddr_perf_is_filtered(event);
}

static int ddr_perf_alloc_counter(struct ddr_pmu *pmu, int event)
{
        int i;

        /*
         * Always map the cycle event to counter 0: the cycles counter
         * is dedicated to the cycle event and can't be used for any
         * other event.
         */
        if (event == EVENT_CYCLES_ID) {
                if (pmu->events[EVENT_CYCLES_COUNTER] == NULL)
                        return EVENT_CYCLES_COUNTER;
                else
                        return -ENOENT;
        }

        for (i = 1; i < NUM_COUNTERS; i++) {
                if (pmu->events[i] == NULL)
                        return i;
        }

        return -ENOENT;
}

static void ddr_perf_free_counter(struct ddr_pmu *pmu, int counter)
{
        pmu->events[counter] = NULL;
}

static u32 ddr_perf_read_counter(struct ddr_pmu *pmu, int counter)
{
        struct perf_event *event = pmu->events[counter];
        void __iomem *base = pmu->base;

        /*
         * Return bytes instead of bursts from a DDR transaction for the
         * axid-read and axid-write events if the PMU core supports the
         * enhanced filter.
         */
        base += ddr_perf_is_enhanced_filtered(event) ? COUNTER_DPCR1 :
                                                       COUNTER_READ;
        return readl_relaxed(base + counter * 4);
}

static int ddr_perf_event_init(struct perf_event *event)
{
        struct ddr_pmu *pmu = to_ddr_pmu(event->pmu);
        struct hw_perf_event *hwc = &event->hw;
        struct perf_event *sibling;

        if (event->attr.type != event->pmu->type)
                return -ENOENT;

        if (is_sampling_event(event) || event->attach_state & PERF_ATTACH_TASK)
                return -EOPNOTSUPP;

        if (event->cpu < 0) {
                dev_warn(pmu->dev, "Can't provide per-task data!\n");
                return -EOPNOTSUPP;
        }

        /*
         * We must NOT create groups containing mixed PMUs, although software
         * events are acceptable (for example to create a CCN group
         * periodically read when a hrtimer aka cpu-clock leader triggers).
         */
        if (event->group_leader->pmu != event->pmu &&
            !is_software_event(event->group_leader))
                return -EINVAL;

        if (pmu->devtype_data->quirks & DDR_CAP_AXI_ID_FILTER) {
                if (!ddr_perf_filters_compatible(event, event->group_leader))
                        return -EINVAL;
                for_each_sibling_event(sibling, event->group_leader) {
                        if (!ddr_perf_filters_compatible(event, sibling))
                                return -EINVAL;
                }
        }

        for_each_sibling_event(sibling, event->group_leader) {
                if (sibling->pmu != event->pmu &&
                    !is_software_event(sibling))
                        return -EINVAL;
        }

        event->cpu = pmu->cpu;
        hwc->idx = -1;

        return 0;
}

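/*
 * Enabling a counter programs the event number into the CSV field and
 * sets EN (plus CLEAR, see the comment below); disabling only clears the
 * EN bit and leaves the rest of the control register intact.
 */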
static void ddr_perf_counter_enable(struct ddr_pmu *pmu, int config,
                                    int counter, bool enable)
{
        u8 reg = counter * 4 + COUNTER_CNTL;
        int val;

        if (enable) {
                /*
                 * The cycle counter is special: clearing it requires
                 * writing 0 and then 1 to its CLEAR bit. Other counters
                 * only need a 0 written to the CLEAR bit, which the
                 * hardware then flips back to 1. The enable sequence
                 * below is harmless for all counters.
                 */
                writel(0, pmu->base + reg);
                val = CNTL_EN | CNTL_CLEAR;
                val |= FIELD_PREP(CNTL_CSV_MASK, config);
                writel(val, pmu->base + reg);
        } else {
                /* Disable counter */
                val = readl_relaxed(pmu->base + reg) & CNTL_EN_MASK;
                writel(val, pmu->base + reg);
        }
}

static bool ddr_perf_counter_overflow(struct ddr_pmu *pmu, int counter)
{
        int val;

        val = readl_relaxed(pmu->base + counter * 4 + COUNTER_CNTL);

        return val & CNTL_OVER;
}

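/* Clear a counter by toggling its CLEAR bit: write 0, then write 1. */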
static void ddr_perf_counter_clear(struct ddr_pmu *pmu, int counter)
{
        u8 reg = counter * 4 + COUNTER_CNTL;
        int val;

        val = readl_relaxed(pmu->base + reg);
        val &= ~CNTL_CLEAR;
        writel(val, pmu->base + reg);

        val |= CNTL_CLEAR;
        writel(val, pmu->base + reg);
}

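/*
 * Counters are cleared after every read (and prev_count is reset to 0 in
 * ddr_perf_event_start()), so each raw hardware value can simply be
 * added to the event count without computing a delta.
 */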
static void ddr_perf_event_update(struct perf_event *event)
{
        struct ddr_pmu *pmu = to_ddr_pmu(event->pmu);
        struct hw_perf_event *hwc = &event->hw;
        u64 new_raw_count;
        int counter = hwc->idx;
        int ret;

        new_raw_count = ddr_perf_read_counter(pmu, counter);
        local64_add(new_raw_count, &event->count);

        /*
         * On legacy SoCs an event counter keeps counting after an
         * overflow, so there is no need to clear it. On newer SoCs an
         * event counter stops on overflow and must be cleared before it
         * counts again.
         */
        if (counter != EVENT_CYCLES_COUNTER) {
                ret = ddr_perf_counter_overflow(pmu, counter);
                if (ret)
                        dev_warn_ratelimited(pmu->dev, "events lost due to counter overflow (config 0x%llx)\n",
                                             event->attr.config);
        }

        /* clear counter every time for both cycle counter and event counter */
        ddr_perf_counter_clear(pmu, counter);
}

static void ddr_perf_event_start(struct perf_event *event, int flags)
{
        struct ddr_pmu *pmu = to_ddr_pmu(event->pmu);
        struct hw_perf_event *hwc = &event->hw;
        int counter = hwc->idx;

        local64_set(&hwc->prev_count, 0);

        ddr_perf_counter_enable(pmu, event->attr.config, counter, true);

        hwc->state = 0;
}

static int ddr_perf_event_add(struct perf_event *event, int flags)
{
        struct ddr_pmu *pmu = to_ddr_pmu(event->pmu);
        struct hw_perf_event *hwc = &event->hw;
        int counter;
        int cfg = event->attr.config;
        int cfg1 = event->attr.config1;

        if (pmu->devtype_data->quirks & DDR_CAP_AXI_ID_FILTER) {
                int i;

                for (i = 1; i < NUM_COUNTERS; i++) {
                        if (pmu->events[i] &&
                            !ddr_perf_filters_compatible(event, pmu->events[i]))
                                return -EINVAL;
                }

                if (ddr_perf_is_filtered(event)) {
                        /* invert the AXI ID masking (axi_mask) bits before programming DPCR1 */
                        cfg1 ^= AXI_MASKING_REVERT;
                        writel(cfg1, pmu->base + COUNTER_DPCR1);
                }
        }

        counter = ddr_perf_alloc_counter(pmu, cfg);
        if (counter < 0) {
                dev_dbg(pmu->dev, "There are not enough counters\n");
                return -EOPNOTSUPP;
        }

        pmu->events[counter] = event;
        pmu->active_events++;
        hwc->idx = counter;

        hwc->state |= PERF_HES_STOPPED;

        if (flags & PERF_EF_START)
                ddr_perf_event_start(event, flags);

        return 0;
}

static void ddr_perf_event_stop(struct perf_event *event, int flags)
{
        struct ddr_pmu *pmu = to_ddr_pmu(event->pmu);
        struct hw_perf_event *hwc = &event->hw;
        int counter = hwc->idx;

        ddr_perf_counter_enable(pmu, event->attr.config, counter, false);
        ddr_perf_event_update(event);

        hwc->state |= PERF_HES_STOPPED;
}

static void ddr_perf_event_del(struct perf_event *event, int flags)
{
        struct ddr_pmu *pmu = to_ddr_pmu(event->pmu);
        struct hw_perf_event *hwc = &event->hw;
        int counter = hwc->idx;

        ddr_perf_event_stop(event, PERF_EF_UPDATE);

        ddr_perf_free_counter(pmu, counter);
        pmu->active_events--;
        hwc->idx = -1;
}

static void ddr_perf_pmu_enable(struct pmu *pmu)
{
        struct ddr_pmu *ddr_pmu = to_ddr_pmu(pmu);

        /* enable the cycle counter if the cycle event is not in the active event list */
        if (ddr_pmu->events[EVENT_CYCLES_COUNTER] == NULL)
                ddr_perf_counter_enable(ddr_pmu,
                                        EVENT_CYCLES_ID,
                                        EVENT_CYCLES_COUNTER,
                                        true);
}

static void ddr_perf_pmu_disable(struct pmu *pmu)
{
        struct ddr_pmu *ddr_pmu = to_ddr_pmu(pmu);

        if (ddr_pmu->events[EVENT_CYCLES_COUNTER] == NULL)
                ddr_perf_counter_enable(ddr_pmu,
                                        EVENT_CYCLES_ID,
                                        EVENT_CYCLES_COUNTER,
                                        false);
}

static int ddr_perf_init(struct ddr_pmu *pmu, void __iomem *base,
                         struct device *dev)
{
        *pmu = (struct ddr_pmu) {
                .pmu = (struct pmu) {
                        .module       = THIS_MODULE,
                        .capabilities = PERF_PMU_CAP_NO_EXCLUDE,
                        .task_ctx_nr  = perf_invalid_context,
                        .attr_groups  = attr_groups,
                        .event_init   = ddr_perf_event_init,
                        .add          = ddr_perf_event_add,
                        .del          = ddr_perf_event_del,
                        .start        = ddr_perf_event_start,
                        .stop         = ddr_perf_event_stop,
                        .read         = ddr_perf_event_update,
                        .pmu_enable   = ddr_perf_pmu_enable,
                        .pmu_disable  = ddr_perf_pmu_disable,
                },
                .base = base,
                .dev = dev,
        };

        pmu->id = ida_simple_get(&ddr_ida, 0, 0, GFP_KERNEL);
        return pmu->id;
}

static irqreturn_t ddr_perf_irq_handler(int irq, void *p)
{
        int i;
        struct ddr_pmu *pmu = (struct ddr_pmu *) p;
        struct perf_event *event;

        /* all counters stop when the cycle counter is disabled */
        ddr_perf_counter_enable(pmu,
                                EVENT_CYCLES_ID,
                                EVENT_CYCLES_COUNTER,
                                false);
        /*
         * When the cycle counter overflows, all counters are stopped,
         * and an IRQ is raised. If any other counter overflows, it
         * continues counting, and no IRQ is raised. But on new SoCs
         * such as the i.MX8MP, an event counter stops counting when it
         * overflows, so we rely on the cycle counter (which overflows
         * first) to detect and recover event counter overflow.
         *
         * Cycles occur at least 4 times as often as other events, so we
         * can update all events on a cycle counter overflow and not
         * lose events.
         */
        for (i = 0; i < NUM_COUNTERS; i++) {

                if (!pmu->events[i])
                        continue;

                event = pmu->events[i];

                ddr_perf_event_update(event);
        }

        ddr_perf_counter_enable(pmu,
                                EVENT_CYCLES_ID,
                                EVENT_CYCLES_COUNTER,
                                true);

        return IRQ_HANDLED;
}

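/*
 * CPU hotplug callback: if the CPU this PMU instance is bound to goes
 * offline, migrate the perf context and the IRQ affinity to another
 * online CPU.
 */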
static int ddr_perf_offline_cpu(unsigned int cpu, struct hlist_node *node)
{
        struct ddr_pmu *pmu = hlist_entry_safe(node, struct ddr_pmu, node);
        int target;

        if (cpu != pmu->cpu)
                return 0;

        target = cpumask_any_but(cpu_online_mask, cpu);
        if (target >= nr_cpu_ids)
                return 0;

        perf_pmu_migrate_context(&pmu->pmu, cpu, target);
        pmu->cpu = target;

        WARN_ON(irq_set_affinity(pmu->irq, cpumask_of(pmu->cpu)));

        return 0;
}

static int ddr_perf_probe(struct platform_device *pdev)
{
        struct ddr_pmu *pmu;
        struct device_node *np;
        void __iomem *base;
        char *name;
        int num;
        int ret;
        int irq;

        base = devm_platform_ioremap_resource(pdev, 0);
        if (IS_ERR(base))
                return PTR_ERR(base);

        np = pdev->dev.of_node;

        pmu = devm_kzalloc(&pdev->dev, sizeof(*pmu), GFP_KERNEL);
        if (!pmu)
                return -ENOMEM;

        num = ddr_perf_init(pmu, base, &pdev->dev);

        platform_set_drvdata(pdev, pmu);

        name = devm_kasprintf(&pdev->dev, GFP_KERNEL, DDR_PERF_DEV_NAME "%d",
                              num);
        if (!name) {
                ret = -ENOMEM;
                goto cpuhp_state_err;
        }

        pmu->devtype_data = of_device_get_match_data(&pdev->dev);

        pmu->cpu = raw_smp_processor_id();
        ret = cpuhp_setup_state_multi(CPUHP_AP_ONLINE_DYN,
                                      DDR_CPUHP_CB_NAME,
                                      NULL,
                                      ddr_perf_offline_cpu);

        if (ret < 0) {
                dev_err(&pdev->dev, "cpuhp_setup_state_multi failed\n");
                goto cpuhp_state_err;
        }

        pmu->cpuhp_state = ret;

        /* Register the pmu instance for cpu hotplug */
        ret = cpuhp_state_add_instance_nocalls(pmu->cpuhp_state, &pmu->node);
        if (ret) {
                dev_err(&pdev->dev, "Error %d registering hotplug\n", ret);
                goto cpuhp_instance_err;
        }

        /* Request irq */
        irq = of_irq_get(np, 0);
        if (irq < 0) {
                dev_err(&pdev->dev, "Failed to get irq: %d\n", irq);
                ret = irq;
                goto ddr_perf_err;
        }

        ret = devm_request_irq(&pdev->dev, irq,
                               ddr_perf_irq_handler,
                               IRQF_NOBALANCING | IRQF_NO_THREAD,
                               DDR_CPUHP_CB_NAME,
                               pmu);
        if (ret < 0) {
                dev_err(&pdev->dev, "Request irq failed: %d\n", ret);
                goto ddr_perf_err;
        }

        pmu->irq = irq;
        ret = irq_set_affinity(pmu->irq, cpumask_of(pmu->cpu));
        if (ret) {
                dev_err(pmu->dev, "Failed to set interrupt affinity!\n");
                goto ddr_perf_err;
        }

        ret = perf_pmu_register(&pmu->pmu, name, -1);
        if (ret)
                goto ddr_perf_err;

        return 0;

ddr_perf_err:
        cpuhp_state_remove_instance_nocalls(pmu->cpuhp_state, &pmu->node);
cpuhp_instance_err:
        cpuhp_remove_multi_state(pmu->cpuhp_state);
cpuhp_state_err:
        ida_simple_remove(&ddr_ida, pmu->id);
        dev_warn(&pdev->dev, "i.MX8 DDR Perf PMU failed (%d), disabled\n", ret);
        return ret;
}

static int ddr_perf_remove(struct platform_device *pdev)
{
        struct ddr_pmu *pmu = platform_get_drvdata(pdev);

        cpuhp_state_remove_instance_nocalls(pmu->cpuhp_state, &pmu->node);
        cpuhp_remove_multi_state(pmu->cpuhp_state);

        perf_pmu_unregister(&pmu->pmu);

        ida_simple_remove(&ddr_ida, pmu->id);
        return 0;
}

static struct platform_driver imx_ddr_pmu_driver = {
        .driver         = {
                .name           = "imx-ddr-pmu",
                .of_match_table = imx_ddr_pmu_dt_ids,
                .suppress_bind_attrs = true,
        },
        .probe          = ddr_perf_probe,
        .remove         = ddr_perf_remove,
};

module_platform_driver(imx_ddr_pmu_driver);
MODULE_LICENSE("GPL v2");