// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright 2017 NXP
 * Copyright 2016 Freescale Semiconductor, Inc.
 */

#include <linux/bitfield.h>
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/module.h>
#include <linux/of.h>
#include <linux/of_address.h>
#include <linux/of_device.h>
#include <linux/of_irq.h>
#include <linux/perf_event.h>
#include <linux/slab.h>

#define COUNTER_CNTL		0x0
#define COUNTER_READ		0x20

#define COUNTER_DPCR1		0x30

#define CNTL_OVER		0x1
#define CNTL_CLEAR		0x2
#define CNTL_EN			0x4
#define CNTL_EN_MASK		0xFFFFFFFB
#define CNTL_CLEAR_MASK		0xFFFFFFFD
#define CNTL_OVER_MASK		0xFFFFFFFE

#define CNTL_CSV_SHIFT		24
#define CNTL_CSV_MASK		(0xFF << CNTL_CSV_SHIFT)

#define EVENT_CYCLES_ID		0
#define EVENT_CYCLES_COUNTER	0
#define NUM_COUNTERS		4

#define AXI_MASKING_REVERT	0xffff0000	/* AXI_MASKING(MSB 16bits) + AXI_ID(LSB 16bits) */

#define to_ddr_pmu(p)		container_of(p, struct ddr_pmu, pmu)

#define DDR_PERF_DEV_NAME	"imx8_ddr"
#define DDR_CPUHP_CB_NAME	DDR_PERF_DEV_NAME "_perf_pmu"

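/*
 * Hands out a unique instance id per PMU, so successive DDR controllers
 * register as imx8_ddr0, imx8_ddr1, and so on.
 */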
static DEFINE_IDA(ddr_ida);

/* DDR Perf hardware feature */
#define DDR_CAP_AXI_ID_FILTER		0x1	/* support AXI ID filter */
#define DDR_CAP_AXI_ID_FILTER_ENHANCED	0x3	/* support enhanced AXI ID filter */

struct fsl_ddr_devtype_data {
	unsigned int quirks;	/* quirks needed for different DDR Perf core */
};

static const struct fsl_ddr_devtype_data imx8_devtype_data;

static const struct fsl_ddr_devtype_data imx8m_devtype_data = {
	.quirks = DDR_CAP_AXI_ID_FILTER,
};

static const struct fsl_ddr_devtype_data imx8mp_devtype_data = {
	.quirks = DDR_CAP_AXI_ID_FILTER_ENHANCED,
};

static const struct of_device_id imx_ddr_pmu_dt_ids[] = {
	{ .compatible = "fsl,imx8-ddr-pmu", .data = &imx8_devtype_data},
	{ .compatible = "fsl,imx8m-ddr-pmu", .data = &imx8m_devtype_data},
	{ .compatible = "fsl,imx8mp-ddr-pmu", .data = &imx8mp_devtype_data},
	{ /* sentinel */ }
};
MODULE_DEVICE_TABLE(of, imx_ddr_pmu_dt_ids);

struct ddr_pmu {
	struct pmu pmu;
	void __iomem *base;
	unsigned int cpu;
	struct hlist_node node;
	struct device *dev;
	struct perf_event *events[NUM_COUNTERS];
	int active_events;
	enum cpuhp_state cpuhp_state;
	const struct fsl_ddr_devtype_data *devtype_data;
	int irq;
	int id;
};

enum ddr_perf_filter_capabilities {
	PERF_CAP_AXI_ID_FILTER = 0,
	PERF_CAP_AXI_ID_FILTER_ENHANCED,
	PERF_CAP_AXI_ID_FEAT_MAX,
};

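/*
 * Translate a filter capability into a 0/1 flag derived from the devtype
 * quirks; the result is exported read-only through the sysfs "caps" group
 * built below.
 */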
static u32 ddr_perf_filter_cap_get(struct ddr_pmu *pmu, int cap)
{
	u32 quirks = pmu->devtype_data->quirks;

	switch (cap) {
	case PERF_CAP_AXI_ID_FILTER:
		return !!(quirks & DDR_CAP_AXI_ID_FILTER);
	case PERF_CAP_AXI_ID_FILTER_ENHANCED:
		quirks &= DDR_CAP_AXI_ID_FILTER_ENHANCED;
		return quirks == DDR_CAP_AXI_ID_FILTER_ENHANCED;
	default:
		WARN(1, "unknown filter cap %d\n", cap);
	}

	return 0;
}

static ssize_t ddr_perf_filter_cap_show(struct device *dev,
					struct device_attribute *attr,
					char *buf)
{
	struct ddr_pmu *pmu = dev_get_drvdata(dev);
	struct dev_ext_attribute *ea =
		container_of(attr, struct dev_ext_attribute, attr);
	int cap = (long)ea->var;

	return snprintf(buf, PAGE_SIZE, "%u\n",
			ddr_perf_filter_cap_get(pmu, cap));
}

#define PERF_EXT_ATTR_ENTRY(_name, _func, _var)			\
	(&((struct dev_ext_attribute) {					\
		__ATTR(_name, 0444, _func, NULL), (void *)_var		\
	}).attr.attr)

#define PERF_FILTER_EXT_ATTR_ENTRY(_name, _var)			\
	PERF_EXT_ATTR_ENTRY(_name, ddr_perf_filter_cap_show, _var)

static struct attribute *ddr_perf_filter_cap_attr[] = {
	PERF_FILTER_EXT_ATTR_ENTRY(filter, PERF_CAP_AXI_ID_FILTER),
	PERF_FILTER_EXT_ATTR_ENTRY(enhanced_filter, PERF_CAP_AXI_ID_FILTER_ENHANCED),
	NULL,
};

static struct attribute_group ddr_perf_filter_cap_attr_group = {
	.name = "caps",
	.attrs = ddr_perf_filter_cap_attr,
};

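/*
 * This is an uncore PMU: events are always counted on one nominated CPU.
 * Expose that CPU through the "cpumask" attribute so perf opens events
 * there.
 */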
static ssize_t ddr_perf_cpumask_show(struct device *dev,
				struct device_attribute *attr, char *buf)
{
	struct ddr_pmu *pmu = dev_get_drvdata(dev);

	return cpumap_print_to_pagebuf(true, buf, cpumask_of(pmu->cpu));
}

static struct device_attribute ddr_perf_cpumask_attr =
	__ATTR(cpumask, 0444, ddr_perf_cpumask_show, NULL);

static struct attribute *ddr_perf_cpumask_attrs[] = {
	&ddr_perf_cpumask_attr.attr,
	NULL,
};

static struct attribute_group ddr_perf_cpumask_attr_group = {
	.attrs = ddr_perf_cpumask_attrs,
};

static ssize_t
ddr_pmu_event_show(struct device *dev, struct device_attribute *attr,
		   char *page)
{
	struct perf_pmu_events_attr *pmu_attr;

	pmu_attr = container_of(attr, struct perf_pmu_events_attr, attr);
	return sprintf(page, "event=0x%02llx\n", pmu_attr->id);
}

#define IMX8_DDR_PMU_EVENT_ATTR(_name, _id)				\
	(&((struct perf_pmu_events_attr[]) {				\
		{ .attr = __ATTR(_name, 0444, ddr_pmu_event_show, NULL),\
		  .id = _id, }						\
	})[0].attr.attr)

static struct attribute *ddr_perf_events_attrs[] = {
	IMX8_DDR_PMU_EVENT_ATTR(cycles, EVENT_CYCLES_ID),
	IMX8_DDR_PMU_EVENT_ATTR(selfresh, 0x01),
	IMX8_DDR_PMU_EVENT_ATTR(read-accesses, 0x04),
	IMX8_DDR_PMU_EVENT_ATTR(write-accesses, 0x05),
	IMX8_DDR_PMU_EVENT_ATTR(read-queue-depth, 0x08),
	IMX8_DDR_PMU_EVENT_ATTR(write-queue-depth, 0x09),
	IMX8_DDR_PMU_EVENT_ATTR(lp-read-credit-cnt, 0x10),
	IMX8_DDR_PMU_EVENT_ATTR(hp-read-credit-cnt, 0x11),
	IMX8_DDR_PMU_EVENT_ATTR(write-credit-cnt, 0x12),
	IMX8_DDR_PMU_EVENT_ATTR(read-command, 0x20),
	IMX8_DDR_PMU_EVENT_ATTR(write-command, 0x21),
	IMX8_DDR_PMU_EVENT_ATTR(read-modify-write-command, 0x22),
	IMX8_DDR_PMU_EVENT_ATTR(hp-read, 0x23),
	IMX8_DDR_PMU_EVENT_ATTR(hp-req-nocredit, 0x24),
	IMX8_DDR_PMU_EVENT_ATTR(hp-xact-credit, 0x25),
	IMX8_DDR_PMU_EVENT_ATTR(lp-req-nocredit, 0x26),
	IMX8_DDR_PMU_EVENT_ATTR(lp-xact-credit, 0x27),
	IMX8_DDR_PMU_EVENT_ATTR(wr-xact-credit, 0x29),
	IMX8_DDR_PMU_EVENT_ATTR(read-cycles, 0x2a),
	IMX8_DDR_PMU_EVENT_ATTR(write-cycles, 0x2b),
	IMX8_DDR_PMU_EVENT_ATTR(read-write-transition, 0x30),
	IMX8_DDR_PMU_EVENT_ATTR(precharge, 0x31),
	IMX8_DDR_PMU_EVENT_ATTR(activate, 0x32),
	IMX8_DDR_PMU_EVENT_ATTR(load-mode, 0x33),
	IMX8_DDR_PMU_EVENT_ATTR(perf-mwr, 0x34),
	IMX8_DDR_PMU_EVENT_ATTR(read, 0x35),
	IMX8_DDR_PMU_EVENT_ATTR(read-activate, 0x36),
	IMX8_DDR_PMU_EVENT_ATTR(refresh, 0x37),
	IMX8_DDR_PMU_EVENT_ATTR(write, 0x38),
	IMX8_DDR_PMU_EVENT_ATTR(raw-hazard, 0x39),
	IMX8_DDR_PMU_EVENT_ATTR(axid-read, 0x41),
	IMX8_DDR_PMU_EVENT_ATTR(axid-write, 0x42),
	NULL,
};

static struct attribute_group ddr_perf_events_attr_group = {
	.name = "events",
	.attrs = ddr_perf_events_attrs,
};

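/*
 * The event id lives in config[7:0]; for the filtered axid-* events the
 * AXI ID and mask live in config1[15:0] and config1[31:16]. An illustrative
 * invocation, assuming this instance registered as imx8_ddr0, would look
 * roughly like:
 *
 *   perf stat -a -e imx8_ddr0/axid-read,axi_mask=0xffff,axi_id=0x12/ cmd
 */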
PMU_FORMAT_ATTR(event, "config:0-7");
PMU_FORMAT_ATTR(axi_id, "config1:0-15");
PMU_FORMAT_ATTR(axi_mask, "config1:16-31");

static struct attribute *ddr_perf_format_attrs[] = {
	&format_attr_event.attr,
	&format_attr_axi_id.attr,
	&format_attr_axi_mask.attr,
	NULL,
};

static struct attribute_group ddr_perf_format_attr_group = {
	.name = "format",
	.attrs = ddr_perf_format_attrs,
};

static const struct attribute_group *attr_groups[] = {
	&ddr_perf_events_attr_group,
	&ddr_perf_format_attr_group,
	&ddr_perf_cpumask_attr_group,
	&ddr_perf_filter_cap_attr_group,
	NULL,
};

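/* Only axid-read (0x41) and axid-write (0x42) use the AXI ID filter. */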
static bool ddr_perf_is_filtered(struct perf_event *event)
{
	return event->attr.config == 0x41 || event->attr.config == 0x42;
}

static u32 ddr_perf_filter_val(struct perf_event *event)
{
	return event->attr.config1;
}

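/*
 * All counters share the single DPCR1 filter register, so two filtered
 * events can only be scheduled together if they program the same
 * AXI ID/mask value.
 */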
static bool ddr_perf_filters_compatible(struct perf_event *a,
					struct perf_event *b)
{
	if (!ddr_perf_is_filtered(a))
		return true;
	if (!ddr_perf_is_filtered(b))
		return true;
	return ddr_perf_filter_val(a) == ddr_perf_filter_val(b);
}

static bool ddr_perf_is_enhanced_filtered(struct perf_event *event)
{
	unsigned int filt;
	struct ddr_pmu *pmu = to_ddr_pmu(event->pmu);

	filt = pmu->devtype_data->quirks & DDR_CAP_AXI_ID_FILTER_ENHANCED;
	return (filt == DDR_CAP_AXI_ID_FILTER_ENHANCED) &&
			ddr_perf_is_filtered(event);
}

static u32 ddr_perf_alloc_counter(struct ddr_pmu *pmu, int event)
{
	int i;

	/*
	 * Always map the cycle event to counter 0: the cycles counter
	 * is dedicated to the cycle event and can't be used for any
	 * other event.
	 */
	if (event == EVENT_CYCLES_ID) {
		if (pmu->events[EVENT_CYCLES_COUNTER] == NULL)
			return EVENT_CYCLES_COUNTER;
		else
			return -ENOENT;
	}

	for (i = 1; i < NUM_COUNTERS; i++) {
		if (pmu->events[i] == NULL)
			return i;
	}

	return -ENOENT;
}

static void ddr_perf_free_counter(struct ddr_pmu *pmu, int counter)
{
	pmu->events[counter] = NULL;
}

static u32 ddr_perf_read_counter(struct ddr_pmu *pmu, int counter)
{
	struct perf_event *event = pmu->events[counter];
	void __iomem *base = pmu->base;

	/*
	 * Return bytes instead of bursts from the DDR transactions for
	 * the axid-read and axid-write events if the PMU core supports
	 * the enhanced filter.
	 */
	base += ddr_perf_is_enhanced_filtered(event) ? COUNTER_DPCR1 :
						       COUNTER_READ;
	return readl_relaxed(base + counter * 4);
}

static int ddr_perf_event_init(struct perf_event *event)
{
	struct ddr_pmu *pmu = to_ddr_pmu(event->pmu);
	struct hw_perf_event *hwc = &event->hw;
	struct perf_event *sibling;

	if (event->attr.type != event->pmu->type)
		return -ENOENT;

	if (is_sampling_event(event) || event->attach_state & PERF_ATTACH_TASK)
		return -EOPNOTSUPP;

	if (event->cpu < 0) {
		dev_warn(pmu->dev, "Can't provide per-task data!\n");
		return -EOPNOTSUPP;
	}

	/*
	 * We must NOT create groups containing mixed PMUs, although software
	 * events are acceptable (for example to create a CCN group
	 * periodically read when a hrtimer aka cpu-clock leader triggers).
	 */
	if (event->group_leader->pmu != event->pmu &&
			!is_software_event(event->group_leader))
		return -EINVAL;

	if (pmu->devtype_data->quirks & DDR_CAP_AXI_ID_FILTER) {
		if (!ddr_perf_filters_compatible(event, event->group_leader))
			return -EINVAL;
		for_each_sibling_event(sibling, event->group_leader) {
			if (!ddr_perf_filters_compatible(event, sibling))
				return -EINVAL;
		}
	}

	for_each_sibling_event(sibling, event->group_leader) {
		if (sibling->pmu != event->pmu &&
				!is_software_event(sibling))
			return -EINVAL;
	}

	event->cpu = pmu->cpu;
	hwc->idx = -1;

	return 0;
}

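/*
 * Counters are 32 bits wide: read the current value and accumulate the
 * 32-bit delta into the 64-bit perf count. The cmpxchg loop guards
 * against racing with an update from the overflow IRQ handler.
 */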
static void ddr_perf_event_update(struct perf_event *event)
{
	struct ddr_pmu *pmu = to_ddr_pmu(event->pmu);
	struct hw_perf_event *hwc = &event->hw;
	u64 delta, prev_raw_count, new_raw_count;
	int counter = hwc->idx;

	do {
		prev_raw_count = local64_read(&hwc->prev_count);
		new_raw_count = ddr_perf_read_counter(pmu, counter);
	} while (local64_cmpxchg(&hwc->prev_count, prev_raw_count,
			new_raw_count) != prev_raw_count);

	delta = (new_raw_count - prev_raw_count) & 0xFFFFFFFF;

	local64_add(delta, &event->count);
}

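/*
 * Each counter has its own control word at COUNTER_CNTL + counter * 4;
 * the CSV field selects which event id the counter monitors.
 */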
static void ddr_perf_counter_enable(struct ddr_pmu *pmu, int config,
				  int counter, bool enable)
{
	u8 reg = counter * 4 + COUNTER_CNTL;
	int val;

	if (enable) {
		/*
		 * The cycle counter is special: clearing it requires
		 * writing 0 and then 1 to the CLEAR bit. The other
		 * counters only need a write of 0; the hardware then
		 * sets the bit back to 1. The enable sequence below is
		 * harmless for all counters.
		 */
		writel(0, pmu->base + reg);
		val = CNTL_EN | CNTL_CLEAR;
		val |= FIELD_PREP(CNTL_CSV_MASK, config);
		writel(val, pmu->base + reg);
	} else {
		/* Disable counter */
		val = readl_relaxed(pmu->base + reg) & CNTL_EN_MASK;
		writel(val, pmu->base + reg);
	}
}

static void ddr_perf_event_start(struct perf_event *event, int flags)
{
	struct ddr_pmu *pmu = to_ddr_pmu(event->pmu);
	struct hw_perf_event *hwc = &event->hw;
	int counter = hwc->idx;

	local64_set(&hwc->prev_count, 0);

	ddr_perf_counter_enable(pmu, event->attr.config, counter, true);

	hwc->state = 0;
}

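/*
 * .add hook: program the shared DPCR1 filter if needed, claim a free
 * counter for the event, and optionally start counting right away.
 */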
static int ddr_perf_event_add(struct perf_event *event, int flags)
{
	struct ddr_pmu *pmu = to_ddr_pmu(event->pmu);
	struct hw_perf_event *hwc = &event->hw;
	int counter;
	int cfg = event->attr.config;
	int cfg1 = event->attr.config1;

	if (pmu->devtype_data->quirks & DDR_CAP_AXI_ID_FILTER) {
		int i;

		for (i = 1; i < NUM_COUNTERS; i++) {
			if (pmu->events[i] &&
			    !ddr_perf_filters_compatible(event, pmu->events[i]))
				return -EINVAL;
		}

		if (ddr_perf_is_filtered(event)) {
			/* revert the AXI ID masking (axi_mask) value */
			cfg1 ^= AXI_MASKING_REVERT;
			writel(cfg1, pmu->base + COUNTER_DPCR1);
		}
	}

	counter = ddr_perf_alloc_counter(pmu, cfg);
	if (counter < 0) {
		dev_dbg(pmu->dev, "There are not enough counters\n");
		return -EOPNOTSUPP;
	}

	pmu->events[counter] = event;
	pmu->active_events++;
	hwc->idx = counter;

	hwc->state |= PERF_HES_STOPPED;

	if (flags & PERF_EF_START)
		ddr_perf_event_start(event, flags);

	return 0;
}

static void ddr_perf_event_stop(struct perf_event *event, int flags)
{
	struct ddr_pmu *pmu = to_ddr_pmu(event->pmu);
	struct hw_perf_event *hwc = &event->hw;
	int counter = hwc->idx;

	ddr_perf_counter_enable(pmu, event->attr.config, counter, false);
	ddr_perf_event_update(event);

	hwc->state |= PERF_HES_STOPPED;
}

static void ddr_perf_event_del(struct perf_event *event, int flags)
{
	struct ddr_pmu *pmu = to_ddr_pmu(event->pmu);
	struct hw_perf_event *hwc = &event->hw;
	int counter = hwc->idx;

	ddr_perf_event_stop(event, PERF_EF_UPDATE);

	ddr_perf_free_counter(pmu, counter);
	pmu->active_events--;
	hwc->idx = -1;
}

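/*
 * The cycle counter clocks the whole counter block: when it is disabled,
 * every other counter stops too (see the IRQ handler below). Keep it
 * running across pmu_enable/pmu_disable even when no cycles event is
 * scheduled.
 */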
static void ddr_perf_pmu_enable(struct pmu *pmu)
{
	struct ddr_pmu *ddr_pmu = to_ddr_pmu(pmu);

	/* Enable the cycle counter if no cycles event is on the active list */
	if (ddr_pmu->events[EVENT_CYCLES_COUNTER] == NULL)
		ddr_perf_counter_enable(ddr_pmu,
				      EVENT_CYCLES_ID,
				      EVENT_CYCLES_COUNTER,
				      true);
}

static void ddr_perf_pmu_disable(struct pmu *pmu)
{
	struct ddr_pmu *ddr_pmu = to_ddr_pmu(pmu);

	if (ddr_pmu->events[EVENT_CYCLES_COUNTER] == NULL)
		ddr_perf_counter_enable(ddr_pmu,
				      EVENT_CYCLES_ID,
				      EVENT_CYCLES_COUNTER,
				      false);
}

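/*
 * One-time setup of the struct ddr_pmu. This is an uncore PMU, so
 * task_ctx_nr is perf_invalid_context. Returns the IDA id used to build
 * the "imx8_ddr<N>" device name, or a negative errno.
 */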
static int ddr_perf_init(struct ddr_pmu *pmu, void __iomem *base,
			 struct device *dev)
{
	*pmu = (struct ddr_pmu) {
		.pmu = (struct pmu) {
			.capabilities = PERF_PMU_CAP_NO_EXCLUDE,
			.task_ctx_nr = perf_invalid_context,
			.attr_groups = attr_groups,
			.event_init  = ddr_perf_event_init,
			.add	     = ddr_perf_event_add,
			.del	     = ddr_perf_event_del,
			.start	     = ddr_perf_event_start,
			.stop	     = ddr_perf_event_stop,
			.read	     = ddr_perf_event_update,
			.pmu_enable  = ddr_perf_pmu_enable,
			.pmu_disable = ddr_perf_pmu_disable,
		},
		.base = base,
		.dev = dev,
	};

	pmu->id = ida_simple_get(&ddr_ida, 0, 0, GFP_KERNEL);
	return pmu->id;
}

static irqreturn_t ddr_perf_irq_handler(int irq, void *p)
{
	int i;
	struct ddr_pmu *pmu = (struct ddr_pmu *) p;
	struct perf_event *event, *cycle_event = NULL;

	/* All counters stop while the cycle counter is disabled */
	ddr_perf_counter_enable(pmu,
			      EVENT_CYCLES_ID,
			      EVENT_CYCLES_COUNTER,
			      false);
	/*
	 * When the cycle counter overflows, all counters are stopped,
	 * and an IRQ is raised. If any other counter overflows, it
	 * continues counting, and no IRQ is raised.
	 *
	 * Cycles occur at least 4 times as often as other events, so we
	 * can update all events on a cycle counter overflow and not
	 * lose events.
	 */
	for (i = 0; i < NUM_COUNTERS; i++) {

		if (!pmu->events[i])
			continue;

		event = pmu->events[i];

		ddr_perf_event_update(event);

		if (event->hw.idx == EVENT_CYCLES_COUNTER)
			cycle_event = event;
	}

	ddr_perf_counter_enable(pmu,
			      EVENT_CYCLES_ID,
			      EVENT_CYCLES_COUNTER,
			      true);
	if (cycle_event)
		ddr_perf_event_update(cycle_event);

	return IRQ_HANDLED;
}

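/*
 * CPU hotplug callback: if the CPU that owns this PMU goes offline,
 * migrate the perf context and the IRQ affinity to any other online CPU.
 */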
static int ddr_perf_offline_cpu(unsigned int cpu, struct hlist_node *node)
{
	struct ddr_pmu *pmu = hlist_entry_safe(node, struct ddr_pmu, node);
	int target;

	if (cpu != pmu->cpu)
		return 0;

	target = cpumask_any_but(cpu_online_mask, cpu);
	if (target >= nr_cpu_ids)
		return 0;

	perf_pmu_migrate_context(&pmu->pmu, cpu, target);
	pmu->cpu = target;

	WARN_ON(irq_set_affinity_hint(pmu->irq, cpumask_of(pmu->cpu)));

	return 0;
}

static int ddr_perf_probe(struct platform_device *pdev)
{
	struct ddr_pmu *pmu;
	struct device_node *np;
	void __iomem *base;
	char *name;
	int num;
	int ret;
	int irq;

	base = devm_platform_ioremap_resource(pdev, 0);
	if (IS_ERR(base))
		return PTR_ERR(base);

	np = pdev->dev.of_node;

	pmu = devm_kzalloc(&pdev->dev, sizeof(*pmu), GFP_KERNEL);
	if (!pmu)
		return -ENOMEM;

	num = ddr_perf_init(pmu, base, &pdev->dev);

	platform_set_drvdata(pdev, pmu);

	name = devm_kasprintf(&pdev->dev, GFP_KERNEL, DDR_PERF_DEV_NAME "%d",
			      num);
	if (!name)
		return -ENOMEM;

	pmu->devtype_data = of_device_get_match_data(&pdev->dev);

	pmu->cpu = raw_smp_processor_id();
	ret = cpuhp_setup_state_multi(CPUHP_AP_ONLINE_DYN,
				      DDR_CPUHP_CB_NAME,
				      NULL,
				      ddr_perf_offline_cpu);

	if (ret < 0) {
		dev_err(&pdev->dev, "cpuhp_setup_state_multi failed\n");
		goto cpuhp_state_err;
	}

	pmu->cpuhp_state = ret;

	/* Register the pmu instance for cpu hotplug */
	ret = cpuhp_state_add_instance_nocalls(pmu->cpuhp_state, &pmu->node);
	if (ret) {
		dev_err(&pdev->dev, "Error %d registering hotplug\n", ret);
		goto cpuhp_instance_err;
	}

	/* Request irq */
	irq = of_irq_get(np, 0);
	if (irq < 0) {
		dev_err(&pdev->dev, "Failed to get irq: %d", irq);
		ret = irq;
		goto ddr_perf_err;
	}

	ret = devm_request_irq(&pdev->dev, irq,
			       ddr_perf_irq_handler,
			       IRQF_NOBALANCING | IRQF_NO_THREAD,
			       DDR_CPUHP_CB_NAME,
			       pmu);
	if (ret < 0) {
		dev_err(&pdev->dev, "Request irq failed: %d", ret);
		goto ddr_perf_err;
	}

	pmu->irq = irq;
	ret = irq_set_affinity_hint(pmu->irq, cpumask_of(pmu->cpu));
	if (ret) {
		dev_err(pmu->dev, "Failed to set interrupt affinity!\n");
		goto ddr_perf_err;
	}

	ret = perf_pmu_register(&pmu->pmu, name, -1);
	if (ret)
		goto ddr_perf_err;

	return 0;

ddr_perf_err:
	cpuhp_state_remove_instance_nocalls(pmu->cpuhp_state, &pmu->node);
cpuhp_instance_err:
	cpuhp_remove_multi_state(pmu->cpuhp_state);
cpuhp_state_err:
	ida_simple_remove(&ddr_ida, pmu->id);
	dev_warn(&pdev->dev, "i.MX8 DDR Perf PMU failed (%d), disabled\n", ret);
	return ret;
}

static int ddr_perf_remove(struct platform_device *pdev)
{
	struct ddr_pmu *pmu = platform_get_drvdata(pdev);

	cpuhp_state_remove_instance_nocalls(pmu->cpuhp_state, &pmu->node);
	cpuhp_remove_multi_state(pmu->cpuhp_state);
	irq_set_affinity_hint(pmu->irq, NULL);

	perf_pmu_unregister(&pmu->pmu);

	ida_simple_remove(&ddr_ida, pmu->id);
	return 0;
}

static struct platform_driver imx_ddr_pmu_driver = {
	.driver         = {
		.name   = "imx-ddr-pmu",
		.of_match_table = imx_ddr_pmu_dt_ids,
	},
	.probe          = ddr_perf_probe,
	.remove         = ddr_perf_remove,
};

module_platform_driver(imx_ddr_pmu_driver);
MODULE_LICENSE("GPL v2");