// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright 2017 NXP
 * Copyright 2016 Freescale Semiconductor, Inc.
 */

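/*
 * Example usage (a sketch; assumes this is the first probed instance,
 * which gets id 0 from the IDA and therefore shows up as "imx8_ddr0"):
 *
 *   perf stat -a -e imx8_ddr0/read-cycles/,imx8_ddr0/write-cycles/ sleep 1
 *
 * This is an uncore PMU, so per-task counting is rejected in
 * ddr_perf_event_init() and system-wide mode ("-a") is required.
 */
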
#include <linux/bitfield.h>
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/module.h>
#include <linux/of.h>
#include <linux/of_address.h>
#include <linux/of_device.h>
#include <linux/of_irq.h>
#include <linux/perf_event.h>
#include <linux/slab.h>

#define COUNTER_CNTL		0x0
#define COUNTER_READ		0x20

#define COUNTER_DPCR1		0x30

#define CNTL_OVER		0x1
#define CNTL_CLEAR		0x2
#define CNTL_EN			0x4
#define CNTL_EN_MASK		0xFFFFFFFB
#define CNTL_CLEAR_MASK		0xFFFFFFFD
#define CNTL_OVER_MASK		0xFFFFFFFE

#define CNTL_CSV_SHIFT		24
#define CNTL_CSV_MASK		(0xFF << CNTL_CSV_SHIFT)

#define EVENT_CYCLES_ID		0
#define EVENT_CYCLES_COUNTER	0
#define NUM_COUNTERS		4

#define to_ddr_pmu(p)		container_of(p, struct ddr_pmu, pmu)

#define DDR_PERF_DEV_NAME	"imx8_ddr"
#define DDR_CPUHP_CB_NAME	DDR_PERF_DEV_NAME "_perf_pmu"

static DEFINE_IDA(ddr_ida);

static const struct of_device_id imx_ddr_pmu_dt_ids[] = {
	{ .compatible = "fsl,imx8-ddr-pmu",},
	{ .compatible = "fsl,imx8m-ddr-pmu",},
	{ /* sentinel */ }
};

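/*
 * Per-instance driver state: one event slot per hardware counter, the CPU
 * that all events for this PMU are bound to, and the hotplug/IRQ
 * bookkeeping needed to migrate the instance when that CPU goes offline.
 */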
struct ddr_pmu {
	struct pmu pmu;
	void __iomem *base;
	unsigned int cpu;
	struct hlist_node node;
	struct device *dev;
	struct perf_event *events[NUM_COUNTERS];
	int active_events;
	enum cpuhp_state cpuhp_state;
	int irq;
	int id;
};

static ssize_t ddr_perf_cpumask_show(struct device *dev,
				struct device_attribute *attr, char *buf)
{
	struct ddr_pmu *pmu = dev_get_drvdata(dev);

	return cpumap_print_to_pagebuf(true, buf, cpumask_of(pmu->cpu));
}

static struct device_attribute ddr_perf_cpumask_attr =
	__ATTR(cpumask, 0444, ddr_perf_cpumask_show, NULL);

static struct attribute *ddr_perf_cpumask_attrs[] = {
	&ddr_perf_cpumask_attr.attr,
	NULL,
};

static struct attribute_group ddr_perf_cpumask_attr_group = {
	.attrs = ddr_perf_cpumask_attrs,
};

static ssize_t
ddr_pmu_event_show(struct device *dev, struct device_attribute *attr,
		   char *page)
{
	struct perf_pmu_events_attr *pmu_attr;

	pmu_attr = container_of(attr, struct perf_pmu_events_attr, attr);
	return sprintf(page, "event=0x%02llx\n", pmu_attr->id);
}

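/*
 * Each event attribute is built from an anonymous one-element compound
 * literal array of struct perf_pmu_events_attr; taking element [0] yields
 * a pointer to statically allocated storage without needing a named
 * variable per event.
 */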
#define IMX8_DDR_PMU_EVENT_ATTR(_name, _id)				\
	(&((struct perf_pmu_events_attr[]) {				\
		{ .attr = __ATTR(_name, 0444, ddr_pmu_event_show, NULL),\
		  .id = _id, }						\
	})[0].attr.attr)

static struct attribute *ddr_perf_events_attrs[] = {
	IMX8_DDR_PMU_EVENT_ATTR(cycles, EVENT_CYCLES_ID),
	IMX8_DDR_PMU_EVENT_ATTR(selfresh, 0x01),
	IMX8_DDR_PMU_EVENT_ATTR(read-accesses, 0x04),
	IMX8_DDR_PMU_EVENT_ATTR(write-accesses, 0x05),
	IMX8_DDR_PMU_EVENT_ATTR(read-queue-depth, 0x08),
	IMX8_DDR_PMU_EVENT_ATTR(write-queue-depth, 0x09),
	IMX8_DDR_PMU_EVENT_ATTR(lp-read-credit-cnt, 0x10),
	IMX8_DDR_PMU_EVENT_ATTR(hp-read-credit-cnt, 0x11),
	IMX8_DDR_PMU_EVENT_ATTR(write-credit-cnt, 0x12),
	IMX8_DDR_PMU_EVENT_ATTR(read-command, 0x20),
	IMX8_DDR_PMU_EVENT_ATTR(write-command, 0x21),
	IMX8_DDR_PMU_EVENT_ATTR(read-modify-write-command, 0x22),
	IMX8_DDR_PMU_EVENT_ATTR(hp-read, 0x23),
	IMX8_DDR_PMU_EVENT_ATTR(hp-req-nocredit, 0x24),
	IMX8_DDR_PMU_EVENT_ATTR(hp-xact-credit, 0x25),
	IMX8_DDR_PMU_EVENT_ATTR(lp-req-nocredit, 0x26),
	IMX8_DDR_PMU_EVENT_ATTR(lp-xact-credit, 0x27),
	IMX8_DDR_PMU_EVENT_ATTR(wr-xact-credit, 0x29),
	IMX8_DDR_PMU_EVENT_ATTR(read-cycles, 0x2a),
	IMX8_DDR_PMU_EVENT_ATTR(write-cycles, 0x2b),
	IMX8_DDR_PMU_EVENT_ATTR(read-write-transition, 0x30),
	IMX8_DDR_PMU_EVENT_ATTR(precharge, 0x31),
	IMX8_DDR_PMU_EVENT_ATTR(activate, 0x32),
	IMX8_DDR_PMU_EVENT_ATTR(load-mode, 0x33),
	IMX8_DDR_PMU_EVENT_ATTR(perf-mwr, 0x34),
	IMX8_DDR_PMU_EVENT_ATTR(read, 0x35),
	IMX8_DDR_PMU_EVENT_ATTR(read-activate, 0x36),
	IMX8_DDR_PMU_EVENT_ATTR(refresh, 0x37),
	IMX8_DDR_PMU_EVENT_ATTR(write, 0x38),
	IMX8_DDR_PMU_EVENT_ATTR(raw-hazard, 0x39),
	NULL,
};

static struct attribute_group ddr_perf_events_attr_group = {
	.name = "events",
	.attrs = ddr_perf_events_attrs,
};

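/* The event select value lives in bits 0-7 of perf_event_attr::config. */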
PMU_FORMAT_ATTR(event, "config:0-7");

static struct attribute *ddr_perf_format_attrs[] = {
	&format_attr_event.attr,
	NULL,
};

static struct attribute_group ddr_perf_format_attr_group = {
	.name = "format",
	.attrs = ddr_perf_format_attrs,
};

static const struct attribute_group *attr_groups[] = {
	&ddr_perf_events_attr_group,
	&ddr_perf_format_attr_group,
	&ddr_perf_cpumask_attr_group,
	NULL,
};

static int ddr_perf_alloc_counter(struct ddr_pmu *pmu, int event)
{
	int i;

	/*
	 * Always map the cycle event to counter 0: that counter is
	 * dedicated to the cycle event and cannot be used for any
	 * other event.
	 */
	if (event == EVENT_CYCLES_ID) {
		if (pmu->events[EVENT_CYCLES_COUNTER] == NULL)
			return EVENT_CYCLES_COUNTER;
		else
			return -ENOENT;
	}

	for (i = 1; i < NUM_COUNTERS; i++) {
		if (pmu->events[i] == NULL)
			return i;
	}

	return -ENOENT;
}

static void ddr_perf_free_counter(struct ddr_pmu *pmu, int counter)
{
	pmu->events[counter] = NULL;
}

static u32 ddr_perf_read_counter(struct ddr_pmu *pmu, int counter)
{
	return readl_relaxed(pmu->base + COUNTER_READ + counter * 4);
}

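/*
 * Validate a new event. This is an uncore PMU: sampling and per-task
 * counting make no sense here, so both are rejected and every event is
 * bound to the single CPU that services this PMU instance.
 */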
static int ddr_perf_event_init(struct perf_event *event)
{
	struct ddr_pmu *pmu = to_ddr_pmu(event->pmu);
	struct hw_perf_event *hwc = &event->hw;
	struct perf_event *sibling;

	if (event->attr.type != event->pmu->type)
		return -ENOENT;

	if (is_sampling_event(event) || event->attach_state & PERF_ATTACH_TASK)
		return -EOPNOTSUPP;

	if (event->cpu < 0) {
		dev_warn(pmu->dev, "Can't provide per-task data!\n");
		return -EOPNOTSUPP;
	}

	/*
	 * We must NOT create groups containing mixed PMUs, although
	 * software events are acceptable (for example, a group that is
	 * read periodically when a hrtimer, i.e. a cpu-clock leader,
	 * triggers).
	 */
	if (event->group_leader->pmu != event->pmu &&
			!is_software_event(event->group_leader))
		return -EINVAL;

	for_each_sibling_event(sibling, event->group_leader) {
		if (sibling->pmu != event->pmu &&
				!is_software_event(sibling))
			return -EINVAL;
	}

	event->cpu = pmu->cpu;
	hwc->idx = -1;

	return 0;
}

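/*
 * Fold the current hardware count into the 64-bit event count. The
 * cmpxchg loop makes the read-modify-write of prev_count safe against a
 * concurrent update (for example from the overflow IRQ handler), and the
 * delta is masked to 32 bits because the hardware counters are 32 bits
 * wide.
 */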
static void ddr_perf_event_update(struct perf_event *event)
{
	struct ddr_pmu *pmu = to_ddr_pmu(event->pmu);
	struct hw_perf_event *hwc = &event->hw;
	u64 delta, prev_raw_count, new_raw_count;
	int counter = hwc->idx;

	do {
		prev_raw_count = local64_read(&hwc->prev_count);
		new_raw_count = ddr_perf_read_counter(pmu, counter);
	} while (local64_cmpxchg(&hwc->prev_count, prev_raw_count,
			new_raw_count) != prev_raw_count);

	delta = (new_raw_count - prev_raw_count) & 0xFFFFFFFF;

	local64_add(delta, &event->count);
}

static void ddr_perf_counter_enable(struct ddr_pmu *pmu, int config,
				  int counter, bool enable)
{
	u8 reg = counter * 4 + COUNTER_CNTL;
	int val;

	if (enable) {
		/*
		 * The counter must be disabled first and then enabled
		 * again; otherwise the cycle counter will not count if
		 * it was already enabled.
		 */
		writel(0, pmu->base + reg);
		val = CNTL_EN | CNTL_CLEAR;
		val |= FIELD_PREP(CNTL_CSV_MASK, config);
		writel(val, pmu->base + reg);
	} else {
		/* Disable counter */
		writel(0, pmu->base + reg);
	}
}

static void ddr_perf_event_start(struct perf_event *event, int flags)
{
	struct ddr_pmu *pmu = to_ddr_pmu(event->pmu);
	struct hw_perf_event *hwc = &event->hw;
	int counter = hwc->idx;

	local64_set(&hwc->prev_count, 0);

	ddr_perf_counter_enable(pmu, event->attr.config, counter, true);

	hwc->state = 0;
}

static int ddr_perf_event_add(struct perf_event *event, int flags)
{
	struct ddr_pmu *pmu = to_ddr_pmu(event->pmu);
	struct hw_perf_event *hwc = &event->hw;
	int counter;
	int cfg = event->attr.config;

	counter = ddr_perf_alloc_counter(pmu, cfg);
	if (counter < 0) {
		dev_dbg(pmu->dev, "There are not enough counters\n");
		return -EOPNOTSUPP;
	}

	pmu->events[counter] = event;
	pmu->active_events++;
	hwc->idx = counter;

	hwc->state |= PERF_HES_STOPPED;

	if (flags & PERF_EF_START)
		ddr_perf_event_start(event, flags);

	return 0;
}

static void ddr_perf_event_stop(struct perf_event *event, int flags)
{
	struct ddr_pmu *pmu = to_ddr_pmu(event->pmu);
	struct hw_perf_event *hwc = &event->hw;
	int counter = hwc->idx;

	ddr_perf_counter_enable(pmu, event->attr.config, counter, false);
	ddr_perf_event_update(event);

	hwc->state |= PERF_HES_STOPPED;
}

static void ddr_perf_event_del(struct perf_event *event, int flags)
{
	struct ddr_pmu *pmu = to_ddr_pmu(event->pmu);
	struct hw_perf_event *hwc = &event->hw;
	int counter = hwc->idx;

	ddr_perf_event_stop(event, PERF_EF_UPDATE);

	ddr_perf_free_counter(pmu, counter);
	pmu->active_events--;
	hwc->idx = -1;
}

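/*
 * All counters stop when the cycle counter is disabled, and the cycle
 * counter overflow is the only interrupt source, so pmu_enable() and
 * pmu_disable() keep it running on behalf of the other events whenever
 * no cycles event has been requested by the user.
 */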
static void ddr_perf_pmu_enable(struct pmu *pmu)
{
	struct ddr_pmu *ddr_pmu = to_ddr_pmu(pmu);

	/* Enable the cycle counter if no cycles event is active */
	if (ddr_pmu->events[EVENT_CYCLES_COUNTER] == NULL)
		ddr_perf_counter_enable(ddr_pmu,
				      EVENT_CYCLES_ID,
				      EVENT_CYCLES_COUNTER,
				      true);
}

static void ddr_perf_pmu_disable(struct pmu *pmu)
{
	struct ddr_pmu *ddr_pmu = to_ddr_pmu(pmu);

	if (ddr_pmu->events[EVENT_CYCLES_COUNTER] == NULL)
		ddr_perf_counter_enable(ddr_pmu,
				      EVENT_CYCLES_ID,
				      EVENT_CYCLES_COUNTER,
				      false);
}

static int ddr_perf_init(struct ddr_pmu *pmu, void __iomem *base,
			 struct device *dev)
{
	*pmu = (struct ddr_pmu) {
		.pmu = (struct pmu) {
			.capabilities = PERF_PMU_CAP_NO_EXCLUDE,
			.task_ctx_nr = perf_invalid_context,
			.attr_groups = attr_groups,
			.event_init  = ddr_perf_event_init,
			.add	     = ddr_perf_event_add,
			.del	     = ddr_perf_event_del,
			.start	     = ddr_perf_event_start,
			.stop	     = ddr_perf_event_stop,
			.read	     = ddr_perf_event_update,
			.pmu_enable  = ddr_perf_pmu_enable,
			.pmu_disable = ddr_perf_pmu_disable,
		},
		.base = base,
		.dev = dev,
	};

	pmu->id = ida_simple_get(&ddr_ida, 0, 0, GFP_KERNEL);
	return pmu->id;
}

static irqreturn_t ddr_perf_irq_handler(int irq, void *p)
{
	int i;
	struct ddr_pmu *pmu = (struct ddr_pmu *) p;
	struct perf_event *event, *cycle_event = NULL;

	/* All counters stop when the cycle counter is disabled */
	ddr_perf_counter_enable(pmu,
			      EVENT_CYCLES_ID,
			      EVENT_CYCLES_COUNTER,
			      false);
	/*
	 * When the cycle counter overflows, all counters are stopped,
	 * and an IRQ is raised. If any other counter overflows, it
	 * continues counting, and no IRQ is raised.
	 *
	 * Cycles occur at least 4 times as often as other events, so we
	 * can update all events on a cycle counter overflow and not
	 * lose events.
	 */
	for (i = 0; i < NUM_COUNTERS; i++) {

		if (!pmu->events[i])
			continue;

		event = pmu->events[i];

		ddr_perf_event_update(event);

		if (event->hw.idx == EVENT_CYCLES_COUNTER)
			cycle_event = event;
	}

	ddr_perf_counter_enable(pmu,
			      EVENT_CYCLES_ID,
			      EVENT_CYCLES_COUNTER,
			      true);
	if (cycle_event)
		ddr_perf_event_update(cycle_event);

	return IRQ_HANDLED;
}

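/*
 * CPU hotplug callback: when the CPU that services this PMU instance goes
 * offline, migrate the perf context and the IRQ affinity to another
 * online CPU so counting continues uninterrupted.
 */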
static int ddr_perf_offline_cpu(unsigned int cpu, struct hlist_node *node)
{
	struct ddr_pmu *pmu = hlist_entry_safe(node, struct ddr_pmu, node);
	int target;

	if (cpu != pmu->cpu)
		return 0;

	target = cpumask_any_but(cpu_online_mask, cpu);
	if (target >= nr_cpu_ids)
		return 0;

	perf_pmu_migrate_context(&pmu->pmu, cpu, target);
	pmu->cpu = target;

	WARN_ON(irq_set_affinity_hint(pmu->irq, cpumask_of(pmu->cpu)));

	return 0;
}

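/*
 * Bind a PMU instance to one DDR controller: map the registers, register
 * the CPU hotplug callback, request the overflow interrupt, and finally
 * register the PMU with the perf core.
 */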
static int ddr_perf_probe(struct platform_device *pdev)
{
	struct ddr_pmu *pmu;
	struct device_node *np;
	void __iomem *base;
	char *name;
	int num;
	int ret;
	int irq;

	base = devm_platform_ioremap_resource(pdev, 0);
	if (IS_ERR(base))
		return PTR_ERR(base);

	np = pdev->dev.of_node;

	pmu = devm_kzalloc(&pdev->dev, sizeof(*pmu), GFP_KERNEL);
	if (!pmu)
		return -ENOMEM;

	num = ddr_perf_init(pmu, base, &pdev->dev);

	platform_set_drvdata(pdev, pmu);

	name = devm_kasprintf(&pdev->dev, GFP_KERNEL, DDR_PERF_DEV_NAME "%d",
			      num);
	if (!name)
		return -ENOMEM;

	pmu->cpu = raw_smp_processor_id();
	ret = cpuhp_setup_state_multi(CPUHP_AP_ONLINE_DYN,
				      DDR_CPUHP_CB_NAME,
				      NULL,
				      ddr_perf_offline_cpu);

	if (ret < 0) {
		dev_err(&pdev->dev, "cpuhp_setup_state_multi failed\n");
		goto ddr_perf_err;
	}

	pmu->cpuhp_state = ret;

	/* Register the pmu instance for cpu hotplug */
	cpuhp_state_add_instance_nocalls(pmu->cpuhp_state, &pmu->node);

	/* Request irq */
	irq = of_irq_get(np, 0);
	if (irq < 0) {
		dev_err(&pdev->dev, "Failed to get irq: %d\n", irq);
		ret = irq;
		goto ddr_perf_err;
	}

	ret = devm_request_irq(&pdev->dev, irq,
			       ddr_perf_irq_handler,
			       IRQF_NOBALANCING | IRQF_NO_THREAD,
			       DDR_CPUHP_CB_NAME,
			       pmu);
	if (ret < 0) {
		dev_err(&pdev->dev, "Request irq failed: %d\n", ret);
		goto ddr_perf_err;
	}

	pmu->irq = irq;
	ret = irq_set_affinity_hint(pmu->irq, cpumask_of(pmu->cpu));
	if (ret) {
		dev_err(pmu->dev, "Failed to set interrupt affinity!\n");
		goto ddr_perf_err;
	}

	ret = perf_pmu_register(&pmu->pmu, name, -1);
	if (ret)
		goto ddr_perf_err;

	return 0;

ddr_perf_err:
	if (pmu->cpuhp_state)
		cpuhp_state_remove_instance_nocalls(pmu->cpuhp_state, &pmu->node);

	ida_simple_remove(&ddr_ida, pmu->id);
	dev_warn(&pdev->dev, "i.MX8 DDR Perf PMU failed (%d), disabled\n", ret);
	return ret;
}

static int ddr_perf_remove(struct platform_device *pdev)
{
	struct ddr_pmu *pmu = platform_get_drvdata(pdev);

	cpuhp_state_remove_instance_nocalls(pmu->cpuhp_state, &pmu->node);
	irq_set_affinity_hint(pmu->irq, NULL);

	perf_pmu_unregister(&pmu->pmu);

	ida_simple_remove(&ddr_ida, pmu->id);
	return 0;
}

static struct platform_driver imx_ddr_pmu_driver = {
	.driver         = {
		.name   = "imx-ddr-pmu",
		.of_match_table = imx_ddr_pmu_dt_ids,
	},
	.probe          = ddr_perf_probe,
	.remove         = ddr_perf_remove,
};

module_platform_driver(imx_ddr_pmu_driver);
MODULE_LICENSE("GPL v2");