Commit | Line | Data |
---|---|---|
ad0dfdfd | 1 | // SPDX-License-Identifier: GPL-2.0 |
0bcbf2e3 MP |
2 | /* |
3 | * Copyright(C) 2015 Linaro Limited. All rights reserved. | |
4 | * Author: Mathieu Poirier <mathieu.poirier@linaro.org> | |
0bcbf2e3 MP |
5 | */ |
6 | ||
7 | #include <linux/coresight.h> | |
8 | #include <linux/coresight-pmu.h> | |
9 | #include <linux/cpumask.h> | |
10 | #include <linux/device.h> | |
11 | #include <linux/list.h> | |
12 | #include <linux/mm.h> | |
ca48fa22 | 13 | #include <linux/init.h> |
0bcbf2e3 | 14 | #include <linux/perf_event.h> |
5ecabe4a | 15 | #include <linux/percpu-defs.h> |
0bcbf2e3 | 16 | #include <linux/slab.h> |
bb8e370b | 17 | #include <linux/stringhash.h> |
0bcbf2e3 MP |
18 | #include <linux/types.h> |
19 | #include <linux/workqueue.h> | |
20 | ||
ca878b14 | 21 | #include "coresight-etm-perf.h" |
0bcbf2e3 MP |
22 | #include "coresight-priv.h" |
23 | ||
/* The single PMU instance registered for all ETM/PTM trace sources */
static struct pmu etm_pmu;
/* Set once etm_pmu has been successfully registered with the perf core */
static bool etm_perf_up;

/* Per-CPU AUX ring buffer handle for the perf session running on that CPU */
static DEFINE_PER_CPU(struct perf_output_handle, ctx_handle);
/* Per-CPU tracer device for that CPU, registered via etm_perf_symlink() */
static DEFINE_PER_CPU(struct coresight_device *, csdev_src);
29 | ||
/* ETMv3.5/PTM's ETMCR is 'config' */
PMU_FORMAT_ATTR(cycacc, "config:" __stringify(ETM_OPT_CYCACC));
PMU_FORMAT_ATTR(contextid, "config:" __stringify(ETM_OPT_CTXTID));
PMU_FORMAT_ATTR(timestamp, "config:" __stringify(ETM_OPT_TS));
PMU_FORMAT_ATTR(retstack, "config:" __stringify(ETM_OPT_RETSTK));
/* Sink ID - same for all ETMs */
PMU_FORMAT_ATTR(sinkid, "config2:0-31");

static struct attribute *etm_config_formats_attr[] = {
	&format_attr_cycacc.attr,
	&format_attr_contextid.attr,
	&format_attr_timestamp.attr,
	&format_attr_retstack.attr,
	&format_attr_sinkid.attr,
	NULL,
};

/* Exposed to user space as the PMU's "format" sysfs group */
static const struct attribute_group etm_pmu_format_group = {
	.name = "format",
	.attrs = etm_config_formats_attr,
};

/*
 * Empty by default; per-sink entries are added and removed at runtime by
 * etm_perf_add_symlink_sink() / etm_perf_del_symlink_sink().
 */
static struct attribute *etm_config_sinks_attr[] = {
	NULL,
};

static const struct attribute_group etm_pmu_sinks_group = {
	.name = "sinks",
	.attrs = etm_config_sinks_attr,
};

/* All sysfs groups installed on the PMU at registration time */
static const struct attribute_group *etm_pmu_attr_groups[] = {
	&etm_pmu_format_group,
	&etm_pmu_sinks_group,
	NULL,
};
66 | ||
5ecabe4a SP |
/* Return the address of this session's per-CPU path slot for @cpu. */
static inline struct list_head **
etm_event_cpu_path_ptr(struct etm_event_data *data, int cpu)
{
	return per_cpu_ptr(data->path, cpu);
}
72 | ||
/*
 * Return the source-to-sink path built for @cpu.  May be NULL or an
 * ERR_PTR if no path was (successfully) built - see free_event_data().
 */
static inline struct list_head *
etm_event_cpu_path(struct etm_event_data *data, int cpu)
{
	return *etm_event_cpu_path_ptr(data, cpu);
}
78 | ||
0bcbf2e3 MP |
79 | static void etm_event_read(struct perf_event *event) {} |
80 | ||
ca878b14 | 81 | static int etm_addr_filters_alloc(struct perf_event *event) |
0bcbf2e3 | 82 | { |
ca878b14 MP |
83 | struct etm_filters *filters; |
84 | int node = event->cpu == -1 ? -1 : cpu_to_node(event->cpu); | |
85 | ||
86 | filters = kzalloc_node(sizeof(struct etm_filters), GFP_KERNEL, node); | |
87 | if (!filters) | |
88 | return -ENOMEM; | |
89 | ||
90 | if (event->parent) | |
91 | memcpy(filters, event->parent->hw.addr_filters, | |
92 | sizeof(*filters)); | |
93 | ||
94 | event->hw.addr_filters = filters; | |
0bcbf2e3 MP |
95 | |
96 | return 0; | |
97 | } | |
98 | ||
ca878b14 MP |
99 | static void etm_event_destroy(struct perf_event *event) |
100 | { | |
101 | kfree(event->hw.addr_filters); | |
102 | event->hw.addr_filters = NULL; | |
103 | } | |
104 | ||
105 | static int etm_event_init(struct perf_event *event) | |
106 | { | |
107 | int ret = 0; | |
108 | ||
109 | if (event->attr.type != etm_pmu.type) { | |
110 | ret = -ENOENT; | |
111 | goto out; | |
112 | } | |
113 | ||
114 | ret = etm_addr_filters_alloc(event); | |
115 | if (ret) | |
116 | goto out; | |
117 | ||
118 | event->destroy = etm_event_destroy; | |
119 | out: | |
120 | return ret; | |
121 | } | |
122 | ||
f5200aa9 MP |
/*
 * Release the sink buffer allocated in etm_setup_aux(), if any.  The
 * sink is recovered from the path of the first CPU in the session mask.
 */
static void free_sink_buffer(struct etm_event_data *event_data)
{
	int cpu;
	cpumask_t *mask = &event_data->mask;
	struct coresight_device *sink;

	if (WARN_ON(cpumask_empty(mask)))
		return;

	/* No sink buffer was ever allocated for this session */
	if (!event_data->snk_config)
		return;

	cpu = cpumask_first(mask);
	sink = coresight_get_sink(etm_event_cpu_path(event_data, cpu));
	sink_ops(sink)->free_buffer(event_data->snk_config);
}
139 | ||
0bcbf2e3 MP |
/*
 * Deferred teardown of a session (scheduled from etm_free_aux()): free
 * the sink buffer, release every per-CPU path, then the per-CPU array
 * and the etm_event_data itself.
 */
static void free_event_data(struct work_struct *work)
{
	int cpu;
	cpumask_t *mask;
	struct etm_event_data *event_data;

	event_data = container_of(work, struct etm_event_data, work);
	mask = &event_data->mask;

	/* Free the sink buffers, if there are any */
	free_sink_buffer(event_data);

	for_each_cpu(cpu, mask) {
		struct list_head **ppath;

		ppath = etm_event_cpu_path_ptr(event_data, cpu);
		/* A slot may hold NULL or the ERR_PTR of a failed build */
		if (!(IS_ERR_OR_NULL(*ppath)))
			coresight_release_path(*ppath);
		*ppath = NULL;
	}

	free_percpu(event_data->path);
	kfree(event_data);
}
164 | ||
/*
 * Allocate the etm_event_data for a session targeting @cpu, or all
 * present CPUs when @cpu is -1.  Returns NULL on allocation failure.
 */
static void *alloc_event_data(int cpu)
{
	cpumask_t *mask;
	struct etm_event_data *event_data;

	/* First get memory for the session's data */
	event_data = kzalloc(sizeof(struct etm_event_data), GFP_KERNEL);
	if (!event_data)
		return NULL;

	mask = &event_data->mask;
	if (cpu != -1)
		cpumask_set_cpu(cpu, mask);
	else
		cpumask_copy(mask, cpu_present_mask);

	/*
	 * Each CPU has a single path between source and destination. As such
	 * allocate an array using CPU numbers as indexes. That way a path
	 * for any CPU can easily be accessed at any given time. We proceed
	 * the same way for sessions involving a single CPU. The cost of
	 * unused memory when dealing with single CPU trace scenarios is small
	 * compared to the cost of searching through an optimized array.
	 */
	event_data->path = alloc_percpu(struct list_head *);

	if (!event_data->path) {
		kfree(event_data);
		return NULL;
	}

	return event_data;
}
199 | ||
200 | static void etm_free_aux(void *data) | |
201 | { | |
202 | struct etm_event_data *event_data = data; | |
203 | ||
204 | schedule_work(&event_data->work); | |
205 | } | |
206 | ||
/*
 * perf core callback: set up an AUX trace session.  Select the sink
 * (user-chosen via attr.config2, otherwise the sysfs-enabled default),
 * build a source-to-sink path for every CPU in the session, then
 * allocate the sink buffer.
 *
 * Returns the session's etm_event_data, or NULL on failure.
 */
static void *etm_setup_aux(struct perf_event *event, void **pages,
			   int nr_pages, bool overwrite)
{
	u32 id;
	int cpu = event->cpu;
	cpumask_t *mask;
	struct coresight_device *sink;
	struct etm_event_data *event_data = NULL;

	event_data = alloc_event_data(cpu);
	if (!event_data)
		return NULL;
	INIT_WORK(&event_data->work, free_event_data);

	/* First get the selected sink from user space. */
	if (event->attr.config2) {
		id = (u32)event->attr.config2;
		sink = coresight_get_sink_by_id(id);
	} else {
		sink = coresight_get_enabled_sink(true);
	}

	if (!sink)
		goto err;

	mask = &event_data->mask;

	/*
	 * Setup the path for each CPU in a trace session. We try to build
	 * trace path for each CPU in the mask. If we don't find an ETM
	 * for the CPU or fail to build a path, we clear the CPU from the
	 * mask and continue with the rest. If ever we try to trace on those
	 * CPUs, we can handle it and fail the session.
	 */
	for_each_cpu(cpu, mask) {
		struct list_head *path;
		struct coresight_device *csdev;

		csdev = per_cpu(csdev_src, cpu);
		/*
		 * If there is no ETM associated with this CPU clear it from
		 * the mask and continue with the rest. If ever we try to trace
		 * on this CPU, we handle it accordingly.
		 */
		if (!csdev) {
			cpumask_clear_cpu(cpu, mask);
			continue;
		}

		/*
		 * Building a path doesn't enable it, it simply builds a
		 * list of devices from source to sink that can be
		 * referenced later when the path is actually needed.
		 */
		path = coresight_build_path(csdev, sink);
		if (IS_ERR(path)) {
			cpumask_clear_cpu(cpu, mask);
			continue;
		}

		*etm_event_cpu_path_ptr(event_data, cpu) = path;
	}

	/* If we don't have any CPUs ready for tracing, abort */
	cpu = cpumask_first(mask);
	if (cpu >= nr_cpu_ids)
		goto err;

	/* A sink without buffer management callbacks is of no use to us */
	if (!sink_ops(sink)->alloc_buffer || !sink_ops(sink)->free_buffer)
		goto err;

	/* Allocate the sink buffer for this session */
	event_data->snk_config =
			sink_ops(sink)->alloc_buffer(sink, event, pages,
						     nr_pages, overwrite);
	if (!event_data->snk_config)
		goto err;

out:
	return event_data;

err:
	/* Schedules free_event_data() to undo everything done above */
	etm_free_aux(event_data);
	event_data = NULL;
	goto out;
}
293 | ||
/*
 * perf core callback: start tracing on the current CPU.  Open the AUX
 * ring buffer, enable the path to the sink, then the tracer itself.
 * On any failure the event is left in the PERF_HES_STOPPED state.
 */
static void etm_event_start(struct perf_event *event, int flags)
{
	int cpu = smp_processor_id();
	struct etm_event_data *event_data;
	struct perf_output_handle *handle = this_cpu_ptr(&ctx_handle);
	struct coresight_device *sink, *csdev = per_cpu(csdev_src, cpu);
	struct list_head *path;

	/* No tracer was registered for this CPU */
	if (!csdev)
		goto fail;

	/*
	 * Deal with the ring buffer API and get a handle on the
	 * session's information.
	 */
	event_data = perf_aux_output_begin(handle, event);
	if (!event_data)
		goto fail;

	path = etm_event_cpu_path(event_data, cpu);
	/* We need a sink, no need to continue without one */
	sink = coresight_get_sink(path);
	if (WARN_ON_ONCE(!sink))
		goto fail_end_stop;

	/* Nothing will happen without a path */
	if (coresight_enable_path(path, CS_MODE_PERF, handle))
		goto fail_end_stop;

	/* Tell the perf core the event is alive */
	event->hw.state = 0;

	/* Finally enable the tracer */
	if (source_ops(csdev)->enable(csdev, event, CS_MODE_PERF))
		goto fail_disable_path;

out:
	return;

fail_disable_path:
	coresight_disable_path(path);
fail_end_stop:
	/* Flag the data as truncated and close the AUX handle */
	perf_aux_output_flag(handle, PERF_AUX_FLAG_TRUNCATED);
	perf_aux_output_end(handle, 0);
fail:
	event->hw.state = PERF_HES_STOPPED;
	goto out;
}
342 | ||
/*
 * perf core callback: stop tracing on the current CPU.  Disable the
 * tracer and, when PERF_EF_UPDATE is set, collect the trace data into
 * the AUX buffer before releasing the path.
 */
static void etm_event_stop(struct perf_event *event, int mode)
{
	int cpu = smp_processor_id();
	unsigned long size;
	struct coresight_device *sink, *csdev = per_cpu(csdev_src, cpu);
	struct perf_output_handle *handle = this_cpu_ptr(&ctx_handle);
	struct etm_event_data *event_data = perf_get_aux(handle);
	struct list_head *path;

	/* Already stopped, e.g. because etm_event_start() failed */
	if (event->hw.state == PERF_HES_STOPPED)
		return;

	if (!csdev)
		return;

	path = etm_event_cpu_path(event_data, cpu);
	if (!path)
		return;

	sink = coresight_get_sink(path);
	if (!sink)
		return;

	/* stop tracer */
	source_ops(csdev)->disable(csdev, event);

	/* tell the core */
	event->hw.state = PERF_HES_STOPPED;

	if (mode & PERF_EF_UPDATE) {
		if (WARN_ON_ONCE(handle->event != event))
			return;

		/* update trace information */
		if (!sink_ops(sink)->update_buffer)
			return;

		size = sink_ops(sink)->update_buffer(sink, handle,
					      event_data->snk_config);
		perf_aux_output_end(handle, size);
	}

	/* Disabling the path make its elements available to other sessions */
	coresight_disable_path(path);
}
388 | ||
389 | static int etm_event_add(struct perf_event *event, int mode) | |
390 | { | |
391 | int ret = 0; | |
392 | struct hw_perf_event *hwc = &event->hw; | |
393 | ||
394 | if (mode & PERF_EF_START) { | |
395 | etm_event_start(event, 0); | |
396 | if (hwc->state & PERF_HES_STOPPED) | |
397 | ret = -EINVAL; | |
398 | } else { | |
399 | hwc->state = PERF_HES_STOPPED; | |
400 | } | |
401 | ||
402 | return ret; | |
403 | } | |
404 | ||
/* perf core callback: remove the event, collecting the trace data. */
static void etm_event_del(struct perf_event *event, int mode)
{
	etm_event_stop(event, PERF_EF_UPDATE);
}
409 | ||
ca878b14 MP |
/*
 * perf core callback: reject filter configurations the hardware can't
 * handle - more than ETM_ADDR_CMP_MAX filters, start/stop filters with
 * a size (i.e. ranges), or a mix of range and single-address filters.
 * Returns 0 if the set is acceptable, -EOPNOTSUPP otherwise.
 */
static int etm_addr_filters_validate(struct list_head *filters)
{
	bool range = false, address = false;
	int index = 0;
	struct perf_addr_filter *filter;

	list_for_each_entry(filter, filters, entry) {
		/*
		 * No need to go further if there's no more
		 * room for filters.
		 */
		if (++index > ETM_ADDR_CMP_MAX)
			return -EOPNOTSUPP;

		/* filter::size==0 means single address trigger */
		if (filter->size) {
			/*
			 * The existing code relies on START/STOP filters
			 * being address filters.
			 */
			if (filter->action == PERF_ADDR_FILTER_ACTION_START ||
			    filter->action == PERF_ADDR_FILTER_ACTION_STOP)
				return -EOPNOTSUPP;

			range = true;
		} else
			address = true;

		/*
		 * At this time we don't allow range and start/stop filtering
		 * to cohabitate, they have to be mutually exclusive.
		 */
		if (range && address)
			return -EOPNOTSUPP;
	}

	return 0;
}
448 | ||
/*
 * perf core callback: translate the event's perf address filters into
 * the hardware representation (struct etm_filters) used by the tracer.
 */
static void etm_addr_filters_sync(struct perf_event *event)
{
	struct perf_addr_filters_head *head = perf_event_addr_filters(event);
	unsigned long start, stop;
	struct perf_addr_filter_range *fr = event->addr_filter_ranges;
	struct etm_filters *filters = event->hw.addr_filters;
	struct etm_filter *etm_filter;
	struct perf_addr_filter *filter;
	int i = 0;

	list_for_each_entry(filter, &head->list, entry) {
		/* Resolved addresses come from the per-event range array */
		start = fr[i].start;
		stop = start + fr[i].size;
		etm_filter = &filters->etm_filter[i];

		switch (filter->action) {
		case PERF_ADDR_FILTER_ACTION_FILTER:
			/* Trace only within the [start, stop) range */
			etm_filter->start_addr = start;
			etm_filter->stop_addr = stop;
			etm_filter->type = ETM_ADDR_TYPE_RANGE;
			break;
		case PERF_ADDR_FILTER_ACTION_START:
			etm_filter->start_addr = start;
			etm_filter->type = ETM_ADDR_TYPE_START;
			break;
		case PERF_ADDR_FILTER_ACTION_STOP:
			etm_filter->stop_addr = stop;
			etm_filter->type = ETM_ADDR_TYPE_STOP;
			break;
		}
		i++;
	}

	filters->nr_filters = i;
}
484 | ||
0bcbf2e3 MP |
/*
 * Create (@link == true) or remove the "cpuN" sysfs link between the
 * PMU directory and the tracer's device, and record/clear the tracer
 * in the per-CPU csdev_src table used by the event callbacks.
 *
 * Returns 0 on success, -EPROBE_DEFER if the PMU isn't registered yet,
 * or the error from sysfs_create_link().
 */
int etm_perf_symlink(struct coresight_device *csdev, bool link)
{
	char entry[sizeof("cpu9999999")];
	int ret = 0, cpu = source_ops(csdev)->cpu_id(csdev);
	struct device *pmu_dev = etm_pmu.dev;
	struct device *cs_dev = &csdev->dev;

	sprintf(entry, "cpu%d", cpu);

	if (!etm_perf_up)
		return -EPROBE_DEFER;

	if (link) {
		ret = sysfs_create_link(&pmu_dev->kobj, &cs_dev->kobj, entry);
		if (ret)
			return ret;
		per_cpu(csdev_src, cpu) = csdev;
	} else {
		sysfs_remove_link(&pmu_dev->kobj, entry);
		per_cpu(csdev_src, cpu) = NULL;
	}

	return 0;
}
509 | ||
bb8e370b MP |
510 | static ssize_t etm_perf_sink_name_show(struct device *dev, |
511 | struct device_attribute *dattr, | |
512 | char *buf) | |
513 | { | |
514 | struct dev_ext_attribute *ea; | |
515 | ||
516 | ea = container_of(dattr, struct dev_ext_attribute, attr); | |
517 | return scnprintf(buf, PAGE_SIZE, "0x%lx\n", (unsigned long)(ea->var)); | |
518 | } | |
519 | ||
/*
 * Publish @csdev under the PMU's "sinks" sysfs group so user space can
 * resolve a sink name to the hash value passed back via attr.config2
 * (see coresight_get_sink_by_id()).
 *
 * Returns 0 on success, -EINVAL for non-sink devices or devices already
 * published, -EPROBE_DEFER if the PMU isn't up, or -ENOMEM.
 */
int etm_perf_add_symlink_sink(struct coresight_device *csdev)
{
	int ret;
	unsigned long hash;
	const char *name;
	struct device *pmu_dev = etm_pmu.dev;
	struct device *dev = &csdev->dev;
	struct dev_ext_attribute *ea;

	/* Only sink (or link/sink) devices belong in the group */
	if (csdev->type != CORESIGHT_DEV_TYPE_SINK &&
	    csdev->type != CORESIGHT_DEV_TYPE_LINKSINK)
		return -EINVAL;

	/* Already published */
	if (csdev->ea != NULL)
		return -EINVAL;

	if (!etm_perf_up)
		return -EPROBE_DEFER;

	/* devm allocation ties the attribute's lifetime to the device */
	ea = devm_kzalloc(dev, sizeof(*ea), GFP_KERNEL);
	if (!ea)
		return -ENOMEM;

	name = dev_name(dev);
	/* See function coresight_get_sink_by_id() to know where this is used */
	hash = hashlen_hash(hashlen_string(NULL, name));

	sysfs_attr_init(&ea->attr.attr);
	ea->attr.attr.name = devm_kstrdup(dev, name, GFP_KERNEL);
	if (!ea->attr.attr.name)
		return -ENOMEM;

	ea->attr.attr.mode = 0444;
	ea->attr.show = etm_perf_sink_name_show;
	/* The hash value itself is stored in the pointer, not an address */
	ea->var = (unsigned long *)hash;

	ret = sysfs_add_file_to_group(&pmu_dev->kobj,
				      &ea->attr.attr, "sinks");

	if (!ret)
		csdev->ea = ea;

	return ret;
}
564 | ||
/* Remove the sysfs entry published by etm_perf_add_symlink_sink(). */
void etm_perf_del_symlink_sink(struct coresight_device *csdev)
{
	struct device *pmu_dev = etm_pmu.dev;
	struct dev_ext_attribute *ea = csdev->ea;

	if (csdev->type != CORESIGHT_DEV_TYPE_SINK &&
	    csdev->type != CORESIGHT_DEV_TYPE_LINKSINK)
		return;

	/* Nothing was ever published for this device */
	if (!ea)
		return;

	sysfs_remove_file_from_group(&pmu_dev->kobj,
				     &ea->attr.attr, "sinks");
	csdev->ea = NULL;
}
581 | ||
0bcbf2e3 MP |
/*
 * Register the CoreSight ETM PMU with the perf core and, on success,
 * mark the driver ready via etm_perf_up.
 */
static int __init etm_perf_init(void)
{
	int ret;

	etm_pmu.capabilities = (PERF_PMU_CAP_EXCLUSIVE |
				PERF_PMU_CAP_ITRACE);

	etm_pmu.attr_groups = etm_pmu_attr_groups;
	etm_pmu.task_ctx_nr = perf_sw_context;
	etm_pmu.read = etm_event_read;
	etm_pmu.event_init = etm_event_init;
	etm_pmu.setup_aux = etm_setup_aux;
	etm_pmu.free_aux = etm_free_aux;
	etm_pmu.start = etm_event_start;
	etm_pmu.stop = etm_event_stop;
	etm_pmu.add = etm_event_add;
	etm_pmu.del = etm_event_del;
	etm_pmu.addr_filters_sync = etm_addr_filters_sync;
	etm_pmu.addr_filters_validate = etm_addr_filters_validate;
	etm_pmu.nr_addr_filters = ETM_ADDR_CMP_MAX;

	ret = perf_pmu_register(&etm_pmu, CORESIGHT_ETM_PMU_NAME, -1);
	if (ret == 0)
		etm_perf_up = true;

	return ret;
}
device_initcall(etm_perf_init);