Commit | Line | Data |
---|---|---|
a939fc5a PP |
1 | /* Copyright (c) 2011-2012, The Linux Foundation. All rights reserved. |
2 | * | |
3 | * This program is free software; you can redistribute it and/or modify | |
4 | * it under the terms of the GNU General Public License version 2 and | |
5 | * only version 2 as published by the Free Software Foundation. | |
6 | * | |
7 | * This program is distributed in the hope that it will be useful, | |
8 | * but WITHOUT ANY WARRANTY; without even the implied warranty of | |
9 | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the | |
10 | * GNU General Public License for more details. | |
11 | */ | |
12 | ||
13 | #include <linux/kernel.h> | |
14 | #include <linux/module.h> | |
15 | #include <linux/init.h> | |
16 | #include <linux/types.h> | |
17 | #include <linux/device.h> | |
18 | #include <linux/io.h> | |
19 | #include <linux/err.h> | |
20 | #include <linux/fs.h> | |
21 | #include <linux/slab.h> | |
22 | #include <linux/delay.h> | |
23 | #include <linux/smp.h> | |
24 | #include <linux/sysfs.h> | |
25 | #include <linux/stat.h> | |
26 | #include <linux/clk.h> | |
27 | #include <linux/cpu.h> | |
28 | #include <linux/of.h> | |
29 | #include <linux/coresight.h> | |
30 | #include <linux/amba/bus.h> | |
31 | #include <linux/seq_file.h> | |
32 | #include <linux/uaccess.h> | |
33 | #include <asm/sections.h> | |
34 | ||
35 | #include "coresight-etm.h" | |
36 | ||
#ifdef CONFIG_CORESIGHT_SOURCE_ETM_DEFAULT_ENABLE
static int boot_enable = 1;
#else
static int boot_enable;
#endif
/*
 * "boot_enable" module parameter (S_IRUGO: read-only from sysfs).
 * When non-zero, tracing is expected to be turned on from boot rather
 * than waiting for an explicit user enable.
 */
module_param_named(
	boot_enable, boot_enable, int, S_IRUGO
);

/* The number of ETM/PTM currently registered */
static int etm_count;
/* Per-CPU driver state, indexed by CPU number */
static struct etm_drvdata *etmdrvdata[NR_CPUS];
49 | ||
50 | static inline void etm_writel(struct etm_drvdata *drvdata, | |
51 | u32 val, u32 off) | |
52 | { | |
53 | if (drvdata->use_cp14) { | |
54 | if (etm_writel_cp14(off, val)) { | |
55 | dev_err(drvdata->dev, | |
56 | "invalid CP14 access to ETM reg: %#x", off); | |
57 | } | |
58 | } else { | |
59 | writel_relaxed(val, drvdata->base + off); | |
60 | } | |
61 | } | |
62 | ||
63 | static inline unsigned int etm_readl(struct etm_drvdata *drvdata, u32 off) | |
64 | { | |
65 | u32 val; | |
66 | ||
67 | if (drvdata->use_cp14) { | |
68 | if (etm_readl_cp14(off, &val)) { | |
69 | dev_err(drvdata->dev, | |
70 | "invalid CP14 access to ETM reg: %#x", off); | |
71 | } | |
72 | } else { | |
73 | val = readl_relaxed(drvdata->base + off); | |
74 | } | |
75 | ||
76 | return val; | |
77 | } | |
78 | ||
/*
 * Memory mapped writes to clear os lock are not supported on some processors
 * and OS lock must be unlocked before any memory mapped access on such
 * processors, otherwise memory mapped reads/writes will be invalid.
 */
static void etm_os_unlock(void *info)
{
	/* void * signature so this can be used as an smp call target */
	struct etm_drvdata *drvdata = (struct etm_drvdata *)info;
	/* Writing any value to ETMOSLAR unlocks the trace registers */
	etm_writel(drvdata, 0x0, ETMOSLAR);
	/* Make sure the unlock takes effect before later accesses */
	isb();
}
91 | ||
/* Set the ETMCR power-down bit, allowing the trace unit to power off */
static void etm_set_pwrdwn(struct etm_drvdata *drvdata)
{
	u32 etmcr;

	/* Ensure pending cp14 accesses complete before setting pwrdwn */
	mb();
	isb();
	etmcr = etm_readl(drvdata, ETMCR);
	etmcr |= ETMCR_PWD_DWN;
	etm_writel(drvdata, etmcr, ETMCR);
}
103 | ||
/* Clear the ETMCR power-down bit so trace registers can be programmed */
static void etm_clr_pwrdwn(struct etm_drvdata *drvdata)
{
	u32 etmcr;

	etmcr = etm_readl(drvdata, ETMCR);
	etmcr &= ~ETMCR_PWD_DWN;
	etm_writel(drvdata, etmcr, ETMCR);
	/* Ensure pwrup completes before subsequent cp14 accesses */
	mb();
	isb();
}
115 | ||
/* Request power to the trace registers via the ETMPDCR power-up bit */
static void etm_set_pwrup(struct etm_drvdata *drvdata)
{
	u32 etmpdcr;

	/* ETMPDCR is always memory mapped, hence the raw readl/writel */
	etmpdcr = readl_relaxed(drvdata->base + ETMPDCR);
	etmpdcr |= ETMPDCR_PWD_UP;
	writel_relaxed(etmpdcr, drvdata->base + ETMPDCR);
	/* Ensure pwrup completes before subsequent cp14 accesses */
	mb();
	isb();
}
127 | ||
/* Drop the ETMPDCR power-up request for the trace registers */
static void etm_clr_pwrup(struct etm_drvdata *drvdata)
{
	u32 etmpdcr;

	/* Ensure pending cp14 accesses complete before clearing pwrup */
	mb();
	isb();
	etmpdcr = readl_relaxed(drvdata->base + ETMPDCR);
	etmpdcr &= ~ETMPDCR_PWD_UP;
	writel_relaxed(etmpdcr, drvdata->base + ETMPDCR);
}
139 | ||
140 | /** | |
141 | * coresight_timeout_etm - loop until a bit has changed to a specific state. | |
142 | * @drvdata: etm's private data structure. | |
143 | * @offset: address of a register, starting from @addr. | |
144 | * @position: the position of the bit of interest. | |
145 | * @value: the value the bit should have. | |
146 | * | |
147 | * Basically the same as @coresight_timeout except for the register access | |
148 | * method where we have to account for CP14 configurations. | |
149 | ||
150 | * Return: 0 as soon as the bit has taken the desired state or -EAGAIN if | |
151 | * TIMEOUT_US has elapsed, which ever happens first. | |
152 | */ | |
153 | ||
154 | static int coresight_timeout_etm(struct etm_drvdata *drvdata, u32 offset, | |
155 | int position, int value) | |
156 | { | |
157 | int i; | |
158 | u32 val; | |
159 | ||
160 | for (i = TIMEOUT_US; i > 0; i--) { | |
161 | val = etm_readl(drvdata, offset); | |
162 | /* Waiting on the bit to go from 0 to 1 */ | |
163 | if (value) { | |
164 | if (val & BIT(position)) | |
165 | return 0; | |
166 | /* Waiting on the bit to go from 1 to 0 */ | |
167 | } else { | |
168 | if (!(val & BIT(position))) | |
169 | return 0; | |
170 | } | |
171 | ||
172 | /* | |
173 | * Delay is arbitrary - the specification doesn't say how long | |
174 | * we are expected to wait. Extra check required to make sure | |
175 | * we don't wait needlessly on the last iteration. | |
176 | */ | |
177 | if (i - 1) | |
178 | udelay(1); | |
179 | } | |
180 | ||
181 | return -EAGAIN; | |
182 | } | |
183 | ||
184 | ||
/* Enter programming mode: set ETMCR.prog and wait for ETMSR to confirm */
static void etm_set_prog(struct etm_drvdata *drvdata)
{
	u32 etmcr;

	etmcr = etm_readl(drvdata, ETMCR);
	etmcr |= ETMCR_ETM_PRG;
	etm_writel(drvdata, etmcr, ETMCR);
	/*
	 * Recommended by spec for cp14 accesses to ensure etmcr write is
	 * complete before polling etmsr
	 */
	isb();
	if (coresight_timeout_etm(drvdata, ETMSR, ETMSR_PROG_BIT, 1)) {
		dev_err(drvdata->dev,
			"timeout observed when probing at offset %#x\n", ETMSR);
	}
}
202 | ||
/* Leave programming mode: clear ETMCR.prog and wait for ETMSR to confirm */
static void etm_clr_prog(struct etm_drvdata *drvdata)
{
	u32 etmcr;

	etmcr = etm_readl(drvdata, ETMCR);
	etmcr &= ~ETMCR_ETM_PRG;
	etm_writel(drvdata, etmcr, ETMCR);
	/*
	 * Recommended by spec for cp14 accesses to ensure etmcr write is
	 * complete before polling etmsr
	 */
	isb();
	if (coresight_timeout_etm(drvdata, ETMSR, ETMSR_PROG_BIT, 0)) {
		dev_err(drvdata->dev,
			"timeout observed when probing at offset %#x\n", ETMSR);
	}
}
220 | ||
221 | static void etm_set_default(struct etm_drvdata *drvdata) | |
222 | { | |
223 | int i; | |
224 | ||
225 | drvdata->trigger_event = ETM_DEFAULT_EVENT_VAL; | |
226 | drvdata->enable_event = ETM_HARD_WIRE_RES_A; | |
227 | ||
228 | drvdata->seq_12_event = ETM_DEFAULT_EVENT_VAL; | |
229 | drvdata->seq_21_event = ETM_DEFAULT_EVENT_VAL; | |
230 | drvdata->seq_23_event = ETM_DEFAULT_EVENT_VAL; | |
231 | drvdata->seq_31_event = ETM_DEFAULT_EVENT_VAL; | |
232 | drvdata->seq_32_event = ETM_DEFAULT_EVENT_VAL; | |
233 | drvdata->seq_13_event = ETM_DEFAULT_EVENT_VAL; | |
234 | drvdata->timestamp_event = ETM_DEFAULT_EVENT_VAL; | |
235 | ||
236 | for (i = 0; i < drvdata->nr_cntr; i++) { | |
237 | drvdata->cntr_rld_val[i] = 0x0; | |
238 | drvdata->cntr_event[i] = ETM_DEFAULT_EVENT_VAL; | |
239 | drvdata->cntr_rld_event[i] = ETM_DEFAULT_EVENT_VAL; | |
240 | drvdata->cntr_val[i] = 0x0; | |
241 | } | |
242 | ||
243 | drvdata->seq_curr_state = 0x0; | |
244 | drvdata->ctxid_idx = 0x0; | |
245 | for (i = 0; i < drvdata->nr_ctxid_cmp; i++) | |
246 | drvdata->ctxid_val[i] = 0x0; | |
247 | drvdata->ctxid_mask = 0x0; | |
248 | } | |
249 | ||
/*
 * etm_enable_hw - program and start tracing on this CPU's ETM.
 *
 * @info: the etm_drvdata pointer (void * for smp_call_function_single).
 *
 * Powers the unit up, enters programming mode, loads the entire cached
 * configuration into the trace registers and finally clears the
 * programming bit to start tracing. The register sequence follows the
 * order required by the hardware - do not reorder.
 */
static void etm_enable_hw(void *info)
{
	int i;
	u32 etmcr;
	struct etm_drvdata *drvdata = info;

	CS_UNLOCK(drvdata->base);

	/* Turn engine on */
	etm_clr_pwrdwn(drvdata);
	/* Apply power to trace registers */
	etm_set_pwrup(drvdata);
	/* Make sure all registers are accessible */
	etm_os_unlock(drvdata);

	etm_set_prog(drvdata);

	/* Keep only the pwrdwn/prog bits, then fold in port size and ctrl */
	etmcr = etm_readl(drvdata, ETMCR);
	etmcr &= (ETMCR_PWD_DWN | ETMCR_ETM_PRG);
	etmcr |= drvdata->port_size;
	etm_writel(drvdata, drvdata->ctrl | etmcr, ETMCR);
	etm_writel(drvdata, drvdata->trigger_event, ETMTRIGGER);
	etm_writel(drvdata, drvdata->startstop_ctrl, ETMTSSCR);
	etm_writel(drvdata, drvdata->enable_event, ETMTEEVR);
	etm_writel(drvdata, drvdata->enable_ctrl1, ETMTECR1);
	etm_writel(drvdata, drvdata->fifofull_level, ETMFFLR);
	for (i = 0; i < drvdata->nr_addr_cmp; i++) {
		etm_writel(drvdata, drvdata->addr_val[i], ETMACVRn(i));
		etm_writel(drvdata, drvdata->addr_acctype[i], ETMACTRn(i));
	}
	for (i = 0; i < drvdata->nr_cntr; i++) {
		etm_writel(drvdata, drvdata->cntr_rld_val[i], ETMCNTRLDVRn(i));
		etm_writel(drvdata, drvdata->cntr_event[i], ETMCNTENRn(i));
		etm_writel(drvdata, drvdata->cntr_rld_event[i],
			   ETMCNTRLDEVRn(i));
		etm_writel(drvdata, drvdata->cntr_val[i], ETMCNTVRn(i));
	}
	etm_writel(drvdata, drvdata->seq_12_event, ETMSQ12EVR);
	etm_writel(drvdata, drvdata->seq_21_event, ETMSQ21EVR);
	etm_writel(drvdata, drvdata->seq_23_event, ETMSQ23EVR);
	etm_writel(drvdata, drvdata->seq_31_event, ETMSQ31EVR);
	etm_writel(drvdata, drvdata->seq_32_event, ETMSQ32EVR);
	etm_writel(drvdata, drvdata->seq_13_event, ETMSQ13EVR);
	etm_writel(drvdata, drvdata->seq_curr_state, ETMSQR);
	for (i = 0; i < drvdata->nr_ext_out; i++)
		etm_writel(drvdata, ETM_DEFAULT_EVENT_VAL, ETMEXTOUTEVRn(i));
	for (i = 0; i < drvdata->nr_ctxid_cmp; i++)
		etm_writel(drvdata, drvdata->ctxid_val[i], ETMCIDCVRn(i));
	etm_writel(drvdata, drvdata->ctxid_mask, ETMCIDCMR);
	etm_writel(drvdata, drvdata->sync_freq, ETMSYNCFR);
	/* No external input selected */
	etm_writel(drvdata, 0x0, ETMEXTINSELR);
	etm_writel(drvdata, drvdata->timestamp_event, ETMTSEVR);
	/* No auxiliary control selected */
	etm_writel(drvdata, 0x0, ETMAUXCR);
	etm_writel(drvdata, drvdata->traceid, ETMTRACEIDR);
	/* No VMID comparator value selected */
	etm_writel(drvdata, 0x0, ETMVMIDCVR);

	/* Ensures trace output is enabled from this ETM */
	etm_writel(drvdata, drvdata->ctrl | ETMCR_ETM_EN | etmcr, ETMCR);

	etm_clr_prog(drvdata);
	CS_LOCK(drvdata->base);

	dev_dbg(drvdata->dev, "cpu: %d enable smp call done\n", drvdata->cpu);
}
317 | ||
318 | static int etm_trace_id_simple(struct etm_drvdata *drvdata) | |
319 | { | |
320 | if (!drvdata->enable) | |
321 | return drvdata->traceid; | |
322 | ||
323 | return (etm_readl(drvdata, ETMTRACEIDR) & ETM_TRACEID_MASK); | |
324 | } | |
325 | ||
/*
 * etm_trace_id - coresight callback reporting this source's trace ID.
 *
 * Returns the cached ID without touching hardware when the ETM is
 * disabled; otherwise reads ETMTRACEIDR under clock + spinlock
 * protection. Returns -1 if the clock cannot be enabled.
 */
static int etm_trace_id(struct coresight_device *csdev)
{
	struct etm_drvdata *drvdata = dev_get_drvdata(csdev->dev.parent);
	unsigned long flags;
	int trace_id = -1;

	if (!drvdata->enable)
		return drvdata->traceid;

	if (clk_prepare_enable(drvdata->clk))
		goto out;

	spin_lock_irqsave(&drvdata->spinlock, flags);

	CS_UNLOCK(drvdata->base);
	trace_id = (etm_readl(drvdata, ETMTRACEIDR) & ETM_TRACEID_MASK);
	CS_LOCK(drvdata->base);

	spin_unlock_irqrestore(&drvdata->spinlock, flags);
	clk_disable_unprepare(drvdata->clk);
out:
	return trace_id;
}
349 | ||
/*
 * etm_enable - coresight enable callback for this trace source.
 *
 * Enables the clock, then programs the hardware on the owning CPU if it
 * is online; otherwise programming is deferred to the hotplug path.
 * Returns 0 on success or a negative errno, with the clock released on
 * failure.
 */
static int etm_enable(struct coresight_device *csdev)
{
	struct etm_drvdata *drvdata = dev_get_drvdata(csdev->dev.parent);
	int ret;

	ret = clk_prepare_enable(drvdata->clk);
	if (ret)
		goto err_clk;

	spin_lock(&drvdata->spinlock);

	/*
	 * Configure the ETM only if the CPU is online. If it isn't online
	 * hw configuration will take place when 'CPU_STARTING' is received
	 * in @etm_cpu_callback.
	 */
	if (cpu_online(drvdata->cpu)) {
		ret = smp_call_function_single(drvdata->cpu,
					       etm_enable_hw, drvdata, 1);
		if (ret)
			goto err;
	}

	drvdata->enable = true;
	/* sticky_enable is not cleared by etm_disable(): latched once set */
	drvdata->sticky_enable = true;

	spin_unlock(&drvdata->spinlock);

	dev_info(drvdata->dev, "ETM tracing enabled\n");
	return 0;
err:
	spin_unlock(&drvdata->spinlock);
	clk_disable_unprepare(drvdata->clk);
err_clk:
	return ret;
}
386 | ||
/*
 * etm_disable_hw - stop tracing on this CPU's ETM.
 *
 * @info: the etm_drvdata pointer (void * for smp_call_function_single).
 *
 * Enters programming mode, forces the trace-enable event to an
 * always-false condition, snapshots sequencer/counter state into the
 * cached config for post-trace analysis, then powers the unit down.
 */
static void etm_disable_hw(void *info)
{
	int i;
	struct etm_drvdata *drvdata = info;

	CS_UNLOCK(drvdata->base);
	etm_set_prog(drvdata);

	/* Program trace enable to low by using always false event */
	etm_writel(drvdata, ETM_HARD_WIRE_RES_A | ETM_EVENT_NOT_A, ETMTEEVR);

	/* Read back sequencer and counters for post trace analysis */
	drvdata->seq_curr_state = (etm_readl(drvdata, ETMSQR) & ETM_SQR_MASK);

	for (i = 0; i < drvdata->nr_cntr; i++)
		drvdata->cntr_val[i] = etm_readl(drvdata, ETMCNTVRn(i));

	etm_set_pwrdwn(drvdata);
	CS_LOCK(drvdata->base);

	dev_dbg(drvdata->dev, "cpu: %d disable smp call done\n", drvdata->cpu);
}
409 | ||
/*
 * etm_disable - coresight disable callback for this trace source.
 *
 * Stops tracing on the owning CPU and releases the clock taken in
 * etm_enable().
 */
static void etm_disable(struct coresight_device *csdev)
{
	struct etm_drvdata *drvdata = dev_get_drvdata(csdev->dev.parent);

	/*
	 * Taking hotplug lock here protects from clocks getting disabled
	 * with tracing being left on (crash scenario) if user disable occurs
	 * after cpu online mask indicates the cpu is offline but before the
	 * DYING hotplug callback is serviced by the ETM driver.
	 */
	get_online_cpus();
	spin_lock(&drvdata->spinlock);

	/*
	 * Executing etm_disable_hw on the cpu whose ETM is being disabled
	 * ensures that register writes occur when cpu is powered.
	 */
	smp_call_function_single(drvdata->cpu, etm_disable_hw, drvdata, 1);
	drvdata->enable = false;

	spin_unlock(&drvdata->spinlock);
	put_online_cpus();

	clk_disable_unprepare(drvdata->clk);

	dev_info(drvdata->dev, "ETM tracing disabled\n");
}
437 | ||
/* Source operations handed to the coresight core at registration time */
static const struct coresight_ops_source etm_source_ops = {
	.trace_id = etm_trace_id,
	.enable = etm_enable,
	.disable = etm_disable,
};

static const struct coresight_ops etm_cs_ops = {
	.source_ops = &etm_source_ops,
};
447 | ||
448 | static ssize_t nr_addr_cmp_show(struct device *dev, | |
449 | struct device_attribute *attr, char *buf) | |
450 | { | |
451 | unsigned long val; | |
452 | struct etm_drvdata *drvdata = dev_get_drvdata(dev->parent); | |
453 | ||
454 | val = drvdata->nr_addr_cmp; | |
455 | return sprintf(buf, "%#lx\n", val); | |
456 | } | |
457 | static DEVICE_ATTR_RO(nr_addr_cmp); | |
458 | ||
459 | static ssize_t nr_cntr_show(struct device *dev, | |
460 | struct device_attribute *attr, char *buf) | |
461 | { unsigned long val; | |
462 | struct etm_drvdata *drvdata = dev_get_drvdata(dev->parent); | |
463 | ||
464 | val = drvdata->nr_cntr; | |
465 | return sprintf(buf, "%#lx\n", val); | |
466 | } | |
467 | static DEVICE_ATTR_RO(nr_cntr); | |
468 | ||
469 | static ssize_t nr_ctxid_cmp_show(struct device *dev, | |
470 | struct device_attribute *attr, char *buf) | |
471 | { | |
472 | unsigned long val; | |
473 | struct etm_drvdata *drvdata = dev_get_drvdata(dev->parent); | |
474 | ||
475 | val = drvdata->nr_ctxid_cmp; | |
476 | return sprintf(buf, "%#lx\n", val); | |
477 | } | |
478 | static DEVICE_ATTR_RO(nr_ctxid_cmp); | |
479 | ||
/*
 * sysfs: dump the raw ETMSR status register. Needs the clock running
 * and the spinlock held across the unlocked register window.
 */
static ssize_t etmsr_show(struct device *dev,
			  struct device_attribute *attr, char *buf)
{
	int ret;
	unsigned long flags, val;
	struct etm_drvdata *drvdata = dev_get_drvdata(dev->parent);

	ret = clk_prepare_enable(drvdata->clk);
	if (ret)
		return ret;

	spin_lock_irqsave(&drvdata->spinlock, flags);
	CS_UNLOCK(drvdata->base);

	val = etm_readl(drvdata, ETMSR);

	CS_LOCK(drvdata->base);
	spin_unlock_irqrestore(&drvdata->spinlock, flags);
	clk_disable_unprepare(drvdata->clk);

	return sprintf(buf, "%#lx\n", val);
}
static DEVICE_ATTR_RO(etmsr);
503 | ||
504 | static ssize_t reset_store(struct device *dev, | |
505 | struct device_attribute *attr, | |
506 | const char *buf, size_t size) | |
507 | { | |
508 | int i, ret; | |
509 | unsigned long val; | |
510 | struct etm_drvdata *drvdata = dev_get_drvdata(dev->parent); | |
511 | ||
512 | ret = kstrtoul(buf, 16, &val); | |
513 | if (ret) | |
514 | return ret; | |
515 | ||
516 | if (val) { | |
517 | spin_lock(&drvdata->spinlock); | |
518 | drvdata->mode = ETM_MODE_EXCLUDE; | |
519 | drvdata->ctrl = 0x0; | |
520 | drvdata->trigger_event = ETM_DEFAULT_EVENT_VAL; | |
521 | drvdata->startstop_ctrl = 0x0; | |
522 | drvdata->addr_idx = 0x0; | |
523 | for (i = 0; i < drvdata->nr_addr_cmp; i++) { | |
524 | drvdata->addr_val[i] = 0x0; | |
525 | drvdata->addr_acctype[i] = 0x0; | |
526 | drvdata->addr_type[i] = ETM_ADDR_TYPE_NONE; | |
527 | } | |
528 | drvdata->cntr_idx = 0x0; | |
529 | ||
530 | etm_set_default(drvdata); | |
531 | spin_unlock(&drvdata->spinlock); | |
532 | } | |
533 | ||
534 | return size; | |
535 | } | |
536 | static DEVICE_ATTR_WO(reset); | |
537 | ||
538 | static ssize_t mode_show(struct device *dev, | |
539 | struct device_attribute *attr, char *buf) | |
540 | { | |
541 | unsigned long val; | |
542 | struct etm_drvdata *drvdata = dev_get_drvdata(dev->parent); | |
543 | ||
544 | val = drvdata->mode; | |
545 | return sprintf(buf, "%#lx\n", val); | |
546 | } | |
547 | ||
548 | static ssize_t mode_store(struct device *dev, | |
549 | struct device_attribute *attr, | |
550 | const char *buf, size_t size) | |
551 | { | |
552 | int ret; | |
553 | unsigned long val; | |
554 | struct etm_drvdata *drvdata = dev_get_drvdata(dev->parent); | |
555 | ||
556 | ret = kstrtoul(buf, 16, &val); | |
557 | if (ret) | |
558 | return ret; | |
559 | ||
560 | spin_lock(&drvdata->spinlock); | |
561 | drvdata->mode = val & ETM_MODE_ALL; | |
562 | ||
563 | if (drvdata->mode & ETM_MODE_EXCLUDE) | |
564 | drvdata->enable_ctrl1 |= ETMTECR1_INC_EXC; | |
565 | else | |
566 | drvdata->enable_ctrl1 &= ~ETMTECR1_INC_EXC; | |
567 | ||
568 | if (drvdata->mode & ETM_MODE_CYCACC) | |
569 | drvdata->ctrl |= ETMCR_CYC_ACC; | |
570 | else | |
571 | drvdata->ctrl &= ~ETMCR_CYC_ACC; | |
572 | ||
573 | if (drvdata->mode & ETM_MODE_STALL) { | |
574 | if (!(drvdata->etmccr & ETMCCR_FIFOFULL)) { | |
575 | dev_warn(drvdata->dev, "stall mode not supported\n"); | |
576 | return -EINVAL; | |
577 | } | |
578 | drvdata->ctrl |= ETMCR_STALL_MODE; | |
579 | } else | |
580 | drvdata->ctrl &= ~ETMCR_STALL_MODE; | |
581 | ||
582 | if (drvdata->mode & ETM_MODE_TIMESTAMP) { | |
583 | if (!(drvdata->etmccer & ETMCCER_TIMESTAMP)) { | |
584 | dev_warn(drvdata->dev, "timestamp not supported\n"); | |
585 | return -EINVAL; | |
586 | } | |
587 | drvdata->ctrl |= ETMCR_TIMESTAMP_EN; | |
588 | } else | |
589 | drvdata->ctrl &= ~ETMCR_TIMESTAMP_EN; | |
590 | ||
591 | if (drvdata->mode & ETM_MODE_CTXID) | |
592 | drvdata->ctrl |= ETMCR_CTXID_SIZE; | |
593 | else | |
594 | drvdata->ctrl &= ~ETMCR_CTXID_SIZE; | |
595 | spin_unlock(&drvdata->spinlock); | |
596 | ||
597 | return size; | |
598 | } | |
599 | static DEVICE_ATTR_RW(mode); | |
600 | ||
601 | static ssize_t trigger_event_show(struct device *dev, | |
602 | struct device_attribute *attr, char *buf) | |
603 | { | |
604 | unsigned long val; | |
605 | struct etm_drvdata *drvdata = dev_get_drvdata(dev->parent); | |
606 | ||
607 | val = drvdata->trigger_event; | |
608 | return sprintf(buf, "%#lx\n", val); | |
609 | } | |
610 | ||
611 | static ssize_t trigger_event_store(struct device *dev, | |
612 | struct device_attribute *attr, | |
613 | const char *buf, size_t size) | |
614 | { | |
615 | int ret; | |
616 | unsigned long val; | |
617 | struct etm_drvdata *drvdata = dev_get_drvdata(dev->parent); | |
618 | ||
619 | ret = kstrtoul(buf, 16, &val); | |
620 | if (ret) | |
621 | return ret; | |
622 | ||
623 | drvdata->trigger_event = val & ETM_EVENT_MASK; | |
624 | ||
625 | return size; | |
626 | } | |
627 | static DEVICE_ATTR_RW(trigger_event); | |
628 | ||
629 | static ssize_t enable_event_show(struct device *dev, | |
630 | struct device_attribute *attr, char *buf) | |
631 | { | |
632 | unsigned long val; | |
633 | struct etm_drvdata *drvdata = dev_get_drvdata(dev->parent); | |
634 | ||
635 | val = drvdata->enable_event; | |
636 | return sprintf(buf, "%#lx\n", val); | |
637 | } | |
638 | ||
639 | static ssize_t enable_event_store(struct device *dev, | |
640 | struct device_attribute *attr, | |
641 | const char *buf, size_t size) | |
642 | { | |
643 | int ret; | |
644 | unsigned long val; | |
645 | struct etm_drvdata *drvdata = dev_get_drvdata(dev->parent); | |
646 | ||
647 | ret = kstrtoul(buf, 16, &val); | |
648 | if (ret) | |
649 | return ret; | |
650 | ||
651 | drvdata->enable_event = val & ETM_EVENT_MASK; | |
652 | ||
653 | return size; | |
654 | } | |
655 | static DEVICE_ATTR_RW(enable_event); | |
656 | ||
657 | static ssize_t fifofull_level_show(struct device *dev, | |
658 | struct device_attribute *attr, char *buf) | |
659 | { | |
660 | unsigned long val; | |
661 | struct etm_drvdata *drvdata = dev_get_drvdata(dev->parent); | |
662 | ||
663 | val = drvdata->fifofull_level; | |
664 | return sprintf(buf, "%#lx\n", val); | |
665 | } | |
666 | ||
667 | static ssize_t fifofull_level_store(struct device *dev, | |
668 | struct device_attribute *attr, | |
669 | const char *buf, size_t size) | |
670 | { | |
671 | int ret; | |
672 | unsigned long val; | |
673 | struct etm_drvdata *drvdata = dev_get_drvdata(dev->parent); | |
674 | ||
675 | ret = kstrtoul(buf, 16, &val); | |
676 | if (ret) | |
677 | return ret; | |
678 | ||
679 | drvdata->fifofull_level = val; | |
680 | ||
681 | return size; | |
682 | } | |
683 | static DEVICE_ATTR_RW(fifofull_level); | |
684 | ||
685 | static ssize_t addr_idx_show(struct device *dev, | |
686 | struct device_attribute *attr, char *buf) | |
687 | { | |
688 | unsigned long val; | |
689 | struct etm_drvdata *drvdata = dev_get_drvdata(dev->parent); | |
690 | ||
691 | val = drvdata->addr_idx; | |
692 | return sprintf(buf, "%#lx\n", val); | |
693 | } | |
694 | ||
695 | static ssize_t addr_idx_store(struct device *dev, | |
696 | struct device_attribute *attr, | |
697 | const char *buf, size_t size) | |
698 | { | |
699 | int ret; | |
700 | unsigned long val; | |
701 | struct etm_drvdata *drvdata = dev_get_drvdata(dev->parent); | |
702 | ||
703 | ret = kstrtoul(buf, 16, &val); | |
704 | if (ret) | |
705 | return ret; | |
706 | ||
707 | if (val >= drvdata->nr_addr_cmp) | |
708 | return -EINVAL; | |
709 | ||
710 | /* | |
711 | * Use spinlock to ensure index doesn't change while it gets | |
712 | * dereferenced multiple times within a spinlock block elsewhere. | |
713 | */ | |
714 | spin_lock(&drvdata->spinlock); | |
715 | drvdata->addr_idx = val; | |
716 | spin_unlock(&drvdata->spinlock); | |
717 | ||
718 | return size; | |
719 | } | |
720 | static DEVICE_ATTR_RW(addr_idx); | |
721 | ||
722 | static ssize_t addr_single_show(struct device *dev, | |
723 | struct device_attribute *attr, char *buf) | |
724 | { | |
725 | u8 idx; | |
726 | unsigned long val; | |
727 | struct etm_drvdata *drvdata = dev_get_drvdata(dev->parent); | |
728 | ||
729 | spin_lock(&drvdata->spinlock); | |
730 | idx = drvdata->addr_idx; | |
731 | if (!(drvdata->addr_type[idx] == ETM_ADDR_TYPE_NONE || | |
732 | drvdata->addr_type[idx] == ETM_ADDR_TYPE_SINGLE)) { | |
733 | spin_unlock(&drvdata->spinlock); | |
734 | return -EINVAL; | |
735 | } | |
736 | ||
737 | val = drvdata->addr_val[idx]; | |
738 | spin_unlock(&drvdata->spinlock); | |
739 | ||
740 | return sprintf(buf, "%#lx\n", val); | |
741 | } | |
742 | ||
743 | static ssize_t addr_single_store(struct device *dev, | |
744 | struct device_attribute *attr, | |
745 | const char *buf, size_t size) | |
746 | { | |
747 | u8 idx; | |
748 | int ret; | |
749 | unsigned long val; | |
750 | struct etm_drvdata *drvdata = dev_get_drvdata(dev->parent); | |
751 | ||
752 | ret = kstrtoul(buf, 16, &val); | |
753 | if (ret) | |
754 | return ret; | |
755 | ||
756 | spin_lock(&drvdata->spinlock); | |
757 | idx = drvdata->addr_idx; | |
758 | if (!(drvdata->addr_type[idx] == ETM_ADDR_TYPE_NONE || | |
759 | drvdata->addr_type[idx] == ETM_ADDR_TYPE_SINGLE)) { | |
760 | spin_unlock(&drvdata->spinlock); | |
761 | return -EINVAL; | |
762 | } | |
763 | ||
764 | drvdata->addr_val[idx] = val; | |
765 | drvdata->addr_type[idx] = ETM_ADDR_TYPE_SINGLE; | |
766 | spin_unlock(&drvdata->spinlock); | |
767 | ||
768 | return size; | |
769 | } | |
770 | static DEVICE_ATTR_RW(addr_single); | |
771 | ||
772 | static ssize_t addr_range_show(struct device *dev, | |
773 | struct device_attribute *attr, char *buf) | |
774 | { | |
775 | u8 idx; | |
776 | unsigned long val1, val2; | |
777 | struct etm_drvdata *drvdata = dev_get_drvdata(dev->parent); | |
778 | ||
779 | spin_lock(&drvdata->spinlock); | |
780 | idx = drvdata->addr_idx; | |
781 | if (idx % 2 != 0) { | |
782 | spin_unlock(&drvdata->spinlock); | |
783 | return -EPERM; | |
784 | } | |
785 | if (!((drvdata->addr_type[idx] == ETM_ADDR_TYPE_NONE && | |
786 | drvdata->addr_type[idx + 1] == ETM_ADDR_TYPE_NONE) || | |
787 | (drvdata->addr_type[idx] == ETM_ADDR_TYPE_RANGE && | |
788 | drvdata->addr_type[idx + 1] == ETM_ADDR_TYPE_RANGE))) { | |
789 | spin_unlock(&drvdata->spinlock); | |
790 | return -EPERM; | |
791 | } | |
792 | ||
793 | val1 = drvdata->addr_val[idx]; | |
794 | val2 = drvdata->addr_val[idx + 1]; | |
795 | spin_unlock(&drvdata->spinlock); | |
796 | ||
797 | return sprintf(buf, "%#lx %#lx\n", val1, val2); | |
798 | } | |
799 | ||
800 | static ssize_t addr_range_store(struct device *dev, | |
801 | struct device_attribute *attr, | |
802 | const char *buf, size_t size) | |
803 | { | |
804 | u8 idx; | |
805 | unsigned long val1, val2; | |
806 | struct etm_drvdata *drvdata = dev_get_drvdata(dev->parent); | |
807 | ||
808 | if (sscanf(buf, "%lx %lx", &val1, &val2) != 2) | |
809 | return -EINVAL; | |
810 | /* Lower address comparator cannot have a higher address value */ | |
811 | if (val1 > val2) | |
812 | return -EINVAL; | |
813 | ||
814 | spin_lock(&drvdata->spinlock); | |
815 | idx = drvdata->addr_idx; | |
816 | if (idx % 2 != 0) { | |
817 | spin_unlock(&drvdata->spinlock); | |
818 | return -EPERM; | |
819 | } | |
820 | if (!((drvdata->addr_type[idx] == ETM_ADDR_TYPE_NONE && | |
821 | drvdata->addr_type[idx + 1] == ETM_ADDR_TYPE_NONE) || | |
822 | (drvdata->addr_type[idx] == ETM_ADDR_TYPE_RANGE && | |
823 | drvdata->addr_type[idx + 1] == ETM_ADDR_TYPE_RANGE))) { | |
824 | spin_unlock(&drvdata->spinlock); | |
825 | return -EPERM; | |
826 | } | |
827 | ||
828 | drvdata->addr_val[idx] = val1; | |
829 | drvdata->addr_type[idx] = ETM_ADDR_TYPE_RANGE; | |
830 | drvdata->addr_val[idx + 1] = val2; | |
831 | drvdata->addr_type[idx + 1] = ETM_ADDR_TYPE_RANGE; | |
832 | drvdata->enable_ctrl1 |= (1 << (idx/2)); | |
833 | spin_unlock(&drvdata->spinlock); | |
834 | ||
835 | return size; | |
836 | } | |
837 | static DEVICE_ATTR_RW(addr_range); | |
838 | ||
839 | static ssize_t addr_start_show(struct device *dev, | |
840 | struct device_attribute *attr, char *buf) | |
841 | { | |
842 | u8 idx; | |
843 | unsigned long val; | |
844 | struct etm_drvdata *drvdata = dev_get_drvdata(dev->parent); | |
845 | ||
846 | spin_lock(&drvdata->spinlock); | |
847 | idx = drvdata->addr_idx; | |
848 | if (!(drvdata->addr_type[idx] == ETM_ADDR_TYPE_NONE || | |
849 | drvdata->addr_type[idx] == ETM_ADDR_TYPE_START)) { | |
850 | spin_unlock(&drvdata->spinlock); | |
851 | return -EPERM; | |
852 | } | |
853 | ||
854 | val = drvdata->addr_val[idx]; | |
855 | spin_unlock(&drvdata->spinlock); | |
856 | ||
857 | return sprintf(buf, "%#lx\n", val); | |
858 | } | |
859 | ||
860 | static ssize_t addr_start_store(struct device *dev, | |
861 | struct device_attribute *attr, | |
862 | const char *buf, size_t size) | |
863 | { | |
864 | u8 idx; | |
865 | int ret; | |
866 | unsigned long val; | |
867 | struct etm_drvdata *drvdata = dev_get_drvdata(dev->parent); | |
868 | ||
869 | ret = kstrtoul(buf, 16, &val); | |
870 | if (ret) | |
871 | return ret; | |
872 | ||
873 | spin_lock(&drvdata->spinlock); | |
874 | idx = drvdata->addr_idx; | |
875 | if (!(drvdata->addr_type[idx] == ETM_ADDR_TYPE_NONE || | |
876 | drvdata->addr_type[idx] == ETM_ADDR_TYPE_START)) { | |
877 | spin_unlock(&drvdata->spinlock); | |
878 | return -EPERM; | |
879 | } | |
880 | ||
881 | drvdata->addr_val[idx] = val; | |
882 | drvdata->addr_type[idx] = ETM_ADDR_TYPE_START; | |
883 | drvdata->startstop_ctrl |= (1 << idx); | |
884 | drvdata->enable_ctrl1 |= BIT(25); | |
885 | spin_unlock(&drvdata->spinlock); | |
886 | ||
887 | return size; | |
888 | } | |
889 | static DEVICE_ATTR_RW(addr_start); | |
890 | ||
891 | static ssize_t addr_stop_show(struct device *dev, | |
892 | struct device_attribute *attr, char *buf) | |
893 | { | |
894 | u8 idx; | |
895 | unsigned long val; | |
896 | struct etm_drvdata *drvdata = dev_get_drvdata(dev->parent); | |
897 | ||
898 | spin_lock(&drvdata->spinlock); | |
899 | idx = drvdata->addr_idx; | |
900 | if (!(drvdata->addr_type[idx] == ETM_ADDR_TYPE_NONE || | |
901 | drvdata->addr_type[idx] == ETM_ADDR_TYPE_STOP)) { | |
902 | spin_unlock(&drvdata->spinlock); | |
903 | return -EPERM; | |
904 | } | |
905 | ||
906 | val = drvdata->addr_val[idx]; | |
907 | spin_unlock(&drvdata->spinlock); | |
908 | ||
909 | return sprintf(buf, "%#lx\n", val); | |
910 | } | |
911 | ||
912 | static ssize_t addr_stop_store(struct device *dev, | |
913 | struct device_attribute *attr, | |
914 | const char *buf, size_t size) | |
915 | { | |
916 | u8 idx; | |
917 | int ret; | |
918 | unsigned long val; | |
919 | struct etm_drvdata *drvdata = dev_get_drvdata(dev->parent); | |
920 | ||
921 | ret = kstrtoul(buf, 16, &val); | |
922 | if (ret) | |
923 | return ret; | |
924 | ||
925 | spin_lock(&drvdata->spinlock); | |
926 | idx = drvdata->addr_idx; | |
927 | if (!(drvdata->addr_type[idx] == ETM_ADDR_TYPE_NONE || | |
928 | drvdata->addr_type[idx] == ETM_ADDR_TYPE_STOP)) { | |
929 | spin_unlock(&drvdata->spinlock); | |
930 | return -EPERM; | |
931 | } | |
932 | ||
933 | drvdata->addr_val[idx] = val; | |
934 | drvdata->addr_type[idx] = ETM_ADDR_TYPE_STOP; | |
935 | drvdata->startstop_ctrl |= (1 << (idx + 16)); | |
936 | drvdata->enable_ctrl1 |= ETMTECR1_START_STOP; | |
937 | spin_unlock(&drvdata->spinlock); | |
938 | ||
939 | return size; | |
940 | } | |
941 | static DEVICE_ATTR_RW(addr_stop); | |
942 | ||
943 | static ssize_t addr_acctype_show(struct device *dev, | |
944 | struct device_attribute *attr, char *buf) | |
945 | { | |
946 | unsigned long val; | |
947 | struct etm_drvdata *drvdata = dev_get_drvdata(dev->parent); | |
948 | ||
949 | spin_lock(&drvdata->spinlock); | |
950 | val = drvdata->addr_acctype[drvdata->addr_idx]; | |
951 | spin_unlock(&drvdata->spinlock); | |
952 | ||
953 | return sprintf(buf, "%#lx\n", val); | |
954 | } | |
955 | ||
956 | static ssize_t addr_acctype_store(struct device *dev, | |
957 | struct device_attribute *attr, | |
958 | const char *buf, size_t size) | |
959 | { | |
960 | int ret; | |
961 | unsigned long val; | |
962 | struct etm_drvdata *drvdata = dev_get_drvdata(dev->parent); | |
963 | ||
964 | ret = kstrtoul(buf, 16, &val); | |
965 | if (ret) | |
966 | return ret; | |
967 | ||
968 | spin_lock(&drvdata->spinlock); | |
969 | drvdata->addr_acctype[drvdata->addr_idx] = val; | |
970 | spin_unlock(&drvdata->spinlock); | |
971 | ||
972 | return size; | |
973 | } | |
974 | static DEVICE_ATTR_RW(addr_acctype); | |
975 | ||
976 | static ssize_t cntr_idx_show(struct device *dev, | |
977 | struct device_attribute *attr, char *buf) | |
978 | { | |
979 | unsigned long val; | |
980 | struct etm_drvdata *drvdata = dev_get_drvdata(dev->parent); | |
981 | ||
982 | val = drvdata->cntr_idx; | |
983 | return sprintf(buf, "%#lx\n", val); | |
984 | } | |
985 | ||
986 | static ssize_t cntr_idx_store(struct device *dev, | |
987 | struct device_attribute *attr, | |
988 | const char *buf, size_t size) | |
989 | { | |
990 | int ret; | |
991 | unsigned long val; | |
992 | struct etm_drvdata *drvdata = dev_get_drvdata(dev->parent); | |
993 | ||
994 | ret = kstrtoul(buf, 16, &val); | |
995 | if (ret) | |
996 | return ret; | |
997 | ||
998 | if (val >= drvdata->nr_cntr) | |
999 | return -EINVAL; | |
1000 | /* | |
1001 | * Use spinlock to ensure index doesn't change while it gets | |
1002 | * dereferenced multiple times within a spinlock block elsewhere. | |
1003 | */ | |
1004 | spin_lock(&drvdata->spinlock); | |
1005 | drvdata->cntr_idx = val; | |
1006 | spin_unlock(&drvdata->spinlock); | |
1007 | ||
1008 | return size; | |
1009 | } | |
1010 | static DEVICE_ATTR_RW(cntr_idx); | |
1011 | ||
1012 | static ssize_t cntr_rld_val_show(struct device *dev, | |
1013 | struct device_attribute *attr, char *buf) | |
1014 | { | |
1015 | unsigned long val; | |
1016 | struct etm_drvdata *drvdata = dev_get_drvdata(dev->parent); | |
1017 | ||
1018 | spin_lock(&drvdata->spinlock); | |
1019 | val = drvdata->cntr_rld_val[drvdata->cntr_idx]; | |
1020 | spin_unlock(&drvdata->spinlock); | |
1021 | ||
1022 | return sprintf(buf, "%#lx\n", val); | |
1023 | } | |
1024 | ||
1025 | static ssize_t cntr_rld_val_store(struct device *dev, | |
1026 | struct device_attribute *attr, | |
1027 | const char *buf, size_t size) | |
1028 | { | |
1029 | int ret; | |
1030 | unsigned long val; | |
1031 | struct etm_drvdata *drvdata = dev_get_drvdata(dev->parent); | |
1032 | ||
1033 | ret = kstrtoul(buf, 16, &val); | |
1034 | if (ret) | |
1035 | return ret; | |
1036 | ||
1037 | spin_lock(&drvdata->spinlock); | |
1038 | drvdata->cntr_rld_val[drvdata->cntr_idx] = val; | |
1039 | spin_unlock(&drvdata->spinlock); | |
1040 | ||
1041 | return size; | |
1042 | } | |
1043 | static DEVICE_ATTR_RW(cntr_rld_val); | |
1044 | ||
1045 | static ssize_t cntr_event_show(struct device *dev, | |
1046 | struct device_attribute *attr, char *buf) | |
1047 | { | |
1048 | unsigned long val; | |
1049 | struct etm_drvdata *drvdata = dev_get_drvdata(dev->parent); | |
1050 | ||
1051 | spin_lock(&drvdata->spinlock); | |
1052 | val = drvdata->cntr_event[drvdata->cntr_idx]; | |
1053 | spin_unlock(&drvdata->spinlock); | |
1054 | ||
1055 | return sprintf(buf, "%#lx\n", val); | |
1056 | } | |
1057 | ||
1058 | static ssize_t cntr_event_store(struct device *dev, | |
1059 | struct device_attribute *attr, | |
1060 | const char *buf, size_t size) | |
1061 | { | |
1062 | int ret; | |
1063 | unsigned long val; | |
1064 | struct etm_drvdata *drvdata = dev_get_drvdata(dev->parent); | |
1065 | ||
1066 | ret = kstrtoul(buf, 16, &val); | |
1067 | if (ret) | |
1068 | return ret; | |
1069 | ||
1070 | spin_lock(&drvdata->spinlock); | |
1071 | drvdata->cntr_event[drvdata->cntr_idx] = val & ETM_EVENT_MASK; | |
1072 | spin_unlock(&drvdata->spinlock); | |
1073 | ||
1074 | return size; | |
1075 | } | |
1076 | static DEVICE_ATTR_RW(cntr_event); | |
1077 | ||
1078 | static ssize_t cntr_rld_event_show(struct device *dev, | |
1079 | struct device_attribute *attr, char *buf) | |
1080 | { | |
1081 | unsigned long val; | |
1082 | struct etm_drvdata *drvdata = dev_get_drvdata(dev->parent); | |
1083 | ||
1084 | spin_lock(&drvdata->spinlock); | |
1085 | val = drvdata->cntr_rld_event[drvdata->cntr_idx]; | |
1086 | spin_unlock(&drvdata->spinlock); | |
1087 | ||
1088 | return sprintf(buf, "%#lx\n", val); | |
1089 | } | |
1090 | ||
1091 | static ssize_t cntr_rld_event_store(struct device *dev, | |
1092 | struct device_attribute *attr, | |
1093 | const char *buf, size_t size) | |
1094 | { | |
1095 | int ret; | |
1096 | unsigned long val; | |
1097 | struct etm_drvdata *drvdata = dev_get_drvdata(dev->parent); | |
1098 | ||
1099 | ret = kstrtoul(buf, 16, &val); | |
1100 | if (ret) | |
1101 | return ret; | |
1102 | ||
1103 | spin_lock(&drvdata->spinlock); | |
1104 | drvdata->cntr_rld_event[drvdata->cntr_idx] = val & ETM_EVENT_MASK; | |
1105 | spin_unlock(&drvdata->spinlock); | |
1106 | ||
1107 | return size; | |
1108 | } | |
1109 | static DEVICE_ATTR_RW(cntr_rld_event); | |
1110 | ||
1111 | static ssize_t cntr_val_show(struct device *dev, | |
1112 | struct device_attribute *attr, char *buf) | |
1113 | { | |
1114 | int i, ret = 0; | |
1115 | u32 val; | |
1116 | struct etm_drvdata *drvdata = dev_get_drvdata(dev->parent); | |
1117 | ||
1118 | if (!drvdata->enable) { | |
1119 | spin_lock(&drvdata->spinlock); | |
1120 | for (i = 0; i < drvdata->nr_cntr; i++) | |
1121 | ret += sprintf(buf, "counter %d: %x\n", | |
1122 | i, drvdata->cntr_val[i]); | |
1123 | spin_unlock(&drvdata->spinlock); | |
1124 | return ret; | |
1125 | } | |
1126 | ||
1127 | for (i = 0; i < drvdata->nr_cntr; i++) { | |
1128 | val = etm_readl(drvdata, ETMCNTVRn(i)); | |
1129 | ret += sprintf(buf, "counter %d: %x\n", i, val); | |
1130 | } | |
1131 | ||
1132 | return ret; | |
1133 | } | |
1134 | ||
1135 | static ssize_t cntr_val_store(struct device *dev, | |
1136 | struct device_attribute *attr, | |
1137 | const char *buf, size_t size) | |
1138 | { | |
1139 | int ret; | |
1140 | unsigned long val; | |
1141 | struct etm_drvdata *drvdata = dev_get_drvdata(dev->parent); | |
1142 | ||
1143 | ret = kstrtoul(buf, 16, &val); | |
1144 | if (ret) | |
1145 | return ret; | |
1146 | ||
1147 | spin_lock(&drvdata->spinlock); | |
1148 | drvdata->cntr_val[drvdata->cntr_idx] = val; | |
1149 | spin_unlock(&drvdata->spinlock); | |
1150 | ||
1151 | return size; | |
1152 | } | |
1153 | static DEVICE_ATTR_RW(cntr_val); | |
1154 | ||
1155 | static ssize_t seq_12_event_show(struct device *dev, | |
1156 | struct device_attribute *attr, char *buf) | |
1157 | { | |
1158 | unsigned long val; | |
1159 | struct etm_drvdata *drvdata = dev_get_drvdata(dev->parent); | |
1160 | ||
1161 | val = drvdata->seq_12_event; | |
1162 | return sprintf(buf, "%#lx\n", val); | |
1163 | } | |
1164 | ||
1165 | static ssize_t seq_12_event_store(struct device *dev, | |
1166 | struct device_attribute *attr, | |
1167 | const char *buf, size_t size) | |
1168 | { | |
1169 | int ret; | |
1170 | unsigned long val; | |
1171 | struct etm_drvdata *drvdata = dev_get_drvdata(dev->parent); | |
1172 | ||
1173 | ret = kstrtoul(buf, 16, &val); | |
1174 | if (ret) | |
1175 | return ret; | |
1176 | ||
1177 | drvdata->seq_12_event = val & ETM_EVENT_MASK; | |
1178 | return size; | |
1179 | } | |
1180 | static DEVICE_ATTR_RW(seq_12_event); | |
1181 | ||
1182 | static ssize_t seq_21_event_show(struct device *dev, | |
1183 | struct device_attribute *attr, char *buf) | |
1184 | { | |
1185 | unsigned long val; | |
1186 | struct etm_drvdata *drvdata = dev_get_drvdata(dev->parent); | |
1187 | ||
1188 | val = drvdata->seq_21_event; | |
1189 | return sprintf(buf, "%#lx\n", val); | |
1190 | } | |
1191 | ||
1192 | static ssize_t seq_21_event_store(struct device *dev, | |
1193 | struct device_attribute *attr, | |
1194 | const char *buf, size_t size) | |
1195 | { | |
1196 | int ret; | |
1197 | unsigned long val; | |
1198 | struct etm_drvdata *drvdata = dev_get_drvdata(dev->parent); | |
1199 | ||
1200 | ret = kstrtoul(buf, 16, &val); | |
1201 | if (ret) | |
1202 | return ret; | |
1203 | ||
1204 | drvdata->seq_21_event = val & ETM_EVENT_MASK; | |
1205 | return size; | |
1206 | } | |
1207 | static DEVICE_ATTR_RW(seq_21_event); | |
1208 | ||
1209 | static ssize_t seq_23_event_show(struct device *dev, | |
1210 | struct device_attribute *attr, char *buf) | |
1211 | { | |
1212 | unsigned long val; | |
1213 | struct etm_drvdata *drvdata = dev_get_drvdata(dev->parent); | |
1214 | ||
1215 | val = drvdata->seq_23_event; | |
1216 | return sprintf(buf, "%#lx\n", val); | |
1217 | } | |
1218 | ||
1219 | static ssize_t seq_23_event_store(struct device *dev, | |
1220 | struct device_attribute *attr, | |
1221 | const char *buf, size_t size) | |
1222 | { | |
1223 | int ret; | |
1224 | unsigned long val; | |
1225 | struct etm_drvdata *drvdata = dev_get_drvdata(dev->parent); | |
1226 | ||
1227 | ret = kstrtoul(buf, 16, &val); | |
1228 | if (ret) | |
1229 | return ret; | |
1230 | ||
1231 | drvdata->seq_23_event = val & ETM_EVENT_MASK; | |
1232 | return size; | |
1233 | } | |
1234 | static DEVICE_ATTR_RW(seq_23_event); | |
1235 | ||
1236 | static ssize_t seq_31_event_show(struct device *dev, | |
1237 | struct device_attribute *attr, char *buf) | |
1238 | { | |
1239 | unsigned long val; | |
1240 | struct etm_drvdata *drvdata = dev_get_drvdata(dev->parent); | |
1241 | ||
1242 | val = drvdata->seq_31_event; | |
1243 | return sprintf(buf, "%#lx\n", val); | |
1244 | } | |
1245 | ||
1246 | static ssize_t seq_31_event_store(struct device *dev, | |
1247 | struct device_attribute *attr, | |
1248 | const char *buf, size_t size) | |
1249 | { | |
1250 | int ret; | |
1251 | unsigned long val; | |
1252 | struct etm_drvdata *drvdata = dev_get_drvdata(dev->parent); | |
1253 | ||
1254 | ret = kstrtoul(buf, 16, &val); | |
1255 | if (ret) | |
1256 | return ret; | |
1257 | ||
1258 | drvdata->seq_31_event = val & ETM_EVENT_MASK; | |
1259 | return size; | |
1260 | } | |
1261 | static DEVICE_ATTR_RW(seq_31_event); | |
1262 | ||
1263 | static ssize_t seq_32_event_show(struct device *dev, | |
1264 | struct device_attribute *attr, char *buf) | |
1265 | { | |
1266 | unsigned long val; | |
1267 | struct etm_drvdata *drvdata = dev_get_drvdata(dev->parent); | |
1268 | ||
1269 | val = drvdata->seq_32_event; | |
1270 | return sprintf(buf, "%#lx\n", val); | |
1271 | } | |
1272 | ||
1273 | static ssize_t seq_32_event_store(struct device *dev, | |
1274 | struct device_attribute *attr, | |
1275 | const char *buf, size_t size) | |
1276 | { | |
1277 | int ret; | |
1278 | unsigned long val; | |
1279 | struct etm_drvdata *drvdata = dev_get_drvdata(dev->parent); | |
1280 | ||
1281 | ret = kstrtoul(buf, 16, &val); | |
1282 | if (ret) | |
1283 | return ret; | |
1284 | ||
1285 | drvdata->seq_32_event = val & ETM_EVENT_MASK; | |
1286 | return size; | |
1287 | } | |
1288 | static DEVICE_ATTR_RW(seq_32_event); | |
1289 | ||
1290 | static ssize_t seq_13_event_show(struct device *dev, | |
1291 | struct device_attribute *attr, char *buf) | |
1292 | { | |
1293 | unsigned long val; | |
1294 | struct etm_drvdata *drvdata = dev_get_drvdata(dev->parent); | |
1295 | ||
1296 | val = drvdata->seq_13_event; | |
1297 | return sprintf(buf, "%#lx\n", val); | |
1298 | } | |
1299 | ||
1300 | static ssize_t seq_13_event_store(struct device *dev, | |
1301 | struct device_attribute *attr, | |
1302 | const char *buf, size_t size) | |
1303 | { | |
1304 | int ret; | |
1305 | unsigned long val; | |
1306 | struct etm_drvdata *drvdata = dev_get_drvdata(dev->parent); | |
1307 | ||
1308 | ret = kstrtoul(buf, 16, &val); | |
1309 | if (ret) | |
1310 | return ret; | |
1311 | ||
1312 | drvdata->seq_13_event = val & ETM_EVENT_MASK; | |
1313 | return size; | |
1314 | } | |
1315 | static DEVICE_ATTR_RW(seq_13_event); | |
1316 | ||
/*
 * sysfs: current state of the ETM sequencer.  While tracing is enabled
 * the live value is read from ETMSQR -- with the component clock on and
 * the coresight lock released inside the irq-safe spinlock -- otherwise
 * the cached value is reported.
 */
static ssize_t seq_curr_state_show(struct device *dev,
				   struct device_attribute *attr, char *buf)
{
	int ret;
	unsigned long val, flags;
	struct etm_drvdata *drvdata = dev_get_drvdata(dev->parent);

	if (!drvdata->enable) {
		val = drvdata->seq_curr_state;
		goto out;
	}

	ret = clk_prepare_enable(drvdata->clk);
	if (ret)
		return ret;

	spin_lock_irqsave(&drvdata->spinlock, flags);

	CS_UNLOCK(drvdata->base);
	val = (etm_readl(drvdata, ETMSQR) & ETM_SQR_MASK);
	CS_LOCK(drvdata->base);

	spin_unlock_irqrestore(&drvdata->spinlock, flags);
	clk_disable_unprepare(drvdata->clk);
out:
	return sprintf(buf, "%#lx\n", val);
}

static ssize_t seq_curr_state_store(struct device *dev,
				    struct device_attribute *attr,
				    const char *buf, size_t size)
{
	int ret;
	unsigned long val;
	struct etm_drvdata *drvdata = dev_get_drvdata(dev->parent);

	ret = kstrtoul(buf, 16, &val);
	if (ret)
		return ret;

	/* the sequencer only has states 0..ETM_SEQ_STATE_MAX_VAL */
	if (val > ETM_SEQ_STATE_MAX_VAL)
		return -EINVAL;

	drvdata->seq_curr_state = val;

	return size;
}
static DEVICE_ATTR_RW(seq_curr_state);
1365 | ||
1366 | static ssize_t ctxid_idx_show(struct device *dev, | |
1367 | struct device_attribute *attr, char *buf) | |
1368 | { | |
1369 | unsigned long val; | |
1370 | struct etm_drvdata *drvdata = dev_get_drvdata(dev->parent); | |
1371 | ||
1372 | val = drvdata->ctxid_idx; | |
1373 | return sprintf(buf, "%#lx\n", val); | |
1374 | } | |
1375 | ||
1376 | static ssize_t ctxid_idx_store(struct device *dev, | |
1377 | struct device_attribute *attr, | |
1378 | const char *buf, size_t size) | |
1379 | { | |
1380 | int ret; | |
1381 | unsigned long val; | |
1382 | struct etm_drvdata *drvdata = dev_get_drvdata(dev->parent); | |
1383 | ||
1384 | ret = kstrtoul(buf, 16, &val); | |
1385 | if (ret) | |
1386 | return ret; | |
1387 | ||
1388 | if (val >= drvdata->nr_ctxid_cmp) | |
1389 | return -EINVAL; | |
1390 | ||
1391 | /* | |
1392 | * Use spinlock to ensure index doesn't change while it gets | |
1393 | * dereferenced multiple times within a spinlock block elsewhere. | |
1394 | */ | |
1395 | spin_lock(&drvdata->spinlock); | |
1396 | drvdata->ctxid_idx = val; | |
1397 | spin_unlock(&drvdata->spinlock); | |
1398 | ||
1399 | return size; | |
1400 | } | |
1401 | static DEVICE_ATTR_RW(ctxid_idx); | |
1402 | ||
1403 | static ssize_t ctxid_val_show(struct device *dev, | |
1404 | struct device_attribute *attr, char *buf) | |
1405 | { | |
1406 | unsigned long val; | |
1407 | struct etm_drvdata *drvdata = dev_get_drvdata(dev->parent); | |
1408 | ||
1409 | spin_lock(&drvdata->spinlock); | |
1410 | val = drvdata->ctxid_val[drvdata->ctxid_idx]; | |
1411 | spin_unlock(&drvdata->spinlock); | |
1412 | ||
1413 | return sprintf(buf, "%#lx\n", val); | |
1414 | } | |
1415 | ||
1416 | static ssize_t ctxid_val_store(struct device *dev, | |
1417 | struct device_attribute *attr, | |
1418 | const char *buf, size_t size) | |
1419 | { | |
1420 | int ret; | |
1421 | unsigned long val; | |
1422 | struct etm_drvdata *drvdata = dev_get_drvdata(dev->parent); | |
1423 | ||
1424 | ret = kstrtoul(buf, 16, &val); | |
1425 | if (ret) | |
1426 | return ret; | |
1427 | ||
1428 | spin_lock(&drvdata->spinlock); | |
1429 | drvdata->ctxid_val[drvdata->ctxid_idx] = val; | |
1430 | spin_unlock(&drvdata->spinlock); | |
1431 | ||
1432 | return size; | |
1433 | } | |
1434 | static DEVICE_ATTR_RW(ctxid_val); | |
1435 | ||
1436 | static ssize_t ctxid_mask_show(struct device *dev, | |
1437 | struct device_attribute *attr, char *buf) | |
1438 | { | |
1439 | unsigned long val; | |
1440 | struct etm_drvdata *drvdata = dev_get_drvdata(dev->parent); | |
1441 | ||
1442 | val = drvdata->ctxid_mask; | |
1443 | return sprintf(buf, "%#lx\n", val); | |
1444 | } | |
1445 | ||
1446 | static ssize_t ctxid_mask_store(struct device *dev, | |
1447 | struct device_attribute *attr, | |
1448 | const char *buf, size_t size) | |
1449 | { | |
1450 | int ret; | |
1451 | unsigned long val; | |
1452 | struct etm_drvdata *drvdata = dev_get_drvdata(dev->parent); | |
1453 | ||
1454 | ret = kstrtoul(buf, 16, &val); | |
1455 | if (ret) | |
1456 | return ret; | |
1457 | ||
1458 | drvdata->ctxid_mask = val; | |
1459 | return size; | |
1460 | } | |
1461 | static DEVICE_ATTR_RW(ctxid_mask); | |
1462 | ||
1463 | static ssize_t sync_freq_show(struct device *dev, | |
1464 | struct device_attribute *attr, char *buf) | |
1465 | { | |
1466 | unsigned long val; | |
1467 | struct etm_drvdata *drvdata = dev_get_drvdata(dev->parent); | |
1468 | ||
1469 | val = drvdata->sync_freq; | |
1470 | return sprintf(buf, "%#lx\n", val); | |
1471 | } | |
1472 | ||
1473 | static ssize_t sync_freq_store(struct device *dev, | |
1474 | struct device_attribute *attr, | |
1475 | const char *buf, size_t size) | |
1476 | { | |
1477 | int ret; | |
1478 | unsigned long val; | |
1479 | struct etm_drvdata *drvdata = dev_get_drvdata(dev->parent); | |
1480 | ||
1481 | ret = kstrtoul(buf, 16, &val); | |
1482 | if (ret) | |
1483 | return ret; | |
1484 | ||
1485 | drvdata->sync_freq = val & ETM_SYNC_MASK; | |
1486 | return size; | |
1487 | } | |
1488 | static DEVICE_ATTR_RW(sync_freq); | |
1489 | ||
1490 | static ssize_t timestamp_event_show(struct device *dev, | |
1491 | struct device_attribute *attr, char *buf) | |
1492 | { | |
1493 | unsigned long val; | |
1494 | struct etm_drvdata *drvdata = dev_get_drvdata(dev->parent); | |
1495 | ||
1496 | val = drvdata->timestamp_event; | |
1497 | return sprintf(buf, "%#lx\n", val); | |
1498 | } | |
1499 | ||
1500 | static ssize_t timestamp_event_store(struct device *dev, | |
1501 | struct device_attribute *attr, | |
1502 | const char *buf, size_t size) | |
1503 | { | |
1504 | int ret; | |
1505 | unsigned long val; | |
1506 | struct etm_drvdata *drvdata = dev_get_drvdata(dev->parent); | |
1507 | ||
1508 | ret = kstrtoul(buf, 16, &val); | |
1509 | if (ret) | |
1510 | return ret; | |
1511 | ||
1512 | drvdata->timestamp_event = val & ETM_EVENT_MASK; | |
1513 | return size; | |
1514 | } | |
1515 | static DEVICE_ATTR_RW(timestamp_event); | |
1516 | ||
/*
 * sysfs: one-shot dump of the main configuration/status registers and the
 * CPU this ETM is affine to.  The component clock is enabled and the
 * coresight lock released for the duration of the register reads.
 */
static ssize_t status_show(struct device *dev,
			   struct device_attribute *attr, char *buf)
{
	int ret;
	unsigned long flags;
	struct etm_drvdata *drvdata = dev_get_drvdata(dev->parent);

	ret = clk_prepare_enable(drvdata->clk);
	if (ret)
		return ret;

	spin_lock_irqsave(&drvdata->spinlock, flags);

	CS_UNLOCK(drvdata->base);
	ret = sprintf(buf,
		      "ETMCCR: 0x%08x\n"
		      "ETMCCER: 0x%08x\n"
		      "ETMSCR: 0x%08x\n"
		      "ETMIDR: 0x%08x\n"
		      "ETMCR: 0x%08x\n"
		      "ETMTRACEIDR: 0x%08x\n"
		      "Enable event: 0x%08x\n"
		      "Enable start/stop: 0x%08x\n"
		      "Enable control: CR1 0x%08x CR2 0x%08x\n"
		      "CPU affinity: %d\n",
		      drvdata->etmccr, drvdata->etmccer,
		      etm_readl(drvdata, ETMSCR), etm_readl(drvdata, ETMIDR),
		      etm_readl(drvdata, ETMCR), etm_trace_id_simple(drvdata),
		      etm_readl(drvdata, ETMTEEVR),
		      etm_readl(drvdata, ETMTSSCR),
		      etm_readl(drvdata, ETMTECR1),
		      etm_readl(drvdata, ETMTECR2),
		      drvdata->cpu);
	CS_LOCK(drvdata->base);

	spin_unlock_irqrestore(&drvdata->spinlock, flags);
	clk_disable_unprepare(drvdata->clk);

	return ret;
}
static DEVICE_ATTR_RO(status);
1558 | ||
/*
 * sysfs: trace source ID.  While tracing is enabled the value is read
 * back from the live ETMTRACEIDR register; otherwise the cached traceid
 * is reported.
 */
static ssize_t traceid_show(struct device *dev,
			    struct device_attribute *attr, char *buf)
{
	int ret;
	unsigned long val, flags;
	struct etm_drvdata *drvdata = dev_get_drvdata(dev->parent);

	if (!drvdata->enable) {
		val = drvdata->traceid;
		goto out;
	}

	ret = clk_prepare_enable(drvdata->clk);
	if (ret)
		return ret;

	spin_lock_irqsave(&drvdata->spinlock, flags);
	CS_UNLOCK(drvdata->base);

	val = (etm_readl(drvdata, ETMTRACEIDR) & ETM_TRACEID_MASK);

	CS_LOCK(drvdata->base);
	spin_unlock_irqrestore(&drvdata->spinlock, flags);
	clk_disable_unprepare(drvdata->clk);
out:
	return sprintf(buf, "%#lx\n", val);
}

static ssize_t traceid_store(struct device *dev,
			     struct device_attribute *attr,
			     const char *buf, size_t size)
{
	int ret;
	unsigned long val;
	struct etm_drvdata *drvdata = dev_get_drvdata(dev->parent);

	ret = kstrtoul(buf, 16, &val);
	if (ret)
		return ret;

	/* only the bits covered by ETM_TRACEID_MASK are valid IDs */
	drvdata->traceid = val & ETM_TRACEID_MASK;
	return size;
}
static DEVICE_ATTR_RW(traceid);
1603 | ||
/*
 * sysfs attributes exported for each ETM/PTM via the coresight core
 * (handed over through desc->groups at registration time).
 */
static struct attribute *coresight_etm_attrs[] = {
	&dev_attr_nr_addr_cmp.attr,
	&dev_attr_nr_cntr.attr,
	&dev_attr_nr_ctxid_cmp.attr,
	&dev_attr_etmsr.attr,
	&dev_attr_reset.attr,
	&dev_attr_mode.attr,
	&dev_attr_trigger_event.attr,
	&dev_attr_enable_event.attr,
	&dev_attr_fifofull_level.attr,
	&dev_attr_addr_idx.attr,
	&dev_attr_addr_single.attr,
	&dev_attr_addr_range.attr,
	&dev_attr_addr_start.attr,
	&dev_attr_addr_stop.attr,
	&dev_attr_addr_acctype.attr,
	&dev_attr_cntr_idx.attr,
	&dev_attr_cntr_rld_val.attr,
	&dev_attr_cntr_event.attr,
	&dev_attr_cntr_rld_event.attr,
	&dev_attr_cntr_val.attr,
	&dev_attr_seq_12_event.attr,
	&dev_attr_seq_21_event.attr,
	&dev_attr_seq_23_event.attr,
	&dev_attr_seq_31_event.attr,
	&dev_attr_seq_32_event.attr,
	&dev_attr_seq_13_event.attr,
	&dev_attr_seq_curr_state.attr,
	&dev_attr_ctxid_idx.attr,
	&dev_attr_ctxid_val.attr,
	&dev_attr_ctxid_mask.attr,
	&dev_attr_sync_freq.attr,
	&dev_attr_timestamp_event.attr,
	&dev_attr_status.attr,
	&dev_attr_traceid.attr,
	NULL,
};
ATTRIBUTE_GROUPS(coresight_etm);
1642 | ||
/*
 * CPU hotplug callback.  CPU_STARTING and CPU_DYING run on the hotplugged
 * CPU itself, which is what the ETM register accesses require; CPU_ONLINE
 * runs later in normal context and restores a boot-enabled trace session
 * the first time the CPU comes up.
 */
static int etm_cpu_callback(struct notifier_block *nfb, unsigned long action,
			    void *hcpu)
{
	unsigned int cpu = (unsigned long)hcpu;

	/* nothing to do for CPUs whose ETM has not been probed */
	if (!etmdrvdata[cpu])
		goto out;

	switch (action & (~CPU_TASKS_FROZEN)) {
	case CPU_STARTING:
		spin_lock(&etmdrvdata[cpu]->spinlock);
		/* OS lock must be cleared before the ETM can be programmed */
		if (!etmdrvdata[cpu]->os_unlock) {
			etm_os_unlock(etmdrvdata[cpu]);
			etmdrvdata[cpu]->os_unlock = true;
		}

		/* re-program the ETM if a session was active on this CPU */
		if (etmdrvdata[cpu]->enable)
			etm_enable_hw(etmdrvdata[cpu]);
		spin_unlock(&etmdrvdata[cpu]->spinlock);
		break;

	case CPU_ONLINE:
		if (etmdrvdata[cpu]->boot_enable &&
		    !etmdrvdata[cpu]->sticky_enable)
			coresight_enable(etmdrvdata[cpu]->csdev);
		break;

	case CPU_DYING:
		spin_lock(&etmdrvdata[cpu]->spinlock);
		if (etmdrvdata[cpu]->enable)
			etm_disable_hw(etmdrvdata[cpu]);
		spin_unlock(&etmdrvdata[cpu]->spinlock);
		break;
	}
out:
	return NOTIFY_OK;
}

static struct notifier_block etm_cpu_notifier = {
	.notifier_call = etm_cpu_callback,
};
1684 | ||
1685 | static bool etm_arch_supported(u8 arch) | |
1686 | { | |
1687 | switch (arch) { | |
1688 | case ETM_ARCH_V3_3: | |
1689 | break; | |
1690 | case ETM_ARCH_V3_5: | |
1691 | break; | |
1692 | case PFT_ARCH_V1_0: | |
1693 | break; | |
1694 | case PFT_ARCH_V1_1: | |
1695 | break; | |
1696 | default: | |
1697 | return false; | |
1698 | } | |
1699 | return true; | |
1700 | } | |
1701 | ||
/*
 * etm_init_arch_data - discover the ETM/PTM capabilities.
 *
 * Invoked via smp_call_function_single() so the register accesses happen
 * on the CPU the ETM is affine to.  Leaves the ETM powered down again on
 * exit.
 */
static void etm_init_arch_data(void *info)
{
	u32 etmidr;
	u32 etmccr;
	struct etm_drvdata *drvdata = info;

	CS_UNLOCK(drvdata->base);

	/* First dummy read */
	(void)etm_readl(drvdata, ETMPDSR);
	/* Provide power to ETM: ETMPDCR[3] == 1 */
	etm_set_pwrup(drvdata);
	/*
	 * Clear power down bit since when this bit is set writes to
	 * certain registers might be ignored.
	 */
	etm_clr_pwrdwn(drvdata);
	/*
	 * Set prog bit. It will be set from reset but this is included to
	 * ensure it is set
	 */
	etm_set_prog(drvdata);

	/* Find all capabilities */
	etmidr = etm_readl(drvdata, ETMIDR);
	drvdata->arch = BMVAL(etmidr, 4, 11);
	drvdata->port_size = etm_readl(drvdata, ETMCR) & PORT_SIZE_MASK;

	drvdata->etmccer = etm_readl(drvdata, ETMCCER);
	etmccr = etm_readl(drvdata, ETMCCR);
	drvdata->etmccr = etmccr;
	/* ETMCCR[3:0] counts comparator *pairs*, hence the doubling */
	drvdata->nr_addr_cmp = BMVAL(etmccr, 0, 3) * 2;
	drvdata->nr_cntr = BMVAL(etmccr, 13, 15);
	drvdata->nr_ext_inp = BMVAL(etmccr, 17, 19);
	drvdata->nr_ext_out = BMVAL(etmccr, 20, 22);
	drvdata->nr_ctxid_cmp = BMVAL(etmccr, 24, 25);

	/* power the ETM back down until it is actually enabled */
	etm_set_pwrdwn(drvdata);
	etm_clr_pwrup(drvdata);
	CS_LOCK(drvdata->base);
}
1743 | ||
/*
 * etm_init_default_data - install the default trace configuration:
 * cycle-accurate tracing with timestamps, covering the kernel text
 * section (_stext.._etext) via the first address-range comparator pair.
 */
static void etm_init_default_data(struct etm_drvdata *drvdata)
{
	static int etm3x_traceid;

	u32 flags = (1 << 0 |	/* instruction execute */
		     3 << 3 |	/* ARM instruction */
		     0 << 5 |	/* No data value comparison */
		     0 << 7 |	/* No exact match */
		     0 << 8 |	/* Ignore context ID */
		     0 << 10);	/* Security ignored */

	/*
	 * Initial configuration only - guarantees sources handled by
	 * this driver have a unique ID at startup time but not between
	 * all other types of sources. For that we lean on the core
	 * framework.
	 */
	drvdata->traceid = etm3x_traceid++;
	drvdata->ctrl = (ETMCR_CYC_ACC | ETMCR_TIMESTAMP_EN);
	drvdata->enable_ctrl1 = ETMTECR1_ADDR_COMP_1;
	if (drvdata->nr_addr_cmp >= 2) {
		drvdata->addr_val[0] = (u32) _stext;
		drvdata->addr_val[1] = (u32) _etext;
		drvdata->addr_acctype[0] = flags;
		drvdata->addr_acctype[1] = flags;
		drvdata->addr_type[0] = ETM_ADDR_TYPE_RANGE;
		drvdata->addr_type[1] = ETM_ADDR_TYPE_RANGE;
	}

	etm_set_default(drvdata);
}
1775 | ||
/*
 * etm_probe - bind an ETM/PTM AMBA device.
 *
 * Maps the component, discovers its capabilities on the CPU it is affine
 * to, installs the default trace configuration and registers the device
 * with the coresight framework.  Returns 0 on success or a negative
 * errno.
 */
static int etm_probe(struct amba_device *adev, const struct amba_id *id)
{
	int ret;
	void __iomem *base;
	struct device *dev = &adev->dev;
	struct coresight_platform_data *pdata = NULL;
	struct etm_drvdata *drvdata;
	struct resource *res = &adev->res;
	struct coresight_desc *desc;
	struct device_node *np = adev->dev.of_node;

	desc = devm_kzalloc(dev, sizeof(*desc), GFP_KERNEL);
	if (!desc)
		return -ENOMEM;

	drvdata = devm_kzalloc(dev, sizeof(*drvdata), GFP_KERNEL);
	if (!drvdata)
		return -ENOMEM;

	if (np) {
		pdata = of_get_coresight_platform_data(dev, np);
		if (IS_ERR(pdata))
			return PTR_ERR(pdata);

		adev->dev.platform_data = pdata;
		/* "arm,cp14" selects coprocessor access over MMIO */
		drvdata->use_cp14 = of_property_read_bool(np, "arm,cp14");
	}

	drvdata->dev = &adev->dev;
	dev_set_drvdata(dev, drvdata);

	/* Validity for the resource is already checked by the AMBA core */
	base = devm_ioremap_resource(dev, res);
	if (IS_ERR(base))
		return PTR_ERR(base);

	drvdata->base = base;

	spin_lock_init(&drvdata->spinlock);

	drvdata->clk = adev->pclk;
	ret = clk_prepare_enable(drvdata->clk);
	if (ret)
		return ret;

	drvdata->cpu = pdata ? pdata->cpu : 0;

	/* keep the affine CPU online while its ETM is being accessed */
	get_online_cpus();
	etmdrvdata[drvdata->cpu] = drvdata;

	if (!smp_call_function_single(drvdata->cpu, etm_os_unlock, drvdata, 1))
		drvdata->os_unlock = true;

	/* capability registers must be read on the CPU owning this ETM */
	if (smp_call_function_single(drvdata->cpu,
				     etm_init_arch_data, drvdata, 1))
		dev_err(dev, "ETM arch init failed\n");

	/* the first probed ETM registers the hotplug notifier for all */
	if (!etm_count++)
		register_hotcpu_notifier(&etm_cpu_notifier);

	put_online_cpus();

	if (etm_arch_supported(drvdata->arch) == false) {
		ret = -EINVAL;
		goto err_arch_supported;
	}
	etm_init_default_data(drvdata);

	clk_disable_unprepare(drvdata->clk);

	desc->type = CORESIGHT_DEV_TYPE_SOURCE;
	desc->subtype.source_subtype = CORESIGHT_DEV_SUBTYPE_SOURCE_PROC;
	desc->ops = &etm_cs_ops;
	desc->pdata = pdata;
	desc->dev = dev;
	desc->groups = coresight_etm_groups;
	drvdata->csdev = coresight_register(desc);
	if (IS_ERR(drvdata->csdev)) {
		ret = PTR_ERR(drvdata->csdev);
		goto err_arch_supported;
	}

	dev_info(dev, "ETM initialized\n");

	/* optionally start tracing right away (boot_enable module param) */
	if (boot_enable) {
		coresight_enable(drvdata->csdev);
		drvdata->boot_enable = true;
	}

	return 0;

err_arch_supported:
	clk_disable_unprepare(drvdata->clk);
	if (--etm_count == 0)
		unregister_hotcpu_notifier(&etm_cpu_notifier);
	return ret;
}
1873 | ||
/*
 * etm_remove - unbind the device; the last ETM going away also drops the
 * CPU hotplug notifier registered by the first probe.
 */
static int etm_remove(struct amba_device *adev)
{
	struct etm_drvdata *drvdata = amba_get_drvdata(adev);

	coresight_unregister(drvdata->csdev);
	if (--etm_count == 0)
		unregister_hotcpu_notifier(&etm_cpu_notifier);

	return 0;
}
1884 | ||
/* AMBA peripheral IDs of the ETM/PTM revisions handled by this driver */
static struct amba_id etm_ids[] = {
	{	/* ETM 3.3 */
		.id	= 0x0003b921,
		.mask	= 0x0003ffff,
	},
	{	/* ETM 3.5 */
		.id	= 0x0003b956,
		.mask	= 0x0003ffff,
	},
	{	/* PTM 1.0 */
		.id	= 0x0003b950,
		.mask	= 0x0003ffff,
	},
	{	/* PTM 1.1 */
		.id	= 0x0003b95f,
		.mask	= 0x0003ffff,
	},
	{ 0, 0},
};
1904 | ||
/* AMBA bus glue: matches the IDs above and dispatches probe/remove */
static struct amba_driver etm_driver = {
	.drv = {
		.name	= "coresight-etm3x",
		.owner	= THIS_MODULE,
	},
	.probe		= etm_probe,
	.remove		= etm_remove,
	.id_table	= etm_ids,
};
1914 | ||
/* Module entry: register with the AMBA bus; devices probe via etm_ids */
int __init etm_init(void)
{
	return amba_driver_register(&etm_driver);
}
module_init(etm_init);
1920 | ||
/* Module exit: drop the AMBA driver registration */
void __exit etm_exit(void)
{
	amba_driver_unregister(&etm_driver);
}
module_exit(etm_exit);
1926 | ||
1927 | MODULE_LICENSE("GPL v2"); | |
1928 | MODULE_DESCRIPTION("CoreSight Program Flow Trace driver"); |