drivers/hwtracing/coresight/coresight-tmc-etf.c
// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright(C) 2016 Linaro Limited. All rights reserved.
 * Author: Mathieu Poirier <mathieu.poirier@linaro.org>
 */

#include <linux/atomic.h>
#include <linux/circ_buf.h>
#include <linux/coresight.h>
#include <linux/perf_event.h>
#include <linux/slab.h>
#include "coresight-priv.h"
#include "coresight-tmc.h"
#include "coresight-etm-perf.h"

static int tmc_set_etf_buffer(struct coresight_device *csdev,
                              struct perf_output_handle *handle);

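/*
 * Program the TMC for ETB/circular-buffer sink operation: wait for the unit
 * to report ready, select circular buffer mode, enable the formatter along
 * with the flush and trigger controls, program the trigger counter and turn
 * the capture engine on.  The CS_UNLOCK()/CS_LOCK() pair opens and closes
 * the device's software lock around the register writes.
 */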
static void __tmc_etb_enable_hw(struct tmc_drvdata *drvdata)
{
        CS_UNLOCK(drvdata->base);

        /* Wait for TMCSReady bit to be set */
        tmc_wait_for_tmcready(drvdata);

        writel_relaxed(TMC_MODE_CIRCULAR_BUFFER, drvdata->base + TMC_MODE);
        writel_relaxed(TMC_FFCR_EN_FMT | TMC_FFCR_EN_TI |
                       TMC_FFCR_FON_FLIN | TMC_FFCR_FON_TRIG_EVT |
                       TMC_FFCR_TRIGON_TRIGIN,
                       drvdata->base + TMC_FFCR);

        writel_relaxed(drvdata->trigger_cntr, drvdata->base + TMC_TRG);
        tmc_enable_hw(drvdata);

        CS_LOCK(drvdata->base);
}

static int tmc_etb_enable_hw(struct tmc_drvdata *drvdata)
{
        int rc = coresight_claim_device(drvdata->base);

        if (rc)
                return rc;

        __tmc_etb_enable_hw(drvdata);
        return 0;
}

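/*
 * Drain the trace RAM into drvdata->buf by reading the RAM Read Data
 * register (TMC_RRD) one 32-bit word at a time, stopping when it returns
 * 0xFFFFFFFF, the value the TMC produces once the RAM has been emptied.
 * If the buffer wrapped (TMC_STS_FULL), a barrier packet is inserted at the
 * start of the data so that decoders know the stream is discontinuous.
 */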
static void tmc_etb_dump_hw(struct tmc_drvdata *drvdata)
{
        char *bufp;
        u32 read_data, lost;

        /* Check if the buffer wrapped around. */
        lost = readl_relaxed(drvdata->base + TMC_STS) & TMC_STS_FULL;
        bufp = drvdata->buf;
        drvdata->len = 0;
        while (1) {
                read_data = readl_relaxed(drvdata->base + TMC_RRD);
                if (read_data == 0xFFFFFFFF)
                        break;
                memcpy(bufp, &read_data, 4);
                bufp += 4;
                drvdata->len += 4;
        }

        if (lost)
                coresight_insert_barrier_packet(drvdata->buf);
}

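/*
 * Stop trace capture: flush and stop the formatter and, when driven from
 * sysFS, dump the internal trace RAM to drvdata->buf before the TMC itself
 * is disabled.
 */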
static void __tmc_etb_disable_hw(struct tmc_drvdata *drvdata)
{
        CS_UNLOCK(drvdata->base);

        tmc_flush_and_stop(drvdata);
        /*
         * When operating in sysFS mode the content of the buffer needs to be
         * read before the TMC is disabled.
         */
        if (drvdata->mode == CS_MODE_SYSFS)
                tmc_etb_dump_hw(drvdata);
        tmc_disable_hw(drvdata);

        CS_LOCK(drvdata->base);
}

static void tmc_etb_disable_hw(struct tmc_drvdata *drvdata)
{
        __tmc_etb_disable_hw(drvdata);
        coresight_disclaim_device(drvdata->base);
}

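/*
 * Program the TMC for ETF link operation: wait for ready, select hardware
 * FIFO mode, enable formatting and trigger insertion, clear the buffer
 * watermark and enable the device.  In this mode the ETF acts as a link,
 * passing trace through to whatever sink sits downstream rather than
 * capturing it in its own RAM.
 */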
static void __tmc_etf_enable_hw(struct tmc_drvdata *drvdata)
{
        CS_UNLOCK(drvdata->base);

        /* Wait for TMCSReady bit to be set */
        tmc_wait_for_tmcready(drvdata);

        writel_relaxed(TMC_MODE_HARDWARE_FIFO, drvdata->base + TMC_MODE);
        writel_relaxed(TMC_FFCR_EN_FMT | TMC_FFCR_EN_TI,
                       drvdata->base + TMC_FFCR);
        writel_relaxed(0x0, drvdata->base + TMC_BUFWM);
        tmc_enable_hw(drvdata);

        CS_LOCK(drvdata->base);
}

static int tmc_etf_enable_hw(struct tmc_drvdata *drvdata)
{
        int rc = coresight_claim_device(drvdata->base);

        if (rc)
                return rc;

        __tmc_etf_enable_hw(drvdata);
        return 0;
}

static void tmc_etf_disable_hw(struct tmc_drvdata *drvdata)
{
        CS_UNLOCK(drvdata->base);

        tmc_flush_and_stop(drvdata);
        tmc_disable_hw(drvdata);
        coresight_disclaim_device_unlocked(drvdata->base);
        CS_LOCK(drvdata->base);
}

/*
 * Return the available trace data in the buffer from @pos, with a maximum
 * limit of @len, updating @bufpp to point at where to find it.
 */
ssize_t tmc_etb_get_sysfs_trace(struct tmc_drvdata *drvdata,
                                loff_t pos, size_t len, char **bufpp)
{
        ssize_t actual = len;

        /* Adjust the len to the available size at @pos */
        if (pos + actual > drvdata->len)
                actual = drvdata->len - pos;
        if (actual > 0)
                *bufpp = drvdata->buf + pos;
        return actual;
}

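/*
 * Enable the ETB/ETF as a sink for sysFS-driven sessions.  Memory for
 * drvdata->buf is allocated outside of the spinlock on first use; if the
 * sink is already enabled in sysFS mode only the reference count is
 * incremented and the hardware is left alone.
 */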
static int tmc_enable_etf_sink_sysfs(struct coresight_device *csdev)
{
        int ret = 0;
        bool used = false;
        char *buf = NULL;
        unsigned long flags;
        struct tmc_drvdata *drvdata = dev_get_drvdata(csdev->dev.parent);

        /*
         * If we don't have a buffer, release the lock and allocate memory.
         * Otherwise keep the lock and move along.
         */
        spin_lock_irqsave(&drvdata->spinlock, flags);
        if (!drvdata->buf) {
                spin_unlock_irqrestore(&drvdata->spinlock, flags);

                /* Allocate the memory here, outside of the spinlock */
                buf = kzalloc(drvdata->size, GFP_KERNEL);
                if (!buf)
                        return -ENOMEM;

                /* Let's try again */
                spin_lock_irqsave(&drvdata->spinlock, flags);
        }

        if (drvdata->reading) {
                ret = -EBUSY;
                goto out;
        }

        /*
         * In sysFS mode we can have multiple writers per sink.  Since this
         * sink is already enabled, no memory is needed and the HW need not
         * be touched.
         */
        if (drvdata->mode == CS_MODE_SYSFS) {
                atomic_inc(csdev->refcnt);
                goto out;
        }

        /*
         * If drvdata::buf isn't NULL, memory was allocated for a previous
         * trace run but wasn't read.  If so, simply zero out the memory.
         * Otherwise use the memory allocated above.
         *
         * The memory is freed when users read the buffer using the
         * /dev/xyz.{etf|etb} interface.  See tmc_read_unprepare_etb() for
         * details.
         */
        if (drvdata->buf) {
                memset(drvdata->buf, 0, drvdata->size);
        } else {
                used = true;
                drvdata->buf = buf;
        }

        ret = tmc_etb_enable_hw(drvdata);
        if (!ret) {
                drvdata->mode = CS_MODE_SYSFS;
                atomic_inc(csdev->refcnt);
        } else {
                /* Free up the buffer if we failed to enable */
                used = false;
        }
out:
        spin_unlock_irqrestore(&drvdata->spinlock, flags);

        /* Free memory outside the spinlock if need be */
        if (!used)
                kfree(buf);

        return ret;
}

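/*
 * Enable the ETB/ETF as a sink on behalf of the perf framework.  The sink
 * can only be shared by events belonging to the same owner (drvdata->pid),
 * and the request is refused while the buffer is being read or while the
 * device is operated from sysFS.
 */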
static int tmc_enable_etf_sink_perf(struct coresight_device *csdev, void *data)
{
        int ret = 0;
        pid_t pid;
        unsigned long flags;
        struct tmc_drvdata *drvdata = dev_get_drvdata(csdev->dev.parent);
        struct perf_output_handle *handle = data;

        spin_lock_irqsave(&drvdata->spinlock, flags);
        do {
                ret = -EINVAL;
                if (drvdata->reading)
                        break;
                /*
                 * No need to continue if the ETB/ETF is already operated
                 * from sysFS.
                 */
                if (drvdata->mode == CS_MODE_SYSFS) {
                        ret = -EBUSY;
                        break;
                }

                /* Get a handle on the pid of the process to monitor */
                pid = task_pid_nr(handle->event->owner);

                if (drvdata->pid != -1 && drvdata->pid != pid) {
                        ret = -EBUSY;
                        break;
                }

                ret = tmc_set_etf_buffer(csdev, handle);
                if (ret)
                        break;

                /*
                 * No HW configuration is needed if the sink is already in
                 * use for this session.
                 */
                if (drvdata->pid == pid) {
                        atomic_inc(csdev->refcnt);
                        break;
                }

                ret = tmc_etb_enable_hw(drvdata);
                if (!ret) {
                        /* Associate with monitored process. */
                        drvdata->pid = pid;
                        drvdata->mode = CS_MODE_PERF;
                        atomic_inc(csdev->refcnt);
                }
        } while (0);
        spin_unlock_irqrestore(&drvdata->spinlock, flags);

        return ret;
}

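/*
 * coresight sink ->enable() entry point: dispatch to the sysFS or perf
 * specific enable path based on @mode.
 */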
static int tmc_enable_etf_sink(struct coresight_device *csdev,
                               u32 mode, void *data)
{
        int ret;

        switch (mode) {
        case CS_MODE_SYSFS:
                ret = tmc_enable_etf_sink_sysfs(csdev);
                break;
        case CS_MODE_PERF:
                ret = tmc_enable_etf_sink_perf(csdev, data);
                break;
        /* We shouldn't be here */
        default:
                ret = -EINVAL;
                break;
        }

        if (ret)
                return ret;

        dev_dbg(&csdev->dev, "TMC-ETB/ETF enabled\n");
        return 0;
}

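/*
 * coresight sink ->disable() entry point: drop a reference and, on the last
 * one, stop the hardware, dissociate from the monitored process and mark
 * the device disabled.
 */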
static int tmc_disable_etf_sink(struct coresight_device *csdev)
{
        unsigned long flags;
        struct tmc_drvdata *drvdata = dev_get_drvdata(csdev->dev.parent);

        spin_lock_irqsave(&drvdata->spinlock, flags);

        if (drvdata->reading) {
                spin_unlock_irqrestore(&drvdata->spinlock, flags);
                return -EBUSY;
        }

        if (atomic_dec_return(csdev->refcnt)) {
                spin_unlock_irqrestore(&drvdata->spinlock, flags);
                return -EBUSY;
        }

        /* Complain if we (somehow) got out of sync */
        WARN_ON_ONCE(drvdata->mode == CS_MODE_DISABLED);
        tmc_etb_disable_hw(drvdata);
        /* Dissociate from monitored process. */
        drvdata->pid = -1;
        drvdata->mode = CS_MODE_DISABLED;

        spin_unlock_irqrestore(&drvdata->spinlock, flags);

        dev_dbg(&csdev->dev, "TMC-ETB/ETF disabled\n");
        return 0;
}

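/*
 * Enable the ETF as a link in HW FIFO mode.  The hardware is only touched
 * on the first enable (refcnt 0 -> 1); subsequent calls merely take an
 * extra reference.
 */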
static int tmc_enable_etf_link(struct coresight_device *csdev,
                               int inport, int outport)
{
        int ret = 0;
        unsigned long flags;
        struct tmc_drvdata *drvdata = dev_get_drvdata(csdev->dev.parent);
        bool first_enable = false;

        spin_lock_irqsave(&drvdata->spinlock, flags);
        if (drvdata->reading) {
                spin_unlock_irqrestore(&drvdata->spinlock, flags);
                return -EBUSY;
        }

        if (atomic_read(&csdev->refcnt[0]) == 0) {
                ret = tmc_etf_enable_hw(drvdata);
                if (!ret) {
                        drvdata->mode = CS_MODE_SYSFS;
                        first_enable = true;
                }
        }
        if (!ret)
                atomic_inc(&csdev->refcnt[0]);
        spin_unlock_irqrestore(&drvdata->spinlock, flags);

        if (first_enable)
                dev_dbg(&csdev->dev, "TMC-ETF enabled\n");
        return ret;
}

static void tmc_disable_etf_link(struct coresight_device *csdev,
                                 int inport, int outport)
{
        unsigned long flags;
        struct tmc_drvdata *drvdata = dev_get_drvdata(csdev->dev.parent);
        bool last_disable = false;

        spin_lock_irqsave(&drvdata->spinlock, flags);
        if (drvdata->reading) {
                spin_unlock_irqrestore(&drvdata->spinlock, flags);
                return;
        }

        if (atomic_dec_return(&csdev->refcnt[0]) == 0) {
                tmc_etf_disable_hw(drvdata);
                drvdata->mode = CS_MODE_DISABLED;
                last_disable = true;
        }
        spin_unlock_irqrestore(&drvdata->spinlock, flags);

        if (last_disable)
                dev_dbg(&csdev->dev, "TMC-ETF disabled\n");
}

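/*
 * Allocate the cs_buffers descriptor used to track the perf AUX pages for
 * this event.  When the event is bound to a CPU the allocation is made on
 * that CPU's NUMA node.
 */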
static void *tmc_alloc_etf_buffer(struct coresight_device *csdev,
                                  struct perf_event *event, void **pages,
                                  int nr_pages, bool overwrite)
{
        int node;
        struct cs_buffers *buf;

        node = (event->cpu == -1) ? NUMA_NO_NODE : cpu_to_node(event->cpu);

        /* Allocate memory structure for interaction with Perf */
        buf = kzalloc_node(sizeof(struct cs_buffers), GFP_KERNEL, node);
        if (!buf)
                return NULL;

        buf->snapshot = overwrite;
        buf->nr_pages = nr_pages;
        buf->data_pages = pages;

        return buf;
}

static void tmc_free_etf_buffer(void *config)
{
        struct cs_buffers *buf = config;

        kfree(buf);
}

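/*
 * Derive the starting page (buf->cur) and offset within that page
 * (buf->offset) from the current handle->head, so that
 * tmc_update_etf_buffer() knows where to start copying trace data into the
 * perf AUX buffer.
 */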
static int tmc_set_etf_buffer(struct coresight_device *csdev,
                              struct perf_output_handle *handle)
{
        int ret = 0;
        unsigned long head;
        struct cs_buffers *buf = etm_perf_sink_config(handle);

        if (!buf)
                return -EINVAL;

        /* wrap head around to the amount of space we have */
        head = handle->head & ((buf->nr_pages << PAGE_SHIFT) - 1);

        /* find the page to write to */
        buf->cur = head / PAGE_SIZE;

        /* and offset within that page */
        buf->offset = head % PAGE_SIZE;

        local_set(&buf->data_size, 0);

        return ret;
}

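/*
 * Copy whatever the TMC has captured into the perf AUX pages.  The amount
 * of data to copy is computed from the read/write pointers and the FULL
 * status bit; if the TMC RAM holds more than the space available in the
 * handle, the read pointer is advanced so only the most recent trace is
 * kept and the buffer is flagged as truncated (unless in snapshot mode).
 * Returns the number of bytes copied.
 */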
static unsigned long tmc_update_etf_buffer(struct coresight_device *csdev,
                                  struct perf_output_handle *handle,
                                  void *sink_config)
{
        bool lost = false;
        int i, cur;
        const u32 *barrier;
        u32 *buf_ptr;
        u64 read_ptr, write_ptr;
        u32 status;
        unsigned long offset, to_read = 0, flags;
        struct cs_buffers *buf = sink_config;
        struct tmc_drvdata *drvdata = dev_get_drvdata(csdev->dev.parent);

        if (!buf)
                return 0;

        /* This shouldn't happen */
        if (WARN_ON_ONCE(drvdata->mode != CS_MODE_PERF))
                return 0;

        spin_lock_irqsave(&drvdata->spinlock, flags);

        /* Don't do anything if another tracer is using this sink */
        if (atomic_read(csdev->refcnt) != 1)
                goto out;

        CS_UNLOCK(drvdata->base);

        tmc_flush_and_stop(drvdata);

        read_ptr = tmc_read_rrp(drvdata);
        write_ptr = tmc_read_rwp(drvdata);

        /*
         * Get a hold of the status register and see if a wrap around
         * has occurred.  If so adjust things accordingly.
         */
        status = readl_relaxed(drvdata->base + TMC_STS);
        if (status & TMC_STS_FULL) {
                lost = true;
                to_read = drvdata->size;
        } else {
                to_read = CIRC_CNT(write_ptr, read_ptr, drvdata->size);
        }

        /*
         * The TMC RAM buffer may be bigger than the space available in the
         * perf ring buffer (handle->size).  If so advance the RRP so that we
         * get the latest trace data.  In snapshot mode none of that matters
         * since we are expected to clobber stale data in favour of the latest
         * traces.
         */
        if (!buf->snapshot && to_read > handle->size) {
                u32 mask = tmc_get_memwidth_mask(drvdata);

                /*
                 * Make sure the new size is aligned in accordance with the
                 * requirement explained in function tmc_get_memwidth_mask().
                 */
                to_read = handle->size & mask;
                /* Move the RAM read pointer up */
                read_ptr = (write_ptr + drvdata->size) - to_read;
                /* Make sure we are still within our limits */
                if (read_ptr > (drvdata->size - 1))
                        read_ptr -= drvdata->size;
                /* Tell the HW */
                tmc_write_rrp(drvdata, read_ptr);
                lost = true;
        }

        /*
         * Don't set the TRUNCATED flag in snapshot mode because 1) the
         * captured buffer is expected to be truncated and 2) a full buffer
         * prevents the event from being re-enabled by the perf core,
         * resulting in stale data being sent to user space.
         */
        if (!buf->snapshot && lost)
                perf_aux_output_flag(handle, PERF_AUX_FLAG_TRUNCATED);

        cur = buf->cur;
        offset = buf->offset;
        barrier = barrier_pkt;

        /* Read the trace data, one 32-bit word at a time */
        for (i = 0; i < to_read; i += 4) {
                buf_ptr = buf->data_pages[cur] + offset;
                *buf_ptr = readl_relaxed(drvdata->base + TMC_RRD);

                if (lost && *barrier) {
                        *buf_ptr = *barrier;
                        barrier++;
                }

                offset += 4;
                if (offset >= PAGE_SIZE) {
                        offset = 0;
                        cur++;
                        /* wrap around at the end of the buffer */
                        cur &= buf->nr_pages - 1;
                }
        }

        /*
         * In snapshot mode we simply increment the head by the number of
         * bytes that were written.  User space function cs_etm_find_snapshot()
         * will figure out how many bytes to get from the AUX buffer based on
         * the position of the head.
         */
        if (buf->snapshot)
                handle->head += to_read;

        CS_LOCK(drvdata->base);
out:
        spin_unlock_irqrestore(&drvdata->spinlock, flags);

        return to_read;
}

static const struct coresight_ops_sink tmc_etf_sink_ops = {
        .enable         = tmc_enable_etf_sink,
        .disable        = tmc_disable_etf_sink,
        .alloc_buffer   = tmc_alloc_etf_buffer,
        .free_buffer    = tmc_free_etf_buffer,
        .update_buffer  = tmc_update_etf_buffer,
};

static const struct coresight_ops_link tmc_etf_link_ops = {
        .enable         = tmc_enable_etf_link,
        .disable        = tmc_disable_etf_link,
};

const struct coresight_ops tmc_etb_cs_ops = {
        .sink_ops       = &tmc_etf_sink_ops,
};

const struct coresight_ops tmc_etf_cs_ops = {
        .sink_ops       = &tmc_etf_sink_ops,
        .link_ops       = &tmc_etf_link_ops,
};

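/*
 * Prepare the ETB/ETF for reading through the /dev/xyz.{etf|etb} interface:
 * refuse if a read is already in progress, if the TMC is in HW FIFO (link)
 * mode, if it is driven by perf, or if there is no data to read; otherwise
 * stop the hardware (sysFS mode) and flag the device as being read.
 */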
int tmc_read_prepare_etb(struct tmc_drvdata *drvdata)
{
        enum tmc_mode mode;
        int ret = 0;
        unsigned long flags;

        /* config types are set at boot time and never change */
        if (WARN_ON_ONCE(drvdata->config_type != TMC_CONFIG_TYPE_ETB &&
                         drvdata->config_type != TMC_CONFIG_TYPE_ETF))
                return -EINVAL;

        spin_lock_irqsave(&drvdata->spinlock, flags);

        if (drvdata->reading) {
                ret = -EBUSY;
                goto out;
        }

        /* There is no point in reading a TMC in HW FIFO mode */
        mode = readl_relaxed(drvdata->base + TMC_MODE);
        if (mode != TMC_MODE_CIRCULAR_BUFFER) {
                ret = -EINVAL;
                goto out;
        }

        /* Don't interfere if operated from Perf */
        if (drvdata->mode == CS_MODE_PERF) {
                ret = -EINVAL;
                goto out;
        }

        /* If drvdata::buf is NULL the trace data has been read already */
        if (drvdata->buf == NULL) {
                ret = -EINVAL;
                goto out;
        }

        /* Disable the TMC if need be */
        if (drvdata->mode == CS_MODE_SYSFS)
                __tmc_etb_disable_hw(drvdata);

        drvdata->reading = true;
out:
        spin_unlock_irqrestore(&drvdata->spinlock, flags);

        return ret;
}

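/*
 * Undo tmc_read_prepare_etb(): if a sysFS session is still active, zero the
 * buffer and re-enable the hardware so tracing continues; otherwise detach
 * drvdata->buf and free it outside of the spinlock.
 */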
int tmc_read_unprepare_etb(struct tmc_drvdata *drvdata)
{
        char *buf = NULL;
        enum tmc_mode mode;
        unsigned long flags;

        /* config types are set at boot time and never change */
        if (WARN_ON_ONCE(drvdata->config_type != TMC_CONFIG_TYPE_ETB &&
                         drvdata->config_type != TMC_CONFIG_TYPE_ETF))
                return -EINVAL;

        spin_lock_irqsave(&drvdata->spinlock, flags);

        /* There is no point in reading a TMC in HW FIFO mode */
        mode = readl_relaxed(drvdata->base + TMC_MODE);
        if (mode != TMC_MODE_CIRCULAR_BUFFER) {
                spin_unlock_irqrestore(&drvdata->spinlock, flags);
                return -EINVAL;
        }

        /* Re-enable the TMC if need be */
        if (drvdata->mode == CS_MODE_SYSFS) {
                /*
                 * The trace run will continue with the same allocated trace
                 * buffer. As such, zero out the buffer so that we don't end
                 * up with stale data.
                 *
                 * Since the tracer is still enabled, drvdata::buf
                 * can't be NULL.
                 */
                memset(drvdata->buf, 0, drvdata->size);
                __tmc_etb_enable_hw(drvdata);
        } else {
                /*
                 * The ETB/ETF is not tracing and the buffer was just read.
                 * As such prepare to free the trace buffer.
                 */
                buf = drvdata->buf;
                drvdata->buf = NULL;
        }

        drvdata->reading = false;
        spin_unlock_irqrestore(&drvdata->spinlock, flags);

        /*
         * Free allocated memory outside of the spinlock.  There is no need
         * to assert the validity of 'buf' since calling kfree(NULL) is safe.
         */
        kfree(buf);

        return 0;
}