// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright(C) 2015-2018 Linaro Limited.
 *
 * Author: Tor Jeremiassen <tor@ti.com>
 * Author: Mathieu Poirier <mathieu.poirier@linaro.org>
 */

#include <linux/bitops.h>
#include <linux/coresight-pmu.h>
#include <linux/err.h>
#include <linux/kernel.h>
#include <linux/log2.h>
#include <linux/types.h>
#include <linux/zalloc.h>

#include <opencsd/ocsd_if_types.h>
#include <stdlib.h>

#include "auxtrace.h"
#include "color.h"
#include "cs-etm.h"
#include "cs-etm-decoder/cs-etm-decoder.h"
#include "debug.h"
#include "dso.h"
#include "evlist.h"
#include "intlist.h"
#include "machine.h"
#include "map.h"
#include "perf.h"
#include "session.h"
#include "map_symbol.h"
#include "branch.h"
#include "symbol.h"
#include "tool.h"
#include "thread.h"
#include "thread-stack.h"
#include "tsc.h"
#include <tools/libc_compat.h>
#include "util/synthetic-events.h"

struct cs_etm_auxtrace {
	struct auxtrace auxtrace;
	struct auxtrace_queues queues;
	struct auxtrace_heap heap;
	struct itrace_synth_opts synth_opts;
	struct perf_session *session;
	struct machine *machine;
	struct thread *unknown_thread;
	struct perf_tsc_conversion tc;

	u8 timeless_decoding;
	u8 snapshot_mode;
	u8 data_queued;
	u8 has_virtual_ts; /* Virtual/Kernel timestamps in the trace. */

	int num_cpu;
	u64 latest_kernel_timestamp;
	u32 auxtrace_type;
	u64 branches_sample_type;
	u64 branches_id;
	u64 instructions_sample_type;
	u64 instructions_sample_period;
	u64 instructions_id;
	u64 **metadata;
	unsigned int pmu_type;
};

struct cs_etm_traceid_queue {
	u8 trace_chan_id;
	pid_t pid, tid;
	u64 period_instructions;
	size_t last_branch_pos;
	union perf_event *event_buf;
	struct thread *thread;
	struct branch_stack *last_branch;
	struct branch_stack *last_branch_rb;
	struct cs_etm_packet *prev_packet;
	struct cs_etm_packet *packet;
	struct cs_etm_packet_queue packet_queue;
};

struct cs_etm_queue {
	struct cs_etm_auxtrace *etm;
	struct cs_etm_decoder *decoder;
	struct auxtrace_buffer *buffer;
	unsigned int queue_nr;
	u8 pending_timestamp_chan_id;
	u64 offset;
	const unsigned char *buf;
	size_t buf_len, buf_used;
	/* Conversion between traceID and index in traceid_queues array */
	struct intlist *traceid_queues_list;
	struct cs_etm_traceid_queue **traceid_queues;
};

/* RB tree for quick conversion between traceID and metadata pointers */
static struct intlist *traceid_list;

static int cs_etm__process_queues(struct cs_etm_auxtrace *etm);
static int cs_etm__process_timeless_queues(struct cs_etm_auxtrace *etm,
					   pid_t tid);
static int cs_etm__get_data_block(struct cs_etm_queue *etmq);
static int cs_etm__decode_data_block(struct cs_etm_queue *etmq);

/* PTMs have ETMIDR [11:8] set to b0011 */
#define ETMIDR_PTM_VERSION 0x00000300

/*
 * A struct auxtrace_heap_item only has a queue_nr and a timestamp to
 * work with. One option is to modify the auxtrace_heap_XYZ() API; instead
 * we simply encode the etm queue number as the upper 16 bits and the
 * channel as the lower 16 bits.
 */
#define TO_CS_QUEUE_NR(queue_nr, trace_chan_id)	\
	(queue_nr << 16 | trace_chan_id)
#define TO_QUEUE_NR(cs_queue_nr) (cs_queue_nr >> 16)
#define TO_TRACE_CHAN_ID(cs_queue_nr) (cs_queue_nr & 0x0000ffff)
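
/*
 * Worked example (illustrative, not part of the original source): with
 * queue_nr = 2 and trace_chan_id = 0x10, TO_CS_QUEUE_NR() yields 0x20010;
 * TO_QUEUE_NR(0x20010) recovers 2 and TO_TRACE_CHAN_ID(0x20010)
 * recovers 0x10.
 */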

static u32 cs_etm__get_v7_protocol_version(u32 etmidr)
{
	etmidr &= ETMIDR_PTM_VERSION;

	if (etmidr == ETMIDR_PTM_VERSION)
		return CS_ETM_PROTO_PTM;

	return CS_ETM_PROTO_ETMV3;
}

static int cs_etm__get_magic(u8 trace_chan_id, u64 *magic)
{
	struct int_node *inode;
	u64 *metadata;

	inode = intlist__find(traceid_list, trace_chan_id);
	if (!inode)
		return -EINVAL;

	metadata = inode->priv;
	*magic = metadata[CS_ETM_MAGIC];
	return 0;
}

int cs_etm__get_cpu(u8 trace_chan_id, int *cpu)
{
	struct int_node *inode;
	u64 *metadata;

	inode = intlist__find(traceid_list, trace_chan_id);
	if (!inode)
		return -EINVAL;

	metadata = inode->priv;
	*cpu = (int)metadata[CS_ETM_CPU];
	return 0;
}

/*
 * The returned PID format is presented as two bits:
 *
 *   Bit ETM_OPT_CTXTID: CONTEXTIDR or CONTEXTIDR_EL1 is traced;
 *   Bit ETM_OPT_CTXTID2: CONTEXTIDR_EL2 is traced.
 *
 * It's possible that the two bits ETM_OPT_CTXTID and ETM_OPT_CTXTID2
 * are enabled at the same time when the session runs on an EL2 kernel.
 * This means both CONTEXTIDR_EL1 and CONTEXTIDR_EL2 will be recorded
 * in the trace data; in that case the tool selectively uses
 * CONTEXTIDR_EL2 as the PID.
 */
int cs_etm__get_pid_fmt(u8 trace_chan_id, u64 *pid_fmt)
{
	struct int_node *inode;
	u64 *metadata, val;

	inode = intlist__find(traceid_list, trace_chan_id);
	if (!inode)
		return -EINVAL;

	metadata = inode->priv;

	if (metadata[CS_ETM_MAGIC] == __perf_cs_etmv3_magic) {
		val = metadata[CS_ETM_ETMCR];
		/* CONTEXTIDR is traced */
		if (val & BIT(ETM_OPT_CTXTID))
			*pid_fmt = BIT(ETM_OPT_CTXTID);
	} else {
		val = metadata[CS_ETMV4_TRCCONFIGR];
		/* CONTEXTIDR_EL2 is traced */
		if (val & (BIT(ETM4_CFG_BIT_VMID) | BIT(ETM4_CFG_BIT_VMID_OPT)))
			*pid_fmt = BIT(ETM_OPT_CTXTID2);
		/* CONTEXTIDR_EL1 is traced */
		else if (val & BIT(ETM4_CFG_BIT_CTXTID))
			*pid_fmt = BIT(ETM_OPT_CTXTID);
	}

	return 0;
}
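
/*
 * Illustrative note (not from the original source): on an EL2 kernel
 * tracing both context IDs, TRCCONFIGR has its VMID and CTXTID bits set,
 * so cs_etm__get_pid_fmt() reports BIT(ETM_OPT_CTXTID2) and the PID is
 * taken from CONTEXTIDR_EL2.
 */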

static int cs_etm__map_trace_id(u8 trace_chan_id, u64 *cpu_metadata)
{
	struct int_node *inode;

	/* Get an RB node for this CPU */
	inode = intlist__findnew(traceid_list, trace_chan_id);

	/* Something went wrong, no need to continue */
	if (!inode)
		return -ENOMEM;

	/*
	 * The node for that CPU should not be taken.
	 * Back out if that's the case.
	 */
	if (inode->priv)
		return -EINVAL;

	/* All good, associate the traceID with the metadata pointer */
	inode->priv = cpu_metadata;

	return 0;
}

static int cs_etm__metadata_get_trace_id(u8 *trace_chan_id, u64 *cpu_metadata)
{
	u64 cs_etm_magic = cpu_metadata[CS_ETM_MAGIC];

	switch (cs_etm_magic) {
	case __perf_cs_etmv3_magic:
		*trace_chan_id = (u8)(cpu_metadata[CS_ETM_ETMTRACEIDR] &
				      CORESIGHT_TRACE_ID_VAL_MASK);
		break;
	case __perf_cs_etmv4_magic:
	case __perf_cs_ete_magic:
		*trace_chan_id = (u8)(cpu_metadata[CS_ETMV4_TRCTRACEIDR] &
				      CORESIGHT_TRACE_ID_VAL_MASK);
		break;
	default:
		return -EINVAL;
	}
	return 0;
}

/*
 * Update the metadata trace ID from the value found in the AUX_HW_INFO packet.
 * This will also clear the CORESIGHT_TRACE_ID_UNUSED_FLAG flag if present.
 */
static int cs_etm__metadata_set_trace_id(u8 trace_chan_id, u64 *cpu_metadata)
{
	u64 cs_etm_magic = cpu_metadata[CS_ETM_MAGIC];

	switch (cs_etm_magic) {
	case __perf_cs_etmv3_magic:
		cpu_metadata[CS_ETM_ETMTRACEIDR] = trace_chan_id;
		break;
	case __perf_cs_etmv4_magic:
	case __perf_cs_ete_magic:
		cpu_metadata[CS_ETMV4_TRCTRACEIDR] = trace_chan_id;
		break;
	default:
		return -EINVAL;
	}
	return 0;
}

/*
 * FIELD_GET() (linux/bitfield.h) is not available outside kernel code,
 * and the header contains too many dependencies to just copy over,
 * so roll our own based on the original.
 */
#define __bf_shf(x) (__builtin_ffsll(x) - 1)
#define FIELD_GET(_mask, _reg)						\
	({								\
		(typeof(_mask))(((_reg) & (_mask)) >> __bf_shf(_mask));	\
	})
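
/*
 * Worked example (illustrative): with _mask = 0x0000ff00, __bf_shf(_mask)
 * is 8, so FIELD_GET(0x0000ff00, 0x12345678) evaluates to
 * (0x12345678 & 0x0000ff00) >> 8 == 0x56.
 */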

/*
 * Handle the PERF_RECORD_AUX_OUTPUT_HW_ID event.
 *
 * The payload associates the Trace ID and the CPU.
 * The routine is tolerant of seeing multiple packets with the same association,
 * but a CPU / Trace ID association changing during a session is an error.
 */
static int cs_etm__process_aux_output_hw_id(struct perf_session *session,
					    union perf_event *event)
{
	struct cs_etm_auxtrace *etm;
	struct perf_sample sample;
	struct int_node *inode;
	struct evsel *evsel;
	u64 *cpu_data;
	u64 hw_id;
	int cpu, version, err;
	u8 trace_chan_id, curr_chan_id;

	/* extract and parse the HW ID */
	hw_id = event->aux_output_hw_id.hw_id;
	version = FIELD_GET(CS_AUX_HW_ID_VERSION_MASK, hw_id);
	trace_chan_id = FIELD_GET(CS_AUX_HW_ID_TRACE_ID_MASK, hw_id);

	/* check that we can handle this version */
	if (version > CS_AUX_HW_ID_CURR_VERSION)
		return -EINVAL;

	/* get access to the etm metadata */
	etm = container_of(session->auxtrace, struct cs_etm_auxtrace, auxtrace);
	if (!etm || !etm->metadata)
		return -EINVAL;

	/* parse the sample to get the CPU */
	evsel = evlist__event2evsel(session->evlist, event);
	if (!evsel)
		return -EINVAL;
	err = evsel__parse_sample(evsel, event, &sample);
	if (err)
		return err;
	cpu = sample.cpu;
	if (cpu == -1) {
		/* no CPU in the sample - possibly recorded with an old version of perf */
		pr_err("CS_ETM: no CPU AUX_OUTPUT_HW_ID sample. Use compatible perf to record.");
		return -EINVAL;
	}

	/* See if the ID is mapped to a CPU, and if it matches the current CPU */
	inode = intlist__find(traceid_list, trace_chan_id);
	if (inode) {
		cpu_data = inode->priv;
		if ((int)cpu_data[CS_ETM_CPU] != cpu) {
			pr_err("CS_ETM: map mismatch between HW_ID packet CPU and Trace ID\n");
			return -EINVAL;
		}

		/* check that the mapped ID matches */
		err = cs_etm__metadata_get_trace_id(&curr_chan_id, cpu_data);
		if (err)
			return err;
		if (curr_chan_id != trace_chan_id) {
			pr_err("CS_ETM: mismatch between CPU trace ID and HW_ID packet ID\n");
			return -EINVAL;
		}

		/* mapped and matched - return OK */
		return 0;
	}

	/* not one we've seen before - let's map it */
	cpu_data = etm->metadata[cpu];
	err = cs_etm__map_trace_id(trace_chan_id, cpu_data);
	if (err)
		return err;

	/*
	 * If we are picking up the association from the packet, we need to
	 * plug the correct trace ID into the metadata for setting up decoders
	 * later.
	 */
	err = cs_etm__metadata_set_trace_id(trace_chan_id, cpu_data);
	return err;
}

void cs_etm__etmq_set_traceid_queue_timestamp(struct cs_etm_queue *etmq,
					      u8 trace_chan_id)
{
	/*
	 * When a timestamp packet is encountered the backend code
	 * is stopped so that the front end has time to process packets
	 * that were accumulated in the traceID queue. Since there can
	 * be more than one channel per cs_etm_queue, we need to specify
	 * what traceID queue needs servicing.
	 */
	etmq->pending_timestamp_chan_id = trace_chan_id;
}

static u64 cs_etm__etmq_get_timestamp(struct cs_etm_queue *etmq,
				      u8 *trace_chan_id)
{
	struct cs_etm_packet_queue *packet_queue;

	if (!etmq->pending_timestamp_chan_id)
		return 0;

	if (trace_chan_id)
		*trace_chan_id = etmq->pending_timestamp_chan_id;

	packet_queue = cs_etm__etmq_get_packet_queue(etmq,
						     etmq->pending_timestamp_chan_id);
	if (!packet_queue)
		return 0;

	/* Acknowledge pending status */
	etmq->pending_timestamp_chan_id = 0;

	/* See function cs_etm_decoder__do_{hard|soft}_timestamp() */
	return packet_queue->cs_timestamp;
}

static void cs_etm__clear_packet_queue(struct cs_etm_packet_queue *queue)
{
	int i;

	queue->head = 0;
	queue->tail = 0;
	queue->packet_count = 0;
	for (i = 0; i < CS_ETM_PACKET_MAX_BUFFER; i++) {
		queue->packet_buffer[i].isa = CS_ETM_ISA_UNKNOWN;
		queue->packet_buffer[i].start_addr = CS_ETM_INVAL_ADDR;
		queue->packet_buffer[i].end_addr = CS_ETM_INVAL_ADDR;
		queue->packet_buffer[i].instr_count = 0;
		queue->packet_buffer[i].last_instr_taken_branch = false;
		queue->packet_buffer[i].last_instr_size = 0;
		queue->packet_buffer[i].last_instr_type = 0;
		queue->packet_buffer[i].last_instr_subtype = 0;
		queue->packet_buffer[i].last_instr_cond = 0;
		queue->packet_buffer[i].flags = 0;
		queue->packet_buffer[i].exception_number = UINT32_MAX;
		queue->packet_buffer[i].trace_chan_id = UINT8_MAX;
		queue->packet_buffer[i].cpu = INT_MIN;
	}
}

static void cs_etm__clear_all_packet_queues(struct cs_etm_queue *etmq)
{
	int idx;
	struct int_node *inode;
	struct cs_etm_traceid_queue *tidq;
	struct intlist *traceid_queues_list = etmq->traceid_queues_list;

	intlist__for_each_entry(inode, traceid_queues_list) {
		idx = (int)(intptr_t)inode->priv;
		tidq = etmq->traceid_queues[idx];
		cs_etm__clear_packet_queue(&tidq->packet_queue);
	}
}

static int cs_etm__init_traceid_queue(struct cs_etm_queue *etmq,
				      struct cs_etm_traceid_queue *tidq,
				      u8 trace_chan_id)
{
	int rc = -ENOMEM;
	struct auxtrace_queue *queue;
	struct cs_etm_auxtrace *etm = etmq->etm;

	cs_etm__clear_packet_queue(&tidq->packet_queue);

	queue = &etmq->etm->queues.queue_array[etmq->queue_nr];
	tidq->tid = queue->tid;
	tidq->pid = -1;
	tidq->trace_chan_id = trace_chan_id;

	tidq->packet = zalloc(sizeof(struct cs_etm_packet));
	if (!tidq->packet)
		goto out;

	tidq->prev_packet = zalloc(sizeof(struct cs_etm_packet));
	if (!tidq->prev_packet)
		goto out_free;

	if (etm->synth_opts.last_branch) {
		size_t sz = sizeof(struct branch_stack);

		sz += etm->synth_opts.last_branch_sz *
		      sizeof(struct branch_entry);
		tidq->last_branch = zalloc(sz);
		if (!tidq->last_branch)
			goto out_free;
		tidq->last_branch_rb = zalloc(sz);
		if (!tidq->last_branch_rb)
			goto out_free;
	}

	tidq->event_buf = malloc(PERF_SAMPLE_MAX_SIZE);
	if (!tidq->event_buf)
		goto out_free;

	return 0;

out_free:
	zfree(&tidq->last_branch_rb);
	zfree(&tidq->last_branch);
	zfree(&tidq->prev_packet);
	zfree(&tidq->packet);
out:
	return rc;
}

static struct cs_etm_traceid_queue
*cs_etm__etmq_get_traceid_queue(struct cs_etm_queue *etmq, u8 trace_chan_id)
{
	int idx;
	struct int_node *inode;
	struct intlist *traceid_queues_list;
	struct cs_etm_traceid_queue *tidq, **traceid_queues;
	struct cs_etm_auxtrace *etm = etmq->etm;

	if (etm->timeless_decoding)
		trace_chan_id = CS_ETM_PER_THREAD_TRACEID;

	traceid_queues_list = etmq->traceid_queues_list;

	/*
	 * Check if a traceid_queue exists for this traceID by looking
	 * in the queue list.
	 */
	inode = intlist__find(traceid_queues_list, trace_chan_id);
	if (inode) {
		idx = (int)(intptr_t)inode->priv;
		return etmq->traceid_queues[idx];
	}

	/* We couldn't find a traceid_queue for this traceID, allocate one */
	tidq = malloc(sizeof(*tidq));
	if (!tidq)
		return NULL;

	memset(tidq, 0, sizeof(*tidq));

	/* Get a valid index for the new traceid_queue */
	idx = intlist__nr_entries(traceid_queues_list);
	/* Memory for the inode is freed in cs_etm__free_traceid_queues() */
	inode = intlist__findnew(traceid_queues_list, trace_chan_id);
	if (!inode)
		goto out_free;

	/* Associate this traceID with this index */
	inode->priv = (void *)(intptr_t)idx;

	if (cs_etm__init_traceid_queue(etmq, tidq, trace_chan_id))
		goto out_free;

	/* Grow the traceid_queues array by one unit */
	traceid_queues = etmq->traceid_queues;
	traceid_queues = reallocarray(traceid_queues,
				      idx + 1,
				      sizeof(*traceid_queues));

	/*
	 * On failure reallocarray() returns NULL and the original block of
	 * memory is left untouched.
	 */
	if (!traceid_queues)
		goto out_free;

	traceid_queues[idx] = tidq;
	etmq->traceid_queues = traceid_queues;

	return etmq->traceid_queues[idx];

out_free:
	/*
	 * Function intlist__remove() removes the inode from the list
	 * and deletes the memory associated with it.
	 */
	intlist__remove(traceid_queues_list, inode);
	free(tidq);

	return NULL;
}

struct cs_etm_packet_queue
*cs_etm__etmq_get_packet_queue(struct cs_etm_queue *etmq, u8 trace_chan_id)
{
	struct cs_etm_traceid_queue *tidq;

	tidq = cs_etm__etmq_get_traceid_queue(etmq, trace_chan_id);
	if (tidq)
		return &tidq->packet_queue;

	return NULL;
}

static void cs_etm__packet_swap(struct cs_etm_auxtrace *etm,
				struct cs_etm_traceid_queue *tidq)
{
	struct cs_etm_packet *tmp;

	if (etm->synth_opts.branches || etm->synth_opts.last_branch ||
	    etm->synth_opts.instructions) {
		/*
		 * Swap PACKET with PREV_PACKET: PACKET becomes PREV_PACKET for
		 * the next incoming packet.
		 */
		tmp = tidq->packet;
		tidq->packet = tidq->prev_packet;
		tidq->prev_packet = tmp;
	}
}

static void cs_etm__packet_dump(const char *pkt_string)
{
	const char *color = PERF_COLOR_BLUE;
	int len = strlen(pkt_string);

	if (len && (pkt_string[len-1] == '\n'))
		color_fprintf(stdout, color, " %s", pkt_string);
	else
		color_fprintf(stdout, color, " %s\n", pkt_string);

	fflush(stdout);
}

static void cs_etm__set_trace_param_etmv3(struct cs_etm_trace_params *t_params,
					  struct cs_etm_auxtrace *etm, int idx,
					  u32 etmidr)
{
	u64 **metadata = etm->metadata;

	t_params[idx].protocol = cs_etm__get_v7_protocol_version(etmidr);
	t_params[idx].etmv3.reg_ctrl = metadata[idx][CS_ETM_ETMCR];
	t_params[idx].etmv3.reg_trc_id = metadata[idx][CS_ETM_ETMTRACEIDR];
}

static void cs_etm__set_trace_param_etmv4(struct cs_etm_trace_params *t_params,
					  struct cs_etm_auxtrace *etm, int idx)
{
	u64 **metadata = etm->metadata;

	t_params[idx].protocol = CS_ETM_PROTO_ETMV4i;
	t_params[idx].etmv4.reg_idr0 = metadata[idx][CS_ETMV4_TRCIDR0];
	t_params[idx].etmv4.reg_idr1 = metadata[idx][CS_ETMV4_TRCIDR1];
	t_params[idx].etmv4.reg_idr2 = metadata[idx][CS_ETMV4_TRCIDR2];
	t_params[idx].etmv4.reg_idr8 = metadata[idx][CS_ETMV4_TRCIDR8];
	t_params[idx].etmv4.reg_configr = metadata[idx][CS_ETMV4_TRCCONFIGR];
	t_params[idx].etmv4.reg_traceidr = metadata[idx][CS_ETMV4_TRCTRACEIDR];
}

static void cs_etm__set_trace_param_ete(struct cs_etm_trace_params *t_params,
					struct cs_etm_auxtrace *etm, int idx)
{
	u64 **metadata = etm->metadata;

	t_params[idx].protocol = CS_ETM_PROTO_ETE;
	t_params[idx].ete.reg_idr0 = metadata[idx][CS_ETE_TRCIDR0];
	t_params[idx].ete.reg_idr1 = metadata[idx][CS_ETE_TRCIDR1];
	t_params[idx].ete.reg_idr2 = metadata[idx][CS_ETE_TRCIDR2];
	t_params[idx].ete.reg_idr8 = metadata[idx][CS_ETE_TRCIDR8];
	t_params[idx].ete.reg_configr = metadata[idx][CS_ETE_TRCCONFIGR];
	t_params[idx].ete.reg_traceidr = metadata[idx][CS_ETE_TRCTRACEIDR];
	t_params[idx].ete.reg_devarch = metadata[idx][CS_ETE_TRCDEVARCH];
}

static int cs_etm__init_trace_params(struct cs_etm_trace_params *t_params,
				     struct cs_etm_auxtrace *etm,
				     int decoders)
{
	int i;
	u32 etmidr;
	u64 architecture;

	for (i = 0; i < decoders; i++) {
		architecture = etm->metadata[i][CS_ETM_MAGIC];

		switch (architecture) {
		case __perf_cs_etmv3_magic:
			etmidr = etm->metadata[i][CS_ETM_ETMIDR];
			cs_etm__set_trace_param_etmv3(t_params, etm, i, etmidr);
			break;
		case __perf_cs_etmv4_magic:
			cs_etm__set_trace_param_etmv4(t_params, etm, i);
			break;
		case __perf_cs_ete_magic:
			cs_etm__set_trace_param_ete(t_params, etm, i);
			break;
		default:
			return -EINVAL;
		}
	}

	return 0;
}

static int cs_etm__init_decoder_params(struct cs_etm_decoder_params *d_params,
				       struct cs_etm_queue *etmq,
				       enum cs_etm_decoder_operation mode,
				       bool formatted)
{
	int ret = -EINVAL;

	if (!(mode < CS_ETM_OPERATION_MAX))
		goto out;

	d_params->packet_printer = cs_etm__packet_dump;
	d_params->operation = mode;
	d_params->data = etmq;
	d_params->formatted = formatted;
	d_params->fsyncs = false;
	d_params->hsyncs = false;
	d_params->frame_aligned = true;

	ret = 0;
out:
	return ret;
}

static void cs_etm__dump_event(struct cs_etm_queue *etmq,
			       struct auxtrace_buffer *buffer)
{
	int ret;
	const char *color = PERF_COLOR_BLUE;
	size_t buffer_used = 0;

	fprintf(stdout, "\n");
	color_fprintf(stdout, color,
		      ". ... CoreSight %s Trace data: size %#zx bytes\n",
		      cs_etm_decoder__get_name(etmq->decoder), buffer->size);

	do {
		size_t consumed;

		ret = cs_etm_decoder__process_data_block(
				etmq->decoder, buffer->offset,
				&((u8 *)buffer->data)[buffer_used],
				buffer->size - buffer_used, &consumed);
		if (ret)
			break;

		buffer_used += consumed;
	} while (buffer_used < buffer->size);

	cs_etm_decoder__reset(etmq->decoder);
}

static int cs_etm__flush_events(struct perf_session *session,
				struct perf_tool *tool)
{
	struct cs_etm_auxtrace *etm = container_of(session->auxtrace,
						   struct cs_etm_auxtrace,
						   auxtrace);
	if (dump_trace)
		return 0;

	if (!tool->ordered_events)
		return -EINVAL;

	if (etm->timeless_decoding)
		return cs_etm__process_timeless_queues(etm, -1);

	return cs_etm__process_queues(etm);
}

static void cs_etm__free_traceid_queues(struct cs_etm_queue *etmq)
{
	int idx;
	uintptr_t priv;
	struct int_node *inode, *tmp;
	struct cs_etm_traceid_queue *tidq;
	struct intlist *traceid_queues_list = etmq->traceid_queues_list;

	intlist__for_each_entry_safe(inode, tmp, traceid_queues_list) {
		priv = (uintptr_t)inode->priv;
		idx = priv;

		/* Free this traceid_queue from the array */
		tidq = etmq->traceid_queues[idx];
		thread__zput(tidq->thread);
		zfree(&tidq->event_buf);
		zfree(&tidq->last_branch);
		zfree(&tidq->last_branch_rb);
		zfree(&tidq->prev_packet);
		zfree(&tidq->packet);
		zfree(&tidq);

		/*
		 * Function intlist__remove() removes the inode from the list
		 * and deletes the memory associated with it.
		 */
		intlist__remove(traceid_queues_list, inode);
	}

	/* Then the RB tree itself */
	intlist__delete(traceid_queues_list);
	etmq->traceid_queues_list = NULL;

	/* finally free the traceid_queues array */
	zfree(&etmq->traceid_queues);
}

static void cs_etm__free_queue(void *priv)
{
	struct cs_etm_queue *etmq = priv;

	if (!etmq)
		return;

	cs_etm_decoder__free(etmq->decoder);
	cs_etm__free_traceid_queues(etmq);
	free(etmq);
}

static void cs_etm__free_events(struct perf_session *session)
{
	unsigned int i;
	struct cs_etm_auxtrace *aux = container_of(session->auxtrace,
						   struct cs_etm_auxtrace,
						   auxtrace);
	struct auxtrace_queues *queues = &aux->queues;

	for (i = 0; i < queues->nr_queues; i++) {
		cs_etm__free_queue(queues->queue_array[i].priv);
		queues->queue_array[i].priv = NULL;
	}

	auxtrace_queues__free(queues);
}

static void cs_etm__free(struct perf_session *session)
{
	int i;
	struct int_node *inode, *tmp;
	struct cs_etm_auxtrace *aux = container_of(session->auxtrace,
						   struct cs_etm_auxtrace,
						   auxtrace);
	cs_etm__free_events(session);
	session->auxtrace = NULL;

	/* First remove all traceID/metadata nodes from the RB tree */
	intlist__for_each_entry_safe(inode, tmp, traceid_list)
		intlist__remove(traceid_list, inode);
	/* Then the RB tree itself */
	intlist__delete(traceid_list);

	for (i = 0; i < aux->num_cpu; i++)
		zfree(&aux->metadata[i]);

	thread__zput(aux->unknown_thread);
	zfree(&aux->metadata);
	zfree(&aux);
}

static bool cs_etm__evsel_is_auxtrace(struct perf_session *session,
				      struct evsel *evsel)
{
	struct cs_etm_auxtrace *aux = container_of(session->auxtrace,
						   struct cs_etm_auxtrace,
						   auxtrace);

	return evsel->core.attr.type == aux->pmu_type;
}

static u8 cs_etm__cpu_mode(struct cs_etm_queue *etmq, u64 address)
{
	struct machine *machine;

	machine = etmq->etm->machine;

	if (address >= machine__kernel_start(machine)) {
		if (machine__is_host(machine))
			return PERF_RECORD_MISC_KERNEL;
		else
			return PERF_RECORD_MISC_GUEST_KERNEL;
	} else {
		if (machine__is_host(machine))
			return PERF_RECORD_MISC_USER;
		else if (perf_guest)
			return PERF_RECORD_MISC_GUEST_USER;
		else
			return PERF_RECORD_MISC_HYPERVISOR;
	}
}

static u32 cs_etm__mem_access(struct cs_etm_queue *etmq, u8 trace_chan_id,
			      u64 address, size_t size, u8 *buffer)
{
	u8 cpumode;
	u64 offset;
	int len;
	struct thread *thread;
	struct machine *machine;
	struct addr_location al;
	struct cs_etm_traceid_queue *tidq;

	if (!etmq)
		return 0;

	machine = etmq->etm->machine;
	cpumode = cs_etm__cpu_mode(etmq, address);
	tidq = cs_etm__etmq_get_traceid_queue(etmq, trace_chan_id);
	if (!tidq)
		return 0;

	thread = tidq->thread;
	if (!thread) {
		if (cpumode != PERF_RECORD_MISC_KERNEL)
			return 0;
		thread = etmq->etm->unknown_thread;
	}

	if (!thread__find_map(thread, cpumode, address, &al) || !al.map->dso)
		return 0;

	if (al.map->dso->data.status == DSO_DATA_STATUS_ERROR &&
	    dso__data_status_seen(al.map->dso, DSO_DATA_STATUS_SEEN_ITRACE))
		return 0;

	offset = al.map->map_ip(al.map, address);

	map__load(al.map);

	len = dso__data_read_offset(al.map->dso, machine, offset, buffer, size);

	if (len <= 0) {
		ui__warning_once("CS ETM Trace: Missing DSO. Use 'perf archive' or debuginfod to export data from the traced system.\n"
				 " Enable CONFIG_PROC_KCORE or use option '-k /path/to/vmlinux' for kernel symbols.\n");
		if (!al.map->dso->auxtrace_warned) {
			pr_err("CS ETM Trace: Debug data not found for address %#"PRIx64" in %s\n",
			       address,
			       al.map->dso->long_name ? al.map->dso->long_name : "Unknown");
			al.map->dso->auxtrace_warned = true;
		}
		return 0;
	}

	return len;
}

static struct cs_etm_queue *cs_etm__alloc_queue(struct cs_etm_auxtrace *etm,
						bool formatted)
{
	struct cs_etm_decoder_params d_params;
	struct cs_etm_trace_params *t_params = NULL;
	struct cs_etm_queue *etmq;
	/*
	 * Each queue can only contain data from one CPU when unformatted, so
	 * only one decoder is needed.
	 */
	int decoders = formatted ? etm->num_cpu : 1;

	etmq = zalloc(sizeof(*etmq));
	if (!etmq)
		return NULL;

	etmq->traceid_queues_list = intlist__new(NULL);
	if (!etmq->traceid_queues_list)
		goto out_free;

	/* Use metadata to fill in trace parameters for trace decoder */
	t_params = zalloc(sizeof(*t_params) * decoders);

	if (!t_params)
		goto out_free;

	if (cs_etm__init_trace_params(t_params, etm, decoders))
		goto out_free;

	/* Set decoder parameters to decode trace packets */
	if (cs_etm__init_decoder_params(&d_params, etmq,
					dump_trace ? CS_ETM_OPERATION_PRINT :
						     CS_ETM_OPERATION_DECODE,
					formatted))
		goto out_free;

	etmq->decoder = cs_etm_decoder__new(decoders, &d_params,
					    t_params);

	if (!etmq->decoder)
		goto out_free;

	/*
	 * Register a function to handle all memory accesses required by
	 * the trace decoder library.
	 */
	if (cs_etm_decoder__add_mem_access_cb(etmq->decoder,
					      0x0L, ((u64) -1L),
					      cs_etm__mem_access))
		goto out_free_decoder;

	zfree(&t_params);
	return etmq;

out_free_decoder:
	cs_etm_decoder__free(etmq->decoder);
out_free:
	intlist__delete(etmq->traceid_queues_list);
	free(etmq);

	return NULL;
}

static int cs_etm__setup_queue(struct cs_etm_auxtrace *etm,
			       struct auxtrace_queue *queue,
			       unsigned int queue_nr,
			       bool formatted)
{
	struct cs_etm_queue *etmq = queue->priv;

	if (list_empty(&queue->head) || etmq)
		return 0;

	etmq = cs_etm__alloc_queue(etm, formatted);

	if (!etmq)
		return -ENOMEM;

	queue->priv = etmq;
	etmq->etm = etm;
	etmq->queue_nr = queue_nr;
	etmq->offset = 0;

	return 0;
}

static int cs_etm__queue_first_cs_timestamp(struct cs_etm_auxtrace *etm,
					    struct cs_etm_queue *etmq,
					    unsigned int queue_nr)
{
	int ret = 0;
	unsigned int cs_queue_nr;
	u8 trace_chan_id;
	u64 cs_timestamp;

	/*
	 * We are under a CPU-wide trace scenario. As such we need to know
	 * when the code that generated the traces started to execute so that
	 * it can be correlated with execution on other CPUs. So we get a
	 * handle on the beginning of traces and decode until we find a
	 * timestamp. The timestamp is then added to the auxtrace min heap
	 * in order to know what nibble (of all the etmqs) to decode first.
	 */
	while (1) {
		/*
		 * Fetch an aux_buffer from this etmq. Bail if no more
		 * blocks or an error has been encountered.
		 */
		ret = cs_etm__get_data_block(etmq);
		if (ret <= 0)
			goto out;

		/*
		 * Run decoder on the trace block. The decoder will stop when
		 * encountering a CS timestamp, a full packet queue or the end
		 * of trace for that block.
		 */
		ret = cs_etm__decode_data_block(etmq);
		if (ret)
			goto out;

		/*
		 * Function cs_etm_decoder__do_{hard|soft}_timestamp() does all
		 * the timestamp calculation for us.
		 */
		cs_timestamp = cs_etm__etmq_get_timestamp(etmq, &trace_chan_id);

		/* We found a timestamp, no need to continue. */
		if (cs_timestamp)
			break;

		/*
		 * We didn't find a timestamp so empty all the traceid packet
		 * queues before looking for another timestamp packet, either
		 * in the current data block or a new one. Packets that were
		 * just decoded are useless since no timestamp has been
		 * associated with them. As such simply discard them.
		 */
		cs_etm__clear_all_packet_queues(etmq);
	}

	/*
	 * We have a timestamp. Add it to the min heap to reflect when
	 * instructions conveyed by the range packets of this traceID queue
	 * started to execute. Once the same has been done for all the traceID
	 * queues of each etmq, rendering and decoding can start in
	 * chronological order.
	 *
	 * Note that packets decoded above are still in the traceID's packet
	 * queue and will be processed in cs_etm__process_queues().
	 */
	cs_queue_nr = TO_CS_QUEUE_NR(queue_nr, trace_chan_id);
	ret = auxtrace_heap__add(&etm->heap, cs_queue_nr, cs_timestamp);
out:
	return ret;
}

static inline
void cs_etm__copy_last_branch_rb(struct cs_etm_queue *etmq,
				 struct cs_etm_traceid_queue *tidq)
{
	struct branch_stack *bs_src = tidq->last_branch_rb;
	struct branch_stack *bs_dst = tidq->last_branch;
	size_t nr = 0;

	/*
	 * Set the number of records before early exit: ->nr is used to
	 * determine how many branches to copy from ->entries.
	 */
	bs_dst->nr = bs_src->nr;

	/*
	 * Early exit when there is nothing to copy.
	 */
	if (!bs_src->nr)
		return;

	/*
	 * As bs_src->entries is a circular buffer, we need to copy from it in
	 * two steps. First, copy the branches from the most recently inserted
	 * branch ->last_branch_pos until the end of bs_src->entries buffer.
	 */
	nr = etmq->etm->synth_opts.last_branch_sz - tidq->last_branch_pos;
	memcpy(&bs_dst->entries[0],
	       &bs_src->entries[tidq->last_branch_pos],
	       sizeof(struct branch_entry) * nr);

	/*
	 * If we wrapped around at least once, the branches from the beginning
	 * of the bs_src->entries buffer and until the ->last_branch_pos element
	 * are older valid branches: copy them over. The total number of
	 * branches copied over will be equal to the number of branches asked by
	 * the user in last_branch_sz.
	 */
	if (bs_src->nr >= etmq->etm->synth_opts.last_branch_sz) {
		memcpy(&bs_dst->entries[nr],
		       &bs_src->entries[0],
		       sizeof(struct branch_entry) * tidq->last_branch_pos);
	}
}
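
/*
 * Worked example (illustrative): with last_branch_sz = 4 and
 * last_branch_pos = 1 after wrapping, the first memcpy() above moves
 * entries[1..3] (the newest three branches, most recent first) to the
 * start of the destination, then the second memcpy() appends entries[0]
 * (the oldest of the four), leaving the destination in reverse
 * chronological order.
 */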

static inline
void cs_etm__reset_last_branch_rb(struct cs_etm_traceid_queue *tidq)
{
	tidq->last_branch_pos = 0;
	tidq->last_branch_rb->nr = 0;
}

static inline int cs_etm__t32_instr_size(struct cs_etm_queue *etmq,
					 u8 trace_chan_id, u64 addr)
{
	u8 instrBytes[2];

	cs_etm__mem_access(etmq, trace_chan_id, addr,
			   ARRAY_SIZE(instrBytes), instrBytes);
	/*
	 * T32 instruction size is indicated by bits[15:11] of the first
	 * 16-bit word of the instruction: 0b11101, 0b11110 and 0b11111
	 * denote a 32-bit instruction.
	 */
	return ((instrBytes[1] & 0xF8) >= 0xE8) ? 4 : 2;
}
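
/*
 * Worked example (illustrative): a T32 BL encoding begins with a
 * halfword such as 0xF000, so instrBytes[1] = 0xF0; 0xF0 & 0xF8 = 0xF0,
 * which is >= 0xE8, hence a 4-byte instruction. A 16-bit MOV immediate
 * begins 0x20xx, giving instrBytes[1] = 0x20 and a 2-byte size.
 */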

static inline u64 cs_etm__first_executed_instr(struct cs_etm_packet *packet)
{
	/* Returns 0 for the CS_ETM_DISCONTINUITY packet */
	if (packet->sample_type == CS_ETM_DISCONTINUITY)
		return 0;

	return packet->start_addr;
}

static inline
u64 cs_etm__last_executed_instr(const struct cs_etm_packet *packet)
{
	/* Returns 0 for the CS_ETM_DISCONTINUITY packet */
	if (packet->sample_type == CS_ETM_DISCONTINUITY)
		return 0;

	return packet->end_addr - packet->last_instr_size;
}

static inline u64 cs_etm__instr_addr(struct cs_etm_queue *etmq,
				     u64 trace_chan_id,
				     const struct cs_etm_packet *packet,
				     u64 offset)
{
	if (packet->isa == CS_ETM_ISA_T32) {
		u64 addr = packet->start_addr;

		while (offset) {
			addr += cs_etm__t32_instr_size(etmq,
						       trace_chan_id, addr);
			offset--;
		}
		return addr;
	}

	/* Assume a 4 byte instruction size (A32/A64) */
	return packet->start_addr + offset * 4;
}
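
/*
 * Illustrative note: for an A64 range packet with start_addr 0x400000,
 * cs_etm__instr_addr(..., offset = 3) returns 0x40000c. For T32 the loop
 * above walks the packet one instruction at a time, because instructions
 * can be 2 or 4 bytes long.
 */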

static void cs_etm__update_last_branch_rb(struct cs_etm_queue *etmq,
					  struct cs_etm_traceid_queue *tidq)
{
	struct branch_stack *bs = tidq->last_branch_rb;
	struct branch_entry *be;

	/*
	 * The branches are recorded in a circular buffer in reverse
	 * chronological order: we start recording from the last element of the
	 * buffer down. After writing the first element of the stack, move the
	 * insert position back to the end of the buffer.
	 */
	if (!tidq->last_branch_pos)
		tidq->last_branch_pos = etmq->etm->synth_opts.last_branch_sz;

	tidq->last_branch_pos -= 1;

	be = &bs->entries[tidq->last_branch_pos];
	be->from = cs_etm__last_executed_instr(tidq->prev_packet);
	be->to = cs_etm__first_executed_instr(tidq->packet);
	/* No support for mispredict */
	be->flags.mispred = 0;
	be->flags.predicted = 1;

	/*
	 * Increment bs->nr until reaching the number of last branches asked by
	 * the user on the command line.
	 */
	if (bs->nr < etmq->etm->synth_opts.last_branch_sz)
		bs->nr += 1;
}
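
/*
 * Worked example (illustrative): with last_branch_sz = 4, successive
 * calls write entries[3], entries[2], entries[1], entries[0] and then
 * wrap back to entries[3], so entries[last_branch_pos] always holds the
 * most recent branch.
 */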

static int cs_etm__inject_event(union perf_event *event,
				struct perf_sample *sample, u64 type)
{
	event->header.size = perf_event__sample_event_size(sample, type, 0);
	return perf_event__synthesize_sample(event, type, 0, sample);
}

static int
cs_etm__get_trace(struct cs_etm_queue *etmq)
{
	struct auxtrace_buffer *aux_buffer = etmq->buffer;
	struct auxtrace_buffer *old_buffer = aux_buffer;
	struct auxtrace_queue *queue;

	queue = &etmq->etm->queues.queue_array[etmq->queue_nr];

	aux_buffer = auxtrace_buffer__next(queue, aux_buffer);

	/* If no more data, drop the previous auxtrace_buffer and return */
	if (!aux_buffer) {
		if (old_buffer)
			auxtrace_buffer__drop_data(old_buffer);
		etmq->buf_len = 0;
		return 0;
	}

	etmq->buffer = aux_buffer;

	/* If the aux_buffer doesn't have data associated, try to load it */
	if (!aux_buffer->data) {
		/* get the file desc associated with the perf data file */
		int fd = perf_data__fd(etmq->etm->session->data);

		aux_buffer->data = auxtrace_buffer__get_data(aux_buffer, fd);
		if (!aux_buffer->data)
			return -ENOMEM;
	}

	/* If valid, drop the previous buffer */
	if (old_buffer)
		auxtrace_buffer__drop_data(old_buffer);

	etmq->buf_used = 0;
	etmq->buf_len = aux_buffer->size;
	etmq->buf = aux_buffer->data;

	return etmq->buf_len;
}

static void cs_etm__set_pid_tid_cpu(struct cs_etm_auxtrace *etm,
				    struct cs_etm_traceid_queue *tidq)
{
	if ((!tidq->thread) && (tidq->tid != -1))
		tidq->thread = machine__find_thread(etm->machine, -1,
						    tidq->tid);

	if (tidq->thread)
		tidq->pid = tidq->thread->pid_;
}

int cs_etm__etmq_set_tid(struct cs_etm_queue *etmq,
			 pid_t tid, u8 trace_chan_id)
{
	int cpu, err = -EINVAL;
	struct cs_etm_auxtrace *etm = etmq->etm;
	struct cs_etm_traceid_queue *tidq;

	tidq = cs_etm__etmq_get_traceid_queue(etmq, trace_chan_id);
	if (!tidq)
		return err;

	if (cs_etm__get_cpu(trace_chan_id, &cpu) < 0)
		return err;

	err = machine__set_current_tid(etm->machine, cpu, tid, tid);
	if (err)
		return err;

	tidq->tid = tid;
	thread__zput(tidq->thread);

	cs_etm__set_pid_tid_cpu(etm, tidq);
	return 0;
}

bool cs_etm__etmq_is_timeless(struct cs_etm_queue *etmq)
{
	return !!etmq->etm->timeless_decoding;
}

static void cs_etm__copy_insn(struct cs_etm_queue *etmq,
			      u64 trace_chan_id,
			      const struct cs_etm_packet *packet,
			      struct perf_sample *sample)
{
	/*
	 * It's pointless to read instructions for the CS_ETM_DISCONTINUITY
	 * packet, so directly bail out with 'insn_len' = 0.
	 */
	if (packet->sample_type == CS_ETM_DISCONTINUITY) {
		sample->insn_len = 0;
		return;
	}

	/*
	 * T32 instruction size might be 32-bit or 16-bit, decide by calling
	 * cs_etm__t32_instr_size().
	 */
	if (packet->isa == CS_ETM_ISA_T32)
		sample->insn_len = cs_etm__t32_instr_size(etmq, trace_chan_id,
							  sample->ip);
	/* Otherwise, A64 and A32 instruction sizes are always 32-bit. */
	else
		sample->insn_len = 4;

	cs_etm__mem_access(etmq, trace_chan_id, sample->ip,
			   sample->insn_len, (void *)sample->insn);
}

u64 cs_etm__convert_sample_time(struct cs_etm_queue *etmq, u64 cs_timestamp)
{
	struct cs_etm_auxtrace *etm = etmq->etm;

	if (etm->has_virtual_ts)
		return tsc_to_perf_time(cs_timestamp, &etm->tc);
	else
		return cs_timestamp;
}

static inline u64 cs_etm__resolve_sample_time(struct cs_etm_queue *etmq,
					      struct cs_etm_traceid_queue *tidq)
{
	struct cs_etm_auxtrace *etm = etmq->etm;
	struct cs_etm_packet_queue *packet_queue = &tidq->packet_queue;

	if (etm->timeless_decoding)
		return 0;
	else if (etm->has_virtual_ts)
		return packet_queue->cs_timestamp;
	else
		return etm->latest_kernel_timestamp;
}

static int cs_etm__synth_instruction_sample(struct cs_etm_queue *etmq,
					    struct cs_etm_traceid_queue *tidq,
					    u64 addr, u64 period)
{
	int ret = 0;
	struct cs_etm_auxtrace *etm = etmq->etm;
	union perf_event *event = tidq->event_buf;
	struct perf_sample sample = {.ip = 0,};

	event->sample.header.type = PERF_RECORD_SAMPLE;
	event->sample.header.misc = cs_etm__cpu_mode(etmq, addr);
	event->sample.header.size = sizeof(struct perf_event_header);

	/* Set time field based on etm auxtrace config. */
	sample.time = cs_etm__resolve_sample_time(etmq, tidq);

	sample.ip = addr;
	sample.pid = tidq->pid;
	sample.tid = tidq->tid;
	sample.id = etmq->etm->instructions_id;
	sample.stream_id = etmq->etm->instructions_id;
	sample.period = period;
	sample.cpu = tidq->packet->cpu;
	sample.flags = tidq->prev_packet->flags;
	sample.cpumode = event->sample.header.misc;

	cs_etm__copy_insn(etmq, tidq->trace_chan_id, tidq->packet, &sample);

	if (etm->synth_opts.last_branch)
		sample.branch_stack = tidq->last_branch;

	if (etm->synth_opts.inject) {
		ret = cs_etm__inject_event(event, &sample,
					   etm->instructions_sample_type);
		if (ret)
			return ret;
	}

	ret = perf_session__deliver_synth_event(etm->session, event, &sample);

	if (ret)
		pr_err(
			"CS ETM Trace: failed to deliver instruction event, error %d\n",
			ret);

	return ret;
}

/*
 * The cs etm packet encodes an instruction range between a branch target
 * and the next taken branch. Generate a sample accordingly.
 */
static int cs_etm__synth_branch_sample(struct cs_etm_queue *etmq,
				       struct cs_etm_traceid_queue *tidq)
{
	int ret = 0;
	struct cs_etm_auxtrace *etm = etmq->etm;
	struct perf_sample sample = {.ip = 0,};
	union perf_event *event = tidq->event_buf;
	struct dummy_branch_stack {
		u64 nr;
		u64 hw_idx;
		struct branch_entry entries;
	} dummy_bs;
	u64 ip;

	ip = cs_etm__last_executed_instr(tidq->prev_packet);

	event->sample.header.type = PERF_RECORD_SAMPLE;
	event->sample.header.misc = cs_etm__cpu_mode(etmq, ip);
	event->sample.header.size = sizeof(struct perf_event_header);

	/* Set time field based on etm auxtrace config. */
	sample.time = cs_etm__resolve_sample_time(etmq, tidq);

	sample.ip = ip;
	sample.pid = tidq->pid;
	sample.tid = tidq->tid;
	sample.addr = cs_etm__first_executed_instr(tidq->packet);
	sample.id = etmq->etm->branches_id;
	sample.stream_id = etmq->etm->branches_id;
	sample.period = 1;
	sample.cpu = tidq->packet->cpu;
	sample.flags = tidq->prev_packet->flags;
	sample.cpumode = event->sample.header.misc;

	cs_etm__copy_insn(etmq, tidq->trace_chan_id, tidq->prev_packet,
			  &sample);

	/*
	 * perf report cannot handle events without a branch stack
	 */
	if (etm->synth_opts.last_branch) {
		dummy_bs = (struct dummy_branch_stack){
			.nr = 1,
			.hw_idx = -1ULL,
			.entries = {
				.from = sample.ip,
				.to = sample.addr,
			},
		};
		sample.branch_stack = (struct branch_stack *)&dummy_bs;
	}

	if (etm->synth_opts.inject) {
		ret = cs_etm__inject_event(event, &sample,
					   etm->branches_sample_type);
		if (ret)
			return ret;
	}

	ret = perf_session__deliver_synth_event(etm->session, event, &sample);

	if (ret)
		pr_err(
			"CS ETM Trace: failed to deliver instruction event, error %d\n",
			ret);

	return ret;
}

struct cs_etm_synth {
	struct perf_tool dummy_tool;
	struct perf_session *session;
};

static int cs_etm__event_synth(struct perf_tool *tool,
			       union perf_event *event,
			       struct perf_sample *sample __maybe_unused,
			       struct machine *machine __maybe_unused)
{
	struct cs_etm_synth *cs_etm_synth =
		      container_of(tool, struct cs_etm_synth, dummy_tool);

	return perf_session__deliver_synth_event(cs_etm_synth->session,
						 event, NULL);
}

static int cs_etm__synth_event(struct perf_session *session,
			       struct perf_event_attr *attr, u64 id)
{
	struct cs_etm_synth cs_etm_synth;

	memset(&cs_etm_synth, 0, sizeof(struct cs_etm_synth));
	cs_etm_synth.session = session;

	return perf_event__synthesize_attr(&cs_etm_synth.dummy_tool, attr, 1,
					   &id, cs_etm__event_synth);
}

static int cs_etm__synth_events(struct cs_etm_auxtrace *etm,
				struct perf_session *session)
{
	struct evlist *evlist = session->evlist;
	struct evsel *evsel;
	struct perf_event_attr attr;
	bool found = false;
	u64 id;
	int err;

	evlist__for_each_entry(evlist, evsel) {
		if (evsel->core.attr.type == etm->pmu_type) {
			found = true;
			break;
		}
	}

	if (!found) {
		pr_debug("No selected events with CoreSight Trace data\n");
		return 0;
	}

	memset(&attr, 0, sizeof(struct perf_event_attr));
	attr.size = sizeof(struct perf_event_attr);
	attr.type = PERF_TYPE_HARDWARE;
	attr.sample_type = evsel->core.attr.sample_type & PERF_SAMPLE_MASK;
	attr.sample_type |= PERF_SAMPLE_IP | PERF_SAMPLE_TID |
			    PERF_SAMPLE_PERIOD;
	if (etm->timeless_decoding)
		attr.sample_type &= ~(u64)PERF_SAMPLE_TIME;
	else
		attr.sample_type |= PERF_SAMPLE_TIME;

	attr.exclude_user = evsel->core.attr.exclude_user;
	attr.exclude_kernel = evsel->core.attr.exclude_kernel;
	attr.exclude_hv = evsel->core.attr.exclude_hv;
	attr.exclude_host = evsel->core.attr.exclude_host;
	attr.exclude_guest = evsel->core.attr.exclude_guest;
	attr.sample_id_all = evsel->core.attr.sample_id_all;
	attr.read_format = evsel->core.attr.read_format;

	/* create new id val to be a fixed offset from evsel id */
	id = evsel->core.id[0] + 1000000000;

	if (!id)
		id = 1;

	if (etm->synth_opts.branches) {
		attr.config = PERF_COUNT_HW_BRANCH_INSTRUCTIONS;
		attr.sample_period = 1;
		attr.sample_type |= PERF_SAMPLE_ADDR;
		err = cs_etm__synth_event(session, &attr, id);
		if (err)
			return err;
		etm->branches_sample_type = attr.sample_type;
		etm->branches_id = id;
		id += 1;
		attr.sample_type &= ~(u64)PERF_SAMPLE_ADDR;
	}

	if (etm->synth_opts.last_branch) {
		attr.sample_type |= PERF_SAMPLE_BRANCH_STACK;
		/*
		 * We don't use the hardware index, but the sample generation
		 * code uses the new format branch_stack with this field,
		 * so the event attributes must indicate that it's present.
		 */
		attr.branch_sample_type |= PERF_SAMPLE_BRANCH_HW_INDEX;
	}

	if (etm->synth_opts.instructions) {
		attr.config = PERF_COUNT_HW_INSTRUCTIONS;
		attr.sample_period = etm->synth_opts.period;
		etm->instructions_sample_period = attr.sample_period;
		err = cs_etm__synth_event(session, &attr, id);
		if (err)
			return err;
		etm->instructions_sample_type = attr.sample_type;
		etm->instructions_id = id;
		id += 1;
	}

	return 0;
}

static int cs_etm__sample(struct cs_etm_queue *etmq,
			  struct cs_etm_traceid_queue *tidq)
{
	struct cs_etm_auxtrace *etm = etmq->etm;
	int ret;
	u8 trace_chan_id = tidq->trace_chan_id;
	u64 instrs_prev;

	/* Get instructions remainder from previous packet */
	instrs_prev = tidq->period_instructions;

	tidq->period_instructions += tidq->packet->instr_count;

	/*
	 * Record a branch when the last instruction in
	 * PREV_PACKET is a branch.
	 */
	if (etm->synth_opts.last_branch &&
	    tidq->prev_packet->sample_type == CS_ETM_RANGE &&
	    tidq->prev_packet->last_instr_taken_branch)
		cs_etm__update_last_branch_rb(etmq, tidq);

	if (etm->synth_opts.instructions &&
	    tidq->period_instructions >= etm->instructions_sample_period) {
		/*
		 * Emit instruction sample periodically
		 * TODO: allow period to be defined in cycles and clock time
		 */

		/*
		 * The diagram below demonstrates the instruction sample
		 * generation flow:
		 *
		 *    Instrs     Instrs       Instrs       Instrs
		 *   Sample(n)  Sample(n+1)  Sample(n+2)  Sample(n+3)
		 *    |            |            |            |
		 *    V            V            V            V
		 *   --------------------------------------------------
		 *            ^                                  ^
		 *            |                                  |
		 *         Period                             Period
		 *    instructions(Pi)                   instructions(Pi')
		 *
		 *            |                                  |
		 *            \---------------- -----------------/
		 *                             V
		 *                 tidq->packet->instr_count
		 *
		 * Instrs Sample(n...) are the synthesised samples occurring
		 * every etm->instructions_sample_period instructions - as
		 * defined on the perf command line. Sample(n) is the last
		 * sample before the current etm packet; samples n+1 to n+3
		 * are generated from the current etm packet.
		 *
		 * tidq->packet->instr_count represents the number of
		 * instructions in the current etm packet.
		 *
		 * Period instructions (Pi) contains the number of
		 * instructions executed after the sample point(n) from the
		 * previous etm packet. This will always be less than
		 * etm->instructions_sample_period.
		 *
		 * When generating new samples, instructions from two parts
		 * are combined: the tail of the old packet and the head of
		 * the incoming packet form sample(n+1); sample(n+2) and
		 * sample(n+3) each consume a full sample period. After
		 * sample(n+3), the remaining instructions are carried over
		 * in tidq->period_instructions for the next round of
		 * calculation.
		 */
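
		/*
		 * Worked example (illustrative): with a sample period of
		 * 10000, instrs_prev = 100 and instr_count = 25000,
		 * period_instructions becomes 25100 and offset starts at
		 * 9900. The loop below emits samples at packet offsets 9899
		 * and 19899, then carries the remaining 5100 instructions
		 * in tidq->period_instructions.
		 */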

		/*
		 * Get the initial offset into the current packet instructions;
		 * entry conditions ensure that instrs_prev is less than
		 * etm->instructions_sample_period.
		 */
		u64 offset = etm->instructions_sample_period - instrs_prev;
		u64 addr;

		/* Prepare last branches for instruction sample */
		if (etm->synth_opts.last_branch)
			cs_etm__copy_last_branch_rb(etmq, tidq);

		while (tidq->period_instructions >=
				etm->instructions_sample_period) {
			/*
			 * Calculate the address of the sampled instruction (-1
			 * as sample is reported as though instruction has just
			 * been executed, but PC has not advanced to next
			 * instruction)
			 */
			addr = cs_etm__instr_addr(etmq, trace_chan_id,
						  tidq->packet, offset - 1);
			ret = cs_etm__synth_instruction_sample(
				etmq, tidq, addr,
				etm->instructions_sample_period);
			if (ret)
				return ret;

			offset += etm->instructions_sample_period;
			tidq->period_instructions -=
				etm->instructions_sample_period;
		}
	}

	if (etm->synth_opts.branches) {
		bool generate_sample = false;

		/* Generate sample for tracing on packet */
		if (tidq->prev_packet->sample_type == CS_ETM_DISCONTINUITY)
			generate_sample = true;

		/* Generate sample for branch taken packet */
		if (tidq->prev_packet->sample_type == CS_ETM_RANGE &&
		    tidq->prev_packet->last_instr_taken_branch)
			generate_sample = true;

		if (generate_sample) {
			ret = cs_etm__synth_branch_sample(etmq, tidq);
			if (ret)
				return ret;
		}
	}

	cs_etm__packet_swap(etm, tidq);

	return 0;
}

static int cs_etm__exception(struct cs_etm_traceid_queue *tidq)
{
	/*
	 * When the exception packet is inserted, whether or not the last
	 * instruction in the previous range packet is a taken branch, we
	 * need to force 'prev_packet->last_instr_taken_branch' to true.
	 * This ensures that a branch sample is generated for the instruction
	 * range before the exception traps to the kernel or before the
	 * exception returns.
	 *
	 * The exception packet includes the dummy address values, so don't
	 * swap PACKET with PREV_PACKET. This keeps PREV_PACKET useful for
	 * generating instruction and branch samples.
	 */
	if (tidq->prev_packet->sample_type == CS_ETM_RANGE)
		tidq->prev_packet->last_instr_taken_branch = true;

	return 0;
}
1734
c7bfa2fd
MP
1735static int cs_etm__flush(struct cs_etm_queue *etmq,
1736 struct cs_etm_traceid_queue *tidq)
256e751c
RW
1737{
1738 int err = 0;
d603b4e9 1739 struct cs_etm_auxtrace *etm = etmq->etm;
256e751c 1740
3eb3e07b 1741 /* Handle start tracing packet */
c7bfa2fd 1742 if (tidq->prev_packet->sample_type == CS_ETM_EMPTY)
3eb3e07b
LY
1743 goto swap_packet;
1744
256e751c 1745 if (etmq->etm->synth_opts.last_branch &&
9de07369 1746 etmq->etm->synth_opts.instructions &&
c7bfa2fd 1747 tidq->prev_packet->sample_type == CS_ETM_RANGE) {
695378b5
LY
1748 u64 addr;
1749
1750 /* Prepare last branches for instruction sample */
1751 cs_etm__copy_last_branch_rb(etmq, tidq);
1752
256e751c
RW
1753 /*
1754 * Generate a last branch event for the branches left in the
1755 * circular buffer at the end of the trace.
1756 *
1757 * Use the address of the end of the last reported execution
1758 * range
1759 */
695378b5 1760 addr = cs_etm__last_executed_instr(tidq->prev_packet);
256e751c
RW
1761
1762 err = cs_etm__synth_instruction_sample(
c7bfa2fd
MP
1763 etmq, tidq, addr,
1764 tidq->period_instructions);
6cd4ac6a
LY
1765 if (err)
1766 return err;
1767
c7bfa2fd 1768 tidq->period_instructions = 0;
256e751c 1769
3eb3e07b
LY
1770 }
1771
0b31ea66 1772 if (etm->synth_opts.branches &&
c7bfa2fd
MP
1773 tidq->prev_packet->sample_type == CS_ETM_RANGE) {
1774 err = cs_etm__synth_branch_sample(etmq, tidq);
d603b4e9
LY
1775 if (err)
1776 return err;
1777 }
1778
3eb3e07b 1779swap_packet:
d0175156 1780 cs_etm__packet_swap(etm, tidq);
256e751c 1781
f1410028
LY
1782 /* Reset last branches after flushing the trace */
1783 if (etm->synth_opts.last_branch)
1784 cs_etm__reset_last_branch_rb(tidq);
1785
256e751c
RW
1786 return err;
1787}
1788
c7bfa2fd
MP
1789static int cs_etm__end_block(struct cs_etm_queue *etmq,
1790 struct cs_etm_traceid_queue *tidq)
24fff5eb
LY
1791{
1792 int err;
1793
1794 /*
1795 * No new packets are coming and 'tidq->packet' contains the stale
1796 * packet left over from the previous packet swap; skip generating a
1797 * branch sample so that the stale packet is not emitted.
1798 *
1799 * For this case, only flush the branch stack and generate a last
1800 * branch event for the branches left in the circular buffer at the
1801 * end of the trace.
1802 */
1803 if (etmq->etm->synth_opts.last_branch &&
9de07369 1804 etmq->etm->synth_opts.instructions &&
c7bfa2fd 1805 tidq->prev_packet->sample_type == CS_ETM_RANGE) {
695378b5
LY
1806 u64 addr;
1807
1808 /* Prepare last branches for instruction sample */
1809 cs_etm__copy_last_branch_rb(etmq, tidq);
1810
24fff5eb
LY
1811 /*
1812 * Use the address of the end of the last reported execution
1813 * range.
1814 */
695378b5 1815 addr = cs_etm__last_executed_instr(tidq->prev_packet);
24fff5eb
LY
1816
1817 err = cs_etm__synth_instruction_sample(
c7bfa2fd
MP
1818 etmq, tidq, addr,
1819 tidq->period_instructions);
24fff5eb
LY
1820 if (err)
1821 return err;
1822
c7bfa2fd 1823 tidq->period_instructions = 0;
24fff5eb
LY
1824 }
1825
1826 return 0;
1827}
8224531c
MP
1828/*
1829 * cs_etm__get_data_block: Fetch a block from the auxtrace_buffer queue
1830 * if need be.
1831 * Returns: < 0 if error
1832 * = 0 if no more auxtrace_buffer to read
1833 * > 0 if the current buffer isn't empty yet
1834 */
1835static int cs_etm__get_data_block(struct cs_etm_queue *etmq)
1836{
1837 int ret;
1838
1839 if (!etmq->buf_len) {
1840 ret = cs_etm__get_trace(etmq);
1841 if (ret <= 0)
1842 return ret;
1843 /*
1844 * We cannot assume consecutive blocks in the data file
1845 * are contiguous, reset the decoder to force re-sync.
1846 */
1847 ret = cs_etm_decoder__reset(etmq->decoder);
1848 if (ret)
1849 return ret;
1850 }
1851
1852 return etmq->buf_len;
1853}
24fff5eb 1854
af21577c 1855static bool cs_etm__is_svc_instr(struct cs_etm_queue *etmq, u8 trace_chan_id,
96dce7f4
LY
1856 struct cs_etm_packet *packet,
1857 u64 end_addr)
1858{
c152d4d4
MP
1859 /* Initialise to keep compiler happy */
1860 u16 instr16 = 0;
1861 u32 instr32 = 0;
96dce7f4
LY
1862 u64 addr;
1863
1864 switch (packet->isa) {
1865 case CS_ETM_ISA_T32:
1866 /*
1867 * The SVC of T32 is defined in ARM DDI 0487D.a, F5.1.247:
1868 *
1869 * b'15 b'8
1870 * +-----------------+--------+
1871 * | 1 1 0 1 1 1 1 1 | imm8 |
1872 * +-----------------+--------+
1873 *
4d39c89f 1874 * The specification only defines the SVC instruction for T32
96dce7f4
LY
1875 * as a 16-bit instruction and provides no 32-bit form; so below,
1876 * read only 2 bytes as the instruction size for T32.
1877 */
1878 addr = end_addr - 2;
af21577c
MP
1879 cs_etm__mem_access(etmq, trace_chan_id, addr,
1880 sizeof(instr16), (u8 *)&instr16);
96dce7f4
LY
1881 if ((instr16 & 0xFF00) == 0xDF00)
1882 return true;
1883
1884 break;
1885 case CS_ETM_ISA_A32:
1886 /*
1887 * The SVC of A32 is defined in ARM DDI 0487D.a, F5.1.247:
1888 *
1889 * b'31 b'28 b'27 b'24
1890 * +---------+---------+-------------------------+
1891 * | !1111 | 1 1 1 1 | imm24 |
1892 * +---------+---------+-------------------------+
1893 */
1894 addr = end_addr - 4;
af21577c
MP
1895 cs_etm__mem_access(etmq, trace_chan_id, addr,
1896 sizeof(instr32), (u8 *)&instr32);
96dce7f4
LY
1897 if ((instr32 & 0x0F000000) == 0x0F000000 &&
1898 (instr32 & 0xF0000000) != 0xF0000000)
1899 return true;
1900
1901 break;
1902 case CS_ETM_ISA_A64:
1903 /*
1904 * The SVC of A64 is defined in ARM DDI 0487D.a, C6.2.294:
1905 *
1906 * b'31 b'21 b'4 b'0
1907 * +-----------------------+---------+-----------+
1908 * | 1 1 0 1 0 1 0 0 0 0 0 | imm16 | 0 0 0 0 1 |
1909 * +-----------------------+---------+-----------+
1910 */
1911 addr = end_addr - 4;
af21577c
MP
1912 cs_etm__mem_access(etmq, trace_chan_id, addr,
1913 sizeof(instr32), (u8 *)&instr32);
96dce7f4
LY
1914 if ((instr32 & 0xFFE0001F) == 0xd4000001)
1915 return true;
1916
1917 break;
1918 case CS_ETM_ISA_UNKNOWN:
1919 default:
1920 break;
1921 }
1922
1923 return false;
1924}
1925
c7bfa2fd
MP
1926static bool cs_etm__is_syscall(struct cs_etm_queue *etmq,
1927 struct cs_etm_traceid_queue *tidq, u64 magic)
96dce7f4 1928{
af21577c 1929 u8 trace_chan_id = tidq->trace_chan_id;
c7bfa2fd
MP
1930 struct cs_etm_packet *packet = tidq->packet;
1931 struct cs_etm_packet *prev_packet = tidq->prev_packet;
96dce7f4
LY
1932
1933 if (magic == __perf_cs_etmv3_magic)
1934 if (packet->exception_number == CS_ETMV3_EXC_SVC)
1935 return true;
1936
1937 /*
1938 * ETMv4 exception type CS_ETMV4_EXC_CALL covers SVC, SMC and
1939 * HVC cases; need to check if it's SVC instruction based on
1940 * packet address.
1941 */
1942 if (magic == __perf_cs_etmv4_magic) {
1943 if (packet->exception_number == CS_ETMV4_EXC_CALL &&
af21577c 1944 cs_etm__is_svc_instr(etmq, trace_chan_id, prev_packet,
96dce7f4
LY
1945 prev_packet->end_addr))
1946 return true;
1947 }
1948
1949 return false;
1950}
1951
c7bfa2fd
MP
1952static bool cs_etm__is_async_exception(struct cs_etm_traceid_queue *tidq,
1953 u64 magic)
96dce7f4 1954{
c7bfa2fd 1955 struct cs_etm_packet *packet = tidq->packet;
96dce7f4
LY
1956
1957 if (magic == __perf_cs_etmv3_magic)
1958 if (packet->exception_number == CS_ETMV3_EXC_DEBUG_HALT ||
1959 packet->exception_number == CS_ETMV3_EXC_ASYNC_DATA_ABORT ||
1960 packet->exception_number == CS_ETMV3_EXC_PE_RESET ||
1961 packet->exception_number == CS_ETMV3_EXC_IRQ ||
1962 packet->exception_number == CS_ETMV3_EXC_FIQ)
1963 return true;
1964
1965 if (magic == __perf_cs_etmv4_magic)
1966 if (packet->exception_number == CS_ETMV4_EXC_RESET ||
1967 packet->exception_number == CS_ETMV4_EXC_DEBUG_HALT ||
1968 packet->exception_number == CS_ETMV4_EXC_SYSTEM_ERROR ||
1969 packet->exception_number == CS_ETMV4_EXC_INST_DEBUG ||
1970 packet->exception_number == CS_ETMV4_EXC_DATA_DEBUG ||
1971 packet->exception_number == CS_ETMV4_EXC_IRQ ||
1972 packet->exception_number == CS_ETMV4_EXC_FIQ)
1973 return true;
1974
1975 return false;
1976}
1977
c7bfa2fd
MP
1978static bool cs_etm__is_sync_exception(struct cs_etm_queue *etmq,
1979 struct cs_etm_traceid_queue *tidq,
1980 u64 magic)
96dce7f4 1981{
af21577c 1982 u8 trace_chan_id = tidq->trace_chan_id;
c7bfa2fd
MP
1983 struct cs_etm_packet *packet = tidq->packet;
1984 struct cs_etm_packet *prev_packet = tidq->prev_packet;
96dce7f4
LY
1985
1986 if (magic == __perf_cs_etmv3_magic)
1987 if (packet->exception_number == CS_ETMV3_EXC_SMC ||
1988 packet->exception_number == CS_ETMV3_EXC_HYP ||
1989 packet->exception_number == CS_ETMV3_EXC_JAZELLE_THUMBEE ||
1990 packet->exception_number == CS_ETMV3_EXC_UNDEFINED_INSTR ||
1991 packet->exception_number == CS_ETMV3_EXC_PREFETCH_ABORT ||
1992 packet->exception_number == CS_ETMV3_EXC_DATA_FAULT ||
1993 packet->exception_number == CS_ETMV3_EXC_GENERIC)
1994 return true;
1995
1996 if (magic == __perf_cs_etmv4_magic) {
1997 if (packet->exception_number == CS_ETMV4_EXC_TRAP ||
1998 packet->exception_number == CS_ETMV4_EXC_ALIGNMENT ||
1999 packet->exception_number == CS_ETMV4_EXC_INST_FAULT ||
2000 packet->exception_number == CS_ETMV4_EXC_DATA_FAULT)
2001 return true;
2002
2003 /*
2004 * For CS_ETMV4_EXC_CALL, instructions other than SVC
2005 * (i.e. SMC, HVC) are taken as sync exceptions.
2006 */
2007 if (packet->exception_number == CS_ETMV4_EXC_CALL &&
af21577c 2008 !cs_etm__is_svc_instr(etmq, trace_chan_id, prev_packet,
96dce7f4
LY
2009 prev_packet->end_addr))
2010 return true;
2011
2012 /*
2013 * ETMv4 has 5 bits for exception number; if the numbers
2014 * are in the range ( CS_ETMV4_EXC_FIQ, CS_ETMV4_EXC_END ]
2015 * they are implementation defined exceptions.
2016 *
2017 * In this case, simply treat them as sync exceptions.
2018 */
2019 if (packet->exception_number > CS_ETMV4_EXC_FIQ &&
2020 packet->exception_number <= CS_ETMV4_EXC_END)
2021 return true;
2022 }
2023
2024 return false;
2025}
2026
c7bfa2fd
MP
2027static int cs_etm__set_sample_flags(struct cs_etm_queue *etmq,
2028 struct cs_etm_traceid_queue *tidq)
06220bf4 2029{
c7bfa2fd
MP
2030 struct cs_etm_packet *packet = tidq->packet;
2031 struct cs_etm_packet *prev_packet = tidq->prev_packet;
af21577c 2032 u8 trace_chan_id = tidq->trace_chan_id;
96dce7f4
LY
2033 u64 magic;
2034 int ret;
06220bf4
LY
2035
2036 switch (packet->sample_type) {
2037 case CS_ETM_RANGE:
2038 /*
2039 * An immediate branch instruction with neither link nor
2040 * return flag is a normal branch instruction within
2041 * a function.
2042 */
2043 if (packet->last_instr_type == OCSD_INSTR_BR &&
2044 packet->last_instr_subtype == OCSD_S_INSTR_NONE) {
2045 packet->flags = PERF_IP_FLAG_BRANCH;
2046
2047 if (packet->last_instr_cond)
2048 packet->flags |= PERF_IP_FLAG_CONDITIONAL;
2049 }
2050
2051 /*
2052 * An immediate branch instruction with link (e.g. BL) is a
2053 * branch instruction for a function call.
2054 */
2055 if (packet->last_instr_type == OCSD_INSTR_BR &&
2056 packet->last_instr_subtype == OCSD_S_INSTR_BR_LINK)
2057 packet->flags = PERF_IP_FLAG_BRANCH |
2058 PERF_IP_FLAG_CALL;
2059
2060 /*
2061 * An indirect branch instruction with link (e.g. BLR) is a
2062 * branch instruction for a function call.
2063 */
2064 if (packet->last_instr_type == OCSD_INSTR_BR_INDIRECT &&
2065 packet->last_instr_subtype == OCSD_S_INSTR_BR_LINK)
2066 packet->flags = PERF_IP_FLAG_BRANCH |
2067 PERF_IP_FLAG_CALL;
2068
2069 /*
2070 * An indirect branch instruction with subtype
2071 * OCSD_S_INSTR_V7_IMPLIED_RET is an explicit hint of a
2072 * function return for A32/T32.
2073 */
2074 if (packet->last_instr_type == OCSD_INSTR_BR_INDIRECT &&
2075 packet->last_instr_subtype == OCSD_S_INSTR_V7_IMPLIED_RET)
2076 packet->flags = PERF_IP_FLAG_BRANCH |
2077 PERF_IP_FLAG_RETURN;
2078
2079 /*
2080 * An indirect branch instruction without link (e.g. BR) is
2081 * usually used for a function return, especially for functions
2082 * within dynamically linked libraries.
2083 */
2084 if (packet->last_instr_type == OCSD_INSTR_BR_INDIRECT &&
2085 packet->last_instr_subtype == OCSD_S_INSTR_NONE)
2086 packet->flags = PERF_IP_FLAG_BRANCH |
2087 PERF_IP_FLAG_RETURN;
2088
2089 /* Return instruction for function return. */
2090 if (packet->last_instr_type == OCSD_INSTR_BR_INDIRECT &&
2091 packet->last_instr_subtype == OCSD_S_INSTR_V8_RET)
2092 packet->flags = PERF_IP_FLAG_BRANCH |
2093 PERF_IP_FLAG_RETURN;
465eaaa8
LY
2094
2095 /*
2096 * The decoder might insert a discontinuity in the middle of
2097 * instruction packets; fix up prev_packet with the flag
2098 * PERF_IP_FLAG_TRACE_BEGIN to indicate the trace restarting.
2099 */
2100 if (prev_packet->sample_type == CS_ETM_DISCONTINUITY)
2101 prev_packet->flags |= PERF_IP_FLAG_BRANCH |
2102 PERF_IP_FLAG_TRACE_BEGIN;
173e65f6
LY
2103
2104 /*
2105 * If the previous packet is an exception return packet
4d39c89f 2106 * and the return address immediately follows an SVC
173e65f6
LY
2107 * instruction, recalibrate the previous packet's sample
2108 * flags to PERF_IP_FLAG_SYSCALLRET.
2109 */
2110 if (prev_packet->flags == (PERF_IP_FLAG_BRANCH |
2111 PERF_IP_FLAG_RETURN |
2112 PERF_IP_FLAG_INTERRUPT) &&
af21577c
MP
2113 cs_etm__is_svc_instr(etmq, trace_chan_id,
2114 packet, packet->start_addr))
173e65f6
LY
2115 prev_packet->flags = PERF_IP_FLAG_BRANCH |
2116 PERF_IP_FLAG_RETURN |
2117 PERF_IP_FLAG_SYSCALLRET;
06220bf4
LY
2118 break;
2119 case CS_ETM_DISCONTINUITY:
465eaaa8
LY
2120 /*
2121 * The trace is discontinuous; if the previous packet is an
2122 * instruction packet, set the flag PERF_IP_FLAG_TRACE_END
2123 * on the previous packet.
2124 */
2125 if (prev_packet->sample_type == CS_ETM_RANGE)
2126 prev_packet->flags |= PERF_IP_FLAG_BRANCH |
2127 PERF_IP_FLAG_TRACE_END;
2128 break;
06220bf4 2129 case CS_ETM_EXCEPTION:
96dce7f4
LY
2130 ret = cs_etm__get_magic(packet->trace_chan_id, &magic);
2131 if (ret)
2132 return ret;
2133
2134 /* The exception is for system call. */
c7bfa2fd 2135 if (cs_etm__is_syscall(etmq, tidq, magic))
96dce7f4
LY
2136 packet->flags = PERF_IP_FLAG_BRANCH |
2137 PERF_IP_FLAG_CALL |
2138 PERF_IP_FLAG_SYSCALLRET;
2139 /*
2140 * The exceptions are triggered by external signals from bus,
2141 * interrupt controller, debug module, PE reset or halt.
2142 */
c7bfa2fd 2143 else if (cs_etm__is_async_exception(tidq, magic))
96dce7f4
LY
2144 packet->flags = PERF_IP_FLAG_BRANCH |
2145 PERF_IP_FLAG_CALL |
2146 PERF_IP_FLAG_ASYNC |
2147 PERF_IP_FLAG_INTERRUPT;
2148 /*
2149 * Otherwise, exception is caused by trap, instruction &
2150 * data fault, or alignment errors.
2151 */
c7bfa2fd 2152 else if (cs_etm__is_sync_exception(etmq, tidq, magic))
96dce7f4
LY
2153 packet->flags = PERF_IP_FLAG_BRANCH |
2154 PERF_IP_FLAG_CALL |
2155 PERF_IP_FLAG_INTERRUPT;
2156
2157 /*
2158 * An exception packet is not used standalone for generating
2159 * samples; it is affiliated with the previous instruction
2160 * range packet. So when an exception packet is inserted, set
2161 * the previous range packet's flags to tell perf it is an
2162 * exception taken branch.
2163 */
2164 if (prev_packet->sample_type == CS_ETM_RANGE)
2165 prev_packet->flags = packet->flags;
2166 break;
06220bf4 2167 case CS_ETM_EXCEPTION_RET:
173e65f6
LY
2168 /*
2169 * An exception return packet is not used standalone for
2170 * generating samples; it is affiliated with the previous
2171 * instruction range packet. So when an exception return
2172 * packet is inserted, set the previous range packet's flags
2173 * to tell perf it is an exception return branch.
2174 *
2175 * The exception return can be for either a system call or
2176 * another exception type; unfortunately the packet doesn't
2177 * contain exception type related info, so we cannot decide
2178 * the exception type purely from the exception return packet.
2179 * Recording the exception number from the exception packet and
4d39c89f 2180 * reusing it for the exception return packet is not reliable,
173e65f6
LY
2181 * because the trace can be discontinuous or the interrupt can
2182 * be nested; in these two cases the recorded exception number
2183 * cannot be used for the exception return packet.
2184 *
2185 * For an exception return packet, we only need to distinguish
2186 * whether it is for a system call or another type. The
2187 * decision can thus be deferred until the next packet, which
2188 * contains the return address; based on the return address we
2189 * can read out the previous instruction, check whether it is a
2190 * system call instruction and then calibrate the sample flags
2191 * as needed.
2192 */
2193 if (prev_packet->sample_type == CS_ETM_RANGE)
2194 prev_packet->flags = PERF_IP_FLAG_BRANCH |
2195 PERF_IP_FLAG_RETURN |
2196 PERF_IP_FLAG_INTERRUPT;
2197 break;
06220bf4
LY
2198 case CS_ETM_EMPTY:
2199 default:
2200 break;
2201 }
2202
2203 return 0;
2204}
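/*
 * Summary of the mapping above (illustrative; assumes the decoder has
 * filled in the OCSD instruction fields):
 *
 *   B    (OCSD_INSTR_BR, OCSD_S_INSTR_NONE)             -> BRANCH
 *   BL   (OCSD_INSTR_BR, OCSD_S_INSTR_BR_LINK)          -> BRANCH | CALL
 *   BLR  (OCSD_INSTR_BR_INDIRECT, OCSD_S_INSTR_BR_LINK) -> BRANCH | CALL
 *   RET  (OCSD_INSTR_BR_INDIRECT, OCSD_S_INSTR_V8_RET)  -> BRANCH | RETURN
 *   SVC exception                                       -> BRANCH | CALL | SYSCALLRET
 *   async exception (IRQ, FIQ, ...)                     -> BRANCH | CALL | ASYNC | INTERRUPT
 *
 * where the names on the right abbreviate the PERF_IP_FLAG_* constants.
 */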
2205
f74f349c
MP
2206static int cs_etm__decode_data_block(struct cs_etm_queue *etmq)
2207{
2208 int ret = 0;
2209 size_t processed = 0;
2210
2211 /*
2212 * Packets are decoded and added to the decoder's packet queue
2213 * until the decoder packet processing callback has requested that
2214 * processing stops or there is nothing left in the buffer. Normal
2215 * operations that stop processing are a timestamp packet or a full
2216 * decoder buffer queue.
2217 */
2218 ret = cs_etm_decoder__process_data_block(etmq->decoder,
2219 etmq->offset,
2220 &etmq->buf[etmq->buf_used],
2221 etmq->buf_len,
2222 &processed);
2223 if (ret)
2224 goto out;
2225
2226 etmq->offset += processed;
2227 etmq->buf_used += processed;
2228 etmq->buf_len -= processed;
2229
2230out:
2231 return ret;
2232}
2233
c7bfa2fd
MP
2234static int cs_etm__process_traceid_queue(struct cs_etm_queue *etmq,
2235 struct cs_etm_traceid_queue *tidq)
3fa0e83e
MP
2236{
2237 int ret;
5f7cb035
MP
2238 struct cs_etm_packet_queue *packet_queue;
2239
c7bfa2fd 2240 packet_queue = &tidq->packet_queue;
3fa0e83e 2241
882f4874
MP
2242 /* Process each packet in this chunk */
2243 while (1) {
2244 ret = cs_etm_decoder__get_packet(packet_queue,
c7bfa2fd 2245 tidq->packet);
882f4874
MP
2246 if (ret <= 0)
2247 /*
2248 * Stop processing this chunk on
2249 * end of data or error
2250 */
2251 break;
3fa0e83e 2252
882f4874
MP
2253 /*
2254 * Packet addresses are swapped by the packet
2255 * handling within the switch() statement below,
2256 * so sample flags must be set prior to the
2257 * switch() statement in order to use the address
2258 * information from before the packets are swapped.
2259 */
c7bfa2fd 2260 ret = cs_etm__set_sample_flags(etmq, tidq);
882f4874
MP
2261 if (ret < 0)
2262 break;
2263
c7bfa2fd 2264 switch (tidq->packet->sample_type) {
882f4874
MP
2265 case CS_ETM_RANGE:
2266 /*
2267 * If the packet contains an instruction
2268 * range, generate instruction sequence
2269 * events.
2270 */
c7bfa2fd 2271 cs_etm__sample(etmq, tidq);
882f4874
MP
2272 break;
2273 case CS_ETM_EXCEPTION:
2274 case CS_ETM_EXCEPTION_RET:
3fa0e83e 2275 /*
882f4874
MP
2276 * On an exception packet, make sure
2277 * the previous instruction range
2278 * packet is handled properly.
3fa0e83e 2279 */
c7bfa2fd 2280 cs_etm__exception(tidq);
882f4874
MP
2281 break;
2282 case CS_ETM_DISCONTINUITY:
2283 /*
2284 * Discontinuity in trace, flush
2285 * previous branch stack
2286 */
c7bfa2fd 2287 cs_etm__flush(etmq, tidq);
882f4874
MP
2288 break;
2289 case CS_ETM_EMPTY:
2290 /*
2291 * Should not receive empty packet,
2292 * report error.
2293 */
2294 pr_err("CS ETM Trace: empty packet\n");
2295 return -EINVAL;
2296 default:
2297 break;
3fa0e83e 2298 }
882f4874 2299 }
3fa0e83e
MP
2300
2301 return ret;
2302}
2303
21fe8dc1
MP
2304static void cs_etm__clear_all_traceid_queues(struct cs_etm_queue *etmq)
2305{
2306 int idx;
2307 struct int_node *inode;
2308 struct cs_etm_traceid_queue *tidq;
2309 struct intlist *traceid_queues_list = etmq->traceid_queues_list;
2310
2311 intlist__for_each_entry(inode, traceid_queues_list) {
2312 idx = (int)(intptr_t)inode->priv;
2313 tidq = etmq->traceid_queues[idx];
2314
2315 /* Ignore return value */
2316 cs_etm__process_traceid_queue(etmq, tidq);
2317
2318 /*
2319 * Generate an instruction sample with the remaining
2320 * branchstack entries.
2321 */
2322 cs_etm__flush(etmq, tidq);
2323 }
2324}
2325
9f878b29
MP
2326static int cs_etm__run_decoder(struct cs_etm_queue *etmq)
2327{
9f878b29 2328 int err = 0;
c7bfa2fd
MP
2329 struct cs_etm_traceid_queue *tidq;
2330
2331 tidq = cs_etm__etmq_get_traceid_queue(etmq, CS_ETM_PER_THREAD_TRACEID);
2332 if (!tidq)
2333 return -EINVAL;
9f878b29 2334
9f878b29 2335 /* Go through each buffer in the queue and decode them one by one */
e573e978 2336 while (1) {
8224531c
MP
2337 err = cs_etm__get_data_block(etmq);
2338 if (err <= 0)
2339 return err;
9f878b29 2340
e573e978
RW
2341 /* Run trace decoder until buffer consumed or end of trace */
2342 do {
f74f349c 2343 err = cs_etm__decode_data_block(etmq);
e573e978
RW
2344 if (err)
2345 return err;
2346
3fa0e83e
MP
2347 /*
2348 * Process each packet in this chunk; if an error
2349 * occurs there is nothing to do other than hope
2350 * the next one will be better.
2351 */
c7bfa2fd 2352 err = cs_etm__process_traceid_queue(etmq, tidq);
e573e978 2353
23cfcd6d 2354 } while (etmq->buf_len);
b12235b1 2355
256e751c
RW
2356 if (err == 0)
2357 /* Flush any remaining branch stack entries */
c7bfa2fd 2358 err = cs_etm__end_block(etmq, tidq);
e573e978 2359 }
9f878b29
MP
2360
2361 return err;
2362}
2363
2364static int cs_etm__process_timeless_queues(struct cs_etm_auxtrace *etm,
fc7ac413 2365 pid_t tid)
9f878b29
MP
2366{
2367 unsigned int i;
2368 struct auxtrace_queues *queues = &etm->queues;
2369
2370 for (i = 0; i < queues->nr_queues; i++) {
2371 struct auxtrace_queue *queue = &etm->queues.queue_array[i];
2372 struct cs_etm_queue *etmq = queue->priv;
0abb868b
MP
2373 struct cs_etm_traceid_queue *tidq;
2374
2375 if (!etmq)
2376 continue;
2377
2378 tidq = cs_etm__etmq_get_traceid_queue(etmq,
2379 CS_ETM_PER_THREAD_TRACEID);
2380
2381 if (!tidq)
2382 continue;
9f878b29 2383
0abb868b 2384 if ((tid == -1) || (tidq->tid == tid)) {
0a6be300 2385 cs_etm__set_pid_tid_cpu(etm, tidq);
9f878b29
MP
2386 cs_etm__run_decoder(etmq);
2387 }
2388 }
2389
2390 return 0;
2391}
2392
21fe8dc1
MP
2393static int cs_etm__process_queues(struct cs_etm_auxtrace *etm)
2394{
2395 int ret = 0;
9ac8afd5 2396 unsigned int cs_queue_nr, queue_nr, i;
21fe8dc1 2397 u8 trace_chan_id;
aadd6ba4 2398 u64 cs_timestamp;
21fe8dc1
MP
2399 struct auxtrace_queue *queue;
2400 struct cs_etm_queue *etmq;
2401 struct cs_etm_traceid_queue *tidq;
2402
9ac8afd5
JC
2403 /*
2404 * Pre-populate the heap with one entry from each queue so that we can
2405 * start processing in time order across all queues.
2406 */
2407 for (i = 0; i < etm->queues.nr_queues; i++) {
2408 etmq = etm->queues.queue_array[i].priv;
2409 if (!etmq)
2410 continue;
2411
2412 ret = cs_etm__queue_first_cs_timestamp(etm, etmq, i);
2413 if (ret)
2414 return ret;
2415 }
2416
21fe8dc1
MP
2417 while (1) {
2418 if (!etm->heap.heap_cnt)
2419 goto out;
2420
2421 /* Take the entry at the top of the min heap */
2422 cs_queue_nr = etm->heap.heap_array[0].queue_nr;
2423 queue_nr = TO_QUEUE_NR(cs_queue_nr);
2424 trace_chan_id = TO_TRACE_CHAN_ID(cs_queue_nr);
2425 queue = &etm->queues.queue_array[queue_nr];
2426 etmq = queue->priv;
2427
2428 /*
2429 * Remove the top entry from the heap since we are about
2430 * to process it.
2431 */
2432 auxtrace_heap__pop(&etm->heap);
2433
2434 tidq = cs_etm__etmq_get_traceid_queue(etmq, trace_chan_id);
2435 if (!tidq) {
2436 /*
2437 * No traceID queue has been allocated for this traceID,
2438 * which means something somewhere went very wrong. There is
2439 * no other choice than to simply exit.
2440 */
2441 ret = -EINVAL;
2442 goto out;
2443 }
2444
2445 /*
2446 * Packets associated with this timestamp are already in
2447 * the etmq's traceID queue, so process them.
2448 */
2449 ret = cs_etm__process_traceid_queue(etmq, tidq);
2450 if (ret < 0)
2451 goto out;
2452
2453 /*
2454 * Packets for this timestamp have been processed, time to
2455 * move on to the next timestamp, fetching a new auxtrace_buffer
2456 * if need be.
2457 */
2458refetch:
2459 ret = cs_etm__get_data_block(etmq);
2460 if (ret < 0)
2461 goto out;
2462
2463 /*
2464 * No more auxtrace_buffers to process in this etmq, simply
2465 * move on to another entry in the auxtrace_heap.
2466 */
2467 if (!ret)
2468 continue;
2469
2470 ret = cs_etm__decode_data_block(etmq);
2471 if (ret)
2472 goto out;
2473
aadd6ba4 2474 cs_timestamp = cs_etm__etmq_get_timestamp(etmq, &trace_chan_id);
21fe8dc1 2475
aadd6ba4 2476 if (!cs_timestamp) {
21fe8dc1
MP
2477 /*
2478 * Function cs_etm__decode_data_block() returns when
2479 * there are no more traces to decode in the current
2480 * auxtrace_buffer OR when a timestamp has been
2481 * encountered on any of the traceID queues. Since we
2482 * did not get a timestamp, there are no more traces to
2483 * process in this auxtrace_buffer. As such, empty and
2484 * flush all traceID queues.
2485 */
2486 cs_etm__clear_all_traceid_queues(etmq);
2487
2488 /* Fetch another auxtrace_buffer for this etmq */
2489 goto refetch;
2490 }
2491
2492 /*
2493 * Add to the min heap the timestamp for packets that have
2494 * just been decoded. They will be processed and synthesized
2495 * during the next call to cs_etm__process_traceid_queue() for
2496 * this queue/traceID.
2497 */
2498 cs_queue_nr = TO_CS_QUEUE_NR(queue_nr, trace_chan_id);
aadd6ba4 2499 ret = auxtrace_heap__add(&etm->heap, cs_queue_nr, cs_timestamp);
21fe8dc1
MP
2500 }
2501
2502out:
2503 return ret;
2504}
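/*
 * Note on the heap keys used above (illustrative): each cs_queue_nr
 * packs the auxtrace queue number together with the traceID, so that
 * popping the smallest timestamp also identifies which traceID queue
 * produced it. A key built with TO_CS_QUEUE_NR(3, 0x10) round-trips
 * through TO_QUEUE_NR() and TO_TRACE_CHAN_ID() back to queue_nr == 3
 * and trace_chan_id == 0x10.
 */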
2505
a465f3c3
MP
2506static int cs_etm__process_itrace_start(struct cs_etm_auxtrace *etm,
2507 union perf_event *event)
2508{
2509 struct thread *th;
2510
2511 if (etm->timeless_decoding)
2512 return 0;
2513
2514 /*
2515 * Add the tid/pid to the log so that we can get a match when
2516 * we get a contextID from the decoder.
2517 */
2518 th = machine__findnew_thread(etm->machine,
2519 event->itrace_start.pid,
2520 event->itrace_start.tid);
2521 if (!th)
2522 return -ENOMEM;
2523
2524 thread__put(th);
2525
2526 return 0;
2527}
2528
e0d170fa
MP
2529static int cs_etm__process_switch_cpu_wide(struct cs_etm_auxtrace *etm,
2530 union perf_event *event)
2531{
2532 struct thread *th;
2533 bool out = event->header.misc & PERF_RECORD_MISC_SWITCH_OUT;
2534
2535 /*
2536 * Context switch events in per-thread mode are irrelevant since perf
2537 * will start/stop tracing as the process is scheduled.
2538 */
2539 if (etm->timeless_decoding)
2540 return 0;
2541
2542 /*
2543 * SWITCH_IN events carry the process being switched out, while
2544 * SWITCH_OUT events carry the process being switched in. As such
2545 * we don't care about IN events.
2546 */
2547 if (!out)
2548 return 0;
2549
2550 /*
2551 * Add the tid/pid to the log so that we can get a match when
2552 * we get a contextID from the decoder.
2553 */
2554 th = machine__findnew_thread(etm->machine,
2555 event->context_switch.next_prev_pid,
2556 event->context_switch.next_prev_tid);
2557 if (!th)
2558 return -ENOMEM;
2559
2560 thread__put(th);
2561
2562 return 0;
2563}
2564
440a23b3
MP
2565static int cs_etm__process_event(struct perf_session *session,
2566 union perf_event *event,
2567 struct perf_sample *sample,
2568 struct perf_tool *tool)
2569{
aadd6ba4 2570 u64 sample_kernel_timestamp;
20d9c478
MP
2571 struct cs_etm_auxtrace *etm = container_of(session->auxtrace,
2572 struct cs_etm_auxtrace,
2573 auxtrace);
2574
20d9c478
MP
2575 if (dump_trace)
2576 return 0;
2577
2578 if (!tool->ordered_events) {
2579 pr_err("CoreSight ETM Trace requires ordered events\n");
2580 return -EINVAL;
2581 }
2582
20d9c478 2583 if (sample->time && (sample->time != (u64) -1))
aadd6ba4 2584 sample_kernel_timestamp = sample->time;
20d9c478 2585 else
aadd6ba4 2586 sample_kernel_timestamp = 0;
20d9c478 2587
0323dea3
JC
2588 /*
2589 * Don't wait for cs_etm__flush_events() in per-thread/timeless mode to start the decode. We
2590 * need the tid of the PERF_RECORD_EXIT event to assign to the synthesised samples because
2591 * ETM_OPT_CTXTID is not enabled.
2592 */
21fe8dc1
MP
2593 if (etm->timeless_decoding &&
2594 event->header.type == PERF_RECORD_EXIT)
9f878b29 2595 return cs_etm__process_timeless_queues(etm,
fc7ac413 2596 event->fork.tid);
9f878b29 2597
a465f3c3
MP
2598 if (event->header.type == PERF_RECORD_ITRACE_START)
2599 return cs_etm__process_itrace_start(etm, event);
e0d170fa
MP
2600 else if (event->header.type == PERF_RECORD_SWITCH_CPU_WIDE)
2601 return cs_etm__process_switch_cpu_wide(etm, event);
a465f3c3 2602
1ac9e0b5
JC
2603 if (!etm->timeless_decoding && event->header.type == PERF_RECORD_AUX) {
2604 /*
2605 * Record the latest kernel timestamp available in the header
2606 * for samples so that synthesised samples occur from this point
2607 * onwards.
2608 */
2609 etm->latest_kernel_timestamp = sample_kernel_timestamp;
1ac9e0b5 2610 }
21fe8dc1 2611
440a23b3
MP
2612 return 0;
2613}
2614
48e8a7b5
JC
2615static void dump_queued_data(struct cs_etm_auxtrace *etm,
2616 struct perf_record_auxtrace *event)
2617{
2618 struct auxtrace_buffer *buf;
2619 unsigned int i;
2620 /*
2621 * Find all buffers with same reference in the queues and dump them.
2622 * This is because the queues can contain multiple entries of the same
2623 * buffer that were split on aux records.
2624 */
2625 for (i = 0; i < etm->queues.nr_queues; ++i)
2626 list_for_each_entry(buf, &etm->queues.queue_array[i].head, list)
2627 if (buf->reference == event->reference)
04aaad26 2628 cs_etm__dump_event(etm->queues.queue_array[i].priv, buf);
48e8a7b5
JC
2629}
2630
440a23b3
MP
2631static int cs_etm__process_auxtrace_event(struct perf_session *session,
2632 union perf_event *event,
68ffe390 2633 struct perf_tool *tool __maybe_unused)
440a23b3 2634{
68ffe390
MP
2635 struct cs_etm_auxtrace *etm = container_of(session->auxtrace,
2636 struct cs_etm_auxtrace,
2637 auxtrace);
2638 if (!etm->data_queued) {
2639 struct auxtrace_buffer *buffer;
2640 off_t data_offset;
2641 int fd = perf_data__fd(session->data);
2642 bool is_pipe = perf_data__is_pipe(session->data);
2643 int err;
ca50db59 2644 int idx = event->auxtrace.idx;
68ffe390
MP
2645
2646 if (is_pipe)
2647 data_offset = 0;
2648 else {
2649 data_offset = lseek(fd, 0, SEEK_CUR);
2650 if (data_offset == -1)
2651 return -errno;
2652 }
2653
2654 err = auxtrace_queues__add_event(&etm->queues, session,
2655 event, data_offset, &buffer);
2656 if (err)
2657 return err;
2658
9182f04a
JC
2659 /*
2660 * Knowing if the trace is formatted or not requires a lookup of
2661 * the aux record so only works in non-piped mode where data is
2662 * queued in cs_etm__queue_aux_records(). Always assume
2663 * formatted in piped mode (true).
2664 */
ca50db59 2665 err = cs_etm__setup_queue(etm, &etm->queues.queue_array[idx],
9182f04a 2666 idx, true);
ca50db59
JC
2667 if (err)
2668 return err;
2669
68ffe390
MP
2670 if (dump_trace)
2671 if (auxtrace_buffer__get_data(buffer, fd)) {
04aaad26 2672 cs_etm__dump_event(etm->queues.queue_array[idx].priv, buffer);
68ffe390
MP
2673 auxtrace_buffer__put_data(buffer);
2674 }
48e8a7b5
JC
2675 } else if (dump_trace)
2676 dump_queued_data(etm, &event->auxtrace);
68ffe390 2677
440a23b3
MP
2678 return 0;
2679}
2680
2681static bool cs_etm__is_timeless_decoding(struct cs_etm_auxtrace *etm)
2682{
32dcd021 2683 struct evsel *evsel;
63503dba 2684 struct evlist *evlist = etm->session->evlist;
440a23b3
MP
2685 bool timeless_decoding = true;
2686
c36c1ef6
JC
2687 /* Override timeless mode with user input from --itrace=Z */
2688 if (etm->synth_opts.timeless_decoding)
2689 return true;
2690
440a23b3
MP
2691 /*
2692 * Loop through the list of events; if any of them has the time
2693 * bit set, timestamps are present and decoding is not timeless.
2694 */
2695 evlist__for_each_entry(evlist, evsel) {
1fc632ce 2696 if ((evsel->core.attr.sample_type & PERF_SAMPLE_TIME))
440a23b3
MP
2697 timeless_decoding = false;
2698 }
2699
2700 return timeless_decoding;
2701}
2702
42b2b570
ML
2703/*
2704 * Read a single cpu parameter block from the auxtrace_info priv block.
2705 *
2706 * For version 1 there is a per cpu nr_params entry. If we are handling
2707 * version 1 file, then there may be less, the same, or more params
2708 * indicated by this value than the compile time number we understand.
2709 *
2710 * For a version 0 info block, there are a fixed number, and we need to
2711 * fill out the nr_param value in the metadata we create.
2712 */
2713static u64 *cs_etm__create_meta_blk(u64 *buff_in, int *buff_in_offset,
2714 int out_blk_size, int nr_params_v0)
2715{
2716 u64 *metadata = NULL;
2717 int hdr_version;
2718 int nr_in_params, nr_out_params, nr_cmn_params;
2719 int i, k;
2720
2721 metadata = zalloc(sizeof(*metadata) * out_blk_size);
2722 if (!metadata)
2723 return NULL;
2724
2725 /* read block current index & version */
2726 i = *buff_in_offset;
2727 hdr_version = buff_in[CS_HEADER_VERSION];
2728
2729 if (!hdr_version) {
2730 /* read version 0 info block into a version 1 metadata block */
2731 nr_in_params = nr_params_v0;
2732 metadata[CS_ETM_MAGIC] = buff_in[i + CS_ETM_MAGIC];
2733 metadata[CS_ETM_CPU] = buff_in[i + CS_ETM_CPU];
2734 metadata[CS_ETM_NR_TRC_PARAMS] = nr_in_params;
2735 /* remaining block params at offset +1 from source */
2736 for (k = CS_ETM_COMMON_BLK_MAX_V1 - 1; k < nr_in_params; k++)
2737 metadata[k + 1] = buff_in[i + k];
2738 /* version 0 has 2 common params */
2739 nr_cmn_params = 2;
2740 } else {
2741 /* read version 1 info block - input and output nr_params may differ */
2742 /* version 1 has 3 common params */
2743 nr_cmn_params = 3;
2744 nr_in_params = buff_in[i + CS_ETM_NR_TRC_PARAMS];
2745
2746 /* if input has more params than output - skip excess */
2747 nr_out_params = nr_in_params + nr_cmn_params;
2748 if (nr_out_params > out_blk_size)
2749 nr_out_params = out_blk_size;
2750
2751 for (k = CS_ETM_MAGIC; k < nr_out_params; k++)
2752 metadata[k] = buff_in[i + k];
2753
2754 /* record the actual nr params we copied */
2755 metadata[CS_ETM_NR_TRC_PARAMS] = nr_out_params - nr_cmn_params;
2756 }
2757
2758 /* adjust in offset by number of in params used */
2759 i += nr_in_params + nr_cmn_params;
2760 *buff_in_offset = i;
2761 return metadata;
2762}
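/*
 * Worked example (illustrative): reading a version 0 ETMv4 block. A v0
 * block carries no NR_TRC_PARAMS word, so the created metadata block
 * inserts one after MAGIC and CPU:
 *
 *   v0 input:  [MAGIC][CPU][param 0]...[param N-1]
 *   created:   [MAGIC][CPU][NR_TRC_PARAMS = N][param 0]...[param N-1]
 *
 * which is why the version 0 path above copies the remaining per-ETM
 * params to an offset of +1 from the source.
 */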
2763
83d1fc92
JC
2764/**
2765 * Puts a fragment of an auxtrace buffer into the auxtrace queues based
2766 * on the bounds of aux_event, if it matches with the buffer that's at
2767 * file_offset.
2768 *
2769 * Normally, whole auxtrace buffers would be added to the queue. But we
2770 * want to reset the decoder for every PERF_RECORD_AUX event, and the decoder
2771 * is reset across each buffer, so splitting the buffers up in advance has
2772 * the same effect.
2773 */
2774static int cs_etm__queue_aux_fragment(struct perf_session *session, off_t file_offset, size_t sz,
2775 struct perf_record_aux *aux_event, struct perf_sample *sample)
2776{
2777 int err;
2778 char buf[PERF_SAMPLE_MAX_SIZE];
2779 union perf_event *auxtrace_event_union;
2780 struct perf_record_auxtrace *auxtrace_event;
2781 union perf_event auxtrace_fragment;
2782 __u64 aux_offset, aux_size;
ca50db59 2783 __u32 idx;
9182f04a 2784 bool formatted;
83d1fc92
JC
2785
2786 struct cs_etm_auxtrace *etm = container_of(session->auxtrace,
2787 struct cs_etm_auxtrace,
2788 auxtrace);
2789
2790 /*
2791 * There should be a PERF_RECORD_AUXTRACE event at the file_offset that we got
2792 * from looping through the auxtrace index.
2793 */
2794 err = perf_session__peek_event(session, file_offset, buf,
2795 PERF_SAMPLE_MAX_SIZE, &auxtrace_event_union, NULL);
2796 if (err)
2797 return err;
2798 auxtrace_event = &auxtrace_event_union->auxtrace;
2799 if (auxtrace_event->header.type != PERF_RECORD_AUXTRACE)
2800 return -EINVAL;
2801
2802 if (auxtrace_event->header.size < sizeof(struct perf_record_auxtrace) ||
2803 auxtrace_event->header.size != sz) {
2804 return -EINVAL;
2805 }
2806
2807 /*
b6521ea2
ML
2808 * In per-thread mode, auxtrace CPU is set to -1, but TID will be set instead. See
2809 * auxtrace_mmap_params__set_idx(). However, the sample AUX event will contain a
2810 * CPU as we set this always for the AUX_OUTPUT_HW_ID event.
2811 * So now compare only TIDs if auxtrace CPU is -1, and CPUs if auxtrace CPU is not -1.
2812 * Return 'not found' if mismatch.
83d1fc92 2813 */
b6521ea2
ML
2814 if (auxtrace_event->cpu == (__u32) -1) {
2815 if (auxtrace_event->tid != sample->tid)
2816 return 1;
2817 } else if (auxtrace_event->cpu != sample->cpu)
83d1fc92
JC
2818 return 1;
2819
2820 if (aux_event->flags & PERF_AUX_FLAG_OVERWRITE) {
2821 /*
2822 * Clamp size in snapshot mode. The buffer size is clamped in
2823 * __auxtrace_mmap__read() for snapshots, so the aux record size doesn't reflect
2824 * the buffer size.
2825 */
2826 aux_size = min(aux_event->aux_size, auxtrace_event->size);
2827
2828 /*
2829 * In this mode the head also points to the end of the buffer, so aux_offset
2830 * needs to have the size subtracted to point to the beginning, as in normal mode.
2831 */
2832 aux_offset = aux_event->aux_offset - aux_size;
2833 } else {
2834 aux_size = aux_event->aux_size;
2835 aux_offset = aux_event->aux_offset;
2836 }
2837
2838 if (aux_offset >= auxtrace_event->offset &&
2839 aux_offset + aux_size <= auxtrace_event->offset + auxtrace_event->size) {
2840 /*
2841 * If this AUX event was inside this buffer somewhere, create a new auxtrace event
2842 * based on the sizes of the aux event, and queue that fragment.
2843 */
2844 auxtrace_fragment.auxtrace = *auxtrace_event;
2845 auxtrace_fragment.auxtrace.size = aux_size;
2846 auxtrace_fragment.auxtrace.offset = aux_offset;
2847 file_offset += aux_offset - auxtrace_event->offset + auxtrace_event->header.size;
2848
2849 pr_debug3("CS ETM: Queue buffer size: %#"PRI_lx64" offset: %#"PRI_lx64
2850 " tid: %d cpu: %d\n", aux_size, aux_offset, sample->tid, sample->cpu);
ca50db59
JC
2851 err = auxtrace_queues__add_event(&etm->queues, session, &auxtrace_fragment,
2852 file_offset, NULL);
2853 if (err)
2854 return err;
2855
2856 idx = auxtrace_event->idx;
9182f04a
JC
2857 formatted = !(aux_event->flags & PERF_AUX_FLAG_CORESIGHT_FORMAT_RAW);
2858 return cs_etm__setup_queue(etm, &etm->queues.queue_array[idx],
2859 idx, formatted);
83d1fc92
JC
2860 }
2861
2862 /* Wasn't inside this buffer, but there were no parse errors. 1 == 'not found' */
2863 return 1;
2864}
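/*
 * Worked example (illustrative numbers): in snapshot mode the AUX
 * record's head points at the end of the captured data, so with
 * aux_offset == 0x5000 and aux_size == 0x1000 the fragment really
 * starts at 0x5000 - 0x1000 == 0x4000. After the adjustment above,
 * aux_offset has the same meaning as in normal mode and the bounds
 * check against the auxtrace buffer works unchanged.
 */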
2865
b6521ea2
ML
2866static int cs_etm__process_aux_hw_id_cb(struct perf_session *session, union perf_event *event,
2867 u64 offset __maybe_unused, void *data __maybe_unused)
2868{
2869 /* look to handle PERF_RECORD_AUX_OUTPUT_HW_ID early to ensure decoders can be set up */
2870 if (event->header.type == PERF_RECORD_AUX_OUTPUT_HW_ID) {
2871 (*(int *)data)++; /* increment found count */
2872 return cs_etm__process_aux_output_hw_id(session, event);
2873 }
2874 return 0;
2875}
2876
83d1fc92
JC
2877static int cs_etm__queue_aux_records_cb(struct perf_session *session, union perf_event *event,
2878 u64 offset __maybe_unused, void *data __maybe_unused)
2879{
2880 struct perf_sample sample;
2881 int ret;
2882 struct auxtrace_index_entry *ent;
2883 struct auxtrace_index *auxtrace_index;
2884 struct evsel *evsel;
2885 size_t i;
2886
2887 /* Don't care about any other events, we're only queuing buffers for AUX events */
2888 if (event->header.type != PERF_RECORD_AUX)
2889 return 0;
2890
2891 if (event->header.size < sizeof(struct perf_record_aux))
2892 return -EINVAL;
2893
2894 /* Truncated Aux records can have 0 size and shouldn't result in anything being queued. */
2895 if (!event->aux.aux_size)
2896 return 0;
2897
2898 /*
2899 * Parse the sample; we need the sample_id_all data that comes after the event so that the
2900 * CPU or PID can be matched to an AUXTRACE buffer's CPU or PID.
2901 */
2902 evsel = evlist__event2evsel(session->evlist, event);
2903 if (!evsel)
2904 return -EINVAL;
2905 ret = evsel__parse_sample(evsel, event, &sample);
2906 if (ret)
2907 return ret;
2908
2909 /*
2910 * Loop through the auxtrace index to find the buffer that matches up with this aux event.
2911 */
2912 list_for_each_entry(auxtrace_index, &session->auxtrace_index, list) {
2913 for (i = 0; i < auxtrace_index->nr; i++) {
2914 ent = &auxtrace_index->entries[i];
2915 ret = cs_etm__queue_aux_fragment(session, ent->file_offset,
2916 ent->sz, &event->aux, &sample);
2917 /*
2918 * Stop search on error or successful values. Continue search on
2919 * 1 ('not found')
2920 */
2921 if (ret != 1)
2922 return ret;
2923 }
2924 }
2925
2926 /*
2927 * Couldn't find the buffer corresponding to this aux record, something went wrong. Warn but
2928 * don't exit with an error because it will still be possible to decode other aux records.
2929 */
2930 pr_err("CS ETM: Couldn't find auxtrace buffer for aux_offset: %#"PRI_lx64
2931 " tid: %d cpu: %d\n", event->aux.aux_offset, sample.tid, sample.cpu);
2932 return 0;
2933}
2934
2935static int cs_etm__queue_aux_records(struct perf_session *session)
2936{
2937 struct auxtrace_index *index = list_first_entry_or_null(&session->auxtrace_index,
2938 struct auxtrace_index, list);
2939 if (index && index->nr > 0)
2940 return perf_session__peek_events(session, session->header.data_offset,
2941 session->header.data_size,
2942 cs_etm__queue_aux_records_cb, NULL);
2943
2944 /*
2945 * We would get here if there are no entries in the index (either no auxtrace
2946 * buffers or no index at all). Fail silently as there is the possibility of
2947 * queueing them in cs_etm__process_auxtrace_event() if etm->data_queued is still
2948 * false.
2949 *
2950 * In that scenario, buffers will not be split by AUX records.
2951 */
2952 return 0;
2953}
2954
a7fe9a44
GG
2955#define HAS_PARAM(j, type, param) (metadata[(j)][CS_ETM_NR_TRC_PARAMS] <= \
2956 (CS_##type##_##param - CS_ETM_COMMON_BLK_MAX_V1))
2957
2958/*
2959 * Loop through the ETMs and complain if we find at least one where ts_source != 1 (virtual
2960 * timestamps).
2961 */
2962static bool cs_etm__has_virtual_ts(u64 **metadata, int num_cpu)
2963{
2964 int j;
2965
2966 for (j = 0; j < num_cpu; j++) {
2967 switch (metadata[j][CS_ETM_MAGIC]) {
2968 case __perf_cs_etmv4_magic:
2969 if (HAS_PARAM(j, ETMV4, TS_SOURCE) || metadata[j][CS_ETMV4_TS_SOURCE] != 1)
2970 return false;
2971 break;
2972 case __perf_cs_ete_magic:
2973 if (HAS_PARAM(j, ETE, TS_SOURCE) || metadata[j][CS_ETE_TS_SOURCE] != 1)
2974 return false;
2975 break;
2976 default:
2977 /* Unknown / unsupported magic number. */
2978 return false;
2979 }
2980 }
2981 return true;
2982}
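/*
 * Worked example (illustrative): despite its name, HAS_PARAM() above is
 * true when the requested parameter is *absent*. E.g. for an ETMv4
 * block recorded with NR_TRC_PARAMS == 9, only per-ETM slots 0..8 are
 * present; if CS_ETMV4_TS_SOURCE - CS_ETM_COMMON_BLK_MAX_V1 >= 9, the
 * timestamp source was never recorded, so cs_etm__has_virtual_ts()
 * conservatively returns false.
 */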
2983
09277295
ML
2984/* map trace ids to correct metadata block, from information in metadata */
2985static int cs_etm__map_trace_ids_metadata(int num_cpu, u64 **metadata)
2986{
2987 u64 cs_etm_magic;
2988 u8 trace_chan_id;
2989 int i, err;
2990
2991 for (i = 0; i < num_cpu; i++) {
2992 cs_etm_magic = metadata[i][CS_ETM_MAGIC];
2993 switch (cs_etm_magic) {
2994 case __perf_cs_etmv3_magic:
b6521ea2
ML
2995 metadata[i][CS_ETM_ETMTRACEIDR] &= CORESIGHT_TRACE_ID_VAL_MASK;
2996 trace_chan_id = (u8)(metadata[i][CS_ETM_ETMTRACEIDR]);
09277295
ML
2997 break;
2998 case __perf_cs_etmv4_magic:
2999 case __perf_cs_ete_magic:
b6521ea2
ML
3000 metadata[i][CS_ETMV4_TRCTRACEIDR] &= CORESIGHT_TRACE_ID_VAL_MASK;
3001 trace_chan_id = (u8)(metadata[i][CS_ETMV4_TRCTRACEIDR]);
09277295
ML
3002 break;
3003 default:
3004 /* unknown magic number */
3005 return -EINVAL;
3006 }
3007 err = cs_etm__map_trace_id(trace_chan_id, metadata[i]);
3008 if (err)
3009 return err;
3010 }
3011 return 0;
3012}
3013
b6521ea2
ML
3014/*
3015 * If we found AUX_HW_ID packets, then set any metadata marked as unused to the
3016 * unused value to reduce the number of unneeded decoders created.
3017 */
3018static int cs_etm__clear_unused_trace_ids_metadata(int num_cpu, u64 **metadata)
3019{
3020 u64 cs_etm_magic;
3021 int i;
3022
3023 for (i = 0; i < num_cpu; i++) {
3024 cs_etm_magic = metadata[i][CS_ETM_MAGIC];
3025 switch (cs_etm_magic) {
3026 case __perf_cs_etmv3_magic:
3027 if (metadata[i][CS_ETM_ETMTRACEIDR] & CORESIGHT_TRACE_ID_UNUSED_FLAG)
3028 metadata[i][CS_ETM_ETMTRACEIDR] = CORESIGHT_TRACE_ID_UNUSED_VAL;
3029 break;
3030 case __perf_cs_etmv4_magic:
3031 case __perf_cs_ete_magic:
3032 if (metadata[i][CS_ETMV4_TRCTRACEIDR] & CORESIGHT_TRACE_ID_UNUSED_FLAG)
3033 metadata[i][CS_ETMV4_TRCTRACEIDR] = CORESIGHT_TRACE_ID_UNUSED_VAL;
3034 break;
3035 default:
3036 /* unknown magic number */
3037 return -EINVAL;
3038 }
3039 }
3040 return 0;
3041}
3042
55c1de99
JC
3043int cs_etm__process_auxtrace_info_full(union perf_event *event,
3044 struct perf_session *session)
440a23b3 3045{
72932371 3046 struct perf_record_auxtrace_info *auxtrace_info = &event->auxtrace_info;
440a23b3 3047 struct cs_etm_auxtrace *etm = NULL;
a7fe9a44 3048 struct perf_record_time_conv *tc = &session->time_conv;
440a23b3 3049 int event_header_size = sizeof(struct perf_event_header);
440a23b3 3050 int total_size = auxtrace_info->header.size;
cd8bfd8c 3051 int priv_size = 0;
09277295 3052 int num_cpu;
42b2b570 3053 int err = 0;
b6521ea2 3054 int aux_hw_id_found;
42b2b570 3055 int i, j;
fd63091f 3056 u64 *ptr = NULL;
cd8bfd8c 3057 u64 **metadata = NULL;
b00204f5 3058
cd8bfd8c 3059 /*
95c6fe97
LY
3060 * Create an RB tree for traceID-metadata tuples. Since the conversion
3061 * has to be made for each packet that gets decoded, optimizing access
3062 * with anything better than a sequential array scan is worth doing.
cd8bfd8c
TJ
3063 */
3064 traceid_list = intlist__new(NULL);
fd63091f
JC
3065 if (!traceid_list)
3066 return -ENOMEM;
cd8bfd8c 3067
55c1de99
JC
3068 /* First the global part */
3069 ptr = (u64 *) auxtrace_info->priv;
3070 num_cpu = ptr[CS_PMU_TYPE_CPUS] & 0xffffffff;
cd8bfd8c
TJ
3071 metadata = zalloc(sizeof(*metadata) * num_cpu);
3072 if (!metadata) {
3073 err = -ENOMEM;
3074 goto err_free_traceid_list;
3075 }
3076
fd63091f
JC
3077 /* Start parsing after the common part of the header */
3078 i = CS_HEADER_VERSION_MAX;
3079
cd8bfd8c
TJ
3080 /*
3081 * The metadata is stored in the auxtrace_info section and encodes
3082 * the configuration of the ARM embedded trace macrocell which is
3083 * required by the trace decoder to properly decode the trace due
3084 * to its highly compressed nature.
3085 */
3086 for (j = 0; j < num_cpu; j++) {
3087 if (ptr[i] == __perf_cs_etmv3_magic) {
42b2b570
ML
3088 metadata[j] =
3089 cs_etm__create_meta_blk(ptr, &i,
3090 CS_ETM_PRIV_MAX,
3091 CS_ETM_NR_TRC_PARAMS_V0);
cd8bfd8c 3092 } else if (ptr[i] == __perf_cs_etmv4_magic) {
42b2b570
ML
3093 metadata[j] =
3094 cs_etm__create_meta_blk(ptr, &i,
3095 CS_ETMV4_PRIV_MAX,
3096 CS_ETMV4_NR_TRC_PARAMS_V0);
51ba8811
JC
3097 } else if (ptr[i] == __perf_cs_ete_magic) {
3098 metadata[j] = cs_etm__create_meta_blk(ptr, &i, CS_ETE_PRIV_MAX, -1);
a80aea64
JC
3099 } else {
3100 ui__error("CS ETM Trace: Unrecognised magic number %#"PRIx64". File could be from a newer version of perf.\n",
3101 ptr[i]);
3102 err = -EINVAL;
3103 goto err_free_metadata;
42b2b570
ML
3104 }
3105
3106 if (!metadata[j]) {
3107 err = -ENOMEM;
3108 goto err_free_metadata;
cd8bfd8c 3109 }
cd8bfd8c
TJ
3110 }
3111
3112 /*
42b2b570 3113 * Each of CS_HEADER_VERSION_MAX, CS_ETM_PRIV_MAX and
cd8bfd8c
TJ
3114 * CS_ETMV4_PRIV_MAX marks how many double words are in the
3115 * global metadata and in each cpu's metadata respectively.
3116 * The following tests whether the correct number of double words was
3117 * present in the auxtrace info section.
3118 */
55c1de99 3119 priv_size = total_size - event_header_size - INFO_HEADER_SIZE;
cd8bfd8c
TJ
3120 if (i * 8 != priv_size) {
3121 err = -EINVAL;
3122 goto err_free_metadata;
3123 }
3124
440a23b3
MP
3125 etm = zalloc(sizeof(*etm));
3126
cd8bfd8c 3127 if (!etm) {
440a23b3 3128 err = -ENOMEM;
cd8bfd8c
TJ
3129 goto err_free_metadata;
3130 }
440a23b3
MP
3131
3132 err = auxtrace_queues__init(&etm->queues);
3133 if (err)
3134 goto err_free_etm;
3135
cac31418
JC
3136 if (session->itrace_synth_opts->set) {
3137 etm->synth_opts = *session->itrace_synth_opts;
3138 } else {
3139 itrace_synth_opts__set_default(&etm->synth_opts,
3140 session->itrace_synth_opts->default_no_sample);
3141 etm->synth_opts.callchain = false;
3142 }
3143
440a23b3
MP
3144 etm->session = session;
3145 etm->machine = &session->machines.host;
3146
cd8bfd8c 3147 etm->num_cpu = num_cpu;
55c1de99 3148 etm->pmu_type = (unsigned int) ((ptr[CS_PMU_TYPE_CPUS] >> 32) & 0xffffffff);
fd63091f 3149 etm->snapshot_mode = (ptr[CS_ETM_SNAPSHOT] != 0);
cd8bfd8c 3150 etm->metadata = metadata;
440a23b3
MP
3151 etm->auxtrace_type = auxtrace_info->type;
3152 etm->timeless_decoding = cs_etm__is_timeless_decoding(etm);
3153
a7fe9a44
GG
3154 /* Use virtual timestamps if all ETMs report ts_source = 1 */
3155 etm->has_virtual_ts = cs_etm__has_virtual_ts(metadata, num_cpu);
3156
3157 if (!etm->has_virtual_ts)
3158 ui__warning("Virtual timestamps are not enabled, or not supported by the traced system.\n"
3159 "The time field of the samples will not be set accurately.\n\n");
3160
440a23b3
MP
3161 etm->auxtrace.process_event = cs_etm__process_event;
3162 etm->auxtrace.process_auxtrace_event = cs_etm__process_auxtrace_event;
3163 etm->auxtrace.flush_events = cs_etm__flush_events;
3164 etm->auxtrace.free_events = cs_etm__free_events;
3165 etm->auxtrace.free = cs_etm__free;
a58ab57c 3166 etm->auxtrace.evsel_is_auxtrace = cs_etm__evsel_is_auxtrace;
440a23b3
MP
3167 session->auxtrace = &etm->auxtrace;
3168
46d53620 3169 etm->unknown_thread = thread__new(999999999, 999999999);
6285bd15
Y
3170 if (!etm->unknown_thread) {
3171 err = -ENOMEM;
46d53620 3172 goto err_free_queues;
6285bd15 3173 }
46d53620
LY
3174
3175 /*
3176 * Initialize list node so that at thread__zput() we can avoid
3177 * segmentation fault at list_del_init().
3178 */
3179 INIT_LIST_HEAD(&etm->unknown_thread->node);
3180
3181 err = thread__set_comm(etm->unknown_thread, "unknown", 0);
3182 if (err)
3183 goto err_delete_thread;
3184
79b6bb73 3185 if (thread__init_maps(etm->unknown_thread, etm->machine)) {
6285bd15 3186 err = -ENOMEM;
46d53620 3187 goto err_delete_thread;
6285bd15 3188 }
46d53620 3189
a7fe9a44
GG
3190 etm->tc.time_shift = tc->time_shift;
3191 etm->tc.time_mult = tc->time_mult;
3192 etm->tc.time_zero = tc->time_zero;
3193 if (event_contains(*tc, time_cycles)) {
3194 etm->tc.time_cycles = tc->time_cycles;
3195 etm->tc.time_mask = tc->time_mask;
3196 etm->tc.cap_user_time_zero = tc->cap_user_time_zero;
3197 etm->tc.cap_user_time_short = tc->cap_user_time_short;
3198 }
b12235b1
MP
3199 err = cs_etm__synth_events(etm, session);
3200 if (err)
46d53620 3201 goto err_delete_thread;
b12235b1 3202
b6521ea2
ML
3203 /*
3204 * Map Trace ID values to CPU metadata.
3205 *
3206 * Trace metadata will always contain Trace ID values from the legacy algorithm. If the
3207 * file has been recorded by a "new" perf updated to handle AUX_HW_ID, then the metadata
3208 * ID value will also have the CORESIGHT_TRACE_ID_UNUSED_FLAG set.
3209 *
3210 * The updated kernel drivers that use AUX_HW_ID to send Trace IDs will attempt to use
3211 * the same IDs as the old algorithm as far as is possible, unless there are clashes,
3212 * in which case a different value will be used. This means an older perf may still
3213 * be able to record and read files generated on a newer system.
3214 *
3215 * For a perf able to interpret AUX_HW_ID packets we first check for the presence of
3216 * those packets. If they are there, then the values will be mapped and plugged into
3217 * the metadata. We then set any remaining metadata values with the unused flag to the
3218 * value CORESIGHT_TRACE_ID_UNUSED_VAL - which indicates no decoder is required.
3219 *
3220 * If no AUX_HW_ID packets are present - which means a file recorded on an old kernel -
3221 * we map Trace ID values to CPUs directly from the metadata, clearing any unused
3222 * flags if present.
3223 */
3224
3225 /* first scan for AUX_OUTPUT_HW_ID records to map trace ID values to CPU metadata */
3226 aux_hw_id_found = 0;
3227 err = perf_session__peek_events(session, session->header.data_offset,
3228 session->header.data_size,
3229 cs_etm__process_aux_hw_id_cb, &aux_hw_id_found);
3230 if (err)
3231 goto err_delete_thread;
3232
3233 /* if HW ID found then clear any unused metadata ID values */
3234 if (aux_hw_id_found)
3235 err = cs_etm__clear_unused_trace_ids_metadata(num_cpu, metadata);
3236 /* otherwise, this is a file with metadata values only, map from metadata */
3237 else
3238 err = cs_etm__map_trace_ids_metadata(num_cpu, metadata);
3239
09277295
ML
3240 if (err)
3241 goto err_delete_thread;
3242
83d1fc92 3243 err = cs_etm__queue_aux_records(session);
440a23b3 3244 if (err)
46d53620 3245 goto err_delete_thread;
440a23b3
MP
3246
3247 etm->data_queued = etm->queues.populated;
440a23b3
MP
3248 return 0;
3249
46d53620
LY
3250err_delete_thread:
3251 thread__zput(etm->unknown_thread);
440a23b3
MP
3252err_free_queues:
3253 auxtrace_queues__free(&etm->queues);
3254 session->auxtrace = NULL;
3255err_free_etm:
3256 zfree(&etm);
cd8bfd8c
TJ
3257err_free_metadata:
3258 /* No need to check @metadata[j], free(NULL) is supported */
3259 for (j = 0; j < num_cpu; j++)
d8f9da24 3260 zfree(&metadata[j]);
cd8bfd8c
TJ
3261 zfree(&metadata);
3262err_free_traceid_list:
3263 intlist__delete(traceid_list);
6285bd15 3264 return err;
440a23b3 3265}