perf cs-etm: Fix wrong return values in error path
tools/perf/util/cs-etm.c
// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright(C) 2015-2018 Linaro Limited.
 *
 * Author: Tor Jeremiassen <tor@ti.com>
 * Author: Mathieu Poirier <mathieu.poirier@linaro.org>
 */

#include <linux/bitops.h>
#include <linux/err.h>
#include <linux/kernel.h>
#include <linux/log2.h>
#include <linux/types.h>

#include <opencsd/ocsd_if_types.h>
#include <stdlib.h>

#include "auxtrace.h"
#include "color.h"
#include "cs-etm.h"
#include "cs-etm-decoder/cs-etm-decoder.h"
#include "debug.h"
#include "evlist.h"
#include "intlist.h"
#include "machine.h"
#include "map.h"
#include "perf.h"
#include "symbol.h"
#include "thread.h"
#include "thread_map.h"
#include "thread-stack.h"
#include "util.h"

#define MAX_TIMESTAMP (~0ULL)

struct cs_etm_auxtrace {
	struct auxtrace auxtrace;
	struct auxtrace_queues queues;
	struct auxtrace_heap heap;
	struct itrace_synth_opts synth_opts;
	struct perf_session *session;
	struct machine *machine;
	struct thread *unknown_thread;

	u8 timeless_decoding;
	u8 snapshot_mode;
	u8 data_queued;
	u8 sample_branches;
	u8 sample_instructions;

	int num_cpu;
	u32 auxtrace_type;
	u64 branches_sample_type;
	u64 branches_id;
	u64 instructions_sample_type;
	u64 instructions_sample_period;
	u64 instructions_id;
	u64 **metadata;
	u64 kernel_start;
	unsigned int pmu_type;
};

struct cs_etm_queue {
	struct cs_etm_auxtrace *etm;
	struct thread *thread;
	struct cs_etm_decoder *decoder;
	struct auxtrace_buffer *buffer;
	union perf_event *event_buf;
	unsigned int queue_nr;
	pid_t pid, tid;
	int cpu;
	u64 offset;
	u64 period_instructions;
	struct branch_stack *last_branch;
	struct branch_stack *last_branch_rb;
	size_t last_branch_pos;
	struct cs_etm_packet *prev_packet;
	struct cs_etm_packet *packet;
};

static int cs_etm__update_queues(struct cs_etm_auxtrace *etm);
static int cs_etm__process_timeless_queues(struct cs_etm_auxtrace *etm,
					   pid_t tid);

/* PTMs ETMIDR [11:8] set to b0011 */
#define ETMIDR_PTM_VERSION 0x00000300

static u32 cs_etm__get_v7_protocol_version(u32 etmidr)
{
	etmidr &= ETMIDR_PTM_VERSION;

	if (etmidr == ETMIDR_PTM_VERSION)
		return CS_ETM_PROTO_PTM;

	return CS_ETM_PROTO_ETMV3;
}
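
/*
 * Worked example with hypothetical register values (not taken from real
 * hardware): etmidr = 0x4100F310 gives 0x4100F310 & 0x00000300 = 0x300,
 * so the tracer is identified as a PTM; etmidr = 0x4100F240 gives a
 * masked value of 0x200, so it is treated as ETMv3.
 */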

static int cs_etm__get_magic(u8 trace_chan_id, u64 *magic)
{
	struct int_node *inode;
	u64 *metadata;

	inode = intlist__find(traceid_list, trace_chan_id);
	if (!inode)
		return -EINVAL;

	metadata = inode->priv;
	*magic = metadata[CS_ETM_MAGIC];
	return 0;
}

int cs_etm__get_cpu(u8 trace_chan_id, int *cpu)
{
	struct int_node *inode;
	u64 *metadata;

	inode = intlist__find(traceid_list, trace_chan_id);
	if (!inode)
		return -EINVAL;

	metadata = inode->priv;
	*cpu = (int)metadata[CS_ETM_CPU];
	return 0;
}
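
/*
 * Both helpers above resolve a hardware trace ID (the CoreSight trace
 * stream identifier found in the formatted trace) to the per-CPU
 * metadata registered in traceid_list when the session was set up.
 * Illustrative usage (hypothetical snippet, mirroring how callers in
 * this file check the return value):
 *
 *	int cpu;
 *
 *	if (!cs_etm__get_cpu(trace_chan_id, &cpu))
 *		pr_debug("packet comes from CPU %d\n", cpu);
 */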

static void cs_etm__packet_dump(const char *pkt_string)
{
	const char *color = PERF_COLOR_BLUE;
	int len = strlen(pkt_string);

	if (len && (pkt_string[len-1] == '\n'))
		color_fprintf(stdout, color, " %s", pkt_string);
	else
		color_fprintf(stdout, color, " %s\n", pkt_string);

	fflush(stdout);
}

static void cs_etm__dump_event(struct cs_etm_auxtrace *etm,
			       struct auxtrace_buffer *buffer)
{
	int i, ret;
	const char *color = PERF_COLOR_BLUE;
	struct cs_etm_decoder_params d_params;
	struct cs_etm_trace_params *t_params;
	struct cs_etm_decoder *decoder;
	size_t buffer_used = 0;

	fprintf(stdout, "\n");
	color_fprintf(stdout, color,
		      ". ... CoreSight ETM Trace data: size %zu bytes\n",
		      buffer->size);

	/* Use metadata to fill in trace parameters for trace decoder */
	t_params = zalloc(sizeof(*t_params) * etm->num_cpu);
	for (i = 0; i < etm->num_cpu; i++) {
		if (etm->metadata[i][CS_ETM_MAGIC] == __perf_cs_etmv3_magic) {
			u32 etmidr = etm->metadata[i][CS_ETM_ETMIDR];

			t_params[i].protocol =
				cs_etm__get_v7_protocol_version(etmidr);
			t_params[i].etmv3.reg_ctrl =
				etm->metadata[i][CS_ETM_ETMCR];
			t_params[i].etmv3.reg_trc_id =
				etm->metadata[i][CS_ETM_ETMTRACEIDR];
		} else if (etm->metadata[i][CS_ETM_MAGIC] ==
			   __perf_cs_etmv4_magic) {
			t_params[i].protocol = CS_ETM_PROTO_ETMV4i;
			t_params[i].etmv4.reg_idr0 =
				etm->metadata[i][CS_ETMV4_TRCIDR0];
			t_params[i].etmv4.reg_idr1 =
				etm->metadata[i][CS_ETMV4_TRCIDR1];
			t_params[i].etmv4.reg_idr2 =
				etm->metadata[i][CS_ETMV4_TRCIDR2];
			t_params[i].etmv4.reg_idr8 =
				etm->metadata[i][CS_ETMV4_TRCIDR8];
			t_params[i].etmv4.reg_configr =
				etm->metadata[i][CS_ETMV4_TRCCONFIGR];
			t_params[i].etmv4.reg_traceidr =
				etm->metadata[i][CS_ETMV4_TRCTRACEIDR];
		}
	}

	/* Set decoder parameters to simply print the trace packets */
	d_params.packet_printer = cs_etm__packet_dump;
	d_params.operation = CS_ETM_OPERATION_PRINT;
	d_params.formatted = true;
	d_params.fsyncs = false;
	d_params.hsyncs = false;
	d_params.frame_aligned = true;

	decoder = cs_etm_decoder__new(etm->num_cpu, &d_params, t_params);

	zfree(&t_params);

	if (!decoder)
		return;
	do {
		size_t consumed;

		ret = cs_etm_decoder__process_data_block(
				decoder, buffer->offset,
				&((u8 *)buffer->data)[buffer_used],
				buffer->size - buffer_used, &consumed);
		if (ret)
			break;

		buffer_used += consumed;
	} while (buffer_used < buffer->size);

	cs_etm_decoder__free(decoder);
}

static int cs_etm__flush_events(struct perf_session *session,
				struct perf_tool *tool)
{
	int ret;
	struct cs_etm_auxtrace *etm = container_of(session->auxtrace,
						   struct cs_etm_auxtrace,
						   auxtrace);
	if (dump_trace)
		return 0;

	if (!tool->ordered_events)
		return -EINVAL;

	if (!etm->timeless_decoding)
		return -EINVAL;

	ret = cs_etm__update_queues(etm);

	if (ret < 0)
		return ret;

	return cs_etm__process_timeless_queues(etm, -1);
}

static void cs_etm__free_queue(void *priv)
{
	struct cs_etm_queue *etmq = priv;

	if (!etmq)
		return;

	thread__zput(etmq->thread);
	cs_etm_decoder__free(etmq->decoder);
	zfree(&etmq->event_buf);
	zfree(&etmq->last_branch);
	zfree(&etmq->last_branch_rb);
	zfree(&etmq->prev_packet);
	zfree(&etmq->packet);
	free(etmq);
}

static void cs_etm__free_events(struct perf_session *session)
{
	unsigned int i;
	struct cs_etm_auxtrace *aux = container_of(session->auxtrace,
						   struct cs_etm_auxtrace,
						   auxtrace);
	struct auxtrace_queues *queues = &aux->queues;

	for (i = 0; i < queues->nr_queues; i++) {
		cs_etm__free_queue(queues->queue_array[i].priv);
		queues->queue_array[i].priv = NULL;
	}

	auxtrace_queues__free(queues);
}

static void cs_etm__free(struct perf_session *session)
{
	int i;
	struct int_node *inode, *tmp;
	struct cs_etm_auxtrace *aux = container_of(session->auxtrace,
						   struct cs_etm_auxtrace,
						   auxtrace);
	cs_etm__free_events(session);
	session->auxtrace = NULL;

	/* First remove all traceID/metadata nodes from the RB tree */
	intlist__for_each_entry_safe(inode, tmp, traceid_list)
		intlist__remove(traceid_list, inode);
	/* Then the RB tree itself */
	intlist__delete(traceid_list);

	for (i = 0; i < aux->num_cpu; i++)
		zfree(&aux->metadata[i]);

	thread__zput(aux->unknown_thread);
	zfree(&aux->metadata);
	zfree(&aux);
}

static u8 cs_etm__cpu_mode(struct cs_etm_queue *etmq, u64 address)
{
	struct machine *machine;

	machine = etmq->etm->machine;

	if (address >= etmq->etm->kernel_start) {
		if (machine__is_host(machine))
			return PERF_RECORD_MISC_KERNEL;
		else
			return PERF_RECORD_MISC_GUEST_KERNEL;
	} else {
		if (machine__is_host(machine))
			return PERF_RECORD_MISC_USER;
		else if (perf_guest)
			return PERF_RECORD_MISC_GUEST_USER;
		else
			return PERF_RECORD_MISC_HYPERVISOR;
	}
}

static u32 cs_etm__mem_access(struct cs_etm_queue *etmq, u64 address,
			      size_t size, u8 *buffer)
{
	u8 cpumode;
	u64 offset;
	int len;
	struct thread *thread;
	struct machine *machine;
	struct addr_location al;

	if (!etmq)
		return 0;

	machine = etmq->etm->machine;
	cpumode = cs_etm__cpu_mode(etmq, address);

	thread = etmq->thread;
	if (!thread) {
		if (cpumode != PERF_RECORD_MISC_KERNEL)
			return 0;
		thread = etmq->etm->unknown_thread;
	}

	if (!thread__find_map(thread, cpumode, address, &al) || !al.map->dso)
		return 0;

	if (al.map->dso->data.status == DSO_DATA_STATUS_ERROR &&
	    dso__data_status_seen(al.map->dso, DSO_DATA_STATUS_SEEN_ITRACE))
		return 0;

	offset = al.map->map_ip(al.map, address);

	map__load(al.map);

	len = dso__data_read_offset(al.map->dso, machine, offset, buffer, size);

	if (len <= 0)
		return 0;

	return len;
}
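
/*
 * Contract note (inferred from the call sites in this file):
 * cs_etm__mem_access() returns the number of bytes copied into 'buffer'
 * and 0 on any failure. It is registered below as the decoder's
 * memory-access callback, so a return of 0 merely tells the decoder
 * that the address range is not readable rather than raising an error.
 */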

static struct cs_etm_queue *cs_etm__alloc_queue(struct cs_etm_auxtrace *etm,
						unsigned int queue_nr)
{
	int i;
	struct cs_etm_decoder_params d_params;
	struct cs_etm_trace_params *t_params;
	struct cs_etm_queue *etmq;
	size_t szp = sizeof(struct cs_etm_packet);

	etmq = zalloc(sizeof(*etmq));
	if (!etmq)
		return NULL;

	etmq->packet = zalloc(szp);
	if (!etmq->packet)
		goto out_free;

	if (etm->synth_opts.last_branch || etm->sample_branches) {
		etmq->prev_packet = zalloc(szp);
		if (!etmq->prev_packet)
			goto out_free;
	}

	if (etm->synth_opts.last_branch) {
		size_t sz = sizeof(struct branch_stack);

		sz += etm->synth_opts.last_branch_sz *
		      sizeof(struct branch_entry);
		etmq->last_branch = zalloc(sz);
		if (!etmq->last_branch)
			goto out_free;
		etmq->last_branch_rb = zalloc(sz);
		if (!etmq->last_branch_rb)
			goto out_free;
	}

	etmq->event_buf = malloc(PERF_SAMPLE_MAX_SIZE);
	if (!etmq->event_buf)
		goto out_free;

	etmq->etm = etm;
	etmq->queue_nr = queue_nr;
	etmq->pid = -1;
	etmq->tid = -1;
	etmq->cpu = -1;

	/* Use metadata to fill in trace parameters for trace decoder */
	t_params = zalloc(sizeof(*t_params) * etm->num_cpu);

	if (!t_params)
		goto out_free;

	for (i = 0; i < etm->num_cpu; i++) {
		if (etm->metadata[i][CS_ETM_MAGIC] == __perf_cs_etmv3_magic) {
			u32 etmidr = etm->metadata[i][CS_ETM_ETMIDR];

			t_params[i].protocol =
				cs_etm__get_v7_protocol_version(etmidr);
			t_params[i].etmv3.reg_ctrl =
				etm->metadata[i][CS_ETM_ETMCR];
			t_params[i].etmv3.reg_trc_id =
				etm->metadata[i][CS_ETM_ETMTRACEIDR];
		} else if (etm->metadata[i][CS_ETM_MAGIC] ==
			   __perf_cs_etmv4_magic) {
			t_params[i].protocol = CS_ETM_PROTO_ETMV4i;
			t_params[i].etmv4.reg_idr0 =
				etm->metadata[i][CS_ETMV4_TRCIDR0];
			t_params[i].etmv4.reg_idr1 =
				etm->metadata[i][CS_ETMV4_TRCIDR1];
			t_params[i].etmv4.reg_idr2 =
				etm->metadata[i][CS_ETMV4_TRCIDR2];
			t_params[i].etmv4.reg_idr8 =
				etm->metadata[i][CS_ETMV4_TRCIDR8];
			t_params[i].etmv4.reg_configr =
				etm->metadata[i][CS_ETMV4_TRCCONFIGR];
			t_params[i].etmv4.reg_traceidr =
				etm->metadata[i][CS_ETMV4_TRCTRACEIDR];
		}
	}

	/* Set decoder parameters to fully decode the trace packets */
	d_params.packet_printer = cs_etm__packet_dump;
	d_params.operation = CS_ETM_OPERATION_DECODE;
	d_params.formatted = true;
	d_params.fsyncs = false;
	d_params.hsyncs = false;
	d_params.frame_aligned = true;
	d_params.data = etmq;

	etmq->decoder = cs_etm_decoder__new(etm->num_cpu, &d_params, t_params);

	zfree(&t_params);

	if (!etmq->decoder)
		goto out_free;

	/*
	 * Register a function to handle all memory accesses required by
	 * the trace decoder library.
	 */
	if (cs_etm_decoder__add_mem_access_cb(etmq->decoder,
					      0x0L, ((u64) -1L),
					      cs_etm__mem_access))
		goto out_free_decoder;

	etmq->offset = 0;
	etmq->period_instructions = 0;

	return etmq;

out_free_decoder:
	cs_etm_decoder__free(etmq->decoder);
out_free:
	zfree(&etmq->event_buf);
	zfree(&etmq->last_branch);
	zfree(&etmq->last_branch_rb);
	zfree(&etmq->prev_packet);
	zfree(&etmq->packet);
	free(etmq);

	return NULL;
}

static int cs_etm__setup_queue(struct cs_etm_auxtrace *etm,
			       struct auxtrace_queue *queue,
			       unsigned int queue_nr)
{
	struct cs_etm_queue *etmq = queue->priv;

	if (list_empty(&queue->head) || etmq)
		return 0;

	etmq = cs_etm__alloc_queue(etm, queue_nr);

	if (!etmq)
		return -ENOMEM;

	queue->priv = etmq;

	if (queue->cpu != -1)
		etmq->cpu = queue->cpu;

	etmq->tid = queue->tid;

	return 0;
}

static int cs_etm__setup_queues(struct cs_etm_auxtrace *etm)
{
	unsigned int i;
	int ret;

	for (i = 0; i < etm->queues.nr_queues; i++) {
		ret = cs_etm__setup_queue(etm, &etm->queues.queue_array[i], i);
		if (ret)
			return ret;
	}

	return 0;
}

static int cs_etm__update_queues(struct cs_etm_auxtrace *etm)
{
	if (etm->queues.new_data) {
		etm->queues.new_data = false;
		return cs_etm__setup_queues(etm);
	}

	return 0;
}

static inline void cs_etm__copy_last_branch_rb(struct cs_etm_queue *etmq)
{
	struct branch_stack *bs_src = etmq->last_branch_rb;
	struct branch_stack *bs_dst = etmq->last_branch;
	size_t nr = 0;

	/*
	 * Set the number of records before early exit: ->nr is used to
	 * determine how many branches to copy from ->entries.
	 */
	bs_dst->nr = bs_src->nr;

	/*
	 * Early exit when there is nothing to copy.
	 */
	if (!bs_src->nr)
		return;

	/*
	 * As bs_src->entries is a circular buffer, we need to copy from it in
	 * two steps. First, copy the branches from the most recently inserted
	 * branch ->last_branch_pos until the end of bs_src->entries buffer.
	 */
	nr = etmq->etm->synth_opts.last_branch_sz - etmq->last_branch_pos;
	memcpy(&bs_dst->entries[0],
	       &bs_src->entries[etmq->last_branch_pos],
	       sizeof(struct branch_entry) * nr);

	/*
	 * If we wrapped around at least once, the branches from the beginning
	 * of the bs_src->entries buffer and until the ->last_branch_pos element
	 * are older valid branches: copy them over. The total number of
	 * branches copied over will be equal to the number of branches asked by
	 * the user in last_branch_sz.
	 */
	if (bs_src->nr >= etmq->etm->synth_opts.last_branch_sz) {
		memcpy(&bs_dst->entries[nr],
		       &bs_src->entries[0],
		       sizeof(struct branch_entry) * etmq->last_branch_pos);
	}
}
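
/*
 * Worked example for the two-step copy above, using made-up sizes:
 * assume last_branch_sz = 4, last_branch_pos = 1 and bs_src->nr = 4
 * (i.e. the ring buffer has wrapped). Step one copies src entries
 * [1..3] (nr = 4 - 1 = 3 records, newest first) into dst entries
 * [0..2]; step two copies the remaining src entry [0], the oldest one,
 * into dst entry [3]. The flat destination then holds all four
 * branches in reverse chronological order.
 */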

static inline void cs_etm__reset_last_branch_rb(struct cs_etm_queue *etmq)
{
	etmq->last_branch_pos = 0;
	etmq->last_branch_rb->nr = 0;
}

static inline int cs_etm__t32_instr_size(struct cs_etm_queue *etmq,
					 u64 addr)
{
	u8 instrBytes[2];

	cs_etm__mem_access(etmq, addr, ARRAY_SIZE(instrBytes), instrBytes);
	/*
	 * T32 instruction size is indicated by bits[15:11] of the first
	 * 16-bit word of the instruction: 0b11101, 0b11110 and 0b11111
	 * denote a 32-bit instruction.
	 */
	return ((instrBytes[1] & 0xF8) >= 0xE8) ? 4 : 2;
}
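
/*
 * Example of the width check above (T32 encodings, buffer contents
 * little-endian): a "BL" prefix halfword lies in 0xF000-0xF7FF, so
 * instrBytes[1] is 0xF0-0xF7 and (instrBytes[1] & 0xF8) = 0xF0 >= 0xE8,
 * i.e. a 32-bit instruction. For a 16-bit opcode such as 0x4668,
 * instrBytes[1] = 0x46 and 0x46 & 0xF8 = 0x40 < 0xE8, i.e. 16 bits.
 */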

static inline u64 cs_etm__first_executed_instr(struct cs_etm_packet *packet)
{
	/* Returns 0 for the CS_ETM_DISCONTINUITY packet */
	if (packet->sample_type == CS_ETM_DISCONTINUITY)
		return 0;

	return packet->start_addr;
}

static inline
u64 cs_etm__last_executed_instr(const struct cs_etm_packet *packet)
{
	/* Returns 0 for the CS_ETM_DISCONTINUITY packet */
	if (packet->sample_type == CS_ETM_DISCONTINUITY)
		return 0;

	return packet->end_addr - packet->last_instr_size;
}

static inline u64 cs_etm__instr_addr(struct cs_etm_queue *etmq,
				     const struct cs_etm_packet *packet,
				     u64 offset)
{
	if (packet->isa == CS_ETM_ISA_T32) {
		u64 addr = packet->start_addr;

		while (offset > 0) {
			addr += cs_etm__t32_instr_size(etmq, addr);
			offset--;
		}
		return addr;
	}

	/* Assume a 4 byte instruction size (A32/A64) */
	return packet->start_addr + offset * 4;
}
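
/*
 * Worked example with made-up addresses: for an A64 range packet with
 * start_addr = 0x400000, offset 3 resolves to 0x400000 + 3 * 4 =
 * 0x40000C. For T32 the walk is sequential because instructions are a
 * mix of 2 and 4 bytes, so every intermediate opcode must be fetched to
 * find the address of the offset'th instruction.
 */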

static void cs_etm__update_last_branch_rb(struct cs_etm_queue *etmq)
{
	struct branch_stack *bs = etmq->last_branch_rb;
	struct branch_entry *be;

	/*
	 * The branches are recorded in a circular buffer in reverse
	 * chronological order: we start recording from the last element of the
	 * buffer down. After writing the first element of the stack, move the
	 * insert position back to the end of the buffer.
	 */
	if (!etmq->last_branch_pos)
		etmq->last_branch_pos = etmq->etm->synth_opts.last_branch_sz;

	etmq->last_branch_pos -= 1;

	be = &bs->entries[etmq->last_branch_pos];
	be->from = cs_etm__last_executed_instr(etmq->prev_packet);
	be->to = cs_etm__first_executed_instr(etmq->packet);
	/* No support for mispredict */
	be->flags.mispred = 0;
	be->flags.predicted = 1;

	/*
	 * Increment bs->nr until reaching the number of last branches asked by
	 * the user on the command line.
	 */
	if (bs->nr < etmq->etm->synth_opts.last_branch_sz)
		bs->nr += 1;
}
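
/*
 * Illustrative fill order (assuming last_branch_sz = 4): successive
 * calls write entries[3], entries[2], entries[1], entries[0] and then
 * wrap back to entries[3]. The newest branch therefore always sits at
 * last_branch_pos, which is what cs_etm__copy_last_branch_rb() relies
 * on when flattening the ring buffer.
 */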

static int cs_etm__inject_event(union perf_event *event,
				struct perf_sample *sample, u64 type)
{
	event->header.size = perf_event__sample_event_size(sample, type, 0);
	return perf_event__synthesize_sample(event, type, 0, sample);
}

static int
cs_etm__get_trace(struct cs_etm_buffer *buff, struct cs_etm_queue *etmq)
{
	struct auxtrace_buffer *aux_buffer = etmq->buffer;
	struct auxtrace_buffer *old_buffer = aux_buffer;
	struct auxtrace_queue *queue;

	queue = &etmq->etm->queues.queue_array[etmq->queue_nr];

	aux_buffer = auxtrace_buffer__next(queue, aux_buffer);

	/* If no more data, drop the previous auxtrace_buffer and return */
	if (!aux_buffer) {
		if (old_buffer)
			auxtrace_buffer__drop_data(old_buffer);
		buff->len = 0;
		return 0;
	}

	etmq->buffer = aux_buffer;

	/* If the aux_buffer doesn't have data associated, try to load it */
	if (!aux_buffer->data) {
		/* get the file desc associated with the perf data file */
		int fd = perf_data__fd(etmq->etm->session->data);

		aux_buffer->data = auxtrace_buffer__get_data(aux_buffer, fd);
		if (!aux_buffer->data)
			return -ENOMEM;
	}

	/* If valid, drop the previous buffer */
	if (old_buffer)
		auxtrace_buffer__drop_data(old_buffer);

	buff->offset = aux_buffer->offset;
	buff->len = aux_buffer->size;
	buff->buf = aux_buffer->data;

	buff->ref_timestamp = aux_buffer->reference;

	return buff->len;
}
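
/*
 * Return convention (mirrored by the caller in cs_etm__run_decoder()):
 * a positive value is the number of trace bytes now available in
 * buff->buf, 0 means the queue is exhausted, and a negative value is
 * an error, which is why the decode loop bails out on "err <= 0".
 */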

static void cs_etm__set_pid_tid_cpu(struct cs_etm_auxtrace *etm,
				    struct auxtrace_queue *queue)
{
	struct cs_etm_queue *etmq = queue->priv;

	/* CPU-wide tracing isn't supported yet */
	if (queue->tid == -1)
		return;

	if ((!etmq->thread) && (etmq->tid != -1))
		etmq->thread = machine__find_thread(etm->machine, -1,
						    etmq->tid);

	if (etmq->thread) {
		etmq->pid = etmq->thread->pid_;
		if (queue->cpu == -1)
			etmq->cpu = etmq->thread->cpu;
	}
}
729
e573e978
RW
730static int cs_etm__synth_instruction_sample(struct cs_etm_queue *etmq,
731 u64 addr, u64 period)
732{
733 int ret = 0;
734 struct cs_etm_auxtrace *etm = etmq->etm;
735 union perf_event *event = etmq->event_buf;
736 struct perf_sample sample = {.ip = 0,};
737
738 event->sample.header.type = PERF_RECORD_SAMPLE;
d6c9c05f 739 event->sample.header.misc = cs_etm__cpu_mode(etmq, addr);
e573e978
RW
740 event->sample.header.size = sizeof(struct perf_event_header);
741
742 sample.ip = addr;
743 sample.pid = etmq->pid;
744 sample.tid = etmq->tid;
745 sample.id = etmq->etm->instructions_id;
746 sample.stream_id = etmq->etm->instructions_id;
747 sample.period = period;
748 sample.cpu = etmq->packet->cpu;
06220bf4 749 sample.flags = etmq->prev_packet->flags;
e573e978 750 sample.insn_len = 1;
d6c9c05f 751 sample.cpumode = event->sample.header.misc;
e573e978
RW
752
753 if (etm->synth_opts.last_branch) {
754 cs_etm__copy_last_branch_rb(etmq);
755 sample.branch_stack = etmq->last_branch;
756 }
757
758 if (etm->synth_opts.inject) {
759 ret = cs_etm__inject_event(event, &sample,
760 etm->instructions_sample_type);
761 if (ret)
762 return ret;
763 }
764
765 ret = perf_session__deliver_synth_event(etm->session, event, &sample);
766
767 if (ret)
768 pr_err(
769 "CS ETM Trace: failed to deliver instruction event, error %d\n",
770 ret);
771
772 if (etm->synth_opts.last_branch)
773 cs_etm__reset_last_branch_rb(etmq);
774
775 return ret;
776}

/*
 * The cs etm packet encodes an instruction range between a branch target
 * and the next taken branch. Generate a sample accordingly.
 */
static int cs_etm__synth_branch_sample(struct cs_etm_queue *etmq)
{
	int ret = 0;
	struct cs_etm_auxtrace *etm = etmq->etm;
	struct perf_sample sample = {.ip = 0,};
	union perf_event *event = etmq->event_buf;
	struct dummy_branch_stack {
		u64			nr;
		struct branch_entry	entries;
	} dummy_bs;
	u64 ip;

	ip = cs_etm__last_executed_instr(etmq->prev_packet);

	event->sample.header.type = PERF_RECORD_SAMPLE;
	event->sample.header.misc = cs_etm__cpu_mode(etmq, ip);
	event->sample.header.size = sizeof(struct perf_event_header);

	sample.ip = ip;
	sample.pid = etmq->pid;
	sample.tid = etmq->tid;
	sample.addr = cs_etm__first_executed_instr(etmq->packet);
	sample.id = etmq->etm->branches_id;
	sample.stream_id = etmq->etm->branches_id;
	sample.period = 1;
	sample.cpu = etmq->packet->cpu;
	sample.flags = etmq->prev_packet->flags;
	sample.cpumode = event->sample.header.misc;

	/*
	 * perf report cannot handle events without a branch stack
	 */
	if (etm->synth_opts.last_branch) {
		dummy_bs = (struct dummy_branch_stack){
			.nr = 1,
			.entries = {
				.from = sample.ip,
				.to = sample.addr,
			},
		};
		sample.branch_stack = (struct branch_stack *)&dummy_bs;
	}

	if (etm->synth_opts.inject) {
		ret = cs_etm__inject_event(event, &sample,
					   etm->branches_sample_type);
		if (ret)
			return ret;
	}

	ret = perf_session__deliver_synth_event(etm->session, event, &sample);

	if (ret)
		pr_err(
			"CS ETM Trace: failed to deliver branch event, error %d\n",
			ret);

	return ret;
}

struct cs_etm_synth {
	struct perf_tool dummy_tool;
	struct perf_session *session;
};

static int cs_etm__event_synth(struct perf_tool *tool,
			       union perf_event *event,
			       struct perf_sample *sample __maybe_unused,
			       struct machine *machine __maybe_unused)
{
	struct cs_etm_synth *cs_etm_synth =
		container_of(tool, struct cs_etm_synth, dummy_tool);

	return perf_session__deliver_synth_event(cs_etm_synth->session,
						 event, NULL);
}

static int cs_etm__synth_event(struct perf_session *session,
			       struct perf_event_attr *attr, u64 id)
{
	struct cs_etm_synth cs_etm_synth;

	memset(&cs_etm_synth, 0, sizeof(struct cs_etm_synth));
	cs_etm_synth.session = session;

	return perf_event__synthesize_attr(&cs_etm_synth.dummy_tool, attr, 1,
					   &id, cs_etm__event_synth);
}

static int cs_etm__synth_events(struct cs_etm_auxtrace *etm,
				struct perf_session *session)
{
	struct perf_evlist *evlist = session->evlist;
	struct perf_evsel *evsel;
	struct perf_event_attr attr;
	bool found = false;
	u64 id;
	int err;

	evlist__for_each_entry(evlist, evsel) {
		if (evsel->attr.type == etm->pmu_type) {
			found = true;
			break;
		}
	}

	if (!found) {
		pr_debug("No selected events with CoreSight Trace data\n");
		return 0;
	}

	memset(&attr, 0, sizeof(struct perf_event_attr));
	attr.size = sizeof(struct perf_event_attr);
	attr.type = PERF_TYPE_HARDWARE;
	attr.sample_type = evsel->attr.sample_type & PERF_SAMPLE_MASK;
	attr.sample_type |= PERF_SAMPLE_IP | PERF_SAMPLE_TID |
			    PERF_SAMPLE_PERIOD;
	if (etm->timeless_decoding)
		attr.sample_type &= ~(u64)PERF_SAMPLE_TIME;
	else
		attr.sample_type |= PERF_SAMPLE_TIME;

	attr.exclude_user = evsel->attr.exclude_user;
	attr.exclude_kernel = evsel->attr.exclude_kernel;
	attr.exclude_hv = evsel->attr.exclude_hv;
	attr.exclude_host = evsel->attr.exclude_host;
	attr.exclude_guest = evsel->attr.exclude_guest;
	attr.sample_id_all = evsel->attr.sample_id_all;
	attr.read_format = evsel->attr.read_format;

	/* create new id val to be a fixed offset from evsel id */
	id = evsel->id[0] + 1000000000;

	if (!id)
		id = 1;

	if (etm->synth_opts.branches) {
		attr.config = PERF_COUNT_HW_BRANCH_INSTRUCTIONS;
		attr.sample_period = 1;
		attr.sample_type |= PERF_SAMPLE_ADDR;
		err = cs_etm__synth_event(session, &attr, id);
		if (err)
			return err;
		etm->sample_branches = true;
		etm->branches_sample_type = attr.sample_type;
		etm->branches_id = id;
		id += 1;
		attr.sample_type &= ~(u64)PERF_SAMPLE_ADDR;
	}

	if (etm->synth_opts.last_branch)
		attr.sample_type |= PERF_SAMPLE_BRANCH_STACK;

	if (etm->synth_opts.instructions) {
		attr.config = PERF_COUNT_HW_INSTRUCTIONS;
		attr.sample_period = etm->synth_opts.period;
		etm->instructions_sample_period = attr.sample_period;
		err = cs_etm__synth_event(session, &attr, id);
		if (err)
			return err;
		etm->sample_instructions = true;
		etm->instructions_sample_type = attr.sample_type;
		etm->instructions_id = id;
		id += 1;
	}

	return 0;
}
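
/*
 * Net effect of cs_etm__synth_events(): up to two synthetic events are
 * registered with the session, a "branches" event sampled once per
 * branch and an "instructions" event sampled every
 * etm->synth_opts.period instructions. Their ids are derived from the
 * first id of the traced evsel plus a fixed offset, so samples
 * generated during decode can be matched back to these attributes.
 */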

static int cs_etm__sample(struct cs_etm_queue *etmq)
{
	struct cs_etm_auxtrace *etm = etmq->etm;
	struct cs_etm_packet *tmp;
	int ret;
	u64 instrs_executed = etmq->packet->instr_count;

	etmq->period_instructions += instrs_executed;

	/*
	 * Record a branch when the last instruction in
	 * PREV_PACKET is a branch.
	 */
	if (etm->synth_opts.last_branch &&
	    etmq->prev_packet &&
	    etmq->prev_packet->sample_type == CS_ETM_RANGE &&
	    etmq->prev_packet->last_instr_taken_branch)
		cs_etm__update_last_branch_rb(etmq);

	if (etm->sample_instructions &&
	    etmq->period_instructions >= etm->instructions_sample_period) {
		/*
		 * Emit instruction sample periodically
		 * TODO: allow period to be defined in cycles and clock time
		 */

		/* Get number of instructions executed after the sample point */
		u64 instrs_over = etmq->period_instructions -
			etm->instructions_sample_period;

		/*
		 * Calculate the address of the sampled instruction (-1 as
		 * sample is reported as though instruction has just been
		 * executed, but PC has not advanced to next instruction)
		 */
		u64 offset = (instrs_executed - instrs_over - 1);
		u64 addr = cs_etm__instr_addr(etmq, etmq->packet, offset);
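		/*
		 * Numeric example with an assumed period of 1000: if
		 * period_instructions was 800 and this packet executed
		 * 300 instructions, the counter reaches 1100, so
		 * instrs_over = 100 and offset = 300 - 100 - 1 = 199,
		 * i.e. the sample lands on the 200th instruction of the
		 * packet and 100 instructions carry into the next period.
		 */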

		ret = cs_etm__synth_instruction_sample(
			etmq, addr, etm->instructions_sample_period);
		if (ret)
			return ret;

		/* Carry remaining instructions into next sample period */
		etmq->period_instructions = instrs_over;
	}

	if (etm->sample_branches && etmq->prev_packet) {
		bool generate_sample = false;

		/* Generate sample for tracing on packet */
		if (etmq->prev_packet->sample_type == CS_ETM_DISCONTINUITY)
			generate_sample = true;

		/* Generate sample for branch taken packet */
		if (etmq->prev_packet->sample_type == CS_ETM_RANGE &&
		    etmq->prev_packet->last_instr_taken_branch)
			generate_sample = true;

		if (generate_sample) {
			ret = cs_etm__synth_branch_sample(etmq);
			if (ret)
				return ret;
		}
	}

	if (etm->sample_branches || etm->synth_opts.last_branch) {
		/*
		 * Swap PACKET with PREV_PACKET: PACKET becomes PREV_PACKET for
		 * the next incoming packet.
		 */
		tmp = etmq->packet;
		etmq->packet = etmq->prev_packet;
		etmq->prev_packet = tmp;
	}

	return 0;
}

static int cs_etm__exception(struct cs_etm_queue *etmq)
{
	/*
	 * When an exception packet is inserted, whether or not the last
	 * instruction in the previous range packet is a taken branch, we
	 * need to force 'prev_packet->last_instr_taken_branch' to true.
	 * This ensures a branch sample is generated for the instruction
	 * range before the exception is trapped to the kernel or before
	 * the exception returns.
	 *
	 * The exception packet includes dummy address values, so don't
	 * swap PACKET with PREV_PACKET. This keeps PREV_PACKET usable
	 * for generating instruction and branch samples.
	 */
	if (etmq->prev_packet->sample_type == CS_ETM_RANGE)
		etmq->prev_packet->last_instr_taken_branch = true;

	return 0;
}

static int cs_etm__flush(struct cs_etm_queue *etmq)
{
	int err = 0;
	struct cs_etm_auxtrace *etm = etmq->etm;
	struct cs_etm_packet *tmp;

	if (!etmq->prev_packet)
		return 0;

	/* Handle start tracing packet */
	if (etmq->prev_packet->sample_type == CS_ETM_EMPTY)
		goto swap_packet;

	if (etmq->etm->synth_opts.last_branch &&
	    etmq->prev_packet->sample_type == CS_ETM_RANGE) {
		/*
		 * Generate a last branch event for the branches left in the
		 * circular buffer at the end of the trace.
		 *
		 * Use the address of the end of the last reported execution
		 * range
		 */
		u64 addr = cs_etm__last_executed_instr(etmq->prev_packet);

		err = cs_etm__synth_instruction_sample(
			etmq, addr,
			etmq->period_instructions);
		if (err)
			return err;

		etmq->period_instructions = 0;
	}

	if (etm->sample_branches &&
	    etmq->prev_packet->sample_type == CS_ETM_RANGE) {
		err = cs_etm__synth_branch_sample(etmq);
		if (err)
			return err;
	}

swap_packet:
	if (etm->sample_branches || etm->synth_opts.last_branch) {
		/*
		 * Swap PACKET with PREV_PACKET: PACKET becomes PREV_PACKET for
		 * the next incoming packet.
		 */
		tmp = etmq->packet;
		etmq->packet = etmq->prev_packet;
		etmq->prev_packet = tmp;
	}

	return err;
}

static int cs_etm__end_block(struct cs_etm_queue *etmq)
{
	int err;

	/*
	 * No new packet is coming and 'etmq->packet' still contains the
	 * stale packet left over from the previous packet swap; skip
	 * generating a branch sample so that the stale packet is not used.
	 *
	 * For this case only flush the branch stack and generate a last
	 * branch event for the branches left in the circular buffer at
	 * the end of the trace.
	 */
	if (etmq->etm->synth_opts.last_branch &&
	    etmq->prev_packet->sample_type == CS_ETM_RANGE) {
		/*
		 * Use the address of the end of the last reported execution
		 * range.
		 */
		u64 addr = cs_etm__last_executed_instr(etmq->prev_packet);

		err = cs_etm__synth_instruction_sample(
			etmq, addr,
			etmq->period_instructions);
		if (err)
			return err;

		etmq->period_instructions = 0;
	}

	return 0;
}

static bool cs_etm__is_svc_instr(struct cs_etm_queue *etmq,
				 struct cs_etm_packet *packet,
				 u64 end_addr)
{
	u16 instr16;
	u32 instr32;
	u64 addr;

	switch (packet->isa) {
	case CS_ETM_ISA_T32:
		/*
		 * The SVC of T32 is defined in ARM DDI 0487D.a, F5.1.247:
		 *
		 * b'15         b'8
		 * +-----------------+--------+
		 * | 1 1 0 1 1 1 1 1 |  imm8  |
		 * +-----------------+--------+
		 *
		 * According to the specification, SVC is only defined for
		 * T32 as a 16-bit instruction and has no 32-bit encoding;
		 * so below we read only 2 bytes as the instruction size
		 * for T32.
		 */
		addr = end_addr - 2;
		cs_etm__mem_access(etmq, addr, sizeof(instr16), (u8 *)&instr16);
		if ((instr16 & 0xFF00) == 0xDF00)
			return true;

		break;
	case CS_ETM_ISA_A32:
		/*
		 * The SVC of A32 is defined in ARM DDI 0487D.a, F5.1.247:
		 *
		 * b'31 b'28 b'27 b'24
		 * +---------+---------+-------------------------+
		 * |  !1111  | 1 1 1 1 |          imm24          |
		 * +---------+---------+-------------------------+
		 */
		addr = end_addr - 4;
		cs_etm__mem_access(etmq, addr, sizeof(instr32), (u8 *)&instr32);
		if ((instr32 & 0x0F000000) == 0x0F000000 &&
		    (instr32 & 0xF0000000) != 0xF0000000)
			return true;

		break;
	case CS_ETM_ISA_A64:
		/*
		 * The SVC of A64 is defined in ARM DDI 0487D.a, C6.2.294:
		 *
		 * b'31               b'21 b'4     b'0
		 * +-----------------------+---------+-----------+
		 * | 1 1 0 1 0 1 0 0 0 0 0 |  imm16  | 0 0 0 0 1 |
		 * +-----------------------+---------+-----------+
		 */
		addr = end_addr - 4;
		cs_etm__mem_access(etmq, addr, sizeof(instr32), (u8 *)&instr32);
		if ((instr32 & 0xFFE0001F) == 0xd4000001)
			return true;

		break;
	case CS_ETM_ISA_UNKNOWN:
	default:
		break;
	}

	return false;
}

static bool cs_etm__is_syscall(struct cs_etm_queue *etmq, u64 magic)
{
	struct cs_etm_packet *packet = etmq->packet;
	struct cs_etm_packet *prev_packet = etmq->prev_packet;

	if (magic == __perf_cs_etmv3_magic)
		if (packet->exception_number == CS_ETMV3_EXC_SVC)
			return true;

	/*
	 * ETMv4 exception type CS_ETMV4_EXC_CALL covers SVC, SMC and
	 * HVC cases; need to check if it's SVC instruction based on
	 * packet address.
	 */
	if (magic == __perf_cs_etmv4_magic) {
		if (packet->exception_number == CS_ETMV4_EXC_CALL &&
		    cs_etm__is_svc_instr(etmq, prev_packet,
					 prev_packet->end_addr))
			return true;
	}

	return false;
}

static bool cs_etm__is_async_exception(struct cs_etm_queue *etmq, u64 magic)
{
	struct cs_etm_packet *packet = etmq->packet;

	if (magic == __perf_cs_etmv3_magic)
		if (packet->exception_number == CS_ETMV3_EXC_DEBUG_HALT ||
		    packet->exception_number == CS_ETMV3_EXC_ASYNC_DATA_ABORT ||
		    packet->exception_number == CS_ETMV3_EXC_PE_RESET ||
		    packet->exception_number == CS_ETMV3_EXC_IRQ ||
		    packet->exception_number == CS_ETMV3_EXC_FIQ)
			return true;

	if (magic == __perf_cs_etmv4_magic)
		if (packet->exception_number == CS_ETMV4_EXC_RESET ||
		    packet->exception_number == CS_ETMV4_EXC_DEBUG_HALT ||
		    packet->exception_number == CS_ETMV4_EXC_SYSTEM_ERROR ||
		    packet->exception_number == CS_ETMV4_EXC_INST_DEBUG ||
		    packet->exception_number == CS_ETMV4_EXC_DATA_DEBUG ||
		    packet->exception_number == CS_ETMV4_EXC_IRQ ||
		    packet->exception_number == CS_ETMV4_EXC_FIQ)
			return true;

	return false;
}

static bool cs_etm__is_sync_exception(struct cs_etm_queue *etmq, u64 magic)
{
	struct cs_etm_packet *packet = etmq->packet;
	struct cs_etm_packet *prev_packet = etmq->prev_packet;

	if (magic == __perf_cs_etmv3_magic)
		if (packet->exception_number == CS_ETMV3_EXC_SMC ||
		    packet->exception_number == CS_ETMV3_EXC_HYP ||
		    packet->exception_number == CS_ETMV3_EXC_JAZELLE_THUMBEE ||
		    packet->exception_number == CS_ETMV3_EXC_UNDEFINED_INSTR ||
		    packet->exception_number == CS_ETMV3_EXC_PREFETCH_ABORT ||
		    packet->exception_number == CS_ETMV3_EXC_DATA_FAULT ||
		    packet->exception_number == CS_ETMV3_EXC_GENERIC)
			return true;

	if (magic == __perf_cs_etmv4_magic) {
		if (packet->exception_number == CS_ETMV4_EXC_TRAP ||
		    packet->exception_number == CS_ETMV4_EXC_ALIGNMENT ||
		    packet->exception_number == CS_ETMV4_EXC_INST_FAULT ||
		    packet->exception_number == CS_ETMV4_EXC_DATA_FAULT)
			return true;

		/*
		 * For CS_ETMV4_EXC_CALL, instructions other than SVC
		 * (i.e. SMC, HVC) are taken as synchronous exceptions.
		 */
		if (packet->exception_number == CS_ETMV4_EXC_CALL &&
		    !cs_etm__is_svc_instr(etmq, prev_packet,
					  prev_packet->end_addr))
			return true;

		/*
		 * ETMv4 has 5 bits for the exception number; if the number
		 * is in the range (CS_ETMV4_EXC_FIQ, CS_ETMV4_EXC_END]
		 * it is an implementation defined exception.
		 *
		 * For this case, simply take it as a synchronous exception.
		 */
		if (packet->exception_number > CS_ETMV4_EXC_FIQ &&
		    packet->exception_number <= CS_ETMV4_EXC_END)
			return true;
	}

	return false;
}

static int cs_etm__set_sample_flags(struct cs_etm_queue *etmq)
{
	struct cs_etm_packet *packet = etmq->packet;
	struct cs_etm_packet *prev_packet = etmq->prev_packet;
	u64 magic;
	int ret;

	switch (packet->sample_type) {
	case CS_ETM_RANGE:
		/*
		 * Immediate branch instruction with neither link nor
		 * return flag, it's a normal branch instruction within
		 * the function.
		 */
		if (packet->last_instr_type == OCSD_INSTR_BR &&
		    packet->last_instr_subtype == OCSD_S_INSTR_NONE) {
			packet->flags = PERF_IP_FLAG_BRANCH;

			if (packet->last_instr_cond)
				packet->flags |= PERF_IP_FLAG_CONDITIONAL;
		}

		/*
		 * Immediate branch instruction with link (e.g. BL), this is
		 * a branch instruction for a function call.
		 */
		if (packet->last_instr_type == OCSD_INSTR_BR &&
		    packet->last_instr_subtype == OCSD_S_INSTR_BR_LINK)
			packet->flags = PERF_IP_FLAG_BRANCH |
					PERF_IP_FLAG_CALL;

		/*
		 * Indirect branch instruction with link (e.g. BLR), this is
		 * a branch instruction for a function call.
		 */
		if (packet->last_instr_type == OCSD_INSTR_BR_INDIRECT &&
		    packet->last_instr_subtype == OCSD_S_INSTR_BR_LINK)
			packet->flags = PERF_IP_FLAG_BRANCH |
					PERF_IP_FLAG_CALL;

		/*
		 * Indirect branch instruction with subtype of
		 * OCSD_S_INSTR_V7_IMPLIED_RET, this is an explicit hint for
		 * a function return for A32/T32.
		 */
		if (packet->last_instr_type == OCSD_INSTR_BR_INDIRECT &&
		    packet->last_instr_subtype == OCSD_S_INSTR_V7_IMPLIED_RET)
			packet->flags = PERF_IP_FLAG_BRANCH |
					PERF_IP_FLAG_RETURN;

		/*
		 * Indirect branch instruction without link (e.g. BR), usually
		 * used for a function return, especially for functions within
		 * dynamically linked libraries.
		 */
		if (packet->last_instr_type == OCSD_INSTR_BR_INDIRECT &&
		    packet->last_instr_subtype == OCSD_S_INSTR_NONE)
			packet->flags = PERF_IP_FLAG_BRANCH |
					PERF_IP_FLAG_RETURN;

		/* Return instruction for function return. */
		if (packet->last_instr_type == OCSD_INSTR_BR_INDIRECT &&
		    packet->last_instr_subtype == OCSD_S_INSTR_V8_RET)
			packet->flags = PERF_IP_FLAG_BRANCH |
					PERF_IP_FLAG_RETURN;

		/*
		 * The decoder might insert a discontinuity in the middle of
		 * instruction packets; fix up prev_packet with the flag
		 * PERF_IP_FLAG_TRACE_BEGIN to indicate restarting trace.
		 */
		if (prev_packet->sample_type == CS_ETM_DISCONTINUITY)
			prev_packet->flags |= PERF_IP_FLAG_BRANCH |
					      PERF_IP_FLAG_TRACE_BEGIN;

		/*
		 * If the previous packet is an exception return packet
		 * and the return address just follows an SVC instruction,
		 * we need to calibrate the previous packet sample flags
		 * as PERF_IP_FLAG_SYSCALLRET.
		 */
		if (prev_packet->flags == (PERF_IP_FLAG_BRANCH |
					   PERF_IP_FLAG_RETURN |
					   PERF_IP_FLAG_INTERRUPT) &&
		    cs_etm__is_svc_instr(etmq, packet, packet->start_addr))
			prev_packet->flags = PERF_IP_FLAG_BRANCH |
					     PERF_IP_FLAG_RETURN |
					     PERF_IP_FLAG_SYSCALLRET;
		break;
	case CS_ETM_DISCONTINUITY:
		/*
		 * The trace is discontinuous; if the previous packet is
		 * an instruction packet, set flag PERF_IP_FLAG_TRACE_END
		 * for the previous packet.
		 */
		if (prev_packet->sample_type == CS_ETM_RANGE)
			prev_packet->flags |= PERF_IP_FLAG_BRANCH |
					      PERF_IP_FLAG_TRACE_END;
		break;
	case CS_ETM_EXCEPTION:
		ret = cs_etm__get_magic(packet->trace_chan_id, &magic);
		if (ret)
			return ret;

		/* The exception is for a system call. */
		if (cs_etm__is_syscall(etmq, magic))
			packet->flags = PERF_IP_FLAG_BRANCH |
					PERF_IP_FLAG_CALL |
					PERF_IP_FLAG_SYSCALLRET;
		/*
		 * The exceptions are triggered by external signals from bus,
		 * interrupt controller, debug module, PE reset or halt.
		 */
		else if (cs_etm__is_async_exception(etmq, magic))
			packet->flags = PERF_IP_FLAG_BRANCH |
					PERF_IP_FLAG_CALL |
					PERF_IP_FLAG_ASYNC |
					PERF_IP_FLAG_INTERRUPT;
		/*
		 * Otherwise, the exception is caused by a trap, instruction
		 * or data fault, or alignment errors.
		 */
		else if (cs_etm__is_sync_exception(etmq, magic))
			packet->flags = PERF_IP_FLAG_BRANCH |
					PERF_IP_FLAG_CALL |
					PERF_IP_FLAG_INTERRUPT;

		/*
		 * When an exception packet is inserted, since the exception
		 * packet is not used standalone for generating samples and
		 * is affiliated with the previous instruction range packet,
		 * set the previous range packet flags to tell perf it is an
		 * exception taken branch.
		 */
		if (prev_packet->sample_type == CS_ETM_RANGE)
			prev_packet->flags = packet->flags;
		break;
	case CS_ETM_EXCEPTION_RET:
		/*
		 * When the exception return packet is inserted, since the
		 * exception return packet is not used standalone for
		 * generating samples and is affiliated with the previous
		 * instruction range packet, set the previous range packet
		 * flags to tell perf it is an exception return branch.
		 *
		 * The exception return can be for either a system call or
		 * other exception types; unfortunately the packet doesn't
		 * contain exception type related info, so we cannot decide
		 * the exception type purely based on the exception return
		 * packet. If we record the exception number from the
		 * exception packet and reuse it for the exception return
		 * packet, this is not reliable because the trace can be
		 * discontinuous or the interrupt can be nested, thus the
		 * recorded exception number cannot be used for the exception
		 * return packet in these two cases.
		 *
		 * For the exception return packet, we only need to
		 * distinguish whether the packet is for a system call or for
		 * other types. Thus the decision can be deferred until
		 * receiving the next packet, which contains the return
		 * address; based on the return address we can read out the
		 * previous instruction, check if it's a system call
		 * instruction and then calibrate the sample flags as needed.
		 */
		if (prev_packet->sample_type == CS_ETM_RANGE)
			prev_packet->flags = PERF_IP_FLAG_BRANCH |
					     PERF_IP_FLAG_RETURN |
					     PERF_IP_FLAG_INTERRUPT;
		break;
	case CS_ETM_EMPTY:
	default:
		break;
	}

	return 0;
}

static int cs_etm__run_decoder(struct cs_etm_queue *etmq)
{
	struct cs_etm_auxtrace *etm = etmq->etm;
	struct cs_etm_buffer buffer;
	size_t buffer_used, processed;
	int err = 0;

	if (!etm->kernel_start)
		etm->kernel_start = machine__kernel_start(etm->machine);

	/* Go through each buffer in the queue and decode them one by one */
	while (1) {
		buffer_used = 0;
		memset(&buffer, 0, sizeof(buffer));
		err = cs_etm__get_trace(&buffer, etmq);
		if (err <= 0)
			return err;
		/*
		 * We cannot assume consecutive blocks in the data file are
		 * contiguous, reset the decoder to force re-sync.
		 */
		err = cs_etm_decoder__reset(etmq->decoder);
		if (err != 0)
			return err;

		/* Run trace decoder until buffer consumed or end of trace */
		do {
			processed = 0;
			err = cs_etm_decoder__process_data_block(
				etmq->decoder,
				etmq->offset,
				&buffer.buf[buffer_used],
				buffer.len - buffer_used,
				&processed);
			if (err)
				return err;

			etmq->offset += processed;
			buffer_used += processed;

			/* Process each packet in this chunk */
			while (1) {
				err = cs_etm_decoder__get_packet(etmq->decoder,
								 etmq->packet);
				if (err <= 0)
					/*
					 * Stop processing this chunk on
					 * end of data or error
					 */
					break;

				/*
				 * Since packet addresses are swapped in packet
				 * handling within the switch() statement below,
				 * the sample flags must be set prior to the
				 * switch() statement so that the address
				 * information is used before the packets are
				 * swapped.
				 */
				err = cs_etm__set_sample_flags(etmq);
				if (err < 0)
					break;

				switch (etmq->packet->sample_type) {
				case CS_ETM_RANGE:
					/*
					 * If the packet contains an instruction
					 * range, generate instruction sequence
					 * events.
					 */
					cs_etm__sample(etmq);
					break;
				case CS_ETM_EXCEPTION:
				case CS_ETM_EXCEPTION_RET:
					/*
					 * If an exception packet comes,
					 * make sure the previous instruction
					 * range packet is handled properly.
					 */
					cs_etm__exception(etmq);
					break;
				case CS_ETM_DISCONTINUITY:
					/*
					 * Discontinuity in trace, flush
					 * previous branch stack
					 */
					cs_etm__flush(etmq);
					break;
				case CS_ETM_EMPTY:
					/*
					 * Should not receive empty packet,
					 * report error.
					 */
					pr_err("CS ETM Trace: empty packet\n");
					return -EINVAL;
				default:
					break;
				}
			}
		} while (buffer.len > buffer_used);

		if (err == 0)
			/* Flush any remaining branch stack entries */
			err = cs_etm__end_block(etmq);
	}

	return err;
}
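
/*
 * Summary of the decode loop above: fetch one auxtrace buffer, reset
 * the decoder to force re-synchronisation, feed the buffer to the
 * decoder block by block, and drain the decoder's packet queue after
 * every block. The outer while (1) only exits through
 * cs_etm__get_trace() returning <= 0 (no more data, or an error).
 */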

static int cs_etm__process_timeless_queues(struct cs_etm_auxtrace *etm,
					   pid_t tid)
{
	unsigned int i;
	struct auxtrace_queues *queues = &etm->queues;

	for (i = 0; i < queues->nr_queues; i++) {
		struct auxtrace_queue *queue = &etm->queues.queue_array[i];
		struct cs_etm_queue *etmq = queue->priv;

		if (etmq && ((tid == -1) || (etmq->tid == tid))) {
			cs_etm__set_pid_tid_cpu(etm, queue);
			cs_etm__run_decoder(etmq);
		}
	}

	return 0;
}

static int cs_etm__process_event(struct perf_session *session,
				 union perf_event *event,
				 struct perf_sample *sample,
				 struct perf_tool *tool)
{
	int err = 0;
	u64 timestamp;
	struct cs_etm_auxtrace *etm = container_of(session->auxtrace,
						   struct cs_etm_auxtrace,
						   auxtrace);

	if (dump_trace)
		return 0;

	if (!tool->ordered_events) {
		pr_err("CoreSight ETM Trace requires ordered events\n");
		return -EINVAL;
	}

	if (!etm->timeless_decoding)
		return -EINVAL;

	if (sample->time && (sample->time != (u64) -1))
		timestamp = sample->time;
	else
		timestamp = 0;

	if (timestamp || etm->timeless_decoding) {
		err = cs_etm__update_queues(etm);
		if (err)
			return err;
	}

	if (event->header.type == PERF_RECORD_EXIT)
		return cs_etm__process_timeless_queues(etm,
						       event->fork.tid);

	return 0;
}

static int cs_etm__process_auxtrace_event(struct perf_session *session,
					  union perf_event *event,
					  struct perf_tool *tool __maybe_unused)
{
	struct cs_etm_auxtrace *etm = container_of(session->auxtrace,
						   struct cs_etm_auxtrace,
						   auxtrace);
	if (!etm->data_queued) {
		struct auxtrace_buffer *buffer;
		off_t  data_offset;
		int fd = perf_data__fd(session->data);
		bool is_pipe = perf_data__is_pipe(session->data);
		int err;

		if (is_pipe)
			data_offset = 0;
		else {
			data_offset = lseek(fd, 0, SEEK_CUR);
			if (data_offset == -1)
				return -errno;
		}

		err = auxtrace_queues__add_event(&etm->queues, session,
						 event, data_offset, &buffer);
		if (err)
			return err;

		if (dump_trace)
			if (auxtrace_buffer__get_data(buffer, fd)) {
				cs_etm__dump_event(etm, buffer);
				auxtrace_buffer__put_data(buffer);
			}
	}

	return 0;
}

static bool cs_etm__is_timeless_decoding(struct cs_etm_auxtrace *etm)
{
	struct perf_evsel *evsel;
	struct perf_evlist *evlist = etm->session->evlist;
	bool timeless_decoding = true;

	/*
	 * Loop through the list of events and note if we find one
	 * with the time bit set.
	 */
	evlist__for_each_entry(evlist, evsel) {
		if ((evsel->attr.sample_type & PERF_SAMPLE_TIME))
			timeless_decoding = false;
	}

	return timeless_decoding;
}

static const char * const cs_etm_global_header_fmts[] = {
	[CS_HEADER_VERSION_0]	= "	Header version		       %llx\n",
	[CS_PMU_TYPE_CPUS]	= "	PMU type/num cpus	       %llx\n",
	[CS_ETM_SNAPSHOT]	= "	Snapshot		       %llx\n",
};

static const char * const cs_etm_priv_fmts[] = {
	[CS_ETM_MAGIC]		= "	Magic number		       %llx\n",
	[CS_ETM_CPU]		= "	CPU			       %lld\n",
	[CS_ETM_ETMCR]		= "	ETMCR			       %llx\n",
	[CS_ETM_ETMTRACEIDR]	= "	ETMTRACEIDR		       %llx\n",
	[CS_ETM_ETMCCER]	= "	ETMCCER			       %llx\n",
	[CS_ETM_ETMIDR]		= "	ETMIDR			       %llx\n",
};

static const char * const cs_etmv4_priv_fmts[] = {
	[CS_ETM_MAGIC]		= "	Magic number		       %llx\n",
	[CS_ETM_CPU]		= "	CPU			       %lld\n",
	[CS_ETMV4_TRCCONFIGR]	= "	TRCCONFIGR		       %llx\n",
	[CS_ETMV4_TRCTRACEIDR]	= "	TRCTRACEIDR		       %llx\n",
	[CS_ETMV4_TRCIDR0]	= "	TRCIDR0			       %llx\n",
	[CS_ETMV4_TRCIDR1]	= "	TRCIDR1			       %llx\n",
	[CS_ETMV4_TRCIDR2]	= "	TRCIDR2			       %llx\n",
	[CS_ETMV4_TRCIDR8]	= "	TRCIDR8			       %llx\n",
	[CS_ETMV4_TRCAUTHSTATUS] = "	TRCAUTHSTATUS		       %llx\n",
};

static void cs_etm__print_auxtrace_info(u64 *val, int num)
{
	int i, j, cpu = 0;

	for (i = 0; i < CS_HEADER_VERSION_0_MAX; i++)
		fprintf(stdout, cs_etm_global_header_fmts[i], val[i]);

	for (i = CS_HEADER_VERSION_0_MAX; cpu < num; cpu++) {
		if (val[i] == __perf_cs_etmv3_magic)
			for (j = 0; j < CS_ETM_PRIV_MAX; j++, i++)
				fprintf(stdout, cs_etm_priv_fmts[j], val[i]);
		else if (val[i] == __perf_cs_etmv4_magic)
			for (j = 0; j < CS_ETMV4_PRIV_MAX; j++, i++)
				fprintf(stdout, cs_etmv4_priv_fmts[j], val[i]);
		else
			/* failure.. return */
			return;
	}
}

int cs_etm__process_auxtrace_info(union perf_event *event,
				  struct perf_session *session)
{
	struct auxtrace_info_event *auxtrace_info = &event->auxtrace_info;
	struct cs_etm_auxtrace *etm = NULL;
	struct int_node *inode;
	unsigned int pmu_type;
	int event_header_size = sizeof(struct perf_event_header);
	int info_header_size;
	int total_size = auxtrace_info->header.size;
	int priv_size = 0;
	int num_cpu;
	int err = 0, idx = -1;
	int i, j, k;
	u64 *ptr, *hdr = NULL;
	u64 **metadata = NULL;

	/*
	 * sizeof(auxtrace_info_event::type) +
	 * sizeof(auxtrace_info_event::reserved) == 8
	 */
	info_header_size = 8;

	if (total_size < (event_header_size + info_header_size))
		return -EINVAL;

	priv_size = total_size - event_header_size - info_header_size;

	/* First the global part */
	ptr = (u64 *) auxtrace_info->priv;

	/* Look for version '0' of the header */
	if (ptr[0] != 0)
		return -EINVAL;

	hdr = zalloc(sizeof(*hdr) * CS_HEADER_VERSION_0_MAX);
	if (!hdr)
		return -ENOMEM;

	/* Extract header information - see cs-etm.h for format */
	for (i = 0; i < CS_HEADER_VERSION_0_MAX; i++)
		hdr[i] = ptr[i];
	num_cpu = hdr[CS_PMU_TYPE_CPUS] & 0xffffffff;
	pmu_type = (unsigned int) ((hdr[CS_PMU_TYPE_CPUS] >> 32) &
				   0xffffffff);

	/*
	 * Create an RB tree for traceID-metadata tuple. Since the conversion
	 * has to be made for each packet that gets decoded, optimizing access
	 * in anything other than a sequential array is worth doing.
	 */
	traceid_list = intlist__new(NULL);
	if (!traceid_list) {
		err = -ENOMEM;
		goto err_free_hdr;
	}

	metadata = zalloc(sizeof(*metadata) * num_cpu);
	if (!metadata) {
		err = -ENOMEM;
		goto err_free_traceid_list;
	}

	/*
	 * The metadata is stored in the auxtrace_info section and encodes
	 * the configuration of the ARM embedded trace macrocell which is
	 * required by the trace decoder to properly decode the trace due
	 * to its highly compressed nature.
	 */
	for (j = 0; j < num_cpu; j++) {
		if (ptr[i] == __perf_cs_etmv3_magic) {
			metadata[j] = zalloc(sizeof(*metadata[j]) *
					     CS_ETM_PRIV_MAX);
			if (!metadata[j]) {
				err = -ENOMEM;
				goto err_free_metadata;
			}
			for (k = 0; k < CS_ETM_PRIV_MAX; k++)
				metadata[j][k] = ptr[i + k];

			/* The traceID is our handle */
			idx = metadata[j][CS_ETM_ETMTRACEIDR];
			i += CS_ETM_PRIV_MAX;
		} else if (ptr[i] == __perf_cs_etmv4_magic) {
			metadata[j] = zalloc(sizeof(*metadata[j]) *
					     CS_ETMV4_PRIV_MAX);
			if (!metadata[j]) {
				err = -ENOMEM;
				goto err_free_metadata;
			}
			for (k = 0; k < CS_ETMV4_PRIV_MAX; k++)
				metadata[j][k] = ptr[i + k];

			/* The traceID is our handle */
			idx = metadata[j][CS_ETMV4_TRCTRACEIDR];
			i += CS_ETMV4_PRIV_MAX;
		}

		/* Get an RB node for this CPU */
		inode = intlist__findnew(traceid_list, idx);

		/* Something went wrong, no need to continue */
		if (!inode) {
			err = -ENOMEM;
			goto err_free_metadata;
		}

		/*
		 * The node for that CPU should not be taken.
		 * Back out if that's the case.
		 */
		if (inode->priv) {
			err = -EINVAL;
			goto err_free_metadata;
		}
		/* All good, associate the traceID with the metadata pointer */
		inode->priv = metadata[j];
	}

	/*
	 * Each of CS_HEADER_VERSION_0_MAX, CS_ETM_PRIV_MAX and
	 * CS_ETMV4_PRIV_MAX mark how many double words are in the
	 * global metadata, and each cpu's metadata respectively.
	 * The following tests if the correct number of double words was
	 * present in the auxtrace info section.
	 */
	if (i * 8 != priv_size) {
		err = -EINVAL;
		goto err_free_metadata;
	}

	etm = zalloc(sizeof(*etm));

	if (!etm) {
		err = -ENOMEM;
		goto err_free_metadata;
	}

	err = auxtrace_queues__init(&etm->queues);
	if (err)
		goto err_free_etm;

	etm->session = session;
	etm->machine = &session->machines.host;

	etm->num_cpu = num_cpu;
	etm->pmu_type = pmu_type;
	etm->snapshot_mode = (hdr[CS_ETM_SNAPSHOT] != 0);
	etm->metadata = metadata;
	etm->auxtrace_type = auxtrace_info->type;
	etm->timeless_decoding = cs_etm__is_timeless_decoding(etm);

	etm->auxtrace.process_event = cs_etm__process_event;
	etm->auxtrace.process_auxtrace_event = cs_etm__process_auxtrace_event;
	etm->auxtrace.flush_events = cs_etm__flush_events;
	etm->auxtrace.free_events = cs_etm__free_events;
	etm->auxtrace.free = cs_etm__free;
	session->auxtrace = &etm->auxtrace;

	etm->unknown_thread = thread__new(999999999, 999999999);
	if (!etm->unknown_thread) {
		err = -ENOMEM;
		goto err_free_queues;
	}

	/*
	 * Initialize list node so that at thread__zput() we can avoid
	 * segmentation fault at list_del_init().
	 */
	INIT_LIST_HEAD(&etm->unknown_thread->node);

	err = thread__set_comm(etm->unknown_thread, "unknown", 0);
	if (err)
		goto err_delete_thread;

	if (thread__init_map_groups(etm->unknown_thread, etm->machine)) {
		err = -ENOMEM;
		goto err_delete_thread;
	}

	if (dump_trace) {
		cs_etm__print_auxtrace_info(auxtrace_info->priv, num_cpu);
		return 0;
	}

	if (session->itrace_synth_opts && session->itrace_synth_opts->set) {
		etm->synth_opts = *session->itrace_synth_opts;
	} else {
		itrace_synth_opts__set_default(&etm->synth_opts,
				session->itrace_synth_opts->default_no_sample);
		etm->synth_opts.callchain = false;
	}

	err = cs_etm__synth_events(etm, session);
	if (err)
		goto err_delete_thread;

	err = auxtrace_queues__process_index(&etm->queues, session);
	if (err)
		goto err_delete_thread;

	etm->data_queued = etm->queues.populated;

	return 0;

err_delete_thread:
	thread__zput(etm->unknown_thread);
err_free_queues:
	auxtrace_queues__free(&etm->queues);
	session->auxtrace = NULL;
err_free_etm:
	zfree(&etm);
err_free_metadata:
	/* No need to check @metadata[j], free(NULL) is supported */
	for (j = 0; j < num_cpu; j++)
		free(metadata[j]);
	zfree(&metadata);
err_free_traceid_list:
	intlist__delete(traceid_list);
err_free_hdr:
	zfree(&hdr);

	return err;
}