perf intel-pt: Add support for samples to contain IPC ratio
tools/perf/util/intel-pt.c
/*
 * intel_pt.c: Intel Processor Trace support
 * Copyright (c) 2013-2015, Intel Corporation.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
 * more details.
 *
 */

#include <inttypes.h>
#include <stdio.h>
#include <stdbool.h>
#include <errno.h>
#include <linux/kernel.h>
#include <linux/types.h>

#include "../perf.h"
#include "session.h"
#include "machine.h"
#include "memswap.h"
#include "sort.h"
#include "tool.h"
#include "event.h"
#include "evlist.h"
#include "evsel.h"
#include "map.h"
#include "color.h"
#include "util.h"
#include "thread.h"
#include "thread-stack.h"
#include "symbol.h"
#include "callchain.h"
#include "dso.h"
#include "debug.h"
#include "auxtrace.h"
#include "tsc.h"
#include "intel-pt.h"
#include "config.h"

#include "intel-pt-decoder/intel-pt-log.h"
#include "intel-pt-decoder/intel-pt-decoder.h"
#include "intel-pt-decoder/intel-pt-insn-decoder.h"
#include "intel-pt-decoder/intel-pt-pkt-decoder.h"

#define MAX_TIMESTAMP (~0ULL)

struct intel_pt {
        struct auxtrace auxtrace;
        struct auxtrace_queues queues;
        struct auxtrace_heap heap;
        u32 auxtrace_type;
        struct perf_session *session;
        struct machine *machine;
        struct perf_evsel *switch_evsel;
        struct thread *unknown_thread;
        bool timeless_decoding;
        bool sampling_mode;
        bool snapshot_mode;
        bool per_cpu_mmaps;
        bool have_tsc;
        bool data_queued;
        bool est_tsc;
        bool sync_switch;
        bool mispred_all;
        int have_sched_switch;
        u32 pmu_type;
        u64 kernel_start;
        u64 switch_ip;
        u64 ptss_ip;

        struct perf_tsc_conversion tc;
        bool cap_user_time_zero;

        struct itrace_synth_opts synth_opts;

        bool sample_instructions;
        u64 instructions_sample_type;
        u64 instructions_id;

        bool sample_branches;
        u32 branches_filter;
        u64 branches_sample_type;
        u64 branches_id;

        bool sample_transactions;
        u64 transactions_sample_type;
        u64 transactions_id;

        bool sample_ptwrites;
        u64 ptwrites_sample_type;
        u64 ptwrites_id;

        bool sample_pwr_events;
        u64 pwr_events_sample_type;
        u64 mwait_id;
        u64 pwre_id;
        u64 exstop_id;
        u64 pwrx_id;
        u64 cbr_id;

        u64 tsc_bit;
        u64 mtc_bit;
        u64 mtc_freq_bits;
        u32 tsc_ctc_ratio_n;
        u32 tsc_ctc_ratio_d;
        u64 cyc_bit;
        u64 noretcomp_bit;
        unsigned max_non_turbo_ratio;
        unsigned cbr2khz;

        unsigned long num_events;

        char *filter;
        struct addr_filters filts;
};

enum switch_state {
        INTEL_PT_SS_NOT_TRACING,
        INTEL_PT_SS_UNKNOWN,
        INTEL_PT_SS_TRACING,
        INTEL_PT_SS_EXPECTING_SWITCH_EVENT,
        INTEL_PT_SS_EXPECTING_SWITCH_IP,
};

struct intel_pt_queue {
        struct intel_pt *pt;
        unsigned int queue_nr;
        struct auxtrace_buffer *buffer;
        struct auxtrace_buffer *old_buffer;
        void *decoder;
        const struct intel_pt_state *state;
        struct ip_callchain *chain;
        struct branch_stack *last_branch;
        struct branch_stack *last_branch_rb;
        size_t last_branch_pos;
        union perf_event *event_buf;
        bool on_heap;
        bool stop;
        bool step_through_buffers;
        bool use_buffer_pid_tid;
        bool sync_switch;
        pid_t pid, tid;
        int cpu;
        int switch_state;
        pid_t next_tid;
        struct thread *thread;
        bool exclude_kernel;
        bool have_sample;
        u64 time;
        u64 timestamp;
        u32 flags;
        u16 insn_len;
        u64 last_insn_cnt;
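        /*
         * Running cycle and instruction totals used to derive an IPC ratio:
         * ipc_* hold the latest valid totals, while last_in_* / last_br_*
         * record the totals at the previous instruction / branch sample.
         */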
        u64 ipc_insn_cnt;
        u64 ipc_cyc_cnt;
        u64 last_in_insn_cnt;
        u64 last_in_cyc_cnt;
        u64 last_br_insn_cnt;
        u64 last_br_cyc_cnt;
        char insn[INTEL_PT_INSN_BUF_SZ];
};

static void intel_pt_dump(struct intel_pt *pt __maybe_unused,
                          unsigned char *buf, size_t len)
{
        struct intel_pt_pkt packet;
        size_t pos = 0;
        int ret, pkt_len, i;
        char desc[INTEL_PT_PKT_DESC_MAX];
        const char *color = PERF_COLOR_BLUE;

        color_fprintf(stdout, color,
                      ". ... Intel Processor Trace data: size %zu bytes\n",
                      len);

        while (len) {
                ret = intel_pt_get_packet(buf, len, &packet);
                if (ret > 0)
                        pkt_len = ret;
                else
                        pkt_len = 1;
                printf(".");
                color_fprintf(stdout, color, "  %08zx: ", pos);
                for (i = 0; i < pkt_len; i++)
                        color_fprintf(stdout, color, " %02x", buf[i]);
                for (; i < 16; i++)
                        color_fprintf(stdout, color, "   ");
                if (ret > 0) {
                        ret = intel_pt_pkt_desc(&packet, desc,
                                                INTEL_PT_PKT_DESC_MAX);
                        if (ret > 0)
                                color_fprintf(stdout, color, " %s\n", desc);
                } else {
                        color_fprintf(stdout, color, " Bad packet!\n");
                }
                pos += pkt_len;
                buf += pkt_len;
                len -= pkt_len;
        }
}

static void intel_pt_dump_event(struct intel_pt *pt, unsigned char *buf,
                                size_t len)
{
        printf(".\n");
        intel_pt_dump(pt, buf, len);
}

static void intel_pt_log_event(union perf_event *event)
{
        FILE *f = intel_pt_log_fp();

        if (!intel_pt_enable_logging || !f)
                return;

        perf_event__fprintf(event, f);
}

static int intel_pt_do_fix_overlap(struct intel_pt *pt, struct auxtrace_buffer *a,
                                   struct auxtrace_buffer *b)
{
        bool consecutive = false;
        void *start;

        start = intel_pt_find_overlap(a->data, a->size, b->data, b->size,
                                      pt->have_tsc, &consecutive);
        if (!start)
                return -EINVAL;
        b->use_size = b->data + b->size - start;
        b->use_data = start;
        if (b->use_size && consecutive)
                b->consecutive = true;
        return 0;
}

/* This function assumes data is processed sequentially only */
static int intel_pt_get_trace(struct intel_pt_buffer *b, void *data)
{
        struct intel_pt_queue *ptq = data;
        struct auxtrace_buffer *buffer = ptq->buffer;
        struct auxtrace_buffer *old_buffer = ptq->old_buffer;
        struct auxtrace_queue *queue;
        bool might_overlap;

        if (ptq->stop) {
                b->len = 0;
                return 0;
        }

        queue = &ptq->pt->queues.queue_array[ptq->queue_nr];

        buffer = auxtrace_buffer__next(queue, buffer);
        if (!buffer) {
                if (old_buffer)
                        auxtrace_buffer__drop_data(old_buffer);
                b->len = 0;
                return 0;
        }

        ptq->buffer = buffer;

        if (!buffer->data) {
                int fd = perf_data__fd(ptq->pt->session->data);

                buffer->data = auxtrace_buffer__get_data(buffer, fd);
                if (!buffer->data)
                        return -ENOMEM;
        }

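        /*
         * In snapshot or sampling mode, successive buffers can overlap, so
         * trim the start of the new buffer back to where the old one ended.
         */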
        might_overlap = ptq->pt->snapshot_mode || ptq->pt->sampling_mode;
        if (might_overlap && !buffer->consecutive && old_buffer &&
            intel_pt_do_fix_overlap(ptq->pt, old_buffer, buffer))
                return -ENOMEM;

        if (buffer->use_data) {
                b->len = buffer->use_size;
                b->buf = buffer->use_data;
        } else {
                b->len = buffer->size;
                b->buf = buffer->data;
        }
        b->ref_timestamp = buffer->reference;

        if (!old_buffer || (might_overlap && !buffer->consecutive)) {
                b->consecutive = false;
                b->trace_nr = buffer->buffer_nr + 1;
        } else {
                b->consecutive = true;
        }

        if (ptq->step_through_buffers)
                ptq->stop = true;

        if (b->len) {
                if (old_buffer)
                        auxtrace_buffer__drop_data(old_buffer);
                ptq->old_buffer = buffer;
        } else {
                auxtrace_buffer__drop_data(buffer);
                return intel_pt_get_trace(b, data);
        }

        return 0;
}

struct intel_pt_cache_entry {
        struct auxtrace_cache_entry     entry;
        u64                             insn_cnt;
        u64                             byte_cnt;
        enum intel_pt_insn_op           op;
        enum intel_pt_insn_branch       branch;
        int                             length;
        int32_t                         rel;
        char                            insn[INTEL_PT_INSN_BUF_SZ];
};

static int intel_pt_config_div(const char *var, const char *value, void *data)
{
        int *d = data;
        long val;

        if (!strcmp(var, "intel-pt.cache-divisor")) {
                val = strtol(value, NULL, 0);
                if (val > 0 && val <= INT_MAX)
                        *d = val;
        }

        return 0;
}

static int intel_pt_cache_divisor(void)
{
        static int d;

        if (d)
                return d;

        perf_config(intel_pt_config_div, &d);

        if (!d)
                d = 64;

        return d;
}

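/*
 * Heuristic: size the per-dso instruction cache as a power of 2 between
 * 2^10 and 2^21 entries, scaled to the dso size divided by the
 * (configurable) cache divisor.
 */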
static unsigned int intel_pt_cache_size(struct dso *dso,
                                        struct machine *machine)
{
        off_t size;

        size = dso__data_size(dso, machine);
        size /= intel_pt_cache_divisor();
        if (size < 1000)
                return 10;
        if (size > (1 << 21))
                return 21;
        return 32 - __builtin_clz(size);
}

static struct auxtrace_cache *intel_pt_cache(struct dso *dso,
                                             struct machine *machine)
{
        struct auxtrace_cache *c;
        unsigned int bits;

        if (dso->auxtrace_cache)
                return dso->auxtrace_cache;

        bits = intel_pt_cache_size(dso, machine);

        /* Ignoring cache creation failure */
        c = auxtrace_cache__new(bits, sizeof(struct intel_pt_cache_entry), 200);

        dso->auxtrace_cache = c;

        return c;
}

static int intel_pt_cache_add(struct dso *dso, struct machine *machine,
                              u64 offset, u64 insn_cnt, u64 byte_cnt,
                              struct intel_pt_insn *intel_pt_insn)
{
        struct auxtrace_cache *c = intel_pt_cache(dso, machine);
        struct intel_pt_cache_entry *e;
        int err;

        if (!c)
                return -ENOMEM;

        e = auxtrace_cache__alloc_entry(c);
        if (!e)
                return -ENOMEM;

        e->insn_cnt = insn_cnt;
        e->byte_cnt = byte_cnt;
        e->op = intel_pt_insn->op;
        e->branch = intel_pt_insn->branch;
        e->length = intel_pt_insn->length;
        e->rel = intel_pt_insn->rel;
        memcpy(e->insn, intel_pt_insn->buf, INTEL_PT_INSN_BUF_SZ);

        err = auxtrace_cache__add(c, offset, &e->entry);
        if (err)
                auxtrace_cache__free_entry(c, e);

        return err;
}

static struct intel_pt_cache_entry *
intel_pt_cache_lookup(struct dso *dso, struct machine *machine, u64 offset)
{
        struct auxtrace_cache *c = intel_pt_cache(dso, machine);

        if (!c)
                return NULL;

        return auxtrace_cache__lookup(dso->auxtrace_cache, offset);
}

static inline u8 intel_pt_cpumode(struct intel_pt *pt, uint64_t ip)
{
        return ip >= pt->kernel_start ?
               PERF_RECORD_MISC_KERNEL :
               PERF_RECORD_MISC_USER;
}

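/*
 * Walk instructions from *ip until a branch, to_ip or max_insn_cnt is
 * reached.  A fully-walked block is cached per dso, keyed by file offset,
 * to avoid decoding the same code repeatedly.
 */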
static int intel_pt_walk_next_insn(struct intel_pt_insn *intel_pt_insn,
                                   uint64_t *insn_cnt_ptr, uint64_t *ip,
                                   uint64_t to_ip, uint64_t max_insn_cnt,
                                   void *data)
{
        struct intel_pt_queue *ptq = data;
        struct machine *machine = ptq->pt->machine;
        struct thread *thread;
        struct addr_location al;
        unsigned char buf[INTEL_PT_INSN_BUF_SZ];
        ssize_t len;
        int x86_64;
        u8 cpumode;
        u64 offset, start_offset, start_ip;
        u64 insn_cnt = 0;
        bool one_map = true;

        intel_pt_insn->length = 0;

        if (to_ip && *ip == to_ip)
                goto out_no_cache;

        cpumode = intel_pt_cpumode(ptq->pt, *ip);

        thread = ptq->thread;
        if (!thread) {
                if (cpumode != PERF_RECORD_MISC_KERNEL)
                        return -EINVAL;
                thread = ptq->pt->unknown_thread;
        }

        while (1) {
                if (!thread__find_map(thread, cpumode, *ip, &al) || !al.map->dso)
                        return -EINVAL;

                if (al.map->dso->data.status == DSO_DATA_STATUS_ERROR &&
                    dso__data_status_seen(al.map->dso,
                                          DSO_DATA_STATUS_SEEN_ITRACE))
                        return -ENOENT;

                offset = al.map->map_ip(al.map, *ip);

                if (!to_ip && one_map) {
                        struct intel_pt_cache_entry *e;

                        e = intel_pt_cache_lookup(al.map->dso, machine, offset);
                        if (e &&
                            (!max_insn_cnt || e->insn_cnt <= max_insn_cnt)) {
                                *insn_cnt_ptr = e->insn_cnt;
                                *ip += e->byte_cnt;
                                intel_pt_insn->op = e->op;
                                intel_pt_insn->branch = e->branch;
                                intel_pt_insn->length = e->length;
                                intel_pt_insn->rel = e->rel;
                                memcpy(intel_pt_insn->buf, e->insn,
                                       INTEL_PT_INSN_BUF_SZ);
                                intel_pt_log_insn_no_data(intel_pt_insn, *ip);
                                return 0;
                        }
                }

                start_offset = offset;
                start_ip = *ip;

                /* Load maps to ensure dso->is_64_bit has been updated */
                map__load(al.map);

                x86_64 = al.map->dso->is_64_bit;

                while (1) {
                        len = dso__data_read_offset(al.map->dso, machine,
                                                    offset, buf,
                                                    INTEL_PT_INSN_BUF_SZ);
                        if (len <= 0)
                                return -EINVAL;

                        if (intel_pt_get_insn(buf, len, x86_64, intel_pt_insn))
                                return -EINVAL;

                        intel_pt_log_insn(intel_pt_insn, *ip);

                        insn_cnt += 1;

                        if (intel_pt_insn->branch != INTEL_PT_BR_NO_BRANCH)
                                goto out;

                        if (max_insn_cnt && insn_cnt >= max_insn_cnt)
                                goto out_no_cache;

                        *ip += intel_pt_insn->length;

                        if (to_ip && *ip == to_ip)
                                goto out_no_cache;

                        if (*ip >= al.map->end)
                                break;

                        offset += intel_pt_insn->length;
                }
                one_map = false;
        }
out:
        *insn_cnt_ptr = insn_cnt;

        if (!one_map)
                goto out_no_cache;

        /*
         * Didn't lookup in the 'to_ip' case, so do it now to prevent duplicate
         * entries.
         */
        if (to_ip) {
                struct intel_pt_cache_entry *e;

                e = intel_pt_cache_lookup(al.map->dso, machine, start_offset);
                if (e)
                        return 0;
        }

        /* Ignore cache errors */
        intel_pt_cache_add(al.map->dso, machine, start_offset, insn_cnt,
                           *ip - start_ip, intel_pt_insn);

        return 0;

out_no_cache:
        *insn_cnt_ptr = insn_cnt;
        return 0;
}

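/*
 * Return true if a trace stop (TIP.PGD) at this location is explained by
 * the address filters: either a tracestop region was hit, or filter
 * regions exist and none was hit.
 */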
static bool intel_pt_match_pgd_ip(struct intel_pt *pt, uint64_t ip,
                                  uint64_t offset, const char *filename)
{
        struct addr_filter *filt;
        bool have_filter   = false;
        bool hit_tracestop = false;
        bool hit_filter    = false;

        list_for_each_entry(filt, &pt->filts.head, list) {
                if (filt->start)
                        have_filter = true;

                if ((filename && !filt->filename) ||
                    (!filename && filt->filename) ||
                    (filename && strcmp(filename, filt->filename)))
                        continue;

                if (!(offset >= filt->addr && offset < filt->addr + filt->size))
                        continue;

                intel_pt_log("TIP.PGD ip %#"PRIx64" offset %#"PRIx64" in %s hit filter: %s offset %#"PRIx64" size %#"PRIx64"\n",
                             ip, offset, filename ? filename : "[kernel]",
                             filt->start ? "filter" : "stop",
                             filt->addr, filt->size);

                if (filt->start)
                        hit_filter = true;
                else
                        hit_tracestop = true;
        }

        if (!hit_tracestop && !hit_filter)
                intel_pt_log("TIP.PGD ip %#"PRIx64" offset %#"PRIx64" in %s is not in a filter region\n",
                             ip, offset, filename ? filename : "[kernel]");

        return hit_tracestop || (have_filter && !hit_filter);
}

static int __intel_pt_pgd_ip(uint64_t ip, void *data)
{
        struct intel_pt_queue *ptq = data;
        struct thread *thread;
        struct addr_location al;
        u8 cpumode;
        u64 offset;

        if (ip >= ptq->pt->kernel_start)
                return intel_pt_match_pgd_ip(ptq->pt, ip, ip, NULL);

        cpumode = PERF_RECORD_MISC_USER;

        thread = ptq->thread;
        if (!thread)
                return -EINVAL;

        if (!thread__find_map(thread, cpumode, ip, &al) || !al.map->dso)
                return -EINVAL;

        offset = al.map->map_ip(al.map, ip);

        return intel_pt_match_pgd_ip(ptq->pt, ip, offset,
                                     al.map->dso->long_name);
}

static bool intel_pt_pgd_ip(uint64_t ip, void *data)
{
        return __intel_pt_pgd_ip(ip, data) > 0;
}

static bool intel_pt_get_config(struct intel_pt *pt,
                                struct perf_event_attr *attr, u64 *config)
{
        if (attr->type == pt->pmu_type) {
                if (config)
                        *config = attr->config;
                return true;
        }

        return false;
}

static bool intel_pt_exclude_kernel(struct intel_pt *pt)
{
        struct perf_evsel *evsel;

        evlist__for_each_entry(pt->session->evlist, evsel) {
                if (intel_pt_get_config(pt, &evsel->attr, NULL) &&
                    !evsel->attr.exclude_kernel)
                        return false;
        }
        return true;
}

static bool intel_pt_return_compression(struct intel_pt *pt)
{
        struct perf_evsel *evsel;
        u64 config;

        if (!pt->noretcomp_bit)
                return true;

        evlist__for_each_entry(pt->session->evlist, evsel) {
                if (intel_pt_get_config(pt, &evsel->attr, &config) &&
                    (config & pt->noretcomp_bit))
                        return false;
        }
        return true;
}

static bool intel_pt_branch_enable(struct intel_pt *pt)
{
        struct perf_evsel *evsel;
        u64 config;

        evlist__for_each_entry(pt->session->evlist, evsel) {
                if (intel_pt_get_config(pt, &evsel->attr, &config) &&
                    (config & 1) && !(config & 0x2000))
                        return false;
        }
        return true;
}

static unsigned int intel_pt_mtc_period(struct intel_pt *pt)
{
        struct perf_evsel *evsel;
        unsigned int shift;
        u64 config;

        if (!pt->mtc_freq_bits)
                return 0;

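        /* Find the bit position of the MTC frequency field within the config */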
        for (shift = 0, config = pt->mtc_freq_bits; !(config & 1); shift++)
                config >>= 1;

        evlist__for_each_entry(pt->session->evlist, evsel) {
                if (intel_pt_get_config(pt, &evsel->attr, &config))
                        return (config & pt->mtc_freq_bits) >> shift;
        }
        return 0;
}

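/*
 * "Timeless" decoding: the trace either has no TSC packets or the perf
 * events carry no timestamps, so samples cannot be correlated by time.
 */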
static bool intel_pt_timeless_decoding(struct intel_pt *pt)
{
        struct perf_evsel *evsel;
        bool timeless_decoding = true;
        u64 config;

        if (!pt->tsc_bit || !pt->cap_user_time_zero)
                return true;

        evlist__for_each_entry(pt->session->evlist, evsel) {
                if (!(evsel->attr.sample_type & PERF_SAMPLE_TIME))
                        return true;
                if (intel_pt_get_config(pt, &evsel->attr, &config)) {
                        if (config & pt->tsc_bit)
                                timeless_decoding = false;
                        else
                                return true;
                }
        }
        return timeless_decoding;
}

static bool intel_pt_tracing_kernel(struct intel_pt *pt)
{
        struct perf_evsel *evsel;

        evlist__for_each_entry(pt->session->evlist, evsel) {
                if (intel_pt_get_config(pt, &evsel->attr, NULL) &&
                    !evsel->attr.exclude_kernel)
                        return true;
        }
        return false;
}

static bool intel_pt_have_tsc(struct intel_pt *pt)
{
        struct perf_evsel *evsel;
        bool have_tsc = false;
        u64 config;

        if (!pt->tsc_bit)
                return false;

        evlist__for_each_entry(pt->session->evlist, evsel) {
                if (intel_pt_get_config(pt, &evsel->attr, &config)) {
                        if (config & pt->tsc_bit)
                                have_tsc = true;
                        else
                                return false;
                }
        }
        return have_tsc;
}

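/*
 * Convert nanoseconds to TSC ticks by inverting the perf time conversion
 * time = (ticks * time_mult) >> time_shift.  Splitting into quotient and
 * remainder avoids overflow when shifting.
 */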
static u64 intel_pt_ns_to_ticks(const struct intel_pt *pt, u64 ns)
{
        u64 quot, rem;

        quot = ns / pt->tc.time_mult;
        rem  = ns % pt->tc.time_mult;
        return (quot << pt->tc.time_shift) + (rem << pt->tc.time_shift) /
                pt->tc.time_mult;
}

static struct intel_pt_queue *intel_pt_alloc_queue(struct intel_pt *pt,
                                                   unsigned int queue_nr)
{
        struct intel_pt_params params = { .get_trace = 0, };
        struct perf_env *env = pt->machine->env;
        struct intel_pt_queue *ptq;

        ptq = zalloc(sizeof(struct intel_pt_queue));
        if (!ptq)
                return NULL;

        if (pt->synth_opts.callchain) {
                size_t sz = sizeof(struct ip_callchain);

                /* Add 1 to callchain_sz for callchain context */
                sz += (pt->synth_opts.callchain_sz + 1) * sizeof(u64);
                ptq->chain = zalloc(sz);
                if (!ptq->chain)
                        goto out_free;
        }

        if (pt->synth_opts.last_branch) {
                size_t sz = sizeof(struct branch_stack);

                sz += pt->synth_opts.last_branch_sz *
                      sizeof(struct branch_entry);
                ptq->last_branch = zalloc(sz);
                if (!ptq->last_branch)
                        goto out_free;
                ptq->last_branch_rb = zalloc(sz);
                if (!ptq->last_branch_rb)
                        goto out_free;
        }

        ptq->event_buf = malloc(PERF_SAMPLE_MAX_SIZE);
        if (!ptq->event_buf)
                goto out_free;

        ptq->pt = pt;
        ptq->queue_nr = queue_nr;
        ptq->exclude_kernel = intel_pt_exclude_kernel(pt);
        ptq->pid = -1;
        ptq->tid = -1;
        ptq->cpu = -1;
        ptq->next_tid = -1;

        params.get_trace = intel_pt_get_trace;
        params.walk_insn = intel_pt_walk_next_insn;
        params.data = ptq;
        params.return_compression = intel_pt_return_compression(pt);
        params.branch_enable = intel_pt_branch_enable(pt);
        params.max_non_turbo_ratio = pt->max_non_turbo_ratio;
        params.mtc_period = intel_pt_mtc_period(pt);
        params.tsc_ctc_ratio_n = pt->tsc_ctc_ratio_n;
        params.tsc_ctc_ratio_d = pt->tsc_ctc_ratio_d;

        if (pt->filts.cnt > 0)
                params.pgd_ip = intel_pt_pgd_ip;

        if (pt->synth_opts.instructions) {
                if (pt->synth_opts.period) {
                        switch (pt->synth_opts.period_type) {
                        case PERF_ITRACE_PERIOD_INSTRUCTIONS:
                                params.period_type =
                                                INTEL_PT_PERIOD_INSTRUCTIONS;
                                params.period = pt->synth_opts.period;
                                break;
                        case PERF_ITRACE_PERIOD_TICKS:
                                params.period_type = INTEL_PT_PERIOD_TICKS;
                                params.period = pt->synth_opts.period;
                                break;
                        case PERF_ITRACE_PERIOD_NANOSECS:
                                params.period_type = INTEL_PT_PERIOD_TICKS;
                                params.period = intel_pt_ns_to_ticks(pt,
                                                        pt->synth_opts.period);
                                break;
                        default:
                                break;
                        }
                }

                if (!params.period) {
                        params.period_type = INTEL_PT_PERIOD_INSTRUCTIONS;
                        params.period = 1;
                }
        }

        if (env->cpuid && !strncmp(env->cpuid, "GenuineIntel,6,92,", 18))
                params.flags |= INTEL_PT_FUP_WITH_NLIP;

        ptq->decoder = intel_pt_decoder_new(&params);
        if (!ptq->decoder)
                goto out_free;

        return ptq;

out_free:
        zfree(&ptq->event_buf);
        zfree(&ptq->last_branch);
        zfree(&ptq->last_branch_rb);
        zfree(&ptq->chain);
        free(ptq);
        return NULL;
}

static void intel_pt_free_queue(void *priv)
{
        struct intel_pt_queue *ptq = priv;

        if (!ptq)
                return;
        thread__zput(ptq->thread);
        intel_pt_decoder_free(ptq->decoder);
        zfree(&ptq->event_buf);
        zfree(&ptq->last_branch);
        zfree(&ptq->last_branch_rb);
        zfree(&ptq->chain);
        free(ptq);
}

static void intel_pt_set_pid_tid_cpu(struct intel_pt *pt,
                                     struct auxtrace_queue *queue)
{
        struct intel_pt_queue *ptq = queue->priv;

        if (queue->tid == -1 || pt->have_sched_switch) {
                ptq->tid = machine__get_current_tid(pt->machine, ptq->cpu);
                thread__zput(ptq->thread);
        }

        if (!ptq->thread && ptq->tid != -1)
                ptq->thread = machine__find_thread(pt->machine, -1, ptq->tid);

        if (ptq->thread) {
                ptq->pid = ptq->thread->pid_;
                if (queue->cpu == -1)
                        ptq->cpu = ptq->thread->cpu;
        }
}

static void intel_pt_sample_flags(struct intel_pt_queue *ptq)
{
        if (ptq->state->flags & INTEL_PT_ABORT_TX) {
                ptq->flags = PERF_IP_FLAG_BRANCH | PERF_IP_FLAG_TX_ABORT;
        } else if (ptq->state->flags & INTEL_PT_ASYNC) {
                if (ptq->state->to_ip)
                        ptq->flags = PERF_IP_FLAG_BRANCH | PERF_IP_FLAG_CALL |
                                     PERF_IP_FLAG_ASYNC |
                                     PERF_IP_FLAG_INTERRUPT;
                else
                        ptq->flags = PERF_IP_FLAG_BRANCH |
                                     PERF_IP_FLAG_TRACE_END;
                ptq->insn_len = 0;
        } else {
                if (ptq->state->from_ip)
                        ptq->flags = intel_pt_insn_type(ptq->state->insn_op);
                else
                        ptq->flags = PERF_IP_FLAG_BRANCH |
                                     PERF_IP_FLAG_TRACE_BEGIN;
                if (ptq->state->flags & INTEL_PT_IN_TX)
                        ptq->flags |= PERF_IP_FLAG_IN_TX;
                ptq->insn_len = ptq->state->insn_len;
                memcpy(ptq->insn, ptq->state->insn, INTEL_PT_INSN_BUF_SZ);
        }

        if (ptq->state->type & INTEL_PT_TRACE_BEGIN)
                ptq->flags |= PERF_IP_FLAG_TRACE_BEGIN;
        if (ptq->state->type & INTEL_PT_TRACE_END)
                ptq->flags |= PERF_IP_FLAG_TRACE_END;
}

static int intel_pt_setup_queue(struct intel_pt *pt,
                                struct auxtrace_queue *queue,
                                unsigned int queue_nr)
{
        struct intel_pt_queue *ptq = queue->priv;

        if (list_empty(&queue->head))
                return 0;

        if (!ptq) {
                ptq = intel_pt_alloc_queue(pt, queue_nr);
                if (!ptq)
                        return -ENOMEM;
                queue->priv = ptq;

                if (queue->cpu != -1)
                        ptq->cpu = queue->cpu;
                ptq->tid = queue->tid;

                if (pt->sampling_mode && !pt->snapshot_mode &&
                    pt->timeless_decoding)
                        ptq->step_through_buffers = true;

                ptq->sync_switch = pt->sync_switch;
        }

        if (!ptq->on_heap &&
            (!ptq->sync_switch ||
             ptq->switch_state != INTEL_PT_SS_EXPECTING_SWITCH_EVENT)) {
                const struct intel_pt_state *state;
                int ret;

                if (pt->timeless_decoding)
                        return 0;

                intel_pt_log("queue %u getting timestamp\n", queue_nr);
                intel_pt_log("queue %u decoding cpu %d pid %d tid %d\n",
                             queue_nr, ptq->cpu, ptq->pid, ptq->tid);
                while (1) {
                        state = intel_pt_decode(ptq->decoder);
                        if (state->err) {
                                if (state->err == INTEL_PT_ERR_NODATA) {
                                        intel_pt_log("queue %u has no timestamp\n",
                                                     queue_nr);
                                        return 0;
                                }
                                continue;
                        }
                        if (state->timestamp)
                                break;
                }

                ptq->timestamp = state->timestamp;
                intel_pt_log("queue %u timestamp 0x%" PRIx64 "\n",
                             queue_nr, ptq->timestamp);
                ptq->state = state;
                ptq->have_sample = true;
                intel_pt_sample_flags(ptq);
                ret = auxtrace_heap__add(&pt->heap, queue_nr, ptq->timestamp);
                if (ret)
                        return ret;
                ptq->on_heap = true;
        }

        return 0;
}

static int intel_pt_setup_queues(struct intel_pt *pt)
{
        unsigned int i;
        int ret;

        for (i = 0; i < pt->queues.nr_queues; i++) {
                ret = intel_pt_setup_queue(pt, &pt->queues.queue_array[i], i);
                if (ret)
                        return ret;
        }
        return 0;
}

static inline void intel_pt_copy_last_branch_rb(struct intel_pt_queue *ptq)
{
        struct branch_stack *bs_src = ptq->last_branch_rb;
        struct branch_stack *bs_dst = ptq->last_branch;
        size_t nr = 0;

        bs_dst->nr = bs_src->nr;

        if (!bs_src->nr)
                return;

        nr = ptq->pt->synth_opts.last_branch_sz - ptq->last_branch_pos;
        memcpy(&bs_dst->entries[0],
               &bs_src->entries[ptq->last_branch_pos],
               sizeof(struct branch_entry) * nr);

        if (bs_src->nr >= ptq->pt->synth_opts.last_branch_sz) {
                memcpy(&bs_dst->entries[nr],
                       &bs_src->entries[0],
                       sizeof(struct branch_entry) * ptq->last_branch_pos);
        }
}

static inline void intel_pt_reset_last_branch_rb(struct intel_pt_queue *ptq)
{
        ptq->last_branch_pos = 0;
        ptq->last_branch_rb->nr = 0;
}

static void intel_pt_update_last_branch_rb(struct intel_pt_queue *ptq)
{
        const struct intel_pt_state *state = ptq->state;
        struct branch_stack *bs = ptq->last_branch_rb;
        struct branch_entry *be;

        if (!ptq->last_branch_pos)
                ptq->last_branch_pos = ptq->pt->synth_opts.last_branch_sz;

        ptq->last_branch_pos -= 1;

        be              = &bs->entries[ptq->last_branch_pos];
        be->from        = state->from_ip;
        be->to          = state->to_ip;
        be->flags.abort = !!(state->flags & INTEL_PT_ABORT_TX);
        be->flags.in_tx = !!(state->flags & INTEL_PT_IN_TX);
        /* No support for mispredict */
        be->flags.mispred = ptq->pt->mispred_all;

        if (bs->nr < ptq->pt->synth_opts.last_branch_sz)
                bs->nr += 1;
}

static inline bool intel_pt_skip_event(struct intel_pt *pt)
{
        return pt->synth_opts.initial_skip &&
               pt->num_events++ < pt->synth_opts.initial_skip;
}

static void intel_pt_prep_b_sample(struct intel_pt *pt,
                                   struct intel_pt_queue *ptq,
                                   union perf_event *event,
                                   struct perf_sample *sample)
{
        if (!pt->timeless_decoding)
                sample->time = tsc_to_perf_time(ptq->timestamp, &pt->tc);

        sample->ip = ptq->state->from_ip;
        sample->cpumode = intel_pt_cpumode(pt, sample->ip);
        sample->pid = ptq->pid;
        sample->tid = ptq->tid;
        sample->addr = ptq->state->to_ip;
        sample->period = 1;
        sample->cpu = ptq->cpu;
        sample->flags = ptq->flags;
        sample->insn_len = ptq->insn_len;
        memcpy(sample->insn, ptq->insn, INTEL_PT_INSN_BUF_SZ);

        event->sample.header.type = PERF_RECORD_SAMPLE;
        event->sample.header.misc = sample->cpumode;
        event->sample.header.size = sizeof(struct perf_event_header);
}

static int intel_pt_inject_event(union perf_event *event,
                                 struct perf_sample *sample, u64 type)
{
        event->header.size = perf_event__sample_event_size(sample, type, 0);
        return perf_event__synthesize_sample(event, type, 0, sample);
}

static inline int intel_pt_opt_inject(struct intel_pt *pt,
                                      union perf_event *event,
                                      struct perf_sample *sample, u64 type)
{
        if (!pt->synth_opts.inject)
                return 0;

        return intel_pt_inject_event(event, sample, type);
}

static int intel_pt_deliver_synth_b_event(struct intel_pt *pt,
                                          union perf_event *event,
                                          struct perf_sample *sample, u64 type)
{
        int ret;

        ret = intel_pt_opt_inject(pt, event, sample, type);
        if (ret)
                return ret;

        ret = perf_session__deliver_synth_event(pt->session, event, sample);
        if (ret)
                pr_err("Intel PT: failed to deliver event, error %d\n", ret);

        return ret;
}

static int intel_pt_synth_branch_sample(struct intel_pt_queue *ptq)
{
        struct intel_pt *pt = ptq->pt;
        union perf_event *event = ptq->event_buf;
        struct perf_sample sample = { .ip = 0, };
        struct dummy_branch_stack {
                u64                     nr;
                struct branch_entry     entries;
        } dummy_bs;

        if (pt->branches_filter && !(pt->branches_filter & ptq->flags))
                return 0;

        if (intel_pt_skip_event(pt))
                return 0;

        intel_pt_prep_b_sample(pt, ptq, event, &sample);

        sample.id = ptq->pt->branches_id;
        sample.stream_id = ptq->pt->branches_id;

        /*
         * perf report cannot handle events without a branch stack when using
         * SORT_MODE__BRANCH so make a dummy one.
         */
        if (pt->synth_opts.last_branch && sort__mode == SORT_MODE__BRANCH) {
                dummy_bs = (struct dummy_branch_stack){
                        .nr = 1,
                        .entries = {
                                .from = sample.ip,
                                .to = sample.addr,
                        },
                };
                sample.branch_stack = (struct branch_stack *)&dummy_bs;
        }

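        /*
         * Attach the cycle and instruction counts for the interval since the
         * last branch sample, so consumers can derive an IPC ratio.  Counts
         * are attached only when the cycle count has advanced.
         */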
        sample.cyc_cnt = ptq->ipc_cyc_cnt - ptq->last_br_cyc_cnt;
        if (sample.cyc_cnt) {
                sample.insn_cnt = ptq->ipc_insn_cnt - ptq->last_br_insn_cnt;
                ptq->last_br_insn_cnt = ptq->ipc_insn_cnt;
                ptq->last_br_cyc_cnt = ptq->ipc_cyc_cnt;
        }

        return intel_pt_deliver_synth_b_event(pt, event, &sample,
                                              pt->branches_sample_type);
}

static void intel_pt_prep_sample(struct intel_pt *pt,
                                 struct intel_pt_queue *ptq,
                                 union perf_event *event,
                                 struct perf_sample *sample)
{
        intel_pt_prep_b_sample(pt, ptq, event, sample);

        if (pt->synth_opts.callchain) {
                thread_stack__sample(ptq->thread, ptq->cpu, ptq->chain,
                                     pt->synth_opts.callchain_sz + 1,
                                     sample->ip, pt->kernel_start);
                sample->callchain = ptq->chain;
        }

        if (pt->synth_opts.last_branch) {
                intel_pt_copy_last_branch_rb(ptq);
                sample->branch_stack = ptq->last_branch;
        }
}

static inline int intel_pt_deliver_synth_event(struct intel_pt *pt,
                                               struct intel_pt_queue *ptq,
                                               union perf_event *event,
                                               struct perf_sample *sample,
                                               u64 type)
{
        int ret;

        ret = intel_pt_deliver_synth_b_event(pt, event, sample, type);

        if (pt->synth_opts.last_branch)
                intel_pt_reset_last_branch_rb(ptq);

        return ret;
}

static int intel_pt_synth_instruction_sample(struct intel_pt_queue *ptq)
{
        struct intel_pt *pt = ptq->pt;
        union perf_event *event = ptq->event_buf;
        struct perf_sample sample = { .ip = 0, };

        if (intel_pt_skip_event(pt))
                return 0;

        intel_pt_prep_sample(pt, ptq, event, &sample);

        sample.id = ptq->pt->instructions_id;
        sample.stream_id = ptq->pt->instructions_id;
        sample.period = ptq->state->tot_insn_cnt - ptq->last_insn_cnt;

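        /* Likewise attach interval counts for deriving IPC on instruction samples */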
        sample.cyc_cnt = ptq->ipc_cyc_cnt - ptq->last_in_cyc_cnt;
        if (sample.cyc_cnt) {
                sample.insn_cnt = ptq->ipc_insn_cnt - ptq->last_in_insn_cnt;
                ptq->last_in_insn_cnt = ptq->ipc_insn_cnt;
                ptq->last_in_cyc_cnt = ptq->ipc_cyc_cnt;
        }

        ptq->last_insn_cnt = ptq->state->tot_insn_cnt;

        return intel_pt_deliver_synth_event(pt, ptq, event, &sample,
                                            pt->instructions_sample_type);
}

static int intel_pt_synth_transaction_sample(struct intel_pt_queue *ptq)
{
        struct intel_pt *pt = ptq->pt;
        union perf_event *event = ptq->event_buf;
        struct perf_sample sample = { .ip = 0, };

        if (intel_pt_skip_event(pt))
                return 0;

        intel_pt_prep_sample(pt, ptq, event, &sample);

        sample.id = ptq->pt->transactions_id;
        sample.stream_id = ptq->pt->transactions_id;

        return intel_pt_deliver_synth_event(pt, ptq, event, &sample,
                                            pt->transactions_sample_type);
}

static void intel_pt_prep_p_sample(struct intel_pt *pt,
                                   struct intel_pt_queue *ptq,
                                   union perf_event *event,
                                   struct perf_sample *sample)
{
        intel_pt_prep_sample(pt, ptq, event, sample);

        /*
         * Zero IP is used to mean "trace start" but that is not the case for
         * power or PTWRITE events with no IP, so clear the flags.
         */
        if (!sample->ip)
                sample->flags = 0;
}

static int intel_pt_synth_ptwrite_sample(struct intel_pt_queue *ptq)
{
        struct intel_pt *pt = ptq->pt;
        union perf_event *event = ptq->event_buf;
        struct perf_sample sample = { .ip = 0, };
        struct perf_synth_intel_ptwrite raw;

        if (intel_pt_skip_event(pt))
                return 0;

        intel_pt_prep_p_sample(pt, ptq, event, &sample);

        sample.id = ptq->pt->ptwrites_id;
        sample.stream_id = ptq->pt->ptwrites_id;

        raw.flags = 0;
        raw.ip = !!(ptq->state->flags & INTEL_PT_FUP_IP);
        raw.payload = cpu_to_le64(ptq->state->ptw_payload);

        sample.raw_size = perf_synth__raw_size(raw);
        sample.raw_data = perf_synth__raw_data(&raw);

        return intel_pt_deliver_synth_event(pt, ptq, event, &sample,
                                            pt->ptwrites_sample_type);
}

static int intel_pt_synth_cbr_sample(struct intel_pt_queue *ptq)
{
        struct intel_pt *pt = ptq->pt;
        union perf_event *event = ptq->event_buf;
        struct perf_sample sample = { .ip = 0, };
        struct perf_synth_intel_cbr raw;
        u32 flags;

        if (intel_pt_skip_event(pt))
                return 0;

        intel_pt_prep_p_sample(pt, ptq, event, &sample);

        sample.id = ptq->pt->cbr_id;
        sample.stream_id = ptq->pt->cbr_id;

        flags = (u16)ptq->state->cbr_payload | (pt->max_non_turbo_ratio << 16);
        raw.flags = cpu_to_le32(flags);
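        /* Note: raw.cbr appears to overlay the low byte of raw.flags, so it is set above */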
        raw.freq = cpu_to_le32(raw.cbr * pt->cbr2khz);
        raw.reserved3 = 0;

        sample.raw_size = perf_synth__raw_size(raw);
        sample.raw_data = perf_synth__raw_data(&raw);

        return intel_pt_deliver_synth_event(pt, ptq, event, &sample,
                                            pt->pwr_events_sample_type);
}

static int intel_pt_synth_mwait_sample(struct intel_pt_queue *ptq)
{
        struct intel_pt *pt = ptq->pt;
        union perf_event *event = ptq->event_buf;
        struct perf_sample sample = { .ip = 0, };
        struct perf_synth_intel_mwait raw;

        if (intel_pt_skip_event(pt))
                return 0;

        intel_pt_prep_p_sample(pt, ptq, event, &sample);

        sample.id = ptq->pt->mwait_id;
        sample.stream_id = ptq->pt->mwait_id;

        raw.reserved = 0;
        raw.payload = cpu_to_le64(ptq->state->mwait_payload);

        sample.raw_size = perf_synth__raw_size(raw);
        sample.raw_data = perf_synth__raw_data(&raw);

        return intel_pt_deliver_synth_event(pt, ptq, event, &sample,
                                            pt->pwr_events_sample_type);
}

static int intel_pt_synth_pwre_sample(struct intel_pt_queue *ptq)
{
        struct intel_pt *pt = ptq->pt;
        union perf_event *event = ptq->event_buf;
        struct perf_sample sample = { .ip = 0, };
        struct perf_synth_intel_pwre raw;

        if (intel_pt_skip_event(pt))
                return 0;

        intel_pt_prep_p_sample(pt, ptq, event, &sample);

        sample.id = ptq->pt->pwre_id;
        sample.stream_id = ptq->pt->pwre_id;

        raw.reserved = 0;
        raw.payload = cpu_to_le64(ptq->state->pwre_payload);

        sample.raw_size = perf_synth__raw_size(raw);
        sample.raw_data = perf_synth__raw_data(&raw);

        return intel_pt_deliver_synth_event(pt, ptq, event, &sample,
                                            pt->pwr_events_sample_type);
}

static int intel_pt_synth_exstop_sample(struct intel_pt_queue *ptq)
{
        struct intel_pt *pt = ptq->pt;
        union perf_event *event = ptq->event_buf;
        struct perf_sample sample = { .ip = 0, };
        struct perf_synth_intel_exstop raw;

        if (intel_pt_skip_event(pt))
                return 0;

        intel_pt_prep_p_sample(pt, ptq, event, &sample);

        sample.id = ptq->pt->exstop_id;
        sample.stream_id = ptq->pt->exstop_id;

        raw.flags = 0;
        raw.ip = !!(ptq->state->flags & INTEL_PT_FUP_IP);

        sample.raw_size = perf_synth__raw_size(raw);
        sample.raw_data = perf_synth__raw_data(&raw);

        return intel_pt_deliver_synth_event(pt, ptq, event, &sample,
                                            pt->pwr_events_sample_type);
}

static int intel_pt_synth_pwrx_sample(struct intel_pt_queue *ptq)
{
        struct intel_pt *pt = ptq->pt;
        union perf_event *event = ptq->event_buf;
        struct perf_sample sample = { .ip = 0, };
        struct perf_synth_intel_pwrx raw;

        if (intel_pt_skip_event(pt))
                return 0;

        intel_pt_prep_p_sample(pt, ptq, event, &sample);

        sample.id = ptq->pt->pwrx_id;
        sample.stream_id = ptq->pt->pwrx_id;

        raw.reserved = 0;
        raw.payload = cpu_to_le64(ptq->state->pwrx_payload);

        sample.raw_size = perf_synth__raw_size(raw);
        sample.raw_data = perf_synth__raw_data(&raw);

        return intel_pt_deliver_synth_event(pt, ptq, event, &sample,
                                            pt->pwr_events_sample_type);
}

static int intel_pt_synth_error(struct intel_pt *pt, int code, int cpu,
                                pid_t pid, pid_t tid, u64 ip, u64 timestamp)
{
        union perf_event event;
        char msg[MAX_AUXTRACE_ERROR_MSG];
        int err;

        intel_pt__strerror(code, msg, MAX_AUXTRACE_ERROR_MSG);

        auxtrace_synth_error(&event.auxtrace_error, PERF_AUXTRACE_ERROR_ITRACE,
                             code, cpu, pid, tid, ip, msg, timestamp);

        err = perf_session__deliver_synth_event(pt->session, &event, NULL);
        if (err)
                pr_err("Intel Processor Trace: failed to deliver error event, error %d\n",
                       err);

        return err;
}

static int intel_ptq_synth_error(struct intel_pt_queue *ptq,
                                 const struct intel_pt_state *state)
{
        struct intel_pt *pt = ptq->pt;
        u64 tm = ptq->timestamp;

        tm = pt->timeless_decoding ? 0 : tsc_to_perf_time(tm, &pt->tc);

        return intel_pt_synth_error(pt, state->err, ptq->cpu, ptq->pid,
                                    ptq->tid, state->from_ip, tm);
}

static int intel_pt_next_tid(struct intel_pt *pt, struct intel_pt_queue *ptq)
{
        struct auxtrace_queue *queue;
        pid_t tid = ptq->next_tid;
        int err;

        if (tid == -1)
                return 0;

        intel_pt_log("switch: cpu %d tid %d\n", ptq->cpu, tid);

        err = machine__set_current_tid(pt->machine, ptq->cpu, -1, tid);

        queue = &pt->queues.queue_array[ptq->queue_nr];
        intel_pt_set_pid_tid_cpu(pt, queue);

        ptq->next_tid = -1;

        return err;
}

static inline bool intel_pt_is_switch_ip(struct intel_pt_queue *ptq, u64 ip)
{
        struct intel_pt *pt = ptq->pt;

        return ip == pt->switch_ip &&
               (ptq->flags & PERF_IP_FLAG_BRANCH) &&
               !(ptq->flags & (PERF_IP_FLAG_CONDITIONAL | PERF_IP_FLAG_ASYNC |
                               PERF_IP_FLAG_INTERRUPT | PERF_IP_FLAG_TX_ABORT));
}

#define INTEL_PT_PWR_EVT (INTEL_PT_MWAIT_OP | INTEL_PT_PWR_ENTRY | \
                          INTEL_PT_EX_STOP | INTEL_PT_PWR_EXIT | \
                          INTEL_PT_CBR_CHG)

static int intel_pt_sample(struct intel_pt_queue *ptq)
{
        const struct intel_pt_state *state = ptq->state;
        struct intel_pt *pt = ptq->pt;
        int err;

        if (!ptq->have_sample)
                return 0;

        ptq->have_sample = false;

        if (ptq->state->tot_cyc_cnt > ptq->ipc_cyc_cnt) {
                /*
                 * Cycle count and instruction count only go together to create
                 * a valid IPC ratio when the cycle count changes.
                 */
                ptq->ipc_insn_cnt = ptq->state->tot_insn_cnt;
                ptq->ipc_cyc_cnt = ptq->state->tot_cyc_cnt;
        }

        if (pt->sample_pwr_events && (state->type & INTEL_PT_PWR_EVT)) {
                if (state->type & INTEL_PT_CBR_CHG) {
                        err = intel_pt_synth_cbr_sample(ptq);
                        if (err)
                                return err;
                }
                if (state->type & INTEL_PT_MWAIT_OP) {
                        err = intel_pt_synth_mwait_sample(ptq);
                        if (err)
                                return err;
                }
                if (state->type & INTEL_PT_PWR_ENTRY) {
                        err = intel_pt_synth_pwre_sample(ptq);
                        if (err)
                                return err;
                }
                if (state->type & INTEL_PT_EX_STOP) {
                        err = intel_pt_synth_exstop_sample(ptq);
                        if (err)
                                return err;
                }
                if (state->type & INTEL_PT_PWR_EXIT) {
                        err = intel_pt_synth_pwrx_sample(ptq);
                        if (err)
                                return err;
                }
        }

        if (pt->sample_instructions && (state->type & INTEL_PT_INSTRUCTION)) {
                err = intel_pt_synth_instruction_sample(ptq);
                if (err)
                        return err;
        }

        if (pt->sample_transactions && (state->type & INTEL_PT_TRANSACTION)) {
                err = intel_pt_synth_transaction_sample(ptq);
                if (err)
                        return err;
        }

        if (pt->sample_ptwrites && (state->type & INTEL_PT_PTW)) {
                err = intel_pt_synth_ptwrite_sample(ptq);
                if (err)
                        return err;
        }

        if (!(state->type & INTEL_PT_BRANCH))
                return 0;

        if (pt->synth_opts.callchain || pt->synth_opts.thread_stack)
                thread_stack__event(ptq->thread, ptq->cpu, ptq->flags, state->from_ip,
                                    state->to_ip, ptq->insn_len,
                                    state->trace_nr);
        else
                thread_stack__set_trace_nr(ptq->thread, ptq->cpu, state->trace_nr);

        if (pt->sample_branches) {
                err = intel_pt_synth_branch_sample(ptq);
                if (err)
                        return err;
        }

        if (pt->synth_opts.last_branch)
                intel_pt_update_last_branch_rb(ptq);

        if (!ptq->sync_switch)
                return 0;

1588         if (intel_pt_is_switch_ip(ptq, state->to_ip)) {
1589                 switch (ptq->switch_state) {
1590                 case INTEL_PT_SS_NOT_TRACING:
1591                 case INTEL_PT_SS_UNKNOWN:
1592                 case INTEL_PT_SS_EXPECTING_SWITCH_IP:
1593                         err = intel_pt_next_tid(pt, ptq);
1594                         if (err)
1595                                 return err;
1596                         ptq->switch_state = INTEL_PT_SS_TRACING;
1597                         break;
1598                 default:
1599                         ptq->switch_state = INTEL_PT_SS_EXPECTING_SWITCH_EVENT;
1600                         return 1;
1601                 }
1602         } else if (!state->to_ip) {
1603                 ptq->switch_state = INTEL_PT_SS_NOT_TRACING;
1604         } else if (ptq->switch_state == INTEL_PT_SS_NOT_TRACING) {
1605                 ptq->switch_state = INTEL_PT_SS_UNKNOWN;
1606         } else if (ptq->switch_state == INTEL_PT_SS_UNKNOWN &&
1607                    state->to_ip == pt->ptss_ip &&
1608                    (ptq->flags & PERF_IP_FLAG_CALL)) {
1609                 ptq->switch_state = INTEL_PT_SS_TRACING;
1610         }
1611
1612         return 0;
1613 }
1614
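/*
 * Look up the kernel address of __switch_to, which the decoder uses to
 * recognize context switches in the trace, and optionally the address the
 * sched_switch event is emitted from (*ptss_ip), which differs depending on
 * whether the sched_switch tracepoint is being used.
 */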
1615 static u64 intel_pt_switch_ip(struct intel_pt *pt, u64 *ptss_ip)
1616 {
1617         struct machine *machine = pt->machine;
1618         struct map *map;
1619         struct symbol *sym, *start;
1620         u64 ip, switch_ip = 0;
1621         const char *ptss;
1622
1623         if (ptss_ip)
1624                 *ptss_ip = 0;
1625
1626         map = machine__kernel_map(machine);
1627         if (!map)
1628                 return 0;
1629
1630         if (map__load(map))
1631                 return 0;
1632
1633         start = dso__first_symbol(map->dso);
1634
1635         for (sym = start; sym; sym = dso__next_symbol(sym)) {
1636                 if (sym->binding == STB_GLOBAL &&
1637                     !strcmp(sym->name, "__switch_to")) {
1638                         ip = map->unmap_ip(map, sym->start);
1639                         if (ip >= map->start && ip < map->end) {
1640                                 switch_ip = ip;
1641                                 break;
1642                         }
1643                 }
1644         }
1645
1646         if (!switch_ip || !ptss_ip)
1647                 return 0;
1648
1649         if (pt->have_sched_switch == 1)
1650                 ptss = "perf_trace_sched_switch";
1651         else
1652                 ptss = "__perf_event_task_sched_out";
1653
1654         for (sym = start; sym; sym = dso__next_symbol(sym)) {
1655                 if (!strcmp(sym->name, ptss)) {
1656                         ip = map->unmap_ip(map, sym->start);
1657                         if (ip >= map->start && ip < map->end) {
1658                                 *ptss_ip = ip;
1659                                 break;
1660                         }
1661                 }
1662         }
1663
1664         return switch_ip;
1665 }
1666
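/*
 * "sync_switch" mode defers tid changes until the trace itself reaches the
 * switch IP, keeping decoding in step with context-switch events.  Enable it
 * for the trace as a whole and for every existing queue.
 */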
1667 static void intel_pt_enable_sync_switch(struct intel_pt *pt)
1668 {
1669         unsigned int i;
1670
1671         pt->sync_switch = true;
1672
1673         for (i = 0; i < pt->queues.nr_queues; i++) {
1674                 struct auxtrace_queue *queue = &pt->queues.queue_array[i];
1675                 struct intel_pt_queue *ptq = queue->priv;
1676
1677                 if (ptq)
1678                         ptq->sync_switch = true;
1679         }
1680 }
1681
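/*
 * Decode one queue until its timestamp reaches *timestamp.  Returns 0 when
 * that happens (with *timestamp updated), a positive value when the queue
 * should come off the heap (no more data, or waiting for a context-switch
 * event), or a negative error code.
 */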
1682 static int intel_pt_run_decoder(struct intel_pt_queue *ptq, u64 *timestamp)
1683 {
1684         const struct intel_pt_state *state = ptq->state;
1685         struct intel_pt *pt = ptq->pt;
1686         int err;
1687
1688         if (!pt->kernel_start) {
1689                 pt->kernel_start = machine__kernel_start(pt->machine);
1690                 if (pt->per_cpu_mmaps &&
1691                     (pt->have_sched_switch == 1 || pt->have_sched_switch == 3) &&
1692                     !pt->timeless_decoding && intel_pt_tracing_kernel(pt) &&
1693                     !pt->sampling_mode) {
1694                         pt->switch_ip = intel_pt_switch_ip(pt, &pt->ptss_ip);
1695                         if (pt->switch_ip) {
1696                                 intel_pt_log("switch_ip: %"PRIx64" ptss_ip: %"PRIx64"\n",
1697                                              pt->switch_ip, pt->ptss_ip);
1698                                 intel_pt_enable_sync_switch(pt);
1699                         }
1700                 }
1701         }
1702
1703         intel_pt_log("queue %u decoding cpu %d pid %d tid %d\n",
1704                      ptq->queue_nr, ptq->cpu, ptq->pid, ptq->tid);
1705         while (1) {
1706                 err = intel_pt_sample(ptq);
1707                 if (err)
1708                         return err;
1709
1710                 state = intel_pt_decode(ptq->decoder);
1711                 if (state->err) {
1712                         if (state->err == INTEL_PT_ERR_NODATA)
1713                                 return 1;
1714                         if (ptq->sync_switch &&
1715                             state->from_ip >= pt->kernel_start) {
1716                                 ptq->sync_switch = false;
1717                                 intel_pt_next_tid(pt, ptq);
1718                         }
1719                         if (pt->synth_opts.errors) {
1720                                 err = intel_ptq_synth_error(ptq, state);
1721                                 if (err)
1722                                         return err;
1723                         }
1724                         continue;
1725                 }
1726
1727                 ptq->state = state;
1728                 ptq->have_sample = true;
1729                 intel_pt_sample_flags(ptq);
1730
1731                 /* Use estimated TSC upon return to user space */
1732                 if (pt->est_tsc &&
1733                     (state->from_ip >= pt->kernel_start || !state->from_ip) &&
1734                     state->to_ip && state->to_ip < pt->kernel_start) {
1735                         intel_pt_log("TSC %"PRIx64" est. TSC %"PRIx64"\n",
1736                                      state->timestamp, state->est_timestamp);
1737                         ptq->timestamp = state->est_timestamp;
1738                 /* Use estimated TSC in unknown switch state */
1739                 } else if (ptq->sync_switch &&
1740                            ptq->switch_state == INTEL_PT_SS_UNKNOWN &&
1741                            intel_pt_is_switch_ip(ptq, state->to_ip) &&
1742                            ptq->next_tid == -1) {
1743                         intel_pt_log("TSC %"PRIx64" est. TSC %"PRIx64"\n",
1744                                      state->timestamp, state->est_timestamp);
1745                         ptq->timestamp = state->est_timestamp;
1746                 } else if (state->timestamp > ptq->timestamp) {
1747                         ptq->timestamp = state->timestamp;
1748                 }
1749
1750                 if (!pt->timeless_decoding && ptq->timestamp >= *timestamp) {
1751                         *timestamp = ptq->timestamp;
1752                         return 0;
1753                 }
1754         }
1755         return 0;
1756 }
1757
1758 static inline int intel_pt_update_queues(struct intel_pt *pt)
1759 {
1760         if (pt->queues.new_data) {
1761                 pt->queues.new_data = false;
1762                 return intel_pt_setup_queues(pt);
1763         }
1764         return 0;
1765 }
1766
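/*
 * Process trace data in timestamp order: repeatedly pop the queue with the
 * smallest ordinal off the auxtrace heap, decode it up to just past the next
 * queue's ordinal (or up to 'timestamp'), and re-add it at its new position.
 */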
1767 static int intel_pt_process_queues(struct intel_pt *pt, u64 timestamp)
1768 {
1769         unsigned int queue_nr;
1770         u64 ts;
1771         int ret;
1772
1773         while (1) {
1774                 struct auxtrace_queue *queue;
1775                 struct intel_pt_queue *ptq;
1776
1777                 if (!pt->heap.heap_cnt)
1778                         return 0;
1779
1780                 if (pt->heap.heap_array[0].ordinal >= timestamp)
1781                         return 0;
1782
1783                 queue_nr = pt->heap.heap_array[0].queue_nr;
1784                 queue = &pt->queues.queue_array[queue_nr];
1785                 ptq = queue->priv;
1786
1787                 intel_pt_log("queue %u processing 0x%" PRIx64 " to 0x%" PRIx64 "\n",
1788                              queue_nr, pt->heap.heap_array[0].ordinal,
1789                              timestamp);
1790
1791                 auxtrace_heap__pop(&pt->heap);
1792
1793                 if (pt->heap.heap_cnt) {
1794                         ts = pt->heap.heap_array[0].ordinal + 1;
1795                         if (ts > timestamp)
1796                                 ts = timestamp;
1797                 } else {
1798                         ts = timestamp;
1799                 }
1800
1801                 intel_pt_set_pid_tid_cpu(pt, queue);
1802
1803                 ret = intel_pt_run_decoder(ptq, &ts);
1804
1805                 if (ret < 0) {
1806                         auxtrace_heap__add(&pt->heap, queue_nr, ts);
1807                         return ret;
1808                 }
1809
1810                 if (!ret) {
1811                         ret = auxtrace_heap__add(&pt->heap, queue_nr, ts);
1812                         if (ret < 0)
1813                                 return ret;
1814                 } else {
1815                         ptq->on_heap = false;
1816                 }
1817         }
1818
1819         return 0;
1820 }
1821
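/*
 * Without timestamps there is no way to interleave queues, so decode each
 * matching queue to completion: used on PERF_RECORD_EXIT for a single tid,
 * and at flush time for all queues (tid == -1).
 */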
1822 static int intel_pt_process_timeless_queues(struct intel_pt *pt, pid_t tid,
1823                                             u64 time_)
1824 {
1825         struct auxtrace_queues *queues = &pt->queues;
1826         unsigned int i;
1827         u64 ts = 0;
1828
1829         for (i = 0; i < queues->nr_queues; i++) {
1830                 struct auxtrace_queue *queue = &pt->queues.queue_array[i];
1831                 struct intel_pt_queue *ptq = queue->priv;
1832
1833                 if (ptq && (tid == -1 || ptq->tid == tid)) {
1834                         ptq->time = time_;
1835                         intel_pt_set_pid_tid_cpu(pt, queue);
1836                         intel_pt_run_decoder(ptq, &ts);
1837                 }
1838         }
1839         return 0;
1840 }
1841
1842 static int intel_pt_lost(struct intel_pt *pt, struct perf_sample *sample)
1843 {
1844         return intel_pt_synth_error(pt, INTEL_PT_ERR_LOST, sample->cpu,
1845                                     sample->pid, sample->tid, 0, sample->time);
1846 }
1847
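/*
 * Map a CPU number back to its queue.  With per-cpu mmaps the queues are
 * usually in CPU order, so try the direct index first and fall back to
 * scanning the rest of the queue array.
 */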
1848 static struct intel_pt_queue *intel_pt_cpu_to_ptq(struct intel_pt *pt, int cpu)
1849 {
1850         unsigned i, j;
1851
1852         if (cpu < 0 || !pt->queues.nr_queues)
1853                 return NULL;
1854
1855         if ((unsigned)cpu >= pt->queues.nr_queues)
1856                 i = pt->queues.nr_queues - 1;
1857         else
1858                 i = cpu;
1859
1860         if (pt->queues.queue_array[i].cpu == cpu)
1861                 return pt->queues.queue_array[i].priv;
1862
1863         for (j = 0; i > 0; j++) {
1864                 if (pt->queues.queue_array[--i].cpu == cpu)
1865                         return pt->queues.queue_array[i].priv;
1866         }
1867
1868         for (; j < pt->queues.nr_queues; j++) {
1869                 if (pt->queues.queue_array[j].cpu == cpu)
1870                         return pt->queues.queue_array[j].priv;
1871         }
1872
1873         return NULL;
1874 }
1875
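/*
 * Feed a context switch into the queue's switch-state machine.  Returns 0 if
 * the tid change has been deferred until the trace reaches the switch IP,
 * 1 if the caller should update the current tid itself, or a negative error
 * code.
 */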
1876 static int intel_pt_sync_switch(struct intel_pt *pt, int cpu, pid_t tid,
1877                                 u64 timestamp)
1878 {
1879         struct intel_pt_queue *ptq;
1880         int err;
1881
1882         if (!pt->sync_switch)
1883                 return 1;
1884
1885         ptq = intel_pt_cpu_to_ptq(pt, cpu);
1886         if (!ptq || !ptq->sync_switch)
1887                 return 1;
1888
1889         switch (ptq->switch_state) {
1890         case INTEL_PT_SS_NOT_TRACING:
1891                 break;
1892         case INTEL_PT_SS_UNKNOWN:
1893         case INTEL_PT_SS_TRACING:
1894                 ptq->next_tid = tid;
1895                 ptq->switch_state = INTEL_PT_SS_EXPECTING_SWITCH_IP;
1896                 return 0;
1897         case INTEL_PT_SS_EXPECTING_SWITCH_EVENT:
1898                 if (!ptq->on_heap) {
1899                         ptq->timestamp = perf_time_to_tsc(timestamp,
1900                                                           &pt->tc);
1901                         err = auxtrace_heap__add(&pt->heap, ptq->queue_nr,
1902                                                  ptq->timestamp);
1903                         if (err)
1904                                 return err;
1905                         ptq->on_heap = true;
1906                 }
1907                 ptq->switch_state = INTEL_PT_SS_TRACING;
1908                 break;
1909         case INTEL_PT_SS_EXPECTING_SWITCH_IP:
1910                 intel_pt_log("ERROR: cpu %d expecting switch ip\n", cpu);
1911                 break;
1912         default:
1913                 break;
1914         }
1915
1916         ptq->next_tid = -1;
1917
1918         return 1;
1919 }
1920
1921 static int intel_pt_process_switch(struct intel_pt *pt,
1922                                    struct perf_sample *sample)
1923 {
1924         struct perf_evsel *evsel;
1925         pid_t tid;
1926         int cpu, ret;
1927
1928         evsel = perf_evlist__id2evsel(pt->session->evlist, sample->id);
1929         if (evsel != pt->switch_evsel)
1930                 return 0;
1931
1932         tid = perf_evsel__intval(evsel, sample, "next_pid");
1933         cpu = sample->cpu;
1934
1935         intel_pt_log("sched_switch: cpu %d tid %d time %"PRIu64" tsc %#"PRIx64"\n",
1936                      cpu, tid, sample->time, perf_time_to_tsc(sample->time,
1937                      &pt->tc));
1938
1939         ret = intel_pt_sync_switch(pt, cpu, tid, sample->time);
1940         if (ret <= 0)
1941                 return ret;
1942
1943         return machine__set_current_tid(pt->machine, cpu, -1, tid);
1944 }
1945
1946 static int intel_pt_context_switch_in(struct intel_pt *pt,
1947                                       struct perf_sample *sample)
1948 {
1949         pid_t pid = sample->pid;
1950         pid_t tid = sample->tid;
1951         int cpu = sample->cpu;
1952
1953         if (pt->sync_switch) {
1954                 struct intel_pt_queue *ptq;
1955
1956                 ptq = intel_pt_cpu_to_ptq(pt, cpu);
1957                 if (ptq && ptq->sync_switch) {
1958                         ptq->next_tid = -1;
1959                         switch (ptq->switch_state) {
1960                         case INTEL_PT_SS_NOT_TRACING:
1961                         case INTEL_PT_SS_UNKNOWN:
1962                         case INTEL_PT_SS_TRACING:
1963                                 break;
1964                         case INTEL_PT_SS_EXPECTING_SWITCH_EVENT:
1965                         case INTEL_PT_SS_EXPECTING_SWITCH_IP:
1966                                 ptq->switch_state = INTEL_PT_SS_TRACING;
1967                                 break;
1968                         default:
1969                                 break;
1970                         }
1971                 }
1972         }
1973
1974         /*
1975          * If the current tid has not been updated yet, ensure it is
1976          * updated now that a "switch in" event has occurred.
1977          */
1978         if (machine__get_current_tid(pt->machine, cpu) == tid)
1979                 return 0;
1980
1981         return machine__set_current_tid(pt->machine, cpu, pid, tid);
1982 }
1983
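/*
 * Handle PERF_RECORD_SWITCH / PERF_RECORD_SWITCH_CPU_WIDE.  With CPU-wide
 * context-switch records (have_sched_switch == 3), the incoming task's
 * pid/tid are taken from the switch-out event on the same CPU; otherwise
 * only switch-in events are used and the pid/tid come from the sample.
 */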
1984 static int intel_pt_context_switch(struct intel_pt *pt, union perf_event *event,
1985                                    struct perf_sample *sample)
1986 {
1987         bool out = event->header.misc & PERF_RECORD_MISC_SWITCH_OUT;
1988         pid_t pid, tid;
1989         int cpu, ret;
1990
1991         cpu = sample->cpu;
1992
1993         if (pt->have_sched_switch == 3) {
1994                 if (!out)
1995                         return intel_pt_context_switch_in(pt, sample);
1996                 if (event->header.type != PERF_RECORD_SWITCH_CPU_WIDE) {
1997                         pr_err("Expecting CPU-wide context switch event\n");
1998                         return -EINVAL;
1999                 }
2000                 pid = event->context_switch.next_prev_pid;
2001                 tid = event->context_switch.next_prev_tid;
2002         } else {
2003                 if (out)
2004                         return 0;
2005                 pid = sample->pid;
2006                 tid = sample->tid;
2007         }
2008
2009         if (tid == -1) {
2010                 pr_err("context_switch event has no tid\n");
2011                 return -EINVAL;
2012         }
2013
2014         intel_pt_log("context_switch: cpu %d pid %d tid %d time %"PRIu64" tsc %#"PRIx64"\n",
2015                      cpu, pid, tid, sample->time, perf_time_to_tsc(sample->time,
2016                      &pt->tc));
2017
2018         ret = intel_pt_sync_switch(pt, cpu, tid, sample->time);
2019         if (ret <= 0)
2020                 return ret;
2021
2022         return machine__set_current_tid(pt->machine, cpu, pid, tid);
2023 }
2024
2025 static int intel_pt_process_itrace_start(struct intel_pt *pt,
2026                                          union perf_event *event,
2027                                          struct perf_sample *sample)
2028 {
2029         if (!pt->per_cpu_mmaps)
2030                 return 0;
2031
2032         intel_pt_log("itrace_start: cpu %d pid %d tid %d time %"PRIu64" tsc %#"PRIx64"\n",
2033                      sample->cpu, event->itrace_start.pid,
2034                      event->itrace_start.tid, sample->time,
2035                      perf_time_to_tsc(sample->time, &pt->tc));
2036
2037         return machine__set_current_tid(pt->machine, sample->cpu,
2038                                         event->itrace_start.pid,
2039                                         event->itrace_start.tid);
2040 }
2041
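/*
 * Main event callback: advance decoding up to the event's timestamp before
 * the event itself is delivered, so that synthesized samples appear in time
 * order, then pick out the event types Intel PT itself consumes (lost AUX
 * data, sched_switch, itrace_start and context switches).
 */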
2042 static int intel_pt_process_event(struct perf_session *session,
2043                                   union perf_event *event,
2044                                   struct perf_sample *sample,
2045                                   struct perf_tool *tool)
2046 {
2047         struct intel_pt *pt = container_of(session->auxtrace, struct intel_pt,
2048                                            auxtrace);
2049         u64 timestamp;
2050         int err = 0;
2051
2052         if (dump_trace)
2053                 return 0;
2054
2055         if (!tool->ordered_events) {
2056                 pr_err("Intel Processor Trace requires ordered events\n");
2057                 return -EINVAL;
2058         }
2059
2060         if (sample->time && sample->time != (u64)-1)
2061                 timestamp = perf_time_to_tsc(sample->time, &pt->tc);
2062         else
2063                 timestamp = 0;
2064
2065         if (timestamp || pt->timeless_decoding) {
2066                 err = intel_pt_update_queues(pt);
2067                 if (err)
2068                         return err;
2069         }
2070
2071         if (pt->timeless_decoding) {
2072                 if (event->header.type == PERF_RECORD_EXIT) {
2073                         err = intel_pt_process_timeless_queues(pt,
2074                                                                event->fork.tid,
2075                                                                sample->time);
2076                 }
2077         } else if (timestamp) {
2078                 err = intel_pt_process_queues(pt, timestamp);
2079         }
2080         if (err)
2081                 return err;
2082
2083         if (event->header.type == PERF_RECORD_AUX &&
2084             (event->aux.flags & PERF_AUX_FLAG_TRUNCATED) &&
2085             pt->synth_opts.errors) {
2086                 err = intel_pt_lost(pt, sample);
2087                 if (err)
2088                         return err;
2089         }
2090
2091         if (pt->switch_evsel && event->header.type == PERF_RECORD_SAMPLE)
2092                 err = intel_pt_process_switch(pt, sample);
2093         else if (event->header.type == PERF_RECORD_ITRACE_START)
2094                 err = intel_pt_process_itrace_start(pt, event, sample);
2095         else if (event->header.type == PERF_RECORD_SWITCH ||
2096                  event->header.type == PERF_RECORD_SWITCH_CPU_WIDE)
2097                 err = intel_pt_context_switch(pt, event, sample);
2098
2099         intel_pt_log("event %u: cpu %d time %"PRIu64" tsc %#"PRIx64" ",
2100                      event->header.type, sample->cpu, sample->time, timestamp);
2101         intel_pt_log_event(event);
2102
2103         return err;
2104 }
2105
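/*
 * Called when all events have been delivered: decode whatever remains in
 * every queue.
 */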
2106 static int intel_pt_flush(struct perf_session *session, struct perf_tool *tool)
2107 {
2108         struct intel_pt *pt = container_of(session->auxtrace, struct intel_pt,
2109                                            auxtrace);
2110         int ret;
2111
2112         if (dump_trace)
2113                 return 0;
2114
2115         if (!tool->ordered_events)
2116                 return -EINVAL;
2117
2118         ret = intel_pt_update_queues(pt);
2119         if (ret < 0)
2120                 return ret;
2121
2122         if (pt->timeless_decoding)
2123                 return intel_pt_process_timeless_queues(pt, -1,
2124                                                         MAX_TIMESTAMP - 1);
2125
2126         return intel_pt_process_queues(pt, MAX_TIMESTAMP);
2127 }
2128
2129 static void intel_pt_free_events(struct perf_session *session)
2130 {
2131         struct intel_pt *pt = container_of(session->auxtrace, struct intel_pt,
2132                                            auxtrace);
2133         struct auxtrace_queues *queues = &pt->queues;
2134         unsigned int i;
2135
2136         for (i = 0; i < queues->nr_queues; i++) {
2137                 intel_pt_free_queue(queues->queue_array[i].priv);
2138                 queues->queue_array[i].priv = NULL;
2139         }
2140         intel_pt_log_disable();
2141         auxtrace_queues__free(queues);
2142 }
2143
2144 static void intel_pt_free(struct perf_session *session)
2145 {
2146         struct intel_pt *pt = container_of(session->auxtrace, struct intel_pt,
2147                                            auxtrace);
2148
2149         auxtrace_heap__free(&pt->heap);
2150         intel_pt_free_events(session);
2151         session->auxtrace = NULL;
2152         thread__put(pt->unknown_thread);
2153         addr_filters__exit(&pt->filts);
2154         zfree(&pt->filter);
2155         free(pt);
2156 }
2157
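/*
 * Queue AUXTRACE data as it is encountered.  When processing a perf.data
 * file, the auxtrace index has normally queued all the data already
 * (data_queued), so this typically only does work for piped input.
 */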
2158 static int intel_pt_process_auxtrace_event(struct perf_session *session,
2159                                            union perf_event *event,
2160                                            struct perf_tool *tool __maybe_unused)
2161 {
2162         struct intel_pt *pt = container_of(session->auxtrace, struct intel_pt,
2163                                            auxtrace);
2164
2165         if (!pt->data_queued) {
2166                 struct auxtrace_buffer *buffer;
2167                 off_t data_offset;
2168                 int fd = perf_data__fd(session->data);
2169                 int err;
2170
2171                 if (perf_data__is_pipe(session->data)) {
2172                         data_offset = 0;
2173                 } else {
2174                         data_offset = lseek(fd, 0, SEEK_CUR);
2175                         if (data_offset == -1)
2176                                 return -errno;
2177                 }
2178
2179                 err = auxtrace_queues__add_event(&pt->queues, session, event,
2180                                                  data_offset, &buffer);
2181                 if (err)
2182                         return err;
2183
2184                 /* Dump here now that we have copied a piped trace out of the pipe */
2185                 if (dump_trace) {
2186                         if (auxtrace_buffer__get_data(buffer, fd)) {
2187                                 intel_pt_dump_event(pt, buffer->data,
2188                                                     buffer->size);
2189                                 auxtrace_buffer__put_data(buffer);
2190                         }
2191                 }
2192         }
2193
2194         return 0;
2195 }
2196
2197 struct intel_pt_synth {
2198         struct perf_tool dummy_tool;
2199         struct perf_session *session;
2200 };
2201
2202 static int intel_pt_event_synth(struct perf_tool *tool,
2203                                 union perf_event *event,
2204                                 struct perf_sample *sample __maybe_unused,
2205                                 struct machine *machine __maybe_unused)
2206 {
2207         struct intel_pt_synth *intel_pt_synth =
2208                         container_of(tool, struct intel_pt_synth, dummy_tool);
2209
2210         return perf_session__deliver_synth_event(intel_pt_synth->session, event,
2211                                                  NULL);
2212 }
2213
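/*
 * Deliver a synthesized attribute event into the session so that the samples
 * subsequently synthesized with this id can be parsed.
 */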
2214 static int intel_pt_synth_event(struct perf_session *session, const char *name,
2215                                 struct perf_event_attr *attr, u64 id)
2216 {
2217         struct intel_pt_synth intel_pt_synth;
2218         int err;
2219
2220         pr_debug("Synthesizing '%s' event with id %" PRIu64 " sample type %#" PRIx64 "\n",
2221                  name, id, (u64)attr->sample_type);
2222
2223         memset(&intel_pt_synth, 0, sizeof(struct intel_pt_synth));
2224         intel_pt_synth.session = session;
2225
2226         err = perf_event__synthesize_attr(&intel_pt_synth.dummy_tool, attr, 1,
2227                                           &id, intel_pt_event_synth);
2228         if (err)
2229                 pr_err("%s: failed to synthesize '%s' event type\n",
2230                        __func__, name);
2231
2232         return err;
2233 }
2234
2235 static void intel_pt_set_event_name(struct perf_evlist *evlist, u64 id,
2236                                     const char *name)
2237 {
2238         struct perf_evsel *evsel;
2239
2240         evlist__for_each_entry(evlist, evsel) {
2241                 if (evsel->id && evsel->id[0] == id) {
2242                         if (evsel->name)
2243                                 zfree(&evsel->name);
2244                         evsel->name = strdup(name);
2245                         break;
2246                 }
2247         }
2248 }
2249
2250 static struct perf_evsel *intel_pt_evsel(struct intel_pt *pt,
2251                                          struct perf_evlist *evlist)
2252 {
2253         struct perf_evsel *evsel;
2254
2255         evlist__for_each_entry(evlist, evsel) {
2256                 if (evsel->attr.type == pt->pmu_type && evsel->ids)
2257                         return evsel;
2258         }
2259
2260         return NULL;
2261 }
2262
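/*
 * Create the perf event attributes and ids for each kind of sample that will
 * be synthesized, based on the attributes of the recorded Intel PT event and
 * the itrace synthesis options.
 */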
2263 static int intel_pt_synth_events(struct intel_pt *pt,
2264                                  struct perf_session *session)
2265 {
2266         struct perf_evlist *evlist = session->evlist;
2267         struct perf_evsel *evsel = intel_pt_evsel(pt, evlist);
2268         struct perf_event_attr attr;
2269         u64 id;
2270         int err;
2271
2272         if (!evsel) {
2273                 pr_debug("There are no selected events with Intel Processor Trace data\n");
2274                 return 0;
2275         }
2276
2277         memset(&attr, 0, sizeof(struct perf_event_attr));
2278         attr.size = sizeof(struct perf_event_attr);
2279         attr.type = PERF_TYPE_HARDWARE;
2280         attr.sample_type = evsel->attr.sample_type & PERF_SAMPLE_MASK;
2281         attr.sample_type |= PERF_SAMPLE_IP | PERF_SAMPLE_TID |
2282                             PERF_SAMPLE_PERIOD;
2283         if (pt->timeless_decoding)
2284                 attr.sample_type &= ~(u64)PERF_SAMPLE_TIME;
2285         else
2286                 attr.sample_type |= PERF_SAMPLE_TIME;
2287         if (!pt->per_cpu_mmaps)
2288                 attr.sample_type &= ~(u64)PERF_SAMPLE_CPU;
2289         attr.exclude_user = evsel->attr.exclude_user;
2290         attr.exclude_kernel = evsel->attr.exclude_kernel;
2291         attr.exclude_hv = evsel->attr.exclude_hv;
2292         attr.exclude_host = evsel->attr.exclude_host;
2293         attr.exclude_guest = evsel->attr.exclude_guest;
2294         attr.sample_id_all = evsel->attr.sample_id_all;
2295         attr.read_format = evsel->attr.read_format;
2296
2297         id = evsel->id[0] + 1000000000;
2298         if (!id)
2299                 id = 1;
2300
2301         if (pt->synth_opts.branches) {
2302                 attr.config = PERF_COUNT_HW_BRANCH_INSTRUCTIONS;
2303                 attr.sample_period = 1;
2304                 attr.sample_type |= PERF_SAMPLE_ADDR;
2305                 err = intel_pt_synth_event(session, "branches", &attr, id);
2306                 if (err)
2307                         return err;
2308                 pt->sample_branches = true;
2309                 pt->branches_sample_type = attr.sample_type;
2310                 pt->branches_id = id;
2311                 id += 1;
2312                 attr.sample_type &= ~(u64)PERF_SAMPLE_ADDR;
2313         }
2314
2315         if (pt->synth_opts.callchain)
2316                 attr.sample_type |= PERF_SAMPLE_CALLCHAIN;
2317         if (pt->synth_opts.last_branch)
2318                 attr.sample_type |= PERF_SAMPLE_BRANCH_STACK;
2319
2320         if (pt->synth_opts.instructions) {
2321                 attr.config = PERF_COUNT_HW_INSTRUCTIONS;
2322                 if (pt->synth_opts.period_type == PERF_ITRACE_PERIOD_NANOSECS)
2323                         attr.sample_period =
2324                                 intel_pt_ns_to_ticks(pt, pt->synth_opts.period);
2325                 else
2326                         attr.sample_period = pt->synth_opts.period;
2327                 err = intel_pt_synth_event(session, "instructions", &attr, id);
2328                 if (err)
2329                         return err;
2330                 pt->sample_instructions = true;
2331                 pt->instructions_sample_type = attr.sample_type;
2332                 pt->instructions_id = id;
2333                 id += 1;
2334         }
2335
2336         attr.sample_type &= ~(u64)PERF_SAMPLE_PERIOD;
2337         attr.sample_period = 1;
2338
2339         if (pt->synth_opts.transactions) {
2340                 attr.config = PERF_COUNT_HW_INSTRUCTIONS;
2341                 err = intel_pt_synth_event(session, "transactions", &attr, id);
2342                 if (err)
2343                         return err;
2344                 pt->sample_transactions = true;
2345                 pt->transactions_sample_type = attr.sample_type;
2346                 pt->transactions_id = id;
2347                 intel_pt_set_event_name(evlist, id, "transactions");
2348                 id += 1;
2349         }
2350
2351         attr.type = PERF_TYPE_SYNTH;
2352         attr.sample_type |= PERF_SAMPLE_RAW;
2353
2354         if (pt->synth_opts.ptwrites) {
2355                 attr.config = PERF_SYNTH_INTEL_PTWRITE;
2356                 err = intel_pt_synth_event(session, "ptwrite", &attr, id);
2357                 if (err)
2358                         return err;
2359                 pt->sample_ptwrites = true;
2360                 pt->ptwrites_sample_type = attr.sample_type;
2361                 pt->ptwrites_id = id;
2362                 intel_pt_set_event_name(evlist, id, "ptwrite");
2363                 id += 1;
2364         }
2365
2366         if (pt->synth_opts.pwr_events) {
2367                 pt->sample_pwr_events = true;
2368                 pt->pwr_events_sample_type = attr.sample_type;
2369
2370                 attr.config = PERF_SYNTH_INTEL_CBR;
2371                 err = intel_pt_synth_event(session, "cbr", &attr, id);
2372                 if (err)
2373                         return err;
2374                 pt->cbr_id = id;
2375                 intel_pt_set_event_name(evlist, id, "cbr");
2376                 id += 1;
2377         }
2378
2379         if (pt->synth_opts.pwr_events && (evsel->attr.config & 0x10)) {
2380                 attr.config = PERF_SYNTH_INTEL_MWAIT;
2381                 err = intel_pt_synth_event(session, "mwait", &attr, id);
2382                 if (err)
2383                         return err;
2384                 pt->mwait_id = id;
2385                 intel_pt_set_event_name(evlist, id, "mwait");
2386                 id += 1;
2387
2388                 attr.config = PERF_SYNTH_INTEL_PWRE;
2389                 err = intel_pt_synth_event(session, "pwre", &attr, id);
2390                 if (err)
2391                         return err;
2392                 pt->pwre_id = id;
2393                 intel_pt_set_event_name(evlist, id, "pwre");
2394                 id += 1;
2395
2396                 attr.config = PERF_SYNTH_INTEL_EXSTOP;
2397                 err = intel_pt_synth_event(session, "exstop", &attr, id);
2398                 if (err)
2399                         return err;
2400                 pt->exstop_id = id;
2401                 intel_pt_set_event_name(evlist, id, "exstop");
2402                 id += 1;
2403
2404                 attr.config = PERF_SYNTH_INTEL_PWRX;
2405                 err = intel_pt_synth_event(session, "pwrx", &attr, id);
2406                 if (err)
2407                         return err;
2408                 pt->pwrx_id = id;
2409                 intel_pt_set_event_name(evlist, id, "pwrx");
2410                 id += 1;
2411         }
2412
2413         return 0;
2414 }
2415
2416 static struct perf_evsel *intel_pt_find_sched_switch(struct perf_evlist *evlist)
2417 {
2418         struct perf_evsel *evsel;
2419
2420         evlist__for_each_entry_reverse(evlist, evsel) {
2421                 const char *name = perf_evsel__name(evsel);
2422
2423                 if (!strcmp(name, "sched:sched_switch"))
2424                         return evsel;
2425         }
2426
2427         return NULL;
2428 }
2429
2430 static bool intel_pt_find_switch(struct perf_evlist *evlist)
2431 {
2432         struct perf_evsel *evsel;
2433
2434         evlist__for_each_entry(evlist, evsel) {
2435                 if (evsel->attr.context_switch)
2436                         return true;
2437         }
2438
2439         return false;
2440 }
2441
2442 static int intel_pt_perf_config(const char *var, const char *value, void *data)
2443 {
2444         struct intel_pt *pt = data;
2445
2446         if (!strcmp(var, "intel-pt.mispred-all"))
2447                 pt->mispred_all = perf_config_bool(var, value);
2448
2449         return 0;
2450 }
2451
2452 static const char * const intel_pt_info_fmts[] = {
2453         [INTEL_PT_PMU_TYPE]             = "  PMU Type            %"PRId64"\n",
2454         [INTEL_PT_TIME_SHIFT]           = "  Time Shift          %"PRIu64"\n",
2455         [INTEL_PT_TIME_MULT]            = "  Time Multiplier     %"PRIu64"\n",
2456         [INTEL_PT_TIME_ZERO]            = "  Time Zero           %"PRIu64"\n",
2457         [INTEL_PT_CAP_USER_TIME_ZERO]   = "  Cap Time Zero       %"PRId64"\n",
2458         [INTEL_PT_TSC_BIT]              = "  TSC bit             %#"PRIx64"\n",
2459         [INTEL_PT_NORETCOMP_BIT]        = "  NoRETComp bit       %#"PRIx64"\n",
2460         [INTEL_PT_HAVE_SCHED_SWITCH]    = "  Have sched_switch   %"PRId64"\n",
2461         [INTEL_PT_SNAPSHOT_MODE]        = "  Snapshot mode       %"PRId64"\n",
2462         [INTEL_PT_PER_CPU_MMAPS]        = "  Per-cpu maps        %"PRId64"\n",
2463         [INTEL_PT_MTC_BIT]              = "  MTC bit             %#"PRIx64"\n",
2464         [INTEL_PT_TSC_CTC_N]            = "  TSC:CTC numerator   %"PRIu64"\n",
2465         [INTEL_PT_TSC_CTC_D]            = "  TSC:CTC denominator %"PRIu64"\n",
2466         [INTEL_PT_CYC_BIT]              = "  CYC bit             %#"PRIx64"\n",
2467         [INTEL_PT_MAX_NONTURBO_RATIO]   = "  Max non-turbo ratio %"PRIu64"\n",
2468         [INTEL_PT_FILTER_STR_LEN]       = "  Filter string len.  %"PRIu64"\n",
2469 };
2470
2471 static void intel_pt_print_info(u64 *arr, int start, int finish)
2472 {
2473         int i;
2474
2475         if (!dump_trace)
2476                 return;
2477
2478         for (i = start; i <= finish; i++)
2479                 fprintf(stdout, intel_pt_info_fmts[i], arr[i]);
2480 }
2481
2482 static void intel_pt_print_info_str(const char *name, const char *str)
2483 {
2484         if (!dump_trace)
2485                 return;
2486
2487         fprintf(stdout, "  %-20s%s\n", name, str ? str : "");
2488 }
2489
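/*
 * priv[] has grown as new fields were added, so check that the
 * AUXTRACE_INFO event is actually big enough to contain priv[pos] before
 * reading it from an older perf.data file.
 */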
2490 static bool intel_pt_has(struct auxtrace_info_event *auxtrace_info, int pos)
2491 {
2492         return auxtrace_info->header.size >=
2493                 sizeof(struct auxtrace_info_event) + (sizeof(u64) * (pos + 1));
2494 }
2495
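/*
 * Set-up entry point, called for the PERF_RECORD_AUXTRACE_INFO event written
 * at record time (e.g. by "perf record -e intel_pt//u ..."): read the
 * decoder parameters out of priv[], create the struct intel_pt and hook its
 * callbacks into the session.
 */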
2496 int intel_pt_process_auxtrace_info(union perf_event *event,
2497                                    struct perf_session *session)
2498 {
2499         struct auxtrace_info_event *auxtrace_info = &event->auxtrace_info;
2500         size_t min_sz = sizeof(u64) * INTEL_PT_PER_CPU_MMAPS;
2501         struct intel_pt *pt;
2502         void *info_end;
2503         u64 *info;
2504         int err;
2505
2506         if (auxtrace_info->header.size < sizeof(struct auxtrace_info_event) +
2507                                         min_sz)
2508                 return -EINVAL;
2509
2510         pt = zalloc(sizeof(struct intel_pt));
2511         if (!pt)
2512                 return -ENOMEM;
2513
2514         addr_filters__init(&pt->filts);
2515
2516         err = perf_config(intel_pt_perf_config, pt);
2517         if (err)
2518                 goto err_free;
2519
2520         err = auxtrace_queues__init(&pt->queues);
2521         if (err)
2522                 goto err_free;
2523
2524         intel_pt_log_set_name(INTEL_PT_PMU_NAME);
2525
2526         pt->session = session;
2527         pt->machine = &session->machines.host; /* No kvm support */
2528         pt->auxtrace_type = auxtrace_info->type;
2529         pt->pmu_type = auxtrace_info->priv[INTEL_PT_PMU_TYPE];
2530         pt->tc.time_shift = auxtrace_info->priv[INTEL_PT_TIME_SHIFT];
2531         pt->tc.time_mult = auxtrace_info->priv[INTEL_PT_TIME_MULT];
2532         pt->tc.time_zero = auxtrace_info->priv[INTEL_PT_TIME_ZERO];
2533         pt->cap_user_time_zero = auxtrace_info->priv[INTEL_PT_CAP_USER_TIME_ZERO];
2534         pt->tsc_bit = auxtrace_info->priv[INTEL_PT_TSC_BIT];
2535         pt->noretcomp_bit = auxtrace_info->priv[INTEL_PT_NORETCOMP_BIT];
2536         pt->have_sched_switch = auxtrace_info->priv[INTEL_PT_HAVE_SCHED_SWITCH];
2537         pt->snapshot_mode = auxtrace_info->priv[INTEL_PT_SNAPSHOT_MODE];
2538         pt->per_cpu_mmaps = auxtrace_info->priv[INTEL_PT_PER_CPU_MMAPS];
2539         intel_pt_print_info(&auxtrace_info->priv[0], INTEL_PT_PMU_TYPE,
2540                             INTEL_PT_PER_CPU_MMAPS);
2541
2542         if (intel_pt_has(auxtrace_info, INTEL_PT_CYC_BIT)) {
2543                 pt->mtc_bit = auxtrace_info->priv[INTEL_PT_MTC_BIT];
2544                 pt->mtc_freq_bits = auxtrace_info->priv[INTEL_PT_MTC_FREQ_BITS];
2545                 pt->tsc_ctc_ratio_n = auxtrace_info->priv[INTEL_PT_TSC_CTC_N];
2546                 pt->tsc_ctc_ratio_d = auxtrace_info->priv[INTEL_PT_TSC_CTC_D];
2547                 pt->cyc_bit = auxtrace_info->priv[INTEL_PT_CYC_BIT];
2548                 intel_pt_print_info(&auxtrace_info->priv[0], INTEL_PT_MTC_BIT,
2549                                     INTEL_PT_CYC_BIT);
2550         }
2551
2552         if (intel_pt_has(auxtrace_info, INTEL_PT_MAX_NONTURBO_RATIO)) {
2553                 pt->max_non_turbo_ratio =
2554                         auxtrace_info->priv[INTEL_PT_MAX_NONTURBO_RATIO];
2555                 intel_pt_print_info(&auxtrace_info->priv[0],
2556                                     INTEL_PT_MAX_NONTURBO_RATIO,
2557                                     INTEL_PT_MAX_NONTURBO_RATIO);
2558         }
2559
2560         info = &auxtrace_info->priv[INTEL_PT_FILTER_STR_LEN] + 1;
2561         info_end = (void *)info + auxtrace_info->header.size;
2562
2563         if (intel_pt_has(auxtrace_info, INTEL_PT_FILTER_STR_LEN)) {
2564                 size_t len;
2565
2566                 len = auxtrace_info->priv[INTEL_PT_FILTER_STR_LEN];
2567                 intel_pt_print_info(&auxtrace_info->priv[0],
2568                                     INTEL_PT_FILTER_STR_LEN,
2569                                     INTEL_PT_FILTER_STR_LEN);
2570                 if (len) {
2571                         const char *filter = (const char *)info;
2572
2573                         len = roundup(len + 1, 8);
2574                         info += len >> 3;
2575                         if ((void *)info > info_end) {
2576                                 pr_err("%s: bad filter string length\n", __func__);
2577                                 err = -EINVAL;
2578                                 goto err_free_queues;
2579                         }
2580                         pt->filter = memdup(filter, len);
2581                         if (!pt->filter) {
2582                                 err = -ENOMEM;
2583                                 goto err_free_queues;
2584                         }
2585                         if (session->header.needs_swap)
2586                                 mem_bswap_64(pt->filter, len);
2587                         if (pt->filter[len - 1]) {
2588                                 pr_err("%s: filter string not null terminated\n", __func__);
2589                                 err = -EINVAL;
2590                                 goto err_free_queues;
2591                         }
2592                         err = addr_filters__parse_bare_filter(&pt->filts,
2593                                                               filter);
2594                         if (err)
2595                                 goto err_free_queues;
2596                 }
2597                 intel_pt_print_info_str("Filter string", pt->filter);
2598         }
2599
2600         pt->timeless_decoding = intel_pt_timeless_decoding(pt);
2601         if (pt->timeless_decoding && !pt->tc.time_mult)
2602                 pt->tc.time_mult = 1;
2603         pt->have_tsc = intel_pt_have_tsc(pt);
2604         pt->sampling_mode = false;
2605         pt->est_tsc = !pt->timeless_decoding;
2606
2607         pt->unknown_thread = thread__new(999999999, 999999999);
2608         if (!pt->unknown_thread) {
2609                 err = -ENOMEM;
2610                 goto err_free_queues;
2611         }
2612
2613         /*
2614          * Since this thread will not be kept in any rbtree nor in a
2615          * list, initialize its list node so that at thread__put() the
2616          * current thread lifetime assumption is kept and we don't segfault
2617          * at list_del_init().
2618          */
2619         INIT_LIST_HEAD(&pt->unknown_thread->node);
2620
2621         err = thread__set_comm(pt->unknown_thread, "unknown", 0);
2622         if (err)
2623                 goto err_delete_thread;
2624         if (thread__init_map_groups(pt->unknown_thread, pt->machine)) {
2625                 err = -ENOMEM;
2626                 goto err_delete_thread;
2627         }
2628
2629         pt->auxtrace.process_event = intel_pt_process_event;
2630         pt->auxtrace.process_auxtrace_event = intel_pt_process_auxtrace_event;
2631         pt->auxtrace.flush_events = intel_pt_flush;
2632         pt->auxtrace.free_events = intel_pt_free_events;
2633         pt->auxtrace.free = intel_pt_free;
2634         session->auxtrace = &pt->auxtrace;
2635
2636         if (dump_trace)
2637                 return 0;
2638
2639         if (pt->have_sched_switch == 1) {
2640                 pt->switch_evsel = intel_pt_find_sched_switch(session->evlist);
2641                 if (!pt->switch_evsel) {
2642                         pr_err("%s: missing sched_switch event\n", __func__);
2643                         err = -EINVAL;
2644                         goto err_delete_thread;
2645                 }
2646         } else if (pt->have_sched_switch == 2 &&
2647                    !intel_pt_find_switch(session->evlist)) {
2648                 pr_err("%s: missing context_switch attribute flag\n", __func__);
2649                 err = -EINVAL;
2650                 goto err_delete_thread;
2651         }
2652
2653         if (session->itrace_synth_opts && session->itrace_synth_opts->set) {
2654                 pt->synth_opts = *session->itrace_synth_opts;
2655         } else {
2656                 itrace_synth_opts__set_default(&pt->synth_opts,
2657                                 session->itrace_synth_opts->default_no_sample);
2658                 if (!session->itrace_synth_opts->default_no_sample &&
2659                     !session->itrace_synth_opts->inject) {
2660                         pt->synth_opts.branches = false;
2661                         pt->synth_opts.callchain = true;
2662                 }
2663                 pt->synth_opts.thread_stack =
2664                         session->itrace_synth_opts->thread_stack;
2666         }
2667
2668         if (pt->synth_opts.log)
2669                 intel_pt_log_enable();
2670
2671         /* Maximum non-turbo ratio is TSC freq / 100 MHz */
2672         if (pt->tc.time_mult) {
2673                 u64 tsc_freq = intel_pt_ns_to_ticks(pt, 1000000000);
2674
2675                 if (!pt->max_non_turbo_ratio)
2676                         pt->max_non_turbo_ratio =
2677                                         (tsc_freq + 50000000) / 100000000;
2678                 intel_pt_log("TSC frequency %"PRIu64"\n", tsc_freq);
2679                 intel_pt_log("Maximum non-turbo ratio %u\n",
2680                              pt->max_non_turbo_ratio);
2681                 pt->cbr2khz = tsc_freq / pt->max_non_turbo_ratio / 1000;
2682         }
2683
2684         if (pt->synth_opts.calls)
2685                 pt->branches_filter |= PERF_IP_FLAG_CALL | PERF_IP_FLAG_ASYNC |
2686                                        PERF_IP_FLAG_TRACE_END;
2687         if (pt->synth_opts.returns)
2688                 pt->branches_filter |= PERF_IP_FLAG_RETURN |
2689                                        PERF_IP_FLAG_TRACE_BEGIN;
2690
2691         if (pt->synth_opts.callchain && !symbol_conf.use_callchain) {
2692                 symbol_conf.use_callchain = true;
2693                 if (callchain_register_param(&callchain_param) < 0) {
2694                         symbol_conf.use_callchain = false;
2695                         pt->synth_opts.callchain = false;
2696                 }
2697         }
2698
2699         err = intel_pt_synth_events(pt, session);
2700         if (err)
2701                 goto err_delete_thread;
2702
2703         err = auxtrace_queues__process_index(&pt->queues, session);
2704         if (err)
2705                 goto err_delete_thread;
2706
2707         if (pt->queues.populated)
2708                 pt->data_queued = true;
2709
2710         if (pt->timeless_decoding)
2711                 pr_debug2("Intel PT decoding without timestamps\n");
2712
2713         return 0;
2714
2715 err_delete_thread:
2716         thread__zput(pt->unknown_thread);
2717 err_free_queues:
2718         intel_pt_log_disable();
2719         auxtrace_queues__free(&pt->queues);
2720         session->auxtrace = NULL;
2721 err_free:
2722         addr_filters__exit(&pt->filts);
2723         zfree(&pt->filter);
2724         free(pt);
2725         return err;
2726 }