/*
 * intel_pt.c: Intel Processor Trace support
 * Copyright (c) 2013-2015, Intel Corporation.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
 * more details.
 *
 */
#include <inttypes.h>
#include <stdio.h>
#include <stdbool.h>
#include <errno.h>
#include <linux/kernel.h>
#include <linux/types.h>

#include "session.h"
#include "machine.h"
#include "sort.h"
#include "tool.h"
#include "event.h"
#include "evlist.h"
#include "evsel.h"
#include "map.h"
#include "color.h"
#include "thread.h"
#include "thread-stack.h"
#include "symbol.h"
#include "callchain.h"
#include "dso.h"
#include "debug.h"
#include "auxtrace.h"
#include "tsc.h"
#include "intel-pt.h"
#include "config.h"

#include "intel-pt-decoder/intel-pt-log.h"
#include "intel-pt-decoder/intel-pt-decoder.h"
#include "intel-pt-decoder/intel-pt-insn-decoder.h"
#include "intel-pt-decoder/intel-pt-pkt-decoder.h"
#define MAX_TIMESTAMP (~0ULL)

struct intel_pt {
	struct auxtrace auxtrace;
	struct auxtrace_queues queues;
	struct auxtrace_heap heap;
	u32 auxtrace_type;
	struct perf_session *session;
	struct machine *machine;
	struct perf_evsel *switch_evsel;
	struct thread *unknown_thread;
	bool timeless_decoding;
	bool sampling_mode;
	bool snapshot_mode;
	bool per_cpu_mmaps;
	bool have_tsc;
	bool data_queued;
	bool est_tsc;
	bool sync_switch;
	bool mispred_all;
	int have_sched_switch;
	u32 pmu_type;
	u64 kernel_start;
	u64 switch_ip;
	u64 ptss_ip;
	struct perf_tsc_conversion tc;
	bool cap_user_time_zero;
	struct itrace_synth_opts synth_opts;
	bool sample_instructions;
	u64 instructions_sample_type;
	u64 instructions_id;
	bool sample_branches;
	u32 branches_filter;
	u64 branches_sample_type;
	u64 branches_id;
	bool sample_transactions;
	u64 transactions_sample_type;
	u64 transactions_id;
	bool sample_ptwrites;
	u64 ptwrites_sample_type;
	u64 ptwrites_id;
	bool sample_pwr_events;
	u64 pwr_events_sample_type;
	u64 mwait_id;
	u64 pwre_id;
	u64 exstop_id;
	u64 pwrx_id;
	u64 cbr_id;
	u64 tsc_bit;
	u64 mtc_bit;
	u64 mtc_freq_bits;
	u64 tsc_ctc_ratio_n;
	u64 tsc_ctc_ratio_d;
	u64 cyc_bit;
	u64 noretcomp_bit;
	unsigned max_non_turbo_ratio;
	unsigned cbr2khz;
	unsigned long num_events;
	char *filter;
	struct addr_filters filts;
};
enum switch_state {
	INTEL_PT_SS_NOT_TRACING,
	INTEL_PT_SS_UNKNOWN,
	INTEL_PT_SS_TRACING,
	INTEL_PT_SS_EXPECTING_SWITCH_EVENT,
	INTEL_PT_SS_EXPECTING_SWITCH_IP,
};
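/*
 * One struct intel_pt_queue is allocated per auxtrace queue (normally one
 * queue per CPU, or per thread when per-thread mmaps are used). It carries
 * the decoder instance plus the pid/tid/cpu and timestamp state that
 * synthesized samples are stamped with.
 */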
struct intel_pt_queue {
	struct intel_pt *pt;
	unsigned int queue_nr;
	struct auxtrace_buffer *buffer;
	struct auxtrace_buffer *old_buffer;
	void *decoder;
	const struct intel_pt_state *state;
	struct ip_callchain *chain;
	struct branch_stack *last_branch;
	struct branch_stack *last_branch_rb;
	size_t last_branch_pos;
	union perf_event *event_buf;
	bool on_heap;
	bool stop;
	bool step_through_buffers;
	bool use_buffer_pid_tid;
	bool sync_switch;
	bool have_sample;
	bool exclude_kernel;
	pid_t pid, tid;
	int cpu;
	int switch_state;
	pid_t next_tid;
	struct thread *thread;
	u64 time;
	u64 timestamp;
	u32 flags;
	u16 insn_len;
	u64 last_insn_cnt;
	char insn[INTEL_PT_INSN_BUF_SZ];
};
static void intel_pt_dump(struct intel_pt *pt __maybe_unused,
			  unsigned char *buf, size_t len)
{
	struct intel_pt_pkt packet;
	size_t pos = 0;
	int ret, pkt_len, i;
	char desc[INTEL_PT_PKT_DESC_MAX];
	const char *color = PERF_COLOR_BLUE;

	color_fprintf(stdout, color,
		      ". ... Intel Processor Trace data: size %zu bytes\n",
		      len);

	while (len) {
		ret = intel_pt_get_packet(buf, len, &packet);
		if (ret > 0)
			pkt_len = ret;
		else
			pkt_len = 1;
		color_fprintf(stdout, color, "  %08x: ", pos);
		for (i = 0; i < pkt_len; i++)
			color_fprintf(stdout, color, " %02x", buf[i]);
		for (; i < 16; i++)
			color_fprintf(stdout, color, "   ");
		if (ret > 0) {
			ret = intel_pt_pkt_desc(&packet, desc,
						INTEL_PT_PKT_DESC_MAX);
			if (ret > 0)
				color_fprintf(stdout, color, " %s\n", desc);
		} else {
			color_fprintf(stdout, color, " Bad packet!\n");
		}
		pos += pkt_len;
		buf += pkt_len;
		len -= pkt_len;
	}
}
static void intel_pt_dump_event(struct intel_pt *pt, unsigned char *buf,
				size_t len)
{
	printf(".\n");
	intel_pt_dump(pt, buf, len);
}
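/*
 * In snapshot or sampling mode, successive buffers can contain overlapping
 * data (the same trace captured twice). intel_pt_find_overlap() locates
 * where the new data starts in buffer 'b' relative to 'a' so that only
 * unseen trace is decoded; 'consecutive' reports whether 'b' continues 'a'
 * without a gap.
 */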
static int intel_pt_do_fix_overlap(struct intel_pt *pt, struct auxtrace_buffer *a,
				   struct auxtrace_buffer *b)
{
	bool consecutive = false;
	void *start;

	start = intel_pt_find_overlap(a->data, a->size, b->data, b->size,
				      pt->have_tsc, &consecutive);
	if (!start)
		return -EINVAL;
	b->use_size = b->data + b->size - start;
	b->use_data = start;
	if (b->use_size && consecutive)
		b->consecutive = true;
	return 0;
}
/* This function assumes data is processed sequentially only */
static int intel_pt_get_trace(struct intel_pt_buffer *b, void *data)
{
	struct intel_pt_queue *ptq = data;
	struct auxtrace_buffer *buffer = ptq->buffer;
	struct auxtrace_buffer *old_buffer = ptq->old_buffer;
	struct auxtrace_queue *queue;
	bool might_overlap;

	if (ptq->stop) {
		b->len = 0;
		return 0;
	}

	queue = &ptq->pt->queues.queue_array[ptq->queue_nr];

	buffer = auxtrace_buffer__next(queue, buffer);
	if (!buffer) {
		if (old_buffer)
			auxtrace_buffer__drop_data(old_buffer);
		b->len = 0;
		return 0;
	}

	ptq->buffer = buffer;

	if (!buffer->data) {
		int fd = perf_data__fd(ptq->pt->session->data);

		buffer->data = auxtrace_buffer__get_data(buffer, fd);
		if (!buffer->data)
			return -ENOMEM;
	}

	might_overlap = ptq->pt->snapshot_mode || ptq->pt->sampling_mode;
	if (might_overlap && !buffer->consecutive && old_buffer &&
	    intel_pt_do_fix_overlap(ptq->pt, old_buffer, buffer))
		return -ENOMEM;

	if (buffer->use_data) {
		b->len = buffer->use_size;
		b->buf = buffer->use_data;
	} else {
		b->len = buffer->size;
		b->buf = buffer->data;
	}
	b->ref_timestamp = buffer->reference;

	if (!old_buffer || (might_overlap && !buffer->consecutive)) {
		b->consecutive = false;
		b->trace_nr = buffer->buffer_nr + 1;
	} else {
		b->consecutive = true;
	}

	if (ptq->step_through_buffers)
		ptq->stop = true;

	if (b->len) {
		if (old_buffer)
			auxtrace_buffer__drop_data(old_buffer);
		ptq->old_buffer = buffer;
	} else {
		auxtrace_buffer__drop_data(buffer);
		return intel_pt_get_trace(b, data);
	}

	return 0;
}
struct intel_pt_cache_entry {
	struct auxtrace_cache_entry entry;
	u64 insn_cnt;
	u64 byte_cnt;
	enum intel_pt_insn_op op;
	enum intel_pt_insn_branch branch;
	int length;
	int32_t rel;
	char insn[INTEL_PT_INSN_BUF_SZ];
};
static int intel_pt_config_div(const char *var, const char *value, void *data)
{
	int *d = data;
	long val;

	if (!strcmp(var, "intel-pt.cache-divisor")) {
		val = strtol(value, NULL, 0);
		if (val > 0 && val <= INT_MAX)
			*d = val;
	}

	return 0;
}

static int intel_pt_cache_divisor(void)
{
	static int d;

	if (d)
		return d;

	perf_config(intel_pt_config_div, &d);

	if (!d)
		d = 64;

	return d;
}
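/*
 * Pick a per-DSO cache size proportional to the DSO size. E.g. a 16 MiB DSO
 * with the default divisor of 64 gives 16 MiB / 64 = 262144, and
 * 32 - __builtin_clz(262144) = 19, i.e. a 2^19-entry cache; sizes below
 * 1000 bytes fall back to 10 bits and anything over 2 MiB is capped at 21.
 */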
static unsigned int intel_pt_cache_size(struct dso *dso,
					struct machine *machine)
{
	off_t size;

	size = dso__data_size(dso, machine);
	size /= intel_pt_cache_divisor();
	if (size < 1000)
		return 10;
	if (size > (1 << 21))
		return 21;
	return 32 - __builtin_clz(size);
}
static struct auxtrace_cache *intel_pt_cache(struct dso *dso,
					     struct machine *machine)
{
	struct auxtrace_cache *c;
	unsigned int bits;

	if (dso->auxtrace_cache)
		return dso->auxtrace_cache;

	bits = intel_pt_cache_size(dso, machine);

	/* Ignoring cache creation failure */
	c = auxtrace_cache__new(bits, sizeof(struct intel_pt_cache_entry), 200);

	dso->auxtrace_cache = c;

	return c;
}
static int intel_pt_cache_add(struct dso *dso, struct machine *machine,
			      u64 offset, u64 insn_cnt, u64 byte_cnt,
			      struct intel_pt_insn *intel_pt_insn)
{
	struct auxtrace_cache *c = intel_pt_cache(dso, machine);
	struct intel_pt_cache_entry *e;
	int err;

	if (!c)
		return -ENOMEM;

	e = auxtrace_cache__alloc_entry(c);
	if (!e)
		return -ENOMEM;

	e->insn_cnt = insn_cnt;
	e->byte_cnt = byte_cnt;
	e->op = intel_pt_insn->op;
	e->branch = intel_pt_insn->branch;
	e->length = intel_pt_insn->length;
	e->rel = intel_pt_insn->rel;
	memcpy(e->insn, intel_pt_insn->buf, INTEL_PT_INSN_BUF_SZ);

	err = auxtrace_cache__add(c, offset, &e->entry);
	if (err)
		auxtrace_cache__free_entry(c, e);

	return err;
}
static struct intel_pt_cache_entry *
intel_pt_cache_lookup(struct dso *dso, struct machine *machine, u64 offset)
{
	struct auxtrace_cache *c = intel_pt_cache(dso, machine);

	if (!c)
		return NULL;

	return auxtrace_cache__lookup(dso->auxtrace_cache, offset);
}
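/*
 * Walk instructions forward from *ip, decoding one instruction at a time,
 * until a branch is reached, 'to_ip' is reached, or 'max_insn_cnt'
 * instructions have been walked. Results for a walked block are cached
 * per-DSO, keyed by file offset, so repeatedly executed code does not have
 * to be decoded again.
 */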
static int intel_pt_walk_next_insn(struct intel_pt_insn *intel_pt_insn,
				   uint64_t *insn_cnt_ptr, uint64_t *ip,
				   uint64_t to_ip, uint64_t max_insn_cnt,
				   void *data)
{
	struct intel_pt_queue *ptq = data;
	struct machine *machine = ptq->pt->machine;
	struct thread *thread;
	struct addr_location al;
	unsigned char buf[INTEL_PT_INSN_BUF_SZ];
	ssize_t len;
	int x86_64;
	u8 cpumode;
	u64 offset, start_offset, start_ip;
	u64 insn_cnt = 0;
	bool one_map = true;

	intel_pt_insn->length = 0;

	if (to_ip && *ip == to_ip)
		goto out_no_cache;

	if (*ip >= ptq->pt->kernel_start)
		cpumode = PERF_RECORD_MISC_KERNEL;
	else
		cpumode = PERF_RECORD_MISC_USER;

	thread = ptq->thread;
	if (!thread) {
		if (cpumode != PERF_RECORD_MISC_KERNEL)
			return -EINVAL;
		thread = ptq->pt->unknown_thread;
	}

	while (1) {
		if (!thread__find_map(thread, cpumode, *ip, &al) || !al.map->dso)
			return -EINVAL;

		if (al.map->dso->data.status == DSO_DATA_STATUS_ERROR &&
		    dso__data_status_seen(al.map->dso,
					  DSO_DATA_STATUS_SEEN_ITRACE))
			return -ENOENT;

		offset = al.map->map_ip(al.map, *ip);

		if (!to_ip && one_map) {
			struct intel_pt_cache_entry *e;

			e = intel_pt_cache_lookup(al.map->dso, machine, offset);
			if (e &&
			    (!max_insn_cnt || e->insn_cnt <= max_insn_cnt)) {
				*insn_cnt_ptr = e->insn_cnt;
				*ip += e->byte_cnt;
				intel_pt_insn->op = e->op;
				intel_pt_insn->branch = e->branch;
				intel_pt_insn->length = e->length;
				intel_pt_insn->rel = e->rel;
				memcpy(intel_pt_insn->buf, e->insn,
				       INTEL_PT_INSN_BUF_SZ);
				intel_pt_log_insn_no_data(intel_pt_insn, *ip);
				return 0;
			}
		}

		start_offset = offset;
		start_ip = *ip;

		/* Load maps to ensure dso->is_64_bit has been updated */
		map__load(al.map);

		x86_64 = al.map->dso->is_64_bit;

		while (1) {
			len = dso__data_read_offset(al.map->dso, machine,
						    offset, buf,
						    INTEL_PT_INSN_BUF_SZ);
			if (len <= 0)
				return -EINVAL;

			if (intel_pt_get_insn(buf, len, x86_64, intel_pt_insn))
				return -EINVAL;

			intel_pt_log_insn(intel_pt_insn, *ip);

			insn_cnt += 1;

			if (intel_pt_insn->branch != INTEL_PT_BR_NO_BRANCH)
				goto out;

			if (max_insn_cnt && insn_cnt >= max_insn_cnt)
				goto out_no_cache;

			*ip += intel_pt_insn->length;

			if (to_ip && *ip == to_ip)
				goto out_no_cache;

			if (*ip >= al.map->end)
				break;

			offset += intel_pt_insn->length;
		}
		one_map = false;
	}
out:
	*insn_cnt_ptr = insn_cnt;

	if (!one_map)
		goto out_no_cache;

	/*
	 * Didn't lookup in the 'to_ip' case, so do it now to prevent duplicate
	 * entries.
	 */
	if (to_ip) {
		struct intel_pt_cache_entry *e;

		e = intel_pt_cache_lookup(al.map->dso, machine, start_offset);
		if (e)
			return 0;
	}

	/* Ignore cache errors */
	intel_pt_cache_add(al.map->dso, machine, start_offset, insn_cnt,
			   *ip - start_ip, intel_pt_insn);

	return 0;

out_no_cache:
	*insn_cnt_ptr = insn_cnt;
	return 0;
}
static bool intel_pt_match_pgd_ip(struct intel_pt *pt, uint64_t ip,
				  uint64_t offset, const char *filename)
{
	struct addr_filter *filt;
	bool have_filter   = false;
	bool hit_tracestop = false;
	bool hit_filter    = false;

	list_for_each_entry(filt, &pt->filts.head, list) {
		if (filt->start)
			have_filter = true;

		if ((filename && !filt->filename) ||
		    (!filename && filt->filename) ||
		    (filename && strcmp(filename, filt->filename)))
			continue;

		if (!(offset >= filt->addr && offset < filt->addr + filt->size))
			continue;

		intel_pt_log("TIP.PGD ip %#"PRIx64" offset %#"PRIx64" in %s hit filter: %s offset %#"PRIx64" size %#"PRIx64"\n",
			     ip, offset, filename ? filename : "[kernel]",
			     filt->start ? "filter" : "stop",
			     filt->addr, filt->size);

		if (filt->start)
			hit_filter = true;
		else
			hit_tracestop = true;
	}

	if (!hit_tracestop && !hit_filter)
		intel_pt_log("TIP.PGD ip %#"PRIx64" offset %#"PRIx64" in %s is not in a filter region\n",
			     ip, offset, filename ? filename : "[kernel]");

	return hit_tracestop || (have_filter && !hit_filter);
}
static int __intel_pt_pgd_ip(uint64_t ip, void *data)
{
	struct intel_pt_queue *ptq = data;
	struct thread *thread;
	struct addr_location al;
	u8 cpumode;
	u64 offset;

	if (ip >= ptq->pt->kernel_start)
		return intel_pt_match_pgd_ip(ptq->pt, ip, ip, NULL);

	cpumode = PERF_RECORD_MISC_USER;

	thread = ptq->thread;
	if (!thread)
		return -EINVAL;

	if (!thread__find_map(thread, cpumode, ip, &al) || !al.map->dso)
		return -EINVAL;

	offset = al.map->map_ip(al.map, ip);

	return intel_pt_match_pgd_ip(ptq->pt, ip, offset,
				     al.map->dso->long_name);
}

static bool intel_pt_pgd_ip(uint64_t ip, void *data)
{
	return __intel_pt_pgd_ip(ip, data) > 0;
}
static bool intel_pt_get_config(struct intel_pt *pt,
				struct perf_event_attr *attr, u64 *config)
{
	if (attr->type == pt->pmu_type) {
		if (config)
			*config = attr->config;
		return true;
	}

	return false;
}
static bool intel_pt_exclude_kernel(struct intel_pt *pt)
{
	struct perf_evsel *evsel;

	evlist__for_each_entry(pt->session->evlist, evsel) {
		if (intel_pt_get_config(pt, &evsel->attr, NULL) &&
		    !evsel->attr.exclude_kernel)
			return false;
	}
	return true;
}
static bool intel_pt_return_compression(struct intel_pt *pt)
{
	struct perf_evsel *evsel;
	u64 config;

	if (!pt->noretcomp_bit)
		return true;

	evlist__for_each_entry(pt->session->evlist, evsel) {
		if (intel_pt_get_config(pt, &evsel->attr, &config) &&
		    (config & pt->noretcomp_bit))
			return false;
	}
	return true;
}
static bool intel_pt_branch_enable(struct intel_pt *pt)
{
	struct perf_evsel *evsel;
	u64 config;

	evlist__for_each_entry(pt->session->evlist, evsel) {
		if (intel_pt_get_config(pt, &evsel->attr, &config) &&
		    (config & 1) && !(config & 0x2000))
			return true;
	}
	return false;
}
static unsigned int intel_pt_mtc_period(struct intel_pt *pt)
{
	struct perf_evsel *evsel;
	unsigned int shift;
	u64 config;

	if (!pt->mtc_freq_bits)
		return 0;

	for (shift = 0, config = pt->mtc_freq_bits; !(config & 1); shift++)
		config >>= 1;

	evlist__for_each_entry(pt->session->evlist, evsel) {
		if (intel_pt_get_config(pt, &evsel->attr, &config))
			return (config & pt->mtc_freq_bits) >> shift;
	}
	return 0;
}
static bool intel_pt_timeless_decoding(struct intel_pt *pt)
{
	struct perf_evsel *evsel;
	bool timeless_decoding = true;
	u64 config;

	if (!pt->tsc_bit || !pt->cap_user_time_zero)
		return true;

	evlist__for_each_entry(pt->session->evlist, evsel) {
		if (!(evsel->attr.sample_type & PERF_SAMPLE_TIME))
			return true;
		if (intel_pt_get_config(pt, &evsel->attr, &config)) {
			if (config & pt->tsc_bit)
				timeless_decoding = false;
			else
				return true;
		}
	}
	return timeless_decoding;
}
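/*
 * "Timeless" decoding means decoding without timestamps: either the trace
 * was recorded without TSC packets, or the perf events carry no time member
 * to order against. In that case queues are decoded whole, on thread exit
 * and at flush, instead of being interleaved on the timestamp heap.
 */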
static bool intel_pt_tracing_kernel(struct intel_pt *pt)
{
	struct perf_evsel *evsel;

	evlist__for_each_entry(pt->session->evlist, evsel) {
		if (intel_pt_get_config(pt, &evsel->attr, NULL) &&
		    !evsel->attr.exclude_kernel)
			return true;
	}
	return false;
}
static bool intel_pt_have_tsc(struct intel_pt *pt)
{
	struct perf_evsel *evsel;
	bool have_tsc = false;
	u64 config;

	if (!pt->tsc_bit)
		return false;

	evlist__for_each_entry(pt->session->evlist, evsel) {
		if (intel_pt_get_config(pt, &evsel->attr, &config)) {
			if (config & pt->tsc_bit)
				have_tsc = true;
			else
				return false;
		}
	}
	return have_tsc;
}
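/*
 * Inverse of the perf time conversion, i.e. since
 * time = (ticks * time_mult) >> time_shift (apart from the time_zero
 * offset), here ticks = (ns << time_shift) / time_mult, computed as a
 * quotient and remainder so that shifting ns cannot overflow 64 bits.
 */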
static u64 intel_pt_ns_to_ticks(const struct intel_pt *pt, u64 ns)
{
	u64 quot, rem;

	quot = ns / pt->tc.time_mult;
	rem  = ns % pt->tc.time_mult;
	return (quot << pt->tc.time_shift) + (rem << pt->tc.time_shift) /
		pt->tc.time_mult;
}
static struct intel_pt_queue *intel_pt_alloc_queue(struct intel_pt *pt,
						   unsigned int queue_nr)
{
	struct intel_pt_params params = { .get_trace = 0, };
	struct perf_env *env = pt->machine->env;
	struct intel_pt_queue *ptq;

	ptq = zalloc(sizeof(struct intel_pt_queue));
	if (!ptq)
		return NULL;

	if (pt->synth_opts.callchain) {
		size_t sz = sizeof(struct ip_callchain);

		/* Add 1 to callchain_sz for callchain context */
		sz += (pt->synth_opts.callchain_sz + 1) * sizeof(u64);
		ptq->chain = zalloc(sz);
		if (!ptq->chain)
			goto out_free;
	}

	if (pt->synth_opts.last_branch) {
		size_t sz = sizeof(struct branch_stack);

		sz += pt->synth_opts.last_branch_sz *
		      sizeof(struct branch_entry);
		ptq->last_branch = zalloc(sz);
		if (!ptq->last_branch)
			goto out_free;
		ptq->last_branch_rb = zalloc(sz);
		if (!ptq->last_branch_rb)
			goto out_free;
	}

	ptq->event_buf = malloc(PERF_SAMPLE_MAX_SIZE);
	if (!ptq->event_buf)
		goto out_free;

	ptq->pt = pt;
	ptq->queue_nr = queue_nr;
	ptq->exclude_kernel = intel_pt_exclude_kernel(pt);
	ptq->pid = -1;
	ptq->tid = -1;
	ptq->cpu = -1;
	ptq->next_tid = -1;

	params.get_trace = intel_pt_get_trace;
	params.walk_insn = intel_pt_walk_next_insn;
	params.data = ptq;
	params.return_compression = intel_pt_return_compression(pt);
	params.branch_enable = intel_pt_branch_enable(pt);
	params.max_non_turbo_ratio = pt->max_non_turbo_ratio;
	params.mtc_period = intel_pt_mtc_period(pt);
	params.tsc_ctc_ratio_n = pt->tsc_ctc_ratio_n;
	params.tsc_ctc_ratio_d = pt->tsc_ctc_ratio_d;

	if (pt->filts.cnt > 0)
		params.pgd_ip = intel_pt_pgd_ip;

	if (pt->synth_opts.instructions) {
		if (pt->synth_opts.period) {
			switch (pt->synth_opts.period_type) {
			case PERF_ITRACE_PERIOD_INSTRUCTIONS:
				params.period_type =
						INTEL_PT_PERIOD_INSTRUCTIONS;
				params.period = pt->synth_opts.period;
				break;
			case PERF_ITRACE_PERIOD_TICKS:
				params.period_type = INTEL_PT_PERIOD_TICKS;
				params.period = pt->synth_opts.period;
				break;
			case PERF_ITRACE_PERIOD_NANOSECS:
				params.period_type = INTEL_PT_PERIOD_TICKS;
				params.period = intel_pt_ns_to_ticks(pt,
							pt->synth_opts.period);
				break;
			default:
				break;
			}
		}

		if (!params.period) {
			params.period_type = INTEL_PT_PERIOD_INSTRUCTIONS;
			params.period = 1;
		}
	}

	if (env->cpuid && !strncmp(env->cpuid, "GenuineIntel,6,92,", 18))
		params.flags |= INTEL_PT_FUP_WITH_NLIP;

	ptq->decoder = intel_pt_decoder_new(&params);
	if (!ptq->decoder)
		goto out_free;

	return ptq;

out_free:
	zfree(&ptq->event_buf);
	zfree(&ptq->last_branch);
	zfree(&ptq->last_branch_rb);
	zfree(&ptq->chain);
	free(ptq);
	return NULL;
}
static void intel_pt_free_queue(void *priv)
{
	struct intel_pt_queue *ptq = priv;

	if (!ptq)
		return;
	thread__zput(ptq->thread);
	intel_pt_decoder_free(ptq->decoder);
	zfree(&ptq->event_buf);
	zfree(&ptq->last_branch);
	zfree(&ptq->last_branch_rb);
	zfree(&ptq->chain);
	free(ptq);
}
static void intel_pt_set_pid_tid_cpu(struct intel_pt *pt,
				     struct auxtrace_queue *queue)
{
	struct intel_pt_queue *ptq = queue->priv;

	if (queue->tid == -1 || pt->have_sched_switch) {
		ptq->tid = machine__get_current_tid(pt->machine, ptq->cpu);
		thread__zput(ptq->thread);
	}

	if (!ptq->thread && ptq->tid != -1)
		ptq->thread = machine__find_thread(pt->machine, -1, ptq->tid);

	if (ptq->thread) {
		ptq->pid = ptq->thread->pid_;
		if (queue->cpu == -1)
			ptq->cpu = ptq->thread->cpu;
	}
}
static void intel_pt_sample_flags(struct intel_pt_queue *ptq)
{
	if (ptq->state->flags & INTEL_PT_ABORT_TX) {
		ptq->flags = PERF_IP_FLAG_BRANCH | PERF_IP_FLAG_TX_ABORT;
	} else if (ptq->state->flags & INTEL_PT_ASYNC) {
		if (ptq->state->to_ip)
			ptq->flags = PERF_IP_FLAG_BRANCH | PERF_IP_FLAG_CALL |
				     PERF_IP_FLAG_ASYNC |
				     PERF_IP_FLAG_INTERRUPT;
		else
			ptq->flags = PERF_IP_FLAG_BRANCH |
				     PERF_IP_FLAG_TRACE_END;
		ptq->insn_len = 0;
	} else {
		if (ptq->state->from_ip)
			ptq->flags = intel_pt_insn_type(ptq->state->insn_op);
		else
			ptq->flags = PERF_IP_FLAG_BRANCH |
				     PERF_IP_FLAG_TRACE_BEGIN;
		if (ptq->state->flags & INTEL_PT_IN_TX)
			ptq->flags |= PERF_IP_FLAG_IN_TX;
		ptq->insn_len = ptq->state->insn_len;
		memcpy(ptq->insn, ptq->state->insn, INTEL_PT_INSN_BUF_SZ);
	}

	if (ptq->state->type & INTEL_PT_TRACE_BEGIN)
		ptq->flags |= PERF_IP_FLAG_TRACE_BEGIN;
	if (ptq->state->type & INTEL_PT_TRACE_END)
		ptq->flags |= PERF_IP_FLAG_TRACE_END;
}
static int intel_pt_setup_queue(struct intel_pt *pt,
				struct auxtrace_queue *queue,
				unsigned int queue_nr)
{
	struct intel_pt_queue *ptq = queue->priv;

	if (list_empty(&queue->head))
		return 0;

	if (!ptq) {
		ptq = intel_pt_alloc_queue(pt, queue_nr);
		if (!ptq)
			return -ENOMEM;
		queue->priv = ptq;

		if (queue->cpu != -1)
			ptq->cpu = queue->cpu;
		ptq->tid = queue->tid;

		if (pt->sampling_mode && !pt->snapshot_mode &&
		    pt->timeless_decoding)
			ptq->step_through_buffers = true;

		ptq->sync_switch = pt->sync_switch;
	}

	if (!ptq->on_heap &&
	    (!ptq->sync_switch ||
	     ptq->switch_state != INTEL_PT_SS_EXPECTING_SWITCH_EVENT)) {
		const struct intel_pt_state *state;
		int ret;

		if (pt->timeless_decoding)
			return 0;

		intel_pt_log("queue %u getting timestamp\n", queue_nr);
		intel_pt_log("queue %u decoding cpu %d pid %d tid %d\n",
			     queue_nr, ptq->cpu, ptq->pid, ptq->tid);
		while (1) {
			state = intel_pt_decode(ptq->decoder);
			if (state->err) {
				if (state->err == INTEL_PT_ERR_NODATA) {
					intel_pt_log("queue %u has no timestamp\n",
						     queue_nr);
					return 0;
				}
				continue;
			}
			if (state->timestamp)
				break;
		}

		ptq->timestamp = state->timestamp;
		intel_pt_log("queue %u timestamp 0x%" PRIx64 "\n",
			     queue_nr, ptq->timestamp);
		ptq->state = state;
		ptq->have_sample = true;
		intel_pt_sample_flags(ptq);
		ret = auxtrace_heap__add(&pt->heap, queue_nr, ptq->timestamp);
		if (ret)
			return ret;
		ptq->on_heap = true;
	}

	return 0;
}
static int intel_pt_setup_queues(struct intel_pt *pt)
{
	unsigned int i;
	int ret;

	for (i = 0; i < pt->queues.nr_queues; i++) {
		ret = intel_pt_setup_queue(pt, &pt->queues.queue_array[i], i);
		if (ret)
			return ret;
	}
	return 0;
}
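/*
 * The last branch ring buffer is written backwards from the end (see
 * intel_pt_update_last_branch_rb() below), so the newest entry is at
 * last_branch_pos. Copying it out therefore takes the entries from
 * last_branch_pos to the end first, then wraps around to the start, which
 * yields a branch stack ordered newest to oldest.
 */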
static inline void intel_pt_copy_last_branch_rb(struct intel_pt_queue *ptq)
{
	struct branch_stack *bs_src = ptq->last_branch_rb;
	struct branch_stack *bs_dst = ptq->last_branch;
	size_t nr = 0;

	bs_dst->nr = bs_src->nr;

	if (!bs_src->nr)
		return;

	nr = ptq->pt->synth_opts.last_branch_sz - ptq->last_branch_pos;
	memcpy(&bs_dst->entries[0],
	       &bs_src->entries[ptq->last_branch_pos],
	       sizeof(struct branch_entry) * nr);

	if (bs_src->nr >= ptq->pt->synth_opts.last_branch_sz) {
		memcpy(&bs_dst->entries[nr],
		       &bs_src->entries[0],
		       sizeof(struct branch_entry) * ptq->last_branch_pos);
	}
}
static inline void intel_pt_reset_last_branch_rb(struct intel_pt_queue *ptq)
{
	ptq->last_branch_pos = 0;
	ptq->last_branch_rb->nr = 0;
}
static void intel_pt_update_last_branch_rb(struct intel_pt_queue *ptq)
{
	const struct intel_pt_state *state = ptq->state;
	struct branch_stack *bs = ptq->last_branch_rb;
	struct branch_entry *be;

	if (!ptq->last_branch_pos)
		ptq->last_branch_pos = ptq->pt->synth_opts.last_branch_sz;

	ptq->last_branch_pos -= 1;

	be              = &bs->entries[ptq->last_branch_pos];
	be->from        = state->from_ip;
	be->to          = state->to_ip;
	be->flags.abort = !!(state->flags & INTEL_PT_ABORT_TX);
	be->flags.in_tx = !!(state->flags & INTEL_PT_IN_TX);
	/* No support for mispredict */
	be->flags.mispred = ptq->pt->mispred_all;

	if (bs->nr < ptq->pt->synth_opts.last_branch_sz)
		bs->nr += 1;
}
static inline bool intel_pt_skip_event(struct intel_pt *pt)
{
	return pt->synth_opts.initial_skip &&
	       pt->num_events++ < pt->synth_opts.initial_skip;
}
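/*
 * intel_pt_prep_b_sample() fills in the fields common to all synthesized
 * samples; intel_pt_prep_sample() adds callchain and branch stack on top,
 * and intel_pt_prep_p_sample() additionally clears sample->flags for
 * records that have no IP (power events and PTWRITE).
 */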
static void intel_pt_prep_b_sample(struct intel_pt *pt,
				   struct intel_pt_queue *ptq,
				   union perf_event *event,
				   struct perf_sample *sample)
{
	event->sample.header.type = PERF_RECORD_SAMPLE;
	event->sample.header.misc = PERF_RECORD_MISC_USER;
	event->sample.header.size = sizeof(struct perf_event_header);

	if (!pt->timeless_decoding)
		sample->time = tsc_to_perf_time(ptq->timestamp, &pt->tc);

	sample->cpumode = PERF_RECORD_MISC_USER;
	sample->ip = ptq->state->from_ip;
	sample->pid = ptq->pid;
	sample->tid = ptq->tid;
	sample->addr = ptq->state->to_ip;
	sample->period = 1;
	sample->cpu = ptq->cpu;
	sample->flags = ptq->flags;
	sample->insn_len = ptq->insn_len;
	memcpy(sample->insn, ptq->insn, INTEL_PT_INSN_BUF_SZ);
}
static int intel_pt_inject_event(union perf_event *event,
				 struct perf_sample *sample, u64 type)
{
	event->header.size = perf_event__sample_event_size(sample, type, 0);
	return perf_event__synthesize_sample(event, type, 0, sample);
}

static inline int intel_pt_opt_inject(struct intel_pt *pt,
				      union perf_event *event,
				      struct perf_sample *sample, u64 type)
{
	if (!pt->synth_opts.inject)
		return 0;

	return intel_pt_inject_event(event, sample, type);
}
static int intel_pt_deliver_synth_b_event(struct intel_pt *pt,
					  union perf_event *event,
					  struct perf_sample *sample, u64 type)
{
	int ret;

	ret = intel_pt_opt_inject(pt, event, sample, type);
	if (ret)
		return ret;

	ret = perf_session__deliver_synth_event(pt->session, event, sample);
	if (ret)
		pr_err("Intel PT: failed to deliver event, error %d\n", ret);

	return ret;
}
static int intel_pt_synth_branch_sample(struct intel_pt_queue *ptq)
{
	struct intel_pt *pt = ptq->pt;
	union perf_event *event = ptq->event_buf;
	struct perf_sample sample = { .ip = 0, };
	struct dummy_branch_stack {
		u64			nr;
		struct branch_entry	entries;
	} dummy_bs;

	if (pt->branches_filter && !(pt->branches_filter & ptq->flags))
		return 0;

	if (intel_pt_skip_event(pt))
		return 0;

	intel_pt_prep_b_sample(pt, ptq, event, &sample);

	sample.id = ptq->pt->branches_id;
	sample.stream_id = ptq->pt->branches_id;

	/*
	 * perf report cannot handle events without a branch stack when using
	 * SORT_MODE__BRANCH, so make a dummy one.
	 */
	if (pt->synth_opts.last_branch && sort__mode == SORT_MODE__BRANCH) {
		dummy_bs = (struct dummy_branch_stack){
			.nr = 1,
			.entries = {
				.from = sample.ip,
				.to = sample.addr,
			},
		};
		sample.branch_stack = (struct branch_stack *)&dummy_bs;
	}

	return intel_pt_deliver_synth_b_event(pt, event, &sample,
					      pt->branches_sample_type);
}
static void intel_pt_prep_sample(struct intel_pt *pt,
				 struct intel_pt_queue *ptq,
				 union perf_event *event,
				 struct perf_sample *sample)
{
	intel_pt_prep_b_sample(pt, ptq, event, sample);

	if (pt->synth_opts.callchain) {
		thread_stack__sample(ptq->thread, ptq->chain,
				     pt->synth_opts.callchain_sz + 1,
				     sample->ip, pt->kernel_start);
		sample->callchain = ptq->chain;
	}

	if (pt->synth_opts.last_branch) {
		intel_pt_copy_last_branch_rb(ptq);
		sample->branch_stack = ptq->last_branch;
	}
}
static inline int intel_pt_deliver_synth_event(struct intel_pt *pt,
					       struct intel_pt_queue *ptq,
					       union perf_event *event,
					       struct perf_sample *sample,
					       u64 type)
{
	int ret;

	ret = intel_pt_deliver_synth_b_event(pt, event, sample, type);

	if (pt->synth_opts.last_branch)
		intel_pt_reset_last_branch_rb(ptq);

	return ret;
}
static int intel_pt_synth_instruction_sample(struct intel_pt_queue *ptq)
{
	struct intel_pt *pt = ptq->pt;
	union perf_event *event = ptq->event_buf;
	struct perf_sample sample = { .ip = 0, };

	if (intel_pt_skip_event(pt))
		return 0;

	intel_pt_prep_sample(pt, ptq, event, &sample);

	sample.id = ptq->pt->instructions_id;
	sample.stream_id = ptq->pt->instructions_id;
	sample.period = ptq->state->tot_insn_cnt - ptq->last_insn_cnt;

	ptq->last_insn_cnt = ptq->state->tot_insn_cnt;

	return intel_pt_deliver_synth_event(pt, ptq, event, &sample,
					    pt->instructions_sample_type);
}
static int intel_pt_synth_transaction_sample(struct intel_pt_queue *ptq)
{
	struct intel_pt *pt = ptq->pt;
	union perf_event *event = ptq->event_buf;
	struct perf_sample sample = { .ip = 0, };

	if (intel_pt_skip_event(pt))
		return 0;

	intel_pt_prep_sample(pt, ptq, event, &sample);

	sample.id = ptq->pt->transactions_id;
	sample.stream_id = ptq->pt->transactions_id;

	return intel_pt_deliver_synth_event(pt, ptq, event, &sample,
					    pt->transactions_sample_type);
}
static void intel_pt_prep_p_sample(struct intel_pt *pt,
				   struct intel_pt_queue *ptq,
				   union perf_event *event,
				   struct perf_sample *sample)
{
	intel_pt_prep_sample(pt, ptq, event, sample);

	/*
	 * Zero IP is used to mean "trace start" but that is not the case for
	 * power or PTWRITE events with no IP, so clear the flags.
	 */
	if (!sample->ip)
		sample->flags = 0;
}
static int intel_pt_synth_ptwrite_sample(struct intel_pt_queue *ptq)
{
	struct intel_pt *pt = ptq->pt;
	union perf_event *event = ptq->event_buf;
	struct perf_sample sample = { .ip = 0, };
	struct perf_synth_intel_ptwrite raw;

	if (intel_pt_skip_event(pt))
		return 0;

	intel_pt_prep_p_sample(pt, ptq, event, &sample);

	sample.id = ptq->pt->ptwrites_id;
	sample.stream_id = ptq->pt->ptwrites_id;

	raw.flags = 0;
	raw.ip = !!(ptq->state->flags & INTEL_PT_FUP_IP);
	raw.payload = cpu_to_le64(ptq->state->ptw_payload);

	sample.raw_size = perf_synth__raw_size(raw);
	sample.raw_data = perf_synth__raw_data(&raw);

	return intel_pt_deliver_synth_event(pt, ptq, event, &sample,
					    pt->ptwrites_sample_type);
}
static int intel_pt_synth_cbr_sample(struct intel_pt_queue *ptq)
{
	struct intel_pt *pt = ptq->pt;
	union perf_event *event = ptq->event_buf;
	struct perf_sample sample = { .ip = 0, };
	struct perf_synth_intel_cbr raw;
	u32 flags;

	if (intel_pt_skip_event(pt))
		return 0;

	intel_pt_prep_p_sample(pt, ptq, event, &sample);

	sample.id = ptq->pt->cbr_id;
	sample.stream_id = ptq->pt->cbr_id;

	flags = (u16)ptq->state->cbr_payload | (pt->max_non_turbo_ratio << 16);
	raw.flags = cpu_to_le32(flags);
	raw.freq = cpu_to_le32(raw.cbr * pt->cbr2khz);
	raw.reserved3 = 0;

	sample.raw_size = perf_synth__raw_size(raw);
	sample.raw_data = perf_synth__raw_data(&raw);

	return intel_pt_deliver_synth_event(pt, ptq, event, &sample,
					    pt->pwr_events_sample_type);
}
static int intel_pt_synth_mwait_sample(struct intel_pt_queue *ptq)
{
	struct intel_pt *pt = ptq->pt;
	union perf_event *event = ptq->event_buf;
	struct perf_sample sample = { .ip = 0, };
	struct perf_synth_intel_mwait raw;

	if (intel_pt_skip_event(pt))
		return 0;

	intel_pt_prep_p_sample(pt, ptq, event, &sample);

	sample.id = ptq->pt->mwait_id;
	sample.stream_id = ptq->pt->mwait_id;

	raw.reserved = 0;
	raw.payload = cpu_to_le64(ptq->state->mwait_payload);

	sample.raw_size = perf_synth__raw_size(raw);
	sample.raw_data = perf_synth__raw_data(&raw);

	return intel_pt_deliver_synth_event(pt, ptq, event, &sample,
					    pt->pwr_events_sample_type);
}
static int intel_pt_synth_pwre_sample(struct intel_pt_queue *ptq)
{
	struct intel_pt *pt = ptq->pt;
	union perf_event *event = ptq->event_buf;
	struct perf_sample sample = { .ip = 0, };
	struct perf_synth_intel_pwre raw;

	if (intel_pt_skip_event(pt))
		return 0;

	intel_pt_prep_p_sample(pt, ptq, event, &sample);

	sample.id = ptq->pt->pwre_id;
	sample.stream_id = ptq->pt->pwre_id;

	raw.reserved = 0;
	raw.payload = cpu_to_le64(ptq->state->pwre_payload);

	sample.raw_size = perf_synth__raw_size(raw);
	sample.raw_data = perf_synth__raw_data(&raw);

	return intel_pt_deliver_synth_event(pt, ptq, event, &sample,
					    pt->pwr_events_sample_type);
}
static int intel_pt_synth_exstop_sample(struct intel_pt_queue *ptq)
{
	struct intel_pt *pt = ptq->pt;
	union perf_event *event = ptq->event_buf;
	struct perf_sample sample = { .ip = 0, };
	struct perf_synth_intel_exstop raw;

	if (intel_pt_skip_event(pt))
		return 0;

	intel_pt_prep_p_sample(pt, ptq, event, &sample);

	sample.id = ptq->pt->exstop_id;
	sample.stream_id = ptq->pt->exstop_id;

	raw.flags = 0;
	raw.ip = !!(ptq->state->flags & INTEL_PT_FUP_IP);

	sample.raw_size = perf_synth__raw_size(raw);
	sample.raw_data = perf_synth__raw_data(&raw);

	return intel_pt_deliver_synth_event(pt, ptq, event, &sample,
					    pt->pwr_events_sample_type);
}
static int intel_pt_synth_pwrx_sample(struct intel_pt_queue *ptq)
{
	struct intel_pt *pt = ptq->pt;
	union perf_event *event = ptq->event_buf;
	struct perf_sample sample = { .ip = 0, };
	struct perf_synth_intel_pwrx raw;

	if (intel_pt_skip_event(pt))
		return 0;

	intel_pt_prep_p_sample(pt, ptq, event, &sample);

	sample.id = ptq->pt->pwrx_id;
	sample.stream_id = ptq->pt->pwrx_id;

	raw.reserved = 0;
	raw.payload = cpu_to_le64(ptq->state->pwrx_payload);

	sample.raw_size = perf_synth__raw_size(raw);
	sample.raw_data = perf_synth__raw_data(&raw);

	return intel_pt_deliver_synth_event(pt, ptq, event, &sample,
					    pt->pwr_events_sample_type);
}
static int intel_pt_synth_error(struct intel_pt *pt, int code, int cpu,
				pid_t pid, pid_t tid, u64 ip)
{
	union perf_event event;
	char msg[MAX_AUXTRACE_ERROR_MSG];
	int err;

	intel_pt__strerror(code, msg, MAX_AUXTRACE_ERROR_MSG);

	auxtrace_synth_error(&event.auxtrace_error, PERF_AUXTRACE_ERROR_ITRACE,
			     code, cpu, pid, tid, ip, msg);

	err = perf_session__deliver_synth_event(pt->session, &event, NULL);
	if (err)
		pr_err("Intel Processor Trace: failed to deliver error event, error %d\n",
		       err);

	return err;
}
static int intel_pt_next_tid(struct intel_pt *pt, struct intel_pt_queue *ptq)
{
	struct auxtrace_queue *queue;
	pid_t tid = ptq->next_tid;
	int err;

	if (tid == -1)
		return 0;

	intel_pt_log("switch: cpu %d tid %d\n", ptq->cpu, tid);

	err = machine__set_current_tid(pt->machine, ptq->cpu, -1, tid);

	queue = &pt->queues.queue_array[ptq->queue_nr];
	intel_pt_set_pid_tid_cpu(pt, queue);

	ptq->next_tid = -1;

	return err;
}
static inline bool intel_pt_is_switch_ip(struct intel_pt_queue *ptq, u64 ip)
{
	struct intel_pt *pt = ptq->pt;

	return ip == pt->switch_ip &&
	       (ptq->flags & PERF_IP_FLAG_BRANCH) &&
	       !(ptq->flags & (PERF_IP_FLAG_CONDITIONAL | PERF_IP_FLAG_ASYNC |
			       PERF_IP_FLAG_INTERRUPT | PERF_IP_FLAG_TX_ABORT));
}

#define INTEL_PT_PWR_EVT (INTEL_PT_MWAIT_OP | INTEL_PT_PWR_ENTRY | \
			  INTEL_PT_EX_STOP | INTEL_PT_PWR_EXIT | \
			  INTEL_PT_CBR_CHG)
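/*
 * intel_pt_sample() synthesizes all requested sample types for the current
 * decoder state and then, when sync_switch is active, advances the
 * switch-state machine: a branch to the switch ip (__switch_to) means the
 * traced task is about to be switched out, at which point the pending
 * next_tid can be applied via intel_pt_next_tid().
 */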
static int intel_pt_sample(struct intel_pt_queue *ptq)
{
	const struct intel_pt_state *state = ptq->state;
	struct intel_pt *pt = ptq->pt;
	int err;

	if (!ptq->have_sample)
		return 0;

	ptq->have_sample = false;

	if (pt->sample_pwr_events && (state->type & INTEL_PT_PWR_EVT)) {
		if (state->type & INTEL_PT_CBR_CHG) {
			err = intel_pt_synth_cbr_sample(ptq);
			if (err)
				return err;
		}
		if (state->type & INTEL_PT_MWAIT_OP) {
			err = intel_pt_synth_mwait_sample(ptq);
			if (err)
				return err;
		}
		if (state->type & INTEL_PT_PWR_ENTRY) {
			err = intel_pt_synth_pwre_sample(ptq);
			if (err)
				return err;
		}
		if (state->type & INTEL_PT_EX_STOP) {
			err = intel_pt_synth_exstop_sample(ptq);
			if (err)
				return err;
		}
		if (state->type & INTEL_PT_PWR_EXIT) {
			err = intel_pt_synth_pwrx_sample(ptq);
			if (err)
				return err;
		}
	}

	if (pt->sample_instructions && (state->type & INTEL_PT_INSTRUCTION)) {
		err = intel_pt_synth_instruction_sample(ptq);
		if (err)
			return err;
	}

	if (pt->sample_transactions && (state->type & INTEL_PT_TRANSACTION)) {
		err = intel_pt_synth_transaction_sample(ptq);
		if (err)
			return err;
	}

	if (pt->sample_ptwrites && (state->type & INTEL_PT_PTW)) {
		err = intel_pt_synth_ptwrite_sample(ptq);
		if (err)
			return err;
	}

	if (!(state->type & INTEL_PT_BRANCH))
		return 0;

	if (pt->synth_opts.callchain || pt->synth_opts.thread_stack)
		thread_stack__event(ptq->thread, ptq->flags, state->from_ip,
				    state->to_ip, ptq->insn_len,
				    state->trace_nr);
	else
		thread_stack__set_trace_nr(ptq->thread, state->trace_nr);

	if (pt->sample_branches) {
		err = intel_pt_synth_branch_sample(ptq);
		if (err)
			return err;
	}

	if (pt->synth_opts.last_branch)
		intel_pt_update_last_branch_rb(ptq);

	if (!ptq->sync_switch)
		return 0;

	if (intel_pt_is_switch_ip(ptq, state->to_ip)) {
		switch (ptq->switch_state) {
		case INTEL_PT_SS_NOT_TRACING:
		case INTEL_PT_SS_UNKNOWN:
		case INTEL_PT_SS_EXPECTING_SWITCH_IP:
			err = intel_pt_next_tid(pt, ptq);
			if (err)
				return err;
			ptq->switch_state = INTEL_PT_SS_TRACING;
			break;
		default:
			ptq->switch_state = INTEL_PT_SS_EXPECTING_SWITCH_EVENT;
			return 1;
		}
	} else if (!state->to_ip) {
		ptq->switch_state = INTEL_PT_SS_NOT_TRACING;
	} else if (ptq->switch_state == INTEL_PT_SS_NOT_TRACING) {
		ptq->switch_state = INTEL_PT_SS_UNKNOWN;
	} else if (ptq->switch_state == INTEL_PT_SS_UNKNOWN &&
		   state->to_ip == pt->ptss_ip &&
		   (ptq->flags & PERF_IP_FLAG_CALL)) {
		ptq->switch_state = INTEL_PT_SS_TRACING;
	}

	return 0;
}
static u64 intel_pt_switch_ip(struct intel_pt *pt, u64 *ptss_ip)
{
	struct machine *machine = pt->machine;
	struct map *map;
	struct symbol *sym, *start;
	u64 ip, switch_ip = 0;
	const char *ptss;

	if (ptss_ip)
		*ptss_ip = 0;

	map = machine__kernel_map(machine);
	if (!map)
		return 0;

	if (map__load(map))
		return 0;

	start = dso__first_symbol(map->dso);

	for (sym = start; sym; sym = dso__next_symbol(sym)) {
		if (sym->binding == STB_GLOBAL &&
		    !strcmp(sym->name, "__switch_to")) {
			ip = map->unmap_ip(map, sym->start);
			if (ip >= map->start && ip < map->end) {
				switch_ip = ip;
				break;
			}
		}
	}

	if (!switch_ip || !ptss_ip)
		return 0;

	if (pt->have_sched_switch == 1)
		ptss = "perf_trace_sched_switch";
	else
		ptss = "__perf_event_task_sched_out";

	for (sym = start; sym; sym = dso__next_symbol(sym)) {
		if (!strcmp(sym->name, ptss)) {
			ip = map->unmap_ip(map, sym->start);
			if (ip >= map->start && ip < map->end) {
				*ptss_ip = ip;
				break;
			}
		}
	}

	return switch_ip;
}
static void intel_pt_enable_sync_switch(struct intel_pt *pt)
{
	unsigned int i;

	pt->sync_switch = true;

	for (i = 0; i < pt->queues.nr_queues; i++) {
		struct auxtrace_queue *queue = &pt->queues.queue_array[i];
		struct intel_pt_queue *ptq = queue->priv;

		if (ptq)
			ptq->sync_switch = true;
	}
}
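/*
 * Return convention (mirrored by intel_pt_process_queues()): < 0 is an
 * error, 0 means the timestamp limit was reached and the queue goes back on
 * the heap ordered at *timestamp, > 0 means the queue has no more data.
 */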
static int intel_pt_run_decoder(struct intel_pt_queue *ptq, u64 *timestamp)
{
	const struct intel_pt_state *state = ptq->state;
	struct intel_pt *pt = ptq->pt;
	int err;

	if (!pt->kernel_start) {
		pt->kernel_start = machine__kernel_start(pt->machine);
		if (pt->per_cpu_mmaps &&
		    (pt->have_sched_switch == 1 || pt->have_sched_switch == 3) &&
		    !pt->timeless_decoding && intel_pt_tracing_kernel(pt) &&
		    !pt->sampling_mode) {
			pt->switch_ip = intel_pt_switch_ip(pt, &pt->ptss_ip);
			if (pt->switch_ip) {
				intel_pt_log("switch_ip: %"PRIx64" ptss_ip: %"PRIx64"\n",
					     pt->switch_ip, pt->ptss_ip);
				intel_pt_enable_sync_switch(pt);
			}
		}
	}

	intel_pt_log("queue %u decoding cpu %d pid %d tid %d\n",
		     ptq->queue_nr, ptq->cpu, ptq->pid, ptq->tid);
	while (1) {
		err = intel_pt_sample(ptq);
		if (err)
			return err;

		state = intel_pt_decode(ptq->decoder);
		if (state->err) {
			if (state->err == INTEL_PT_ERR_NODATA)
				return 1;
			if (ptq->sync_switch &&
			    state->from_ip >= pt->kernel_start) {
				ptq->sync_switch = false;
				intel_pt_next_tid(pt, ptq);
			}
			if (pt->synth_opts.errors) {
				err = intel_pt_synth_error(pt, state->err,
							   ptq->cpu, ptq->pid,
							   ptq->tid,
							   state->from_ip);
				if (err)
					return err;
			}
			continue;
		}

		ptq->state = state;
		ptq->have_sample = true;
		intel_pt_sample_flags(ptq);

		/* Use estimated TSC upon return to user space */
		if (pt->est_tsc &&
		    (state->from_ip >= pt->kernel_start || !state->from_ip) &&
		    state->to_ip && state->to_ip < pt->kernel_start) {
			intel_pt_log("TSC %"PRIx64" est. TSC %"PRIx64"\n",
				     state->timestamp, state->est_timestamp);
			ptq->timestamp = state->est_timestamp;
		/* Use estimated TSC in unknown switch state */
		} else if (ptq->sync_switch &&
			   ptq->switch_state == INTEL_PT_SS_UNKNOWN &&
			   intel_pt_is_switch_ip(ptq, state->to_ip) &&
			   ptq->next_tid == -1) {
			intel_pt_log("TSC %"PRIx64" est. TSC %"PRIx64"\n",
				     state->timestamp, state->est_timestamp);
			ptq->timestamp = state->est_timestamp;
		} else if (state->timestamp > ptq->timestamp) {
			ptq->timestamp = state->timestamp;
		}

		if (!pt->timeless_decoding && ptq->timestamp >= *timestamp) {
			*timestamp = ptq->timestamp;
			return 0;
		}
	}
	return 0;
}
static inline int intel_pt_update_queues(struct intel_pt *pt)
{
	if (pt->queues.new_data) {
		pt->queues.new_data = false;
		return intel_pt_setup_queues(pt);
	}

	return 0;
}
static int intel_pt_process_queues(struct intel_pt *pt, u64 timestamp)
{
	unsigned int queue_nr;
	u64 ts;
	int ret;

	while (1) {
		struct auxtrace_queue *queue;
		struct intel_pt_queue *ptq;

		if (!pt->heap.heap_cnt)
			return 0;

		if (pt->heap.heap_array[0].ordinal >= timestamp)
			return 0;

		queue_nr = pt->heap.heap_array[0].queue_nr;
		queue = &pt->queues.queue_array[queue_nr];
		ptq = queue->priv;

		intel_pt_log("queue %u processing 0x%" PRIx64 " to 0x%" PRIx64 "\n",
			     queue_nr, pt->heap.heap_array[0].ordinal,
			     timestamp);

		auxtrace_heap__pop(&pt->heap);

		if (pt->heap.heap_cnt) {
			ts = pt->heap.heap_array[0].ordinal + 1;
			if (ts > timestamp)
				ts = timestamp;
		} else {
			ts = timestamp;
		}

		intel_pt_set_pid_tid_cpu(pt, queue);

		ret = intel_pt_run_decoder(ptq, &ts);
		if (ret < 0) {
			auxtrace_heap__add(&pt->heap, queue_nr, ts);
			return ret;
		}

		if (!ret) {
			ret = auxtrace_heap__add(&pt->heap, queue_nr, ts);
			if (ret < 0)
				return ret;
		} else {
			ptq->on_heap = false;
		}
	}

	return 0;
}
static int intel_pt_process_timeless_queues(struct intel_pt *pt, pid_t tid,
					    u64 time_)
{
	struct auxtrace_queues *queues = &pt->queues;
	unsigned int i;
	u64 ts = 0;

	for (i = 0; i < queues->nr_queues; i++) {
		struct auxtrace_queue *queue = &pt->queues.queue_array[i];
		struct intel_pt_queue *ptq = queue->priv;

		if (ptq && (tid == -1 || ptq->tid == tid)) {
			ptq->time = time_;
			intel_pt_set_pid_tid_cpu(pt, queue);
			intel_pt_run_decoder(ptq, &ts);
		}
	}
	return 0;
}
static int intel_pt_lost(struct intel_pt *pt, struct perf_sample *sample)
{
	return intel_pt_synth_error(pt, INTEL_PT_ERR_LOST, sample->cpu,
				    sample->pid, sample->tid, 0);
}
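/*
 * Queues are normally created in CPU order for per-cpu recording, so queue
 * N is usually cpu N. Start the lookup at queue_array[cpu] (clamped to the
 * number of queues), then search downwards and finally upwards.
 */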
static struct intel_pt_queue *intel_pt_cpu_to_ptq(struct intel_pt *pt, int cpu)
{
	unsigned i, j;

	if (cpu < 0 || !pt->queues.nr_queues)
		return NULL;

	if ((unsigned)cpu >= pt->queues.nr_queues)
		i = pt->queues.nr_queues - 1;
	else
		i = cpu;

	if (pt->queues.queue_array[i].cpu == cpu)
		return pt->queues.queue_array[i].priv;

	for (j = 0; i > 0; j++) {
		if (pt->queues.queue_array[--i].cpu == cpu)
			return pt->queues.queue_array[i].priv;
	}

	for (; j < pt->queues.nr_queues; j++) {
		if (pt->queues.queue_array[j].cpu == cpu)
			return pt->queues.queue_array[j].priv;
	}

	return NULL;
}
static int intel_pt_sync_switch(struct intel_pt *pt, int cpu, pid_t tid,
				u64 timestamp)
{
	struct intel_pt_queue *ptq;
	int err;

	if (!pt->sync_switch)
		return 1;

	ptq = intel_pt_cpu_to_ptq(pt, cpu);
	if (!ptq || !ptq->sync_switch)
		return 1;

	switch (ptq->switch_state) {
	case INTEL_PT_SS_NOT_TRACING:
		ptq->next_tid = -1;
		break;
	case INTEL_PT_SS_UNKNOWN:
	case INTEL_PT_SS_TRACING:
		ptq->next_tid = tid;
		ptq->switch_state = INTEL_PT_SS_EXPECTING_SWITCH_IP;
		return 0;
	case INTEL_PT_SS_EXPECTING_SWITCH_EVENT:
		if (!ptq->on_heap) {
			ptq->timestamp = perf_time_to_tsc(timestamp,
							  &pt->tc);
			err = auxtrace_heap__add(&pt->heap, ptq->queue_nr,
						 ptq->timestamp);
			if (err)
				return err;
			ptq->on_heap = true;
		}
		ptq->switch_state = INTEL_PT_SS_TRACING;
		break;
	case INTEL_PT_SS_EXPECTING_SWITCH_IP:
		ptq->next_tid = tid;
		intel_pt_log("ERROR: cpu %d expecting switch ip\n", cpu);
		break;
	default:
		break;
	}

	return 1;
}
static int intel_pt_process_switch(struct intel_pt *pt,
				   struct perf_sample *sample)
{
	struct perf_evsel *evsel;
	pid_t tid;
	int cpu, ret;

	evsel = perf_evlist__id2evsel(pt->session->evlist, sample->id);
	if (evsel != pt->switch_evsel)
		return 0;

	tid = perf_evsel__intval(evsel, sample, "next_pid");
	cpu = sample->cpu;

	intel_pt_log("sched_switch: cpu %d tid %d time %"PRIu64" tsc %#"PRIx64"\n",
		     cpu, tid, sample->time, perf_time_to_tsc(sample->time,
		     &pt->tc));

	ret = intel_pt_sync_switch(pt, cpu, tid, sample->time);
	if (ret <= 0)
		return ret;

	return machine__set_current_tid(pt->machine, cpu, -1, tid);
}
static int intel_pt_context_switch(struct intel_pt *pt, union perf_event *event,
				   struct perf_sample *sample)
{
	bool out = event->header.misc & PERF_RECORD_MISC_SWITCH_OUT;
	pid_t pid, tid;
	int cpu, ret;

	cpu = sample->cpu;

	if (pt->have_sched_switch == 3) {
		if (!out)
			return 0;
		if (event->header.type != PERF_RECORD_SWITCH_CPU_WIDE) {
			pr_err("Expecting CPU-wide context switch event\n");
			return -EINVAL;
		}
		pid = event->context_switch.next_prev_pid;
		tid = event->context_switch.next_prev_tid;
	} else {
		if (out)
			return 0;
		pid = sample->pid;
		tid = sample->tid;
	}

	if (tid == -1) {
		pr_err("context_switch event has no tid\n");
		return -EINVAL;
	}

	intel_pt_log("context_switch: cpu %d pid %d tid %d time %"PRIu64" tsc %#"PRIx64"\n",
		     cpu, pid, tid, sample->time, perf_time_to_tsc(sample->time,
		     &pt->tc));

	ret = intel_pt_sync_switch(pt, cpu, tid, sample->time);
	if (ret <= 0)
		return ret;

	return machine__set_current_tid(pt->machine, cpu, pid, tid);
}
static int intel_pt_process_itrace_start(struct intel_pt *pt,
					 union perf_event *event,
					 struct perf_sample *sample)
{
	if (!pt->per_cpu_mmaps)
		return 0;

	intel_pt_log("itrace_start: cpu %d pid %d tid %d time %"PRIu64" tsc %#"PRIx64"\n",
		     sample->cpu, event->itrace_start.pid,
		     event->itrace_start.tid, sample->time,
		     perf_time_to_tsc(sample->time, &pt->tc));

	return machine__set_current_tid(pt->machine, sample->cpu,
					event->itrace_start.pid,
					event->itrace_start.tid);
}
static int intel_pt_process_event(struct perf_session *session,
				  union perf_event *event,
				  struct perf_sample *sample,
				  struct perf_tool *tool)
{
	struct intel_pt *pt = container_of(session->auxtrace, struct intel_pt,
					   auxtrace);
	u64 timestamp;
	int err = 0;

	if (dump_trace)
		return 0;

	if (!tool->ordered_events) {
		pr_err("Intel Processor Trace requires ordered events\n");
		return -EINVAL;
	}

	if (sample->time && sample->time != (u64)-1)
		timestamp = perf_time_to_tsc(sample->time, &pt->tc);
	else
		timestamp = 0;

	if (timestamp || pt->timeless_decoding) {
		err = intel_pt_update_queues(pt);
		if (err)
			return err;
	}

	if (pt->timeless_decoding) {
		if (event->header.type == PERF_RECORD_EXIT) {
			err = intel_pt_process_timeless_queues(pt,
							       event->fork.tid,
							       sample->time);
		}
	} else if (timestamp) {
		err = intel_pt_process_queues(pt, timestamp);
	}
	if (err)
		return err;

	if (event->header.type == PERF_RECORD_AUX &&
	    (event->aux.flags & PERF_AUX_FLAG_TRUNCATED) &&
	    pt->synth_opts.errors) {
		err = intel_pt_lost(pt, sample);
		if (err)
			return err;
	}

	if (pt->switch_evsel && event->header.type == PERF_RECORD_SAMPLE)
		err = intel_pt_process_switch(pt, sample);
	else if (event->header.type == PERF_RECORD_ITRACE_START)
		err = intel_pt_process_itrace_start(pt, event, sample);
	else if (event->header.type == PERF_RECORD_SWITCH ||
		 event->header.type == PERF_RECORD_SWITCH_CPU_WIDE)
		err = intel_pt_context_switch(pt, event, sample);

	intel_pt_log("event %s (%u): cpu %d time %"PRIu64" tsc %#"PRIx64"\n",
		     perf_event__name(event->header.type), event->header.type,
		     sample->cpu, sample->time, timestamp);

	return err;
}
static int intel_pt_flush(struct perf_session *session, struct perf_tool *tool)
{
	struct intel_pt *pt = container_of(session->auxtrace, struct intel_pt,
					   auxtrace);
	int ret;

	if (dump_trace)
		return 0;

	if (!tool->ordered_events)
		return -EINVAL;

	ret = intel_pt_update_queues(pt);
	if (ret < 0)
		return ret;

	if (pt->timeless_decoding)
		return intel_pt_process_timeless_queues(pt, -1,
							MAX_TIMESTAMP - 1);

	return intel_pt_process_queues(pt, MAX_TIMESTAMP);
}
static void intel_pt_free_events(struct perf_session *session)
{
	struct intel_pt *pt = container_of(session->auxtrace, struct intel_pt,
					   auxtrace);
	struct auxtrace_queues *queues = &pt->queues;
	unsigned int i;

	for (i = 0; i < queues->nr_queues; i++) {
		intel_pt_free_queue(queues->queue_array[i].priv);
		queues->queue_array[i].priv = NULL;
	}
	intel_pt_log_disable();
	auxtrace_queues__free(queues);
}
static void intel_pt_free(struct perf_session *session)
{
	struct intel_pt *pt = container_of(session->auxtrace, struct intel_pt,
					   auxtrace);

	auxtrace_heap__free(&pt->heap);
	intel_pt_free_events(session);
	session->auxtrace = NULL;
	thread__put(pt->unknown_thread);
	addr_filters__exit(&pt->filts);
	zfree(&pt->filter);
	free(pt);
}
static int intel_pt_process_auxtrace_event(struct perf_session *session,
					   union perf_event *event,
					   struct perf_tool *tool __maybe_unused)
{
	struct intel_pt *pt = container_of(session->auxtrace, struct intel_pt,
					   auxtrace);

	if (!pt->data_queued) {
		struct auxtrace_buffer *buffer;
		off_t data_offset;
		int fd = perf_data__fd(session->data);
		int err;

		if (perf_data__is_pipe(session->data)) {
			data_offset = 0;
		} else {
			data_offset = lseek(fd, 0, SEEK_CUR);
			if (data_offset == -1)
				return -errno;
		}

		err = auxtrace_queues__add_event(&pt->queues, session, event,
						 data_offset, &buffer);
		if (err)
			return err;

		/* Dump here now we have copied a piped trace out of the pipe */
		if (dump_trace) {
			if (auxtrace_buffer__get_data(buffer, fd)) {
				intel_pt_dump_event(pt, buffer->data,
						    buffer->size);
				auxtrace_buffer__put_data(buffer);
			}
		}
	}

	return 0;
}
struct intel_pt_synth {
	struct perf_tool dummy_tool;
	struct perf_session *session;
};

static int intel_pt_event_synth(struct perf_tool *tool,
				union perf_event *event,
				struct perf_sample *sample __maybe_unused,
				struct machine *machine __maybe_unused)
{
	struct intel_pt_synth *intel_pt_synth =
			container_of(tool, struct intel_pt_synth, dummy_tool);

	return perf_session__deliver_synth_event(intel_pt_synth->session, event,
						 NULL);
}
static int intel_pt_synth_event(struct perf_session *session, const char *name,
				struct perf_event_attr *attr, u64 id)
{
	struct intel_pt_synth intel_pt_synth;
	int err;

	pr_debug("Synthesizing '%s' event with id %" PRIu64 " sample type %#" PRIx64 "\n",
		 name, id, (u64)attr->sample_type);

	memset(&intel_pt_synth, 0, sizeof(struct intel_pt_synth));
	intel_pt_synth.session = session;

	err = perf_event__synthesize_attr(&intel_pt_synth.dummy_tool, attr, 1,
					  &id, intel_pt_event_synth);
	if (err)
		pr_err("%s: failed to synthesize '%s' event type\n",
		       __func__, name);

	return err;
}
static void intel_pt_set_event_name(struct perf_evlist *evlist, u64 id,
				    const char *name)
{
	struct perf_evsel *evsel;

	evlist__for_each_entry(evlist, evsel) {
		if (evsel->id && evsel->id[0] == id) {
			if (evsel->name)
				zfree(&evsel->name);
			evsel->name = strdup(name);
			break;
		}
	}
}
static struct perf_evsel *intel_pt_evsel(struct intel_pt *pt,
					 struct perf_evlist *evlist)
{
	struct perf_evsel *evsel;

	evlist__for_each_entry(evlist, evsel) {
		if (evsel->attr.type == pt->pmu_type && evsel->ids)
			return evsel;
	}

	return NULL;
}
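/*
 * Synthesized event ids start at evsel->id[0] + 1000000000, a large offset
 * chosen so they stay clear of sample ids already allocated in the
 * perf.data file.
 */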
static int intel_pt_synth_events(struct intel_pt *pt,
				 struct perf_session *session)
{
	struct perf_evlist *evlist = session->evlist;
	struct perf_evsel *evsel = intel_pt_evsel(pt, evlist);
	struct perf_event_attr attr;
	u64 id;
	int err;

	if (!evsel) {
		pr_debug("There are no selected events with Intel Processor Trace data\n");
		return 0;
	}

	memset(&attr, 0, sizeof(struct perf_event_attr));
	attr.size = sizeof(struct perf_event_attr);
	attr.type = PERF_TYPE_HARDWARE;
	attr.sample_type = evsel->attr.sample_type & PERF_SAMPLE_MASK;
	attr.sample_type |= PERF_SAMPLE_IP | PERF_SAMPLE_TID |
			    PERF_SAMPLE_PERIOD;
	if (pt->timeless_decoding)
		attr.sample_type &= ~(u64)PERF_SAMPLE_TIME;
	else
		attr.sample_type |= PERF_SAMPLE_TIME;
	if (!pt->per_cpu_mmaps)
		attr.sample_type &= ~(u64)PERF_SAMPLE_CPU;
	attr.exclude_user = evsel->attr.exclude_user;
	attr.exclude_kernel = evsel->attr.exclude_kernel;
	attr.exclude_hv = evsel->attr.exclude_hv;
	attr.exclude_host = evsel->attr.exclude_host;
	attr.exclude_guest = evsel->attr.exclude_guest;
	attr.sample_id_all = evsel->attr.sample_id_all;
	attr.read_format = evsel->attr.read_format;

	id = evsel->id[0] + 1000000000;
	if (!id)
		id = 1;

	if (pt->synth_opts.branches) {
		attr.config = PERF_COUNT_HW_BRANCH_INSTRUCTIONS;
		attr.sample_period = 1;
		attr.sample_type |= PERF_SAMPLE_ADDR;
		err = intel_pt_synth_event(session, "branches", &attr, id);
		if (err)
			return err;
		pt->sample_branches = true;
		pt->branches_sample_type = attr.sample_type;
		pt->branches_id = id;
		id += 1;
		attr.sample_type &= ~(u64)PERF_SAMPLE_ADDR;
	}

	if (pt->synth_opts.callchain)
		attr.sample_type |= PERF_SAMPLE_CALLCHAIN;
	if (pt->synth_opts.last_branch)
		attr.sample_type |= PERF_SAMPLE_BRANCH_STACK;

	if (pt->synth_opts.instructions) {
		attr.config = PERF_COUNT_HW_INSTRUCTIONS;
		if (pt->synth_opts.period_type == PERF_ITRACE_PERIOD_NANOSECS)
			attr.sample_period =
				intel_pt_ns_to_ticks(pt, pt->synth_opts.period);
		else
			attr.sample_period = pt->synth_opts.period;
		err = intel_pt_synth_event(session, "instructions", &attr, id);
		if (err)
			return err;
		pt->sample_instructions = true;
		pt->instructions_sample_type = attr.sample_type;
		pt->instructions_id = id;
		id += 1;
	}

	attr.sample_type &= ~(u64)PERF_SAMPLE_PERIOD;
	attr.sample_period = 1;

	if (pt->synth_opts.transactions) {
		attr.config = PERF_COUNT_HW_INSTRUCTIONS;
		err = intel_pt_synth_event(session, "transactions", &attr, id);
		if (err)
			return err;
		pt->sample_transactions = true;
		pt->transactions_sample_type = attr.sample_type;
		pt->transactions_id = id;
		intel_pt_set_event_name(evlist, id, "transactions");
		id += 1;
	}

	attr.type = PERF_TYPE_SYNTH;
	attr.sample_type |= PERF_SAMPLE_RAW;

	if (pt->synth_opts.ptwrites) {
		attr.config = PERF_SYNTH_INTEL_PTWRITE;
		err = intel_pt_synth_event(session, "ptwrite", &attr, id);
		if (err)
			return err;
		pt->sample_ptwrites = true;
		pt->ptwrites_sample_type = attr.sample_type;
		pt->ptwrites_id = id;
		intel_pt_set_event_name(evlist, id, "ptwrite");
		id += 1;
	}

	if (pt->synth_opts.pwr_events) {
		pt->sample_pwr_events = true;
		pt->pwr_events_sample_type = attr.sample_type;

		attr.config = PERF_SYNTH_INTEL_CBR;
		err = intel_pt_synth_event(session, "cbr", &attr, id);
		if (err)
			return err;
		pt->cbr_id = id;
		intel_pt_set_event_name(evlist, id, "cbr");
		id += 1;
	}

	if (pt->synth_opts.pwr_events && (evsel->attr.config & 0x10)) {
		attr.config = PERF_SYNTH_INTEL_MWAIT;
		err = intel_pt_synth_event(session, "mwait", &attr, id);
		if (err)
			return err;
		pt->mwait_id = id;
		intel_pt_set_event_name(evlist, id, "mwait");
		id += 1;

		attr.config = PERF_SYNTH_INTEL_PWRE;
		err = intel_pt_synth_event(session, "pwre", &attr, id);
		if (err)
			return err;
		pt->pwre_id = id;
		intel_pt_set_event_name(evlist, id, "pwre");
		id += 1;

		attr.config = PERF_SYNTH_INTEL_EXSTOP;
		err = intel_pt_synth_event(session, "exstop", &attr, id);
		if (err)
			return err;
		pt->exstop_id = id;
		intel_pt_set_event_name(evlist, id, "exstop");
		id += 1;

		attr.config = PERF_SYNTH_INTEL_PWRX;
		err = intel_pt_synth_event(session, "pwrx", &attr, id);
		if (err)
			return err;
		pt->pwrx_id = id;
		intel_pt_set_event_name(evlist, id, "pwrx");
		id += 1;
	}

	return 0;
}
static struct perf_evsel *intel_pt_find_sched_switch(struct perf_evlist *evlist)
{
	struct perf_evsel *evsel;

	evlist__for_each_entry_reverse(evlist, evsel) {
		const char *name = perf_evsel__name(evsel);

		if (!strcmp(name, "sched:sched_switch"))
			return evsel;
	}

	return NULL;
}

static bool intel_pt_find_switch(struct perf_evlist *evlist)
{
	struct perf_evsel *evsel;

	evlist__for_each_entry(evlist, evsel) {
		if (evsel->attr.context_switch)
			return true;
	}

	return false;
}
static int intel_pt_perf_config(const char *var, const char *value, void *data)
{
	struct intel_pt *pt = data;

	if (!strcmp(var, "intel-pt.mispred-all"))
		pt->mispred_all = perf_config_bool(var, value);

	return 0;
}
static const char * const intel_pt_info_fmts[] = {
	[INTEL_PT_PMU_TYPE]		= "  PMU Type            %"PRId64"\n",
	[INTEL_PT_TIME_SHIFT]		= "  Time Shift          %"PRIu64"\n",
	[INTEL_PT_TIME_MULT]		= "  Time Multiplier     %"PRIu64"\n",
	[INTEL_PT_TIME_ZERO]		= "  Time Zero           %"PRIu64"\n",
	[INTEL_PT_CAP_USER_TIME_ZERO]	= "  Cap Time Zero       %"PRId64"\n",
	[INTEL_PT_TSC_BIT]		= "  TSC bit             %#"PRIx64"\n",
	[INTEL_PT_NORETCOMP_BIT]	= "  NoRETComp bit       %#"PRIx64"\n",
	[INTEL_PT_HAVE_SCHED_SWITCH]	= "  Have sched_switch   %"PRId64"\n",
	[INTEL_PT_SNAPSHOT_MODE]	= "  Snapshot mode       %"PRId64"\n",
	[INTEL_PT_PER_CPU_MMAPS]	= "  Per-cpu maps        %"PRId64"\n",
	[INTEL_PT_MTC_BIT]		= "  MTC bit             %#"PRIx64"\n",
	[INTEL_PT_TSC_CTC_N]		= "  TSC:CTC numerator   %"PRIu64"\n",
	[INTEL_PT_TSC_CTC_D]		= "  TSC:CTC denominator %"PRIu64"\n",
	[INTEL_PT_CYC_BIT]		= "  CYC bit             %#"PRIx64"\n",
	[INTEL_PT_MAX_NONTURBO_RATIO]	= "  Max non-turbo ratio %"PRIu64"\n",
	[INTEL_PT_FILTER_STR_LEN]	= "  Filter string len.  %"PRIu64"\n",
};
static void intel_pt_print_info(u64 *arr, int start, int finish)
{
	int i;

	if (!dump_trace)
		return;

	for (i = start; i <= finish; i++)
		fprintf(stdout, intel_pt_info_fmts[i], arr[i]);
}

static void intel_pt_print_info_str(const char *name, const char *str)
{
	if (!dump_trace)
		return;

	fprintf(stdout, "  %-20s%s\n", name, str ? str : "");
}

static bool intel_pt_has(struct auxtrace_info_event *auxtrace_info, int pos)
{
	return auxtrace_info->header.size >=
		sizeof(struct auxtrace_info_event) + (sizeof(u64) * (pos + 1));
}
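/*
 * The AUXTRACE_INFO event carries an array of u64 priv[] values written at
 * record time (see the INTEL_PT_* indices). The array has grown over time,
 * so intel_pt_has() checks the event size before each optional group is
 * read, keeping newer tools compatible with older perf.data files.
 */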
int intel_pt_process_auxtrace_info(union perf_event *event,
				   struct perf_session *session)
{
	struct auxtrace_info_event *auxtrace_info = &event->auxtrace_info;
	size_t min_sz = sizeof(u64) * INTEL_PT_PER_CPU_MMAPS;
	struct intel_pt *pt;
	void *info_end;
	u64 *info;
	int err;

	if (auxtrace_info->header.size < sizeof(struct auxtrace_info_event) +
					min_sz)
		return -EINVAL;

	pt = zalloc(sizeof(struct intel_pt));
	if (!pt)
		return -ENOMEM;

	addr_filters__init(&pt->filts);

	err = perf_config(intel_pt_perf_config, pt);
	if (err)
		goto err_free;

	err = auxtrace_queues__init(&pt->queues);
	if (err)
		goto err_free;

	intel_pt_log_set_name(INTEL_PT_PMU_NAME);

	pt->session = session;
	pt->machine = &session->machines.host; /* No kvm support */
	pt->auxtrace_type = auxtrace_info->type;
	pt->pmu_type = auxtrace_info->priv[INTEL_PT_PMU_TYPE];
	pt->tc.time_shift = auxtrace_info->priv[INTEL_PT_TIME_SHIFT];
	pt->tc.time_mult = auxtrace_info->priv[INTEL_PT_TIME_MULT];
	pt->tc.time_zero = auxtrace_info->priv[INTEL_PT_TIME_ZERO];
	pt->cap_user_time_zero = auxtrace_info->priv[INTEL_PT_CAP_USER_TIME_ZERO];
	pt->tsc_bit = auxtrace_info->priv[INTEL_PT_TSC_BIT];
	pt->noretcomp_bit = auxtrace_info->priv[INTEL_PT_NORETCOMP_BIT];
	pt->have_sched_switch = auxtrace_info->priv[INTEL_PT_HAVE_SCHED_SWITCH];
	pt->snapshot_mode = auxtrace_info->priv[INTEL_PT_SNAPSHOT_MODE];
	pt->per_cpu_mmaps = auxtrace_info->priv[INTEL_PT_PER_CPU_MMAPS];
	intel_pt_print_info(&auxtrace_info->priv[0], INTEL_PT_PMU_TYPE,
			    INTEL_PT_PER_CPU_MMAPS);

	if (intel_pt_has(auxtrace_info, INTEL_PT_CYC_BIT)) {
		pt->mtc_bit = auxtrace_info->priv[INTEL_PT_MTC_BIT];
		pt->mtc_freq_bits = auxtrace_info->priv[INTEL_PT_MTC_FREQ_BITS];
		pt->tsc_ctc_ratio_n = auxtrace_info->priv[INTEL_PT_TSC_CTC_N];
		pt->tsc_ctc_ratio_d = auxtrace_info->priv[INTEL_PT_TSC_CTC_D];
		pt->cyc_bit = auxtrace_info->priv[INTEL_PT_CYC_BIT];
		intel_pt_print_info(&auxtrace_info->priv[0], INTEL_PT_MTC_BIT,
				    INTEL_PT_CYC_BIT);
	}

	if (intel_pt_has(auxtrace_info, INTEL_PT_MAX_NONTURBO_RATIO)) {
		pt->max_non_turbo_ratio =
			auxtrace_info->priv[INTEL_PT_MAX_NONTURBO_RATIO];
		intel_pt_print_info(&auxtrace_info->priv[0],
				    INTEL_PT_MAX_NONTURBO_RATIO,
				    INTEL_PT_MAX_NONTURBO_RATIO);
	}

	info = &auxtrace_info->priv[INTEL_PT_FILTER_STR_LEN] + 1;
	info_end = (void *)info + auxtrace_info->header.size;

	if (intel_pt_has(auxtrace_info, INTEL_PT_FILTER_STR_LEN)) {
		size_t len;

		len = auxtrace_info->priv[INTEL_PT_FILTER_STR_LEN];
		intel_pt_print_info(&auxtrace_info->priv[0],
				    INTEL_PT_FILTER_STR_LEN,
				    INTEL_PT_FILTER_STR_LEN);
		if (len) {
			const char *filter = (const char *)info;

			len = roundup(len + 1, 8);
			info += len >> 3;
			if ((void *)info > info_end) {
				pr_err("%s: bad filter string length\n", __func__);
				err = -EINVAL;
				goto err_free_queues;
			}
			pt->filter = memdup(filter, len);
			if (!pt->filter) {
				err = -ENOMEM;
				goto err_free_queues;
			}
			if (session->header.needs_swap)
				mem_bswap_64(pt->filter, len);
			if (pt->filter[len - 1]) {
				pr_err("%s: filter string not null terminated\n", __func__);
				err = -EINVAL;
				goto err_free_queues;
			}
			err = addr_filters__parse_bare_filter(&pt->filts,
							      filter);
			if (err)
				goto err_free_queues;
		}
		intel_pt_print_info_str("Filter string", pt->filter);
	}

	pt->timeless_decoding = intel_pt_timeless_decoding(pt);
	pt->have_tsc = intel_pt_have_tsc(pt);
	pt->sampling_mode = false;
	pt->est_tsc = !pt->timeless_decoding;

	pt->unknown_thread = thread__new(999999999, 999999999);
	if (!pt->unknown_thread) {
		err = -ENOMEM;
		goto err_free_queues;
	}

	/*
	 * Since this thread will not be kept in any rbtree or list, initialize
	 * its list node so that at thread__put() the current thread lifetime
	 * assumption is kept and we don't segfault at list_del_init().
	 */
	INIT_LIST_HEAD(&pt->unknown_thread->node);

	err = thread__set_comm(pt->unknown_thread, "unknown", 0);
	if (err)
		goto err_delete_thread;
	if (thread__init_map_groups(pt->unknown_thread, pt->machine)) {
		err = -ENOMEM;
		goto err_delete_thread;
	}

	pt->auxtrace.process_event = intel_pt_process_event;
	pt->auxtrace.process_auxtrace_event = intel_pt_process_auxtrace_event;
	pt->auxtrace.flush_events = intel_pt_flush;
	pt->auxtrace.free_events = intel_pt_free_events;
	pt->auxtrace.free = intel_pt_free;
	session->auxtrace = &pt->auxtrace;

	if (dump_trace)
		return 0;

	if (pt->have_sched_switch == 1) {
		pt->switch_evsel = intel_pt_find_sched_switch(session->evlist);
		if (!pt->switch_evsel) {
			pr_err("%s: missing sched_switch event\n", __func__);
			err = -EINVAL;
			goto err_delete_thread;
		}
	} else if (pt->have_sched_switch == 2 &&
		   !intel_pt_find_switch(session->evlist)) {
		pr_err("%s: missing context_switch attribute flag\n", __func__);
		err = -EINVAL;
		goto err_delete_thread;
	}

	if (session->itrace_synth_opts && session->itrace_synth_opts->set) {
		pt->synth_opts = *session->itrace_synth_opts;
	} else {
		itrace_synth_opts__set_default(&pt->synth_opts,
				session->itrace_synth_opts->default_no_sample);
		if (use_browser != -1) {
			pt->synth_opts.branches = false;
			pt->synth_opts.callchain = true;
		}
		if (session->itrace_synth_opts)
			pt->synth_opts.thread_stack =
				session->itrace_synth_opts->thread_stack;
	}

	if (pt->synth_opts.log)
		intel_pt_log_enable();

	/* Maximum non-turbo ratio is TSC freq / 100 MHz */
	if (pt->tc.time_mult) {
		u64 tsc_freq = intel_pt_ns_to_ticks(pt, 1000000000);

		if (!pt->max_non_turbo_ratio)
			pt->max_non_turbo_ratio =
					(tsc_freq + 50000000) / 100000000;
		intel_pt_log("TSC frequency %"PRIu64"\n", tsc_freq);
		intel_pt_log("Maximum non-turbo ratio %u\n",
			     pt->max_non_turbo_ratio);
		pt->cbr2khz = tsc_freq / pt->max_non_turbo_ratio / 1000;
	}

	if (pt->synth_opts.calls)
		pt->branches_filter |= PERF_IP_FLAG_CALL | PERF_IP_FLAG_ASYNC |
				       PERF_IP_FLAG_TRACE_END;
	if (pt->synth_opts.returns)
		pt->branches_filter |= PERF_IP_FLAG_RETURN |
				       PERF_IP_FLAG_TRACE_BEGIN;

	if (pt->synth_opts.callchain && !symbol_conf.use_callchain) {
		symbol_conf.use_callchain = true;
		if (callchain_register_param(&callchain_param) < 0) {
			symbol_conf.use_callchain = false;
			pt->synth_opts.callchain = false;
		}
	}

	err = intel_pt_synth_events(pt, session);
	if (err)
		goto err_delete_thread;

	err = auxtrace_queues__process_index(&pt->queues, session);
	if (err)
		goto err_delete_thread;

	if (pt->queues.populated)
		pt->data_queued = true;

	if (pt->timeless_decoding)
		pr_debug2("Intel PT decoding without timestamps\n");

	return 0;

err_delete_thread:
	thread__zput(pt->unknown_thread);
err_free_queues:
	intel_pt_log_disable();
	auxtrace_queues__free(&pt->queues);
	session->auxtrace = NULL;
err_free:
	addr_filters__exit(&pt->filts);
	zfree(&pt->filter);
	free(pt);
	return err;
}