// SPDX-License-Identifier: GPL-2.0-only
/*
 * intel_pt.c: Intel Processor Trace support
 * Copyright (c) 2013-2015, Intel Corporation.
 */

#include <errno.h>
#include <stdbool.h>
#include <linux/kernel.h>
#include <linux/types.h>
#include <linux/bitops.h>
#include <linux/log2.h>
#include <cpuid.h>

#include "../../perf.h"
#include "../../util/session.h"
#include "../../util/event.h"
#include "../../util/evlist.h"
#include "../../util/evsel.h"
#include "../../util/cpumap.h"
#include <subcmd/parse-options.h>
#include "../../util/parse-events.h"
#include "../../util/pmu.h"
#include "../../util/debug.h"
#include "../../util/auxtrace.h"
#include "../../util/tsc.h"
#include "../../util/intel-pt.h"

#define KiB(x) ((x) * 1024)
#define MiB(x) ((x) * 1024 * 1024)
#define KiB_MASK(x) (KiB(x) - 1)
#define MiB_MASK(x) (MiB(x) - 1)

#define INTEL_PT_PSB_PERIOD_NEAR	256

struct intel_pt_snapshot_ref {
	void	*ref_buf;
	size_t	ref_offset;
	bool	wrapped;
};

struct intel_pt_recording {
	struct auxtrace_record		itr;
	struct perf_pmu			*intel_pt_pmu;
	int				have_sched_switch;
	struct perf_evlist		*evlist;
	bool				snapshot_mode;
	bool				snapshot_init_done;
	size_t				snapshot_size;
	size_t				snapshot_ref_buf_size;
	int				snapshot_ref_cnt;
	struct intel_pt_snapshot_ref	*snapshot_refs;
	size_t				priv_size;
};

static int intel_pt_parse_terms_with_default(struct list_head *formats,
					     const char *str,
					     u64 *config)
{
	struct list_head *terms;
	struct perf_event_attr attr = { .size = 0, };
	int err;

	terms = malloc(sizeof(struct list_head));
	if (!terms)
		return -ENOMEM;

	INIT_LIST_HEAD(terms);

	err = parse_events_terms(terms, str);
	if (err)
		goto out_free;

	attr.config = *config;
	err = perf_pmu__config_terms(formats, &attr, terms, true, NULL);
	if (err)
		goto out_free;

	*config = attr.config;
out_free:
	parse_events_terms__delete(terms);
	return err;
}

static int intel_pt_parse_terms(struct list_head *formats, const char *str,
				u64 *config)
{
	*config = 0;
	return intel_pt_parse_terms_with_default(formats, str, config);
}

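/*
 * For illustration: intel_pt_parse_terms() resolves a config-terms string
 * against the PMU's sysfs format descriptions.  For example, parsing
 * "tsc,mtc,mtc_period=3" sets the config bits named by format/tsc,
 * format/mtc and format/mtc_period - the same kind of string that
 * intel_pt_default_config() below constructs.
 */
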
static u64 intel_pt_masked_bits(u64 mask, u64 bits)
{
	const u64 top_bit = 1ULL << 63;
	u64 res = 0;
	int i;

	/* Pack together the bits of 'bits' selected by 'mask' */
	for (i = 0; i < 64; i++) {
		if (mask & top_bit) {
			res <<= 1;
			if (bits & top_bit)
				res |= 1;
		}
		mask <<= 1;
		bits <<= 1;
	}

	return res;
}

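/*
 * Worked example for intel_pt_masked_bits(): with mask 0xf00 (a 4-bit field
 * at bit 8) and bits 0x300, the two set bits inside the field are packed
 * together, giving the field value 0x3.
 */
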
static int intel_pt_read_config(struct perf_pmu *intel_pt_pmu, const char *str,
				struct perf_evlist *evlist, u64 *res)
{
	struct perf_evsel *evsel;
	u64 mask;

	*res = 0;

	mask = perf_pmu__format_bits(&intel_pt_pmu->format, str);
	if (!mask)
		return -EINVAL;

	evlist__for_each_entry(evlist, evsel) {
		if (evsel->attr.type == intel_pt_pmu->type) {
			*res = intel_pt_masked_bits(mask, evsel->attr.config);
			return 0;
		}
	}

	return -EINVAL;
}

static size_t intel_pt_psb_period(struct perf_pmu *intel_pt_pmu,
				  struct perf_evlist *evlist)
{
	u64 val;
	int err, topa_multiple_entries;
	size_t psb_period;

	if (perf_pmu__scan_file(intel_pt_pmu, "caps/topa_multiple_entries",
				"%d", &topa_multiple_entries) != 1)
		topa_multiple_entries = 0;

	/*
	 * Use caps/topa_multiple_entries to indicate early hardware that had
	 * extra frequent PSBs.
	 */
	if (!topa_multiple_entries) {
		psb_period = 256;
		goto out;
	}

	err = intel_pt_read_config(intel_pt_pmu, "psb_period", evlist, &val);
	if (err)
		val = 0;

	psb_period = 1 << (val + 11);
out:
	pr_debug2("%s psb_period %zu\n", intel_pt_pmu->name, psb_period);
	return psb_period;
}

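/*
 * Worked example: a psb_period config value of 3 gives a PSB period of
 * 1 << (3 + 11) = 16 KiB of trace data, while early hardware without the
 * topa_multiple_entries capability is assumed above to emit a PSB roughly
 * every 256 bytes.
 */
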
static int intel_pt_pick_bit(int bits, int target)
{
	int pos, pick = -1;

	for (pos = 0; bits; bits >>= 1, pos++) {
		if (bits & 1) {
			if (pos <= target || pick < 0)
				pick = pos;
		}
	}

	return pick;
}

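/*
 * Worked example for intel_pt_pick_bit(): with bits 0x2a (positions 1, 3
 * and 5 set) and target 3, position 3 is picked: the highest set position
 * not above the target.  If only positions above the target are set, the
 * lowest of those is picked instead.
 */
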
static u64 intel_pt_default_config(struct perf_pmu *intel_pt_pmu)
{
	char buf[256];
	int mtc, mtc_periods = 0, mtc_period;
	int psb_cyc, psb_periods, psb_period;
	int pos = 0;
	u64 config;
	char c;

	pos += scnprintf(buf + pos, sizeof(buf) - pos, "tsc");

	if (perf_pmu__scan_file(intel_pt_pmu, "caps/mtc", "%d",
				&mtc) != 1)
		mtc = 1;
	if (mtc) {
		if (perf_pmu__scan_file(intel_pt_pmu, "caps/mtc_periods", "%x",
					&mtc_periods) != 1)
			mtc_periods = 0;
		if (mtc_periods) {
			mtc_period = intel_pt_pick_bit(mtc_periods, 3);
			pos += scnprintf(buf + pos, sizeof(buf) - pos,
					 ",mtc,mtc_period=%d", mtc_period);
		}
	}

	if (perf_pmu__scan_file(intel_pt_pmu, "caps/psb_cyc", "%d",
				&psb_cyc) != 1)
		psb_cyc = 1;
	if (psb_cyc && mtc_periods) {
		if (perf_pmu__scan_file(intel_pt_pmu, "caps/psb_periods", "%x",
					&psb_periods) != 1)
			psb_periods = 0;
		if (psb_periods) {
			psb_period = intel_pt_pick_bit(psb_periods, 3);
			pos += scnprintf(buf + pos, sizeof(buf) - pos,
					 ",psb_period=%d", psb_period);
		}
	}

	if (perf_pmu__scan_file(intel_pt_pmu, "format/pt", "%c", &c) == 1 &&
	    perf_pmu__scan_file(intel_pt_pmu, "format/branch", "%c", &c) == 1)
		pos += scnprintf(buf + pos, sizeof(buf) - pos, ",pt,branch");

	pr_debug2("%s default config: %s\n", intel_pt_pmu->name, buf);

	intel_pt_parse_terms(&intel_pt_pmu->format, buf, &config);

	return config;
}

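/*
 * On typical hardware the resulting default config string looks like
 * "tsc,mtc,mtc_period=3,psb_period=3,pt,branch"; the exact terms depend on
 * the capabilities the PMU advertises under caps/.
 */
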
static int intel_pt_parse_snapshot_options(struct auxtrace_record *itr,
					   struct record_opts *opts,
					   const char *str)
{
	struct intel_pt_recording *ptr =
			container_of(itr, struct intel_pt_recording, itr);
	unsigned long long snapshot_size = 0;
	char *endptr;

	if (str) {
		snapshot_size = strtoull(str, &endptr, 0);
		if (*endptr || snapshot_size > SIZE_MAX)
			return -1;
	}

	opts->auxtrace_snapshot_mode = true;
	opts->auxtrace_snapshot_size = snapshot_size;

	ptr->snapshot_size = snapshot_size;

	return 0;
}

struct perf_event_attr *
intel_pt_pmu_default_config(struct perf_pmu *intel_pt_pmu)
{
	struct perf_event_attr *attr;

	attr = zalloc(sizeof(struct perf_event_attr));
	if (!attr)
		return NULL;

	attr->config = intel_pt_default_config(intel_pt_pmu);

	intel_pt_pmu->selectable = true;

	return attr;
}

static const char *intel_pt_find_filter(struct perf_evlist *evlist,
					struct perf_pmu *intel_pt_pmu)
{
	struct perf_evsel *evsel;

	evlist__for_each_entry(evlist, evsel) {
		if (evsel->attr.type == intel_pt_pmu->type)
			return evsel->filter;
	}

	return NULL;
}

static size_t intel_pt_filter_bytes(const char *filter)
{
	size_t len = filter ? strlen(filter) : 0;

	return len ? roundup(len + 1, 8) : 0;
}

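/*
 * Example: a 10-character filter occupies roundup(11, 8) = 16 bytes, i.e.
 * the string plus its NUL terminator, padded to a multiple of 8 so that it
 * fits whole u64 slots of the auxtrace info event's private area.
 */
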
static size_t
intel_pt_info_priv_size(struct auxtrace_record *itr, struct perf_evlist *evlist)
{
	struct intel_pt_recording *ptr =
			container_of(itr, struct intel_pt_recording, itr);
	const char *filter = intel_pt_find_filter(evlist, ptr->intel_pt_pmu);

	ptr->priv_size = (INTEL_PT_AUXTRACE_PRIV_MAX * sizeof(u64)) +
			 intel_pt_filter_bytes(filter);

	return ptr->priv_size;
}

static void intel_pt_tsc_ctc_ratio(u32 *n, u32 *d)
{
	unsigned int eax = 0, ebx = 0, ecx = 0, edx = 0;

	__get_cpuid(0x15, &eax, &ebx, &ecx, &edx);

	*n = ebx;
	*d = eax;
}

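/*
 * CPUID leaf 0x15 reports the TSC to "core crystal clock" (CTC) ratio:
 * EBX is the numerator and EAX the denominator, hence *n = ebx and
 * *d = eax above.  Both read as zero when the ratio is not enumerated.
 */
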
static int intel_pt_info_fill(struct auxtrace_record *itr,
			      struct perf_session *session,
			      struct auxtrace_info_event *auxtrace_info,
			      size_t priv_size)
{
	struct intel_pt_recording *ptr =
			container_of(itr, struct intel_pt_recording, itr);
	struct perf_pmu *intel_pt_pmu = ptr->intel_pt_pmu;
	struct perf_event_mmap_page *pc;
	struct perf_tsc_conversion tc = { .time_mult = 0, };
	bool cap_user_time_zero = false, per_cpu_mmaps;
	u64 tsc_bit, mtc_bit, mtc_freq_bits, cyc_bit, noretcomp_bit;
	u32 tsc_ctc_ratio_n, tsc_ctc_ratio_d;
	unsigned long max_non_turbo_ratio;
	size_t filter_str_len;
	const char *filter;
	u64 *info;
	int err;

	if (priv_size != ptr->priv_size)
		return -EINVAL;

	intel_pt_parse_terms(&intel_pt_pmu->format, "tsc", &tsc_bit);
	intel_pt_parse_terms(&intel_pt_pmu->format, "noretcomp",
			     &noretcomp_bit);
	intel_pt_parse_terms(&intel_pt_pmu->format, "mtc", &mtc_bit);
	mtc_freq_bits = perf_pmu__format_bits(&intel_pt_pmu->format,
					      "mtc_period");
	intel_pt_parse_terms(&intel_pt_pmu->format, "cyc", &cyc_bit);

	intel_pt_tsc_ctc_ratio(&tsc_ctc_ratio_n, &tsc_ctc_ratio_d);

	if (perf_pmu__scan_file(intel_pt_pmu, "max_nonturbo_ratio",
				"%lu", &max_non_turbo_ratio) != 1)
		max_non_turbo_ratio = 0;

	filter = intel_pt_find_filter(session->evlist, ptr->intel_pt_pmu);
	filter_str_len = filter ? strlen(filter) : 0;

	if (!session->evlist->nr_mmaps)
		return -EINVAL;

	pc = session->evlist->mmap[0].base;

	if (pc) {
		err = perf_read_tsc_conversion(pc, &tc);
		if (err) {
			if (err != -EOPNOTSUPP)
				return err;
		} else {
			cap_user_time_zero = tc.time_mult != 0;
		}

		if (!cap_user_time_zero)
			ui__warning("Intel Processor Trace: TSC not available\n");
	}

	per_cpu_mmaps = !cpu_map__empty(session->evlist->cpus);

	auxtrace_info->type = PERF_AUXTRACE_INTEL_PT;
	auxtrace_info->priv[INTEL_PT_PMU_TYPE] = intel_pt_pmu->type;
	auxtrace_info->priv[INTEL_PT_TIME_SHIFT] = tc.time_shift;
	auxtrace_info->priv[INTEL_PT_TIME_MULT] = tc.time_mult;
	auxtrace_info->priv[INTEL_PT_TIME_ZERO] = tc.time_zero;
	auxtrace_info->priv[INTEL_PT_CAP_USER_TIME_ZERO] = cap_user_time_zero;
	auxtrace_info->priv[INTEL_PT_TSC_BIT] = tsc_bit;
	auxtrace_info->priv[INTEL_PT_NORETCOMP_BIT] = noretcomp_bit;
	auxtrace_info->priv[INTEL_PT_HAVE_SCHED_SWITCH] = ptr->have_sched_switch;
	auxtrace_info->priv[INTEL_PT_SNAPSHOT_MODE] = ptr->snapshot_mode;
	auxtrace_info->priv[INTEL_PT_PER_CPU_MMAPS] = per_cpu_mmaps;
	auxtrace_info->priv[INTEL_PT_MTC_BIT] = mtc_bit;
	auxtrace_info->priv[INTEL_PT_MTC_FREQ_BITS] = mtc_freq_bits;
	auxtrace_info->priv[INTEL_PT_TSC_CTC_N] = tsc_ctc_ratio_n;
	auxtrace_info->priv[INTEL_PT_TSC_CTC_D] = tsc_ctc_ratio_d;
	auxtrace_info->priv[INTEL_PT_CYC_BIT] = cyc_bit;
	auxtrace_info->priv[INTEL_PT_MAX_NONTURBO_RATIO] = max_non_turbo_ratio;
	auxtrace_info->priv[INTEL_PT_FILTER_STR_LEN] = filter_str_len;

	info = &auxtrace_info->priv[INTEL_PT_FILTER_STR_LEN] + 1;

	if (filter_str_len) {
		size_t len = intel_pt_filter_bytes(filter);

		strncpy((char *)info, filter, len);
		info += len >> 3;
	}

	return 0;
}

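/*
 * Layout note: the private area filled above is INTEL_PT_AUXTRACE_PRIV_MAX
 * u64 values followed, when present, by the address filter string padded to
 * a multiple of 8 bytes (see intel_pt_filter_bytes()), which is why the
 * filter is advanced over in u64-sized steps.
 */
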
static int intel_pt_track_switches(struct perf_evlist *evlist)
{
	const char *sched_switch = "sched:sched_switch";
	struct perf_evsel *evsel;
	int err;

	if (!perf_evlist__can_select_event(evlist, sched_switch))
		return -EPERM;

	err = parse_events(evlist, sched_switch, NULL);
	if (err) {
		pr_debug2("%s: failed to parse %s, error %d\n",
			  __func__, sched_switch, err);
		return err;
	}

	evsel = perf_evlist__last(evlist);

	perf_evsel__set_sample_bit(evsel, CPU);
	perf_evsel__set_sample_bit(evsel, TIME);

	evsel->system_wide = true;
	evsel->no_aux_samples = true;
	evsel->immediate = true;

	return 0;
}

static void intel_pt_valid_str(char *str, size_t len, u64 valid)
{
	unsigned int val, last = 0, state = 1;
	int p = 0;

	str[0] = '\0';

	/*
	 * Walk the valid bits, printing single values and coalescing runs of
	 * three or more into "first-last" ranges.  The loop goes one past
	 * bit 63 so that a run ending at the top bit is flushed.
	 */
	for (val = 0; val <= 64; val++, valid >>= 1) {
		if (valid & 1) {
			last = val;
			switch (state) {
			case 0:
				p += scnprintf(str + p, len - p, ",");
				/* Fall through */
			case 1:
				p += scnprintf(str + p, len - p, "%u", val);
				state = 2;
				break;
			case 2:
				state = 3;
				break;
			case 3:
				state = 4;
				break;
			default:
				break;
			}
		} else {
			switch (state) {
			case 3:
				p += scnprintf(str + p, len - p, ",%u", last);
				break;
			case 4:
				p += scnprintf(str + p, len - p, "-%u", last);
				break;
			default:
				break;
			}
			if (state != 1)
				state = 0;
		}
	}
}

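/*
 * Example outputs from intel_pt_valid_str(): a valid mask of 0x2d (bits 0,
 * 2, 3 and 5) prints as "0,2,3,5", while a run of three or more, e.g. 0x1e
 * (bits 1-4), prints as a range: "1-4".
 */
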
static int intel_pt_val_config_term(struct perf_pmu *intel_pt_pmu,
				    const char *caps, const char *name,
				    const char *supported, u64 config)
{
	char valid_str[256];
	unsigned int shift;
	unsigned long long valid;
	u64 bits;
	int ok;

	if (perf_pmu__scan_file(intel_pt_pmu, caps, "%llx", &valid) != 1)
		valid = 0;

	if (supported &&
	    perf_pmu__scan_file(intel_pt_pmu, supported, "%d", &ok) == 1 && !ok)
		valid = 0;

	/* A value of zero is always valid */
	valid |= 1;

	bits = perf_pmu__format_bits(&intel_pt_pmu->format, name);

	config &= bits;

	for (shift = 0; bits && !(bits & 1); shift++)
		bits >>= 1;

	config >>= shift;

	if (config > 63)
		goto out_err;

	/* Use a 64-bit shift: config can legitimately be up to 63 */
	if (valid & (1ULL << config))
		return 0;
out_err:
	intel_pt_valid_str(valid_str, sizeof(valid_str), valid);
	pr_err("Invalid %s for %s. Valid values are: %s\n",
	       name, INTEL_PT_PMU_NAME, valid_str);
	return -EINVAL;
}

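/*
 * For example, if caps/mtc_periods reads 0x249 (a commonly seen value), an
 * out-of-range mtc_period term would produce:
 * "Invalid mtc_period for intel_pt. Valid values are: 0,3,6,9"
 */
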
static int intel_pt_validate_config(struct perf_pmu *intel_pt_pmu,
				    struct perf_evsel *evsel)
{
	int err;
	char c;

	if (!evsel)
		return 0;

	/*
	 * If supported, force pass-through config term (pt=1) even if user
	 * sets pt=0, which avoids senseless kernel errors.
	 */
	if (perf_pmu__scan_file(intel_pt_pmu, "format/pt", "%c", &c) == 1 &&
	    !(evsel->attr.config & 1)) {
		pr_warning("pt=0 doesn't make sense, forcing pt=1\n");
		evsel->attr.config |= 1;
	}

	err = intel_pt_val_config_term(intel_pt_pmu, "caps/cycle_thresholds",
				       "cyc_thresh", "caps/psb_cyc",
				       evsel->attr.config);
	if (err)
		return err;

	err = intel_pt_val_config_term(intel_pt_pmu, "caps/mtc_periods",
				       "mtc_period", "caps/mtc",
				       evsel->attr.config);
	if (err)
		return err;

	return intel_pt_val_config_term(intel_pt_pmu, "caps/psb_periods",
					"psb_period", "caps/psb_cyc",
					evsel->attr.config);
}

static int intel_pt_recording_options(struct auxtrace_record *itr,
				      struct perf_evlist *evlist,
				      struct record_opts *opts)
{
	struct intel_pt_recording *ptr =
			container_of(itr, struct intel_pt_recording, itr);
	struct perf_pmu *intel_pt_pmu = ptr->intel_pt_pmu;
	bool have_timing_info, need_immediate = false;
	struct perf_evsel *evsel, *intel_pt_evsel = NULL;
	const struct cpu_map *cpus = evlist->cpus;
	bool privileged = geteuid() == 0 || perf_event_paranoid() < 0;
	u64 tsc_bit;
	int err;

	ptr->evlist = evlist;
	ptr->snapshot_mode = opts->auxtrace_snapshot_mode;

	evlist__for_each_entry(evlist, evsel) {
		if (evsel->attr.type == intel_pt_pmu->type) {
			if (intel_pt_evsel) {
				pr_err("There may be only one " INTEL_PT_PMU_NAME " event\n");
				return -EINVAL;
			}
			evsel->attr.freq = 0;
			evsel->attr.sample_period = 1;
			intel_pt_evsel = evsel;
			opts->full_auxtrace = true;
		}
	}

	if (opts->auxtrace_snapshot_mode && !opts->full_auxtrace) {
		pr_err("Snapshot mode (-S option) requires " INTEL_PT_PMU_NAME " PMU event (-e " INTEL_PT_PMU_NAME ")\n");
		return -EINVAL;
	}

	if (opts->use_clockid) {
		pr_err("Cannot use clockid (-k option) with " INTEL_PT_PMU_NAME "\n");
		return -EINVAL;
	}

	if (!opts->full_auxtrace)
		return 0;

	err = intel_pt_validate_config(intel_pt_pmu, intel_pt_evsel);
	if (err)
		return err;

	/* Set default sizes for snapshot mode */
	if (opts->auxtrace_snapshot_mode) {
		size_t psb_period = intel_pt_psb_period(intel_pt_pmu, evlist);

		if (!opts->auxtrace_snapshot_size && !opts->auxtrace_mmap_pages) {
			if (privileged) {
				opts->auxtrace_mmap_pages = MiB(4) / page_size;
			} else {
				opts->auxtrace_mmap_pages = KiB(128) / page_size;
				if (opts->mmap_pages == UINT_MAX)
					opts->mmap_pages = KiB(256) / page_size;
			}
		} else if (!opts->auxtrace_mmap_pages && !privileged &&
			   opts->mmap_pages == UINT_MAX) {
			opts->mmap_pages = KiB(256) / page_size;
		}
		if (!opts->auxtrace_snapshot_size)
			opts->auxtrace_snapshot_size =
				opts->auxtrace_mmap_pages * (size_t)page_size;
		if (!opts->auxtrace_mmap_pages) {
			size_t sz = opts->auxtrace_snapshot_size;

			sz = round_up(sz, page_size) / page_size;
			opts->auxtrace_mmap_pages = roundup_pow_of_two(sz);
		}
		if (opts->auxtrace_snapshot_size >
				opts->auxtrace_mmap_pages * (size_t)page_size) {
			pr_err("Snapshot size %zu must not be greater than AUX area tracing mmap size %zu\n",
			       opts->auxtrace_snapshot_size,
			       opts->auxtrace_mmap_pages * (size_t)page_size);
			return -EINVAL;
		}
		if (!opts->auxtrace_snapshot_size || !opts->auxtrace_mmap_pages) {
			pr_err("Failed to calculate default snapshot size and/or AUX area tracing mmap pages\n");
			return -EINVAL;
		}
		pr_debug2("Intel PT snapshot size: %zu\n",
			  opts->auxtrace_snapshot_size);
		if (psb_period &&
		    opts->auxtrace_snapshot_size <= psb_period +
						    INTEL_PT_PSB_PERIOD_NEAR)
			ui__warning("Intel PT snapshot size (%zu) may be too small for PSB period (%zu)\n",
				    opts->auxtrace_snapshot_size, psb_period);
	}

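	/*
	 * Example of the defaults above: an unprivileged user who gives
	 * neither a snapshot size nor an AUX mmap size gets a 128 KiB AUX
	 * buffer, and the snapshot size then defaults to the full 128 KiB.
	 */
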
	/* Set default sizes for full trace mode */
	if (opts->full_auxtrace && !opts->auxtrace_mmap_pages) {
		if (privileged) {
			opts->auxtrace_mmap_pages = MiB(4) / page_size;
		} else {
			opts->auxtrace_mmap_pages = KiB(128) / page_size;
			if (opts->mmap_pages == UINT_MAX)
				opts->mmap_pages = KiB(256) / page_size;
		}
	}

	/* Validate auxtrace_mmap_pages */
	if (opts->auxtrace_mmap_pages) {
		size_t sz = opts->auxtrace_mmap_pages * (size_t)page_size;
		size_t min_sz;

		if (opts->auxtrace_snapshot_mode)
			min_sz = KiB(4);
		else
			min_sz = KiB(8);

		if (sz < min_sz || !is_power_of_2(sz)) {
			pr_err("Invalid mmap size for Intel Processor Trace: must be at least %zuKiB and a power of 2\n",
			       min_sz / 1024);
			return -EINVAL;
		}
	}

	intel_pt_parse_terms(&intel_pt_pmu->format, "tsc", &tsc_bit);

	if (opts->full_auxtrace && (intel_pt_evsel->attr.config & tsc_bit))
		have_timing_info = true;
	else
		have_timing_info = false;

	/*
	 * Per-cpu recording needs sched_switch events to distinguish different
	 * threads.
	 */
	if (have_timing_info && !cpu_map__empty(cpus)) {
		if (perf_can_record_switch_events()) {
			bool cpu_wide = !target__none(&opts->target) &&
					!target__has_task(&opts->target);

			if (!cpu_wide && perf_can_record_cpu_wide()) {
				struct perf_evsel *switch_evsel;

				err = parse_events(evlist, "dummy:u", NULL);
				if (err)
					return err;

				switch_evsel = perf_evlist__last(evlist);

				switch_evsel->attr.freq = 0;
				switch_evsel->attr.sample_period = 1;
				switch_evsel->attr.context_switch = 1;

				switch_evsel->system_wide = true;
				switch_evsel->no_aux_samples = true;
				switch_evsel->immediate = true;

				perf_evsel__set_sample_bit(switch_evsel, TID);
				perf_evsel__set_sample_bit(switch_evsel, TIME);
				perf_evsel__set_sample_bit(switch_evsel, CPU);
				perf_evsel__reset_sample_bit(switch_evsel, BRANCH_STACK);

				opts->record_switch_events = false;
				ptr->have_sched_switch = 3;
			} else {
				opts->record_switch_events = true;
				need_immediate = true;
				if (cpu_wide)
					ptr->have_sched_switch = 3;
				else
					ptr->have_sched_switch = 2;
			}
		} else {
			err = intel_pt_track_switches(evlist);
			if (err == -EPERM)
				pr_debug2("Unable to select sched:sched_switch\n");
			else if (err)
				return err;
			else
				ptr->have_sched_switch = 1;
		}
	}

	if (intel_pt_evsel) {
		/*
		 * To obtain the auxtrace buffer file descriptor, the auxtrace
		 * event must come first.
		 */
		perf_evlist__to_front(evlist, intel_pt_evsel);
		/*
		 * In the case of per-cpu mmaps, we need the CPU on the
		 * AUX event.
		 */
		if (!cpu_map__empty(cpus))
			perf_evsel__set_sample_bit(intel_pt_evsel, CPU);
	}

	/* Add dummy event to keep tracking */
	if (opts->full_auxtrace) {
		struct perf_evsel *tracking_evsel;

		err = parse_events(evlist, "dummy:u", NULL);
		if (err)
			return err;

		tracking_evsel = perf_evlist__last(evlist);

		perf_evlist__set_tracking_event(evlist, tracking_evsel);

		tracking_evsel->attr.freq = 0;
		tracking_evsel->attr.sample_period = 1;

		tracking_evsel->no_aux_samples = true;
		if (need_immediate)
			tracking_evsel->immediate = true;

		/* In per-cpu case, always need the time of mmap events etc */
		if (!cpu_map__empty(cpus)) {
			perf_evsel__set_sample_bit(tracking_evsel, TIME);
			/* And the CPU for switch events */
			perf_evsel__set_sample_bit(tracking_evsel, CPU);
		}
		perf_evsel__reset_sample_bit(tracking_evsel, BRANCH_STACK);
	}

	/*
	 * Warn the user when we do not have enough information to decode i.e.
	 * per-cpu with no sched_switch (except workload-only).
	 */
	if (!ptr->have_sched_switch && !cpu_map__empty(cpus) &&
	    !target__none(&opts->target))
		ui__warning("Intel Processor Trace decoding will not be possible except for kernel tracing!\n");

	return 0;
}

static int intel_pt_snapshot_start(struct auxtrace_record *itr)
{
	struct intel_pt_recording *ptr =
			container_of(itr, struct intel_pt_recording, itr);
	struct perf_evsel *evsel;

	evlist__for_each_entry(ptr->evlist, evsel) {
		if (evsel->attr.type == ptr->intel_pt_pmu->type)
			return perf_evsel__disable(evsel);
	}

	return -EINVAL;
}

static int intel_pt_snapshot_finish(struct auxtrace_record *itr)
{
	struct intel_pt_recording *ptr =
			container_of(itr, struct intel_pt_recording, itr);
	struct perf_evsel *evsel;

	evlist__for_each_entry(ptr->evlist, evsel) {
		if (evsel->attr.type == ptr->intel_pt_pmu->type)
			return perf_evsel__enable(evsel);
	}

	return -EINVAL;
}

static int intel_pt_alloc_snapshot_refs(struct intel_pt_recording *ptr, int idx)
{
	const size_t sz = sizeof(struct intel_pt_snapshot_ref);
	int cnt = ptr->snapshot_ref_cnt, new_cnt = cnt * 2;
	struct intel_pt_snapshot_ref *refs;

	if (!new_cnt)
		new_cnt = 16;

	while (new_cnt <= idx)
		new_cnt *= 2;

	refs = calloc(new_cnt, sz);
	if (!refs)
		return -ENOMEM;

	memcpy(refs, ptr->snapshot_refs, cnt * sz);

	/* Release the old, smaller array now that its contents are copied */
	free(ptr->snapshot_refs);

	ptr->snapshot_refs = refs;
	ptr->snapshot_ref_cnt = new_cnt;

	return 0;
}

static void intel_pt_free_snapshot_refs(struct intel_pt_recording *ptr)
{
	int i;

	for (i = 0; i < ptr->snapshot_ref_cnt; i++)
		zfree(&ptr->snapshot_refs[i].ref_buf);
	zfree(&ptr->snapshot_refs);
}

static void intel_pt_recording_free(struct auxtrace_record *itr)
{
	struct intel_pt_recording *ptr =
			container_of(itr, struct intel_pt_recording, itr);

	intel_pt_free_snapshot_refs(ptr);
	free(ptr);
}

static int intel_pt_alloc_snapshot_ref(struct intel_pt_recording *ptr, int idx,
				       size_t snapshot_buf_size)
{
	size_t ref_buf_size = ptr->snapshot_ref_buf_size;
	void *ref_buf;

	ref_buf = zalloc(ref_buf_size);
	if (!ref_buf)
		return -ENOMEM;

	ptr->snapshot_refs[idx].ref_buf = ref_buf;
	ptr->snapshot_refs[idx].ref_offset = snapshot_buf_size - ref_buf_size;

	return 0;
}

static size_t intel_pt_snapshot_ref_buf_size(struct intel_pt_recording *ptr,
					     size_t snapshot_buf_size)
{
	const size_t max_size = 256 * 1024;
	size_t buf_size = 0, psb_period;

	if (ptr->snapshot_size <= 64 * 1024)
		return 0;

	psb_period = intel_pt_psb_period(ptr->intel_pt_pmu, ptr->evlist);
	if (psb_period)
		buf_size = psb_period * 2;

	if (!buf_size || buf_size > max_size)
		buf_size = max_size;

	if (buf_size >= snapshot_buf_size)
		return 0;

	if (buf_size >= ptr->snapshot_size / 2)
		return 0;

	return buf_size;
}

static int intel_pt_snapshot_init(struct intel_pt_recording *ptr,
				  size_t snapshot_buf_size)
{
	if (ptr->snapshot_init_done)
		return 0;

	ptr->snapshot_init_done = true;

	ptr->snapshot_ref_buf_size = intel_pt_snapshot_ref_buf_size(ptr,
							snapshot_buf_size);

	return 0;
}

/**
 * intel_pt_compare_buffers - compare bytes in a buffer to a circular buffer.
 * @buf1: first buffer
 * @compare_size: number of bytes to compare
 * @buf2: second buffer (a circular buffer)
 * @offs2: offset in second buffer
 * @buf2_size: size of second buffer
 *
 * The comparison allows for the possibility that the bytes to compare in the
 * circular buffer are not contiguous.  It is assumed that @compare_size <=
 * @buf2_size.  This function returns %false if the bytes are identical, %true
 * otherwise.
 */
static bool intel_pt_compare_buffers(void *buf1, size_t compare_size,
				     void *buf2, size_t offs2, size_t buf2_size)
{
	size_t end2 = offs2 + compare_size, part_size;

	if (end2 <= buf2_size)
		return memcmp(buf1, buf2 + offs2, compare_size);

	part_size = end2 - buf2_size;
	if (memcmp(buf1, buf2 + offs2, part_size))
		return true;

	compare_size -= part_size;

	return memcmp(buf1 + part_size, buf2, compare_size);
}

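/*
 * Worked example for intel_pt_compare_buffers(): comparing 8 bytes at
 * offs2 == buf2_size - 4 wraps around, so the first 4 bytes of buf1 are
 * checked against the tail of buf2 and the remaining 4 against its start,
 * reflecting the circular layout.
 */
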
static bool intel_pt_compare_ref(void *ref_buf, size_t ref_offset,
				 size_t ref_size, size_t buf_size,
				 void *data, size_t head)
{
	size_t ref_end = ref_offset + ref_size;

	/* If the head is inside the reference region, it has been overwritten */
	if (ref_end > buf_size) {
		if (head > ref_offset || head < ref_end - buf_size)
			return true;
	} else if (head > ref_offset && head < ref_end) {
		return true;
	}

	return intel_pt_compare_buffers(ref_buf, ref_size, data, ref_offset,
					buf_size);
}

static void intel_pt_copy_ref(void *ref_buf, size_t ref_size, size_t buf_size,
			      void *data, size_t head)
{
	if (head >= ref_size) {
		memcpy(ref_buf, data + head - ref_size, ref_size);
	} else {
		memcpy(ref_buf, data, head);
		ref_size -= head;
		memcpy(ref_buf + head, data + buf_size - ref_size, ref_size);
	}
}

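/*
 * Example for intel_pt_copy_ref(): with ref_size 16 and head 10, the first
 * 10 bytes of ref_buf come from the start of the data buffer (everything
 * written since the wrap) and the remaining 6 from the very end of the
 * buffer.
 */
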
static bool intel_pt_wrapped(struct intel_pt_recording *ptr, int idx,
			     struct auxtrace_mmap *mm, unsigned char *data,
			     u64 head)
{
	struct intel_pt_snapshot_ref *ref = &ptr->snapshot_refs[idx];
	bool wrapped;

	wrapped = intel_pt_compare_ref(ref->ref_buf, ref->ref_offset,
				       ptr->snapshot_ref_buf_size, mm->len,
				       data, head);

	intel_pt_copy_ref(ref->ref_buf, ptr->snapshot_ref_buf_size, mm->len,
			  data, head);

	return wrapped;
}

static bool intel_pt_first_wrap(u64 *data, size_t buf_size)
{
	int i, a, b;

	/* Any non-zero data in the last 512 u64s means the buffer wrapped */
	b = buf_size >> 3;
	a = b - 512;
	if (a < 0)
		a = 0;

	for (i = a; i < b; i++) {
		if (data[i])
			return true;
	}

	return false;
}

static int intel_pt_find_snapshot(struct auxtrace_record *itr, int idx,
				  struct auxtrace_mmap *mm, unsigned char *data,
				  u64 *head, u64 *old)
{
	struct intel_pt_recording *ptr =
			container_of(itr, struct intel_pt_recording, itr);
	bool wrapped;
	int err;

	pr_debug3("%s: mmap index %d old head %zu new head %zu\n",
		  __func__, idx, (size_t)*old, (size_t)*head);

	err = intel_pt_snapshot_init(ptr, mm->len);
	if (err)
		goto out_err;

	if (idx >= ptr->snapshot_ref_cnt) {
		err = intel_pt_alloc_snapshot_refs(ptr, idx);
		if (err)
			goto out_err;
	}

	if (ptr->snapshot_ref_buf_size) {
		if (!ptr->snapshot_refs[idx].ref_buf) {
			err = intel_pt_alloc_snapshot_ref(ptr, idx, mm->len);
			if (err)
				goto out_err;
		}
		wrapped = intel_pt_wrapped(ptr, idx, mm, data, *head);
	} else {
		wrapped = ptr->snapshot_refs[idx].wrapped;
		if (!wrapped && intel_pt_first_wrap((u64 *)data, mm->len)) {
			ptr->snapshot_refs[idx].wrapped = true;
			wrapped = true;
		}
	}

	/*
	 * In full trace mode 'head' continually increases.  However in
	 * snapshot mode 'head' is an offset within the buffer.  Here 'old'
	 * and 'head' are adjusted to match the full trace case which expects
	 * that 'old' is always less than 'head'.
	 */
	if (wrapped) {
		*old = *head;
		*head += mm->len;
	} else {
		if (mm->mask)
			*old &= mm->mask;
		else
			*old %= mm->len;
		if (*old > *head)
			*head += mm->len;
	}

	pr_debug3("%s: wrap-around %sdetected, adjusted old head %zu adjusted new head %zu\n",
		  __func__, wrapped ? "" : "not ", (size_t)*old, (size_t)*head);

	return 0;

out_err:
	pr_err("%s: failed, error %d\n", __func__, err);
	return err;
}

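/*
 * Worked example for the head adjustment above: with a 64 KiB (0x10000)
 * buffer and an in-buffer head of 0x1000 after a wrap, the code yields
 * *old == 0x1000 and *head == 0x11000, so the consumer sees one full
 * buffer's worth of data ending at the in-buffer head, just as in full
 * trace mode.
 */
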
static u64 intel_pt_reference(struct auxtrace_record *itr __maybe_unused)
{
	return rdtsc();
}

static int intel_pt_read_finish(struct auxtrace_record *itr, int idx)
{
	struct intel_pt_recording *ptr =
			container_of(itr, struct intel_pt_recording, itr);
	struct perf_evsel *evsel;

	evlist__for_each_entry(ptr->evlist, evsel) {
		if (evsel->attr.type == ptr->intel_pt_pmu->type)
			return perf_evlist__enable_event_idx(ptr->evlist, evsel,
							     idx);
	}

	return -EINVAL;
}

struct auxtrace_record *intel_pt_recording_init(int *err)
{
	struct perf_pmu *intel_pt_pmu = perf_pmu__find(INTEL_PT_PMU_NAME);
	struct intel_pt_recording *ptr;

	if (!intel_pt_pmu)
		return NULL;

	if (setenv("JITDUMP_USE_ARCH_TIMESTAMP", "1", 1)) {
		*err = -errno;
		return NULL;
	}

	ptr = zalloc(sizeof(struct intel_pt_recording));
	if (!ptr) {
		*err = -ENOMEM;
		return NULL;
	}

	ptr->intel_pt_pmu = intel_pt_pmu;
	ptr->itr.recording_options = intel_pt_recording_options;
	ptr->itr.info_priv_size = intel_pt_info_priv_size;
	ptr->itr.info_fill = intel_pt_info_fill;
	ptr->itr.free = intel_pt_recording_free;
	ptr->itr.snapshot_start = intel_pt_snapshot_start;
	ptr->itr.snapshot_finish = intel_pt_snapshot_finish;
	ptr->itr.find_snapshot = intel_pt_find_snapshot;
	ptr->itr.parse_snapshot_options = intel_pt_parse_snapshot_options;
	ptr->itr.reference = intel_pt_reference;
	ptr->itr.read_finish = intel_pt_read_finish;

	return &ptr->itr;
}