// SPDX-License-Identifier: GPL-2.0-only
 * Builtin top command: Display a continuously updated profile of
 * any workload, CPU or specific PID.
 * Copyright (C) 2008, Red Hat Inc, Ingo Molnar <mingo@redhat.com>
 *		 2011, Red Hat Inc, Arnaldo Carvalho de Melo <acme@redhat.com>
 * Improvements and fixes by:
 *   Arjan van de Ven <arjan@linux.intel.com>
 *   Yanmin Zhang <yanmin.zhang@intel.com>
 *   Wu Fengguang <fengguang.wu@intel.com>
 *   Mike Galbraith <efault@gmx.de>
 *   Paul Mackerras <paulus@samba.org>
#include "util/annotate.h"
#include "util/bpf-event.h"
#include "util/cgroup.h"
#include "util/config.h"
#include "util/color.h"
#include "util/evlist.h"
#include "util/evsel.h"
#include "util/evsel_config.h"
#include "util/event.h"
#include "util/machine.h"
#include "util/mmap.h"
#include "util/session.h"
#include "util/thread.h"
#include "util/symbol.h"
#include "util/synthetic-events.h"
#include "util/util.h"
#include <linux/rbtree.h>
#include <subcmd/parse-options.h>
#include "util/parse-events.h"
#include "util/callchain.h"
#include "util/cpumap.h"
#include "util/sort.h"
#include "util/string2.h"
#include "util/term.h"
#include "util/intlist.h"
#include "util/parse-branch-options.h"
#include "arch/common.h"
#include "util/debug.h"
#include "util/ordered-events.h"
#include <sys/syscall.h>
#include <sys/ioctl.h>
#include <sys/prctl.h>
#include <sys/utsname.h>
#include <linux/stringify.h>
#include <linux/time64.h>
#include <linux/types.h>
#include <linux/err.h>
#include <linux/ctype.h>
#include <perf/mmap.h>
static volatile sig_atomic_t done;
static volatile sig_atomic_t resize;
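/* Screen rows reserved for the summary header; the remaining rows show symbol entries. */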
#define HEADER_LINE_NR 5
static void perf_top__update_print_entries(struct perf_top *top)
	top->print_entries = top->winsize.ws_row - HEADER_LINE_NR;
static void winch_sig(int sig __maybe_unused)
static void perf_top__resize(struct perf_top *top)
	get_term_dimensions(&top->winsize);
	perf_top__update_print_entries(top);
static int perf_top__parse_source(struct perf_top *top, struct hist_entry *he)
	struct annotation *notes;
	if (!he || !he->ms.sym)
	evsel = hists_to_evsel(he->hists);
	 * We can't annotate with just /proc/kallsyms
	if (dso->symtab_type == DSO_BINARY_TYPE__KALLSYMS && !dso__is_kcore(dso)) {
		pr_err("Can't annotate %s: No vmlinux file was found in the "
			"path\n", sym->name);
	notes = symbol__annotation(sym);
	mutex_lock(&notes->lock);
	if (!symbol__hists(sym, top->evlist->core.nr_entries)) {
		mutex_unlock(&notes->lock);
		pr_err("Not enough memory for annotating '%s' symbol!\n",
	err = symbol__annotate(&he->ms, evsel, &top->annotation_opts, NULL);
		top->sym_filter_entry = he;
		symbol__strerror_disassemble(&he->ms, err, msg, sizeof(msg));
		pr_err("Couldn't annotate %s: %s\n", sym->name, msg);
	mutex_unlock(&notes->lock);
static void __zero_source_counters(struct hist_entry *he)
	struct symbol *sym = he->ms.sym;
	symbol__annotate_zero_histograms(sym);
static void ui__warn_map_erange(struct map *map, struct symbol *sym, u64 ip)
	int err = uname(&uts);
	struct dso *dso = map__dso(map);
	ui__warning("Out of bounds address found:\n\n"
		"Addr: %" PRIx64 "\n"
		"Map: %" PRIx64 "-%" PRIx64 "\n"
		"Symbol: %" PRIx64 "-%" PRIx64 " %c %s\n"
		"Not all samples will be on the annotation output.\n\n"
		"Please report to linux-kernel@vger.kernel.org\n",
		ip, dso->long_name, dso__symtab_origin(dso),
		map__start(map), map__end(map), sym->start, sym->end,
		sym->binding == STB_GLOBAL ? 'g' :
		sym->binding == STB_LOCAL ? 'l' : 'w', sym->name,
		err ? "[unknown]" : uts.machine,
		err ? "[unknown]" : uts.release, perf_version_string);
	if (use_browser <= 0)
	map->erange_warned = true;
static void perf_top__record_precise_ip(struct perf_top *top,
		struct hist_entry *he,
		struct perf_sample *sample,
		struct evsel *evsel, u64 ip)
	EXCLUSIVE_LOCKS_REQUIRED(he->hists->lock)
	struct annotation *notes;
	struct symbol *sym = he->ms.sym;
	if (sym == NULL || (use_browser == 0 &&
			(top->sym_filter_entry == NULL ||
			 top->sym_filter_entry->ms.sym != sym)))
	notes = symbol__annotation(sym);
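	/*
	 * Use a trylock so a contended annotation lock never stalls the
	 * sample-processing path; when it is busy, this sample is simply not
	 * accounted for annotation.
	 */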
	if (!mutex_trylock(&notes->lock))
	err = hist_entry__inc_addr_samples(he, sample, evsel, ip);
	mutex_unlock(&notes->lock);
	 * This function is now called with he->hists->lock held.
	 * Release it before going to sleep.
	mutex_unlock(&he->hists->lock);
	if (err == -ERANGE && !he->ms.map->erange_warned)
		ui__warn_map_erange(he->ms.map, sym, ip);
	else if (err == -ENOMEM) {
		pr_err("Not enough memory for annotating '%s' symbol!\n",
	mutex_lock(&he->hists->lock);
static void perf_top__show_details(struct perf_top *top)
	struct hist_entry *he = top->sym_filter_entry;
	struct annotation *notes;
	struct symbol *symbol;
	evsel = hists_to_evsel(he->hists);
	notes = symbol__annotation(symbol);
	mutex_lock(&notes->lock);
	symbol__calc_percent(symbol, evsel);
	if (notes->src == NULL)
	printf("Showing %s for %s\n", evsel__name(top->sym_evsel), symbol->name);
	printf(" Events Pcnt (>=%d%%)\n", top->annotation_opts.min_pcnt);
	more = symbol__annotate_printf(&he->ms, top->sym_evsel, &top->annotation_opts);
	if (top->evlist->enabled) {
		symbol__annotate_zero_histogram(symbol, top->sym_evsel->core.idx);
		symbol__annotate_decay_histogram(symbol, top->sym_evsel->core.idx);
	printf("%d lines not displayed, maybe increase display entries [e]\n", more);
	mutex_unlock(&notes->lock);
static void perf_top__resort_hists(struct perf_top *t)
	struct evlist *evlist = t->evlist;
	evlist__for_each_entry(evlist, pos) {
		struct hists *hists = evsel__hists(pos);
		 * unlink existing entries so that they can be linked
		 * in a correct order in hists__match() below.
		hists__unlink(hists);
		if (evlist->enabled) {
			hists__delete_entries(hists);
			hists__decay_entries(hists, t->hide_user_symbols,
				t->hide_kernel_symbols);
		hists__collapse_resort(hists, NULL);
		/* Non-group events are considered as leaders */
		if (symbol_conf.event_group && !evsel__is_group_leader(pos)) {
			struct hists *leader_hists = evsel__hists(evsel__leader(pos));
			hists__match(leader_hists, hists);
			hists__link(leader_hists, hists);
	evlist__for_each_entry(evlist, pos) {
		evsel__output_resort(pos, NULL);
static void perf_top__print_sym_table(struct perf_top *top)
	const int win_width = top->winsize.ws_col - 1;
	struct evsel *evsel = top->sym_evsel;
	struct hists *hists = evsel__hists(evsel);
	perf_top__header_snprintf(top, bf, sizeof(bf));
	printf("%-*.*s\n", win_width, win_width, graph_dotted_line);
	if (!top->record_opts.overwrite &&
		(top->evlist->stats.nr_lost_warned !=
		top->evlist->stats.nr_events[PERF_RECORD_LOST])) {
		top->evlist->stats.nr_lost_warned =
			top->evlist->stats.nr_events[PERF_RECORD_LOST];
		color_fprintf(stdout, PERF_COLOR_RED,
			"WARNING: LOST %d chunks, Check IO/CPU overload",
			top->evlist->stats.nr_lost_warned);
	if (top->sym_filter_entry) {
		perf_top__show_details(top);
	perf_top__resort_hists(top);
	hists__output_recalc_col_len(hists, top->print_entries - printed);
	hists__fprintf(hists, false, top->print_entries - printed, win_width,
		top->min_percent, stdout, !symbol_conf.use_callchain);
static void prompt_integer(int *target, const char *msg)
	char *buf = malloc(0), *p;
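	/* getline() grows the buffer as needed, so a zero-length initial allocation is enough. */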
	fprintf(stdout, "\n%s: ", msg);
	if (getline(&buf, &dummy, stdin) < 0)
	p = strchr(buf, '\n');
	tmp = strtoul(buf, NULL, 10);
static void prompt_percent(int *target, const char *msg)
	prompt_integer(&tmp, msg);
	if (tmp >= 0 && tmp <= 100)
static void perf_top__prompt_symbol(struct perf_top *top, const char *msg)
	char *buf = malloc(0), *p;
	struct hist_entry *syme = top->sym_filter_entry, *n, *found = NULL;
	struct hists *hists = evsel__hists(top->sym_evsel);
	struct rb_node *next;
	/* zero counters of active symbol */
		__zero_source_counters(syme);
		top->sym_filter_entry = NULL;
	fprintf(stdout, "\n%s: ", msg);
	if (getline(&buf, &dummy, stdin) < 0)
	p = strchr(buf, '\n');
	next = rb_first_cached(&hists->entries);
		n = rb_entry(next, struct hist_entry, rb_node);
		if (n->ms.sym && !strcmp(buf, n->ms.sym->name)) {
		next = rb_next(&n->rb_node);
		fprintf(stderr, "Sorry, %s is not active.\n", buf);
		perf_top__parse_source(top, found);
static void perf_top__print_mapped_keys(struct perf_top *top)
	if (top->sym_filter_entry) {
		struct symbol *sym = top->sym_filter_entry->ms.sym;
	fprintf(stdout, "\nMapped keys:\n");
	fprintf(stdout, "\t[d] display refresh delay. \t(%d)\n", top->delay_secs);
	fprintf(stdout, "\t[e] display entries (lines). \t(%d)\n", top->print_entries);
	if (top->evlist->core.nr_entries > 1)
		fprintf(stdout, "\t[E] active event counter. \t(%s)\n", evsel__name(top->sym_evsel));
	fprintf(stdout, "\t[f] profile display filter (count). \t(%d)\n", top->count_filter);
	fprintf(stdout, "\t[F] annotate display filter (percent). \t(%d%%)\n", top->annotation_opts.min_pcnt);
	fprintf(stdout, "\t[s] annotate symbol. \t(%s)\n", name?: "NULL");
	fprintf(stdout, "\t[S] stop annotation.\n");
		"\t[K] hide kernel symbols. \t(%s)\n",
		top->hide_kernel_symbols ? "yes" : "no");
		"\t[U] hide user symbols. \t(%s)\n",
		top->hide_user_symbols ? "yes" : "no");
	fprintf(stdout, "\t[z] toggle sample zeroing. \t(%d)\n", top->zero ? 1 : 0);
	fprintf(stdout, "\t[qQ] quit.\n");
static int perf_top__key_mapped(struct perf_top *top, int c)
		return top->evlist->core.nr_entries > 1 ? 1 : 0;
static bool perf_top__handle_keypress(struct perf_top *top, int c)
	if (!perf_top__key_mapped(top, c)) {
		struct pollfd stdin_poll = { .fd = 0, .events = POLLIN };
		perf_top__print_mapped_keys(top);
		fprintf(stdout, "\nEnter selection, or unmapped key to continue: ");
		set_term_quiet_input(&save);
		poll(&stdin_poll, 1, -1);
		tcsetattr(0, TCSAFLUSH, &save);
		if (!perf_top__key_mapped(top, c))
		prompt_integer(&top->delay_secs, "Enter display delay");
		if (top->delay_secs < 1)
		prompt_integer(&top->print_entries, "Enter display entries (lines)");
		if (top->print_entries == 0) {
			perf_top__resize(top);
			signal(SIGWINCH, winch_sig);
			signal(SIGWINCH, SIG_DFL);
		if (top->evlist->core.nr_entries > 1) {
			/* Select 0 as the default event: */
			fprintf(stderr, "\nAvailable events:");
			evlist__for_each_entry(top->evlist, top->sym_evsel)
				fprintf(stderr, "\n\t%d %s", top->sym_evsel->core.idx, evsel__name(top->sym_evsel));
			prompt_integer(&counter, "Enter details event counter");
			if (counter >= top->evlist->core.nr_entries) {
				top->sym_evsel = evlist__first(top->evlist);
				fprintf(stderr, "Sorry, no such event, using %s.\n", evsel__name(top->sym_evsel));
			evlist__for_each_entry(top->evlist, top->sym_evsel)
				if (top->sym_evsel->core.idx == counter)
			top->sym_evsel = evlist__first(top->evlist);
		prompt_integer(&top->count_filter, "Enter display event count filter");
		prompt_percent(&top->annotation_opts.min_pcnt,
			"Enter details display event filter (percent)");
		top->hide_kernel_symbols = !top->hide_kernel_symbols;
		printf("exiting.\n");
		if (top->dump_symtab)
			perf_session__fprintf_dsos(top->session, stderr);
		perf_top__prompt_symbol(top, "Enter details symbol");
		if (!top->sym_filter_entry)
			struct hist_entry *syme = top->sym_filter_entry;
			top->sym_filter_entry = NULL;
			__zero_source_counters(syme);
		top->hide_user_symbols = !top->hide_user_symbols;
		top->zero = !top->zero;
static void perf_top__sort_new_samples(void *arg)
	struct perf_top *t = arg;
	if (t->evlist->selected != NULL)
		t->sym_evsel = t->evlist->selected;
	perf_top__resort_hists(t);
	if (t->lost || t->drop)
		pr_warning("Too slow to read ring buffer (change period (-c/-F) or limit CPUs (-C))\n");
static void stop_top(void)
static void *display_thread_tui(void *arg)
	struct perf_top *top = arg;
	const char *help = "For a higher level overview, try: perf top --sort comm,dso";
	struct hist_browser_timer hbt = {
		.timer = perf_top__sort_new_samples,
		.refresh = top->delay_secs,
	/* In order to read symbols from other namespaces perf needs to call
	 * setns(2). This isn't permitted if the fs_struct has multiple users.
	 * unshare(2) the fs so that we may continue to setns into namespaces
	 * that we're observing.
	prctl(PR_SET_NAME, "perf-top-UI", 0, 0, 0);
	perf_top__sort_new_samples(top);
	 * Initialize the uid_filter_str; in the future the TUI will allow
	 * zooming in/out on UIDs. For now just use whatever the user passed
	evlist__for_each_entry(top->evlist, pos) {
		struct hists *hists = evsel__hists(pos);
		hists->uid_filter_str = top->record_opts.target.uid_str;
	ret = evlist__tui_browse_hists(top->evlist, help, &hbt, top->min_percent,
		&top->session->header.env, !top->record_opts.overwrite,
		&top->annotation_opts);
	if (ret == K_RELOAD) {
static void display_sig(int sig __maybe_unused)
static void display_setup_sig(void)
	signal(SIGSEGV, sighandler_dump_stack);
	signal(SIGFPE, sighandler_dump_stack);
	signal(SIGINT, display_sig);
	signal(SIGQUIT, display_sig);
	signal(SIGTERM, display_sig);
static void *display_thread(void *arg)
	struct pollfd stdin_poll = { .fd = 0, .events = POLLIN };
	struct perf_top *top = arg;
	/* In order to read symbols from other namespaces perf needs to call
	 * setns(2). This isn't permitted if the fs_struct has multiple users.
	 * unshare(2) the fs so that we may continue to setns into namespaces
	 * that we're observing.
	prctl(PR_SET_NAME, "perf-top-UI", 0, 0, 0);
	pthread__unblock_sigwinch();
	delay_msecs = top->delay_secs * MSEC_PER_SEC;
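	/* The poll() timeout below doubles as the screen refresh interval. */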
	set_term_quiet_input(&save);
	if (poll(&stdin_poll, 1, 0) > 0)
		perf_top__print_sym_table(top);
		 * Either timeout expired or we got an EINTR due to SIGWINCH,
		 * refresh screen in both cases.
		switch (poll(&stdin_poll, 1, delay_msecs)) {
			tcsetattr(0, TCSAFLUSH, &save);
			if (perf_top__handle_keypress(top, c))
	tcsetattr(0, TCSAFLUSH, &save);
static int hist_iter__top_callback(struct hist_entry_iter *iter,
		struct addr_location *al, bool single,
	EXCLUSIVE_LOCKS_REQUIRED(iter->he->hists->lock)
	struct perf_top *top = arg;
	struct evsel *evsel = iter->evsel;
	if (perf_hpp_list.sym && single)
		perf_top__record_precise_ip(top, iter->he, iter->sample, evsel, al->addr);
	hist__account_cycles(iter->sample->branch_stack, al, iter->sample,
		!(top->record_opts.branch_stack & PERF_SAMPLE_BRANCH_ANY),
static void perf_event__process_sample(struct perf_tool *tool,
		const union perf_event *event,
		struct perf_sample *sample,
		struct machine *machine)
	struct perf_top *top = container_of(tool, struct perf_top, tool);
	struct addr_location al;
	if (!machine && perf_guest) {
		static struct intlist *seen;
			seen = intlist__new(NULL);
		if (!intlist__has_entry(seen, sample->pid)) {
			pr_err("Can't find guest [%d]'s kernel information\n",
			intlist__add(seen, sample->pid);
		pr_err("%u unprocessable samples recorded.\r",
			top->session->evlist->stats.nr_unprocessable_samples++);
	if (event->header.misc & PERF_RECORD_MISC_EXACT_IP)
		top->exact_samples++;
	if (machine__resolve(machine, &al, sample) < 0)
		al.thread->lbr_stitch_enable = true;
	if (!machine->kptr_restrict_warned &&
		symbol_conf.kptr_restrict &&
		al.cpumode == PERF_RECORD_MISC_KERNEL) {
		if (!evlist__exclude_kernel(top->session->evlist)) {
				"Kernel address maps (/proc/{kallsyms,modules}) are restricted.\n\n"
				"Check /proc/sys/kernel/kptr_restrict and /proc/sys/kernel/perf_event_paranoid.\n\n"
				"Kernel%s samples will not be resolved.\n",
				al.map && map__has_symbols(al.map) ?
			if (use_browser <= 0)
		machine->kptr_restrict_warned = true;
	if (al.sym == NULL && al.map != NULL) {
		const char *msg = "Kernel samples will not be resolved.\n";
		 * As we do lazy loading of symtabs we only will know if the
		 * specified vmlinux file is invalid when we actually have a
		 * hit in kernel space and then try to load it. So if we get
		 * here and there are _no_ symbols in the DSO backing the
		 * kernel map, bail out.
		 * We may never get here, for instance, if we use -K/
		 * --hide-kernel-symbols, even if the user specifies an
		 * invalid --vmlinux ;-)
		if (!machine->kptr_restrict_warned && !top->vmlinux_warned &&
			__map__is_kernel(al.map) && !map__has_symbols(al.map)) {
			if (symbol_conf.vmlinux_name) {
				dso__strerror_load(map__dso(al.map), serr, sizeof(serr));
				ui__warning("The %s file can't be used: %s\n%s",
					symbol_conf.vmlinux_name, serr, msg);
				ui__warning("A vmlinux file was not found.\n%s",
			if (use_browser <= 0)
			top->vmlinux_warned = true;
	if (al.sym == NULL || !al.sym->idle) {
		struct hists *hists = evsel__hists(evsel);
		struct hist_entry_iter iter = {
			.add_entry_cb = hist_iter__top_callback,
		if (symbol_conf.cumulate_callchain)
			iter.ops = &hist_iter_cumulative;
			iter.ops = &hist_iter_normal;
		mutex_lock(&hists->lock);
		if (hist_entry_iter__add(&iter, &al, top->max_stack, top) < 0)
			pr_err("Problem incrementing symbol period, skipping event\n");
		mutex_unlock(&hists->lock);
	addr_location__put(&al);
perf_top__process_lost(struct perf_top *top, union perf_event *event,
	top->lost += event->lost.lost;
	top->lost_total += event->lost.lost;
	evsel->evlist->stats.total_lost += event->lost.lost;
perf_top__process_lost_samples(struct perf_top *top,
		union perf_event *event,
	top->lost += event->lost_samples.lost;
	top->lost_total += event->lost_samples.lost;
	evsel->evlist->stats.total_lost_samples += event->lost_samples.lost;
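/*
 * Timestamp of the most recently queued event; should_drop() compares
 * against it to discard samples that lag more than delay_secs behind.
 */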
static u64 last_timestamp;
static void perf_top__mmap_read_idx(struct perf_top *top, int idx)
	struct record_opts *opts = &top->record_opts;
	struct evlist *evlist = top->evlist;
	union perf_event *event;
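	/* In overwrite mode, samples are read from the backward (overwrite) ring buffers. */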
	md = opts->overwrite ? &evlist->overwrite_mmap[idx] : &evlist->mmap[idx];
	if (perf_mmap__read_init(&md->core) < 0)
	while ((event = perf_mmap__read_event(&md->core)) != NULL) {
		ret = evlist__parse_sample_timestamp(evlist, event, &last_timestamp);
		if (ret && ret != -1)
		ret = ordered_events__queue(top->qe.in, event, last_timestamp, 0, NULL);
		perf_mmap__consume(&md->core);
		if (top->qe.rotate) {
			mutex_lock(&top->qe.mutex);
			top->qe.rotate = false;
			cond_signal(&top->qe.cond);
			mutex_unlock(&top->qe.mutex);
	perf_mmap__read_done(&md->core);
static void perf_top__mmap_read(struct perf_top *top)
	bool overwrite = top->record_opts.overwrite;
	struct evlist *evlist = top->evlist;
		evlist__toggle_bkw_mmap(evlist, BKW_MMAP_DATA_PENDING);
	for (i = 0; i < top->evlist->core.nr_mmaps; i++)
		perf_top__mmap_read_idx(top, i);
		evlist__toggle_bkw_mmap(evlist, BKW_MMAP_EMPTY);
		evlist__toggle_bkw_mmap(evlist, BKW_MMAP_RUNNING);
 * Check the per-event overwrite term.
 * perf top should use a consistent term for all events.
 * - No event has a per-event term
 *   E.g. "cpu/cpu-cycles/,cpu/instructions/"
 *   Nothing changes, return 0.
 * - All events have the same per-event term
 *   E.g. "cpu/cpu-cycles,no-overwrite/,cpu/instructions,no-overwrite/"
 *   Use the per-event setting to replace opts->overwrite if
 *   they differ, then return 0.
 * - Events have different per-event terms
 *   E.g. "cpu/cpu-cycles,overwrite/,cpu/instructions,no-overwrite/"
 * - Some events set a per-event term, but others do not.
 *   E.g. "cpu/cpu-cycles/,cpu/instructions,no-overwrite/"
static int perf_top__overwrite_check(struct perf_top *top)
	struct record_opts *opts = &top->record_opts;
	struct evlist *evlist = top->evlist;
	struct evsel_config_term *term;
	struct list_head *config_terms;
	int set, overwrite = -1;
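	/* -1 means "no per-event overwrite term seen yet" for both set and overwrite. */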
	evlist__for_each_entry(evlist, evsel) {
		config_terms = &evsel->config_terms;
		list_for_each_entry(term, config_terms, list) {
			if (term->type == EVSEL__CONFIG_TERM_OVERWRITE)
				set = term->val.overwrite ? 1 : 0;
		/* no term for current and previous event (likely) */
		if ((overwrite < 0) && (set < 0))
		/* has term for both current and previous event, compare */
		if ((overwrite >= 0) && (set >= 0) && (overwrite != set))
		/* no term for current event but has term for previous one */
		if ((overwrite >= 0) && (set < 0))
		/* has term for current event */
		if ((overwrite < 0) && (set >= 0)) {
			/* if it's first event, set overwrite */
			if (evsel == evlist__first(evlist))
	if ((overwrite >= 0) && (opts->overwrite != overwrite))
		opts->overwrite = overwrite;
static int perf_top_overwrite_fallback(struct perf_top *top,
	struct record_opts *opts = &top->record_opts;
	struct evlist *evlist = top->evlist;
	struct evsel *counter;
	if (!opts->overwrite)
	/* only fall back when first event fails */
	if (evsel != evlist__first(evlist))
	evlist__for_each_entry(evlist, counter)
		counter->core.attr.write_backward = false;
	opts->overwrite = false;
	pr_debug2("fall back to non-overwrite mode\n");
static int perf_top__start_counters(struct perf_top *top)
	struct evsel *counter;
	struct evlist *evlist = top->evlist;
	struct record_opts *opts = &top->record_opts;
	if (perf_top__overwrite_check(top)) {
1019 ui__error("perf top only support consistent per-event "
1020 "overwrite setting for all events\n");
1024 evlist__config(evlist, opts, &callchain_param);
1026 evlist__for_each_entry(evlist, counter) {
1028 if (evsel__open(counter, top->evlist->core.user_requested_cpus,
1029 top->evlist->core.threads) < 0) {
1032 * Specially handle overwrite fall back.
1033 * Because perf top is the only tool which has
1034 * overwrite mode by default, support
1035 * both overwrite and non-overwrite mode, and
1036 * require consistent mode for all events.
1038 * May move it to generic code with more tools
1039 * have similar attribute.
			if (perf_missing_features.write_backward &&
				perf_top_overwrite_fallback(top, counter))
			if (evsel__fallback(counter, errno, msg, sizeof(msg))) {
				ui__warning("%s\n", msg);
			evsel__open_strerror(counter, &opts->target, errno, msg, sizeof(msg));
			ui__error("%s\n", msg);
	if (evlist__mmap(evlist, opts->mmap_pages) < 0) {
		ui__error("Failed to mmap with %d (%s)\n",
			errno, str_error_r(errno, msg, sizeof(msg)));
static int callchain_param__setup_sample_type(struct callchain_param *callchain)
	if (callchain->mode != CHAIN_NONE) {
		if (callchain_register_param(callchain) < 0) {
			ui__error("Can't register callchain params.\n");
static struct ordered_events *rotate_queues(struct perf_top *top)
	struct ordered_events *in = top->qe.in;
	if (top->qe.in == &top->qe.data[1])
		top->qe.in = &top->qe.data[0];
		top->qe.in = &top->qe.data[1];
static void *process_thread(void *arg)
	struct perf_top *top = arg;
		struct ordered_events *out, *in = top->qe.in;
		if (!in->nr_events) {
		out = rotate_queues(top);
		mutex_lock(&top->qe.mutex);
		top->qe.rotate = true;
		cond_wait(&top->qe.cond, &top->qe.mutex);
		mutex_unlock(&top->qe.mutex);
		if (ordered_events__flush(out, OE_FLUSH__TOP))
			pr_err("failed to process events\n");
 * Allow samples to be at most 'top->delay_secs' seconds behind.
static int should_drop(struct ordered_event *qevent, struct perf_top *top)
	union perf_event *event = qevent->event;
	u64 delay_timestamp;
	if (event->header.type != PERF_RECORD_SAMPLE)
	delay_timestamp = qevent->timestamp + top->delay_secs * NSEC_PER_SEC;
	return delay_timestamp < last_timestamp;
static int deliver_event(struct ordered_events *qe,
		struct ordered_event *qevent)
	struct perf_top *top = qe->data;
	struct evlist *evlist = top->evlist;
	struct perf_session *session = top->session;
	union perf_event *event = qevent->event;
	struct perf_sample sample;
	struct evsel *evsel;
	struct machine *machine;
	if (should_drop(qevent, top)) {
	ret = evlist__parse_sample(evlist, event, &sample);
		pr_err("Can't parse sample, err = %d\n", ret);
	evsel = evlist__id2evsel(session->evlist, sample.id);
	assert(evsel != NULL);
	if (event->header.type == PERF_RECORD_SAMPLE) {
		if (evswitch__discard(&top->evswitch, evsel))
	switch (sample.cpumode) {
	case PERF_RECORD_MISC_USER:
		if (top->hide_user_symbols)
		machine = &session->machines.host;
	case PERF_RECORD_MISC_KERNEL:
		++top->kernel_samples;
		if (top->hide_kernel_symbols)
		machine = &session->machines.host;
	case PERF_RECORD_MISC_GUEST_KERNEL:
		++top->guest_kernel_samples;
		machine = perf_session__find_machine(session,
	case PERF_RECORD_MISC_GUEST_USER:
		++top->guest_us_samples;
		 * TODO: we don't process guest user from host side
		 * except simple counting.
		if (event->header.type == PERF_RECORD_SAMPLE)
		machine = &session->machines.host;
	if (event->header.type == PERF_RECORD_SAMPLE) {
		perf_event__process_sample(&top->tool, event, evsel,
	} else if (event->header.type == PERF_RECORD_LOST) {
		perf_top__process_lost(top, event, evsel);
	} else if (event->header.type == PERF_RECORD_LOST_SAMPLES) {
		perf_top__process_lost_samples(top, event, evsel);
	} else if (event->header.type < PERF_RECORD_MAX) {
		events_stats__inc(&session->evlist->stats, event->header.type);
		machine__process_event(machine, event, &sample);
		++session->evlist->stats.nr_unknown_events;
static void init_process_thread(struct perf_top *top)
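	/*
	 * Two ordered_events queues alternate between the mmap reader and the
	 * process thread (see rotate_queues()), so reading and delivery can
	 * overlap.
	 */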
	ordered_events__init(&top->qe.data[0], deliver_event, top);
	ordered_events__init(&top->qe.data[1], deliver_event, top);
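	/* Copy events at queue time so the mmap ring can be consumed right after queuing. */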
	ordered_events__set_copy_on_queue(&top->qe.data[0], true);
	ordered_events__set_copy_on_queue(&top->qe.data[1], true);
	top->qe.in = &top->qe.data[0];
	mutex_init(&top->qe.mutex);
	cond_init(&top->qe.cond);
static int __cmd_top(struct perf_top *top)
	struct record_opts *opts = &top->record_opts;
	pthread_t thread, thread_process;
	if (!top->annotation_opts.objdump_path) {
		ret = perf_env__lookup_objdump(&top->session->header.env,
			&top->annotation_opts.objdump_path);
	ret = callchain_param__setup_sample_type(&callchain_param);
	if (perf_session__register_idle_thread(top->session) < 0)
	if (top->nr_threads_synthesize > 1)
		perf_set_multithreaded();
	init_process_thread(top);
	if (opts->record_namespaces)
		top->tool.namespace_events = true;
	if (opts->record_cgroup) {
#ifdef HAVE_FILE_HANDLE
		top->tool.cgroup_events = true;
		pr_err("cgroup tracking is not supported.\n");
	ret = perf_event__synthesize_bpf_events(top->session, perf_event__process,
		&top->session->machines.host,
		pr_debug("Couldn't synthesize BPF events: Pre-existing BPF programs won't have symbols resolved.\n");
	ret = perf_event__synthesize_cgroups(&top->tool, perf_event__process,
		&top->session->machines.host);
		pr_debug("Couldn't synthesize cgroup events.\n");
	machine__synthesize_threads(&top->session->machines.host, &opts->target,
		top->evlist->core.threads, true, false,
		top->nr_threads_synthesize);
	perf_set_multithreaded();
	if (perf_hpp_list.socket) {
		ret = perf_env__read_cpu_topology_map(&perf_env);
			char errbuf[BUFSIZ];
			const char *err = str_error_r(-ret, errbuf, sizeof(errbuf));
			ui__error("Could not read the CPU topology map: %s\n", err);
	ret = perf_top__start_counters(top);
	top->session->evlist = top->evlist;
	perf_session__set_id_hdr_size(top->session);
	 * When perf is starting the traced process, all the events (apart from
	 * group members) have enable_on_exec=1 set, so don't spoil it by
	 * prematurely enabling them.
	 * XXX 'top' still doesn't start workloads like record, trace, but should,
	 * so leave the check here.
	if (!target__none(&opts->target))
		evlist__enable(top->evlist);
	if (pthread_create(&thread_process, NULL, process_thread, top)) {
		ui__error("Could not create process thread.\n");
	if (pthread_create(&thread, NULL, (use_browser > 0 ? display_thread_tui :
		display_thread), top)) {
		ui__error("Could not create display thread.\n");
		goto out_join_thread;
	if (top->realtime_prio) {
		struct sched_param param;
		param.sched_priority = top->realtime_prio;
		if (sched_setscheduler(0, SCHED_FIFO, &param)) {
			ui__error("Could not set realtime priority.\n");
	/* Wait for a minimal set of events before starting the snapshot */
	evlist__poll(top->evlist, 100);
	perf_top__mmap_read(top);
		u64 hits = top->samples;
		perf_top__mmap_read(top);
		if (opts->overwrite || (hits == top->samples))
			ret = evlist__poll(top->evlist, 100);
			perf_top__resize(top);
	pthread_join(thread, NULL);
	cond_signal(&top->qe.cond);
	pthread_join(thread_process, NULL);
	perf_set_singlethreaded();
callchain_opt(const struct option *opt, const char *arg, int unset)
	symbol_conf.use_callchain = true;
	return record_callchain_opt(opt, arg, unset);
parse_callchain_opt(const struct option *opt, const char *arg, int unset)
	struct callchain_param *callchain = opt->value;
	callchain->enabled = !unset;
	callchain->record_mode = CALLCHAIN_FP;
		symbol_conf.use_callchain = false;
		callchain->record_mode = CALLCHAIN_NONE;
	return parse_callchain_top_opt(arg);
static int perf_top_config(const char *var, const char *value, void *cb __maybe_unused)
	if (!strcmp(var, "top.call-graph")) {
		var = "call-graph.record-mode";
		return perf_default_config(var, value, cb);
	if (!strcmp(var, "top.children")) {
		symbol_conf.cumulate_callchain = perf_config_bool(var, value);
parse_percent_limit(const struct option *opt, const char *arg,
	int unset __maybe_unused)
	struct perf_top *top = opt->value;
	top->min_percent = strtof(arg, NULL);
const char top_callchain_help[] = CALLCHAIN_RECORD_HELP CALLCHAIN_REPORT_HELP
	"\n\t\t\t\tDefault: fp,graph,0.5,caller,function";
int cmd_top(int argc, const char **argv)
	char errbuf[BUFSIZ];
	struct perf_top top = {
			.mmap_pages = UINT_MAX,
			.user_freq = UINT_MAX,
			.user_interval = ULLONG_MAX,
			.freq = 4000, /* 4 KHz */
			 * FIXME: This will lose PERF_RECORD_MMAP and other metadata
			 * when we pause, fix that and reenable. Probably using a
			 * separate evlist with a dummy event, i.e. a non-overwrite
			 * ring buffer just for metadata events, while PERF_RECORD_SAMPLE
			 * stays in overwrite mode. -acme
			.sample_time = true,
			.sample_time_set = true,
		.max_stack = sysctl__max_stack(),
		.nr_threads_synthesize = UINT_MAX,
	bool branch_call_mode = false;
	struct record_opts *opts = &top.record_opts;
	struct target *target = &opts->target;
	const char *disassembler_style = NULL, *objdump_path = NULL, *addr2line_path = NULL;
	const struct option options[] = {
	OPT_CALLBACK('e', "event", &top.evlist, "event",
		"event selector. use 'perf list' to list available events",
		parse_events_option),
	OPT_U64('c', "count", &opts->user_interval, "event period to sample"),
	OPT_STRING('p', "pid", &target->pid, "pid",
		"profile events on existing process id"),
	OPT_STRING('t', "tid", &target->tid, "tid",
		"profile events on existing thread id"),
	OPT_BOOLEAN('a', "all-cpus", &target->system_wide,
		"system-wide collection from all CPUs"),
	OPT_STRING('C', "cpu", &target->cpu_list, "cpu",
		"list of cpus to monitor"),
	OPT_STRING('k', "vmlinux", &symbol_conf.vmlinux_name,
		"file", "vmlinux pathname"),
	OPT_BOOLEAN(0, "ignore-vmlinux", &symbol_conf.ignore_vmlinux,
		"don't load vmlinux even if found"),
	OPT_STRING(0, "kallsyms", &symbol_conf.kallsyms_name,
		"file", "kallsyms pathname"),
	OPT_BOOLEAN('K', "hide_kernel_symbols", &top.hide_kernel_symbols,
		"hide kernel symbols"),
	OPT_CALLBACK('m', "mmap-pages", &opts->mmap_pages, "pages",
		"number of mmap data pages", evlist__parse_mmap_pages),
	OPT_INTEGER('r', "realtime", &top.realtime_prio,
		"collect data with this RT SCHED_FIFO priority"),
	OPT_INTEGER('d', "delay", &top.delay_secs,
		"number of seconds to delay between refreshes"),
	OPT_BOOLEAN('D', "dump-symtab", &top.dump_symtab,
		"dump the symbol table used for profiling"),
	OPT_INTEGER('f', "count-filter", &top.count_filter,
		"only display functions with more events than this"),
	OPT_BOOLEAN('i', "no-inherit", &opts->no_inherit,
		"child tasks do not inherit counters"),
	OPT_STRING(0, "sym-annotate", &top.sym_filter, "symbol name",
		"symbol to annotate"),
	OPT_BOOLEAN('z', "zero", &top.zero, "zero history across updates"),
	OPT_CALLBACK('F', "freq", &top.record_opts, "freq or 'max'",
		"profile at this frequency",
		record__parse_freq),
	OPT_INTEGER('E', "entries", &top.print_entries,
		"display this many functions"),
	OPT_BOOLEAN('U', "hide_user_symbols", &top.hide_user_symbols,
		"hide user symbols"),
#ifdef HAVE_SLANG_SUPPORT
	OPT_BOOLEAN(0, "tui", &top.use_tui, "Use the TUI interface"),
	OPT_BOOLEAN(0, "stdio", &top.use_stdio, "Use the stdio interface"),
	OPT_INCR('v', "verbose", &verbose,
		"be more verbose (show counter open errors, etc)"),
	OPT_STRING('s', "sort", &sort_order, "key[,key2...]",
		"sort by key(s): pid, comm, dso, symbol, parent, cpu, srcline, ..."
1498 " Please refer the man page for the complete list."),
	OPT_STRING(0, "fields", &field_order, "key[,keys...]",
		"output field(s): overhead, period, sample plus all of sort keys"),
	OPT_BOOLEAN('n', "show-nr-samples", &symbol_conf.show_nr_samples,
		"Show a column with the number of samples"),
	OPT_CALLBACK_NOOPT('g', NULL, &callchain_param,
		NULL, "enables call-graph recording and display",
	OPT_CALLBACK(0, "call-graph", &callchain_param,
		"record_mode[,record_size],print_type,threshold[,print_limit],order,sort_key[,branch]",
		top_callchain_help, &parse_callchain_opt),
	OPT_BOOLEAN(0, "children", &symbol_conf.cumulate_callchain,
		"Accumulate callchains of children and show total overhead as well"),
	OPT_INTEGER(0, "max-stack", &top.max_stack,
		"Set the maximum stack depth when parsing the callchain. "
		"Default: kernel.perf_event_max_stack or " __stringify(PERF_MAX_STACK_DEPTH)),
	OPT_CALLBACK(0, "ignore-callees", NULL, "regex",
		"ignore callees of these functions in call graphs",
		report_parse_ignore_callees_opt),
	OPT_BOOLEAN(0, "show-total-period", &symbol_conf.show_total_period,
		"Show a column with the sum of periods"),
	OPT_STRING(0, "dsos", &symbol_conf.dso_list_str, "dso[,dso...]",
		"only consider symbols in these dsos"),
	OPT_STRING(0, "comms", &symbol_conf.comm_list_str, "comm[,comm...]",
		"only consider symbols in these comms"),
	OPT_STRING(0, "symbols", &symbol_conf.sym_list_str, "symbol[,symbol...]",
		"only consider these symbols"),
	OPT_BOOLEAN(0, "source", &top.annotation_opts.annotate_src,
		"Interleave source code with assembly code (default)"),
	OPT_BOOLEAN(0, "asm-raw", &top.annotation_opts.show_asm_raw,
		"Display raw encoding of assembly instructions (default)"),
	OPT_BOOLEAN(0, "demangle-kernel", &symbol_conf.demangle_kernel,
		"Enable kernel symbol demangling"),
	OPT_BOOLEAN(0, "no-bpf-event", &top.record_opts.no_bpf_event, "do not record bpf events"),
	OPT_STRING(0, "objdump", &objdump_path, "path",
		"objdump binary to use for disassembly and annotations"),
	OPT_STRING(0, "addr2line", &addr2line_path, "path",
		"addr2line binary to use for line numbers"),
	OPT_STRING('M', "disassembler-style", &disassembler_style, "disassembler style",
		"Specify disassembler style (e.g. -M intel for intel syntax)"),
	OPT_STRING(0, "prefix", &top.annotation_opts.prefix, "prefix",
		"Add prefix to source file path names in programs (with --prefix-strip)"),
	OPT_STRING(0, "prefix-strip", &top.annotation_opts.prefix_strip, "N",
		"Strip first N entries of source file path name in programs (with --prefix)"),
	OPT_STRING('u', "uid", &target->uid_str, "user", "user to profile"),
	OPT_CALLBACK(0, "percent-limit", &top, "percent",
		"Don't show entries under that percent", parse_percent_limit),
	OPT_CALLBACK(0, "percentage", NULL, "relative|absolute",
		"How to display percentage of filtered entries", parse_filter_percentage),
	OPT_STRING('w', "column-widths", &symbol_conf.col_width_list_str,
		"don't try to adjust column width, use these fixed values"),
	OPT_UINTEGER(0, "proc-map-timeout", &proc_map_timeout,
		"per thread proc mmap processing timeout in ms"),
	OPT_CALLBACK_NOOPT('b', "branch-any", &opts->branch_stack,
		"branch any", "sample any taken branches",
		parse_branch_stack),
	OPT_CALLBACK('j', "branch-filter", &opts->branch_stack,
		"branch filter mask", "branch stack filter modes",
		parse_branch_stack),
	OPT_BOOLEAN(0, "branch-history", &branch_call_mode,
		"add last branch records to call history"),
	OPT_BOOLEAN(0, "raw-trace", &symbol_conf.raw_trace,
		"Show raw trace event output (do not use print fmt or plugins)"),
	OPT_BOOLEAN(0, "hierarchy", &symbol_conf.report_hierarchy,
		"Show entries in a hierarchy"),
	OPT_BOOLEAN(0, "overwrite", &top.record_opts.overwrite,
		"Use a backward ring buffer, default: no"),
	OPT_BOOLEAN(0, "force", &symbol_conf.force, "don't complain, do it"),
	OPT_UINTEGER(0, "num-thread-synthesize", &top.nr_threads_synthesize,
1568 "number of thread to run event synthesize"),
	OPT_CALLBACK('G', "cgroup", &top.evlist, "name",
		"monitor event in cgroup name only", parse_cgroups),
	OPT_BOOLEAN(0, "namespaces", &opts->record_namespaces,
		"Record namespaces events"),
	OPT_BOOLEAN(0, "all-cgroups", &opts->record_cgroup,
		"Record cgroup events"),
	OPT_INTEGER(0, "group-sort-idx", &symbol_conf.group_sort_idx,
		"Sort the output by the event at the index n in group. "
		"If n is invalid, sort by the first event. "
		"WARNING: should be used on grouped events."),
	OPT_BOOLEAN(0, "stitch-lbr", &top.stitch_lbr,
		"Enable LBR callgraph stitching approach"),
	OPT_CALLBACK(0, "pfm-events", &top.evlist, "event",
		"libpfm4 event selector. use 'perf list' to list available events",
		parse_libpfm_events_option),
	OPTS_EVSWITCH(&top.evswitch),
	const char * const top_usage[] = {
		"perf top [<options>]",
	int status = hists__init();
	annotation_options__init(&top.annotation_opts);
	top.annotation_opts.min_pcnt = 5;
	top.annotation_opts.context = 4;
	top.evlist = evlist__new();
	if (top.evlist == NULL)
	status = perf_config(perf_top_config, &top);
	 * Since the per arch annotation init routine may need the cpuid, read
	 * it here, since we are not getting this from the perf.data header.
	status = perf_env__read_cpuid(&perf_env);
	 * Some arches do not provide a get_cpuid(), so just use pr_debug, otherwise
	 * warn the user explicitly.
	eprintf(status == ENOSYS ? 1 : 0, verbose,
		"Couldn't read the cpuid for this machine: %s\n",
		str_error_r(errno, errbuf, sizeof(errbuf)));
	top.evlist->env = &perf_env;
	argc = parse_options(argc, argv, options, top_usage, 0);
		usage_with_options(top_usage, options);
	if (disassembler_style) {
		top.annotation_opts.disassembler_style = strdup(disassembler_style);
		if (!top.annotation_opts.disassembler_style)
		top.annotation_opts.objdump_path = strdup(objdump_path);
		if (!top.annotation_opts.objdump_path)
	if (addr2line_path) {
		symbol_conf.addr2line_path = strdup(addr2line_path);
		if (!symbol_conf.addr2line_path)
	status = symbol__validate_sym_arguments();
		goto out_delete_evlist;
	if (annotate_check_args(&top.annotation_opts) < 0)
		goto out_delete_evlist;
	if (!top.evlist->core.nr_entries &&
		evlist__add_default(top.evlist) < 0) {
		pr_err("Not enough memory for event selector list\n");
		goto out_delete_evlist;
	status = evswitch__init(&top.evswitch, top.evlist, stderr);
		goto out_delete_evlist;
	if (symbol_conf.report_hierarchy) {
		/* disable incompatible options */
		symbol_conf.event_group = false;
		symbol_conf.cumulate_callchain = false;
			pr_err("Error: --hierarchy and --fields options cannot be used together\n");
			parse_options_usage(top_usage, options, "fields", 0);
			parse_options_usage(NULL, options, "hierarchy", 0);
			goto out_delete_evlist;
	if (top.stitch_lbr && !(callchain_param.record_mode == CALLCHAIN_LBR)) {
		pr_err("Error: --stitch-lbr must be used with --call-graph lbr\n");
		goto out_delete_evlist;
	if (nr_cgroups > 0 && opts->record_cgroup) {
		pr_err("--cgroup and --all-cgroups cannot be used together\n");
		goto out_delete_evlist;
	if (branch_call_mode) {
		if (!opts->branch_stack)
			opts->branch_stack = PERF_SAMPLE_BRANCH_ANY;
		symbol_conf.use_callchain = true;
		callchain_param.key = CCKEY_ADDRESS;
		callchain_param.branch_callstack = true;
		callchain_param.enabled = true;
		if (callchain_param.record_mode == CALLCHAIN_NONE)
			callchain_param.record_mode = CALLCHAIN_FP;
		callchain_register_param(&callchain_param);
			sort_order = "srcline,symbol,dso";
	if (opts->branch_stack && callchain_param.enabled)
		symbol_conf.show_branchflag_count = true;
	sort__mode = SORT_MODE__TOP;
	/* display thread wants entries to be collapsed in a different tree */
	perf_hpp_list.need_collapse = 1;
#ifdef HAVE_SLANG_SUPPORT
	else if (top.use_tui)
	setup_browser(false);
	if (setup_sorting(top.evlist) < 0) {
			parse_options_usage(top_usage, options, "s", 1);
			parse_options_usage(sort_order ? NULL : top_usage,
				options, "fields", 0);
		goto out_delete_evlist;
	status = target__validate(target);
		target__strerror(target, status, errbuf, BUFSIZ);
		ui__warning("%s\n", errbuf);
	status = target__parse_uid(target);
		int saved_errno = errno;
		target__strerror(target, status, errbuf, BUFSIZ);
		ui__error("%s\n", errbuf);
		status = -saved_errno;
		goto out_delete_evlist;
	if (target__none(target))
		target->system_wide = true;
	if (evlist__create_maps(top.evlist, target) < 0) {
		ui__error("Couldn't create thread/CPU maps: %s\n",
			errno == ENOENT ? "No such process" : str_error_r(errno, errbuf, sizeof(errbuf)));
		goto out_delete_evlist;
	if (top.delay_secs < 1)
	if (record_opts__config(opts)) {
		goto out_delete_evlist;
	top.sym_evsel = evlist__first(top.evlist);
	if (!callchain_param.enabled) {
		symbol_conf.cumulate_callchain = false;
		perf_hpp__cancel_cumulate();
	if (symbol_conf.cumulate_callchain && !callchain_param.order_set)
		callchain_param.order = ORDER_CALLER;
	status = symbol__annotation_init();
		goto out_delete_evlist;
	annotation_config__init(&top.annotation_opts);
	symbol_conf.try_vmlinux_path = (symbol_conf.vmlinux_name == NULL);
	status = symbol__init(NULL);
		goto out_delete_evlist;
	sort__setup_elide(stdout);
	get_term_dimensions(&top.winsize);
	if (top.print_entries == 0) {
		perf_top__update_print_entries(&top);
		signal(SIGWINCH, winch_sig);
	top.session = perf_session__new(NULL, NULL);
	if (IS_ERR(top.session)) {
		status = PTR_ERR(top.session);
		goto out_delete_evlist;
#ifdef HAVE_LIBBPF_SUPPORT
	if (!top.record_opts.no_bpf_event) {
		top.sb_evlist = evlist__new();
		if (top.sb_evlist == NULL) {
1800 pr_err("Couldn't create side band evlist.\n.");
			goto out_delete_evlist;
		if (evlist__add_bpf_sb_event(top.sb_evlist, &perf_env)) {
1806 pr_err("Couldn't ask for PERF_RECORD_BPF_EVENT side band events.\n.");
			goto out_delete_evlist;
	if (evlist__start_sb_thread(top.sb_evlist, target)) {
		pr_debug("Couldn't start the BPF side band thread:\nBPF programs starting from now on won't be annotatable\n");
		opts->no_bpf_event = true;
	status = __cmd_top(&top);
	if (!opts->no_bpf_event)
		evlist__stop_sb_thread(top.sb_evlist);
	evlist__delete(top.evlist);
	perf_session__delete(top.session);
	annotation_options__exit(&top.annotation_opts);