1 // SPDX-License-Identifier: GPL-2.0
6 #include <linux/mman.h>
7 #include <linux/time64.h>
12 #include "cacheline.h"
17 #include "map_symbol.h"
25 #include "mem-events.h"
28 #include "annotate-data.h"
30 #include "time-utils.h"
33 #include "trace-event.h"
34 #include <linux/kernel.h>
35 #include <linux/string.h>
37 #ifdef HAVE_LIBTRACEEVENT
38 #include <traceevent/event-parse.h>
/* Regex matched against callchain entries to locate the "parent" symbol. */
const char default_parent_pattern[] = "^sys_|^do_page_fault";
const char *parent_pattern = default_parent_pattern;
/* Default sort-key strings for the various report/top/diff modes. */
const char *default_sort_order = "comm,dso,symbol";
const char default_branch_sort_order[] = "comm,dso_from,symbol_from,symbol_to,cycles";
const char default_mem_sort_order[] = "local_weight,mem,sym,dso,symbol_daddr,dso_daddr,snoop,tlb,locked,blocked,local_ins_lat,local_p_stage_cyc";
const char default_top_sort_order[] = "dso,symbol";
const char default_diff_sort_order[] = "dso,symbol";
const char default_tracepoint_sort_order[] = "trace";
/* User-selected orders; NULL means "use the default for the mode". */
const char *sort_order;
const char *field_order;
/* Compiled --ignore-callees regex, and flag telling whether it is set. */
regex_t ignore_callees_regex;
int have_ignore_callees = 0;
enum sort_mode sort__mode = SORT_MODE__NORMAL;
/* Sort keys whose header text may be replaced at runtime — names suggest
 * the headers are chosen dynamically; confirm against the users below. */
static const char *const dynamic_headers[] = {"local_ins_lat", "ins_lat", "local_p_stage_cyc", "p_stage_cyc"};
/* Sort keys that an architecture may override with its own variant. */
static const char *const arch_specific_sort_keys[] = {"local_p_stage_cyc", "p_stage_cyc"};
59 * Some architectures have Adjacent Cacheline Prefetch feature, which
60 * behaves like the cacheline size is doubled. Enable this flag to
61 * check things in double cacheline granularity.
 * Replaces all occurrences of the character given with the
 * -t, --field-separator
 * option, which selects a special separator character and disables padding
 * with spaces, replacing all occurrences of this separator in symbol names
 * (and other output) with a '.' character, so that it is the only invalid
 * separator.
/*
 * snprintf() wrapper honouring -t/--field-separator: formats into @bf and,
 * when a separator character is configured, scans the output for it
 * (replacement logic continues past the lines visible here).
 */
static int repsep_snprintf(char *bf, size_t size, const char *fmt, ...)
	n = vsnprintf(bf, size, fmt, ap);
	if (symbol_conf.field_sep && n > 0) {
		/* locate each occurrence of the separator in the output */
		sep = strchr(sep, *symbol_conf.field_sep);
/* Three-way compare used when at least one of two pointers is NULL. */
static int64_t cmp_null(const void *l, const void *r)
/* --sort pid: order hist entries by thread id (right minus left). */
sort__thread_cmp(struct hist_entry *left, struct hist_entry *right)
	return thread__tid(right->thread) - thread__tid(left->thread);

/* Print "tid:comm" with the tid right-aligned in 7 columns. */
static int hist_entry__thread_snprintf(struct hist_entry *he, char *bf,
				       size_t size, unsigned int width)
	const char *comm = thread__comm_str(he->thread);

	/* reserve 8 columns for the "%7d:" tid prefix */
	width = max(7U, width) - 8;
	return repsep_snprintf(bf, size, "%7d:%-*.*s", thread__tid(he->thread),
			       width, width, comm ?: "");

/* Hide entries whose thread differs from the one given in @arg. */
static int hist_entry__thread_filter(struct hist_entry *he, int type, const void *arg)
	const struct thread *th = arg;

	if (type != HIST_FILTER__THREAD)

	return th && !RC_CHK_EQUAL(he->thread, th);

struct sort_entry sort_thread = {
	.se_header	= " Pid:Command",
	.se_cmp		= sort__thread_cmp,
	.se_snprintf	= hist_entry__thread_snprintf,
	.se_filter	= hist_entry__thread_filter,
	.se_width_idx	= HISTC_THREAD,
/* --sort simd: group by SIMD architecture flags, then predication flags. */
sort__simd_cmp(struct hist_entry *left, struct hist_entry *right)
	if (left->simd_flags.arch != right->simd_flags.arch)
		return (int64_t) left->simd_flags.arch - right->simd_flags.arch;

	return (int64_t) left->simd_flags.pred - right->simd_flags.pred;

/* Map SIMD arch flag bits to a printable ISA name (e.g. SVE). */
static const char *hist_entry__get_simd_name(struct simd_flags *simd_flags)
	u64 arch = simd_flags->arch;

	if (arch & SIMD_OP_FLAGS_ARCH_SVE)

/* Print "[e]"/"[p]"/"[.]" predication marker followed by the ISA name. */
static int hist_entry__simd_snprintf(struct hist_entry *he, char *bf,
				     size_t size, unsigned int width __maybe_unused)
	/* no SIMD flags recorded: print nothing */
	if (!he->simd_flags.arch)
		return repsep_snprintf(bf, size, "");

	name = hist_entry__get_simd_name(&he->simd_flags);

	if (he->simd_flags.pred & SIMD_OP_FLAGS_PRED_EMPTY)
		return repsep_snprintf(bf, size, "[e] %s", name);
	else if (he->simd_flags.pred & SIMD_OP_FLAGS_PRED_PARTIAL)
		return repsep_snprintf(bf, size, "[p] %s", name);

	return repsep_snprintf(bf, size, "[.] %s", name);

struct sort_entry sort_simd = {
	.se_header	= "Simd   ",
	.se_cmp		= sort__simd_cmp,
	.se_snprintf	= hist_entry__simd_snprintf,
	.se_width_idx	= HISTC_SIMD,
193 * We can't use pointer comparison in functions below,
194 * because it gives different results based on pointer
195 * values, which could break some sorting assumptions.
/* --sort comm: string compare on the command name (see note above about
 * avoiding pointer comparison). */
sort__comm_cmp(struct hist_entry *left, struct hist_entry *right)
	return strcmp(comm__str(right->comm), comm__str(left->comm));

/* Collapse stage uses the same string comparison. */
sort__comm_collapse(struct hist_entry *left, struct hist_entry *right)
	return strcmp(comm__str(right->comm), comm__str(left->comm));

/* Output-sort stage also compares by string. */
sort__comm_sort(struct hist_entry *left, struct hist_entry *right)
	return strcmp(comm__str(right->comm), comm__str(left->comm));

/* Print the command name left-aligned in @width columns. */
static int hist_entry__comm_snprintf(struct hist_entry *he, char *bf,
				     size_t size, unsigned int width)
	return repsep_snprintf(bf, size, "%-*.*s", width, width, comm__str(he->comm));

struct sort_entry sort_comm = {
	.se_header	= "Command",
	.se_cmp		= sort__comm_cmp,
	.se_collapse	= sort__comm_collapse,
	.se_sort	= sort__comm_sort,
	.se_snprintf	= hist_entry__comm_snprintf,
	/* thread filter is reused: filtering by thread also filters comms */
	.se_filter	= hist_entry__thread_filter,
	.se_width_idx	= HISTC_COMM,
/* Compare two maps by their DSO name; NULL DSOs sort via cmp_null(). */
static int64_t _sort__dso_cmp(struct map *map_l, struct map *map_r)
	struct dso *dso_l = map_l ? map__dso(map_l) : NULL;
	struct dso *dso_r = map_r ? map__dso(map_r) : NULL;
	const char *dso_name_l, *dso_name_r;

	if (!dso_l || !dso_r)
		return cmp_null(dso_r, dso_l);

	/* long (path) names in one case, short names otherwise — the guard
	 * is not visible here; presumably keyed off verbose, as in
	 * _hist_entry__dso_snprintf() below */
	dso_name_l = dso__long_name(dso_l);
	dso_name_r = dso__long_name(dso_r);

	dso_name_l = dso__short_name(dso_l);
	dso_name_r = dso__short_name(dso_r);

	return strcmp(dso_name_l, dso_name_r);

/* --sort dso: compare the entries' primary maps. */
sort__dso_cmp(struct hist_entry *left, struct hist_entry *right)
	return _sort__dso_cmp(right->ms.map, left->ms.map);

/* Print a map's DSO name; "[unknown]" when there is no map/DSO. */
static int _hist_entry__dso_snprintf(struct map *map, char *bf,
				     size_t size, unsigned int width)
	const struct dso *dso = map ? map__dso(map) : NULL;
	const char *dso_name = "[unknown]";

	/* verbose mode shows the full path, otherwise the short name */
	dso_name = verbose > 0 ? dso__long_name(dso) : dso__short_name(dso);

	return repsep_snprintf(bf, size, "%-*.*s", width, width, dso_name);

static int hist_entry__dso_snprintf(struct hist_entry *he, char *bf,
				    size_t size, unsigned int width)
	return _hist_entry__dso_snprintf(he->ms.map, bf, size, width);

/* Hide entries whose map's DSO differs from the one given in @arg. */
static int hist_entry__dso_filter(struct hist_entry *he, int type, const void *arg)
	const struct dso *dso = arg;

	if (type != HIST_FILTER__DSO)

	return dso && (!he->ms.map || map__dso(he->ms.map) != dso);

struct sort_entry sort_dso = {
	.se_header	= "Shared Object",
	.se_cmp		= sort__dso_cmp,
	.se_snprintf	= hist_entry__dso_snprintf,
	.se_filter	= hist_entry__dso_filter,
	.se_width_idx	= HISTC_DSO,
/* Three-way compare of two instruction addresses. */
static int64_t _sort__addr_cmp(u64 left_ip, u64 right_ip)
	return (int64_t)(right_ip - left_ip);

/* Compare two symbols; NULL symbols sort via cmp_null(). */
int64_t _sort__sym_cmp(struct symbol *sym_l, struct symbol *sym_r)
	if (!sym_l || !sym_r)
		return cmp_null(sym_l, sym_r);

	/* inlined symbols can share addresses, so compare by name first */
	if (sym_l->inlined || sym_r->inlined) {
		int ret = strcmp(sym_l->name, sym_r->name);

	/* overlapping address ranges count as the same symbol */
	if ((sym_l->start <= sym_r->end) && (sym_l->end >= sym_r->start))

	if (sym_l->start != sym_r->start)
		return (int64_t)(sym_r->start - sym_l->start);

	return (int64_t)(sym_r->end - sym_l->end);

/* --sort symbol: fall back to raw IP when neither entry has a symbol. */
sort__sym_cmp(struct hist_entry *left, struct hist_entry *right)
	if (!left->ms.sym && !right->ms.sym)
		return _sort__addr_cmp(left->ip, right->ip);

	/*
	 * comparing symbol address alone is not enough since it's a
	 * relative address within a dso.
	 */
	if (!hists__has(left->hists, dso)) {
		ret = sort__dso_cmp(left, right);

	return _sort__sym_cmp(left->ms.sym, right->ms.sym);

/* Output-sort stage: alphabetical by symbol name. */
sort__sym_sort(struct hist_entry *left, struct hist_entry *right)
	if (!left->ms.sym || !right->ms.sym)
		return cmp_null(left->ms.sym, right->ms.sym);

	return strcmp(right->ms.sym->name, left->ms.sym->name);

/* Format "addr origin [level] name(+offset)" for a map/symbol pair. */
static int _hist_entry__sym_snprintf(struct map_symbol *ms,
				     u64 ip, char level, char *bf, size_t size,
	struct symbol *sym = ms->sym;
	struct map *map = ms->map;

	struct dso *dso = map ? map__dso(map) : NULL;
	/* symtab origin char; '!' when there is no DSO */
	char o = dso ? dso__symtab_origin(dso) : '!';

	/* kernel symbols with adjusted symtabs print the unmapped (real) IP */
	if (dso && dso__kernel(dso) && dso__adjust_symbols(dso))
		rip = map__unmap_ip(map, ip);

	ret += repsep_snprintf(bf, size, "%-#*llx %c ",
			       BITS_PER_LONG / 4 + 2, rip, o);

	ret += repsep_snprintf(bf + ret, size - ret, "[%c] ", level);

	/* data objects get a "name+0xoffset" rendering */
	if (sym->type == STT_OBJECT) {
		ret += repsep_snprintf(bf + ret, size - ret, "%s", sym->name);
		ret += repsep_snprintf(bf + ret, size - ret, "+0x%llx",
				       ip - map__unmap_ip(map, sym->start));

	ret += repsep_snprintf(bf + ret, size - ret, "%.*s",

	ret += repsep_snprintf(bf + ret, size - ret,

	/* no symbol: print the raw address, one nibble per 4 bits */
	size_t len = BITS_PER_LONG / 4;
	ret += repsep_snprintf(bf + ret, size - ret, "%-#.*llx",

int hist_entry__sym_snprintf(struct hist_entry *he, char *bf, size_t size, unsigned int width)
	return _hist_entry__sym_snprintf(&he->ms, he->ip,
					 he->level, bf, size, width);

/* Hide entries whose symbol name does not contain the substring @arg. */
static int hist_entry__sym_filter(struct hist_entry *he, int type, const void *arg)
	const char *sym = arg;

	if (type != HIST_FILTER__SYMBOL)

	return sym && (!he->ms.sym || !strstr(he->ms.sym->name, sym));

struct sort_entry sort_sym = {
	.se_header	= "Symbol",
	.se_cmp		= sort__sym_cmp,
	.se_sort	= sort__sym_sort,
	.se_snprintf	= hist_entry__sym_snprintf,
	.se_filter	= hist_entry__sym_filter,
	.se_width_idx	= HISTC_SYMBOL,
/* --sort symoff: compare by symbol first, then by raw IP as tiebreak. */
sort__symoff_cmp(struct hist_entry *left, struct hist_entry *right)
	ret = sort__sym_cmp(left, right);

	return left->ip - right->ip;

/* Output-sort stage: symbol-name order, IP as tiebreak. */
sort__symoff_sort(struct hist_entry *left, struct hist_entry *right)
	ret = sort__sym_sort(left, right);

	return left->ip - right->ip;

/* Print "[level] name+0xoffset", or the raw IP when there is no symbol. */
hist_entry__symoff_snprintf(struct hist_entry *he, char *bf, size_t size, unsigned int width)
	struct symbol *sym = he->ms.sym;

	/* width - 4 accounts for the "[c] " prefix */
	return repsep_snprintf(bf, size, "[%c] %-#.*llx", he->level, width - 4, he->ip);

	return repsep_snprintf(bf, size, "[%c] %s+0x%llx", he->level, sym->name, he->ip - sym->start);

struct sort_entry sort_sym_offset = {
	.se_header	= "Symbol Offset",
	.se_cmp		= sort__symoff_cmp,
	.se_sort	= sort__symoff_sort,
	.se_snprintf	= hist_entry__symoff_snprintf,
	.se_filter	= hist_entry__sym_filter,
	.se_width_idx	= HISTC_SYMBOL_OFFSET,
/* Resolve an entry's "file:line" string from its map/symbol and IP. */
char *hist_entry__srcline(struct hist_entry *he)
	return map__srcline(he->ms.map, he->ip, he->ms.sym);

/* --sort srcline: cheap compare by IP, then by DSO — avoids resolving
 * srclines during the (hot) initial comparison. */
sort__srcline_cmp(struct hist_entry *left, struct hist_entry *right)
	ret = _sort__addr_cmp(left->ip, right->ip);

	return sort__dso_cmp(left, right);

/* Collapse stage: lazily resolve and compare the actual srcline strings. */
sort__srcline_collapse(struct hist_entry *left, struct hist_entry *right)
	left->srcline = hist_entry__srcline(left);

	right->srcline = hist_entry__srcline(right);

	return strcmp(right->srcline, left->srcline);

sort__srcline_sort(struct hist_entry *left, struct hist_entry *right)
	return sort__srcline_collapse(left, right);

/* Pre-resolve the srcline when the entry is created. */
sort__srcline_init(struct hist_entry *he)
	he->srcline = hist_entry__srcline(he);

static int hist_entry__srcline_snprintf(struct hist_entry *he, char *bf,
					size_t size, unsigned int width)
	return repsep_snprintf(bf, size, "%-.*s", width, he->srcline);

struct sort_entry sort_srcline = {
	.se_header	= "Source:Line",
	.se_cmp		= sort__srcline_cmp,
	.se_collapse	= sort__srcline_collapse,
	.se_sort	= sort__srcline_sort,
	.se_init	= sort__srcline_init,
	.se_snprintf	= hist_entry__srcline_snprintf,
	.se_width_idx	= HISTC_SRCLINE,
528 /* --sort srcline_from */
/* Resolve "file:line" for a branch endpoint (addr_map_symbol). */
static char *addr_map_symbol__srcline(struct addr_map_symbol *ams)
	return map__srcline(ams->ms.map, ams->al_addr, ams->ms.sym);

/* --sort srcline_from: cheap compare by branch source address. */
sort__srcline_from_cmp(struct hist_entry *left, struct hist_entry *right)
	return left->branch_info->from.addr - right->branch_info->from.addr;

/* Collapse stage: lazily resolve and compare the srcline strings. */
sort__srcline_from_collapse(struct hist_entry *left, struct hist_entry *right)
	if (!left->branch_info->srcline_from)
		left->branch_info->srcline_from = addr_map_symbol__srcline(&left->branch_info->from);

	if (!right->branch_info->srcline_from)
		right->branch_info->srcline_from = addr_map_symbol__srcline(&right->branch_info->from);

	return strcmp(right->branch_info->srcline_from, left->branch_info->srcline_from);

sort__srcline_from_sort(struct hist_entry *left, struct hist_entry *right)
	return sort__srcline_from_collapse(left, right);

/* Pre-resolve the source srcline when the entry is created. */
static void sort__srcline_from_init(struct hist_entry *he)
	if (!he->branch_info->srcline_from)
		he->branch_info->srcline_from = addr_map_symbol__srcline(&he->branch_info->from);

static int hist_entry__srcline_from_snprintf(struct hist_entry *he, char *bf,
					     size_t size, unsigned int width)
	return repsep_snprintf(bf, size, "%-*.*s", width, width, he->branch_info->srcline_from);

struct sort_entry sort_srcline_from = {
	.se_header	= "From Source:Line",
	.se_cmp		= sort__srcline_from_cmp,
	.se_collapse	= sort__srcline_from_collapse,
	.se_sort	= sort__srcline_from_sort,
	.se_init	= sort__srcline_from_init,
	.se_snprintf	= hist_entry__srcline_from_snprintf,
	.se_width_idx	= HISTC_SRCLINE_FROM,
581 /* --sort srcline_to */
/* --sort srcline_to: cheap compare by branch target address. */
sort__srcline_to_cmp(struct hist_entry *left, struct hist_entry *right)
	return left->branch_info->to.addr - right->branch_info->to.addr;

/* Collapse stage: lazily resolve and compare the target srclines. */
sort__srcline_to_collapse(struct hist_entry *left, struct hist_entry *right)
	if (!left->branch_info->srcline_to)
		left->branch_info->srcline_to = addr_map_symbol__srcline(&left->branch_info->to);

	if (!right->branch_info->srcline_to)
		right->branch_info->srcline_to = addr_map_symbol__srcline(&right->branch_info->to);

	return strcmp(right->branch_info->srcline_to, left->branch_info->srcline_to);

sort__srcline_to_sort(struct hist_entry *left, struct hist_entry *right)
	return sort__srcline_to_collapse(left, right);

/* Pre-resolve the target srcline when the entry is created. */
static void sort__srcline_to_init(struct hist_entry *he)
	if (!he->branch_info->srcline_to)
		he->branch_info->srcline_to = addr_map_symbol__srcline(&he->branch_info->to);

static int hist_entry__srcline_to_snprintf(struct hist_entry *he, char *bf,
					   size_t size, unsigned int width)
	return repsep_snprintf(bf, size, "%-*.*s", width, width, he->branch_info->srcline_to);

struct sort_entry sort_srcline_to = {
	.se_header	= "To Source:Line",
	.se_cmp		= sort__srcline_to_cmp,
	.se_collapse	= sort__srcline_to_collapse,
	.se_sort	= sort__srcline_to_sort,
	.se_init	= sort__srcline_to_init,
	.se_snprintf	= hist_entry__srcline_to_snprintf,
	.se_width_idx	= HISTC_SRCLINE_TO,
/* Print "IPC [coverage%]" derived from the symbol's annotation data. */
static int hist_entry__sym_ipc_snprintf(struct hist_entry *he, char *bf,
					size_t size, unsigned int width)
	struct symbol *sym = he->ms.sym;
	struct annotated_branch *branch;
	double ipc = 0.0, coverage = 0.0;

	/* no symbol: print a placeholder */
	return repsep_snprintf(bf, size, "%-*s", width, "-");

	branch = symbol__annotation(sym)->branch;

	if (branch && branch->hit_cycles)
		ipc = branch->hit_insn / ((double)branch->hit_cycles);

	/* coverage = % of the symbol's instructions actually sampled */
	if (branch && branch->total_insn) {
		coverage = branch->cover_insn * 100.0 /
			   ((double)branch->total_insn);

	snprintf(tmp, sizeof(tmp), "%-5.2f [%5.1f%%]", ipc, coverage);
	return repsep_snprintf(bf, size, "%-*s", width, tmp);

struct sort_entry sort_sym_ipc = {
	.se_header	= "IPC   [IPC Coverage]",
	.se_cmp		= sort__sym_cmp,
	.se_snprintf	= hist_entry__sym_ipc_snprintf,
	.se_width_idx	= HISTC_SYMBOL_IPC,

/* Placeholder variant used when no IPC data is available. */
static int hist_entry__sym_ipc_null_snprintf(struct hist_entry *he
					     char *bf, size_t size,
	snprintf(tmp, sizeof(tmp), "%-5s %2s", "-", "-");
	return repsep_snprintf(bf, size, "%-*s", width, tmp);

struct sort_entry sort_sym_ipc_null = {
	.se_header	= "IPC   [IPC Coverage]",
	.se_cmp		= sort__sym_cmp,
	.se_snprintf	= hist_entry__sym_ipc_null_snprintf,
	.se_width_idx	= HISTC_SYMBOL_IPC,
/* Shared empty-string sentinel for entries without source-file info. */
static char no_srcfile[1];

/* Resolve just the source file name (no line number) for an entry. */
static char *hist_entry__get_srcfile(struct hist_entry *e)
	struct map *map = e->ms.map;

	sf = __get_srcline(map__dso(map), map__rip_2objdump(map, e->ip),
			   e->ms.sym, false, true, true, e->ip);
	if (sf == SRCLINE_UNKNOWN)

/* --sort srcfile: reuse the cheap srcline comparison (IP, then DSO). */
sort__srcfile_cmp(struct hist_entry *left, struct hist_entry *right)
	return sort__srcline_cmp(left, right);

/* Collapse stage: lazily resolve and compare the file-name strings. */
sort__srcfile_collapse(struct hist_entry *left, struct hist_entry *right)
	left->srcfile = hist_entry__get_srcfile(left);

	right->srcfile = hist_entry__get_srcfile(right);

	return strcmp(right->srcfile, left->srcfile);

sort__srcfile_sort(struct hist_entry *left, struct hist_entry *right)
	return sort__srcfile_collapse(left, right);

/* Pre-resolve the source file when the entry is created. */
static void sort__srcfile_init(struct hist_entry *he)
	he->srcfile = hist_entry__get_srcfile(he);

static int hist_entry__srcfile_snprintf(struct hist_entry *he, char *bf,
					size_t size, unsigned int width)
	return repsep_snprintf(bf, size, "%-.*s", width, he->srcfile);

struct sort_entry sort_srcfile = {
	.se_header	= "Source File",
	.se_cmp		= sort__srcfile_cmp,
	.se_collapse	= sort__srcfile_collapse,
	.se_sort	= sort__srcfile_sort,
	.se_init	= sort__srcfile_init,
	.se_snprintf	= hist_entry__srcfile_snprintf,
	.se_width_idx	= HISTC_SRCFILE,
/* --sort parent: compare the resolved parent (caller) symbols by name. */
sort__parent_cmp(struct hist_entry *left, struct hist_entry *right)
	struct symbol *sym_l = left->parent;
	struct symbol *sym_r = right->parent;

	if (!sym_l || !sym_r)
		return cmp_null(sym_l, sym_r);

	return strcmp(sym_r->name, sym_l->name);

/* Print the parent symbol name, or "[other]" when none was found. */
static int hist_entry__parent_snprintf(struct hist_entry *he, char *bf,
				       size_t size, unsigned int width)
	return repsep_snprintf(bf, size, "%-*.*s", width, width,
			       he->parent ? he->parent->name : "[other]");

struct sort_entry sort_parent = {
	.se_header	= "Parent symbol",
	.se_cmp		= sort__parent_cmp,
	.se_snprintf	= hist_entry__parent_snprintf,
	.se_width_idx	= HISTC_PARENT,
/* --sort cpu: order by the CPU the sample was taken on. */
sort__cpu_cmp(struct hist_entry *left, struct hist_entry *right)
	return right->cpu - left->cpu;

static int hist_entry__cpu_snprintf(struct hist_entry *he, char *bf,
				    size_t size, unsigned int width)
	return repsep_snprintf(bf, size, "%*.*d", width, width, he->cpu);

struct sort_entry sort_cpu = {
	.se_cmp		= sort__cpu_cmp,
	.se_snprintf	= hist_entry__cpu_snprintf,
	.se_width_idx	= HISTC_CPU,
799 /* --sort cgroup_id */
/* Compare cgroup namespace device numbers. */
static int64_t _sort__cgroup_dev_cmp(u64 left_dev, u64 right_dev)
	return (int64_t)(right_dev - left_dev);

/* Compare cgroup namespace inode numbers. */
static int64_t _sort__cgroup_inode_cmp(u64 left_ino, u64 right_ino)
	return (int64_t)(right_ino - left_ino);

/* --sort cgroup_id: order by device, then by inode. */
sort__cgroup_id_cmp(struct hist_entry *left, struct hist_entry *right)
	ret = _sort__cgroup_dev_cmp(right->cgroup_id.dev, left->cgroup_id.dev);

	return _sort__cgroup_inode_cmp(right->cgroup_id.ino,
				       left->cgroup_id.ino);

/* Print "dev/0xinode" identifying the cgroup namespace. */
static int hist_entry__cgroup_id_snprintf(struct hist_entry *he,
					  char *bf, size_t size,
					  unsigned int width __maybe_unused)
	return repsep_snprintf(bf, size, "%lu/0x%lx", he->cgroup_id.dev,

struct sort_entry sort_cgroup_id = {
	.se_header	= "cgroup id (dev/inode)",
	.se_cmp		= sort__cgroup_id_cmp,
	.se_snprintf	= hist_entry__cgroup_id_snprintf,
	.se_width_idx	= HISTC_CGROUP_ID,
/* --sort cgroup: order by the entry's cgroup id. */
sort__cgroup_cmp(struct hist_entry *left, struct hist_entry *right)
	return right->cgroup - left->cgroup;

/* Print the cgroup path, looked up in the machine's environment. */
static int hist_entry__cgroup_snprintf(struct hist_entry *he,
				       char *bf, size_t size,
				       unsigned int width __maybe_unused)
	const char *cgrp_name = "N/A";

	struct cgroup *cgrp = cgroup__find(maps__machine(he->ms.maps)->env,
	/* found: use its name; otherwise mark it unknown */
	cgrp_name = cgrp->name;

	cgrp_name = "unknown";

	return repsep_snprintf(bf, size, "%s", cgrp_name);

struct sort_entry sort_cgroup = {
	.se_header	= "Cgroup",
	.se_cmp		= sort__cgroup_cmp,
	.se_snprintf	= hist_entry__cgroup_snprintf,
	.se_width_idx	= HISTC_CGROUP,
/* --sort socket: order by the processor socket of the sample. */
sort__socket_cmp(struct hist_entry *left, struct hist_entry *right)
	return right->socket - left->socket;

static int hist_entry__socket_snprintf(struct hist_entry *he, char *bf,
				       size_t size, unsigned int width)
	return repsep_snprintf(bf, size, "%*.*d", width, width-3, he->socket);

/* Hide entries whose socket differs from the one given in @arg;
 * a negative socket in @arg disables the filter. */
static int hist_entry__socket_filter(struct hist_entry *he, int type, const void *arg)
	int sk = *(const int *)arg;

	if (type != HIST_FILTER__SOCKET)

	return sk >= 0 && he->socket != sk;

struct sort_entry sort_socket = {
	.se_header	= "Socket",
	.se_cmp		= sort__socket_cmp,
	.se_snprintf	= hist_entry__socket_snprintf,
	.se_filter	= hist_entry__socket_filter,
	.se_width_idx	= HISTC_SOCKET,
/* --sort time: order by sample timestamp. */
sort__time_cmp(struct hist_entry *left, struct hist_entry *right)
	return right->time - left->time;

/* Print the timestamp in nanoseconds or microseconds depending on
 * symbol_conf.nanosecs. */
static int hist_entry__time_snprintf(struct hist_entry *he, char *bf,
				     size_t size, unsigned int width)
	if (symbol_conf.nanosecs)
		timestamp__scnprintf_nsec(he->time, he_time,

	timestamp__scnprintf_usec(he->time, he_time,

	return repsep_snprintf(bf, size, "%-.*s", width, he_time);

struct sort_entry sort_time = {
	.se_cmp		= sort__time_cmp,
	.se_snprintf	= hist_entry__time_snprintf,
	.se_width_idx	= HISTC_TIME,
#ifdef HAVE_LIBTRACEEVENT
/* Render a tracepoint sample's payload to a heap-allocated string,
 * either as raw fields (--raw-trace) or via the event's print format. */
static char *get_trace_output(struct hist_entry *he)
	struct trace_seq seq;

	struct tep_record rec = {
		.data = he->raw_data,
		.size = he->raw_size,

	evsel = hists_to_evsel(he->hists);

	trace_seq_init(&seq);
	if (symbol_conf.raw_trace) {
		tep_print_fields(&seq, he->raw_data, he->raw_size,

	tep_print_event(evsel->tp_format->tep,
			&seq, &rec, "%s", TEP_PRINT_INFO);
	/*
	 * Trim the buffer, it starts at 4KB and we're not going to
	 * add anything more to this buffer.
	 */
	return realloc(seq.buffer, seq.len + 1);

/* --sort trace: compare the rendered trace output strings (resolved
 * lazily and cached on the entries). */
sort__trace_cmp(struct hist_entry *left, struct hist_entry *right)
	evsel = hists_to_evsel(left->hists);
	/* only tracepoint events have trace output */
	if (evsel->core.attr.type != PERF_TYPE_TRACEPOINT)

	if (left->trace_output == NULL)
		left->trace_output = get_trace_output(left);
	if (right->trace_output == NULL)
		right->trace_output = get_trace_output(right);

	return strcmp(right->trace_output, left->trace_output);

static int hist_entry__trace_snprintf(struct hist_entry *he, char *bf,
				      size_t size, unsigned int width)
	evsel = hists_to_evsel(he->hists);
	if (evsel->core.attr.type != PERF_TYPE_TRACEPOINT)
		return scnprintf(bf, size, "%-.*s", width, "N/A");

	if (he->trace_output == NULL)
		he->trace_output = get_trace_output(he);
	return repsep_snprintf(bf, size, "%-.*s", width, he->trace_output);

struct sort_entry sort_trace = {
	.se_header	= "Trace output",
	.se_cmp		= sort__trace_cmp,
	.se_snprintf	= hist_entry__trace_snprintf,
	.se_width_idx	= HISTC_TRACE,
#endif /* HAVE_LIBTRACEEVENT */
1002 /* sort keys for branch stacks */
/* --sort dso_from: compare the branch source maps' DSOs. */
sort__dso_from_cmp(struct hist_entry *left, struct hist_entry *right)
	if (!left->branch_info || !right->branch_info)
		return cmp_null(left->branch_info, right->branch_info);

	return _sort__dso_cmp(left->branch_info->from.ms.map,
			      right->branch_info->from.ms.map);

static int hist_entry__dso_from_snprintf(struct hist_entry *he, char *bf,
					 size_t size, unsigned int width)
	if (he->branch_info)
		return _hist_entry__dso_snprintf(he->branch_info->from.ms.map,

	return repsep_snprintf(bf, size, "%-*.*s", width, width, "N/A");

/* Hide entries whose branch-source DSO differs from @arg. */
static int hist_entry__dso_from_filter(struct hist_entry *he, int type,
	const struct dso *dso = arg;

	if (type != HIST_FILTER__DSO)

	return dso && (!he->branch_info || !he->branch_info->from.ms.map ||
		       map__dso(he->branch_info->from.ms.map) != dso);

/* --sort dso_to: compare the branch target maps' DSOs. */
sort__dso_to_cmp(struct hist_entry *left, struct hist_entry *right)
	if (!left->branch_info || !right->branch_info)
		return cmp_null(left->branch_info, right->branch_info);

	return _sort__dso_cmp(left->branch_info->to.ms.map,
			      right->branch_info->to.ms.map);

static int hist_entry__dso_to_snprintf(struct hist_entry *he, char *bf,
				       size_t size, unsigned int width)
	if (he->branch_info)
		return _hist_entry__dso_snprintf(he->branch_info->to.ms.map,

	return repsep_snprintf(bf, size, "%-*.*s", width, width, "N/A");

/* Hide entries whose branch-target DSO differs from @arg. */
static int hist_entry__dso_to_filter(struct hist_entry *he, int type,
	const struct dso *dso = arg;

	if (type != HIST_FILTER__DSO)

	return dso && (!he->branch_info || !he->branch_info->to.ms.map ||
		       map__dso(he->branch_info->to.ms.map) != dso);
/* --sort symbol_from: compare branch source symbols, falling back to the
 * raw source addresses when neither side has a symbol. */
sort__sym_from_cmp(struct hist_entry *left, struct hist_entry *right)
	struct addr_map_symbol *from_l, *from_r;

	if (!left->branch_info || !right->branch_info)
		return cmp_null(left->branch_info, right->branch_info);

	from_l = &left->branch_info->from;
	from_r = &right->branch_info->from;

	if (!from_l->ms.sym && !from_r->ms.sym)
		return _sort__addr_cmp(from_l->addr, from_r->addr);

	return _sort__sym_cmp(from_l->ms.sym, from_r->ms.sym);

/* --sort symbol_to: same scheme for the branch target side. */
sort__sym_to_cmp(struct hist_entry *left, struct hist_entry *right)
	struct addr_map_symbol *to_l, *to_r;

	if (!left->branch_info || !right->branch_info)
		return cmp_null(left->branch_info, right->branch_info);

	to_l = &left->branch_info->to;
	to_r = &right->branch_info->to;

	if (!to_l->ms.sym && !to_r->ms.sym)
		return _sort__addr_cmp(to_l->addr, to_r->addr);

	return _sort__sym_cmp(to_l->ms.sym, to_r->ms.sym);

static int hist_entry__sym_from_snprintf(struct hist_entry *he, char *bf,
					 size_t size, unsigned int width)
	if (he->branch_info) {
		struct addr_map_symbol *from = &he->branch_info->from;

		return _hist_entry__sym_snprintf(&from->ms, from->al_addr,
						 from->al_level, bf, size, width);

	return repsep_snprintf(bf, size, "%-*.*s", width, width, "N/A");

static int hist_entry__sym_to_snprintf(struct hist_entry *he, char *bf,
				       size_t size, unsigned int width)
	if (he->branch_info) {
		struct addr_map_symbol *to = &he->branch_info->to;

		return _hist_entry__sym_snprintf(&to->ms, to->al_addr,
						 to->al_level, bf, size, width);

	return repsep_snprintf(bf, size, "%-*.*s", width, width, "N/A");

/* Hide entries whose branch-source symbol does not contain @arg. */
static int hist_entry__sym_from_filter(struct hist_entry *he, int type,
	const char *sym = arg;

	if (type != HIST_FILTER__SYMBOL)

	return sym && !(he->branch_info && he->branch_info->from.ms.sym &&
			strstr(he->branch_info->from.ms.sym->name, sym));

/* Hide entries whose branch-target symbol does not contain @arg. */
static int hist_entry__sym_to_filter(struct hist_entry *he, int type,
	const char *sym = arg;

	if (type != HIST_FILTER__SYMBOL)

	return sym && !(he->branch_info && he->branch_info->to.ms.sym &&
			strstr(he->branch_info->to.ms.sym->name, sym));
/* Branch-stack sort key descriptors: source/target DSO and symbol. */
struct sort_entry sort_dso_from = {
	.se_header	= "Source Shared Object",
	.se_cmp		= sort__dso_from_cmp,
	.se_snprintf	= hist_entry__dso_from_snprintf,
	.se_filter	= hist_entry__dso_from_filter,
	.se_width_idx	= HISTC_DSO_FROM,

struct sort_entry sort_dso_to = {
	.se_header	= "Target Shared Object",
	.se_cmp		= sort__dso_to_cmp,
	.se_snprintf	= hist_entry__dso_to_snprintf,
	.se_filter	= hist_entry__dso_to_filter,
	.se_width_idx	= HISTC_DSO_TO,

struct sort_entry sort_sym_from = {
	.se_header	= "Source Symbol",
	.se_cmp		= sort__sym_from_cmp,
	.se_snprintf	= hist_entry__sym_from_snprintf,
	.se_filter	= hist_entry__sym_from_filter,
	.se_width_idx	= HISTC_SYMBOL_FROM,

struct sort_entry sort_sym_to = {
	.se_header	= "Target Symbol",
	.se_cmp		= sort__sym_to_cmp,
	.se_snprintf	= hist_entry__sym_to_snprintf,
	.se_filter	= hist_entry__sym_to_filter,
	.se_width_idx	= HISTC_SYMBOL_TO,
/* Format "[level] name+0xoffset" (or raw address) for a branch endpoint;
 * like _hist_entry__sym_snprintf() but without the leading IP/origin. */
static int _hist_entry__addr_snprintf(struct map_symbol *ms,
				      u64 ip, char level, char *bf, size_t size,
	struct symbol *sym = ms->sym;
	struct map *map = ms->map;
	size_t ret = 0, offs;

	ret += repsep_snprintf(bf + ret, size - ret, "[%c] ", level);

	/* data objects get a "name+0xoffset" rendering */
	if (sym->type == STT_OBJECT) {
		ret += repsep_snprintf(bf + ret, size - ret, "%s", sym->name);
		ret += repsep_snprintf(bf + ret, size - ret, "+0x%llx",
				       ip - map__unmap_ip(map, sym->start));

	ret += repsep_snprintf(bf + ret, size - ret, "%.*s",

	offs = ip - sym->start;

	ret += repsep_snprintf(bf + ret, size - ret, "+0x%llx", offs);

	/* no symbol: print the raw address, one nibble per 4 bits */
	size_t len = BITS_PER_LONG / 4;
	ret += repsep_snprintf(bf + ret, size - ret, "%-#.*llx",

static int hist_entry__addr_from_snprintf(struct hist_entry *he, char *bf,
					  size_t size, unsigned int width)
	if (he->branch_info) {
		struct addr_map_symbol *from = &he->branch_info->from;

		return _hist_entry__addr_snprintf(&from->ms, from->al_addr,
						  he->level, bf, size, width);

	return repsep_snprintf(bf, size, "%-*.*s", width, width, "N/A");

static int hist_entry__addr_to_snprintf(struct hist_entry *he, char *bf,
					size_t size, unsigned int width)
	if (he->branch_info) {
		struct addr_map_symbol *to = &he->branch_info->to;

		return _hist_entry__addr_snprintf(&to->ms, to->al_addr,
						  he->level, bf, size, width);

	return repsep_snprintf(bf, size, "%-*.*s", width, width, "N/A");

/* --sort addr_from: compare by DSO first, then by source address. */
sort__addr_from_cmp(struct hist_entry *left, struct hist_entry *right)
	struct addr_map_symbol *from_l;
	struct addr_map_symbol *from_r;

	if (!left->branch_info || !right->branch_info)
		return cmp_null(left->branch_info, right->branch_info);

	from_l = &left->branch_info->from;
	from_r = &right->branch_info->from;

	/*
	 * comparing symbol address alone is not enough since it's a
	 * relative address within a dso.
	 */
	ret = _sort__dso_cmp(from_l->ms.map, from_r->ms.map);

	return _sort__addr_cmp(from_l->addr, from_r->addr);

/* --sort addr_to: same scheme for the branch target side. */
sort__addr_to_cmp(struct hist_entry *left, struct hist_entry *right)
	struct addr_map_symbol *to_l;
	struct addr_map_symbol *to_r;

	if (!left->branch_info || !right->branch_info)
		return cmp_null(left->branch_info, right->branch_info);

	to_l = &left->branch_info->to;
	to_r = &right->branch_info->to;

	/*
	 * comparing symbol address alone is not enough since it's a
	 * relative address within a dso.
	 */
	ret = _sort__dso_cmp(to_l->ms.map, to_r->ms.map);

	return _sort__addr_cmp(to_l->addr, to_r->addr);

struct sort_entry sort_addr_from = {
	.se_header	= "Source Address",
	.se_cmp		= sort__addr_from_cmp,
	.se_snprintf	= hist_entry__addr_from_snprintf,
	.se_filter	= hist_entry__sym_from_filter, /* shared with sym_from */
	.se_width_idx	= HISTC_ADDR_FROM,

struct sort_entry sort_addr_to = {
	.se_header	= "Target Address",
	.se_cmp		= sort__addr_to_cmp,
	.se_snprintf	= hist_entry__addr_to_snprintf,
	.se_filter	= hist_entry__sym_to_filter, /* shared with sym_to */
	.se_width_idx	= HISTC_ADDR_TO,
/* --sort mispredict: order by whether the branch flags (mispred and
 * predicted bits) differ between the two entries. */
sort__mispredict_cmp(struct hist_entry *left, struct hist_entry *right)
	unsigned char mp, p;

	if (!left->branch_info || !right->branch_info)
		return cmp_null(left->branch_info, right->branch_info);

	mp = left->branch_info->flags.mispred != right->branch_info->flags.mispred;
	p  = left->branch_info->flags.predicted != right->branch_info->flags.predicted;

/* Print a prediction marker; "N/A" without branch info. */
static int hist_entry__mispredict_snprintf(struct hist_entry *he, char *bf,
					   size_t size, unsigned int width){
	static const char *out = "N/A";

	if (he->branch_info) {
		if (he->branch_info->flags.predicted)

		else if (he->branch_info->flags.mispred)

	return repsep_snprintf(bf, size, "%-*.*s", width, width, out);
/* --sort cycles: order by the basic-block cycle count from branch flags. */
sort__cycles_cmp(struct hist_entry *left, struct hist_entry *right)
	if (!left->branch_info || !right->branch_info)
		return cmp_null(left->branch_info, right->branch_info);

	return left->branch_info->flags.cycles -
	       right->branch_info->flags.cycles;

/* Print the cycle count; "-" when zero, "N/A" without branch info. */
static int hist_entry__cycles_snprintf(struct hist_entry *he, char *bf,
				       size_t size, unsigned int width)
	if (!he->branch_info)
		return scnprintf(bf, size, "%-.*s", width, "N/A");
	if (he->branch_info->flags.cycles == 0)
		return repsep_snprintf(bf, size, "%-*s", width, "-");
	return repsep_snprintf(bf, size, "%-*hd", width,
			       he->branch_info->flags.cycles);

struct sort_entry sort_cycles = {
	.se_header	= "Basic Block Cycles",
	.se_cmp		= sort__cycles_cmp,
	.se_snprintf	= hist_entry__cycles_snprintf,
	.se_width_idx	= HISTC_CYCLES,
1361 /* --sort daddr_sym */
/* Compare memory-access data addresses; a missing mem_info counts as 0. */
1363 sort__daddr_cmp(struct hist_entry *left, struct hist_entry *right)
1365 uint64_t l = 0, r = 0;
1368 l = mem_info__daddr(left->mem_info)->addr;
1369 if (right->mem_info)
1370 r = mem_info__daddr(right->mem_info)->addr;
1372 return (int64_t)(r - l);
/* Render the data address through the shared symbol formatter. */
1375 static int hist_entry__daddr_snprintf(struct hist_entry *he, char *bf,
1376 size_t size, unsigned int width)
1379 struct map_symbol *ms = NULL;
1382 addr = mem_info__daddr(he->mem_info)->addr;
1383 ms = &mem_info__daddr(he->mem_info)->ms;
1385 return _hist_entry__sym_snprintf(ms, addr, he->level, bf, size, width);
/* --sort symbol_iaddr: instruction address of the memory access. */
1389 sort__iaddr_cmp(struct hist_entry *left, struct hist_entry *right)
1391 uint64_t l = 0, r = 0;
1394 l = mem_info__iaddr(left->mem_info)->addr;
1395 if (right->mem_info)
1396 r = mem_info__iaddr(right->mem_info)->addr;
1398 return (int64_t)(r - l);
/* Print the instruction address via the shared symbol formatter. */
1401 static int hist_entry__iaddr_snprintf(struct hist_entry *he, char *bf,
1402 size_t size, unsigned int width)
1405 struct map_symbol *ms = NULL;
1408 addr = mem_info__iaddr(he->mem_info)->addr;
1409 ms = &mem_info__iaddr(he->mem_info)->ms;
1411 return _hist_entry__sym_snprintf(ms, addr, he->level, bf, size, width);
/* --sort dso_daddr: the DSO whose map backs the data address. */
1415 sort__dso_daddr_cmp(struct hist_entry *left, struct hist_entry *right)
1417 struct map *map_l = NULL;
1418 struct map *map_r = NULL;
1421 map_l = mem_info__daddr(left->mem_info)->ms.map;
1422 if (right->mem_info)
1423 map_r = mem_info__daddr(right->mem_info)->ms.map;
1425 return _sort__dso_cmp(map_l, map_r);
/* Print the data DSO name via the shared DSO formatter. */
1428 static int hist_entry__dso_daddr_snprintf(struct hist_entry *he, char *bf,
1429 size_t size, unsigned int width)
1431 struct map *map = NULL;
1434 map = mem_info__daddr(he->mem_info)->ms.map;
1436 return _hist_entry__dso_snprintf(map, bf, size, width);
/*
 * --sort locked: compare the mem_lock bits of the data source; sides
 * without mem_info are normalized to PERF_MEM_LOCK_NA.
 */
1440 sort__locked_cmp(struct hist_entry *left, struct hist_entry *right)
1442 union perf_mem_data_src data_src_l;
1443 union perf_mem_data_src data_src_r;
1446 data_src_l = *mem_info__data_src(left->mem_info);
1448 data_src_l.mem_lock = PERF_MEM_LOCK_NA;
1450 if (right->mem_info)
1451 data_src_r = *mem_info__data_src(right->mem_info);
1453 data_src_r.mem_lock = PERF_MEM_LOCK_NA;
1455 return (int64_t)(data_src_r.mem_lock - data_src_l.mem_lock);
/* Decode the lock bits into text via perf_mem__lck_scnprintf(). */
1458 static int hist_entry__locked_snprintf(struct hist_entry *he, char *bf,
1459 size_t size, unsigned int width)
1463 perf_mem__lck_scnprintf(out, sizeof(out), he->mem_info);
1464 return repsep_snprintf(bf, size, "%.*s", width, out);
/*
 * --sort tlb: compare mem_dtlb bits; sides without mem_info are
 * normalized to PERF_MEM_TLB_NA.
 */
1468 sort__tlb_cmp(struct hist_entry *left, struct hist_entry *right)
1470 union perf_mem_data_src data_src_l;
1471 union perf_mem_data_src data_src_r;
1474 data_src_l = *mem_info__data_src(left->mem_info);
1476 data_src_l.mem_dtlb = PERF_MEM_TLB_NA;
1478 if (right->mem_info)
1479 data_src_r = *mem_info__data_src(right->mem_info);
1481 data_src_r.mem_dtlb = PERF_MEM_TLB_NA;
1483 return (int64_t)(data_src_r.mem_dtlb - data_src_l.mem_dtlb);
/* Decode the TLB access bits into text via perf_mem__tlb_scnprintf(). */
1486 static int hist_entry__tlb_snprintf(struct hist_entry *he, char *bf,
1487 size_t size, unsigned int width)
1491 perf_mem__tlb_scnprintf(out, sizeof(out), he->mem_info);
1492 return repsep_snprintf(bf, size, "%-*s", width, out);
/*
 * --sort mem: compare mem_lvl (memory hierarchy level) bits; sides
 * without mem_info are normalized to PERF_MEM_LVL_NA.
 */
1496 sort__lvl_cmp(struct hist_entry *left, struct hist_entry *right)
1498 union perf_mem_data_src data_src_l;
1499 union perf_mem_data_src data_src_r;
1502 data_src_l = *mem_info__data_src(left->mem_info);
1504 data_src_l.mem_lvl = PERF_MEM_LVL_NA;
1506 if (right->mem_info)
1507 data_src_r = *mem_info__data_src(right->mem_info);
1509 data_src_r.mem_lvl = PERF_MEM_LVL_NA;
1511 return (int64_t)(data_src_r.mem_lvl - data_src_l.mem_lvl);
/* Decode the level bits into text via perf_mem__lvl_scnprintf(). */
1514 static int hist_entry__lvl_snprintf(struct hist_entry *he, char *bf,
1515 size_t size, unsigned int width)
1519 perf_mem__lvl_scnprintf(out, sizeof(out), he->mem_info);
1520 return repsep_snprintf(bf, size, "%-*s", width, out);
/*
 * --sort snoop: compare mem_snoop bits; sides without mem_info are
 * normalized to PERF_MEM_SNOOP_NA.
 */
1524 sort__snoop_cmp(struct hist_entry *left, struct hist_entry *right)
1526 union perf_mem_data_src data_src_l;
1527 union perf_mem_data_src data_src_r;
1530 data_src_l = *mem_info__data_src(left->mem_info);
1532 data_src_l.mem_snoop = PERF_MEM_SNOOP_NA;
1534 if (right->mem_info)
1535 data_src_r = *mem_info__data_src(right->mem_info);
1537 data_src_r.mem_snoop = PERF_MEM_SNOOP_NA;
1539 return (int64_t)(data_src_r.mem_snoop - data_src_l.mem_snoop);
/* Decode the snoop bits into text via perf_mem__snp_scnprintf(). */
1542 static int hist_entry__snoop_snprintf(struct hist_entry *he, char *bf,
1543 size_t size, unsigned int width)
1547 perf_mem__snp_scnprintf(out, sizeof(out), he->mem_info);
1548 return repsep_snprintf(bf, size, "%-*s", width, out);
/*
 * --sort dcacheline: group samples hitting the same data cacheline.
 * Ordering: entries with mem_info first, then cpumode, then DSO identity;
 * anonymous userspace mappings additionally sort by pid before the
 * cacheline address.  Several fallthrough/return lines are elided in
 * this listing.
 */
1552 sort__dcacheline_cmp(struct hist_entry *left, struct hist_entry *right)
1555 struct map *l_map, *r_map;
1556 struct dso *l_dso, *r_dso;
1559 if (!left->mem_info) return -1;
1560 if (!right->mem_info) return 1;
1562 /* group event types together */
1563 if (left->cpumode > right->cpumode) return -1;
1564 if (left->cpumode < right->cpumode) return 1;
1566 l_map = mem_info__daddr(left->mem_info)->ms.map;
1567 r_map = mem_info__daddr(right->mem_info)->ms.map;
1569 /* if both are NULL, jump to sort on al_addr instead */
1570 if (!l_map && !r_map)
1573 if (!l_map) return -1;
1574 if (!r_map) return 1;
1576 l_dso = map__dso(l_map);
1577 r_dso = map__dso(r_map);
1578 rc = dso__cmp_id(l_dso, r_dso);
1582 * Addresses with no major/minor numbers are assumed to be
1583 * anonymous in userspace. Sort those on pid then address.
1585 * The kernel and non-zero major/minor mapped areas are
1586 * assumed to be unity mapped. Sort those on address.
1589 if ((left->cpumode != PERF_RECORD_MISC_KERNEL) &&
1590 (!(map__flags(l_map) & MAP_SHARED)) && !dso__id(l_dso)->maj && !dso__id(l_dso)->min &&
1591 !dso__id(l_dso)->ino && !dso__id(l_dso)->ino_generation) {
1592 /* userspace anonymous */
1594 if (thread__pid(left->thread) > thread__pid(right->thread))
1596 if (thread__pid(left->thread) < thread__pid(right->thread))
1601 /* al_addr does all the right addr - start + offset calculations */
1602 l = cl_address(mem_info__daddr(left->mem_info)->al_addr, chk_double_cl);
1603 r = cl_address(mem_info__daddr(right->mem_info)->al_addr, chk_double_cl);
1605 if (l > r) return -1;
1606 if (l < r) return 1;
/*
 * Print the cacheline address for an entry; the branch taken for shared,
 * non-executable, file-backed userspace mmaps tags the line (marker
 * assignment elided in this listing).
 */
1611 static int hist_entry__dcacheline_snprintf(struct hist_entry *he, char *bf,
1612 size_t size, unsigned int width)
1616 struct map_symbol *ms = NULL;
1617 char level = he->level;
1620 struct map *map = mem_info__daddr(he->mem_info)->ms.map;
1621 struct dso *dso = map ? map__dso(map) : NULL;
1623 addr = cl_address(mem_info__daddr(he->mem_info)->al_addr, chk_double_cl);
1624 ms = &mem_info__daddr(he->mem_info)->ms;
1626 /* print [s] for shared data mmaps */
1627 if ((he->cpumode != PERF_RECORD_MISC_KERNEL) &&
1628 map && !(map__prot(map) & PROT_EXEC) &&
1629 (map__flags(map) & MAP_SHARED) &&
1630 (dso__id(dso)->maj || dso__id(dso)->min || dso__id(dso)->ino ||
1631 dso__id(dso)->ino_generation))
1636 return _hist_entry__sym_snprintf(ms, addr, level, bf, size, width);
/* sort_entry for the branch-stack "mispredict" key. */
1639 struct sort_entry sort_mispredict = {
1640 .se_header = "Branch Mispredicted",
1641 .se_cmp = sort__mispredict_cmp,
1642 .se_snprintf = hist_entry__mispredict_snprintf,
1643 .se_width_idx = HISTC_MISPREDICT,
/*
 * --sort local_weight / weight: per-sample weight; the global "weight"
 * key scales by the entry's event count when printing.
 */
1647 sort__weight_cmp(struct hist_entry *left, struct hist_entry *right)
1649 return left->weight - right->weight;
1652 static int hist_entry__local_weight_snprintf(struct hist_entry *he, char *bf,
1653 size_t size, unsigned int width)
1655 return repsep_snprintf(bf, size, "%-*llu", width, he->weight);
1658 struct sort_entry sort_local_weight = {
1659 .se_header = "Local Weight",
1660 .se_cmp = sort__weight_cmp,
1661 .se_snprintf = hist_entry__local_weight_snprintf,
1662 .se_width_idx = HISTC_LOCAL_WEIGHT,
/* Global variant: weight multiplied by the number of aggregated events. */
1665 static int hist_entry__global_weight_snprintf(struct hist_entry *he, char *bf,
1666 size_t size, unsigned int width)
1668 return repsep_snprintf(bf, size, "%-*llu", width,
1669 he->weight * he->stat.nr_events);
1672 struct sort_entry sort_global_weight = {
1673 .se_header = "Weight",
1674 .se_cmp = sort__weight_cmp,
1675 .se_snprintf = hist_entry__global_weight_snprintf,
1676 .se_width_idx = HISTC_GLOBAL_WEIGHT,
/*
 * --sort local_ins_lat / ins_lat: instruction latency; the global key
 * scales by the entry's event count when printing.
 */
1680 sort__ins_lat_cmp(struct hist_entry *left, struct hist_entry *right)
1682 return left->ins_lat - right->ins_lat;
1685 static int hist_entry__local_ins_lat_snprintf(struct hist_entry *he, char *bf,
1686 size_t size, unsigned int width)
1688 return repsep_snprintf(bf, size, "%-*u", width, he->ins_lat)
1691 struct sort_entry sort_local_ins_lat = {
1692 .se_header = "Local INSTR Latency",
1693 .se_cmp = sort__ins_lat_cmp,
1694 .se_snprintf = hist_entry__local_ins_lat_snprintf,
1695 .se_width_idx = HISTC_LOCAL_INS_LAT,
/* Global variant: latency multiplied by the number of aggregated events. */
1698 static int hist_entry__global_ins_lat_snprintf(struct hist_entry *he, char *bf,
1699 size_t size, unsigned int width)
1701 return repsep_snprintf(bf, size, "%-*u", width,
1702 he->ins_lat * he->stat.nr_events);
1705 struct sort_entry sort_global_ins_lat = {
1706 .se_header = "INSTR Latency",
1707 .se_cmp = sort__ins_lat_cmp,
1708 .se_snprintf = hist_entry__global_ins_lat_snprintf,
1709 .se_width_idx = HISTC_GLOBAL_INS_LAT,
/*
 * --sort local_p_stage_cyc / p_stage_cyc (also aliased as retire_lat):
 * pipeline-stage cycles; the global key scales by event count.
 */
1713 sort__p_stage_cyc_cmp(struct hist_entry *left, struct hist_entry *right)
1715 return left->p_stage_cyc - right->p_stage_cyc;
1718 static int hist_entry__global_p_stage_cyc_snprintf(struct hist_entry *he, char *bf,
1719 size_t size, unsigned int width)
1721 return repsep_snprintf(bf, size, "%-*u", width,
1722 he->p_stage_cyc * he->stat.nr_events);
1726 static int hist_entry__p_stage_cyc_snprintf(struct hist_entry *he, char *bf,
1727 size_t size, unsigned int width)
1729 return repsep_snprintf(bf, size, "%-*u", width, he->p_stage_cyc);
1732 struct sort_entry sort_local_p_stage_cyc = {
1733 .se_header = "Local Pipeline Stage Cycle",
1734 .se_cmp = sort__p_stage_cyc_cmp,
1735 .se_snprintf = hist_entry__p_stage_cyc_snprintf,
1736 .se_width_idx = HISTC_LOCAL_P_STAGE_CYC,
1739 struct sort_entry sort_global_p_stage_cyc = {
1740 .se_header = "Pipeline Stage Cycle",
1741 .se_cmp = sort__p_stage_cyc_cmp,
1742 .se_snprintf = hist_entry__global_p_stage_cyc_snprintf,
1743 .se_width_idx = HISTC_GLOBAL_P_STAGE_CYC,
/* sort_entry tables for the memory-mode sort keys registered below. */
1746 struct sort_entry sort_mem_daddr_sym = {
1747 .se_header = "Data Symbol",
1748 .se_cmp = sort__daddr_cmp,
1749 .se_snprintf = hist_entry__daddr_snprintf,
1750 .se_width_idx = HISTC_MEM_DADDR_SYMBOL,
1753 struct sort_entry sort_mem_iaddr_sym = {
1754 .se_header = "Code Symbol",
1755 .se_cmp = sort__iaddr_cmp,
1756 .se_snprintf = hist_entry__iaddr_snprintf,
1757 .se_width_idx = HISTC_MEM_IADDR_SYMBOL,
1760 struct sort_entry sort_mem_daddr_dso = {
1761 .se_header = "Data Object",
1762 .se_cmp = sort__dso_daddr_cmp,
1763 .se_snprintf = hist_entry__dso_daddr_snprintf,
1764 .se_width_idx = HISTC_MEM_DADDR_DSO,
1767 struct sort_entry sort_mem_locked = {
1768 .se_header = "Locked",
1769 .se_cmp = sort__locked_cmp,
1770 .se_snprintf = hist_entry__locked_snprintf,
1771 .se_width_idx = HISTC_MEM_LOCKED,
1774 struct sort_entry sort_mem_tlb = {
1775 .se_header = "TLB access",
1776 .se_cmp = sort__tlb_cmp,
1777 .se_snprintf = hist_entry__tlb_snprintf,
1778 .se_width_idx = HISTC_MEM_TLB,
1781 struct sort_entry sort_mem_lvl = {
1782 .se_header = "Memory access",
1783 .se_cmp = sort__lvl_cmp,
1784 .se_snprintf = hist_entry__lvl_snprintf,
1785 .se_width_idx = HISTC_MEM_LVL,
1788 struct sort_entry sort_mem_snoop = {
1789 .se_header = "Snoop",
1790 .se_cmp = sort__snoop_cmp,
1791 .se_snprintf = hist_entry__snoop_snprintf,
1792 .se_width_idx = HISTC_MEM_SNOOP,
1795 struct sort_entry sort_mem_dcacheline = {
1796 .se_header = "Data Cacheline",
1797 .se_cmp = sort__dcacheline_cmp,
1798 .se_snprintf = hist_entry__dcacheline_snprintf,
1799 .se_width_idx = HISTC_MEM_DCACHELINE,
/*
 * --sort blocked: compare mem_blk bits; sides without mem_info are
 * normalized to PERF_MEM_BLK_NA.
 */
1803 sort__blocked_cmp(struct hist_entry *left, struct hist_entry *right)
1805 union perf_mem_data_src data_src_l;
1806 union perf_mem_data_src data_src_r;
1809 data_src_l = *mem_info__data_src(left->mem_info);
1811 data_src_l.mem_blk = PERF_MEM_BLK_NA;
1813 if (right->mem_info)
1814 data_src_r = *mem_info__data_src(right->mem_info);
1816 data_src_r.mem_blk = PERF_MEM_BLK_NA;
1818 return (int64_t)(data_src_r.mem_blk - data_src_l.mem_blk);
/* Decode the block bits into text via perf_mem__blk_scnprintf(). */
1821 static int hist_entry__blocked_snprintf(struct hist_entry *he, char *bf,
1822 size_t size, unsigned int width)
1826 perf_mem__blk_scnprintf(out, sizeof(out), he->mem_info);
1827 return repsep_snprintf(bf, size, "%.*s", width, out);
1830 struct sort_entry sort_mem_blocked = {
1831 .se_header = "Blocked",
1832 .se_cmp = sort__blocked_cmp,
1833 .se_snprintf = hist_entry__blocked_snprintf,
1834 .se_width_idx = HISTC_MEM_BLOCKED,
/* --sort phys_daddr: physical data address of the memory access. */
1838 sort__phys_daddr_cmp(struct hist_entry *left, struct hist_entry *right)
1840 uint64_t l = 0, r = 0;
1843 l = mem_info__daddr(left->mem_info)->phys_addr;
1844 if (right->mem_info)
1845 r = mem_info__daddr(right->mem_info)->phys_addr;
1847 return (int64_t)(r - l);
/*
 * Print "[level] 0xADDR" padded to the column width; len caps the hex
 * digits at the native pointer width (BITS_PER_LONG / 4 nibbles).
 */
1850 static int hist_entry__phys_daddr_snprintf(struct hist_entry *he, char *bf,
1851 size_t size, unsigned int width)
1855 size_t len = BITS_PER_LONG / 4;
1857 addr = mem_info__daddr(he->mem_info)->phys_addr;
1859 ret += repsep_snprintf(bf + ret, size - ret, "[%c] ", he->level);
1861 ret += repsep_snprintf(bf + ret, size - ret, "%-#.*llx", len, addr);
1863 ret += repsep_snprintf(bf + ret, size - ret, "%-*s", width - ret, "");
1871 struct sort_entry sort_mem_phys_daddr = {
1872 .se_header = "Data Physical Address",
1873 .se_cmp = sort__phys_daddr_cmp,
1874 .se_snprintf = hist_entry__phys_daddr_snprintf,
1875 .se_width_idx = HISTC_MEM_PHYS_DADDR,
/* --sort data_page_size: page size backing the data address. */
1879 sort__data_page_size_cmp(struct hist_entry *left, struct hist_entry *right)
1881 uint64_t l = 0, r = 0;
1884 l = mem_info__daddr(left->mem_info)->data_page_size;
1885 if (right->mem_info)
1886 r = mem_info__daddr(right->mem_info)->data_page_size;
1888 return (int64_t)(r - l);
/* Print as a human-readable size name ("4K", "2M", ...). */
1891 static int hist_entry__data_page_size_snprintf(struct hist_entry *he, char *bf,
1892 size_t size, unsigned int width)
1894 char str[PAGE_SIZE_NAME_LEN];
1896 return repsep_snprintf(bf, size, "%-*s", width,
1897 get_page_size_name(mem_info__daddr(he->mem_info)->data_page_size, str));
1900 struct sort_entry sort_mem_data_page_size = {
1901 .se_header = "Data Page Size",
1902 .se_cmp = sort__data_page_size_cmp,
1903 .se_snprintf = hist_entry__data_page_size_snprintf,
1904 .se_width_idx = HISTC_MEM_DATA_PAGE_SIZE,
/* --sort code_page_size: page size backing the sampled instruction. */
1908 sort__code_page_size_cmp(struct hist_entry *left, struct hist_entry *right)
1910 uint64_t l = left->code_page_size;
1911 uint64_t r = right->code_page_size;
1913 return (int64_t)(r - l);
/* Print as a human-readable size name ("4K", "2M", ...). */
1916 static int hist_entry__code_page_size_snprintf(struct hist_entry *he, char *bf,
1917 size_t size, unsigned int width)
1919 char str[PAGE_SIZE_NAME_LEN];
1921 return repsep_snprintf(bf, size, "%-*s", width,
1922 get_page_size_name(he->code_page_size, str));
1925 struct sort_entry sort_code_page_size = {
1926 .se_header = "Code Page Size",
1927 .se_cmp = sort__code_page_size_cmp,
1928 .se_snprintf = hist_entry__code_page_size_snprintf,
1929 .se_width_idx = HISTC_CODE_PAGE_SIZE,
/* --sort abort: transaction-abort flag from the branch record. */
1933 sort__abort_cmp(struct hist_entry *left, struct hist_entry *right)
1935 if (!left->branch_info || !right->branch_info)
1936 return cmp_null(left->branch_info, right->branch_info);
1938 return left->branch_info->flags.abort !=
1939 right->branch_info->flags.abort;
/*
 * "N/A" without branch info; otherwise a marker picked from flags.abort
 * (assignment lines elided in this listing).
 */
1942 static int hist_entry__abort_snprintf(struct hist_entry *he, char *bf,
1943 size_t size, unsigned int width)
1945 static const char *out = "N/A";
1947 if (he->branch_info) {
1948 if (he->branch_info->flags.abort)
1954 return repsep_snprintf(bf, size, "%-*s", width, out);
1957 struct sort_entry sort_abort = {
1958 .se_header = "Transaction abort",
1959 .se_cmp = sort__abort_cmp,
1960 .se_snprintf = hist_entry__abort_snprintf,
1961 .se_width_idx = HISTC_ABORT,
/* --sort in_tx: whether the branch executed inside a transaction. */
1965 sort__in_tx_cmp(struct hist_entry *left, struct hist_entry *right)
1967 if (!left->branch_info || !right->branch_info)
1968 return cmp_null(left->branch_info, right->branch_info);
1970 return left->branch_info->flags.in_tx !=
1971 right->branch_info->flags.in_tx;
/*
 * "N/A" without branch info; otherwise a marker picked from flags.in_tx
 * (assignment lines elided in this listing).
 */
1974 static int hist_entry__in_tx_snprintf(struct hist_entry *he, char *bf,
1975 size_t size, unsigned int width)
1977 static const char *out = "N/A";
1979 if (he->branch_info) {
1980 if (he->branch_info->flags.in_tx)
1986 return repsep_snprintf(bf, size, "%-*s", width, out);
1989 struct sort_entry sort_in_tx = {
1990 .se_header = "Branch in transaction",
1991 .se_cmp = sort__in_tx_cmp,
1992 .se_snprintf = hist_entry__in_tx_snprintf,
1993 .se_width_idx = HISTC_IN_TX,
/* --sort transaction: decode the PERF_TXN_* flag word into a text column. */
1997 sort__transaction_cmp(struct hist_entry *left, struct hist_entry *right)
1999 return left->transaction - right->transaction;
/* Append str at p and return the new end-of-string position. */
2002 static inline char *add_str(char *p, const char *str)
2005 return p + strlen(str);
/* Flag -> printable-name table; skip_for_len excludes entries that cannot
 * co-occur from the worst-case width calculation. */
2008 static struct txbit {
2013 { PERF_TXN_ELISION, "EL ", 0 },
2014 { PERF_TXN_TRANSACTION, "TX ", 1 },
2015 { PERF_TXN_SYNC, "SYNC ", 1 },
2016 { PERF_TXN_ASYNC, "ASYNC ", 0 },
2017 { PERF_TXN_RETRY, "RETRY ", 0 },
2018 { PERF_TXN_CONFLICT, "CON ", 0 },
2019 { PERF_TXN_CAPACITY_WRITE, "CAP-WRITE ", 1 },
2020 { PERF_TXN_CAPACITY_READ, "CAP-READ ", 0 },
/* Worst-case printed width of the transaction column. */
2024 int hist_entry__transaction_len(void)
2029 for (i = 0; txbits[i].name; i++) {
2030 if (!txbits[i].skip_for_len)
2031 len += strlen(txbits[i].name);
2033 len += 4; /* :XX<space> */
/* Concatenate the names of all set flags, note SYNC/ASYNC absence as
 * "NEITHER ", and append the abort code as hex when present. */
2037 static int hist_entry__transaction_snprintf(struct hist_entry *he, char *bf,
2038 size_t size, unsigned int width)
2040 u64 t = he->transaction;
2046 for (i = 0; txbits[i].name; i++)
2047 if (txbits[i].flag & t)
2048 p = add_str(p, txbits[i].name);
2049 if (t && !(t & (PERF_TXN_SYNC|PERF_TXN_ASYNC)))
2050 p = add_str(p, "NEITHER ");
2051 if (t & PERF_TXN_ABORT_MASK) {
2052 sprintf(p, ":%" PRIx64,
2053 (t & PERF_TXN_ABORT_MASK) >>
2054 PERF_TXN_ABORT_SHIFT);
2058 return repsep_snprintf(bf, size, "%-*s", width, buf);
2061 struct sort_entry sort_transaction = {
2062 .se_header = "Transaction ",
2063 .se_cmp = sort__transaction_cmp,
2064 .se_snprintf = hist_entry__transaction_snprintf,
2065 .se_width_idx = HISTC_TRANSACTION,
2068 /* --sort symbol_size */
/* Compare symbol sizes; a NULL symbol counts as size 0. */
2070 static int64_t _sort__sym_size_cmp(struct symbol *sym_l, struct symbol *sym_r)
2072 int64_t size_l = sym_l != NULL ? symbol__size(sym_l) : 0;
2073 int64_t size_r = sym_r != NULL ? symbol__size(sym_r) : 0;
2075 return size_l < size_r ? -1 :
2076 size_l == size_r ? 0 : 1;
/* Arguments swapped so larger symbols sort first. */
2080 sort__sym_size_cmp(struct hist_entry *left, struct hist_entry *right)
2082 return _sort__sym_size_cmp(right->ms.sym, left->ms.sym);
/* Print the size, or "unknown" when there is no resolved symbol. */
2085 static int _hist_entry__sym_size_snprintf(struct symbol *sym, char *bf,
2086 size_t bf_size, unsigned int width)
2089 return repsep_snprintf(bf, bf_size, "%*d", width, symbol__size(sym));
2091 return repsep_snprintf(bf, bf_size, "%*s", width, "unknown");
2094 static int hist_entry__sym_size_snprintf(struct hist_entry *he, char *bf,
2095 size_t size, unsigned int width)
2097 return _hist_entry__sym_size_snprintf(he->ms.sym, bf, size, width);
2100 struct sort_entry sort_sym_size = {
2101 .se_header = "Symbol size",
2102 .se_cmp = sort__sym_size_cmp,
2103 .se_snprintf = hist_entry__sym_size_snprintf,
2104 .se_width_idx = HISTC_SYM_SIZE,
2107 /* --sort dso_size */
/* Compare mapped sizes; a NULL map counts as size 0. */
2109 static int64_t _sort__dso_size_cmp(struct map *map_l, struct map *map_r)
2111 int64_t size_l = map_l != NULL ? map__size(map_l) : 0;
2112 int64_t size_r = map_r != NULL ? map__size(map_r) : 0;
2114 return size_l < size_r ? -1 :
2115 size_l == size_r ? 0 : 1;
/* Arguments swapped so larger DSOs sort first. */
2119 sort__dso_size_cmp(struct hist_entry *left, struct hist_entry *right)
2121 return _sort__dso_size_cmp(right->ms.map, left->ms.map);
/* Print the map size, or "unknown" when no DSO is attached. */
2124 static int _hist_entry__dso_size_snprintf(struct map *map, char *bf,
2125 size_t bf_size, unsigned int width)
2127 if (map && map__dso(map))
2128 return repsep_snprintf(bf, bf_size, "%*d", width, map__size(map));
2130 return repsep_snprintf(bf, bf_size, "%*s", width, "unknown");
2133 static int hist_entry__dso_size_snprintf(struct hist_entry *he, char *bf,
2134 size_t size, unsigned int width)
2136 return _hist_entry__dso_size_snprintf(he->ms.map, bf, size, width);
2139 struct sort_entry sort_dso_size = {
2140 .se_header = "DSO size",
2141 .se_cmp = sort__dso_size_cmp,
2142 .se_snprintf = hist_entry__dso_size_snprintf,
2143 .se_width_idx = HISTC_DSO_SIZE,
/*
 * --sort addr: compare/print sample IPs un-mapped back to file/absolute
 * addresses when a map is available.
 */
2149 sort__addr_cmp(struct hist_entry *left, struct hist_entry *right)
2151 u64 left_ip = left->ip;
2152 u64 right_ip = right->ip;
2153 struct map *left_map = left->ms.map;
2154 struct map *right_map = right->ms.map;
2157 left_ip = map__unmap_ip(left_map, left_ip);
2159 right_ip = map__unmap_ip(right_map, right_ip);
2161 return _sort__addr_cmp(left_ip, right_ip);
2164 static int hist_entry__addr_snprintf(struct hist_entry *he, char *bf,
2165 size_t size, unsigned int width)
2168 struct map *map = he->ms.map;
2171 ip = map__unmap_ip(map, ip);
2173 return repsep_snprintf(bf, size, "%-#*llx", width, ip);
2176 struct sort_entry sort_addr = {
2177 .se_header = "Address",
2178 .se_cmp = sort__addr_cmp,
2179 .se_snprintf = hist_entry__addr_snprintf,
2180 .se_width_idx = HISTC_ADDR,
/* Fallback data type used when annotate-data type resolution fails. */
2185 struct annotated_data_type unknown_type = {
2187 .type_name = (char *)"(unknown)",
2188 .children = LIST_HEAD_INIT(unknown_type.self.children),
/* --sort type: initial compare by address; collapse/sort by type name. */
2193 sort__type_cmp(struct hist_entry *left, struct hist_entry *right)
2195 return sort__addr_cmp(left, right);
/* Lazily resolve the entry's data type, defaulting to unknown_type. */
2198 static void sort__type_init(struct hist_entry *he)
2203 he->mem_type = hist_entry__get_data_type(he);
2204 if (he->mem_type == NULL) {
2205 he->mem_type = &unknown_type;
2206 he->mem_type_off = 0;
/* Collapse entries whose resolved type names compare equal. */
2211 sort__type_collapse(struct hist_entry *left, struct hist_entry *right)
2213 struct annotated_data_type *left_type = left->mem_type;
2214 struct annotated_data_type *right_type = right->mem_type;
2217 sort__type_init(left);
2218 left_type = left->mem_type;
2222 sort__type_init(right);
2223 right_type = right->mem_type;
2226 return strcmp(left_type->self.type_name, right_type->self.type_name);
2230 sort__type_sort(struct hist_entry *left, struct hist_entry *right)
2232 return sort__type_collapse(left, right);
2235 static int hist_entry__type_snprintf(struct hist_entry *he, char *bf,
2236 size_t size, unsigned int width)
2238 return repsep_snprintf(bf, size, "%-*s", width, he->mem_type->self.type_name);
2241 struct sort_entry sort_type = {
2242 .se_header = "Data Type",
2243 .se_cmp = sort__type_cmp,
2244 .se_collapse = sort__type_collapse,
2245 .se_sort = sort__type_sort,
2246 .se_init = sort__type_init,
2247 .se_snprintf = hist_entry__type_snprintf,
2248 .se_width_idx = HISTC_TYPE,
2251 /* --sort typeoff */
/* Order by type name first, then by member offset within the type. */
2254 sort__typeoff_sort(struct hist_entry *left, struct hist_entry *right)
2256 struct annotated_data_type *left_type = left->mem_type;
2257 struct annotated_data_type *right_type = right->mem_type;
2261 sort__type_init(left);
2262 left_type = left->mem_type;
2266 sort__type_init(right);
2267 right_type = right->mem_type;
2270 ret = strcmp(left_type->self.type_name, right_type->self.type_name);
2273 return left->mem_type_off - right->mem_type_off;
/*
 * Build a dotted "a.b.c" member path for the given byte offset,
 * recursing into anonymous struct/union members which have no var_name.
 */
2276 static void fill_member_name(char *buf, size_t sz, struct annotated_member *m,
2277 int offset, bool first)
2279 struct annotated_member *child;
2281 if (list_empty(&m->children))
2284 list_for_each_entry(child, &m->children, node) {
2285 if (child->offset <= offset && offset < child->offset + child->size) {
2288 /* It can have anonymous struct/union members */
2289 if (child->var_name) {
2290 len = scnprintf(buf, sz, "%s%s",
2291 first ? "" : ".", child->var_name);
2295 fill_member_name(buf + len, sz - len, child, offset, first);
/* Print "type +0xOFF (member.path)" ("no field" when type is opaque). */
2301 static int hist_entry__typeoff_snprintf(struct hist_entry *he, char *bf,
2302 size_t size, unsigned int width __maybe_unused)
2304 struct annotated_data_type *he_type = he->mem_type;
2308 if (list_empty(&he_type->self.children))
2309 snprintf(buf, sizeof(buf), "no field");
2311 fill_member_name(buf, sizeof(buf), &he_type->self,
2312 he->mem_type_off, true);
2315 return repsep_snprintf(bf, size, "%s +%#x (%s)", he_type->self.type_name,
2316 he->mem_type_off, buf);
2319 struct sort_entry sort_type_offset = {
2320 .se_header = "Data Type Offset",
2321 .se_cmp = sort__type_cmp,
2322 .se_collapse = sort__typeoff_sort,
2323 .se_sort = sort__typeoff_sort,
2324 .se_init = sort__type_init,
2325 .se_snprintf = hist_entry__typeoff_snprintf,
2326 .se_width_idx = HISTC_TYPE_OFFSET,
2329 /* --sort typecln */
2331 /* TODO: use actual value in the system */
2332 #define TYPE_CACHELINE_SIZE 64
/* Order by type name, then by which cacheline the offset falls in. */
2335 sort__typecln_sort(struct hist_entry *left, struct hist_entry *right)
2337 struct annotated_data_type *left_type = left->mem_type;
2338 struct annotated_data_type *right_type = right->mem_type;
2339 int64_t left_cln, right_cln;
2343 sort__type_init(left);
2344 left_type = left->mem_type;
2348 sort__type_init(right);
2349 right_type = right->mem_type;
2352 ret = strcmp(left_type->self.type_name, right_type->self.type_name);
2356 left_cln = left->mem_type_off / TYPE_CACHELINE_SIZE;
2357 right_cln = right->mem_type_off / TYPE_CACHELINE_SIZE;
2358 return left_cln - right_cln;
/* Print "type: cache-line N". */
2361 static int hist_entry__typecln_snprintf(struct hist_entry *he, char *bf,
2362 size_t size, unsigned int width __maybe_unused)
2364 struct annotated_data_type *he_type = he->mem_type;
2366 return repsep_snprintf(bf, size, "%s: cache-line %d", he_type->self.type_name,
2367 he->mem_type_off / TYPE_CACHELINE_SIZE);
2370 struct sort_entry sort_type_cacheline = {
2371 .se_header = "Data Type Cacheline",
2372 .se_cmp = sort__type_cmp,
2373 .se_collapse = sort__typecln_sort,
2374 .se_sort = sort__typecln_sort,
2375 .se_init = sort__type_init,
2376 .se_snprintf = hist_entry__typecln_snprintf,
2377 .se_width_idx = HISTC_TYPE_CACHELINE,
/* Maps a sort-key name from the command line to its sort_entry. */
2381 struct sort_dimension {
2383 struct sort_entry *entry;
/* Weak arch hooks: report support for / rename arch-specific sort keys. */
2387 int __weak arch_support_sort_key(const char *sort_key __maybe_unused)
2392 const char * __weak arch_perf_header_entry(const char *se_header)
2397 static void sort_dimension_add_dynamic_header(struct sort_dimension *sd)
2399 sd->entry->se_header = arch_perf_header_entry(sd->entry->se_header);
2402 #define DIM(d, n, func) [d] = { .name = n, .entry = &(func) }
/* Sort keys available in every mode (--sort names). */
2404 static struct sort_dimension common_sort_dimensions[] = {
2405 DIM(SORT_PID, "pid", sort_thread),
2406 DIM(SORT_COMM, "comm", sort_comm),
2407 DIM(SORT_DSO, "dso", sort_dso),
2408 DIM(SORT_SYM, "symbol", sort_sym),
2409 DIM(SORT_PARENT, "parent", sort_parent),
2410 DIM(SORT_CPU, "cpu", sort_cpu),
2411 DIM(SORT_SOCKET, "socket", sort_socket),
2412 DIM(SORT_SRCLINE, "srcline", sort_srcline),
2413 DIM(SORT_SRCFILE, "srcfile", sort_srcfile),
2414 DIM(SORT_LOCAL_WEIGHT, "local_weight", sort_local_weight),
2415 DIM(SORT_GLOBAL_WEIGHT, "weight", sort_global_weight),
2416 DIM(SORT_TRANSACTION, "transaction", sort_transaction),
2417 #ifdef HAVE_LIBTRACEEVENT
2418 DIM(SORT_TRACE, "trace", sort_trace),
2420 DIM(SORT_SYM_SIZE, "symbol_size", sort_sym_size),
2421 DIM(SORT_DSO_SIZE, "dso_size", sort_dso_size),
2422 DIM(SORT_CGROUP, "cgroup", sort_cgroup),
2423 DIM(SORT_CGROUP_ID, "cgroup_id", sort_cgroup_id),
2424 DIM(SORT_SYM_IPC_NULL, "ipc_null", sort_sym_ipc_null),
2425 DIM(SORT_TIME, "time", sort_time),
2426 DIM(SORT_CODE_PAGE_SIZE, "code_page_size", sort_code_page_size),
2427 DIM(SORT_LOCAL_INS_LAT, "local_ins_lat", sort_local_ins_lat),
2428 DIM(SORT_GLOBAL_INS_LAT, "ins_lat", sort_global_ins_lat),
2429 DIM(SORT_LOCAL_PIPELINE_STAGE_CYC, "local_p_stage_cyc", sort_local_p_stage_cyc),
2430 DIM(SORT_GLOBAL_PIPELINE_STAGE_CYC, "p_stage_cyc", sort_global_p_stage_cyc),
2431 DIM(SORT_ADDR, "addr", sort_addr),
2432 DIM(SORT_LOCAL_RETIRE_LAT, "local_retire_lat", sort_local_p_stage_cyc),
2433 DIM(SORT_GLOBAL_RETIRE_LAT, "retire_lat", sort_global_p_stage_cyc),
2434 DIM(SORT_SIMD, "simd", sort_simd),
2435 DIM(SORT_ANNOTATE_DATA_TYPE, "type", sort_type),
2436 DIM(SORT_ANNOTATE_DATA_TYPE_OFFSET, "typeoff", sort_type_offset),
2437 DIM(SORT_SYM_OFFSET, "symoff", sort_sym_offset),
2438 DIM(SORT_ANNOTATE_DATA_TYPE_CACHELINE, "typecln", sort_type_cacheline),
/* Sort keys specific to branch-stack mode. */
2443 #define DIM(d, n, func) [d - __SORT_BRANCH_STACK] = { .name = n, .entry = &(func) }
2445 static struct sort_dimension bstack_sort_dimensions[] = {
2446 DIM(SORT_DSO_FROM, "dso_from", sort_dso_from),
2447 DIM(SORT_DSO_TO, "dso_to", sort_dso_to),
2448 DIM(SORT_SYM_FROM, "symbol_from", sort_sym_from),
2449 DIM(SORT_SYM_TO, "symbol_to", sort_sym_to),
2450 DIM(SORT_MISPREDICT, "mispredict", sort_mispredict),
2451 DIM(SORT_IN_TX, "in_tx", sort_in_tx),
2452 DIM(SORT_ABORT, "abort", sort_abort),
2453 DIM(SORT_CYCLES, "cycles", sort_cycles),
2454 DIM(SORT_SRCLINE_FROM, "srcline_from", sort_srcline_from),
2455 DIM(SORT_SRCLINE_TO, "srcline_to", sort_srcline_to),
2456 DIM(SORT_SYM_IPC, "ipc_lbr", sort_sym_ipc),
2457 DIM(SORT_ADDR_FROM, "addr_from", sort_addr_from),
2458 DIM(SORT_ADDR_TO, "addr_to", sort_addr_to),
/* Sort keys specific to memory (mem-events) mode. */
2463 #define DIM(d, n, func) [d - __SORT_MEMORY_MODE] = { .name = n, .entry = &(func) }
2465 static struct sort_dimension memory_sort_dimensions[] = {
2466 DIM(SORT_MEM_DADDR_SYMBOL, "symbol_daddr", sort_mem_daddr_sym),
2467 DIM(SORT_MEM_IADDR_SYMBOL, "symbol_iaddr", sort_mem_iaddr_sym),
2468 DIM(SORT_MEM_DADDR_DSO, "dso_daddr", sort_mem_daddr_dso),
2469 DIM(SORT_MEM_LOCKED, "locked", sort_mem_locked),
2470 DIM(SORT_MEM_TLB, "tlb", sort_mem_tlb),
2471 DIM(SORT_MEM_LVL, "mem", sort_mem_lvl),
2472 DIM(SORT_MEM_SNOOP, "snoop", sort_mem_snoop),
2473 DIM(SORT_MEM_DCACHELINE, "dcacheline", sort_mem_dcacheline),
2474 DIM(SORT_MEM_PHYS_DADDR, "phys_daddr", sort_mem_phys_daddr),
2475 DIM(SORT_MEM_DATA_PAGE_SIZE, "data_page_size", sort_mem_data_page_size),
2476 DIM(SORT_MEM_BLOCKED, "blocked", sort_mem_blocked),
/* Maps an output-field name to its perf_hpp format descriptor. */
2481 struct hpp_dimension {
2483 struct perf_hpp_fmt *fmt;
2487 #define DIM(d, n) { .name = n, .fmt = &perf_hpp__format[d], }
2489 static struct hpp_dimension hpp_sort_dimensions[] = {
2490 DIM(PERF_HPP__OVERHEAD, "overhead"),
2491 DIM(PERF_HPP__OVERHEAD_SYS, "overhead_sys"),
2492 DIM(PERF_HPP__OVERHEAD_US, "overhead_us"),
2493 DIM(PERF_HPP__OVERHEAD_GUEST_SYS, "overhead_guest_sys"),
2494 DIM(PERF_HPP__OVERHEAD_GUEST_US, "overhead_guest_us"),
2495 DIM(PERF_HPP__OVERHEAD_ACC, "overhead_children"),
2496 DIM(PERF_HPP__SAMPLES, "sample"),
2497 DIM(PERF_HPP__PERIOD, "period"),
2498 DIM(PERF_HPP__WEIGHT1, "weight1"),
2499 DIM(PERF_HPP__WEIGHT2, "weight2"),
2500 DIM(PERF_HPP__WEIGHT3, "weight3"),
2501 /* aliases for weight_struct */
2502 DIM(PERF_HPP__WEIGHT2, "ins_lat"),
2503 DIM(PERF_HPP__WEIGHT3, "retire_lat"),
2504 DIM(PERF_HPP__WEIGHT3, "p_stage_cyc"),
/* Adapter wrapping a sort_entry in the perf_hpp_fmt column interface. */
2509 struct hpp_sort_entry {
2510 struct perf_hpp_fmt hpp;
2511 struct sort_entry *se;
/* Widen the column enough for its header name. */
2514 void perf_hpp__reset_sort_width(struct perf_hpp_fmt *fmt, struct hists *hists)
2516 struct hpp_sort_entry *hse;
2518 if (!perf_hpp__is_sort_entry(fmt))
2521 hse = container_of(fmt, struct hpp_sort_entry, hpp);
2522 hists__new_col_len(hists, hse->se->se_width_idx, strlen(fmt->name));
/* Print the column header, padded/truncated to the column width. */
2525 static int __sort__hpp_header(struct perf_hpp_fmt *fmt, struct perf_hpp *hpp,
2526 struct hists *hists, int line __maybe_unused,
2527 int *span __maybe_unused)
2529 struct hpp_sort_entry *hse;
2530 size_t len = fmt->user_len;
2532 hse = container_of(fmt, struct hpp_sort_entry, hpp);
2535 len = hists__col_len(hists, hse->se->se_width_idx);
2537 return scnprintf(hpp->buf, hpp->size, "%-*.*s", len, len, fmt->name);
/* Report the column width (user override or tracked column length). */
2540 static int __sort__hpp_width(struct perf_hpp_fmt *fmt,
2541 struct perf_hpp *hpp __maybe_unused,
2542 struct hists *hists)
2544 struct hpp_sort_entry *hse;
2545 size_t len = fmt->user_len;
2547 hse = container_of(fmt, struct hpp_sort_entry, hpp);
2550 len = hists__col_len(hists, hse->se->se_width_idx);
/* Delegate cell rendering to the wrapped sort_entry's se_snprintf. */
2555 static int __sort__hpp_entry(struct perf_hpp_fmt *fmt, struct perf_hpp *hpp,
2556 struct hist_entry *he)
2558 struct hpp_sort_entry *hse;
2559 size_t len = fmt->user_len;
2561 hse = container_of(fmt, struct hpp_sort_entry, hpp);
2564 len = hists__col_len(he->hists, hse->se->se_width_idx);
2566 return hse->se->se_snprintf(he, hpp->buf, hpp->size, len);
/* Delegate comparison to the wrapped sort_entry's se_cmp. */
2569 static int64_t __sort__hpp_cmp(struct perf_hpp_fmt *fmt,
2570 struct hist_entry *a, struct hist_entry *b)
2572 struct hpp_sort_entry *hse;
2574 hse = container_of(fmt, struct hpp_sort_entry, hpp);
2575 return hse->se->se_cmp(a, b);
/* Collapse via se_collapse when set, otherwise fall back to se_cmp. */
2578 static int64_t __sort__hpp_collapse(struct perf_hpp_fmt *fmt,
2579 struct hist_entry *a, struct hist_entry *b)
2581 struct hpp_sort_entry *hse;
2582 int64_t (*collapse_fn)(struct hist_entry *, struct hist_entry *);
2584 hse = container_of(fmt, struct hpp_sort_entry, hpp);
2585 collapse_fn = hse->se->se_collapse ?: hse->se->se_cmp;
2586 return collapse_fn(a, b);
/* Final sort via se_sort when set, otherwise fall back to se_cmp. */
2589 static int64_t __sort__hpp_sort(struct perf_hpp_fmt *fmt,
2590 struct hist_entry *a, struct hist_entry *b)
2592 struct hpp_sort_entry *hse;
2593 int64_t (*sort_fn)(struct hist_entry *, struct hist_entry *);
2595 hse = container_of(fmt, struct hpp_sort_entry, hpp);
2596 sort_fn = hse->se->se_sort ?: hse->se->se_cmp;
2597 return sort_fn(a, b);
/*
 * A format is a sort entry iff its header callback is our
 * __sort__hpp_header — used as a cheap run-time type check.
 */
2600 bool perf_hpp__is_sort_entry(struct perf_hpp_fmt *format)
2602 return format->header == __sort__hpp_header;
/*
 * Generate perf_hpp__is_<key>_entry() predicates: true when fmt wraps
 * the global sort_<key> sort_entry.
 */
2605 #define MK_SORT_ENTRY_CHK(key) \
2606 bool perf_hpp__is_ ## key ## _entry(struct perf_hpp_fmt *fmt) \
2608 struct hpp_sort_entry *hse; \
2610 if (!perf_hpp__is_sort_entry(fmt)) \
2613 hse = container_of(fmt, struct hpp_sort_entry, hpp); \
2614 return hse->se == &sort_ ## key ; \
2617 #ifdef HAVE_LIBTRACEEVENT
2618 MK_SORT_ENTRY_CHK(trace)
/* !HAVE_LIBTRACEEVENT stub: no trace sort key, presumably returns false
 * (elided body) — confirm against the tree. */
2620 bool perf_hpp__is_trace_entry(struct perf_hpp_fmt *fmt __maybe_unused)
2625 MK_SORT_ENTRY_CHK(srcline)
2626 MK_SORT_ENTRY_CHK(srcfile)
2627 MK_SORT_ENTRY_CHK(thread)
2628 MK_SORT_ENTRY_CHK(comm)
2629 MK_SORT_ENTRY_CHK(dso)
2630 MK_SORT_ENTRY_CHK(sym)
/* Two sort-entry formats are equal when they wrap the same sort_entry. */
2633 static bool __sort__hpp_equal(struct perf_hpp_fmt *a, struct perf_hpp_fmt *b)
2635 struct hpp_sort_entry *hse_a;
2636 struct hpp_sort_entry *hse_b;
2638 if (!perf_hpp__is_sort_entry(a) || !perf_hpp__is_sort_entry(b))
2641 hse_a = container_of(a, struct hpp_sort_entry, hpp);
2642 hse_b = container_of(b, struct hpp_sort_entry, hpp);
2644 return hse_a->se == hse_b->se;
/* Destructor for a heap-allocated hpp_sort_entry (free call elided here). */
2647 static void hse_free(struct perf_hpp_fmt *fmt)
2649 struct hpp_sort_entry *hse;
2651 hse = container_of(fmt, struct hpp_sort_entry, hpp);
/* Per-hist_entry init hook: forward to the sort entry's optional se_init. */
2655 static void hse_init(struct perf_hpp_fmt *fmt, struct hist_entry *he)
2657 struct hpp_sort_entry *hse;
2659 if (!perf_hpp__is_sort_entry(fmt))
2662 hse = container_of(fmt, struct hpp_sort_entry, hpp);
2664 if (hse->se->se_init)
2665 hse->se->se_init(he);
/*
 * Allocate an hpp_sort_entry for a sort dimension and wire all of its
 * perf_hpp_fmt callbacks to the __sort__hpp_* helpers above.
 * Returns NULL on allocation failure (elided branch after the malloc).
 */
2668 static struct hpp_sort_entry *
2669 __sort_dimension__alloc_hpp(struct sort_dimension *sd, int level)
2671 struct hpp_sort_entry *hse;
2673 hse = malloc(sizeof(*hse));
2675 pr_err("Memory allocation failed\n");
2679 hse->se = sd->entry;
2680 hse->hpp.name = sd->entry->se_header;
2681 hse->hpp.header = __sort__hpp_header;
2682 hse->hpp.width = __sort__hpp_width;
2683 hse->hpp.entry = __sort__hpp_entry;
/* sort entries have no colored-output variant */
2684 hse->hpp.color = NULL;
2686 hse->hpp.cmp = __sort__hpp_cmp;
2687 hse->hpp.collapse = __sort__hpp_collapse;
2688 hse->hpp.sort = __sort__hpp_sort;
2689 hse->hpp.equal = __sort__hpp_equal;
2690 hse->hpp.free = hse_free;
2691 hse->hpp.init = hse_init;
2693 INIT_LIST_HEAD(&hse->hpp.list);
2694 INIT_LIST_HEAD(&hse->hpp.sort_list);
2695 hse->hpp.elide = false;
/* 0 means "no user width override"; level records sort-group nesting */
2697 hse->hpp.user_len = 0;
2698 hse->hpp.level = level;
/* Destructor for a memdup'ed perf_hpp_fmt (free call elided here). */
2703 static void hpp_free(struct perf_hpp_fmt *fmt)
/*
 * Duplicate the template format of an hpp dimension so each use gets its
 * own list linkage; returns NULL if memdup fails (elided check).
 */
2708 static struct perf_hpp_fmt *__hpp_dimension__alloc_hpp(struct hpp_dimension *hd,
2711 struct perf_hpp_fmt *fmt;
2713 fmt = memdup(hd->fmt, sizeof(*fmt));
2715 INIT_LIST_HEAD(&fmt->list);
2716 INIT_LIST_HEAD(&fmt->sort_list);
2717 fmt->free = hpp_free;
/*
 * Apply a filter of the given type to a hist entry by walking its hpp
 * list and calling each sort entry's se_filter. Entries without a
 * matching filter type are skipped.
 */
2724 int hist_entry__filter(struct hist_entry *he, int type, const void *arg)
2726 struct perf_hpp_fmt *fmt;
2727 struct hpp_sort_entry *hse;
2731 perf_hpp_list__for_each_format(he->hpp_list, fmt) {
2732 if (!perf_hpp__is_sort_entry(fmt))
2735 hse = container_of(fmt, struct hpp_sort_entry, hpp);
2736 if (hse->se->se_filter == NULL)
2740 * hist entry is filtered if any of sort key in the hpp list
2741 * is applied. But it should skip non-matched filter types.
2743 r = hse->se->se_filter(he, type, arg);
/* Allocate a sort-entry format and register it as a sort key on 'list'. */
2754 static int __sort_dimension__add_hpp_sort(struct sort_dimension *sd,
2755 struct perf_hpp_list *list,
2758 struct hpp_sort_entry *hse = __sort_dimension__alloc_hpp(sd, level);
2763 perf_hpp_list__register_sort_field(list, &hse->hpp);
/* Allocate a sort-entry format and register it as an output column. */
2767 static int __sort_dimension__add_hpp_output(struct sort_dimension *sd,
2768 struct perf_hpp_list *list)
2770 struct hpp_sort_entry *hse = __sort_dimension__alloc_hpp(sd, 0);
2775 perf_hpp_list__column_register(list, &hse->hpp);
/* Stubs when built without libtraceevent: no dynamic (tracepoint) entries. */
2779 #ifndef HAVE_LIBTRACEEVENT
2780 bool perf_hpp__is_dynamic_entry(struct perf_hpp_fmt *fmt __maybe_unused)
2784 bool perf_hpp__defined_dynamic_entry(struct perf_hpp_fmt *fmt __maybe_unused,
2785 struct hists *hists __maybe_unused)
/*
 * A dynamic sort key built from one tracepoint format field of one evsel.
 * dynamic_len tracks the widest value seen so the column can size itself.
 */
2790 struct hpp_dynamic_entry {
2791 struct perf_hpp_fmt hpp;
2792 struct evsel *evsel;
2793 struct tep_format_field *field;
2794 unsigned dynamic_len;
/*
 * Compute (and cache in hpp.len) the column width for a dynamic entry:
 * max of the field name length and the value width; non-string fields
 * are printed as hex, hence size * 2 + 2 ("0x" prefix).
 */
2798 static int hde_width(struct hpp_dynamic_entry *hde)
2800 if (!hde->hpp.len) {
2801 int len = hde->dynamic_len;
2802 int namelen = strlen(hde->field->name);
2803 int fieldlen = hde->field->size;
2808 if (!(hde->field->flags & TEP_FIELD_IS_STRING)) {
2809 /* length for print hex numbers */
2810 fieldlen = hde->field->size * 2 + 2;
2817 return hde->hpp.len;
/*
 * Scan the pretty-printed trace output ("name=value name=value ...") for
 * this entry, find our field by name and grow dynamic_len to fit the
 * printed value. (Value-length computation lines are elided here.)
 */
2820 static void update_dynamic_len(struct hpp_dynamic_entry *hde,
2821 struct hist_entry *he)
2824 struct tep_format_field *field = hde->field;
2831 /* parse pretty print result and update max length */
2832 if (!he->trace_output)
2833 he->trace_output = get_trace_output(he);
2835 namelen = strlen(field->name);
2836 str = he->trace_output;
/* tokens are space-separated; last token runs to the terminating NUL */
2839 pos = strchr(str, ' ');
2842 pos = str + strlen(str);
2845 if (!strncmp(str, field->name, namelen)) {
2851 if (len > hde->dynamic_len)
2852 hde->dynamic_len = len;
/* Print the dynamic column header: the tracepoint field name, right-aligned. */
2863 static int __sort__hde_header(struct perf_hpp_fmt *fmt, struct perf_hpp *hpp,
2864 struct hists *hists __maybe_unused,
2865 int line __maybe_unused,
2866 int *span __maybe_unused)
2868 struct hpp_dynamic_entry *hde;
/* user-specified width wins when non-zero */
2869 size_t len = fmt->user_len;
2871 hde = container_of(fmt, struct hpp_dynamic_entry, hpp);
2874 len = hde_width(hde);
2876 return scnprintf(hpp->buf, hpp->size, "%*.*s", len, len, hde->field->name);
/* Report the dynamic column width (user override or computed width). */
2879 static int __sort__hde_width(struct perf_hpp_fmt *fmt,
2880 struct perf_hpp *hpp __maybe_unused,
2881 struct hists *hists __maybe_unused)
2883 struct hpp_dynamic_entry *hde;
2884 size_t len = fmt->user_len;
2886 hde = container_of(fmt, struct hpp_dynamic_entry, hpp);
2889 len = hde_width(hde);
/* A dynamic entry is only defined for the evsel it was created from. */
2894 bool perf_hpp__defined_dynamic_entry(struct perf_hpp_fmt *fmt, struct hists *hists)
2896 struct hpp_dynamic_entry *hde;
2898 hde = container_of(fmt, struct hpp_dynamic_entry, hpp);
2900 return hists_to_evsel(hists) == hde->evsel;
/*
 * Format one cell of a dynamic column. In pretty mode, extract the
 * "name=value" token for our field from the cached trace output; in raw
 * mode (presumably the trace_seq branch below — confirm) pretty-print
 * the raw field bytes with tep_print_field().
 */
2903 static int __sort__hde_entry(struct perf_hpp_fmt *fmt, struct perf_hpp *hpp,
2904 struct hist_entry *he)
2906 struct hpp_dynamic_entry *hde;
2907 size_t len = fmt->user_len;
2909 struct tep_format_field *field;
2914 hde = container_of(fmt, struct hpp_dynamic_entry, hpp);
2917 len = hde_width(hde);
2922 if (!he->trace_output)
2923 he->trace_output = get_trace_output(he);
2926 namelen = strlen(field->name);
2927 str = he->trace_output;
2930 pos = strchr(str, ' ');
2933 pos = str + strlen(str);
2936 if (!strncmp(str, field->name, namelen)) {
/* isolate just this field's token for printing */
2938 str = strndup(str, pos - str);
2941 return scnprintf(hpp->buf, hpp->size,
2942 "%*.*s", len, len, "ERROR");
2953 struct trace_seq seq;
2955 trace_seq_init(&seq);
2956 tep_print_field(&seq, he->raw_data, hde->field);
2960 ret = scnprintf(hpp->buf, hpp->size, "%*.*s", len, len, str);
/*
 * Compare two entries on the raw bytes of the tracepoint field.
 * Dynamic fields store "offset | (size << 16)" inline in the record;
 * relative dynamic fields are offset from the end of the field itself.
 */
2965 static int64_t __sort__hde_cmp(struct perf_hpp_fmt *fmt,
2966 struct hist_entry *a, struct hist_entry *b)
2968 struct hpp_dynamic_entry *hde;
2969 struct tep_format_field *field;
2970 unsigned offset, size;
2972 hde = container_of(fmt, struct hpp_dynamic_entry, hpp);
2975 if (field->flags & TEP_FIELD_IS_DYNAMIC) {
2976 unsigned long long dyn;
2978 tep_read_number_field(field, a->raw_data, &dyn);
2979 offset = dyn & 0xffff;
2980 size = (dyn >> 16) & 0xffff;
2981 if (tep_field_is_relative(field->flags))
2982 offset += field->offset + field->size;
2983 /* record max width for output */
2984 if (size > hde->dynamic_len)
2985 hde->dynamic_len = size;
/* static field: offset/size come straight from the format description */
2987 offset = field->offset;
2991 return memcmp(a->raw_data + offset, b->raw_data + offset, size);
/* Dynamic entries are identified by their cmp callback. */
2994 bool perf_hpp__is_dynamic_entry(struct perf_hpp_fmt *fmt)
2996 return fmt->cmp == __sort__hde_cmp;
/* Two dynamic formats are equal when they wrap the same tep field. */
2999 static bool __sort__hde_equal(struct perf_hpp_fmt *a, struct perf_hpp_fmt *b)
3001 struct hpp_dynamic_entry *hde_a;
3002 struct hpp_dynamic_entry *hde_b;
3004 if (!perf_hpp__is_dynamic_entry(a) || !perf_hpp__is_dynamic_entry(b))
3007 hde_a = container_of(a, struct hpp_dynamic_entry, hpp);
3008 hde_b = container_of(b, struct hpp_dynamic_entry, hpp);
3010 return hde_a->field == hde_b->field;
/* Destructor for a heap-allocated hpp_dynamic_entry (free call elided). */
3013 static void hde_free(struct perf_hpp_fmt *fmt)
3015 struct hpp_dynamic_entry *hde;
3017 hde = container_of(fmt, struct hpp_dynamic_entry, hpp);
/* Per-hist_entry init: grow the column to fit this entry's value. */
3021 static void __sort__hde_init(struct perf_hpp_fmt *fmt, struct hist_entry *he)
3023 struct hpp_dynamic_entry *hde;
3025 if (!perf_hpp__is_dynamic_entry(fmt))
3028 hde = container_of(fmt, struct hpp_dynamic_entry, hpp);
3029 update_dynamic_len(hde, he);
/*
 * Allocate a dynamic entry for one (evsel, tep field) pair and wire all
 * perf_hpp_fmt callbacks to the __sort__hde_* helpers above.
 * Returns NULL on allocation failure (elided branch after the malloc).
 */
3032 static struct hpp_dynamic_entry *
3033 __alloc_dynamic_entry(struct evsel *evsel, struct tep_format_field *field,
3036 struct hpp_dynamic_entry *hde;
3038 hde = malloc(sizeof(*hde));
3040 pr_debug("Memory allocation failed\n");
3046 hde->dynamic_len = 0;
3048 hde->hpp.name = field->name;
3049 hde->hpp.header = __sort__hde_header;
3050 hde->hpp.width = __sort__hde_width;
3051 hde->hpp.entry = __sort__hde_entry;
3052 hde->hpp.color = NULL;
3054 hde->hpp.init = __sort__hde_init;
/* raw byte compare is used for all three comparison stages */
3055 hde->hpp.cmp = __sort__hde_cmp;
3056 hde->hpp.collapse = __sort__hde_cmp;
3057 hde->hpp.sort = __sort__hde_cmp;
3058 hde->hpp.equal = __sort__hde_equal;
3059 hde->hpp.free = hde_free;
3061 INIT_LIST_HEAD(&hde->hpp.list);
3062 INIT_LIST_HEAD(&hde->hpp.sort_list);
3063 hde->hpp.elide = false;
3065 hde->hpp.user_len = 0;
3066 hde->hpp.level = level;
3070 #endif /* HAVE_LIBTRACEEVENT */
/*
 * Deep-copy a format: duplicate the full wrapper struct for sort and
 * dynamic entries, or just the perf_hpp_fmt itself for plain hpp
 * formats, then reinitialize the copy's list linkage.
 * Returns NULL when memdup fails (elided checks).
 */
3072 struct perf_hpp_fmt *perf_hpp_fmt__dup(struct perf_hpp_fmt *fmt)
3074 struct perf_hpp_fmt *new_fmt = NULL;
3076 if (perf_hpp__is_sort_entry(fmt)) {
3077 struct hpp_sort_entry *hse, *new_hse;
3079 hse = container_of(fmt, struct hpp_sort_entry, hpp);
3080 new_hse = memdup(hse, sizeof(*hse));
3082 new_fmt = &new_hse->hpp;
3083 #ifdef HAVE_LIBTRACEEVENT
3084 } else if (perf_hpp__is_dynamic_entry(fmt)) {
3085 struct hpp_dynamic_entry *hde, *new_hde;
3087 hde = container_of(fmt, struct hpp_dynamic_entry, hpp);
3088 new_hde = memdup(hde, sizeof(*hde));
3090 new_fmt = &new_hde->hpp;
3093 new_fmt = memdup(fmt, sizeof(*fmt));
/* the copy must not share list membership with the original */
3096 INIT_LIST_HEAD(&new_fmt->list);
3097 INIT_LIST_HEAD(&new_fmt->sort_list);
/*
 * Split "[event.]field[/opt]" in place: '.' separates an optional event
 * name from the field, '/' introduces an optional field option.
 * The outputs point into 'str' (which is modified).
 */
3102 static int parse_field_name(char *str, char **event, char **field, char **opt)
3104 char *event_name, *field_name, *opt_name;
3107 field_name = strchr(str, '.');
3110 *field_name++ = '\0';
3116 opt_name = strchr(field_name, '/');
3120 *event = event_name;
3121 *field = field_name;
3127 /* find match evsel using a given event name. The event name can be:
3128 * 1. '%' + event index (e.g. '%1' for first event)
3129 * 2. full event name (e.g. sched:sched_switch)
3130 * 3. partial event name (should not contain ':')
/*
 * Returns the matching evsel or (presumably) NULL when not found or
 * ambiguous — the return paths are elided here; confirm against the tree.
 */
3132 static struct evsel *find_evsel(struct evlist *evlist, char *event_name)
3134 struct evsel *evsel = NULL;
/* '%N' selects the N-th event by position (1-based) */
3139 if (event_name[0] == '%') {
3140 int nr = strtol(event_name+1, NULL, 0);
3142 if (nr > evlist->core.nr_entries)
3145 evsel = evlist__first(evlist);
3147 evsel = evsel__next(evsel);
/* names containing ':' must match exactly; others match as substrings */
3152 full_name = !!strchr(event_name, ':');
3153 evlist__for_each_entry(evlist, pos) {
3155 if (full_name && evsel__name_is(pos, event_name))
/* a second substring match makes the name ambiguous — report both */
3158 if (!full_name && strstr(pos->name, event_name)) {
3160 pr_debug("'%s' event is ambiguous: it can be %s or %s\n",
3161 event_name, evsel->name, pos->name);
3171 #ifdef HAVE_LIBTRACEEVENT
/* Create a dynamic entry for one field and register it as a sort key. */
3172 static int __dynamic_dimension__add(struct evsel *evsel,
3173 struct tep_format_field *field,
3174 bool raw_trace, int level)
3176 struct hpp_dynamic_entry *hde;
3178 hde = __alloc_dynamic_entry(evsel, field, level);
3182 hde->raw_trace = raw_trace;
3184 perf_hpp__register_sort_field(&hde->hpp);
/* Add every format field of one tracepoint evsel as a dynamic sort key. */
3188 static int add_evsel_fields(struct evsel *evsel, bool raw_trace, int level)
3191 struct tep_format_field *field;
3193 field = evsel->tp_format->format.fields;
3195 ret = __dynamic_dimension__add(evsel, field, raw_trace, level);
3199 field = field->next;
/* Add all fields of every tracepoint event in the evlist. */
3204 static int add_all_dynamic_fields(struct evlist *evlist, bool raw_trace,
3208 struct evsel *evsel;
3210 evlist__for_each_entry(evlist, evsel) {
3211 if (evsel->core.attr.type != PERF_TYPE_TRACEPOINT)
3214 ret = add_evsel_fields(evsel, raw_trace, level);
/* Add the named field from every tracepoint event that defines it. */
3221 static int add_all_matching_fields(struct evlist *evlist,
3222 char *field_name, bool raw_trace, int level)
3225 struct evsel *evsel;
3226 struct tep_format_field *field;
3228 evlist__for_each_entry(evlist, evsel) {
3229 if (evsel->core.attr.type != PERF_TYPE_TRACEPOINT)
3232 field = tep_find_any_field(evsel->tp_format, field_name);
3236 ret = __dynamic_dimension__add(evsel, field, raw_trace, level);
3242 #endif /* HAVE_LIBTRACEEVENT */
/*
 * Parse a --sort token of the form "[event.]field[/raw]" and register
 * matching dynamic (tracepoint-field) sort keys. Special field names:
 * "trace_fields" adds every field of every tracepoint; "*" adds every
 * field of one event; no event name means "all events with this field".
 */
3244 static int add_dynamic_entry(struct evlist *evlist, const char *tok,
3247 char *str, *event_name, *field_name, *opt_name;
3248 struct evsel *evsel;
3249 bool raw_trace = symbol_conf.raw_trace;
3259 if (parse_field_name(str, &event_name, &field_name, &opt_name) < 0) {
/* "/raw" is the only supported field option */
3265 if (strcmp(opt_name, "raw")) {
3266 pr_debug("unsupported field option %s\n", opt_name);
3273 #ifdef HAVE_LIBTRACEEVENT
3274 if (!strcmp(field_name, "trace_fields")) {
3275 ret = add_all_dynamic_fields(evlist, raw_trace, level);
3279 if (event_name == NULL) {
3280 ret = add_all_matching_fields(evlist, field_name, raw_trace, level);
/* without libtraceevent, any tracepoint in the session is unusable */
3284 evlist__for_each_entry(evlist, evsel) {
3285 if (evsel->core.attr.type == PERF_TYPE_TRACEPOINT) {
3286 pr_err("%s %s", ret ? "," : "This perf binary isn't linked with libtraceevent, can't process", evsel__name(evsel));
3297 evsel = find_evsel(evlist, event_name);
3298 if (evsel == NULL) {
3299 pr_debug("Cannot find event: %s\n", event_name);
3304 if (evsel->core.attr.type != PERF_TYPE_TRACEPOINT) {
3305 pr_debug("%s is not a tracepoint event\n", event_name);
3310 #ifdef HAVE_LIBTRACEEVENT
3311 if (!strcmp(field_name, "*")) {
3312 ret = add_evsel_fields(evsel, raw_trace, level);
3314 struct tep_format_field *field = tep_find_any_field(evsel->tp_format, field_name);
3316 if (field == NULL) {
3317 pr_debug("Cannot find event field for %s.%s\n",
3318 event_name, field_name);
3322 ret = __dynamic_dimension__add(evsel, field, raw_trace, level);
3327 #endif /* HAVE_LIBTRACEEVENT */
/* Register a sort dimension as a sort key; note if collapsing is needed. */
3334 static int __sort_dimension__add(struct sort_dimension *sd,
3335 struct perf_hpp_list *list,
3341 if (__sort_dimension__add_hpp_sort(sd, list, level) < 0)
3344 if (sd->entry->se_collapse)
3345 list->need_collapse = 1;
/* Register an hpp dimension (overhead/period/... column) as a sort key. */
3352 static int __hpp_dimension__add(struct hpp_dimension *hd,
3353 struct perf_hpp_list *list,
3356 struct perf_hpp_fmt *fmt;
3361 fmt = __hpp_dimension__alloc_hpp(hd, level);
3366 perf_hpp_list__register_sort_field(list, fmt);
/* Register a sort dimension as an output column only. */
3370 static int __sort_dimension__add_output(struct perf_hpp_list *list,
3371 struct sort_dimension *sd)
3376 if (__sort_dimension__add_hpp_output(sd, list) < 0)
/* Register an hpp dimension as an output column only. */
3383 static int __hpp_dimension__add_output(struct perf_hpp_list *list,
3384 struct hpp_dimension *hd)
3386 struct perf_hpp_fmt *fmt;
3391 fmt = __hpp_dimension__alloc_hpp(hd, 0);
3396 perf_hpp_list__column_register(list, fmt);
/* Public wrapper: add the col-th hpp dimension to the global output list. */
3400 int hpp_dimension__add_output(unsigned col)
3402 BUG_ON(col >= PERF_HPP__MAX_INDEX);
3403 return __hpp_dimension__add_output(&perf_hpp_list, &hpp_sort_dimensions[col]);
/*
 * Resolve one --sort token against, in order: common sort dimensions,
 * hpp dimensions, branch-stack dimensions, memory dimensions, and
 * finally dynamic tracepoint fields. Matching is prefix-based and
 * case-insensitive. Some keys have side effects (regex compile for
 * "parent", symbol collapse for perf diff, annotate flags, etc.).
 */
3406 int sort_dimension__add(struct perf_hpp_list *list, const char *tok,
3407 struct evlist *evlist,
3413 * Check to see if there are any arch specific
3414 * sort dimensions not applicable for the current
3415 * architecture. If so, Skip that sort key since
3416 * we don't want to display it in the output fields.
3418 for (j = 0; j < ARRAY_SIZE(arch_specific_sort_keys); j++) {
3419 if (!strcmp(arch_specific_sort_keys[j], tok) &&
3420 !arch_support_sort_key(tok)) {
3425 for (i = 0; i < ARRAY_SIZE(common_sort_dimensions); i++) {
3426 struct sort_dimension *sd = &common_sort_dimensions[i];
3428 if (!sd->name || strncasecmp(tok, sd->name, strlen(tok)))
/* ins_lat/p_stage_cyc style keys get arch-specific headers */
3431 for (j = 0; j < ARRAY_SIZE(dynamic_headers); j++) {
3432 if (sd->name && !strcmp(dynamic_headers[j], sd->name))
3433 sort_dimension_add_dynamic_header(sd);
/* "parent" needs parent_pattern compiled before it can be used */
3436 if (sd->entry == &sort_parent && parent_pattern) {
3437 int ret = regcomp(&parent_regex, parent_pattern, REG_EXTENDED);
3441 regerror(ret, &parent_regex, err, sizeof(err));
3442 pr_err("Invalid regex: %s\n%s", parent_pattern, err);
3446 } else if (sd->entry == &sort_sym) {
3449 * perf diff displays the performance difference amongst
3450 * two or more perf.data files. Those files could come
3451 * from different binaries. So we should not compare
3452 * their ips, but the name of symbol.
3454 if (sort__mode == SORT_MODE__DIFF)
3455 sd->entry->se_collapse = sort__sym_sort;
3457 } else if (sd->entry == &sort_dso) {
3459 } else if (sd->entry == &sort_socket) {
3461 } else if (sd->entry == &sort_thread) {
3463 } else if (sd->entry == &sort_comm) {
3465 } else if (sd->entry == &sort_type_offset) {
3466 symbol_conf.annotate_data_member = true;
3469 return __sort_dimension__add(sd, list, level);
3472 for (i = 0; i < ARRAY_SIZE(hpp_sort_dimensions); i++) {
3473 struct hpp_dimension *hd = &hpp_sort_dimensions[i];
3475 if (strncasecmp(tok, hd->name, strlen(tok)))
3478 return __hpp_dimension__add(hd, list, level);
/* branch-stack keys are only valid in branch mode */
3481 for (i = 0; i < ARRAY_SIZE(bstack_sort_dimensions); i++) {
3482 struct sort_dimension *sd = &bstack_sort_dimensions[i];
3484 if (!sd->name || strncasecmp(tok, sd->name, strlen(tok)))
3487 if (sort__mode != SORT_MODE__BRANCH)
3490 if (sd->entry == &sort_sym_from || sd->entry == &sort_sym_to)
3493 __sort_dimension__add(sd, list, level);
/* memory keys are only valid in memory mode */
3497 for (i = 0; i < ARRAY_SIZE(memory_sort_dimensions); i++) {
3498 struct sort_dimension *sd = &memory_sort_dimensions[i];
3500 if (!sd->name || strncasecmp(tok, sd->name, strlen(tok)))
3503 if (sort__mode != SORT_MODE__MEMORY)
/* dcacheline sorting is meaningless without a known cacheline size */
3506 if (sd->entry == &sort_mem_dcacheline && cacheline_size() == 0)
3509 if (sd->entry == &sort_mem_daddr_sym)
3512 __sort_dimension__add(sd, list, level);
/* last resort: treat the token as a dynamic tracepoint field spec */
3516 if (!add_dynamic_entry(evlist, tok, level))
/*
 * Tokenize a sort-order string and add each key. '{...}' groups keys
 * into the same hierarchy level (in_group/next_level track that); the
 * delimiter handling lines are partially elided here.
 */
3522 static int setup_sort_list(struct perf_hpp_list *list, char *str,
3523 struct evlist *evlist)
3529 bool in_group = false;
3533 tmp = strpbrk(str, "{}, ");
3538 next_level = level + 1;
3542 else if (*tmp == '}')
3550 ret = sort_dimension__add(list, tok, evlist, level);
3551 if (ret == -EINVAL) {
3552 if (!cacheline_size() && !strncasecmp(tok, "dcacheline", strlen(tok)))
3553 ui__error("The \"dcacheline\" --sort key needs to know the cacheline size and it couldn't be determined on this system");
3555 ui__error("Invalid --sort key: `%s'", tok);
3557 } else if (ret == -ESRCH) {
3558 ui__error("Unknown --sort key: `%s'", tok);
/*
 * Pick the default sort order for the current sort mode. A session made
 * up purely of tracepoint events switches to SORT_MODE__TRACEPOINT
 * ("trace", or "trace_fields" in raw mode).
 */
3569 static const char *get_default_sort_order(struct evlist *evlist)
3571 const char *default_sort_orders[] = {
3573 default_branch_sort_order,
3574 default_mem_sort_order,
3575 default_top_sort_order,
3576 default_diff_sort_order,
3577 default_tracepoint_sort_order,
3579 bool use_trace = true;
3580 struct evsel *evsel;
/* the array above is indexed by sort__mode; keep them in sync */
3582 BUG_ON(sort__mode >= ARRAY_SIZE(default_sort_orders));
3584 if (evlist == NULL || evlist__empty(evlist))
3587 evlist__for_each_entry(evlist, evsel) {
3588 if (evsel->core.attr.type != PERF_TYPE_TRACEPOINT) {
3595 sort__mode = SORT_MODE__TRACEPOINT;
3596 if (symbol_conf.raw_trace)
3597 return "trace_fields";
3600 return default_sort_orders[sort__mode];
/*
 * Handle a '+'-prefixed --sort value by rewriting sort_order to
 * "<default order>,<user keys>". A strict (no '+') order is untouched.
 */
3603 static int setup_sort_order(struct evlist *evlist)
3605 char *new_sort_order;
3608 * Append '+'-prefixed sort order to the default sort
3611 if (!sort_order || is_strict_order(sort_order))
/* bare "+" has no keys to append */
3614 if (sort_order[1] == '\0') {
3615 ui__error("Invalid --sort key: `+'");
3620 * We allocate new sort_order string, but we never free it,
3621 * because it's checked over the rest of the code.
3623 if (asprintf(&new_sort_order, "%s,%s",
3624 get_default_sort_order(evlist), sort_order + 1) < 0) {
3625 pr_err("Not enough memory to set up --sort");
3629 sort_order = new_sort_order;
3634 * Adds 'pre,' prefix into 'str' is 'pre' is
3635 * not already part of 'str'.
3637 static char *prefix_if_not_in(const char *pre, char *str)
3641 if (!str || strstr(str, pre))
3644 if (asprintf(&n, "%s,%s", pre, str) < 0)
/*
 * Prepend the overhead key(s) to a sort-key string for backward
 * compatibility; perf diff manages overhead itself and is skipped.
 */
3651 static char *setup_overhead(char *keys)
3653 if (sort__mode == SORT_MODE__DIFF)
3656 keys = prefix_if_not_in("overhead", keys);
3658 if (symbol_conf.cumulate_callchain)
3659 keys = prefix_if_not_in("overhead_children", keys);
/*
 * Build the global sort-key list: resolve '+' prefixes, fall back to
 * the mode's default order (unless a strict --fields order was given),
 * prepend overhead keys, then parse the final string.
 */
3664 static int __setup_sorting(struct evlist *evlist)
3667 const char *sort_keys;
3670 ret = setup_sort_order(evlist);
3674 sort_keys = sort_order;
3675 if (sort_keys == NULL) {
3676 if (is_strict_order(field_order)) {
3678 * If user specified field order but no sort order,
3679 * we'll honor it and not add default sort orders.
3684 sort_keys = get_default_sort_order(evlist);
/* work on a mutable copy — setup_sort_list tokenizes in place */
3687 str = strdup(sort_keys);
3689 pr_err("Not enough memory to setup sort keys");
3694 * Prepend overhead fields for backward compatibility.
3696 if (!is_strict_order(field_order)) {
3697 str = setup_overhead(str);
3699 pr_err("Not enough memory to setup overhead keys");
3704 ret = setup_sort_list(&perf_hpp_list, str, evlist);
/* Set the elide flag on every sort-entry column with the given width index. */
3710 void perf_hpp__set_elide(int idx, bool elide)
3712 struct perf_hpp_fmt *fmt;
3713 struct hpp_sort_entry *hse;
3715 perf_hpp_list__for_each_format(&perf_hpp_list, fmt) {
3716 if (!perf_hpp__is_sort_entry(fmt))
3719 hse = container_of(fmt, struct hpp_sort_entry, hpp);
3720 if (hse->se->se_width_idx == idx) {
/*
 * A column can be elided when its filter list pins it to exactly one
 * value — every row would show the same thing. Log the filter and
 * (presumably) return true in that case; the tail is elided here.
 */
3727 static bool __get_elide(struct strlist *list, const char *list_name, FILE *fp)
3729 if (list && strlist__nr_entries(list) == 1) {
3731 fprintf(fp, "# %s: %s\n", list_name,
3732 strlist__entry(list, 0)->s);
/* Map a column width index to its single-entry filter list, if any. */
3738 static bool get_elide(int idx, FILE *output)
3742 return __get_elide(symbol_conf.sym_list, "symbol", output);
3744 return __get_elide(symbol_conf.dso_list, "dso", output);
3746 return __get_elide(symbol_conf.comm_list, "comm", output);
/* from/to variants only exist in branch mode */
3751 if (sort__mode != SORT_MODE__BRANCH)
3755 case HISTC_SYMBOL_FROM:
3756 return __get_elide(symbol_conf.sym_from_list, "sym_from", output);
3757 case HISTC_SYMBOL_TO:
3758 return __get_elide(symbol_conf.sym_to_list, "sym_to", output);
3759 case HISTC_DSO_FROM:
3760 return __get_elide(symbol_conf.dso_from_list, "dso_from", output);
3762 return __get_elide(symbol_conf.dso_to_list, "dso_to", output);
3763 case HISTC_ADDR_FROM:
3764 return __get_elide(symbol_conf.sym_from_list, "addr_from", output);
3766 return __get_elide(symbol_conf.sym_to_list, "addr_to", output);
/*
 * Compute the elide flag for every sort-entry column, then undo it if
 * it would hide all columns (the un-elide loop bodies are elided here).
 */
3774 void sort__setup_elide(FILE *output)
3776 struct perf_hpp_fmt *fmt;
3777 struct hpp_sort_entry *hse;
3779 perf_hpp_list__for_each_format(&perf_hpp_list, fmt) {
3780 if (!perf_hpp__is_sort_entry(fmt))
3783 hse = container_of(fmt, struct hpp_sort_entry, hpp);
3784 fmt->elide = get_elide(hse->se->se_width_idx, output);
3788 * It makes no sense to elide all of sort entries.
3789 * Just revert them to show up again.
3791 perf_hpp_list__for_each_format(&perf_hpp_list, fmt) {
3792 if (!perf_hpp__is_sort_entry(fmt))
3799 perf_hpp_list__for_each_format(&perf_hpp_list, fmt) {
3800 if (!perf_hpp__is_sort_entry(fmt))
/*
 * Resolve one --fields token against hpp, common, branch-stack and
 * memory dimensions (prefix, case-insensitive) and register it as an
 * output column. Mode-restricted dimensions fail outside their mode.
 */
3807 int output_field_add(struct perf_hpp_list *list, const char *tok)
3811 for (i = 0; i < ARRAY_SIZE(hpp_sort_dimensions); i++) {
3812 struct hpp_dimension *hd = &hpp_sort_dimensions[i];
3814 if (strncasecmp(tok, hd->name, strlen(tok)))
3817 if (!strcasecmp(tok, "weight"))
3818 ui__warning("--fields weight shows the average value unlike in the --sort key.\n");
3820 return __hpp_dimension__add_output(list, hd);
3823 for (i = 0; i < ARRAY_SIZE(common_sort_dimensions); i++) {
3824 struct sort_dimension *sd = &common_sort_dimensions[i];
3826 if (!sd->name || strncasecmp(tok, sd->name, strlen(tok)))
3829 return __sort_dimension__add_output(list, sd);
3832 for (i = 0; i < ARRAY_SIZE(bstack_sort_dimensions); i++) {
3833 struct sort_dimension *sd = &bstack_sort_dimensions[i];
3835 if (!sd->name || strncasecmp(tok, sd->name, strlen(tok)))
3838 if (sort__mode != SORT_MODE__BRANCH)
3841 return __sort_dimension__add_output(list, sd);
3844 for (i = 0; i < ARRAY_SIZE(memory_sort_dimensions); i++) {
3845 struct sort_dimension *sd = &memory_sort_dimensions[i];
3847 if (!sd->name || strncasecmp(tok, sd->name, strlen(tok)))
3850 if (sort__mode != SORT_MODE__MEMORY)
3853 return __sort_dimension__add_output(list, sd);
/* Tokenize a --fields string on ',' or ' ' and add each output field. */
3859 static int setup_output_list(struct perf_hpp_list *list, char *str)
3864 for (tok = strtok_r(str, ", ", &tmp);
3865 tok; tok = strtok_r(NULL, ", ", &tmp)) {
3866 ret = output_field_add(list, tok);
3867 if (ret == -EINVAL) {
3868 ui__error("Invalid --fields key: `%s'", tok);
3870 } else if (ret == -ESRCH) {
3871 ui__error("Unknown --fields key: `%s'", tok);
/* Clear the 'taken' flag on every dimension so setup can run again. */
3879 void reset_dimensions(void)
3883 for (i = 0; i < ARRAY_SIZE(common_sort_dimensions); i++)
3884 common_sort_dimensions[i].taken = 0;
3886 for (i = 0; i < ARRAY_SIZE(hpp_sort_dimensions); i++)
3887 hpp_sort_dimensions[i].taken = 0;
3889 for (i = 0; i < ARRAY_SIZE(bstack_sort_dimensions); i++)
3890 bstack_sort_dimensions[i].taken = 0;
3892 for (i = 0; i < ARRAY_SIZE(memory_sort_dimensions); i++)
3893 memory_sort_dimensions[i].taken = 0;
/*
 * An order specification is "strict" unless it begins with '+', which
 * requests appending the given keys to the default order instead of
 * replacing it. A NULL order is not strict.
 */
bool is_strict_order(const char *order)
{
	if (order == NULL)
		return false;

	return order[0] != '+';
}
/*
 * Parse the --fields string into output columns; a leading '+' means
 * "append to defaults" (the skip-'+' line is elided before the empty
 * check).
 */
3901 static int __setup_output_field(void)
3906 if (field_order == NULL)
3909 strp = str = strdup(field_order);
3911 pr_err("Not enough memory to setup output fields");
3915 if (!is_strict_order(field_order))
3918 if (!strlen(strp)) {
3919 ui__error("Invalid --fields key: `+'");
3923 ret = setup_output_list(&perf_hpp_list, strp);
/*
 * Top-level entry point: build sort keys, add "parent" when a custom
 * parent pattern was given, add default output fields (except for perf
 * diff), parse --fields, then cross-link sort keys and output columns.
 */
3930 int setup_sorting(struct evlist *evlist)
3934 err = __setup_sorting(evlist);
3938 if (parent_pattern != default_parent_pattern) {
3939 err = sort_dimension__add(&perf_hpp_list, "parent", evlist, -1);
3947 * perf diff doesn't use default hpp output fields.
3949 if (sort__mode != SORT_MODE__DIFF)
3952 err = __setup_output_field();
3956 /* copy sort keys to output fields */
3957 perf_hpp__setup_output_field(&perf_hpp_list);
3958 /* and then copy output fields to sort keys */
3959 perf_hpp__append_sort_keys(&perf_hpp_list);
3961 /* setup hists-specific output fields */
3962 if (perf_hpp__setup_hists_formats(&perf_hpp_list, evlist) < 0)
/* Tear down the global hpp list and its derived state for a fresh setup. */
3968 void reset_output_field(void)
3970 perf_hpp_list.need_collapse = 0;
3971 perf_hpp_list.parent = 0;
3972 perf_hpp_list.sym = 0;
3973 perf_hpp_list.dso = 0;
3979 perf_hpp__reset_output_field(&perf_hpp_list);
/* Help-text indentation: three tab stops plus one space. */
3982 #define INDENT (3*8 + 1)
/*
 * Append one key name to the help string, wrapping to a fresh indented
 * line when the running line length (*llen) would get too long (the
 * length check is elided here).
 */
3984 static void add_key(struct strbuf *sb, const char *str, int *llen)
3990 strbuf_addstr(sb, "\n\t\t\t ");
3993 strbuf_addf(sb, " %s", str);
3994 *llen += strlen(str) + 1;
/* Append the names of n sort dimensions to the help string. */
3997 static void add_sort_string(struct strbuf *sb, struct sort_dimension *s, int n,
4002 for (i = 0; i < n; i++)
4003 add_key(sb, s[i].name, llen);
/* Append the names of n hpp dimensions to the help string. */
4006 static void add_hpp_sort_string(struct strbuf *sb, struct hpp_dimension *s, int n,
4011 for (i = 0; i < n; i++)
4012 add_key(sb, s[i].name, llen);
4015 char *sort_help(const char *prefix, enum sort_mode mode)
4019 int len = strlen(prefix) + INDENT;
4021 strbuf_init(&sb, 300);
4022 strbuf_addstr(&sb, prefix);
4023 add_hpp_sort_string(&sb, hpp_sort_dimensions,
4024 ARRAY_SIZE(hpp_sort_dimensions), &len);
4025 add_sort_string(&sb, common_sort_dimensions,
4026 ARRAY_SIZE(common_sort_dimensions), &len);
4027 if (mode == SORT_MODE__NORMAL || mode == SORT_MODE__BRANCH)
4028 add_sort_string(&sb, bstack_sort_dimensions,
4029 ARRAY_SIZE(bstack_sort_dimensions), &len);
4030 if (mode == SORT_MODE__NORMAL || mode == SORT_MODE__MEMORY)
4031 add_sort_string(&sb, memory_sort_dimensions,
4032 ARRAY_SIZE(memory_sort_dimensions), &len);
4033 s = strbuf_detach(&sb, NULL);
4034 strbuf_release(&sb);