#include "util.h"
#include "build-id.h"
#include "hist.h"
#include "session.h"
#include "sort.h"
#include "evlist.h"
#include "evsel.h"
#include "annotate.h"
#include "ui/progress.h"
#include <math.h>
static bool hists__filter_entry_by_dso(struct hists *hists,
				       struct hist_entry *he);
static bool hists__filter_entry_by_thread(struct hists *hists,
					  struct hist_entry *he);
static bool hists__filter_entry_by_symbol(struct hists *hists,
					  struct hist_entry *he);
u16 hists__col_len(struct hists *hists, enum hist_column col)
{
	return hists->col_len[col];
}
void hists__set_col_len(struct hists *hists, enum hist_column col, u16 len)
{
	hists->col_len[col] = len;
}
bool hists__new_col_len(struct hists *hists, enum hist_column col, u16 len)
{
	if (len > hists__col_len(hists, col)) {
		hists__set_col_len(hists, col, len);
		return true;
	}
	return false;
}
void hists__reset_col_len(struct hists *hists)
{
	enum hist_column col;

	for (col = 0; col < HISTC_NR_COLS; ++col)
		hists__set_col_len(hists, col, 0);
}
static void hists__set_unres_dso_col_len(struct hists *hists, int dso)
{
	const unsigned int unresolved_col_width = BITS_PER_LONG / 4;

	if (hists__col_len(hists, dso) < unresolved_col_width &&
	    !symbol_conf.col_width_list_str && !symbol_conf.field_sep &&
	    !symbol_conf.dso_list)
		hists__set_col_len(hists, dso, unresolved_col_width);
}
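
/*
 * Grow the column widths needed to display every field of @h. Widths
 * only ever grow here; hists__reset_col_len() shrinks them back to zero.
 */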
void hists__calc_col_len(struct hists *hists, struct hist_entry *h)
{
	const unsigned int unresolved_col_width = BITS_PER_LONG / 4;
	int symlen;
	u16 len;

	/*
	 * +4 accounts for '[x] ' priv level info
	 * +2 accounts for 0x prefix on raw addresses
	 * +3 accounts for ' y ' symtab origin info
	 */
	if (h->ms.sym) {
		symlen = h->ms.sym->namelen + 4;
		if (verbose)
			symlen += BITS_PER_LONG / 4 + 2 + 3;
		hists__new_col_len(hists, HISTC_SYMBOL, symlen);
	} else {
		symlen = unresolved_col_width + 4 + 2;
		hists__new_col_len(hists, HISTC_SYMBOL, symlen);
		hists__set_unres_dso_col_len(hists, HISTC_DSO);
	}

	len = thread__comm_len(h->thread);
	if (hists__new_col_len(hists, HISTC_COMM, len))
		hists__set_col_len(hists, HISTC_THREAD, len + 6);

	if (h->ms.map) {
		len = dso__name_len(h->ms.map->dso);
		hists__new_col_len(hists, HISTC_DSO, len);
	}

	if (h->parent)
		hists__new_col_len(hists, HISTC_PARENT, h->parent->namelen);

	if (h->branch_info) {
		if (h->branch_info->from.sym) {
			symlen = (int)h->branch_info->from.sym->namelen + 4;
			if (verbose)
				symlen += BITS_PER_LONG / 4 + 2 + 3;
			hists__new_col_len(hists, HISTC_SYMBOL_FROM, symlen);

			symlen = dso__name_len(h->branch_info->from.map->dso);
			hists__new_col_len(hists, HISTC_DSO_FROM, symlen);
		} else {
			symlen = unresolved_col_width + 4 + 2;
			hists__new_col_len(hists, HISTC_SYMBOL_FROM, symlen);
			hists__set_unres_dso_col_len(hists, HISTC_DSO_FROM);
		}

		if (h->branch_info->to.sym) {
			symlen = (int)h->branch_info->to.sym->namelen + 4;
			if (verbose)
				symlen += BITS_PER_LONG / 4 + 2 + 3;
			hists__new_col_len(hists, HISTC_SYMBOL_TO, symlen);

			symlen = dso__name_len(h->branch_info->to.map->dso);
			hists__new_col_len(hists, HISTC_DSO_TO, symlen);
		} else {
			symlen = unresolved_col_width + 4 + 2;
			hists__new_col_len(hists, HISTC_SYMBOL_TO, symlen);
			hists__set_unres_dso_col_len(hists, HISTC_DSO_TO);
		}
	}

	if (h->mem_info) {
		if (h->mem_info->daddr.sym) {
			symlen = (int)h->mem_info->daddr.sym->namelen + 4
			       + unresolved_col_width + 2;
			hists__new_col_len(hists, HISTC_MEM_DADDR_SYMBOL,
					   symlen);
			hists__new_col_len(hists, HISTC_MEM_DCACHELINE,
					   symlen + 1);
		} else {
			symlen = unresolved_col_width + 4 + 2;
			hists__new_col_len(hists, HISTC_MEM_DADDR_SYMBOL,
					   symlen);
		}
		if (h->mem_info->daddr.map) {
			symlen = dso__name_len(h->mem_info->daddr.map->dso);
			hists__new_col_len(hists, HISTC_MEM_DADDR_DSO,
					   symlen);
		} else {
			symlen = unresolved_col_width + 4 + 2;
			hists__set_unres_dso_col_len(hists, HISTC_MEM_DADDR_DSO);
		}
	} else {
		symlen = unresolved_col_width + 4 + 2;
		hists__new_col_len(hists, HISTC_MEM_DADDR_SYMBOL, symlen);
		hists__set_unres_dso_col_len(hists, HISTC_MEM_DADDR_DSO);
	}

	hists__new_col_len(hists, HISTC_MEM_LOCKED, 6);
	hists__new_col_len(hists, HISTC_MEM_TLB, 22);
	hists__new_col_len(hists, HISTC_MEM_SNOOP, 12);
	hists__new_col_len(hists, HISTC_MEM_LVL, 21 + 3);
	hists__new_col_len(hists, HISTC_LOCAL_WEIGHT, 12);
	hists__new_col_len(hists, HISTC_GLOBAL_WEIGHT, 12);

	if (h->transaction)
		hists__new_col_len(hists, HISTC_TRANSACTION,
				   hist_entry__transaction_len());
}
void hists__output_recalc_col_len(struct hists *hists, int max_rows)
{
	struct rb_node *next = rb_first(&hists->entries);
	struct hist_entry *n;
	int row = 0;

	hists__reset_col_len(hists);

	while (next && row++ < max_rows) {
		n = rb_entry(next, struct hist_entry, rb_node);
		if (!n->filtered)
			hists__calc_col_len(hists, n);
		next = rb_next(&n->rb_node);
	}
}
static void he_stat__add_cpumode_period(struct he_stat *he_stat,
					unsigned int cpumode, u64 period)
{
	switch (cpumode) {
	case PERF_RECORD_MISC_KERNEL:
		he_stat->period_sys += period;
		break;
	case PERF_RECORD_MISC_USER:
		he_stat->period_us += period;
		break;
	case PERF_RECORD_MISC_GUEST_KERNEL:
		he_stat->period_guest_sys += period;
		break;
	case PERF_RECORD_MISC_GUEST_USER:
		he_stat->period_guest_us += period;
		break;
	default:
		break;
	}
}
static void he_stat__add_period(struct he_stat *he_stat, u64 period,
				u64 weight)
{
	he_stat->period		+= period;
	he_stat->weight		+= weight;
	he_stat->nr_events	+= 1;
}
static void he_stat__add_stat(struct he_stat *dest, struct he_stat *src)
{
	dest->period		+= src->period;
	dest->period_sys	+= src->period_sys;
	dest->period_us		+= src->period_us;
	dest->period_guest_sys	+= src->period_guest_sys;
	dest->period_guest_us	+= src->period_guest_us;
	dest->nr_events		+= src->nr_events;
	dest->weight		+= src->weight;
}
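
/*
 * Age the stats of a 'live' entry (e.g. in perf top): each pass keeps
 * 7/8 of the period and event count, so entries that stop getting new
 * samples decay geometrically toward zero and can eventually be freed.
 */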
static void he_stat__decay(struct he_stat *he_stat)
{
	he_stat->period = (he_stat->period * 7) / 8;
	he_stat->nr_events = (he_stat->nr_events * 7) / 8;
	/* XXX need decay for weight too? */
}
static bool hists__decay_entry(struct hists *hists, struct hist_entry *he)
{
	u64 prev_period = he->stat.period;
	u64 diff;

	if (prev_period == 0)
		return true;

	he_stat__decay(&he->stat);
	if (symbol_conf.cumulate_callchain)
		he_stat__decay(he->stat_acc);

	diff = prev_period - he->stat.period;

	hists->stats.total_period -= diff;
	if (!he->filtered)
		hists->stats.total_non_filtered_period -= diff;

	return he->stat.period == 0;
}
void hists__decay_entries(struct hists *hists, bool zap_user, bool zap_kernel)
{
	struct rb_node *next = rb_first(&hists->entries);
	struct hist_entry *n;

	while (next) {
		n = rb_entry(next, struct hist_entry, rb_node);
		next = rb_next(&n->rb_node);
		/*
		 * We may be annotating this, for instance, so keep it here
		 * in case it gets new samples; we'll eventually free it when
		 * the user stops browsing and it again gets fully decayed.
		 */
		if (((zap_user && n->level == '.') ||
		     (zap_kernel && n->level != '.') ||
		     hists__decay_entry(hists, n)) &&
		    !n->used) {
			rb_erase(&n->rb_node, &hists->entries);

			if (sort__need_collapse)
				rb_erase(&n->rb_node_in, &hists->entries_collapsed);

			--hists->nr_entries;
			if (!n->filtered)
				--hists->nr_non_filtered_entries;

			hist_entry__free(n);
		}
	}
}
void hists__delete_entries(struct hists *hists)
{
	struct rb_node *next = rb_first(&hists->entries);
	struct hist_entry *n;

	while (next) {
		n = rb_entry(next, struct hist_entry, rb_node);
		next = rb_next(&n->rb_node);

		rb_erase(&n->rb_node, &hists->entries);

		if (sort__need_collapse)
			rb_erase(&n->rb_node_in, &hists->entries_collapsed);

		--hists->nr_entries;
		if (!n->filtered)
			--hists->nr_non_filtered_entries;

		hist_entry__free(n);
	}
}
/*
 * histogram, sorted on item, collects periods
 */
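
/*
 * Allocate a hist_entry from @template, deep-copying the parts that the
 * sample resolving code frees after the entry is added (branch_info) and
 * setting up the optional accumulated stats and callchain storage.
 */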
static struct hist_entry *hist_entry__new(struct hist_entry *template,
					  bool sample_self)
{
	size_t callchain_size = 0;
	struct hist_entry *he;

	if (symbol_conf.use_callchain)
		callchain_size = sizeof(struct callchain_root);

	he = zalloc(sizeof(*he) + callchain_size);

	if (he != NULL) {
		*he = *template;

		if (symbol_conf.cumulate_callchain) {
			he->stat_acc = malloc(sizeof(he->stat));
			if (he->stat_acc == NULL) {
				free(he);
				return NULL;
			}
			memcpy(he->stat_acc, &he->stat, sizeof(he->stat));
			if (!sample_self)
				memset(&he->stat, 0, sizeof(he->stat));
		}

		if (he->ms.map)
			he->ms.map->referenced = true;

		if (he->branch_info) {
			/*
			 * This branch info is (a part of) allocated from
			 * sample__resolve_bstack() and will be freed after
			 * adding new entries.  So we need to save a copy.
			 */
			he->branch_info = malloc(sizeof(*he->branch_info));
			if (he->branch_info == NULL) {
				free(he->stat_acc);
				free(he);
				return NULL;
			}

			memcpy(he->branch_info, template->branch_info,
			       sizeof(*he->branch_info));

			if (he->branch_info->from.map)
				he->branch_info->from.map->referenced = true;
			if (he->branch_info->to.map)
				he->branch_info->to.map->referenced = true;
		}

		if (he->mem_info) {
			if (he->mem_info->iaddr.map)
				he->mem_info->iaddr.map->referenced = true;
			if (he->mem_info->daddr.map)
				he->mem_info->daddr.map->referenced = true;
		}

		if (symbol_conf.use_callchain)
			callchain_init(he->callchain);

		INIT_LIST_HEAD(&he->pairs.node);
	}

	return he;
}
static u8 symbol__parent_filter(const struct symbol *parent)
{
	if (symbol_conf.exclude_other && parent == NULL)
		return 1 << HIST_FILTER__PARENT;
	return 0;
}
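
/*
 * Insert @entry into the rbtree of input entries, keyed by the current
 * sort keys, or merge its period into an existing entry that compares
 * equal.
 */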
static struct hist_entry *add_hist_entry(struct hists *hists,
					 struct hist_entry *entry,
					 struct addr_location *al,
					 bool sample_self)
{
	struct rb_node **p;
	struct rb_node *parent = NULL;
	struct hist_entry *he;
	int64_t cmp;
	u64 period = entry->stat.period;
	u64 weight = entry->stat.weight;

	p = &hists->entries_in->rb_node;

	while (*p != NULL) {
		parent = *p;
		he = rb_entry(parent, struct hist_entry, rb_node_in);

		/*
		 * Make sure that it receives arguments in the same order as
		 * hist_entry__collapse() so that we can use an appropriate
		 * function when searching an entry regardless of which sort
		 * keys were used.
		 */
		cmp = hist_entry__cmp(he, entry);

		if (!cmp) {
			if (sample_self)
				he_stat__add_period(&he->stat, period, weight);
			if (symbol_conf.cumulate_callchain)
				he_stat__add_period(he->stat_acc, period, weight);

			/*
			 * This mem info was allocated from sample__resolve_mem
			 * and will not be used anymore.
			 */
			zfree(&entry->mem_info);

			/* If the map of an existing hist_entry has
			 * become out-of-date due to an exec() or
			 * similar, update it.  Otherwise we will
			 * mis-adjust symbol addresses when computing
			 * the history counter to increment.
			 */
			if (he->ms.map != entry->ms.map) {
				he->ms.map = entry->ms.map;
				if (he->ms.map)
					he->ms.map->referenced = true;
			}
			goto out;
		}

		if (cmp < 0)
			p = &(*p)->rb_left;
		else
			p = &(*p)->rb_right;
	}

	he = hist_entry__new(entry, sample_self);
	if (!he)
		return NULL;

	rb_link_node(&he->rb_node_in, parent, p);
	rb_insert_color(&he->rb_node_in, hists->entries_in);
out:
	if (sample_self)
		he_stat__add_cpumode_period(&he->stat, al->cpumode, period);
	if (symbol_conf.cumulate_callchain)
		he_stat__add_cpumode_period(he->stat_acc, al->cpumode, period);
	return he;
}
struct hist_entry *__hists__add_entry(struct hists *hists,
				      struct addr_location *al,
				      struct symbol *sym_parent,
				      struct branch_info *bi,
				      struct mem_info *mi,
				      u64 period, u64 weight, u64 transaction,
				      bool sample_self)
{
	struct hist_entry entry = {
		.thread	= al->thread,
		.comm = thread__comm(al->thread),
		.ms = {
			.map	= al->map,
			.sym	= al->sym,
		},
		.cpu	 = al->cpu,
		.cpumode = al->cpumode,
		.ip	 = al->addr,
		.level	 = al->level,
		.stat = {
			.nr_events = 1,
			.period	= period,
			.weight = weight,
		},
		.parent = sym_parent,
		.filtered = symbol__parent_filter(sym_parent) | al->filtered,
		.hists	= hists,
		.branch_info = bi,
		.mem_info = mi,
		.transaction = transaction,
	};

	return add_hist_entry(hists, &entry, al, sample_self);
}
static int
iter_next_nop_entry(struct hist_entry_iter *iter __maybe_unused,
		    struct addr_location *al __maybe_unused)
{
	return 0;
}

static int
iter_add_next_nop_entry(struct hist_entry_iter *iter __maybe_unused,
			struct addr_location *al __maybe_unused)
{
	return 0;
}
static int
iter_prepare_mem_entry(struct hist_entry_iter *iter, struct addr_location *al)
{
	struct perf_sample *sample = iter->sample;
	struct mem_info *mi;

	mi = sample__resolve_mem(sample, al);
	if (mi == NULL)
		return -ENOMEM;

	iter->priv = mi;
	return 0;
}

static int
iter_add_single_mem_entry(struct hist_entry_iter *iter, struct addr_location *al)
{
	u64 cost;
	struct mem_info *mi = iter->priv;
	struct hists *hists = evsel__hists(iter->evsel);
	struct hist_entry *he;

	if (mi == NULL)
		return -EINVAL;

	cost = iter->sample->weight;
	if (!cost)
		cost = 1;

	/*
	 * must pass period=weight in order to get the correct
	 * sorting from hists__collapse_resort() which is solely
	 * based on periods. We want sorting to be done on nr_events * weight
	 * and this is indirectly achieved by passing period=weight here
	 * and the he_stat__add_period() function.
	 */
	he = __hists__add_entry(hists, al, iter->parent, NULL, mi,
				cost, cost, 0, true);
	if (!he)
		return -ENOMEM;

	iter->he = he;
	return 0;
}
static int
iter_finish_mem_entry(struct hist_entry_iter *iter,
		      struct addr_location *al __maybe_unused)
{
	struct perf_evsel *evsel = iter->evsel;
	struct hists *hists = evsel__hists(evsel);
	struct hist_entry *he = iter->he;
	int err = -EINVAL;

	if (he == NULL)
		goto out;

	hists__inc_nr_samples(hists, he->filtered);

	err = hist_entry__append_callchain(he, iter->sample);

out:
	/*
	 * We don't need to free iter->priv (mem_info) here since
	 * the mem info was either already freed in add_hist_entry() or
	 * passed to a new hist entry by hist_entry__new().
	 */
	iter->priv = NULL;

	iter->he = NULL;
	return err;
}
static int
iter_prepare_branch_entry(struct hist_entry_iter *iter, struct addr_location *al)
{
	struct branch_info *bi;
	struct perf_sample *sample = iter->sample;

	bi = sample__resolve_bstack(sample, al);
	if (!bi)
		return -ENOMEM;

	iter->curr = 0;
	iter->total = sample->branch_stack->nr;

	iter->priv = bi;
	return 0;
}
static int
iter_add_single_branch_entry(struct hist_entry_iter *iter,
			     struct addr_location *al __maybe_unused)
{
	/* to avoid calling callback function */
	iter->he = NULL;

	return 0;
}
static int
iter_next_branch_entry(struct hist_entry_iter *iter, struct addr_location *al)
{
	struct branch_info *bi = iter->priv;
	int i = iter->curr;

	if (bi == NULL)
		return 0;

	if (iter->curr >= iter->total)
		return 0;

	al->map = bi[i].to.map;
	al->sym = bi[i].to.sym;
	al->addr = bi[i].to.addr;

	return 1;
}
static int
iter_add_next_branch_entry(struct hist_entry_iter *iter, struct addr_location *al)
{
	struct branch_info *bi;
	struct perf_evsel *evsel = iter->evsel;
	struct hists *hists = evsel__hists(evsel);
	struct hist_entry *he = NULL;
	int i = iter->curr;
	int err = 0;

	bi = iter->priv;

	if (iter->hide_unresolved && !(bi[i].from.sym && bi[i].to.sym))
		goto out;

	/*
	 * The report shows the percentage of total branches captured
	 * and not events sampled. Thus we use a pseudo period of 1.
	 */
	he = __hists__add_entry(hists, al, iter->parent, &bi[i], NULL,
				1, 1, 0, true);
	if (he == NULL)
		return -ENOMEM;

	hists__inc_nr_samples(hists, he->filtered);

out:
	iter->he = he;
	iter->curr++;
	return err;
}
static int
iter_finish_branch_entry(struct hist_entry_iter *iter,
			 struct addr_location *al __maybe_unused)
{
	zfree(&iter->priv);
	iter->he = NULL;

	return iter->curr >= iter->total ? 0 : -1;
}
static int
iter_prepare_normal_entry(struct hist_entry_iter *iter __maybe_unused,
			  struct addr_location *al __maybe_unused)
{
	return 0;
}
static int
iter_add_single_normal_entry(struct hist_entry_iter *iter, struct addr_location *al)
{
	struct perf_evsel *evsel = iter->evsel;
	struct perf_sample *sample = iter->sample;
	struct hist_entry *he;

	he = __hists__add_entry(evsel__hists(evsel), al, iter->parent, NULL, NULL,
				sample->period, sample->weight,
				sample->transaction, true);
	if (he == NULL)
		return -ENOMEM;

	iter->he = he;
	return 0;
}
static int
iter_finish_normal_entry(struct hist_entry_iter *iter,
			 struct addr_location *al __maybe_unused)
{
	struct hist_entry *he = iter->he;
	struct perf_evsel *evsel = iter->evsel;
	struct perf_sample *sample = iter->sample;

	if (he == NULL)
		return 0;

	iter->he = NULL;

	hists__inc_nr_samples(evsel__hists(evsel), he->filtered);

	return hist_entry__append_callchain(he, sample);
}
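
/*
 * Cumulative (--children) mode: besides the entry for the sample itself,
 * an entry is accumulated for every caller in its callchain, so that a
 * function's overhead includes the overhead of its callees.
 */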
static int
iter_prepare_cumulative_entry(struct hist_entry_iter *iter,
			      struct addr_location *al __maybe_unused)
{
	struct hist_entry **he_cache;

	callchain_cursor_commit(&callchain_cursor);

	/*
	 * This is for detecting cycles or recursions so that they're
	 * accumulated only once, preventing entries from exceeding
	 * 100% overhead.
	 */
	he_cache = malloc(sizeof(*he_cache) * (PERF_MAX_STACK_DEPTH + 1));
	if (he_cache == NULL)
		return -ENOMEM;

	iter->priv = he_cache;
	iter->curr = 0;

	return 0;
}
static int
iter_add_single_cumulative_entry(struct hist_entry_iter *iter,
				 struct addr_location *al)
{
	struct perf_evsel *evsel = iter->evsel;
	struct hists *hists = evsel__hists(evsel);
	struct perf_sample *sample = iter->sample;
	struct hist_entry **he_cache = iter->priv;
	struct hist_entry *he;
	int err = 0;

	he = __hists__add_entry(hists, al, iter->parent, NULL, NULL,
				sample->period, sample->weight,
				sample->transaction, true);
	if (he == NULL)
		return -ENOMEM;

	iter->he = he;
	he_cache[iter->curr++] = he;

	hist_entry__append_callchain(he, sample);

	/*
	 * We need to re-initialize the cursor since callchain_append()
	 * advanced the cursor to the end.
	 */
	callchain_cursor_commit(&callchain_cursor);

	hists__inc_nr_samples(hists, he->filtered);

	return err;
}
static int
iter_next_cumulative_entry(struct hist_entry_iter *iter,
			   struct addr_location *al)
{
	struct callchain_cursor_node *node;

	node = callchain_cursor_current(&callchain_cursor);
	if (node == NULL)
		return 0;

	return fill_callchain_info(al, node, iter->hide_unresolved);
}
static int
iter_add_next_cumulative_entry(struct hist_entry_iter *iter,
			       struct addr_location *al)
{
	struct perf_evsel *evsel = iter->evsel;
	struct perf_sample *sample = iter->sample;
	struct hist_entry **he_cache = iter->priv;
	struct hist_entry *he;
	struct hist_entry he_tmp = {
		.cpu = al->cpu,
		.thread = al->thread,
		.comm = thread__comm(al->thread),
		.ip = al->addr,
		.ms = {
			.map = al->map,
			.sym = al->sym,
		},
		.parent = iter->parent,
	};
	int i;
	struct callchain_cursor cursor;

	callchain_cursor_snapshot(&cursor, &callchain_cursor);

	callchain_cursor_advance(&callchain_cursor);

	/*
	 * Check if there are duplicate entries in the callchain.
	 * It's possible that it has cycles or recursive calls.
	 */
	for (i = 0; i < iter->curr; i++) {
		if (hist_entry__cmp(he_cache[i], &he_tmp) == 0) {
			/* to avoid calling callback function */
			iter->he = NULL;
			return 0;
		}
	}

	he = __hists__add_entry(evsel__hists(evsel), al, iter->parent, NULL, NULL,
				sample->period, sample->weight,
				sample->transaction, false);
	if (he == NULL)
		return -ENOMEM;

	iter->he = he;
	he_cache[iter->curr++] = he;

	if (symbol_conf.use_callchain)
		callchain_append(he->callchain, &cursor, sample->period);
	return 0;
}
static int
iter_finish_cumulative_entry(struct hist_entry_iter *iter,
			     struct addr_location *al __maybe_unused)
{
	zfree(&iter->priv);
	iter->he = NULL;

	return 0;
}
const struct hist_iter_ops hist_iter_mem = {
	.prepare_entry		= iter_prepare_mem_entry,
	.add_single_entry	= iter_add_single_mem_entry,
	.next_entry		= iter_next_nop_entry,
	.add_next_entry		= iter_add_next_nop_entry,
	.finish_entry		= iter_finish_mem_entry,
};

const struct hist_iter_ops hist_iter_branch = {
	.prepare_entry		= iter_prepare_branch_entry,
	.add_single_entry	= iter_add_single_branch_entry,
	.next_entry		= iter_next_branch_entry,
	.add_next_entry		= iter_add_next_branch_entry,
	.finish_entry		= iter_finish_branch_entry,
};

const struct hist_iter_ops hist_iter_normal = {
	.prepare_entry		= iter_prepare_normal_entry,
	.add_single_entry	= iter_add_single_normal_entry,
	.next_entry		= iter_next_nop_entry,
	.add_next_entry		= iter_add_next_nop_entry,
	.finish_entry		= iter_finish_normal_entry,
};

const struct hist_iter_ops hist_iter_cumulative = {
	.prepare_entry		= iter_prepare_cumulative_entry,
	.add_single_entry	= iter_add_single_cumulative_entry,
	.next_entry		= iter_next_cumulative_entry,
	.add_next_entry		= iter_add_next_cumulative_entry,
	.finish_entry		= iter_finish_cumulative_entry,
};
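
/*
 * Drive one sample through the iterator ops: prepare_entry, then
 * add_single_entry, then add_next_entry for each location produced by
 * next_entry (branch stack or callchain entries), and finish_entry for
 * cleanup.  The optional add_entry_cb runs for every entry added.
 */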
int hist_entry_iter__add(struct hist_entry_iter *iter, struct addr_location *al,
			 struct perf_evsel *evsel, struct perf_sample *sample,
			 int max_stack_depth, void *arg)
{
	int err, err2;

	err = sample__resolve_callchain(sample, &iter->parent, evsel, al,
					max_stack_depth);
	if (err)
		return err;

	iter->evsel = evsel;
	iter->sample = sample;

	err = iter->ops->prepare_entry(iter, al);
	if (err)
		goto out;

	err = iter->ops->add_single_entry(iter, al);
	if (err)
		goto out;

	if (iter->he && iter->add_entry_cb) {
		err = iter->add_entry_cb(iter, al, true, arg);
		if (err)
			goto out;
	}

	while (iter->ops->next_entry(iter, al)) {
		err = iter->ops->add_next_entry(iter, al);
		if (err)
			break;

		if (iter->he && iter->add_entry_cb) {
			err = iter->add_entry_cb(iter, al, false, arg);
			if (err)
				goto out;
		}
	}

out:
	err2 = iter->ops->finish_entry(iter, al);
	if (!err)
		err = err2;

	return err;
}
int64_t
hist_entry__cmp(struct hist_entry *left, struct hist_entry *right)
{
	struct perf_hpp_fmt *fmt;
	int64_t cmp = 0;

	perf_hpp__for_each_sort_list(fmt) {
		if (perf_hpp__should_skip(fmt))
			continue;

		cmp = fmt->cmp(left, right);
		if (cmp)
			break;
	}

	return cmp;
}
int64_t
hist_entry__collapse(struct hist_entry *left, struct hist_entry *right)
{
	struct perf_hpp_fmt *fmt;
	int64_t cmp = 0;

	perf_hpp__for_each_sort_list(fmt) {
		if (perf_hpp__should_skip(fmt))
			continue;

		cmp = fmt->collapse(left, right);
		if (cmp)
			break;
	}

	return cmp;
}
void hist_entry__free(struct hist_entry *he)
{
	zfree(&he->branch_info);
	zfree(&he->mem_info);
	zfree(&he->stat_acc);
	free_srcline(he->srcline);
	free_callchain(he->callchain);
	free(he);
}
/*
 * collapse the histogram
 */
static bool hists__collapse_insert_entry(struct hists *hists __maybe_unused,
					 struct rb_root *root,
					 struct hist_entry *he)
{
	struct rb_node **p = &root->rb_node;
	struct rb_node *parent = NULL;
	struct hist_entry *iter;
	int64_t cmp;

	while (*p != NULL) {
		parent = *p;
		iter = rb_entry(parent, struct hist_entry, rb_node_in);

		cmp = hist_entry__collapse(iter, he);

		if (!cmp) {
			he_stat__add_stat(&iter->stat, &he->stat);
			if (symbol_conf.cumulate_callchain)
				he_stat__add_stat(iter->stat_acc, he->stat_acc);

			if (symbol_conf.use_callchain) {
				callchain_cursor_reset(&callchain_cursor);
				callchain_merge(&callchain_cursor,
						iter->callchain,
						he->callchain);
			}
			hist_entry__free(he);
			return false;
		}

		if (cmp < 0)
			p = &(*p)->rb_left;
		else
			p = &(*p)->rb_right;
	}

	rb_link_node(&he->rb_node_in, parent, p);
	rb_insert_color(&he->rb_node_in, root);
	return true;
}
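
/*
 * hists->entries_in_array[] holds two input trees so that a 'live' tool
 * can keep adding samples to one tree while the other is being collapsed
 * and resorted; this hands out the current tree and rotates to the next.
 */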
static struct rb_root *hists__get_rotate_entries_in(struct hists *hists)
{
	struct rb_root *root;

	pthread_mutex_lock(&hists->lock);

	root = hists->entries_in;
	if (++hists->entries_in > &hists->entries_in_array[1])
		hists->entries_in = &hists->entries_in_array[0];

	pthread_mutex_unlock(&hists->lock);

	return root;
}
static void hists__apply_filters(struct hists *hists, struct hist_entry *he)
{
	hists__filter_entry_by_dso(hists, he);
	hists__filter_entry_by_thread(hists, he);
	hists__filter_entry_by_symbol(hists, he);
}
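
/*
 * Drain the current input tree into entries_collapsed, merging entries
 * that are equal under the collapse keys.
 */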
void hists__collapse_resort(struct hists *hists, struct ui_progress *prog)
{
	struct rb_root *root;
	struct rb_node *next;
	struct hist_entry *n;

	if (!sort__need_collapse)
		return;

	hists->nr_entries = 0;

	root = hists__get_rotate_entries_in(hists);

	next = rb_first(root);

	while (next) {
		if (session_done())
			break;
		n = rb_entry(next, struct hist_entry, rb_node_in);
		next = rb_next(&n->rb_node_in);

		rb_erase(&n->rb_node_in, root);
		if (hists__collapse_insert_entry(hists, &hists->entries_collapsed, n)) {
			/*
			 * If it wasn't combined with one of the entries already
			 * collapsed, we need to apply the filters that may have
			 * been set by, say, the hist_browser.
			 */
			hists__apply_filters(hists, n);
		}
		if (prog)
			ui_progress__update(prog, 1);
	}
}
static int hist_entry__sort(struct hist_entry *a, struct hist_entry *b)
{
	struct perf_hpp_fmt *fmt;
	int64_t cmp = 0;

	perf_hpp__for_each_sort_list(fmt) {
		if (perf_hpp__should_skip(fmt))
			continue;

		cmp = fmt->sort(a, b);
		if (cmp)
			break;
	}

	return cmp;
}
static void hists__reset_filter_stats(struct hists *hists)
{
	hists->nr_non_filtered_entries = 0;
	hists->stats.total_non_filtered_period = 0;
}

void hists__reset_stats(struct hists *hists)
{
	hists->nr_entries = 0;
	hists->stats.total_period = 0;

	hists__reset_filter_stats(hists);
}
static void hists__inc_filter_stats(struct hists *hists, struct hist_entry *h)
{
	hists->nr_non_filtered_entries++;
	hists->stats.total_non_filtered_period += h->stat.period;
}

void hists__inc_stats(struct hists *hists, struct hist_entry *h)
{
	if (!h->filtered)
		hists__inc_filter_stats(hists, h);

	hists->nr_entries++;
	hists->stats.total_period += h->stat.period;
}
static void __hists__insert_output_entry(struct rb_root *entries,
					 struct hist_entry *he,
					 u64 min_callchain_hits)
{
	struct rb_node **p = &entries->rb_node;
	struct rb_node *parent = NULL;
	struct hist_entry *iter;

	if (symbol_conf.use_callchain)
		callchain_param.sort(&he->sorted_chain, he->callchain,
				     min_callchain_hits, &callchain_param);

	while (*p != NULL) {
		parent = *p;
		iter = rb_entry(parent, struct hist_entry, rb_node);

		if (hist_entry__sort(he, iter) > 0)
			p = &(*p)->rb_left;
		else
			p = &(*p)->rb_right;
	}

	rb_link_node(&he->rb_node, parent, p);
	rb_insert_color(&he->rb_node, entries);
}
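
/*
 * Rebuild hists->entries in display order, recomputing the stats and
 * column widths from scratch.
 */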
void hists__output_resort(struct hists *hists, struct ui_progress *prog)
{
	struct rb_root *root;
	struct rb_node *next;
	struct hist_entry *n;
	u64 min_callchain_hits;

	min_callchain_hits = hists->stats.total_period * (callchain_param.min_percent / 100);

	if (sort__need_collapse)
		root = &hists->entries_collapsed;
	else
		root = hists->entries_in;

	next = rb_first(root);
	hists->entries = RB_ROOT;

	hists__reset_stats(hists);
	hists__reset_col_len(hists);

	while (next) {
		n = rb_entry(next, struct hist_entry, rb_node_in);
		next = rb_next(&n->rb_node_in);

		__hists__insert_output_entry(&hists->entries, n, min_callchain_hits);
		hists__inc_stats(hists, n);

		if (!n->filtered)
			hists__calc_col_len(hists, n);

		if (prog)
			ui_progress__update(prog, 1);
	}
}
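
/*
 * Clear @filter from @h and, once no filter bits remain, fold the entry
 * back into the non-filtered stats and column width bookkeeping.
 */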
static void hists__remove_entry_filter(struct hists *hists, struct hist_entry *h,
				       enum hist_filter filter)
{
	h->filtered &= ~(1 << filter);
	if (h->filtered)
		return;

	/* force fold unfiltered entry for simplicity */
	h->ms.unfolded = false;
	h->row_offset = 0;

	hists->stats.nr_non_filtered_samples += h->stat.nr_events;

	hists__inc_filter_stats(hists, h);
	hists__calc_col_len(hists, h);
}
static bool hists__filter_entry_by_dso(struct hists *hists,
				       struct hist_entry *he)
{
	if (hists->dso_filter != NULL &&
	    (he->ms.map == NULL || he->ms.map->dso != hists->dso_filter)) {
		he->filtered |= (1 << HIST_FILTER__DSO);
		return true;
	}

	return false;
}
void hists__filter_by_dso(struct hists *hists)
{
	struct rb_node *nd;

	hists->stats.nr_non_filtered_samples = 0;

	hists__reset_filter_stats(hists);
	hists__reset_col_len(hists);

	for (nd = rb_first(&hists->entries); nd; nd = rb_next(nd)) {
		struct hist_entry *h = rb_entry(nd, struct hist_entry, rb_node);

		if (symbol_conf.exclude_other && !h->parent)
			continue;

		if (hists__filter_entry_by_dso(hists, h))
			continue;

		hists__remove_entry_filter(hists, h, HIST_FILTER__DSO);
	}
}
static bool hists__filter_entry_by_thread(struct hists *hists,
					  struct hist_entry *he)
{
	if (hists->thread_filter != NULL &&
	    he->thread != hists->thread_filter) {
		he->filtered |= (1 << HIST_FILTER__THREAD);
		return true;
	}

	return false;
}
void hists__filter_by_thread(struct hists *hists)
{
	struct rb_node *nd;

	hists->stats.nr_non_filtered_samples = 0;

	hists__reset_filter_stats(hists);
	hists__reset_col_len(hists);

	for (nd = rb_first(&hists->entries); nd; nd = rb_next(nd)) {
		struct hist_entry *h = rb_entry(nd, struct hist_entry, rb_node);

		if (hists__filter_entry_by_thread(hists, h))
			continue;

		hists__remove_entry_filter(hists, h, HIST_FILTER__THREAD);
	}
}
static bool hists__filter_entry_by_symbol(struct hists *hists,
					  struct hist_entry *he)
{
	if (hists->symbol_filter_str != NULL &&
	    (!he->ms.sym || strstr(he->ms.sym->name,
				   hists->symbol_filter_str) == NULL)) {
		he->filtered |= (1 << HIST_FILTER__SYMBOL);
		return true;
	}

	return false;
}
void hists__filter_by_symbol(struct hists *hists)
{
	struct rb_node *nd;

	hists->stats.nr_non_filtered_samples = 0;

	hists__reset_filter_stats(hists);
	hists__reset_col_len(hists);

	for (nd = rb_first(&hists->entries); nd; nd = rb_next(nd)) {
		struct hist_entry *h = rb_entry(nd, struct hist_entry, rb_node);

		if (hists__filter_entry_by_symbol(hists, h))
			continue;

		hists__remove_entry_filter(hists, h, HIST_FILTER__SYMBOL);
	}
}
void events_stats__inc(struct events_stats *stats, u32 type)
{
	++stats->nr_events[0];
	++stats->nr_events[type];
}
void hists__inc_nr_events(struct hists *hists, u32 type)
{
	events_stats__inc(&hists->stats, type);
}
void hists__inc_nr_samples(struct hists *hists, bool filtered)
{
	events_stats__inc(&hists->stats, PERF_RECORD_SAMPLE);
	if (!filtered)
		hists->stats.nr_non_filtered_samples++;
}
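
/*
 * Add a zeroed copy of @pair to @hists so that entries existing only in
 * the other hists (e.g. when comparing two data files) have a bucket to
 * pair with.
 */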
static struct hist_entry *hists__add_dummy_entry(struct hists *hists,
						 struct hist_entry *pair)
{
	struct rb_root *root;
	struct rb_node **p;
	struct rb_node *parent = NULL;
	struct hist_entry *he;
	int64_t cmp;

	if (sort__need_collapse)
		root = &hists->entries_collapsed;
	else
		root = hists->entries_in;

	p = &root->rb_node;

	while (*p != NULL) {
		parent = *p;
		he = rb_entry(parent, struct hist_entry, rb_node_in);

		cmp = hist_entry__collapse(he, pair);

		if (!cmp)
			goto out;

		if (cmp < 0)
			p = &(*p)->rb_left;
		else
			p = &(*p)->rb_right;
	}

	he = hist_entry__new(pair, true);
	if (he) {
		memset(&he->stat, 0, sizeof(he->stat));
		he->hists = hists;
		rb_link_node(&he->rb_node_in, parent, p);
		rb_insert_color(&he->rb_node_in, root);
		hists__inc_stats(hists, he);
	}
out:
	return he;
}
static struct hist_entry *hists__find_entry(struct hists *hists,
					    struct hist_entry *he)
{
	struct rb_node *n;

	if (sort__need_collapse)
		n = hists->entries_collapsed.rb_node;
	else
		n = hists->entries_in->rb_node;

	while (n) {
		struct hist_entry *iter = rb_entry(n, struct hist_entry, rb_node_in);
		int64_t cmp = hist_entry__collapse(iter, he);

		if (cmp < 0)
			n = n->rb_left;
		else if (cmp > 0)
			n = n->rb_right;
		else
			return iter;
	}

	return NULL;
}
/*
 * Look for pairs to link to the leader buckets (hist_entries):
 */
void hists__match(struct hists *leader, struct hists *other)
{
	struct rb_root *root;
	struct rb_node *nd;
	struct hist_entry *pos, *pair;

	if (sort__need_collapse)
		root = &leader->entries_collapsed;
	else
		root = leader->entries_in;

	for (nd = rb_first(root); nd; nd = rb_next(nd)) {
		pos  = rb_entry(nd, struct hist_entry, rb_node_in);
		pair = hists__find_entry(other, pos);

		if (pair)
			hist_entry__add_pair(pair, pos);
	}
}
/*
 * Look for entries in the other hists that are not present in the leader; if
 * we find them, just add a dummy entry on the leader hists, with period=0,
 * nr_events=0, to serve as the list header.
 */
int hists__link(struct hists *leader, struct hists *other)
{
	struct rb_root *root;
	struct rb_node *nd;
	struct hist_entry *pos, *pair;

	if (sort__need_collapse)
		root = &other->entries_collapsed;
	else
		root = other->entries_in;

	for (nd = rb_first(root); nd; nd = rb_next(nd)) {
		pos = rb_entry(nd, struct hist_entry, rb_node_in);

		if (!hist_entry__has_pairs(pos)) {
			pair = hists__add_dummy_entry(leader, pos);
			if (pair == NULL)
				return -1;
			hist_entry__add_pair(pos, pair);
		}
	}

	return 0;
}
size_t perf_evlist__fprintf_nr_events(struct perf_evlist *evlist, FILE *fp)
{
	struct perf_evsel *pos;
	size_t ret = 0;

	evlist__for_each(evlist, pos) {
		ret += fprintf(fp, "%s stats:\n", perf_evsel__name(pos));
		ret += events_stats__fprintf(&evsel__hists(pos)->stats, fp);
	}

	return ret;
}
u64 hists__total_period(struct hists *hists)
{
	return symbol_conf.filter_relative ? hists->stats.total_non_filtered_period :
		hists->stats.total_period;
}
int parse_filter_percentage(const struct option *opt __maybe_unused,
			    const char *arg, int unset __maybe_unused)
{
	if (!strcmp(arg, "relative"))
		symbol_conf.filter_relative = true;
	else if (!strcmp(arg, "absolute"))
		symbol_conf.filter_relative = false;
	else
		return -1;

	return 0;
}
int perf_hist_config(const char *var, const char *value)
{
	if (!strcmp(var, "hist.percentage"))
		return parse_filter_percentage(NULL, value, 0);

	return 0;
}
static int hists_evsel__init(struct perf_evsel *evsel)
{
	struct hists *hists = evsel__hists(evsel);

	memset(hists, 0, sizeof(*hists));
	hists->entries_in_array[0] = hists->entries_in_array[1] = RB_ROOT;
	hists->entries_in = &hists->entries_in_array[0];
	hists->entries_collapsed = RB_ROOT;
	hists->entries = RB_ROOT;
	pthread_mutex_init(&hists->lock, NULL);
	return 0;
}
/*
 * XXX We probably need a hists_evsel__exit() to free the hist_entries
 * stored in the rbtree...
 */
int hists__init(void)
{
	int err = perf_evsel__object_config(sizeof(struct hists_evsel),
					    hists_evsel__init, NULL);
	if (err)
		fputs("FATAL ERROR: Couldn't setup hists class\n", stderr);

	return err;
}