/* tools/perf/util/hist.c */
#include "util.h"
#include "build-id.h"
#include "hist.h"
#include "session.h"
#include "sort.h"
#include "evsel.h"
#include "annotate.h"
#include <math.h>

static bool hists__filter_entry_by_dso(struct hists *hists,
				       struct hist_entry *he);
static bool hists__filter_entry_by_thread(struct hists *hists,
					  struct hist_entry *he);
static bool hists__filter_entry_by_symbol(struct hists *hists,
					  struct hist_entry *he);

struct callchain_param callchain_param = {
	.mode	= CHAIN_GRAPH_REL,
	.min_percent = 0.5,
	.order  = ORDER_CALLEE,
	.key	= CCKEY_FUNCTION
};

u16 hists__col_len(struct hists *hists, enum hist_column col)
{
	return hists->col_len[col];
}

void hists__set_col_len(struct hists *hists, enum hist_column col, u16 len)
{
	hists->col_len[col] = len;
}

bool hists__new_col_len(struct hists *hists, enum hist_column col, u16 len)
{
	if (len > hists__col_len(hists, col)) {
		hists__set_col_len(hists, col, len);
		return true;
	}
	return false;
}

void hists__reset_col_len(struct hists *hists)
{
	enum hist_column col;

	for (col = 0; col < HISTC_NR_COLS; ++col)
		hists__set_col_len(hists, col, 0);
}

static void hists__set_unres_dso_col_len(struct hists *hists, int dso)
{
	const unsigned int unresolved_col_width = BITS_PER_LONG / 4;

	if (hists__col_len(hists, dso) < unresolved_col_width &&
	    !symbol_conf.col_width_list_str && !symbol_conf.field_sep &&
	    !symbol_conf.dso_list)
		hists__set_col_len(hists, dso, unresolved_col_width);
}

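/*
 * Column widths are kept per struct hists and are only ever widened by
 * hists__new_col_len(), so the output code can size each column to the
 * longest value seen so far.
 */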
void hists__calc_col_len(struct hists *hists, struct hist_entry *h)
{
	const unsigned int unresolved_col_width = BITS_PER_LONG / 4;
	int symlen;
	u16 len;

	/*
	 * +4 accounts for '[x] ' priv level info
	 * +2 accounts for 0x prefix on raw addresses
	 * +3 accounts for ' y ' symtab origin info
	 */
	if (h->ms.sym) {
		symlen = h->ms.sym->namelen + 4;
		if (verbose)
			symlen += BITS_PER_LONG / 4 + 2 + 3;
		hists__new_col_len(hists, HISTC_SYMBOL, symlen);
	} else {
		symlen = unresolved_col_width + 4 + 2;
		hists__new_col_len(hists, HISTC_SYMBOL, symlen);
		hists__set_unres_dso_col_len(hists, HISTC_DSO);
	}

	len = thread__comm_len(h->thread);
	if (hists__new_col_len(hists, HISTC_COMM, len))
		hists__set_col_len(hists, HISTC_THREAD, len + 6);

	if (h->ms.map) {
		len = dso__name_len(h->ms.map->dso);
		hists__new_col_len(hists, HISTC_DSO, len);
	}

	if (h->parent)
		hists__new_col_len(hists, HISTC_PARENT, h->parent->namelen);

	if (h->branch_info) {
		if (h->branch_info->from.sym) {
			symlen = (int)h->branch_info->from.sym->namelen + 4;
			if (verbose)
				symlen += BITS_PER_LONG / 4 + 2 + 3;
			hists__new_col_len(hists, HISTC_SYMBOL_FROM, symlen);

			symlen = dso__name_len(h->branch_info->from.map->dso);
			hists__new_col_len(hists, HISTC_DSO_FROM, symlen);
		} else {
			symlen = unresolved_col_width + 4 + 2;
			hists__new_col_len(hists, HISTC_SYMBOL_FROM, symlen);
			hists__set_unres_dso_col_len(hists, HISTC_DSO_FROM);
		}

		if (h->branch_info->to.sym) {
			symlen = (int)h->branch_info->to.sym->namelen + 4;
			if (verbose)
				symlen += BITS_PER_LONG / 4 + 2 + 3;
			hists__new_col_len(hists, HISTC_SYMBOL_TO, symlen);

			symlen = dso__name_len(h->branch_info->to.map->dso);
			hists__new_col_len(hists, HISTC_DSO_TO, symlen);
		} else {
			symlen = unresolved_col_width + 4 + 2;
			hists__new_col_len(hists, HISTC_SYMBOL_TO, symlen);
			hists__set_unres_dso_col_len(hists, HISTC_DSO_TO);
		}
	}

	if (h->mem_info) {
		if (h->mem_info->daddr.sym) {
			symlen = (int)h->mem_info->daddr.sym->namelen + 4
			       + unresolved_col_width + 2;
			hists__new_col_len(hists, HISTC_MEM_DADDR_SYMBOL,
					   symlen);
			hists__new_col_len(hists, HISTC_MEM_DCACHELINE,
					   symlen + 1);
		} else {
			symlen = unresolved_col_width + 4 + 2;
			hists__new_col_len(hists, HISTC_MEM_DADDR_SYMBOL,
					   symlen);
		}
		if (h->mem_info->daddr.map) {
			symlen = dso__name_len(h->mem_info->daddr.map->dso);
			hists__new_col_len(hists, HISTC_MEM_DADDR_DSO,
					   symlen);
		} else {
			symlen = unresolved_col_width + 4 + 2;
			hists__set_unres_dso_col_len(hists, HISTC_MEM_DADDR_DSO);
		}
	} else {
		symlen = unresolved_col_width + 4 + 2;
		hists__new_col_len(hists, HISTC_MEM_DADDR_SYMBOL, symlen);
		hists__set_unres_dso_col_len(hists, HISTC_MEM_DADDR_DSO);
	}

	hists__new_col_len(hists, HISTC_MEM_LOCKED, 6);
	hists__new_col_len(hists, HISTC_MEM_TLB, 22);
	hists__new_col_len(hists, HISTC_MEM_SNOOP, 12);
	hists__new_col_len(hists, HISTC_MEM_LVL, 21 + 3);
	hists__new_col_len(hists, HISTC_LOCAL_WEIGHT, 12);
	hists__new_col_len(hists, HISTC_GLOBAL_WEIGHT, 12);

	if (h->transaction)
		hists__new_col_len(hists, HISTC_TRANSACTION,
				   hist_entry__transaction_len());
}

void hists__output_recalc_col_len(struct hists *hists, int max_rows)
{
	struct rb_node *next = rb_first(&hists->entries);
	struct hist_entry *n;
	int row = 0;

	hists__reset_col_len(hists);

	while (next && row++ < max_rows) {
		n = rb_entry(next, struct hist_entry, rb_node);
		if (!n->filtered)
			hists__calc_col_len(hists, n);
		next = rb_next(&n->rb_node);
	}
}

static void he_stat__add_cpumode_period(struct he_stat *he_stat,
					unsigned int cpumode, u64 period)
{
	switch (cpumode) {
	case PERF_RECORD_MISC_KERNEL:
		he_stat->period_sys += period;
		break;
	case PERF_RECORD_MISC_USER:
		he_stat->period_us += period;
		break;
	case PERF_RECORD_MISC_GUEST_KERNEL:
		he_stat->period_guest_sys += period;
		break;
	case PERF_RECORD_MISC_GUEST_USER:
		he_stat->period_guest_us += period;
		break;
	default:
		break;
	}
}

static void he_stat__add_period(struct he_stat *he_stat, u64 period,
				u64 weight)
{
	he_stat->period		+= period;
	he_stat->weight		+= weight;
	he_stat->nr_events	+= 1;
}

static void he_stat__add_stat(struct he_stat *dest, struct he_stat *src)
{
	dest->period		+= src->period;
	dest->period_sys	+= src->period_sys;
	dest->period_us		+= src->period_us;
	dest->period_guest_sys	+= src->period_guest_sys;
	dest->period_guest_us	+= src->period_guest_us;
	dest->nr_events		+= src->nr_events;
	dest->weight		+= src->weight;
}

static void he_stat__decay(struct he_stat *he_stat)
{
	he_stat->period = (he_stat->period * 7) / 8;
	he_stat->nr_events = (he_stat->nr_events * 7) / 8;
	/* XXX need decay for weight too? */
}

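/*
 * Each decay pass scales an entry's period and event count by 7/8 (see
 * he_stat__decay() above); callers such as the live 'perf top' UI can
 * repeatedly age the histogram and drop entries once their period has
 * decayed to zero.
 */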
static bool hists__decay_entry(struct hists *hists, struct hist_entry *he)
{
	u64 prev_period = he->stat.period;
	u64 diff;

	if (prev_period == 0)
		return true;

	he_stat__decay(&he->stat);
	if (symbol_conf.cumulate_callchain)
		he_stat__decay(he->stat_acc);

	diff = prev_period - he->stat.period;

	hists->stats.total_period -= diff;
	if (!he->filtered)
		hists->stats.total_non_filtered_period -= diff;

	return he->stat.period == 0;
}

void hists__decay_entries(struct hists *hists, bool zap_user, bool zap_kernel)
{
	struct rb_node *next = rb_first(&hists->entries);
	struct hist_entry *n;

	while (next) {
		n = rb_entry(next, struct hist_entry, rb_node);
		next = rb_next(&n->rb_node);
		/*
		 * We may be annotating this, for instance, so keep it here in
		 * case it gets new samples; we'll eventually free it when the
		 * user stops browsing and it again gets fully decayed.
		 */
		if (((zap_user && n->level == '.') ||
		     (zap_kernel && n->level != '.') ||
		     hists__decay_entry(hists, n)) &&
		    !n->used) {
			rb_erase(&n->rb_node, &hists->entries);

			if (sort__need_collapse)
				rb_erase(&n->rb_node_in, &hists->entries_collapsed);

			--hists->nr_entries;
			if (!n->filtered)
				--hists->nr_non_filtered_entries;

			hist_entry__free(n);
		}
	}
}

/*
 * histogram, sorted on item, collects periods
 */

static struct hist_entry *hist_entry__new(struct hist_entry *template,
					  bool sample_self)
{
	size_t callchain_size = 0;
	struct hist_entry *he;

	if (symbol_conf.use_callchain || symbol_conf.cumulate_callchain)
		callchain_size = sizeof(struct callchain_root);

	he = zalloc(sizeof(*he) + callchain_size);

	if (he != NULL) {
		*he = *template;

		if (symbol_conf.cumulate_callchain) {
			he->stat_acc = malloc(sizeof(he->stat));
			if (he->stat_acc == NULL) {
				free(he);
				return NULL;
			}
			memcpy(he->stat_acc, &he->stat, sizeof(he->stat));
			if (!sample_self)
				memset(&he->stat, 0, sizeof(he->stat));
		}

		if (he->ms.map)
			he->ms.map->referenced = true;

		if (he->branch_info) {
			/*
			 * This branch info is (a part of) allocated from
			 * sample__resolve_bstack() and will be freed after
			 * adding new entries. So we need to save a copy.
			 */
			he->branch_info = malloc(sizeof(*he->branch_info));
			if (he->branch_info == NULL) {
				free(he->stat_acc);
				free(he);
				return NULL;
			}

			memcpy(he->branch_info, template->branch_info,
			       sizeof(*he->branch_info));

			if (he->branch_info->from.map)
				he->branch_info->from.map->referenced = true;
			if (he->branch_info->to.map)
				he->branch_info->to.map->referenced = true;
		}

		if (he->mem_info) {
			if (he->mem_info->iaddr.map)
				he->mem_info->iaddr.map->referenced = true;
			if (he->mem_info->daddr.map)
				he->mem_info->daddr.map->referenced = true;
		}

		if (symbol_conf.use_callchain)
			callchain_init(he->callchain);

		INIT_LIST_HEAD(&he->pairs.node);
	}

	return he;
}

static u8 symbol__parent_filter(const struct symbol *parent)
{
	if (symbol_conf.exclude_other && parent == NULL)
		return 1 << HIST_FILTER__PARENT;
	return 0;
}

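/*
 * add_hist_entry() keys the hists->entries_in rbtree by hist_entry__cmp();
 * a sample that compares equal to an existing entry has its period and
 * weight added to that entry instead of allocating a new one.
 */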
static struct hist_entry *add_hist_entry(struct hists *hists,
					 struct hist_entry *entry,
					 struct addr_location *al,
					 bool sample_self)
{
	struct rb_node **p;
	struct rb_node *parent = NULL;
	struct hist_entry *he;
	int64_t cmp;
	u64 period = entry->stat.period;
	u64 weight = entry->stat.weight;

	p = &hists->entries_in->rb_node;

	while (*p != NULL) {
		parent = *p;
		he = rb_entry(parent, struct hist_entry, rb_node_in);

		/*
		 * Make sure that it receives arguments in the same order as
		 * hist_entry__collapse() so that we can use an appropriate
		 * function when searching an entry regardless of which sort
		 * keys were used.
		 */
		cmp = hist_entry__cmp(he, entry);

		if (!cmp) {
			if (sample_self)
				he_stat__add_period(&he->stat, period, weight);
			if (symbol_conf.cumulate_callchain)
				he_stat__add_period(he->stat_acc, period, weight);

			/*
			 * This mem info was allocated from sample__resolve_mem
			 * and will not be used anymore.
			 */
			zfree(&entry->mem_info);

			/* If the map of an existing hist_entry has
			 * become out-of-date due to an exec() or
			 * similar, update it.  Otherwise we will
			 * mis-adjust symbol addresses when computing
			 * the history counter to increment.
			 */
			if (he->ms.map != entry->ms.map) {
				he->ms.map = entry->ms.map;
				if (he->ms.map)
					he->ms.map->referenced = true;
			}
			goto out;
		}

		if (cmp < 0)
			p = &(*p)->rb_left;
		else
			p = &(*p)->rb_right;
	}

	he = hist_entry__new(entry, sample_self);
	if (!he)
		return NULL;

	rb_link_node(&he->rb_node_in, parent, p);
	rb_insert_color(&he->rb_node_in, hists->entries_in);
out:
	if (sample_self)
		he_stat__add_cpumode_period(&he->stat, al->cpumode, period);
	if (symbol_conf.cumulate_callchain)
		he_stat__add_cpumode_period(he->stat_acc, al->cpumode, period);
	return he;
}

struct hist_entry *__hists__add_entry(struct hists *hists,
				      struct addr_location *al,
				      struct symbol *sym_parent,
				      struct branch_info *bi,
				      struct mem_info *mi,
				      u64 period, u64 weight, u64 transaction,
				      bool sample_self)
{
	struct hist_entry entry = {
		.thread	= al->thread,
		.comm = thread__comm(al->thread),
		.ms = {
			.map	= al->map,
			.sym	= al->sym,
		},
		.cpu	 = al->cpu,
		.cpumode = al->cpumode,
		.ip	 = al->addr,
		.level	 = al->level,
		.stat = {
			.nr_events = 1,
			.period	= period,
			.weight = weight,
		},
		.parent = sym_parent,
		.filtered = symbol__parent_filter(sym_parent) | al->filtered,
		.hists	= hists,
		.branch_info = bi,
		.mem_info = mi,
		.transaction = transaction,
	};

	return add_hist_entry(hists, &entry, al, sample_self);
}

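/*
 * The iter_*() callbacks below implement the hist_entry_iter protocol driven
 * by hist_entry_iter__add(): prepare_entry, add_single_entry, then a
 * next_entry/add_next_entry loop, and finally finish_entry.  Each sample
 * type (mem, branch, normal, cumulative) provides its own ops table.
 */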
static int
iter_next_nop_entry(struct hist_entry_iter *iter __maybe_unused,
		    struct addr_location *al __maybe_unused)
{
	return 0;
}

static int
iter_add_next_nop_entry(struct hist_entry_iter *iter __maybe_unused,
			struct addr_location *al __maybe_unused)
{
	return 0;
}

static int
iter_prepare_mem_entry(struct hist_entry_iter *iter, struct addr_location *al)
{
	struct perf_sample *sample = iter->sample;
	struct mem_info *mi;

	mi = sample__resolve_mem(sample, al);
	if (mi == NULL)
		return -ENOMEM;

	iter->priv = mi;
	return 0;
}

static int
iter_add_single_mem_entry(struct hist_entry_iter *iter, struct addr_location *al)
{
	u64 cost;
	struct mem_info *mi = iter->priv;
	struct hist_entry *he;

	if (mi == NULL)
		return -EINVAL;

	cost = iter->sample->weight;
	if (!cost)
		cost = 1;

	/*
	 * Must pass period=weight in order to get the correct sorting from
	 * hists__collapse_resort(), which is solely based on periods.  We
	 * want sorting to be done on nr_events * weight, and this is
	 * indirectly achieved by passing period=weight here and in the
	 * he_stat__add_period() function.
	 */
	he = __hists__add_entry(&iter->evsel->hists, al, iter->parent, NULL, mi,
				cost, cost, 0, true);
	if (!he)
		return -ENOMEM;

	iter->he = he;
	return 0;
}

static int
iter_finish_mem_entry(struct hist_entry_iter *iter,
		      struct addr_location *al __maybe_unused)
{
	struct perf_evsel *evsel = iter->evsel;
	struct hist_entry *he = iter->he;
	int err = -EINVAL;

	if (he == NULL)
		goto out;

	hists__inc_nr_samples(&evsel->hists, he->filtered);

	err = hist_entry__append_callchain(he, iter->sample);

out:
	/*
	 * We don't need to free iter->priv (mem_info) here since the mem info
	 * was either already freed in add_hist_entry() or passed to a new
	 * hist entry by hist_entry__new().
	 */
	iter->priv = NULL;

	iter->he = NULL;
	return err;
}

static int
iter_prepare_branch_entry(struct hist_entry_iter *iter, struct addr_location *al)
{
	struct branch_info *bi;
	struct perf_sample *sample = iter->sample;

	bi = sample__resolve_bstack(sample, al);
	if (!bi)
		return -ENOMEM;

	iter->curr = 0;
	iter->total = sample->branch_stack->nr;

	iter->priv = bi;
	return 0;
}

static int
iter_add_single_branch_entry(struct hist_entry_iter *iter __maybe_unused,
			     struct addr_location *al __maybe_unused)
{
	/* to avoid calling callback function */
	iter->he = NULL;

	return 0;
}

static int
iter_next_branch_entry(struct hist_entry_iter *iter, struct addr_location *al)
{
	struct branch_info *bi = iter->priv;
	int i = iter->curr;

	if (bi == NULL)
		return 0;

	if (iter->curr >= iter->total)
		return 0;

	al->map = bi[i].to.map;
	al->sym = bi[i].to.sym;
	al->addr = bi[i].to.addr;
	return 1;
}

static int
iter_add_next_branch_entry(struct hist_entry_iter *iter, struct addr_location *al)
{
	struct branch_info *bi;
	struct perf_evsel *evsel = iter->evsel;
	struct hist_entry *he = NULL;
	int i = iter->curr;
	int err = 0;

	bi = iter->priv;

	if (iter->hide_unresolved && !(bi[i].from.sym && bi[i].to.sym))
		goto out;

	/*
	 * The report shows the percentage of total branches captured
	 * and not events sampled. Thus we use a pseudo period of 1.
	 */
	he = __hists__add_entry(&evsel->hists, al, iter->parent, &bi[i], NULL,
				1, 1, 0, true);
	if (he == NULL)
		return -ENOMEM;

	hists__inc_nr_samples(&evsel->hists, he->filtered);

out:
	iter->he = he;
	iter->curr++;
	return err;
}

static int
iter_finish_branch_entry(struct hist_entry_iter *iter,
			 struct addr_location *al __maybe_unused)
{
	zfree(&iter->priv);
	iter->he = NULL;

	return iter->curr >= iter->total ? 0 : -1;
}

static int
iter_prepare_normal_entry(struct hist_entry_iter *iter __maybe_unused,
			  struct addr_location *al __maybe_unused)
{
	return 0;
}

static int
iter_add_single_normal_entry(struct hist_entry_iter *iter, struct addr_location *al)
{
	struct perf_evsel *evsel = iter->evsel;
	struct perf_sample *sample = iter->sample;
	struct hist_entry *he;

	he = __hists__add_entry(&evsel->hists, al, iter->parent, NULL, NULL,
				sample->period, sample->weight,
				sample->transaction, true);
	if (he == NULL)
		return -ENOMEM;

	iter->he = he;
	return 0;
}

static int
iter_finish_normal_entry(struct hist_entry_iter *iter,
			 struct addr_location *al __maybe_unused)
{
	struct hist_entry *he = iter->he;
	struct perf_evsel *evsel = iter->evsel;
	struct perf_sample *sample = iter->sample;

	if (he == NULL)
		return 0;

	iter->he = NULL;

	hists__inc_nr_samples(&evsel->hists, he->filtered);

	return hist_entry__append_callchain(he, sample);
}

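/*
 * The cumulative variant below also adds one entry per caller found in the
 * resolved callchain, so a symbol's "children" overhead can be accumulated
 * (this is what e.g. 'perf report --children' builds on, assuming that
 * option sets symbol_conf.cumulate_callchain).
 */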
static int
iter_prepare_cumulative_entry(struct hist_entry_iter *iter __maybe_unused,
			      struct addr_location *al __maybe_unused)
{
	struct hist_entry **he_cache;

	callchain_cursor_commit(&callchain_cursor);

	/*
	 * This is for detecting cycles or recursion so that they're
	 * accumulated only once, preventing entries from exceeding
	 * 100% overhead.
	 */
	he_cache = malloc(sizeof(*he_cache) * (PERF_MAX_STACK_DEPTH + 1));
	if (he_cache == NULL)
		return -ENOMEM;

	iter->priv = he_cache;
	iter->curr = 0;

	return 0;
}

static int
iter_add_single_cumulative_entry(struct hist_entry_iter *iter,
				 struct addr_location *al)
{
	struct perf_evsel *evsel = iter->evsel;
	struct perf_sample *sample = iter->sample;
	struct hist_entry **he_cache = iter->priv;
	struct hist_entry *he;
	int err = 0;

	he = __hists__add_entry(&evsel->hists, al, iter->parent, NULL, NULL,
				sample->period, sample->weight,
				sample->transaction, true);
	if (he == NULL)
		return -ENOMEM;

	iter->he = he;
	he_cache[iter->curr++] = he;

	callchain_append(he->callchain, &callchain_cursor, sample->period);

	/*
	 * We need to re-initialize the cursor since callchain_append()
	 * advanced the cursor to the end.
	 */
	callchain_cursor_commit(&callchain_cursor);

	hists__inc_nr_samples(&evsel->hists, he->filtered);

	return err;
}

static int
iter_next_cumulative_entry(struct hist_entry_iter *iter,
			   struct addr_location *al)
{
	struct callchain_cursor_node *node;

	node = callchain_cursor_current(&callchain_cursor);
	if (node == NULL)
		return 0;

	return fill_callchain_info(al, node, iter->hide_unresolved);
}

static int
iter_add_next_cumulative_entry(struct hist_entry_iter *iter,
			       struct addr_location *al)
{
	struct perf_evsel *evsel = iter->evsel;
	struct perf_sample *sample = iter->sample;
	struct hist_entry **he_cache = iter->priv;
	struct hist_entry *he;
	struct hist_entry he_tmp = {
		.cpu = al->cpu,
		.thread = al->thread,
		.comm = thread__comm(al->thread),
		.ip = al->addr,
		.ms = {
			.map = al->map,
			.sym = al->sym,
		},
		.parent = iter->parent,
	};
	int i;
	struct callchain_cursor cursor;

	callchain_cursor_snapshot(&cursor, &callchain_cursor);

	callchain_cursor_advance(&callchain_cursor);

	/*
	 * Check if there are duplicate entries in the callchain; it's
	 * possible that it has cycles or recursive calls.
	 */
	for (i = 0; i < iter->curr; i++) {
		if (hist_entry__cmp(he_cache[i], &he_tmp) == 0) {
			/* to avoid calling callback function */
			iter->he = NULL;
			return 0;
		}
	}

	he = __hists__add_entry(&evsel->hists, al, iter->parent, NULL, NULL,
				sample->period, sample->weight,
				sample->transaction, false);
	if (he == NULL)
		return -ENOMEM;

	iter->he = he;
	he_cache[iter->curr++] = he;

	callchain_append(he->callchain, &cursor, sample->period);
	return 0;
}

static int
iter_finish_cumulative_entry(struct hist_entry_iter *iter,
			     struct addr_location *al __maybe_unused)
{
	zfree(&iter->priv);
	iter->he = NULL;

	return 0;
}

const struct hist_iter_ops hist_iter_mem = {
	.prepare_entry		= iter_prepare_mem_entry,
	.add_single_entry	= iter_add_single_mem_entry,
	.next_entry		= iter_next_nop_entry,
	.add_next_entry		= iter_add_next_nop_entry,
	.finish_entry		= iter_finish_mem_entry,
};

const struct hist_iter_ops hist_iter_branch = {
	.prepare_entry		= iter_prepare_branch_entry,
	.add_single_entry	= iter_add_single_branch_entry,
	.next_entry		= iter_next_branch_entry,
	.add_next_entry		= iter_add_next_branch_entry,
	.finish_entry		= iter_finish_branch_entry,
};

const struct hist_iter_ops hist_iter_normal = {
	.prepare_entry		= iter_prepare_normal_entry,
	.add_single_entry	= iter_add_single_normal_entry,
	.next_entry		= iter_next_nop_entry,
	.add_next_entry		= iter_add_next_nop_entry,
	.finish_entry		= iter_finish_normal_entry,
};

const struct hist_iter_ops hist_iter_cumulative = {
	.prepare_entry		= iter_prepare_cumulative_entry,
	.add_single_entry	= iter_add_single_cumulative_entry,
	.next_entry		= iter_next_cumulative_entry,
	.add_next_entry		= iter_add_next_cumulative_entry,
	.finish_entry		= iter_finish_cumulative_entry,
};

int hist_entry_iter__add(struct hist_entry_iter *iter, struct addr_location *al,
			 struct perf_evsel *evsel, struct perf_sample *sample,
			 int max_stack_depth, void *arg)
{
	int err, err2;

	err = sample__resolve_callchain(sample, &iter->parent, evsel, al,
					max_stack_depth);
	if (err)
		return err;

	iter->evsel = evsel;
	iter->sample = sample;

	err = iter->ops->prepare_entry(iter, al);
	if (err)
		goto out;

	err = iter->ops->add_single_entry(iter, al);
	if (err)
		goto out;

	if (iter->he && iter->add_entry_cb) {
		err = iter->add_entry_cb(iter, al, true, arg);
		if (err)
			goto out;
	}

	while (iter->ops->next_entry(iter, al)) {
		err = iter->ops->add_next_entry(iter, al);
		if (err)
			break;

		if (iter->he && iter->add_entry_cb) {
			err = iter->add_entry_cb(iter, al, false, arg);
			if (err)
				goto out;
		}
	}

out:
	err2 = iter->ops->finish_entry(iter, al);
	if (!err)
		err = err2;

	return err;
}

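/*
 * A typical caller fills a struct hist_entry_iter with ->ops (one of the
 * tables above) plus optional ->add_entry_cb and ->hide_unresolved, then
 * calls hist_entry_iter__add() once per sample.  A minimal sketch, using
 * only fields referenced in this file:
 *
 *	struct hist_entry_iter iter = {
 *		.ops = &hist_iter_normal,
 *		.hide_unresolved = false,
 *	};
 *
 *	err = hist_entry_iter__add(&iter, &al, evsel, sample,
 *				   PERF_MAX_STACK_DEPTH, NULL);
 */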
int64_t
hist_entry__cmp(struct hist_entry *left, struct hist_entry *right)
{
	struct perf_hpp_fmt *fmt;
	int64_t cmp = 0;

	perf_hpp__for_each_sort_list(fmt) {
		if (perf_hpp__should_skip(fmt))
			continue;

		cmp = fmt->cmp(left, right);
		if (cmp)
			break;
	}

	return cmp;
}

int64_t
hist_entry__collapse(struct hist_entry *left, struct hist_entry *right)
{
	struct perf_hpp_fmt *fmt;
	int64_t cmp = 0;

	perf_hpp__for_each_sort_list(fmt) {
		if (perf_hpp__should_skip(fmt))
			continue;

		cmp = fmt->collapse(left, right);
		if (cmp)
			break;
	}

	return cmp;
}

void hist_entry__free(struct hist_entry *he)
{
	zfree(&he->branch_info);
	zfree(&he->mem_info);
	zfree(&he->stat_acc);
	free_srcline(he->srcline);
	free(he);
}

/*
 * collapse the histogram
 */

static bool hists__collapse_insert_entry(struct hists *hists __maybe_unused,
					 struct rb_root *root,
					 struct hist_entry *he)
{
	struct rb_node **p = &root->rb_node;
	struct rb_node *parent = NULL;
	struct hist_entry *iter;
	int64_t cmp;

	while (*p != NULL) {
		parent = *p;
		iter = rb_entry(parent, struct hist_entry, rb_node_in);

		cmp = hist_entry__collapse(iter, he);

		if (!cmp) {
			he_stat__add_stat(&iter->stat, &he->stat);
			if (symbol_conf.cumulate_callchain)
				he_stat__add_stat(iter->stat_acc, he->stat_acc);

			if (symbol_conf.use_callchain) {
				callchain_cursor_reset(&callchain_cursor);
				callchain_merge(&callchain_cursor,
						iter->callchain,
						he->callchain);
			}
			hist_entry__free(he);
			return false;
		}

		if (cmp < 0)
			p = &(*p)->rb_left;
		else
			p = &(*p)->rb_right;
	}

	rb_link_node(&he->rb_node_in, parent, p);
	rb_insert_color(&he->rb_node_in, root);
	return true;
}

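/*
 * hists->entries_in points at one of the two rbtrees in entries_in_array;
 * rotating them under hists->lock lets new entries keep arriving while the
 * previous batch is being collapsed.
 */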
static struct rb_root *hists__get_rotate_entries_in(struct hists *hists)
{
	struct rb_root *root;

	pthread_mutex_lock(&hists->lock);

	root = hists->entries_in;
	if (++hists->entries_in > &hists->entries_in_array[1])
		hists->entries_in = &hists->entries_in_array[0];

	pthread_mutex_unlock(&hists->lock);

	return root;
}

static void hists__apply_filters(struct hists *hists, struct hist_entry *he)
{
	hists__filter_entry_by_dso(hists, he);
	hists__filter_entry_by_thread(hists, he);
	hists__filter_entry_by_symbol(hists, he);
}

void hists__collapse_resort(struct hists *hists, struct ui_progress *prog)
{
	struct rb_root *root;
	struct rb_node *next;
	struct hist_entry *n;

	if (!sort__need_collapse)
		return;

	root = hists__get_rotate_entries_in(hists);
	next = rb_first(root);

	while (next) {
		if (session_done())
			break;
		n = rb_entry(next, struct hist_entry, rb_node_in);
		next = rb_next(&n->rb_node_in);

		rb_erase(&n->rb_node_in, root);
		if (hists__collapse_insert_entry(hists, &hists->entries_collapsed, n)) {
			/*
			 * If it wasn't combined with one of the entries already
			 * collapsed, we need to apply the filters that may have
			 * been set by, say, the hist_browser.
			 */
			hists__apply_filters(hists, n);
		}
		if (prog)
			ui_progress__update(prog, 1);
	}
}

static int hist_entry__sort(struct hist_entry *a, struct hist_entry *b)
{
	struct perf_hpp_fmt *fmt;
	int64_t cmp = 0;

	perf_hpp__for_each_sort_list(fmt) {
		if (perf_hpp__should_skip(fmt))
			continue;

		cmp = fmt->sort(a, b);
		if (cmp)
			break;
	}

	return cmp;
}

static void hists__reset_filter_stats(struct hists *hists)
{
	hists->nr_non_filtered_entries = 0;
	hists->stats.total_non_filtered_period = 0;
}

void hists__reset_stats(struct hists *hists)
{
	hists->nr_entries = 0;
	hists->stats.total_period = 0;

	hists__reset_filter_stats(hists);
}

static void hists__inc_filter_stats(struct hists *hists, struct hist_entry *h)
{
	hists->nr_non_filtered_entries++;
	hists->stats.total_non_filtered_period += h->stat.period;
}

void hists__inc_stats(struct hists *hists, struct hist_entry *h)
{
	if (!h->filtered)
		hists__inc_filter_stats(hists, h);

	hists->nr_entries++;
	hists->stats.total_period += h->stat.period;
}

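/*
 * Output resort: entries are moved from the collapsed (or input) tree into
 * hists->entries, ordered by the perf_hpp output sort list; callchains are
 * pruned by callchain_param.sort() using the min_callchain_hits threshold.
 */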
static void __hists__insert_output_entry(struct rb_root *entries,
					 struct hist_entry *he,
					 u64 min_callchain_hits)
{
	struct rb_node **p = &entries->rb_node;
	struct rb_node *parent = NULL;
	struct hist_entry *iter;

	if (symbol_conf.use_callchain)
		callchain_param.sort(&he->sorted_chain, he->callchain,
				     min_callchain_hits, &callchain_param);

	while (*p != NULL) {
		parent = *p;
		iter = rb_entry(parent, struct hist_entry, rb_node);

		if (hist_entry__sort(he, iter) > 0)
			p = &(*p)->rb_left;
		else
			p = &(*p)->rb_right;
	}

	rb_link_node(&he->rb_node, parent, p);
	rb_insert_color(&he->rb_node, entries);
}

void hists__output_resort(struct hists *hists)
{
	struct rb_root *root;
	struct rb_node *next;
	struct hist_entry *n;
	u64 min_callchain_hits;

	min_callchain_hits = hists->stats.total_period * (callchain_param.min_percent / 100);

	if (sort__need_collapse)
		root = &hists->entries_collapsed;
	else
		root = hists->entries_in;

	next = rb_first(root);
	hists->entries = RB_ROOT;

	hists__reset_stats(hists);
	hists__reset_col_len(hists);

	while (next) {
		n = rb_entry(next, struct hist_entry, rb_node_in);
		next = rb_next(&n->rb_node_in);

		__hists__insert_output_entry(&hists->entries, n, min_callchain_hits);
		hists__inc_stats(hists, n);

		if (!n->filtered)
			hists__calc_col_len(hists, n);
	}
}

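/*
 * Filtering: hists__filter_entry_by_{dso,thread,symbol}() set a bit in
 * he->filtered when an entry does not match the current filter, while the
 * hists__filter_by_*() walkers clear that bit again and re-account the
 * non-filtered stats and column widths via hists__remove_entry_filter().
 */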
static void hists__remove_entry_filter(struct hists *hists, struct hist_entry *h,
				       enum hist_filter filter)
{
	h->filtered &= ~(1 << filter);
	if (h->filtered)
		return;

	/* force fold unfiltered entry for simplicity */
	h->ms.unfolded = false;
	h->row_offset = 0;

	hists->stats.nr_non_filtered_samples += h->stat.nr_events;

	hists__inc_filter_stats(hists, h);
	hists__calc_col_len(hists, h);
}

static bool hists__filter_entry_by_dso(struct hists *hists,
				       struct hist_entry *he)
{
	if (hists->dso_filter != NULL &&
	    (he->ms.map == NULL || he->ms.map->dso != hists->dso_filter)) {
		he->filtered |= (1 << HIST_FILTER__DSO);
		return true;
	}

	return false;
}

void hists__filter_by_dso(struct hists *hists)
{
	struct rb_node *nd;

	hists->stats.nr_non_filtered_samples = 0;

	hists__reset_filter_stats(hists);
	hists__reset_col_len(hists);

	for (nd = rb_first(&hists->entries); nd; nd = rb_next(nd)) {
		struct hist_entry *h = rb_entry(nd, struct hist_entry, rb_node);

		if (symbol_conf.exclude_other && !h->parent)
			continue;

		if (hists__filter_entry_by_dso(hists, h))
			continue;

		hists__remove_entry_filter(hists, h, HIST_FILTER__DSO);
	}
}

static bool hists__filter_entry_by_thread(struct hists *hists,
					  struct hist_entry *he)
{
	if (hists->thread_filter != NULL &&
	    he->thread != hists->thread_filter) {
		he->filtered |= (1 << HIST_FILTER__THREAD);
		return true;
	}

	return false;
}

void hists__filter_by_thread(struct hists *hists)
{
	struct rb_node *nd;

	hists->stats.nr_non_filtered_samples = 0;

	hists__reset_filter_stats(hists);
	hists__reset_col_len(hists);

	for (nd = rb_first(&hists->entries); nd; nd = rb_next(nd)) {
		struct hist_entry *h = rb_entry(nd, struct hist_entry, rb_node);

		if (hists__filter_entry_by_thread(hists, h))
			continue;

		hists__remove_entry_filter(hists, h, HIST_FILTER__THREAD);
	}
}

static bool hists__filter_entry_by_symbol(struct hists *hists,
					  struct hist_entry *he)
{
	if (hists->symbol_filter_str != NULL &&
	    (!he->ms.sym || strstr(he->ms.sym->name,
				   hists->symbol_filter_str) == NULL)) {
		he->filtered |= (1 << HIST_FILTER__SYMBOL);
		return true;
	}

	return false;
}

void hists__filter_by_symbol(struct hists *hists)
{
	struct rb_node *nd;

	hists->stats.nr_non_filtered_samples = 0;

	hists__reset_filter_stats(hists);
	hists__reset_col_len(hists);

	for (nd = rb_first(&hists->entries); nd; nd = rb_next(nd)) {
		struct hist_entry *h = rb_entry(nd, struct hist_entry, rb_node);

		if (hists__filter_entry_by_symbol(hists, h))
			continue;

		hists__remove_entry_filter(hists, h, HIST_FILTER__SYMBOL);
	}
}

void events_stats__inc(struct events_stats *stats, u32 type)
{
	++stats->nr_events[0];
	++stats->nr_events[type];
}

void hists__inc_nr_events(struct hists *hists, u32 type)
{
	events_stats__inc(&hists->stats, type);
}

void hists__inc_nr_samples(struct hists *hists, bool filtered)
{
	events_stats__inc(&hists->stats, PERF_RECORD_SAMPLE);
	if (!filtered)
		hists->stats.nr_non_filtered_samples++;
}

static struct hist_entry *hists__add_dummy_entry(struct hists *hists,
						 struct hist_entry *pair)
{
	struct rb_root *root;
	struct rb_node **p;
	struct rb_node *parent = NULL;
	struct hist_entry *he;
	int64_t cmp;

	if (sort__need_collapse)
		root = &hists->entries_collapsed;
	else
		root = hists->entries_in;

	p = &root->rb_node;

	while (*p != NULL) {
		parent = *p;
		he = rb_entry(parent, struct hist_entry, rb_node_in);

		cmp = hist_entry__collapse(he, pair);

		if (!cmp)
			goto out;

		if (cmp < 0)
			p = &(*p)->rb_left;
		else
			p = &(*p)->rb_right;
	}

	he = hist_entry__new(pair, true);
	if (he) {
		memset(&he->stat, 0, sizeof(he->stat));
		he->hists = hists;
		rb_link_node(&he->rb_node_in, parent, p);
		rb_insert_color(&he->rb_node_in, root);
		hists__inc_stats(hists, he);
		he->dummy = true;
	}
out:
	return he;
}

static struct hist_entry *hists__find_entry(struct hists *hists,
					    struct hist_entry *he)
{
	struct rb_node *n;

	if (sort__need_collapse)
		n = hists->entries_collapsed.rb_node;
	else
		n = hists->entries_in->rb_node;

	while (n) {
		struct hist_entry *iter = rb_entry(n, struct hist_entry, rb_node_in);
		int64_t cmp = hist_entry__collapse(iter, he);

		if (cmp < 0)
			n = n->rb_left;
		else if (cmp > 0)
			n = n->rb_right;
		else
			return iter;
	}

	return NULL;
}

/*
 * Look for pairs to link to the leader buckets (hist_entries):
 */
void hists__match(struct hists *leader, struct hists *other)
{
	struct rb_root *root;
	struct rb_node *nd;
	struct hist_entry *pos, *pair;

	if (sort__need_collapse)
		root = &leader->entries_collapsed;
	else
		root = leader->entries_in;

	for (nd = rb_first(root); nd; nd = rb_next(nd)) {
		pos  = rb_entry(nd, struct hist_entry, rb_node_in);
		pair = hists__find_entry(other, pos);

		if (pair)
			hist_entry__add_pair(pair, pos);
	}
}

/*
 * Look for entries in the other hists that are not present in the leader; if
 * we find them, just add a dummy entry on the leader hists, with period=0,
 * nr_events=0, to serve as the list header.
 */
int hists__link(struct hists *leader, struct hists *other)
{
	struct rb_root *root;
	struct rb_node *nd;
	struct hist_entry *pos, *pair;

	if (sort__need_collapse)
		root = &other->entries_collapsed;
	else
		root = other->entries_in;

	for (nd = rb_first(root); nd; nd = rb_next(nd)) {
		pos = rb_entry(nd, struct hist_entry, rb_node_in);

		if (!hist_entry__has_pairs(pos)) {
			pair = hists__add_dummy_entry(leader, pos);
			if (pair == NULL)
				return -1;
			hist_entry__add_pair(pos, pair);
		}
	}

	return 0;
}

u64 hists__total_period(struct hists *hists)
{
	return symbol_conf.filter_relative ? hists->stats.total_non_filtered_period :
		hists->stats.total_period;
}

int parse_filter_percentage(const struct option *opt __maybe_unused,
			    const char *arg, int unset __maybe_unused)
{
	if (!strcmp(arg, "relative"))
		symbol_conf.filter_relative = true;
	else if (!strcmp(arg, "absolute"))
		symbol_conf.filter_relative = false;
	else
		return -1;

	return 0;
}

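/*
 * perf_hist_config() handles the 'hist.percentage' config key, mapping it to
 * the same relative/absolute choice as parse_filter_percentage().  An assumed
 * perfconfig usage sketch (the ini-style file layout is not defined here):
 *
 *	[hist]
 *		percentage = relative
 */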
int perf_hist_config(const char *var, const char *value)
{
	if (!strcmp(var, "hist.percentage"))
		return parse_filter_percentage(NULL, value, 0);

	return 0;
}