/* tools/perf/util/hist.c */

#include "util.h"
#include "build-id.h"
#include "hist.h"
#include "session.h"
#include "sort.h"
#include "evlist.h"
#include "evsel.h"
#include "annotate.h"
#include "ui/progress.h"
#include <math.h>

static bool hists__filter_entry_by_dso(struct hists *hists,
                                       struct hist_entry *he);
static bool hists__filter_entry_by_thread(struct hists *hists,
                                          struct hist_entry *he);
static bool hists__filter_entry_by_symbol(struct hists *hists,
                                          struct hist_entry *he);
static bool hists__filter_entry_by_socket(struct hists *hists,
                                          struct hist_entry *he);

u16 hists__col_len(struct hists *hists, enum hist_column col)
{
        return hists->col_len[col];
}

void hists__set_col_len(struct hists *hists, enum hist_column col, u16 len)
{
        hists->col_len[col] = len;
}

bool hists__new_col_len(struct hists *hists, enum hist_column col, u16 len)
{
        if (len > hists__col_len(hists, col)) {
                hists__set_col_len(hists, col, len);
                return true;
        }
        return false;
}

void hists__reset_col_len(struct hists *hists)
{
        enum hist_column col;

        for (col = 0; col < HISTC_NR_COLS; ++col)
                hists__set_col_len(hists, col, 0);
}

static void hists__set_unres_dso_col_len(struct hists *hists, int dso)
{
        const unsigned int unresolved_col_width = BITS_PER_LONG / 4;

        if (hists__col_len(hists, dso) < unresolved_col_width &&
            !symbol_conf.col_width_list_str && !symbol_conf.field_sep &&
            !symbol_conf.dso_list)
                hists__set_col_len(hists, dso, unresolved_col_width);
}

void hists__calc_col_len(struct hists *hists, struct hist_entry *h)
{
        const unsigned int unresolved_col_width = BITS_PER_LONG / 4;
        int symlen;
        u16 len;

        /*
         * +4 accounts for '[x] ' priv level info
         * +2 accounts for 0x prefix on raw addresses
         * +3 accounts for ' y ' symtab origin info
         */
        if (h->ms.sym) {
                symlen = h->ms.sym->namelen + 4;
                if (verbose)
                        symlen += BITS_PER_LONG / 4 + 2 + 3;
                hists__new_col_len(hists, HISTC_SYMBOL, symlen);
        } else {
                symlen = unresolved_col_width + 4 + 2;
                hists__new_col_len(hists, HISTC_SYMBOL, symlen);
                hists__set_unres_dso_col_len(hists, HISTC_DSO);
        }

        len = thread__comm_len(h->thread);
        if (hists__new_col_len(hists, HISTC_COMM, len))
                hists__set_col_len(hists, HISTC_THREAD, len + 6);

        if (h->ms.map) {
                len = dso__name_len(h->ms.map->dso);
                hists__new_col_len(hists, HISTC_DSO, len);
        }

        if (h->parent)
                hists__new_col_len(hists, HISTC_PARENT, h->parent->namelen);

        if (h->branch_info) {
                if (h->branch_info->from.sym) {
                        symlen = (int)h->branch_info->from.sym->namelen + 4;
                        if (verbose)
                                symlen += BITS_PER_LONG / 4 + 2 + 3;
                        hists__new_col_len(hists, HISTC_SYMBOL_FROM, symlen);

                        symlen = dso__name_len(h->branch_info->from.map->dso);
                        hists__new_col_len(hists, HISTC_DSO_FROM, symlen);
                } else {
                        symlen = unresolved_col_width + 4 + 2;
                        hists__new_col_len(hists, HISTC_SYMBOL_FROM, symlen);
                        hists__set_unres_dso_col_len(hists, HISTC_DSO_FROM);
                }

                if (h->branch_info->to.sym) {
                        symlen = (int)h->branch_info->to.sym->namelen + 4;
                        if (verbose)
                                symlen += BITS_PER_LONG / 4 + 2 + 3;
                        hists__new_col_len(hists, HISTC_SYMBOL_TO, symlen);

                        symlen = dso__name_len(h->branch_info->to.map->dso);
                        hists__new_col_len(hists, HISTC_DSO_TO, symlen);
                } else {
                        symlen = unresolved_col_width + 4 + 2;
                        hists__new_col_len(hists, HISTC_SYMBOL_TO, symlen);
                        hists__set_unres_dso_col_len(hists, HISTC_DSO_TO);
                }
        }

        if (h->mem_info) {
                if (h->mem_info->daddr.sym) {
                        symlen = (int)h->mem_info->daddr.sym->namelen + 4
                               + unresolved_col_width + 2;
                        hists__new_col_len(hists, HISTC_MEM_DADDR_SYMBOL,
                                           symlen);
                        hists__new_col_len(hists, HISTC_MEM_DCACHELINE,
                                           symlen + 1);
                } else {
                        symlen = unresolved_col_width + 4 + 2;
                        hists__new_col_len(hists, HISTC_MEM_DADDR_SYMBOL,
                                           symlen);
                        hists__new_col_len(hists, HISTC_MEM_DCACHELINE,
                                           symlen);
                }

                if (h->mem_info->iaddr.sym) {
                        symlen = (int)h->mem_info->iaddr.sym->namelen + 4
                               + unresolved_col_width + 2;
                        hists__new_col_len(hists, HISTC_MEM_IADDR_SYMBOL,
                                           symlen);
                } else {
                        symlen = unresolved_col_width + 4 + 2;
                        hists__new_col_len(hists, HISTC_MEM_IADDR_SYMBOL,
                                           symlen);
                }

                if (h->mem_info->daddr.map) {
                        symlen = dso__name_len(h->mem_info->daddr.map->dso);
                        hists__new_col_len(hists, HISTC_MEM_DADDR_DSO,
                                           symlen);
                } else {
                        symlen = unresolved_col_width + 4 + 2;
                        hists__set_unres_dso_col_len(hists, HISTC_MEM_DADDR_DSO);
                }
        } else {
                symlen = unresolved_col_width + 4 + 2;
                hists__new_col_len(hists, HISTC_MEM_DADDR_SYMBOL, symlen);
                hists__new_col_len(hists, HISTC_MEM_IADDR_SYMBOL, symlen);
                hists__set_unres_dso_col_len(hists, HISTC_MEM_DADDR_DSO);
        }

        hists__new_col_len(hists, HISTC_CPU, 3);
        hists__new_col_len(hists, HISTC_SOCKET, 6);
        hists__new_col_len(hists, HISTC_MEM_LOCKED, 6);
        hists__new_col_len(hists, HISTC_MEM_TLB, 22);
        hists__new_col_len(hists, HISTC_MEM_SNOOP, 12);
        hists__new_col_len(hists, HISTC_MEM_LVL, 21 + 3);
        hists__new_col_len(hists, HISTC_LOCAL_WEIGHT, 12);
        hists__new_col_len(hists, HISTC_GLOBAL_WEIGHT, 12);

        if (h->srcline)
                hists__new_col_len(hists, HISTC_SRCLINE, strlen(h->srcline));

        if (h->srcfile)
                hists__new_col_len(hists, HISTC_SRCFILE, strlen(h->srcfile));

        if (h->transaction)
                hists__new_col_len(hists, HISTC_TRANSACTION,
                                   hist_entry__transaction_len());

        if (h->trace_output)
                hists__new_col_len(hists, HISTC_TRACE, strlen(h->trace_output));
}
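
/*
 * Illustrative sketch (not part of the original file): for a resolved
 * symbol in verbose mode on a 64-bit build, the width accounting above
 * comes to namelen + 4 ('[x] ') + 16 (hex address digits) + 2 ("0x")
 * + 3 (' y '), so e.g. a 20-character name needs 45 columns.
 * Hypothetical helper:
 */
static int __maybe_unused example_verbose_symlen(int namelen)
{
        /* same arithmetic as hists__calc_col_len() uses for h->ms.sym */
        return namelen + 4 + BITS_PER_LONG / 4 + 2 + 3;
}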

void hists__output_recalc_col_len(struct hists *hists, int max_rows)
{
        struct rb_node *next = rb_first(&hists->entries);
        struct hist_entry *n;
        int row = 0;

        hists__reset_col_len(hists);

        while (next && row++ < max_rows) {
                n = rb_entry(next, struct hist_entry, rb_node);
                if (!n->filtered)
                        hists__calc_col_len(hists, n);
                next = rb_next(&n->rb_node);
        }
}

static void he_stat__add_cpumode_period(struct he_stat *he_stat,
                                        unsigned int cpumode, u64 period)
{
        switch (cpumode) {
        case PERF_RECORD_MISC_KERNEL:
                he_stat->period_sys += period;
                break;
        case PERF_RECORD_MISC_USER:
                he_stat->period_us += period;
                break;
        case PERF_RECORD_MISC_GUEST_KERNEL:
                he_stat->period_guest_sys += period;
                break;
        case PERF_RECORD_MISC_GUEST_USER:
                he_stat->period_guest_us += period;
                break;
        default:
                break;
        }
}

static void he_stat__add_period(struct he_stat *he_stat, u64 period,
                                u64 weight)
{
        he_stat->period         += period;
        he_stat->weight         += weight;
        he_stat->nr_events      += 1;
}

static void he_stat__add_stat(struct he_stat *dest, struct he_stat *src)
{
        dest->period            += src->period;
        dest->period_sys        += src->period_sys;
        dest->period_us         += src->period_us;
        dest->period_guest_sys  += src->period_guest_sys;
        dest->period_guest_us   += src->period_guest_us;
        dest->nr_events         += src->nr_events;
        dest->weight            += src->weight;
}

static void he_stat__decay(struct he_stat *he_stat)
{
        he_stat->period = (he_stat->period * 7) / 8;
        he_stat->nr_events = (he_stat->nr_events * 7) / 8;
        /* XXX need decay for weight too? */
}
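
/*
 * Illustrative sketch (not part of the original file): the 7/8
 * multiplicative decay above halves a period in roughly six rounds,
 * since (7/8)^6 ~= 0.45.  A hypothetical helper counting rounds until a
 * period drops below a threshold:
 */
static unsigned int __maybe_unused example_decay_rounds(u64 period, u64 threshold)
{
        unsigned int rounds = 0;

        while (period && period >= threshold) {
                period = (period * 7) / 8;      /* same formula as he_stat__decay() */
                rounds++;
        }
        return rounds;
}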

static void hists__delete_entry(struct hists *hists, struct hist_entry *he);

static bool hists__decay_entry(struct hists *hists, struct hist_entry *he)
{
        u64 prev_period = he->stat.period;
        u64 diff;

        if (prev_period == 0)
                return true;

        he_stat__decay(&he->stat);
        if (symbol_conf.cumulate_callchain)
                he_stat__decay(he->stat_acc);
        decay_callchain(he->callchain);

        diff = prev_period - he->stat.period;

        if (!he->depth) {
                hists->stats.total_period -= diff;
                if (!he->filtered)
                        hists->stats.total_non_filtered_period -= diff;
        }

        if (!he->leaf) {
                struct hist_entry *child;
                struct rb_node *node = rb_first(&he->hroot_out);
                while (node) {
                        child = rb_entry(node, struct hist_entry, rb_node);
                        node = rb_next(node);

                        if (hists__decay_entry(hists, child))
                                hists__delete_entry(hists, child);
                }
        }

        return he->stat.period == 0;
}

static void hists__delete_entry(struct hists *hists, struct hist_entry *he)
{
        struct rb_root *root_in;
        struct rb_root *root_out;

        if (he->parent_he) {
                root_in  = &he->parent_he->hroot_in;
                root_out = &he->parent_he->hroot_out;
        } else {
                if (hists__has(hists, need_collapse))
                        root_in = &hists->entries_collapsed;
                else
                        root_in = hists->entries_in;
                root_out = &hists->entries;
        }

        rb_erase(&he->rb_node_in, root_in);
        rb_erase(&he->rb_node, root_out);

        --hists->nr_entries;
        if (!he->filtered)
                --hists->nr_non_filtered_entries;

        hist_entry__delete(he);
}

void hists__decay_entries(struct hists *hists, bool zap_user, bool zap_kernel)
{
        struct rb_node *next = rb_first(&hists->entries);
        struct hist_entry *n;

        while (next) {
                n = rb_entry(next, struct hist_entry, rb_node);
                next = rb_next(&n->rb_node);
                if (((zap_user && n->level == '.') ||
                     (zap_kernel && n->level != '.') ||
                     hists__decay_entry(hists, n))) {
                        hists__delete_entry(hists, n);
                }
        }
}

void hists__delete_entries(struct hists *hists)
{
        struct rb_node *next = rb_first(&hists->entries);
        struct hist_entry *n;

        while (next) {
                n = rb_entry(next, struct hist_entry, rb_node);
                next = rb_next(&n->rb_node);

                hists__delete_entry(hists, n);
        }
}

/*
 * histogram, sorted on item, collects periods
 */

static struct hist_entry *hist_entry__new(struct hist_entry *template,
                                          bool sample_self)
{
        size_t callchain_size = 0;
        struct hist_entry *he;

        if (symbol_conf.use_callchain)
                callchain_size = sizeof(struct callchain_root);

        he = zalloc(sizeof(*he) + callchain_size);

        if (he != NULL) {
                *he = *template;

                if (symbol_conf.cumulate_callchain) {
                        he->stat_acc = malloc(sizeof(he->stat));
                        if (he->stat_acc == NULL) {
                                free(he);
                                return NULL;
                        }
                        memcpy(he->stat_acc, &he->stat, sizeof(he->stat));
                        if (!sample_self)
                                memset(&he->stat, 0, sizeof(he->stat));
                }

                map__get(he->ms.map);

                if (he->branch_info) {
                        /*
                         * This branch info is (a part of) an allocation made
                         * by sample__resolve_bstack() and will be freed after
                         * the new entries are added, so we need to save a copy.
                         */
                        he->branch_info = malloc(sizeof(*he->branch_info));
                        if (he->branch_info == NULL) {
                                map__zput(he->ms.map);
                                free(he->stat_acc);
                                free(he);
                                return NULL;
                        }

                        memcpy(he->branch_info, template->branch_info,
                               sizeof(*he->branch_info));

                        map__get(he->branch_info->from.map);
                        map__get(he->branch_info->to.map);
                }

                if (he->mem_info) {
                        map__get(he->mem_info->iaddr.map);
                        map__get(he->mem_info->daddr.map);
                }

                if (symbol_conf.use_callchain)
                        callchain_init(he->callchain);

                if (he->raw_data) {
                        he->raw_data = memdup(he->raw_data, he->raw_size);

                        if (he->raw_data == NULL) {
                                map__put(he->ms.map);
                                if (he->branch_info) {
                                        map__put(he->branch_info->from.map);
                                        map__put(he->branch_info->to.map);
                                        free(he->branch_info);
                                }
                                if (he->mem_info) {
                                        map__put(he->mem_info->iaddr.map);
                                        map__put(he->mem_info->daddr.map);
                                }
                                free(he->stat_acc);
                                free(he);
                                return NULL;
                        }
                }
                INIT_LIST_HEAD(&he->pairs.node);
                thread__get(he->thread);

                if (!symbol_conf.report_hierarchy)
                        he->leaf = true;
        }

        return he;
}
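
/*
 * Illustrative sketch (not part of the original file): hist_entry__new()
 * above co-allocates the callchain_root behind the hist_entry, so
 * he->callchain (a zero-length trailing member of struct hist_entry)
 * lands inside the same zalloc() block.  The same trick with a
 * hypothetical type:
 */
struct example_entry_with_chain {
        int                     nr;
        struct callchain_root   chain[0];       /* optional trailing payload */
};

static struct example_entry_with_chain * __maybe_unused
example_entry_with_chain__new(bool with_chain)
{
        size_t extra = with_chain ? sizeof(struct callchain_root) : 0;

        /* a single allocation covers the entry and its optional payload */
        return zalloc(sizeof(struct example_entry_with_chain) + extra);
}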

static u8 symbol__parent_filter(const struct symbol *parent)
{
        if (symbol_conf.exclude_other && parent == NULL)
                return 1 << HIST_FILTER__PARENT;
        return 0;
}

static void hist_entry__add_callchain_period(struct hist_entry *he, u64 period)
{
        if (!symbol_conf.use_callchain)
                return;

        he->hists->callchain_period += period;
        if (!he->filtered)
                he->hists->callchain_non_filtered_period += period;
}

static struct hist_entry *hists__findnew_entry(struct hists *hists,
                                               struct hist_entry *entry,
                                               struct addr_location *al,
                                               bool sample_self)
{
        struct rb_node **p;
        struct rb_node *parent = NULL;
        struct hist_entry *he;
        int64_t cmp;
        u64 period = entry->stat.period;
        u64 weight = entry->stat.weight;

        p = &hists->entries_in->rb_node;

        while (*p != NULL) {
                parent = *p;
                he = rb_entry(parent, struct hist_entry, rb_node_in);

                /*
                 * Make sure that it receives arguments in the same order as
                 * hist_entry__collapse() so that we can use an appropriate
                 * function when searching an entry regardless of which sort
                 * keys were used.
                 */
                cmp = hist_entry__cmp(he, entry);

                if (!cmp) {
                        if (sample_self) {
                                he_stat__add_period(&he->stat, period, weight);
                                hist_entry__add_callchain_period(he, period);
                        }
                        if (symbol_conf.cumulate_callchain)
                                he_stat__add_period(he->stat_acc, period, weight);

                        /*
                         * This mem info was allocated from sample__resolve_mem
                         * and will not be used anymore.
                         */
                        zfree(&entry->mem_info);

                        /* If the map of an existing hist_entry has
                         * become out-of-date due to an exec() or
                         * similar, update it.  Otherwise we will
                         * mis-adjust symbol addresses when computing
                         * the history counter to increment.
                         */
                        if (he->ms.map != entry->ms.map) {
                                map__put(he->ms.map);
                                he->ms.map = map__get(entry->ms.map);
                        }
                        goto out;
                }

                if (cmp < 0)
                        p = &(*p)->rb_left;
                else
                        p = &(*p)->rb_right;
        }

        he = hist_entry__new(entry, sample_self);
        if (!he)
                return NULL;

        if (sample_self)
                hist_entry__add_callchain_period(he, period);
        hists->nr_entries++;

        rb_link_node(&he->rb_node_in, parent, p);
        rb_insert_color(&he->rb_node_in, hists->entries_in);
out:
        if (sample_self)
                he_stat__add_cpumode_period(&he->stat, al->cpumode, period);
        if (symbol_conf.cumulate_callchain)
                he_stat__add_cpumode_period(he->stat_acc, al->cpumode, period);
        return he;
}
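
/*
 * Illustrative sketch (not part of the original file): the search above
 * follows the classic rbtree "find or insert" pattern -- walk down while
 * remembering the parent and the link slot, then either merge into the
 * match or link a new node where the search fell off the tree.  Reduced
 * to its skeleton with a hypothetical int-keyed type:
 */
struct example_entry {
        struct rb_node  rb_node;
        int             key;
};

static struct example_entry * __maybe_unused
example_findnew(struct rb_root *root, struct example_entry *new)
{
        struct rb_node **p = &root->rb_node;
        struct rb_node *parent = NULL;
        struct example_entry *iter;

        while (*p != NULL) {
                parent = *p;
                iter = rb_entry(parent, struct example_entry, rb_node);
                if (iter->key == new->key)
                        return iter;            /* merge with existing entry */
                p = new->key < iter->key ? &(*p)->rb_left : &(*p)->rb_right;
        }
        rb_link_node(&new->rb_node, parent, p);
        rb_insert_color(&new->rb_node, root);
        return new;
}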

struct hist_entry *__hists__add_entry(struct hists *hists,
                                      struct addr_location *al,
                                      struct symbol *sym_parent,
                                      struct branch_info *bi,
                                      struct mem_info *mi,
                                      struct perf_sample *sample,
                                      bool sample_self)
{
        struct hist_entry entry = {
                .thread = al->thread,
                .comm = thread__comm(al->thread),
                .ms = {
                        .map    = al->map,
                        .sym    = al->sym,
                },
                .socket  = al->socket,
                .cpu     = al->cpu,
                .cpumode = al->cpumode,
                .ip      = al->addr,
                .level   = al->level,
                .stat = {
                        .nr_events = 1,
                        .period = sample->period,
                        .weight = sample->weight,
                },
                .parent = sym_parent,
                .filtered = symbol__parent_filter(sym_parent) | al->filtered,
                .hists  = hists,
                .branch_info = bi,
                .mem_info = mi,
                .transaction = sample->transaction,
                .raw_data = sample->raw_data,
                .raw_size = sample->raw_size,
        };

        return hists__findnew_entry(hists, &entry, al, sample_self);
}

static int
iter_next_nop_entry(struct hist_entry_iter *iter __maybe_unused,
                    struct addr_location *al __maybe_unused)
{
        return 0;
}

static int
iter_add_next_nop_entry(struct hist_entry_iter *iter __maybe_unused,
                        struct addr_location *al __maybe_unused)
{
        return 0;
}

static int
iter_prepare_mem_entry(struct hist_entry_iter *iter, struct addr_location *al)
{
        struct perf_sample *sample = iter->sample;
        struct mem_info *mi;

        mi = sample__resolve_mem(sample, al);
        if (mi == NULL)
                return -ENOMEM;

        iter->priv = mi;
        return 0;
}

static int
iter_add_single_mem_entry(struct hist_entry_iter *iter, struct addr_location *al)
{
        u64 cost;
        struct mem_info *mi = iter->priv;
        struct hists *hists = evsel__hists(iter->evsel);
        struct perf_sample *sample = iter->sample;
        struct hist_entry *he;

        if (mi == NULL)
                return -EINVAL;

        cost = sample->weight;
        if (!cost)
                cost = 1;

        /*
         * We must pass period=weight in order to get the correct
         * sorting from hists__collapse_resort(), which is solely
         * based on periods.  We want sorting to be done on
         * nr_events * weight, and this is indirectly achieved by
         * passing period=weight here and in he_stat__add_period().
         */
        sample->period = cost;

        he = __hists__add_entry(hists, al, iter->parent, NULL, mi,
                                sample, true);
        if (!he)
                return -ENOMEM;

        iter->he = he;
        return 0;
}
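
/*
 * Illustrative sketch (not part of the original file): with period=weight,
 * the collapsed period over N mem samples is sum(weight_i) instead of N,
 * so the period-based sort effectively orders entries by
 * nr_events * avg_weight; e.g. weights {40, 10, 30} give period 80 and
 * outrank five weight-1 accesses.  Hypothetical helper:
 */
static u64 __maybe_unused example_weighted_period(const u64 *weights, int nr)
{
        u64 period = 0;
        int i;

        for (i = 0; i < nr; i++)
                period += weights[i] ? weights[i] : 1;  /* cost defaults to 1 */
        return period;
}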

static int
iter_finish_mem_entry(struct hist_entry_iter *iter,
                      struct addr_location *al __maybe_unused)
{
        struct perf_evsel *evsel = iter->evsel;
        struct hists *hists = evsel__hists(evsel);
        struct hist_entry *he = iter->he;
        int err = -EINVAL;

        if (he == NULL)
                goto out;

        hists__inc_nr_samples(hists, he->filtered);

        err = hist_entry__append_callchain(he, iter->sample);

out:
        /*
         * We don't need to free iter->priv (mem_info) here since the mem info
         * was either already freed in hists__findnew_entry() or passed to a
         * new hist entry by hist_entry__new().
         */
        iter->priv = NULL;

        iter->he = NULL;
        return err;
}

static int
iter_prepare_branch_entry(struct hist_entry_iter *iter, struct addr_location *al)
{
        struct branch_info *bi;
        struct perf_sample *sample = iter->sample;

        bi = sample__resolve_bstack(sample, al);
        if (!bi)
                return -ENOMEM;

        iter->curr = 0;
        iter->total = sample->branch_stack->nr;

        iter->priv = bi;
        return 0;
}

static int
iter_add_single_branch_entry(struct hist_entry_iter *iter,
                             struct addr_location *al __maybe_unused)
{
        /* to avoid calling the callback function */
        iter->he = NULL;

        return 0;
}

static int
iter_next_branch_entry(struct hist_entry_iter *iter, struct addr_location *al)
{
        struct branch_info *bi = iter->priv;
        int i = iter->curr;

        if (bi == NULL)
                return 0;

        if (iter->curr >= iter->total)
                return 0;

        al->map = bi[i].to.map;
        al->sym = bi[i].to.sym;
        al->addr = bi[i].to.addr;
        return 1;
}

static int
iter_add_next_branch_entry(struct hist_entry_iter *iter, struct addr_location *al)
{
        struct branch_info *bi;
        struct perf_evsel *evsel = iter->evsel;
        struct hists *hists = evsel__hists(evsel);
        struct perf_sample *sample = iter->sample;
        struct hist_entry *he = NULL;
        int i = iter->curr;
        int err = 0;

        bi = iter->priv;

        if (iter->hide_unresolved && !(bi[i].from.sym && bi[i].to.sym))
                goto out;

        /*
         * The report shows the percentage of total branches captured
         * and not events sampled. Thus we use a pseudo period of 1.
         */
        sample->period = 1;
        sample->weight = bi->flags.cycles ? bi->flags.cycles : 1;

        he = __hists__add_entry(hists, al, iter->parent, &bi[i], NULL,
                                sample, true);
        if (he == NULL)
                return -ENOMEM;

        hists__inc_nr_samples(hists, he->filtered);

out:
        iter->he = he;
        iter->curr++;
        return err;
}
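
/*
 * Illustrative sketch (not part of the original file): with a pseudo
 * period of 1 per branch record, an entry's share is simply its record
 * count over the total number of captured branches, independent of the
 * event's sampling period.  Hypothetical helper:
 */
static double __maybe_unused
example_branch_percent(u64 nr_records, u64 total_branches)
{
        if (!total_branches)
                return 0.0;
        return 100.0 * (double)nr_records / (double)total_branches;
}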

static int
iter_finish_branch_entry(struct hist_entry_iter *iter,
                         struct addr_location *al __maybe_unused)
{
        zfree(&iter->priv);
        iter->he = NULL;

        return iter->curr >= iter->total ? 0 : -1;
}

static int
iter_prepare_normal_entry(struct hist_entry_iter *iter __maybe_unused,
                          struct addr_location *al __maybe_unused)
{
        return 0;
}

static int
iter_add_single_normal_entry(struct hist_entry_iter *iter, struct addr_location *al)
{
        struct perf_evsel *evsel = iter->evsel;
        struct perf_sample *sample = iter->sample;
        struct hist_entry *he;

        he = __hists__add_entry(evsel__hists(evsel), al, iter->parent, NULL, NULL,
                                sample, true);
        if (he == NULL)
                return -ENOMEM;

        iter->he = he;
        return 0;
}

static int
iter_finish_normal_entry(struct hist_entry_iter *iter,
                         struct addr_location *al __maybe_unused)
{
        struct hist_entry *he = iter->he;
        struct perf_evsel *evsel = iter->evsel;
        struct perf_sample *sample = iter->sample;

        if (he == NULL)
                return 0;

        iter->he = NULL;

        hists__inc_nr_samples(evsel__hists(evsel), he->filtered);

        return hist_entry__append_callchain(he, sample);
}

static int
iter_prepare_cumulative_entry(struct hist_entry_iter *iter,
                              struct addr_location *al __maybe_unused)
{
        struct hist_entry **he_cache;

        callchain_cursor_commit(&callchain_cursor);

        /*
         * This is for detecting cycles or recursions so that they're
         * accumulated only once, preventing entries from adding up to
         * more than 100% overhead.
         */
        he_cache = malloc(sizeof(*he_cache) * (iter->max_stack + 1));
        if (he_cache == NULL)
                return -ENOMEM;

        iter->priv = he_cache;
        iter->curr = 0;

        return 0;
}
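
/*
 * Illustrative sketch (not part of the original file): the he_cache above
 * needs one slot per callchain level plus one for the root entry added by
 * iter_add_single_cumulative_entry(), hence max_stack + 1.  Hypothetical
 * helper:
 */
static struct hist_entry ** __maybe_unused example_alloc_he_cache(int max_stack)
{
        /* one slot per callchain level, plus the root entry */
        return malloc(sizeof(struct hist_entry *) * (max_stack + 1));
}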

static int
iter_add_single_cumulative_entry(struct hist_entry_iter *iter,
                                 struct addr_location *al)
{
        struct perf_evsel *evsel = iter->evsel;
        struct hists *hists = evsel__hists(evsel);
        struct perf_sample *sample = iter->sample;
        struct hist_entry **he_cache = iter->priv;
        struct hist_entry *he;
        int err = 0;

        he = __hists__add_entry(hists, al, iter->parent, NULL, NULL,
                                sample, true);
        if (he == NULL)
                return -ENOMEM;

        iter->he = he;
        he_cache[iter->curr++] = he;

        hist_entry__append_callchain(he, sample);

        /*
         * We need to re-initialize the cursor since callchain_append()
         * advanced the cursor to the end.
         */
        callchain_cursor_commit(&callchain_cursor);

        hists__inc_nr_samples(hists, he->filtered);

        return err;
}

static int
iter_next_cumulative_entry(struct hist_entry_iter *iter,
                           struct addr_location *al)
{
        struct callchain_cursor_node *node;

        node = callchain_cursor_current(&callchain_cursor);
        if (node == NULL)
                return 0;

        return fill_callchain_info(al, node, iter->hide_unresolved);
}

static int
iter_add_next_cumulative_entry(struct hist_entry_iter *iter,
                               struct addr_location *al)
{
        struct perf_evsel *evsel = iter->evsel;
        struct perf_sample *sample = iter->sample;
        struct hist_entry **he_cache = iter->priv;
        struct hist_entry *he;
        struct hist_entry he_tmp = {
                .hists = evsel__hists(evsel),
                .cpu = al->cpu,
                .thread = al->thread,
                .comm = thread__comm(al->thread),
                .ip = al->addr,
                .ms = {
                        .map = al->map,
                        .sym = al->sym,
                },
                .parent = iter->parent,
                .raw_data = sample->raw_data,
                .raw_size = sample->raw_size,
        };
        int i;
        struct callchain_cursor cursor;

        callchain_cursor_snapshot(&cursor, &callchain_cursor);

        callchain_cursor_advance(&callchain_cursor);

        /*
         * Check if there are duplicate entries in the callchain.
         * It's possible that it has cycles or recursive calls.
         */
        for (i = 0; i < iter->curr; i++) {
                if (hist_entry__cmp(he_cache[i], &he_tmp) == 0) {
                        /* to avoid calling the callback function */
                        iter->he = NULL;
                        return 0;
                }
        }

        he = __hists__add_entry(evsel__hists(evsel), al, iter->parent, NULL, NULL,
                                sample, false);
        if (he == NULL)
                return -ENOMEM;

        iter->he = he;
        he_cache[iter->curr++] = he;

        if (symbol_conf.use_callchain)
                callchain_append(he->callchain, &cursor, sample->period);
        return 0;
}
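
/*
 * Illustrative sketch (not part of the original file): the duplicate check
 * above, reduced to its skeleton -- a linear scan of the per-sample cache
 * is what keeps a recursive frame from being cumulated twice:
 */
static bool __maybe_unused
example_seen_before(struct hist_entry **cache, int nr, struct hist_entry *he)
{
        int i;

        for (i = 0; i < nr; i++) {
                if (hist_entry__cmp(cache[i], he) == 0)
                        return true;    /* already cumulated for this sample */
        }
        return false;
}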

static int
iter_finish_cumulative_entry(struct hist_entry_iter *iter,
                             struct addr_location *al __maybe_unused)
{
        zfree(&iter->priv);
        iter->he = NULL;

        return 0;
}

const struct hist_iter_ops hist_iter_mem = {
        .prepare_entry          = iter_prepare_mem_entry,
        .add_single_entry       = iter_add_single_mem_entry,
        .next_entry             = iter_next_nop_entry,
        .add_next_entry         = iter_add_next_nop_entry,
        .finish_entry           = iter_finish_mem_entry,
};

const struct hist_iter_ops hist_iter_branch = {
        .prepare_entry          = iter_prepare_branch_entry,
        .add_single_entry       = iter_add_single_branch_entry,
        .next_entry             = iter_next_branch_entry,
        .add_next_entry         = iter_add_next_branch_entry,
        .finish_entry           = iter_finish_branch_entry,
};

const struct hist_iter_ops hist_iter_normal = {
        .prepare_entry          = iter_prepare_normal_entry,
        .add_single_entry       = iter_add_single_normal_entry,
        .next_entry             = iter_next_nop_entry,
        .add_next_entry         = iter_add_next_nop_entry,
        .finish_entry           = iter_finish_normal_entry,
};

const struct hist_iter_ops hist_iter_cumulative = {
        .prepare_entry          = iter_prepare_cumulative_entry,
        .add_single_entry       = iter_add_single_cumulative_entry,
        .next_entry             = iter_next_cumulative_entry,
        .add_next_entry         = iter_add_next_cumulative_entry,
        .finish_entry           = iter_finish_cumulative_entry,
};

int hist_entry_iter__add(struct hist_entry_iter *iter, struct addr_location *al,
                         int max_stack_depth, void *arg)
{
        int err, err2;

        err = sample__resolve_callchain(iter->sample, &callchain_cursor, &iter->parent,
                                        iter->evsel, al, max_stack_depth);
        if (err)
                return err;

        iter->max_stack = max_stack_depth;

        err = iter->ops->prepare_entry(iter, al);
        if (err)
                goto out;

        err = iter->ops->add_single_entry(iter, al);
        if (err)
                goto out;

        if (iter->he && iter->add_entry_cb) {
                err = iter->add_entry_cb(iter, al, true, arg);
                if (err)
                        goto out;
        }

        while (iter->ops->next_entry(iter, al)) {
                err = iter->ops->add_next_entry(iter, al);
                if (err)
                        break;

                if (iter->he && iter->add_entry_cb) {
                        err = iter->add_entry_cb(iter, al, false, arg);
                        if (err)
                                goto out;
                }
        }

out:
        err2 = iter->ops->finish_entry(iter, al);
        if (!err)
                err = err2;

        return err;
}
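
/*
 * Illustrative sketch (not part of the original file; field names assumed
 * from hist.h): a report-style caller fills a hist_entry_iter and lets the
 * ops table drive the per-sample work.  prepare_entry/add_single_entry run
 * once, next_entry/add_next_entry loop until exhausted, and finish_entry
 * cleans up whether or not an earlier step failed:
 */
static int __maybe_unused
example_process_sample(struct perf_evsel *evsel, struct perf_sample *sample,
                       struct addr_location *al)
{
        struct hist_entry_iter iter = {
                .evsel  = evsel,
                .sample = sample,
                .ops    = &hist_iter_normal,    /* or _branch/_mem/_cumulative */
        };

        return hist_entry_iter__add(&iter, al, PERF_MAX_STACK_DEPTH, NULL);
}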

int64_t
hist_entry__cmp(struct hist_entry *left, struct hist_entry *right)
{
        struct hists *hists = left->hists;
        struct perf_hpp_fmt *fmt;
        int64_t cmp = 0;

        hists__for_each_sort_list(hists, fmt) {
                if (perf_hpp__is_dynamic_entry(fmt) &&
                    !perf_hpp__defined_dynamic_entry(fmt, hists))
                        continue;

                cmp = fmt->cmp(fmt, left, right);
                if (cmp)
                        break;
        }

        return cmp;
}

int64_t
hist_entry__collapse(struct hist_entry *left, struct hist_entry *right)
{
        struct hists *hists = left->hists;
        struct perf_hpp_fmt *fmt;
        int64_t cmp = 0;

        hists__for_each_sort_list(hists, fmt) {
                if (perf_hpp__is_dynamic_entry(fmt) &&
                    !perf_hpp__defined_dynamic_entry(fmt, hists))
                        continue;

                cmp = fmt->collapse(fmt, left, right);
                if (cmp)
                        break;
        }

        return cmp;
}

void hist_entry__delete(struct hist_entry *he)
{
        thread__zput(he->thread);
        map__zput(he->ms.map);

        if (he->branch_info) {
                map__zput(he->branch_info->from.map);
                map__zput(he->branch_info->to.map);
                zfree(&he->branch_info);
        }

        if (he->mem_info) {
                map__zput(he->mem_info->iaddr.map);
                map__zput(he->mem_info->daddr.map);
                zfree(&he->mem_info);
        }

        zfree(&he->stat_acc);
        free_srcline(he->srcline);
        if (he->srcfile && he->srcfile[0])
                free(he->srcfile);
        free_callchain(he->callchain);
        free(he->trace_output);
        free(he->raw_data);
        free(he);
}

/*
 * If this is not the last column, then we need to pad it according to the
 * pre-calculated max length for this column, otherwise don't bother adding
 * spaces because that would break viewing this with, for instance, 'less',
 * which would show tons of trailing spaces when a long C++ demangled method
 * name is sampled.
 */
int hist_entry__snprintf_alignment(struct hist_entry *he, struct perf_hpp *hpp,
                                   struct perf_hpp_fmt *fmt, int printed)
{
        if (!list_is_last(&fmt->list, &he->hists->hpp_list->fields)) {
                const int width = fmt->width(fmt, hpp, hists_to_evsel(he->hists));
                if (printed < width) {
                        advance_hpp(hpp, printed);
                        printed = scnprintf(hpp->buf, hpp->size, "%-*s", width - printed, " ");
                }
        }

        return printed;
}
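
/*
 * Illustrative sketch (not part of the original file): the padding above
 * relies on scnprintf() with "%-*s" left-justifying a single space in a
 * (width - printed) wide field, e.g. a width-10 column that printed 6
 * characters receives 4 trailing blanks:
 */
static int __maybe_unused example_pad_column(char *buf, size_t size,
                                             int width, int printed)
{
        if (printed >= width)
                return printed;
        return scnprintf(buf, size, "%-*s", width - printed, " ");
}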

/*
 * collapse the histogram
 */

static void hists__apply_filters(struct hists *hists, struct hist_entry *he);
static void hists__remove_entry_filter(struct hists *hists, struct hist_entry *he,
                                       enum hist_filter type);

typedef bool (*fmt_chk_fn)(struct perf_hpp_fmt *fmt);

static bool check_thread_entry(struct perf_hpp_fmt *fmt)
{
        return perf_hpp__is_thread_entry(fmt) || perf_hpp__is_comm_entry(fmt);
}

static void hist_entry__check_and_remove_filter(struct hist_entry *he,
                                                enum hist_filter type,
                                                fmt_chk_fn check)
{
        struct perf_hpp_fmt *fmt;
        bool type_match = false;
        struct hist_entry *parent = he->parent_he;

        switch (type) {
        case HIST_FILTER__THREAD:
                if (symbol_conf.comm_list == NULL &&
                    symbol_conf.pid_list == NULL &&
                    symbol_conf.tid_list == NULL)
                        return;
                break;
        case HIST_FILTER__DSO:
                if (symbol_conf.dso_list == NULL)
                        return;
                break;
        case HIST_FILTER__SYMBOL:
                if (symbol_conf.sym_list == NULL)
                        return;
                break;
        case HIST_FILTER__PARENT:
        case HIST_FILTER__GUEST:
        case HIST_FILTER__HOST:
        case HIST_FILTER__SOCKET:
        default:
                return;
        }

        /* if it's filtered by its own fmt, it has to have filter bits */
        perf_hpp_list__for_each_format(he->hpp_list, fmt) {
                if (check(fmt)) {
                        type_match = true;
                        break;
                }
        }

        if (type_match) {
                /*
                 * If the filter is for the current level entry, propagate
                 * the filter marker to parents.  The marker bit was
                 * already set by default so it only needs to clear
                 * non-filtered entries.
                 */
                if (!(he->filtered & (1 << type))) {
                        while (parent) {
                                parent->filtered &= ~(1 << type);
                                parent = parent->parent_he;
                        }
                }
        } else {
                /*
                 * If the current entry doesn't have matching formats, set
                 * the filter marker for upper level entries.  It will be
                 * cleared if its lower level entries are not filtered.
                 *
                 * For lower-level entries, it inherits the parent's
                 * filter bit so that lower level entries of a
                 * non-filtered entry won't set the filter marker.
                 */
                if (parent == NULL)
                        he->filtered |= (1 << type);
                else
                        he->filtered |= (parent->filtered & (1 << type));
        }
}

static void hist_entry__apply_hierarchy_filters(struct hist_entry *he)
{
        hist_entry__check_and_remove_filter(he, HIST_FILTER__THREAD,
                                            check_thread_entry);

        hist_entry__check_and_remove_filter(he, HIST_FILTER__DSO,
                                            perf_hpp__is_dso_entry);

        hist_entry__check_and_remove_filter(he, HIST_FILTER__SYMBOL,
                                            perf_hpp__is_sym_entry);

        hists__apply_filters(he->hists, he);
}

static struct hist_entry *hierarchy_insert_entry(struct hists *hists,
                                                 struct rb_root *root,
                                                 struct hist_entry *he,
                                                 struct hist_entry *parent_he,
                                                 struct perf_hpp_list *hpp_list)
{
        struct rb_node **p = &root->rb_node;
        struct rb_node *parent = NULL;
        struct hist_entry *iter, *new;
        struct perf_hpp_fmt *fmt;
        int64_t cmp;

        while (*p != NULL) {
                parent = *p;
                iter = rb_entry(parent, struct hist_entry, rb_node_in);

                cmp = 0;
                perf_hpp_list__for_each_sort_list(hpp_list, fmt) {
                        cmp = fmt->collapse(fmt, iter, he);
                        if (cmp)
                                break;
                }

                if (!cmp) {
                        he_stat__add_stat(&iter->stat, &he->stat);
                        return iter;
                }

                if (cmp < 0)
                        p = &parent->rb_left;
                else
                        p = &parent->rb_right;
        }

        new = hist_entry__new(he, true);
        if (new == NULL)
                return NULL;

        hists->nr_entries++;

        /* save related format list for output */
        new->hpp_list = hpp_list;
        new->parent_he = parent_he;

        hist_entry__apply_hierarchy_filters(new);

        /* some fields are now passed to 'new' */
        perf_hpp_list__for_each_sort_list(hpp_list, fmt) {
                if (perf_hpp__is_trace_entry(fmt) || perf_hpp__is_dynamic_entry(fmt))
                        he->trace_output = NULL;
                else
                        new->trace_output = NULL;

                if (perf_hpp__is_srcline_entry(fmt))
                        he->srcline = NULL;
                else
                        new->srcline = NULL;

                if (perf_hpp__is_srcfile_entry(fmt))
                        he->srcfile = NULL;
                else
                        new->srcfile = NULL;
        }

        rb_link_node(&new->rb_node_in, parent, p);
        rb_insert_color(&new->rb_node_in, root);
        return new;
}

static int hists__hierarchy_insert_entry(struct hists *hists,
                                         struct rb_root *root,
                                         struct hist_entry *he)
{
        struct perf_hpp_list_node *node;
        struct hist_entry *new_he = NULL;
        struct hist_entry *parent = NULL;
        int depth = 0;
        int ret = 0;

        list_for_each_entry(node, &hists->hpp_formats, list) {
                /* skip period (overhead) and elided columns */
                if (node->level == 0 || node->skip)
                        continue;

                /* insert copy of 'he' for each fmt into the hierarchy */
                new_he = hierarchy_insert_entry(hists, root, he, parent, &node->hpp);
                if (new_he == NULL) {
                        ret = -1;
                        break;
                }

                root = &new_he->hroot_in;
                new_he->depth = depth++;
                parent = new_he;
        }

        if (new_he) {
                new_he->leaf = true;

                if (symbol_conf.use_callchain) {
                        callchain_cursor_reset(&callchain_cursor);
                        if (callchain_merge(&callchain_cursor,
                                            new_he->callchain,
                                            he->callchain) < 0)
                                ret = -1;
                }
        }

        /* 'he' is no longer used */
        hist_entry__delete(he);

        /* return 0 (or -1) since it already applied filters */
        return ret;
}

static int hists__collapse_insert_entry(struct hists *hists,
                                        struct rb_root *root,
                                        struct hist_entry *he)
{
        struct rb_node **p = &root->rb_node;
        struct rb_node *parent = NULL;
        struct hist_entry *iter;
        int64_t cmp;

        if (symbol_conf.report_hierarchy)
                return hists__hierarchy_insert_entry(hists, root, he);

        while (*p != NULL) {
                parent = *p;
                iter = rb_entry(parent, struct hist_entry, rb_node_in);

                cmp = hist_entry__collapse(iter, he);

                if (!cmp) {
                        int ret = 0;

                        he_stat__add_stat(&iter->stat, &he->stat);
                        if (symbol_conf.cumulate_callchain)
                                he_stat__add_stat(iter->stat_acc, he->stat_acc);

                        if (symbol_conf.use_callchain) {
                                callchain_cursor_reset(&callchain_cursor);
                                if (callchain_merge(&callchain_cursor,
                                                    iter->callchain,
                                                    he->callchain) < 0)
                                        ret = -1;
                        }
                        hist_entry__delete(he);
                        return ret;
                }

                if (cmp < 0)
                        p = &(*p)->rb_left;
                else
                        p = &(*p)->rb_right;
        }
        hists->nr_entries++;

        rb_link_node(&he->rb_node_in, parent, p);
        rb_insert_color(&he->rb_node_in, root);
        return 1;
}

struct rb_root *hists__get_rotate_entries_in(struct hists *hists)
{
        struct rb_root *root;

        pthread_mutex_lock(&hists->lock);

        root = hists->entries_in;
        if (++hists->entries_in > &hists->entries_in_array[1])
                hists->entries_in = &hists->entries_in_array[0];

        pthread_mutex_unlock(&hists->lock);

        return root;
}
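
/*
 * Illustrative sketch (not part of the original file): entries_in
 * ping-pongs between the two slots of entries_in_array, so new samples can
 * keep landing in one tree while hists__collapse_resort() drains the
 * other.  The same rotation in miniature, with hypothetical names:
 */
static struct rb_root * __maybe_unused
example_rotate(struct rb_root bufs[2], struct rb_root **current_in)
{
        struct rb_root *drain = *current_in;

        /* flip to the other buffer; the caller drains the returned one */
        *current_in = (drain == &bufs[0]) ? &bufs[1] : &bufs[0];
        return drain;
}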
1360
1361 static void hists__apply_filters(struct hists *hists, struct hist_entry *he)
1362 {
1363         hists__filter_entry_by_dso(hists, he);
1364         hists__filter_entry_by_thread(hists, he);
1365         hists__filter_entry_by_symbol(hists, he);
1366         hists__filter_entry_by_socket(hists, he);
1367 }
1368
1369 int hists__collapse_resort(struct hists *hists, struct ui_progress *prog)
1370 {
1371         struct rb_root *root;
1372         struct rb_node *next;
1373         struct hist_entry *n;
1374         int ret;
1375
1376         if (!hists__has(hists, need_collapse))
1377                 return 0;
1378
1379         hists->nr_entries = 0;
1380
1381         root = hists__get_rotate_entries_in(hists);
1382
1383         next = rb_first(root);
1384
1385         while (next) {
1386                 if (session_done())
1387                         break;
1388                 n = rb_entry(next, struct hist_entry, rb_node_in);
1389                 next = rb_next(&n->rb_node_in);
1390
1391                 rb_erase(&n->rb_node_in, root);
1392                 ret = hists__collapse_insert_entry(hists, &hists->entries_collapsed, n);
1393                 if (ret < 0)
1394                         return -1;
1395
1396                 if (ret) {
1397                         /*
1398                          * If it wasn't combined with one of the entries already
1399                          * collapsed, we need to apply the filters that may have
1400                          * been set by, say, the hist_browser.
1401                          */
1402                         hists__apply_filters(hists, n);
1403                 }
1404                 if (prog)
1405                         ui_progress__update(prog, 1);
1406         }
1407         return 0;
1408 }
1409
1410 static int hist_entry__sort(struct hist_entry *a, struct hist_entry *b)
1411 {
1412         struct hists *hists = a->hists;
1413         struct perf_hpp_fmt *fmt;
1414         int64_t cmp = 0;
1415
1416         hists__for_each_sort_list(hists, fmt) {
1417                 if (perf_hpp__should_skip(fmt, a->hists))
1418                         continue;
1419
1420                 cmp = fmt->sort(fmt, a, b);
1421                 if (cmp)
1422                         break;
1423         }
1424
1425         return cmp;
1426 }
1427
1428 static void hists__reset_filter_stats(struct hists *hists)
1429 {
1430         hists->nr_non_filtered_entries = 0;
1431         hists->stats.total_non_filtered_period = 0;
1432 }
1433
1434 void hists__reset_stats(struct hists *hists)
1435 {
1436         hists->nr_entries = 0;
1437         hists->stats.total_period = 0;
1438
1439         hists__reset_filter_stats(hists);
1440 }
1441
1442 static void hists__inc_filter_stats(struct hists *hists, struct hist_entry *h)
1443 {
1444         hists->nr_non_filtered_entries++;
1445         hists->stats.total_non_filtered_period += h->stat.period;
1446 }
1447
1448 void hists__inc_stats(struct hists *hists, struct hist_entry *h)
1449 {
1450         if (!h->filtered)
1451                 hists__inc_filter_stats(hists, h);
1452
1453         hists->nr_entries++;
1454         hists->stats.total_period += h->stat.period;
1455 }
1456
1457 static void hierarchy_recalc_total_periods(struct hists *hists)
1458 {
1459         struct rb_node *node;
1460         struct hist_entry *he;
1461
1462         node = rb_first(&hists->entries);
1463
1464         hists->stats.total_period = 0;
1465         hists->stats.total_non_filtered_period = 0;
1466
1467         /*
1468          * recalculate total period using top-level entries only
1469          * since lower level entries only see non-filtered entries
1470          * but upper level entries have sum of both entries.
1471          */
1472         while (node) {
1473                 he = rb_entry(node, struct hist_entry, rb_node);
1474                 node = rb_next(node);
1475
1476                 hists->stats.total_period += he->stat.period;
1477                 if (!he->filtered)
1478                         hists->stats.total_non_filtered_period += he->stat.period;
1479         }
1480 }
1481
1482 static void hierarchy_insert_output_entry(struct rb_root *root,
1483                                           struct hist_entry *he)
1484 {
1485         struct rb_node **p = &root->rb_node;
1486         struct rb_node *parent = NULL;
1487         struct hist_entry *iter;
1488         struct perf_hpp_fmt *fmt;
1489
1490         while (*p != NULL) {
1491                 parent = *p;
1492                 iter = rb_entry(parent, struct hist_entry, rb_node);
1493
1494                 if (hist_entry__sort(he, iter) > 0)
1495                         p = &parent->rb_left;
1496                 else
1497                         p = &parent->rb_right;
1498         }
1499
1500         rb_link_node(&he->rb_node, parent, p);
1501         rb_insert_color(&he->rb_node, root);
1502
1503         /* update column width of dynamic entry */
1504         perf_hpp_list__for_each_sort_list(he->hpp_list, fmt) {
1505                 if (perf_hpp__is_dynamic_entry(fmt))
1506                         fmt->sort(fmt, he, NULL);
1507         }
1508 }
1509
1510 static void hists__hierarchy_output_resort(struct hists *hists,
1511                                            struct ui_progress *prog,
1512                                            struct rb_root *root_in,
1513                                            struct rb_root *root_out,
1514                                            u64 min_callchain_hits,
1515                                            bool use_callchain)
1516 {
1517         struct rb_node *node;
1518         struct hist_entry *he;
1519
1520         *root_out = RB_ROOT;
1521         node = rb_first(root_in);
1522
1523         while (node) {
1524                 he = rb_entry(node, struct hist_entry, rb_node_in);
1525                 node = rb_next(node);
1526
1527                 hierarchy_insert_output_entry(root_out, he);
1528
1529                 if (prog)
1530                         ui_progress__update(prog, 1);
1531
1532                 if (!he->leaf) {
1533                         hists__hierarchy_output_resort(hists, prog,
1534                                                        &he->hroot_in,
1535                                                        &he->hroot_out,
1536                                                        min_callchain_hits,
1537                                                        use_callchain);
1538                         hists->nr_entries++;
1539                         if (!he->filtered) {
1540                                 hists->nr_non_filtered_entries++;
1541                                 hists__calc_col_len(hists, he);
1542                         }
1543
1544                         continue;
1545                 }
1546
1547                 if (!use_callchain)
1548                         continue;
1549
1550                 if (callchain_param.mode == CHAIN_GRAPH_REL) {
1551                         u64 total = he->stat.period;
1552
1553                         if (symbol_conf.cumulate_callchain)
1554                                 total = he->stat_acc->period;
1555
1556                         min_callchain_hits = total * (callchain_param.min_percent / 100);
1557                 }
1558
1559                 callchain_param.sort(&he->sorted_chain, he->callchain,
1560                                      min_callchain_hits, &callchain_param);
1561         }
1562 }
1563
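/*
 * Non-hierarchy counterpart: resort @he's callchain (pruning branches
 * below min_callchain_hits) and insert it into the flat output rbtree.
 */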
1564 static void __hists__insert_output_entry(struct rb_root *entries,
1565                                          struct hist_entry *he,
1566                                          u64 min_callchain_hits,
1567                                          bool use_callchain)
1568 {
1569         struct rb_node **p = &entries->rb_node;
1570         struct rb_node *parent = NULL;
1571         struct hist_entry *iter;
1572         struct perf_hpp_fmt *fmt;
1573
1574         if (use_callchain) {
1575                 if (callchain_param.mode == CHAIN_GRAPH_REL) {
1576                         u64 total = he->stat.period;
1577
1578                         if (symbol_conf.cumulate_callchain)
1579                                 total = he->stat_acc->period;
1580
1581                         min_callchain_hits = total * (callchain_param.min_percent / 100);
1582                 }
1583                 callchain_param.sort(&he->sorted_chain, he->callchain,
1584                                       min_callchain_hits, &callchain_param);
1585         }
1586
1587         while (*p != NULL) {
1588                 parent = *p;
1589                 iter = rb_entry(parent, struct hist_entry, rb_node);
1590
1591                 if (hist_entry__sort(he, iter) > 0)
1592                         p = &(*p)->rb_left;
1593                 else
1594                         p = &(*p)->rb_right;
1595         }
1596
1597         rb_link_node(&he->rb_node, parent, p);
1598         rb_insert_color(&he->rb_node, entries);
1599
1600         perf_hpp_list__for_each_sort_list(&perf_hpp_list, fmt) {
1601                 if (perf_hpp__is_dynamic_entry(fmt) &&
1602                     perf_hpp__defined_dynamic_entry(fmt, he->hists))
1603                         fmt->sort(fmt, he, NULL);  /* update column width */
1604         }
1605 }
1606
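/*
 * Rebuild hists->entries in output order from the collapsed tree (or
 * directly from entries_in when no collapsing was needed), updating the
 * stats and column widths as entries are moved.
 */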
1607 static void output_resort(struct hists *hists, struct ui_progress *prog,
1608                           bool use_callchain)
1609 {
1610         struct rb_root *root;
1611         struct rb_node *next;
1612         struct hist_entry *n;
1613         u64 callchain_total;
1614         u64 min_callchain_hits;
1615
1616         callchain_total = hists->callchain_period;
1617         if (symbol_conf.filter_relative)
1618                 callchain_total = hists->callchain_non_filtered_period;
1619
1620         min_callchain_hits = callchain_total * (callchain_param.min_percent / 100);
1621
1622         hists__reset_stats(hists);
1623         hists__reset_col_len(hists);
1624
1625         if (symbol_conf.report_hierarchy) {
1626                 hists__hierarchy_output_resort(hists, prog,
1627                                                &hists->entries_collapsed,
1628                                                &hists->entries,
1629                                                min_callchain_hits,
1630                                                use_callchain);
1631                 hierarchy_recalc_total_periods(hists);
1632                 return;
1633         }
1634
1635         if (hists__has(hists, need_collapse))
1636                 root = &hists->entries_collapsed;
1637         else
1638                 root = hists->entries_in;
1639
1640         next = rb_first(root);
1641         hists->entries = RB_ROOT;
1642
1643         while (next) {
1644                 n = rb_entry(next, struct hist_entry, rb_node_in);
1645                 next = rb_next(&n->rb_node_in);
1646
1647                 __hists__insert_output_entry(&hists->entries, n, min_callchain_hits, use_callchain);
1648                 hists__inc_stats(hists, n);
1649
1650                 if (!n->filtered)
1651                         hists__calc_col_len(hists, n);
1652
1653                 if (prog)
1654                         ui_progress__update(prog, 1);
1655         }
1656 }
1657
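/*
 * Per-evsel resort: only use callchains if this event actually sampled
 * them, unless a reference callgraph is being shown.
 */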
1658 void perf_evsel__output_resort(struct perf_evsel *evsel, struct ui_progress *prog)
1659 {
1660         bool use_callchain;
1661
1662         if (evsel && symbol_conf.use_callchain && !symbol_conf.show_ref_callgraph)
1663                 use_callchain = evsel->attr.sample_type & PERF_SAMPLE_CALLCHAIN;
1664         else
1665                 use_callchain = symbol_conf.use_callchain;
1666
1667         output_resort(evsel__hists(evsel), prog, use_callchain);
1668 }
1669
1670 void hists__output_resort(struct hists *hists, struct ui_progress *prog)
1671 {
1672         output_resort(hists, prog, symbol_conf.use_callchain);
1673 }
1674
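/*
 * Hierarchy navigation helpers: whether we may descend into @he's
 * children depends on its fold state, unless the caller forces a
 * direction with HMD_FORCE_CHILD/HMD_FORCE_SIBLING.
 */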
1675 static bool can_goto_child(struct hist_entry *he, enum hierarchy_move_dir hmd)
1676 {
1677         if (he->leaf || hmd == HMD_FORCE_SIBLING)
1678                 return false;
1679
1680         if (he->unfolded || hmd == HMD_FORCE_CHILD)
1681                 return true;
1682
1683         return false;
1684 }
1685
1686 struct rb_node *rb_hierarchy_last(struct rb_node *node)
1687 {
1688         struct hist_entry *he = rb_entry(node, struct hist_entry, rb_node);
1689
1690         while (can_goto_child(he, HMD_NORMAL)) {
1691                 node = rb_last(&he->hroot_out);
1692                 he = rb_entry(node, struct hist_entry, rb_node);
1693         }
1694         return node;
1695 }
1696
1697 struct rb_node *__rb_hierarchy_next(struct rb_node *node, enum hierarchy_move_dir hmd)
1698 {
1699         struct hist_entry *he = rb_entry(node, struct hist_entry, rb_node);
1700
1701         if (can_goto_child(he, hmd))
1702                 node = rb_first(&he->hroot_out);
1703         else
1704                 node = rb_next(node);
1705
1706         while (node == NULL) {
1707                 he = he->parent_he;
1708                 if (he == NULL)
1709                         break;
1710
1711                 node = rb_next(&he->rb_node);
1712         }
1713         return node;
1714 }
1715
1716 struct rb_node *rb_hierarchy_prev(struct rb_node *node)
1717 {
1718         struct hist_entry *he = rb_entry(node, struct hist_entry, rb_node);
1719
1720         node = rb_prev(node);
1721         if (node)
1722                 return rb_hierarchy_last(node);
1723
1724         he = he->parent_he;
1725         if (he == NULL)
1726                 return NULL;
1727
1728         return &he->rb_node;
1729 }
1730
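/*
 * Check whether @he has an unfiltered child that passes the percent
 * @limit.
 */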
1731 bool hist_entry__has_hierarchy_children(struct hist_entry *he, float limit)
1732 {
1733         struct rb_node *node;
1734         struct hist_entry *child;
1735         float percent;
1736
1737         if (he->leaf)
1738                 return false;
1739
1740         node = rb_first(&he->hroot_out);
1741         child = rb_entry(node, struct hist_entry, rb_node);
1742
1743         while (node && child->filtered) {
1744                 node = rb_next(node);
1745                 child = rb_entry(node, struct hist_entry, rb_node);
1746         }
1747
1748         if (node)
1749                 percent = hist_entry__get_percent_limit(child);
1750         else
1751                 percent = 0;
1752
1753         return node && percent >= limit;
1754 }
1755
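/*
 * Clear @filter from @h; in hierarchy mode also propagate its stats to
 * the unfiltered parents. Once an entry becomes fully unfiltered it is
 * folded and accounted in the non-filtered stats.
 */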
1756 static void hists__remove_entry_filter(struct hists *hists, struct hist_entry *h,
1757                                        enum hist_filter filter)
1758 {
1759         h->filtered &= ~(1 << filter);
1760
1761         if (symbol_conf.report_hierarchy) {
1762                 struct hist_entry *parent = h->parent_he;
1763
1764                 while (parent) {
1765                         he_stat__add_stat(&parent->stat, &h->stat);
1766
1767                         parent->filtered &= ~(1 << filter);
1768
1769                         if (parent->filtered)
1770                                 goto next;
1771
1772                         /* force fold unfiltered entry for simplicity */
1773                         parent->unfolded = false;
1774                         parent->has_no_entry = false;
1775                         parent->row_offset = 0;
1776                         parent->nr_rows = 0;
1777 next:
1778                         parent = parent->parent_he;
1779                 }
1780         }
1781
1782         if (h->filtered)
1783                 return;
1784
1785         /* force fold unfiltered entry for simplicity */
1786         h->unfolded = false;
1787         h->has_no_entry = false;
1788         h->row_offset = 0;
1789         h->nr_rows = 0;
1790
1791         hists->stats.nr_non_filtered_samples += h->stat.nr_events;
1792
1793         hists__inc_filter_stats(hists, h);
1794         hists__calc_col_len(hists, h);
1795 }
1796
1797
1798 static bool hists__filter_entry_by_dso(struct hists *hists,
1799                                        struct hist_entry *he)
1800 {
1801         if (hists->dso_filter != NULL &&
1802             (he->ms.map == NULL || he->ms.map->dso != hists->dso_filter)) {
1803                 he->filtered |= (1 << HIST_FILTER__DSO);
1804                 return true;
1805         }
1806
1807         return false;
1808 }
1809
1810 static bool hists__filter_entry_by_thread(struct hists *hists,
1811                                           struct hist_entry *he)
1812 {
1813         if (hists->thread_filter != NULL &&
1814             he->thread != hists->thread_filter) {
1815                 he->filtered |= (1 << HIST_FILTER__THREAD);
1816                 return true;
1817         }
1818
1819         return false;
1820 }
1821
1822 static bool hists__filter_entry_by_symbol(struct hists *hists,
1823                                           struct hist_entry *he)
1824 {
1825         if (hists->symbol_filter_str != NULL &&
1826             (!he->ms.sym || strstr(he->ms.sym->name,
1827                                    hists->symbol_filter_str) == NULL)) {
1828                 he->filtered |= (1 << HIST_FILTER__SYMBOL);
1829                 return true;
1830         }
1831
1832         return false;
1833 }
1834
1835 static bool hists__filter_entry_by_socket(struct hists *hists,
1836                                           struct hist_entry *he)
1837 {
1838         if ((hists->socket_filter > -1) &&
1839             (he->socket != hists->socket_filter)) {
1840                 he->filtered |= (1 << HIST_FILTER__SOCKET);
1841                 return true;
1842         }
1843
1844         return false;
1845 }
1846
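/*
 * A filter predicate returns true when @he is filtered out by the
 * current filter settings.
 */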
1847 typedef bool (*filter_fn_t)(struct hists *hists, struct hist_entry *he);
1848
1849 static void hists__filter_by_type(struct hists *hists, int type, filter_fn_t filter)
1850 {
1851         struct rb_node *nd;
1852
1853         hists->stats.nr_non_filtered_samples = 0;
1854
1855         hists__reset_filter_stats(hists);
1856         hists__reset_col_len(hists);
1857
1858         for (nd = rb_first(&hists->entries); nd; nd = rb_next(nd)) {
1859                 struct hist_entry *h = rb_entry(nd, struct hist_entry, rb_node);
1860
1861                 if (filter(hists, h))
1862                         continue;
1863
1864                 hists__remove_entry_filter(hists, h, type);
1865         }
1866 }
1867
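/*
 * Reinsert @he into @root using the output sort order, then recursively
 * rebuild its (unfiltered, non-leaf) subtree the same way.
 */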
1868 static void resort_filtered_entry(struct rb_root *root, struct hist_entry *he)
1869 {
1870         struct rb_node **p = &root->rb_node;
1871         struct rb_node *parent = NULL;
1872         struct hist_entry *iter;
1873         struct rb_root new_root = RB_ROOT;
1874         struct rb_node *nd;
1875
1876         while (*p != NULL) {
1877                 parent = *p;
1878                 iter = rb_entry(parent, struct hist_entry, rb_node);
1879
1880                 if (hist_entry__sort(he, iter) > 0)
1881                         p = &(*p)->rb_left;
1882                 else
1883                         p = &(*p)->rb_right;
1884         }
1885
1886         rb_link_node(&he->rb_node, parent, p);
1887         rb_insert_color(&he->rb_node, root);
1888
1889         if (he->leaf || he->filtered)
1890                 return;
1891
1892         nd = rb_first(&he->hroot_out);
1893         while (nd) {
1894                 struct hist_entry *h = rb_entry(nd, struct hist_entry, rb_node);
1895
1896                 nd = rb_next(nd);
1897                 rb_erase(&h->rb_node, &he->hroot_out);
1898
1899                 resort_filtered_entry(&new_root, h);
1900         }
1901
1902         he->hroot_out = new_root;
1903 }
1904
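/*
 * Apply a filter to the whole hierarchy. hist_entry__filter() returns
 * a negative value for a non-matching sort type, 1 when the entry is
 * filtered out and 0 when it passes; see the three cases below.
 */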
1905 static void hists__filter_hierarchy(struct hists *hists, int type, const void *arg)
1906 {
1907         struct rb_node *nd;
1908         struct rb_root new_root = RB_ROOT;
1909
1910         hists->stats.nr_non_filtered_samples = 0;
1911
1912         hists__reset_filter_stats(hists);
1913         hists__reset_col_len(hists);
1914
1915         nd = rb_first(&hists->entries);
1916         while (nd) {
1917                 struct hist_entry *h = rb_entry(nd, struct hist_entry, rb_node);
1918                 int ret;
1919
1920                 ret = hist_entry__filter(h, type, arg);
1921
1922                 /*
1923                  * case 1. non-matching type
1924                  * zero out the period, set filter marker and move to child
1925                  */
1926                 if (ret < 0) {
1927                         memset(&h->stat, 0, sizeof(h->stat));
1928                         h->filtered |= (1 << type);
1929
1930                         nd = __rb_hierarchy_next(&h->rb_node, HMD_FORCE_CHILD);
1931                 }
1932                 /*
1933                  * case 2. matched type (filter out)
1934                  * set filter marker and move to next
1935                  */
1936                 else if (ret == 1) {
1937                         h->filtered |= (1 << type);
1938
1939                         nd = __rb_hierarchy_next(&h->rb_node, HMD_FORCE_SIBLING);
1940                 }
1941                 /*
1942                  * case 3. ok (not filtered)
1943                  * add period to hists and parents, erase the filter marker
1944                  * and move to next sibling
1945                  */
1946                 else {
1947                         hists__remove_entry_filter(hists, h, type);
1948
1949                         nd = __rb_hierarchy_next(&h->rb_node, HMD_FORCE_SIBLING);
1950                 }
1951         }
1952
1953         hierarchy_recalc_total_periods(hists);
1954
1955         /*
1956          * Resort the output after applying a new filter, since a filter at a
1957          * lower hierarchy level can change periods at an upper level.
1958          */
1959         nd = rb_first(&hists->entries);
1960         while (nd) {
1961                 struct hist_entry *h = rb_entry(nd, struct hist_entry, rb_node);
1962
1963                 nd = rb_next(nd);
1964                 rb_erase(&h->rb_node, &hists->entries);
1965
1966                 resort_filtered_entry(&new_root, h);
1967         }
1968
1969         hists->entries = new_root;
1970 }
1971
1972 void hists__filter_by_thread(struct hists *hists)
1973 {
1974         if (symbol_conf.report_hierarchy)
1975                 hists__filter_hierarchy(hists, HIST_FILTER__THREAD,
1976                                         hists->thread_filter);
1977         else
1978                 hists__filter_by_type(hists, HIST_FILTER__THREAD,
1979                                       hists__filter_entry_by_thread);
1980 }
1981
1982 void hists__filter_by_dso(struct hists *hists)
1983 {
1984         if (symbol_conf.report_hierarchy)
1985                 hists__filter_hierarchy(hists, HIST_FILTER__DSO,
1986                                         hists->dso_filter);
1987         else
1988                 hists__filter_by_type(hists, HIST_FILTER__DSO,
1989                                       hists__filter_entry_by_dso);
1990 }
1991
1992 void hists__filter_by_symbol(struct hists *hists)
1993 {
1994         if (symbol_conf.report_hierarchy)
1995                 hists__filter_hierarchy(hists, HIST_FILTER__SYMBOL,
1996                                         hists->symbol_filter_str);
1997         else
1998                 hists__filter_by_type(hists, HIST_FILTER__SYMBOL,
1999                                       hists__filter_entry_by_symbol);
2000 }
2001
2002 void hists__filter_by_socket(struct hists *hists)
2003 {
2004         if (symbol_conf.report_hierarchy)
2005                 hists__filter_hierarchy(hists, HIST_FILTER__SOCKET,
2006                                         &hists->socket_filter);
2007         else
2008                 hists__filter_by_type(hists, HIST_FILTER__SOCKET,
2009                                       hists__filter_entry_by_socket);
2010 }
2011
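/*
 * Slot 0 of nr_events is used as the running total across all
 * PERF_RECORD_* types.
 */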
2012 void events_stats__inc(struct events_stats *stats, u32 type)
2013 {
2014         ++stats->nr_events[0];
2015         ++stats->nr_events[type];
2016 }
2017
2018 void hists__inc_nr_events(struct hists *hists, u32 type)
2019 {
2020         events_stats__inc(&hists->stats, type);
2021 }
2022
2023 void hists__inc_nr_samples(struct hists *hists, bool filtered)
2024 {
2025         events_stats__inc(&hists->stats, PERF_RECORD_SAMPLE);
2026         if (!filtered)
2027                 hists->stats.nr_non_filtered_samples++;
2028 }
2029
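/*
 * Add a zeroed placeholder for @pair to @hists (used by hists__link()
 * below), or return the existing entry if one already collapses with it.
 */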
2030 static struct hist_entry *hists__add_dummy_entry(struct hists *hists,
2031                                                  struct hist_entry *pair)
2032 {
2033         struct rb_root *root;
2034         struct rb_node **p;
2035         struct rb_node *parent = NULL;
2036         struct hist_entry *he;
2037         int64_t cmp;
2038
2039         if (hists__has(hists, need_collapse))
2040                 root = &hists->entries_collapsed;
2041         else
2042                 root = hists->entries_in;
2043
2044         p = &root->rb_node;
2045
2046         while (*p != NULL) {
2047                 parent = *p;
2048                 he = rb_entry(parent, struct hist_entry, rb_node_in);
2049
2050                 cmp = hist_entry__collapse(he, pair);
2051
2052                 if (!cmp)
2053                         goto out;
2054
2055                 if (cmp < 0)
2056                         p = &(*p)->rb_left;
2057                 else
2058                         p = &(*p)->rb_right;
2059         }
2060
2061         he = hist_entry__new(pair, true);
2062         if (he) {
2063                 memset(&he->stat, 0, sizeof(he->stat));
2064                 he->hists = hists;
2065                 if (symbol_conf.cumulate_callchain)
2066                         memset(he->stat_acc, 0, sizeof(*he->stat_acc));
2067                 rb_link_node(&he->rb_node_in, parent, p);
2068                 rb_insert_color(&he->rb_node_in, root);
2069                 hists__inc_stats(hists, he);
2070                 he->dummy = true;
2071         }
2072 out:
2073         return he;
2074 }
2075
2076 static struct hist_entry *hists__find_entry(struct hists *hists,
2077                                             struct hist_entry *he)
2078 {
2079         struct rb_node *n;
2080
2081         if (hists__has(hists, need_collapse))
2082                 n = hists->entries_collapsed.rb_node;
2083         else
2084                 n = hists->entries_in->rb_node;
2085
2086         while (n) {
2087                 struct hist_entry *iter = rb_entry(n, struct hist_entry, rb_node_in);
2088                 int64_t cmp = hist_entry__collapse(iter, he);
2089
2090                 if (cmp < 0)
2091                         n = n->rb_left;
2092                 else if (cmp > 0)
2093                         n = n->rb_right;
2094                 else
2095                         return iter;
2096         }
2097
2098         return NULL;
2099 }
2100
2101 /*
2102  * Look for pairs to link to the leader buckets (hist_entries):
2103  */
2104 void hists__match(struct hists *leader, struct hists *other)
2105 {
2106         struct rb_root *root;
2107         struct rb_node *nd;
2108         struct hist_entry *pos, *pair;
2109
2110         if (hists__has(leader, need_collapse))
2111                 root = &leader->entries_collapsed;
2112         else
2113                 root = leader->entries_in;
2114
2115         for (nd = rb_first(root); nd; nd = rb_next(nd)) {
2116                 pos  = rb_entry(nd, struct hist_entry, rb_node_in);
2117                 pair = hists__find_entry(other, pos);
2118
2119                 if (pair)
2120                         hist_entry__add_pair(pair, pos);
2121         }
2122 }
2123
2124 /*
2125  * Look for entries in the other hists that are not present in the leader; if
2126  * we find any, add a dummy entry to the leader hists, with period=0 and
2127  * nr_events=0, to serve as the list header.
2128  */
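/*
 * A minimal usage sketch (hypothetical caller names, in the spirit of
 * perf diff): match the common entries first, then link the leftovers:
 *
 *	hists__match(leader_hists, other_hists);
 *	if (hists__link(leader_hists, other_hists) < 0)
 *		return -1;
 */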
2129 int hists__link(struct hists *leader, struct hists *other)
2130 {
2131         struct rb_root *root;
2132         struct rb_node *nd;
2133         struct hist_entry *pos, *pair;
2134
2135         if (hists__has(other, need_collapse))
2136                 root = &other->entries_collapsed;
2137         else
2138                 root = other->entries_in;
2139
2140         for (nd = rb_first(root); nd; nd = rb_next(nd)) {
2141                 pos = rb_entry(nd, struct hist_entry, rb_node_in);
2142
2143                 if (!hist_entry__has_pairs(pos)) {
2144                         pair = hists__add_dummy_entry(leader, pos);
2145                         if (pair == NULL)
2146                                 return -1;
2147                         hist_entry__add_pair(pos, pair);
2148                 }
2149         }
2150
2151         return 0;
2152 }
2153
2154 void hist__account_cycles(struct branch_stack *bs, struct addr_location *al,
2155                           struct perf_sample *sample, bool nonany_branch_mode)
2156 {
2157         struct branch_info *bi;
2158
2159         /* If we have branch cycles, always annotate them. */
2160         if (bs && bs->nr && bs->entries[0].flags.cycles) {
2161                 int i;
2162
2163                 bi = sample__resolve_bstack(sample, al);
2164                 if (bi) {
2165                         struct addr_map_symbol *prev = NULL;
2166
2167                         /*
2168                          * Ignore errors; we still want to process the
2169                          * other entries.
2170                          *
2171                          * For non-standard branch modes, always
2172                          * force no IPC (prev == NULL).
2173                          *
2174                          * Note that perf stores branches reversed from
2175                          * program order!
2176                          */
2177                         for (i = bs->nr - 1; i >= 0; i--) {
2178                                 addr_map_symbol__account_cycles(&bi[i].from,
2179                                         nonany_branch_mode ? NULL : prev,
2180                                         bi[i].flags.cycles);
2181                                 prev = &bi[i].to;
2182                         }
2183                         free(bi);
2184                 }
2185         }
2186 }
2187
2188 size_t perf_evlist__fprintf_nr_events(struct perf_evlist *evlist, FILE *fp)
2189 {
2190         struct perf_evsel *pos;
2191         size_t ret = 0;
2192
2193         evlist__for_each(evlist, pos) {
2194                 ret += fprintf(fp, "%s stats:\n", perf_evsel__name(pos));
2195                 ret += events_stats__fprintf(&evsel__hists(pos)->stats, fp);
2196         }
2197
2198         return ret;
2199 }
2200
2201
2202 u64 hists__total_period(struct hists *hists)
2203 {
2204         return symbol_conf.filter_relative ? hists->stats.total_non_filtered_period :
2205                 hists->stats.total_period;
2206 }
2207
2208 int parse_filter_percentage(const struct option *opt __maybe_unused,
2209                             const char *arg, int unset __maybe_unused)
2210 {
2211         if (!strcmp(arg, "relative"))
2212                 symbol_conf.filter_relative = true;
2213         else if (!strcmp(arg, "absolute"))
2214                 symbol_conf.filter_relative = false;
2215         else
2216                 return -1;
2217
2218         return 0;
2219 }
2220
2221 int perf_hist_config(const char *var, const char *value)
2222 {
2223         if (!strcmp(var, "hist.percentage"))
2224                 return parse_filter_percentage(NULL, value, 0);
2225
2226         return 0;
2227 }
2228
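/*
 * entries_in_array provides two input rbtrees so new entries can be
 * added to one while the other is being consumed; entries_in points at
 * the currently active one.
 */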
2229 int __hists__init(struct hists *hists, struct perf_hpp_list *hpp_list)
2230 {
2231         memset(hists, 0, sizeof(*hists));
2232         hists->entries_in_array[0] = hists->entries_in_array[1] = RB_ROOT;
2233         hists->entries_in = &hists->entries_in_array[0];
2234         hists->entries_collapsed = RB_ROOT;
2235         hists->entries = RB_ROOT;
2236         pthread_mutex_init(&hists->lock, NULL);
2237         hists->socket_filter = -1;
2238         hists->hpp_list = hpp_list;
2239         INIT_LIST_HEAD(&hists->hpp_formats);
2240         return 0;
2241 }
2242
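/*
 * Free any entries still hanging off an input or collapsed tree, i.e.
 * those that were never moved to the output tree.
 */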
2243 static void hists__delete_remaining_entries(struct rb_root *root)
2244 {
2245         struct rb_node *node;
2246         struct hist_entry *he;
2247
2248         while (!RB_EMPTY_ROOT(root)) {
2249                 node = rb_first(root);
2250                 rb_erase(node, root);
2251
2252                 he = rb_entry(node, struct hist_entry, rb_node_in);
2253                 hist_entry__delete(he);
2254         }
2255 }
2256
2257 static void hists__delete_all_entries(struct hists *hists)
2258 {
2259         hists__delete_entries(hists);
2260         hists__delete_remaining_entries(&hists->entries_in_array[0]);
2261         hists__delete_remaining_entries(&hists->entries_in_array[1]);
2262         hists__delete_remaining_entries(&hists->entries_collapsed);
2263 }
2264
2265 static void hists_evsel__exit(struct perf_evsel *evsel)
2266 {
2267         struct hists *hists = evsel__hists(evsel);
2268         struct perf_hpp_fmt *fmt, *pos;
2269         struct perf_hpp_list_node *node, *tmp;
2270
2271         hists__delete_all_entries(hists);
2272
2273         list_for_each_entry_safe(node, tmp, &hists->hpp_formats, list) {
2274                 perf_hpp_list__for_each_format_safe(&node->hpp, fmt, pos) {
2275                         list_del(&fmt->list);
2276                         free(fmt);
2277                 }
2278                 list_del(&node->list);
2279                 free(node);
2280         }
2281 }
2282
2283 static int hists_evsel__init(struct perf_evsel *evsel)
2284 {
2285         struct hists *hists = evsel__hists(evsel);
2286
2287         __hists__init(hists, &perf_hpp_list);
2288         return 0;
2289 }
2290
2291 /*
2292  * The hist_entries stored in the rbtrees are freed by hists_evsel__exit()
2293  * above, so nothing leaks when an evsel goes away.
2294  */
2295
2296 int hists__init(void)
2297 {
2298         int err = perf_evsel__object_config(sizeof(struct hists_evsel),
2299                                             hists_evsel__init,
2300                                             hists_evsel__exit);
2301         if (err)
2302                 fputs("FATAL ERROR: Couldn't setup hists class\n", stderr);
2303
2304         return err;
2305 }
2306
2307 void perf_hpp_list__init(struct perf_hpp_list *list)
2308 {
2309         INIT_LIST_HEAD(&list->fields);
2310         INIT_LIST_HEAD(&list->sorts);
2311 }