Merge tag 'mm-hotfixes-stable-2025-07-11-16-16' of git://git.kernel.org/pub/scm/linux...
[linux-2.6-block.git] / tools / perf / util / hist.c
CommitLineData
b2441318 1// SPDX-License-Identifier: GPL-2.0
b10ba7f1 2#include "callchain.h"
b4209025 3#include "debug.h"
4a3cec84 4#include "dso.h"
598357eb 5#include "build-id.h"
3d1d07ec 6#include "hist.h"
ebf39d29 7#include "kvm-stat.h"
9c68ae98 8#include "map.h"
d3300a3c
ACM
9#include "map_symbol.h"
10#include "branch.h"
11#include "mem-events.h"
ad3003a6 12#include "mem-info.h"
4e4f06e4 13#include "session.h"
d890a98c 14#include "namespaces.h"
b629f3e9 15#include "cgroup.h"
4e4f06e4 16#include "sort.h"
25c312db 17#include "units.h"
2a1731fb 18#include "evlist.h"
29d720ed 19#include "evsel.h"
69bcb019 20#include "annotate.h"
632a5cab 21#include "srcline.h"
daecf9e0 22#include "symbol.h"
e7ff8920 23#include "thread.h"
60414418 24#include "block-info.h"
740b97f9 25#include "ui/progress.h"
a43783ae 26#include <errno.h>
9b33827d 27#include <math.h>
25c312db 28#include <inttypes.h>
391e4206 29#include <sys/param.h>
5c9dbe6d 30#include <linux/rbtree.h>
8520a98d 31#include <linux/string.h>
3723908d 32#include <linux/time64.h>
7f7c536f 33#include <linux/zalloc.h>
3d1d07ec 34
cd57c04c
DV
35static int64_t hist_entry__cmp(struct hist_entry *left, struct hist_entry *right);
36static int64_t hist_entry__collapse(struct hist_entry *left, struct hist_entry *right);
37
90cf1fb5
ACM
38static bool hists__filter_entry_by_dso(struct hists *hists,
39 struct hist_entry *he);
40static bool hists__filter_entry_by_thread(struct hists *hists,
41 struct hist_entry *he);
e94d53eb
NK
42static bool hists__filter_entry_by_symbol(struct hists *hists,
43 struct hist_entry *he);
21394d94
KL
44static bool hists__filter_entry_by_socket(struct hists *hists,
45 struct hist_entry *he);
61b6b31c
DV
46static bool hists__filter_entry_by_parallelism(struct hists *hists,
47 struct hist_entry *he);
90cf1fb5 48
/* Return the current display width of column @col in @hists. */
u16 hists__col_len(struct hists *hists, enum hist_column col)
{
	return hists->col_len[col];
}
53
/* Unconditionally set the display width of column @col in @hists. */
void hists__set_col_len(struct hists *hists, enum hist_column col, u16 len)
{
	hists->col_len[col] = len;
}
58
42b28ac0 59bool hists__new_col_len(struct hists *hists, enum hist_column col, u16 len)
8a6c5b26 60{
42b28ac0
ACM
61 if (len > hists__col_len(hists, col)) {
62 hists__set_col_len(hists, col, len);
8a6c5b26
ACM
63 return true;
64 }
65 return false;
66}
67
7ccf4f90 68void hists__reset_col_len(struct hists *hists)
8a6c5b26
ACM
69{
70 enum hist_column col;
71
72 for (col = 0; col < HISTC_NR_COLS; ++col)
42b28ac0 73 hists__set_col_len(hists, col, 0);
8a6c5b26
ACM
74}
75
b5387528
RAV
76static void hists__set_unres_dso_col_len(struct hists *hists, int dso)
77{
78 const unsigned int unresolved_col_width = BITS_PER_LONG / 4;
79
80 if (hists__col_len(hists, dso) < unresolved_col_width &&
81 !symbol_conf.col_width_list_str && !symbol_conf.field_sep &&
82 !symbol_conf.dso_list)
83 hists__set_col_len(hists, dso, unresolved_col_width);
84}
85
/*
 * Grow the per-column display widths of @hists so that entry @h fits.
 * Only widens columns (via hists__new_col_len()); never shrinks them.
 * Block-info entries are skipped — their columns are handled elsewhere.
 */
void hists__calc_col_len(struct hists *hists, struct hist_entry *h)
{
	const unsigned int unresolved_col_width = BITS_PER_LONG / 4;
	int symlen;
	u16 len;

	if (h->block_info)
		return;
	/*
	 * +4 accounts for '[x] ' priv level info
	 * +2 accounts for 0x prefix on raw addresses
	 * +3 accounts for ' y ' symtab origin info
	 */
	if (h->ms.sym) {
		symlen = h->ms.sym->namelen + 4;
		if (verbose > 0)
			symlen += BITS_PER_LONG / 4 + 2 + 3;
		hists__new_col_len(hists, HISTC_SYMBOL, symlen);
	} else {
		/* Unresolved: printed as a raw address. */
		symlen = unresolved_col_width + 4 + 2;
		hists__new_col_len(hists, HISTC_SYMBOL, symlen);
		hists__set_unres_dso_col_len(hists, HISTC_DSO);
	}

	len = thread__comm_len(h->thread);
	if (hists__new_col_len(hists, HISTC_COMM, len))
		/* +8 leaves room for the appended pid/tid. */
		hists__set_col_len(hists, HISTC_THREAD, len + 8);

	if (h->ms.map) {
		len = dso__name_len(map__dso(h->ms.map));
		hists__new_col_len(hists, HISTC_DSO, len);
	}

	if (h->parent)
		hists__new_col_len(hists, HISTC_PARENT, h->parent->namelen);

	/* Branch-stack entries have separate from/to symbol+dso columns. */
	if (h->branch_info) {
		if (h->branch_info->from.ms.sym) {
			symlen = (int)h->branch_info->from.ms.sym->namelen + 4;
			if (verbose > 0)
				symlen += BITS_PER_LONG / 4 + 2 + 3;
			hists__new_col_len(hists, HISTC_SYMBOL_FROM, symlen);

			symlen = dso__name_len(map__dso(h->branch_info->from.ms.map));
			hists__new_col_len(hists, HISTC_DSO_FROM, symlen);
		} else {
			symlen = unresolved_col_width + 4 + 2;
			hists__new_col_len(hists, HISTC_SYMBOL_FROM, symlen);
			hists__new_col_len(hists, HISTC_ADDR_FROM, symlen);
			hists__set_unres_dso_col_len(hists, HISTC_DSO_FROM);
		}

		if (h->branch_info->to.ms.sym) {
			symlen = (int)h->branch_info->to.ms.sym->namelen + 4;
			if (verbose > 0)
				symlen += BITS_PER_LONG / 4 + 2 + 3;
			hists__new_col_len(hists, HISTC_SYMBOL_TO, symlen);

			symlen = dso__name_len(map__dso(h->branch_info->to.ms.map));
			hists__new_col_len(hists, HISTC_DSO_TO, symlen);
		} else {
			symlen = unresolved_col_width + 4 + 2;
			hists__new_col_len(hists, HISTC_SYMBOL_TO, symlen);
			hists__new_col_len(hists, HISTC_ADDR_TO, symlen);
			hists__set_unres_dso_col_len(hists, HISTC_DSO_TO);
		}

		if (h->branch_info->srcline_from)
			hists__new_col_len(hists, HISTC_SRCLINE_FROM,
					strlen(h->branch_info->srcline_from));
		if (h->branch_info->srcline_to)
			hists__new_col_len(hists, HISTC_SRCLINE_TO,
					strlen(h->branch_info->srcline_to));
	}

	/* Memory-sample entries: data/instruction address symbol+dso columns. */
	if (h->mem_info) {
		if (mem_info__daddr(h->mem_info)->ms.sym) {
			symlen = (int)mem_info__daddr(h->mem_info)->ms.sym->namelen + 4
			       + unresolved_col_width + 2;
			hists__new_col_len(hists, HISTC_MEM_DADDR_SYMBOL,
					   symlen);
			hists__new_col_len(hists, HISTC_MEM_DCACHELINE,
					   symlen + 1);
		} else {
			symlen = unresolved_col_width + 4 + 2;
			hists__new_col_len(hists, HISTC_MEM_DADDR_SYMBOL,
					   symlen);
			hists__new_col_len(hists, HISTC_MEM_DCACHELINE,
					   symlen);
		}

		if (mem_info__iaddr(h->mem_info)->ms.sym) {
			symlen = (int)mem_info__iaddr(h->mem_info)->ms.sym->namelen + 4
			       + unresolved_col_width + 2;
			hists__new_col_len(hists, HISTC_MEM_IADDR_SYMBOL,
					   symlen);
		} else {
			symlen = unresolved_col_width + 4 + 2;
			hists__new_col_len(hists, HISTC_MEM_IADDR_SYMBOL,
					   symlen);
		}

		if (mem_info__daddr(h->mem_info)->ms.map) {
			symlen = dso__name_len(map__dso(mem_info__daddr(h->mem_info)->ms.map));
			hists__new_col_len(hists, HISTC_MEM_DADDR_DSO,
					   symlen);
		} else {
			symlen = unresolved_col_width + 4 + 2;
			hists__set_unres_dso_col_len(hists, HISTC_MEM_DADDR_DSO);
		}

		hists__new_col_len(hists, HISTC_MEM_PHYS_DADDR,
				   unresolved_col_width + 4 + 2);

		hists__new_col_len(hists, HISTC_MEM_DATA_PAGE_SIZE,
				   unresolved_col_width + 4 + 2);

	} else {
		symlen = unresolved_col_width + 4 + 2;
		hists__new_col_len(hists, HISTC_MEM_DADDR_SYMBOL, symlen);
		hists__new_col_len(hists, HISTC_MEM_IADDR_SYMBOL, symlen);
		hists__set_unres_dso_col_len(hists, HISTC_MEM_DADDR_DSO);
	}

	/* Fixed minimum widths for the remaining columns. */
	hists__new_col_len(hists, HISTC_CGROUP, 6);
	hists__new_col_len(hists, HISTC_CGROUP_ID, 20);
	hists__new_col_len(hists, HISTC_PARALLELISM, 11);
	hists__new_col_len(hists, HISTC_CPU, 3);
	hists__new_col_len(hists, HISTC_SOCKET, 6);
	hists__new_col_len(hists, HISTC_MEM_LOCKED, 6);
	hists__new_col_len(hists, HISTC_MEM_TLB, 22);
	hists__new_col_len(hists, HISTC_MEM_SNOOP, 12);
	hists__new_col_len(hists, HISTC_MEM_LVL, 36 + 3);
	hists__new_col_len(hists, HISTC_LOCAL_WEIGHT, 12);
	hists__new_col_len(hists, HISTC_GLOBAL_WEIGHT, 12);
	hists__new_col_len(hists, HISTC_MEM_BLOCKED, 10);
	hists__new_col_len(hists, HISTC_LOCAL_INS_LAT, 13);
	hists__new_col_len(hists, HISTC_GLOBAL_INS_LAT, 13);
	hists__new_col_len(hists, HISTC_LOCAL_P_STAGE_CYC, 13);
	hists__new_col_len(hists, HISTC_GLOBAL_P_STAGE_CYC, 13);
	hists__new_col_len(hists, HISTC_ADDR, BITS_PER_LONG / 4 + 2);
	hists__new_col_len(hists, HISTC_CALLCHAIN_BRANCH_PREDICTED, 9);
	hists__new_col_len(hists, HISTC_CALLCHAIN_BRANCH_ABORT, 5);
	hists__new_col_len(hists, HISTC_CALLCHAIN_BRANCH_CYCLES, 6);

	if (symbol_conf.nanosecs)
		hists__new_col_len(hists, HISTC_TIME, 16);
	else
		hists__new_col_len(hists, HISTC_TIME, 12);
	hists__new_col_len(hists, HISTC_CODE_PAGE_SIZE, 6);

	if (h->srcline) {
		len = MAX(strlen(h->srcline), strlen(sort_srcline.se_header));
		hists__new_col_len(hists, HISTC_SRCLINE, len);
	}

	if (h->srcfile)
		hists__new_col_len(hists, HISTC_SRCFILE, strlen(h->srcfile));

	if (h->transaction)
		hists__new_col_len(hists, HISTC_TRANSACTION,
				   hist_entry__transaction_len());

	if (h->trace_output)
		hists__new_col_len(hists, HISTC_TRACE, strlen(h->trace_output));

	if (h->cgroup) {
		const char *cgrp_name = "unknown";
		struct cgroup *cgrp = cgroup__find(maps__machine(h->ms.maps)->env,
						   h->cgroup);
		if (cgrp != NULL)
			cgrp_name = cgrp->name;

		hists__new_col_len(hists, HISTC_CGROUP, strlen(cgrp_name));
	}
}
262
7ccf4f90
NK
263void hists__output_recalc_col_len(struct hists *hists, int max_rows)
264{
2eb3d689 265 struct rb_node *next = rb_first_cached(&hists->entries);
7ccf4f90
NK
266 struct hist_entry *n;
267 int row = 0;
268
269 hists__reset_col_len(hists);
270
271 while (next && row++ < max_rows) {
272 n = rb_entry(next, struct hist_entry, rb_node);
273 if (!n->filtered)
274 hists__calc_col_len(hists, n);
275 next = rb_next(&n->rb_node);
276 }
277}
278
f39056f9
NK
279static void he_stat__add_cpumode_period(struct he_stat *he_stat,
280 unsigned int cpumode, u64 period)
a1645ce1 281{
28e2a106 282 switch (cpumode) {
a1645ce1 283 case PERF_RECORD_MISC_KERNEL:
f39056f9 284 he_stat->period_sys += period;
a1645ce1
ZY
285 break;
286 case PERF_RECORD_MISC_USER:
f39056f9 287 he_stat->period_us += period;
a1645ce1
ZY
288 break;
289 case PERF_RECORD_MISC_GUEST_KERNEL:
f39056f9 290 he_stat->period_guest_sys += period;
a1645ce1
ZY
291 break;
292 case PERF_RECORD_MISC_GUEST_USER:
f39056f9 293 he_stat->period_guest_us += period;
a1645ce1
ZY
294 break;
295 default:
296 break;
297 }
298}
299
3723908d
AK
300static long hist_time(unsigned long htime)
301{
302 unsigned long time_quantum = symbol_conf.time_quantum;
303 if (time_quantum)
304 return (htime / time_quantum) * time_quantum;
305 return htime;
306}
307
/* Fold one sample's period and latency into @he_stat and count the event. */
static void he_stat__add_period(struct he_stat *he_stat, u64 period, u64 latency)
{
	he_stat->period += period;
	he_stat->latency += latency;
	he_stat->nr_events += 1;
}
314
/* Accumulate every counter of @src into @dest (used when merging entries). */
static void he_stat__add_stat(struct he_stat *dest, struct he_stat *src)
{
	dest->period		+= src->period;
	dest->period_sys	+= src->period_sys;
	dest->period_us		+= src->period_us;
	dest->period_guest_sys	+= src->period_guest_sys;
	dest->period_guest_us	+= src->period_guest_us;
	dest->weight1		+= src->weight1;
	dest->weight2		+= src->weight2;
	dest->weight3		+= src->weight3;
	dest->nr_events		+= src->nr_events;
	dest->latency		+= src->latency;
}
328
/*
 * Exponentially decay the counters of @he_stat by a factor of 7/8.
 * Used by top-style displays to age out old samples.
 */
static void he_stat__decay(struct he_stat *he_stat)
{
	he_stat->period = (he_stat->period * 7) / 8;
	he_stat->nr_events = (he_stat->nr_events * 7) / 8;
	he_stat->weight1 = (he_stat->weight1 * 7) / 8;
	he_stat->weight2 = (he_stat->weight2 * 7) / 8;
	he_stat->weight3 = (he_stat->weight3 * 7) / 8;
	he_stat->latency = (he_stat->latency * 7) / 8;
}
338
/*
 * Account @period into the memory-stat bucket selected by @mi's data
 * source, both per-entry and in the hists-wide totals.  The per-entry
 * array is allocated lazily on first use.
 *
 * Returns 0 on success (including when no mem stats are configured),
 * -1 on allocation failure.
 */
static int hists__update_mem_stat(struct hists *hists, struct hist_entry *he,
				  struct mem_info *mi, u64 period)
{
	if (hists->nr_mem_stats == 0)
		return 0;

	if (he->mem_stat == NULL) {
		he->mem_stat = calloc(hists->nr_mem_stats, sizeof(*he->mem_stat));
		if (he->mem_stat == NULL)
			return -1;
	}

	for (int i = 0; i < hists->nr_mem_stats; i++) {
		int idx = mem_stat_index(hists->mem_stat_types[i],
					 mem_info__const_data_src(mi)->val);

		assert(0 <= idx && idx < MEM_STAT_LEN);
		he->mem_stat[i].entries[idx] += period;
		hists->mem_stat_total[i].entries[idx] += period;
	}
	return 0;
}
361
362static void hists__add_mem_stat(struct hists *hists, struct hist_entry *dst,
363 struct hist_entry *src)
364{
365 if (hists->nr_mem_stats == 0)
366 return;
367
368 for (int i = 0; i < hists->nr_mem_stats; i++) {
369 for (int k = 0; k < MEM_STAT_LEN; k++)
370 dst->mem_stat[i].entries[k] += src->mem_stat[i].entries[k];
371 }
372}
373
374static int hists__clone_mem_stat(struct hists *hists, struct hist_entry *dst,
375 struct hist_entry *src)
376{
377 if (hists->nr_mem_stats == 0)
378 return 0;
379
380 dst->mem_stat = calloc(hists->nr_mem_stats, sizeof(*dst->mem_stat));
381 if (dst->mem_stat == NULL)
382 return -1;
383
384 for (int i = 0; i < hists->nr_mem_stats; i++) {
385 for (int k = 0; k < MEM_STAT_LEN; k++)
386 dst->mem_stat[i].entries[k] = src->mem_stat[i].entries[k];
387 }
388 return 0;
389}
390
391static void hists__decay_mem_stat(struct hists *hists, struct hist_entry *he)
392{
393 if (hists->nr_mem_stats == 0)
394 return;
395
396 for (int i = 0; i < hists->nr_mem_stats; i++) {
397 for (int k = 0; k < MEM_STAT_LEN; k++)
398 he->mem_stat[i].entries[k] = (he->mem_stat[i].entries[k] * 7) / 8;
399 }
400}
401
5d8200ae
NK
402static void hists__delete_entry(struct hists *hists, struct hist_entry *he);
403
/*
 * Decay the stats of @he (and, for hierarchy mode, recursively of its
 * children, deleting any child that fully decays).  Global hists stats
 * are only adjusted for top-level entries (!he->depth) to avoid double
 * counting in hierarchy mode.
 *
 * Returns true when the entry has fully decayed and can be removed.
 */
static bool hists__decay_entry(struct hists *hists, struct hist_entry *he)
{
	u64 prev_period = he->stat.period;
	u64 prev_latency = he->stat.latency;

	if (prev_period == 0)
		return true;

	he_stat__decay(&he->stat);
	if (symbol_conf.cumulate_callchain)
		he_stat__decay(he->stat_acc);
	decay_callchain(he->callchain);
	hists__decay_mem_stat(hists, he);

	if (!he->depth) {
		u64 period_diff = prev_period - he->stat.period;
		u64 latency_diff = prev_latency - he->stat.latency;

		hists->stats.total_period -= period_diff;
		hists->stats.total_latency -= latency_diff;
		if (!he->filtered) {
			hists->stats.total_non_filtered_period -= period_diff;
			hists->stats.total_non_filtered_latency -= latency_diff;
		}
	}

	if (!he->leaf) {
		struct hist_entry *child;
		struct rb_node *node = rb_first_cached(&he->hroot_out);
		while (node) {
			child = rb_entry(node, struct hist_entry, rb_node);
			/* advance before a possible delete of @child */
			node = rb_next(node);

			if (hists__decay_entry(hists, child))
				hists__delete_entry(hists, child);
		}
	}

	return he->stat.period == 0 && he->stat.latency == 0;
}
444
/*
 * Unlink @he from both the input and output rbtrees it lives in
 * (either its parent's subtrees in hierarchy mode, or the hists-level
 * trees), fix up the entry counts, and free it.
 */
static void hists__delete_entry(struct hists *hists, struct hist_entry *he)
{
	struct rb_root_cached *root_in;
	struct rb_root_cached *root_out;

	if (he->parent_he) {
		root_in = &he->parent_he->hroot_in;
		root_out = &he->parent_he->hroot_out;
	} else {
		if (hists__has(hists, need_collapse))
			root_in = &hists->entries_collapsed;
		else
			root_in = hists->entries_in;
		root_out = &hists->entries;
	}

	rb_erase_cached(&he->rb_node_in, root_in);
	rb_erase_cached(&he->rb_node, root_out);

	--hists->nr_entries;
	if (!he->filtered)
		--hists->nr_non_filtered_entries;

	hist_entry__delete(he);
}
470
3a5714f8 471void hists__decay_entries(struct hists *hists, bool zap_user, bool zap_kernel)
ab81f3fd 472{
2eb3d689 473 struct rb_node *next = rb_first_cached(&hists->entries);
ab81f3fd
ACM
474 struct hist_entry *n;
475
476 while (next) {
477 n = rb_entry(next, struct hist_entry, rb_node);
478 next = rb_next(&n->rb_node);
b079d4e9
ACM
479 if (((zap_user && n->level == '.') ||
480 (zap_kernel && n->level != '.') ||
4c47f4fc 481 hists__decay_entry(hists, n))) {
956b65e1 482 hists__delete_entry(hists, n);
ab81f3fd
ACM
483 }
484 }
485}
486
701937bd
NK
487void hists__delete_entries(struct hists *hists)
488{
2eb3d689 489 struct rb_node *next = rb_first_cached(&hists->entries);
701937bd
NK
490 struct hist_entry *n;
491
492 while (next) {
493 n = rb_entry(next, struct hist_entry, rb_node);
494 next = rb_next(&n->rb_node);
495
956b65e1 496 hists__delete_entry(hists, n);
701937bd
NK
497 }
498}
499
b10c78c5
JY
500struct hist_entry *hists__get_entry(struct hists *hists, int idx)
501{
502 struct rb_node *next = rb_first_cached(&hists->entries);
503 struct hist_entry *n;
504 int i = 0;
505
506 while (next) {
507 n = rb_entry(next, struct hist_entry, rb_node);
508 if (i == idx)
509 return n;
510
511 next = rb_next(&n->rb_node);
512 i++;
513 }
514
515 return NULL;
516}
517
3d1d07ec 518/*
c82ee828 519 * histogram, sorted on item, collects periods
3d1d07ec
JK
520 */
521
/*
 * Deep-initialize @he from @template: take references on the maps/map/
 * thread it points to and duplicate the template-owned buffers (branch
 * info, mem info, raw data, srcline) so @he outlives the sample that
 * produced it.  When cumulate_callchain is on, the template stats are
 * copied into stat_acc and, if !@sample_self, the direct stats start
 * zeroed.
 *
 * Returns 0 on success, -ENOMEM on any allocation failure (unwinding
 * whatever was acquired so far via the goto-cleanup chain).
 */
static int hist_entry__init(struct hist_entry *he,
			    struct hist_entry *template,
			    bool sample_self,
			    size_t callchain_size)
{
	*he = *template;
	he->callchain_size = callchain_size;

	if (symbol_conf.cumulate_callchain) {
		he->stat_acc = malloc(sizeof(he->stat));
		if (he->stat_acc == NULL)
			return -ENOMEM;
		memcpy(he->stat_acc, &he->stat, sizeof(he->stat));
		if (!sample_self)
			memset(&he->stat, 0, sizeof(he->stat));
	}

	/* take our own references; the template's belong to the caller */
	he->ms.maps = maps__get(he->ms.maps);
	he->ms.map = map__get(he->ms.map);

	if (he->branch_info) {
		/*
		 * This branch info is (a part of) allocated from
		 * sample__resolve_bstack() and will be freed after
		 * adding new entries. So we need to save a copy.
		 */
		he->branch_info = malloc(sizeof(*he->branch_info));
		if (he->branch_info == NULL)
			goto err;

		memcpy(he->branch_info, template->branch_info,
		       sizeof(*he->branch_info));

		he->branch_info->from.ms.maps = maps__get(he->branch_info->from.ms.maps);
		he->branch_info->from.ms.map = map__get(he->branch_info->from.ms.map);
		he->branch_info->to.ms.maps = maps__get(he->branch_info->to.ms.maps);
		he->branch_info->to.ms.map = map__get(he->branch_info->to.ms.map);
	}

	if (he->mem_info) {
		he->mem_info = mem_info__clone(template->mem_info);
		if (he->mem_info == NULL)
			goto err_infos;
	}

	if (hist_entry__has_callchains(he) && symbol_conf.use_callchain)
		callchain_init(he->callchain);

	if (he->raw_data) {
		he->raw_data = memdup(he->raw_data, he->raw_size);
		if (he->raw_data == NULL)
			goto err_infos;
	}

	/* SRCLINE_UNKNOWN is a shared sentinel — never strdup'd or freed */
	if (he->srcline && he->srcline != SRCLINE_UNKNOWN) {
		he->srcline = strdup(he->srcline);
		if (he->srcline == NULL)
			goto err_rawdata;
	}

	if (symbol_conf.res_sample) {
		he->res_samples = calloc(symbol_conf.res_sample,
					 sizeof(struct res_sample));
		if (!he->res_samples)
			goto err_srcline;
	}

	INIT_LIST_HEAD(&he->pairs.node);
	he->thread = thread__get(he->thread);
	he->hroot_in = RB_ROOT_CACHED;
	he->hroot_out = RB_ROOT_CACHED;

	if (!symbol_conf.report_hierarchy)
		he->leaf = true;

	return 0;

err_srcline:
	zfree(&he->srcline);

err_rawdata:
	zfree(&he->raw_data);

err_infos:
	if (he->branch_info) {
		map_symbol__exit(&he->branch_info->from.ms);
		map_symbol__exit(&he->branch_info->to.ms);
		zfree(&he->branch_info);
	}
	if (he->mem_info) {
		map_symbol__exit(&mem_info__iaddr(he->mem_info)->ms);
		map_symbol__exit(&mem_info__daddr(he->mem_info)->ms);
	}
err:
	map_symbol__exit(&he->ms);
	zfree(&he->stat_acc);
	return -ENOMEM;
}
620
/*
 * Default hist_entry allocator: one zeroed chunk holding @size extra
 * bytes (the callchain area) followed by the hist_entry itself.
 */
static void *hist_entry__zalloc(size_t size)
{
	return zalloc(size + sizeof(struct hist_entry));
}

static void hist_entry__free(void *ptr)
{
	free(ptr);
}

/* Fallback ops used when a template carries no custom allocator. */
static struct hist_entry_ops default_ops = {
	.new	= hist_entry__zalloc,
	.free	= hist_entry__free,
};
635
/*
 * Allocate a new hist_entry via the template's ops (or default_ops) and
 * deep-initialize it from @template.  Returns NULL on failure, freeing
 * the partially built entry.
 */
static struct hist_entry *hist_entry__new(struct hist_entry *template,
					  bool sample_self)
{
	struct hist_entry_ops *ops = template->ops;
	size_t callchain_size = 0;
	struct hist_entry *he;
	int err = 0;

	if (!ops)
		ops = template->ops = &default_ops;

	/* extra space for the callchain root, allocated in the same chunk */
	if (symbol_conf.use_callchain)
		callchain_size = sizeof(struct callchain_root);

	he = ops->new(callchain_size);
	if (he) {
		err = hist_entry__init(he, template, sample_self, callchain_size);
		if (err) {
			ops->free(he);
			he = NULL;
		}
	}
	return he;
}
660
216f8a97 661static filter_mask_t symbol__parent_filter(const struct symbol *parent)
7a007ca9
ACM
662{
663 if (symbol_conf.exclude_other && parent == NULL)
664 return 1 << HIST_FILTER__PARENT;
665 return 0;
666}
667
/*
 * Add @period/@latency to the hists-wide callchain totals (and the
 * non-filtered totals when @he is unfiltered).  No-op unless the entry
 * actually collects callchains.
 */
static void hist_entry__add_callchain_period(struct hist_entry *he, u64 period, u64 latency)
{
	if (!hist_entry__has_callchains(he) || !symbol_conf.use_callchain)
		return;

	he->hists->callchain_period += period;
	he->hists->callchain_latency += latency;
	if (!he->filtered) {
		he->hists->callchain_non_filtered_period += period;
		he->hists->callchain_non_filtered_latency += latency;
	}
}
680
/*
 * Look @entry up in the input rbtree; if an equal entry exists, merge
 * the new stats into it (freeing the duplicate block/kvm info), else
 * insert a freshly allocated copy.  Returns the resident entry, or
 * NULL on allocation failure.
 */
static struct hist_entry *hists__findnew_entry(struct hists *hists,
					       struct hist_entry *entry,
					       const struct addr_location *al,
					       bool sample_self)
{
	struct rb_node **p;
	struct rb_node *parent = NULL;
	struct hist_entry *he;
	int64_t cmp;
	u64 period = entry->stat.period;
	u64 latency = entry->stat.latency;
	bool leftmost = true;

	p = &hists->entries_in->rb_root.rb_node;

	while (*p != NULL) {
		parent = *p;
		he = rb_entry(parent, struct hist_entry, rb_node_in);

		/*
		 * Make sure that it receives arguments in a same order as
		 * hist_entry__collapse() so that we can use an appropriate
		 * function when searching an entry regardless which sort
		 * keys were used.
		 */
		cmp = hist_entry__cmp(he, entry);
		if (!cmp) {
			if (sample_self) {
				he_stat__add_stat(&he->stat, &entry->stat);
				hist_entry__add_callchain_period(he, period, latency);
			}
			if (symbol_conf.cumulate_callchain)
				he_stat__add_period(he->stat_acc, period, latency);

			/* merged into @he — the duplicates are not kept */
			block_info__delete(entry->block_info);

			kvm_info__zput(entry->kvm_info);

			/* If the map of an existing hist_entry has
			 * become out-of-date due to an exec() or
			 * similar, update it. Otherwise we will
			 * mis-adjust symbol addresses when computing
			 * the history counter to increment.
			 */
			if (hists__has(hists, sym) && he->ms.map != entry->ms.map) {
				if (he->ms.sym) {
					u64 addr = he->ms.sym->start;
					he->ms.sym = map__find_symbol(entry->ms.map, addr);
				}

				map__put(he->ms.map);
				he->ms.map = map__get(entry->ms.map);
			}
			goto out;
		}

		if (cmp < 0)
			p = &(*p)->rb_left;
		else {
			p = &(*p)->rb_right;
			leftmost = false;
		}
	}

	he = hist_entry__new(entry, sample_self);
	if (!he)
		return NULL;

	if (sample_self)
		hist_entry__add_callchain_period(he, period, latency);
	hists->nr_entries++;

	rb_link_node(&he->rb_node_in, parent, p);
	rb_insert_color_cached(&he->rb_node_in, hists->entries_in, leftmost);
out:
	if (sample_self)
		he_stat__add_cpumode_period(&he->stat, al->cpumode, period);
	if (symbol_conf.cumulate_callchain)
		he_stat__add_cpumode_period(he->stat_acc, al->cpumode, period);
	if (hists__update_mem_stat(hists, he, entry->mem_info, period) < 0) {
		hist_entry__delete(he);
		return NULL;
	}
	return he;
}
766
/*
 * Return a uniformly distributed value in [0, high) with no modulo
 * bias: values below the rejection threshold are redrawn.
 */
static unsigned random_max(unsigned high)
{
	unsigned reject = -high % high;
	unsigned r;

	do {
		r = random();
	} while (r < reject);

	return r % high;
}
776
/*
 * Record @sample's time/cpu/tid in @he's reservoir of representative
 * samples: fill sequentially up to res_sample entries, then replace a
 * uniformly random slot (reservoir-style replacement).
 */
static void hists__res_sample(struct hist_entry *he, struct perf_sample *sample)
{
	struct res_sample *r;
	int j;

	if (he->num_res < symbol_conf.res_sample) {
		j = he->num_res++;
	} else {
		j = random_max(symbol_conf.res_sample);
	}
	r = &he->res_samples[j];
	r->time = sample->time;
	r->cpu = sample->cpu;
	r->tid = sample->tid;
}
792
/*
 * Build an on-stack hist_entry template from the resolved sample
 * location and feed it to hists__findnew_entry(), which either merges
 * it into an existing entry or clones it into the tree.  Also records
 * a representative sample and updates the has_callchains flag.
 *
 * Returns the resident entry or NULL on allocation failure.
 */
static struct hist_entry*
__hists__add_entry(struct hists *hists,
		   struct addr_location *al,
		   struct symbol *sym_parent,
		   struct branch_info *bi,
		   struct mem_info *mi,
		   struct kvm_info *ki,
		   struct block_info *block_info,
		   struct perf_sample *sample,
		   bool sample_self,
		   struct hist_entry_ops *ops)
{
	struct namespaces *ns = thread__namespaces(al->thread);
	struct hist_entry entry = {
		.thread = al->thread,
		.comm = thread__comm(al->thread),
		.cgroup_id = {
			.dev = ns ? ns->link_info[CGROUP_NS_INDEX].dev : 0,
			.ino = ns ? ns->link_info[CGROUP_NS_INDEX].ino : 0,
		},
		.cgroup = sample->cgroup,
		.ms = {
			.maps	= al->maps,
			.map	= al->map,
			.sym	= al->sym,
		},
		.srcline = (char *) al->srcline,
		.socket	 = al->socket,
		.cpu	 = al->cpu,
		.cpumode = al->cpumode,
		.ip	 = al->addr,
		.level	 = al->level,
		.code_page_size = sample->code_page_size,
		.parallelism	= al->parallelism,
		.stat = {
			.nr_events = 1,
			.period	= sample->period,
			.weight1 = sample->weight,
			.weight2 = sample->ins_lat,
			.weight3 = sample->p_stage_cyc,
			.latency = al->latency,
		},
		.parent = sym_parent,
		.filtered = symbol__parent_filter(sym_parent) | al->filtered,
		.hists	= hists,
		.branch_info = bi,
		.mem_info = mi,
		.kvm_info = ki,
		.block_info = block_info,
		.transaction = sample->transaction,
		.raw_data = sample->raw_data,
		.raw_size = sample->raw_size,
		.ops = ops,
		.time = hist_time(sample->time),
		.weight = sample->weight,
		.ins_lat = sample->ins_lat,
		.p_stage_cyc = sample->p_stage_cyc,
		.simd_flags = sample->simd_flags,
	}, *he = hists__findnew_entry(hists, &entry, al, sample_self);

	if (!hists->has_callchains && he && he->callchain_size != 0)
		hists->has_callchains = true;
	if (he && symbol_conf.res_sample)
		hists__res_sample(he, sample);
	return he;
}
859
/* Public wrapper: add a sample with no block info and default entry ops. */
struct hist_entry *hists__add_entry(struct hists *hists,
				    struct addr_location *al,
				    struct symbol *sym_parent,
				    struct branch_info *bi,
				    struct mem_info *mi,
				    struct kvm_info *ki,
				    struct perf_sample *sample,
				    bool sample_self)
{
	return __hists__add_entry(hists, al, sym_parent, bi, mi, ki, NULL,
				  sample, sample_self, NULL);
}
872
/* Like hists__add_entry() but with caller-supplied allocation ops. */
struct hist_entry *hists__add_entry_ops(struct hists *hists,
					struct hist_entry_ops *ops,
					struct addr_location *al,
					struct symbol *sym_parent,
					struct branch_info *bi,
					struct mem_info *mi,
					struct kvm_info *ki,
					struct perf_sample *sample,
					bool sample_self)
{
	return __hists__add_entry(hists, al, sym_parent, bi, mi, ki, NULL,
				  sample, sample_self, ops);
}
886
/*
 * Add a basic-block entry (perf block-range reports): only block info
 * and the resolved map/symbol are filled in; no sample stats are
 * accumulated (sample_self == false).
 */
struct hist_entry *hists__add_entry_block(struct hists *hists,
					  struct addr_location *al,
					  struct block_info *block_info)
{
	struct hist_entry entry = {
		.block_info = block_info,
		.hists = hists,
		.ms = {
			.maps = al->maps,
			.map = al->map,
			.sym = al->sym,
		},
	}, *he = hists__findnew_entry(hists, &entry, al, false);

	return he;
}
902}
903
/* No-op "next" callback: this iterator type yields no extra entries. */
static int
iter_next_nop_entry(struct hist_entry_iter *iter __maybe_unused,
		    struct addr_location *al __maybe_unused)
{
	return 0;
}

/* No-op "add next" callback, paired with iter_next_nop_entry(). */
static int
iter_add_next_nop_entry(struct hist_entry_iter *iter __maybe_unused,
			struct addr_location *al __maybe_unused)
{
	return 0;
}
917
918static int
919iter_prepare_mem_entry(struct hist_entry_iter *iter, struct addr_location *al)
920{
921 struct perf_sample *sample = iter->sample;
922 struct mem_info *mi;
923
924 mi = sample__resolve_mem(sample, al);
925 if (mi == NULL)
926 return -ENOMEM;
927
d561e170 928 iter->mi = mi;
69bcb019
NK
929 return 0;
930}
931
/*
 * Add the single hist entry for a memory sample.  The sample weight is
 * used as the period (minimum 1) so sorting reflects cost, not count.
 */
static int
iter_add_single_mem_entry(struct hist_entry_iter *iter, struct addr_location *al)
{
	u64 cost;
	struct mem_info *mi = iter->mi;
	struct hists *hists = evsel__hists(iter->evsel);
	struct perf_sample *sample = iter->sample;
	struct hist_entry *he;

	if (mi == NULL)
		return -EINVAL;

	cost = sample->weight;
	if (!cost)
		cost = 1;

	/*
	 * must pass period=weight in order to get the correct
	 * sorting from hists__collapse_resort() which is solely
	 * based on periods. We want sorting be done on nr_events * weight
	 * and this is indirectly achieved by passing period=weight here
	 * and the he_stat__add_period() function.
	 */
	sample->period = cost;

	he = hists__add_entry(hists, al, iter->parent, NULL, mi, NULL,
			      sample, true);
	if (!he)
		return -ENOMEM;

	iter->he = he;
	return 0;
}
965
966static int
9d3c02d7
NK
967iter_finish_mem_entry(struct hist_entry_iter *iter,
968 struct addr_location *al __maybe_unused)
69bcb019 969{
32dcd021 970 struct evsel *evsel = iter->evsel;
4ea062ed 971 struct hists *hists = evsel__hists(evsel);
69bcb019 972 struct hist_entry *he = iter->he;
69bcb019
NK
973 int err = -EINVAL;
974
975 if (he == NULL)
976 goto out;
977
4ea062ed 978 hists__inc_nr_samples(hists, he->filtered);
69bcb019
NK
979
980 err = hist_entry__append_callchain(he, iter->sample);
981
982out:
d561e170 983 mem_info__zput(iter->mi);
69bcb019
NK
984
985 iter->he = NULL;
986 return err;
987}
988
989static int
990iter_prepare_branch_entry(struct hist_entry_iter *iter, struct addr_location *al)
991{
992 struct branch_info *bi;
993 struct perf_sample *sample = iter->sample;
994
995 bi = sample__resolve_bstack(sample, al);
996 if (!bi)
997 return -ENOMEM;
998
999 iter->curr = 0;
1000 iter->total = sample->branch_stack->nr;
1001
d561e170 1002 iter->bi = bi;
69bcb019
NK
1003 return 0;
1004}
1005
1006static int
2d78b189 1007iter_add_single_branch_entry(struct hist_entry_iter *iter __maybe_unused,
69bcb019
NK
1008 struct addr_location *al __maybe_unused)
1009{
1010 return 0;
1011}
1012
1013static int
1014iter_next_branch_entry(struct hist_entry_iter *iter, struct addr_location *al)
1015{
d561e170 1016 struct branch_info *bi = iter->bi;
69bcb019
NK
1017 int i = iter->curr;
1018
1019 if (bi == NULL)
1020 return 0;
1021
1022 if (iter->curr >= iter->total)
1023 return 0;
1024
0dd5041c
IR
1025 maps__put(al->maps);
1026 al->maps = maps__get(bi[i].to.ms.maps);
1027 map__put(al->map);
1028 al->map = map__get(bi[i].to.ms.map);
d46a4cdf 1029 al->sym = bi[i].to.ms.sym;
69bcb019
NK
1030 al->addr = bi[i].to.addr;
1031 return 1;
1032}
1033
1034static int
1035iter_add_next_branch_entry(struct hist_entry_iter *iter, struct addr_location *al)
1036{
9d3c02d7 1037 struct branch_info *bi;
32dcd021 1038 struct evsel *evsel = iter->evsel;
4ea062ed 1039 struct hists *hists = evsel__hists(evsel);
fd36f3dd 1040 struct perf_sample *sample = iter->sample;
69bcb019
NK
1041 struct hist_entry *he = NULL;
1042 int i = iter->curr;
1043 int err = 0;
1044
d561e170 1045 bi = iter->bi;
69bcb019 1046
d46a4cdf 1047 if (iter->hide_unresolved && !(bi[i].from.ms.sym && bi[i].to.ms.sym))
69bcb019
NK
1048 goto out;
1049
1050 /*
1051 * The report shows the percentage of total branches captured
1052 * and not events sampled. Thus we use a pseudo period of 1.
1053 */
fd36f3dd
NK
1054 sample->period = 1;
1055 sample->weight = bi->flags.cycles ? bi->flags.cycles : 1;
1056
ebf39d29 1057 he = hists__add_entry(hists, al, iter->parent, &bi[i], NULL, NULL,
0102ef3e 1058 sample, true);
69bcb019
NK
1059 if (he == NULL)
1060 return -ENOMEM;
1061
69bcb019
NK
1062out:
1063 iter->he = he;
1064 iter->curr++;
1065 return err;
1066}
1067
b2f70c99
IR
1068static void branch_info__exit(struct branch_info *bi)
1069{
1070 map_symbol__exit(&bi->from.ms);
1071 map_symbol__exit(&bi->to.ms);
1072 zfree_srcline(&bi->srcline_from);
1073 zfree_srcline(&bi->srcline_to);
1074}
1075
69bcb019
NK
1076static int
1077iter_finish_branch_entry(struct hist_entry_iter *iter,
1078 struct addr_location *al __maybe_unused)
1079{
c40aa8d9
TF
1080 struct evsel *evsel = iter->evsel;
1081 struct hists *hists = evsel__hists(evsel);
1082
b2f70c99
IR
1083 for (int i = 0; i < iter->total; i++)
1084 branch_info__exit(&iter->bi[i]);
1085
c40aa8d9
TF
1086 if (iter->he)
1087 hists__inc_nr_samples(hists, iter->he->filtered);
1088
d561e170 1089 zfree(&iter->bi);
69bcb019
NK
1090 iter->he = NULL;
1091
1092 return iter->curr >= iter->total ? 0 : -1;
1093}
1094
1095static int
1096iter_prepare_normal_entry(struct hist_entry_iter *iter __maybe_unused,
1097 struct addr_location *al __maybe_unused)
1098{
1099 return 0;
1100}
1101
1102static int
1103iter_add_single_normal_entry(struct hist_entry_iter *iter, struct addr_location *al)
1104{
32dcd021 1105 struct evsel *evsel = iter->evsel;
69bcb019
NK
1106 struct perf_sample *sample = iter->sample;
1107 struct hist_entry *he;
1108
0102ef3e 1109 he = hists__add_entry(evsel__hists(evsel), al, iter->parent, NULL, NULL,
ebf39d29 1110 NULL, sample, true);
69bcb019
NK
1111 if (he == NULL)
1112 return -ENOMEM;
1113
1114 iter->he = he;
1115 return 0;
1116}
1117
1118static int
9d3c02d7
NK
1119iter_finish_normal_entry(struct hist_entry_iter *iter,
1120 struct addr_location *al __maybe_unused)
69bcb019 1121{
69bcb019 1122 struct hist_entry *he = iter->he;
32dcd021 1123 struct evsel *evsel = iter->evsel;
69bcb019
NK
1124 struct perf_sample *sample = iter->sample;
1125
1126 if (he == NULL)
1127 return 0;
1128
1129 iter->he = NULL;
1130
4ea062ed 1131 hists__inc_nr_samples(evsel__hists(evsel), he->filtered);
69bcb019
NK
1132
1133 return hist_entry__append_callchain(he, sample);
1134}
1135
7a13aa28 1136static int
96b40f3c 1137iter_prepare_cumulative_entry(struct hist_entry_iter *iter,
7a13aa28
NK
1138 struct addr_location *al __maybe_unused)
1139{
b4d3c8bd 1140 struct hist_entry **he_cache;
8ab12a20 1141 struct callchain_cursor *cursor = get_tls_callchain_cursor();
b4d3c8bd 1142
8ab12a20
IR
1143 if (cursor == NULL)
1144 return -ENOMEM;
1145
1146 callchain_cursor_commit(cursor);
b4d3c8bd
NK
1147
1148 /*
1149 * This is for detecting cycles or recursions so that they're
1150 * cumulated only one time to prevent entries more than 100%
1151 * overhead.
1152 */
8ab12a20 1153 he_cache = malloc(sizeof(*he_cache) * (cursor->nr + 1));
b4d3c8bd
NK
1154 if (he_cache == NULL)
1155 return -ENOMEM;
1156
d561e170 1157 iter->he_cache = he_cache;
b4d3c8bd
NK
1158 iter->curr = 0;
1159
7a13aa28
NK
1160 return 0;
1161}
1162
1163static int
1164iter_add_single_cumulative_entry(struct hist_entry_iter *iter,
1165 struct addr_location *al)
1166{
32dcd021 1167 struct evsel *evsel = iter->evsel;
4ea062ed 1168 struct hists *hists = evsel__hists(evsel);
7a13aa28 1169 struct perf_sample *sample = iter->sample;
d561e170 1170 struct hist_entry **he_cache = iter->he_cache;
7a13aa28
NK
1171 struct hist_entry *he;
1172 int err = 0;
1173
ebf39d29 1174 he = hists__add_entry(hists, al, iter->parent, NULL, NULL, NULL,
0102ef3e 1175 sample, true);
7a13aa28
NK
1176 if (he == NULL)
1177 return -ENOMEM;
1178
1179 iter->he = he;
b4d3c8bd 1180 he_cache[iter->curr++] = he;
7a13aa28 1181
82aa019e 1182 hist_entry__append_callchain(he, sample);
be7f855a
NK
1183
1184 /*
1185 * We need to re-initialize the cursor since callchain_append()
1186 * advanced the cursor to the end.
1187 */
8ab12a20 1188 callchain_cursor_commit(get_tls_callchain_cursor());
be7f855a 1189
4ea062ed 1190 hists__inc_nr_samples(hists, he->filtered);
7a13aa28
NK
1191
1192 return err;
1193}
1194
1195static int
1196iter_next_cumulative_entry(struct hist_entry_iter *iter,
1197 struct addr_location *al)
1198{
1199 struct callchain_cursor_node *node;
1200
8ab12a20 1201 node = callchain_cursor_current(get_tls_callchain_cursor());
7a13aa28
NK
1202 if (node == NULL)
1203 return 0;
1204
c7405d85 1205 return fill_callchain_info(al, node, iter->hide_unresolved);
7a13aa28
NK
1206}
1207
12e89e65
KL
1208static bool
1209hist_entry__fast__sym_diff(struct hist_entry *left,
1210 struct hist_entry *right)
1211{
1212 struct symbol *sym_l = left->ms.sym;
1213 struct symbol *sym_r = right->ms.sym;
1214
1215 if (!sym_l && !sym_r)
1216 return left->ip != right->ip;
1217
1218 return !!_sort__sym_cmp(sym_l, sym_r);
1219}
1220
1221
7a13aa28
NK
1222static int
1223iter_add_next_cumulative_entry(struct hist_entry_iter *iter,
1224 struct addr_location *al)
1225{
32dcd021 1226 struct evsel *evsel = iter->evsel;
7a13aa28 1227 struct perf_sample *sample = iter->sample;
d561e170 1228 struct hist_entry **he_cache = iter->he_cache;
7a13aa28 1229 struct hist_entry *he;
b4d3c8bd 1230 struct hist_entry he_tmp = {
5cef8976 1231 .hists = evsel__hists(evsel),
b4d3c8bd
NK
1232 .cpu = al->cpu,
1233 .thread = al->thread,
1234 .comm = thread__comm(al->thread),
1235 .ip = al->addr,
1236 .ms = {
f2eaea09 1237 .maps = al->maps,
b4d3c8bd
NK
1238 .map = al->map,
1239 .sym = al->sym,
1240 },
26349585 1241 .srcline = (char *) al->srcline,
b4d3c8bd 1242 .parent = iter->parent,
72392834
NK
1243 .raw_data = sample->raw_data,
1244 .raw_size = sample->raw_size,
b4d3c8bd
NK
1245 };
1246 int i;
8ab12a20 1247 struct callchain_cursor cursor, *tls_cursor = get_tls_callchain_cursor();
12e89e65 1248 bool fast = hists__has(he_tmp.hists, sym);
be7f855a 1249
8ab12a20
IR
1250 if (tls_cursor == NULL)
1251 return -ENOMEM;
1252
1253 callchain_cursor_snapshot(&cursor, tls_cursor);
be7f855a 1254
8ab12a20 1255 callchain_cursor_advance(tls_cursor);
b4d3c8bd
NK
1256
1257 /*
1258 * Check if there's duplicate entries in the callchain.
1259 * It's possible that it has cycles or recursive calls.
1260 */
1261 for (i = 0; i < iter->curr; i++) {
12e89e65
KL
1262 /*
1263 * For most cases, there are no duplicate entries in callchain.
1264 * The symbols are usually different. Do a quick check for
1265 * symbols first.
1266 */
1267 if (fast && hist_entry__fast__sym_diff(he_cache[i], &he_tmp))
1268 continue;
1269
9d3c02d7
NK
1270 if (hist_entry__cmp(he_cache[i], &he_tmp) == 0) {
1271 /* to avoid calling callback function */
1272 iter->he = NULL;
b4d3c8bd 1273 return 0;
9d3c02d7 1274 }
b4d3c8bd 1275 }
7a13aa28 1276
0102ef3e 1277 he = hists__add_entry(evsel__hists(evsel), al, iter->parent, NULL, NULL,
ebf39d29 1278 NULL, sample, false);
7a13aa28
NK
1279 if (he == NULL)
1280 return -ENOMEM;
1281
1282 iter->he = he;
b4d3c8bd 1283 he_cache[iter->curr++] = he;
7a13aa28 1284
fabd37b8 1285 if (hist_entry__has_callchains(he) && symbol_conf.use_callchain)
82aa019e 1286 callchain_append(he->callchain, &cursor, sample->period);
7a13aa28
NK
1287 return 0;
1288}
1289
1290static int
1291iter_finish_cumulative_entry(struct hist_entry_iter *iter,
1292 struct addr_location *al __maybe_unused)
1293{
d561e170
IR
1294 mem_info__zput(iter->mi);
1295 zfree(&iter->bi);
1296 zfree(&iter->he_cache);
7a13aa28 1297 iter->he = NULL;
b4d3c8bd 1298
7a13aa28
NK
1299 return 0;
1300}
1301
69bcb019
NK
1302const struct hist_iter_ops hist_iter_mem = {
1303 .prepare_entry = iter_prepare_mem_entry,
1304 .add_single_entry = iter_add_single_mem_entry,
1305 .next_entry = iter_next_nop_entry,
1306 .add_next_entry = iter_add_next_nop_entry,
1307 .finish_entry = iter_finish_mem_entry,
1308};
1309
1310const struct hist_iter_ops hist_iter_branch = {
1311 .prepare_entry = iter_prepare_branch_entry,
1312 .add_single_entry = iter_add_single_branch_entry,
1313 .next_entry = iter_next_branch_entry,
1314 .add_next_entry = iter_add_next_branch_entry,
1315 .finish_entry = iter_finish_branch_entry,
1316};
1317
1318const struct hist_iter_ops hist_iter_normal = {
1319 .prepare_entry = iter_prepare_normal_entry,
1320 .add_single_entry = iter_add_single_normal_entry,
1321 .next_entry = iter_next_nop_entry,
1322 .add_next_entry = iter_add_next_nop_entry,
1323 .finish_entry = iter_finish_normal_entry,
1324};
1325
7a13aa28
NK
1326const struct hist_iter_ops hist_iter_cumulative = {
1327 .prepare_entry = iter_prepare_cumulative_entry,
1328 .add_single_entry = iter_add_single_cumulative_entry,
1329 .next_entry = iter_next_cumulative_entry,
1330 .add_next_entry = iter_add_next_cumulative_entry,
1331 .finish_entry = iter_finish_cumulative_entry,
1332};
1333
69bcb019 1334int hist_entry_iter__add(struct hist_entry_iter *iter, struct addr_location *al,
9d3c02d7 1335 int max_stack_depth, void *arg)
69bcb019
NK
1336{
1337 int err, err2;
9c68ae98
KJ
1338 struct map *alm = NULL;
1339
362379aa 1340 if (al)
9c68ae98 1341 alm = map__get(al->map);
69bcb019 1342
8ab12a20 1343 err = sample__resolve_callchain(iter->sample, get_tls_callchain_cursor(), &iter->parent,
063bd936 1344 iter->evsel, al, max_stack_depth);
cb6186ae
CD
1345 if (err) {
1346 map__put(alm);
69bcb019 1347 return err;
cb6186ae 1348 }
69bcb019 1349
69bcb019
NK
1350 err = iter->ops->prepare_entry(iter, al);
1351 if (err)
1352 goto out;
1353
1354 err = iter->ops->add_single_entry(iter, al);
1355 if (err)
1356 goto out;
1357
9d3c02d7
NK
1358 if (iter->he && iter->add_entry_cb) {
1359 err = iter->add_entry_cb(iter, al, true, arg);
1360 if (err)
1361 goto out;
1362 }
1363
69bcb019
NK
1364 while (iter->ops->next_entry(iter, al)) {
1365 err = iter->ops->add_next_entry(iter, al);
1366 if (err)
1367 break;
9d3c02d7
NK
1368
1369 if (iter->he && iter->add_entry_cb) {
1370 err = iter->add_entry_cb(iter, al, false, arg);
1371 if (err)
1372 goto out;
1373 }
69bcb019
NK
1374 }
1375
1376out:
1377 err2 = iter->ops->finish_entry(iter, al);
1378 if (!err)
1379 err = err2;
1380
9c68ae98
KJ
1381 map__put(alm);
1382
69bcb019
NK
1383 return err;
1384}
1385
cd57c04c
DV
1386static int64_t
1387hist_entry__cmp_impl(struct perf_hpp_list *hpp_list, struct hist_entry *left,
1388 struct hist_entry *right, unsigned long fn_offset,
1389 bool ignore_dynamic, bool ignore_skipped)
3d1d07ec 1390{
aa6f50af 1391 struct hists *hists = left->hists;
093f0ef3 1392 struct perf_hpp_fmt *fmt;
cd57c04c 1393 perf_hpp_fmt_cmp_t *fn;
8b4799e4
DV
1394 int64_t cmp;
1395
1396 /*
1397 * Never collapse filtered and non-filtered entries.
1398 * Note this is not the same as having an extra (invisible) fmt
1399 * that corresponds to the filtered status.
1400 */
1401 cmp = (int64_t)!!left->filtered - (int64_t)!!right->filtered;
1402 if (cmp)
1403 return cmp;
3d1d07ec 1404
cd57c04c
DV
1405 perf_hpp_list__for_each_sort_list(hpp_list, fmt) {
1406 if (ignore_dynamic && perf_hpp__is_dynamic_entry(fmt) &&
84b6ee8e
NK
1407 !perf_hpp__defined_dynamic_entry(fmt, hists))
1408 continue;
1409
cd57c04c
DV
1410 if (ignore_skipped && perf_hpp__should_skip(fmt, hists))
1411 continue;
1412
1413 fn = (void *)fmt + fn_offset;
1414 cmp = (*fn)(fmt, left, right);
3d1d07ec
JK
1415 if (cmp)
1416 break;
1417 }
1418
1419 return cmp;
1420}
1421
1422int64_t
cd57c04c 1423hist_entry__cmp(struct hist_entry *left, struct hist_entry *right)
3d1d07ec 1424{
cd57c04c
DV
1425 return hist_entry__cmp_impl(left->hists->hpp_list, left, right,
1426 offsetof(struct perf_hpp_fmt, cmp), true, false);
1427}
3d1d07ec 1428
cd57c04c
DV
1429static int64_t
1430hist_entry__sort(struct hist_entry *left, struct hist_entry *right)
1431{
1432 return hist_entry__cmp_impl(left->hists->hpp_list, left, right,
1433 offsetof(struct perf_hpp_fmt, sort), false, true);
1434}
84b6ee8e 1435
cd57c04c
DV
1436int64_t
1437hist_entry__collapse(struct hist_entry *left, struct hist_entry *right)
1438{
1439 return hist_entry__cmp_impl(left->hists->hpp_list, left, right,
1440 offsetof(struct perf_hpp_fmt, collapse), true, false);
1441}
3d1d07ec 1442
cd57c04c
DV
1443static int64_t
1444hist_entry__collapse_hierarchy(struct perf_hpp_list *hpp_list,
1445 struct hist_entry *left,
1446 struct hist_entry *right)
1447{
1448 return hist_entry__cmp_impl(hpp_list, left, right,
1449 offsetof(struct perf_hpp_fmt, collapse), false, false);
3d1d07ec
JK
1450}
1451
6733d1bf 1452void hist_entry__delete(struct hist_entry *he)
3d1d07ec 1453{
f542e767
JO
1454 struct hist_entry_ops *ops = he->ops;
1455
e1f5bb18
NK
1456 if (symbol_conf.report_hierarchy) {
1457 struct rb_root *root = &he->hroot_out.rb_root;
1458 struct hist_entry *child, *tmp;
1459
1460 rbtree_postorder_for_each_entry_safe(child, tmp, root, rb_node)
1461 hist_entry__delete(child);
1462
1463 *root = RB_ROOT;
1464 }
1465
f3b623b8 1466 thread__zput(he->thread);
56e144fe 1467 map_symbol__exit(&he->ms);
5c24b67a
ACM
1468
1469 if (he->branch_info) {
b2f70c99 1470 branch_info__exit(he->branch_info);
5c24b67a
ACM
1471 zfree(&he->branch_info);
1472 }
1473
1474 if (he->mem_info) {
1a8c2e01
IR
1475 map_symbol__exit(&mem_info__iaddr(he->mem_info)->ms);
1476 map_symbol__exit(&mem_info__daddr(he->mem_info)->ms);
9f87498f 1477 mem_info__zput(he->mem_info);
5c24b67a
ACM
1478 }
1479
99150a1f 1480 if (he->block_info)
557b32c3 1481 block_info__delete(he->block_info);
99150a1f 1482
f1e8f259
LY
1483 if (he->kvm_info)
1484 kvm_info__zput(he->kvm_info);
1485
4968ac8f 1486 zfree(&he->res_samples);
f8be1c8c 1487 zfree(&he->stat_acc);
625db36e 1488 zfree_srcline(&he->srcline);
31191a85 1489 if (he->srcfile && he->srcfile[0])
d8f9da24 1490 zfree(&he->srcfile);
d114960c 1491 free_callchain(he->callchain);
d8f9da24
ACM
1492 zfree(&he->trace_output);
1493 zfree(&he->raw_data);
930d4c45 1494 zfree(&he->mem_stat);
f542e767 1495 ops->free(he);
3d1d07ec
JK
1496}
1497
89fee709
ACM
1498/*
1499 * If this is not the last column, then we need to pad it according to the
adba1634 1500 * pre-calculated max length for this column, otherwise don't bother adding
89fee709
ACM
1501 * spaces because that would break viewing this with, for instance, 'less',
1502 * that would show tons of trailing spaces when a long C++ demangled method
1503 * names is sampled.
1504*/
1505int hist_entry__snprintf_alignment(struct hist_entry *he, struct perf_hpp *hpp,
1506 struct perf_hpp_fmt *fmt, int printed)
1507{
1508 if (!list_is_last(&fmt->list, &he->hists->hpp_list->fields)) {
da1b0407 1509 const int width = fmt->width(fmt, hpp, he->hists);
89fee709
ACM
1510 if (printed < width) {
1511 advance_hpp(hpp, printed);
1512 printed = scnprintf(hpp->buf, hpp->size, "%-*s", width - printed, " ");
1513 }
1514 }
1515
1516 return printed;
1517}
1518
3d1d07ec
JK
1519/*
1520 * collapse the histogram
1521 */
1522
aef810ec 1523static void hists__apply_filters(struct hists *hists, struct hist_entry *he);
aec13a7e
NK
1524static void hists__remove_entry_filter(struct hists *hists, struct hist_entry *he,
1525 enum hist_filter type);
1526
1527typedef bool (*fmt_chk_fn)(struct perf_hpp_fmt *fmt);
1528
/* True if @fmt is a thread- or comm-based sort column. */
static bool check_thread_entry(struct perf_hpp_fmt *fmt)
{
	return perf_hpp__is_thread_entry(fmt) || perf_hpp__is_comm_entry(fmt);
}
1533
1534static void hist_entry__check_and_remove_filter(struct hist_entry *he,
1535 enum hist_filter type,
1536 fmt_chk_fn check)
1537{
1538 struct perf_hpp_fmt *fmt;
1539 bool type_match = false;
1540 struct hist_entry *parent = he->parent_he;
1541
1542 switch (type) {
1543 case HIST_FILTER__THREAD:
1544 if (symbol_conf.comm_list == NULL &&
1545 symbol_conf.pid_list == NULL &&
1546 symbol_conf.tid_list == NULL)
1547 return;
1548 break;
1549 case HIST_FILTER__DSO:
1550 if (symbol_conf.dso_list == NULL)
1551 return;
1552 break;
1553 case HIST_FILTER__SYMBOL:
1554 if (symbol_conf.sym_list == NULL)
1555 return;
1556 break;
61b6b31c
DV
1557 case HIST_FILTER__PARALLELISM:
1558 if (__bitmap_weight(symbol_conf.parallelism_filter, MAX_NR_CPUS + 1) == 0)
1559 return;
1560 break;
aec13a7e
NK
1561 case HIST_FILTER__PARENT:
1562 case HIST_FILTER__GUEST:
1563 case HIST_FILTER__HOST:
1564 case HIST_FILTER__SOCKET:
9857b717 1565 case HIST_FILTER__C2C:
aec13a7e
NK
1566 default:
1567 return;
1568 }
1569
1570 /* if it's filtered by own fmt, it has to have filter bits */
1571 perf_hpp_list__for_each_format(he->hpp_list, fmt) {
1572 if (check(fmt)) {
1573 type_match = true;
1574 break;
1575 }
1576 }
1577
1578 if (type_match) {
1579 /*
1580 * If the filter is for current level entry, propagate
1581 * filter marker to parents. The marker bit was
1582 * already set by default so it only needs to clear
1583 * non-filtered entries.
1584 */
1585 if (!(he->filtered & (1 << type))) {
1586 while (parent) {
1587 parent->filtered &= ~(1 << type);
1588 parent = parent->parent_he;
1589 }
1590 }
1591 } else {
1592 /*
1593 * If current entry doesn't have matching formats, set
1594 * filter marker for upper level entries. it will be
1595 * cleared if its lower level entries is not filtered.
1596 *
1597 * For lower-level entries, it inherits parent's
1598 * filter bit so that lower level entries of a
1599 * non-filtered entry won't set the filter marker.
1600 */
1601 if (parent == NULL)
1602 he->filtered |= (1 << type);
1603 else
1604 he->filtered |= (parent->filtered & (1 << type));
1605 }
1606}
1607
1608static void hist_entry__apply_hierarchy_filters(struct hist_entry *he)
1609{
1610 hist_entry__check_and_remove_filter(he, HIST_FILTER__THREAD,
1611 check_thread_entry);
1612
1613 hist_entry__check_and_remove_filter(he, HIST_FILTER__DSO,
1614 perf_hpp__is_dso_entry);
1615
1616 hist_entry__check_and_remove_filter(he, HIST_FILTER__SYMBOL,
1617 perf_hpp__is_sym_entry);
1618
61b6b31c
DV
1619 hist_entry__check_and_remove_filter(he, HIST_FILTER__PARALLELISM,
1620 perf_hpp__is_parallelism_entry);
1621
aec13a7e
NK
1622 hists__apply_filters(he->hists, he);
1623}
aef810ec
NK
1624
1625static struct hist_entry *hierarchy_insert_entry(struct hists *hists,
2eb3d689 1626 struct rb_root_cached *root,
aef810ec 1627 struct hist_entry *he,
aec13a7e 1628 struct hist_entry *parent_he,
1b2dbbf4 1629 struct perf_hpp_list *hpp_list)
aef810ec 1630{
2eb3d689 1631 struct rb_node **p = &root->rb_root.rb_node;
aef810ec
NK
1632 struct rb_node *parent = NULL;
1633 struct hist_entry *iter, *new;
1b2dbbf4 1634 struct perf_hpp_fmt *fmt;
aef810ec 1635 int64_t cmp;
2eb3d689 1636 bool leftmost = true;
aef810ec
NK
1637
1638 while (*p != NULL) {
1639 parent = *p;
1640 iter = rb_entry(parent, struct hist_entry, rb_node_in);
cd57c04c 1641 cmp = hist_entry__collapse_hierarchy(hpp_list, iter, he);
aef810ec
NK
1642 if (!cmp) {
1643 he_stat__add_stat(&iter->stat, &he->stat);
930d4c45 1644 hists__add_mem_stat(hists, iter, he);
aef810ec
NK
1645 return iter;
1646 }
1647
1648 if (cmp < 0)
1649 p = &parent->rb_left;
2eb3d689 1650 else {
aef810ec 1651 p = &parent->rb_right;
2eb3d689
DB
1652 leftmost = false;
1653 }
aef810ec
NK
1654 }
1655
1656 new = hist_entry__new(he, true);
1657 if (new == NULL)
1658 return NULL;
1659
aef810ec
NK
1660 hists->nr_entries++;
1661
1b2dbbf4
NK
1662 /* save related format list for output */
1663 new->hpp_list = hpp_list;
aec13a7e
NK
1664 new->parent_he = parent_he;
1665
1666 hist_entry__apply_hierarchy_filters(new);
aef810ec
NK
1667
1668 /* some fields are now passed to 'new' */
1b2dbbf4
NK
1669 perf_hpp_list__for_each_sort_list(hpp_list, fmt) {
1670 if (perf_hpp__is_trace_entry(fmt) || perf_hpp__is_dynamic_entry(fmt))
1671 he->trace_output = NULL;
1672 else
1673 new->trace_output = NULL;
aef810ec 1674
1b2dbbf4
NK
1675 if (perf_hpp__is_srcline_entry(fmt))
1676 he->srcline = NULL;
1677 else
1678 new->srcline = NULL;
aef810ec 1679
1b2dbbf4
NK
1680 if (perf_hpp__is_srcfile_entry(fmt))
1681 he->srcfile = NULL;
1682 else
1683 new->srcfile = NULL;
1684 }
aef810ec 1685
930d4c45
NK
1686 if (hists__clone_mem_stat(hists, new, he) < 0) {
1687 hist_entry__delete(new);
1688 return NULL;
1689 }
1690
aef810ec 1691 rb_link_node(&new->rb_node_in, parent, p);
2eb3d689 1692 rb_insert_color_cached(&new->rb_node_in, root, leftmost);
aef810ec
NK
1693 return new;
1694}
1695
1696static int hists__hierarchy_insert_entry(struct hists *hists,
2eb3d689 1697 struct rb_root_cached *root,
aef810ec
NK
1698 struct hist_entry *he)
1699{
1b2dbbf4 1700 struct perf_hpp_list_node *node;
aef810ec
NK
1701 struct hist_entry *new_he = NULL;
1702 struct hist_entry *parent = NULL;
1703 int depth = 0;
1704 int ret = 0;
1705
1b2dbbf4
NK
1706 list_for_each_entry(node, &hists->hpp_formats, list) {
1707 /* skip period (overhead) and elided columns */
1708 if (node->level == 0 || node->skip)
aef810ec
NK
1709 continue;
1710
1711 /* insert copy of 'he' for each fmt into the hierarchy */
aec13a7e 1712 new_he = hierarchy_insert_entry(hists, root, he, parent, &node->hpp);
aef810ec
NK
1713 if (new_he == NULL) {
1714 ret = -1;
1715 break;
1716 }
1717
1718 root = &new_he->hroot_in;
aef810ec
NK
1719 new_he->depth = depth++;
1720 parent = new_he;
1721 }
1722
1723 if (new_he) {
1724 new_he->leaf = true;
1725
fabd37b8
ACM
1726 if (hist_entry__has_callchains(new_he) &&
1727 symbol_conf.use_callchain) {
8ab12a20
IR
1728 struct callchain_cursor *cursor = get_tls_callchain_cursor();
1729
1730 if (cursor == NULL)
1731 return -1;
1732
1733 callchain_cursor_reset(cursor);
1734 if (callchain_merge(cursor,
aef810ec
NK
1735 new_he->callchain,
1736 he->callchain) < 0)
1737 ret = -1;
1738 }
1739 }
1740
1741 /* 'he' is no longer used */
1742 hist_entry__delete(he);
1743
1744 /* return 0 (or -1) since it already applied filters */
1745 return ret;
1746}
1747
592dac6f 1748static int hists__collapse_insert_entry(struct hists *hists,
2eb3d689 1749 struct rb_root_cached *root,
592dac6f 1750 struct hist_entry *he)
3d1d07ec 1751{
2eb3d689 1752 struct rb_node **p = &root->rb_root.rb_node;
3d1d07ec
JK
1753 struct rb_node *parent = NULL;
1754 struct hist_entry *iter;
1755 int64_t cmp;
2eb3d689 1756 bool leftmost = true;
3d1d07ec 1757
aef810ec
NK
1758 if (symbol_conf.report_hierarchy)
1759 return hists__hierarchy_insert_entry(hists, root, he);
1760
3d1d07ec
JK
1761 while (*p != NULL) {
1762 parent = *p;
1980c2eb 1763 iter = rb_entry(parent, struct hist_entry, rb_node_in);
3d1d07ec
JK
1764
1765 cmp = hist_entry__collapse(iter, he);
1766
1767 if (!cmp) {
bba58cdf
NK
1768 int ret = 0;
1769
139c0815 1770 he_stat__add_stat(&iter->stat, &he->stat);
f8be1c8c
NK
1771 if (symbol_conf.cumulate_callchain)
1772 he_stat__add_stat(iter->stat_acc, he->stat_acc);
930d4c45 1773 hists__add_mem_stat(hists, iter, he);
9ec60972 1774
fabd37b8 1775 if (hist_entry__has_callchains(he) && symbol_conf.use_callchain) {
8ab12a20
IR
1776 struct callchain_cursor *cursor = get_tls_callchain_cursor();
1777
1778 if (cursor != NULL) {
1779 callchain_cursor_reset(cursor);
1780 if (callchain_merge(cursor, iter->callchain, he->callchain) < 0)
1781 ret = -1;
1782 } else {
1783 ret = 0;
1784 }
1b3a0e95 1785 }
6733d1bf 1786 hist_entry__delete(he);
bba58cdf 1787 return ret;
3d1d07ec
JK
1788 }
1789
1790 if (cmp < 0)
1791 p = &(*p)->rb_left;
2eb3d689 1792 else {
3d1d07ec 1793 p = &(*p)->rb_right;
2eb3d689
DB
1794 leftmost = false;
1795 }
3d1d07ec 1796 }
740b97f9 1797 hists->nr_entries++;
3d1d07ec 1798
1980c2eb 1799 rb_link_node(&he->rb_node_in, parent, p);
2eb3d689 1800 rb_insert_color_cached(&he->rb_node_in, root, leftmost);
bba58cdf 1801 return 1;
3d1d07ec
JK
1802}
1803
2eb3d689 1804struct rb_root_cached *hists__get_rotate_entries_in(struct hists *hists)
3d1d07ec 1805{
2eb3d689 1806 struct rb_root_cached *root;
1980c2eb 1807
8e03bb88 1808 mutex_lock(&hists->lock);
1980c2eb
ACM
1809
1810 root = hists->entries_in;
1811 if (++hists->entries_in > &hists->entries_in_array[1])
1812 hists->entries_in = &hists->entries_in_array[0];
1813
8e03bb88 1814 mutex_unlock(&hists->lock);
1980c2eb
ACM
1815
1816 return root;
1817}
1818
90cf1fb5
ACM
/* Re-evaluate every active entry filter against @he. */
static void hists__apply_filters(struct hists *hists, struct hist_entry *he)
{
	hists__filter_entry_by_dso(hists, he);
	hists__filter_entry_by_thread(hists, he);
	hists__filter_entry_by_symbol(hists, he);
	hists__filter_entry_by_socket(hists, he);
	hists__filter_entry_by_parallelism(hists, he);
}
1827
bba58cdf 1828int hists__collapse_resort(struct hists *hists, struct ui_progress *prog)
1980c2eb 1829{
2eb3d689 1830 struct rb_root_cached *root;
3d1d07ec
JK
1831 struct rb_node *next;
1832 struct hist_entry *n;
bba58cdf 1833 int ret;
3d1d07ec 1834
52225036 1835 if (!hists__has(hists, need_collapse))
bba58cdf 1836 return 0;
3d1d07ec 1837
740b97f9
NK
1838 hists->nr_entries = 0;
1839
1980c2eb 1840 root = hists__get_rotate_entries_in(hists);
740b97f9 1841
2eb3d689 1842 next = rb_first_cached(root);
b9bf0892 1843
3d1d07ec 1844 while (next) {
33e940a2
ACM
1845 if (session_done())
1846 break;
1980c2eb
ACM
1847 n = rb_entry(next, struct hist_entry, rb_node_in);
1848 next = rb_next(&n->rb_node_in);
3d1d07ec 1849
2eb3d689 1850 rb_erase_cached(&n->rb_node_in, root);
bba58cdf
NK
1851 ret = hists__collapse_insert_entry(hists, &hists->entries_collapsed, n);
1852 if (ret < 0)
1853 return -1;
1854
1855 if (ret) {
90cf1fb5
ACM
1856 /*
1857 * If it wasn't combined with one of the entries already
1858 * collapsed, we need to apply the filters that may have
1859 * been set by, say, the hist_browser.
1860 */
1861 hists__apply_filters(hists, n);
90cf1fb5 1862 }
c1fb5651
NK
1863 if (prog)
1864 ui_progress__update(prog, 1);
3d1d07ec 1865 }
bba58cdf 1866 return 0;
1980c2eb 1867}
b9bf0892 1868
9283ba9b
NK
1869static void hists__reset_filter_stats(struct hists *hists)
1870{
1871 hists->nr_non_filtered_entries = 0;
1872 hists->stats.total_non_filtered_period = 0;
ee1cffbe 1873 hists->stats.total_non_filtered_latency = 0;
9283ba9b
NK
1874}
1875
1876void hists__reset_stats(struct hists *hists)
1877{
1878 hists->nr_entries = 0;
1879 hists->stats.total_period = 0;
ee1cffbe 1880 hists->stats.total_latency = 0;
9283ba9b
NK
1881
1882 hists__reset_filter_stats(hists);
1883}
1884
1885static void hists__inc_filter_stats(struct hists *hists, struct hist_entry *h)
1886{
1887 hists->nr_non_filtered_entries++;
1888 hists->stats.total_non_filtered_period += h->stat.period;
ee1cffbe 1889 hists->stats.total_non_filtered_latency += h->stat.latency;
9283ba9b
NK
1890}
1891
1892void hists__inc_stats(struct hists *hists, struct hist_entry *h)
1893{
1894 if (!h->filtered)
1895 hists__inc_filter_stats(hists, h);
1896
1897 hists->nr_entries++;
1898 hists->stats.total_period += h->stat.period;
ee1cffbe 1899 hists->stats.total_latency += h->stat.latency;
9283ba9b
NK
1900}
1901
f7fb538a
NK
1902static void hierarchy_recalc_total_periods(struct hists *hists)
1903{
1904 struct rb_node *node;
1905 struct hist_entry *he;
1906
2eb3d689 1907 node = rb_first_cached(&hists->entries);
f7fb538a
NK
1908
1909 hists->stats.total_period = 0;
1910 hists->stats.total_non_filtered_period = 0;
ee1cffbe
DV
1911 hists->stats.total_latency = 0;
1912 hists->stats.total_non_filtered_latency = 0;
f7fb538a
NK
1913
1914 /*
1915 * recalculate total period using top-level entries only
1916 * since lower level entries only see non-filtered entries
1917 * but upper level entries have sum of both entries.
1918 */
1919 while (node) {
1920 he = rb_entry(node, struct hist_entry, rb_node);
1921 node = rb_next(node);
1922
1923 hists->stats.total_period += he->stat.period;
ee1cffbe
DV
1924 hists->stats.total_latency += he->stat.latency;
1925 if (!he->filtered) {
f7fb538a 1926 hists->stats.total_non_filtered_period += he->stat.period;
ee1cffbe
DV
1927 hists->stats.total_non_filtered_latency += he->stat.latency;
1928 }
f7fb538a
NK
1929 }
1930}
1931
2eb3d689 1932static void hierarchy_insert_output_entry(struct rb_root_cached *root,
1a3906a7
NK
1933 struct hist_entry *he)
1934{
2eb3d689 1935 struct rb_node **p = &root->rb_root.rb_node;
1a3906a7
NK
1936 struct rb_node *parent = NULL;
1937 struct hist_entry *iter;
1b2dbbf4 1938 struct perf_hpp_fmt *fmt;
2eb3d689 1939 bool leftmost = true;
1a3906a7
NK
1940
1941 while (*p != NULL) {
1942 parent = *p;
1943 iter = rb_entry(parent, struct hist_entry, rb_node);
1944
1945 if (hist_entry__sort(he, iter) > 0)
1946 p = &parent->rb_left;
2eb3d689 1947 else {
1a3906a7 1948 p = &parent->rb_right;
2eb3d689
DB
1949 leftmost = false;
1950 }
1a3906a7
NK
1951 }
1952
1953 rb_link_node(&he->rb_node, parent, p);
2eb3d689 1954 rb_insert_color_cached(&he->rb_node, root, leftmost);
abab5e7f
NK
1955
1956 /* update column width of dynamic entry */
1b2dbbf4 1957 perf_hpp_list__for_each_sort_list(he->hpp_list, fmt) {
cb6e92c7
NK
1958 if (fmt->init)
1959 fmt->init(fmt, he);
1b2dbbf4 1960 }
1a3906a7
NK
1961}
1962
/*
 * Rebuild the output tree for one level of the hierarchy, recursing into
 * each non-leaf entry's children.  @root_in is consumed in rb_node_in
 * order; entries are re-inserted into @root_out sorted for display.
 * Leaf entries optionally get their callchains sorted with a per-entry
 * minimum-hits threshold in CHAIN_GRAPH_REL mode.
 */
static void hists__hierarchy_output_resort(struct hists *hists,
					   struct ui_progress *prog,
					   struct rb_root_cached *root_in,
					   struct rb_root_cached *root_out,
					   u64 min_callchain_hits,
					   bool use_callchain)
{
	struct rb_node *node;
	struct hist_entry *he;

	*root_out = RB_ROOT_CACHED;
	node = rb_first_cached(root_in);

	while (node) {
		he = rb_entry(node, struct hist_entry, rb_node_in);
		/* advance before re-linking he into root_out */
		node = rb_next(node);

		hierarchy_insert_output_entry(root_out, he);

		if (prog)
			ui_progress__update(prog, 1);

		hists->nr_entries++;
		if (!he->filtered) {
			hists->nr_non_filtered_entries++;
			hists__calc_col_len(hists, he);
		}

		if (!he->leaf) {
			hists__hierarchy_output_resort(hists, prog,
						       &he->hroot_in,
						       &he->hroot_out,
						       min_callchain_hits,
						       use_callchain);
			continue;
		}

		if (!use_callchain)
			continue;

		if (callchain_param.mode == CHAIN_GRAPH_REL) {
			u64 total = he->stat.period;

			if (symbol_conf.cumulate_callchain)
				total = he->stat_acc->period;

			/* threshold is relative to this entry's own period */
			min_callchain_hits = total * (callchain_param.min_percent / 100);
		}

		callchain_param.sort(&he->sorted_chain, he->callchain,
				     min_callchain_hits, &callchain_param);
	}
}
2016
/*
 * Insert @he into the flat (non-hierarchy) output tree @entries,
 * optionally sorting its callchain first.  In CHAIN_GRAPH_REL mode the
 * callchain threshold is recomputed relative to this entry's period.
 * Bigger entries (per hist_entry__sort()) go left so iteration is in
 * descending order.
 */
static void __hists__insert_output_entry(struct rb_root_cached *entries,
					 struct hist_entry *he,
					 u64 min_callchain_hits,
					 bool use_callchain)
{
	struct rb_node **p = &entries->rb_root.rb_node;
	struct rb_node *parent = NULL;
	struct hist_entry *iter;
	struct perf_hpp_fmt *fmt;
	bool leftmost = true;

	if (use_callchain) {
		if (callchain_param.mode == CHAIN_GRAPH_REL) {
			u64 total = he->stat.period;

			if (symbol_conf.cumulate_callchain)
				total = he->stat_acc->period;

			min_callchain_hits = total * (callchain_param.min_percent / 100);
		}
		callchain_param.sort(&he->sorted_chain, he->callchain,
				     min_callchain_hits, &callchain_param);
	}

	while (*p != NULL) {
		parent = *p;
		iter = rb_entry(parent, struct hist_entry, rb_node);

		if (hist_entry__sort(he, iter) > 0)
			p = &(*p)->rb_left;
		else {
			p = &(*p)->rb_right;
			leftmost = false;
		}
	}

	rb_link_node(&he->rb_node, parent, p);
	rb_insert_color_cached(&he->rb_node, entries, leftmost);

	/* update column width of dynamic entries */
	perf_hpp_list__for_each_sort_list(&perf_hpp_list, fmt) {
		if (fmt->init)
			fmt->init(fmt, he);
	}
}
2062
/*
 * Rebuild hists->entries (the display tree) from the collapsed/input
 * tree.  Resets per-hists stats and column widths first; delegates to
 * the hierarchy path when report_hierarchy is enabled.  @cb, if set,
 * can veto individual entries (returning true skips the entry).
 */
static void output_resort(struct hists *hists, struct ui_progress *prog,
			  bool use_callchain, hists__resort_cb_t cb,
			  void *cb_arg)
{
	struct rb_root_cached *root;
	struct rb_node *next;
	struct hist_entry *n;
	u64 callchain_total;
	u64 min_callchain_hits;

	/* baseline for the callchain min-percent threshold */
	callchain_total = hists->callchain_period;
	if (symbol_conf.filter_relative)
		callchain_total = hists->callchain_non_filtered_period;

	min_callchain_hits = callchain_total * (callchain_param.min_percent / 100);

	hists__reset_stats(hists);
	hists__reset_col_len(hists);

	if (symbol_conf.report_hierarchy) {
		hists__hierarchy_output_resort(hists, prog,
					       &hists->entries_collapsed,
					       &hists->entries,
					       min_callchain_hits,
					       use_callchain);
		hierarchy_recalc_total_periods(hists);
		return;
	}

	if (hists__has(hists, need_collapse))
		root = &hists->entries_collapsed;
	else
		root = hists->entries_in;

	next = rb_first_cached(root);
	hists->entries = RB_ROOT_CACHED;

	while (next) {
		n = rb_entry(next, struct hist_entry, rb_node_in);
		next = rb_next(&n->rb_node_in);

		if (cb && cb(n, cb_arg))
			continue;

		__hists__insert_output_entry(&hists->entries, n, min_callchain_hits, use_callchain);
		hists__inc_stats(hists, n);

		if (!n->filtered)
			hists__calc_col_len(hists, n);

		if (prog)
			ui_progress__update(prog, 1);
	}
}
b9bf0892 2117
10c513f7
ACM
2118void evsel__output_resort_cb(struct evsel *evsel, struct ui_progress *prog,
2119 hists__resort_cb_t cb, void *cb_arg)
01441af5 2120{
01441af5
JO
2121 bool use_callchain;
2122
2123 if (evsel && symbol_conf.use_callchain && !symbol_conf.show_ref_callgraph)
27de9b2b 2124 use_callchain = evsel__has_callchain(evsel);
01441af5
JO
2125 else
2126 use_callchain = symbol_conf.use_callchain;
2127
b49a821e
JY
2128 use_callchain |= symbol_conf.show_branchflag_count;
2129
57496187
JO
2130 output_resort(evsel__hists(evsel), prog, use_callchain, cb, cb_arg);
2131}
2132
10c513f7 2133void evsel__output_resort(struct evsel *evsel, struct ui_progress *prog)
57496187 2134{
10c513f7 2135 return evsel__output_resort_cb(evsel, prog, NULL, NULL);
452ce03b
JO
2136}
2137
2138void hists__output_resort(struct hists *hists, struct ui_progress *prog)
2139{
e4c38fd4 2140 output_resort(hists, prog, symbol_conf.use_callchain, NULL, NULL);
52c5cc36
JO
2141}
2142
2143void hists__output_resort_cb(struct hists *hists, struct ui_progress *prog,
2144 hists__resort_cb_t cb)
2145{
e4c38fd4 2146 output_resort(hists, prog, symbol_conf.use_callchain, cb, NULL);
01441af5
JO
2147}
2148
8c01872f
NK
2149static bool can_goto_child(struct hist_entry *he, enum hierarchy_move_dir hmd)
2150{
2151 if (he->leaf || hmd == HMD_FORCE_SIBLING)
2152 return false;
2153
2154 if (he->unfolded || hmd == HMD_FORCE_CHILD)
2155 return true;
2156
2157 return false;
2158}
2159
2160struct rb_node *rb_hierarchy_last(struct rb_node *node)
2161{
2162 struct hist_entry *he = rb_entry(node, struct hist_entry, rb_node);
2163
2164 while (can_goto_child(he, HMD_NORMAL)) {
2eb3d689 2165 node = rb_last(&he->hroot_out.rb_root);
8c01872f
NK
2166 he = rb_entry(node, struct hist_entry, rb_node);
2167 }
2168 return node;
2169}
2170
/*
 * Advance to the next node in hierarchy (pre-)order: descend into the
 * first child when allowed by @hmd, otherwise take the next sibling,
 * and when a subtree is exhausted climb parents until one has a next
 * sibling.  Returns NULL at the end of the whole hierarchy.
 */
struct rb_node *__rb_hierarchy_next(struct rb_node *node, enum hierarchy_move_dir hmd)
{
	struct hist_entry *he = rb_entry(node, struct hist_entry, rb_node);

	if (can_goto_child(he, hmd))
		node = rb_first_cached(&he->hroot_out);
	else
		node = rb_next(node);

	/* climb until a parent with a following sibling (or run out) */
	while (node == NULL) {
		he = he->parent_he;
		if (he == NULL)
			break;

		node = rb_next(&he->rb_node);
	}
	return node;
}
2189
2190struct rb_node *rb_hierarchy_prev(struct rb_node *node)
2191{
2192 struct hist_entry *he = rb_entry(node, struct hist_entry, rb_node);
2193
2194 node = rb_prev(node);
2195 if (node)
2196 return rb_hierarchy_last(node);
2197
2198 he = he->parent_he;
2199 if (he == NULL)
2200 return NULL;
2201
2202 return &he->rb_node;
2203}
2204
a7b5895b
NK
/*
 * Report whether @he has at least one unfiltered child whose percent
 * is at or above @limit.  Leaves trivially have none.
 */
bool hist_entry__has_hierarchy_children(struct hist_entry *he, float limit)
{
	struct rb_node *node;
	struct hist_entry *child;
	float percent;

	if (he->leaf)
		return false;

	node = rb_first_cached(&he->hroot_out);
	child = rb_entry(node, struct hist_entry, rb_node);

	/*
	 * Skip filtered children.  Note: child is recomputed from a
	 * possibly-NULL node, but it is only dereferenced after the
	 * short-circuit check that node is non-NULL.
	 */
	while (node && child->filtered) {
		node = rb_next(node);
		child = rb_entry(node, struct hist_entry, rb_node);
	}

	if (node)
		percent = hist_entry__get_percent_limit(child);
	else
		percent = 0;

	return node && percent >= limit;
}
2229
/*
 * Clear @filter's bit on @h and, if the entry becomes fully
 * unfiltered, fold it, add it back into the non-filtered stats and
 * column widths.  In hierarchy mode the entry's stats are also
 * propagated to every ancestor, clearing the same filter bit on the
 * way up.
 */
static void hists__remove_entry_filter(struct hists *hists, struct hist_entry *h,
				       enum hist_filter filter)
{
	h->filtered &= ~(1 << filter);

	if (symbol_conf.report_hierarchy) {
		struct hist_entry *parent = h->parent_he;

		while (parent) {
			he_stat__add_stat(&parent->stat, &h->stat);

			parent->filtered &= ~(1 << filter);

			/* still filtered by another filter type: skip UI reset */
			if (parent->filtered)
				goto next;

			/* force fold unfiltered entry for simplicity */
			parent->unfolded = false;
			parent->has_no_entry = false;
			parent->row_offset = 0;
			parent->nr_rows = 0;
next:
			parent = parent->parent_he;
		}
	}

	if (h->filtered)
		return;

	/* force fold unfiltered entry for simplicity */
	h->unfolded = false;
	h->has_no_entry = false;
	h->row_offset = 0;
	h->nr_rows = 0;

	hists->stats.nr_non_filtered_samples += h->stat.nr_events;

	hists__inc_filter_stats(hists, h);
	hists__calc_col_len(hists, h);
}
2270
90cf1fb5
ACM
2271
2272static bool hists__filter_entry_by_dso(struct hists *hists,
2273 struct hist_entry *he)
2274{
2275 if (hists->dso_filter != NULL &&
ee756ef7 2276 (he->ms.map == NULL || !RC_CHK_EQUAL(map__dso(he->ms.map), hists->dso_filter))) {
90cf1fb5
ACM
2277 he->filtered |= (1 << HIST_FILTER__DSO);
2278 return true;
2279 }
2280
2281 return false;
2282}
2283
90cf1fb5
ACM
2284static bool hists__filter_entry_by_thread(struct hists *hists,
2285 struct hist_entry *he)
2286{
2287 if (hists->thread_filter != NULL &&
78c32f4c 2288 !RC_CHK_EQUAL(he->thread, hists->thread_filter)) {
90cf1fb5
ACM
2289 he->filtered |= (1 << HIST_FILTER__THREAD);
2290 return true;
2291 }
2292
2293 return false;
2294}
2295
e94d53eb
NK
2296static bool hists__filter_entry_by_symbol(struct hists *hists,
2297 struct hist_entry *he)
2298{
2299 if (hists->symbol_filter_str != NULL &&
2300 (!he->ms.sym || strstr(he->ms.sym->name,
2301 hists->symbol_filter_str) == NULL)) {
2302 he->filtered |= (1 << HIST_FILTER__SYMBOL);
2303 return true;
2304 }
2305
2306 return false;
2307}
2308
21394d94
KL
2309static bool hists__filter_entry_by_socket(struct hists *hists,
2310 struct hist_entry *he)
2311{
2312 if ((hists->socket_filter > -1) &&
2313 (he->socket != hists->socket_filter)) {
2314 he->filtered |= (1 << HIST_FILTER__SOCKET);
2315 return true;
2316 }
2317
2318 return false;
2319}
2320
61b6b31c
DV
2321static bool hists__filter_entry_by_parallelism(struct hists *hists,
2322 struct hist_entry *he)
2323{
2324 if (test_bit(he->parallelism, hists->parallelism_filter)) {
2325 he->filtered |= (1 << HIST_FILTER__PARALLELISM);
2326 return true;
2327 }
2328 return false;
2329}
2330
1f7c2541
NK
2331typedef bool (*filter_fn_t)(struct hists *hists, struct hist_entry *he);
2332
2333static void hists__filter_by_type(struct hists *hists, int type, filter_fn_t filter)
84734b06
KL
2334{
2335 struct rb_node *nd;
2336
2337 hists->stats.nr_non_filtered_samples = 0;
2338
2339 hists__reset_filter_stats(hists);
2340 hists__reset_col_len(hists);
2341
2eb3d689 2342 for (nd = rb_first_cached(&hists->entries); nd; nd = rb_next(nd)) {
84734b06
KL
2343 struct hist_entry *h = rb_entry(nd, struct hist_entry, rb_node);
2344
1f7c2541 2345 if (filter(hists, h))
84734b06
KL
2346 continue;
2347
1f7c2541 2348 hists__remove_entry_filter(hists, h, type);
84734b06
KL
2349 }
2350}
2351
2eb3d689
DB
/*
 * Re-insert @he into @root in display order after a filter change, and
 * recursively rebuild its (unfiltered, non-leaf) children's subtree the
 * same way, since filtering may have changed the periods that drive
 * the sort order.
 */
static void resort_filtered_entry(struct rb_root_cached *root,
				  struct hist_entry *he)
{
	struct rb_node **p = &root->rb_root.rb_node;
	struct rb_node *parent = NULL;
	struct hist_entry *iter;
	struct rb_root_cached new_root = RB_ROOT_CACHED;
	struct rb_node *nd;
	bool leftmost = true;

	while (*p != NULL) {
		parent = *p;
		iter = rb_entry(parent, struct hist_entry, rb_node);

		if (hist_entry__sort(he, iter) > 0)
			p = &(*p)->rb_left;
		else {
			p = &(*p)->rb_right;
			leftmost = false;
		}
	}

	rb_link_node(&he->rb_node, parent, p);
	rb_insert_color_cached(&he->rb_node, root, leftmost);

	if (he->leaf || he->filtered)
		return;

	/* rebuild the children's output tree into new_root, then swap */
	nd = rb_first_cached(&he->hroot_out);
	while (nd) {
		struct hist_entry *h = rb_entry(nd, struct hist_entry, rb_node);

		nd = rb_next(nd);
		rb_erase_cached(&h->rb_node, &he->hroot_out);

		resort_filtered_entry(&new_root, h);
	}

	he->hroot_out = new_root;
}
2392
155e9aff
NK
/*
 * Apply filter @type (with its argument @arg) across the whole
 * hierarchy, then rebuild the output tree since periods propagated to
 * ancestors may change the sort order.  hist_entry__filter() returns
 * <0 for entries the filter type doesn't apply to, 1 for entries that
 * match the filter (hide), and 0 for entries to keep.
 */
static void hists__filter_hierarchy(struct hists *hists, int type, const void *arg)
{
	struct rb_node *nd;
	struct rb_root_cached new_root = RB_ROOT_CACHED;

	hists->stats.nr_non_filtered_samples = 0;

	hists__reset_filter_stats(hists);
	hists__reset_col_len(hists);

	nd = rb_first_cached(&hists->entries);
	while (nd) {
		struct hist_entry *h = rb_entry(nd, struct hist_entry, rb_node);
		int ret;

		ret = hist_entry__filter(h, type, arg);

		/*
		 * case 1. non-matching type
		 * zero out the period, set filter marker and move to child
		 */
		if (ret < 0) {
			memset(&h->stat, 0, sizeof(h->stat));
			h->filtered |= (1 << type);

			nd = __rb_hierarchy_next(&h->rb_node, HMD_FORCE_CHILD);
		}
		/*
		 * case 2. matched type (filter out)
		 * set filter marker and move to next
		 */
		else if (ret == 1) {
			h->filtered |= (1 << type);

			nd = __rb_hierarchy_next(&h->rb_node, HMD_FORCE_SIBLING);
		}
		/*
		 * case 3. ok (not filtered)
		 * add period to hists and parents, erase the filter marker
		 * and move to next sibling
		 */
		else {
			hists__remove_entry_filter(hists, h, type);

			nd = __rb_hierarchy_next(&h->rb_node, HMD_FORCE_SIBLING);
		}
	}

	hierarchy_recalc_total_periods(hists);

	/*
	 * resort output after applying a new filter since filter in a lower
	 * hierarchy can change periods in a upper hierarchy.
	 */
	nd = rb_first_cached(&hists->entries);
	while (nd) {
		struct hist_entry *h = rb_entry(nd, struct hist_entry, rb_node);

		nd = rb_next(nd);
		rb_erase_cached(&h->rb_node, &hists->entries);

		resort_filtered_entry(&new_root, h);
	}

	hists->entries = new_root;
}
2459
1f7c2541
NK
2460void hists__filter_by_thread(struct hists *hists)
2461{
155e9aff
NK
2462 if (symbol_conf.report_hierarchy)
2463 hists__filter_hierarchy(hists, HIST_FILTER__THREAD,
2464 hists->thread_filter);
2465 else
2466 hists__filter_by_type(hists, HIST_FILTER__THREAD,
2467 hists__filter_entry_by_thread);
1f7c2541
NK
2468}
2469
2470void hists__filter_by_dso(struct hists *hists)
2471{
155e9aff
NK
2472 if (symbol_conf.report_hierarchy)
2473 hists__filter_hierarchy(hists, HIST_FILTER__DSO,
2474 hists->dso_filter);
2475 else
2476 hists__filter_by_type(hists, HIST_FILTER__DSO,
2477 hists__filter_entry_by_dso);
1f7c2541
NK
2478}
2479
2480void hists__filter_by_symbol(struct hists *hists)
2481{
155e9aff
NK
2482 if (symbol_conf.report_hierarchy)
2483 hists__filter_hierarchy(hists, HIST_FILTER__SYMBOL,
2484 hists->symbol_filter_str);
2485 else
2486 hists__filter_by_type(hists, HIST_FILTER__SYMBOL,
2487 hists__filter_entry_by_symbol);
1f7c2541
NK
2488}
2489
2490void hists__filter_by_socket(struct hists *hists)
2491{
155e9aff
NK
2492 if (symbol_conf.report_hierarchy)
2493 hists__filter_hierarchy(hists, HIST_FILTER__SOCKET,
2494 &hists->socket_filter);
2495 else
2496 hists__filter_by_type(hists, HIST_FILTER__SOCKET,
2497 hists__filter_entry_by_socket);
1f7c2541
NK
2498}
2499
61b6b31c
DV
2500void hists__filter_by_parallelism(struct hists *hists)
2501{
2502 if (symbol_conf.report_hierarchy)
2503 hists__filter_hierarchy(hists, HIST_FILTER__PARALLELISM,
2504 hists->parallelism_filter);
2505 else
2506 hists__filter_by_type(hists, HIST_FILTER__PARALLELISM,
2507 hists__filter_entry_by_parallelism);
2508}
2509
28a6b6aa
ACM
2510void events_stats__inc(struct events_stats *stats, u32 type)
2511{
2512 ++stats->nr_events[0];
2513 ++stats->nr_events[type];
2514}
2515
0f0abbac 2516static void hists_stats__inc(struct hists_stats *stats)
c8446b9b 2517{
0f0abbac
NK
2518 ++stats->nr_samples;
2519}
2520
2521void hists__inc_nr_events(struct hists *hists)
2522{
2523 hists_stats__inc(&hists->stats);
c8446b9b 2524}
95529be4 2525
1844dbcb
NK
2526void hists__inc_nr_samples(struct hists *hists, bool filtered)
2527{
0f0abbac 2528 hists_stats__inc(&hists->stats);
1844dbcb
NK
2529 if (!filtered)
2530 hists->stats.nr_non_filtered_samples++;
2531}
2532
75b37db0
NK
2533void hists__inc_nr_lost_samples(struct hists *hists, u32 lost)
2534{
2535 hists->stats.nr_lost_samples += lost;
2536}
2537
1a5474a7
NK
2538void hists__inc_nr_dropped_samples(struct hists *hists, u32 lost)
2539{
2540 hists->stats.nr_dropped_samples += lost;
2541}
2542
494d70a1
ACM
/*
 * Find the entry in @hists matching @pair, or insert a zero-stat
 * "dummy" clone of @pair to serve as a placeholder (used when linking
 * hists for diff-style comparison).  Returns the existing or new
 * entry, or NULL when allocation fails.
 */
static struct hist_entry *hists__add_dummy_entry(struct hists *hists,
						 struct hist_entry *pair)
{
	struct rb_root_cached *root;
	struct rb_node **p;
	struct rb_node *parent = NULL;
	struct hist_entry *he;
	int64_t cmp;
	bool leftmost = true;

	if (hists__has(hists, need_collapse))
		root = &hists->entries_collapsed;
	else
		root = hists->entries_in;

	p = &root->rb_root.rb_node;

	while (*p != NULL) {
		parent = *p;
		he = rb_entry(parent, struct hist_entry, rb_node_in);

		cmp = hist_entry__collapse(he, pair);

		/* already present - return the existing entry */
		if (!cmp)
			goto out;

		if (cmp < 0)
			p = &(*p)->rb_left;
		else {
			p = &(*p)->rb_right;
			leftmost = false;
		}
	}

	he = hist_entry__new(pair, true);
	if (he) {
		memset(&he->stat, 0, sizeof(he->stat));
		he->hists = hists;
		/*
		 * stat_acc points to a separate stat block of the same
		 * type as he->stat, hence sizeof(he->stat) here.
		 */
		if (symbol_conf.cumulate_callchain)
			memset(he->stat_acc, 0, sizeof(he->stat));
		rb_link_node(&he->rb_node_in, parent, p);
		rb_insert_color_cached(&he->rb_node_in, root, leftmost);
		hists__inc_stats(hists, he);
		he->dummy = true;
	}
out:
	return he;
}
2591
/*
 * Hierarchy-mode counterpart of hists__add_dummy_entry(): find @pair's
 * match in @root (one level of the hierarchy) or insert a zero-stat
 * dummy clone.  Returns the existing or new entry, NULL on allocation
 * failure.
 */
static struct hist_entry *add_dummy_hierarchy_entry(struct hists *hists,
						    struct rb_root_cached *root,
						    struct hist_entry *pair)
{
	struct rb_node **p;
	struct rb_node *parent = NULL;
	struct hist_entry *he;
	bool leftmost = true;

	p = &root->rb_root.rb_node;
	while (*p != NULL) {
		int64_t cmp;

		parent = *p;
		he = rb_entry(parent, struct hist_entry, rb_node_in);
		cmp = hist_entry__collapse_hierarchy(he->hpp_list, he, pair);
		/* already present - return the existing entry */
		if (!cmp)
			goto out;

		if (cmp < 0)
			p = &parent->rb_left;
		else {
			p = &parent->rb_right;
			leftmost = false;
		}
	}

	he = hist_entry__new(pair, true);
	if (he) {
		rb_link_node(&he->rb_node_in, parent, p);
		rb_insert_color_cached(&he->rb_node_in, root, leftmost);

		he->dummy = true;
		he->hists = hists;
		memset(&he->stat, 0, sizeof(he->stat));
		hists__inc_stats(hists, he);
	}
out:
	return he;
}
2632
95529be4
ACM
2633static struct hist_entry *hists__find_entry(struct hists *hists,
2634 struct hist_entry *he)
2635{
ce74f60e
NK
2636 struct rb_node *n;
2637
52225036 2638 if (hists__has(hists, need_collapse))
2eb3d689 2639 n = hists->entries_collapsed.rb_root.rb_node;
ce74f60e 2640 else
2eb3d689 2641 n = hists->entries_in->rb_root.rb_node;
95529be4
ACM
2642
2643 while (n) {
ce74f60e
NK
2644 struct hist_entry *iter = rb_entry(n, struct hist_entry, rb_node_in);
2645 int64_t cmp = hist_entry__collapse(iter, he);
95529be4
ACM
2646
2647 if (cmp < 0)
2648 n = n->rb_left;
2649 else if (cmp > 0)
2650 n = n->rb_right;
2651 else
2652 return iter;
2653 }
2654
2655 return NULL;
2656}
2657
2eb3d689 2658static struct hist_entry *hists__find_hierarchy_entry(struct rb_root_cached *root,
09034de6
NK
2659 struct hist_entry *he)
2660{
2eb3d689 2661 struct rb_node *n = root->rb_root.rb_node;
09034de6
NK
2662
2663 while (n) {
2664 struct hist_entry *iter;
cd57c04c 2665 int64_t cmp;
09034de6
NK
2666
2667 iter = rb_entry(n, struct hist_entry, rb_node_in);
cd57c04c 2668 cmp = hist_entry__collapse_hierarchy(he->hpp_list, iter, he);
09034de6
NK
2669 if (cmp < 0)
2670 n = n->rb_left;
2671 else if (cmp > 0)
2672 n = n->rb_right;
2673 else
2674 return iter;
2675 }
2676
2677 return NULL;
2678}
2679
2eb3d689
DB
2680static void hists__match_hierarchy(struct rb_root_cached *leader_root,
2681 struct rb_root_cached *other_root)
09034de6
NK
2682{
2683 struct rb_node *nd;
2684 struct hist_entry *pos, *pair;
2685
2eb3d689 2686 for (nd = rb_first_cached(leader_root); nd; nd = rb_next(nd)) {
09034de6
NK
2687 pos = rb_entry(nd, struct hist_entry, rb_node_in);
2688 pair = hists__find_hierarchy_entry(other_root, pos);
2689
2690 if (pair) {
2691 hist_entry__add_pair(pair, pos);
2692 hists__match_hierarchy(&pos->hroot_in, &pair->hroot_in);
2693 }
2694 }
2695}
2696
95529be4
ACM
2697/*
2698 * Look for pairs to link to the leader buckets (hist_entries):
2699 */
2700void hists__match(struct hists *leader, struct hists *other)
2701{
2eb3d689 2702 struct rb_root_cached *root;
95529be4 2703 struct rb_node *nd;
be5863b7 2704 struct hist_entry *pos, *pair;
95529be4 2705
09034de6
NK
2706 if (symbol_conf.report_hierarchy) {
2707 /* hierarchy report always collapses entries */
2708 return hists__match_hierarchy(&leader->entries_collapsed,
2709 &other->entries_collapsed);
2710 }
2711
52225036 2712 if (hists__has(leader, need_collapse))
ce74f60e
NK
2713 root = &leader->entries_collapsed;
2714 else
2715 root = leader->entries_in;
2716
2eb3d689 2717 for (nd = rb_first_cached(root); nd; nd = rb_next(nd)) {
ce74f60e 2718 pos = rb_entry(nd, struct hist_entry, rb_node_in);
95529be4
ACM
2719 pair = hists__find_entry(other, pos);
2720
be5863b7 2721 if (pair)
5fa9041b 2722 hist_entry__add_pair(pair, pos);
95529be4
ACM
2723 }
2724}
494d70a1 2725
9d97b8f5
NK
/*
 * Ensure every entry of @other_root has a counterpart under
 * @leader_root (one hierarchy level), creating zero-stat dummies where
 * needed, and recurse into children.  @parent is the leader-side
 * parent for any dummy created at this level.  Returns 0 on success,
 * -1 on allocation failure or inconsistent pairing.
 */
static int hists__link_hierarchy(struct hists *leader_hists,
				 struct hist_entry *parent,
				 struct rb_root_cached *leader_root,
				 struct rb_root_cached *other_root)
{
	struct rb_node *nd;
	struct hist_entry *pos, *leader;

	for (nd = rb_first_cached(other_root); nd; nd = rb_next(nd)) {
		pos = rb_entry(nd, struct hist_entry, rb_node_in);

		if (hist_entry__has_pairs(pos)) {
			bool found = false;

			/* locate the pair that belongs to leader_hists */
			list_for_each_entry(leader, &pos->pairs.head, pairs.node) {
				if (leader->hists == leader_hists) {
					found = true;
					break;
				}
			}
			/* paired, but never with this leader: bail out */
			if (!found)
				return -1;
		} else {
			leader = add_dummy_hierarchy_entry(leader_hists,
							   leader_root, pos);
			if (leader == NULL)
				return -1;

			/* do not point parent in the pos */
			leader->parent_he = parent;

			hist_entry__add_pair(pos, leader);
		}

		if (!pos->leaf) {
			if (hists__link_hierarchy(leader_hists, leader,
						  &leader->hroot_in,
						  &pos->hroot_in) < 0)
				return -1;
		}
	}
	return 0;
}
2769
494d70a1
ACM
2770/*
2771 * Look for entries in the other hists that are not present in the leader, if
2772 * we find them, just add a dummy entry on the leader hists, with period=0,
2773 * nr_events=0, to serve as the list header.
2774 */
2775int hists__link(struct hists *leader, struct hists *other)
2776{
2eb3d689 2777 struct rb_root_cached *root;
494d70a1
ACM
2778 struct rb_node *nd;
2779 struct hist_entry *pos, *pair;
2780
9d97b8f5
NK
2781 if (symbol_conf.report_hierarchy) {
2782 /* hierarchy report always collapses entries */
2783 return hists__link_hierarchy(leader, NULL,
2784 &leader->entries_collapsed,
2785 &other->entries_collapsed);
2786 }
2787
52225036 2788 if (hists__has(other, need_collapse))
ce74f60e
NK
2789 root = &other->entries_collapsed;
2790 else
2791 root = other->entries_in;
2792
2eb3d689 2793 for (nd = rb_first_cached(root); nd; nd = rb_next(nd)) {
ce74f60e 2794 pos = rb_entry(nd, struct hist_entry, rb_node_in);
494d70a1
ACM
2795
2796 if (!hist_entry__has_pairs(pos)) {
2797 pair = hists__add_dummy_entry(leader, pos);
2798 if (pair == NULL)
2799 return -1;
5fa9041b 2800 hist_entry__add_pair(pos, pair);
494d70a1
ACM
2801 }
2802 }
2803
2804 return 0;
2805}
f2148330 2806
be5863b7
NK
2807int hists__unlink(struct hists *hists)
2808{
2809 struct rb_root_cached *root;
2810 struct rb_node *nd;
2811 struct hist_entry *pos;
2812
2813 if (hists__has(hists, need_collapse))
2814 root = &hists->entries_collapsed;
2815 else
2816 root = hists->entries_in;
2817
2818 for (nd = rb_first_cached(root); nd; nd = rb_next(nd)) {
2819 pos = rb_entry(nd, struct hist_entry, rb_node_in);
2820 list_del_init(&pos->pairs.node);
2821 }
2822
2823 return 0;
2824}
2825
/*
 * Account per-branch cycle counts from @bs against the resolved branch
 * symbols, optionally accumulating into @total_cycles.  Does nothing
 * unless the first branch entry carries cycle information.  The
 * branch_info array returned by sample__resolve_bstack() is owned and
 * freed here.
 */
void hist__account_cycles(struct branch_stack *bs, struct addr_location *al,
			  struct perf_sample *sample, bool nonany_branch_mode,
			  u64 *total_cycles, struct evsel *evsel)
{
	struct branch_info *bi;
	struct branch_entry *entries = perf_sample__branch_entries(sample);

	/* If we have branch cycles always annotate them. */
	if (bs && bs->nr && entries[0].flags.cycles) {
		bi = sample__resolve_bstack(sample, al);
		if (bi) {
			struct addr_map_symbol *prev = NULL;

			/*
			 * Ignore errors, still want to process the
			 * other entries.
			 *
			 * For non standard branch modes always
			 * force no IPC (prev == NULL)
			 *
			 * Note that perf stores branches reversed from
			 * program order!
			 */
			for (int i = bs->nr - 1; i >= 0; i--) {
				addr_map_symbol__account_cycles(&bi[i].from,
					nonany_branch_mode ? NULL : prev,
					bi[i].flags.cycles, evsel,
					bi[i].branch_stack_cntr);
				prev = &bi[i].to;

				if (total_cycles)
					*total_cycles += bi[i].flags.cycles;
			}
			/* drop the map/symbol references taken during resolve */
			for (unsigned int i = 0; i < bs->nr; i++) {
				map_symbol__exit(&bi[i].to.ms);
				map_symbol__exit(&bi[i].from.ms);
			}
			free(bi);
		}
	}
}
2a1731fb 2867
/*
 * Print per-evsel sample/lost/dropped counters to @fp, skipping evsels
 * with no samples at all when skip_empty is set.  Returns the total
 * number of characters written.
 */
size_t evlist__fprintf_nr_events(struct evlist *evlist, FILE *fp)
{
	struct evsel *pos;
	size_t ret = 0;

	evlist__for_each_entry(evlist, pos) {
		struct hists *hists = evsel__hists(pos);
		u64 total_samples = hists->stats.nr_samples;

		total_samples += hists->stats.nr_lost_samples;
		total_samples += hists->stats.nr_dropped_samples;

		if (symbol_conf.skip_empty && total_samples == 0)
			continue;

		ret += fprintf(fp, "%s stats:\n", evsel__name(pos));
		if (hists->stats.nr_samples)
			ret += fprintf(fp, "%20s events: %10d\n",
				       "SAMPLE", hists->stats.nr_samples);
		if (hists->stats.nr_lost_samples)
			ret += fprintf(fp, "%20s events: %10d\n",
				       "LOST_SAMPLES", hists->stats.nr_lost_samples);
		if (hists->stats.nr_dropped_samples)
			ret += fprintf(fp, "%20s events: %10d\n",
				       "LOST_SAMPLES (BPF)", hists->stats.nr_dropped_samples);
	}

	return ret;
}
2897
2898
f2148330
NK
2899u64 hists__total_period(struct hists *hists)
2900{
2901 return symbol_conf.filter_relative ? hists->stats.total_non_filtered_period :
2902 hists->stats.total_period;
2903}
33db4568 2904
ee1cffbe
DV
2905u64 hists__total_latency(struct hists *hists)
2906{
2907 return symbol_conf.filter_relative ? hists->stats.total_non_filtered_latency :
2908 hists->stats.total_latency;
2909}
2910
25c312db
ACM
2911int __hists__scnprintf_title(struct hists *hists, char *bf, size_t size, bool show_freq)
2912{
2913 char unit;
2914 int printed;
2915 const struct dso *dso = hists->dso_filter;
7cb10a08 2916 struct thread *thread = hists->thread_filter;
25c312db 2917 int socket_id = hists->socket_filter;
0f0abbac 2918 unsigned long nr_samples = hists->stats.nr_samples;
25c312db 2919 u64 nr_events = hists->stats.total_period;
32dcd021 2920 struct evsel *evsel = hists_to_evsel(hists);
8ab2e96d 2921 const char *ev_name = evsel__name(evsel);
25c312db
ACM
2922 char buf[512], sample_freq_str[64] = "";
2923 size_t buflen = sizeof(buf);
2924 char ref[30] = " show reference callgraph, ";
2925 bool enable_ref = false;
2926
2927 if (symbol_conf.filter_relative) {
2928 nr_samples = hists->stats.nr_non_filtered_samples;
2929 nr_events = hists->stats.total_non_filtered_period;
2930 }
2931
c754c382 2932 if (evsel__is_group_event(evsel)) {
32dcd021 2933 struct evsel *pos;
25c312db 2934
347c751a 2935 evsel__group_desc(evsel, buf, buflen);
25c312db
ACM
2936 ev_name = buf;
2937
2938 for_each_group_member(pos, evsel) {
2939 struct hists *pos_hists = evsel__hists(pos);
2940
2941 if (symbol_conf.filter_relative) {
2942 nr_samples += pos_hists->stats.nr_non_filtered_samples;
2943 nr_events += pos_hists->stats.total_non_filtered_period;
2944 } else {
0f0abbac 2945 nr_samples += pos_hists->stats.nr_samples;
25c312db
ACM
2946 nr_events += pos_hists->stats.total_period;
2947 }
2948 }
2949 }
2950
2951 if (symbol_conf.show_ref_callgraph &&
2952 strstr(ev_name, "call-graph=no"))
2953 enable_ref = true;
2954
2955 if (show_freq)
1fc632ce 2956 scnprintf(sample_freq_str, sizeof(sample_freq_str), " %d Hz,", evsel->core.attr.sample_freq);
25c312db
ACM
2957
2958 nr_samples = convert_unit(nr_samples, &unit);
2959 printed = scnprintf(bf, size,
2960 "Samples: %lu%c of event%s '%s',%s%sEvent count (approx.): %" PRIu64,
5643b1a5 2961 nr_samples, unit, evsel->core.nr_members > 1 ? "s" : "",
25c312db
ACM
2962 ev_name, sample_freq_str, enable_ref ? ref : " ", nr_events);
2963
2964
2965 if (hists->uid_filter_str)
2966 printed += snprintf(bf + printed, size - printed,
2967 ", UID: %s", hists->uid_filter_str);
2968 if (thread) {
2969 if (hists__has(hists, thread)) {
2970 printed += scnprintf(bf + printed, size - printed,
2971 ", Thread: %s(%d)",
ee84a303
IR
2972 (thread__comm_set(thread) ? thread__comm_str(thread) : ""),
2973 thread__tid(thread));
25c312db
ACM
2974 } else {
2975 printed += scnprintf(bf + printed, size - printed,
2976 ", Thread: %s",
ee84a303 2977 (thread__comm_set(thread) ? thread__comm_str(thread) : ""));
25c312db
ACM
2978 }
2979 }
2980 if (dso)
2981 printed += scnprintf(bf + printed, size - printed,
ee756ef7 2982 ", DSO: %s", dso__short_name(dso));
25c312db
ACM
2983 if (socket_id > -1)
2984 printed += scnprintf(bf + printed, size - printed,
2985 ", Processor Socket: %d", socket_id);
2986
2987 return printed;
2988}
2989
33db4568
NK
2990int parse_filter_percentage(const struct option *opt __maybe_unused,
2991 const char *arg, int unset __maybe_unused)
2992{
2993 if (!strcmp(arg, "relative"))
2994 symbol_conf.filter_relative = true;
2995 else if (!strcmp(arg, "absolute"))
2996 symbol_conf.filter_relative = false;
ecc4c561 2997 else {
a596a877 2998 pr_debug("Invalid percentage: %s\n", arg);
33db4568 2999 return -1;
ecc4c561 3000 }
33db4568
NK
3001
3002 return 0;
3003}
0b93da17
NK
3004
/* perfconfig hook: handle the "hist.percentage" key, ignore the rest. */
int perf_hist_config(const char *var, const char *value)
{
	if (strcmp(var, "hist.percentage") == 0)
		return parse_filter_percentage(NULL, value, 0);

	return 0;
}
a635fc51 3012
/*
 * Zero-initialize @hists and set up its rbtrees, lock, default filters
 * and the column format list @hpp_list.  Always returns 0.
 */
int __hists__init(struct hists *hists, struct perf_hpp_list *hpp_list)
{
	memset(hists, 0, sizeof(*hists));
	hists->entries_in_array[0] = hists->entries_in_array[1] = RB_ROOT_CACHED;
	/* new entries go into array slot 0; collapse swaps the slots */
	hists->entries_in = &hists->entries_in_array[0];
	hists->entries_collapsed = RB_ROOT_CACHED;
	hists->entries = RB_ROOT_CACHED;
	mutex_init(&hists->lock);
	hists->socket_filter = -1;	/* -1 == no socket filter */
	hists->parallelism_filter = symbol_conf.parallelism_filter;
	hists->hpp_list = hpp_list;
	INIT_LIST_HEAD(&hists->hpp_formats);
	return 0;
}
3027
2eb3d689 3028static void hists__delete_remaining_entries(struct rb_root_cached *root)
61fa0e94
NK
3029{
3030 struct rb_node *node;
3031 struct hist_entry *he;
3032
2eb3d689
DB
3033 while (!RB_EMPTY_ROOT(&root->rb_root)) {
3034 node = rb_first_cached(root);
3035 rb_erase_cached(node, root);
61fa0e94
NK
3036
3037 he = rb_entry(node, struct hist_entry, rb_node_in);
3038 hist_entry__delete(he);
3039 }
3040}
3041
3042static void hists__delete_all_entries(struct hists *hists)
3043{
3044 hists__delete_entries(hists);
3045 hists__delete_remaining_entries(&hists->entries_in_array[0]);
3046 hists__delete_remaining_entries(&hists->entries_in_array[1]);
3047 hists__delete_remaining_entries(&hists->entries_collapsed);
3048}
3049
32dcd021 3050static void hists_evsel__exit(struct evsel *evsel)
17577dec
MH
3051{
3052 struct hists *hists = evsel__hists(evsel);
c3bc0c43
NK
3053 struct perf_hpp_fmt *fmt, *pos;
3054 struct perf_hpp_list_node *node, *tmp;
17577dec 3055
61fa0e94 3056 hists__delete_all_entries(hists);
9fcb43e2 3057 zfree(&hists->mem_stat_types);
225772c1 3058 zfree(&hists->mem_stat_total);
c3bc0c43
NK
3059
3060 list_for_each_entry_safe(node, tmp, &hists->hpp_formats, list) {
3061 perf_hpp_list__for_each_format_safe(&node->hpp, fmt, pos) {
e56fbc9d 3062 list_del_init(&fmt->list);
c3bc0c43
NK
3063 free(fmt);
3064 }
e56fbc9d 3065 list_del_init(&node->list);
c3bc0c43
NK
3066 free(node);
3067 }
17577dec
MH
3068}
3069
32dcd021 3070static int hists_evsel__init(struct evsel *evsel)
fc284be9
NK
3071{
3072 struct hists *hists = evsel__hists(evsel);
3073
5b65855e 3074 __hists__init(hists, &perf_hpp_list);
fc284be9
NK
3075 return 0;
3076}
3077
a635fc51
ACM
/*
 * Note: hists_evsel__exit() above frees the hist_entries stored in the
 * rbtrees when an evsel is destroyed, resolving the old concern about
 * leaking them.
 */
3082
3083int hists__init(void)
3084{
4c703828
ACM
3085 int err = evsel__object_config(sizeof(struct hists_evsel),
3086 hists_evsel__init, hists_evsel__exit);
a635fc51
ACM
3087 if (err)
3088 fputs("FATAL ERROR: Couldn't setup hists class\n", stderr);
3089
3090 return err;
3091}
94b3dc38
JO
3092
/*
 * Initialize a perf_hpp_list: both the output-field list and the
 * sort-key list start out empty; entries are added later by callers.
 */
void perf_hpp_list__init(struct perf_hpp_list *list)
{
	INIT_LIST_HEAD(&list->fields);
	INIT_LIST_HEAD(&list->sorts);
}