Commit | Line | Data |
---|---|---|
78f7defe | 1 | #include "annotate.h" |
8a0ecfb8 | 2 | #include "util.h" |
598357eb | 3 | #include "build-id.h" |
3d1d07ec | 4 | #include "hist.h" |
4e4f06e4 ACM |
5 | #include "session.h" |
6 | #include "sort.h" | |
29d720ed | 7 | #include "evsel.h" |
9b33827d | 8 | #include <math.h> |
3d1d07ec | 9 | |
90cf1fb5 ACM |
10 | static bool hists__filter_entry_by_dso(struct hists *hists, |
11 | struct hist_entry *he); | |
12 | static bool hists__filter_entry_by_thread(struct hists *hists, | |
13 | struct hist_entry *he); | |
e94d53eb NK |
14 | static bool hists__filter_entry_by_symbol(struct hists *hists, |
15 | struct hist_entry *he); | |
90cf1fb5 | 16 | |
/* Bit positions used in hist_entry->filtered to record why an entry is hidden. */
enum hist_filter {
	HIST_FILTER__DSO,
	HIST_FILTER__THREAD,
	HIST_FILTER__PARENT,
	HIST_FILTER__SYMBOL,
};
23 | ||
3d1d07ec JK |
24 | struct callchain_param callchain_param = { |
25 | .mode = CHAIN_GRAPH_REL, | |
d797fdc5 SL |
26 | .min_percent = 0.5, |
27 | .order = ORDER_CALLEE | |
3d1d07ec JK |
28 | }; |
29 | ||
42b28ac0 | 30 | u16 hists__col_len(struct hists *hists, enum hist_column col) |
8a6c5b26 | 31 | { |
42b28ac0 | 32 | return hists->col_len[col]; |
8a6c5b26 ACM |
33 | } |
34 | ||
42b28ac0 | 35 | void hists__set_col_len(struct hists *hists, enum hist_column col, u16 len) |
8a6c5b26 | 36 | { |
42b28ac0 | 37 | hists->col_len[col] = len; |
8a6c5b26 ACM |
38 | } |
39 | ||
42b28ac0 | 40 | bool hists__new_col_len(struct hists *hists, enum hist_column col, u16 len) |
8a6c5b26 | 41 | { |
42b28ac0 ACM |
42 | if (len > hists__col_len(hists, col)) { |
43 | hists__set_col_len(hists, col, len); | |
8a6c5b26 ACM |
44 | return true; |
45 | } | |
46 | return false; | |
47 | } | |
48 | ||
7ccf4f90 | 49 | void hists__reset_col_len(struct hists *hists) |
8a6c5b26 ACM |
50 | { |
51 | enum hist_column col; | |
52 | ||
53 | for (col = 0; col < HISTC_NR_COLS; ++col) | |
42b28ac0 | 54 | hists__set_col_len(hists, col, 0); |
8a6c5b26 ACM |
55 | } |
56 | ||
b5387528 RAV |
57 | static void hists__set_unres_dso_col_len(struct hists *hists, int dso) |
58 | { | |
59 | const unsigned int unresolved_col_width = BITS_PER_LONG / 4; | |
60 | ||
61 | if (hists__col_len(hists, dso) < unresolved_col_width && | |
62 | !symbol_conf.col_width_list_str && !symbol_conf.field_sep && | |
63 | !symbol_conf.dso_list) | |
64 | hists__set_col_len(hists, dso, unresolved_col_width); | |
65 | } | |
66 | ||
7ccf4f90 | 67 | void hists__calc_col_len(struct hists *hists, struct hist_entry *h) |
8a6c5b26 | 68 | { |
b5387528 | 69 | const unsigned int unresolved_col_width = BITS_PER_LONG / 4; |
98a3b32c | 70 | int symlen; |
8a6c5b26 ACM |
71 | u16 len; |
72 | ||
73 | if (h->ms.sym) | |
b5387528 | 74 | hists__new_col_len(hists, HISTC_SYMBOL, h->ms.sym->namelen + 4); |
98a3b32c SE |
75 | else { |
76 | symlen = unresolved_col_width + 4 + 2; | |
77 | hists__new_col_len(hists, HISTC_SYMBOL, symlen); | |
b5387528 | 78 | hists__set_unres_dso_col_len(hists, HISTC_DSO); |
98a3b32c | 79 | } |
8a6c5b26 ACM |
80 | |
81 | len = thread__comm_len(h->thread); | |
42b28ac0 ACM |
82 | if (hists__new_col_len(hists, HISTC_COMM, len)) |
83 | hists__set_col_len(hists, HISTC_THREAD, len + 6); | |
8a6c5b26 ACM |
84 | |
85 | if (h->ms.map) { | |
86 | len = dso__name_len(h->ms.map->dso); | |
42b28ac0 | 87 | hists__new_col_len(hists, HISTC_DSO, len); |
8a6c5b26 | 88 | } |
b5387528 | 89 | |
cb993744 NK |
90 | if (h->parent) |
91 | hists__new_col_len(hists, HISTC_PARENT, h->parent->namelen); | |
92 | ||
b5387528 | 93 | if (h->branch_info) { |
b5387528 RAV |
94 | /* |
95 | * +4 accounts for '[x] ' priv level info | |
96 | * +2 account of 0x prefix on raw addresses | |
97 | */ | |
98 | if (h->branch_info->from.sym) { | |
99 | symlen = (int)h->branch_info->from.sym->namelen + 4; | |
100 | hists__new_col_len(hists, HISTC_SYMBOL_FROM, symlen); | |
101 | ||
102 | symlen = dso__name_len(h->branch_info->from.map->dso); | |
103 | hists__new_col_len(hists, HISTC_DSO_FROM, symlen); | |
104 | } else { | |
105 | symlen = unresolved_col_width + 4 + 2; | |
106 | hists__new_col_len(hists, HISTC_SYMBOL_FROM, symlen); | |
107 | hists__set_unres_dso_col_len(hists, HISTC_DSO_FROM); | |
108 | } | |
109 | ||
110 | if (h->branch_info->to.sym) { | |
111 | symlen = (int)h->branch_info->to.sym->namelen + 4; | |
112 | hists__new_col_len(hists, HISTC_SYMBOL_TO, symlen); | |
113 | ||
114 | symlen = dso__name_len(h->branch_info->to.map->dso); | |
115 | hists__new_col_len(hists, HISTC_DSO_TO, symlen); | |
116 | } else { | |
117 | symlen = unresolved_col_width + 4 + 2; | |
118 | hists__new_col_len(hists, HISTC_SYMBOL_TO, symlen); | |
119 | hists__set_unres_dso_col_len(hists, HISTC_DSO_TO); | |
120 | } | |
121 | } | |
98a3b32c SE |
122 | |
123 | if (h->mem_info) { | |
124 | /* | |
125 | * +4 accounts for '[x] ' priv level info | |
126 | * +2 account of 0x prefix on raw addresses | |
127 | */ | |
128 | if (h->mem_info->daddr.sym) { | |
129 | symlen = (int)h->mem_info->daddr.sym->namelen + 4 | |
130 | + unresolved_col_width + 2; | |
131 | hists__new_col_len(hists, HISTC_MEM_DADDR_SYMBOL, | |
132 | symlen); | |
133 | } else { | |
134 | symlen = unresolved_col_width + 4 + 2; | |
135 | hists__new_col_len(hists, HISTC_MEM_DADDR_SYMBOL, | |
136 | symlen); | |
137 | } | |
138 | if (h->mem_info->daddr.map) { | |
139 | symlen = dso__name_len(h->mem_info->daddr.map->dso); | |
140 | hists__new_col_len(hists, HISTC_MEM_DADDR_DSO, | |
141 | symlen); | |
142 | } else { | |
143 | symlen = unresolved_col_width + 4 + 2; | |
144 | hists__set_unres_dso_col_len(hists, HISTC_MEM_DADDR_DSO); | |
145 | } | |
146 | } else { | |
147 | symlen = unresolved_col_width + 4 + 2; | |
148 | hists__new_col_len(hists, HISTC_MEM_DADDR_SYMBOL, symlen); | |
149 | hists__set_unres_dso_col_len(hists, HISTC_MEM_DADDR_DSO); | |
150 | } | |
151 | ||
152 | hists__new_col_len(hists, HISTC_MEM_LOCKED, 6); | |
153 | hists__new_col_len(hists, HISTC_MEM_TLB, 22); | |
154 | hists__new_col_len(hists, HISTC_MEM_SNOOP, 12); | |
155 | hists__new_col_len(hists, HISTC_MEM_LVL, 21 + 3); | |
156 | hists__new_col_len(hists, HISTC_LOCAL_WEIGHT, 12); | |
157 | hists__new_col_len(hists, HISTC_GLOBAL_WEIGHT, 12); | |
8a6c5b26 ACM |
158 | } |
159 | ||
7ccf4f90 NK |
160 | void hists__output_recalc_col_len(struct hists *hists, int max_rows) |
161 | { | |
162 | struct rb_node *next = rb_first(&hists->entries); | |
163 | struct hist_entry *n; | |
164 | int row = 0; | |
165 | ||
166 | hists__reset_col_len(hists); | |
167 | ||
168 | while (next && row++ < max_rows) { | |
169 | n = rb_entry(next, struct hist_entry, rb_node); | |
170 | if (!n->filtered) | |
171 | hists__calc_col_len(hists, n); | |
172 | next = rb_next(&n->rb_node); | |
173 | } | |
174 | } | |
175 | ||
12c14278 | 176 | static void hist_entry__add_cpumode_period(struct hist_entry *he, |
c82ee828 | 177 | unsigned int cpumode, u64 period) |
a1645ce1 | 178 | { |
28e2a106 | 179 | switch (cpumode) { |
a1645ce1 | 180 | case PERF_RECORD_MISC_KERNEL: |
b24c28f7 | 181 | he->stat.period_sys += period; |
a1645ce1 ZY |
182 | break; |
183 | case PERF_RECORD_MISC_USER: | |
b24c28f7 | 184 | he->stat.period_us += period; |
a1645ce1 ZY |
185 | break; |
186 | case PERF_RECORD_MISC_GUEST_KERNEL: | |
b24c28f7 | 187 | he->stat.period_guest_sys += period; |
a1645ce1 ZY |
188 | break; |
189 | case PERF_RECORD_MISC_GUEST_USER: | |
b24c28f7 | 190 | he->stat.period_guest_us += period; |
a1645ce1 ZY |
191 | break; |
192 | default: | |
193 | break; | |
194 | } | |
195 | } | |
196 | ||
05484298 AK |
197 | static void he_stat__add_period(struct he_stat *he_stat, u64 period, |
198 | u64 weight) | |
139c0815 | 199 | { |
98a3b32c | 200 | |
139c0815 | 201 | he_stat->period += period; |
05484298 | 202 | he_stat->weight += weight; |
139c0815 NK |
203 | he_stat->nr_events += 1; |
204 | } | |
205 | ||
206 | static void he_stat__add_stat(struct he_stat *dest, struct he_stat *src) | |
207 | { | |
208 | dest->period += src->period; | |
209 | dest->period_sys += src->period_sys; | |
210 | dest->period_us += src->period_us; | |
211 | dest->period_guest_sys += src->period_guest_sys; | |
212 | dest->period_guest_us += src->period_guest_us; | |
213 | dest->nr_events += src->nr_events; | |
05484298 | 214 | dest->weight += src->weight; |
139c0815 NK |
215 | } |
216 | ||
ab81f3fd ACM |
217 | static void hist_entry__decay(struct hist_entry *he) |
218 | { | |
b24c28f7 NK |
219 | he->stat.period = (he->stat.period * 7) / 8; |
220 | he->stat.nr_events = (he->stat.nr_events * 7) / 8; | |
05484298 | 221 | /* XXX need decay for weight too? */ |
ab81f3fd ACM |
222 | } |
223 | ||
224 | static bool hists__decay_entry(struct hists *hists, struct hist_entry *he) | |
225 | { | |
b24c28f7 | 226 | u64 prev_period = he->stat.period; |
c64550cf ACM |
227 | |
228 | if (prev_period == 0) | |
df71d95f | 229 | return true; |
c64550cf | 230 | |
ab81f3fd | 231 | hist_entry__decay(he); |
c64550cf ACM |
232 | |
233 | if (!he->filtered) | |
b24c28f7 | 234 | hists->stats.total_period -= prev_period - he->stat.period; |
c64550cf | 235 | |
b24c28f7 | 236 | return he->stat.period == 0; |
ab81f3fd ACM |
237 | } |
238 | ||
b079d4e9 ACM |
239 | static void __hists__decay_entries(struct hists *hists, bool zap_user, |
240 | bool zap_kernel, bool threaded) | |
ab81f3fd ACM |
241 | { |
242 | struct rb_node *next = rb_first(&hists->entries); | |
243 | struct hist_entry *n; | |
244 | ||
245 | while (next) { | |
246 | n = rb_entry(next, struct hist_entry, rb_node); | |
247 | next = rb_next(&n->rb_node); | |
df71d95f ACM |
248 | /* |
249 | * We may be annotating this, for instance, so keep it here in | |
250 | * case some it gets new samples, we'll eventually free it when | |
251 | * the user stops browsing and it agains gets fully decayed. | |
252 | */ | |
b079d4e9 ACM |
253 | if (((zap_user && n->level == '.') || |
254 | (zap_kernel && n->level != '.') || | |
255 | hists__decay_entry(hists, n)) && | |
256 | !n->used) { | |
ab81f3fd ACM |
257 | rb_erase(&n->rb_node, &hists->entries); |
258 | ||
e345fa18 | 259 | if (sort__need_collapse || threaded) |
ab81f3fd ACM |
260 | rb_erase(&n->rb_node_in, &hists->entries_collapsed); |
261 | ||
262 | hist_entry__free(n); | |
263 | --hists->nr_entries; | |
264 | } | |
265 | } | |
266 | } | |
267 | ||
/* Single-threaded wrapper around __hists__decay_entries(). */
void hists__decay_entries(struct hists *hists, bool zap_user, bool zap_kernel)
{
	__hists__decay_entries(hists, zap_user, zap_kernel, false);
}
272 | ||
/* Threaded wrapper around __hists__decay_entries(). */
void hists__decay_entries_threaded(struct hists *hists,
				   bool zap_user, bool zap_kernel)
{
	__hists__decay_entries(hists, zap_user, zap_kernel, true);
}
278 | ||
3d1d07ec | 279 | /* |
c82ee828 | 280 | * histogram, sorted on item, collects periods |
3d1d07ec JK |
281 | */ |
282 | ||
28e2a106 ACM |
283 | static struct hist_entry *hist_entry__new(struct hist_entry *template) |
284 | { | |
d2009c51 | 285 | size_t callchain_size = symbol_conf.use_callchain ? sizeof(struct callchain_root) : 0; |
98a3b32c | 286 | struct hist_entry *he = zalloc(sizeof(*he) + callchain_size); |
28e2a106 | 287 | |
12c14278 ACM |
288 | if (he != NULL) { |
289 | *he = *template; | |
c4b35351 | 290 | |
12c14278 ACM |
291 | if (he->ms.map) |
292 | he->ms.map->referenced = true; | |
3cf0cb1f SE |
293 | |
294 | if (he->branch_info) { | |
26353a61 NK |
295 | /* |
296 | * This branch info is (a part of) allocated from | |
297 | * machine__resolve_bstack() and will be freed after | |
298 | * adding new entries. So we need to save a copy. | |
299 | */ | |
300 | he->branch_info = malloc(sizeof(*he->branch_info)); | |
301 | if (he->branch_info == NULL) { | |
302 | free(he); | |
303 | return NULL; | |
304 | } | |
305 | ||
306 | memcpy(he->branch_info, template->branch_info, | |
307 | sizeof(*he->branch_info)); | |
308 | ||
3cf0cb1f SE |
309 | if (he->branch_info->from.map) |
310 | he->branch_info->from.map->referenced = true; | |
311 | if (he->branch_info->to.map) | |
312 | he->branch_info->to.map->referenced = true; | |
313 | } | |
314 | ||
98a3b32c SE |
315 | if (he->mem_info) { |
316 | if (he->mem_info->iaddr.map) | |
317 | he->mem_info->iaddr.map->referenced = true; | |
318 | if (he->mem_info->daddr.map) | |
319 | he->mem_info->daddr.map->referenced = true; | |
320 | } | |
321 | ||
28e2a106 | 322 | if (symbol_conf.use_callchain) |
12c14278 | 323 | callchain_init(he->callchain); |
b821c732 ACM |
324 | |
325 | INIT_LIST_HEAD(&he->pairs.node); | |
28e2a106 ACM |
326 | } |
327 | ||
12c14278 | 328 | return he; |
28e2a106 ACM |
329 | } |
330 | ||
66f97ed3 | 331 | void hists__inc_nr_entries(struct hists *hists, struct hist_entry *h) |
fefb0b94 | 332 | { |
8a6c5b26 | 333 | if (!h->filtered) { |
42b28ac0 ACM |
334 | hists__calc_col_len(hists, h); |
335 | ++hists->nr_entries; | |
b24c28f7 | 336 | hists->stats.total_period += h->stat.period; |
8a6c5b26 | 337 | } |
fefb0b94 ACM |
338 | } |
339 | ||
7a007ca9 ACM |
340 | static u8 symbol__parent_filter(const struct symbol *parent) |
341 | { | |
342 | if (symbol_conf.exclude_other && parent == NULL) | |
343 | return 1 << HIST_FILTER__PARENT; | |
344 | return 0; | |
345 | } | |
346 | ||
b5387528 RAV |
347 | static struct hist_entry *add_hist_entry(struct hists *hists, |
348 | struct hist_entry *entry, | |
1c02c4d2 | 349 | struct addr_location *al, |
05484298 AK |
350 | u64 period, |
351 | u64 weight) | |
9735abf1 | 352 | { |
1980c2eb | 353 | struct rb_node **p; |
9735abf1 ACM |
354 | struct rb_node *parent = NULL; |
355 | struct hist_entry *he; | |
9735abf1 ACM |
356 | int cmp; |
357 | ||
1980c2eb ACM |
358 | pthread_mutex_lock(&hists->lock); |
359 | ||
360 | p = &hists->entries_in->rb_node; | |
361 | ||
9735abf1 ACM |
362 | while (*p != NULL) { |
363 | parent = *p; | |
1980c2eb | 364 | he = rb_entry(parent, struct hist_entry, rb_node_in); |
9735abf1 | 365 | |
9afcf930 NK |
366 | /* |
367 | * Make sure that it receives arguments in a same order as | |
368 | * hist_entry__collapse() so that we can use an appropriate | |
369 | * function when searching an entry regardless which sort | |
370 | * keys were used. | |
371 | */ | |
372 | cmp = hist_entry__cmp(he, entry); | |
9735abf1 ACM |
373 | |
374 | if (!cmp) { | |
05484298 | 375 | he_stat__add_period(&he->stat, period, weight); |
63fa471d | 376 | |
ceb2acbc NK |
377 | /* |
378 | * This mem info was allocated from machine__resolve_mem | |
379 | * and will not be used anymore. | |
380 | */ | |
381 | free(entry->mem_info); | |
382 | ||
63fa471d DM |
383 | /* If the map of an existing hist_entry has |
384 | * become out-of-date due to an exec() or | |
385 | * similar, update it. Otherwise we will | |
386 | * mis-adjust symbol addresses when computing | |
387 | * the history counter to increment. | |
388 | */ | |
389 | if (he->ms.map != entry->ms.map) { | |
390 | he->ms.map = entry->ms.map; | |
391 | if (he->ms.map) | |
392 | he->ms.map->referenced = true; | |
393 | } | |
28e2a106 | 394 | goto out; |
9735abf1 ACM |
395 | } |
396 | ||
397 | if (cmp < 0) | |
398 | p = &(*p)->rb_left; | |
399 | else | |
400 | p = &(*p)->rb_right; | |
401 | } | |
402 | ||
b5387528 | 403 | he = hist_entry__new(entry); |
9735abf1 | 404 | if (!he) |
1980c2eb ACM |
405 | goto out_unlock; |
406 | ||
407 | rb_link_node(&he->rb_node_in, parent, p); | |
408 | rb_insert_color(&he->rb_node_in, hists->entries_in); | |
28e2a106 | 409 | out: |
c82ee828 | 410 | hist_entry__add_cpumode_period(he, al->cpumode, period); |
1980c2eb ACM |
411 | out_unlock: |
412 | pthread_mutex_unlock(&hists->lock); | |
9735abf1 ACM |
413 | return he; |
414 | } | |
415 | ||
98a3b32c SE |
416 | struct hist_entry *__hists__add_mem_entry(struct hists *self, |
417 | struct addr_location *al, | |
418 | struct symbol *sym_parent, | |
419 | struct mem_info *mi, | |
420 | u64 period, | |
421 | u64 weight) | |
422 | { | |
423 | struct hist_entry entry = { | |
424 | .thread = al->thread, | |
425 | .ms = { | |
426 | .map = al->map, | |
427 | .sym = al->sym, | |
428 | }, | |
429 | .stat = { | |
430 | .period = period, | |
431 | .weight = weight, | |
432 | .nr_events = 1, | |
433 | }, | |
434 | .cpu = al->cpu, | |
435 | .ip = al->addr, | |
436 | .level = al->level, | |
437 | .parent = sym_parent, | |
438 | .filtered = symbol__parent_filter(sym_parent), | |
439 | .hists = self, | |
440 | .mem_info = mi, | |
441 | .branch_info = NULL, | |
442 | }; | |
443 | return add_hist_entry(self, &entry, al, period, weight); | |
444 | } | |
445 | ||
b5387528 RAV |
446 | struct hist_entry *__hists__add_branch_entry(struct hists *self, |
447 | struct addr_location *al, | |
448 | struct symbol *sym_parent, | |
449 | struct branch_info *bi, | |
05484298 AK |
450 | u64 period, |
451 | u64 weight) | |
b5387528 RAV |
452 | { |
453 | struct hist_entry entry = { | |
454 | .thread = al->thread, | |
455 | .ms = { | |
456 | .map = bi->to.map, | |
457 | .sym = bi->to.sym, | |
458 | }, | |
459 | .cpu = al->cpu, | |
460 | .ip = bi->to.addr, | |
461 | .level = al->level, | |
b24c28f7 NK |
462 | .stat = { |
463 | .period = period, | |
c4b35351 | 464 | .nr_events = 1, |
05484298 | 465 | .weight = weight, |
b24c28f7 | 466 | }, |
b5387528 RAV |
467 | .parent = sym_parent, |
468 | .filtered = symbol__parent_filter(sym_parent), | |
469 | .branch_info = bi, | |
ae359f19 | 470 | .hists = self, |
98a3b32c | 471 | .mem_info = NULL, |
b5387528 RAV |
472 | }; |
473 | ||
05484298 | 474 | return add_hist_entry(self, &entry, al, period, weight); |
b5387528 RAV |
475 | } |
476 | ||
477 | struct hist_entry *__hists__add_entry(struct hists *self, | |
478 | struct addr_location *al, | |
05484298 AK |
479 | struct symbol *sym_parent, u64 period, |
480 | u64 weight) | |
b5387528 RAV |
481 | { |
482 | struct hist_entry entry = { | |
483 | .thread = al->thread, | |
484 | .ms = { | |
485 | .map = al->map, | |
486 | .sym = al->sym, | |
487 | }, | |
488 | .cpu = al->cpu, | |
489 | .ip = al->addr, | |
490 | .level = al->level, | |
b24c28f7 NK |
491 | .stat = { |
492 | .period = period, | |
c4b35351 | 493 | .nr_events = 1, |
05484298 | 494 | .weight = weight, |
b24c28f7 | 495 | }, |
b5387528 RAV |
496 | .parent = sym_parent, |
497 | .filtered = symbol__parent_filter(sym_parent), | |
ae359f19 | 498 | .hists = self, |
98a3b32c SE |
499 | .branch_info = NULL, |
500 | .mem_info = NULL, | |
b5387528 RAV |
501 | }; |
502 | ||
05484298 | 503 | return add_hist_entry(self, &entry, al, period, weight); |
b5387528 RAV |
504 | } |
505 | ||
3d1d07ec JK |
506 | int64_t |
507 | hist_entry__cmp(struct hist_entry *left, struct hist_entry *right) | |
508 | { | |
509 | struct sort_entry *se; | |
510 | int64_t cmp = 0; | |
511 | ||
512 | list_for_each_entry(se, &hist_entry__sort_list, list) { | |
fcd14984 | 513 | cmp = se->se_cmp(left, right); |
3d1d07ec JK |
514 | if (cmp) |
515 | break; | |
516 | } | |
517 | ||
518 | return cmp; | |
519 | } | |
520 | ||
521 | int64_t | |
522 | hist_entry__collapse(struct hist_entry *left, struct hist_entry *right) | |
523 | { | |
524 | struct sort_entry *se; | |
525 | int64_t cmp = 0; | |
526 | ||
527 | list_for_each_entry(se, &hist_entry__sort_list, list) { | |
528 | int64_t (*f)(struct hist_entry *, struct hist_entry *); | |
529 | ||
fcd14984 | 530 | f = se->se_collapse ?: se->se_cmp; |
3d1d07ec JK |
531 | |
532 | cmp = f(left, right); | |
533 | if (cmp) | |
534 | break; | |
535 | } | |
536 | ||
537 | return cmp; | |
538 | } | |
539 | ||
540 | void hist_entry__free(struct hist_entry *he) | |
541 | { | |
580e338d | 542 | free(he->branch_info); |
028f12ee | 543 | free(he->mem_info); |
3d1d07ec JK |
544 | free(he); |
545 | } | |
546 | ||
547 | /* | |
548 | * collapse the histogram | |
549 | */ | |
550 | ||
1d037ca1 | 551 | static bool hists__collapse_insert_entry(struct hists *hists __maybe_unused, |
1b3a0e95 FW |
552 | struct rb_root *root, |
553 | struct hist_entry *he) | |
3d1d07ec | 554 | { |
b9bf0892 | 555 | struct rb_node **p = &root->rb_node; |
3d1d07ec JK |
556 | struct rb_node *parent = NULL; |
557 | struct hist_entry *iter; | |
558 | int64_t cmp; | |
559 | ||
560 | while (*p != NULL) { | |
561 | parent = *p; | |
1980c2eb | 562 | iter = rb_entry(parent, struct hist_entry, rb_node_in); |
3d1d07ec JK |
563 | |
564 | cmp = hist_entry__collapse(iter, he); | |
565 | ||
566 | if (!cmp) { | |
139c0815 | 567 | he_stat__add_stat(&iter->stat, &he->stat); |
9ec60972 | 568 | |
1b3a0e95 | 569 | if (symbol_conf.use_callchain) { |
47260645 NK |
570 | callchain_cursor_reset(&callchain_cursor); |
571 | callchain_merge(&callchain_cursor, | |
572 | iter->callchain, | |
1b3a0e95 FW |
573 | he->callchain); |
574 | } | |
3d1d07ec | 575 | hist_entry__free(he); |
fefb0b94 | 576 | return false; |
3d1d07ec JK |
577 | } |
578 | ||
579 | if (cmp < 0) | |
580 | p = &(*p)->rb_left; | |
581 | else | |
582 | p = &(*p)->rb_right; | |
583 | } | |
584 | ||
1980c2eb ACM |
585 | rb_link_node(&he->rb_node_in, parent, p); |
586 | rb_insert_color(&he->rb_node_in, root); | |
fefb0b94 | 587 | return true; |
3d1d07ec JK |
588 | } |
589 | ||
1980c2eb | 590 | static struct rb_root *hists__get_rotate_entries_in(struct hists *hists) |
3d1d07ec | 591 | { |
1980c2eb ACM |
592 | struct rb_root *root; |
593 | ||
594 | pthread_mutex_lock(&hists->lock); | |
595 | ||
596 | root = hists->entries_in; | |
597 | if (++hists->entries_in > &hists->entries_in_array[1]) | |
598 | hists->entries_in = &hists->entries_in_array[0]; | |
599 | ||
600 | pthread_mutex_unlock(&hists->lock); | |
601 | ||
602 | return root; | |
603 | } | |
604 | ||
/* Re-evaluate every active filter (dso/thread/symbol) against @he. */
static void hists__apply_filters(struct hists *hists, struct hist_entry *he)
{
	hists__filter_entry_by_dso(hists, he);
	hists__filter_entry_by_thread(hists, he);
	hists__filter_entry_by_symbol(hists, he);
}
611 | ||
1980c2eb ACM |
612 | static void __hists__collapse_resort(struct hists *hists, bool threaded) |
613 | { | |
614 | struct rb_root *root; | |
3d1d07ec JK |
615 | struct rb_node *next; |
616 | struct hist_entry *n; | |
617 | ||
1980c2eb | 618 | if (!sort__need_collapse && !threaded) |
3d1d07ec JK |
619 | return; |
620 | ||
1980c2eb ACM |
621 | root = hists__get_rotate_entries_in(hists); |
622 | next = rb_first(root); | |
b9bf0892 | 623 | |
3d1d07ec | 624 | while (next) { |
1980c2eb ACM |
625 | n = rb_entry(next, struct hist_entry, rb_node_in); |
626 | next = rb_next(&n->rb_node_in); | |
3d1d07ec | 627 | |
1980c2eb | 628 | rb_erase(&n->rb_node_in, root); |
90cf1fb5 ACM |
629 | if (hists__collapse_insert_entry(hists, &hists->entries_collapsed, n)) { |
630 | /* | |
631 | * If it wasn't combined with one of the entries already | |
632 | * collapsed, we need to apply the filters that may have | |
633 | * been set by, say, the hist_browser. | |
634 | */ | |
635 | hists__apply_filters(hists, n); | |
90cf1fb5 | 636 | } |
3d1d07ec | 637 | } |
1980c2eb | 638 | } |
b9bf0892 | 639 | |
/* Single-threaded wrapper around __hists__collapse_resort(). */
void hists__collapse_resort(struct hists *hists)
{
	__hists__collapse_resort(hists, false);
}
644 | ||
/* Threaded wrapper around __hists__collapse_resort(). */
void hists__collapse_resort_threaded(struct hists *hists)
{
	__hists__collapse_resort(hists, true);
}
649 | ||
650 | /* | |
c82ee828 | 651 | * reverse the map, sort on period. |
3d1d07ec JK |
652 | */ |
653 | ||
29d720ed NK |
654 | static int period_cmp(u64 period_a, u64 period_b) |
655 | { | |
656 | if (period_a > period_b) | |
657 | return 1; | |
658 | if (period_a < period_b) | |
659 | return -1; | |
660 | return 0; | |
661 | } | |
662 | ||
663 | static int hist_entry__sort_on_period(struct hist_entry *a, | |
664 | struct hist_entry *b) | |
665 | { | |
666 | int ret; | |
667 | int i, nr_members; | |
668 | struct perf_evsel *evsel; | |
669 | struct hist_entry *pair; | |
670 | u64 *periods_a, *periods_b; | |
671 | ||
672 | ret = period_cmp(a->stat.period, b->stat.period); | |
673 | if (ret || !symbol_conf.event_group) | |
674 | return ret; | |
675 | ||
676 | evsel = hists_to_evsel(a->hists); | |
677 | nr_members = evsel->nr_members; | |
678 | if (nr_members <= 1) | |
679 | return ret; | |
680 | ||
681 | periods_a = zalloc(sizeof(periods_a) * nr_members); | |
682 | periods_b = zalloc(sizeof(periods_b) * nr_members); | |
683 | ||
684 | if (!periods_a || !periods_b) | |
685 | goto out; | |
686 | ||
687 | list_for_each_entry(pair, &a->pairs.head, pairs.node) { | |
688 | evsel = hists_to_evsel(pair->hists); | |
689 | periods_a[perf_evsel__group_idx(evsel)] = pair->stat.period; | |
690 | } | |
691 | ||
692 | list_for_each_entry(pair, &b->pairs.head, pairs.node) { | |
693 | evsel = hists_to_evsel(pair->hists); | |
694 | periods_b[perf_evsel__group_idx(evsel)] = pair->stat.period; | |
695 | } | |
696 | ||
697 | for (i = 1; i < nr_members; i++) { | |
698 | ret = period_cmp(periods_a[i], periods_b[i]); | |
699 | if (ret) | |
700 | break; | |
701 | } | |
702 | ||
703 | out: | |
704 | free(periods_a); | |
705 | free(periods_b); | |
706 | ||
707 | return ret; | |
708 | } | |
709 | ||
1c02c4d2 ACM |
710 | static void __hists__insert_output_entry(struct rb_root *entries, |
711 | struct hist_entry *he, | |
712 | u64 min_callchain_hits) | |
3d1d07ec | 713 | { |
1c02c4d2 | 714 | struct rb_node **p = &entries->rb_node; |
3d1d07ec JK |
715 | struct rb_node *parent = NULL; |
716 | struct hist_entry *iter; | |
717 | ||
d599db3f | 718 | if (symbol_conf.use_callchain) |
b9fb9304 | 719 | callchain_param.sort(&he->sorted_chain, he->callchain, |
3d1d07ec JK |
720 | min_callchain_hits, &callchain_param); |
721 | ||
722 | while (*p != NULL) { | |
723 | parent = *p; | |
724 | iter = rb_entry(parent, struct hist_entry, rb_node); | |
725 | ||
29d720ed | 726 | if (hist_entry__sort_on_period(he, iter) > 0) |
3d1d07ec JK |
727 | p = &(*p)->rb_left; |
728 | else | |
729 | p = &(*p)->rb_right; | |
730 | } | |
731 | ||
732 | rb_link_node(&he->rb_node, parent, p); | |
1c02c4d2 | 733 | rb_insert_color(&he->rb_node, entries); |
3d1d07ec JK |
734 | } |
735 | ||
1980c2eb | 736 | static void __hists__output_resort(struct hists *hists, bool threaded) |
3d1d07ec | 737 | { |
1980c2eb | 738 | struct rb_root *root; |
3d1d07ec JK |
739 | struct rb_node *next; |
740 | struct hist_entry *n; | |
3d1d07ec JK |
741 | u64 min_callchain_hits; |
742 | ||
42b28ac0 | 743 | min_callchain_hits = hists->stats.total_period * (callchain_param.min_percent / 100); |
3d1d07ec | 744 | |
1980c2eb ACM |
745 | if (sort__need_collapse || threaded) |
746 | root = &hists->entries_collapsed; | |
747 | else | |
748 | root = hists->entries_in; | |
749 | ||
750 | next = rb_first(root); | |
751 | hists->entries = RB_ROOT; | |
3d1d07ec | 752 | |
42b28ac0 | 753 | hists->nr_entries = 0; |
7928631a | 754 | hists->stats.total_period = 0; |
42b28ac0 | 755 | hists__reset_col_len(hists); |
fefb0b94 | 756 | |
3d1d07ec | 757 | while (next) { |
1980c2eb ACM |
758 | n = rb_entry(next, struct hist_entry, rb_node_in); |
759 | next = rb_next(&n->rb_node_in); | |
3d1d07ec | 760 | |
1980c2eb | 761 | __hists__insert_output_entry(&hists->entries, n, min_callchain_hits); |
42b28ac0 | 762 | hists__inc_nr_entries(hists, n); |
3d1d07ec | 763 | } |
1980c2eb | 764 | } |
b9bf0892 | 765 | |
/* Single-threaded wrapper around __hists__output_resort(). */
void hists__output_resort(struct hists *hists)
{
	__hists__output_resort(hists, false);
}
770 | ||
/* Threaded wrapper around __hists__output_resort(). */
void hists__output_resort_threaded(struct hists *hists)
{
	__hists__output_resort(hists, true);
}
4ecf84d0 | 775 | |
42b28ac0 | 776 | static void hists__remove_entry_filter(struct hists *hists, struct hist_entry *h, |
cc5edb0e ACM |
777 | enum hist_filter filter) |
778 | { | |
779 | h->filtered &= ~(1 << filter); | |
780 | if (h->filtered) | |
781 | return; | |
782 | ||
42b28ac0 | 783 | ++hists->nr_entries; |
0f0cbf7a | 784 | if (h->ms.unfolded) |
42b28ac0 | 785 | hists->nr_entries += h->nr_rows; |
0f0cbf7a | 786 | h->row_offset = 0; |
b24c28f7 NK |
787 | hists->stats.total_period += h->stat.period; |
788 | hists->stats.nr_events[PERF_RECORD_SAMPLE] += h->stat.nr_events; | |
cc5edb0e | 789 | |
42b28ac0 | 790 | hists__calc_col_len(hists, h); |
cc5edb0e ACM |
791 | } |
792 | ||
90cf1fb5 ACM |
793 | |
794 | static bool hists__filter_entry_by_dso(struct hists *hists, | |
795 | struct hist_entry *he) | |
796 | { | |
797 | if (hists->dso_filter != NULL && | |
798 | (he->ms.map == NULL || he->ms.map->dso != hists->dso_filter)) { | |
799 | he->filtered |= (1 << HIST_FILTER__DSO); | |
800 | return true; | |
801 | } | |
802 | ||
803 | return false; | |
804 | } | |
805 | ||
d7b76f09 | 806 | void hists__filter_by_dso(struct hists *hists) |
b09e0190 ACM |
807 | { |
808 | struct rb_node *nd; | |
809 | ||
42b28ac0 ACM |
810 | hists->nr_entries = hists->stats.total_period = 0; |
811 | hists->stats.nr_events[PERF_RECORD_SAMPLE] = 0; | |
812 | hists__reset_col_len(hists); | |
b09e0190 | 813 | |
42b28ac0 | 814 | for (nd = rb_first(&hists->entries); nd; nd = rb_next(nd)) { |
b09e0190 ACM |
815 | struct hist_entry *h = rb_entry(nd, struct hist_entry, rb_node); |
816 | ||
817 | if (symbol_conf.exclude_other && !h->parent) | |
818 | continue; | |
819 | ||
90cf1fb5 | 820 | if (hists__filter_entry_by_dso(hists, h)) |
b09e0190 | 821 | continue; |
b09e0190 | 822 | |
42b28ac0 | 823 | hists__remove_entry_filter(hists, h, HIST_FILTER__DSO); |
b09e0190 ACM |
824 | } |
825 | } | |
826 | ||
90cf1fb5 ACM |
827 | static bool hists__filter_entry_by_thread(struct hists *hists, |
828 | struct hist_entry *he) | |
829 | { | |
830 | if (hists->thread_filter != NULL && | |
831 | he->thread != hists->thread_filter) { | |
832 | he->filtered |= (1 << HIST_FILTER__THREAD); | |
833 | return true; | |
834 | } | |
835 | ||
836 | return false; | |
837 | } | |
838 | ||
d7b76f09 | 839 | void hists__filter_by_thread(struct hists *hists) |
b09e0190 ACM |
840 | { |
841 | struct rb_node *nd; | |
842 | ||
42b28ac0 ACM |
843 | hists->nr_entries = hists->stats.total_period = 0; |
844 | hists->stats.nr_events[PERF_RECORD_SAMPLE] = 0; | |
845 | hists__reset_col_len(hists); | |
b09e0190 | 846 | |
42b28ac0 | 847 | for (nd = rb_first(&hists->entries); nd; nd = rb_next(nd)) { |
b09e0190 ACM |
848 | struct hist_entry *h = rb_entry(nd, struct hist_entry, rb_node); |
849 | ||
90cf1fb5 | 850 | if (hists__filter_entry_by_thread(hists, h)) |
b09e0190 | 851 | continue; |
cc5edb0e | 852 | |
42b28ac0 | 853 | hists__remove_entry_filter(hists, h, HIST_FILTER__THREAD); |
b09e0190 ACM |
854 | } |
855 | } | |
ef7b93a1 | 856 | |
e94d53eb NK |
857 | static bool hists__filter_entry_by_symbol(struct hists *hists, |
858 | struct hist_entry *he) | |
859 | { | |
860 | if (hists->symbol_filter_str != NULL && | |
861 | (!he->ms.sym || strstr(he->ms.sym->name, | |
862 | hists->symbol_filter_str) == NULL)) { | |
863 | he->filtered |= (1 << HIST_FILTER__SYMBOL); | |
864 | return true; | |
865 | } | |
866 | ||
867 | return false; | |
868 | } | |
869 | ||
870 | void hists__filter_by_symbol(struct hists *hists) | |
871 | { | |
872 | struct rb_node *nd; | |
873 | ||
874 | hists->nr_entries = hists->stats.total_period = 0; | |
875 | hists->stats.nr_events[PERF_RECORD_SAMPLE] = 0; | |
876 | hists__reset_col_len(hists); | |
877 | ||
878 | for (nd = rb_first(&hists->entries); nd; nd = rb_next(nd)) { | |
879 | struct hist_entry *h = rb_entry(nd, struct hist_entry, rb_node); | |
880 | ||
881 | if (hists__filter_entry_by_symbol(hists, h)) | |
882 | continue; | |
883 | ||
884 | hists__remove_entry_filter(hists, h, HIST_FILTER__SYMBOL); | |
885 | } | |
886 | } | |
887 | ||
2f525d01 | 888 | int hist_entry__inc_addr_samples(struct hist_entry *he, int evidx, u64 ip) |
ef7b93a1 | 889 | { |
2f525d01 | 890 | return symbol__inc_addr_samples(he->ms.sym, he->ms.map, evidx, ip); |
ef7b93a1 ACM |
891 | } |
892 | ||
ce6f4fab | 893 | int hist_entry__annotate(struct hist_entry *he, size_t privsize) |
ef7b93a1 | 894 | { |
ce6f4fab | 895 | return symbol__annotate(he->ms.sym, he->ms.map, privsize); |
ef7b93a1 | 896 | } |
c8446b9b | 897 | |
28a6b6aa ACM |
898 | void events_stats__inc(struct events_stats *stats, u32 type) |
899 | { | |
900 | ++stats->nr_events[0]; | |
901 | ++stats->nr_events[type]; | |
902 | } | |
903 | ||
/* Record one event of @type in this hists' embedded stats counters. */
void hists__inc_nr_events(struct hists *hists, u32 type)
{
	events_stats__inc(&hists->stats, type);
}
95529be4 | 908 | |
494d70a1 ACM |
909 | static struct hist_entry *hists__add_dummy_entry(struct hists *hists, |
910 | struct hist_entry *pair) | |
911 | { | |
ce74f60e NK |
912 | struct rb_root *root; |
913 | struct rb_node **p; | |
494d70a1 ACM |
914 | struct rb_node *parent = NULL; |
915 | struct hist_entry *he; | |
916 | int cmp; | |
917 | ||
ce74f60e NK |
918 | if (sort__need_collapse) |
919 | root = &hists->entries_collapsed; | |
920 | else | |
921 | root = hists->entries_in; | |
922 | ||
923 | p = &root->rb_node; | |
924 | ||
494d70a1 ACM |
925 | while (*p != NULL) { |
926 | parent = *p; | |
ce74f60e | 927 | he = rb_entry(parent, struct hist_entry, rb_node_in); |
494d70a1 | 928 | |
ce74f60e | 929 | cmp = hist_entry__collapse(he, pair); |
494d70a1 ACM |
930 | |
931 | if (!cmp) | |
932 | goto out; | |
933 | ||
934 | if (cmp < 0) | |
935 | p = &(*p)->rb_left; | |
936 | else | |
937 | p = &(*p)->rb_right; | |
938 | } | |
939 | ||
940 | he = hist_entry__new(pair); | |
941 | if (he) { | |
30193d78 ACM |
942 | memset(&he->stat, 0, sizeof(he->stat)); |
943 | he->hists = hists; | |
ce74f60e NK |
944 | rb_link_node(&he->rb_node_in, parent, p); |
945 | rb_insert_color(&he->rb_node_in, root); | |
494d70a1 ACM |
946 | hists__inc_nr_entries(hists, he); |
947 | } | |
948 | out: | |
949 | return he; | |
950 | } | |
951 | ||
95529be4 ACM |
952 | static struct hist_entry *hists__find_entry(struct hists *hists, |
953 | struct hist_entry *he) | |
954 | { | |
ce74f60e NK |
955 | struct rb_node *n; |
956 | ||
957 | if (sort__need_collapse) | |
958 | n = hists->entries_collapsed.rb_node; | |
959 | else | |
960 | n = hists->entries_in->rb_node; | |
95529be4 ACM |
961 | |
962 | while (n) { | |
ce74f60e NK |
963 | struct hist_entry *iter = rb_entry(n, struct hist_entry, rb_node_in); |
964 | int64_t cmp = hist_entry__collapse(iter, he); | |
95529be4 ACM |
965 | |
966 | if (cmp < 0) | |
967 | n = n->rb_left; | |
968 | else if (cmp > 0) | |
969 | n = n->rb_right; | |
970 | else | |
971 | return iter; | |
972 | } | |
973 | ||
974 | return NULL; | |
975 | } | |
976 | ||
977 | /* | |
978 | * Look for pairs to link to the leader buckets (hist_entries): | |
979 | */ | |
980 | void hists__match(struct hists *leader, struct hists *other) | |
981 | { | |
ce74f60e | 982 | struct rb_root *root; |
95529be4 ACM |
983 | struct rb_node *nd; |
984 | struct hist_entry *pos, *pair; | |
985 | ||
ce74f60e NK |
986 | if (sort__need_collapse) |
987 | root = &leader->entries_collapsed; | |
988 | else | |
989 | root = leader->entries_in; | |
990 | ||
991 | for (nd = rb_first(root); nd; nd = rb_next(nd)) { | |
992 | pos = rb_entry(nd, struct hist_entry, rb_node_in); | |
95529be4 ACM |
993 | pair = hists__find_entry(other, pos); |
994 | ||
995 | if (pair) | |
5fa9041b | 996 | hist_entry__add_pair(pair, pos); |
95529be4 ACM |
997 | } |
998 | } | |
494d70a1 ACM |
999 | |
1000 | /* | |
1001 | * Look for entries in the other hists that are not present in the leader, if | |
1002 | * we find them, just add a dummy entry on the leader hists, with period=0, | |
1003 | * nr_events=0, to serve as the list header. | |
1004 | */ | |
1005 | int hists__link(struct hists *leader, struct hists *other) | |
1006 | { | |
ce74f60e | 1007 | struct rb_root *root; |
494d70a1 ACM |
1008 | struct rb_node *nd; |
1009 | struct hist_entry *pos, *pair; | |
1010 | ||
ce74f60e NK |
1011 | if (sort__need_collapse) |
1012 | root = &other->entries_collapsed; | |
1013 | else | |
1014 | root = other->entries_in; | |
1015 | ||
1016 | for (nd = rb_first(root); nd; nd = rb_next(nd)) { | |
1017 | pos = rb_entry(nd, struct hist_entry, rb_node_in); | |
494d70a1 ACM |
1018 | |
1019 | if (!hist_entry__has_pairs(pos)) { | |
1020 | pair = hists__add_dummy_entry(leader, pos); | |
1021 | if (pair == NULL) | |
1022 | return -1; | |
5fa9041b | 1023 | hist_entry__add_pair(pos, pair); |
494d70a1 ACM |
1024 | } |
1025 | } | |
1026 | ||
1027 | return 0; | |
1028 | } |