2 #include "util/debug.h"
3 #include "util/symbol.h"
5 #include "util/evsel.h"
6 #include "util/evlist.h"
7 #include "util/machine.h"
8 #include "util/thread.h"
9 #include "util/parse-events.h"
10 #include "tests/tests.h"
11 #include "tests/hists_common.h"
/* NOTE(review): field of struct sample (definition partly outside this view);
 * resolved thread/map/sym are cached back into each entry by add_hist_entries(). */
16 struct thread *thread;
21 /* For the numbers, see hists_common.c */
/*
 * Ten fake samples across three fake processes: two "perf" pids and one
 * "bash" pid.  Each entry pairs a pid with a fake instruction pointer;
 * the matching call chain lives at the same index in fake_callchains[].
 */
22 static struct sample fake_samples[] = {
23 /* perf [kernel] schedule() */
24 { .pid = FAKE_PID_PERF1, .ip = FAKE_IP_KERNEL_SCHEDULE, },
25 /* perf [perf] main() */
26 { .pid = FAKE_PID_PERF1, .ip = FAKE_IP_PERF_MAIN, },
27 /* perf [perf] cmd_record() */
28 { .pid = FAKE_PID_PERF1, .ip = FAKE_IP_PERF_CMD_RECORD, },
29 /* perf [libc] malloc() */
30 { .pid = FAKE_PID_PERF1, .ip = FAKE_IP_LIBC_MALLOC, },
31 /* perf [libc] free() */
32 { .pid = FAKE_PID_PERF1, .ip = FAKE_IP_LIBC_FREE, },
33 /* perf [perf] main() */
34 { .pid = FAKE_PID_PERF2, .ip = FAKE_IP_PERF_MAIN, },
35 /* perf [kernel] page_fault() */
36 { .pid = FAKE_PID_PERF2, .ip = FAKE_IP_KERNEL_PAGE_FAULT, },
37 /* bash [bash] main() */
38 { .pid = FAKE_PID_BASH, .ip = FAKE_IP_BASH_MAIN, },
39 /* bash [bash] xmalloc() */
40 { .pid = FAKE_PID_BASH, .ip = FAKE_IP_BASH_XMALLOC, },
41 /* bash [kernel] page_fault() */
42 { .pid = FAKE_PID_BASH, .ip = FAKE_IP_KERNEL_PAGE_FAULT, },
46 * Will be cast to struct ip_callchain which has all 64 bit entries
/*
 * Each row's first element is the number of chain entries that follow, so a
 * row can be cast to struct ip_callchain ({ nr; ips[] }) when attached to a
 * sample.  Rows correspond one-to-one with fake_samples[] above.
 */
49 static u64 fake_callchains[][10] = {
50 /* schedule => run_command => main */
51 { 3, FAKE_IP_KERNEL_SCHEDULE, FAKE_IP_PERF_RUN_COMMAND, FAKE_IP_PERF_MAIN, },
53 { 1, FAKE_IP_PERF_MAIN, },
54 /* cmd_record => run_command => main */
55 { 3, FAKE_IP_PERF_CMD_RECORD, FAKE_IP_PERF_RUN_COMMAND, FAKE_IP_PERF_MAIN, },
56 /* malloc => cmd_record => run_command => main */
57 { 4, FAKE_IP_LIBC_MALLOC, FAKE_IP_PERF_CMD_RECORD, FAKE_IP_PERF_RUN_COMMAND,
59 /* free => cmd_record => run_command => main */
60 { 4, FAKE_IP_LIBC_FREE, FAKE_IP_PERF_CMD_RECORD, FAKE_IP_PERF_RUN_COMMAND,
63 { 1, FAKE_IP_PERF_MAIN, },
64 /* page_fault => sys_perf_event_open => run_command => main */
65 { 4, FAKE_IP_KERNEL_PAGE_FAULT, FAKE_IP_KERNEL_SYS_PERF_EVENT_OPEN,
66 FAKE_IP_PERF_RUN_COMMAND, FAKE_IP_PERF_MAIN, },
68 { 1, FAKE_IP_BASH_MAIN, },
69 /* xmalloc => malloc => xmalloc => malloc => xmalloc => main */
70 { 6, FAKE_IP_BASH_XMALLOC, FAKE_IP_LIBC_MALLOC, FAKE_IP_BASH_XMALLOC,
71 FAKE_IP_LIBC_MALLOC, FAKE_IP_BASH_XMALLOC, FAKE_IP_BASH_MAIN, },
72 /* page_fault => malloc => main */
73 { 3, FAKE_IP_KERNEL_PAGE_FAULT, FAKE_IP_LIBC_MALLOC, FAKE_IP_BASH_MAIN, },
/*
 * Feed every fake_samples[] entry (with its fake_callchains[] chain) into
 * @hists.  The iterator ops are chosen from symbol_conf.cumulate_callchain,
 * so the same fixture exercises both the cumulative and the normal path.
 * On success the resolved thread/map/sym from each sample are cached back
 * into fake_samples[] so later checks can compare against them.
 */
76 static int add_hist_entries(struct hists *hists, struct machine *machine)
78 struct addr_location al;
79 struct perf_evsel *evsel = hists_to_evsel(hists);
/* every fake sample carries the same fixed period of 1000 */
80 struct perf_sample sample = { .period = 1000, };
83 for (i = 0; i < ARRAY_SIZE(fake_samples); i++) {
/* synthesize a user-space sample event for resolution below */
84 const union perf_event event = {
86 .misc = PERF_RECORD_MISC_USER,
89 struct hist_entry_iter iter = {
90 .hide_unresolved = false,
/* cumulative iterator accounts callchain parents ("children" mode) */
93 if (symbol_conf.cumulate_callchain)
94 iter.ops = &hist_iter_cumulative;
96 iter.ops = &hist_iter_normal;
/* tid is deliberately set to the pid for these single-thread fakes */
98 sample.pid = fake_samples[i].pid;
99 sample.tid = fake_samples[i].pid;
100 sample.ip = fake_samples[i].ip;
101 sample.callchain = (struct ip_callchain *)fake_callchains[i];
103 if (perf_event__preprocess_sample(&event, machine, &al,
107 if (hist_entry_iter__add(&iter, &al, evsel, &sample,
108 PERF_MAX_STACK_DEPTH, NULL) < 0) {
/* drop the reference taken by preprocess before bailing out */
109 addr_location__put(&al);
/* cache resolved location for later lookups against this sample */
113 fake_samples[i].thread = al.thread;
114 fake_samples[i].map = al.map;
115 fake_samples[i].sym = al.sym;
121 pr_debug("Not enough memory for adding a hist entry\n");
/*
 * Remove and free every hist entry from @hists so the next test case starts
 * from an empty tree.  Entries live in two rbtrees at once (the input/
 * collapsed tree and the sorted output tree), so each is erased from both
 * before being deleted.
 */
125 static void del_hist_entries(struct hists *hists)
127 struct hist_entry *he;
128 struct rb_root *root_in;
129 struct rb_root *root_out;
130 struct rb_node *node;
/* input tree location depends on whether collapsing was needed */
132 if (sort__need_collapse)
133 root_in = &hists->entries_collapsed;
135 root_in = hists->entries_in;
137 root_out = &hists->entries;
139 while (!RB_EMPTY_ROOT(root_out)) {
140 node = rb_first(root_out);
142 he = rb_entry(node, struct hist_entry, rb_node);
143 rb_erase(node, root_out);
/* rb_node_in links the same entry into the input tree */
144 rb_erase(&he->rb_node_in, root_in);
145 hist_entry__delete(he);
/* signature shared by the test1..test4 cases run from test__hists_cumulate() */
149 typedef int (*test_fn_t)(struct perf_evsel *, struct machine *);
/* shorthand accessors for hist_entry fields compared in do_test() */
151 #define COMM(he) (thread__comm_str(he->thread))
152 #define DSO(he) (he->ms.map->dso->short_name)
153 #define SYM(he) (he->ms.sym->name)
154 #define CPU(he) (he->cpu)
155 #define PID(he) (he->thread->tid)
156 #define DEPTH(he) (he->callchain->max_depth)
/* same accessors for a callchain_list node */
157 #define CDSO(cl) (cl->ms.map->dso->short_name)
158 #define CSYM(cl) (cl->ms.sym->name)
/* expected call chain for one hist entry: nr nodes of { dso, sym }
 * (full field list is outside this view) */
168 struct callchain_result {
/*
 * Resort @hists and verify each output entry against expected[] in order:
 * period (and cumulated period when cumulate_callchain is set), command,
 * dso and symbol name.  When use_callchain is set, also walk the first
 * callchain child of every entry and compare it against expected_callchain[].
 */
176 static int do_test(struct hists *hists, struct result *expected, size_t nr_expected,
177 struct callchain_result *expected_callchain, size_t nr_callchain)
181 struct hist_entry *he;
182 struct rb_root *root;
183 struct rb_node *node;
184 struct callchain_node *cnode;
185 struct callchain_list *clist;
188 * adding and deleting hist entries must be done outside of this
189 * function since TEST_ASSERT_VAL() returns in case of failure.
191 hists__collapse_resort(hists, NULL);
192 hists__output_resort(hists, NULL);
195 pr_info("use callchain: %d, cumulate callchain: %d\n",
196 symbol_conf.use_callchain,
197 symbol_conf.cumulate_callchain);
198 print_hists_out(hists);
/* walk sorted output entries in display order, pairing entry i with expected[i] */
201 root = &hists->entries;
202 for (node = rb_first(root), i = 0;
203 node && (he = rb_entry(node, struct hist_entry, rb_node));
204 node = rb_next(node), i++) {
205 scnprintf(buf, sizeof(buf), "Invalid hist entry #%zd", i);
207 TEST_ASSERT_VAL("Incorrect number of hist entry",
209 TEST_ASSERT_VAL(buf, he->stat.period == expected[i].self &&
210 !strcmp(COMM(he), expected[i].comm) &&
211 !strcmp(DSO(he), expected[i].dso) &&
212 !strcmp(SYM(he), expected[i].sym));
/* "children" column only exists in cumulate mode */
214 if (symbol_conf.cumulate_callchain)
215 TEST_ASSERT_VAL(buf, he->stat_acc->period == expected[i].children);
217 if (!symbol_conf.use_callchain)
220 /* check callchain entries */
221 root = &he->callchain->node.rb_root;
222 cnode = rb_entry(rb_first(root), struct callchain_node, rb_node);
225 list_for_each_entry(clist, &cnode->val, list) {
226 scnprintf(buf, sizeof(buf), "Invalid callchain entry #%zd/%zd", i, c);
228 TEST_ASSERT_VAL("Incorrect number of callchain entry",
229 c < expected_callchain[i].nr);
231 !strcmp(CDSO(clist), expected_callchain[i].node[c].dso) &&
232 !strcmp(CSYM(clist), expected_callchain[i].node[c].sym));
235 /* TODO: handle multiple child nodes properly */
236 TEST_ASSERT_VAL("Incorrect number of callchain entry",
237 c <= expected_callchain[i].nr);
/* finally check we consumed exactly the expected number of entries */
239 TEST_ASSERT_VAL("Incorrect number of hist entry",
241 TEST_ASSERT_VAL("Incorrect number of callchain entry",
242 !symbol_conf.use_callchain || nr_expected == nr_callchain);
246 /* NO callchain + NO children */
/*
 * Baseline case: plain self-overhead report.  With both use_callchain and
 * cumulate_callchain disabled, entries are expected sorted by self period
 * only (the two FAKE_IP_PERF_MAIN samples fold into one 2000-period entry).
 */
247 static int test1(struct perf_evsel *evsel, struct machine *machine)
250 struct hists *hists = evsel__hists(evsel);
254 * Overhead Command Shared Object Symbol
255 * ======== ======= ============= ==============
256 * 20.00% perf perf [.] main
257 * 10.00% bash [kernel] [k] page_fault
258 * 10.00% bash bash [.] main
259 * 10.00% bash bash [.] xmalloc
260 * 10.00% perf [kernel] [k] page_fault
261 * 10.00% perf [kernel] [k] schedule
262 * 10.00% perf libc [.] free
263 * 10.00% perf libc [.] malloc
264 * 10.00% perf perf [.] cmd_record
266 struct result expected[] = {
267 { 0, 2000, "perf", "perf", "main" },
268 { 0, 1000, "bash", "[kernel]", "page_fault" },
269 { 0, 1000, "bash", "bash", "main" },
270 { 0, 1000, "bash", "bash", "xmalloc" },
271 { 0, 1000, "perf", "[kernel]", "page_fault" },
272 { 0, 1000, "perf", "[kernel]", "schedule" },
273 { 0, 1000, "perf", "libc", "free" },
274 { 0, 1000, "perf", "libc", "malloc" },
275 { 0, 1000, "perf", "perf", "cmd_record" },
/* configure mode before adding entries; iter ops depend on these flags */
278 symbol_conf.use_callchain = false;
279 symbol_conf.cumulate_callchain = false;
282 callchain_register_param(&callchain_param);
284 err = add_hist_entries(hists, machine);
/* no callchain expectations in this mode */
288 err = do_test(hists, expected, ARRAY_SIZE(expected), NULL, 0);
291 del_hist_entries(hists);
292 reset_output_field();
296 /* callchain + NO children */
/*
 * Callchain enabled, accumulation disabled: same entry ordering and self
 * periods as test1, but each entry must now also carry the call chain
 * recorded in fake_callchains[] (checked via expected_callchain[]).
 */
297 static int test2(struct perf_evsel *evsel, struct machine *machine)
300 struct hists *hists = evsel__hists(evsel);
304 * Overhead Command Shared Object Symbol
305 * ======== ======= ============= ==============
306 * 20.00% perf perf [.] main
310 * 10.00% bash [kernel] [k] page_fault
316 * 10.00% bash bash [.] main
320 * 10.00% bash bash [.] xmalloc
324 * xmalloc <--- NOTE: there's a cycle
329 * 10.00% perf [kernel] [k] page_fault
332 * sys_perf_event_open
336 * 10.00% perf [kernel] [k] schedule
342 * 10.00% perf libc [.] free
349 * 10.00% perf libc [.] malloc
356 * 10.00% perf perf [.] cmd_record
363 struct result expected[] = {
364 { 0, 2000, "perf", "perf", "main" },
365 { 0, 1000, "bash", "[kernel]", "page_fault" },
366 { 0, 1000, "bash", "bash", "main" },
367 { 0, 1000, "bash", "bash", "xmalloc" },
368 { 0, 1000, "perf", "[kernel]", "page_fault" },
369 { 0, 1000, "perf", "[kernel]", "schedule" },
370 { 0, 1000, "perf", "libc", "free" },
371 { 0, 1000, "perf", "libc", "malloc" },
372 { 0, 1000, "perf", "perf", "cmd_record" },
/* one chain per expected entry, in the same order as expected[] */
374 struct callchain_result expected_callchain[] = {
376 1, { { "perf", "main" }, },
379 3, { { "[kernel]", "page_fault" },
380 { "libc", "malloc" },
381 { "bash", "main" }, },
384 1, { { "bash", "main" }, },
387 6, { { "bash", "xmalloc" },
388 { "libc", "malloc" },
389 { "bash", "xmalloc" },
390 { "libc", "malloc" },
391 { "bash", "xmalloc" },
392 { "bash", "main" }, },
395 4, { { "[kernel]", "page_fault" },
396 { "[kernel]", "sys_perf_event_open" },
397 { "perf", "run_command" },
398 { "perf", "main" }, },
401 3, { { "[kernel]", "schedule" },
402 { "perf", "run_command" },
403 { "perf", "main" }, },
406 4, { { "libc", "free" },
407 { "perf", "cmd_record" },
408 { "perf", "run_command" },
409 { "perf", "main" }, },
412 4, { { "libc", "malloc" },
413 { "perf", "cmd_record" },
414 { "perf", "run_command" },
415 { "perf", "main" }, },
418 3, { { "perf", "cmd_record" },
419 { "perf", "run_command" },
420 { "perf", "main" }, },
424 symbol_conf.use_callchain = true;
425 symbol_conf.cumulate_callchain = false;
428 callchain_register_param(&callchain_param);
430 err = add_hist_entries(hists, machine);
434 err = do_test(hists, expected, ARRAY_SIZE(expected),
435 expected_callchain, ARRAY_SIZE(expected_callchain));
438 del_hist_entries(hists);
439 reset_output_field();
443 /* NO callchain + children */
/*
 * Accumulation enabled, callchain display disabled: entries gain a
 * "children" period that includes callers (e.g. run_command appears with
 * 5000 accumulated but 0 self), and sorting is by children first.
 */
444 static int test3(struct perf_evsel *evsel, struct machine *machine)
447 struct hists *hists = evsel__hists(evsel);
451 * Children Self Command Shared Object Symbol
452 * ======== ======== ======= ============= =======================
453 * 70.00% 20.00% perf perf [.] main
454 * 50.00% 0.00% perf perf [.] run_command
455 * 30.00% 10.00% bash bash [.] main
456 * 30.00% 10.00% perf perf [.] cmd_record
457 * 20.00% 0.00% bash libc [.] malloc
458 * 10.00% 10.00% bash [kernel] [k] page_fault
459 * 10.00% 10.00% bash bash [.] xmalloc
460 * 10.00% 10.00% perf [kernel] [k] page_fault
461 * 10.00% 10.00% perf libc [.] malloc
462 * 10.00% 10.00% perf [kernel] [k] schedule
463 * 10.00% 10.00% perf libc [.] free
464 * 10.00% 0.00% perf [kernel] [k] sys_perf_event_open
466 struct result expected[] = {
467 { 7000, 2000, "perf", "perf", "main" },
468 { 5000, 0, "perf", "perf", "run_command" },
469 { 3000, 1000, "bash", "bash", "main" },
470 { 3000, 1000, "perf", "perf", "cmd_record" },
471 { 2000, 0, "bash", "libc", "malloc" },
472 { 1000, 1000, "bash", "[kernel]", "page_fault" },
473 { 1000, 1000, "bash", "bash", "xmalloc" },
474 { 1000, 1000, "perf", "[kernel]", "page_fault" },
475 { 1000, 1000, "perf", "[kernel]", "schedule" },
476 { 1000, 1000, "perf", "libc", "free" },
477 { 1000, 1000, "perf", "libc", "malloc" },
478 { 1000, 0, "perf", "[kernel]", "sys_perf_event_open" },
481 symbol_conf.use_callchain = false;
482 symbol_conf.cumulate_callchain = true;
485 callchain_register_param(&callchain_param);
487 err = add_hist_entries(hists, machine);
/* chains are accumulated but not displayed, so no callchain expectations */
491 err = do_test(hists, expected, ARRAY_SIZE(expected), NULL, 0);
494 del_hist_entries(hists);
495 reset_output_field();
499 /* callchain + children */
/*
 * Full mode: both accumulation and callchain display on.  Same entry set
 * as test3 plus a per-entry expected chain; note synthesized entries like
 * run_command / sys_perf_event_open (0 self) also carry chains.
 */
500 static int test4(struct perf_evsel *evsel, struct machine *machine)
503 struct hists *hists = evsel__hists(evsel);
507 * Children Self Command Shared Object Symbol
508 * ======== ======== ======= ============= =======================
509 * 70.00% 20.00% perf perf [.] main
513 * 50.00% 0.00% perf perf [.] run_command
518 * 30.00% 10.00% bash bash [.] main
522 * 30.00% 10.00% perf perf [.] cmd_record
528 * 20.00% 0.00% bash libc [.] malloc
532 * |--50.00%-- xmalloc
536 * 10.00% 10.00% bash [kernel] [k] page_fault
542 * 10.00% 10.00% bash bash [.] xmalloc
546 * xmalloc <--- NOTE: there's a cycle
551 * 10.00% 0.00% perf [kernel] [k] sys_perf_event_open
553 * --- sys_perf_event_open
557 * 10.00% 10.00% perf [kernel] [k] page_fault
560 * sys_perf_event_open
564 * 10.00% 10.00% perf [kernel] [k] schedule
570 * 10.00% 10.00% perf libc [.] free
577 * 10.00% 10.00% perf libc [.] malloc
585 struct result expected[] = {
586 { 7000, 2000, "perf", "perf", "main" },
587 { 5000, 0, "perf", "perf", "run_command" },
588 { 3000, 1000, "bash", "bash", "main" },
589 { 3000, 1000, "perf", "perf", "cmd_record" },
590 { 2000, 0, "bash", "libc", "malloc" },
591 { 1000, 1000, "bash", "[kernel]", "page_fault" },
592 { 1000, 1000, "bash", "bash", "xmalloc" },
593 { 1000, 0, "perf", "[kernel]", "sys_perf_event_open" },
594 { 1000, 1000, "perf", "[kernel]", "page_fault" },
595 { 1000, 1000, "perf", "[kernel]", "schedule" },
596 { 1000, 1000, "perf", "libc", "free" },
597 { 1000, 1000, "perf", "libc", "malloc" },
/* one chain per expected entry, in the same order as expected[] */
599 struct callchain_result expected_callchain[] = {
601 1, { { "perf", "main" }, },
604 2, { { "perf", "run_command" },
605 { "perf", "main" }, },
608 1, { { "bash", "main" }, },
611 3, { { "perf", "cmd_record" },
612 { "perf", "run_command" },
613 { "perf", "main" }, },
616 4, { { "libc", "malloc" },
617 { "bash", "xmalloc" },
619 { "bash", "main" }, },
622 3, { { "[kernel]", "page_fault" },
623 { "libc", "malloc" },
624 { "bash", "main" }, },
627 6, { { "bash", "xmalloc" },
628 { "libc", "malloc" },
629 { "bash", "xmalloc" },
630 { "libc", "malloc" },
631 { "bash", "xmalloc" },
632 { "bash", "main" }, },
635 3, { { "[kernel]", "sys_perf_event_open" },
636 { "perf", "run_command" },
637 { "perf", "main" }, },
640 4, { { "[kernel]", "page_fault" },
641 { "[kernel]", "sys_perf_event_open" },
642 { "perf", "run_command" },
643 { "perf", "main" }, },
646 3, { { "[kernel]", "schedule" },
647 { "perf", "run_command" },
648 { "perf", "main" }, },
651 4, { { "libc", "free" },
652 { "perf", "cmd_record" },
653 { "perf", "run_command" },
654 { "perf", "main" }, },
657 4, { { "libc", "malloc" },
658 { "perf", "cmd_record" },
659 { "perf", "run_command" },
660 { "perf", "main" }, },
664 symbol_conf.use_callchain = true;
665 symbol_conf.cumulate_callchain = true;
668 callchain_register_param(&callchain_param);
670 err = add_hist_entries(hists, machine);
674 err = do_test(hists, expected, ARRAY_SIZE(expected),
675 expected_callchain, ARRAY_SIZE(expected_callchain));
678 del_hist_entries(hists);
679 reset_output_field();
683 int test__hists_cumulate(void)
686 struct machines machines;
687 struct machine *machine;
688 struct perf_evsel *evsel;
689 struct perf_evlist *evlist = perf_evlist__new();
691 test_fn_t testcases[] = {
698 TEST_ASSERT_VAL("No memory", evlist);
700 err = parse_events(evlist, "cpu-clock", NULL);
704 machines__init(&machines);
706 /* setup threads/dso/map/symbols also */
707 machine = setup_fake_machine(&machines);
712 machine__fprintf(machine, stderr);
714 evsel = perf_evlist__first(evlist);
716 for (i = 0; i < ARRAY_SIZE(testcases); i++) {
717 err = testcases[i](evsel, machine);
723 /* tear down everything */
724 perf_evlist__delete(evlist);
725 machines__exit(&machines);