/* tools/perf/tests/hists_link.c */

#include "perf.h"
#include "tests.h"
#include "debug.h"
#include "symbol.h"
#include "sort.h"
#include "evsel.h"
#include "evlist.h"
#include "machine.h"
#include "thread.h"
#include "parse-events.h"

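/*
 * Fake input for the test: three threads (two "perf", one "bash") and the
 * maps each of them has loaded. Every map is placed at a fixed start
 * address so that sample IPs can be built as "map start + symbol offset"
 * further below.
 */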
static struct {
	u32 pid;
	const char *comm;
} fake_threads[] = {
	{ 100, "perf" },
	{ 200, "perf" },
	{ 300, "bash" },
};

static struct {
	u32 pid;
	u64 start;
	const char *filename;
} fake_mmap_info[] = {
	{ 100, 0x40000, "perf" },
	{ 100, 0x50000, "libc" },
	{ 100, 0xf0000, "[kernel]" },
	{ 200, 0x40000, "perf" },
	{ 200, 0x50000, "libc" },
	{ 200, 0xf0000, "[kernel]" },
	{ 300, 0x40000, "bash" },
	{ 300, 0x50000, "libc" },
	{ 300, 0xf0000, "[kernel]" },
};

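/*
 * Fake symbol tables: each DSO gets three 100-byte symbols starting at
 * offsets 700, 800 and 900 within its map.
 */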
struct fake_sym {
	u64 start;
	u64 length;
	const char *name;
};

static struct fake_sym perf_syms[] = {
	{ 700, 100, "main" },
	{ 800, 100, "run_command" },
	{ 900, 100, "cmd_record" },
};

static struct fake_sym bash_syms[] = {
	{ 700, 100, "main" },
	{ 800, 100, "xmalloc" },
	{ 900, 100, "xfree" },
};

static struct fake_sym libc_syms[] = {
	{ 700, 100, "malloc" },
	{ 800, 100, "free" },
	{ 900, 100, "realloc" },
};

static struct fake_sym kernel_syms[] = {
	{ 700, 100, "schedule" },
	{ 800, 100, "page_fault" },
	{ 900, 100, "sys_perf_event_open" },
};

static struct {
	const char *dso_name;
	struct fake_sym *syms;
	size_t nr_syms;
} fake_symbols[] = {
	{ "perf", perf_syms, ARRAY_SIZE(perf_syms) },
	{ "bash", bash_syms, ARRAY_SIZE(bash_syms) },
	{ "libc", libc_syms, ARRAY_SIZE(libc_syms) },
	{ "[kernel]", kernel_syms, ARRAY_SIZE(kernel_syms) },
};

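/*
 * Build the host machine for the test: create the fake threads, feed a
 * synthetic MMAP event for each fake mapping, and insert the fake symbols
 * into each DSO so that sample addresses resolve without touching any real
 * binaries.
 */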
static struct machine *setup_fake_machine(struct machines *machines)
{
	struct machine *machine = machines__find(machines, HOST_KERNEL_ID);
	size_t i;

	if (machine == NULL) {
		pr_debug("Not enough memory for machine setup\n");
		return NULL;
	}

	for (i = 0; i < ARRAY_SIZE(fake_threads); i++) {
		struct thread *thread;

		thread = machine__findnew_thread(machine, fake_threads[i].pid,
						 fake_threads[i].pid);
		if (thread == NULL)
			goto out;

		thread__set_comm(thread, fake_threads[i].comm, 0);
	}

	for (i = 0; i < ARRAY_SIZE(fake_mmap_info); i++) {
		union perf_event fake_mmap_event = {
			.mmap = {
				.header = { .misc = PERF_RECORD_MISC_USER, },
				.pid = fake_mmap_info[i].pid,
				.tid = fake_mmap_info[i].pid,
				.start = fake_mmap_info[i].start,
				.len = 0x1000ULL,
				.pgoff = 0ULL,
			},
		};

		strcpy(fake_mmap_event.mmap.filename,
		       fake_mmap_info[i].filename);

		machine__process_mmap_event(machine, &fake_mmap_event, NULL);
	}

	for (i = 0; i < ARRAY_SIZE(fake_symbols); i++) {
		size_t k;
		struct dso *dso;

		dso = __dsos__findnew(&machine->user_dsos,
				      fake_symbols[i].dso_name);
		if (dso == NULL)
			goto out;

		/* emulate dso__load() */
		dso__set_loaded(dso, MAP__FUNCTION);

		for (k = 0; k < fake_symbols[i].nr_syms; k++) {
			struct symbol *sym;
			struct fake_sym *fsym = &fake_symbols[i].syms[k];

			sym = symbol__new(fsym->start, fsym->length,
					  STB_GLOBAL, fsym->name);
			if (sym == NULL)
				goto out;

			symbols__insert(&dso->symbols[MAP__FUNCTION], sym);
		}
	}

	return machine;

out:
	pr_debug("Not enough memory for machine setup\n");
	machine__delete_threads(machine);
	machine__delete(machine);
	return NULL;
}

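/*
 * A test sample: .pid and .ip describe the event, while .thread, .map and
 * .sym are filled in once the sample has been resolved, so the validation
 * helpers can identify the corresponding hist entry later.
 */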
struct sample {
	u32 pid;
	u64 ip;
	struct thread *thread;
	struct map *map;
	struct symbol *sym;
};

static struct sample fake_common_samples[] = {
	/* perf [kernel] schedule() */
	{ .pid = 100, .ip = 0xf0000 + 700, },
	/* perf [perf]   main() */
	{ .pid = 200, .ip = 0x40000 + 700, },
	/* perf [perf]   cmd_record() */
	{ .pid = 200, .ip = 0x40000 + 900, },
	/* bash [bash]   xmalloc() */
	{ .pid = 300, .ip = 0x40000 + 800, },
	/* bash [libc]   malloc() */
	{ .pid = 300, .ip = 0x50000 + 700, },
};

static struct sample fake_samples[][5] = {
	{
		/* perf [perf]   run_command() */
		{ .pid = 100, .ip = 0x40000 + 800, },
		/* perf [libc]   malloc() */
		{ .pid = 100, .ip = 0x50000 + 700, },
		/* perf [kernel] page_fault() */
		{ .pid = 100, .ip = 0xf0000 + 800, },
		/* perf [kernel] sys_perf_event_open() */
		{ .pid = 200, .ip = 0xf0000 + 900, },
		/* bash [libc]   free() */
		{ .pid = 300, .ip = 0x50000 + 800, },
	},
	{
		/* perf [libc]   free() */
		{ .pid = 200, .ip = 0x50000 + 800, },
		/* bash [libc]   malloc() */
		{ .pid = 300, .ip = 0x50000 + 700, }, /* will be merged */
		/* bash [bash]   xfree() */
		{ .pid = 300, .ip = 0x40000 + 900, },
		/* bash [libc]   realloc() */
		{ .pid = 300, .ip = 0x50000 + 900, },
		/* bash [kernel] page_fault() */
		{ .pid = 300, .ip = 0xf0000 + 800, },
	},
};

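/*
 * Feed the fake samples into each evsel's hists: the common samples go to
 * both evsels, then each evsel gets its own set from fake_samples[].
 */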
static int add_hist_entries(struct perf_evlist *evlist, struct machine *machine)
{
	struct perf_evsel *evsel;
	struct addr_location al;
	struct hist_entry *he;
	struct perf_sample sample = { .cpu = 0, };
	size_t i = 0, k;

	/*
	 * Each evsel will have 10 samples: 5 common and 5 distinct.
	 * However, the second evsel also has a collapsed entry for
	 * "bash [libc] malloc", so only 9 entries will be in its tree.
	 */
	evlist__for_each(evlist, evsel) {
		for (k = 0; k < ARRAY_SIZE(fake_common_samples); k++) {
			const union perf_event event = {
				.header = {
					.misc = PERF_RECORD_MISC_USER,
				},
			};

			sample.pid = fake_common_samples[k].pid;
			sample.ip = fake_common_samples[k].ip;
			if (perf_event__preprocess_sample(&event, machine, &al,
							  &sample) < 0)
				goto out;

			he = __hists__add_entry(&evsel->hists, &al, NULL,
						NULL, NULL, 1, 1, 0);
			if (he == NULL)
				goto out;

			fake_common_samples[k].thread = al.thread;
			fake_common_samples[k].map = al.map;
			fake_common_samples[k].sym = al.sym;
		}

		for (k = 0; k < ARRAY_SIZE(fake_samples[i]); k++) {
			const union perf_event event = {
				.header = {
					.misc = PERF_RECORD_MISC_USER,
				},
			};

			sample.pid = fake_samples[i][k].pid;
			sample.ip = fake_samples[i][k].ip;
			if (perf_event__preprocess_sample(&event, machine, &al,
							  &sample) < 0)
				goto out;

			he = __hists__add_entry(&evsel->hists, &al, NULL,
						NULL, NULL, 1, 1, 0);
			if (he == NULL)
				goto out;

			fake_samples[i][k].thread = al.thread;
			fake_samples[i][k].map = al.map;
			fake_samples[i][k].sym = al.sym;
		}
		i++;
	}

	return 0;

out:
	pr_debug("Not enough memory for adding a hist entry\n");
	return -1;
}

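/* Linear search for a sample matching the resolved thread/map/symbol. */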
static int find_sample(struct sample *samples, size_t nr_samples,
		       struct thread *t, struct map *m, struct symbol *s)
{
	while (nr_samples--) {
		if (samples->thread == t && samples->map == m &&
		    samples->sym == s)
			return 1;
		samples++;
	}
	return 0;
}

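/*
 * Check the result of hists__match(): every paired entry must correspond to
 * one of the common samples, and all common samples must end up paired.
 */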
static int __validate_match(struct hists *hists)
{
	size_t count = 0;
	struct rb_root *root;
	struct rb_node *node;

	/*
	 * Only entries from fake_common_samples should have a pair.
	 */
	if (sort__need_collapse)
		root = &hists->entries_collapsed;
	else
		root = hists->entries_in;

	node = rb_first(root);
	while (node) {
		struct hist_entry *he;

		he = rb_entry(node, struct hist_entry, rb_node_in);

		if (hist_entry__has_pairs(he)) {
			if (find_sample(fake_common_samples,
					ARRAY_SIZE(fake_common_samples),
					he->thread, he->ms.map, he->ms.sym)) {
				count++;
			} else {
				pr_debug("Can't find the matched entry\n");
				return -1;
			}
		}

		node = rb_next(node);
	}

	if (count != ARRAY_SIZE(fake_common_samples)) {
		pr_debug("Invalid count for matched entries: %zd of %zd\n",
			 count, ARRAY_SIZE(fake_common_samples));
		return -1;
	}

	return 0;
}

static int validate_match(struct hists *leader, struct hists *other)
{
	return __validate_match(leader) || __validate_match(other);
}

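/*
 * Check the result of hists__link(): dummy entries may appear only in the
 * leader hists (idx = 0), while every entry in the other hists must have a
 * pair.
 */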
static int __validate_link(struct hists *hists, int idx)
{
	size_t count = 0;
	size_t count_pair = 0;
	size_t count_dummy = 0;
	struct rb_root *root;
	struct rb_node *node;

	/*
	 * Leader hists (idx = 0) will have dummy entries from the other
	 * hists, and some entries will have no pair. However, every entry
	 * in the other hists should have a (dummy) pair.
	 */
	if (sort__need_collapse)
		root = &hists->entries_collapsed;
	else
		root = hists->entries_in;

	node = rb_first(root);
	while (node) {
		struct hist_entry *he;

		he = rb_entry(node, struct hist_entry, rb_node_in);

		if (hist_entry__has_pairs(he)) {
			if (!find_sample(fake_common_samples,
					 ARRAY_SIZE(fake_common_samples),
					 he->thread, he->ms.map, he->ms.sym) &&
			    !find_sample(fake_samples[idx],
					 ARRAY_SIZE(fake_samples[idx]),
					 he->thread, he->ms.map, he->ms.sym)) {
				count_dummy++;
			}
			count_pair++;
		} else if (idx) {
			pr_debug("An entry from the other hists should have a pair\n");
			return -1;
		}

		count++;
		node = rb_next(node);
	}

	/*
	 * Note that we have an entry collapsed in the other (idx = 1) hists.
	 */
	if (idx == 0) {
		if (count_dummy != ARRAY_SIZE(fake_samples[1]) - 1) {
			pr_debug("Invalid count of dummy entries: %zd of %zd\n",
				 count_dummy, ARRAY_SIZE(fake_samples[1]) - 1);
			return -1;
		}
		if (count != count_pair + ARRAY_SIZE(fake_samples[0])) {
			pr_debug("Invalid count of total leader entries: %zd of %zd\n",
				 count, count_pair + ARRAY_SIZE(fake_samples[0]));
			return -1;
		}
	} else {
		if (count != count_pair) {
			pr_debug("Invalid count of total other entries: %zd of %zd\n",
				 count, count_pair);
			return -1;
		}
		if (count_dummy > 0) {
			pr_debug("Other hists should not have dummy entries: %zd\n",
				 count_dummy);
			return -1;
		}
	}

	return 0;
}

static int validate_link(struct hists *leader, struct hists *other)
{
	return __validate_link(leader, 0) || __validate_link(other, 1);
}

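/* Debug helper: dump the (collapsed) entries of a hists tree. */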
static void print_hists(struct hists *hists)
{
	int i = 0;
	struct rb_root *root;
	struct rb_node *node;

	if (sort__need_collapse)
		root = &hists->entries_collapsed;
	else
		root = hists->entries_in;

	pr_info("----- %s --------\n", __func__);
	node = rb_first(root);
	while (node) {
		struct hist_entry *he;

		he = rb_entry(node, struct hist_entry, rb_node_in);

		pr_info("%2d: entry: %-8s [%-8s] %20s: period = %"PRIu64"\n",
			i, thread__comm_str(he->thread), he->ms.map->dso->short_name,
			he->ms.sym->name, he->stat.period);

		i++;
		node = rb_next(node);
	}
}

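/*
 * The test itself: create two software events, populate their hists from
 * the fake machine, collapse them, then verify hists__match() and
 * hists__link() against the expected pairings.
 */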
int test__hists_link(void)
{
	int err = -1;
	struct machines machines;
	struct machine *machine = NULL;
	struct perf_evsel *evsel, *first;
	struct perf_evlist *evlist = perf_evlist__new();

	if (evlist == NULL)
		return -ENOMEM;

	err = parse_events(evlist, "cpu-clock");
	if (err)
		goto out;
	err = parse_events(evlist, "task-clock");
	if (err)
		goto out;

	/* default sort order (comm,dso,sym) will be used */
	if (setup_sorting() < 0)
		goto out;

	machines__init(&machines);

	/* setup threads/dso/map/symbols also */
	machine = setup_fake_machine(&machines);
	if (!machine)
		goto out;

	if (verbose > 1)
		machine__fprintf(machine, stderr);

	/* process sample events */
	err = add_hist_entries(evlist, machine);
	if (err < 0)
		goto out;

	evlist__for_each(evlist, evsel) {
		hists__collapse_resort(&evsel->hists, NULL);

		if (verbose > 2)
			print_hists(&evsel->hists);
	}

	first = perf_evlist__first(evlist);
	evsel = perf_evlist__last(evlist);

	/* match common entries */
	hists__match(&first->hists, &evsel->hists);
	err = validate_match(&first->hists, &evsel->hists);
	if (err)
		goto out;

	/* link common and/or dummy entries */
	hists__link(&first->hists, &evsel->hists);
	err = validate_link(&first->hists, &evsel->hists);
	if (err)
		goto out;

	err = 0;

out:
	/* tear down everything */
	perf_evlist__delete(evlist);
	machines__exit(&machines);

	return err;
}