/* tools/perf/tests/hists_link.c */

#include "perf.h"
#include "tests.h"
#include "debug.h"
#include "symbol.h"
#include "sort.h"
#include "evsel.h"
#include "evlist.h"
#include "machine.h"
#include "thread.h"
#include "parse-events.h"
#include "hists_common.h"

struct sample {
	u32 pid;
	u64 ip;
	struct thread *thread;
	struct map *map;
	struct symbol *sym;
};

/* For the numbers, see hists_common.c */
static struct sample fake_common_samples[] = {
	/* perf [kernel] schedule() */
	{ .pid = FAKE_PID_PERF1, .ip = FAKE_IP_KERNEL_SCHEDULE, },
	/* perf [perf] main() */
	{ .pid = FAKE_PID_PERF2, .ip = FAKE_IP_PERF_MAIN, },
	/* perf [perf] cmd_record() */
	{ .pid = FAKE_PID_PERF2, .ip = FAKE_IP_PERF_CMD_RECORD, },
	/* bash [bash] xmalloc() */
	{ .pid = FAKE_PID_BASH, .ip = FAKE_IP_BASH_XMALLOC, },
	/* bash [libc] malloc() */
	{ .pid = FAKE_PID_BASH, .ip = FAKE_IP_LIBC_MALLOC, },
};

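/*
 * Evsel-specific samples: fake_samples[0] is fed only to the first
 * (leader) evsel, fake_samples[1] only to the second one.
 */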
static struct sample fake_samples[][5] = {
	{
		/* perf [perf] run_command() */
		{ .pid = FAKE_PID_PERF1, .ip = FAKE_IP_PERF_RUN_COMMAND, },
		/* perf [libc] malloc() */
		{ .pid = FAKE_PID_PERF1, .ip = FAKE_IP_LIBC_MALLOC, },
		/* perf [kernel] page_fault() */
		{ .pid = FAKE_PID_PERF1, .ip = FAKE_IP_KERNEL_PAGE_FAULT, },
		/* perf [kernel] sys_perf_event_open() */
		{ .pid = FAKE_PID_PERF2, .ip = FAKE_IP_KERNEL_SYS_PERF_EVENT_OPEN, },
		/* bash [libc] free() */
		{ .pid = FAKE_PID_BASH, .ip = FAKE_IP_LIBC_FREE, },
	},
	{
		/* perf [libc] free() */
		{ .pid = FAKE_PID_PERF2, .ip = FAKE_IP_LIBC_FREE, },
		/* bash [libc] malloc() */
		{ .pid = FAKE_PID_BASH, .ip = FAKE_IP_LIBC_MALLOC, }, /* will be merged */
		/* bash [bash] xfree() */
		{ .pid = FAKE_PID_BASH, .ip = FAKE_IP_BASH_XFREE, },
		/* bash [libc] realloc() */
		{ .pid = FAKE_PID_BASH, .ip = FAKE_IP_LIBC_REALLOC, },
		/* bash [kernel] page_fault() */
		{ .pid = FAKE_PID_BASH, .ip = FAKE_IP_KERNEL_PAGE_FAULT, },
	},
};

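/*
 * Feed the fake samples into each evsel's hists: resolve every sample to
 * a thread/map/symbol with machine__resolve(), add it via
 * __hists__add_entry() and remember the resolved info for the checks below.
 */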
static int add_hist_entries(struct perf_evlist *evlist, struct machine *machine)
{
	struct perf_evsel *evsel;
	struct addr_location al;
	struct hist_entry *he;
	struct perf_sample sample = { .period = 1, .weight = 1, };
	size_t i = 0, k;

	/*
	 * Each evsel will have 10 samples - 5 common and 5 distinct.
	 * However the second evsel also has a collapsed entry for
	 * "bash [libc] malloc" so a total of 9 entries will be in the tree.
	 */
	evlist__for_each(evlist, evsel) {
		struct hists *hists = evsel__hists(evsel);

		for (k = 0; k < ARRAY_SIZE(fake_common_samples); k++) {
			sample.cpumode = PERF_RECORD_MISC_USER;
			sample.pid = fake_common_samples[k].pid;
			sample.tid = fake_common_samples[k].pid;
			sample.ip = fake_common_samples[k].ip;

			if (machine__resolve(machine, &al, &sample) < 0)
				goto out;

			he = __hists__add_entry(hists, &al, NULL,
						NULL, NULL, &sample, true);
			if (he == NULL) {
				addr_location__put(&al);
				goto out;
			}

			fake_common_samples[k].thread = al.thread;
			fake_common_samples[k].map = al.map;
			fake_common_samples[k].sym = al.sym;
		}

		for (k = 0; k < ARRAY_SIZE(fake_samples[i]); k++) {
			sample.pid = fake_samples[i][k].pid;
			sample.tid = fake_samples[i][k].pid;
			sample.ip = fake_samples[i][k].ip;
			if (machine__resolve(machine, &al, &sample) < 0)
				goto out;

			he = __hists__add_entry(hists, &al, NULL,
						NULL, NULL, &sample, true);
			if (he == NULL) {
				addr_location__put(&al);
				goto out;
			}

			fake_samples[i][k].thread = al.thread;
			fake_samples[i][k].map = al.map;
			fake_samples[i][k].sym = al.sym;
		}
		i++;
	}

	return 0;

out:
	pr_debug("Not enough memory for adding a hist entry\n");
	return -1;
}

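/*
 * Return 1 if the thread/map/symbol triple matches one of the recorded
 * fake samples, 0 otherwise.  The validators below use this to identify
 * which fake sample a hist entry came from.
 */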
static int find_sample(struct sample *samples, size_t nr_samples,
		       struct thread *t, struct map *m, struct symbol *s)
{
	while (nr_samples--) {
		if (samples->thread == t && samples->map == m &&
		    samples->sym == s)
			return 1;
		samples++;
	}
	return 0;
}

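/*
 * Check the result of hists__match(): every paired entry must come from
 * fake_common_samples and all of those must have been paired.
 */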
static int __validate_match(struct hists *hists)
{
	size_t count = 0;
	struct rb_root *root;
	struct rb_node *node;

	/*
	 * Only entries from fake_common_samples should have a pair.
	 */
	if (hists__has(hists, need_collapse))
		root = &hists->entries_collapsed;
	else
		root = hists->entries_in;

	node = rb_first(root);
	while (node) {
		struct hist_entry *he;

		he = rb_entry(node, struct hist_entry, rb_node_in);

		if (hist_entry__has_pairs(he)) {
			if (find_sample(fake_common_samples,
					ARRAY_SIZE(fake_common_samples),
					he->thread, he->ms.map, he->ms.sym)) {
				count++;
			} else {
				pr_debug("Can't find the matched entry\n");
				return -1;
			}
		}

		node = rb_next(node);
	}

	if (count != ARRAY_SIZE(fake_common_samples)) {
		pr_debug("Invalid count for matched entries: %zd of %zd\n",
			 count, ARRAY_SIZE(fake_common_samples));
		return -1;
	}

	return 0;
}

static int validate_match(struct hists *leader, struct hists *other)
{
	return __validate_match(leader) || __validate_match(other);
}

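/*
 * Check the result of hists__link(): the leader hists (idx = 0) gains
 * dummy entries for samples only present in the other hists, while the
 * other hists (idx = 1) must have a pair for every entry and no dummies.
 */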
static int __validate_link(struct hists *hists, int idx)
{
	size_t count = 0;
	size_t count_pair = 0;
	size_t count_dummy = 0;
	struct rb_root *root;
	struct rb_node *node;

	/*
	 * Leader hists (idx = 0) will have dummy entries from other,
	 * and some entries will have no pair. However every entry
	 * in other hists should have a (dummy) pair.
	 */
	if (hists__has(hists, need_collapse))
		root = &hists->entries_collapsed;
	else
		root = hists->entries_in;

	node = rb_first(root);
	while (node) {
		struct hist_entry *he;

		he = rb_entry(node, struct hist_entry, rb_node_in);

		if (hist_entry__has_pairs(he)) {
			if (!find_sample(fake_common_samples,
					 ARRAY_SIZE(fake_common_samples),
					 he->thread, he->ms.map, he->ms.sym) &&
			    !find_sample(fake_samples[idx],
					 ARRAY_SIZE(fake_samples[idx]),
					 he->thread, he->ms.map, he->ms.sym)) {
				count_dummy++;
			}
			count_pair++;
		} else if (idx) {
			pr_debug("An entry from the other hists should have a pair\n");
			return -1;
		}

		count++;
		node = rb_next(node);
	}

	/*
	 * Note that we have an entry collapsed in the other (idx = 1) hists.
	 */
	if (idx == 0) {
		if (count_dummy != ARRAY_SIZE(fake_samples[1]) - 1) {
			pr_debug("Invalid count of dummy entries: %zd of %zd\n",
				 count_dummy, ARRAY_SIZE(fake_samples[1]) - 1);
			return -1;
		}
		if (count != count_pair + ARRAY_SIZE(fake_samples[0])) {
			pr_debug("Invalid count of total leader entries: %zd of %zd\n",
				 count, count_pair + ARRAY_SIZE(fake_samples[0]));
			return -1;
		}
	} else {
		if (count != count_pair) {
			pr_debug("Invalid count of total other entries: %zd of %zd\n",
				 count, count_pair);
			return -1;
		}
		if (count_dummy > 0) {
			pr_debug("Other hists should not have dummy entries: %zd\n",
				 count_dummy);
			return -1;
		}
	}

	return 0;
}

static int validate_link(struct hists *leader, struct hists *other)
{
	return __validate_link(leader, 0) || __validate_link(other, 1);
}

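/*
 * The test body: create two evsels (cpu-clock and task-clock) on a fake
 * machine, add the fake hist entries to both, then check that
 * hists__match() pairs only the common entries and that hists__link()
 * adds the expected dummy entries.
 */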
int test__hists_link(int subtest __maybe_unused)
{
	int err = -1;
	struct hists *hists, *first_hists;
	struct machines machines;
	struct machine *machine = NULL;
	struct perf_evsel *evsel, *first;
	struct perf_evlist *evlist = perf_evlist__new();

	if (evlist == NULL)
		return -ENOMEM;

	err = parse_events(evlist, "cpu-clock", NULL);
	if (err)
		goto out;
	err = parse_events(evlist, "task-clock", NULL);
	if (err)
		goto out;

	err = TEST_FAIL;
	/* default sort order (comm,dso,sym) will be used */
	if (setup_sorting(NULL) < 0)
		goto out;

	machines__init(&machines);

	/* setup threads/dso/map/symbols also */
	machine = setup_fake_machine(&machines);
	if (!machine)
		goto out;

	if (verbose > 1)
		machine__fprintf(machine, stderr);

	/* process sample events */
	err = add_hist_entries(evlist, machine);
	if (err < 0)
		goto out;

	evlist__for_each(evlist, evsel) {
		hists = evsel__hists(evsel);
		hists__collapse_resort(hists, NULL);

		if (verbose > 2)
			print_hists_in(hists);
	}

	first = perf_evlist__first(evlist);
	evsel = perf_evlist__last(evlist);

	first_hists = evsel__hists(first);
	hists = evsel__hists(evsel);

	/* match common entries */
	hists__match(first_hists, hists);
	err = validate_match(first_hists, hists);
	if (err)
		goto out;

	/* link common and/or dummy entries */
	hists__link(first_hists, hists);
	err = validate_link(first_hists, hists);
	if (err)
		goto out;

	err = 0;

out:
	/* tear down everything */
	perf_evlist__delete(evlist);
	reset_output_field();
	machines__exit(&machines);

	return err;
}