perf tools: Use atomic_t to implement thread__{get,put} refcnt
[linux-2.6-block.git] / tools / perf / tests / hists_link.c
CommitLineData
f8ebb0cd
NK
1#include "perf.h"
2#include "tests.h"
3#include "debug.h"
4#include "symbol.h"
5#include "sort.h"
6#include "evsel.h"
7#include "evlist.h"
8#include "machine.h"
9#include "thread.h"
10#include "parse-events.h"
6e344a95 11#include "hists_common.h"
f8ebb0cd
NK
12
/*
 * A fake input sample fed to the hist machinery.  pid/ip are set
 * statically below; thread/map/sym are filled in later by
 * add_hist_entries() with the resolved addr_location, so the
 * validation helpers can map hist entries back to their sample.
 */
struct sample {
	u32 pid;		/* fake pid (FAKE_PID_*, see hists_common.c) */
	u64 ip;			/* fake ip  (FAKE_IP_*,  see hists_common.c) */
	struct thread *thread;	/* resolved by add_hist_entries() */
	struct map *map;	/* resolved by add_hist_entries() */
	struct symbol *sym;	/* resolved by add_hist_entries() */
};
20
6e344a95 21/* For the numbers, see hists_common.c */
f8ebb0cd
NK
/*
 * Samples added to *both* fake evsels.  The hist entries created
 * from these are the ones expected to be paired by hists__match()
 * (checked in validate_match() below).
 */
static struct sample fake_common_samples[] = {
	/* perf [kernel] schedule() */
	{ .pid = FAKE_PID_PERF1, .ip = FAKE_IP_KERNEL_SCHEDULE, },
	/* perf [perf]   main() */
	{ .pid = FAKE_PID_PERF2, .ip = FAKE_IP_PERF_MAIN, },
	/* perf [perf]   cmd_record() */
	{ .pid = FAKE_PID_PERF2, .ip = FAKE_IP_PERF_CMD_RECORD, },
	/* bash [bash]   xmalloc() */
	{ .pid = FAKE_PID_BASH, .ip = FAKE_IP_BASH_XMALLOC, },
	/* bash [libc]   malloc() */
	{ .pid = FAKE_PID_BASH, .ip = FAKE_IP_LIBC_MALLOC, },
};
34
/*
 * Samples unique to each evsel: fake_samples[0] goes only to the
 * first (leader) evsel, fake_samples[1] only to the second.  These
 * entries must NOT be paired by hists__match(); hists__link() turns
 * the fake_samples[1] entries into dummy entries in the leader.
 */
static struct sample fake_samples[][5] = {
	{
		/* perf [perf]   run_command() */
		{ .pid = FAKE_PID_PERF1, .ip = FAKE_IP_PERF_RUN_COMMAND, },
		/* perf [libc]   malloc() */
		{ .pid = FAKE_PID_PERF1, .ip = FAKE_IP_LIBC_MALLOC, },
		/* perf [kernel] page_fault() */
		{ .pid = FAKE_PID_PERF1, .ip = FAKE_IP_KERNEL_PAGE_FAULT, },
		/* perf [kernel] sys_perf_event_open() */
		{ .pid = FAKE_PID_PERF2, .ip = FAKE_IP_KERNEL_SYS_PERF_EVENT_OPEN, },
		/* bash [libc]   free() */
		{ .pid = FAKE_PID_BASH, .ip = FAKE_IP_LIBC_FREE, },
	},
	{
		/* perf [libc]   free() */
		{ .pid = FAKE_PID_PERF2, .ip = FAKE_IP_LIBC_FREE, },
		/* bash [libc]   malloc() */
		{ .pid = FAKE_PID_BASH, .ip = FAKE_IP_LIBC_MALLOC, }, /* will be merged */
		/* bash [bash]   xfree() */
		{ .pid = FAKE_PID_BASH, .ip = FAKE_IP_BASH_XFREE, },
		/* bash [libc]   realloc() */
		{ .pid = FAKE_PID_BASH, .ip = FAKE_IP_LIBC_REALLOC, },
		/* bash [kernel] page_fault() */
		{ .pid = FAKE_PID_BASH, .ip = FAKE_IP_KERNEL_PAGE_FAULT, },
	},
};
61
62static int add_hist_entries(struct perf_evlist *evlist, struct machine *machine)
63{
64 struct perf_evsel *evsel;
65 struct addr_location al;
66 struct hist_entry *he;
a1891aa4 67 struct perf_sample sample = { .period = 1, };
f8ebb0cd
NK
68 size_t i = 0, k;
69
70 /*
71 * each evsel will have 10 samples - 5 common and 5 distinct.
72 * However the second evsel also has a collapsed entry for
73 * "bash [libc] malloc" so total 9 entries will be in the tree.
74 */
0050f7aa 75 evlist__for_each(evlist, evsel) {
4ea062ed
ACM
76 struct hists *hists = evsel__hists(evsel);
77
f8ebb0cd
NK
78 for (k = 0; k < ARRAY_SIZE(fake_common_samples); k++) {
79 const union perf_event event = {
ef89325f
AH
80 .header = {
81 .misc = PERF_RECORD_MISC_USER,
f8ebb0cd
NK
82 },
83 };
84
ef89325f 85 sample.pid = fake_common_samples[k].pid;
13ce34df 86 sample.tid = fake_common_samples[k].pid;
ef89325f 87 sample.ip = fake_common_samples[k].ip;
f8ebb0cd 88 if (perf_event__preprocess_sample(&event, machine, &al,
e44baa3e 89 &sample) < 0)
f8ebb0cd
NK
90 goto out;
91
4ea062ed 92 he = __hists__add_entry(hists, &al, NULL,
a0b51af3 93 NULL, NULL, 1, 1, 0, true);
f8ebb0cd
NK
94 if (he == NULL)
95 goto out;
96
97 fake_common_samples[k].thread = al.thread;
98 fake_common_samples[k].map = al.map;
99 fake_common_samples[k].sym = al.sym;
100 }
101
102 for (k = 0; k < ARRAY_SIZE(fake_samples[i]); k++) {
103 const union perf_event event = {
ef89325f
AH
104 .header = {
105 .misc = PERF_RECORD_MISC_USER,
f8ebb0cd
NK
106 },
107 };
108
ef89325f 109 sample.pid = fake_samples[i][k].pid;
13ce34df 110 sample.tid = fake_samples[i][k].pid;
ef89325f 111 sample.ip = fake_samples[i][k].ip;
f8ebb0cd 112 if (perf_event__preprocess_sample(&event, machine, &al,
e44baa3e 113 &sample) < 0)
f8ebb0cd
NK
114 goto out;
115
4ea062ed 116 he = __hists__add_entry(hists, &al, NULL,
a0b51af3 117 NULL, NULL, 1, 1, 0, true);
f8ebb0cd
NK
118 if (he == NULL)
119 goto out;
120
121 fake_samples[i][k].thread = al.thread;
122 fake_samples[i][k].map = al.map;
123 fake_samples[i][k].sym = al.sym;
124 }
125 i++;
126 }
127
128 return 0;
129
130out:
131 pr_debug("Not enough memory for adding a hist entry\n");
132 return -1;
133}
134
135static int find_sample(struct sample *samples, size_t nr_samples,
136 struct thread *t, struct map *m, struct symbol *s)
137{
138 while (nr_samples--) {
139 if (samples->thread == t && samples->map == m &&
140 samples->sym == s)
141 return 1;
142 samples++;
143 }
144 return 0;
145}
146
147static int __validate_match(struct hists *hists)
148{
149 size_t count = 0;
150 struct rb_root *root;
151 struct rb_node *node;
152
153 /*
154 * Only entries from fake_common_samples should have a pair.
155 */
156 if (sort__need_collapse)
157 root = &hists->entries_collapsed;
158 else
159 root = hists->entries_in;
160
161 node = rb_first(root);
162 while (node) {
163 struct hist_entry *he;
164
165 he = rb_entry(node, struct hist_entry, rb_node_in);
166
167 if (hist_entry__has_pairs(he)) {
168 if (find_sample(fake_common_samples,
169 ARRAY_SIZE(fake_common_samples),
170 he->thread, he->ms.map, he->ms.sym)) {
171 count++;
172 } else {
173 pr_debug("Can't find the matched entry\n");
174 return -1;
175 }
176 }
177
178 node = rb_next(node);
179 }
180
181 if (count != ARRAY_SIZE(fake_common_samples)) {
182 pr_debug("Invalid count for matched entries: %zd of %zd\n",
183 count, ARRAY_SIZE(fake_common_samples));
184 return -1;
185 }
186
187 return 0;
188}
189
/*
 * Run the match validation on both hists.  Returns 0 when both pass,
 * 1 (non-zero, like the original short-circuit '||') otherwise.
 */
static int validate_match(struct hists *leader, struct hists *other)
{
	if (__validate_match(leader))
		return 1;

	return __validate_match(other) ? 1 : 0;
}
194
/*
 * Verify the result of hists__link() on one hists.
 *
 * @idx selects which hists is being checked: 0 for the leader
 * (first evsel), 1 for the other evsel.  The expected invariants
 * differ between the two (see the comments inside).
 *
 * Returns 0 on success, -1 on any inconsistency.
 */
static int __validate_link(struct hists *hists, int idx)
{
	size_t count = 0;	/* total entries walked */
	size_t count_pair = 0;	/* entries that have a pair */
	size_t count_dummy = 0;	/* paired entries matching no known sample */
	struct rb_root *root;
	struct rb_node *node;

	/*
	 * Leader hists (idx = 0) will have dummy entries from other,
	 * and some entries will have no pair.  However every entry
	 * in other hists should have (dummy) pair.
	 */
	if (sort__need_collapse)
		root = &hists->entries_collapsed;
	else
		root = hists->entries_in;

	node = rb_first(root);
	while (node) {
		struct hist_entry *he;

		he = rb_entry(node, struct hist_entry, rb_node_in);

		if (hist_entry__has_pairs(he)) {
			/*
			 * A paired entry that matches neither the common
			 * samples nor this hists' own sample set must be
			 * a dummy entry inserted from the other hists.
			 */
			if (!find_sample(fake_common_samples,
					 ARRAY_SIZE(fake_common_samples),
					 he->thread, he->ms.map, he->ms.sym) &&
			    !find_sample(fake_samples[idx],
					 ARRAY_SIZE(fake_samples[idx]),
					 he->thread, he->ms.map, he->ms.sym)) {
				count_dummy++;
			}
			count_pair++;
		} else if (idx) {
			/* in the other hists, an unpaired entry is a bug */
			pr_debug("A entry from the other hists should have pair\n");
			return -1;
		}

		count++;
		node = rb_next(node);
	}

	/*
	 * Note that we have a entry collapsed in the other (idx = 1) hists.
	 */
	if (idx == 0) {
		/*
		 * Leader: one dummy per entry unique to the other hists,
		 * minus the one that collapsed ("bash [libc] malloc").
		 */
		if (count_dummy != ARRAY_SIZE(fake_samples[1]) - 1) {
			pr_debug("Invalid count of dummy entries: %zd of %zd\n",
				 count_dummy, ARRAY_SIZE(fake_samples[1]) - 1);
			return -1;
		}
		/* total = paired entries + the leader's own unpaired ones */
		if (count != count_pair + ARRAY_SIZE(fake_samples[0])) {
			pr_debug("Invalid count of total leader entries: %zd of %zd\n",
				 count, count_pair + ARRAY_SIZE(fake_samples[0]));
			return -1;
		}
	} else {
		/* other hists: everything paired, nothing dummy */
		if (count != count_pair) {
			pr_debug("Invalid count of total other entries: %zd of %zd\n",
				 count, count_pair);
			return -1;
		}
		if (count_dummy > 0) {
			pr_debug("Other hists should not have dummy entries: %zd\n",
				 count_dummy);
			return -1;
		}
	}

	return 0;
}
267
/*
 * Run the link validation on both hists.  Returns 0 when both pass,
 * 1 (non-zero, like the original short-circuit '||') otherwise.
 */
static int validate_link(struct hists *leader, struct hists *other)
{
	if (__validate_link(leader, 0))
		return 1;

	return __validate_link(other, 1) ? 1 : 0;
}
272
f8ebb0cd
NK
273int test__hists_link(void)
274{
275 int err = -1;
4ea062ed 276 struct hists *hists, *first_hists;
876650e6 277 struct machines machines;
f8ebb0cd
NK
278 struct machine *machine = NULL;
279 struct perf_evsel *evsel, *first;
334fe7a3 280 struct perf_evlist *evlist = perf_evlist__new();
f8ebb0cd
NK
281
282 if (evlist == NULL)
283 return -ENOMEM;
284
b39b8393 285 err = parse_events(evlist, "cpu-clock", NULL);
f8ebb0cd
NK
286 if (err)
287 goto out;
b39b8393 288 err = parse_events(evlist, "task-clock", NULL);
f8ebb0cd
NK
289 if (err)
290 goto out;
291
292 /* default sort order (comm,dso,sym) will be used */
55309985
NK
293 if (setup_sorting() < 0)
294 goto out;
f8ebb0cd 295
876650e6
ACM
296 machines__init(&machines);
297
f8ebb0cd 298 /* setup threads/dso/map/symbols also */
876650e6 299 machine = setup_fake_machine(&machines);
f8ebb0cd
NK
300 if (!machine)
301 goto out;
302
303 if (verbose > 1)
304 machine__fprintf(machine, stderr);
305
306 /* process sample events */
307 err = add_hist_entries(evlist, machine);
308 if (err < 0)
309 goto out;
310
0050f7aa 311 evlist__for_each(evlist, evsel) {
4ea062ed
ACM
312 hists = evsel__hists(evsel);
313 hists__collapse_resort(hists, NULL);
f8ebb0cd
NK
314
315 if (verbose > 2)
4ea062ed 316 print_hists_in(hists);
f8ebb0cd
NK
317 }
318
319 first = perf_evlist__first(evlist);
320 evsel = perf_evlist__last(evlist);
321
4ea062ed
ACM
322 first_hists = evsel__hists(first);
323 hists = evsel__hists(evsel);
324
f8ebb0cd 325 /* match common entries */
4ea062ed
ACM
326 hists__match(first_hists, hists);
327 err = validate_match(first_hists, hists);
f8ebb0cd
NK
328 if (err)
329 goto out;
330
331 /* link common and/or dummy entries */
4ea062ed
ACM
332 hists__link(first_hists, hists);
333 err = validate_link(first_hists, hists);
f8ebb0cd
NK
334 if (err)
335 goto out;
336
337 err = 0;
338
339out:
340 /* tear down everything */
341 perf_evlist__delete(evlist);
f21d1815 342 reset_output_field();
876650e6 343 machines__exit(&machines);
f8ebb0cd
NK
344
345 return err;
346}