perf evlist: Fix event ID retrieval for group format read case
tools/perf/util/evlist.c
/*
 * Copyright (C) 2011, Red Hat Inc, Arnaldo Carvalho de Melo <acme@redhat.com>
 *
 * Parts came from builtin-{top,stat,record}.c, see those files for further
 * copyright notes.
 *
 * Released under the GPL v2. (and only v2, not any later version)
 */
#include "util.h"
#include <lk/debugfs.h>
#include <poll.h>
#include "cpumap.h"
#include "thread_map.h"
#include "target.h"
#include "evlist.h"
#include "evsel.h"
#include <unistd.h>

#include "parse-events.h"

#include <sys/mman.h>

#include <linux/bitops.h>
#include <linux/hash.h>

#define FD(e, x, y) (*(int *)xyarray__entry(e->fd, x, y))
#define SID(e, x, y) xyarray__entry(e->sample_id, x, y)

void perf_evlist__init(struct perf_evlist *evlist, struct cpu_map *cpus,
		       struct thread_map *threads)
{
	int i;

	for (i = 0; i < PERF_EVLIST__HLIST_SIZE; ++i)
		INIT_HLIST_HEAD(&evlist->heads[i]);
	INIT_LIST_HEAD(&evlist->entries);
	perf_evlist__set_maps(evlist, cpus, threads);
	evlist->workload.pid = -1;
}

struct perf_evlist *perf_evlist__new(void)
{
	struct perf_evlist *evlist = zalloc(sizeof(*evlist));

	if (evlist != NULL)
		perf_evlist__init(evlist, NULL, NULL);

	return evlist;
}

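/*
 * Example (sketch, not part of this file): the usual lifecycle pairs
 * perf_evlist__new() with perf_evlist__delete(); perf_evlist__add_default()
 * below adds a single "cycles" counter:
 *
 *	struct perf_evlist *evlist = perf_evlist__new();
 *
 *	if (evlist == NULL)
 *		return -ENOMEM;
 *	if (perf_evlist__add_default(evlist))
 *		goto out_delete;
 *	...
 * out_delete:
 *	perf_evlist__delete(evlist);
 */
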
void perf_evlist__config(struct perf_evlist *evlist,
			 struct perf_record_opts *opts)
{
	struct perf_evsel *evsel;
	/*
	 * Set the evsel leader links before we configure attributes,
	 * since some might depend on this info.
	 */
	if (opts->group)
		perf_evlist__set_leader(evlist);

	if (evlist->cpus->map[0] < 0)
		opts->no_inherit = true;

	list_for_each_entry(evsel, &evlist->entries, node) {
		perf_evsel__config(evsel, opts);

		if (evlist->nr_entries > 1)
			perf_evsel__set_sample_id(evsel);
	}
}

static void perf_evlist__purge(struct perf_evlist *evlist)
{
	struct perf_evsel *pos, *n;

	list_for_each_entry_safe(pos, n, &evlist->entries, node) {
		list_del_init(&pos->node);
		perf_evsel__delete(pos);
	}

	evlist->nr_entries = 0;
}

void perf_evlist__exit(struct perf_evlist *evlist)
{
	free(evlist->mmap);
	free(evlist->pollfd);
	evlist->mmap = NULL;
	evlist->pollfd = NULL;
}

void perf_evlist__delete(struct perf_evlist *evlist)
{
	perf_evlist__purge(evlist);
	perf_evlist__exit(evlist);
	free(evlist);
}

void perf_evlist__add(struct perf_evlist *evlist, struct perf_evsel *entry)
{
	list_add_tail(&entry->node, &evlist->entries);
	++evlist->nr_entries;
}

void perf_evlist__splice_list_tail(struct perf_evlist *evlist,
				   struct list_head *list,
				   int nr_entries)
{
	list_splice_tail(list, &evlist->entries);
	evlist->nr_entries += nr_entries;
}

void __perf_evlist__set_leader(struct list_head *list)
{
	struct perf_evsel *evsel, *leader;

	leader = list_entry(list->next, struct perf_evsel, node);
	evsel = list_entry(list->prev, struct perf_evsel, node);

	leader->nr_members = evsel->idx - leader->idx + 1;

	list_for_each_entry(evsel, list, node) {
		evsel->leader = leader;
	}
}

void perf_evlist__set_leader(struct perf_evlist *evlist)
{
	if (evlist->nr_entries) {
		evlist->nr_groups = evlist->nr_entries > 1 ? 1 : 0;
		__perf_evlist__set_leader(&evlist->entries);
	}
}

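/*
 * Example (sketch): with "cycles" and "instructions" in the list,
 * perf_evlist__set_leader() makes "cycles" the leader of both evsels
 * (leader->nr_members == 2, evlist->nr_groups == 1); a single-event
 * list keeps nr_groups == 0 and the event just leads itself.
 */
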
int perf_evlist__add_default(struct perf_evlist *evlist)
{
	struct perf_event_attr attr = {
		.type = PERF_TYPE_HARDWARE,
		.config = PERF_COUNT_HW_CPU_CYCLES,
	};
	struct perf_evsel *evsel;

	event_attr_init(&attr);

	evsel = perf_evsel__new(&attr, 0);
	if (evsel == NULL)
		goto error;

	/* use strdup() because free(evsel) assumes name is allocated */
	evsel->name = strdup("cycles");
	if (!evsel->name)
		goto error_free;

	perf_evlist__add(evlist, evsel);
	return 0;
error_free:
	perf_evsel__delete(evsel);
error:
	return -ENOMEM;
}

static int perf_evlist__add_attrs(struct perf_evlist *evlist,
				  struct perf_event_attr *attrs, size_t nr_attrs)
{
	struct perf_evsel *evsel, *n;
	LIST_HEAD(head);
	size_t i;

	for (i = 0; i < nr_attrs; i++) {
		evsel = perf_evsel__new(attrs + i, evlist->nr_entries + i);
		if (evsel == NULL)
			goto out_delete_partial_list;
		list_add_tail(&evsel->node, &head);
	}

	perf_evlist__splice_list_tail(evlist, &head, nr_attrs);

	return 0;

out_delete_partial_list:
	list_for_each_entry_safe(evsel, n, &head, node)
		perf_evsel__delete(evsel);
	return -1;
}

int __perf_evlist__add_default_attrs(struct perf_evlist *evlist,
				     struct perf_event_attr *attrs, size_t nr_attrs)
{
	size_t i;

	for (i = 0; i < nr_attrs; i++)
		event_attr_init(attrs + i);

	return perf_evlist__add_attrs(evlist, attrs, nr_attrs);
}

struct perf_evsel *
perf_evlist__find_tracepoint_by_id(struct perf_evlist *evlist, int id)
{
	struct perf_evsel *evsel;

	list_for_each_entry(evsel, &evlist->entries, node) {
		if (evsel->attr.type == PERF_TYPE_TRACEPOINT &&
		    (int)evsel->attr.config == id)
			return evsel;
	}

	return NULL;
}

int perf_evlist__add_newtp(struct perf_evlist *evlist,
			   const char *sys, const char *name, void *handler)
{
	struct perf_evsel *evsel;

	evsel = perf_evsel__newtp(sys, name, evlist->nr_entries);
	if (evsel == NULL)
		return -1;

	evsel->handler.func = handler;
	perf_evlist__add(evlist, evsel);
	return 0;
}

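/*
 * Example (sketch): hooking a tracepoint; the handler name is
 * hypothetical, only its address is stored in evsel->handler.func:
 *
 *	if (perf_evlist__add_newtp(evlist, "sched", "sched_switch",
 *				   process_sched_switch) < 0)
 *		return -1;
 */
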
void perf_evlist__disable(struct perf_evlist *evlist)
{
	int cpu, thread;
	struct perf_evsel *pos;
	int nr_cpus = cpu_map__nr(evlist->cpus);
	int nr_threads = thread_map__nr(evlist->threads);

	for (cpu = 0; cpu < nr_cpus; cpu++) {
		list_for_each_entry(pos, &evlist->entries, node) {
			if (!perf_evsel__is_group_leader(pos))
				continue;
			for (thread = 0; thread < nr_threads; thread++)
				ioctl(FD(pos, cpu, thread),
				      PERF_EVENT_IOC_DISABLE, 0);
		}
	}
}

void perf_evlist__enable(struct perf_evlist *evlist)
{
	int cpu, thread;
	struct perf_evsel *pos;
	int nr_cpus = cpu_map__nr(evlist->cpus);
	int nr_threads = thread_map__nr(evlist->threads);

	for (cpu = 0; cpu < nr_cpus; cpu++) {
		list_for_each_entry(pos, &evlist->entries, node) {
			if (!perf_evsel__is_group_leader(pos))
				continue;
			for (thread = 0; thread < nr_threads; thread++)
				ioctl(FD(pos, cpu, thread),
				      PERF_EVENT_IOC_ENABLE, 0);
		}
	}
}

static int perf_evlist__alloc_pollfd(struct perf_evlist *evlist)
{
	int nr_cpus = cpu_map__nr(evlist->cpus);
	int nr_threads = thread_map__nr(evlist->threads);
	int nfds = nr_cpus * nr_threads * evlist->nr_entries;

	evlist->pollfd = malloc(sizeof(struct pollfd) * nfds);
	return evlist->pollfd != NULL ? 0 : -ENOMEM;
}

void perf_evlist__add_pollfd(struct perf_evlist *evlist, int fd)
{
	fcntl(fd, F_SETFL, O_NONBLOCK);
	evlist->pollfd[evlist->nr_fds].fd = fd;
	evlist->pollfd[evlist->nr_fds].events = POLLIN;
	evlist->nr_fds++;
}

static void perf_evlist__id_hash(struct perf_evlist *evlist,
				 struct perf_evsel *evsel,
				 int cpu, int thread, u64 id)
{
	int hash;
	struct perf_sample_id *sid = SID(evsel, cpu, thread);

	sid->id = id;
	sid->evsel = evsel;
	hash = hash_64(sid->id, PERF_EVLIST__HLIST_BITS);
	hlist_add_head(&sid->node, &evlist->heads[hash]);
}

void perf_evlist__id_add(struct perf_evlist *evlist, struct perf_evsel *evsel,
			 int cpu, int thread, u64 id)
{
	perf_evlist__id_hash(evlist, evsel, cpu, thread, id);
	evsel->id[evsel->ids++] = id;
}

static int perf_evlist__id_add_fd(struct perf_evlist *evlist,
				  struct perf_evsel *evsel,
				  int cpu, int thread, int fd)
{
	u64 read_data[4] = { 0, };
	int id_idx = 1; /* The first entry is the counter value */
	u64 id;
	int ret;

	ret = ioctl(fd, PERF_EVENT_IOC_ID, &id);
	if (!ret)
		goto add;

	if (errno != ENOTTY)
		return -1;

	/* Legacy way to get event id.. All hail to old kernels! */

	/*
	 * This way does not work with group format read, so bail
	 * out in that case.
	 */
	if (perf_evlist__read_format(evlist) & PERF_FORMAT_GROUP)
		return -1;

	if (!(evsel->attr.read_format & PERF_FORMAT_ID) ||
	    read(fd, &read_data, sizeof(read_data)) == -1)
		return -1;

	if (evsel->attr.read_format & PERF_FORMAT_TOTAL_TIME_ENABLED)
		++id_idx;
	if (evsel->attr.read_format & PERF_FORMAT_TOTAL_TIME_RUNNING)
		++id_idx;

	id = read_data[id_idx];

add:
	perf_evlist__id_add(evlist, evsel, cpu, thread, id);
	return 0;
}

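/*
 * Why id_idx above works (sketch of the perf_event_open() ABI): for a
 * non-group counter, read() returns the u64 fields in read_format order:
 *
 *	value
 *	time_enabled	(if PERF_FORMAT_TOTAL_TIME_ENABLED)
 *	time_running	(if PERF_FORMAT_TOTAL_TIME_RUNNING)
 *	id		(if PERF_FORMAT_ID)
 *
 * so the id sits right after whichever time fields were requested.
 * PERF_FORMAT_GROUP changes the layout completely, hence the bail-out
 * above.
 */
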
struct perf_evsel *perf_evlist__id2evsel(struct perf_evlist *evlist, u64 id)
{
	struct hlist_head *head;
	struct perf_sample_id *sid;
	int hash;

	if (evlist->nr_entries == 1)
		return perf_evlist__first(evlist);

	hash = hash_64(id, PERF_EVLIST__HLIST_BITS);
	head = &evlist->heads[hash];

	hlist_for_each_entry(sid, head, node)
		if (sid->id == id)
			return sid->evsel;

	if (!perf_evlist__sample_id_all(evlist))
		return perf_evlist__first(evlist);

	return NULL;
}

union perf_event *perf_evlist__mmap_read(struct perf_evlist *evlist, int idx)
{
	struct perf_mmap *md = &evlist->mmap[idx];
	unsigned int head = perf_mmap__read_head(md);
	unsigned int old = md->prev;
	unsigned char *data = md->base + page_size;
	union perf_event *event = NULL;

	if (evlist->overwrite) {
		/*
		 * If we're further behind than half the buffer, there's a chance
		 * the writer will bite our tail and mess up the samples under us.
		 *
		 * If we somehow ended up ahead of the head, we got messed up.
		 *
		 * In either case, truncate and restart at head.
		 */
		int diff = head - old;
		if (diff > md->mask / 2 || diff < 0) {
			fprintf(stderr, "WARNING: failed to keep up with mmap data.\n");

			/*
			 * head points to a known good entry, start there.
			 */
			old = head;
		}
	}

	if (old != head) {
		size_t size;

		event = (union perf_event *)&data[old & md->mask];
		size = event->header.size;

		/*
		 * Event straddles the mmap boundary -- header should always
		 * be inside due to u64 alignment of output.
		 */
		if ((old & md->mask) + size != ((old + size) & md->mask)) {
			unsigned int offset = old;
			unsigned int len = min(sizeof(*event), size), cpy;
			void *dst = &md->event_copy;

			do {
				cpy = min(md->mask + 1 - (offset & md->mask), len);
				memcpy(dst, &data[offset & md->mask], cpy);
				offset += cpy;
				dst += cpy;
				len -= cpy;
			} while (len);

			event = &md->event_copy;
		}

		old += size;
	}

	md->prev = old;

	if (!evlist->overwrite)
		perf_mmap__write_tail(md, old);

	return event;
}

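/*
 * Example (sketch): consumers typically drain every map after poll()ing
 * evlist->pollfd; process_event() is a hypothetical callback:
 *
 *	union perf_event *event;
 *	int i;
 *
 *	for (i = 0; i < evlist->nr_mmaps; i++)
 *		while ((event = perf_evlist__mmap_read(evlist, i)) != NULL)
 *			process_event(event);
 */
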
static void __perf_evlist__munmap(struct perf_evlist *evlist, int idx)
{
	if (evlist->mmap[idx].base != NULL) {
		munmap(evlist->mmap[idx].base, evlist->mmap_len);
		evlist->mmap[idx].base = NULL;
	}
}

void perf_evlist__munmap(struct perf_evlist *evlist)
{
	int i;

	for (i = 0; i < evlist->nr_mmaps; i++)
		__perf_evlist__munmap(evlist, i);

	free(evlist->mmap);
	evlist->mmap = NULL;
}

static int perf_evlist__alloc_mmap(struct perf_evlist *evlist)
{
	evlist->nr_mmaps = cpu_map__nr(evlist->cpus);
	if (cpu_map__empty(evlist->cpus))
		evlist->nr_mmaps = thread_map__nr(evlist->threads);
	evlist->mmap = zalloc(evlist->nr_mmaps * sizeof(struct perf_mmap));
	return evlist->mmap != NULL ? 0 : -ENOMEM;
}

static int __perf_evlist__mmap(struct perf_evlist *evlist,
			       int idx, int prot, int mask, int fd)
{
	evlist->mmap[idx].prev = 0;
	evlist->mmap[idx].mask = mask;
	evlist->mmap[idx].base = mmap(NULL, evlist->mmap_len, prot,
				      MAP_SHARED, fd, 0);
	if (evlist->mmap[idx].base == MAP_FAILED) {
		evlist->mmap[idx].base = NULL;
		return -1;
	}

	perf_evlist__add_pollfd(evlist, fd);
	return 0;
}

static int perf_evlist__mmap_per_cpu(struct perf_evlist *evlist, int prot, int mask)
{
	struct perf_evsel *evsel;
	int cpu, thread;
	int nr_cpus = cpu_map__nr(evlist->cpus);
	int nr_threads = thread_map__nr(evlist->threads);

	for (cpu = 0; cpu < nr_cpus; cpu++) {
		int output = -1;

		for (thread = 0; thread < nr_threads; thread++) {
			list_for_each_entry(evsel, &evlist->entries, node) {
				int fd = FD(evsel, cpu, thread);

				if (output == -1) {
					output = fd;
					if (__perf_evlist__mmap(evlist, cpu,
								prot, mask, output) < 0)
						goto out_unmap;
				} else {
					if (ioctl(fd, PERF_EVENT_IOC_SET_OUTPUT, output) != 0)
						goto out_unmap;
				}

				if ((evsel->attr.read_format & PERF_FORMAT_ID) &&
				    perf_evlist__id_add_fd(evlist, evsel, cpu, thread, fd) < 0)
					goto out_unmap;
			}
		}
	}

	return 0;

out_unmap:
	for (cpu = 0; cpu < nr_cpus; cpu++)
		__perf_evlist__munmap(evlist, cpu);
	return -1;
}

static int perf_evlist__mmap_per_thread(struct perf_evlist *evlist, int prot, int mask)
{
	struct perf_evsel *evsel;
	int thread;
	int nr_threads = thread_map__nr(evlist->threads);

	for (thread = 0; thread < nr_threads; thread++) {
		int output = -1;

		list_for_each_entry(evsel, &evlist->entries, node) {
			int fd = FD(evsel, 0, thread);

			if (output == -1) {
				output = fd;
				if (__perf_evlist__mmap(evlist, thread,
							prot, mask, output) < 0)
					goto out_unmap;
			} else {
				if (ioctl(fd, PERF_EVENT_IOC_SET_OUTPUT, output) != 0)
					goto out_unmap;
			}

			if ((evsel->attr.read_format & PERF_FORMAT_ID) &&
			    perf_evlist__id_add_fd(evlist, evsel, 0, thread, fd) < 0)
				goto out_unmap;
		}
	}

	return 0;

out_unmap:
	for (thread = 0; thread < nr_threads; thread++)
		__perf_evlist__munmap(evlist, thread);
	return -1;
}

/** perf_evlist__mmap - Create per cpu maps to receive events
 *
 * @evlist - list of events
 * @pages - map length in pages
 * @overwrite - overwrite older events?
 *
 * If overwrite is false the user needs to signal event consumption using:
 *
 * struct perf_mmap *m = &evlist->mmap[cpu];
 * unsigned int head = perf_mmap__read_head(m);
 *
 * perf_mmap__write_tail(m, head)
 *
 * Using perf_evlist__read_on_cpu does this automatically.
 */
int perf_evlist__mmap(struct perf_evlist *evlist, unsigned int pages,
		      bool overwrite)
{
	struct perf_evsel *evsel;
	const struct cpu_map *cpus = evlist->cpus;
	const struct thread_map *threads = evlist->threads;
	int prot = PROT_READ | (overwrite ? 0 : PROT_WRITE), mask;

	/* 512 kiB: default amount of unprivileged mlocked memory */
	if (pages == UINT_MAX)
		pages = (512 * 1024) / page_size;
	else if (!is_power_of_2(pages))
		return -EINVAL;

	mask = pages * page_size - 1;

	if (evlist->mmap == NULL && perf_evlist__alloc_mmap(evlist) < 0)
		return -ENOMEM;

	if (evlist->pollfd == NULL && perf_evlist__alloc_pollfd(evlist) < 0)
		return -ENOMEM;

	evlist->overwrite = overwrite;
	evlist->mmap_len = (pages + 1) * page_size;

	list_for_each_entry(evsel, &evlist->entries, node) {
		if ((evsel->attr.read_format & PERF_FORMAT_ID) &&
		    evsel->sample_id == NULL &&
		    perf_evsel__alloc_id(evsel, cpu_map__nr(cpus), threads->nr) < 0)
			return -ENOMEM;
	}

	if (cpu_map__empty(cpus))
		return perf_evlist__mmap_per_thread(evlist, prot, mask);

	return perf_evlist__mmap_per_cpu(evlist, prot, mask);
}

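/*
 * Worked example (assuming 4 kiB pages): perf_evlist__mmap(evlist, 128, false)
 * yields mask = 128 * 4096 - 1 = 0x7ffff and mmap_len = 129 * 4096. The
 * extra page is the control page holding the head/tail pointers; keeping
 * the data area a power of 2 is what lets "offset & md->mask" wrap the
 * ring in perf_evlist__mmap_read().
 */
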
int perf_evlist__create_maps(struct perf_evlist *evlist,
			     struct perf_target *target)
{
	evlist->threads = thread_map__new_str(target->pid, target->tid,
					      target->uid);

	if (evlist->threads == NULL)
		return -1;

	if (perf_target__has_task(target))
		evlist->cpus = cpu_map__dummy_new();
	else if (!perf_target__has_cpu(target) && !target->uses_mmap)
		evlist->cpus = cpu_map__dummy_new();
	else
		evlist->cpus = cpu_map__new(target->cpu_list);

	if (evlist->cpus == NULL)
		goto out_delete_threads;

	return 0;

out_delete_threads:
	thread_map__delete(evlist->threads);
	return -1;
}

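/*
 * Example (sketch): tools fill a perf_target from the command line and
 * then build the maps; for a per-task session such as "-p 1234" the
 * result is a dummy cpu map plus a real thread map:
 *
 *	struct perf_target target = { .pid = "1234" };
 *
 *	if (perf_evlist__create_maps(evlist, &target) < 0)
 *		return -1;
 */
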
void perf_evlist__delete_maps(struct perf_evlist *evlist)
{
	cpu_map__delete(evlist->cpus);
	thread_map__delete(evlist->threads);
	evlist->cpus = NULL;
	evlist->threads = NULL;
}

int perf_evlist__apply_filters(struct perf_evlist *evlist)
{
	struct perf_evsel *evsel;
	int err = 0;
	const int ncpus = cpu_map__nr(evlist->cpus),
		  nthreads = thread_map__nr(evlist->threads);

	list_for_each_entry(evsel, &evlist->entries, node) {
		if (evsel->filter == NULL)
			continue;

		err = perf_evsel__set_filter(evsel, ncpus, nthreads, evsel->filter);
		if (err)
			break;
	}

	return err;
}

int perf_evlist__set_filter(struct perf_evlist *evlist, const char *filter)
{
	struct perf_evsel *evsel;
	int err = 0;
	const int ncpus = cpu_map__nr(evlist->cpus),
		  nthreads = thread_map__nr(evlist->threads);

	list_for_each_entry(evsel, &evlist->entries, node) {
		err = perf_evsel__set_filter(evsel, ncpus, nthreads, filter);
		if (err)
			break;
	}

	return err;
}

bool perf_evlist__valid_sample_type(struct perf_evlist *evlist)
{
	struct perf_evsel *first = perf_evlist__first(evlist), *pos = first;

	list_for_each_entry_continue(pos, &evlist->entries, node) {
		if (first->attr.sample_type != pos->attr.sample_type)
			return false;
	}

	return true;
}

u64 perf_evlist__sample_type(struct perf_evlist *evlist)
{
	struct perf_evsel *first = perf_evlist__first(evlist);
	return first->attr.sample_type;
}

bool perf_evlist__valid_read_format(struct perf_evlist *evlist)
{
	struct perf_evsel *first = perf_evlist__first(evlist), *pos = first;
	u64 read_format = first->attr.read_format;
	u64 sample_type = first->attr.sample_type;

	list_for_each_entry_continue(pos, &evlist->entries, node) {
		if (read_format != pos->attr.read_format)
			return false;
	}

	/* PERF_SAMPLE_READ implies PERF_FORMAT_ID. */
	if ((sample_type & PERF_SAMPLE_READ) &&
	    !(read_format & PERF_FORMAT_ID)) {
		return false;
	}

	return true;
}

u64 perf_evlist__read_format(struct perf_evlist *evlist)
{
	struct perf_evsel *first = perf_evlist__first(evlist);
	return first->attr.read_format;
}

u16 perf_evlist__id_hdr_size(struct perf_evlist *evlist)
{
	struct perf_evsel *first = perf_evlist__first(evlist);
	struct perf_sample *data;
	u64 sample_type;
	u16 size = 0;

	if (!first->attr.sample_id_all)
		goto out;

	sample_type = first->attr.sample_type;

	if (sample_type & PERF_SAMPLE_TID)
		size += sizeof(data->tid) * 2;

	if (sample_type & PERF_SAMPLE_TIME)
		size += sizeof(data->time);

	if (sample_type & PERF_SAMPLE_ID)
		size += sizeof(data->id);

	if (sample_type & PERF_SAMPLE_STREAM_ID)
		size += sizeof(data->stream_id);

	if (sample_type & PERF_SAMPLE_CPU)
		size += sizeof(data->cpu) * 2;
out:
	return size;
}

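/*
 * Worked example: with sample_id_all set and
 * sample_type = PERF_SAMPLE_TID | PERF_SAMPLE_TIME, every non-sample
 * event carries 8 bytes (pid + tid, two u32s) + 8 bytes (time) = a
 * 16 byte id header.
 */
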
bool perf_evlist__valid_sample_id_all(struct perf_evlist *evlist)
{
	struct perf_evsel *first = perf_evlist__first(evlist), *pos = first;

	list_for_each_entry_continue(pos, &evlist->entries, node) {
		if (first->attr.sample_id_all != pos->attr.sample_id_all)
			return false;
	}

	return true;
}

bool perf_evlist__sample_id_all(struct perf_evlist *evlist)
{
	struct perf_evsel *first = perf_evlist__first(evlist);
	return first->attr.sample_id_all;
}

void perf_evlist__set_selected(struct perf_evlist *evlist,
			       struct perf_evsel *evsel)
{
	evlist->selected = evsel;
}

void perf_evlist__close(struct perf_evlist *evlist)
{
	struct perf_evsel *evsel;
	int ncpus = cpu_map__nr(evlist->cpus);
	int nthreads = thread_map__nr(evlist->threads);

	list_for_each_entry_reverse(evsel, &evlist->entries, node)
		perf_evsel__close(evsel, ncpus, nthreads);
}

int perf_evlist__open(struct perf_evlist *evlist)
{
	struct perf_evsel *evsel;
	int err;

	list_for_each_entry(evsel, &evlist->entries, node) {
		err = perf_evsel__open(evsel, evlist->cpus, evlist->threads);
		if (err < 0)
			goto out_err;
	}

	return 0;
out_err:
	perf_evlist__close(evlist);
	errno = -err;
	return err;
}

int perf_evlist__prepare_workload(struct perf_evlist *evlist,
				  struct perf_target *target,
				  const char *argv[], bool pipe_output,
				  bool want_signal)
{
	int child_ready_pipe[2], go_pipe[2];
	char bf;

	if (pipe(child_ready_pipe) < 0) {
		perror("failed to create 'ready' pipe");
		return -1;
	}

	if (pipe(go_pipe) < 0) {
		perror("failed to create 'go' pipe");
		goto out_close_ready_pipe;
	}

	evlist->workload.pid = fork();
	if (evlist->workload.pid < 0) {
		perror("failed to fork");
		goto out_close_pipes;
	}

	if (!evlist->workload.pid) {
		if (pipe_output)
			dup2(2, 1);

		signal(SIGTERM, SIG_DFL);

		close(child_ready_pipe[0]);
		close(go_pipe[1]);
		fcntl(go_pipe[0], F_SETFD, FD_CLOEXEC);

		/*
		 * Do a dummy execvp to get the PLT entry resolved,
		 * so we avoid the resolver overhead on the real
		 * execvp call.
		 */
		execvp("", (char **)argv);

		/*
		 * Tell the parent we're ready to go
		 */
		close(child_ready_pipe[1]);

		/*
		 * Wait until the parent tells us to go.
		 */
		if (read(go_pipe[0], &bf, 1) == -1)
			perror("unable to read pipe");

		execvp(argv[0], (char **)argv);

		perror(argv[0]);
		if (want_signal)
			kill(getppid(), SIGUSR1);
		exit(-1);
	}

	if (perf_target__none(target))
		evlist->threads->map[0] = evlist->workload.pid;

	close(child_ready_pipe[1]);
	close(go_pipe[0]);
	/*
	 * wait for child to settle
	 */
	if (read(child_ready_pipe[0], &bf, 1) == -1) {
		perror("unable to read pipe");
		goto out_close_pipes;
	}

	fcntl(go_pipe[1], F_SETFD, FD_CLOEXEC);
	evlist->workload.cork_fd = go_pipe[1];
	close(child_ready_pipe[0]);
	return 0;

out_close_pipes:
	close(go_pipe[0]);
	close(go_pipe[1]);
out_close_ready_pipe:
	close(child_ready_pipe[0]);
	close(child_ready_pipe[1]);
	return -1;
}

int perf_evlist__start_workload(struct perf_evlist *evlist)
{
	if (evlist->workload.cork_fd > 0) {
		char bf = 0;
		int ret;
		/*
		 * Remove the cork, let it rip!
		 */
		ret = write(evlist->workload.cork_fd, &bf, 1);
		if (ret < 0)
			perror("unable to write to pipe");

		close(evlist->workload.cork_fd);
		return ret;
	}

	return 0;
}
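
/*
 * Example (sketch): record-style tools order these calls roughly as:
 *
 *	perf_evlist__prepare_workload(evlist, &target, argv, false, true);
 *	perf_evlist__open(evlist);
 *	perf_evlist__mmap(evlist, opts->mmap_pages, false);
 *	perf_evlist__start_workload(evlist);
 *
 * so every counter and mmap is in place before the forked child is
 * uncorked and exec's the workload.
 */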

int perf_evlist__parse_sample(struct perf_evlist *evlist, union perf_event *event,
			      struct perf_sample *sample)
{
	struct perf_evsel *evsel = perf_evlist__first(evlist);
	return perf_evsel__parse_sample(evsel, event, sample);
}

size_t perf_evlist__fprintf(struct perf_evlist *evlist, FILE *fp)
{
	struct perf_evsel *evsel;
	size_t printed = 0;

	list_for_each_entry(evsel, &evlist->entries, node) {
		printed += fprintf(fp, "%s%s", evsel->idx ? ", " : "",
				   perf_evsel__name(evsel));
	}

	return printed + fprintf(fp, "\n");
}