/*
 * Copyright (C) 2011, Red Hat Inc, Arnaldo Carvalho de Melo <acme@redhat.com>
 *
 * Parts came from builtin-{top,stat,record}.c, see those files for further
 * copyright notes.
 *
 * Released under the GPL v2. (and only v2, not any later version)
 */
#include "util.h"
#include "debugfs.h"
#include <poll.h>
#include "cpumap.h"
#include "thread_map.h"
#include "target.h"
#include "evlist.h"
#include "evsel.h"
#include <unistd.h>

#include "parse-events.h"

#include <sys/mman.h>

#include <linux/bitops.h>
#include <linux/hash.h>

#define FD(e, x, y) (*(int *)xyarray__entry(e->fd, x, y))
#define SID(e, x, y) xyarray__entry(e->sample_id, x, y)

void perf_evlist__init(struct perf_evlist *evlist, struct cpu_map *cpus,
		       struct thread_map *threads)
{
	int i;

	for (i = 0; i < PERF_EVLIST__HLIST_SIZE; ++i)
		INIT_HLIST_HEAD(&evlist->heads[i]);
	INIT_LIST_HEAD(&evlist->entries);
	perf_evlist__set_maps(evlist, cpus, threads);
	evlist->workload.pid = -1;
}

struct perf_evlist *perf_evlist__new(struct cpu_map *cpus,
				     struct thread_map *threads)
{
	struct perf_evlist *evlist = zalloc(sizeof(*evlist));

	if (evlist != NULL)
		perf_evlist__init(evlist, cpus, threads);

	return evlist;
}
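
/*
 * A minimal usage sketch (hypothetical caller, error handling trimmed):
 * allocate with perf_evlist__new(), populate it, and release everything,
 * including the evsels, with perf_evlist__delete():
 *
 *	struct perf_evlist *evlist = perf_evlist__new(cpus, threads);
 *
 *	if (evlist == NULL)
 *		return -ENOMEM;
 *	if (perf_evlist__add_default(evlist) < 0)
 *		goto out_delete;
 *	...
 *	perf_evlist__delete(evlist);
 */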

void perf_evlist__config_attrs(struct perf_evlist *evlist,
			       struct perf_record_opts *opts)
{
	struct perf_evsel *evsel, *first;

	if (evlist->cpus->map[0] < 0)
		opts->no_inherit = true;

	first = list_entry(evlist->entries.next, struct perf_evsel, node);

	list_for_each_entry(evsel, &evlist->entries, node) {
		perf_evsel__config(evsel, opts, first);

		if (evlist->nr_entries > 1)
			evsel->attr.sample_type |= PERF_SAMPLE_ID;
	}
}

static void perf_evlist__purge(struct perf_evlist *evlist)
{
	struct perf_evsel *pos, *n;

	list_for_each_entry_safe(pos, n, &evlist->entries, node) {
		list_del_init(&pos->node);
		perf_evsel__delete(pos);
	}

	evlist->nr_entries = 0;
}

void perf_evlist__exit(struct perf_evlist *evlist)
{
	free(evlist->mmap);
	free(evlist->pollfd);
	evlist->mmap = NULL;
	evlist->pollfd = NULL;
}

void perf_evlist__delete(struct perf_evlist *evlist)
{
	perf_evlist__purge(evlist);
	perf_evlist__exit(evlist);
	free(evlist);
}

void perf_evlist__add(struct perf_evlist *evlist, struct perf_evsel *entry)
{
	list_add_tail(&entry->node, &evlist->entries);
	++evlist->nr_entries;
}

void perf_evlist__splice_list_tail(struct perf_evlist *evlist,
				   struct list_head *list,
				   int nr_entries)
{
	list_splice_tail(list, &evlist->entries);
	evlist->nr_entries += nr_entries;
}

int perf_evlist__add_default(struct perf_evlist *evlist)
{
	struct perf_event_attr attr = {
		.type = PERF_TYPE_HARDWARE,
		.config = PERF_COUNT_HW_CPU_CYCLES,
	};
	struct perf_evsel *evsel;

	event_attr_init(&attr);

	evsel = perf_evsel__new(&attr, 0);
	if (evsel == NULL)
		goto error;

	/* use strdup() because free(evsel) assumes name is allocated */
	evsel->name = strdup("cycles");
	if (!evsel->name)
		goto error_free;

	perf_evlist__add(evlist, evsel);
	return 0;
error_free:
	perf_evsel__delete(evsel);
error:
	return -ENOMEM;
}

int perf_evlist__add_attrs(struct perf_evlist *evlist,
			   struct perf_event_attr *attrs, size_t nr_attrs)
{
	struct perf_evsel *evsel, *n;
	LIST_HEAD(head);
	size_t i;

	for (i = 0; i < nr_attrs; i++) {
		evsel = perf_evsel__new(attrs + i, evlist->nr_entries + i);
		if (evsel == NULL)
			goto out_delete_partial_list;
		list_add_tail(&evsel->node, &head);
	}

	perf_evlist__splice_list_tail(evlist, &head, nr_attrs);

	return 0;

out_delete_partial_list:
	list_for_each_entry_safe(evsel, n, &head, node)
		perf_evsel__delete(evsel);
	return -1;
}

int __perf_evlist__add_default_attrs(struct perf_evlist *evlist,
				     struct perf_event_attr *attrs, size_t nr_attrs)
{
	size_t i;

	for (i = 0; i < nr_attrs; i++)
		event_attr_init(attrs + i);

	return perf_evlist__add_attrs(evlist, attrs, nr_attrs);
}
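
/*
 * Batch insertion sketch (the attr table below is illustrative, not a
 * real default set): builtin tools typically keep a static
 * perf_event_attr table and feed it in once at startup:
 *
 *	static struct perf_event_attr default_attrs[] = {
 *		{ .type = PERF_TYPE_SOFTWARE, .config = PERF_COUNT_SW_TASK_CLOCK },
 *		{ .type = PERF_TYPE_HARDWARE, .config = PERF_COUNT_HW_INSTRUCTIONS },
 *	};
 *
 *	if (__perf_evlist__add_default_attrs(evlist, default_attrs,
 *					     ARRAY_SIZE(default_attrs)) < 0)
 *		...
 */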

static int trace_event__id(const char *evname)
{
	char *filename, *colon;
	int err = -1, fd;

	if (asprintf(&filename, "%s/%s/id", tracing_events_path, evname) < 0)
		return -1;

	colon = strrchr(filename, ':');
	if (colon != NULL)
		*colon = '/';

	fd = open(filename, O_RDONLY);
	if (fd >= 0) {
		char id[16];
		if (read(fd, id, sizeof(id)) > 0)
			err = atoi(id);
		close(fd);
	}

	free(filename);
	return err;
}
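
/*
 * For reference, a sketch of what trace_event__id() ends up reading for
 * an event string like "sched:sched_switch" (path assumes the usual
 * debugfs mount point):
 *
 *	/sys/kernel/debug/tracing/events/sched/sched_switch/id
 *
 * The file holds the numeric tracepoint id as ASCII, which atoi()
 * converts; the exact value varies per kernel build.
 */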

int perf_evlist__add_tracepoints(struct perf_evlist *evlist,
				 const char *tracepoints[],
				 size_t nr_tracepoints)
{
	int err;
	size_t i;
	struct perf_event_attr *attrs = zalloc(nr_tracepoints * sizeof(*attrs));

	if (attrs == NULL)
		return -1;

	for (i = 0; i < nr_tracepoints; i++) {
		err = trace_event__id(tracepoints[i]);

		if (err < 0)
			goto out_free_attrs;

		attrs[i].type = PERF_TYPE_TRACEPOINT;
		attrs[i].config = err;
		attrs[i].sample_type = (PERF_SAMPLE_RAW | PERF_SAMPLE_TIME |
					PERF_SAMPLE_CPU);
		attrs[i].sample_period = 1;
	}

	err = perf_evlist__add_attrs(evlist, attrs, nr_tracepoints);
out_free_attrs:
	free(attrs);
	return err;
}

static struct perf_evsel *
	perf_evlist__find_tracepoint_by_id(struct perf_evlist *evlist, int id)
{
	struct perf_evsel *evsel;

	list_for_each_entry(evsel, &evlist->entries, node) {
		if (evsel->attr.type == PERF_TYPE_TRACEPOINT &&
		    (int)evsel->attr.config == id)
			return evsel;
	}

	return NULL;
}

int perf_evlist__set_tracepoints_handlers(struct perf_evlist *evlist,
					  const struct perf_evsel_str_handler *assocs,
					  size_t nr_assocs)
{
	struct perf_evsel *evsel;
	int err;
	size_t i;

	for (i = 0; i < nr_assocs; i++) {
		err = trace_event__id(assocs[i].name);
		if (err < 0)
			goto out;

		evsel = perf_evlist__find_tracepoint_by_id(evlist, err);
		if (evsel == NULL)
			continue;

		err = -EEXIST;
		if (evsel->handler.func != NULL)
			goto out;
		evsel->handler.func = assocs[i].handler;
	}

	err = 0;
out:
	return err;
}
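
/*
 * Usage sketch (the handler function is hypothetical): callers pair
 * tracepoint names with callbacks and let the helper above resolve the
 * ids, e.g. in a builtin-lock/-sched style tool:
 *
 *	static const struct perf_evsel_str_handler handlers[] = {
 *		{ "sched:sched_switch", process_sched_switch },
 *	};
 *
 *	if (perf_evlist__set_tracepoints_handlers(evlist, handlers,
 *						  ARRAY_SIZE(handlers)) < 0)
 *		...
 */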

void perf_evlist__disable(struct perf_evlist *evlist)
{
	int cpu, thread;
	struct perf_evsel *pos;

	for (cpu = 0; cpu < evlist->cpus->nr; cpu++) {
		list_for_each_entry(pos, &evlist->entries, node) {
			for (thread = 0; thread < evlist->threads->nr; thread++)
				ioctl(FD(pos, cpu, thread), PERF_EVENT_IOC_DISABLE);
		}
	}
}

void perf_evlist__enable(struct perf_evlist *evlist)
{
	int cpu, thread;
	struct perf_evsel *pos;

	for (cpu = 0; cpu < evlist->cpus->nr; cpu++) {
		list_for_each_entry(pos, &evlist->entries, node) {
			for (thread = 0; thread < evlist->threads->nr; thread++)
				ioctl(FD(pos, cpu, thread), PERF_EVENT_IOC_ENABLE);
		}
	}
}
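
/*
 * Both helpers above issue one PERF_EVENT_IOC_{DIS,EN}ABLE ioctl per
 * (evsel, cpu, thread) fd, so a whole evlist can be toggled around a
 * measured region (sketch, run_workload() is hypothetical):
 *
 *	perf_evlist__enable(evlist);
 *	run_workload();
 *	perf_evlist__disable(evlist);
 */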

static int perf_evlist__alloc_pollfd(struct perf_evlist *evlist)
{
	int nfds = evlist->cpus->nr * evlist->threads->nr * evlist->nr_entries;
	evlist->pollfd = malloc(sizeof(struct pollfd) * nfds);
	return evlist->pollfd != NULL ? 0 : -ENOMEM;
}

void perf_evlist__add_pollfd(struct perf_evlist *evlist, int fd)
{
	fcntl(fd, F_SETFL, O_NONBLOCK);
	evlist->pollfd[evlist->nr_fds].fd = fd;
	evlist->pollfd[evlist->nr_fds].events = POLLIN;
	evlist->nr_fds++;
}
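
/*
 * The pollfd array filled in above is sized by perf_evlist__alloc_pollfd()
 * for the worst case (one fd per evsel/cpu/thread) and consumed with a
 * plain poll(2), as in this sketch of a record-style loop:
 *
 *	while (!done) {
 *		if (poll(evlist->pollfd, evlist->nr_fds, -1) < 0)
 *			break;
 *		... drain the mmaps with perf_evlist__mmap_read() ...
 *	}
 */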

static void perf_evlist__id_hash(struct perf_evlist *evlist,
				 struct perf_evsel *evsel,
				 int cpu, int thread, u64 id)
{
	int hash;
	struct perf_sample_id *sid = SID(evsel, cpu, thread);

	sid->id = id;
	sid->evsel = evsel;
	hash = hash_64(sid->id, PERF_EVLIST__HLIST_BITS);
	hlist_add_head(&sid->node, &evlist->heads[hash]);
}

void perf_evlist__id_add(struct perf_evlist *evlist, struct perf_evsel *evsel,
			 int cpu, int thread, u64 id)
{
	perf_evlist__id_hash(evlist, evsel, cpu, thread, id);
	evsel->id[evsel->ids++] = id;
}

static int perf_evlist__id_add_fd(struct perf_evlist *evlist,
				  struct perf_evsel *evsel,
				  int cpu, int thread, int fd)
{
	u64 read_data[4] = { 0, };
	int id_idx = 1; /* The first entry is the counter value */

	if (!(evsel->attr.read_format & PERF_FORMAT_ID) ||
	    read(fd, &read_data, sizeof(read_data)) == -1)
		return -1;

	if (evsel->attr.read_format & PERF_FORMAT_TOTAL_TIME_ENABLED)
		++id_idx;
	if (evsel->attr.read_format & PERF_FORMAT_TOTAL_TIME_RUNNING)
		++id_idx;

	perf_evlist__id_add(evlist, evsel, cpu, thread, read_data[id_idx]);
	return 0;
}

struct perf_evsel *perf_evlist__id2evsel(struct perf_evlist *evlist, u64 id)
{
	struct hlist_head *head;
	struct hlist_node *pos;
	struct perf_sample_id *sid;
	int hash;

	if (evlist->nr_entries == 1)
		return list_entry(evlist->entries.next, struct perf_evsel, node);

	hash = hash_64(id, PERF_EVLIST__HLIST_BITS);
	head = &evlist->heads[hash];

	hlist_for_each_entry(sid, pos, head, node)
		if (sid->id == id)
			return sid->evsel;

	if (!perf_evlist__sample_id_all(evlist))
		return list_entry(evlist->entries.next, struct perf_evsel, node);

	return NULL;
}
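
/*
 * Demultiplexing sketch: once a sample's PERF_SAMPLE_ID value has been
 * parsed out of the record (parsing step elided here), the owning evsel
 * is recovered via the hash above:
 *
 *	struct perf_evsel *evsel = perf_evlist__id2evsel(evlist, sample_id);
 *
 *	if (evsel == NULL)
 *		return -1;
 */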

union perf_event *perf_evlist__mmap_read(struct perf_evlist *evlist, int idx)
{
	/* XXX Move this to perf.c, making it generally available */
	unsigned int page_size = sysconf(_SC_PAGE_SIZE);
	struct perf_mmap *md = &evlist->mmap[idx];
	unsigned int head = perf_mmap__read_head(md);
	unsigned int old = md->prev;
	unsigned char *data = md->base + page_size;
	union perf_event *event = NULL;

	if (evlist->overwrite) {
		/*
		 * If we're further behind than half the buffer, there's a chance
		 * the writer will bite our tail and mess up the samples under us.
		 *
		 * If we somehow ended up ahead of the head, we got messed up.
		 *
		 * In either case, truncate and restart at head.
		 */
		int diff = head - old;
		if (diff > md->mask / 2 || diff < 0) {
			fprintf(stderr, "WARNING: failed to keep up with mmap data.\n");

			/*
			 * head points to a known good entry, start there.
			 */
			old = head;
		}
	}

	if (old != head) {
		size_t size;

		event = (union perf_event *)&data[old & md->mask];
		size = event->header.size;

		/*
		 * Event straddles the mmap boundary -- header should always
		 * be inside due to u64 alignment of output.
		 */
		if ((old & md->mask) + size != ((old + size) & md->mask)) {
			unsigned int offset = old;
			unsigned int len = min(sizeof(*event), size), cpy;
			void *dst = &evlist->event_copy;

			do {
				cpy = min(md->mask + 1 - (offset & md->mask), len);
				memcpy(dst, &data[offset & md->mask], cpy);
				offset += cpy;
				dst += cpy;
				len -= cpy;
			} while (len);

			event = &evlist->event_copy;
		}

		old += size;
	}

	md->prev = old;

	if (!evlist->overwrite)
		perf_mmap__write_tail(md, old);

	return event;
}
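
/*
 * Consumer loop sketch (process_event() is hypothetical): each call to
 * perf_evlist__mmap_read() returns the next event in ring buffer idx, or
 * NULL once the buffer is drained; in non-overwrite mode it also advances
 * the kernel-visible tail for us:
 *
 *	union perf_event *event;
 *	int i;
 *
 *	for (i = 0; i < evlist->nr_mmaps; i++) {
 *		while ((event = perf_evlist__mmap_read(evlist, i)) != NULL)
 *			process_event(event);
 *	}
 */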

void perf_evlist__munmap(struct perf_evlist *evlist)
{
	int i;

	for (i = 0; i < evlist->nr_mmaps; i++) {
		if (evlist->mmap[i].base != NULL) {
			munmap(evlist->mmap[i].base, evlist->mmap_len);
			evlist->mmap[i].base = NULL;
		}
	}

	free(evlist->mmap);
	evlist->mmap = NULL;
}

static int perf_evlist__alloc_mmap(struct perf_evlist *evlist)
{
	evlist->nr_mmaps = evlist->cpus->nr;
	if (evlist->cpus->map[0] == -1)
		evlist->nr_mmaps = evlist->threads->nr;
	evlist->mmap = zalloc(evlist->nr_mmaps * sizeof(struct perf_mmap));
	return evlist->mmap != NULL ? 0 : -ENOMEM;
}

static int __perf_evlist__mmap(struct perf_evlist *evlist,
			       int idx, int prot, int mask, int fd)
{
	evlist->mmap[idx].prev = 0;
	evlist->mmap[idx].mask = mask;
	evlist->mmap[idx].base = mmap(NULL, evlist->mmap_len, prot,
				      MAP_SHARED, fd, 0);
	if (evlist->mmap[idx].base == MAP_FAILED) {
		evlist->mmap[idx].base = NULL;
		return -1;
	}

	perf_evlist__add_pollfd(evlist, fd);
	return 0;
}

static int perf_evlist__mmap_per_cpu(struct perf_evlist *evlist, int prot, int mask)
{
	struct perf_evsel *evsel;
	int cpu, thread;

	for (cpu = 0; cpu < evlist->cpus->nr; cpu++) {
		int output = -1;

		for (thread = 0; thread < evlist->threads->nr; thread++) {
			list_for_each_entry(evsel, &evlist->entries, node) {
				int fd = FD(evsel, cpu, thread);

				if (output == -1) {
					output = fd;
					if (__perf_evlist__mmap(evlist, cpu,
								prot, mask, output) < 0)
						goto out_unmap;
				} else {
					if (ioctl(fd, PERF_EVENT_IOC_SET_OUTPUT, output) != 0)
						goto out_unmap;
				}

				if ((evsel->attr.read_format & PERF_FORMAT_ID) &&
				    perf_evlist__id_add_fd(evlist, evsel, cpu, thread, fd) < 0)
					goto out_unmap;
			}
		}
	}

	return 0;

out_unmap:
	for (cpu = 0; cpu < evlist->cpus->nr; cpu++) {
		if (evlist->mmap[cpu].base != NULL) {
			munmap(evlist->mmap[cpu].base, evlist->mmap_len);
			evlist->mmap[cpu].base = NULL;
		}
	}
	return -1;
}

static int perf_evlist__mmap_per_thread(struct perf_evlist *evlist, int prot, int mask)
{
	struct perf_evsel *evsel;
	int thread;

	for (thread = 0; thread < evlist->threads->nr; thread++) {
		int output = -1;

		list_for_each_entry(evsel, &evlist->entries, node) {
			int fd = FD(evsel, 0, thread);

			if (output == -1) {
				output = fd;
				if (__perf_evlist__mmap(evlist, thread,
							prot, mask, output) < 0)
					goto out_unmap;
			} else {
				if (ioctl(fd, PERF_EVENT_IOC_SET_OUTPUT, output) != 0)
					goto out_unmap;
			}

			if ((evsel->attr.read_format & PERF_FORMAT_ID) &&
			    perf_evlist__id_add_fd(evlist, evsel, 0, thread, fd) < 0)
				goto out_unmap;
		}
	}

	return 0;

out_unmap:
	for (thread = 0; thread < evlist->threads->nr; thread++) {
		if (evlist->mmap[thread].base != NULL) {
			munmap(evlist->mmap[thread].base, evlist->mmap_len);
			evlist->mmap[thread].base = NULL;
		}
	}
	return -1;
}

/** perf_evlist__mmap - Create per cpu maps to receive events
 *
 * @evlist - list of events
 * @pages - map length in pages
 * @overwrite - overwrite older events?
 *
 * If overwrite is false the user needs to signal event consumption using:
 *
 * struct perf_mmap *m = &evlist->mmap[cpu];
 * unsigned int head = perf_mmap__read_head(m);
 *
 * perf_mmap__write_tail(m, head)
 *
 * Using perf_evlist__mmap_read does this automatically.
 */
int perf_evlist__mmap(struct perf_evlist *evlist, unsigned int pages,
		      bool overwrite)
{
	unsigned int page_size = sysconf(_SC_PAGE_SIZE);
	struct perf_evsel *evsel;
	const struct cpu_map *cpus = evlist->cpus;
	const struct thread_map *threads = evlist->threads;
	int prot = PROT_READ | (overwrite ? 0 : PROT_WRITE), mask;

	/* 512 kiB: default amount of unprivileged mlocked memory */
	if (pages == UINT_MAX)
		pages = (512 * 1024) / page_size;
	else if (!is_power_of_2(pages))
		return -EINVAL;

	mask = pages * page_size - 1;

	if (evlist->mmap == NULL && perf_evlist__alloc_mmap(evlist) < 0)
		return -ENOMEM;

	if (evlist->pollfd == NULL && perf_evlist__alloc_pollfd(evlist) < 0)
		return -ENOMEM;

	evlist->overwrite = overwrite;
	evlist->mmap_len = (pages + 1) * page_size;

	list_for_each_entry(evsel, &evlist->entries, node) {
		if ((evsel->attr.read_format & PERF_FORMAT_ID) &&
		    evsel->sample_id == NULL &&
		    perf_evsel__alloc_id(evsel, cpus->nr, threads->nr) < 0)
			return -ENOMEM;
	}

	if (evlist->cpus->map[0] == -1)
		return perf_evlist__mmap_per_thread(evlist, prot, mask);

	return perf_evlist__mmap_per_cpu(evlist, prot, mask);
}
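
/*
 * Setup order sketch (record-style, error handling omitted): the fds
 * must exist before they can be mmapped, so open comes first; passing
 * UINT_MAX picks the 512 kiB default described above:
 *
 *	if (perf_evlist__open(evlist, false) < 0)
 *		return -1;
 *	if (perf_evlist__mmap(evlist, UINT_MAX, false) < 0)
 *		return -1;
 */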

int perf_evlist__create_maps(struct perf_evlist *evlist,
			     struct perf_target *target)
{
	evlist->threads = thread_map__new_str(target->pid, target->tid,
					      target->uid);

	if (evlist->threads == NULL)
		return -1;

	if (perf_target__has_task(target))
		evlist->cpus = cpu_map__dummy_new();
	else if (!perf_target__has_cpu(target) && !target->uses_mmap)
		evlist->cpus = cpu_map__dummy_new();
	else
		evlist->cpus = cpu_map__new(target->cpu_list);

	if (evlist->cpus == NULL)
		goto out_delete_threads;

	return 0;

out_delete_threads:
	thread_map__delete(evlist->threads);
	return -1;
}

void perf_evlist__delete_maps(struct perf_evlist *evlist)
{
	cpu_map__delete(evlist->cpus);
	thread_map__delete(evlist->threads);
	evlist->cpus = NULL;
	evlist->threads = NULL;
}

int perf_evlist__set_filters(struct perf_evlist *evlist)
{
	const struct thread_map *threads = evlist->threads;
	const struct cpu_map *cpus = evlist->cpus;
	struct perf_evsel *evsel;
	char *filter;
	int thread;
	int cpu;
	int err;
	int fd;

	list_for_each_entry(evsel, &evlist->entries, node) {
		filter = evsel->filter;
		if (!filter)
			continue;
		for (cpu = 0; cpu < cpus->nr; cpu++) {
			for (thread = 0; thread < threads->nr; thread++) {
				fd = FD(evsel, cpu, thread);
				err = ioctl(fd, PERF_EVENT_IOC_SET_FILTER, filter);
				if (err)
					return err;
			}
		}
	}

	return 0;
}
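
/*
 * Filters are attached with the PERF_EVENT_IOC_SET_FILTER ioctl above,
 * one call per fd. The string uses the tracepoint filter syntax, e.g.
 * (sketch; available field names depend on the tracepoint):
 *
 *	evsel->filter = strdup("prev_pid == 0");
 *	...
 *	err = perf_evlist__set_filters(evlist);
 */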

bool perf_evlist__valid_sample_type(const struct perf_evlist *evlist)
{
	struct perf_evsel *pos, *first;

	pos = first = list_entry(evlist->entries.next, struct perf_evsel, node);

	list_for_each_entry_continue(pos, &evlist->entries, node) {
		if (first->attr.sample_type != pos->attr.sample_type)
			return false;
	}

	return true;
}

u64 perf_evlist__sample_type(const struct perf_evlist *evlist)
{
	struct perf_evsel *first;

	first = list_entry(evlist->entries.next, struct perf_evsel, node);
	return first->attr.sample_type;
}

u16 perf_evlist__id_hdr_size(const struct perf_evlist *evlist)
{
	struct perf_evsel *first;
	struct perf_sample *data;
	u64 sample_type;
	u16 size = 0;

	first = list_entry(evlist->entries.next, struct perf_evsel, node);

	if (!first->attr.sample_id_all)
		goto out;

	sample_type = first->attr.sample_type;

	if (sample_type & PERF_SAMPLE_TID)
		size += sizeof(data->tid) * 2;

	if (sample_type & PERF_SAMPLE_TIME)
		size += sizeof(data->time);

	if (sample_type & PERF_SAMPLE_ID)
		size += sizeof(data->id);

	if (sample_type & PERF_SAMPLE_STREAM_ID)
		size += sizeof(data->stream_id);

	if (sample_type & PERF_SAMPLE_CPU)
		size += sizeof(data->cpu) * 2;
out:
	return size;
}
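
/*
 * The size computed above matches the optional trailer that
 * sample_id_all appends to every non-sample record, laid out in this
 * order when the corresponding sample_type bits are set:
 *
 *	{ u32 pid, tid;  }	PERF_SAMPLE_TID
 *	{ u64 time;      }	PERF_SAMPLE_TIME
 *	{ u64 id;        }	PERF_SAMPLE_ID
 *	{ u64 stream_id; }	PERF_SAMPLE_STREAM_ID
 *	{ u32 cpu, res;  }	PERF_SAMPLE_CPU
 */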

bool perf_evlist__valid_sample_id_all(const struct perf_evlist *evlist)
{
	struct perf_evsel *pos, *first;

	pos = first = list_entry(evlist->entries.next, struct perf_evsel, node);

	list_for_each_entry_continue(pos, &evlist->entries, node) {
		if (first->attr.sample_id_all != pos->attr.sample_id_all)
			return false;
	}

	return true;
}

bool perf_evlist__sample_id_all(const struct perf_evlist *evlist)
{
	struct perf_evsel *first;

	first = list_entry(evlist->entries.next, struct perf_evsel, node);
	return first->attr.sample_id_all;
}

void perf_evlist__set_selected(struct perf_evlist *evlist,
			       struct perf_evsel *evsel)
{
	evlist->selected = evsel;
}

int perf_evlist__open(struct perf_evlist *evlist, bool group)
{
	struct perf_evsel *evsel, *first;
	int err, ncpus, nthreads;

	first = list_entry(evlist->entries.next, struct perf_evsel, node);

	list_for_each_entry(evsel, &evlist->entries, node) {
		struct xyarray *group_fd = NULL;

		if (group && evsel != first)
			group_fd = first->fd;

		err = perf_evsel__open(evsel, evlist->cpus, evlist->threads,
				       group, group_fd);
		if (err < 0)
			goto out_err;
	}

	return 0;
out_err:
	ncpus = evlist->cpus ? evlist->cpus->nr : 1;
	nthreads = evlist->threads ? evlist->threads->nr : 1;

	list_for_each_entry_reverse(evsel, &evlist->entries, node)
		perf_evsel__close(evsel, ncpus, nthreads);

	errno = -err;
	return err;
}
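
/*
 * Open/teardown sketch: with group == true every evsel after the first
 * is opened against the leader's fds, so the kernel schedules them as
 * one group; on failure the helper has already closed whatever it
 * opened and stored the error in errno:
 *
 *	if (perf_evlist__open(evlist, true) < 0) {
 *		perror("perf_evlist__open");
 *		return -1;
 *	}
 */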

int perf_evlist__prepare_workload(struct perf_evlist *evlist,
				  struct perf_record_opts *opts,
				  const char *argv[])
{
	int child_ready_pipe[2], go_pipe[2];
	char bf;

	if (pipe(child_ready_pipe) < 0) {
		perror("failed to create 'ready' pipe");
		return -1;
	}

	if (pipe(go_pipe) < 0) {
		perror("failed to create 'go' pipe");
		goto out_close_ready_pipe;
	}

	evlist->workload.pid = fork();
	if (evlist->workload.pid < 0) {
		perror("failed to fork");
		goto out_close_pipes;
	}

	if (!evlist->workload.pid) {
		if (opts->pipe_output)
			dup2(2, 1);

		close(child_ready_pipe[0]);
		close(go_pipe[1]);
		fcntl(go_pipe[0], F_SETFD, FD_CLOEXEC);

		/*
		 * Do a dummy execvp to get the PLT entry resolved,
		 * so we avoid the resolver overhead on the real
		 * execvp call.
		 */
		execvp("", (char **)argv);

		/*
		 * Tell the parent we're ready to go
		 */
		close(child_ready_pipe[1]);

		/*
		 * Wait until the parent tells us to go.
		 */
		if (read(go_pipe[0], &bf, 1) == -1)
			perror("unable to read pipe");

		execvp(argv[0], (char **)argv);

		perror(argv[0]);
		kill(getppid(), SIGUSR1);
		exit(-1);
	}

	if (perf_target__none(&opts->target))
		evlist->threads->map[0] = evlist->workload.pid;

	close(child_ready_pipe[1]);
	close(go_pipe[0]);
	/*
	 * wait for child to settle
	 */
	if (read(child_ready_pipe[0], &bf, 1) == -1) {
		perror("unable to read pipe");
		goto out_close_pipes;
	}

	evlist->workload.cork_fd = go_pipe[1];
	close(child_ready_pipe[0]);
	return 0;

out_close_pipes:
	close(go_pipe[0]);
	close(go_pipe[1]);
out_close_ready_pipe:
	close(child_ready_pipe[0]);
	close(child_ready_pipe[1]);
	return -1;
}

int perf_evlist__start_workload(struct perf_evlist *evlist)
{
	if (evlist->workload.cork_fd > 0) {
		/*
		 * Remove the cork, let it rip!
		 */
		return close(evlist->workload.cork_fd);
	}

	return 0;
}
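
/*
 * The two workload helpers form a handshake (sketch, record-style):
 * prepare forks the child, which blocks on the "go" pipe just before
 * its real execvp(); start then closes the cork fd, releasing the exec,
 * so counters can be set up on the child's pid in between:
 *
 *	perf_evlist__prepare_workload(evlist, opts, argv);
 *	... open counters, mmap, etc. ...
 *	perf_evlist__start_workload(evlist);
 */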