// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (C) 2011, Red Hat Inc, Arnaldo Carvalho de Melo <acme@redhat.com>
 *
 * Parts came from builtin-{top,stat,record}.c, see those files for further
 * copyright notes.
 */
#include <api/fs/fs.h>
#include <errno.h>
#include <inttypes.h>
#include <poll.h>
#include "cpumap.h"
#include "util/mmap.h"
#include "thread_map.h"
#include "target.h"
#include "evlist.h"
#include "evsel.h"
#include "debug.h"
#include "units.h"
#include "util.h" // page_size
#include "../perf.h"
#include "asm/bug.h"
#include "bpf-event.h"
#include <signal.h>
#include <unistd.h>
#include <sched.h>
#include <stdlib.h>

#include "parse-events.h"
#include <subcmd/parse-options.h>

#include <fcntl.h>
#include <sys/ioctl.h>
#include <sys/mman.h>

#include <linux/bitops.h>
#include <linux/hash.h>
#include <linux/log2.h>
#include <linux/err.h>
#include <linux/string.h>
#include <linux/zalloc.h>
#include <perf/evlist.h>
#include <perf/evsel.h>
#include <perf/cpumap.h>

#include <internal/xyarray.h>

#ifdef LACKS_SIGQUEUE_PROTOTYPE
int sigqueue(pid_t pid, int sig, const union sigval value);
#endif

#define FD(e, x, y) (*(int *)xyarray__entry(e->core.fd, x, y))
#define SID(e, x, y) xyarray__entry(e->sample_id, x, y)
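/*
 * Initialize an evlist allocated by the caller: empty sample-id hash,
 * core evlist state, the given cpu/thread maps and a 64-entry pollfd
 * array.
 */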
void evlist__init(struct evlist *evlist, struct perf_cpu_map *cpus,
		  struct perf_thread_map *threads)
{
	int i;

	for (i = 0; i < PERF_EVLIST__HLIST_SIZE; ++i)
		INIT_HLIST_HEAD(&evlist->heads[i]);
	perf_evlist__init(&evlist->core);
	perf_evlist__set_maps(&evlist->core, cpus, threads);
	fdarray__init(&evlist->core.pollfd, 64);
	evlist->workload.pid = -1;
	evlist->bkw_mmap_state = BKW_MMAP_NOTREADY;
}

struct evlist *evlist__new(void)
{
	struct evlist *evlist = zalloc(sizeof(*evlist));

	if (evlist != NULL)
		evlist__init(evlist, NULL, NULL);

	return evlist;
}

struct evlist *perf_evlist__new_default(void)
{
	struct evlist *evlist = evlist__new();

	if (evlist && perf_evlist__add_default(evlist)) {
		evlist__delete(evlist);
		evlist = NULL;
	}

	return evlist;
}

struct evlist *perf_evlist__new_dummy(void)
{
	struct evlist *evlist = evlist__new();

	if (evlist && perf_evlist__add_dummy(evlist)) {
		evlist__delete(evlist);
		evlist = NULL;
	}

	return evlist;
}

/**
 * perf_evlist__set_id_pos - set the positions of event ids.
 * @evlist: selected event list
 *
 * Events with compatible sample types all have the same id_pos
 * and is_pos. For convenience, put a copy on evlist.
 */
void perf_evlist__set_id_pos(struct evlist *evlist)
{
	struct evsel *first = perf_evlist__first(evlist);

	evlist->id_pos = first->id_pos;
	evlist->is_pos = first->is_pos;
}

static void perf_evlist__update_id_pos(struct evlist *evlist)
{
	struct evsel *evsel;

	evlist__for_each_entry(evlist, evsel)
		perf_evsel__calc_id_pos(evsel);

	perf_evlist__set_id_pos(evlist);
}

static void evlist__purge(struct evlist *evlist)
{
	struct evsel *pos, *n;

	evlist__for_each_entry_safe(evlist, n, pos) {
		list_del_init(&pos->core.node);
		pos->evlist = NULL;
		evsel__delete(pos);
	}

	evlist->core.nr_entries = 0;
}

void evlist__exit(struct evlist *evlist)
{
	zfree(&evlist->mmap);
	zfree(&evlist->overwrite_mmap);
	fdarray__exit(&evlist->core.pollfd);
}

void evlist__delete(struct evlist *evlist)
{
	if (evlist == NULL)
		return;

	evlist__munmap(evlist);
	evlist__close(evlist);
	perf_cpu_map__put(evlist->core.cpus);
	perf_thread_map__put(evlist->core.threads);
	evlist->core.cpus = NULL;
	evlist->core.threads = NULL;
	evlist__purge(evlist);
	evlist__exit(evlist);
	free(evlist);
}
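/*
 * Add @entry at the tail of @evlist. The first evsel added becomes the
 * tracking event, and adding the first entry also establishes the
 * evlist's id_pos/is_pos.
 */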
void evlist__add(struct evlist *evlist, struct evsel *entry)
{
	entry->evlist = evlist;
	entry->idx = evlist->core.nr_entries;
	entry->tracking = !entry->idx;

	perf_evlist__add(&evlist->core, &entry->core);

	if (evlist->core.nr_entries == 1)
		perf_evlist__set_id_pos(evlist);
}

void evlist__remove(struct evlist *evlist, struct evsel *evsel)
{
	evsel->evlist = NULL;
	perf_evlist__remove(&evlist->core, &evsel->core);
}

void perf_evlist__splice_list_tail(struct evlist *evlist,
				   struct list_head *list)
{
	struct evsel *evsel, *temp;

	__evlist__for_each_entry_safe(list, temp, evsel) {
		list_del_init(&evsel->core.node);
		evlist__add(evlist, evsel);
	}
}
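/*
 * Make the first event on @list the group leader of every event on it;
 * nr_members counts from the leader up to and including the last entry.
 */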
void __perf_evlist__set_leader(struct list_head *list)
{
	struct evsel *evsel, *leader;

	leader = list_entry(list->next, struct evsel, core.node);
	evsel = list_entry(list->prev, struct evsel, core.node);

	leader->core.nr_members = evsel->idx - leader->idx + 1;

	__evlist__for_each_entry(list, evsel) {
		evsel->leader = leader;
	}
}

void perf_evlist__set_leader(struct evlist *evlist)
{
	if (evlist->core.nr_entries) {
		evlist->nr_groups = evlist->core.nr_entries > 1 ? 1 : 0;
		__perf_evlist__set_leader(&evlist->core.entries);
	}
}

int __perf_evlist__add_default(struct evlist *evlist, bool precise)
{
	struct evsel *evsel = perf_evsel__new_cycles(precise);

	if (evsel == NULL)
		return -ENOMEM;

	evlist__add(evlist, evsel);
	return 0;
}

int perf_evlist__add_dummy(struct evlist *evlist)
{
	struct perf_event_attr attr = {
		.type	= PERF_TYPE_SOFTWARE,
		.config	= PERF_COUNT_SW_DUMMY,
		.size	= sizeof(attr), /* to capture ABI version */
	};
	struct evsel *evsel = perf_evsel__new_idx(&attr, evlist->core.nr_entries);

	if (evsel == NULL)
		return -ENOMEM;

	evlist__add(evlist, evsel);
	return 0;
}

static int evlist__add_attrs(struct evlist *evlist,
			     struct perf_event_attr *attrs, size_t nr_attrs)
{
	struct evsel *evsel, *n;
	LIST_HEAD(head);
	size_t i;

	for (i = 0; i < nr_attrs; i++) {
		evsel = perf_evsel__new_idx(attrs + i, evlist->core.nr_entries + i);
		if (evsel == NULL)
			goto out_delete_partial_list;
		list_add_tail(&evsel->core.node, &head);
	}

	perf_evlist__splice_list_tail(evlist, &head);

	return 0;

out_delete_partial_list:
	__evlist__for_each_entry_safe(&head, n, evsel)
		evsel__delete(evsel);
	return -1;
}

int __perf_evlist__add_default_attrs(struct evlist *evlist,
				     struct perf_event_attr *attrs, size_t nr_attrs)
{
	size_t i;

	for (i = 0; i < nr_attrs; i++)
		event_attr_init(attrs + i);

	return evlist__add_attrs(evlist, attrs, nr_attrs);
}

struct evsel *
perf_evlist__find_tracepoint_by_id(struct evlist *evlist, int id)
{
	struct evsel *evsel;

	evlist__for_each_entry(evlist, evsel) {
		if (evsel->core.attr.type == PERF_TYPE_TRACEPOINT &&
		    (int)evsel->core.attr.config == id)
			return evsel;
	}

	return NULL;
}

struct evsel *
perf_evlist__find_tracepoint_by_name(struct evlist *evlist,
				     const char *name)
{
	struct evsel *evsel;

	evlist__for_each_entry(evlist, evsel) {
		if ((evsel->core.attr.type == PERF_TYPE_TRACEPOINT) &&
		    (strcmp(evsel->name, name) == 0))
			return evsel;
	}

	return NULL;
}

int perf_evlist__add_newtp(struct evlist *evlist,
			   const char *sys, const char *name, void *handler)
{
	struct evsel *evsel = perf_evsel__newtp(sys, name);

	if (IS_ERR(evsel))
		return -1;

	evsel->handler = handler;
	evlist__add(evlist, evsel);
	return 0;
}

static int perf_evlist__nr_threads(struct evlist *evlist,
				   struct evsel *evsel)
{
	if (evsel->core.system_wide)
		return 1;
	else
		return perf_thread_map__nr(evlist->core.threads);
}
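/*
 * Disable all events on @evlist. Only group leaders with an open fd are
 * toggled: group members are scheduled together with their leader, so
 * disabling the leader stops the whole group.
 */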
void evlist__disable(struct evlist *evlist)
{
	struct evsel *pos;

	evlist__for_each_entry(evlist, pos) {
		if (pos->disabled || !perf_evsel__is_group_leader(pos) || !pos->core.fd)
			continue;
		evsel__disable(pos);
	}

	evlist->enabled = false;
}

void evlist__enable(struct evlist *evlist)
{
	struct evsel *pos;

	evlist__for_each_entry(evlist, pos) {
		if (!perf_evsel__is_group_leader(pos) || !pos->core.fd)
			continue;
		evsel__enable(pos);
	}

	evlist->enabled = true;
}

void perf_evlist__toggle_enable(struct evlist *evlist)
{
	(evlist->enabled ? evlist__disable : evlist__enable)(evlist);
}

static int perf_evlist__enable_event_cpu(struct evlist *evlist,
					 struct evsel *evsel, int cpu)
{
	int thread;
	int nr_threads = perf_evlist__nr_threads(evlist, evsel);

	if (!evsel->core.fd)
		return -EINVAL;

	for (thread = 0; thread < nr_threads; thread++) {
		int err = ioctl(FD(evsel, cpu, thread), PERF_EVENT_IOC_ENABLE, 0);
		if (err)
			return err;
	}
	return 0;
}

static int perf_evlist__enable_event_thread(struct evlist *evlist,
					    struct evsel *evsel,
					    int thread)
{
	int cpu;
	int nr_cpus = perf_cpu_map__nr(evlist->core.cpus);

	if (!evsel->core.fd)
		return -EINVAL;

	for (cpu = 0; cpu < nr_cpus; cpu++) {
		int err = ioctl(FD(evsel, cpu, thread), PERF_EVENT_IOC_ENABLE, 0);
		if (err)
			return err;
	}
	return 0;
}

int perf_evlist__enable_event_idx(struct evlist *evlist,
				  struct evsel *evsel, int idx)
{
	bool per_cpu_mmaps = !perf_cpu_map__empty(evlist->core.cpus);

	if (per_cpu_mmaps)
		return perf_evlist__enable_event_cpu(evlist, evsel, idx);
	else
		return perf_evlist__enable_event_thread(evlist, evsel, idx);
}

int perf_evlist__alloc_pollfd(struct evlist *evlist)
{
	int nr_cpus = perf_cpu_map__nr(evlist->core.cpus);
	int nr_threads = perf_thread_map__nr(evlist->core.threads);
	int nfds = 0;
	struct evsel *evsel;

	evlist__for_each_entry(evlist, evsel) {
		if (evsel->core.system_wide)
			nfds += nr_cpus;
		else
			nfds += nr_cpus * nr_threads;
	}

	if (fdarray__available_entries(&evlist->core.pollfd) < nfds &&
	    fdarray__grow(&evlist->core.pollfd, nfds) < 0)
		return -ENOMEM;

	return 0;
}

static int __perf_evlist__add_pollfd(struct evlist *evlist, int fd,
				     struct mmap *map, short revent)
{
	int pos = fdarray__add(&evlist->core.pollfd, fd, revent | POLLERR | POLLHUP);
	/*
	 * Save the idx so that when we filter out POLLHUP'ed fds we can
	 * close the associated evlist->mmap[] entry.
	 */
	if (pos >= 0) {
		evlist->core.pollfd.priv[pos].ptr = map;

		fcntl(fd, F_SETFL, O_NONBLOCK);
	}

	return pos;
}

int perf_evlist__add_pollfd(struct evlist *evlist, int fd)
{
	return __perf_evlist__add_pollfd(evlist, fd, NULL, POLLIN);
}

static void perf_evlist__munmap_filtered(struct fdarray *fda, int fd,
					 void *arg __maybe_unused)
{
	struct mmap *map = fda->priv[fd].ptr;

	if (map)
		perf_mmap__put(map);
}

int perf_evlist__filter_pollfd(struct evlist *evlist, short revents_and_mask)
{
	return fdarray__filter(&evlist->core.pollfd, revents_and_mask,
			       perf_evlist__munmap_filtered, NULL);
}

int perf_evlist__poll(struct evlist *evlist, int timeout)
{
	return fdarray__poll(&evlist->core.pollfd, timeout);
}

static void perf_evlist__id_hash(struct evlist *evlist,
				 struct evsel *evsel,
				 int cpu, int thread, u64 id)
{
	int hash;
	struct perf_sample_id *sid = SID(evsel, cpu, thread);

	sid->id = id;
	sid->evsel = evsel;
	hash = hash_64(sid->id, PERF_EVLIST__HLIST_BITS);
	hlist_add_head(&sid->node, &evlist->heads[hash]);
}

void perf_evlist__id_add(struct evlist *evlist, struct evsel *evsel,
			 int cpu, int thread, u64 id)
{
	perf_evlist__id_hash(evlist, evsel, cpu, thread, id);
	evsel->id[evsel->ids++] = id;
}
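/*
 * Resolve the event ID for (evsel, cpu, thread) and add it to the hash.
 * Prefer the PERF_EVENT_IOC_ID ioctl; on kernels without it (ENOTTY),
 * fall back to parsing the ID out of a read() of the counter, which is
 * not possible with the group read format.
 */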
int perf_evlist__id_add_fd(struct evlist *evlist,
			   struct evsel *evsel,
			   int cpu, int thread, int fd)
{
	u64 read_data[4] = { 0, };
	int id_idx = 1; /* The first entry is the counter value */
	u64 id;
	int ret;

	ret = ioctl(fd, PERF_EVENT_IOC_ID, &id);
	if (!ret)
		goto add;

	if (errno != ENOTTY)
		return -1;

	/* Legacy way to get the event id... All hail to old kernels! */

	/*
	 * This way does not work with group format read, so bail
	 * out in that case.
	 */
	if (perf_evlist__read_format(evlist) & PERF_FORMAT_GROUP)
		return -1;

	if (!(evsel->core.attr.read_format & PERF_FORMAT_ID) ||
	    read(fd, &read_data, sizeof(read_data)) == -1)
		return -1;

	if (evsel->core.attr.read_format & PERF_FORMAT_TOTAL_TIME_ENABLED)
		++id_idx;
	if (evsel->core.attr.read_format & PERF_FORMAT_TOTAL_TIME_RUNNING)
		++id_idx;

	id = read_data[id_idx];

add:
	perf_evlist__id_add(evlist, evsel, cpu, thread, id);
	return 0;
}

static void perf_evlist__set_sid_idx(struct evlist *evlist,
				     struct evsel *evsel, int idx, int cpu,
				     int thread)
{
	struct perf_sample_id *sid = SID(evsel, cpu, thread);

	sid->idx = idx;
	if (evlist->core.cpus && cpu >= 0)
		sid->cpu = evlist->core.cpus->map[cpu];
	else
		sid->cpu = -1;
	if (!evsel->core.system_wide && evlist->core.threads && thread >= 0)
		sid->tid = perf_thread_map__pid(evlist->core.threads, thread);
	else
		sid->tid = -1;
}

struct perf_sample_id *perf_evlist__id2sid(struct evlist *evlist, u64 id)
{
	struct hlist_head *head;
	struct perf_sample_id *sid;
	int hash;

	hash = hash_64(id, PERF_EVLIST__HLIST_BITS);
	head = &evlist->heads[hash];

	hlist_for_each_entry(sid, head, node)
		if (sid->id == id)
			return sid;

	return NULL;
}

struct evsel *perf_evlist__id2evsel(struct evlist *evlist, u64 id)
{
	struct perf_sample_id *sid;

	if (evlist->core.nr_entries == 1 || !id)
		return perf_evlist__first(evlist);

	sid = perf_evlist__id2sid(evlist, id);
	if (sid)
		return sid->evsel;

	if (!perf_evlist__sample_id_all(evlist))
		return perf_evlist__first(evlist);

	return NULL;
}

struct evsel *perf_evlist__id2evsel_strict(struct evlist *evlist,
					   u64 id)
{
	struct perf_sample_id *sid;

	if (!id)
		return NULL;

	sid = perf_evlist__id2sid(evlist, id);
	if (sid)
		return sid->evsel;

	return NULL;
}

static int perf_evlist__event2id(struct evlist *evlist,
				 union perf_event *event, u64 *id)
{
	const __u64 *array = event->sample.array;
	ssize_t n;

	n = (event->header.size - sizeof(event->header)) >> 3;

	if (event->header.type == PERF_RECORD_SAMPLE) {
		if (evlist->id_pos >= n)
			return -1;
		*id = array[evlist->id_pos];
	} else {
		if (evlist->is_pos > n)
			return -1;
		n -= evlist->is_pos;
		*id = array[n];
	}
	return 0;
}
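/*
 * Map an event read from the ring buffer back to the evsel that
 * generated it, using the sample ID hash. Single-event lists and
 * synthesized events (id == 0) short-circuit to the first evsel.
 */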
struct evsel *perf_evlist__event2evsel(struct evlist *evlist,
				       union perf_event *event)
{
	struct evsel *first = perf_evlist__first(evlist);
	struct hlist_head *head;
	struct perf_sample_id *sid;
	int hash;
	u64 id;

	if (evlist->core.nr_entries == 1)
		return first;

	if (!first->core.attr.sample_id_all &&
	    event->header.type != PERF_RECORD_SAMPLE)
		return first;

	if (perf_evlist__event2id(evlist, event, &id))
		return NULL;

	/* Synthesized events have an id of zero */
	if (!id)
		return first;

	hash = hash_64(id, PERF_EVLIST__HLIST_BITS);
	head = &evlist->heads[hash];

	hlist_for_each_entry(sid, head, node) {
		if (sid->id == id)
			return sid->evsel;
	}
	return NULL;
}

static int perf_evlist__set_paused(struct evlist *evlist, bool value)
{
	int i;

	if (!evlist->overwrite_mmap)
		return 0;

	for (i = 0; i < evlist->core.nr_mmaps; i++) {
		int fd = evlist->overwrite_mmap[i].core.fd;
		int err;

		if (fd < 0)
			continue;
		err = ioctl(fd, PERF_EVENT_IOC_PAUSE_OUTPUT, value ? 1 : 0);
		if (err)
			return err;
	}
	return 0;
}

static int perf_evlist__pause(struct evlist *evlist)
{
	return perf_evlist__set_paused(evlist, true);
}

static int perf_evlist__resume(struct evlist *evlist)
{
	return perf_evlist__set_paused(evlist, false);
}

static void evlist__munmap_nofree(struct evlist *evlist)
{
	int i;

	if (evlist->mmap)
		for (i = 0; i < evlist->core.nr_mmaps; i++)
			perf_mmap__munmap(&evlist->mmap[i]);

	if (evlist->overwrite_mmap)
		for (i = 0; i < evlist->core.nr_mmaps; i++)
			perf_mmap__munmap(&evlist->overwrite_mmap[i]);
}

void evlist__munmap(struct evlist *evlist)
{
	evlist__munmap_nofree(evlist);
	zfree(&evlist->mmap);
	zfree(&evlist->overwrite_mmap);
}

static struct mmap *evlist__alloc_mmap(struct evlist *evlist,
				       bool overwrite)
{
	int i;
	struct mmap *map;

	evlist->core.nr_mmaps = perf_cpu_map__nr(evlist->core.cpus);
	if (perf_cpu_map__empty(evlist->core.cpus))
		evlist->core.nr_mmaps = perf_thread_map__nr(evlist->core.threads);
	map = zalloc(evlist->core.nr_mmaps * sizeof(struct mmap));
	if (!map)
		return NULL;

	for (i = 0; i < evlist->core.nr_mmaps; i++) {
		map[i].core.fd = -1;
		map[i].core.overwrite = overwrite;
		/*
		 * When the perf_mmap() call is made we grab one refcount, plus
		 * one extra to let perf_mmap__consume() get the last
		 * events after all real references (perf_mmap__get()) are
		 * dropped.
		 *
		 * Each PERF_EVENT_IOC_SET_OUTPUT points to this mmap and
		 * thus does perf_mmap__get() on it.
		 */
		refcount_set(&map[i].core.refcnt, 0);
	}
	return map;
}

static bool
perf_evlist__should_poll(struct evlist *evlist __maybe_unused,
			 struct evsel *evsel)
{
	if (evsel->core.attr.write_backward)
		return false;
	return true;
}
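/*
 * mmap (or redirect via PERF_EVENT_IOC_SET_OUTPUT) every fd of every
 * evsel for one mmap slot @idx. Backward (write_backward) events go to
 * a separate, read-only set of overwrite maps allocated on first use.
 */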
static int evlist__mmap_per_evsel(struct evlist *evlist, int idx,
				  struct mmap_params *mp, int cpu_idx,
				  int thread, int *_output, int *_output_overwrite)
{
	struct evsel *evsel;
	int revent;
	int evlist_cpu = cpu_map__cpu(evlist->core.cpus, cpu_idx);

	evlist__for_each_entry(evlist, evsel) {
		struct mmap *maps = evlist->mmap;
		int *output = _output;
		int fd;
		int cpu;

		mp->prot = PROT_READ | PROT_WRITE;
		if (evsel->core.attr.write_backward) {
			output = _output_overwrite;
			maps = evlist->overwrite_mmap;

			if (!maps) {
				maps = evlist__alloc_mmap(evlist, true);
				if (!maps)
					return -1;
				evlist->overwrite_mmap = maps;
				if (evlist->bkw_mmap_state == BKW_MMAP_NOTREADY)
					perf_evlist__toggle_bkw_mmap(evlist, BKW_MMAP_RUNNING);
			}
			mp->prot &= ~PROT_WRITE;
		}

		if (evsel->core.system_wide && thread)
			continue;

		cpu = perf_cpu_map__idx(evsel->core.cpus, evlist_cpu);
		if (cpu == -1)
			continue;

		fd = FD(evsel, cpu, thread);

		if (*output == -1) {
			*output = fd;

			if (perf_mmap__mmap(&maps[idx], mp, *output, evlist_cpu) < 0)
				return -1;
		} else {
			if (ioctl(fd, PERF_EVENT_IOC_SET_OUTPUT, *output) != 0)
				return -1;

			perf_mmap__get(&maps[idx]);
		}

		revent = perf_evlist__should_poll(evlist, evsel) ? POLLIN : 0;

		/*
		 * The system_wide flag causes a selected event to be opened
		 * always without a pid.  Consequently it will never get a
		 * POLLHUP, but it is used for tracking in combination with
		 * other events, so it should not need to be polled anyway.
		 * Therefore don't add it for polling.
		 */
		if (!evsel->core.system_wide &&
		    __perf_evlist__add_pollfd(evlist, fd, &maps[idx], revent) < 0) {
			perf_mmap__put(&maps[idx]);
			return -1;
		}

		if (evsel->core.attr.read_format & PERF_FORMAT_ID) {
			if (perf_evlist__id_add_fd(evlist, evsel, cpu, thread,
						   fd) < 0)
				return -1;
			perf_evlist__set_sid_idx(evlist, evsel, idx, cpu,
						 thread);
		}
	}

	return 0;
}

static int evlist__mmap_per_cpu(struct evlist *evlist,
				struct mmap_params *mp)
{
	int cpu, thread;
	int nr_cpus = perf_cpu_map__nr(evlist->core.cpus);
	int nr_threads = perf_thread_map__nr(evlist->core.threads);

	pr_debug2("perf event ring buffer mmapped per cpu\n");
	for (cpu = 0; cpu < nr_cpus; cpu++) {
		int output = -1;
		int output_overwrite = -1;

		auxtrace_mmap_params__set_idx(&mp->auxtrace_mp, evlist, cpu,
					      true);

		for (thread = 0; thread < nr_threads; thread++) {
			if (evlist__mmap_per_evsel(evlist, cpu, mp, cpu,
						   thread, &output, &output_overwrite))
				goto out_unmap;
		}
	}

	return 0;

out_unmap:
	evlist__munmap_nofree(evlist);
	return -1;
}

static int evlist__mmap_per_thread(struct evlist *evlist,
				   struct mmap_params *mp)
{
	int thread;
	int nr_threads = perf_thread_map__nr(evlist->core.threads);

	pr_debug2("perf event ring buffer mmapped per thread\n");
	for (thread = 0; thread < nr_threads; thread++) {
		int output = -1;
		int output_overwrite = -1;

		auxtrace_mmap_params__set_idx(&mp->auxtrace_mp, evlist, thread,
					      false);

		if (evlist__mmap_per_evsel(evlist, thread, mp, 0, thread,
					   &output, &output_overwrite))
			goto out_unmap;
	}

	return 0;

out_unmap:
	evlist__munmap_nofree(evlist);
	return -1;
}

unsigned long perf_event_mlock_kb_in_pages(void)
{
	unsigned long pages;
	int max;

	if (sysctl__read_int("kernel/perf_event_mlock_kb", &max) < 0) {
		/*
		 * Pick a once upon a time good value, i.e. things look
		 * strange since we can't read a sysctl value, but let's not
		 * die yet...
		 */
		max = 512;
	} else {
		max -= (page_size / 1024);
	}

	pages = (max * 1024) / page_size;
	if (!is_power_of_2(pages))
		pages = rounddown_pow_of_two(pages);

	return pages;
}

size_t evlist__mmap_size(unsigned long pages)
{
	if (pages == UINT_MAX)
		pages = perf_event_mlock_kb_in_pages();
	else if (!is_power_of_2(pages))
		return 0;

	return (pages + 1) * page_size;
}
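/*
 * Parse a --mmap-pages style argument: either a plain page count or a
 * size with a B/K/M/G suffix that is converted to pages. Values that
 * are not a power of two are rounded up (with a notice), and anything
 * above @max is rejected with -EINVAL.
 */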
static long parse_pages_arg(const char *str, unsigned long min,
			    unsigned long max)
{
	unsigned long pages, val;
	static struct parse_tag tags[] = {
		{ .tag  = 'B', .mult = 1       },
		{ .tag  = 'K', .mult = 1 << 10 },
		{ .tag  = 'M', .mult = 1 << 20 },
		{ .tag  = 'G', .mult = 1 << 30 },
		{ .tag  = 0 },
	};

	if (str == NULL)
		return -EINVAL;

	val = parse_tag_value(str, tags);
	if (val != (unsigned long) -1) {
		/* we got file size value */
		pages = PERF_ALIGN(val, page_size) / page_size;
	} else {
		/* we got pages count value */
		char *eptr;
		pages = strtoul(str, &eptr, 10);
		if (*eptr != '\0')
			return -EINVAL;
	}

	if (pages == 0 && min == 0) {
		/* leave number of pages at 0 */
	} else if (!is_power_of_2(pages)) {
		char buf[100];

		/* round pages up to next power of 2 */
		pages = roundup_pow_of_two(pages);
		if (!pages)
			return -EINVAL;

		unit_number__scnprintf(buf, sizeof(buf), pages * page_size);
		pr_info("rounding mmap pages size to %s (%lu pages)\n",
			buf, pages);
	}

	if (pages > max)
		return -EINVAL;

	return pages;
}

int __perf_evlist__parse_mmap_pages(unsigned int *mmap_pages, const char *str)
{
	unsigned long max = UINT_MAX;
	long pages;

	if (max > SIZE_MAX / page_size)
		max = SIZE_MAX / page_size;

	pages = parse_pages_arg(str, 1, max);
	if (pages < 0) {
		pr_err("Invalid argument for --mmap_pages/-m\n");
		return -1;
	}

	*mmap_pages = pages;
	return 0;
}

int perf_evlist__parse_mmap_pages(const struct option *opt, const char *str,
				  int unset __maybe_unused)
{
	return __perf_evlist__parse_mmap_pages(opt->value, str);
}

/**
 * evlist__mmap_ex - Create mmaps to receive events.
 * @evlist: list of events
 * @pages: map length in pages
 * @overwrite: overwrite older events?
 * @auxtrace_pages: auxtrace map length in pages
 * @auxtrace_overwrite: overwrite older auxtrace data?
 *
 * If @overwrite is %false the user needs to signal event consumption using
 * perf_mmap__write_tail(). Using evlist__mmap_read() does this
 * automatically.
 *
 * Similarly, if @auxtrace_overwrite is %false the user needs to signal data
 * consumption using auxtrace_mmap__write_tail().
 *
 * Return: %0 on success, negative error code otherwise.
 */
int evlist__mmap_ex(struct evlist *evlist, unsigned int pages,
		    unsigned int auxtrace_pages,
		    bool auxtrace_overwrite, int nr_cblocks, int affinity, int flush,
		    int comp_level)
{
	struct evsel *evsel;
	const struct perf_cpu_map *cpus = evlist->core.cpus;
	const struct perf_thread_map *threads = evlist->core.threads;
	/*
	 * Delay setting mp.prot: set it before calling perf_mmap__mmap.
	 * Its value is decided by evsel's write_backward.
	 * So &mp should not be passed through const pointer.
	 */
	struct mmap_params mp = { .nr_cblocks = nr_cblocks, .affinity = affinity, .flush = flush,
				  .comp_level = comp_level };

	if (!evlist->mmap)
		evlist->mmap = evlist__alloc_mmap(evlist, false);
	if (!evlist->mmap)
		return -ENOMEM;

	if (evlist->core.pollfd.entries == NULL && perf_evlist__alloc_pollfd(evlist) < 0)
		return -ENOMEM;

	evlist->core.mmap_len = evlist__mmap_size(pages);
	pr_debug("mmap size %zuB\n", evlist->core.mmap_len);
	mp.mask = evlist->core.mmap_len - page_size - 1;

	auxtrace_mmap_params__init(&mp.auxtrace_mp, evlist->core.mmap_len,
				   auxtrace_pages, auxtrace_overwrite);

	evlist__for_each_entry(evlist, evsel) {
		if ((evsel->core.attr.read_format & PERF_FORMAT_ID) &&
		    evsel->sample_id == NULL &&
		    perf_evsel__alloc_id(evsel, perf_cpu_map__nr(cpus), threads->nr) < 0)
			return -ENOMEM;
	}

	if (perf_cpu_map__empty(cpus))
		return evlist__mmap_per_thread(evlist, &mp);

	return evlist__mmap_per_cpu(evlist, &mp);
}

int evlist__mmap(struct evlist *evlist, unsigned int pages)
{
	return evlist__mmap_ex(evlist, pages, 0, false, 0, PERF_AFFINITY_SYS, 1, 0);
}
int perf_evlist__create_maps(struct evlist *evlist, struct target *target)
{
	bool all_threads = (target->per_thread && target->system_wide);
	struct perf_cpu_map *cpus;
	struct perf_thread_map *threads;

	/*
	 * If both '-a' and '--per-thread' are specified to perf record,
	 * perf record overrides '--per-thread': target->per_thread = false
	 * and target->system_wide = true.
	 *
	 * If '--per-thread' alone is specified to perf record,
	 * target->per_thread = true and target->system_wide = false.
	 *
	 * So target->per_thread && target->system_wide is false.
	 * For perf record, thread_map__new_str doesn't call
	 * thread_map__new_all_cpus. That keeps perf record's
	 * current behavior.
	 *
	 * For perf stat, it allows the case that target->per_thread and
	 * target->system_wide are both true. It means to collect system-wide
	 * per-thread data. thread_map__new_str will call
	 * thread_map__new_all_cpus to enumerate all threads.
	 */
	threads = thread_map__new_str(target->pid, target->tid, target->uid,
				      all_threads);

	if (!threads)
		return -1;

	if (target__uses_dummy_map(target))
		cpus = perf_cpu_map__dummy_new();
	else
		cpus = perf_cpu_map__new(target->cpu_list);

	if (!cpus)
		goto out_delete_threads;

	evlist->core.has_user_cpus = !!target->cpu_list;

	perf_evlist__set_maps(&evlist->core, cpus, threads);

	return 0;

out_delete_threads:
	perf_thread_map__put(threads);
	return -1;
}
void __perf_evlist__set_sample_bit(struct evlist *evlist,
				   enum perf_event_sample_format bit)
{
	struct evsel *evsel;

	evlist__for_each_entry(evlist, evsel)
		__perf_evsel__set_sample_bit(evsel, bit);
}

void __perf_evlist__reset_sample_bit(struct evlist *evlist,
				     enum perf_event_sample_format bit)
{
	struct evsel *evsel;

	evlist__for_each_entry(evlist, evsel)
		__perf_evsel__reset_sample_bit(evsel, bit);
}

int perf_evlist__apply_filters(struct evlist *evlist, struct evsel **err_evsel)
{
	struct evsel *evsel;
	int err = 0;

	evlist__for_each_entry(evlist, evsel) {
		if (evsel->filter == NULL)
			continue;

		/*
		 * Filters only work for tracepoint events, which don't have a
		 * cpu limit, so the evlist and evsel cpu maps should always match.
		 */
		err = perf_evsel__apply_filter(&evsel->core, evsel->filter);
		if (err) {
			*err_evsel = evsel;
			break;
		}
	}

	return err;
}

int perf_evlist__set_tp_filter(struct evlist *evlist, const char *filter)
{
	struct evsel *evsel;
	int err = 0;

	evlist__for_each_entry(evlist, evsel) {
		if (evsel->core.attr.type != PERF_TYPE_TRACEPOINT)
			continue;

		err = perf_evsel__set_filter(evsel, filter);
		if (err)
			break;
	}

	return err;
}

int perf_evlist__set_tp_filter_pids(struct evlist *evlist, size_t npids, pid_t *pids)
{
	char *filter;
	int ret = -1;
	size_t i;

	for (i = 0; i < npids; ++i) {
		if (i == 0) {
			if (asprintf(&filter, "common_pid != %d", pids[i]) < 0)
				return -1;
		} else {
			char *tmp;

			if (asprintf(&tmp, "%s && common_pid != %d", filter, pids[i]) < 0)
				goto out_free;

			free(filter);
			filter = tmp;
		}
	}

	ret = perf_evlist__set_tp_filter(evlist, filter);
out_free:
	free(filter);
	return ret;
}

int perf_evlist__set_tp_filter_pid(struct evlist *evlist, pid_t pid)
{
	return perf_evlist__set_tp_filter_pids(evlist, 1, &pid);
}
bool perf_evlist__valid_sample_type(struct evlist *evlist)
{
	struct evsel *pos;

	if (evlist->core.nr_entries == 1)
		return true;

	if (evlist->id_pos < 0 || evlist->is_pos < 0)
		return false;

	evlist__for_each_entry(evlist, pos) {
		if (pos->id_pos != evlist->id_pos ||
		    pos->is_pos != evlist->is_pos)
			return false;
	}

	return true;
}

u64 __perf_evlist__combined_sample_type(struct evlist *evlist)
{
	struct evsel *evsel;

	if (evlist->combined_sample_type)
		return evlist->combined_sample_type;

	evlist__for_each_entry(evlist, evsel)
		evlist->combined_sample_type |= evsel->core.attr.sample_type;

	return evlist->combined_sample_type;
}

u64 perf_evlist__combined_sample_type(struct evlist *evlist)
{
	evlist->combined_sample_type = 0;
	return __perf_evlist__combined_sample_type(evlist);
}

u64 perf_evlist__combined_branch_type(struct evlist *evlist)
{
	struct evsel *evsel;
	u64 branch_type = 0;

	evlist__for_each_entry(evlist, evsel)
		branch_type |= evsel->core.attr.branch_sample_type;
	return branch_type;
}

bool perf_evlist__valid_read_format(struct evlist *evlist)
{
	struct evsel *first = perf_evlist__first(evlist), *pos = first;
	u64 read_format = first->core.attr.read_format;
	u64 sample_type = first->core.attr.sample_type;

	evlist__for_each_entry(evlist, pos) {
		if (read_format != pos->core.attr.read_format)
			return false;
	}

	/* PERF_SAMPLE_READ implies PERF_FORMAT_ID. */
	if ((sample_type & PERF_SAMPLE_READ) &&
	    !(read_format & PERF_FORMAT_ID)) {
		return false;
	}

	return true;
}

u64 perf_evlist__read_format(struct evlist *evlist)
{
	struct evsel *first = perf_evlist__first(evlist);
	return first->core.attr.read_format;
}

u16 perf_evlist__id_hdr_size(struct evlist *evlist)
{
	struct evsel *first = perf_evlist__first(evlist);
	struct perf_sample *data;
	u64 sample_type;
	u16 size = 0;

	if (!first->core.attr.sample_id_all)
		goto out;

	sample_type = first->core.attr.sample_type;

	if (sample_type & PERF_SAMPLE_TID)
		size += sizeof(data->tid) * 2;

	if (sample_type & PERF_SAMPLE_TIME)
		size += sizeof(data->time);

	if (sample_type & PERF_SAMPLE_ID)
		size += sizeof(data->id);

	if (sample_type & PERF_SAMPLE_STREAM_ID)
		size += sizeof(data->stream_id);

	if (sample_type & PERF_SAMPLE_CPU)
		size += sizeof(data->cpu) * 2;

	if (sample_type & PERF_SAMPLE_IDENTIFIER)
		size += sizeof(data->id);
out:
	return size;
}

bool perf_evlist__valid_sample_id_all(struct evlist *evlist)
{
	struct evsel *first = perf_evlist__first(evlist), *pos = first;

	evlist__for_each_entry_continue(evlist, pos) {
		if (first->core.attr.sample_id_all != pos->core.attr.sample_id_all)
			return false;
	}

	return true;
}

bool perf_evlist__sample_id_all(struct evlist *evlist)
{
	struct evsel *first = perf_evlist__first(evlist);
	return first->core.attr.sample_id_all;
}

void perf_evlist__set_selected(struct evlist *evlist,
			       struct evsel *evsel)
{
	evlist->selected = evsel;
}

void evlist__close(struct evlist *evlist)
{
	struct evsel *evsel;

	evlist__for_each_entry_reverse(evlist, evsel)
		evsel__close(evsel);
}

static int perf_evlist__create_syswide_maps(struct evlist *evlist)
{
	struct perf_cpu_map *cpus;
	struct perf_thread_map *threads;
	int err = -ENOMEM;

	/*
	 * Try reading /sys/devices/system/cpu/online to get
	 * an all cpus map.
	 *
	 * FIXME: -ENOMEM is the best we can do here, the cpu_map
	 * code needs an overhaul to properly forward the
	 * error, and we may not want to do that fallback to a
	 * default cpu identity map :-\
	 */
	cpus = perf_cpu_map__new(NULL);
	if (!cpus)
		goto out;

	threads = perf_thread_map__new_dummy();
	if (!threads)
		goto out_put;

	perf_evlist__set_maps(&evlist->core, cpus, threads);
	err = 0;
out:
	return err;
out_put:
	perf_cpu_map__put(cpus);
	goto out;
}
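/*
 * Open a counter fd for every evsel on the evlist. On any failure,
 * everything opened so far is closed again and errno is set from the
 * error code.
 */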
int evlist__open(struct evlist *evlist)
{
	struct evsel *evsel;
	int err;

	/*
	 * Default: one fd per CPU, all threads, aka systemwide
	 * as sys_perf_event_open(cpu = -1, thread = -1) is EINVAL
	 */
	if (evlist->core.threads == NULL && evlist->core.cpus == NULL) {
		err = perf_evlist__create_syswide_maps(evlist);
		if (err < 0)
			goto out_err;
	}

	perf_evlist__update_id_pos(evlist);

	evlist__for_each_entry(evlist, evsel) {
		err = evsel__open(evsel, evsel->core.cpus, evsel->core.threads);
		if (err < 0)
			goto out_err;
	}

	return 0;
out_err:
	evlist__close(evlist);
	errno = -err;
	return err;
}
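/*
 * Fork the workload but keep it corked: the child signals readiness
 * over one pipe, then blocks in read(2) on the "go" pipe until
 * perf_evlist__start_workload() writes the single byte that releases
 * the execvp() call. If @exec_error is set it is wired to SIGUSR1 so
 * exec failures can be reported back to the parent.
 */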
int perf_evlist__prepare_workload(struct evlist *evlist, struct target *target,
				  const char *argv[], bool pipe_output,
				  void (*exec_error)(int signo, siginfo_t *info, void *ucontext))
{
	int child_ready_pipe[2], go_pipe[2];
	char bf;

	if (pipe(child_ready_pipe) < 0) {
		perror("failed to create 'ready' pipe");
		return -1;
	}

	if (pipe(go_pipe) < 0) {
		perror("failed to create 'go' pipe");
		goto out_close_ready_pipe;
	}

	evlist->workload.pid = fork();
	if (evlist->workload.pid < 0) {
		perror("failed to fork");
		goto out_close_pipes;
	}

	if (!evlist->workload.pid) {
		int ret;

		if (pipe_output)
			dup2(2, 1);

		signal(SIGTERM, SIG_DFL);

		close(child_ready_pipe[0]);
		close(go_pipe[1]);
		fcntl(go_pipe[0], F_SETFD, FD_CLOEXEC);

		/*
		 * Tell the parent we're ready to go
		 */
		close(child_ready_pipe[1]);

		/*
		 * Wait until the parent tells us to go.
		 */
		ret = read(go_pipe[0], &bf, 1);
		/*
		 * The parent will ask for the execvp() to be performed by
		 * writing exactly one byte, in workload.cork_fd, usually via
		 * perf_evlist__start_workload().
		 *
		 * For cancelling the workload without actually running it,
		 * the parent will just close workload.cork_fd, without writing
		 * anything, i.e. read will return zero and we just exit()
		 * here.
		 */
		if (ret != 1) {
			if (ret == -1)
				perror("unable to read pipe");
			exit(ret);
		}

		execvp(argv[0], (char **)argv);

		if (exec_error) {
			union sigval val;

			val.sival_int = errno;
			if (sigqueue(getppid(), SIGUSR1, val))
				perror(argv[0]);
		} else
			perror(argv[0]);
		exit(-1);
	}

	if (exec_error) {
		struct sigaction act = {
			.sa_flags     = SA_SIGINFO,
			.sa_sigaction = exec_error,
		};
		sigaction(SIGUSR1, &act, NULL);
	}

	if (target__none(target)) {
		if (evlist->core.threads == NULL) {
			fprintf(stderr, "FATAL: evlist->threads needs to be set at this point (%s:%d).\n",
				__func__, __LINE__);
			goto out_close_pipes;
		}
		perf_thread_map__set_pid(evlist->core.threads, 0, evlist->workload.pid);
	}

	close(child_ready_pipe[1]);
	close(go_pipe[0]);
	/*
	 * wait for child to settle
	 */
	if (read(child_ready_pipe[0], &bf, 1) == -1) {
		perror("unable to read pipe");
		goto out_close_pipes;
	}

	fcntl(go_pipe[1], F_SETFD, FD_CLOEXEC);
	evlist->workload.cork_fd = go_pipe[1];
	close(child_ready_pipe[0]);
	return 0;

out_close_pipes:
	close(go_pipe[0]);
	close(go_pipe[1]);
out_close_ready_pipe:
	close(child_ready_pipe[0]);
	close(child_ready_pipe[1]);
	return -1;
}

int perf_evlist__start_workload(struct evlist *evlist)
{
	if (evlist->workload.cork_fd > 0) {
		char bf = 0;
		int ret;
		/*
		 * Remove the cork, let it rip!
		 */
		ret = write(evlist->workload.cork_fd, &bf, 1);
		if (ret < 0)
			perror("unable to write to pipe");

		close(evlist->workload.cork_fd);
		return ret;
	}

	return 0;
}

int perf_evlist__parse_sample(struct evlist *evlist, union perf_event *event,
			      struct perf_sample *sample)
{
	struct evsel *evsel = perf_evlist__event2evsel(evlist, event);

	if (!evsel)
		return -EFAULT;
	return perf_evsel__parse_sample(evsel, event, sample);
}

int perf_evlist__parse_sample_timestamp(struct evlist *evlist,
					union perf_event *event,
					u64 *timestamp)
{
	struct evsel *evsel = perf_evlist__event2evsel(evlist, event);

	if (!evsel)
		return -EFAULT;
	return perf_evsel__parse_sample_timestamp(evsel, event, timestamp);
}

size_t perf_evlist__fprintf(struct evlist *evlist, FILE *fp)
{
	struct evsel *evsel;
	size_t printed = 0;

	evlist__for_each_entry(evlist, evsel) {
		printed += fprintf(fp, "%s%s", evsel->idx ? ", " : "",
				   perf_evsel__name(evsel));
	}

	return printed + fprintf(fp, "\n");
}

int perf_evlist__strerror_open(struct evlist *evlist,
			       int err, char *buf, size_t size)
{
	int printed, value;
	char sbuf[STRERR_BUFSIZE], *emsg = str_error_r(err, sbuf, sizeof(sbuf));

	switch (err) {
	case EACCES:
	case EPERM:
		printed = scnprintf(buf, size,
				    "Error:\t%s.\n"
				    "Hint:\tCheck /proc/sys/kernel/perf_event_paranoid setting.", emsg);

		value = perf_event_paranoid();

		printed += scnprintf(buf + printed, size - printed, "\nHint:\t");

		if (value >= 2) {
			printed += scnprintf(buf + printed, size - printed,
					     "For your workloads it needs to be <= 1\nHint:\t");
		}
		printed += scnprintf(buf + printed, size - printed,
				     "For system wide tracing it needs to be set to -1.\n");

		printed += scnprintf(buf + printed, size - printed,
				     "Hint:\tTry: 'sudo sh -c \"echo -1 > /proc/sys/kernel/perf_event_paranoid\"'\n"
				     "Hint:\tThe current value is %d.", value);
		break;
	case EINVAL: {
		struct evsel *first = perf_evlist__first(evlist);
		int max_freq;

		if (sysctl__read_int("kernel/perf_event_max_sample_rate", &max_freq) < 0)
			goto out_default;

		if (first->core.attr.sample_freq < (u64)max_freq)
			goto out_default;

		printed = scnprintf(buf, size,
				    "Error:\t%s.\n"
				    "Hint:\tCheck /proc/sys/kernel/perf_event_max_sample_rate.\n"
				    "Hint:\tThe current value is %d and %" PRIu64 " is being requested.",
				    emsg, max_freq, first->core.attr.sample_freq);
		break;
	}
	default:
out_default:
		scnprintf(buf, size, "%s", emsg);
		break;
	}

	return 0;
}

int perf_evlist__strerror_mmap(struct evlist *evlist, int err, char *buf, size_t size)
{
	char sbuf[STRERR_BUFSIZE], *emsg = str_error_r(err, sbuf, sizeof(sbuf));
	int pages_attempted = evlist->core.mmap_len / 1024, pages_max_per_user, printed = 0;

	switch (err) {
	case EPERM:
		sysctl__read_int("kernel/perf_event_mlock_kb", &pages_max_per_user);
		printed += scnprintf(buf + printed, size - printed,
				     "Error:\t%s.\n"
				     "Hint:\tCheck /proc/sys/kernel/perf_event_mlock_kb (%d kB) setting.\n"
				     "Hint:\tTried using %zd kB.\n",
				     emsg, pages_max_per_user, pages_attempted);

		if (pages_attempted >= pages_max_per_user) {
			printed += scnprintf(buf + printed, size - printed,
					     "Hint:\tTry 'sudo sh -c \"echo %d > /proc/sys/kernel/perf_event_mlock_kb\"', or\n",
					     pages_max_per_user + pages_attempted);
		}

		printed += scnprintf(buf + printed, size - printed,
				     "Hint:\tTry using a smaller -m/--mmap-pages value.");
		break;
	default:
		scnprintf(buf, size, "%s", emsg);
		break;
	}

	return 0;
}

void perf_evlist__to_front(struct evlist *evlist,
			   struct evsel *move_evsel)
{
	struct evsel *evsel, *n;
	LIST_HEAD(move);

	if (move_evsel == perf_evlist__first(evlist))
		return;

	evlist__for_each_entry_safe(evlist, n, evsel) {
		if (evsel->leader == move_evsel->leader)
			list_move_tail(&evsel->core.node, &move);
	}

	list_splice(&move, &evlist->core.entries);
}

void perf_evlist__set_tracking_event(struct evlist *evlist,
				     struct evsel *tracking_evsel)
{
	struct evsel *evsel;

	if (tracking_evsel->tracking)
		return;

	evlist__for_each_entry(evlist, evsel) {
		if (evsel != tracking_evsel)
			evsel->tracking = false;
	}

	tracking_evsel->tracking = true;
}

struct evsel *
perf_evlist__find_evsel_by_str(struct evlist *evlist,
			       const char *str)
{
	struct evsel *evsel;

	evlist__for_each_entry(evlist, evsel) {
		if (!evsel->name)
			continue;
		if (strcmp(str, evsel->name) == 0)
			return evsel;
	}

	return NULL;
}
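/*
 * Drive the backward ring buffer state machine:
 * NOTREADY -> RUNNING -> DATA_PENDING -> EMPTY -> RUNNING, pausing the
 * overwrite maps when data is pending and resuming them once the caller
 * has emptied them. Any other transition is rejected.
 */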
void perf_evlist__toggle_bkw_mmap(struct evlist *evlist,
				  enum bkw_mmap_state state)
{
	enum bkw_mmap_state old_state = evlist->bkw_mmap_state;
	enum action {
		NONE,
		PAUSE,
		RESUME,
	} action = NONE;

	if (!evlist->overwrite_mmap)
		return;

	switch (old_state) {
	case BKW_MMAP_NOTREADY: {
		if (state != BKW_MMAP_RUNNING)
			goto state_err;
		break;
	}
	case BKW_MMAP_RUNNING: {
		if (state != BKW_MMAP_DATA_PENDING)
			goto state_err;
		action = PAUSE;
		break;
	}
	case BKW_MMAP_DATA_PENDING: {
		if (state != BKW_MMAP_EMPTY)
			goto state_err;
		break;
	}
	case BKW_MMAP_EMPTY: {
		if (state != BKW_MMAP_RUNNING)
			goto state_err;
		action = RESUME;
		break;
	}
	default:
		WARN_ONCE(1, "Shouldn't get there\n");
	}

	evlist->bkw_mmap_state = state;

	switch (action) {
	case PAUSE:
		perf_evlist__pause(evlist);
		break;
	case RESUME:
		perf_evlist__resume(evlist);
		break;
	case NONE:
	default:
		break;
	}

state_err:
	return;
}

bool perf_evlist__exclude_kernel(struct evlist *evlist)
{
	struct evsel *evsel;

	evlist__for_each_entry(evlist, evsel) {
		if (!evsel->core.attr.exclude_kernel)
			return false;
	}

	return true;
}

/*
 * Events in the data file are not collected in groups, but we still want
 * the group display. Set the artificial group and set the leader's
 * forced_leader flag to notify the display code.
 */
void perf_evlist__force_leader(struct evlist *evlist)
{
	if (!evlist->nr_groups) {
		struct evsel *leader = perf_evlist__first(evlist);

		perf_evlist__set_leader(evlist);
		leader->forced_leader = true;
	}
}
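/*
 * A member of a weak group failed to open. Close the members that were
 * opened before the failing one, break the group apart (each event
 * becomes its own leader), and return the old leader so the caller can
 * retry the events individually.
 */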
struct evsel *perf_evlist__reset_weak_group(struct evlist *evsel_list,
					    struct evsel *evsel)
{
	struct evsel *c2, *leader;
	bool is_open = true;

	leader = evsel->leader;
	pr_debug("Weak group for %s/%d failed\n",
		 leader->name, leader->core.nr_members);

	/*
	 * for_each_group_member doesn't work here because it doesn't
	 * include the first entry.
	 */
	evlist__for_each_entry(evsel_list, c2) {
		if (c2 == evsel)
			is_open = false;
		if (c2->leader == leader) {
			if (is_open)
				evsel__close(c2);
			c2->leader = c2;
			c2->core.nr_members = 0;
		}
	}
	return leader;
}

int perf_evlist__add_sb_event(struct evlist **evlist,
			      struct perf_event_attr *attr,
			      perf_evsel__sb_cb_t cb,
			      void *data)
{
	struct evsel *evsel;
	bool new_evlist = (*evlist) == NULL;

	if (*evlist == NULL)
		*evlist = evlist__new();
	if (*evlist == NULL)
		return -1;

	if (!attr->sample_id_all) {
		pr_warning("enabling sample_id_all for all side band events\n");
		attr->sample_id_all = 1;
	}

	evsel = perf_evsel__new_idx(attr, (*evlist)->core.nr_entries);
	if (!evsel)
		goto out_err;

	evsel->side_band.cb = cb;
	evsel->side_band.data = data;
	evlist__add(*evlist, evsel);
	return 0;

out_err:
	if (new_evlist) {
		evlist__delete(*evlist);
		*evlist = NULL;
	}
	return -1;
}
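/*
 * Side-band poll loop: drain every mmap and dispatch each event to its
 * evsel's side_band callback. After evlist->thread.done is set, keep
 * draining until a pass finds no more data, then exit.
 */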
static void *perf_evlist__poll_thread(void *arg)
{
	struct evlist *evlist = arg;
	bool draining = false;
	int i, done = 0;
	/*
	 * In order to read symbols from other namespaces perf needs to call
	 * setns(2). This isn't permitted if the struct_fs has multiple users.
	 * unshare(2) the fs so that we may continue to setns into namespaces
	 * that we're observing when, for instance, reading the build-ids at
	 * the end of a 'perf record' session.
	 */
	unshare(CLONE_FS);

	while (!done) {
		bool got_data = false;

		if (evlist->thread.done)
			draining = true;

		if (!draining)
			perf_evlist__poll(evlist, 1000);

		for (i = 0; i < evlist->core.nr_mmaps; i++) {
			struct mmap *map = &evlist->mmap[i];
			union perf_event *event;

			if (perf_mmap__read_init(map))
				continue;
			while ((event = perf_mmap__read_event(map)) != NULL) {
				struct evsel *evsel = perf_evlist__event2evsel(evlist, event);

				if (evsel && evsel->side_band.cb)
					evsel->side_band.cb(event, evsel->side_band.data);
				else
					pr_warning("cannot locate proper evsel for the side band event\n");

				perf_mmap__consume(map);
				got_data = true;
			}
			perf_mmap__read_done(map);
		}

		if (draining && !got_data)
			break;
	}
	return NULL;
}

int perf_evlist__start_sb_thread(struct evlist *evlist,
				 struct target *target)
{
	struct evsel *counter;

	if (!evlist)
		return 0;

	if (perf_evlist__create_maps(evlist, target))
		goto out_delete_evlist;

	evlist__for_each_entry(evlist, counter) {
		if (evsel__open(counter, evlist->core.cpus,
				evlist->core.threads) < 0)
			goto out_delete_evlist;
	}

	if (evlist__mmap(evlist, UINT_MAX))
		goto out_delete_evlist;

	evlist__for_each_entry(evlist, counter) {
		if (evsel__enable(counter))
			goto out_delete_evlist;
	}

	evlist->thread.done = 0;
	if (pthread_create(&evlist->thread.th, NULL, perf_evlist__poll_thread, evlist))
		goto out_delete_evlist;

	return 0;

out_delete_evlist:
	evlist__delete(evlist);
	evlist = NULL;
	return -1;
}

void perf_evlist__stop_sb_thread(struct evlist *evlist)
{
	if (!evlist)
		return;
	evlist->thread.done = 1;
	pthread_join(evlist->thread.th, NULL);
	evlist__delete(evlist);
}