Merge branch 'for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/dtor/input
[linux-block.git] / tools / perf / util / evlist.c
CommitLineData
91007045 1// SPDX-License-Identifier: GPL-2.0-only
f8a95309
ACM
2/*
3 * Copyright (C) 2011, Red Hat Inc, Arnaldo Carvalho de Melo <acme@redhat.com>
4 *
5 * Parts came from builtin-{top,stat,record}.c, see those files for further
6 * copyright notes.
f8a95309 7 */
956fa571 8#include <api/fs/fs.h>
a43783ae 9#include <errno.h>
fd20e811 10#include <inttypes.h>
5c581041 11#include <poll.h>
f8a95309 12#include "cpumap.h"
e0fcfb08 13#include "util/mmap.h"
f8a95309 14#include "thread_map.h"
12864b31 15#include "target.h"
361c99a6
ACM
16#include "evlist.h"
17#include "evsel.h"
e3e1a54f 18#include "debug.h"
58db1d6e 19#include "units.h"
20f2be1d 20#include <internal/lib.h> // page_size
7736627b 21#include "affinity.h"
c1a604df 22#include "../perf.h"
54cc54de 23#include "asm/bug.h"
657ee553 24#include "bpf-event.h"
da949f50 25#include "util/string2.h"
40c7d246 26#include "util/perf_api_probe.h"
9607ad3a 27#include <signal.h>
35b9d88e 28#include <unistd.h>
b397f846 29#include <sched.h>
f2a39fe8 30#include <stdlib.h>
361c99a6 31
50d08e47 32#include "parse-events.h"
4b6ab94e 33#include <subcmd/parse-options.h>
50d08e47 34
bafae98e 35#include <fcntl.h>
86a5e0c2 36#include <sys/ioctl.h>
f8a95309
ACM
37#include <sys/mman.h>
38
70db7533
ACM
39#include <linux/bitops.h>
40#include <linux/hash.h>
0389cd1f 41#include <linux/log2.h>
8dd2a131 42#include <linux/err.h>
8520a98d 43#include <linux/string.h>
7f7c536f 44#include <linux/zalloc.h>
4562a739 45#include <perf/evlist.h>
88761fa1 46#include <perf/evsel.h>
9c3516d1 47#include <perf/cpumap.h>
7728fa0c 48#include <perf/mmap.h>
70db7533 49
e14e5497
ACM
50#include <internal/xyarray.h>
51
748fe088
ACM
52#ifdef LACKS_SIGQUEUE_PROTOTYPE
53int sigqueue(pid_t pid, int sig, const union sigval value);
54#endif
55
9dfcb759 56#define FD(e, x, y) (*(int *)xyarray__entry(e->core.fd, x, y))
8cd36f3e 57#define SID(e, x, y) xyarray__entry(e->core.sample_id, x, y)
f8a95309 58
52c86bca
JO
/* Initialize an already-allocated evlist: set up the core list, attach the
 * cpu/thread maps, and mark "no workload" / "backward mmaps not ready". */
void evlist__init(struct evlist *evlist, struct perf_cpu_map *cpus,
		  struct perf_thread_map *threads)
{
	perf_evlist__init(&evlist->core);
	perf_evlist__set_maps(&evlist->core, cpus, threads);
	evlist->workload.pid = -1;	/* -1: no forked workload yet */
	evlist->bkw_mmap_state = BKW_MMAP_NOTREADY;
}
67
0f98b11c 68struct evlist *evlist__new(void)
361c99a6 69{
63503dba 70 struct evlist *evlist = zalloc(sizeof(*evlist));
361c99a6 71
ef1d1af2 72 if (evlist != NULL)
52c86bca 73 evlist__init(evlist, NULL, NULL);
361c99a6
ACM
74
75 return evlist;
76}
77
63503dba 78struct evlist *perf_evlist__new_default(void)
b22d54b0 79{
0f98b11c 80 struct evlist *evlist = evlist__new();
b22d54b0
JO
81
82 if (evlist && perf_evlist__add_default(evlist)) {
c12995a5 83 evlist__delete(evlist);
b22d54b0
JO
84 evlist = NULL;
85 }
86
87 return evlist;
88}
89
63503dba 90struct evlist *perf_evlist__new_dummy(void)
5bae0250 91{
0f98b11c 92 struct evlist *evlist = evlist__new();
5bae0250
ACM
93
94 if (evlist && perf_evlist__add_dummy(evlist)) {
c12995a5 95 evlist__delete(evlist);
5bae0250
ACM
96 evlist = NULL;
97 }
98
99 return evlist;
100}
101
75562573
AH
/**
 * perf_evlist__set_id_pos - set the positions of event ids.
 * @evlist: selected event list
 *
 * Events with compatible sample types all have the same id_pos
 * and is_pos. For convenience, put a copy on evlist.
 */
void perf_evlist__set_id_pos(struct evlist *evlist)
{
	/* Copied from the first evsel; valid for all when sample types match. */
	struct evsel *first = evlist__first(evlist);

	evlist->id_pos = first->id_pos;
	evlist->is_pos = first->is_pos;
}
116
63503dba 117static void perf_evlist__update_id_pos(struct evlist *evlist)
733cd2fe 118{
32dcd021 119 struct evsel *evsel;
733cd2fe 120
e5cadb93 121 evlist__for_each_entry(evlist, evsel)
4b5e87b7 122 evsel__calc_id_pos(evsel);
733cd2fe
AH
123
124 perf_evlist__set_id_pos(evlist);
125}
126
e6b1878d 127static void evlist__purge(struct evlist *evlist)
361c99a6 128{
32dcd021 129 struct evsel *pos, *n;
361c99a6 130
e5cadb93 131 evlist__for_each_entry_safe(evlist, n, pos) {
b27c4ece 132 list_del_init(&pos->core.node);
d49e4695 133 pos->evlist = NULL;
5eb2dd2a 134 evsel__delete(pos);
361c99a6
ACM
135 }
136
6484d2f9 137 evlist->core.nr_entries = 0;
361c99a6
ACM
138}
139
470579b0 140void evlist__exit(struct evlist *evlist)
361c99a6 141{
04662523 142 zfree(&evlist->mmap);
0b72d69a 143 zfree(&evlist->overwrite_mmap);
93dd6e28 144 perf_evlist__exit(&evlist->core);
ef1d1af2
ACM
145}
146
c12995a5 147void evlist__delete(struct evlist *evlist)
ef1d1af2 148{
0b04b3dc
ACM
149 if (evlist == NULL)
150 return;
151
db6b7b13 152 evlist__munmap(evlist);
750b4ede 153 evlist__close(evlist);
e6b1878d 154 evlist__purge(evlist);
470579b0 155 evlist__exit(evlist);
361c99a6
ACM
156 free(evlist);
157}
158
a1cf3a75 159void evlist__add(struct evlist *evlist, struct evsel *entry)
361c99a6 160{
d49e4695 161 entry->evlist = evlist;
6484d2f9 162 entry->idx = evlist->core.nr_entries;
60b0896c 163 entry->tracking = !entry->idx;
ef503831 164
6484d2f9
JO
165 perf_evlist__add(&evlist->core, &entry->core);
166
167 if (evlist->core.nr_entries == 1)
75562573 168 perf_evlist__set_id_pos(evlist);
361c99a6
ACM
169}
170
/* Unlink @evsel from @evlist without destroying it; ownership passes back
 * to the caller. */
void evlist__remove(struct evlist *evlist, struct evsel *evsel)
{
	evsel->evlist = NULL;
	perf_evlist__remove(&evlist->core, &evsel->core);
}
176
63503dba 177void perf_evlist__splice_list_tail(struct evlist *evlist,
f114d6ef 178 struct list_head *list)
50d08e47 179{
32dcd021 180 struct evsel *evsel, *temp;
75562573 181
e5cadb93 182 __evlist__for_each_entry_safe(list, temp, evsel) {
b27c4ece 183 list_del_init(&evsel->core.node);
a1cf3a75 184 evlist__add(evlist, evsel);
f114d6ef 185 }
50d08e47
ACM
186}
187
c0e53476
ACM
/* Attach a handler to each named tracepoint in @assocs that is present in
 * @evlist. Missing events are silently skipped; an already-set handler is
 * an error (-EEXIST). Returns 0 on success. */
int __evlist__set_tracepoints_handlers(struct evlist *evlist,
				       const struct evsel_str_handler *assocs, size_t nr_assocs)
{
	struct evsel *evsel;
	size_t i;
	int err;

	for (i = 0; i < nr_assocs; i++) {
		// Adding a handler for an event not in this evlist, just ignore it.
		evsel = perf_evlist__find_tracepoint_by_name(evlist, assocs[i].name);
		if (evsel == NULL)
			continue;

		err = -EEXIST;
		if (evsel->handler != NULL)
			goto out;
		evsel->handler = assocs[i].handler;
	}

	err = 0;
out:
	return err;
}
211
63dab225
ACM
/* Make the first evsel on @list the group leader of all entries on it.
 * nr_members is derived from the idx span (last - first + 1). */
void __perf_evlist__set_leader(struct list_head *list)
{
	struct evsel *evsel, *leader;

	leader = list_entry(list->next, struct evsel, core.node);
	evsel = list_entry(list->prev, struct evsel, core.node);

	leader->core.nr_members = evsel->idx - leader->idx + 1;

	__evlist__for_each_entry(list, evsel) {
		evsel->leader = leader;
	}
}
225
63503dba 226void perf_evlist__set_leader(struct evlist *evlist)
6a4bb04c 227{
6484d2f9
JO
228 if (evlist->core.nr_entries) {
229 evlist->nr_groups = evlist->core.nr_entries > 1 ? 1 : 0;
ce9036a6 230 __perf_evlist__set_leader(&evlist->core.entries);
97f63e4a 231 }
6a4bb04c
JO
232}
233
63503dba 234int __perf_evlist__add_default(struct evlist *evlist, bool precise)
361c99a6 235{
8f6725a2 236 struct evsel *evsel = evsel__new_cycles(precise);
1aed2671 237
361c99a6 238 if (evsel == NULL)
7c48dcfd 239 return -ENOMEM;
361c99a6 240
a1cf3a75 241 evlist__add(evlist, evsel);
361c99a6
ACM
242 return 0;
243}
5c581041 244
63503dba 245int perf_evlist__add_dummy(struct evlist *evlist)
5bae0250
ACM
246{
247 struct perf_event_attr attr = {
248 .type = PERF_TYPE_SOFTWARE,
249 .config = PERF_COUNT_SW_DUMMY,
250 .size = sizeof(attr), /* to capture ABI version */
251 };
8f6725a2 252 struct evsel *evsel = evsel__new_idx(&attr, evlist->core.nr_entries);
5bae0250
ACM
253
254 if (evsel == NULL)
255 return -ENOMEM;
256
a1cf3a75 257 evlist__add(evlist, evsel);
5bae0250
ACM
258 return 0;
259}
260
a1cf3a75 261static int evlist__add_attrs(struct evlist *evlist,
e60fc847 262 struct perf_event_attr *attrs, size_t nr_attrs)
50d08e47 263{
32dcd021 264 struct evsel *evsel, *n;
50d08e47
ACM
265 LIST_HEAD(head);
266 size_t i;
267
268 for (i = 0; i < nr_attrs; i++) {
8f6725a2 269 evsel = evsel__new_idx(attrs + i, evlist->core.nr_entries + i);
50d08e47
ACM
270 if (evsel == NULL)
271 goto out_delete_partial_list;
b27c4ece 272 list_add_tail(&evsel->core.node, &head);
50d08e47
ACM
273 }
274
f114d6ef 275 perf_evlist__splice_list_tail(evlist, &head);
50d08e47
ACM
276
277 return 0;
278
279out_delete_partial_list:
e5cadb93 280 __evlist__for_each_entry_safe(&head, n, evsel)
5eb2dd2a 281 evsel__delete(evsel);
50d08e47
ACM
282 return -1;
283}
284
63503dba 285int __perf_evlist__add_default_attrs(struct evlist *evlist,
79695e1b
ACM
286 struct perf_event_attr *attrs, size_t nr_attrs)
287{
288 size_t i;
289
290 for (i = 0; i < nr_attrs; i++)
291 event_attr_init(attrs + i);
292
a1cf3a75 293 return evlist__add_attrs(evlist, attrs, nr_attrs);
79695e1b
ACM
294}
295
32dcd021 296struct evsel *
63503dba 297perf_evlist__find_tracepoint_by_id(struct evlist *evlist, int id)
ee29be62 298{
32dcd021 299 struct evsel *evsel;
ee29be62 300
e5cadb93 301 evlist__for_each_entry(evlist, evsel) {
1fc632ce
JO
302 if (evsel->core.attr.type == PERF_TYPE_TRACEPOINT &&
303 (int)evsel->core.attr.config == id)
ee29be62
ACM
304 return evsel;
305 }
306
307 return NULL;
308}
309
32dcd021 310struct evsel *
63503dba 311perf_evlist__find_tracepoint_by_name(struct evlist *evlist,
a2f2804a
DA
312 const char *name)
313{
32dcd021 314 struct evsel *evsel;
a2f2804a 315
e5cadb93 316 evlist__for_each_entry(evlist, evsel) {
1fc632ce 317 if ((evsel->core.attr.type == PERF_TYPE_TRACEPOINT) &&
a2f2804a
DA
318 (strcmp(evsel->name, name) == 0))
319 return evsel;
320 }
321
322 return NULL;
323}
324
63503dba 325int perf_evlist__add_newtp(struct evlist *evlist,
39876e7d
ACM
326 const char *sys, const char *name, void *handler)
327{
8f6725a2 328 struct evsel *evsel = evsel__newtp(sys, name);
39876e7d 329
8dd2a131 330 if (IS_ERR(evsel))
39876e7d
ACM
331 return -1;
332
744a9719 333 evsel->handler = handler;
a1cf3a75 334 evlist__add(evlist, evsel);
39876e7d
ACM
335 return 0;
336}
337
63503dba 338static int perf_evlist__nr_threads(struct evlist *evlist,
32dcd021 339 struct evsel *evsel)
bf8e8f4b 340{
648b5af3 341 if (evsel->core.system_wide)
bf8e8f4b
AH
342 return 1;
343 else
a2f354e3 344 return perf_thread_map__nr(evlist->core.threads);
bf8e8f4b
AH
345}
346
a8cbe40f
AK
/* Reset every evsel's per-cpu iteration cursor before a cpu-major walk. */
void evlist__cpu_iter_start(struct evlist *evlist)
{
	struct evsel *pos;

	/*
	 * Reset the per evsel cpu_iter. This is needed because
	 * each evsel's cpumap may have a different index space,
	 * and some operations need the index to modify
	 * the FD xyarray (e.g. open, close)
	 */
	evlist__for_each_entry(evlist, pos)
		pos->cpu_iter = 0;
}
360
/* True if @ev should be skipped for @cpu at the current cursor position:
 * either its cpumap is exhausted or the next map entry is not @cpu.
 * Does NOT advance the cursor. */
bool evsel__cpu_iter_skip_no_inc(struct evsel *ev, int cpu)
{
	if (ev->cpu_iter >= ev->core.cpus->nr)
		return true;
	if (cpu >= 0 && ev->core.cpus->map[ev->cpu_iter] != cpu)
		return true;
	return false;
}
369
/* Like evsel__cpu_iter_skip_no_inc(), but advances the cursor when @cpu
 * matches (i.e. when the event is NOT skipped). */
bool evsel__cpu_iter_skip(struct evsel *ev, int cpu)
{
	if (!evsel__cpu_iter_skip_no_inc(ev, cpu)) {
		ev->cpu_iter++;
		return false;
	}
	return true;
}
378
e74676de 379void evlist__disable(struct evlist *evlist)
4152ab37 380{
32dcd021 381 struct evsel *pos;
704e2f5b 382 struct affinity affinity;
87cf8360
AH
383 int cpu, i, imm = 0;
384 bool has_imm = false;
3e27c920 385
704e2f5b
AK
386 if (affinity__setup(&affinity) < 0)
387 return;
388
87cf8360
AH
389 /* Disable 'immediate' events last */
390 for (imm = 0; imm <= 1; imm++) {
391 evlist__for_each_cpu(evlist, i, cpu) {
392 affinity__set(&affinity, cpu);
393
394 evlist__for_each_entry(evlist, pos) {
395 if (evsel__cpu_iter_skip(pos, cpu))
396 continue;
397 if (pos->disabled || !evsel__is_group_leader(pos) || !pos->core.fd)
398 continue;
399 if (pos->immediate)
400 has_imm = true;
401 if (pos->immediate != imm)
402 continue;
403 evsel__disable_cpu(pos, pos->cpu_iter - 1);
404 }
704e2f5b 405 }
87cf8360
AH
406 if (!has_imm)
407 break;
704e2f5b 408 }
87cf8360 409
704e2f5b 410 affinity__cleanup(&affinity);
e5cadb93 411 evlist__for_each_entry(evlist, pos) {
c754c382 412 if (!evsel__is_group_leader(pos) || !pos->core.fd)
3e27c920 413 continue;
704e2f5b 414 pos->disabled = true;
4152ab37 415 }
2b56bcfb
ACM
416
417 evlist->enabled = false;
4152ab37
ACM
418}
419
1c87f165 420void evlist__enable(struct evlist *evlist)
764e16a3 421{
32dcd021 422 struct evsel *pos;
704e2f5b
AK
423 struct affinity affinity;
424 int cpu, i;
3e27c920 425
704e2f5b
AK
426 if (affinity__setup(&affinity) < 0)
427 return;
428
429 evlist__for_each_cpu(evlist, i, cpu) {
430 affinity__set(&affinity, cpu);
431
432 evlist__for_each_entry(evlist, pos) {
433 if (evsel__cpu_iter_skip(pos, cpu))
434 continue;
c754c382 435 if (!evsel__is_group_leader(pos) || !pos->core.fd)
704e2f5b
AK
436 continue;
437 evsel__enable_cpu(pos, pos->cpu_iter - 1);
438 }
439 }
440 affinity__cleanup(&affinity);
e5cadb93 441 evlist__for_each_entry(evlist, pos) {
c754c382 442 if (!evsel__is_group_leader(pos) || !pos->core.fd)
3e27c920 443 continue;
704e2f5b 444 pos->disabled = false;
764e16a3 445 }
2b56bcfb
ACM
446
447 evlist->enabled = true;
448}
449
63503dba 450void perf_evlist__toggle_enable(struct evlist *evlist)
2b56bcfb 451{
e74676de 452 (evlist->enabled ? evlist__disable : evlist__enable)(evlist);
764e16a3
DA
453}
454
63503dba 455static int perf_evlist__enable_event_cpu(struct evlist *evlist,
32dcd021 456 struct evsel *evsel, int cpu)
1c65056c 457{
18ef15c6 458 int thread;
1c65056c
AH
459 int nr_threads = perf_evlist__nr_threads(evlist, evsel);
460
9dfcb759 461 if (!evsel->core.fd)
1c65056c
AH
462 return -EINVAL;
463
464 for (thread = 0; thread < nr_threads; thread++) {
18ef15c6 465 int err = ioctl(FD(evsel, cpu, thread), PERF_EVENT_IOC_ENABLE, 0);
1c65056c
AH
466 if (err)
467 return err;
468 }
469 return 0;
470}
471
63503dba 472static int perf_evlist__enable_event_thread(struct evlist *evlist,
32dcd021 473 struct evsel *evsel,
1c65056c
AH
474 int thread)
475{
18ef15c6 476 int cpu;
6549cd8f 477 int nr_cpus = perf_cpu_map__nr(evlist->core.cpus);
1c65056c 478
9dfcb759 479 if (!evsel->core.fd)
1c65056c
AH
480 return -EINVAL;
481
482 for (cpu = 0; cpu < nr_cpus; cpu++) {
18ef15c6 483 int err = ioctl(FD(evsel, cpu, thread), PERF_EVENT_IOC_ENABLE, 0);
1c65056c
AH
484 if (err)
485 return err;
486 }
487 return 0;
488}
489
63503dba 490int perf_evlist__enable_event_idx(struct evlist *evlist,
32dcd021 491 struct evsel *evsel, int idx)
1c65056c 492{
315c0a1f 493 bool per_cpu_mmaps = !perf_cpu_map__empty(evlist->core.cpus);
1c65056c
AH
494
495 if (per_cpu_mmaps)
496 return perf_evlist__enable_event_cpu(evlist, evsel, idx);
497 else
498 return perf_evlist__enable_event_thread(evlist, evsel, idx);
499}
500
f4009e7b 501int evlist__add_pollfd(struct evlist *evlist, int fd)
70082dd9 502{
f4009e7b 503 return perf_evlist__add_pollfd(&evlist->core, fd, NULL, POLLIN);
e4b356b5
ACM
504}
505
f4009e7b 506int evlist__filter_pollfd(struct evlist *evlist, short revents_and_mask)
1ddec7f0 507{
84227cb1 508 return perf_evlist__filter_pollfd(&evlist->core, revents_and_mask);
1ddec7f0
ACM
509}
510
80ab2987 511int evlist__poll(struct evlist *evlist, int timeout)
f66a889d 512{
80ab2987 513 return perf_evlist__poll(&evlist->core, timeout);
f66a889d
ACM
514}
515
63503dba 516struct perf_sample_id *perf_evlist__id2sid(struct evlist *evlist, u64 id)
70db7533
ACM
517{
518 struct hlist_head *head;
70db7533
ACM
519 struct perf_sample_id *sid;
520 int hash;
521
70db7533 522 hash = hash_64(id, PERF_EVLIST__HLIST_BITS);
1d5af02d 523 head = &evlist->core.heads[hash];
70db7533 524
b67bfe0d 525 hlist_for_each_entry(sid, head, node)
70db7533 526 if (sid->id == id)
932a3594
JO
527 return sid;
528
529 return NULL;
530}
531
63503dba 532struct evsel *perf_evlist__id2evsel(struct evlist *evlist, u64 id)
932a3594
JO
533{
534 struct perf_sample_id *sid;
535
6484d2f9 536 if (evlist->core.nr_entries == 1 || !id)
515dbe48 537 return evlist__first(evlist);
932a3594
JO
538
539 sid = perf_evlist__id2sid(evlist, id);
540 if (sid)
70c20369 541 return container_of(sid->evsel, struct evsel, core);
30e68bcc
NK
542
543 if (!perf_evlist__sample_id_all(evlist))
515dbe48 544 return evlist__first(evlist);
30e68bcc 545
70db7533
ACM
546 return NULL;
547}
04391deb 548
63503dba 549struct evsel *perf_evlist__id2evsel_strict(struct evlist *evlist,
dddcf6ab
AH
550 u64 id)
551{
552 struct perf_sample_id *sid;
553
554 if (!id)
555 return NULL;
556
557 sid = perf_evlist__id2sid(evlist, id);
558 if (sid)
70c20369 559 return container_of(sid->evsel, struct evsel, core);
dddcf6ab
AH
560
561 return NULL;
562}
563
63503dba 564static int perf_evlist__event2id(struct evlist *evlist,
75562573
AH
565 union perf_event *event, u64 *id)
566{
b1fcd190 567 const __u64 *array = event->sample.array;
75562573
AH
568 ssize_t n;
569
570 n = (event->header.size - sizeof(event->header)) >> 3;
571
572 if (event->header.type == PERF_RECORD_SAMPLE) {
573 if (evlist->id_pos >= n)
574 return -1;
575 *id = array[evlist->id_pos];
576 } else {
577 if (evlist->is_pos > n)
578 return -1;
579 n -= evlist->is_pos;
580 *id = array[n];
581 }
582 return 0;
583}
584
63503dba 585struct evsel *perf_evlist__event2evsel(struct evlist *evlist,
7cb5c5ac 586 union perf_event *event)
75562573 587{
515dbe48 588 struct evsel *first = evlist__first(evlist);
75562573
AH
589 struct hlist_head *head;
590 struct perf_sample_id *sid;
591 int hash;
592 u64 id;
593
6484d2f9 594 if (evlist->core.nr_entries == 1)
98be6966
AH
595 return first;
596
1fc632ce 597 if (!first->core.attr.sample_id_all &&
98be6966
AH
598 event->header.type != PERF_RECORD_SAMPLE)
599 return first;
75562573
AH
600
601 if (perf_evlist__event2id(evlist, event, &id))
602 return NULL;
603
604 /* Synthesized events have an id of zero */
605 if (!id)
98be6966 606 return first;
75562573
AH
607
608 hash = hash_64(id, PERF_EVLIST__HLIST_BITS);
1d5af02d 609 head = &evlist->core.heads[hash];
75562573
AH
610
611 hlist_for_each_entry(sid, head, node) {
612 if (sid->id == id)
70c20369 613 return container_of(sid->evsel, struct evsel, core);
75562573
AH
614 }
615 return NULL;
616}
617
63503dba 618static int perf_evlist__set_paused(struct evlist *evlist, bool value)
65aea233
WN
619{
620 int i;
621
0b72d69a 622 if (!evlist->overwrite_mmap)
078c3386
WN
623 return 0;
624
c976ee11 625 for (i = 0; i < evlist->core.nr_mmaps; i++) {
2cf07b29 626 int fd = evlist->overwrite_mmap[i].core.fd;
65aea233
WN
627 int err;
628
629 if (fd < 0)
630 continue;
631 err = ioctl(fd, PERF_EVENT_IOC_PAUSE_OUTPUT, value ? 1 : 0);
632 if (err)
633 return err;
634 }
635 return 0;
636}
637
63503dba 638static int perf_evlist__pause(struct evlist *evlist)
65aea233
WN
639{
640 return perf_evlist__set_paused(evlist, true);
641}
642
63503dba 643static int perf_evlist__resume(struct evlist *evlist)
65aea233
WN
644{
645 return perf_evlist__set_paused(evlist, false);
646}
647
db6b7b13 648static void evlist__munmap_nofree(struct evlist *evlist)
f8a95309 649{
aece948f 650 int i;
f8a95309 651
b2cb615d 652 if (evlist->mmap)
c976ee11 653 for (i = 0; i < evlist->core.nr_mmaps; i++)
80e53d11 654 perf_mmap__munmap(&evlist->mmap[i].core);
983874d1 655
0b72d69a 656 if (evlist->overwrite_mmap)
c976ee11 657 for (i = 0; i < evlist->core.nr_mmaps; i++)
80e53d11 658 perf_mmap__munmap(&evlist->overwrite_mmap[i].core);
a1f72618 659}
aece948f 660
db6b7b13 661void evlist__munmap(struct evlist *evlist)
a1f72618 662{
db6b7b13 663 evlist__munmap_nofree(evlist);
04662523 664 zfree(&evlist->mmap);
0b72d69a 665 zfree(&evlist->overwrite_mmap);
f8a95309
ACM
666}
667
80e53d11
JO
/* libperf unmap callback: recover the tool-side wrapper and unmap it. */
static void perf_mmap__unmap_cb(struct perf_mmap *map)
{
	struct mmap *m = container_of(map, struct mmap, core);

	mmap__munmap(m);
}
674
d50cf361
JO
675static struct mmap *evlist__alloc_mmap(struct evlist *evlist,
676 bool overwrite)
f8a95309 677{
d4c6fb36 678 int i;
a5830532 679 struct mmap *map;
d4c6fb36 680
c976ee11 681 map = zalloc(evlist->core.nr_mmaps * sizeof(struct mmap));
8db6d6b1
WN
682 if (!map)
683 return NULL;
946ae1d4 684
c976ee11 685 for (i = 0; i < evlist->core.nr_mmaps; i++) {
6eb65f7a
JO
686 struct perf_mmap *prev = i ? &map[i - 1].core : NULL;
687
4738ca30
ACM
688 /*
689 * When the perf_mmap() call is made we grab one refcount, plus
6afad54d 690 * one extra to let perf_mmap__consume() get the last
4738ca30
ACM
691 * events after all real references (perf_mmap__get()) are
692 * dropped.
693 *
694 * Each PERF_EVENT_IOC_SET_OUTPUT points to this mmap and
695 * thus does perf_mmap__get() on it.
696 */
6eb65f7a 697 perf_mmap__init(&map[i].core, prev, overwrite, perf_mmap__unmap_cb);
4738ca30 698 }
353120b4 699
8db6d6b1 700 return map;
f8a95309
ACM
701}
702
9abd2ab2
JO
/* libperf mmap callback: propagate the mmap index into the auxtrace
 * mmap parameters carried inside the tool-side mmap_params. */
static void
perf_evlist__mmap_cb_idx(struct perf_evlist *_evlist,
			 struct perf_mmap_param *_mp,
			 int idx, bool per_cpu)
{
	struct evlist *evlist = container_of(_evlist, struct evlist, core);
	struct mmap_params *mp = container_of(_mp, struct mmap_params, core);

	auxtrace_mmap_params__set_idx(&mp->auxtrace_mp, evlist, idx, per_cpu);
}
713
bb1b1885
JO
/* libperf mmap callback: hand back the perf_mmap at @idx, lazily
 * allocating the forward or overwrite mmap array on first use. Also flips
 * the backward-mmap state machine to RUNNING on first overwrite alloc. */
static struct perf_mmap*
perf_evlist__mmap_cb_get(struct perf_evlist *_evlist, bool overwrite, int idx)
{
	struct evlist *evlist = container_of(_evlist, struct evlist, core);
	struct mmap *maps;

	maps = overwrite ? evlist->overwrite_mmap : evlist->mmap;

	if (!maps) {
		maps = evlist__alloc_mmap(evlist, overwrite);
		if (!maps)
			return NULL;

		if (overwrite) {
			evlist->overwrite_mmap = maps;
			if (evlist->bkw_mmap_state == BKW_MMAP_NOTREADY)
				perf_evlist__toggle_bkw_mmap(evlist, BKW_MMAP_RUNNING);
		} else {
			evlist->mmap = maps;
		}
	}

	return &maps[idx].core;
}
738
b80132b1
JO
/* libperf mmap callback: unwrap the tool-side types and perform the
 * actual mmap of one ring buffer. */
static int
perf_evlist__mmap_cb_mmap(struct perf_mmap *_map, struct perf_mmap_param *_mp,
			  int output, int cpu)
{
	struct mmap *map = container_of(_map, struct mmap, core);
	struct mmap_params *mp = container_of(_mp, struct mmap_params, core);

	return mmap__mmap(map, mp, output, cpu);
}
748
f5e7150c 749unsigned long perf_event_mlock_kb_in_pages(void)
994a1f78 750{
f5e7150c
ACM
751 unsigned long pages;
752 int max;
8185e881 753
f5e7150c
ACM
754 if (sysctl__read_int("kernel/perf_event_mlock_kb", &max) < 0) {
755 /*
756 * Pick a once upon a time good value, i.e. things look
757 * strange since we can't read a sysctl value, but lets not
758 * die yet...
759 */
760 max = 512;
761 } else {
762 max -= (page_size / 1024);
763 }
8185e881 764
f5e7150c
ACM
765 pages = (max * 1024) / page_size;
766 if (!is_power_of_2(pages))
767 pages = rounddown_pow_of_two(pages);
768
769 return pages;
770}
771
9521b5f2 772size_t evlist__mmap_size(unsigned long pages)
f5e7150c
ACM
773{
774 if (pages == UINT_MAX)
775 pages = perf_event_mlock_kb_in_pages();
776 else if (!is_power_of_2(pages))
994a1f78
JO
777 return 0;
778
779 return (pages + 1) * page_size;
780}
781
33c2dcfd
DA
/* Parse a user mmap-size string: either a byte size with B/K/M/G suffix
 * (converted to pages) or a bare page count. Non-power-of-2 counts are
 * rounded up with a notice. Returns the page count, clamped-checked
 * against [@min, @max], or -EINVAL on bad input. */
static long parse_pages_arg(const char *str, unsigned long min,
			    unsigned long max)
{
	unsigned long pages, val;
	static struct parse_tag tags[] = {
		{ .tag  = 'B', .mult = 1       },
		{ .tag  = 'K', .mult = 1 << 10 },
		{ .tag  = 'M', .mult = 1 << 20 },
		{ .tag  = 'G', .mult = 1 << 30 },
		{ .tag  = 0 },
	};

	if (str == NULL)
		return -EINVAL;

	val = parse_tag_value(str, tags);
	if (val != (unsigned long) -1) {
		/* we got file size value */
		pages = PERF_ALIGN(val, page_size) / page_size;
	} else {
		/* we got pages count value */
		char *eptr;
		pages = strtoul(str, &eptr, 10);
		if (*eptr != '\0')
			return -EINVAL;
	}

	if (pages == 0 && min == 0) {
		/* leave number of pages at 0 */
	} else if (!is_power_of_2(pages)) {
		char buf[100];

		/* round pages up to next power of 2 */
		pages = roundup_pow_of_two(pages);
		if (!pages)
			return -EINVAL;

		unit_number__scnprintf(buf, sizeof(buf), pages * page_size);
		pr_info("rounding mmap pages size to %s (%lu pages)\n",
			buf, pages);
	}

	if (pages > max)
		return -EINVAL;

	return pages;
}
829
e9db1310 830int __perf_evlist__parse_mmap_pages(unsigned int *mmap_pages, const char *str)
33c2dcfd 831{
33c2dcfd
DA
832 unsigned long max = UINT_MAX;
833 long pages;
834
f5ae9c42 835 if (max > SIZE_MAX / page_size)
33c2dcfd
DA
836 max = SIZE_MAX / page_size;
837
838 pages = parse_pages_arg(str, 1, max);
839 if (pages < 0) {
840 pr_err("Invalid argument for --mmap_pages/-m\n");
994a1f78
JO
841 return -1;
842 }
843
844 *mmap_pages = pages;
845 return 0;
846}
847
e9db1310
AH
848int perf_evlist__parse_mmap_pages(const struct option *opt, const char *str,
849 int unset __maybe_unused)
850{
851 return __perf_evlist__parse_mmap_pages(opt->value, str);
852}
853
c83fa7f2 854/**
9521b5f2 855 * evlist__mmap_ex - Create mmaps to receive events.
c83fa7f2
AH
856 * @evlist: list of events
857 * @pages: map length in pages
858 * @overwrite: overwrite older events?
718c602d
AH
859 * @auxtrace_pages - auxtrace map length in pages
860 * @auxtrace_overwrite - overwrite older auxtrace data?
f8a95309 861 *
c83fa7f2 862 * If @overwrite is %false the user needs to signal event consumption using
9521b5f2 863 * perf_mmap__write_tail(). Using evlist__mmap_read() does this
c83fa7f2 864 * automatically.
7e2ed097 865 *
718c602d
AH
866 * Similarly, if @auxtrace_overwrite is %false the user needs to signal data
867 * consumption using auxtrace_mmap__write_tail().
868 *
c83fa7f2 869 * Return: %0 on success, negative error code otherwise.
f8a95309 870 */
9521b5f2 871int evlist__mmap_ex(struct evlist *evlist, unsigned int pages,
7a276ff6 872 unsigned int auxtrace_pages,
51255a8a
AB
873 bool auxtrace_overwrite, int nr_cblocks, int affinity, int flush,
874 int comp_level)
f8a95309 875{
71f566a3
WN
876 /*
877 * Delay setting mp.prot: set it before calling perf_mmap__mmap.
878 * Its value is decided by evsel's write_backward.
879 * So &mp should not be passed through const pointer.
880 */
e440979f
JO
881 struct mmap_params mp = {
882 .nr_cblocks = nr_cblocks,
883 .affinity = affinity,
884 .flush = flush,
885 .comp_level = comp_level
886 };
923d0f18 887 struct perf_evlist_mmap_ops ops = {
b80132b1
JO
888 .idx = perf_evlist__mmap_cb_idx,
889 .get = perf_evlist__mmap_cb_get,
890 .mmap = perf_evlist__mmap_cb_mmap,
9abd2ab2 891 };
50a682ce 892
f6fa4375
JO
893 evlist->core.mmap_len = evlist__mmap_size(pages);
894 pr_debug("mmap size %zuB\n", evlist->core.mmap_len);
f8a95309 895
f6fa4375 896 auxtrace_mmap_params__init(&mp.auxtrace_mp, evlist->core.mmap_len,
718c602d
AH
897 auxtrace_pages, auxtrace_overwrite);
898
923d0f18 899 return perf_evlist__mmap_ops(&evlist->core, &ops, &mp.core);
f8a95309 900}
7e2ed097 901
9521b5f2 902int evlist__mmap(struct evlist *evlist, unsigned int pages)
718c602d 903{
9521b5f2 904 return evlist__mmap_ex(evlist, pages, 0, false, 0, PERF_AFFINITY_SYS, 1, 0);
718c602d
AH
905}
906
63503dba 907int perf_evlist__create_maps(struct evlist *evlist, struct target *target)
7e2ed097 908{
147c508f 909 bool all_threads = (target->per_thread && target->system_wide);
f854839b 910 struct perf_cpu_map *cpus;
9749b90e 911 struct perf_thread_map *threads;
7e2ed097 912
147c508f
JY
913 /*
914 * If specify '-a' and '--per-thread' to perf record, perf record
915 * will override '--per-thread'. target->per_thread = false and
916 * target->system_wide = true.
917 *
918 * If specify '--per-thread' only to perf record,
919 * target->per_thread = true and target->system_wide = false.
920 *
921 * So target->per_thread && target->system_wide is false.
922 * For perf record, thread_map__new_str doesn't call
923 * thread_map__new_all_cpus. That will keep perf record's
924 * current behavior.
925 *
926 * For perf stat, it allows the case that target->per_thread and
927 * target->system_wide are all true. It means to collect system-wide
928 * per-thread data. thread_map__new_str will call
929 * thread_map__new_all_cpus to enumerate all threads.
930 */
73c0ca1e 931 threads = thread_map__new_str(target->pid, target->tid, target->uid,
147c508f 932 all_threads);
7e2ed097 933
74bfd2b2 934 if (!threads)
7e2ed097
ACM
935 return -1;
936
9c105fbc 937 if (target__uses_dummy_map(target))
397721e0 938 cpus = perf_cpu_map__dummy_new();
879d77d0 939 else
9c3516d1 940 cpus = perf_cpu_map__new(target->cpu_list);
7e2ed097 941
74bfd2b2 942 if (!cpus)
7e2ed097
ACM
943 goto out_delete_threads;
944
ec903f26 945 evlist->core.has_user_cpus = !!target->cpu_list;
ec9a77a7 946
453fa030 947 perf_evlist__set_maps(&evlist->core, cpus, threads);
d5bc056e
AH
948
949 return 0;
7e2ed097
ACM
950
951out_delete_threads:
7836e52e 952 perf_thread_map__put(threads);
7e2ed097
ACM
953 return -1;
954}
955
63503dba 956void __perf_evlist__set_sample_bit(struct evlist *evlist,
22c8a376
ACM
957 enum perf_event_sample_format bit)
958{
32dcd021 959 struct evsel *evsel;
22c8a376 960
e5cadb93 961 evlist__for_each_entry(evlist, evsel)
862b2f8f 962 __evsel__set_sample_bit(evsel, bit);
22c8a376
ACM
963}
964
63503dba 965void __perf_evlist__reset_sample_bit(struct evlist *evlist,
22c8a376
ACM
966 enum perf_event_sample_format bit)
967{
32dcd021 968 struct evsel *evsel;
22c8a376 969
e5cadb93 970 evlist__for_each_entry(evlist, evsel)
862b2f8f 971 __evsel__reset_sample_bit(evsel, bit);
22c8a376
ACM
972}
973
63503dba 974int perf_evlist__apply_filters(struct evlist *evlist, struct evsel **err_evsel)
0a102479 975{
32dcd021 976 struct evsel *evsel;
745cefc5 977 int err = 0;
0a102479 978
e5cadb93 979 evlist__for_each_entry(evlist, evsel) {
745cefc5 980 if (evsel->filter == NULL)
0a102479 981 continue;
745cefc5 982
d988d5ee
KL
983 /*
984 * filters only work for tracepoint event, which doesn't have cpu limit.
985 * So evlist and evsel should always be same.
986 */
a00571fd 987 err = perf_evsel__apply_filter(&evsel->core, evsel->filter);
23d4aad4
ACM
988 if (err) {
989 *err_evsel = evsel;
745cefc5 990 break;
23d4aad4 991 }
0a102479
FW
992 }
993
745cefc5
ACM
994 return err;
995}
996
63503dba 997int perf_evlist__set_tp_filter(struct evlist *evlist, const char *filter)
745cefc5 998{
32dcd021 999 struct evsel *evsel;
745cefc5 1000 int err = 0;
745cefc5 1001
05cea449
ACM
1002 if (filter == NULL)
1003 return -1;
1004
e5cadb93 1005 evlist__for_each_entry(evlist, evsel) {
1fc632ce 1006 if (evsel->core.attr.type != PERF_TYPE_TRACEPOINT)
fdf14720
WN
1007 continue;
1008
ad681adf 1009 err = evsel__set_filter(evsel, filter);
745cefc5
ACM
1010 if (err)
1011 break;
1012 }
1013
1014 return err;
0a102479 1015}
74429964 1016
53c92f73
ACM
1017int perf_evlist__append_tp_filter(struct evlist *evlist, const char *filter)
1018{
1019 struct evsel *evsel;
1020 int err = 0;
1021
1022 if (filter == NULL)
1023 return -1;
1024
1025 evlist__for_each_entry(evlist, evsel) {
1026 if (evsel->core.attr.type != PERF_TYPE_TRACEPOINT)
1027 continue;
1028
ad681adf 1029 err = evsel__append_tp_filter(evsel, filter);
53c92f73
ACM
1030 if (err)
1031 break;
1032 }
1033
1034 return err;
1035}
1036
da949f50 1037char *asprintf__tp_filter_pids(size_t npids, pid_t *pids)
cfd70a26
ACM
1038{
1039 char *filter;
be199ada 1040 size_t i;
cfd70a26 1041
be199ada
ACM
1042 for (i = 0; i < npids; ++i) {
1043 if (i == 0) {
1044 if (asprintf(&filter, "common_pid != %d", pids[i]) < 0)
05cea449 1045 return NULL;
be199ada
ACM
1046 } else {
1047 char *tmp;
1048
1049 if (asprintf(&tmp, "%s && common_pid != %d", filter, pids[i]) < 0)
1050 goto out_free;
1051
1052 free(filter);
1053 filter = tmp;
1054 }
1055 }
cfd70a26 1056
05cea449 1057 return filter;
be199ada 1058out_free:
05cea449
ACM
1059 free(filter);
1060 return NULL;
1061}
1062
/*
 * Set a filter on all tracepoint events that excludes the given pids.
 * The temporary filter string is built and freed here.
 */
int perf_evlist__set_tp_filter_pids(struct evlist *evlist, size_t npids, pid_t *pids)
{
	int ret;
	char *filter = asprintf__tp_filter_pids(npids, pids);

	ret = perf_evlist__set_tp_filter(evlist, filter);
	free(filter);

	return ret;
}
1071
63503dba 1072int perf_evlist__set_tp_filter_pid(struct evlist *evlist, pid_t pid)
be199ada 1073{
7ad92a33 1074 return perf_evlist__set_tp_filter_pids(evlist, 1, &pid);
be199ada
ACM
1075}
1076
/*
 * Append a pid-exclusion expression to the filter of all tracepoint
 * events. The temporary filter string is built and freed here.
 */
int perf_evlist__append_tp_filter_pids(struct evlist *evlist, size_t npids, pid_t *pids)
{
	int ret;
	char *filter = asprintf__tp_filter_pids(npids, pids);

	ret = perf_evlist__append_tp_filter(evlist, filter);
	free(filter);

	return ret;
}
1085
/* Convenience wrapper: append an exclusion for a single pid. */
int perf_evlist__append_tp_filter_pid(struct evlist *evlist, pid_t pid)
{
	return perf_evlist__append_tp_filter_pids(evlist, 1, &pid);
}
1090
63503dba 1091bool perf_evlist__valid_sample_type(struct evlist *evlist)
74429964 1092{
32dcd021 1093 struct evsel *pos;
c2a70653 1094
6484d2f9 1095 if (evlist->core.nr_entries == 1)
75562573
AH
1096 return true;
1097
1098 if (evlist->id_pos < 0 || evlist->is_pos < 0)
1099 return false;
1100
e5cadb93 1101 evlist__for_each_entry(evlist, pos) {
75562573
AH
1102 if (pos->id_pos != evlist->id_pos ||
1103 pos->is_pos != evlist->is_pos)
c2a70653 1104 return false;
74429964
FW
1105 }
1106
c2a70653 1107 return true;
74429964
FW
1108}
1109
63503dba 1110u64 __perf_evlist__combined_sample_type(struct evlist *evlist)
c2a70653 1111{
32dcd021 1112 struct evsel *evsel;
75562573
AH
1113
1114 if (evlist->combined_sample_type)
1115 return evlist->combined_sample_type;
1116
e5cadb93 1117 evlist__for_each_entry(evlist, evsel)
1fc632ce 1118 evlist->combined_sample_type |= evsel->core.attr.sample_type;
75562573
AH
1119
1120 return evlist->combined_sample_type;
1121}
1122
63503dba 1123u64 perf_evlist__combined_sample_type(struct evlist *evlist)
75562573
AH
1124{
1125 evlist->combined_sample_type = 0;
1126 return __perf_evlist__combined_sample_type(evlist);
c2a70653
ACM
1127}
1128
63503dba 1129u64 perf_evlist__combined_branch_type(struct evlist *evlist)
98df858e 1130{
32dcd021 1131 struct evsel *evsel;
98df858e
AK
1132 u64 branch_type = 0;
1133
e5cadb93 1134 evlist__for_each_entry(evlist, evsel)
1fc632ce 1135 branch_type |= evsel->core.attr.branch_sample_type;
98df858e
AK
1136 return branch_type;
1137}
1138
63503dba 1139bool perf_evlist__valid_read_format(struct evlist *evlist)
9ede473c 1140{
515dbe48 1141 struct evsel *first = evlist__first(evlist), *pos = first;
1fc632ce
JO
1142 u64 read_format = first->core.attr.read_format;
1143 u64 sample_type = first->core.attr.sample_type;
9ede473c 1144
e5cadb93 1145 evlist__for_each_entry(evlist, pos) {
94d3820f
AH
1146 if (read_format != pos->core.attr.read_format) {
1147 pr_debug("Read format differs %#" PRIx64 " vs %#" PRIx64 "\n",
1148 read_format, (u64)pos->core.attr.read_format);
1149 }
9ede473c
JO
1150 }
1151
1152 /* PERF_SAMPLE_READ imples PERF_FORMAT_ID. */
1153 if ((sample_type & PERF_SAMPLE_READ) &&
1154 !(read_format & PERF_FORMAT_ID)) {
1155 return false;
1156 }
1157
1158 return true;
1159}
1160
63503dba 1161u16 perf_evlist__id_hdr_size(struct evlist *evlist)
81e36bff 1162{
515dbe48 1163 struct evsel *first = evlist__first(evlist);
81e36bff
ACM
1164 struct perf_sample *data;
1165 u64 sample_type;
1166 u16 size = 0;
1167
1fc632ce 1168 if (!first->core.attr.sample_id_all)
81e36bff
ACM
1169 goto out;
1170
1fc632ce 1171 sample_type = first->core.attr.sample_type;
81e36bff
ACM
1172
1173 if (sample_type & PERF_SAMPLE_TID)
1174 size += sizeof(data->tid) * 2;
1175
1176 if (sample_type & PERF_SAMPLE_TIME)
1177 size += sizeof(data->time);
1178
1179 if (sample_type & PERF_SAMPLE_ID)
1180 size += sizeof(data->id);
1181
1182 if (sample_type & PERF_SAMPLE_STREAM_ID)
1183 size += sizeof(data->stream_id);
1184
1185 if (sample_type & PERF_SAMPLE_CPU)
1186 size += sizeof(data->cpu) * 2;
75562573
AH
1187
1188 if (sample_type & PERF_SAMPLE_IDENTIFIER)
1189 size += sizeof(data->id);
81e36bff
ACM
1190out:
1191 return size;
1192}
1193
63503dba 1194bool perf_evlist__valid_sample_id_all(struct evlist *evlist)
74429964 1195{
515dbe48 1196 struct evsel *first = evlist__first(evlist), *pos = first;
c2a70653 1197
e5cadb93 1198 evlist__for_each_entry_continue(evlist, pos) {
1fc632ce 1199 if (first->core.attr.sample_id_all != pos->core.attr.sample_id_all)
c2a70653 1200 return false;
74429964
FW
1201 }
1202
c2a70653
ACM
1203 return true;
1204}
1205
63503dba 1206bool perf_evlist__sample_id_all(struct evlist *evlist)
c2a70653 1207{
515dbe48 1208 struct evsel *first = evlist__first(evlist);
1fc632ce 1209 return first->core.attr.sample_id_all;
74429964 1210}
81cce8de 1211
63503dba 1212void perf_evlist__set_selected(struct evlist *evlist,
32dcd021 1213 struct evsel *evsel)
81cce8de
ACM
1214{
1215 evlist->selected = evsel;
1216}
727ab04e 1217
750b4ede 1218void evlist__close(struct evlist *evlist)
a74b4b66 1219{
32dcd021 1220 struct evsel *evsel;
7736627b
AK
1221 struct affinity affinity;
1222 int cpu, i;
a74b4b66 1223
7736627b
AK
1224 /*
1225 * With perf record core.cpus is usually NULL.
1226 * Use the old method to handle this for now.
1227 */
1228 if (!evlist->core.cpus) {
1229 evlist__for_each_entry_reverse(evlist, evsel)
1230 evsel__close(evsel);
1231 return;
1232 }
1233
1234 if (affinity__setup(&affinity) < 0)
1235 return;
1236 evlist__for_each_cpu(evlist, i, cpu) {
1237 affinity__set(&affinity, cpu);
1238
1239 evlist__for_each_entry_reverse(evlist, evsel) {
1240 if (evsel__cpu_iter_skip(evsel, cpu))
1241 continue;
1242 perf_evsel__close_cpu(&evsel->core, evsel->cpu_iter - 1);
1243 }
1244 }
1245 affinity__cleanup(&affinity);
1246 evlist__for_each_entry_reverse(evlist, evsel) {
1247 perf_evsel__free_fd(&evsel->core);
1248 perf_evsel__free_id(&evsel->core);
1249 }
a74b4b66
NK
1250}
1251
63503dba 1252static int perf_evlist__create_syswide_maps(struct evlist *evlist)
4112eb18 1253{
f854839b 1254 struct perf_cpu_map *cpus;
9749b90e 1255 struct perf_thread_map *threads;
4112eb18
ACM
1256 int err = -ENOMEM;
1257
1258 /*
1259 * Try reading /sys/devices/system/cpu/online to get
1260 * an all cpus map.
1261 *
1262 * FIXME: -ENOMEM is the best we can do here, the cpu_map
1263 * code needs an overhaul to properly forward the
1264 * error, and we may not want to do that fallback to a
1265 * default cpu identity map :-\
1266 */
9c3516d1 1267 cpus = perf_cpu_map__new(NULL);
8c0498b6 1268 if (!cpus)
4112eb18
ACM
1269 goto out;
1270
4b49cce2 1271 threads = perf_thread_map__new_dummy();
8c0498b6
AH
1272 if (!threads)
1273 goto out_put;
4112eb18 1274
453fa030 1275 perf_evlist__set_maps(&evlist->core, cpus, threads);
4112eb18
ACM
1276out:
1277 return err;
8c0498b6 1278out_put:
38f01d8d 1279 perf_cpu_map__put(cpus);
4112eb18
ACM
1280 goto out;
1281}
1282
474ddc4c 1283int evlist__open(struct evlist *evlist)
727ab04e 1284{
32dcd021 1285 struct evsel *evsel;
a74b4b66 1286 int err;
727ab04e 1287
4112eb18
ACM
1288 /*
1289 * Default: one fd per CPU, all threads, aka systemwide
1290 * as sys_perf_event_open(cpu = -1, thread = -1) is EINVAL
1291 */
03617c22 1292 if (evlist->core.threads == NULL && evlist->core.cpus == NULL) {
4112eb18
ACM
1293 err = perf_evlist__create_syswide_maps(evlist);
1294 if (err < 0)
1295 goto out_err;
1296 }
1297
733cd2fe
AH
1298 perf_evlist__update_id_pos(evlist);
1299
e5cadb93 1300 evlist__for_each_entry(evlist, evsel) {
af663bd0 1301 err = evsel__open(evsel, evsel->core.cpus, evsel->core.threads);
727ab04e
ACM
1302 if (err < 0)
1303 goto out_err;
1304 }
1305
1306 return 0;
1307out_err:
750b4ede 1308 evlist__close(evlist);
41c21a68 1309 errno = -err;
727ab04e
ACM
1310 return err;
1311}
35b9d88e 1312
63503dba 1313int perf_evlist__prepare_workload(struct evlist *evlist, struct target *target,
55e162ea 1314 const char *argv[], bool pipe_output,
735f7e0b 1315 void (*exec_error)(int signo, siginfo_t *info, void *ucontext))
35b9d88e
ACM
1316{
1317 int child_ready_pipe[2], go_pipe[2];
1318 char bf;
1319
1320 if (pipe(child_ready_pipe) < 0) {
1321 perror("failed to create 'ready' pipe");
1322 return -1;
1323 }
1324
1325 if (pipe(go_pipe) < 0) {
1326 perror("failed to create 'go' pipe");
1327 goto out_close_ready_pipe;
1328 }
1329
1330 evlist->workload.pid = fork();
1331 if (evlist->workload.pid < 0) {
1332 perror("failed to fork");
1333 goto out_close_pipes;
1334 }
1335
1336 if (!evlist->workload.pid) {
5f1c4225
ACM
1337 int ret;
1338
119fa3c9 1339 if (pipe_output)
35b9d88e
ACM
1340 dup2(2, 1);
1341
0817df08
DA
1342 signal(SIGTERM, SIG_DFL);
1343
35b9d88e
ACM
1344 close(child_ready_pipe[0]);
1345 close(go_pipe[1]);
1346 fcntl(go_pipe[0], F_SETFD, FD_CLOEXEC);
1347
35b9d88e
ACM
1348 /*
1349 * Tell the parent we're ready to go
1350 */
1351 close(child_ready_pipe[1]);
1352
1353 /*
1354 * Wait until the parent tells us to go.
1355 */
5f1c4225
ACM
1356 ret = read(go_pipe[0], &bf, 1);
1357 /*
1358 * The parent will ask for the execvp() to be performed by
1359 * writing exactly one byte, in workload.cork_fd, usually via
1360 * perf_evlist__start_workload().
1361 *
20f86fc1 1362 * For cancelling the workload without actually running it,
5f1c4225
ACM
1363 * the parent will just close workload.cork_fd, without writing
1364 * anything, i.e. read will return zero and we just exit()
1365 * here.
1366 */
1367 if (ret != 1) {
1368 if (ret == -1)
1369 perror("unable to read pipe");
1370 exit(ret);
1371 }
35b9d88e
ACM
1372
1373 execvp(argv[0], (char **)argv);
1374
735f7e0b 1375 if (exec_error) {
f33cbe72
ACM
1376 union sigval val;
1377
1378 val.sival_int = errno;
1379 if (sigqueue(getppid(), SIGUSR1, val))
1380 perror(argv[0]);
1381 } else
1382 perror(argv[0]);
35b9d88e
ACM
1383 exit(-1);
1384 }
1385
735f7e0b
ACM
1386 if (exec_error) {
1387 struct sigaction act = {
1388 .sa_flags = SA_SIGINFO,
1389 .sa_sigaction = exec_error,
1390 };
1391 sigaction(SIGUSR1, &act, NULL);
1392 }
1393
1aaf63b1 1394 if (target__none(target)) {
03617c22 1395 if (evlist->core.threads == NULL) {
1aaf63b1
ACM
1396 fprintf(stderr, "FATAL: evlist->threads need to be set at this point (%s:%d).\n",
1397 __func__, __LINE__);
1398 goto out_close_pipes;
1399 }
03617c22 1400 perf_thread_map__set_pid(evlist->core.threads, 0, evlist->workload.pid);
1aaf63b1 1401 }
35b9d88e
ACM
1402
1403 close(child_ready_pipe[1]);
1404 close(go_pipe[0]);
1405 /*
1406 * wait for child to settle
1407 */
1408 if (read(child_ready_pipe[0], &bf, 1) == -1) {
1409 perror("unable to read pipe");
1410 goto out_close_pipes;
1411 }
1412
bcf3145f 1413 fcntl(go_pipe[1], F_SETFD, FD_CLOEXEC);
35b9d88e
ACM
1414 evlist->workload.cork_fd = go_pipe[1];
1415 close(child_ready_pipe[0]);
1416 return 0;
1417
1418out_close_pipes:
1419 close(go_pipe[0]);
1420 close(go_pipe[1]);
1421out_close_ready_pipe:
1422 close(child_ready_pipe[0]);
1423 close(child_ready_pipe[1]);
1424 return -1;
1425}
1426
63503dba 1427int perf_evlist__start_workload(struct evlist *evlist)
35b9d88e
ACM
1428{
1429 if (evlist->workload.cork_fd > 0) {
b3824404 1430 char bf = 0;
bcf3145f 1431 int ret;
35b9d88e
ACM
1432 /*
1433 * Remove the cork, let it rip!
1434 */
bcf3145f
NK
1435 ret = write(evlist->workload.cork_fd, &bf, 1);
1436 if (ret < 0)
e978be9e 1437 perror("unable to write to pipe");
bcf3145f
NK
1438
1439 close(evlist->workload.cork_fd);
1440 return ret;
35b9d88e
ACM
1441 }
1442
1443 return 0;
1444}
cb0b29e0 1445
63503dba 1446int perf_evlist__parse_sample(struct evlist *evlist, union perf_event *event,
0807d2d8 1447 struct perf_sample *sample)
cb0b29e0 1448{
32dcd021 1449 struct evsel *evsel = perf_evlist__event2evsel(evlist, event);
75562573
AH
1450
1451 if (!evsel)
1452 return -EFAULT;
6b6017a2 1453 return evsel__parse_sample(evsel, event, sample);
cb0b29e0 1454}
78f067b3 1455
63503dba 1456int perf_evlist__parse_sample_timestamp(struct evlist *evlist,
01468120
JO
1457 union perf_event *event,
1458 u64 *timestamp)
1459{
32dcd021 1460 struct evsel *evsel = perf_evlist__event2evsel(evlist, event);
01468120
JO
1461
1462 if (!evsel)
1463 return -EFAULT;
6b6017a2 1464 return evsel__parse_sample_timestamp(evsel, event, timestamp);
01468120
JO
1465}
1466
63503dba 1467int perf_evlist__strerror_open(struct evlist *evlist,
a8f23d8f
ACM
1468 int err, char *buf, size_t size)
1469{
1470 int printed, value;
c8b5f2c9 1471 char sbuf[STRERR_BUFSIZE], *emsg = str_error_r(err, sbuf, sizeof(sbuf));
a8f23d8f
ACM
1472
1473 switch (err) {
1474 case EACCES:
1475 case EPERM:
1476 printed = scnprintf(buf, size,
1477 "Error:\t%s.\n"
1478 "Hint:\tCheck /proc/sys/kernel/perf_event_paranoid setting.", emsg);
1479
1a47245d 1480 value = perf_event_paranoid();
a8f23d8f
ACM
1481
1482 printed += scnprintf(buf + printed, size - printed, "\nHint:\t");
1483
1484 if (value >= 2) {
1485 printed += scnprintf(buf + printed, size - printed,
1486 "For your workloads it needs to be <= 1\nHint:\t");
1487 }
1488 printed += scnprintf(buf + printed, size - printed,
5229e366 1489 "For system wide tracing it needs to be set to -1.\n");
a8f23d8f
ACM
1490
1491 printed += scnprintf(buf + printed, size - printed,
5229e366
ACM
1492 "Hint:\tTry: 'sudo sh -c \"echo -1 > /proc/sys/kernel/perf_event_paranoid\"'\n"
1493 "Hint:\tThe current value is %d.", value);
a8f23d8f 1494 break;
d9aade7f 1495 case EINVAL: {
515dbe48 1496 struct evsel *first = evlist__first(evlist);
d9aade7f
ACM
1497 int max_freq;
1498
1499 if (sysctl__read_int("kernel/perf_event_max_sample_rate", &max_freq) < 0)
1500 goto out_default;
1501
1fc632ce 1502 if (first->core.attr.sample_freq < (u64)max_freq)
d9aade7f
ACM
1503 goto out_default;
1504
1505 printed = scnprintf(buf, size,
1506 "Error:\t%s.\n"
1507 "Hint:\tCheck /proc/sys/kernel/perf_event_max_sample_rate.\n"
1508 "Hint:\tThe current value is %d and %" PRIu64 " is being requested.",
1fc632ce 1509 emsg, max_freq, first->core.attr.sample_freq);
d9aade7f
ACM
1510 break;
1511 }
a8f23d8f 1512 default:
d9aade7f 1513out_default:
a8f23d8f
ACM
1514 scnprintf(buf, size, "%s", emsg);
1515 break;
1516 }
1517
1518 return 0;
1519}
a025e4f0 1520
63503dba 1521int perf_evlist__strerror_mmap(struct evlist *evlist, int err, char *buf, size_t size)
956fa571 1522{
c8b5f2c9 1523 char sbuf[STRERR_BUFSIZE], *emsg = str_error_r(err, sbuf, sizeof(sbuf));
f6fa4375 1524 int pages_attempted = evlist->core.mmap_len / 1024, pages_max_per_user, printed = 0;
956fa571
ACM
1525
1526 switch (err) {
1527 case EPERM:
e5d4a290 1528 sysctl__read_int("kernel/perf_event_mlock_kb", &pages_max_per_user);
e965bea1
ACM
1529 printed += scnprintf(buf + printed, size - printed,
1530 "Error:\t%s.\n"
956fa571 1531 "Hint:\tCheck /proc/sys/kernel/perf_event_mlock_kb (%d kB) setting.\n"
e965bea1 1532 "Hint:\tTried using %zd kB.\n",
e5d4a290 1533 emsg, pages_max_per_user, pages_attempted);
e965bea1
ACM
1534
1535 if (pages_attempted >= pages_max_per_user) {
1536 printed += scnprintf(buf + printed, size - printed,
1537 "Hint:\tTry 'sudo sh -c \"echo %d > /proc/sys/kernel/perf_event_mlock_kb\"', or\n",
1538 pages_max_per_user + pages_attempted);
1539 }
1540
1541 printed += scnprintf(buf + printed, size - printed,
1542 "Hint:\tTry using a smaller -m/--mmap-pages value.");
956fa571
ACM
1543 break;
1544 default:
1545 scnprintf(buf, size, "%s", emsg);
1546 break;
1547 }
1548
1549 return 0;
1550}
1551
63503dba 1552void perf_evlist__to_front(struct evlist *evlist,
32dcd021 1553 struct evsel *move_evsel)
a025e4f0 1554{
32dcd021 1555 struct evsel *evsel, *n;
a025e4f0
AH
1556 LIST_HEAD(move);
1557
515dbe48 1558 if (move_evsel == evlist__first(evlist))
a025e4f0
AH
1559 return;
1560
e5cadb93 1561 evlist__for_each_entry_safe(evlist, n, evsel) {
a025e4f0 1562 if (evsel->leader == move_evsel->leader)
b27c4ece 1563 list_move_tail(&evsel->core.node, &move);
a025e4f0
AH
1564 }
1565
ce9036a6 1566 list_splice(&move, &evlist->core.entries);
a025e4f0 1567}
60b0896c 1568
442ad225
AH
1569struct evsel *perf_evlist__get_tracking_event(struct evlist *evlist)
1570{
1571 struct evsel *evsel;
1572
1573 evlist__for_each_entry(evlist, evsel) {
1574 if (evsel->tracking)
1575 return evsel;
1576 }
1577
1578 return evlist__first(evlist);
1579}
1580
63503dba 1581void perf_evlist__set_tracking_event(struct evlist *evlist,
32dcd021 1582 struct evsel *tracking_evsel)
60b0896c 1583{
32dcd021 1584 struct evsel *evsel;
60b0896c
AH
1585
1586 if (tracking_evsel->tracking)
1587 return;
1588
e5cadb93 1589 evlist__for_each_entry(evlist, evsel) {
60b0896c
AH
1590 if (evsel != tracking_evsel)
1591 evsel->tracking = false;
1592 }
1593
1594 tracking_evsel->tracking = true;
1595}
7630b3e2 1596
32dcd021 1597struct evsel *
63503dba 1598perf_evlist__find_evsel_by_str(struct evlist *evlist,
7630b3e2
WN
1599 const char *str)
1600{
32dcd021 1601 struct evsel *evsel;
7630b3e2 1602
e5cadb93 1603 evlist__for_each_entry(evlist, evsel) {
7630b3e2
WN
1604 if (!evsel->name)
1605 continue;
1606 if (strcmp(str, evsel->name) == 0)
1607 return evsel;
1608 }
1609
1610 return NULL;
1611}
54cc54de 1612
63503dba 1613void perf_evlist__toggle_bkw_mmap(struct evlist *evlist,
54cc54de
WN
1614 enum bkw_mmap_state state)
1615{
1616 enum bkw_mmap_state old_state = evlist->bkw_mmap_state;
1617 enum action {
1618 NONE,
1619 PAUSE,
1620 RESUME,
1621 } action = NONE;
1622
0b72d69a 1623 if (!evlist->overwrite_mmap)
54cc54de
WN
1624 return;
1625
1626 switch (old_state) {
1627 case BKW_MMAP_NOTREADY: {
1628 if (state != BKW_MMAP_RUNNING)
dd8bd53a 1629 goto state_err;
54cc54de
WN
1630 break;
1631 }
1632 case BKW_MMAP_RUNNING: {
1633 if (state != BKW_MMAP_DATA_PENDING)
1634 goto state_err;
1635 action = PAUSE;
1636 break;
1637 }
1638 case BKW_MMAP_DATA_PENDING: {
1639 if (state != BKW_MMAP_EMPTY)
1640 goto state_err;
1641 break;
1642 }
1643 case BKW_MMAP_EMPTY: {
1644 if (state != BKW_MMAP_RUNNING)
1645 goto state_err;
1646 action = RESUME;
1647 break;
1648 }
1649 default:
1650 WARN_ONCE(1, "Shouldn't get there\n");
1651 }
1652
1653 evlist->bkw_mmap_state = state;
1654
1655 switch (action) {
1656 case PAUSE:
1657 perf_evlist__pause(evlist);
1658 break;
1659 case RESUME:
1660 perf_evlist__resume(evlist);
1661 break;
1662 case NONE:
1663 default:
1664 break;
1665 }
1666
1667state_err:
1668 return;
1669}
07d6f446 1670
63503dba 1671bool perf_evlist__exclude_kernel(struct evlist *evlist)
07d6f446 1672{
32dcd021 1673 struct evsel *evsel;
07d6f446
ACM
1674
1675 evlist__for_each_entry(evlist, evsel) {
1fc632ce 1676 if (!evsel->core.attr.exclude_kernel)
07d6f446
ACM
1677 return false;
1678 }
1679
1680 return true;
1681}
e2bdbe80
JY
1682
1683/*
1684 * Events in data file are not collect in groups, but we still want
1685 * the group display. Set the artificial group and set the leader's
1686 * forced_leader flag to notify the display code.
1687 */
63503dba 1688void perf_evlist__force_leader(struct evlist *evlist)
e2bdbe80
JY
1689{
1690 if (!evlist->nr_groups) {
515dbe48 1691 struct evsel *leader = evlist__first(evlist);
e2bdbe80
JY
1692
1693 perf_evlist__set_leader(evlist);
1694 leader->forced_leader = true;
1695 }
1696}
c3537fc2 1697
63503dba 1698struct evsel *perf_evlist__reset_weak_group(struct evlist *evsel_list,
4804e011
AK
1699 struct evsel *evsel,
1700 bool close)
c3537fc2 1701{
32dcd021 1702 struct evsel *c2, *leader;
c3537fc2
AK
1703 bool is_open = true;
1704
1705 leader = evsel->leader;
1706 pr_debug("Weak group for %s/%d failed\n",
5643b1a5 1707 leader->name, leader->core.nr_members);
c3537fc2
AK
1708
1709 /*
1710 * for_each_group_member doesn't work here because it doesn't
1711 * include the first entry.
1712 */
1713 evlist__for_each_entry(evsel_list, c2) {
1714 if (c2 == evsel)
1715 is_open = false;
1716 if (c2->leader == leader) {
4804e011 1717 if (is_open && close)
5a40e199 1718 perf_evsel__close(&c2->core);
c3537fc2 1719 c2->leader = c2;
5643b1a5 1720 c2->core.nr_members = 0;
4804e011
AK
1721 /*
1722 * Set this for all former members of the group
1723 * to indicate they get reopened.
1724 */
1725 c2->reset_group = true;
c3537fc2
AK
1726 }
1727 }
1728 return leader;
1729}