// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (C) 2011, Red Hat Inc, Arnaldo Carvalho de Melo <acme@redhat.com>
 *
 * Parts came from builtin-{top,stat,record}.c, see those files for further
 * copyright notes.
 */
#include <api/fs/fs.h>
#include <errno.h>
#include <inttypes.h>
#include <poll.h>
#include "cpumap.h"
#include "util/mmap.h"
#include "thread_map.h"
#include "target.h"
#include "evlist.h"
#include "evsel.h"
#include "debug.h"
#include "units.h"
#include <internal/lib.h> // page_size
#include "affinity.h"
#include "../perf.h"
#include "asm/bug.h"
#include "bpf-event.h"
#include "util/string2.h"
#include "util/perf_api_probe.h"
#include <signal.h>
#include <unistd.h>
#include <sched.h>
#include <stdlib.h>

#include "parse-events.h"
#include <subcmd/parse-options.h>

#include <fcntl.h>
#include <sys/ioctl.h>
#include <sys/mman.h>

#include <linux/bitops.h>
#include <linux/hash.h>
#include <linux/log2.h>
#include <linux/err.h>
#include <linux/string.h>
#include <linux/zalloc.h>
#include <perf/evlist.h>
#include <perf/evsel.h>
#include <perf/cpumap.h>
#include <perf/mmap.h>

#include <internal/xyarray.h>

#ifdef LACKS_SIGQUEUE_PROTOTYPE
int sigqueue(pid_t pid, int sig, const union sigval value);
#endif

#define FD(e, x, y) (*(int *)xyarray__entry(e->core.fd, x, y))
#define SID(e, x, y) xyarray__entry(e->core.sample_id, x, y)

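/*
 * Note: the ctl_fd fields are the --control file descriptors; they start out
 * unset (-1) here and are filled in later by evlist__initialize_ctlfd().
 */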
void evlist__init(struct evlist *evlist, struct perf_cpu_map *cpus,
		  struct perf_thread_map *threads)
{
	perf_evlist__init(&evlist->core);
	perf_evlist__set_maps(&evlist->core, cpus, threads);
	evlist->workload.pid = -1;
	evlist->bkw_mmap_state = BKW_MMAP_NOTREADY;
	evlist->ctl_fd.fd = -1;
	evlist->ctl_fd.ack = -1;
	evlist->ctl_fd.pos = -1;
}

struct evlist *evlist__new(void)
{
	struct evlist *evlist = zalloc(sizeof(*evlist));

	if (evlist != NULL)
		evlist__init(evlist, NULL, NULL);

	return evlist;
}

struct evlist *perf_evlist__new_default(void)
{
	struct evlist *evlist = evlist__new();

	if (evlist && evlist__add_default(evlist)) {
		evlist__delete(evlist);
		evlist = NULL;
	}

	return evlist;
}

struct evlist *perf_evlist__new_dummy(void)
{
	struct evlist *evlist = evlist__new();

	if (evlist && evlist__add_dummy(evlist)) {
		evlist__delete(evlist);
		evlist = NULL;
	}

	return evlist;
}

/**
 * perf_evlist__set_id_pos - set the positions of event ids.
 * @evlist: selected event list
 *
 * Events with compatible sample types all have the same id_pos
 * and is_pos. For convenience, put a copy on evlist.
 */
void perf_evlist__set_id_pos(struct evlist *evlist)
{
	struct evsel *first = evlist__first(evlist);

	evlist->id_pos = first->id_pos;
	evlist->is_pos = first->is_pos;
}

static void perf_evlist__update_id_pos(struct evlist *evlist)
{
	struct evsel *evsel;

	evlist__for_each_entry(evlist, evsel)
		evsel__calc_id_pos(evsel);

	perf_evlist__set_id_pos(evlist);
}

static void evlist__purge(struct evlist *evlist)
{
	struct evsel *pos, *n;

	evlist__for_each_entry_safe(evlist, n, pos) {
		list_del_init(&pos->core.node);
		pos->evlist = NULL;
		evsel__delete(pos);
	}

	evlist->core.nr_entries = 0;
}

void evlist__exit(struct evlist *evlist)
{
	zfree(&evlist->mmap);
	zfree(&evlist->overwrite_mmap);
	perf_evlist__exit(&evlist->core);
}

void evlist__delete(struct evlist *evlist)
{
	if (evlist == NULL)
		return;

	evlist__munmap(evlist);
	evlist__close(evlist);
	evlist__purge(evlist);
	evlist__exit(evlist);
	free(evlist);
}

void evlist__add(struct evlist *evlist, struct evsel *entry)
{
	entry->evlist = evlist;
	entry->idx = evlist->core.nr_entries;
	entry->tracking = !entry->idx;

	perf_evlist__add(&evlist->core, &entry->core);

	if (evlist->core.nr_entries == 1)
		perf_evlist__set_id_pos(evlist);
}

void evlist__remove(struct evlist *evlist, struct evsel *evsel)
{
	evsel->evlist = NULL;
	perf_evlist__remove(&evlist->core, &evsel->core);
}

void perf_evlist__splice_list_tail(struct evlist *evlist,
				   struct list_head *list)
{
	struct evsel *evsel, *temp;

	__evlist__for_each_entry_safe(list, temp, evsel) {
		list_del_init(&evsel->core.node);
		evlist__add(evlist, evsel);
	}
}

int __evlist__set_tracepoints_handlers(struct evlist *evlist,
				       const struct evsel_str_handler *assocs, size_t nr_assocs)
{
	struct evsel *evsel;
	size_t i;
	int err;

	for (i = 0; i < nr_assocs; i++) {
		// Adding a handler for an event not in this evlist, just ignore it.
		evsel = perf_evlist__find_tracepoint_by_name(evlist, assocs[i].name);
		if (evsel == NULL)
			continue;

		err = -EEXIST;
		if (evsel->handler != NULL)
			goto out;
		evsel->handler = assocs[i].handler;
	}

	err = 0;
out:
	return err;
}

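/*
 * The list is assumed to be in insertion order: the first entry becomes the
 * group leader, the distance between the last and first idx gives the number
 * of group members, and every entry's leader pointer is set to the first.
 */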
void __perf_evlist__set_leader(struct list_head *list)
{
	struct evsel *evsel, *leader;

	leader = list_entry(list->next, struct evsel, core.node);
	evsel = list_entry(list->prev, struct evsel, core.node);

	leader->core.nr_members = evsel->idx - leader->idx + 1;

	__evlist__for_each_entry(list, evsel) {
		evsel->leader = leader;
	}
}

void perf_evlist__set_leader(struct evlist *evlist)
{
	if (evlist->core.nr_entries) {
		evlist->nr_groups = evlist->core.nr_entries > 1 ? 1 : 0;
		__perf_evlist__set_leader(&evlist->core.entries);
	}
}

int __evlist__add_default(struct evlist *evlist, bool precise)
{
	struct evsel *evsel = evsel__new_cycles(precise);

	if (evsel == NULL)
		return -ENOMEM;

	evlist__add(evlist, evsel);
	return 0;
}

int evlist__add_dummy(struct evlist *evlist)
{
	struct perf_event_attr attr = {
		.type	= PERF_TYPE_SOFTWARE,
		.config = PERF_COUNT_SW_DUMMY,
		.size	= sizeof(attr), /* to capture ABI version */
	};
	struct evsel *evsel = evsel__new_idx(&attr, evlist->core.nr_entries);

	if (evsel == NULL)
		return -ENOMEM;

	evlist__add(evlist, evsel);
	return 0;
}

static int evlist__add_attrs(struct evlist *evlist, struct perf_event_attr *attrs, size_t nr_attrs)
{
	struct evsel *evsel, *n;
	LIST_HEAD(head);
	size_t i;

	for (i = 0; i < nr_attrs; i++) {
		evsel = evsel__new_idx(attrs + i, evlist->core.nr_entries + i);
		if (evsel == NULL)
			goto out_delete_partial_list;
		list_add_tail(&evsel->core.node, &head);
	}

	perf_evlist__splice_list_tail(evlist, &head);

	return 0;

out_delete_partial_list:
	__evlist__for_each_entry_safe(&head, n, evsel)
		evsel__delete(evsel);
	return -1;
}

int __evlist__add_default_attrs(struct evlist *evlist, struct perf_event_attr *attrs, size_t nr_attrs)
{
	size_t i;

	for (i = 0; i < nr_attrs; i++)
		event_attr_init(attrs + i);

	return evlist__add_attrs(evlist, attrs, nr_attrs);
}

struct evsel *
perf_evlist__find_tracepoint_by_id(struct evlist *evlist, int id)
{
	struct evsel *evsel;

	evlist__for_each_entry(evlist, evsel) {
		if (evsel->core.attr.type == PERF_TYPE_TRACEPOINT &&
		    (int)evsel->core.attr.config == id)
			return evsel;
	}

	return NULL;
}

struct evsel *
perf_evlist__find_tracepoint_by_name(struct evlist *evlist,
				     const char *name)
{
	struct evsel *evsel;

	evlist__for_each_entry(evlist, evsel) {
		if ((evsel->core.attr.type == PERF_TYPE_TRACEPOINT) &&
		    (strcmp(evsel->name, name) == 0))
			return evsel;
	}

	return NULL;
}

int evlist__add_newtp(struct evlist *evlist, const char *sys, const char *name, void *handler)
{
	struct evsel *evsel = evsel__newtp(sys, name);

	if (IS_ERR(evsel))
		return -1;

	evsel->handler = handler;
	evlist__add(evlist, evsel);
	return 0;
}

static int perf_evlist__nr_threads(struct evlist *evlist,
				   struct evsel *evsel)
{
	if (evsel->core.system_wide)
		return 1;
	else
		return perf_thread_map__nr(evlist->core.threads);
}

void evlist__cpu_iter_start(struct evlist *evlist)
{
	struct evsel *pos;

	/*
	 * Reset the per evsel cpu_iter. This is needed because
	 * each evsel's cpumap may have a different index space,
	 * and some operations need the index to modify
	 * the FD xyarray (e.g. open, close)
	 */
	evlist__for_each_entry(evlist, pos)
		pos->cpu_iter = 0;
}

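/*
 * The _no_inc variant only peeks: it reports whether @ev has no counter for
 * @cpu at its current iterator position. evsel__cpu_iter_skip() additionally
 * advances the iterator past a matching cpu.
 */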
bool evsel__cpu_iter_skip_no_inc(struct evsel *ev, int cpu)
{
	if (ev->cpu_iter >= ev->core.cpus->nr)
		return true;
	if (cpu >= 0 && ev->core.cpus->map[ev->cpu_iter] != cpu)
		return true;
	return false;
}

bool evsel__cpu_iter_skip(struct evsel *ev, int cpu)
{
	if (!evsel__cpu_iter_skip_no_inc(ev, cpu)) {
		ev->cpu_iter++;
		return false;
	}
	return true;
}

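/*
 * Walk the evlist CPU by CPU, with the current thread bound to each CPU in
 * turn, so the PERF_EVENT_IOC_DISABLE ioctls run on the CPU that owns the
 * events and cross-CPU IPIs are avoided. 'Immediate' events are disabled in
 * a second pass, after all other events.
 */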
void evlist__disable(struct evlist *evlist)
{
	struct evsel *pos;
	struct affinity affinity;
	int cpu, i, imm = 0;
	bool has_imm = false;

	if (affinity__setup(&affinity) < 0)
		return;

	/* Disable 'immediate' events last */
	for (imm = 0; imm <= 1; imm++) {
		evlist__for_each_cpu(evlist, i, cpu) {
			affinity__set(&affinity, cpu);

			evlist__for_each_entry(evlist, pos) {
				if (evsel__cpu_iter_skip(pos, cpu))
					continue;
				if (pos->disabled || !evsel__is_group_leader(pos) || !pos->core.fd)
					continue;
				if (pos->immediate)
					has_imm = true;
				if (pos->immediate != imm)
					continue;
				evsel__disable_cpu(pos, pos->cpu_iter - 1);
			}
		}
		if (!has_imm)
			break;
	}

	affinity__cleanup(&affinity);
	evlist__for_each_entry(evlist, pos) {
		if (!evsel__is_group_leader(pos) || !pos->core.fd)
			continue;
		pos->disabled = true;
	}

	evlist->enabled = false;
}

void evlist__enable(struct evlist *evlist)
{
	struct evsel *pos;
	struct affinity affinity;
	int cpu, i;

	if (affinity__setup(&affinity) < 0)
		return;

	evlist__for_each_cpu(evlist, i, cpu) {
		affinity__set(&affinity, cpu);

		evlist__for_each_entry(evlist, pos) {
			if (evsel__cpu_iter_skip(pos, cpu))
				continue;
			if (!evsel__is_group_leader(pos) || !pos->core.fd)
				continue;
			evsel__enable_cpu(pos, pos->cpu_iter - 1);
		}
	}
	affinity__cleanup(&affinity);
	evlist__for_each_entry(evlist, pos) {
		if (!evsel__is_group_leader(pos) || !pos->core.fd)
			continue;
		pos->disabled = false;
	}

	evlist->enabled = true;
}

void perf_evlist__toggle_enable(struct evlist *evlist)
{
	(evlist->enabled ? evlist__disable : evlist__enable)(evlist);
}

static int perf_evlist__enable_event_cpu(struct evlist *evlist,
					 struct evsel *evsel, int cpu)
{
	int thread;
	int nr_threads = perf_evlist__nr_threads(evlist, evsel);

	if (!evsel->core.fd)
		return -EINVAL;

	for (thread = 0; thread < nr_threads; thread++) {
		int err = ioctl(FD(evsel, cpu, thread), PERF_EVENT_IOC_ENABLE, 0);
		if (err)
			return err;
	}
	return 0;
}

static int perf_evlist__enable_event_thread(struct evlist *evlist,
					    struct evsel *evsel,
					    int thread)
{
	int cpu;
	int nr_cpus = perf_cpu_map__nr(evlist->core.cpus);

	if (!evsel->core.fd)
		return -EINVAL;

	for (cpu = 0; cpu < nr_cpus; cpu++) {
		int err = ioctl(FD(evsel, cpu, thread), PERF_EVENT_IOC_ENABLE, 0);
		if (err)
			return err;
	}
	return 0;
}

int perf_evlist__enable_event_idx(struct evlist *evlist,
				  struct evsel *evsel, int idx)
{
	bool per_cpu_mmaps = !perf_cpu_map__empty(evlist->core.cpus);

	if (per_cpu_mmaps)
		return perf_evlist__enable_event_cpu(evlist, evsel, idx);
	else
		return perf_evlist__enable_event_thread(evlist, evsel, idx);
}

int evlist__add_pollfd(struct evlist *evlist, int fd)
{
	return perf_evlist__add_pollfd(&evlist->core, fd, NULL, POLLIN, fdarray_flag__default);
}

int evlist__filter_pollfd(struct evlist *evlist, short revents_and_mask)
{
	return perf_evlist__filter_pollfd(&evlist->core, revents_and_mask);
}

int evlist__poll(struct evlist *evlist, int timeout)
{
	return perf_evlist__poll(&evlist->core, timeout);
}

struct perf_sample_id *perf_evlist__id2sid(struct evlist *evlist, u64 id)
{
	struct hlist_head *head;
	struct perf_sample_id *sid;
	int hash;

	hash = hash_64(id, PERF_EVLIST__HLIST_BITS);
	head = &evlist->core.heads[hash];

	hlist_for_each_entry(sid, head, node)
		if (sid->id == id)
			return sid;

	return NULL;
}

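/*
 * Resolve a sample id back to its evsel. With a single event, an id of zero,
 * or when sample_id_all is off, fall back to the first evsel; a NULL return
 * means the id could not be matched in the hash.
 */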
struct evsel *perf_evlist__id2evsel(struct evlist *evlist, u64 id)
{
	struct perf_sample_id *sid;

	if (evlist->core.nr_entries == 1 || !id)
		return evlist__first(evlist);

	sid = perf_evlist__id2sid(evlist, id);
	if (sid)
		return container_of(sid->evsel, struct evsel, core);

	if (!evlist__sample_id_all(evlist))
		return evlist__first(evlist);

	return NULL;
}

struct evsel *perf_evlist__id2evsel_strict(struct evlist *evlist,
					   u64 id)
{
	struct perf_sample_id *sid;

	if (!id)
		return NULL;

	sid = perf_evlist__id2sid(evlist, id);
	if (sid)
		return container_of(sid->evsel, struct evsel, core);

	return NULL;
}

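/*
 * Extract the sample id from a raw event: for PERF_RECORD_SAMPLE it sits
 * id_pos 64-bit words from the start of the sample array, for all other
 * record types (with sample_id_all) it sits is_pos words from the end.
 */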
static int perf_evlist__event2id(struct evlist *evlist,
				 union perf_event *event, u64 *id)
{
	const __u64 *array = event->sample.array;
	ssize_t n;

	n = (event->header.size - sizeof(event->header)) >> 3;

	if (event->header.type == PERF_RECORD_SAMPLE) {
		if (evlist->id_pos >= n)
			return -1;
		*id = array[evlist->id_pos];
	} else {
		if (evlist->is_pos > n)
			return -1;
		n -= evlist->is_pos;
		*id = array[n];
	}
	return 0;
}

struct evsel *perf_evlist__event2evsel(struct evlist *evlist,
				       union perf_event *event)
{
	struct evsel *first = evlist__first(evlist);
	struct hlist_head *head;
	struct perf_sample_id *sid;
	int hash;
	u64 id;

	if (evlist->core.nr_entries == 1)
		return first;

	if (!first->core.attr.sample_id_all &&
	    event->header.type != PERF_RECORD_SAMPLE)
		return first;

	if (perf_evlist__event2id(evlist, event, &id))
		return NULL;

	/* Synthesized events have an id of zero */
	if (!id)
		return first;

	hash = hash_64(id, PERF_EVLIST__HLIST_BITS);
	head = &evlist->core.heads[hash];

	hlist_for_each_entry(sid, head, node) {
		if (sid->id == id)
			return container_of(sid->evsel, struct evsel, core);
	}
	return NULL;
}

static int perf_evlist__set_paused(struct evlist *evlist, bool value)
{
	int i;

	if (!evlist->overwrite_mmap)
		return 0;

	for (i = 0; i < evlist->core.nr_mmaps; i++) {
		int fd = evlist->overwrite_mmap[i].core.fd;
		int err;

		if (fd < 0)
			continue;
		err = ioctl(fd, PERF_EVENT_IOC_PAUSE_OUTPUT, value ? 1 : 0);
		if (err)
			return err;
	}
	return 0;
}

static int perf_evlist__pause(struct evlist *evlist)
{
	return perf_evlist__set_paused(evlist, true);
}

static int perf_evlist__resume(struct evlist *evlist)
{
	return perf_evlist__set_paused(evlist, false);
}

static void evlist__munmap_nofree(struct evlist *evlist)
{
	int i;

	if (evlist->mmap)
		for (i = 0; i < evlist->core.nr_mmaps; i++)
			perf_mmap__munmap(&evlist->mmap[i].core);

	if (evlist->overwrite_mmap)
		for (i = 0; i < evlist->core.nr_mmaps; i++)
			perf_mmap__munmap(&evlist->overwrite_mmap[i].core);
}

void evlist__munmap(struct evlist *evlist)
{
	evlist__munmap_nofree(evlist);
	zfree(&evlist->mmap);
	zfree(&evlist->overwrite_mmap);
}

static void perf_mmap__unmap_cb(struct perf_mmap *map)
{
	struct mmap *m = container_of(map, struct mmap, core);

	mmap__munmap(m);
}

static struct mmap *evlist__alloc_mmap(struct evlist *evlist,
				       bool overwrite)
{
	int i;
	struct mmap *map;

	map = zalloc(evlist->core.nr_mmaps * sizeof(struct mmap));
	if (!map)
		return NULL;

	for (i = 0; i < evlist->core.nr_mmaps; i++) {
		struct perf_mmap *prev = i ? &map[i - 1].core : NULL;

		/*
		 * When the perf_mmap() call is made we grab one refcount, plus
		 * one extra to let perf_mmap__consume() get the last
		 * events after all real references (perf_mmap__get()) are
		 * dropped.
		 *
		 * Each PERF_EVENT_IOC_SET_OUTPUT points to this mmap and
		 * thus does perf_mmap__get() on it.
		 */
		perf_mmap__init(&map[i].core, prev, overwrite, perf_mmap__unmap_cb);
	}

	return map;
}

static void
perf_evlist__mmap_cb_idx(struct perf_evlist *_evlist,
			 struct perf_mmap_param *_mp,
			 int idx, bool per_cpu)
{
	struct evlist *evlist = container_of(_evlist, struct evlist, core);
	struct mmap_params *mp = container_of(_mp, struct mmap_params, core);

	auxtrace_mmap_params__set_idx(&mp->auxtrace_mp, evlist, idx, per_cpu);
}

static struct perf_mmap*
perf_evlist__mmap_cb_get(struct perf_evlist *_evlist, bool overwrite, int idx)
{
	struct evlist *evlist = container_of(_evlist, struct evlist, core);
	struct mmap *maps;

	maps = overwrite ? evlist->overwrite_mmap : evlist->mmap;

	if (!maps) {
		maps = evlist__alloc_mmap(evlist, overwrite);
		if (!maps)
			return NULL;

		if (overwrite) {
			evlist->overwrite_mmap = maps;
			if (evlist->bkw_mmap_state == BKW_MMAP_NOTREADY)
				perf_evlist__toggle_bkw_mmap(evlist, BKW_MMAP_RUNNING);
		} else {
			evlist->mmap = maps;
		}
	}

	return &maps[idx].core;
}

static int
perf_evlist__mmap_cb_mmap(struct perf_mmap *_map, struct perf_mmap_param *_mp,
			  int output, int cpu)
{
	struct mmap *map = container_of(_map, struct mmap, core);
	struct mmap_params *mp = container_of(_mp, struct mmap_params, core);

	return mmap__mmap(map, mp, output, cpu);
}

unsigned long perf_event_mlock_kb_in_pages(void)
{
	unsigned long pages;
	int max;

	if (sysctl__read_int("kernel/perf_event_mlock_kb", &max) < 0) {
		/*
		 * Pick a once upon a time good value, i.e. things look
		 * strange since we can't read a sysctl value, but lets not
		 * die yet...
		 */
		max = 512;
	} else {
		max -= (page_size / 1024);
	}

	pages = (max * 1024) / page_size;
	if (!is_power_of_2(pages))
		pages = rounddown_pow_of_two(pages);

	return pages;
}

size_t evlist__mmap_size(unsigned long pages)
{
	if (pages == UINT_MAX)
		pages = perf_event_mlock_kb_in_pages();
	else if (!is_power_of_2(pages))
		return 0;

	return (pages + 1) * page_size;
}

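/*
 * Accept either a size (with a 'B', 'K', 'M' or 'G' suffix), which is
 * converted to a page count, or a plain page count. Counts that are not a
 * power of two are rounded up: with 4 KiB pages, for example, both "516K"
 * and "129" end up as 256 pages.
 */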
static long parse_pages_arg(const char *str, unsigned long min,
			    unsigned long max)
{
	unsigned long pages, val;
	static struct parse_tag tags[] = {
		{ .tag  = 'B', .mult = 1       },
		{ .tag  = 'K', .mult = 1 << 10 },
		{ .tag  = 'M', .mult = 1 << 20 },
		{ .tag  = 'G', .mult = 1 << 30 },
		{ .tag  = 0 },
	};

	if (str == NULL)
		return -EINVAL;

	val = parse_tag_value(str, tags);
	if (val != (unsigned long) -1) {
		/* we got file size value */
		pages = PERF_ALIGN(val, page_size) / page_size;
	} else {
		/* we got pages count value */
		char *eptr;
		pages = strtoul(str, &eptr, 10);
		if (*eptr != '\0')
			return -EINVAL;
	}

	if (pages == 0 && min == 0) {
		/* leave number of pages at 0 */
	} else if (!is_power_of_2(pages)) {
		char buf[100];

		/* round pages up to next power of 2 */
		pages = roundup_pow_of_two(pages);
		if (!pages)
			return -EINVAL;

		unit_number__scnprintf(buf, sizeof(buf), pages * page_size);
		pr_info("rounding mmap pages size to %s (%lu pages)\n",
			buf, pages);
	}

	if (pages > max)
		return -EINVAL;

	return pages;
}

int __perf_evlist__parse_mmap_pages(unsigned int *mmap_pages, const char *str)
{
	unsigned long max = UINT_MAX;
	long pages;

	if (max > SIZE_MAX / page_size)
		max = SIZE_MAX / page_size;

	pages = parse_pages_arg(str, 1, max);
	if (pages < 0) {
		pr_err("Invalid argument for --mmap_pages/-m\n");
		return -1;
	}

	*mmap_pages = pages;
	return 0;
}

int perf_evlist__parse_mmap_pages(const struct option *opt, const char *str,
				  int unset __maybe_unused)
{
	return __perf_evlist__parse_mmap_pages(opt->value, str);
}

/**
 * evlist__mmap_ex - Create mmaps to receive events.
 * @evlist: list of events
 * @pages: map length in pages
 * @overwrite: overwrite older events?
 * @auxtrace_pages - auxtrace map length in pages
 * @auxtrace_overwrite - overwrite older auxtrace data?
 *
 * If @overwrite is %false the user needs to signal event consumption using
 * perf_mmap__write_tail(). Using evlist__mmap_read() does this
 * automatically.
 *
 * Similarly, if @auxtrace_overwrite is %false the user needs to signal data
 * consumption using auxtrace_mmap__write_tail().
 *
 * Return: %0 on success, negative error code otherwise.
 */
int evlist__mmap_ex(struct evlist *evlist, unsigned int pages,
		    unsigned int auxtrace_pages,
		    bool auxtrace_overwrite, int nr_cblocks, int affinity, int flush,
		    int comp_level)
{
	/*
	 * Delay setting mp.prot: set it before calling perf_mmap__mmap.
	 * Its value is decided by evsel's write_backward.
	 * So &mp should not be passed through const pointer.
	 */
	struct mmap_params mp = {
		.nr_cblocks	= nr_cblocks,
		.affinity	= affinity,
		.flush		= flush,
		.comp_level	= comp_level
	};
	struct perf_evlist_mmap_ops ops = {
		.idx  = perf_evlist__mmap_cb_idx,
		.get  = perf_evlist__mmap_cb_get,
		.mmap = perf_evlist__mmap_cb_mmap,
	};

	evlist->core.mmap_len = evlist__mmap_size(pages);
	pr_debug("mmap size %zuB\n", evlist->core.mmap_len);

	auxtrace_mmap_params__init(&mp.auxtrace_mp, evlist->core.mmap_len,
				   auxtrace_pages, auxtrace_overwrite);

	return perf_evlist__mmap_ops(&evlist->core, &ops, &mp.core);
}

int evlist__mmap(struct evlist *evlist, unsigned int pages)
{
	return evlist__mmap_ex(evlist, pages, 0, false, 0, PERF_AFFINITY_SYS, 1, 0);
}

int perf_evlist__create_maps(struct evlist *evlist, struct target *target)
{
	bool all_threads = (target->per_thread && target->system_wide);
	struct perf_cpu_map *cpus;
	struct perf_thread_map *threads;

	/*
	 * If specify '-a' and '--per-thread' to perf record, perf record
	 * will override '--per-thread'. target->per_thread = false and
	 * target->system_wide = true.
	 *
	 * If specify '--per-thread' only to perf record,
	 * target->per_thread = true and target->system_wide = false.
	 *
	 * So target->per_thread && target->system_wide is false.
	 * For perf record, thread_map__new_str doesn't call
	 * thread_map__new_all_cpus. That will keep perf record's
	 * current behavior.
	 *
	 * For perf stat, it allows the case that target->per_thread and
	 * target->system_wide are all true. It means to collect system-wide
	 * per-thread data. thread_map__new_str will call
	 * thread_map__new_all_cpus to enumerate all threads.
	 */
	threads = thread_map__new_str(target->pid, target->tid, target->uid,
				      all_threads);

	if (!threads)
		return -1;

	if (target__uses_dummy_map(target))
		cpus = perf_cpu_map__dummy_new();
	else
		cpus = perf_cpu_map__new(target->cpu_list);

	if (!cpus)
		goto out_delete_threads;

	evlist->core.has_user_cpus = !!target->cpu_list;

	perf_evlist__set_maps(&evlist->core, cpus, threads);

	return 0;

out_delete_threads:
	perf_thread_map__put(threads);
	return -1;
}

void __perf_evlist__set_sample_bit(struct evlist *evlist,
				   enum perf_event_sample_format bit)
{
	struct evsel *evsel;

	evlist__for_each_entry(evlist, evsel)
		__evsel__set_sample_bit(evsel, bit);
}

void __perf_evlist__reset_sample_bit(struct evlist *evlist,
				     enum perf_event_sample_format bit)
{
	struct evsel *evsel;

	evlist__for_each_entry(evlist, evsel)
		__evsel__reset_sample_bit(evsel, bit);
}

int perf_evlist__apply_filters(struct evlist *evlist, struct evsel **err_evsel)
{
	struct evsel *evsel;
	int err = 0;

	evlist__for_each_entry(evlist, evsel) {
		if (evsel->filter == NULL)
			continue;

		/*
		 * filters only work for tracepoint events, which don't have a
		 * cpu limit. So evlist and evsel should always be the same.
		 */
		err = perf_evsel__apply_filter(&evsel->core, evsel->filter);
		if (err) {
			*err_evsel = evsel;
			break;
		}
	}

	return err;
}

int perf_evlist__set_tp_filter(struct evlist *evlist, const char *filter)
{
	struct evsel *evsel;
	int err = 0;

	if (filter == NULL)
		return -1;

	evlist__for_each_entry(evlist, evsel) {
		if (evsel->core.attr.type != PERF_TYPE_TRACEPOINT)
			continue;

		err = evsel__set_filter(evsel, filter);
		if (err)
			break;
	}

	return err;
}

int perf_evlist__append_tp_filter(struct evlist *evlist, const char *filter)
{
	struct evsel *evsel;
	int err = 0;

	if (filter == NULL)
		return -1;

	evlist__for_each_entry(evlist, evsel) {
		if (evsel->core.attr.type != PERF_TYPE_TRACEPOINT)
			continue;

		err = evsel__append_tp_filter(evsel, filter);
		if (err)
			break;
	}

	return err;
}

char *asprintf__tp_filter_pids(size_t npids, pid_t *pids)
{
	char *filter;
	size_t i;

	for (i = 0; i < npids; ++i) {
		if (i == 0) {
			if (asprintf(&filter, "common_pid != %d", pids[i]) < 0)
				return NULL;
		} else {
			char *tmp;

			if (asprintf(&tmp, "%s && common_pid != %d", filter, pids[i]) < 0)
				goto out_free;

			free(filter);
			filter = tmp;
		}
	}

	return filter;
out_free:
	free(filter);
	return NULL;
}

int perf_evlist__set_tp_filter_pids(struct evlist *evlist, size_t npids, pid_t *pids)
{
	char *filter = asprintf__tp_filter_pids(npids, pids);
	int ret = perf_evlist__set_tp_filter(evlist, filter);

	free(filter);
	return ret;
}

int perf_evlist__set_tp_filter_pid(struct evlist *evlist, pid_t pid)
{
	return perf_evlist__set_tp_filter_pids(evlist, 1, &pid);
}

int perf_evlist__append_tp_filter_pids(struct evlist *evlist, size_t npids, pid_t *pids)
{
	char *filter = asprintf__tp_filter_pids(npids, pids);
	int ret = perf_evlist__append_tp_filter(evlist, filter);

	free(filter);
	return ret;
}

int perf_evlist__append_tp_filter_pid(struct evlist *evlist, pid_t pid)
{
	return perf_evlist__append_tp_filter_pids(evlist, 1, &pid);
}

bool evlist__valid_sample_type(struct evlist *evlist)
{
	struct evsel *pos;

	if (evlist->core.nr_entries == 1)
		return true;

	if (evlist->id_pos < 0 || evlist->is_pos < 0)
		return false;

	evlist__for_each_entry(evlist, pos) {
		if (pos->id_pos != evlist->id_pos ||
		    pos->is_pos != evlist->is_pos)
			return false;
	}

	return true;
}

u64 __evlist__combined_sample_type(struct evlist *evlist)
{
	struct evsel *evsel;

	if (evlist->combined_sample_type)
		return evlist->combined_sample_type;

	evlist__for_each_entry(evlist, evsel)
		evlist->combined_sample_type |= evsel->core.attr.sample_type;

	return evlist->combined_sample_type;
}

u64 evlist__combined_sample_type(struct evlist *evlist)
{
	evlist->combined_sample_type = 0;
	return __evlist__combined_sample_type(evlist);
}

u64 evlist__combined_branch_type(struct evlist *evlist)
{
	struct evsel *evsel;
	u64 branch_type = 0;

	evlist__for_each_entry(evlist, evsel)
		branch_type |= evsel->core.attr.branch_sample_type;
	return branch_type;
}

bool perf_evlist__valid_read_format(struct evlist *evlist)
{
	struct evsel *first = evlist__first(evlist), *pos = first;
	u64 read_format = first->core.attr.read_format;
	u64 sample_type = first->core.attr.sample_type;

	evlist__for_each_entry(evlist, pos) {
		if (read_format != pos->core.attr.read_format) {
			pr_debug("Read format differs %#" PRIx64 " vs %#" PRIx64 "\n",
				 read_format, (u64)pos->core.attr.read_format);
		}
	}

	/* PERF_SAMPLE_READ implies PERF_FORMAT_ID. */
	if ((sample_type & PERF_SAMPLE_READ) &&
	    !(read_format & PERF_FORMAT_ID)) {
		return false;
	}

	return true;
}

u16 perf_evlist__id_hdr_size(struct evlist *evlist)
{
	struct evsel *first = evlist__first(evlist);
	struct perf_sample *data;
	u64 sample_type;
	u16 size = 0;

	if (!first->core.attr.sample_id_all)
		goto out;

	sample_type = first->core.attr.sample_type;

	if (sample_type & PERF_SAMPLE_TID)
		size += sizeof(data->tid) * 2;

	if (sample_type & PERF_SAMPLE_TIME)
		size += sizeof(data->time);

	if (sample_type & PERF_SAMPLE_ID)
		size += sizeof(data->id);

	if (sample_type & PERF_SAMPLE_STREAM_ID)
		size += sizeof(data->stream_id);

	if (sample_type & PERF_SAMPLE_CPU)
		size += sizeof(data->cpu) * 2;

	if (sample_type & PERF_SAMPLE_IDENTIFIER)
		size += sizeof(data->id);
out:
	return size;
}

bool evlist__valid_sample_id_all(struct evlist *evlist)
{
	struct evsel *first = evlist__first(evlist), *pos = first;

	evlist__for_each_entry_continue(evlist, pos) {
		if (first->core.attr.sample_id_all != pos->core.attr.sample_id_all)
			return false;
	}

	return true;
}

bool evlist__sample_id_all(struct evlist *evlist)
{
	struct evsel *first = evlist__first(evlist);
	return first->core.attr.sample_id_all;
}

void perf_evlist__set_selected(struct evlist *evlist,
			       struct evsel *evsel)
{
	evlist->selected = evsel;
}

void evlist__close(struct evlist *evlist)
{
	struct evsel *evsel;
	struct affinity affinity;
	int cpu, i;

	/*
	 * With perf record core.cpus is usually NULL.
	 * Use the old method to handle this for now.
	 */
	if (!evlist->core.cpus) {
		evlist__for_each_entry_reverse(evlist, evsel)
			evsel__close(evsel);
		return;
	}

	if (affinity__setup(&affinity) < 0)
		return;
	evlist__for_each_cpu(evlist, i, cpu) {
		affinity__set(&affinity, cpu);

		evlist__for_each_entry_reverse(evlist, evsel) {
			if (evsel__cpu_iter_skip(evsel, cpu))
				continue;
			perf_evsel__close_cpu(&evsel->core, evsel->cpu_iter - 1);
		}
	}
	affinity__cleanup(&affinity);
	evlist__for_each_entry_reverse(evlist, evsel) {
		perf_evsel__free_fd(&evsel->core);
		perf_evsel__free_id(&evsel->core);
	}
}

static int perf_evlist__create_syswide_maps(struct evlist *evlist)
{
	struct perf_cpu_map *cpus;
	struct perf_thread_map *threads;
	int err = -ENOMEM;

	/*
	 * Try reading /sys/devices/system/cpu/online to get
	 * an all cpus map.
	 *
	 * FIXME: -ENOMEM is the best we can do here, the cpu_map
	 * code needs an overhaul to properly forward the
	 * error, and we may not want to do that fallback to a
	 * default cpu identity map :-\
	 */
	cpus = perf_cpu_map__new(NULL);
	if (!cpus)
		goto out;

	threads = perf_thread_map__new_dummy();
	if (!threads)
		goto out_put;

	perf_evlist__set_maps(&evlist->core, cpus, threads);

	err = 0;
out:
	return err;
out_put:
	perf_cpu_map__put(cpus);
	goto out;
}

int evlist__open(struct evlist *evlist)
{
	struct evsel *evsel;
	int err;

	/*
	 * Default: one fd per CPU, all threads, aka systemwide
	 * as sys_perf_event_open(cpu = -1, thread = -1) is EINVAL
	 */
	if (evlist->core.threads == NULL && evlist->core.cpus == NULL) {
		err = perf_evlist__create_syswide_maps(evlist);
		if (err < 0)
			goto out_err;
	}

	perf_evlist__update_id_pos(evlist);

	evlist__for_each_entry(evlist, evsel) {
		err = evsel__open(evsel, evsel->core.cpus, evsel->core.threads);
		if (err < 0)
			goto out_err;
	}

	return 0;
out_err:
	evlist__close(evlist);
	errno = -err;
	return err;
}

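/*
 * Fork the workload but keep it "corked": the child signals readiness by
 * closing child_ready_pipe and then blocks reading go_pipe. The parent later
 * writes one byte to workload.cork_fd (perf_evlist__start_workload()) to
 * trigger the execvp(), or just closes it to cancel the workload.
 */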
int perf_evlist__prepare_workload(struct evlist *evlist, struct target *target,
				  const char *argv[], bool pipe_output,
				  void (*exec_error)(int signo, siginfo_t *info, void *ucontext))
{
	int child_ready_pipe[2], go_pipe[2];
	char bf;

	if (pipe(child_ready_pipe) < 0) {
		perror("failed to create 'ready' pipe");
		return -1;
	}

	if (pipe(go_pipe) < 0) {
		perror("failed to create 'go' pipe");
		goto out_close_ready_pipe;
	}

	evlist->workload.pid = fork();
	if (evlist->workload.pid < 0) {
		perror("failed to fork");
		goto out_close_pipes;
	}

	if (!evlist->workload.pid) {
		int ret;

		if (pipe_output)
			dup2(2, 1);

		signal(SIGTERM, SIG_DFL);

		close(child_ready_pipe[0]);
		close(go_pipe[1]);
		fcntl(go_pipe[0], F_SETFD, FD_CLOEXEC);

		/*
		 * Tell the parent we're ready to go
		 */
		close(child_ready_pipe[1]);

		/*
		 * Wait until the parent tells us to go.
		 */
		ret = read(go_pipe[0], &bf, 1);
		/*
		 * The parent will ask for the execvp() to be performed by
		 * writing exactly one byte, in workload.cork_fd, usually via
		 * perf_evlist__start_workload().
		 *
		 * For cancelling the workload without actually running it,
		 * the parent will just close workload.cork_fd, without writing
		 * anything, i.e. read will return zero and we just exit()
		 * here.
		 */
		if (ret != 1) {
			if (ret == -1)
				perror("unable to read pipe");
			exit(ret);
		}

		execvp(argv[0], (char **)argv);

		if (exec_error) {
			union sigval val;

			val.sival_int = errno;
			if (sigqueue(getppid(), SIGUSR1, val))
				perror(argv[0]);
		} else
			perror(argv[0]);
		exit(-1);
	}

	if (exec_error) {
		struct sigaction act = {
			.sa_flags     = SA_SIGINFO,
			.sa_sigaction = exec_error,
		};
		sigaction(SIGUSR1, &act, NULL);
	}

	if (target__none(target)) {
		if (evlist->core.threads == NULL) {
			fprintf(stderr, "FATAL: evlist->threads need to be set at this point (%s:%d).\n",
				__func__, __LINE__);
			goto out_close_pipes;
		}
		perf_thread_map__set_pid(evlist->core.threads, 0, evlist->workload.pid);
	}

	close(child_ready_pipe[1]);
	close(go_pipe[0]);
	/*
	 * wait for child to settle
	 */
	if (read(child_ready_pipe[0], &bf, 1) == -1) {
		perror("unable to read pipe");
		goto out_close_pipes;
	}

	fcntl(go_pipe[1], F_SETFD, FD_CLOEXEC);
	evlist->workload.cork_fd = go_pipe[1];
	close(child_ready_pipe[0]);
	return 0;

out_close_pipes:
	close(go_pipe[0]);
	close(go_pipe[1]);
out_close_ready_pipe:
	close(child_ready_pipe[0]);
	close(child_ready_pipe[1]);
	return -1;
}

int perf_evlist__start_workload(struct evlist *evlist)
{
	if (evlist->workload.cork_fd > 0) {
		char bf = 0;
		int ret;
		/*
		 * Remove the cork, let it rip!
		 */
		ret = write(evlist->workload.cork_fd, &bf, 1);
		if (ret < 0)
			perror("unable to write to pipe");

		close(evlist->workload.cork_fd);
		return ret;
	}

	return 0;
}

int perf_evlist__parse_sample(struct evlist *evlist, union perf_event *event,
			      struct perf_sample *sample)
{
	struct evsel *evsel = perf_evlist__event2evsel(evlist, event);

	if (!evsel)
		return -EFAULT;
	return evsel__parse_sample(evsel, event, sample);
}

int perf_evlist__parse_sample_timestamp(struct evlist *evlist,
					union perf_event *event,
					u64 *timestamp)
{
	struct evsel *evsel = perf_evlist__event2evsel(evlist, event);

	if (!evsel)
		return -EFAULT;
	return evsel__parse_sample_timestamp(evsel, event, timestamp);
}

int evlist__strerror_open(struct evlist *evlist, int err, char *buf, size_t size)
{
	int printed, value;
	char sbuf[STRERR_BUFSIZE], *emsg = str_error_r(err, sbuf, sizeof(sbuf));

	switch (err) {
	case EACCES:
	case EPERM:
		printed = scnprintf(buf, size,
				    "Error:\t%s.\n"
				    "Hint:\tCheck /proc/sys/kernel/perf_event_paranoid setting.", emsg);

		value = perf_event_paranoid();

		printed += scnprintf(buf + printed, size - printed, "\nHint:\t");

		if (value >= 2) {
			printed += scnprintf(buf + printed, size - printed,
					     "For your workloads it needs to be <= 1\nHint:\t");
		}
		printed += scnprintf(buf + printed, size - printed,
				     "For system wide tracing it needs to be set to -1.\n");

		printed += scnprintf(buf + printed, size - printed,
				    "Hint:\tTry: 'sudo sh -c \"echo -1 > /proc/sys/kernel/perf_event_paranoid\"'\n"
				    "Hint:\tThe current value is %d.", value);
		break;
	case EINVAL: {
		struct evsel *first = evlist__first(evlist);
		int max_freq;

		if (sysctl__read_int("kernel/perf_event_max_sample_rate", &max_freq) < 0)
			goto out_default;

		if (first->core.attr.sample_freq < (u64)max_freq)
			goto out_default;

		printed = scnprintf(buf, size,
				    "Error:\t%s.\n"
				    "Hint:\tCheck /proc/sys/kernel/perf_event_max_sample_rate.\n"
				    "Hint:\tThe current value is %d and %" PRIu64 " is being requested.",
				    emsg, max_freq, first->core.attr.sample_freq);
		break;
	}
	default:
out_default:
		scnprintf(buf, size, "%s", emsg);
		break;
	}

	return 0;
}

int evlist__strerror_mmap(struct evlist *evlist, int err, char *buf, size_t size)
{
	char sbuf[STRERR_BUFSIZE], *emsg = str_error_r(err, sbuf, sizeof(sbuf));
	int pages_attempted = evlist->core.mmap_len / 1024, pages_max_per_user, printed = 0;

	switch (err) {
	case EPERM:
		sysctl__read_int("kernel/perf_event_mlock_kb", &pages_max_per_user);
		printed += scnprintf(buf + printed, size - printed,
				     "Error:\t%s.\n"
				     "Hint:\tCheck /proc/sys/kernel/perf_event_mlock_kb (%d kB) setting.\n"
				     "Hint:\tTried using %zd kB.\n",
				     emsg, pages_max_per_user, pages_attempted);

		if (pages_attempted >= pages_max_per_user) {
			printed += scnprintf(buf + printed, size - printed,
					     "Hint:\tTry 'sudo sh -c \"echo %d > /proc/sys/kernel/perf_event_mlock_kb\"', or\n",
					     pages_max_per_user + pages_attempted);
		}

		printed += scnprintf(buf + printed, size - printed,
				     "Hint:\tTry using a smaller -m/--mmap-pages value.");
		break;
	default:
		scnprintf(buf, size, "%s", emsg);
		break;
	}

	return 0;
}

void perf_evlist__to_front(struct evlist *evlist,
			   struct evsel *move_evsel)
{
	struct evsel *evsel, *n;
	LIST_HEAD(move);

	if (move_evsel == evlist__first(evlist))
		return;

	evlist__for_each_entry_safe(evlist, n, evsel) {
		if (evsel->leader == move_evsel->leader)
			list_move_tail(&evsel->core.node, &move);
	}

	list_splice(&move, &evlist->core.entries);
}

struct evsel *perf_evlist__get_tracking_event(struct evlist *evlist)
{
	struct evsel *evsel;

	evlist__for_each_entry(evlist, evsel) {
		if (evsel->tracking)
			return evsel;
	}

	return evlist__first(evlist);
}

void perf_evlist__set_tracking_event(struct evlist *evlist,
				     struct evsel *tracking_evsel)
{
	struct evsel *evsel;

	if (tracking_evsel->tracking)
		return;

	evlist__for_each_entry(evlist, evsel) {
		if (evsel != tracking_evsel)
			evsel->tracking = false;
	}

	tracking_evsel->tracking = true;
}

struct evsel *
perf_evlist__find_evsel_by_str(struct evlist *evlist,
			       const char *str)
{
	struct evsel *evsel;

	evlist__for_each_entry(evlist, evsel) {
		if (!evsel->name)
			continue;
		if (strcmp(str, evsel->name) == 0)
			return evsel;
	}

	return NULL;
}

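/*
 * Backward-mmap state machine: NOTREADY -> RUNNING -> DATA_PENDING ->
 * EMPTY -> RUNNING -> ... Moving RUNNING -> DATA_PENDING pauses the ring
 * buffers (PERF_EVENT_IOC_PAUSE_OUTPUT) so the pending data can be read;
 * EMPTY -> RUNNING resumes them. Any other transition is rejected.
 */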
void perf_evlist__toggle_bkw_mmap(struct evlist *evlist,
				  enum bkw_mmap_state state)
{
	enum bkw_mmap_state old_state = evlist->bkw_mmap_state;
	enum action {
		NONE,
		PAUSE,
		RESUME,
	} action = NONE;

	if (!evlist->overwrite_mmap)
		return;

	switch (old_state) {
	case BKW_MMAP_NOTREADY: {
		if (state != BKW_MMAP_RUNNING)
			goto state_err;
		break;
	}
	case BKW_MMAP_RUNNING: {
		if (state != BKW_MMAP_DATA_PENDING)
			goto state_err;
		action = PAUSE;
		break;
	}
	case BKW_MMAP_DATA_PENDING: {
		if (state != BKW_MMAP_EMPTY)
			goto state_err;
		break;
	}
	case BKW_MMAP_EMPTY: {
		if (state != BKW_MMAP_RUNNING)
			goto state_err;
		action = RESUME;
		break;
	}
	default:
		WARN_ONCE(1, "Shouldn't get there\n");
	}

	evlist->bkw_mmap_state = state;

	switch (action) {
	case PAUSE:
		perf_evlist__pause(evlist);
		break;
	case RESUME:
		perf_evlist__resume(evlist);
		break;
	case NONE:
	default:
		break;
	}

state_err:
	return;
}

bool perf_evlist__exclude_kernel(struct evlist *evlist)
{
	struct evsel *evsel;

	evlist__for_each_entry(evlist, evsel) {
		if (!evsel->core.attr.exclude_kernel)
			return false;
	}

	return true;
}

/*
 * Events in data file are not collected in groups, but we still want
 * the group display. Set the artificial group and set the leader's
 * forced_leader flag to notify the display code.
 */
void perf_evlist__force_leader(struct evlist *evlist)
{
	if (!evlist->nr_groups) {
		struct evsel *leader = evlist__first(evlist);

		perf_evlist__set_leader(evlist);
		leader->forced_leader = true;
	}
}

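/*
 * Called when opening a weak group fails: the group is broken up so each
 * member becomes its own leader, members that were already opened are closed
 * (if @close), and reset_group marks every former member to be reopened
 * individually.
 */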
struct evsel *perf_evlist__reset_weak_group(struct evlist *evsel_list,
					    struct evsel *evsel,
					    bool close)
{
	struct evsel *c2, *leader;
	bool is_open = true;

	leader = evsel->leader;
	pr_debug("Weak group for %s/%d failed\n",
		 leader->name, leader->core.nr_members);

	/*
	 * for_each_group_member doesn't work here because it doesn't
	 * include the first entry.
	 */
	evlist__for_each_entry(evsel_list, c2) {
		if (c2 == evsel)
			is_open = false;
		if (c2->leader == leader) {
			if (is_open && close)
				perf_evsel__close(&c2->core);
			c2->leader = c2;
			c2->core.nr_members = 0;
			/*
			 * Set this for all former members of the group
			 * to indicate they get reopened.
			 */
			c2->reset_group = true;
		}
	}
	return leader;
}

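/*
 * Parse the "fifo:ctl-path[,ack-path]" form of the --control option, e.g.
 * --control fifo:perf.ctl,perf.ack (the paths here are just examples). Both
 * FIFOs are opened here; *ctl_fd_close tells the caller to close them later.
 */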
static int evlist__parse_control_fifo(const char *str, int *ctl_fd, int *ctl_fd_ack, bool *ctl_fd_close)
{
	char *s, *p;
	int ret = 0, fd;

	if (strncmp(str, "fifo:", 5))
		return -EINVAL;

	str += 5;
	if (!*str || *str == ',')
		return -EINVAL;

	s = strdup(str);
	if (!s)
		return -ENOMEM;

	p = strchr(s, ',');
	if (p)
		*p = '\0';

	/*
	 * O_RDWR avoids POLLHUPs which is necessary to allow the other
	 * end of a FIFO to be repeatedly opened and closed.
	 */
	fd = open(s, O_RDWR | O_NONBLOCK | O_CLOEXEC);
	if (fd < 0) {
		pr_err("Failed to open '%s'\n", s);
		ret = -errno;
		goto out_free;
	}
	*ctl_fd = fd;
	*ctl_fd_close = true;

	if (p && *++p) {
		/* O_RDWR | O_NONBLOCK means the other end need not be open */
		fd = open(p, O_RDWR | O_NONBLOCK | O_CLOEXEC);
		if (fd < 0) {
			pr_err("Failed to open '%s'\n", p);
			ret = -errno;
			goto out_free;
		}
		*ctl_fd_ack = fd;
	}

out_free:
	free(s);
	return ret;
}

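/*
 * Parse the "fd:ctl-fd[,ack-fd]" form, where the descriptors are already
 * open in the parent (e.g. --control fd:10,11); anything that does not
 * start with "fd:" is handed to the "fifo:" parser above.
 */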
int evlist__parse_control(const char *str, int *ctl_fd, int *ctl_fd_ack, bool *ctl_fd_close)
{
	char *comma = NULL, *endptr = NULL;

	*ctl_fd_close = false;

	if (strncmp(str, "fd:", 3))
		return evlist__parse_control_fifo(str, ctl_fd, ctl_fd_ack, ctl_fd_close);

	*ctl_fd = strtoul(&str[3], &endptr, 0);
	if (endptr == &str[3])
		return -EINVAL;

	comma = strchr(str, ',');
	if (comma) {
		if (endptr != comma)
			return -EINVAL;

		*ctl_fd_ack = strtoul(comma + 1, &endptr, 0);
		if (endptr == comma + 1 || *endptr != '\0')
			return -EINVAL;
	}

	return 0;
}

int evlist__initialize_ctlfd(struct evlist *evlist, int fd, int ack)
{
	if (fd == -1) {
		pr_debug("Control descriptor is not initialized\n");
		return 0;
	}

	evlist->ctl_fd.pos = perf_evlist__add_pollfd(&evlist->core, fd, NULL, POLLIN,
						     fdarray_flag__nonfilterable);
	if (evlist->ctl_fd.pos < 0) {
		evlist->ctl_fd.pos = -1;
		pr_err("Failed to add ctl fd entry: %m\n");
		return -1;
	}

	evlist->ctl_fd.fd = fd;
	evlist->ctl_fd.ack = ack;

	return 0;
}

bool evlist__ctlfd_initialized(struct evlist *evlist)
{
	return evlist->ctl_fd.pos >= 0;
}

int evlist__finalize_ctlfd(struct evlist *evlist)
{
	struct pollfd *entries = evlist->core.pollfd.entries;

	if (!evlist__ctlfd_initialized(evlist))
		return 0;

	entries[evlist->ctl_fd.pos].fd = -1;
	entries[evlist->ctl_fd.pos].events = 0;
	entries[evlist->ctl_fd.pos].revents = 0;

	evlist->ctl_fd.pos = -1;
	evlist->ctl_fd.ack = -1;
	evlist->ctl_fd.fd = -1;

	return 0;
}

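/*
 * Read one command from the (non-blocking) control fd, a byte at a time,
 * terminated by '\n', '\0' or a full buffer, and classify it as an
 * enable/disable command. Returns the number of bytes read, 0 when no data
 * was pending (EAGAIN), or a negative value on read error.
 */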
static int evlist__ctlfd_recv(struct evlist *evlist, enum evlist_ctl_cmd *cmd,
			      char *cmd_data, size_t data_size)
{
	int err;
	char c;
	size_t bytes_read = 0;

	*cmd = EVLIST_CTL_CMD_UNSUPPORTED;
	memset(cmd_data, 0, data_size);
	data_size--;

	do {
		err = read(evlist->ctl_fd.fd, &c, 1);
		if (err > 0) {
			if (c == '\n' || c == '\0')
				break;
			cmd_data[bytes_read++] = c;
			if (bytes_read == data_size)
				break;
			continue;
		} else if (err == -1) {
			if (errno == EINTR)
				continue;
			if (errno == EAGAIN || errno == EWOULDBLOCK)
				err = 0;
			else
				pr_err("Failed to read from ctlfd %d: %m\n", evlist->ctl_fd.fd);
		}
		break;
	} while (1);

	pr_debug("Message from ctl_fd: \"%s%s\"\n", cmd_data,
		 bytes_read == data_size ? "" : c == '\n' ? "\\n" : "\\0");

	if (bytes_read > 0) {
		if (!strncmp(cmd_data, EVLIST_CTL_CMD_ENABLE_TAG,
			     (sizeof(EVLIST_CTL_CMD_ENABLE_TAG)-1))) {
			*cmd = EVLIST_CTL_CMD_ENABLE;
		} else if (!strncmp(cmd_data, EVLIST_CTL_CMD_DISABLE_TAG,
				    (sizeof(EVLIST_CTL_CMD_DISABLE_TAG)-1))) {
			*cmd = EVLIST_CTL_CMD_DISABLE;
		}
	}

	return bytes_read ? (int)bytes_read : err;
}

static int evlist__ctlfd_ack(struct evlist *evlist)
{
	int err;

	if (evlist->ctl_fd.ack == -1)
		return 0;

	err = write(evlist->ctl_fd.ack, EVLIST_CTL_CMD_ACK_TAG,
		    sizeof(EVLIST_CTL_CMD_ACK_TAG));
	if (err == -1)
		pr_err("failed to write to ctl_ack_fd %d: %m\n", evlist->ctl_fd.ack);

	return err;
}

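/*
 * Called when evlist__poll() reports activity on the control fd: receive
 * and dispatch a command (enable/disable the evlist), acknowledge it on the
 * ack fd, and tear the control fd down again on POLLHUP/POLLERR.
 */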
int evlist__ctlfd_process(struct evlist *evlist, enum evlist_ctl_cmd *cmd)
{
	int err = 0;
	char cmd_data[EVLIST_CTL_CMD_MAX_LEN];
	int ctlfd_pos = evlist->ctl_fd.pos;
	struct pollfd *entries = evlist->core.pollfd.entries;

	if (!evlist__ctlfd_initialized(evlist) || !entries[ctlfd_pos].revents)
		return 0;

	if (entries[ctlfd_pos].revents & POLLIN) {
		err = evlist__ctlfd_recv(evlist, cmd, cmd_data,
					 EVLIST_CTL_CMD_MAX_LEN);
		if (err > 0) {
			switch (*cmd) {
			case EVLIST_CTL_CMD_ENABLE:
				evlist__enable(evlist);
				break;
			case EVLIST_CTL_CMD_DISABLE:
				evlist__disable(evlist);
				break;
			case EVLIST_CTL_CMD_ACK:
			case EVLIST_CTL_CMD_UNSUPPORTED:
			default:
				pr_debug("ctlfd: unsupported %d\n", *cmd);
				break;
			}
			if (!(*cmd == EVLIST_CTL_CMD_ACK || *cmd == EVLIST_CTL_CMD_UNSUPPORTED))
				evlist__ctlfd_ack(evlist);
		}
	}

	if (entries[ctlfd_pos].revents & (POLLHUP | POLLERR))
		evlist__finalize_ctlfd(evlist);
	else
		entries[ctlfd_pos].revents = 0;

	return err;
}
1949}