perf evlist: Monitor POLLERR and POLLHUP events too
tools/perf/util/evlist.c
/*
 * Copyright (C) 2011, Red Hat Inc, Arnaldo Carvalho de Melo <acme@redhat.com>
 *
 * Parts came from builtin-{top,stat,record}.c, see those files for further
 * copyright notes.
 *
 * Released under the GPL v2. (and only v2, not any later version)
 */
#include "util.h"
#include <api/fs/debugfs.h>
#include <poll.h>
#include "cpumap.h"
#include "thread_map.h"
#include "target.h"
#include "evlist.h"
#include "evsel.h"
#include "debug.h"
#include <unistd.h>

#include "parse-events.h"
#include "parse-options.h"

#include <sys/mman.h>

#include <linux/bitops.h>
#include <linux/hash.h>

#define FD(e, x, y) (*(int *)xyarray__entry(e->fd, x, y))
#define SID(e, x, y) xyarray__entry(e->sample_id, x, y)

void perf_evlist__init(struct perf_evlist *evlist, struct cpu_map *cpus,
		       struct thread_map *threads)
{
	int i;

	for (i = 0; i < PERF_EVLIST__HLIST_SIZE; ++i)
		INIT_HLIST_HEAD(&evlist->heads[i]);
	INIT_LIST_HEAD(&evlist->entries);
	perf_evlist__set_maps(evlist, cpus, threads);
	evlist->workload.pid = -1;
}

struct perf_evlist *perf_evlist__new(void)
{
	struct perf_evlist *evlist = zalloc(sizeof(*evlist));

	if (evlist != NULL)
		perf_evlist__init(evlist, NULL, NULL);

	return evlist;
}

struct perf_evlist *perf_evlist__new_default(void)
{
	struct perf_evlist *evlist = perf_evlist__new();

	if (evlist && perf_evlist__add_default(evlist)) {
		perf_evlist__delete(evlist);
		evlist = NULL;
	}

	return evlist;
}

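/*
 * Illustrative sketch (not from the original sources): a minimal consumer
 * of this API.  Error handling is trimmed and "target" stands for an
 * already filled in struct target, e.g. from option parsing:
 *
 *	struct perf_evlist *evlist = perf_evlist__new_default();
 *
 *	if (evlist == NULL)
 *		return -ENOMEM;
 *	if (perf_evlist__create_maps(evlist, &target) < 0 ||
 *	    perf_evlist__open(evlist) < 0 ||
 *	    perf_evlist__mmap(evlist, UINT_MAX, false) < 0)
 *		goto out_delete;
 *	perf_evlist__enable(evlist);
 *	...
 * out_delete:
 *	perf_evlist__delete(evlist);
 */
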
/**
 * perf_evlist__set_id_pos - set the positions of event ids.
 * @evlist: selected event list
 *
 * Events with compatible sample types all have the same id_pos
 * and is_pos. For convenience, put a copy on evlist.
 */
void perf_evlist__set_id_pos(struct perf_evlist *evlist)
{
	struct perf_evsel *first = perf_evlist__first(evlist);

	evlist->id_pos = first->id_pos;
	evlist->is_pos = first->is_pos;
}

static void perf_evlist__update_id_pos(struct perf_evlist *evlist)
{
	struct perf_evsel *evsel;

	evlist__for_each(evlist, evsel)
		perf_evsel__calc_id_pos(evsel);

	perf_evlist__set_id_pos(evlist);
}

static void perf_evlist__purge(struct perf_evlist *evlist)
{
	struct perf_evsel *pos, *n;

	evlist__for_each_safe(evlist, n, pos) {
		list_del_init(&pos->node);
		perf_evsel__delete(pos);
	}

	evlist->nr_entries = 0;
}

void perf_evlist__exit(struct perf_evlist *evlist)
{
	zfree(&evlist->mmap);
	zfree(&evlist->pollfd);
}

void perf_evlist__delete(struct perf_evlist *evlist)
{
	perf_evlist__munmap(evlist);
	perf_evlist__close(evlist);
	cpu_map__delete(evlist->cpus);
	thread_map__delete(evlist->threads);
	evlist->cpus = NULL;
	evlist->threads = NULL;
	perf_evlist__purge(evlist);
	perf_evlist__exit(evlist);
	free(evlist);
}

void perf_evlist__add(struct perf_evlist *evlist, struct perf_evsel *entry)
{
	list_add_tail(&entry->node, &evlist->entries);
	entry->idx = evlist->nr_entries;
	entry->tracking = !entry->idx;

	if (!evlist->nr_entries++)
		perf_evlist__set_id_pos(evlist);
}

void perf_evlist__splice_list_tail(struct perf_evlist *evlist,
				   struct list_head *list,
				   int nr_entries)
{
	bool set_id_pos = !evlist->nr_entries;

	list_splice_tail(list, &evlist->entries);
	evlist->nr_entries += nr_entries;
	if (set_id_pos)
		perf_evlist__set_id_pos(evlist);
}

void __perf_evlist__set_leader(struct list_head *list)
{
	struct perf_evsel *evsel, *leader;

	leader = list_entry(list->next, struct perf_evsel, node);
	evsel = list_entry(list->prev, struct perf_evsel, node);

	leader->nr_members = evsel->idx - leader->idx + 1;

	__evlist__for_each(list, evsel) {
		evsel->leader = leader;
	}
}

void perf_evlist__set_leader(struct perf_evlist *evlist)
{
	if (evlist->nr_entries) {
		evlist->nr_groups = evlist->nr_entries > 1 ? 1 : 0;
		__perf_evlist__set_leader(&evlist->entries);
	}
}

int perf_evlist__add_default(struct perf_evlist *evlist)
{
	struct perf_event_attr attr = {
		.type = PERF_TYPE_HARDWARE,
		.config = PERF_COUNT_HW_CPU_CYCLES,
	};
	struct perf_evsel *evsel;

	event_attr_init(&attr);

	evsel = perf_evsel__new(&attr);
	if (evsel == NULL)
		goto error;

	/* use strdup() because free(evsel) assumes name is allocated */
	evsel->name = strdup("cycles");
	if (!evsel->name)
		goto error_free;

	perf_evlist__add(evlist, evsel);
	return 0;
error_free:
	perf_evsel__delete(evsel);
error:
	return -ENOMEM;
}

static int perf_evlist__add_attrs(struct perf_evlist *evlist,
				  struct perf_event_attr *attrs, size_t nr_attrs)
{
	struct perf_evsel *evsel, *n;
	LIST_HEAD(head);
	size_t i;

	for (i = 0; i < nr_attrs; i++) {
		evsel = perf_evsel__new_idx(attrs + i, evlist->nr_entries + i);
		if (evsel == NULL)
			goto out_delete_partial_list;
		list_add_tail(&evsel->node, &head);
	}

	perf_evlist__splice_list_tail(evlist, &head, nr_attrs);

	return 0;

out_delete_partial_list:
	__evlist__for_each_safe(&head, n, evsel)
		perf_evsel__delete(evsel);
	return -1;
}

int __perf_evlist__add_default_attrs(struct perf_evlist *evlist,
				     struct perf_event_attr *attrs, size_t nr_attrs)
{
	size_t i;

	for (i = 0; i < nr_attrs; i++)
		event_attr_init(attrs + i);

	return perf_evlist__add_attrs(evlist, attrs, nr_attrs);
}

struct perf_evsel *
perf_evlist__find_tracepoint_by_id(struct perf_evlist *evlist, int id)
{
	struct perf_evsel *evsel;

	evlist__for_each(evlist, evsel) {
		if (evsel->attr.type == PERF_TYPE_TRACEPOINT &&
		    (int)evsel->attr.config == id)
			return evsel;
	}

	return NULL;
}

struct perf_evsel *
perf_evlist__find_tracepoint_by_name(struct perf_evlist *evlist,
				     const char *name)
{
	struct perf_evsel *evsel;

	evlist__for_each(evlist, evsel) {
		if ((evsel->attr.type == PERF_TYPE_TRACEPOINT) &&
		    (strcmp(evsel->name, name) == 0))
			return evsel;
	}

	return NULL;
}

int perf_evlist__add_newtp(struct perf_evlist *evlist,
			   const char *sys, const char *name, void *handler)
{
	struct perf_evsel *evsel = perf_evsel__newtp(sys, name);

	if (evsel == NULL)
		return -1;

	evsel->handler = handler;
	perf_evlist__add(evlist, evsel);
	return 0;
}

static int perf_evlist__nr_threads(struct perf_evlist *evlist,
				   struct perf_evsel *evsel)
{
	if (evsel->system_wide)
		return 1;
	else
		return thread_map__nr(evlist->threads);
}

void perf_evlist__disable(struct perf_evlist *evlist)
{
	int cpu, thread;
	struct perf_evsel *pos;
	int nr_cpus = cpu_map__nr(evlist->cpus);
	int nr_threads;

	for (cpu = 0; cpu < nr_cpus; cpu++) {
		evlist__for_each(evlist, pos) {
			if (!perf_evsel__is_group_leader(pos) || !pos->fd)
				continue;
			nr_threads = perf_evlist__nr_threads(evlist, pos);
			for (thread = 0; thread < nr_threads; thread++)
				ioctl(FD(pos, cpu, thread),
				      PERF_EVENT_IOC_DISABLE, 0);
		}
	}
}

void perf_evlist__enable(struct perf_evlist *evlist)
{
	int cpu, thread;
	struct perf_evsel *pos;
	int nr_cpus = cpu_map__nr(evlist->cpus);
	int nr_threads;

	for (cpu = 0; cpu < nr_cpus; cpu++) {
		evlist__for_each(evlist, pos) {
			if (!perf_evsel__is_group_leader(pos) || !pos->fd)
				continue;
			nr_threads = perf_evlist__nr_threads(evlist, pos);
			for (thread = 0; thread < nr_threads; thread++)
				ioctl(FD(pos, cpu, thread),
				      PERF_EVENT_IOC_ENABLE, 0);
		}
	}
}

int perf_evlist__disable_event(struct perf_evlist *evlist,
			       struct perf_evsel *evsel)
{
	int cpu, thread, err;
	int nr_cpus = cpu_map__nr(evlist->cpus);
	int nr_threads = perf_evlist__nr_threads(evlist, evsel);

	if (!evsel->fd)
		return 0;

	for (cpu = 0; cpu < nr_cpus; cpu++) {
		for (thread = 0; thread < nr_threads; thread++) {
			err = ioctl(FD(evsel, cpu, thread),
				    PERF_EVENT_IOC_DISABLE, 0);
			if (err)
				return err;
		}
	}
	return 0;
}

int perf_evlist__enable_event(struct perf_evlist *evlist,
			      struct perf_evsel *evsel)
{
	int cpu, thread, err;
	int nr_cpus = cpu_map__nr(evlist->cpus);
	int nr_threads = perf_evlist__nr_threads(evlist, evsel);

	if (!evsel->fd)
		return -EINVAL;

	for (cpu = 0; cpu < nr_cpus; cpu++) {
		for (thread = 0; thread < nr_threads; thread++) {
			err = ioctl(FD(evsel, cpu, thread),
				    PERF_EVENT_IOC_ENABLE, 0);
			if (err)
				return err;
		}
	}
	return 0;
}

static int perf_evlist__enable_event_cpu(struct perf_evlist *evlist,
					 struct perf_evsel *evsel, int cpu)
{
	int thread, err;
	int nr_threads = perf_evlist__nr_threads(evlist, evsel);

	if (!evsel->fd)
		return -EINVAL;

	for (thread = 0; thread < nr_threads; thread++) {
		err = ioctl(FD(evsel, cpu, thread),
			    PERF_EVENT_IOC_ENABLE, 0);
		if (err)
			return err;
	}
	return 0;
}

static int perf_evlist__enable_event_thread(struct perf_evlist *evlist,
					    struct perf_evsel *evsel,
					    int thread)
{
	int cpu, err;
	int nr_cpus = cpu_map__nr(evlist->cpus);

	if (!evsel->fd)
		return -EINVAL;

	for (cpu = 0; cpu < nr_cpus; cpu++) {
		err = ioctl(FD(evsel, cpu, thread), PERF_EVENT_IOC_ENABLE, 0);
		if (err)
			return err;
	}
	return 0;
}

int perf_evlist__enable_event_idx(struct perf_evlist *evlist,
				  struct perf_evsel *evsel, int idx)
{
	bool per_cpu_mmaps = !cpu_map__empty(evlist->cpus);

	if (per_cpu_mmaps)
		return perf_evlist__enable_event_cpu(evlist, evsel, idx);
	else
		return perf_evlist__enable_event_thread(evlist, evsel, idx);
}

static int perf_evlist__alloc_pollfd(struct perf_evlist *evlist)
{
	int nr_cpus = cpu_map__nr(evlist->cpus);
	int nr_threads = thread_map__nr(evlist->threads);
	int nfds = 0;
	struct perf_evsel *evsel;

	list_for_each_entry(evsel, &evlist->entries, node) {
		if (evsel->system_wide)
			nfds += nr_cpus;
		else
			nfds += nr_cpus * nr_threads;
	}

	evlist->pollfd = malloc(sizeof(struct pollfd) * nfds);
	return evlist->pollfd != NULL ? 0 : -ENOMEM;
}

void perf_evlist__add_pollfd(struct perf_evlist *evlist, int fd)
{
	fcntl(fd, F_SETFL, O_NONBLOCK);
	evlist->pollfd[evlist->nr_fds].fd = fd;
	evlist->pollfd[evlist->nr_fds].events = POLLIN | POLLERR | POLLHUP;
	evlist->nr_fds++;
}

int perf_evlist__filter_pollfd(struct perf_evlist *evlist, short revents_and_mask)
{
	int fd, nr_fds = 0;

	if (evlist->nr_fds == 0)
		return 0;

	for (fd = 0; fd < evlist->nr_fds; ++fd) {
		if (evlist->pollfd[fd].revents & revents_and_mask)
			continue;

		if (fd != nr_fds)
			evlist->pollfd[nr_fds] = evlist->pollfd[fd];

		++nr_fds;
	}

	evlist->nr_fds = nr_fds;
	return nr_fds;
}

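/*
 * Illustrative sketch (not from the original sources): because the pollfd
 * entries registered above also watch POLLERR and POLLHUP, a caller can
 * prune file descriptors whose other end has gone away after each poll()
 * round, e.g.:
 *
 *	while (!done) {
 *		poll(evlist->pollfd, evlist->nr_fds, timeout);
 *		... drain the mmap ring buffers ...
 *		if (perf_evlist__filter_pollfd(evlist, POLLERR | POLLHUP) == 0)
 *			done = true;	 (nothing left to monitor)
 *	}
 */
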
static void perf_evlist__id_hash(struct perf_evlist *evlist,
				 struct perf_evsel *evsel,
				 int cpu, int thread, u64 id)
{
	int hash;
	struct perf_sample_id *sid = SID(evsel, cpu, thread);

	sid->id = id;
	sid->evsel = evsel;
	hash = hash_64(sid->id, PERF_EVLIST__HLIST_BITS);
	hlist_add_head(&sid->node, &evlist->heads[hash]);
}

void perf_evlist__id_add(struct perf_evlist *evlist, struct perf_evsel *evsel,
			 int cpu, int thread, u64 id)
{
	perf_evlist__id_hash(evlist, evsel, cpu, thread, id);
	evsel->id[evsel->ids++] = id;
}

static int perf_evlist__id_add_fd(struct perf_evlist *evlist,
				  struct perf_evsel *evsel,
				  int cpu, int thread, int fd)
{
	u64 read_data[4] = { 0, };
	int id_idx = 1; /* The first entry is the counter value */
	u64 id;
	int ret;

	ret = ioctl(fd, PERF_EVENT_IOC_ID, &id);
	if (!ret)
		goto add;

	if (errno != ENOTTY)
		return -1;

	/* Legacy way to get event id.. All hail to old kernels! */

	/*
	 * This way does not work with group format read, so bail
	 * out in that case.
	 */
	if (perf_evlist__read_format(evlist) & PERF_FORMAT_GROUP)
		return -1;

	if (!(evsel->attr.read_format & PERF_FORMAT_ID) ||
	    read(fd, &read_data, sizeof(read_data)) == -1)
		return -1;

	if (evsel->attr.read_format & PERF_FORMAT_TOTAL_TIME_ENABLED)
		++id_idx;
	if (evsel->attr.read_format & PERF_FORMAT_TOTAL_TIME_RUNNING)
		++id_idx;

	id = read_data[id_idx];

 add:
	perf_evlist__id_add(evlist, evsel, cpu, thread, id);
	return 0;
}

struct perf_sample_id *perf_evlist__id2sid(struct perf_evlist *evlist, u64 id)
{
	struct hlist_head *head;
	struct perf_sample_id *sid;
	int hash;

	hash = hash_64(id, PERF_EVLIST__HLIST_BITS);
	head = &evlist->heads[hash];

	hlist_for_each_entry(sid, head, node)
		if (sid->id == id)
			return sid;

	return NULL;
}

struct perf_evsel *perf_evlist__id2evsel(struct perf_evlist *evlist, u64 id)
{
	struct perf_sample_id *sid;

	if (evlist->nr_entries == 1)
		return perf_evlist__first(evlist);

	sid = perf_evlist__id2sid(evlist, id);
	if (sid)
		return sid->evsel;

	if (!perf_evlist__sample_id_all(evlist))
		return perf_evlist__first(evlist);

	return NULL;
}

static int perf_evlist__event2id(struct perf_evlist *evlist,
				 union perf_event *event, u64 *id)
{
	const u64 *array = event->sample.array;
	ssize_t n;

	n = (event->header.size - sizeof(event->header)) >> 3;

	if (event->header.type == PERF_RECORD_SAMPLE) {
		if (evlist->id_pos >= n)
			return -1;
		*id = array[evlist->id_pos];
	} else {
		if (evlist->is_pos > n)
			return -1;
		n -= evlist->is_pos;
		*id = array[n];
	}
	return 0;
}

static struct perf_evsel *perf_evlist__event2evsel(struct perf_evlist *evlist,
						   union perf_event *event)
{
	struct perf_evsel *first = perf_evlist__first(evlist);
	struct hlist_head *head;
	struct perf_sample_id *sid;
	int hash;
	u64 id;

	if (evlist->nr_entries == 1)
		return first;

	if (!first->attr.sample_id_all &&
	    event->header.type != PERF_RECORD_SAMPLE)
		return first;

	if (perf_evlist__event2id(evlist, event, &id))
		return NULL;

	/* Synthesized events have an id of zero */
	if (!id)
		return first;

	hash = hash_64(id, PERF_EVLIST__HLIST_BITS);
	head = &evlist->heads[hash];

	hlist_for_each_entry(sid, head, node) {
		if (sid->id == id)
			return sid->evsel;
	}
	return NULL;
}

union perf_event *perf_evlist__mmap_read(struct perf_evlist *evlist, int idx)
{
	struct perf_mmap *md = &evlist->mmap[idx];
	unsigned int head = perf_mmap__read_head(md);
	unsigned int old = md->prev;
	unsigned char *data = md->base + page_size;
	union perf_event *event = NULL;

	if (evlist->overwrite) {
		/*
		 * If we're further behind than half the buffer, there's a chance
		 * the writer will bite our tail and mess up the samples under us.
		 *
		 * If we somehow ended up ahead of the head, we got messed up.
		 *
		 * In either case, truncate and restart at head.
		 */
		int diff = head - old;
		if (diff > md->mask / 2 || diff < 0) {
			fprintf(stderr, "WARNING: failed to keep up with mmap data.\n");

			/*
			 * head points to a known good entry, start there.
			 */
			old = head;
		}
	}

	if (old != head) {
		size_t size;

		event = (union perf_event *)&data[old & md->mask];
		size = event->header.size;

		/*
		 * Event straddles the mmap boundary -- header should always
		 * be inside due to u64 alignment of output.
		 */
		if ((old & md->mask) + size != ((old + size) & md->mask)) {
			unsigned int offset = old;
			unsigned int len = min(sizeof(*event), size), cpy;
			void *dst = md->event_copy;

			do {
				cpy = min(md->mask + 1 - (offset & md->mask), len);
				memcpy(dst, &data[offset & md->mask], cpy);
				offset += cpy;
				dst += cpy;
				len -= cpy;
			} while (len);

			event = (union perf_event *) md->event_copy;
		}

		old += size;
	}

	md->prev = old;

	return event;
}

void perf_evlist__mmap_consume(struct perf_evlist *evlist, int idx)
{
	if (!evlist->overwrite) {
		struct perf_mmap *md = &evlist->mmap[idx];
		unsigned int old = md->prev;

		perf_mmap__write_tail(md, old);
	}
}

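/*
 * Illustrative sketch (not from the original sources): draining one ring
 * buffer; process_event() stands for whatever the caller does with each
 * record:
 *
 *	union perf_event *event;
 *
 *	while ((event = perf_evlist__mmap_read(evlist, idx)) != NULL) {
 *		process_event(event);
 *		perf_evlist__mmap_consume(evlist, idx);
 *	}
 */
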
static void __perf_evlist__munmap(struct perf_evlist *evlist, int idx)
{
	if (evlist->mmap[idx].base != NULL) {
		munmap(evlist->mmap[idx].base, evlist->mmap_len);
		evlist->mmap[idx].base = NULL;
	}
}

void perf_evlist__munmap(struct perf_evlist *evlist)
{
	int i;

	if (evlist->mmap == NULL)
		return;

	for (i = 0; i < evlist->nr_mmaps; i++)
		__perf_evlist__munmap(evlist, i);

	zfree(&evlist->mmap);
}

static int perf_evlist__alloc_mmap(struct perf_evlist *evlist)
{
	evlist->nr_mmaps = cpu_map__nr(evlist->cpus);
	if (cpu_map__empty(evlist->cpus))
		evlist->nr_mmaps = thread_map__nr(evlist->threads);
	evlist->mmap = zalloc(evlist->nr_mmaps * sizeof(struct perf_mmap));
	return evlist->mmap != NULL ? 0 : -ENOMEM;
}

struct mmap_params {
	int prot;
	int mask;
};

static int __perf_evlist__mmap(struct perf_evlist *evlist, int idx,
			       struct mmap_params *mp, int fd)
{
	evlist->mmap[idx].prev = 0;
	evlist->mmap[idx].mask = mp->mask;
	evlist->mmap[idx].base = mmap(NULL, evlist->mmap_len, mp->prot,
				      MAP_SHARED, fd, 0);
	if (evlist->mmap[idx].base == MAP_FAILED) {
		pr_debug2("failed to mmap perf event ring buffer, error %d\n",
			  errno);
		evlist->mmap[idx].base = NULL;
		return -1;
	}

	perf_evlist__add_pollfd(evlist, fd);
	return 0;
}

static int perf_evlist__mmap_per_evsel(struct perf_evlist *evlist, int idx,
				       struct mmap_params *mp, int cpu,
				       int thread, int *output)
{
	struct perf_evsel *evsel;

	evlist__for_each(evlist, evsel) {
		int fd;

		if (evsel->system_wide && thread)
			continue;

		fd = FD(evsel, cpu, thread);

		if (*output == -1) {
			*output = fd;
			if (__perf_evlist__mmap(evlist, idx, mp, *output) < 0)
				return -1;
		} else {
			if (ioctl(fd, PERF_EVENT_IOC_SET_OUTPUT, *output) != 0)
				return -1;
		}

		if ((evsel->attr.read_format & PERF_FORMAT_ID) &&
		    perf_evlist__id_add_fd(evlist, evsel, cpu, thread, fd) < 0)
			return -1;
	}

	return 0;
}

static int perf_evlist__mmap_per_cpu(struct perf_evlist *evlist,
				     struct mmap_params *mp)
{
	int cpu, thread;
	int nr_cpus = cpu_map__nr(evlist->cpus);
	int nr_threads = thread_map__nr(evlist->threads);

	pr_debug2("perf event ring buffer mmapped per cpu\n");
	for (cpu = 0; cpu < nr_cpus; cpu++) {
		int output = -1;

		for (thread = 0; thread < nr_threads; thread++) {
			if (perf_evlist__mmap_per_evsel(evlist, cpu, mp, cpu,
							thread, &output))
				goto out_unmap;
		}
	}

	return 0;

out_unmap:
	for (cpu = 0; cpu < nr_cpus; cpu++)
		__perf_evlist__munmap(evlist, cpu);
	return -1;
}

static int perf_evlist__mmap_per_thread(struct perf_evlist *evlist,
					struct mmap_params *mp)
{
	int thread;
	int nr_threads = thread_map__nr(evlist->threads);

	pr_debug2("perf event ring buffer mmapped per thread\n");
	for (thread = 0; thread < nr_threads; thread++) {
		int output = -1;

		if (perf_evlist__mmap_per_evsel(evlist, thread, mp, 0, thread,
						&output))
			goto out_unmap;
	}

	return 0;

out_unmap:
	for (thread = 0; thread < nr_threads; thread++)
		__perf_evlist__munmap(evlist, thread);
	return -1;
}

static size_t perf_evlist__mmap_size(unsigned long pages)
{
	/* 512 kiB: default amount of unprivileged mlocked memory */
	if (pages == UINT_MAX)
		pages = (512 * 1024) / page_size;
	else if (!is_power_of_2(pages))
		return 0;

	return (pages + 1) * page_size;
}

static long parse_pages_arg(const char *str, unsigned long min,
			    unsigned long max)
{
	unsigned long pages, val;
	static struct parse_tag tags[] = {
		{ .tag = 'B', .mult = 1 },
		{ .tag = 'K', .mult = 1 << 10 },
		{ .tag = 'M', .mult = 1 << 20 },
		{ .tag = 'G', .mult = 1 << 30 },
		{ .tag = 0 },
	};

	if (str == NULL)
		return -EINVAL;

	val = parse_tag_value(str, tags);
	if (val != (unsigned long) -1) {
		/* we got file size value */
		pages = PERF_ALIGN(val, page_size) / page_size;
	} else {
		/* we got pages count value */
		char *eptr;
		pages = strtoul(str, &eptr, 10);
		if (*eptr != '\0')
			return -EINVAL;
	}

	if (pages == 0 && min == 0) {
		/* leave number of pages at 0 */
	} else if (!is_power_of_2(pages)) {
		/* round pages up to next power of 2 */
		pages = next_pow2_l(pages);
		if (!pages)
			return -EINVAL;
		pr_info("rounding mmap pages size to %lu bytes (%lu pages)\n",
			pages * page_size, pages);
	}

	if (pages > max)
		return -EINVAL;

	return pages;
}

int perf_evlist__parse_mmap_pages(const struct option *opt, const char *str,
				  int unset __maybe_unused)
{
	unsigned int *mmap_pages = opt->value;
	unsigned long max = UINT_MAX;
	long pages;

	if (max > SIZE_MAX / page_size)
		max = SIZE_MAX / page_size;

	pages = parse_pages_arg(str, 1, max);
	if (pages < 0) {
		pr_err("Invalid argument for --mmap_pages/-m\n");
		return -1;
	}

	*mmap_pages = pages;
	return 0;
}

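/*
 * Illustrative examples (not from the original sources) of how the
 * --mmap_pages/-m argument above is interpreted, assuming a 4 kiB page
 * size:
 *
 *	"16"	-> 16 pages (already a power of two)
 *	"100"	-> rounded up to 128 pages
 *	"512K"	-> 512 kiB / 4 kiB = 128 pages
 *
 * perf_evlist__mmap_size() then maps (pages + 1) * page_size bytes, the
 * extra page holding the ring buffer's control/header page.
 */
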
/**
 * perf_evlist__mmap - Create mmaps to receive events.
 * @evlist: list of events
 * @pages: map length in pages
 * @overwrite: overwrite older events?
 *
 * If @overwrite is %false the user needs to signal event consumption using
 * perf_mmap__write_tail(). Using perf_evlist__mmap_read() does this
 * automatically.
 *
 * Return: %0 on success, negative error code otherwise.
 */
int perf_evlist__mmap(struct perf_evlist *evlist, unsigned int pages,
		      bool overwrite)
{
	struct perf_evsel *evsel;
	const struct cpu_map *cpus = evlist->cpus;
	const struct thread_map *threads = evlist->threads;
	struct mmap_params mp = {
		.prot = PROT_READ | (overwrite ? 0 : PROT_WRITE),
	};

	if (evlist->mmap == NULL && perf_evlist__alloc_mmap(evlist) < 0)
		return -ENOMEM;

	if (evlist->pollfd == NULL && perf_evlist__alloc_pollfd(evlist) < 0)
		return -ENOMEM;

	evlist->overwrite = overwrite;
	evlist->mmap_len = perf_evlist__mmap_size(pages);
	pr_debug("mmap size %zuB\n", evlist->mmap_len);
	mp.mask = evlist->mmap_len - page_size - 1;

	evlist__for_each(evlist, evsel) {
		if ((evsel->attr.read_format & PERF_FORMAT_ID) &&
		    evsel->sample_id == NULL &&
		    perf_evsel__alloc_id(evsel, cpu_map__nr(cpus), threads->nr) < 0)
			return -ENOMEM;
	}

	if (cpu_map__empty(cpus))
		return perf_evlist__mmap_per_thread(evlist, &mp);

	return perf_evlist__mmap_per_cpu(evlist, &mp);
}

int perf_evlist__create_maps(struct perf_evlist *evlist, struct target *target)
{
	evlist->threads = thread_map__new_str(target->pid, target->tid,
					      target->uid);

	if (evlist->threads == NULL)
		return -1;

	if (target__uses_dummy_map(target))
		evlist->cpus = cpu_map__dummy_new();
	else
		evlist->cpus = cpu_map__new(target->cpu_list);

	if (evlist->cpus == NULL)
		goto out_delete_threads;

	return 0;

out_delete_threads:
	thread_map__delete(evlist->threads);
	return -1;
}

int perf_evlist__apply_filters(struct perf_evlist *evlist)
{
	struct perf_evsel *evsel;
	int err = 0;
	const int ncpus = cpu_map__nr(evlist->cpus),
		  nthreads = thread_map__nr(evlist->threads);

	evlist__for_each(evlist, evsel) {
		if (evsel->filter == NULL)
			continue;

		err = perf_evsel__set_filter(evsel, ncpus, nthreads, evsel->filter);
		if (err)
			break;
	}

	return err;
}

int perf_evlist__set_filter(struct perf_evlist *evlist, const char *filter)
{
	struct perf_evsel *evsel;
	int err = 0;
	const int ncpus = cpu_map__nr(evlist->cpus),
		  nthreads = thread_map__nr(evlist->threads);

	evlist__for_each(evlist, evsel) {
		err = perf_evsel__set_filter(evsel, ncpus, nthreads, filter);
		if (err)
			break;
	}

	return err;
}

bool perf_evlist__valid_sample_type(struct perf_evlist *evlist)
{
	struct perf_evsel *pos;

	if (evlist->nr_entries == 1)
		return true;

	if (evlist->id_pos < 0 || evlist->is_pos < 0)
		return false;

	evlist__for_each(evlist, pos) {
		if (pos->id_pos != evlist->id_pos ||
		    pos->is_pos != evlist->is_pos)
			return false;
	}

	return true;
}

u64 __perf_evlist__combined_sample_type(struct perf_evlist *evlist)
{
	struct perf_evsel *evsel;

	if (evlist->combined_sample_type)
		return evlist->combined_sample_type;

	evlist__for_each(evlist, evsel)
		evlist->combined_sample_type |= evsel->attr.sample_type;

	return evlist->combined_sample_type;
}

u64 perf_evlist__combined_sample_type(struct perf_evlist *evlist)
{
	evlist->combined_sample_type = 0;
	return __perf_evlist__combined_sample_type(evlist);
}

bool perf_evlist__valid_read_format(struct perf_evlist *evlist)
{
	struct perf_evsel *first = perf_evlist__first(evlist), *pos = first;
	u64 read_format = first->attr.read_format;
	u64 sample_type = first->attr.sample_type;

	evlist__for_each(evlist, pos) {
		if (read_format != pos->attr.read_format)
			return false;
	}

	/* PERF_SAMPLE_READ implies PERF_FORMAT_ID. */
	if ((sample_type & PERF_SAMPLE_READ) &&
	    !(read_format & PERF_FORMAT_ID)) {
		return false;
	}

	return true;
}

u64 perf_evlist__read_format(struct perf_evlist *evlist)
{
	struct perf_evsel *first = perf_evlist__first(evlist);
	return first->attr.read_format;
}

u16 perf_evlist__id_hdr_size(struct perf_evlist *evlist)
{
	struct perf_evsel *first = perf_evlist__first(evlist);
	struct perf_sample *data;
	u64 sample_type;
	u16 size = 0;

	if (!first->attr.sample_id_all)
		goto out;

	sample_type = first->attr.sample_type;

	if (sample_type & PERF_SAMPLE_TID)
		size += sizeof(data->tid) * 2;

	if (sample_type & PERF_SAMPLE_TIME)
		size += sizeof(data->time);

	if (sample_type & PERF_SAMPLE_ID)
		size += sizeof(data->id);

	if (sample_type & PERF_SAMPLE_STREAM_ID)
		size += sizeof(data->stream_id);

	if (sample_type & PERF_SAMPLE_CPU)
		size += sizeof(data->cpu) * 2;

	if (sample_type & PERF_SAMPLE_IDENTIFIER)
		size += sizeof(data->id);
out:
	return size;
}

bool perf_evlist__valid_sample_id_all(struct perf_evlist *evlist)
{
	struct perf_evsel *first = perf_evlist__first(evlist), *pos = first;

	evlist__for_each_continue(evlist, pos) {
		if (first->attr.sample_id_all != pos->attr.sample_id_all)
			return false;
	}

	return true;
}

bool perf_evlist__sample_id_all(struct perf_evlist *evlist)
{
	struct perf_evsel *first = perf_evlist__first(evlist);
	return first->attr.sample_id_all;
}

void perf_evlist__set_selected(struct perf_evlist *evlist,
			       struct perf_evsel *evsel)
{
	evlist->selected = evsel;
}

void perf_evlist__close(struct perf_evlist *evlist)
{
	struct perf_evsel *evsel;
	int ncpus = cpu_map__nr(evlist->cpus);
	int nthreads = thread_map__nr(evlist->threads);
	int n;

	evlist__for_each_reverse(evlist, evsel) {
		n = evsel->cpus ? evsel->cpus->nr : ncpus;
		perf_evsel__close(evsel, n, nthreads);
	}
}

int perf_evlist__open(struct perf_evlist *evlist)
{
	struct perf_evsel *evsel;
	int err;

	perf_evlist__update_id_pos(evlist);

	evlist__for_each(evlist, evsel) {
		err = perf_evsel__open(evsel, evlist->cpus, evlist->threads);
		if (err < 0)
			goto out_err;
	}

	return 0;
out_err:
	perf_evlist__close(evlist);
	errno = -err;
	return err;
}

int perf_evlist__prepare_workload(struct perf_evlist *evlist, struct target *target,
				  const char *argv[], bool pipe_output,
				  void (*exec_error)(int signo, siginfo_t *info, void *ucontext))
{
	int child_ready_pipe[2], go_pipe[2];
	char bf;

	if (pipe(child_ready_pipe) < 0) {
		perror("failed to create 'ready' pipe");
		return -1;
	}

	if (pipe(go_pipe) < 0) {
		perror("failed to create 'go' pipe");
		goto out_close_ready_pipe;
	}

	evlist->workload.pid = fork();
	if (evlist->workload.pid < 0) {
		perror("failed to fork");
		goto out_close_pipes;
	}

	if (!evlist->workload.pid) {
		int ret;

		if (pipe_output)
			dup2(2, 1);

		signal(SIGTERM, SIG_DFL);

		close(child_ready_pipe[0]);
		close(go_pipe[1]);
		fcntl(go_pipe[0], F_SETFD, FD_CLOEXEC);

		/*
		 * Tell the parent we're ready to go
		 */
		close(child_ready_pipe[1]);

		/*
		 * Wait until the parent tells us to go.
		 */
		ret = read(go_pipe[0], &bf, 1);
		/*
		 * The parent will ask for the execvp() to be performed by
		 * writing exactly one byte, in workload.cork_fd, usually via
		 * perf_evlist__start_workload().
		 *
		 * For cancelling the workload without actually running it,
		 * the parent will just close workload.cork_fd, without writing
		 * anything, i.e. read will return zero and we just exit()
		 * here.
		 */
		if (ret != 1) {
			if (ret == -1)
				perror("unable to read pipe");
			exit(ret);
		}

		execvp(argv[0], (char **)argv);

		if (exec_error) {
			union sigval val;

			val.sival_int = errno;
			if (sigqueue(getppid(), SIGUSR1, val))
				perror(argv[0]);
		} else
			perror(argv[0]);
		exit(-1);
	}

	if (exec_error) {
		struct sigaction act = {
			.sa_flags = SA_SIGINFO,
			.sa_sigaction = exec_error,
		};
		sigaction(SIGUSR1, &act, NULL);
	}

	if (target__none(target))
		evlist->threads->map[0] = evlist->workload.pid;

	close(child_ready_pipe[1]);
	close(go_pipe[0]);
	/*
	 * wait for child to settle
	 */
	if (read(child_ready_pipe[0], &bf, 1) == -1) {
		perror("unable to read pipe");
		goto out_close_pipes;
	}

	fcntl(go_pipe[1], F_SETFD, FD_CLOEXEC);
	evlist->workload.cork_fd = go_pipe[1];
	close(child_ready_pipe[0]);
	return 0;

out_close_pipes:
	close(go_pipe[0]);
	close(go_pipe[1]);
out_close_ready_pipe:
	close(child_ready_pipe[0]);
	close(child_ready_pipe[1]);
	return -1;
}

int perf_evlist__start_workload(struct perf_evlist *evlist)
{
	if (evlist->workload.cork_fd > 0) {
		char bf = 0;
		int ret;
		/*
		 * Remove the cork, let it rip!
		 */
		ret = write(evlist->workload.cork_fd, &bf, 1);
		if (ret < 0)
			perror("unable to write to pipe");

		close(evlist->workload.cork_fd);
		return ret;
	}

	return 0;
}

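/*
 * Illustrative sketch (not from the original sources): one possible
 * sequence for measuring a forked workload, where "argv" is the command
 * to exec and "pages" the ring buffer size in pages:
 *
 *	perf_evlist__prepare_workload(evlist, &target, argv, false, NULL);
 *	perf_evlist__open(evlist);
 *	perf_evlist__mmap(evlist, pages, false);
 *	perf_evlist__enable(evlist);
 *	perf_evlist__start_workload(evlist);	 (uncork: the child exec()s now)
 */
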
int perf_evlist__parse_sample(struct perf_evlist *evlist, union perf_event *event,
			      struct perf_sample *sample)
{
	struct perf_evsel *evsel = perf_evlist__event2evsel(evlist, event);

	if (!evsel)
		return -EFAULT;
	return perf_evsel__parse_sample(evsel, event, sample);
}

size_t perf_evlist__fprintf(struct perf_evlist *evlist, FILE *fp)
{
	struct perf_evsel *evsel;
	size_t printed = 0;

	evlist__for_each(evlist, evsel) {
		printed += fprintf(fp, "%s%s", evsel->idx ? ", " : "",
				   perf_evsel__name(evsel));
	}

	return printed + fprintf(fp, "\n");
}

int perf_evlist__strerror_tp(struct perf_evlist *evlist __maybe_unused,
			     int err, char *buf, size_t size)
{
	char sbuf[128];

	switch (err) {
	case ENOENT:
		scnprintf(buf, size, "%s",
			  "Error:\tUnable to find debugfs\n"
1297 "Hint:\tWas your kernel was compiled with debugfs support?\n"
1298 "Hint:\tIs the debugfs filesystem mounted?\n"
1299 "Hint:\tTry 'sudo mount -t debugfs nodev /sys/kernel/debug'");
1300 break;
1301 case EACCES:
1302 scnprintf(buf, size,
1303 "Error:\tNo permissions to read %s/tracing/events/raw_syscalls\n"
1304 "Hint:\tTry 'sudo mount -o remount,mode=755 %s'\n",
1305 debugfs_mountpoint, debugfs_mountpoint);
1306 break;
1307 default:
1308 scnprintf(buf, size, "%s", strerror_r(err, sbuf, sizeof(sbuf)));
1309 break;
1310 }
1311
1312 return 0;
1313}
a8f23d8f
ACM
1314
1315int perf_evlist__strerror_open(struct perf_evlist *evlist __maybe_unused,
1316 int err, char *buf, size_t size)
1317{
1318 int printed, value;
6e81c74c 1319 char sbuf[STRERR_BUFSIZE], *emsg = strerror_r(err, sbuf, sizeof(sbuf));
a8f23d8f
ACM
1320
1321 switch (err) {
1322 case EACCES:
1323 case EPERM:
1324 printed = scnprintf(buf, size,
1325 "Error:\t%s.\n"
1326 "Hint:\tCheck /proc/sys/kernel/perf_event_paranoid setting.", emsg);
1327
1a47245d 1328 value = perf_event_paranoid();
a8f23d8f
ACM
1329
1330 printed += scnprintf(buf + printed, size - printed, "\nHint:\t");
1331
1332 if (value >= 2) {
1333 printed += scnprintf(buf + printed, size - printed,
1334 "For your workloads it needs to be <= 1\nHint:\t");
1335 }
1336 printed += scnprintf(buf + printed, size - printed,
5229e366 1337 "For system wide tracing it needs to be set to -1.\n");
a8f23d8f
ACM
1338
1339 printed += scnprintf(buf + printed, size - printed,
5229e366
ACM
1340 "Hint:\tTry: 'sudo sh -c \"echo -1 > /proc/sys/kernel/perf_event_paranoid\"'\n"
1341 "Hint:\tThe current value is %d.", value);
a8f23d8f
ACM
1342 break;
1343 default:
1344 scnprintf(buf, size, "%s", emsg);
1345 break;
1346 }
1347
1348 return 0;
1349}
a025e4f0
AH
1350
1351void perf_evlist__to_front(struct perf_evlist *evlist,
1352 struct perf_evsel *move_evsel)
1353{
1354 struct perf_evsel *evsel, *n;
1355 LIST_HEAD(move);
1356
1357 if (move_evsel == perf_evlist__first(evlist))
1358 return;
1359
0050f7aa 1360 evlist__for_each_safe(evlist, n, evsel) {
a025e4f0
AH
1361 if (evsel->leader == move_evsel->leader)
1362 list_move_tail(&evsel->node, &move);
1363 }
1364
1365 list_splice(&move, &evlist->entries);
1366}
60b0896c
AH
1367
1368void perf_evlist__set_tracking_event(struct perf_evlist *evlist,
1369 struct perf_evsel *tracking_evsel)
1370{
1371 struct perf_evsel *evsel;
1372
1373 if (tracking_evsel->tracking)
1374 return;
1375
1376 evlist__for_each(evlist, evsel) {
1377 if (evsel != tracking_evsel)
1378 evsel->tracking = false;
1379 }
1380
1381 tracking_evsel->tracking = true;
1382}