// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (C) 2011, Red Hat Inc, Arnaldo Carvalho de Melo <acme@redhat.com>
 *
 * Parts came from builtin-{top,stat,record}.c, see those files for further
 * copyright notes.
 */

#include <byteswap.h>
#include <errno.h>
#include <inttypes.h>
#include <linux/bitops.h>
#include <api/io.h>
#include <api/fs/fs.h>
#include <api/fs/tracing_path.h>
#include <linux/hw_breakpoint.h>
#include <linux/perf_event.h>
#include <linux/compiler.h>
#include <linux/err.h>
#include <linux/zalloc.h>
#include <sys/ioctl.h>
#include <sys/resource.h>
#include <sys/types.h>
#include <dirent.h>
#include <stdlib.h>
#include <perf/evsel.h>
#include "asm/bug.h"
#include "bpf_counter.h"
#include "callchain.h"
#include "cgroup.h"
#include "counts.h"
#include "event.h"
#include "evsel.h"
#include "time-utils.h"
#include "util/env.h"
#include "util/evsel_config.h"
#include "util/evsel_fprintf.h"
#include "evlist.h"
#include <perf/cpumap.h>
#include "thread_map.h"
#include "target.h"
#include "perf_regs.h"
#include "record.h"
#include "debug.h"
#include "trace-event.h"
#include "stat.h"
#include "string2.h"
#include "memswap.h"
#include "util.h"
#include "util/hashmap.h"
#include "off_cpu.h"
#include "pmu.h"
#include "pmus.h"
#include "rlimit.h"
#include "../perf-sys.h"
#include "util/parse-branch-options.h"
#include "util/bpf-filter.h"
#include "util/hist.h"
#include <internal/xyarray.h>
#include <internal/lib.h>
#include <internal/threadmap.h>

#include <linux/ctype.h>

#ifdef HAVE_LIBTRACEEVENT
#include <traceevent/event-parse.h>
#endif

struct perf_missing_features perf_missing_features;

static clockid_t clockid;

static const char *const perf_tool_event__tool_names[PERF_TOOL_MAX] = {
	NULL,
	"duration_time",
	"user_time",
	"system_time",
};

const char *perf_tool_event__to_str(enum perf_tool_event ev)
{
	if (ev > PERF_TOOL_NONE && ev < PERF_TOOL_MAX)
		return perf_tool_event__tool_names[ev];

	return NULL;
}

enum perf_tool_event perf_tool_event__from_str(const char *str)
{
	int i;

	perf_tool_event__for_each_event(i) {
		if (!strcmp(str, perf_tool_event__tool_names[i]))
			return i;
	}
	return PERF_TOOL_NONE;
}

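/*
 * Example: perf_tool_event__from_str("duration_time") returns the index
 * of the "duration_time" entry (PERF_TOOL_DURATION_TIME); any
 * unrecognized string maps back to PERF_TOOL_NONE.
 */
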
static int evsel__no_extra_init(struct evsel *evsel __maybe_unused)
{
	return 0;
}

void __weak test_attr__ready(void) { }

static void evsel__no_extra_fini(struct evsel *evsel __maybe_unused)
{
}

static struct {
	size_t	size;
	int	(*init)(struct evsel *evsel);
	void	(*fini)(struct evsel *evsel);
} perf_evsel__object = {
	.size = sizeof(struct evsel),
	.init = evsel__no_extra_init,
	.fini = evsel__no_extra_fini,
};

int evsel__object_config(size_t object_size, int (*init)(struct evsel *evsel),
			 void (*fini)(struct evsel *evsel))
{
	if (object_size == 0)
		goto set_methods;

	if (perf_evsel__object.size > object_size)
		return -EINVAL;

	perf_evsel__object.size = object_size;

set_methods:
	if (init != NULL)
		perf_evsel__object.init = init;

	if (fini != NULL)
		perf_evsel__object.fini = fini;

	return 0;
}

#define FD(e, x, y) (*(int *)xyarray__entry(e->core.fd, x, y))

int __evsel__sample_size(u64 sample_type)
{
	u64 mask = sample_type & PERF_SAMPLE_MASK;
	int size = 0;
	int i;

	for (i = 0; i < 64; i++) {
		if (mask & (1ULL << i))
			size++;
	}

	size *= sizeof(u64);

	return size;
}

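/*
 * Worked example: for sample_type == PERF_SAMPLE_IP | PERF_SAMPLE_TID |
 * PERF_SAMPLE_TIME, three bits of PERF_SAMPLE_MASK are set, so the
 * fixed-size part of each sample is 3 * sizeof(u64) == 24 bytes.
 */
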
/**
 * __perf_evsel__calc_id_pos - calculate id_pos.
 * @sample_type: sample type
 *
 * This function returns the position of the event id (PERF_SAMPLE_ID or
 * PERF_SAMPLE_IDENTIFIER) in a sample event i.e. in the array of struct
 * perf_record_sample.
 */
static int __perf_evsel__calc_id_pos(u64 sample_type)
{
	int idx = 0;

	if (sample_type & PERF_SAMPLE_IDENTIFIER)
		return 0;

	if (!(sample_type & PERF_SAMPLE_ID))
		return -1;

	if (sample_type & PERF_SAMPLE_IP)
		idx += 1;

	if (sample_type & PERF_SAMPLE_TID)
		idx += 1;

	if (sample_type & PERF_SAMPLE_TIME)
		idx += 1;

	if (sample_type & PERF_SAMPLE_ADDR)
		idx += 1;

	return idx;
}

/**
 * __perf_evsel__calc_is_pos - calculate is_pos.
 * @sample_type: sample type
 *
 * This function returns the position (counting backwards) of the event id
 * (PERF_SAMPLE_ID or PERF_SAMPLE_IDENTIFIER) in a non-sample event i.e. if
 * sample_id_all is used there is an id sample appended to non-sample events.
 */
static int __perf_evsel__calc_is_pos(u64 sample_type)
{
	int idx = 1;

	if (sample_type & PERF_SAMPLE_IDENTIFIER)
		return 1;

	if (!(sample_type & PERF_SAMPLE_ID))
		return -1;

	if (sample_type & PERF_SAMPLE_CPU)
		idx += 1;

	if (sample_type & PERF_SAMPLE_STREAM_ID)
		idx += 1;

	return idx;
}

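/*
 * Worked example: with sample_type == PERF_SAMPLE_IP | PERF_SAMPLE_TID |
 * PERF_SAMPLE_ID, __perf_evsel__calc_id_pos() returns 2 (the id follows
 * the ip and tid words), while __perf_evsel__calc_is_pos() returns 1
 * (the id is the last u64 of the trailing sample_id_all block).
 */
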
void evsel__calc_id_pos(struct evsel *evsel)
{
	evsel->id_pos = __perf_evsel__calc_id_pos(evsel->core.attr.sample_type);
	evsel->is_pos = __perf_evsel__calc_is_pos(evsel->core.attr.sample_type);
}

void __evsel__set_sample_bit(struct evsel *evsel,
			     enum perf_event_sample_format bit)
{
	if (!(evsel->core.attr.sample_type & bit)) {
		evsel->core.attr.sample_type |= bit;
		evsel->sample_size += sizeof(u64);
		evsel__calc_id_pos(evsel);
	}
}

void __evsel__reset_sample_bit(struct evsel *evsel,
			       enum perf_event_sample_format bit)
{
	if (evsel->core.attr.sample_type & bit) {
		evsel->core.attr.sample_type &= ~bit;
		evsel->sample_size -= sizeof(u64);
		evsel__calc_id_pos(evsel);
	}
}

void evsel__set_sample_id(struct evsel *evsel,
			  bool can_sample_identifier)
{
	if (can_sample_identifier) {
		evsel__reset_sample_bit(evsel, ID);
		evsel__set_sample_bit(evsel, IDENTIFIER);
	} else {
		evsel__set_sample_bit(evsel, ID);
	}
	evsel->core.attr.read_format |= PERF_FORMAT_ID;
}

/**
 * evsel__is_function_event - Return whether given evsel is a function
 * trace event
 *
 * @evsel - evsel selector to be tested
 *
 * Return %true if event is function trace event
 */
bool evsel__is_function_event(struct evsel *evsel)
{
#define FUNCTION_EVENT "ftrace:function"

	return evsel->name &&
	       !strncmp(FUNCTION_EVENT, evsel->name, sizeof(FUNCTION_EVENT));

#undef FUNCTION_EVENT
}

void evsel__init(struct evsel *evsel,
		 struct perf_event_attr *attr, int idx)
{
	perf_evsel__init(&evsel->core, attr, idx);
	evsel->tracking = !idx;
	evsel->unit = strdup("");
	evsel->scale = 1.0;
	evsel->max_events = ULONG_MAX;
	evsel->evlist = NULL;
	evsel->bpf_obj = NULL;
	evsel->bpf_fd = -1;
	INIT_LIST_HEAD(&evsel->config_terms);
	INIT_LIST_HEAD(&evsel->bpf_counter_list);
	INIT_LIST_HEAD(&evsel->bpf_filters);
	perf_evsel__object.init(evsel);
	evsel->sample_size = __evsel__sample_size(attr->sample_type);
	evsel__calc_id_pos(evsel);
	evsel->cmdline_group_boundary = false;
	evsel->metric_events = NULL;
	evsel->per_pkg_mask = NULL;
	evsel->collect_stat = false;
	evsel->pmu_name = NULL;
	evsel->group_pmu_name = NULL;
	evsel->skippable = false;
}

struct evsel *evsel__new_idx(struct perf_event_attr *attr, int idx)
{
	struct evsel *evsel = zalloc(perf_evsel__object.size);

	if (!evsel)
		return NULL;
	evsel__init(evsel, attr, idx);

	if (evsel__is_bpf_output(evsel) && !attr->sample_type) {
		evsel->core.attr.sample_type = (PERF_SAMPLE_RAW | PERF_SAMPLE_TIME |
						PERF_SAMPLE_CPU | PERF_SAMPLE_PERIOD);
		evsel->core.attr.sample_period = 1;
	}

	if (evsel__is_clock(evsel)) {
		free((char *)evsel->unit);
		evsel->unit = strdup("msec");
		evsel->scale = 1e-6;
	}

	return evsel;
}

int copy_config_terms(struct list_head *dst, struct list_head *src)
{
	struct evsel_config_term *pos, *tmp;

	list_for_each_entry(pos, src, list) {
		tmp = malloc(sizeof(*tmp));
		if (tmp == NULL)
			return -ENOMEM;

		*tmp = *pos;
		if (tmp->free_str) {
			tmp->val.str = strdup(pos->val.str);
			if (tmp->val.str == NULL) {
				free(tmp);
				return -ENOMEM;
			}
		}
		list_add_tail(&tmp->list, dst);
	}
	return 0;
}

static int evsel__copy_config_terms(struct evsel *dst, struct evsel *src)
{
	return copy_config_terms(&dst->config_terms, &src->config_terms);
}

/**
 * evsel__clone - create a new evsel copied from @orig
 * @orig: original evsel
 *
 * The assumption is that @orig is not configured nor opened yet.
 * So we only care about the attributes that can be set while it's parsed.
 */
struct evsel *evsel__clone(struct evsel *orig)
{
	struct evsel *evsel;

	BUG_ON(orig->core.fd);
	BUG_ON(orig->counts);
	BUG_ON(orig->priv);
	BUG_ON(orig->per_pkg_mask);

	/* cannot handle BPF objects for now */
	if (orig->bpf_obj)
		return NULL;

	evsel = evsel__new(&orig->core.attr);
	if (evsel == NULL)
		return NULL;

	evsel->core.cpus = perf_cpu_map__get(orig->core.cpus);
	evsel->core.own_cpus = perf_cpu_map__get(orig->core.own_cpus);
	evsel->core.threads = perf_thread_map__get(orig->core.threads);
	evsel->core.nr_members = orig->core.nr_members;
	evsel->core.system_wide = orig->core.system_wide;
	evsel->core.requires_cpu = orig->core.requires_cpu;
	evsel->core.is_pmu_core = orig->core.is_pmu_core;

	if (orig->name) {
		evsel->name = strdup(orig->name);
		if (evsel->name == NULL)
			goto out_err;
	}
	if (orig->group_name) {
		evsel->group_name = strdup(orig->group_name);
		if (evsel->group_name == NULL)
			goto out_err;
	}
	if (orig->pmu_name) {
		evsel->pmu_name = strdup(orig->pmu_name);
		if (evsel->pmu_name == NULL)
			goto out_err;
	}
	if (orig->group_pmu_name) {
		evsel->group_pmu_name = strdup(orig->group_pmu_name);
		if (evsel->group_pmu_name == NULL)
			goto out_err;
	}
	if (orig->filter) {
		evsel->filter = strdup(orig->filter);
		if (evsel->filter == NULL)
			goto out_err;
	}
	if (orig->metric_id) {
		evsel->metric_id = strdup(orig->metric_id);
		if (evsel->metric_id == NULL)
			goto out_err;
	}
	evsel->cgrp = cgroup__get(orig->cgrp);
#ifdef HAVE_LIBTRACEEVENT
	evsel->tp_format = orig->tp_format;
#endif
	evsel->handler = orig->handler;
	evsel->core.leader = orig->core.leader;

	evsel->max_events = orig->max_events;
	evsel->tool_event = orig->tool_event;
	free((char *)evsel->unit);
	evsel->unit = strdup(orig->unit);
	if (evsel->unit == NULL)
		goto out_err;

	evsel->scale = orig->scale;
	evsel->snapshot = orig->snapshot;
	evsel->per_pkg = orig->per_pkg;
	evsel->percore = orig->percore;
	evsel->precise_max = orig->precise_max;
	evsel->is_libpfm_event = orig->is_libpfm_event;

	evsel->exclude_GH = orig->exclude_GH;
	evsel->sample_read = orig->sample_read;
	evsel->auto_merge_stats = orig->auto_merge_stats;
	evsel->collect_stat = orig->collect_stat;
	evsel->weak_group = orig->weak_group;
	evsel->use_config_name = orig->use_config_name;
	evsel->pmu = orig->pmu;

	if (evsel__copy_config_terms(evsel, orig) < 0)
		goto out_err;

	return evsel;

out_err:
	evsel__delete(evsel);
	return NULL;
}

/*
 * Returns pointer with encoded error via <linux/err.h> interface.
 */
#ifdef HAVE_LIBTRACEEVENT
struct evsel *evsel__newtp_idx(const char *sys, const char *name, int idx, bool format)
{
	struct evsel *evsel = zalloc(perf_evsel__object.size);
	int err = -ENOMEM;

	if (evsel == NULL) {
		goto out_err;
	} else {
		struct perf_event_attr attr = {
			.type	     = PERF_TYPE_TRACEPOINT,
			.sample_type = (PERF_SAMPLE_RAW | PERF_SAMPLE_TIME |
					PERF_SAMPLE_CPU | PERF_SAMPLE_PERIOD),
		};

		if (asprintf(&evsel->name, "%s:%s", sys, name) < 0)
			goto out_free;

		event_attr_init(&attr);

		if (format) {
			evsel->tp_format = trace_event__tp_format(sys, name);
			if (IS_ERR(evsel->tp_format)) {
				err = PTR_ERR(evsel->tp_format);
				goto out_free;
			}
			attr.config = evsel->tp_format->id;
		} else {
			attr.config = (__u64) -1;
		}

		attr.sample_period = 1;
		evsel__init(evsel, &attr, idx);
	}

	return evsel;

out_free:
	zfree(&evsel->name);
	free(evsel);
out_err:
	return ERR_PTR(err);
}
#endif

const char *const evsel__hw_names[PERF_COUNT_HW_MAX] = {
	"cycles",
	"instructions",
	"cache-references",
	"cache-misses",
	"branches",
	"branch-misses",
	"bus-cycles",
	"stalled-cycles-frontend",
	"stalled-cycles-backend",
	"ref-cycles",
};

char *evsel__bpf_counter_events;

bool evsel__match_bpf_counter_events(const char *name)
{
	int name_len;
	bool match;
	char *ptr;

	if (!evsel__bpf_counter_events)
		return false;

	ptr = strstr(evsel__bpf_counter_events, name);
	name_len = strlen(name);

	/* check name matches a full token in evsel__bpf_counter_events */
	match = (ptr != NULL) &&
		((ptr == evsel__bpf_counter_events) || (*(ptr - 1) == ',')) &&
		((*(ptr + name_len) == ',') || (*(ptr + name_len) == '\0'));

	return match;
}

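/*
 * Example: with evsel__bpf_counter_events == "cycles,instructions",
 * evsel__match_bpf_counter_events("cycles") is true, but "cycle" is
 * false because a match must be bounded by ',' or '\0'.
 */
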
static const char *__evsel__hw_name(u64 config)
{
	if (config < PERF_COUNT_HW_MAX && evsel__hw_names[config])
		return evsel__hw_names[config];

	return "unknown-hardware";
}

static int evsel__add_modifiers(struct evsel *evsel, char *bf, size_t size)
{
	int colon = 0, r = 0;
	struct perf_event_attr *attr = &evsel->core.attr;
	bool exclude_guest_default = false;

#define MOD_PRINT(context, mod)	do {					\
		if (!attr->exclude_##context) {				\
			if (!colon) colon = ++r;			\
			r += scnprintf(bf + r, size - r, "%c", mod);	\
		} } while(0)

	if (attr->exclude_kernel || attr->exclude_user || attr->exclude_hv) {
		MOD_PRINT(kernel, 'k');
		MOD_PRINT(user, 'u');
		MOD_PRINT(hv, 'h');
		exclude_guest_default = true;
	}

	if (attr->precise_ip) {
		if (!colon)
			colon = ++r;
		r += scnprintf(bf + r, size - r, "%.*s", attr->precise_ip, "ppp");
		exclude_guest_default = true;
	}

	if (attr->exclude_host || attr->exclude_guest == exclude_guest_default) {
		MOD_PRINT(host, 'H');
		MOD_PRINT(guest, 'G');
	}
#undef MOD_PRINT
	if (colon)
		bf[colon - 1] = ':';
	return r;
}

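/*
 * Example: for an event that sets exclude_user and exclude_hv (i.e. a
 * kernel-only event) with precise_ip == 2, this appends ":kpp" to the
 * event name.
 */
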
int __weak arch_evsel__hw_name(struct evsel *evsel, char *bf, size_t size)
{
	return scnprintf(bf, size, "%s", __evsel__hw_name(evsel->core.attr.config));
}

static int evsel__hw_name(struct evsel *evsel, char *bf, size_t size)
{
	int r = arch_evsel__hw_name(evsel, bf, size);
	return r + evsel__add_modifiers(evsel, bf + r, size - r);
}

const char *const evsel__sw_names[PERF_COUNT_SW_MAX] = {
	"cpu-clock",
	"task-clock",
	"page-faults",
	"context-switches",
	"cpu-migrations",
	"minor-faults",
	"major-faults",
	"alignment-faults",
	"emulation-faults",
	"dummy",
};

static const char *__evsel__sw_name(u64 config)
{
	if (config < PERF_COUNT_SW_MAX && evsel__sw_names[config])
		return evsel__sw_names[config];
	return "unknown-software";
}

static int evsel__sw_name(struct evsel *evsel, char *bf, size_t size)
{
	int r = scnprintf(bf, size, "%s", __evsel__sw_name(evsel->core.attr.config));
	return r + evsel__add_modifiers(evsel, bf + r, size - r);
}

static int evsel__tool_name(enum perf_tool_event ev, char *bf, size_t size)
{
	return scnprintf(bf, size, "%s", perf_tool_event__to_str(ev));
}

static int __evsel__bp_name(char *bf, size_t size, u64 addr, u64 type)
{
	int r;

	r = scnprintf(bf, size, "mem:0x%" PRIx64 ":", addr);

	if (type & HW_BREAKPOINT_R)
		r += scnprintf(bf + r, size - r, "r");

	if (type & HW_BREAKPOINT_W)
		r += scnprintf(bf + r, size - r, "w");

	if (type & HW_BREAKPOINT_X)
		r += scnprintf(bf + r, size - r, "x");

	return r;
}

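/*
 * Example: addr == 0x1000 with type == HW_BREAKPOINT_R | HW_BREAKPOINT_W
 * yields "mem:0x1000:rw".
 */
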
static int evsel__bp_name(struct evsel *evsel, char *bf, size_t size)
{
	struct perf_event_attr *attr = &evsel->core.attr;
	int r = __evsel__bp_name(bf, size, attr->bp_addr, attr->bp_type);
	return r + evsel__add_modifiers(evsel, bf + r, size - r);
}

const char *const evsel__hw_cache[PERF_COUNT_HW_CACHE_MAX][EVSEL__MAX_ALIASES] = {
	{ "L1-dcache",	"l1-d",		"l1d",		"L1-data",		},
	{ "L1-icache",	"l1-i",		"l1i",		"L1-instruction",	},
	{ "LLC",	"L2",							},
	{ "dTLB",	"d-tlb",	"Data-TLB",				},
	{ "iTLB",	"i-tlb",	"Instruction-TLB",			},
	{ "branch",	"branches",	"bpu",		"btb",		"bpc",	},
	{ "node",								},
};

const char *const evsel__hw_cache_op[PERF_COUNT_HW_CACHE_OP_MAX][EVSEL__MAX_ALIASES] = {
	{ "load",	"loads",	"read",					},
	{ "store",	"stores",	"write",				},
	{ "prefetch",	"prefetches",	"speculative-read", "speculative-load",	},
};

const char *const evsel__hw_cache_result[PERF_COUNT_HW_CACHE_RESULT_MAX][EVSEL__MAX_ALIASES] = {
	{ "refs",	"Reference",	"ops",		"access",		},
	{ "misses",	"miss",							},
};

#define C(x)		PERF_COUNT_HW_CACHE_##x
#define CACHE_READ	(1 << C(OP_READ))
#define CACHE_WRITE	(1 << C(OP_WRITE))
#define CACHE_PREFETCH	(1 << C(OP_PREFETCH))
#define COP(x)		(1 << x)

/*
 * cache operation stat
 * L1I : Read and prefetch only
 * ITLB and BPU : Read-only
 */
static const unsigned long evsel__hw_cache_stat[C(MAX)] = {
	[C(L1D)]	= (CACHE_READ | CACHE_WRITE | CACHE_PREFETCH),
	[C(L1I)]	= (CACHE_READ | CACHE_PREFETCH),
	[C(LL)]		= (CACHE_READ | CACHE_WRITE | CACHE_PREFETCH),
	[C(DTLB)]	= (CACHE_READ | CACHE_WRITE | CACHE_PREFETCH),
	[C(ITLB)]	= (CACHE_READ),
	[C(BPU)]	= (CACHE_READ),
	[C(NODE)]	= (CACHE_READ | CACHE_WRITE | CACHE_PREFETCH),
};

bool evsel__is_cache_op_valid(u8 type, u8 op)
{
	if (evsel__hw_cache_stat[type] & COP(op))
		return true;	/* valid */
	else
		return false;	/* invalid */
}

int __evsel__hw_cache_type_op_res_name(u8 type, u8 op, u8 result, char *bf, size_t size)
{
	if (result) {
		return scnprintf(bf, size, "%s-%s-%s", evsel__hw_cache[type][0],
				 evsel__hw_cache_op[op][0],
				 evsel__hw_cache_result[result][0]);
	}

	return scnprintf(bf, size, "%s-%s", evsel__hw_cache[type][0],
			 evsel__hw_cache_op[op][1]);
}

static int __evsel__hw_cache_name(u64 config, char *bf, size_t size)
{
	u8 op, result, type = (config >>  0) & 0xff;
	const char *err = "unknown-ext-hardware-cache-type";

	if (type >= PERF_COUNT_HW_CACHE_MAX)
		goto out_err;

	op = (config >>  8) & 0xff;
	err = "unknown-ext-hardware-cache-op";
	if (op >= PERF_COUNT_HW_CACHE_OP_MAX)
		goto out_err;

	result = (config >> 16) & 0xff;
	err = "unknown-ext-hardware-cache-result";
	if (result >= PERF_COUNT_HW_CACHE_RESULT_MAX)
		goto out_err;

	err = "invalid-cache";
	if (!evsel__is_cache_op_valid(type, op))
		goto out_err;

	return __evsel__hw_cache_type_op_res_name(type, op, result, bf, size);
out_err:
	return scnprintf(bf, size, "%s", err);
}

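/*
 * Worked example: config == 0x10000 decodes as type L1D (0), op READ (0)
 * and result MISS (1), giving "L1-dcache-load-misses"; config == 0x102
 * (type LL, op WRITE, result ACCESS) takes the short two-part form and
 * gives "LLC-stores".
 */
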
static int evsel__hw_cache_name(struct evsel *evsel, char *bf, size_t size)
{
	int ret = __evsel__hw_cache_name(evsel->core.attr.config, bf, size);
	return ret + evsel__add_modifiers(evsel, bf + ret, size - ret);
}

static int evsel__raw_name(struct evsel *evsel, char *bf, size_t size)
{
	int ret = scnprintf(bf, size, "raw 0x%" PRIx64, evsel->core.attr.config);
	return ret + evsel__add_modifiers(evsel, bf + ret, size - ret);
}

const char *evsel__name(struct evsel *evsel)
{
	char bf[128];

	if (!evsel)
		goto out_unknown;

	if (evsel->name)
		return evsel->name;

	switch (evsel->core.attr.type) {
	case PERF_TYPE_RAW:
		evsel__raw_name(evsel, bf, sizeof(bf));
		break;

	case PERF_TYPE_HARDWARE:
		evsel__hw_name(evsel, bf, sizeof(bf));
		break;

	case PERF_TYPE_HW_CACHE:
		evsel__hw_cache_name(evsel, bf, sizeof(bf));
		break;

	case PERF_TYPE_SOFTWARE:
		if (evsel__is_tool(evsel))
			evsel__tool_name(evsel->tool_event, bf, sizeof(bf));
		else
			evsel__sw_name(evsel, bf, sizeof(bf));
		break;

	case PERF_TYPE_TRACEPOINT:
		scnprintf(bf, sizeof(bf), "%s", "unknown tracepoint");
		break;

	case PERF_TYPE_BREAKPOINT:
		evsel__bp_name(evsel, bf, sizeof(bf));
		break;

	default:
		scnprintf(bf, sizeof(bf), "unknown attr type: %d",
			  evsel->core.attr.type);
		break;
	}

	evsel->name = strdup(bf);

	if (evsel->name)
		return evsel->name;
out_unknown:
	return "unknown";
}

bool evsel__name_is(struct evsel *evsel, const char *name)
{
	return !strcmp(evsel__name(evsel), name);
}

const char *evsel__metric_id(const struct evsel *evsel)
{
	if (evsel->metric_id)
		return evsel->metric_id;

	if (evsel__is_tool(evsel))
		return perf_tool_event__to_str(evsel->tool_event);

	return "unknown";
}

const char *evsel__group_name(struct evsel *evsel)
{
	return evsel->group_name ?: "anon group";
}

/*
 * Returns the group details for the specified leader,
 * with the following rules.
 *
 *  For record -e '{cycles,instructions}'
 *    'anon group { cycles:u, instructions:u }'
 *
 *  For record -e 'cycles,instructions' and report --group
 *    'cycles:u, instructions:u'
 */
int evsel__group_desc(struct evsel *evsel, char *buf, size_t size)
{
	int ret = 0;
	bool first = true;
	struct evsel *pos;
	const char *group_name = evsel__group_name(evsel);

	if (!evsel->forced_leader)
		ret = scnprintf(buf, size, "%s { ", group_name);

	for_each_group_evsel(pos, evsel) {
		if (symbol_conf.skip_empty &&
		    evsel__hists(pos)->stats.nr_samples == 0)
			continue;

		ret += scnprintf(buf + ret, size - ret, "%s%s",
				 first ? "" : ", ", evsel__name(pos));
		first = false;
	}

	if (!evsel->forced_leader)
		ret += scnprintf(buf + ret, size - ret, " }");

	return ret;
}

static void __evsel__config_callchain(struct evsel *evsel, struct record_opts *opts,
				      struct callchain_param *param)
{
	bool function = evsel__is_function_event(evsel);
	struct perf_event_attr *attr = &evsel->core.attr;
	const char *arch = perf_env__arch(evsel__env(evsel));

	evsel__set_sample_bit(evsel, CALLCHAIN);

	attr->sample_max_stack = param->max_stack;

	if (opts->kernel_callchains)
		attr->exclude_callchain_user = 1;
	if (opts->user_callchains)
		attr->exclude_callchain_kernel = 1;
	if (param->record_mode == CALLCHAIN_LBR) {
		if (!opts->branch_stack) {
			if (attr->exclude_user) {
				pr_warning("LBR callstack option is only available "
					   "to get user callchain information. "
					   "Falling back to framepointers.\n");
			} else {
				evsel__set_sample_bit(evsel, BRANCH_STACK);
				attr->branch_sample_type = PERF_SAMPLE_BRANCH_USER |
							PERF_SAMPLE_BRANCH_CALL_STACK |
							PERF_SAMPLE_BRANCH_NO_CYCLES |
							PERF_SAMPLE_BRANCH_NO_FLAGS |
							PERF_SAMPLE_BRANCH_HW_INDEX;
			}
		} else
			pr_warning("Cannot use LBR callstack with branch stack. "
				   "Falling back to framepointers.\n");
	}

	if (param->record_mode == CALLCHAIN_DWARF) {
		if (!function) {
			evsel__set_sample_bit(evsel, REGS_USER);
			evsel__set_sample_bit(evsel, STACK_USER);
			if (opts->sample_user_regs &&
			    DWARF_MINIMAL_REGS(arch) != arch__user_reg_mask()) {
				attr->sample_regs_user |= DWARF_MINIMAL_REGS(arch);
				pr_warning("WARNING: The use of --call-graph=dwarf may require all the user registers, "
					   "specifying a subset with --user-regs may render DWARF unwinding unreliable, "
					   "so the minimal registers set (IP, SP) is explicitly forced.\n");
			} else {
				attr->sample_regs_user |= arch__user_reg_mask();
			}
			attr->sample_stack_user = param->dump_size;
			attr->exclude_callchain_user = 1;
		} else {
			pr_info("Cannot use DWARF unwind for function trace event,"
				" falling back to framepointers.\n");
		}
	}

	if (function) {
		pr_info("Disabling user space callchains for function trace event.\n");
		attr->exclude_callchain_user = 1;
	}
}

void evsel__config_callchain(struct evsel *evsel, struct record_opts *opts,
			     struct callchain_param *param)
{
	if (param->enabled)
		return __evsel__config_callchain(evsel, opts, param);
}

static void evsel__reset_callgraph(struct evsel *evsel, struct callchain_param *param)
{
	struct perf_event_attr *attr = &evsel->core.attr;

	evsel__reset_sample_bit(evsel, CALLCHAIN);
	if (param->record_mode == CALLCHAIN_LBR) {
		evsel__reset_sample_bit(evsel, BRANCH_STACK);
		attr->branch_sample_type &= ~(PERF_SAMPLE_BRANCH_USER |
					      PERF_SAMPLE_BRANCH_CALL_STACK |
					      PERF_SAMPLE_BRANCH_HW_INDEX);
	}
	if (param->record_mode == CALLCHAIN_DWARF) {
		evsel__reset_sample_bit(evsel, REGS_USER);
		evsel__reset_sample_bit(evsel, STACK_USER);
	}
}

static void evsel__apply_config_terms(struct evsel *evsel,
				      struct record_opts *opts, bool track)
{
	struct evsel_config_term *term;
	struct list_head *config_terms = &evsel->config_terms;
	struct perf_event_attr *attr = &evsel->core.attr;
	/* callgraph default */
	struct callchain_param param = {
		.record_mode = callchain_param.record_mode,
	};
	u32 dump_size = 0;
	int max_stack = 0;
	const char *callgraph_buf = NULL;

	list_for_each_entry(term, config_terms, list) {
		switch (term->type) {
		case EVSEL__CONFIG_TERM_PERIOD:
			if (!(term->weak && opts->user_interval != ULLONG_MAX)) {
				attr->sample_period = term->val.period;
				attr->freq = 0;
				evsel__reset_sample_bit(evsel, PERIOD);
			}
			break;
		case EVSEL__CONFIG_TERM_FREQ:
			if (!(term->weak && opts->user_freq != UINT_MAX)) {
				attr->sample_freq = term->val.freq;
				attr->freq = 1;
				evsel__set_sample_bit(evsel, PERIOD);
			}
			break;
		case EVSEL__CONFIG_TERM_TIME:
			if (term->val.time)
				evsel__set_sample_bit(evsel, TIME);
			else
				evsel__reset_sample_bit(evsel, TIME);
			break;
		case EVSEL__CONFIG_TERM_CALLGRAPH:
			callgraph_buf = term->val.str;
			break;
		case EVSEL__CONFIG_TERM_BRANCH:
			if (term->val.str && strcmp(term->val.str, "no")) {
				evsel__set_sample_bit(evsel, BRANCH_STACK);
				parse_branch_str(term->val.str,
						 &attr->branch_sample_type);
			} else
				evsel__reset_sample_bit(evsel, BRANCH_STACK);
			break;
		case EVSEL__CONFIG_TERM_STACK_USER:
			dump_size = term->val.stack_user;
			break;
		case EVSEL__CONFIG_TERM_MAX_STACK:
			max_stack = term->val.max_stack;
			break;
		case EVSEL__CONFIG_TERM_MAX_EVENTS:
			evsel->max_events = term->val.max_events;
			break;
		case EVSEL__CONFIG_TERM_INHERIT:
			/*
			 * attr->inherit should have already been set by
			 * evsel__config. If the user explicitly set
			 * inherit using config terms, override the global
			 * opt->no_inherit setting.
			 */
			attr->inherit = term->val.inherit ? 1 : 0;
			break;
		case EVSEL__CONFIG_TERM_OVERWRITE:
			attr->write_backward = term->val.overwrite ? 1 : 0;
			break;
		case EVSEL__CONFIG_TERM_DRV_CFG:
			break;
		case EVSEL__CONFIG_TERM_PERCORE:
			break;
		case EVSEL__CONFIG_TERM_AUX_OUTPUT:
			attr->aux_output = term->val.aux_output ? 1 : 0;
			break;
		case EVSEL__CONFIG_TERM_AUX_SAMPLE_SIZE:
			/* Already applied by auxtrace */
			break;
		case EVSEL__CONFIG_TERM_CFG_CHG:
			break;
		default:
			break;
		}
	}

	/* User explicitly set per-event callgraph, clear the old setting and reset. */
	if ((callgraph_buf != NULL) || (dump_size > 0) || max_stack) {
		bool sample_address = false;

		if (max_stack) {
			param.max_stack = max_stack;
			if (callgraph_buf == NULL)
				callgraph_buf = "fp";
		}

		/* parse callgraph parameters */
		if (callgraph_buf != NULL) {
			if (!strcmp(callgraph_buf, "no")) {
				param.enabled = false;
				param.record_mode = CALLCHAIN_NONE;
			} else {
				param.enabled = true;
				if (parse_callchain_record(callgraph_buf, &param)) {
					pr_err("per-event callgraph setting for %s failed. "
					       "Apply callgraph global setting for it\n",
					       evsel->name);
					return;
				}
				if (param.record_mode == CALLCHAIN_DWARF)
					sample_address = true;
			}
		}
		if (dump_size > 0) {
			dump_size = round_up(dump_size, sizeof(u64));
			param.dump_size = dump_size;
		}

		/* If global callgraph set, clear it */
		if (callchain_param.enabled)
			evsel__reset_callgraph(evsel, &callchain_param);

		/* set perf-event callgraph */
		if (param.enabled) {
			if (sample_address) {
				evsel__set_sample_bit(evsel, ADDR);
				evsel__set_sample_bit(evsel, DATA_SRC);
				evsel->core.attr.mmap_data = track;
			}
			evsel__config_callchain(evsel, opts, &param);
		}
	}
}

struct evsel_config_term *__evsel__get_config_term(struct evsel *evsel, enum evsel_term_type type)
{
	struct evsel_config_term *term, *found_term = NULL;

	list_for_each_entry(term, &evsel->config_terms, list) {
		if (term->type == type)
			found_term = term;
	}

	return found_term;
}

void __weak arch_evsel__set_sample_weight(struct evsel *evsel)
{
	evsel__set_sample_bit(evsel, WEIGHT);
}

void __weak arch__post_evsel_config(struct evsel *evsel __maybe_unused,
				    struct perf_event_attr *attr __maybe_unused)
{
}

static void evsel__set_default_freq_period(struct record_opts *opts,
					   struct perf_event_attr *attr)
{
	if (opts->freq) {
		attr->freq = 1;
		attr->sample_freq = opts->freq;
	} else {
		attr->sample_period = opts->default_interval;
	}
}

static bool evsel__is_offcpu_event(struct evsel *evsel)
{
	return evsel__is_bpf_output(evsel) && evsel__name_is(evsel, OFFCPU_EVENT);
}

/*
 * The enable_on_exec/disabled value strategy:
 *
 *  1) For any type of traced program:
 *    - all independent events and group leaders are disabled
 *    - all group members are enabled
 *
 *     Group members are ruled by group leaders. They need to
 *     be enabled, because the group scheduling relies on that.
 *
 *  2) For traced programs executed by perf:
 *    - all independent events and group leaders have
 *      enable_on_exec set
 *    - we don't specifically enable or disable any event during
 *      the record command
 *
 *     Independent events and group leaders are initially disabled
 *     and get enabled by exec. Group members are ruled by group
 *     leaders as stated in 1).
 *
 *  3) For traced programs attached by perf (pid/tid):
 *    - we specifically enable or disable all events during
 *      the record command
 *
 *     When attaching events to already running traced programs we
 *     enable/disable events specifically, as there's no
 *     initial traced exec call.
 */
void evsel__config(struct evsel *evsel, struct record_opts *opts,
		   struct callchain_param *callchain)
{
	struct evsel *leader = evsel__leader(evsel);
	struct perf_event_attr *attr = &evsel->core.attr;
	int track = evsel->tracking;
	bool per_cpu = opts->target.default_per_cpu && !opts->target.per_thread;

	attr->sample_id_all = perf_missing_features.sample_id_all ? 0 : 1;
	attr->inherit	    = !opts->no_inherit;
	attr->write_backward = opts->overwrite ? 1 : 0;
	attr->read_format   = PERF_FORMAT_LOST;

	evsel__set_sample_bit(evsel, IP);
	evsel__set_sample_bit(evsel, TID);

	if (evsel->sample_read) {
		evsel__set_sample_bit(evsel, READ);

		/*
		 * We need ID even in the case of a single event, because
		 * PERF_SAMPLE_READ processes ID-specific data.
		 */
		evsel__set_sample_id(evsel, false);

		/*
		 * Apply group format only if we belong to a group
		 * with more than one member.
		 */
		if (leader->core.nr_members > 1) {
			attr->read_format |= PERF_FORMAT_GROUP;
			attr->inherit = 0;
		}
	}

	/*
	 * We give some events a default sampling interval. But keep
	 * it a weak assumption overridable by the user.
	 */
	if ((evsel->is_libpfm_event && !attr->sample_period) ||
	    (!evsel->is_libpfm_event && (!attr->sample_period ||
					 opts->user_freq != UINT_MAX ||
					 opts->user_interval != ULLONG_MAX)))
		evsel__set_default_freq_period(opts, attr);

	/*
	 * If attr->freq was set (here or earlier), ask for period
	 * to be sampled.
	 */
	if (attr->freq)
		evsel__set_sample_bit(evsel, PERIOD);

	if (opts->no_samples)
		attr->sample_freq = 0;

	if (opts->inherit_stat) {
		evsel->core.attr.read_format |=
			PERF_FORMAT_TOTAL_TIME_ENABLED |
			PERF_FORMAT_TOTAL_TIME_RUNNING |
			PERF_FORMAT_ID;
		attr->inherit_stat = 1;
	}

	if (opts->sample_address) {
		evsel__set_sample_bit(evsel, ADDR);
		attr->mmap_data = track;
	}

	/*
	 * We don't allow user space callchains for the function trace
	 * event, due to issues with page faults while tracing the page
	 * fault handler and its overall trickiness.
	 */
	if (evsel__is_function_event(evsel))
		evsel->core.attr.exclude_callchain_user = 1;

	if (callchain && callchain->enabled && !evsel->no_aux_samples)
		evsel__config_callchain(evsel, opts, callchain);

	if (opts->sample_intr_regs && !evsel->no_aux_samples &&
	    !evsel__is_dummy_event(evsel)) {
		attr->sample_regs_intr = opts->sample_intr_regs;
		evsel__set_sample_bit(evsel, REGS_INTR);
	}

	if (opts->sample_user_regs && !evsel->no_aux_samples &&
	    !evsel__is_dummy_event(evsel)) {
		attr->sample_regs_user |= opts->sample_user_regs;
		evsel__set_sample_bit(evsel, REGS_USER);
	}

	if (target__has_cpu(&opts->target) || opts->sample_cpu)
		evsel__set_sample_bit(evsel, CPU);

	/*
	 * When the user explicitly disabled time don't force it here.
	 */
	if (opts->sample_time &&
	    (!perf_missing_features.sample_id_all &&
	    (!opts->no_inherit || target__has_cpu(&opts->target) || per_cpu ||
	     opts->sample_time_set)))
		evsel__set_sample_bit(evsel, TIME);

	if (opts->raw_samples && !evsel->no_aux_samples) {
		evsel__set_sample_bit(evsel, TIME);
		evsel__set_sample_bit(evsel, RAW);
		evsel__set_sample_bit(evsel, CPU);
	}

	if (opts->sample_address)
		evsel__set_sample_bit(evsel, DATA_SRC);

	if (opts->sample_phys_addr)
		evsel__set_sample_bit(evsel, PHYS_ADDR);

	if (opts->no_buffering) {
		attr->watermark = 0;
		attr->wakeup_events = 1;
	}
	if (opts->branch_stack && !evsel->no_aux_samples) {
		evsel__set_sample_bit(evsel, BRANCH_STACK);
		attr->branch_sample_type = opts->branch_stack;
	}

	if (opts->sample_weight)
		arch_evsel__set_sample_weight(evsel);

	attr->task     = track;
	attr->mmap     = track;
	attr->mmap2    = track && !perf_missing_features.mmap2;
	attr->comm     = track;
	attr->build_id = track && opts->build_id;

	/*
	 * ksymbol is tracked separately with text poke because it needs to be
	 * system wide and enabled immediately.
	 */
	if (!opts->text_poke)
		attr->ksymbol = track && !perf_missing_features.ksymbol;
	attr->bpf_event = track && !opts->no_bpf_event && !perf_missing_features.bpf;

	if (opts->record_namespaces)
		attr->namespaces = track;

	if (opts->record_cgroup) {
		attr->cgroup = track && !perf_missing_features.cgroup;
		evsel__set_sample_bit(evsel, CGROUP);
	}

	if (opts->sample_data_page_size)
		evsel__set_sample_bit(evsel, DATA_PAGE_SIZE);

	if (opts->sample_code_page_size)
		evsel__set_sample_bit(evsel, CODE_PAGE_SIZE);

	if (opts->record_switch_events)
		attr->context_switch = track;

	if (opts->sample_transaction)
		evsel__set_sample_bit(evsel, TRANSACTION);

	if (opts->running_time) {
		evsel->core.attr.read_format |=
			PERF_FORMAT_TOTAL_TIME_ENABLED |
			PERF_FORMAT_TOTAL_TIME_RUNNING;
	}

	/*
	 * XXX see the function comment above
	 *
	 * Disabling only independent events or group leaders,
	 * keeping group members enabled.
	 */
	if (evsel__is_group_leader(evsel))
		attr->disabled = 1;

	/*
	 * Setting enable_on_exec for independent events and
	 * group leaders for traced programs executed by perf.
	 */
	if (target__none(&opts->target) && evsel__is_group_leader(evsel) &&
	    !opts->target.initial_delay)
		attr->enable_on_exec = 1;

	if (evsel->immediate) {
		attr->disabled = 0;
		attr->enable_on_exec = 0;
	}

	clockid = opts->clockid;
	if (opts->use_clockid) {
		attr->use_clockid = 1;
		attr->clockid = opts->clockid;
	}

	if (evsel->precise_max)
		attr->precise_ip = 3;

	if (opts->all_user) {
		attr->exclude_kernel = 1;
		attr->exclude_user   = 0;
	}

	if (opts->all_kernel) {
		attr->exclude_kernel = 0;
		attr->exclude_user   = 1;
	}

	if (evsel->core.own_cpus || evsel->unit)
		evsel->core.attr.read_format |= PERF_FORMAT_ID;

	/*
	 * Apply event-specific term settings; they overload any global
	 * configuration.
	 */
	evsel__apply_config_terms(evsel, opts, track);

	evsel->ignore_missing_thread = opts->ignore_missing_thread;

	/* The --period option takes precedence. */
	if (opts->period_set) {
		if (opts->period)
			evsel__set_sample_bit(evsel, PERIOD);
		else
			evsel__reset_sample_bit(evsel, PERIOD);
	}

	/*
	 * A dummy event never triggers any actual counter and therefore
	 * cannot be used with branch_stack.
	 *
	 * For initial_delay, a dummy event is added implicitly.
	 * The software event will error out with -EOPNOTSUPP
	 * if the BRANCH_STACK bit is set.
	 */
	if (evsel__is_dummy_event(evsel))
		evsel__reset_sample_bit(evsel, BRANCH_STACK);

	if (evsel__is_offcpu_event(evsel))
		evsel->core.attr.sample_type &= OFFCPU_SAMPLE_TYPES;

	arch__post_evsel_config(evsel, attr);
}

int evsel__set_filter(struct evsel *evsel, const char *filter)
{
	char *new_filter = strdup(filter);

	if (new_filter != NULL) {
		free(evsel->filter);
		evsel->filter = new_filter;
		return 0;
	}

	return -1;
}

static int evsel__append_filter(struct evsel *evsel, const char *fmt, const char *filter)
{
	char *new_filter;

	if (evsel->filter == NULL)
		return evsel__set_filter(evsel, filter);

	if (asprintf(&new_filter, fmt, evsel->filter, filter) > 0) {
		free(evsel->filter);
		evsel->filter = new_filter;
		return 0;
	}

	return -1;
}

int evsel__append_tp_filter(struct evsel *evsel, const char *filter)
{
	return evsel__append_filter(evsel, "(%s) && (%s)", filter);
}

int evsel__append_addr_filter(struct evsel *evsel, const char *filter)
{
	return evsel__append_filter(evsel, "%s,%s", filter);
}

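/*
 * Example: appending "b" to an existing tracepoint filter "a" yields
 * "(a) && (b)", while address filters are simply joined as "a,b".
 */
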
/* Caller has to clear disabled after going through all CPUs. */
int evsel__enable_cpu(struct evsel *evsel, int cpu_map_idx)
{
	return perf_evsel__enable_cpu(&evsel->core, cpu_map_idx);
}

int evsel__enable(struct evsel *evsel)
{
	int err = perf_evsel__enable(&evsel->core);

	if (!err)
		evsel->disabled = false;
	return err;
}

/* Caller has to set disabled after going through all CPUs. */
int evsel__disable_cpu(struct evsel *evsel, int cpu_map_idx)
{
	return perf_evsel__disable_cpu(&evsel->core, cpu_map_idx);
}

int evsel__disable(struct evsel *evsel)
{
	int err = perf_evsel__disable(&evsel->core);
	/*
	 * We mark it disabled here so that tools that disable an event can
	 * ignore events after they disable it. I.e. the ring buffer may have
	 * already a few more events queued up before the kernel got the stop
	 * request.
	 */
	if (!err)
		evsel->disabled = true;

	return err;
}

void free_config_terms(struct list_head *config_terms)
{
	struct evsel_config_term *term, *h;

	list_for_each_entry_safe(term, h, config_terms, list) {
		list_del_init(&term->list);
		if (term->free_str)
			zfree(&term->val.str);
		free(term);
	}
}

static void evsel__free_config_terms(struct evsel *evsel)
{
	free_config_terms(&evsel->config_terms);
}

void evsel__exit(struct evsel *evsel)
{
	assert(list_empty(&evsel->core.node));
	assert(evsel->evlist == NULL);
	bpf_counter__destroy(evsel);
	perf_bpf_filter__destroy(evsel);
	evsel__free_counts(evsel);
	perf_evsel__free_fd(&evsel->core);
	perf_evsel__free_id(&evsel->core);
	evsel__free_config_terms(evsel);
	cgroup__put(evsel->cgrp);
	perf_cpu_map__put(evsel->core.cpus);
	perf_cpu_map__put(evsel->core.own_cpus);
	perf_thread_map__put(evsel->core.threads);
	zfree(&evsel->group_name);
	zfree(&evsel->name);
	zfree(&evsel->filter);
	zfree(&evsel->pmu_name);
	zfree(&evsel->group_pmu_name);
	zfree(&evsel->unit);
	zfree(&evsel->metric_id);
	evsel__zero_per_pkg(evsel);
	hashmap__free(evsel->per_pkg_mask);
	evsel->per_pkg_mask = NULL;
	zfree(&evsel->metric_events);
	perf_evsel__object.fini(evsel);
	if (evsel->tool_event == PERF_TOOL_SYSTEM_TIME ||
	    evsel->tool_event == PERF_TOOL_USER_TIME)
		xyarray__delete(evsel->start_times);
}

void evsel__delete(struct evsel *evsel)
{
	if (!evsel)
		return;

	evsel__exit(evsel);
	free(evsel);
}

void evsel__compute_deltas(struct evsel *evsel, int cpu_map_idx, int thread,
			   struct perf_counts_values *count)
{
	struct perf_counts_values tmp;

	if (!evsel->prev_raw_counts)
		return;

	tmp = *perf_counts(evsel->prev_raw_counts, cpu_map_idx, thread);
	*perf_counts(evsel->prev_raw_counts, cpu_map_idx, thread) = *count;

	count->val = count->val - tmp.val;
	count->ena = count->ena - tmp.ena;
	count->run = count->run - tmp.run;
}

static int evsel__read_one(struct evsel *evsel, int cpu_map_idx, int thread)
{
	struct perf_counts_values *count = perf_counts(evsel->counts, cpu_map_idx, thread);

	return perf_evsel__read(&evsel->core, cpu_map_idx, thread, count);
}

static void evsel__set_count(struct evsel *counter, int cpu_map_idx, int thread,
			     u64 val, u64 ena, u64 run, u64 lost)
{
	struct perf_counts_values *count;

	count = perf_counts(counter->counts, cpu_map_idx, thread);

	count->val  = val;
	count->ena  = ena;
	count->run  = run;
	count->lost = lost;

	perf_counts__set_loaded(counter->counts, cpu_map_idx, thread, true);
}

static int evsel__process_group_data(struct evsel *leader, int cpu_map_idx, int thread, u64 *data)
{
	u64 read_format = leader->core.attr.read_format;
	struct sample_read_value *v;
	u64 nr, ena = 0, run = 0, lost = 0;

	nr = *data++;

	if (nr != (u64) leader->core.nr_members)
		return -EINVAL;

	if (read_format & PERF_FORMAT_TOTAL_TIME_ENABLED)
		ena = *data++;

	if (read_format & PERF_FORMAT_TOTAL_TIME_RUNNING)
		run = *data++;

	v = (void *)data;
	sample_read_group__for_each(v, nr, read_format) {
		struct evsel *counter;

		counter = evlist__id2evsel(leader->evlist, v->id);
		if (!counter)
			return -EINVAL;

		if (read_format & PERF_FORMAT_LOST)
			lost = v->lost;

		evsel__set_count(counter, cpu_map_idx, thread, v->value, ena, run, lost);
	}

	return 0;
}

static int evsel__read_group(struct evsel *leader, int cpu_map_idx, int thread)
{
	struct perf_stat_evsel *ps = leader->stats;
	u64 read_format = leader->core.attr.read_format;
	int size = perf_evsel__read_size(&leader->core);
	u64 *data = ps->group_data;

	if (!(read_format & PERF_FORMAT_ID))
		return -EINVAL;

	if (!evsel__is_group_leader(leader))
		return -EINVAL;

	if (!data) {
		data = zalloc(size);
		if (!data)
			return -ENOMEM;

		ps->group_data = data;
	}

	if (FD(leader, cpu_map_idx, thread) < 0)
		return -EINVAL;

	if (readn(FD(leader, cpu_map_idx, thread), data, size) <= 0)
		return -errno;

	return evsel__process_group_data(leader, cpu_map_idx, thread, data);
}

static bool read_until_char(struct io *io, char e)
{
	int c;

	do {
		c = io__get_char(io);
		if (c == -1)
			return false;
	} while (c != e);
	return true;
}

static int read_stat_field(int fd, struct perf_cpu cpu, int field, __u64 *val)
{
	char buf[256];
	struct io io;
	int i;

	io__init(&io, fd, buf, sizeof(buf));

	/* Skip lines to relevant CPU. */
	for (i = -1; i < cpu.cpu; i++) {
		if (!read_until_char(&io, '\n'))
			return -EINVAL;
	}
	/* Skip to "cpu". */
	if (io__get_char(&io) != 'c') return -EINVAL;
	if (io__get_char(&io) != 'p') return -EINVAL;
	if (io__get_char(&io) != 'u') return -EINVAL;

	/* Skip N of cpuN. */
	if (!read_until_char(&io, ' '))
		return -EINVAL;

	i = 1;
	while (true) {
		if (io__get_dec(&io, val) != ' ')
			break;
		if (field == i)
			return 0;
		i++;
	}
	return -EINVAL;
}

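/*
 * Example /proc/stat line this parses (illustrative values, fields in
 * clock ticks):
 *   cpu0 4705 356 584 3699176 ...
 * For cpu 0, read_stat_field(fd, cpu, 1, &val) stores the user time
 * (4705) in val; field 3 would yield the system time (584).
 */
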
static int read_pid_stat_field(int fd, int field, __u64 *val)
{
	char buf[256];
	struct io io;
	int c, i;

	io__init(&io, fd, buf, sizeof(buf));
	if (io__get_dec(&io, val) != ' ')
		return -EINVAL;
	if (field == 1)
		return 0;

	/* Skip comm. */
	if (io__get_char(&io) != '(' || !read_until_char(&io, ')'))
		return -EINVAL;
	if (field == 2)
		return -EINVAL; /* String can't be returned. */

	/* Skip state. */
	if (io__get_char(&io) != ' ' || io__get_char(&io) == -1)
		return -EINVAL;
	if (field == 3)
		return -EINVAL; /* String can't be returned. */

	/* Loop over numeric fields. */
	if (io__get_char(&io) != ' ')
		return -EINVAL;

	i = 4;
	while (true) {
		c = io__get_dec(&io, val);
		if (c == -1)
			return -EINVAL;
		if (c == -2) {
			/* Assume a -ve was read */
			c = io__get_dec(&io, val);
			*val *= -1;
		}
		if (c != ' ')
			return -EINVAL;
		if (field == i)
			return 0;
		i++;
	}
	return -EINVAL;
}

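/*
 * Example /proc/<pid>/stat prefix this parses (illustrative values):
 *   1234 (sh) S 1 ...
 * Field 1 is the pid; fields 2 (comm) and 3 (state) are strings and
 * cannot be returned; fields 14 and 15 are utime and stime in ticks.
 */
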
1713static int evsel__read_tool(struct evsel *evsel, int cpu_map_idx, int thread)
1714{
1715 __u64 *start_time, cur_time, delta_start;
1716 int fd, err = 0;
1717 struct perf_counts_values *count;
1718 bool adjust = false;
1719
1720 count = perf_counts(evsel->counts, cpu_map_idx, thread);
1721
1722 switch (evsel->tool_event) {
1723 case PERF_TOOL_DURATION_TIME:
1724 /*
1725 * Pretend duration_time is only on the first CPU and thread, or
1726 * else aggregation will scale duration_time by the number of
1727 * CPUs/threads.
1728 */
1729 start_time = &evsel->start_time;
1730 if (cpu_map_idx == 0 && thread == 0)
1731 cur_time = rdclock();
1732 else
1733 cur_time = *start_time;
1734 break;
1735 case PERF_TOOL_USER_TIME:
1736 case PERF_TOOL_SYSTEM_TIME: {
1737 bool system = evsel->tool_event == PERF_TOOL_SYSTEM_TIME;
1738
1739 start_time = xyarray__entry(evsel->start_times, cpu_map_idx, thread);
1740 fd = FD(evsel, cpu_map_idx, thread);
1741 lseek(fd, SEEK_SET, 0);
1742 if (evsel->pid_stat) {
1743 /* The event exists solely on 1 CPU. */
1744 if (cpu_map_idx == 0)
1745 err = read_pid_stat_field(fd, system ? 15 : 14, &cur_time);
1746 else
1747 cur_time = 0;
1748 } else {
1749 /* The event is for all threads. */
1750 if (thread == 0) {
1751 struct perf_cpu cpu = perf_cpu_map__cpu(evsel->core.cpus,
1752 cpu_map_idx);
1753
1754 err = read_stat_field(fd, cpu, system ? 3 : 1, &cur_time);
1755 } else {
1756 cur_time = 0;
1757 }
1758 }
1759 adjust = true;
1760 break;
1761 }
1762 case PERF_TOOL_NONE:
1763 case PERF_TOOL_MAX:
1764 default:
1765 err = -EINVAL;
1766 }
1767 if (err)
1768 return err;
1769
1770 delta_start = cur_time - *start_time;
1771 if (adjust) {
1772 __u64 ticks_per_sec = sysconf(_SC_CLK_TCK);
1773
1774 delta_start *= 1000000000 / ticks_per_sec;
1775 }
1776 count->val = delta_start;
1777 count->ena = count->run = delta_start;
1778 count->lost = 0;
1779 return 0;
1780}
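/*
 * A worked example of the tick conversion above, assuming the common
 * sysconf(_SC_CLK_TCK) value of 100: each tick is 1000000000 / 100 =
 * 10000000 ns, so a delta of 250 ticks is reported as 2500000000 ns
 * (2.5s). The division is exact whenever ticks_per_sec divides 10^9,
 * as the usual 100 and 1000 rates do.
 */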
1781
da8c94c0 1782int evsel__read_counter(struct evsel *evsel, int cpu_map_idx, int thread)
f7794d52 1783{
6828d692
IR
1784 if (evsel__is_tool(evsel))
1785 return evsel__read_tool(evsel, cpu_map_idx, thread);
f7794d52 1786
6828d692 1787 if (evsel->core.attr.read_format & PERF_FORMAT_GROUP)
da8c94c0 1788 return evsel__read_group(evsel, cpu_map_idx, thread);
ea089692 1789
da8c94c0 1790 return evsel__read_one(evsel, cpu_map_idx, thread);
f7794d52
JO
1791}
1792
da8c94c0 1793int __evsel__read_on_cpu(struct evsel *evsel, int cpu_map_idx, int thread, bool scale)
c52b12ed
ACM
1794{
1795 struct perf_counts_values count;
1796 size_t nv = scale ? 3 : 1;
1797
da8c94c0 1798 if (FD(evsel, cpu_map_idx, thread) < 0)
c52b12ed
ACM
1799 return -EINVAL;
1800
2ca0a371 1801 if (evsel->counts == NULL && evsel__alloc_counts(evsel) < 0)
4eed11d5
ACM
1802 return -ENOMEM;
1803
da8c94c0 1804 if (readn(FD(evsel, cpu_map_idx, thread), &count, nv * sizeof(u64)) <= 0)
c52b12ed
ACM
1805 return -errno;
1806
da8c94c0 1807 evsel__compute_deltas(evsel, cpu_map_idx, thread, &count);
13112bbf 1808 perf_counts_values__scale(&count, scale, NULL);
da8c94c0 1809 *perf_counts(evsel->counts, cpu_map_idx, thread) = count;
c52b12ed
ACM
1810 return 0;
1811}
1812
1fcc57b7 1813static int evsel__match_other_cpu(struct evsel *evsel, struct evsel *other,
2daa08c4 1814 int cpu_map_idx)
1fcc57b7 1815{
6d18804b 1816 struct perf_cpu cpu;
1fcc57b7 1817
2daa08c4
IR
1818 cpu = perf_cpu_map__cpu(evsel->core.cpus, cpu_map_idx);
1819 return perf_cpu_map__idx(other->core.cpus, cpu);
1fcc57b7
JY
1820}
1821
2daa08c4 1822static int evsel__hybrid_group_cpu_map_idx(struct evsel *evsel, int cpu_map_idx)
1fcc57b7 1823{
fba7c866 1824 struct evsel *leader = evsel__leader(evsel);
1fcc57b7
JY
1825
1826 if ((evsel__is_hybrid(evsel) && !evsel__is_hybrid(leader)) ||
1827 (!evsel__is_hybrid(evsel) && evsel__is_hybrid(leader))) {
2daa08c4 1828 return evsel__match_other_cpu(evsel, leader, cpu_map_idx);
1fcc57b7
JY
1829 }
1830
2daa08c4 1831 return cpu_map_idx;
1fcc57b7
JY
1832}
1833
2daa08c4 1834static int get_group_fd(struct evsel *evsel, int cpu_map_idx, int thread)
6a4bb04c 1835{
fba7c866 1836 struct evsel *leader = evsel__leader(evsel);
6a4bb04c
JO
1837 int fd;
1838
c754c382 1839 if (evsel__is_group_leader(evsel))
6a4bb04c
JO
1840 return -1;
1841
1842 /*
1843	 * The leader must already be processed/opened;
1844	 * if not, it's a bug.
1845 */
9dfcb759 1846 BUG_ON(!leader->core.fd);
6a4bb04c 1847
2daa08c4
IR
1848 cpu_map_idx = evsel__hybrid_group_cpu_map_idx(evsel, cpu_map_idx);
1849 if (cpu_map_idx == -1)
1fcc57b7
JY
1850 return -1;
1851
2daa08c4 1852 fd = FD(leader, cpu_map_idx, thread);
1b114824 1853 BUG_ON(fd == -1 && !leader->skippable);
6a4bb04c 1854
1b114824
IR
1855 /*
1856	 * When the leader has been skipped, return -2 to distinguish this
1857	 * from the no-group-leader case.
1858 */
1859 return fd == -1 ? -2 : fd;
6a4bb04c
JO
1860}
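/*
 * A minimal sketch of the resulting contract, as consumed by the open
 * loop in evsel__open_cpu() below: -1 means "no group" and is handed to
 * sys_perf_event_open() unchanged, -2 means a skippable leader failed
 * to open so the member must not be opened either, and any other value
 * is the leader's fd to group under.
 */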
1861
56933029 1862static void evsel__remove_fd(struct evsel *pos, int nr_cpus, int nr_threads, int thread_idx)
ca800068
MZ
1863{
1864 for (int cpu = 0; cpu < nr_cpus; cpu++)
1865 for (int thread = thread_idx; thread < nr_threads - 1; thread++)
1866 FD(pos, cpu, thread) = FD(pos, cpu, thread + 1);
1867}
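/*
 * Example: with 4 threads and thread_idx == 1, the fds in thread slots
 * 2 and 3 shift left into slots 1 and 2 on every CPU row, squeezing out
 * the failed thread's column before the thread map itself shrinks.
 */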
1868
32dcd021 1869static int update_fds(struct evsel *evsel,
6f844b1f 1870 int nr_cpus, int cpu_map_idx,
ca800068
MZ
1871 int nr_threads, int thread_idx)
1872{
32dcd021 1873 struct evsel *pos;
ca800068 1874
6f844b1f 1875 if (cpu_map_idx >= nr_cpus || thread_idx >= nr_threads)
ca800068
MZ
1876 return -EINVAL;
1877
1878 evlist__for_each_entry(evsel->evlist, pos) {
6f844b1f 1879 nr_cpus = pos != evsel ? nr_cpus : cpu_map_idx;
ca800068 1880
56933029 1881 evsel__remove_fd(pos, nr_cpus, nr_threads, thread_idx);
ca800068
MZ
1882
1883 /*
1884		 * Since the fds for the next evsel have not been created,
1885		 * there is no need to iterate over the whole event list.
1886 */
1887 if (pos == evsel)
1888 break;
1889 }
1890 return 0;
1891}
1892
1fa497d4 1893static bool evsel__ignore_missing_thread(struct evsel *evsel,
6f844b1f 1894 int nr_cpus, int cpu_map_idx,
1fa497d4
IR
1895 struct perf_thread_map *threads,
1896 int thread, int err)
a359c17a 1897{
a2f354e3 1898 pid_t ignore_pid = perf_thread_map__pid(threads, thread);
ca800068 1899
a359c17a
JO
1900 if (!evsel->ignore_missing_thread)
1901 return false;
1902
1903 /* The system wide setup does not work with threads. */
648b5af3 1904 if (evsel->core.system_wide)
a359c17a
JO
1905 return false;
1906
1907 /* The -ESRCH is perf event syscall errno for pid's not found. */
1908 if (err != -ESRCH)
1909 return false;
1910
1911 /* If there's only one thread, let it fail. */
1912 if (threads->nr == 1)
1913 return false;
1914
ca800068
MZ
1915 /*
1916 * We should remove fd for missing_thread first
1917 * because thread_map__remove() will decrease threads->nr.
1918 */
6f844b1f 1919 if (update_fds(evsel, nr_cpus, cpu_map_idx, threads->nr, thread))
ca800068
MZ
1920 return false;
1921
a359c17a
JO
1922 if (thread_map__remove(threads, thread))
1923 return false;
1924
1925 pr_warning("WARNING: Ignored open failure for pid %d\n",
ca800068 1926 ignore_pid);
a359c17a
JO
1927 return true;
1928}
1929
ca125277
ACM
1930static int __open_attr__fprintf(FILE *fp, const char *name, const char *val,
1931 void *priv __maybe_unused)
1932{
1933 return fprintf(fp, " %-32s %s\n", name, val);
1934}
1935
4e8a5c15
JO
1936static void display_attr(struct perf_event_attr *attr)
1937{
ccd26741 1938 if (verbose >= 2 || debug_peo_args) {
4e8a5c15
JO
1939 fprintf(stderr, "%.60s\n", graph_dotted_line);
1940 fprintf(stderr, "perf_event_attr:\n");
1941 perf_event_attr__fprintf(stderr, attr, __open_attr__fprintf, NULL);
1942 fprintf(stderr, "%.60s\n", graph_dotted_line);
1943 }
1944}
1945
28667a52 1946bool evsel__precise_ip_fallback(struct evsel *evsel)
4e8a5c15 1947{
28667a52
RM
1948 /* Do not try less precise if not requested. */
1949 if (!evsel->precise_max)
1950 return false;
4e8a5c15 1951
28667a52
RM
1952 /*
1953 * We tried all the precise_ip values, and it's
1954 * still failing, so leave it to standard fallback.
1955 */
1956 if (!evsel->core.attr.precise_ip) {
1957 evsel->core.attr.precise_ip = evsel->precise_ip_original;
1958 return false;
4e8a5c15
JO
1959 }
1960
28667a52
RM
1961 if (!evsel->precise_ip_original)
1962 evsel->precise_ip_original = evsel->core.attr.precise_ip;
4e8a5c15 1963
28667a52
RM
1964 evsel->core.attr.precise_ip--;
1965 pr_debug2_peo("decreasing precise_ip by one (%d)\n", evsel->core.attr.precise_ip);
1966 display_attr(&evsel->core.attr);
1967 return true;
1968}
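/*
 * Sketch of the resulting retry sequence (assuming the event was
 * requested with the 'P' modifier, which sets precise_max):
 * attr.precise_ip walks 3 -> 2 -> 1 -> 0, one step per failed open;
 * when an open fails with precise_ip already 0, the original value is
 * restored for error reporting and the standard fallback takes over.
 */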
d45ce034
RM
1969
1970static struct perf_cpu_map *empty_cpu_map;
1971static struct perf_thread_map *empty_thread_map;
1972
1973static int __evsel__prepare_open(struct evsel *evsel, struct perf_cpu_map *cpus,
1974 struct perf_thread_map *threads)
48290609 1975{
1337b9dc 1976 int nthreads = perf_thread_map__nr(threads);
48290609 1977
acb9f2d4
ACM
1978 if ((perf_missing_features.write_backward && evsel->core.attr.write_backward) ||
1979 (perf_missing_features.aux_output && evsel->core.attr.aux_output))
32a951b4
ACM
1980 return -EINVAL;
1981
c24ae6d9 1982 if (cpus == NULL) {
c24ae6d9 1983 if (empty_cpu_map == NULL) {
48219b08 1984 empty_cpu_map = perf_cpu_map__new_any_cpu();
c24ae6d9
ACM
1985 if (empty_cpu_map == NULL)
1986 return -ENOMEM;
1987 }
1988
1989 cpus = empty_cpu_map;
1990 }
1991
1992 if (threads == NULL) {
c24ae6d9
ACM
1993 if (empty_thread_map == NULL) {
1994 empty_thread_map = thread_map__new_by_tid(-1);
1995 if (empty_thread_map == NULL)
1996 return -ENOMEM;
1997 }
1998
1999 threads = empty_thread_map;
2000 }
2001
9dfcb759 2002 if (evsel->core.fd == NULL &&
44028699 2003 perf_evsel__alloc_fd(&evsel->core, perf_cpu_map__nr(cpus), nthreads) < 0)
727ab04e 2004 return -ENOMEM;
4eed11d5 2005
6828d692
IR
2006 if ((evsel->tool_event == PERF_TOOL_SYSTEM_TIME ||
2007 evsel->tool_event == PERF_TOOL_USER_TIME) &&
2008 !evsel->start_times) {
2009 evsel->start_times = xyarray__new(perf_cpu_map__nr(cpus), nthreads, sizeof(__u64));
2010 if (!evsel->start_times)
2011 return -ENOMEM;
2012 }
2013
46def08f
RM
2014 evsel->open_flags = PERF_FLAG_FD_CLOEXEC;
2015 if (evsel->cgrp)
2016 evsel->open_flags |= PERF_FLAG_PID_CGROUP;
2017
d45ce034
RM
2018 return 0;
2019}
2020
588f4ac7 2021static void evsel__disable_missing_features(struct evsel *evsel)
d45ce034 2022{
9fbb4b02
KL
2023 if (perf_missing_features.branch_counters)
2024 evsel->core.attr.branch_sample_type &= ~PERF_SAMPLE_BRANCH_COUNTERS;
e17f343c
NK
2025 if (perf_missing_features.read_lost)
2026 evsel->core.attr.read_format &= ~PERF_FORMAT_LOST;
ea8d0ed6
KL
2027 if (perf_missing_features.weight_struct) {
2028 evsel__set_sample_bit(evsel, WEIGHT);
2029 evsel__reset_sample_bit(evsel, WEIGHT_STRUCT);
2030 }
814c8c38 2031 if (perf_missing_features.clockid_wrong)
1fc632ce 2032 evsel->core.attr.clockid = CLOCK_MONOTONIC; /* should always work */
814c8c38 2033 if (perf_missing_features.clockid) {
1fc632ce
JO
2034 evsel->core.attr.use_clockid = 0;
2035 evsel->core.attr.clockid = 0;
814c8c38 2036 }
57480d2c 2037 if (perf_missing_features.cloexec)
46def08f 2038 evsel->open_flags &= ~(unsigned long)PERF_FLAG_FD_CLOEXEC;
5c5e854b 2039 if (perf_missing_features.mmap2)
1fc632ce 2040 evsel->core.attr.mmap2 = 0;
3500eeeb 2041 if (evsel->pmu && evsel->pmu->missing_features.exclude_guest)
1fc632ce 2042 evsel->core.attr.exclude_guest = evsel->core.attr.exclude_host = 0;
bd0f8895 2043 if (perf_missing_features.lbr_flags)
1fc632ce 2044 evsel->core.attr.branch_sample_type &= ~(PERF_SAMPLE_BRANCH_NO_FLAGS |
bd0f8895 2045 PERF_SAMPLE_BRANCH_NO_CYCLES);
1fc632ce
JO
2046 if (perf_missing_features.group_read && evsel->core.attr.inherit)
2047 evsel->core.attr.read_format &= ~(PERF_FORMAT_GROUP|PERF_FORMAT_ID);
9aa0bfa3 2048 if (perf_missing_features.ksymbol)
1fc632ce 2049 evsel->core.attr.ksymbol = 0;
74a1e863 2050 if (perf_missing_features.bpf)
1fc632ce 2051 evsel->core.attr.bpf_event = 0;
d3f85437
KL
2052 if (perf_missing_features.branch_hw_idx)
2053 evsel->core.attr.branch_sample_type &= ~PERF_SAMPLE_BRANCH_HW_INDEX;
594ac61a 2054 if (perf_missing_features.sample_id_all)
1fc632ce 2055 evsel->core.attr.sample_id_all = 0;
588f4ac7
RM
2056}
2057
6efd06e3
RM
2058int evsel__prepare_open(struct evsel *evsel, struct perf_cpu_map *cpus,
2059 struct perf_thread_map *threads)
2060{
2061 int err;
2062
2063 err = __evsel__prepare_open(evsel, cpus, threads);
2064 if (err)
2065 return err;
2066
2067 evsel__disable_missing_features(evsel);
2068
2069 return err;
2070}
2071
d21fc5f0
RM
2072bool evsel__detect_missing_features(struct evsel *evsel)
2073{
2074 /*
2075 * Must probe features in the order they were added to the
2076 * perf_event_attr interface.
2077 */
9fbb4b02
KL
2078 if (!perf_missing_features.branch_counters &&
2079 (evsel->core.attr.branch_sample_type & PERF_SAMPLE_BRANCH_COUNTERS)) {
2080 perf_missing_features.branch_counters = true;
2081 pr_debug2("switching off branch counters support\n");
2082 return true;
2083 } else if (!perf_missing_features.read_lost &&
e17f343c
NK
2084 (evsel->core.attr.read_format & PERF_FORMAT_LOST)) {
2085 perf_missing_features.read_lost = true;
2086 pr_debug2("switching off PERF_FORMAT_LOST support\n");
2087 return true;
2088 } else if (!perf_missing_features.weight_struct &&
d21fc5f0
RM
2089 (evsel->core.attr.sample_type & PERF_SAMPLE_WEIGHT_STRUCT)) {
2090 perf_missing_features.weight_struct = true;
2091 pr_debug2("switching off weight struct support\n");
2092 return true;
2093 } else if (!perf_missing_features.code_page_size &&
2094 (evsel->core.attr.sample_type & PERF_SAMPLE_CODE_PAGE_SIZE)) {
2095 perf_missing_features.code_page_size = true;
2096 pr_debug2_peo("Kernel has no PERF_SAMPLE_CODE_PAGE_SIZE support, bailing out\n");
2097 return false;
2098 } else if (!perf_missing_features.data_page_size &&
2099 (evsel->core.attr.sample_type & PERF_SAMPLE_DATA_PAGE_SIZE)) {
2100 perf_missing_features.data_page_size = true;
2101 pr_debug2_peo("Kernel has no PERF_SAMPLE_DATA_PAGE_SIZE support, bailing out\n");
2102 return false;
2103 } else if (!perf_missing_features.cgroup && evsel->core.attr.cgroup) {
2104 perf_missing_features.cgroup = true;
2105 pr_debug2_peo("Kernel has no cgroup sampling support, bailing out\n");
2106 return false;
2107 } else if (!perf_missing_features.branch_hw_idx &&
2108 (evsel->core.attr.branch_sample_type & PERF_SAMPLE_BRANCH_HW_INDEX)) {
2109 perf_missing_features.branch_hw_idx = true;
2110 pr_debug2("switching off branch HW index support\n");
2111 return true;
2112 } else if (!perf_missing_features.aux_output && evsel->core.attr.aux_output) {
2113 perf_missing_features.aux_output = true;
2114 pr_debug2_peo("Kernel has no attr.aux_output support, bailing out\n");
2115 return false;
2116 } else if (!perf_missing_features.bpf && evsel->core.attr.bpf_event) {
2117 perf_missing_features.bpf = true;
2118 pr_debug2_peo("switching off bpf_event\n");
2119 return true;
2120 } else if (!perf_missing_features.ksymbol && evsel->core.attr.ksymbol) {
2121 perf_missing_features.ksymbol = true;
2122 pr_debug2_peo("switching off ksymbol\n");
2123 return true;
2124 } else if (!perf_missing_features.write_backward && evsel->core.attr.write_backward) {
2125 perf_missing_features.write_backward = true;
2126 pr_debug2_peo("switching off write_backward\n");
2127 return false;
2128 } else if (!perf_missing_features.clockid_wrong && evsel->core.attr.use_clockid) {
2129 perf_missing_features.clockid_wrong = true;
2130 pr_debug2_peo("switching off clockid\n");
2131 return true;
2132 } else if (!perf_missing_features.clockid && evsel->core.attr.use_clockid) {
2133 perf_missing_features.clockid = true;
2134 pr_debug2_peo("switching off use_clockid\n");
2135 return true;
2136 } else if (!perf_missing_features.cloexec && (evsel->open_flags & PERF_FLAG_FD_CLOEXEC)) {
2137 perf_missing_features.cloexec = true;
2138 pr_debug2_peo("switching off cloexec flag\n");
2139 return true;
2140 } else if (!perf_missing_features.mmap2 && evsel->core.attr.mmap2) {
2141 perf_missing_features.mmap2 = true;
2142 pr_debug2_peo("switching off mmap2\n");
2143 return true;
f7400262
NK
2144 } else if (evsel->core.attr.exclude_guest || evsel->core.attr.exclude_host) {
2145 if (evsel->pmu == NULL)
3500eeeb 2146 evsel->pmu = evsel__find_pmu(evsel);
f7400262
NK
2147
2148 if (evsel->pmu)
2149 evsel->pmu->missing_features.exclude_guest = true;
2150 else {
2151 /* we cannot find PMU, disable attrs now */
2152 evsel->core.attr.exclude_host = false;
2153 evsel->core.attr.exclude_guest = false;
3500eeeb
NK
2154 }
2155
2156 if (evsel->exclude_GH) {
2157 pr_debug2_peo("PMU has no exclude_host/guest support, bailing out\n");
2158 return false;
2159 }
2160 if (!perf_missing_features.exclude_guest) {
2161 perf_missing_features.exclude_guest = true;
2162 pr_debug2_peo("switching off exclude_guest, exclude_host\n");
2163 }
d21fc5f0
RM
2164 return true;
2165 } else if (!perf_missing_features.sample_id_all) {
2166 perf_missing_features.sample_id_all = true;
2167 pr_debug2_peo("switching off sample_id_all\n");
2168 return true;
2169 } else if (!perf_missing_features.lbr_flags &&
2170 (evsel->core.attr.branch_sample_type &
2171 (PERF_SAMPLE_BRANCH_NO_CYCLES |
2172 PERF_SAMPLE_BRANCH_NO_FLAGS))) {
2173 perf_missing_features.lbr_flags = true;
2174 pr_debug2_peo("switching off branch sample type no (cycles/flags)\n");
2175 return true;
2176 } else if (!perf_missing_features.group_read &&
2177 evsel->core.attr.inherit &&
2178 (evsel->core.attr.read_format & PERF_FORMAT_GROUP) &&
2179 evsel__is_group_leader(evsel)) {
2180 perf_missing_features.group_read = true;
2181 pr_debug2_peo("switching off group read\n");
2182 return true;
2183 } else {
2184 return false;
2185 }
2186}
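/*
 * Sketch of how this drives the retry flow in evsel__open_cpu() below:
 * each time sys_perf_event_open() fails with EINVAL on the very first
 * CPU/thread, one still-enabled feature is switched off (newest first)
 * and true is returned, triggering one retry per missing feature until
 * the open succeeds or false propagates the error to the caller.
 */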
2187
588f4ac7
RM
2188static int evsel__open_cpu(struct evsel *evsel, struct perf_cpu_map *cpus,
2189 struct perf_thread_map *threads,
6f844b1f 2190 int start_cpu_map_idx, int end_cpu_map_idx)
588f4ac7 2191{
6f844b1f 2192 int idx, thread, nthreads;
588f4ac7 2193 int pid = -1, err, old_errno;
71efc48a 2194 enum rlimit_action set_rlimit = NO_CHANGE;
588f4ac7 2195
6828d692
IR
2196 if (evsel->tool_event == PERF_TOOL_DURATION_TIME) {
2197 if (evsel->core.attr.sample_period) /* no sampling */
2198 return -EINVAL;
2199 evsel->start_time = rdclock();
2200 return 0;
2201 }
2202
588f4ac7
RM
2203 err = __evsel__prepare_open(evsel, cpus, threads);
2204 if (err)
2205 return err;
2206
2207 if (cpus == NULL)
2208 cpus = empty_cpu_map;
2209
2210 if (threads == NULL)
2211 threads = empty_thread_map;
2212
1337b9dc 2213 nthreads = perf_thread_map__nr(threads);
588f4ac7
RM
2214
2215 if (evsel->cgrp)
2216 pid = evsel->cgrp->fd;
2217
2218fallback_missing_features:
2219 evsel__disable_missing_features(evsel);
594ac61a 2220
1f4326bf 2221 pr_debug3("Opening: %s\n", evsel__name(evsel));
1fc632ce 2222 display_attr(&evsel->core.attr);
e3e1a54f 2223
6f844b1f 2224 for (idx = start_cpu_map_idx; idx < end_cpu_map_idx; idx++) {
9d04f178 2225
bf8e8f4b 2226 for (thread = 0; thread < nthreads; thread++) {
83c2e4f3 2227 int fd, group_fd;
da7c3b46
RM
2228retry_open:
2229 if (thread >= nthreads)
2230 break;
023695d9 2231
648b5af3 2232 if (!evsel->cgrp && !evsel->core.system_wide)
a2f354e3 2233 pid = perf_thread_map__pid(threads, thread);
023695d9 2234
6828d692
IR
2235 if (evsel->tool_event == PERF_TOOL_USER_TIME ||
2236 evsel->tool_event == PERF_TOOL_SYSTEM_TIME) {
2237 bool system = evsel->tool_event == PERF_TOOL_SYSTEM_TIME;
2238 __u64 *start_time = NULL;
2239
2240 if (evsel->core.attr.sample_period) {
2241 /* no sampling */
2242 err = -EINVAL;
2243 goto out_close;
2244 }
2245 if (pid > -1) {
2246 char buf[64];
2247
2248 snprintf(buf, sizeof(buf), "/proc/%d/stat", pid);
2249 fd = open(buf, O_RDONLY);
2250 evsel->pid_stat = true;
2251 } else {
2252 fd = open("/proc/stat", O_RDONLY);
2253 }
2254 FD(evsel, idx, thread) = fd;
2255 if (fd < 0) {
2256 err = -errno;
2257 goto out_close;
2258 }
2259 start_time = xyarray__entry(evsel->start_times, idx, thread);
2260 if (pid > -1) {
2261 err = read_pid_stat_field(fd, system ? 15 : 14,
2262 start_time);
2263 } else {
2264 struct perf_cpu cpu;
2265
2266 cpu = perf_cpu_map__cpu(evsel->core.cpus, idx);
2267 err = read_stat_field(fd, cpu, system ? 3 : 1,
2268 start_time);
2269 }
2270 if (err)
2271 goto out_close;
2272 continue;
2273 }
2274
6f844b1f 2275 group_fd = get_group_fd(evsel, idx, thread);
da7c3b46 2276
1b114824
IR
2277 if (group_fd == -2) {
2278 pr_debug("broken group leader for %s\n", evsel->name);
2279 err = -EINVAL;
2280 goto out_close;
2281 }
2282
10213e2f
JO
2283 test_attr__ready();
2284
da406202 2285 /* Debug message used by test scripts */
28667a52 2286 pr_debug2_peo("sys_perf_event_open: pid %d cpu %d group_fd %d flags %#lx",
44028699 2287 pid, perf_cpu_map__cpu(cpus, idx).cpu, group_fd, evsel->open_flags);
28667a52 2288
44028699
IR
2289 fd = sys_perf_event_open(&evsel->core.attr, pid,
2290 perf_cpu_map__cpu(cpus, idx).cpu,
28667a52 2291 group_fd, evsel->open_flags);
83c2e4f3 2292
6f844b1f 2293 FD(evsel, idx, thread) = fd;
83c2e4f3
JO
2294
2295 if (fd < 0) {
727ab04e 2296 err = -errno;
a359c17a 2297
ccd26741 2298 pr_debug2_peo("\nsys_perf_event_open failed, error %d\n",
f852fd62 2299 err);
594ac61a 2300 goto try_fallback;
727ab04e 2301 }
1f45b1d4 2302
6f844b1f 2303 bpf_counter__install_pe(evsel, idx, fd);
91233d00 2304
ebfb045a 2305 if (unlikely(test_attr__enabled)) {
44028699
IR
2306 test_attr__open(&evsel->core.attr, pid,
2307 perf_cpu_map__cpu(cpus, idx),
ebfb045a
RM
2308 fd, group_fd, evsel->open_flags);
2309 }
2310
da406202 2311 /* Debug message used by test scripts */
ccd26741 2312 pr_debug2_peo(" = %d\n", fd);
7b4b82bc 2313
1f45b1d4 2314 if (evsel->bpf_fd >= 0) {
83c2e4f3 2315 int evt_fd = fd;
1f45b1d4
WN
2316 int bpf_fd = evsel->bpf_fd;
2317
2318 err = ioctl(evt_fd,
2319 PERF_EVENT_IOC_SET_BPF,
2320 bpf_fd);
2321 if (err && errno != EEXIST) {
2322 pr_err("failed to attach bpf fd %d: %s\n",
2323 bpf_fd, strerror(errno));
2324 err = -EINVAL;
2325 goto out_close;
2326 }
2327 }
2328
bec19672 2329 set_rlimit = NO_CHANGE;
814c8c38
PZ
2330
2331 /*
2332 * If we succeeded but had to kill clockid, fail and
2bb72dbb 2333 * have evsel__open_strerror() print us a nice error.
814c8c38
PZ
2334 */
2335 if (perf_missing_features.clockid ||
2336 perf_missing_features.clockid_wrong) {
2337 err = -EINVAL;
2338 goto out_close;
2339 }
0252208e 2340 }
48290609
ACM
2341 }
2342
2343 return 0;
2344
594ac61a 2345try_fallback:
28667a52
RM
2346 if (evsel__precise_ip_fallback(evsel))
2347 goto retry_open;
2348
44028699
IR
2349 if (evsel__ignore_missing_thread(evsel, perf_cpu_map__nr(cpus),
2350 idx, threads, thread, err)) {
da7c3b46
RM
2351 /* We just removed 1 thread, so lower the upper nthreads limit. */
2352 nthreads--;
2353
2354		/* ... and pretend like nothing has happened. */
2355 err = 0;
2356 goto retry_open;
2357 }
bec19672
AK
2358 /*
2359 * perf stat needs between 5 and 22 fds per CPU. When we run out
2360 * of them try to increase the limits.
2361 */
e093a222 2362 if (err == -EMFILE && rlimit__increase_nofile(&set_rlimit))
71efc48a 2363 goto retry_open;
bec19672 2364
6f844b1f 2365 if (err != -EINVAL || idx > 0 || thread > 0)
594ac61a
ACM
2366 goto out_close;
2367
d21fc5f0 2368 if (evsel__detect_missing_features(evsel))
9aa0bfa3 2369 goto fallback_missing_features;
48290609 2370out_close:
ab6c79b8
JY
2371 if (err)
2372 threads->err_thread = thread;
2373
796c01a4 2374 old_errno = errno;
0252208e
ACM
2375 do {
2376 while (--thread >= 0) {
6f844b1f
IR
2377 if (FD(evsel, idx, thread) >= 0)
2378 close(FD(evsel, idx, thread));
2379 FD(evsel, idx, thread) = -1;
0252208e 2380 }
bf8e8f4b 2381 thread = nthreads;
6f844b1f 2382 } while (--idx >= 0);
796c01a4 2383 errno = old_errno;
727ab04e
ACM
2384 return err;
2385}
2386
4804e011
AK
2387int evsel__open(struct evsel *evsel, struct perf_cpu_map *cpus,
2388 struct perf_thread_map *threads)
2389{
44028699 2390 return evsel__open_cpu(evsel, cpus, threads, 0, perf_cpu_map__nr(cpus));
4804e011
AK
2391}
2392
88761fa1 2393void evsel__close(struct evsel *evsel)
727ab04e 2394{
88761fa1 2395 perf_evsel__close(&evsel->core);
70c20369 2396 perf_evsel__free_id(&evsel->core);
48290609
ACM
2397}
2398
6f844b1f 2399int evsel__open_per_cpu(struct evsel *evsel, struct perf_cpu_map *cpus, int cpu_map_idx)
48290609 2400{
6f844b1f 2401 if (cpu_map_idx == -1)
44028699 2402 return evsel__open_cpu(evsel, cpus, NULL, 0, perf_cpu_map__nr(cpus));
4804e011 2403
6f844b1f 2404 return evsel__open_cpu(evsel, cpus, NULL, cpu_map_idx, cpu_map_idx + 1);
0252208e 2405}
48290609 2406
aa8c406b 2407int evsel__open_per_thread(struct evsel *evsel, struct perf_thread_map *threads)
0252208e 2408{
5972d1e0 2409 return evsel__open(evsel, NULL, threads);
48290609 2410}
70082dd9 2411
32dcd021 2412static int perf_evsel__parse_id_sample(const struct evsel *evsel,
0807d2d8
ACM
2413 const union perf_event *event,
2414 struct perf_sample *sample)
d0dd74e8 2415{
1fc632ce 2416 u64 type = evsel->core.attr.sample_type;
b1fcd190 2417 const __u64 *array = event->sample.array;
0807d2d8 2418 bool swapped = evsel->needs_swap;
37073f9e 2419 union u64_swap u;
d0dd74e8
ACM
2420
2421 array += ((event->header.size -
2422 sizeof(event->header)) / sizeof(u64)) - 1;
2423
75562573
AH
2424 if (type & PERF_SAMPLE_IDENTIFIER) {
2425 sample->id = *array;
2426 array--;
2427 }
2428
d0dd74e8 2429 if (type & PERF_SAMPLE_CPU) {
37073f9e
JO
2430 u.val64 = *array;
2431 if (swapped) {
2432 /* undo swap of u64, then swap on individual u32s */
2433 u.val64 = bswap_64(u.val64);
2434 u.val32[0] = bswap_32(u.val32[0]);
2435 }
2436
2437 sample->cpu = u.val32[0];
d0dd74e8
ACM
2438 array--;
2439 }
2440
2441 if (type & PERF_SAMPLE_STREAM_ID) {
2442 sample->stream_id = *array;
2443 array--;
2444 }
2445
2446 if (type & PERF_SAMPLE_ID) {
2447 sample->id = *array;
2448 array--;
2449 }
2450
2451 if (type & PERF_SAMPLE_TIME) {
2452 sample->time = *array;
2453 array--;
2454 }
2455
2456 if (type & PERF_SAMPLE_TID) {
37073f9e
JO
2457 u.val64 = *array;
2458 if (swapped) {
2459 /* undo swap of u64, then swap on individual u32s */
2460 u.val64 = bswap_64(u.val64);
2461 u.val32[0] = bswap_32(u.val32[0]);
2462 u.val32[1] = bswap_32(u.val32[1]);
2463 }
2464
2465 sample->pid = u.val32[0];
2466 sample->tid = u.val32[1];
dd44bc6b 2467 array--;
d0dd74e8
ACM
2468 }
2469
2470 return 0;
2471}
2472
03b6ea9b
AH
2473static inline bool overflow(const void *endp, u16 max_size, const void *offset,
2474 u64 size)
98e1da90 2475{
03b6ea9b
AH
2476 return size > max_size || offset + size > endp;
2477}
98e1da90 2478
03b6ea9b
AH
2479#define OVERFLOW_CHECK(offset, size, max_size) \
2480 do { \
2481 if (overflow(endp, (max_size), (offset), (size))) \
2482 return -EFAULT; \
2483 } while (0)
98e1da90 2484
03b6ea9b
AH
2485#define OVERFLOW_CHECK_u64(offset) \
2486 OVERFLOW_CHECK(offset, sizeof(u64), sizeof(u64))
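/*
 * Usage sketch, as seen throughout the parsers below: walk a u64 cursor
 * through the record and bound every access against the event header,
 * e.g.
 *
 *	OVERFLOW_CHECK_u64(array);
 *	data->id = *array++;
 *
 * where endp and max_size are derived from event->header.size in the
 * enclosing function.
 */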
98e1da90 2487
01468120
JO
2488static int
2489perf_event__check_size(union perf_event *event, unsigned int sample_size)
2490{
2491 /*
2492 * The evsel's sample_size is based on PERF_SAMPLE_MASK which includes
2493 * up to PERF_SAMPLE_PERIOD. After that overflow() must be used to
2494 * check the format does not go past the end of the event.
2495 */
2496 if (sample_size + sizeof(event->header) > event->header.size)
2497 return -EFAULT;
2498
2499 return 0;
2500}
2501
fbefe9c2
KL
2502void __weak arch_perf_parse_sample_weight(struct perf_sample *data,
2503 const __u64 *array,
2504 u64 type __maybe_unused)
2505{
2506 data->weight = *array;
2507}
2508
63c12ae2
MS
2509u64 evsel__bitfield_swap_branch_flags(u64 value)
2510{
2511 u64 new_val = 0;
2512
2513 /*
2514 * branch_flags
2515 * union {
2516 * u64 values;
2517 * struct {
2518 * mispred:1 //target mispredicted
2519 * predicted:1 //target predicted
2520 * in_tx:1 //in transaction
2521 * abort:1 //transaction abort
2522 * cycles:16 //cycle count to last branch
2523 * type:4 //branch type
6ade6c64
SD
2524 * spec:2 //branch speculation info
2525 * new_type:4 //additional branch type
2526 * priv:3 //privilege level
2527 * reserved:31
63c12ae2
MS
2528 * }
2529 * }
2530 *
2531	 * Avoid bswap64() on the entire branch_flag.value,
2532	 * as it has variable bit-field sizes. Instead the
2533	 * bitfield_swap() macro takes the bit-field position/size
2534	 * and swaps it based on the host endianness.
63c12ae2 2535 */
5b7a29fb 2536 if (host_is_bigendian()) {
63c12ae2
MS
2537 new_val = bitfield_swap(value, 0, 1);
2538 new_val |= bitfield_swap(value, 1, 1);
2539 new_val |= bitfield_swap(value, 2, 1);
2540 new_val |= bitfield_swap(value, 3, 1);
2541 new_val |= bitfield_swap(value, 4, 16);
2542 new_val |= bitfield_swap(value, 20, 4);
6ade6c64
SD
2543 new_val |= bitfield_swap(value, 24, 2);
2544 new_val |= bitfield_swap(value, 26, 4);
2545 new_val |= bitfield_swap(value, 30, 3);
2546 new_val |= bitfield_swap(value, 33, 31);
63c12ae2
MS
2547 } else {
2548 new_val = bitfield_swap(value, 63, 1);
2549 new_val |= bitfield_swap(value, 62, 1);
2550 new_val |= bitfield_swap(value, 61, 1);
2551 new_val |= bitfield_swap(value, 60, 1);
2552 new_val |= bitfield_swap(value, 44, 16);
2553 new_val |= bitfield_swap(value, 40, 4);
6ade6c64
SD
2554 new_val |= bitfield_swap(value, 38, 2);
2555 new_val |= bitfield_swap(value, 34, 4);
2556 new_val |= bitfield_swap(value, 31, 3);
2557 new_val |= bitfield_swap(value, 0, 31);
63c12ae2
MS
2558 }
2559
2560 return new_val;
2561}
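/*
 * For example, on a little-endian host reading a big-endian file the
 * 16-bit cycles field is extracted from bits 44..59 of the raw value
 * and deposited at bits 4..19, where the host's bit-field layout
 * expects it, without byte-swapping the whole 64-bit word (assuming
 * bitfield_swap() mirrors a field across the 64-bit word as described
 * above).
 */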
2562
9fbb4b02
KL
2563static inline bool evsel__has_branch_counters(const struct evsel *evsel)
2564{
2565 struct evsel *cur, *leader = evsel__leader(evsel);
2566
2567 /* The branch counters feature only supports group */
2568 if (!leader || !evsel->evlist)
2569 return false;
2570
2571 evlist__for_each_entry(evsel->evlist, cur) {
2572 if ((leader == evsel__leader(cur)) &&
2573 (cur->core.attr.branch_sample_type & PERF_SAMPLE_BRANCH_COUNTERS))
2574 return true;
2575 }
2576 return false;
2577}
2578
6b6017a2
ACM
2579int evsel__parse_sample(struct evsel *evsel, union perf_event *event,
2580 struct perf_sample *data)
d0dd74e8 2581{
1fc632ce 2582 u64 type = evsel->core.attr.sample_type;
0807d2d8 2583 bool swapped = evsel->needs_swap;
b1fcd190 2584 const __u64 *array;
03b6ea9b
AH
2585 u16 max_size = event->header.size;
2586 const void *endp = (void *)event + max_size;
2587 u64 sz;
d0dd74e8 2588
936be503
DA
2589 /*
2590 * used for cross-endian analysis. See git commit 65014ab3
2591 * for why this goofiness is needed.
2592 */
6a11f92e 2593 union u64_swap u;
936be503 2594
f3bda2c9 2595 memset(data, 0, sizeof(*data));
d0dd74e8
ACM
2596 data->cpu = data->pid = data->tid = -1;
2597 data->stream_id = data->id = data->time = -1ULL;
1fc632ce 2598 data->period = evsel->core.attr.sample_period;
473398a2 2599 data->cpumode = event->header.misc & PERF_RECORD_MISC_CPUMODE_MASK;
28a0b398 2600 data->misc = event->header.misc;
3ad31d8a 2601 data->data_src = PERF_MEM_DATA_SRC_NONE;
3461b65d 2602 data->vcpu = -1;
d0dd74e8
ACM
2603
2604 if (event->header.type != PERF_RECORD_SAMPLE) {
1fc632ce 2605 if (!evsel->core.attr.sample_id_all)
d0dd74e8 2606 return 0;
0807d2d8 2607 return perf_evsel__parse_id_sample(evsel, event, data);
d0dd74e8
ACM
2608 }
2609
2610 array = event->sample.array;
2611
01468120 2612 if (perf_event__check_size(event, evsel->sample_size))
a2854124
FW
2613 return -EFAULT;
2614
75562573
AH
2615 if (type & PERF_SAMPLE_IDENTIFIER) {
2616 data->id = *array;
2617 array++;
2618 }
2619
d0dd74e8 2620 if (type & PERF_SAMPLE_IP) {
ef89325f 2621 data->ip = *array;
d0dd74e8
ACM
2622 array++;
2623 }
2624
2625 if (type & PERF_SAMPLE_TID) {
936be503
DA
2626 u.val64 = *array;
2627 if (swapped) {
2628 /* undo swap of u64, then swap on individual u32s */
2629 u.val64 = bswap_64(u.val64);
2630 u.val32[0] = bswap_32(u.val32[0]);
2631 u.val32[1] = bswap_32(u.val32[1]);
2632 }
2633
2634 data->pid = u.val32[0];
2635 data->tid = u.val32[1];
d0dd74e8
ACM
2636 array++;
2637 }
2638
2639 if (type & PERF_SAMPLE_TIME) {
2640 data->time = *array;
2641 array++;
2642 }
2643
2644 if (type & PERF_SAMPLE_ADDR) {
2645 data->addr = *array;
2646 array++;
2647 }
2648
d0dd74e8
ACM
2649 if (type & PERF_SAMPLE_ID) {
2650 data->id = *array;
2651 array++;
2652 }
2653
2654 if (type & PERF_SAMPLE_STREAM_ID) {
2655 data->stream_id = *array;
2656 array++;
2657 }
2658
2659 if (type & PERF_SAMPLE_CPU) {
936be503
DA
2660
2661 u.val64 = *array;
2662 if (swapped) {
2663 /* undo swap of u64, then swap on individual u32s */
2664 u.val64 = bswap_64(u.val64);
2665 u.val32[0] = bswap_32(u.val32[0]);
2666 }
2667
2668 data->cpu = u.val32[0];
d0dd74e8
ACM
2669 array++;
2670 }
2671
2672 if (type & PERF_SAMPLE_PERIOD) {
2673 data->period = *array;
2674 array++;
2675 }
2676
2677 if (type & PERF_SAMPLE_READ) {
1fc632ce 2678 u64 read_format = evsel->core.attr.read_format;
9ede473c 2679
03b6ea9b 2680 OVERFLOW_CHECK_u64(array);
9ede473c
JO
2681 if (read_format & PERF_FORMAT_GROUP)
2682 data->read.group.nr = *array;
2683 else
2684 data->read.one.value = *array;
2685
2686 array++;
2687
2688 if (read_format & PERF_FORMAT_TOTAL_TIME_ENABLED) {
03b6ea9b 2689 OVERFLOW_CHECK_u64(array);
9ede473c
JO
2690 data->read.time_enabled = *array;
2691 array++;
2692 }
2693
2694 if (read_format & PERF_FORMAT_TOTAL_TIME_RUNNING) {
03b6ea9b 2695 OVERFLOW_CHECK_u64(array);
9ede473c
JO
2696 data->read.time_running = *array;
2697 array++;
2698 }
2699
2700 /* PERF_FORMAT_ID is forced for PERF_SAMPLE_READ */
2701 if (read_format & PERF_FORMAT_GROUP) {
03b6ea9b
AH
2702 const u64 max_group_nr = UINT64_MAX /
2703 sizeof(struct sample_read_value);
2704
2705 if (data->read.group.nr > max_group_nr)
2706 return -EFAULT;
f52679b7
NK
2707
2708 sz = data->read.group.nr * sample_read_value_size(read_format);
03b6ea9b
AH
2709 OVERFLOW_CHECK(array, sz, max_size);
2710 data->read.group.values =
2711 (struct sample_read_value *)array;
2712 array = (void *)array + sz;
9ede473c 2713 } else {
03b6ea9b 2714 OVERFLOW_CHECK_u64(array);
9ede473c
JO
2715 data->read.one.id = *array;
2716 array++;
f52679b7
NK
2717
2718 if (read_format & PERF_FORMAT_LOST) {
2719 OVERFLOW_CHECK_u64(array);
2720 data->read.one.lost = *array;
2721 array++;
2722 }
9ede473c 2723 }
d0dd74e8
ACM
2724 }
2725
8e94b324 2726 if (type & PERF_SAMPLE_CALLCHAIN) {
03b6ea9b 2727 const u64 max_callchain_nr = UINT64_MAX / sizeof(u64);
98e1da90 2728
03b6ea9b
AH
2729 OVERFLOW_CHECK_u64(array);
2730 data->callchain = (struct ip_callchain *)array++;
2731 if (data->callchain->nr > max_callchain_nr)
98e1da90 2732 return -EFAULT;
03b6ea9b
AH
2733 sz = data->callchain->nr * sizeof(u64);
2734 OVERFLOW_CHECK(array, sz, max_size);
2735 array = (void *)array + sz;
d0dd74e8
ACM
2736 }
2737
2738 if (type & PERF_SAMPLE_RAW) {
03b6ea9b 2739 OVERFLOW_CHECK_u64(array);
936be503 2740 u.val64 = *array;
f9d8adb3
JO
2741
2742 /*
2743 * Undo swap of u64, then swap on individual u32s,
2744 * get the size of the raw area and undo all of the
4d39c89f 2745 * swap. The pevent interface handles endianness by
f9d8adb3
JO
2746 * itself.
2747 */
2748 if (swapped) {
936be503
DA
2749 u.val64 = bswap_64(u.val64);
2750 u.val32[0] = bswap_32(u.val32[0]);
2751 u.val32[1] = bswap_32(u.val32[1]);
2752 }
936be503 2753 data->raw_size = u.val32[0];
f9d8adb3
JO
2754
2755 /*
2756 * The raw data is aligned on 64bits including the
2757 * u32 size, so it's safe to use mem_bswap_64.
2758 */
2759 if (swapped)
2760 mem_bswap_64((void *) array, data->raw_size);
2761
03b6ea9b 2762 array = (void *)array + sizeof(u32);
98e1da90 2763
03b6ea9b
AH
2764 OVERFLOW_CHECK(array, data->raw_size, max_size);
2765 data->raw_data = (void *)array;
2766 array = (void *)array + data->raw_size;
d0dd74e8
ACM
2767 }
2768
b5387528 2769 if (type & PERF_SAMPLE_BRANCH_STACK) {
03b6ea9b
AH
2770 const u64 max_branch_nr = UINT64_MAX /
2771 sizeof(struct branch_entry);
63c12ae2
MS
2772 struct branch_entry *e;
2773 unsigned int i;
b5387528 2774
03b6ea9b
AH
2775 OVERFLOW_CHECK_u64(array);
2776 data->branch_stack = (struct branch_stack *)array++;
b5387528 2777
03b6ea9b
AH
2778 if (data->branch_stack->nr > max_branch_nr)
2779 return -EFAULT;
42bbabed 2780
b5387528 2781 sz = data->branch_stack->nr * sizeof(struct branch_entry);
63c12ae2 2782 if (evsel__has_branch_hw_idx(evsel)) {
42bbabed 2783 sz += sizeof(u64);
63c12ae2
MS
2784 e = &data->branch_stack->entries[0];
2785 } else {
42bbabed 2786 data->no_hw_idx = true;
63c12ae2
MS
2787 /*
2788 * if the PERF_SAMPLE_BRANCH_HW_INDEX is not applied,
2789 * only nr and entries[] will be output by kernel.
2790 */
2791 e = (struct branch_entry *)&data->branch_stack->hw_idx;
2792 }
2793
2794 if (swapped) {
2795 /*
2796 * struct branch_flag does not have endian
2797 * specific bit field definition. And bswap
2798 * will not resolve the issue, since these
2799 * are bit fields.
2800 *
2801 * evsel__bitfield_swap_branch_flags() uses a
2802 * bitfield_swap macro to swap the bit position
2803 * based on the host endians.
2804 */
2805 for (i = 0; i < data->branch_stack->nr; i++, e++)
2806 e->flags.value = evsel__bitfield_swap_branch_flags(e->flags.value);
2807 }
2808
03b6ea9b
AH
2809 OVERFLOW_CHECK(array, sz, max_size);
2810 array = (void *)array + sz;
9fbb4b02
KL
2811
2812 if (evsel__has_branch_counters(evsel)) {
2813 OVERFLOW_CHECK_u64(array);
2814
2815 data->branch_stack_cntr = (u64 *)array;
2816 sz = data->branch_stack->nr * sizeof(u64);
2817
2818 OVERFLOW_CHECK(array, sz, max_size);
2819 array = (void *)array + sz;
2820 }
b5387528 2821 }
0f6a3015
JO
2822
2823 if (type & PERF_SAMPLE_REGS_USER) {
03b6ea9b 2824 OVERFLOW_CHECK_u64(array);
5b95a4a3
AH
2825 data->user_regs.abi = *array;
2826 array++;
0f6a3015 2827
5b95a4a3 2828 if (data->user_regs.abi) {
1fc632ce 2829 u64 mask = evsel->core.attr.sample_regs_user;
03b6ea9b 2830
3a5b64f0 2831 sz = hweight64(mask) * sizeof(u64);
03b6ea9b 2832 OVERFLOW_CHECK(array, sz, max_size);
352ea45a 2833 data->user_regs.mask = mask;
0f6a3015 2834 data->user_regs.regs = (u64 *)array;
03b6ea9b 2835 array = (void *)array + sz;
0f6a3015
JO
2836 }
2837 }
2838
2839 if (type & PERF_SAMPLE_STACK_USER) {
03b6ea9b
AH
2840 OVERFLOW_CHECK_u64(array);
2841 sz = *array++;
0f6a3015
JO
2842
2843 data->user_stack.offset = ((char *)(array - 1)
2844 - (char *) event);
2845
03b6ea9b 2846 if (!sz) {
0f6a3015
JO
2847 data->user_stack.size = 0;
2848 } else {
03b6ea9b 2849 OVERFLOW_CHECK(array, sz, max_size);
0f6a3015 2850 data->user_stack.data = (char *)array;
03b6ea9b
AH
2851 array = (void *)array + sz;
2852 OVERFLOW_CHECK_u64(array);
54bd2692 2853 data->user_stack.size = *array++;
a65cb4b9
JO
2854 if (WARN_ONCE(data->user_stack.size > sz,
2855 "user stack dump failure\n"))
2856 return -EFAULT;
0f6a3015
JO
2857 }
2858 }
2859
ea8d0ed6 2860 if (type & PERF_SAMPLE_WEIGHT_TYPE) {
03b6ea9b 2861 OVERFLOW_CHECK_u64(array);
fbefe9c2 2862 arch_perf_parse_sample_weight(data, array, type);
05484298
AK
2863 array++;
2864 }
2865
98a3b32c 2866 if (type & PERF_SAMPLE_DATA_SRC) {
03b6ea9b 2867 OVERFLOW_CHECK_u64(array);
98a3b32c
SE
2868 data->data_src = *array;
2869 array++;
2870 }
2871
475eeab9 2872 if (type & PERF_SAMPLE_TRANSACTION) {
87b95524 2873 OVERFLOW_CHECK_u64(array);
475eeab9
AK
2874 data->transaction = *array;
2875 array++;
2876 }
2877
6a21c0b5
SE
2878 data->intr_regs.abi = PERF_SAMPLE_REGS_ABI_NONE;
2879 if (type & PERF_SAMPLE_REGS_INTR) {
2880 OVERFLOW_CHECK_u64(array);
2881 data->intr_regs.abi = *array;
2882 array++;
2883
2884 if (data->intr_regs.abi != PERF_SAMPLE_REGS_ABI_NONE) {
1fc632ce 2885 u64 mask = evsel->core.attr.sample_regs_intr;
6a21c0b5 2886
3a5b64f0 2887 sz = hweight64(mask) * sizeof(u64);
6a21c0b5
SE
2888 OVERFLOW_CHECK(array, sz, max_size);
2889 data->intr_regs.mask = mask;
2890 data->intr_regs.regs = (u64 *)array;
2891 array = (void *)array + sz;
2892 }
2893 }
2894
3b0a5daa
KL
2895 data->phys_addr = 0;
2896 if (type & PERF_SAMPLE_PHYS_ADDR) {
2897 data->phys_addr = *array;
2898 array++;
2899 }
2900
ba78c1c5
NK
2901 data->cgroup = 0;
2902 if (type & PERF_SAMPLE_CGROUP) {
2903 data->cgroup = *array;
2904 array++;
2905 }
2906
542b88fd
KL
2907 data->data_page_size = 0;
2908 if (type & PERF_SAMPLE_DATA_PAGE_SIZE) {
2909 data->data_page_size = *array;
2910 array++;
2911 }
2912
c1de7f3d
KL
2913 data->code_page_size = 0;
2914 if (type & PERF_SAMPLE_CODE_PAGE_SIZE) {
2915 data->code_page_size = *array;
2916 array++;
2917 }
2918
98dcf14d
AH
2919 if (type & PERF_SAMPLE_AUX) {
2920 OVERFLOW_CHECK_u64(array);
2921 sz = *array++;
2922
2923 OVERFLOW_CHECK(array, sz, max_size);
2924 /* Undo swap of data */
2925 if (swapped)
2926 mem_bswap_64((char *)array, sz);
2927 data->aux_sample.size = sz;
2928 data->aux_sample.data = (char *)array;
2929 array = (void *)array + sz;
2930 }
2931
d0dd74e8
ACM
2932 return 0;
2933}
74eec26f 2934
6b6017a2
ACM
2935int evsel__parse_sample_timestamp(struct evsel *evsel, union perf_event *event,
2936 u64 *timestamp)
01468120 2937{
1fc632ce 2938 u64 type = evsel->core.attr.sample_type;
b1fcd190 2939 const __u64 *array;
01468120
JO
2940
2941 if (!(type & PERF_SAMPLE_TIME))
2942 return -1;
2943
2944 if (event->header.type != PERF_RECORD_SAMPLE) {
2945 struct perf_sample data = {
2946 .time = -1ULL,
2947 };
2948
1fc632ce 2949 if (!evsel->core.attr.sample_id_all)
01468120
JO
2950 return -1;
2951 if (perf_evsel__parse_id_sample(evsel, event, &data))
2952 return -1;
2953
2954 *timestamp = data.time;
2955 return 0;
2956 }
2957
2958 array = event->sample.array;
2959
2960 if (perf_event__check_size(event, evsel->sample_size))
2961 return -EFAULT;
2962
2963 if (type & PERF_SAMPLE_IDENTIFIER)
2964 array++;
2965
2966 if (type & PERF_SAMPLE_IP)
2967 array++;
2968
2969 if (type & PERF_SAMPLE_TID)
2970 array++;
2971
2972 if (type & PERF_SAMPLE_TIME)
2973 *timestamp = *array;
2974
2975 return 0;
2976}
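/*
 * This fast path relies on PERF_SAMPLE_IDENTIFIER, IP and TID being the
 * only fields that can precede TIME, each one u64 wide: e.g. with
 * sample_type = IDENTIFIER | TID | TIME the timestamp is array[2], so
 * the full evsel__parse_sample() walk is avoided.
 */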
2977
0a64de04
AH
2978u16 evsel__id_hdr_size(struct evsel *evsel)
2979{
2980 u64 sample_type = evsel->core.attr.sample_type;
2981 u16 size = 0;
2982
2983 if (sample_type & PERF_SAMPLE_TID)
2984 size += sizeof(u64);
2985
2986 if (sample_type & PERF_SAMPLE_TIME)
2987 size += sizeof(u64);
2988
2989 if (sample_type & PERF_SAMPLE_ID)
2990 size += sizeof(u64);
2991
2992 if (sample_type & PERF_SAMPLE_STREAM_ID)
2993 size += sizeof(u64);
2994
2995 if (sample_type & PERF_SAMPLE_CPU)
2996 size += sizeof(u64);
2997
2998 if (sample_type & PERF_SAMPLE_IDENTIFIER)
2999 size += sizeof(u64);
3000
3001 return size;
3002}
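/*
 * Worked example: with sample_type = PERF_SAMPLE_TID | PERF_SAMPLE_TIME |
 * PERF_SAMPLE_IDENTIFIER, three u64s are appended to every non-sample
 * record, so evsel__id_hdr_size() returns 24.
 */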
3003
378ef0f5 3004#ifdef HAVE_LIBTRACEEVENT
efc0cdc9 3005struct tep_format_field *evsel__field(struct evsel *evsel, const char *name)
efd2b924 3006{
af85cd19 3007 return tep_find_field(evsel->tp_format, name);
efd2b924
ACM
3008}
3009
a8792242
YJ
3010struct tep_format_field *evsel__common_field(struct evsel *evsel, const char *name)
3011{
3012 return tep_find_common_field(evsel->tp_format, name);
3013}
3014
efc0cdc9 3015void *evsel__rawptr(struct evsel *evsel, struct perf_sample *sample, const char *name)
5555ded4 3016{
efc0cdc9 3017 struct tep_format_field *field = evsel__field(evsel, name);
5555ded4
ACM
3018 int offset;
3019
efd2b924
ACM
3020 if (!field)
3021 return NULL;
5555ded4
ACM
3022
3023 offset = field->offset;
3024
bb39ccb2 3025 if (field->flags & TEP_FIELD_IS_DYNAMIC) {
5555ded4
ACM
3026 offset = *(int *)(sample->raw_data + field->offset);
3027 offset &= 0xffff;
1634bad3 3028 if (tep_field_is_relative(field->flags))
7c689c83 3029 offset += field->offset + field->size;
5555ded4
ACM
3030 }
3031
3032 return sample->raw_data + offset;
3033}
3034
2c92f982 3035u64 format_field__intval(struct tep_format_field *field, struct perf_sample *sample,
90525176 3036 bool needs_swap)
5555ded4 3037{
e6b6f679 3038 u64 value;
90525176 3039 void *ptr = sample->raw_data + field->offset;
5555ded4 3040
e6b6f679
ACM
3041 switch (field->size) {
3042 case 1:
3043 return *(u8 *)ptr;
3044 case 2:
3045 value = *(u16 *)ptr;
3046 break;
3047 case 4:
3048 value = *(u32 *)ptr;
3049 break;
3050 case 8:
e94eedab 3051 memcpy(&value, ptr, sizeof(u64));
e6b6f679
ACM
3052 break;
3053 default:
3054 return 0;
3055 }
3056
90525176 3057 if (!needs_swap)
e6b6f679
ACM
3058 return value;
3059
3060 switch (field->size) {
3061 case 2:
3062 return bswap_16(value);
3063 case 4:
3064 return bswap_32(value);
3065 case 8:
3066 return bswap_64(value);
3067 default:
3068 return 0;
3069 }
3070
3071 return 0;
5555ded4 3072}
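/*
 * Example: for a 4-byte tracepoint field at offset 8, the bytes at
 * sample->raw_data[8..11] are read as a u32 and, if the data came from
 * an opposite-endian machine (needs_swap), bswap_32()'d before being
 * widened into the u64 return value.
 */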
0698aedd 3073
efc0cdc9 3074u64 evsel__intval(struct evsel *evsel, struct perf_sample *sample, const char *name)
90525176 3075{
efc0cdc9 3076 struct tep_format_field *field = evsel__field(evsel, name);
90525176 3077
90525176
ACM
3078 return field ? format_field__intval(field, sample, evsel->needs_swap) : 0;
3079}
a8792242
YJ
3080
3081u64 evsel__intval_common(struct evsel *evsel, struct perf_sample *sample, const char *name)
3082{
3083 struct tep_format_field *field = evsel__common_field(evsel, name);
3084
3085 return field ? format_field__intval(field, sample, evsel->needs_swap) : 0;
3086}
3087
df8bc77e
ZG
3088char evsel__taskstate(struct evsel *evsel, struct perf_sample *sample, const char *name)
3089{
3090 static struct tep_format_field *prev_state_field;
3091 static const char *states;
3092 struct tep_format_field *field;
3093 unsigned long long val;
3094 unsigned int bit;
3095 char state = '?'; /* '?' denotes unknown task state */
3096
3097 field = evsel__field(evsel, name);
3098
3099 if (!field)
3100 return state;
3101
3102 if (!states || field != prev_state_field) {
20018398 3103 states = parse_task_states(field);
df8bc77e
ZG
3104 if (!states)
3105 return state;
3106 prev_state_field = field;
3107 }
3108
3109 /*
3110 * Note since the kernel exposes TASK_REPORT_MAX to userspace
3111	 * to denote the 'preempted' state, we might as well report
3112	 * 'R' for this case, which makes sense to users as well.
3113 *
3114 * We can change this if we have a good reason in the future.
3115 */
3116 val = evsel__intval(evsel, sample, name);
3117 bit = val ? ffs(val) : 0;
3118 state = (!bit || bit > strlen(states)) ? 'R' : states[bit-1];
3119 return state;
3120}
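/*
 * Illustrative example (the states string comes from the tracepoint's
 * own print format, so it varies by kernel): with states == "SDTtXZPI",
 * val == 0 maps to 'R' (running), val == 1 sets bit 1 and yields
 * states[0] == 'S' (sleeping), and a bit beyond the string also falls
 * back to 'R' per the TASK_REPORT_MAX note above.
 */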
378ef0f5 3121#endif
90525176 3122
eb2eac0c
IR
3123bool evsel__fallback(struct evsel *evsel, struct target *target, int err,
3124 char *msg, size_t msgsize)
c0a54341 3125{
08094828
ACM
3126 int paranoid;
3127
2b821cce 3128 if ((err == ENOENT || err == ENXIO || err == ENODEV) &&
1fc632ce
JO
3129 evsel->core.attr.type == PERF_TYPE_HARDWARE &&
3130 evsel->core.attr.config == PERF_COUNT_HW_CPU_CYCLES) {
c0a54341 3131 /*
eb2eac0c
IR
3132 * If it's cycles then fall back to hrtimer based cpu-clock sw
3133 * counter, which is always available even if no PMU support.
c0a54341
ACM
3134 *
3135 * PPC returns ENXIO until 2.6.37 (behavior changed with commit
3136 * b0a873e).
3137 */
1fc632ce 3138 evsel->core.attr.type = PERF_TYPE_SOFTWARE;
eb2eac0c
IR
3139 evsel->core.attr.config = target__has_cpu(target)
3140 ? PERF_COUNT_SW_CPU_CLOCK
3141 : PERF_COUNT_SW_TASK_CLOCK;
3142 scnprintf(msg, msgsize,
3143 "The cycles event is not supported, trying to fall back to %s",
3144 target__has_cpu(target) ? "cpu-clock" : "task-clock");
c0a54341 3145
04662523 3146 zfree(&evsel->name);
08094828 3147 return true;
1fc632ce 3148 } else if (err == EACCES && !evsel->core.attr.exclude_kernel &&
08094828 3149 (paranoid = perf_event_paranoid()) > 1) {
8ab2e96d 3150 const char *name = evsel__name(evsel);
08094828 3151 char *new_name;
129193bb 3152 const char *sep = ":";
08094828 3153
bec49a9e
SE
3154		/* If the event has exclude_user set, don't also exclude the kernel. */
3155 if (evsel->core.attr.exclude_user)
3156 return false;
3157
129193bb
JO
3158 /* Is there already the separator in the name. */
3159 if (strchr(name, '/') ||
70943490 3160 (strchr(name, ':') && !evsel->is_libpfm_event))
129193bb
JO
3161 sep = "";
3162
3163 if (asprintf(&new_name, "%s%su", name, sep) < 0)
08094828
ACM
3164 return false;
3165
cdf13c09 3166 free(evsel->name);
08094828 3167 evsel->name = new_name;
4ec8d984
SE
3168 scnprintf(msg, msgsize, "kernel.perf_event_paranoid=%d, trying "
3169 "to fall back to excluding kernel and hypervisor "
3170 " samples", paranoid);
1fc632ce 3171 evsel->core.attr.exclude_kernel = 1;
4ec8d984 3172 evsel->core.attr.exclude_hv = 1;
08094828 3173
c0a54341
ACM
3174 return true;
3175 }
3176
3177 return false;
3178}
56e52e85 3179
2157f6ee
ACM
3180static bool find_process(const char *name)
3181{
3182 size_t len = strlen(name);
3183 DIR *dir;
3184 struct dirent *d;
3185 int ret = -1;
3186
3187 dir = opendir(procfs__mountpoint());
3188 if (!dir)
3189 return false;
3190
3191 /* Walk through the directory. */
3192 while (ret && (d = readdir(dir)) != NULL) {
3193 char path[PATH_MAX];
3194 char *data;
3195 size_t size;
3196
3197 if ((d->d_type != DT_DIR) ||
3198 !strcmp(".", d->d_name) ||
3199 !strcmp("..", d->d_name))
3200 continue;
3201
3202 scnprintf(path, sizeof(path), "%s/%s/comm",
3203 procfs__mountpoint(), d->d_name);
3204
3205 if (filename__read_str(path, &data, &size))
3206 continue;
3207
3208 ret = strncmp(name, data, len);
3209 free(data);
3210 }
3211
3212 closedir(dir);
3213 return ret ? false : true;
3214}
3215
b2ad9549
RB
3216int __weak arch_evsel__open_strerror(struct evsel *evsel __maybe_unused,
3217 char *msg __maybe_unused,
3218 size_t size __maybe_unused)
ab0809af 3219{
b2ad9549 3220 return 0;
ab0809af
KP
3221}
3222
2bb72dbb
ACM
3223int evsel__open_strerror(struct evsel *evsel, struct target *target,
3224 int err, char *msg, size_t size)
56e52e85 3225{
6e81c74c 3226 char sbuf[STRERR_BUFSIZE];
c1034eb0 3227 int printed = 0, enforced = 0;
b2ad9549 3228 int ret;
6e81c74c 3229
56e52e85
ACM
3230 switch (err) {
3231 case EPERM:
3232 case EACCES:
c1034eb0
AB
3233 printed += scnprintf(msg + printed, size - printed,
3234 "Access to performance monitoring and observability operations is limited.\n");
3235
3236 if (!sysfs__read_int("fs/selinux/enforce", &enforced)) {
3237 if (enforced) {
3238 printed += scnprintf(msg + printed, size - printed,
3239 "Enforced MAC policy settings (SELinux) can limit access to performance\n"
3240 "monitoring and observability operations. Inspect system audit records for\n"
3241 "more perf_event access control information and adjusting the policy.\n");
3242 }
3243 }
3244
32ccb130 3245 if (err == EPERM)
c1034eb0 3246 printed += scnprintf(msg, size,
8ab2e96d 3247 "No permission to enable %s event.\n\n", evsel__name(evsel));
32ccb130
JY
3248
3249 return scnprintf(msg + printed, size - printed,
c1034eb0 3250 "Consider adjusting /proc/sys/kernel/perf_event_paranoid setting to open\n"
4b0297ef
AB
3251 "access to performance monitoring and observability operations for processes\n"
3252 "without CAP_PERFMON, CAP_SYS_PTRACE or CAP_SYS_ADMIN Linux capability.\n"
3253 "More information can be found at 'Perf events and tool security' document:\n"
3254 "https://www.kernel.org/doc/html/latest/admin-guide/perf-security.html\n"
c1034eb0 3255 "perf_event_paranoid setting is %d:\n"
3379e0c3 3256 " -1: Allow use of (almost) all events by all users\n"
ac0bb6b7 3257 " Ignore mlock limit after perf_event_mlock_kb without CAP_IPC_LOCK\n"
c1034eb0
AB
3258 ">= 0: Disallow raw and ftrace function tracepoint access\n"
3259 ">= 1: Disallow CPU event access\n"
3260 ">= 2: Disallow kernel profiling\n"
3261 "To make the adjusted perf_event_paranoid setting permanent preserve it\n"
3262 "in /etc/sysctl.conf (e.g. kernel.perf_event_paranoid = <setting>)",
3263 perf_event_paranoid());
56e52e85 3264 case ENOENT:
8ab2e96d 3265 return scnprintf(msg, size, "The %s event is not supported.", evsel__name(evsel));
56e52e85
ACM
3266 case EMFILE:
3267 return scnprintf(msg, size, "%s",
3268 "Too many events are opened.\n"
18ffdfe8
JO
3269 "Probably the maximum number of open file descriptors has been reached.\n"
3270 "Hint: Try again after reducing the number of events.\n"
3271 "Hint: Try increasing the limit with 'ulimit -n <limit>'");
de46d526 3272 case ENOMEM:
27de9b2b 3273 if (evsel__has_callchain(evsel) &&
de46d526
ACM
3274 access("/proc/sys/kernel/perf_event_max_stack", F_OK) == 0)
3275 return scnprintf(msg, size,
3276 "Not enough memory to setup event with callchain.\n"
3277 "Hint: Try tweaking /proc/sys/kernel/perf_event_max_stack\n"
029c75e5 3278 "Hint: Current value: %d", sysctl__max_stack());
de46d526 3279 break;
56e52e85
ACM
3280 case ENODEV:
3281 if (target->cpu_list)
3282 return scnprintf(msg, size, "%s",
81d64f46 3283 "No such device - did you specify an out-of-range profile CPU?");
56e52e85
ACM
3284 break;
3285 case EOPNOTSUPP:
8f431a28
JC
3286 if (evsel->core.attr.sample_type & PERF_SAMPLE_BRANCH_STACK)
3287 return scnprintf(msg, size,
3288 "%s: PMU Hardware or event type doesn't support branch stack sampling.",
3289 evsel__name(evsel));
2c9a11af
AH
3290 if (evsel->core.attr.aux_output)
3291 return scnprintf(msg, size,
3292 "%s: PMU Hardware doesn't support 'aux_output' feature",
3293 evsel__name(evsel));
1fc632ce 3294 if (evsel->core.attr.sample_period != 0)
114bc191
KP
3295 return scnprintf(msg, size,
3296 "%s: PMU Hardware doesn't support sampling/overflow-interrupts. Try 'perf stat'",
8ab2e96d 3297 evsel__name(evsel));
1fc632ce 3298 if (evsel->core.attr.precise_ip)
56e52e85
ACM
3299 return scnprintf(msg, size, "%s",
3300 "\'precise\' request may not be supported. Try removing 'p' modifier.");
3301#if defined(__i386__) || defined(__x86_64__)
1fc632ce 3302 if (evsel->core.attr.type == PERF_TYPE_HARDWARE)
56e52e85 3303 return scnprintf(msg, size, "%s",
ccbb6afe 3304 "No hardware sampling interrupt available.\n");
56e52e85
ACM
3305#endif
3306 break;
63914aca
JO
3307 case EBUSY:
3308 if (find_process("oprofiled"))
3309 return scnprintf(msg, size,
3310 "The PMU counters are busy/taken by another profiler.\n"
3311 "We found oprofile daemon running, please stop it and try again.");
3312 break;
814c8c38 3313 case EINVAL:
c1de7f3d
KL
3314 if (evsel->core.attr.sample_type & PERF_SAMPLE_CODE_PAGE_SIZE && perf_missing_features.code_page_size)
3315 return scnprintf(msg, size, "Asking for the code page size isn't supported by this kernel.");
456ef4c1
ACM
3316 if (evsel->core.attr.sample_type & PERF_SAMPLE_DATA_PAGE_SIZE && perf_missing_features.data_page_size)
3317 return scnprintf(msg, size, "Asking for the data page size isn't supported by this kernel.");
1fc632ce 3318 if (evsel->core.attr.write_backward && perf_missing_features.write_backward)
7da36e94 3319 return scnprintf(msg, size, "Reading from overwrite event is not supported by this kernel.");
814c8c38
PZ
3320 if (perf_missing_features.clockid)
3321 return scnprintf(msg, size, "clockid feature not supported.");
3322 if (perf_missing_features.clockid_wrong)
3323 return scnprintf(msg, size, "wrong clockid (%d).", clockid);
acb9f2d4
ACM
3324 if (perf_missing_features.aux_output)
3325 return scnprintf(msg, size, "The 'aux_output' feature is not supported, update the kernel.");
dcffc5eb
IR
3326 if (!target__has_cpu(target))
3327 return scnprintf(msg, size,
3328 "Invalid event (%s) in per-thread mode, enable system wide with '-a'.",
3329 evsel__name(evsel));
ab0809af 3330
814c8c38 3331 break;
2a57d408
KL
3332 case ENODATA:
3333 return scnprintf(msg, size, "Cannot collect data source with the load latency event alone. "
3334 "Please add an auxiliary event in front of the load latency event.");
56e52e85
ACM
3335 default:
3336 break;
3337 }
3338
b2ad9549
RB
3339 ret = arch_evsel__open_strerror(evsel, msg, size);
3340 if (ret)
3341 return ret;
3342
56e52e85 3343 return scnprintf(msg, size,
6e81c74c 3344 "The sys_perf_event_open() syscall returned with %d (%s) for event (%s).\n"
ec394845 3345 "/bin/dmesg | grep -i perf may provide additional information.\n",
8ab2e96d 3346 err, str_error_r(err, sbuf, sizeof(sbuf)), evsel__name(evsel));
56e52e85 3347}
f4e47f9f 3348
6e6d1d65 3349struct perf_env *evsel__env(struct evsel *evsel)
69fb09f6 3350{
7b830875 3351 if (evsel && evsel->evlist && evsel->evlist->env)
5449f13c 3352 return evsel->evlist->env;
9db0e363 3353 return &perf_env;
69fb09f6 3354}
650d6220 3355
63503dba 3356static int store_evsel_ids(struct evsel *evsel, struct evlist *evlist)
650d6220 3357{
6f844b1f 3358 int cpu_map_idx, thread;
650d6220 3359
6f844b1f 3360 for (cpu_map_idx = 0; cpu_map_idx < xyarray__max_x(evsel->core.fd); cpu_map_idx++) {
9dfcb759 3361 for (thread = 0; thread < xyarray__max_y(evsel->core.fd);
650d6220 3362 thread++) {
6f844b1f 3363 int fd = FD(evsel, cpu_map_idx, thread);
650d6220 3364
d5a99483 3365 if (perf_evlist__id_add_fd(&evlist->core, &evsel->core,
6f844b1f 3366 cpu_map_idx, thread, fd) < 0)
650d6220
JO
3367 return -1;
3368 }
3369 }
3370
3371 return 0;
3372}
3373
34397753 3374int evsel__store_ids(struct evsel *evsel, struct evlist *evlist)
650d6220 3375{
d400bd3a 3376 struct perf_cpu_map *cpus = evsel->core.cpus;
af663bd0 3377 struct perf_thread_map *threads = evsel->core.threads;
650d6220 3378
44028699 3379 if (perf_evsel__alloc_id(&evsel->core, perf_cpu_map__nr(cpus), threads->nr))
650d6220
JO
3380 return -ENOMEM;
3381
3382 return store_evsel_ids(evsel, evlist);
3383}
034f7ee1
JY
3384
3385void evsel__zero_per_pkg(struct evsel *evsel)
3386{
3387 struct hashmap_entry *cur;
3388 size_t bkt;
3389
3390 if (evsel->per_pkg_mask) {
3391 hashmap__for_each_entry(evsel->per_pkg_mask, cur, bkt)
cdf13c09 3392 zfree(&cur->pkey);
034f7ee1
JY
3393
3394 hashmap__clear(evsel->per_pkg_mask);
3395 }
3396}
660e533e 3397
5d9fb666
IR
3398/**
3399 * evsel__is_hybrid - does the evsel have a known PMU that is hybrid. Note, this
3400 * will be false on hybrid systems for hardware and legacy
3401 * cache events.
3402 */
e5f4afbe 3403bool evsel__is_hybrid(const struct evsel *evsel)
660e533e 3404{
94f9eb95 3405 if (perf_pmus__num_core_pmus() == 1)
5d9fb666
IR
3406 return false;
3407
3408 return evsel->core.is_pmu_core;
660e533e 3409}
fba7c866 3410
c6d616fe 3411struct evsel *evsel__leader(const struct evsel *evsel)
fba7c866
JO
3412{
3413 return container_of(evsel->core.leader, struct evsel, core);
3414}
3415
3416bool evsel__has_leader(struct evsel *evsel, struct evsel *leader)
3417{
3418 return evsel->core.leader == &leader->core;
3419}
3420
3421bool evsel__is_leader(struct evsel *evsel)
3422{
3423 return evsel__has_leader(evsel, evsel);
3424}
3425
3426void evsel__set_leader(struct evsel *evsel, struct evsel *leader)
3427{
3428 evsel->core.leader = &leader->core;
3429}
9aba0ada
IR
3430
3431int evsel__source_count(const struct evsel *evsel)
3432{
3433 struct evsel *pos;
3434 int count = 0;
3435
3436 evlist__for_each_entry(evsel->evlist, pos) {
3437 if (pos->metric_leader == evsel)
3438 count++;
3439 }
3440 return count;
3441}
d98079c0
IR
3442
3443bool __weak arch_evsel__must_be_in_group(const struct evsel *evsel __maybe_unused)
3444{
3445 return false;
3446}
3447
e8f4f794
KL
3448/*
3449 * Remove an event from a given group (leader).
3450 * Some events, e.g., perf metrics Topdown events,
3451 * must always be grouped; for those the removal request is ignored.
3452 */
3453void evsel__remove_from_group(struct evsel *evsel, struct evsel *leader)
d98079c0 3454{
e8f4f794
KL
3455 if (!arch_evsel__must_be_in_group(evsel) && evsel != leader) {
3456 evsel__set_leader(evsel, evsel);
3457 evsel->core.nr_members = 0;
3458 leader->core.nr_members--;
3459 }
d98079c0 3460}