perf: Disallow user-space stack dumps for function trace events
[linux-2.6-block.git] / tools / perf / util / evsel.c

/*
 * Copyright (C) 2011, Red Hat Inc, Arnaldo Carvalho de Melo <acme@redhat.com>
 *
 * Parts came from builtin-{top,stat,record}.c, see those files for further
 * copyright notes.
 *
 * Released under the GPL v2. (and only v2, not any later version)
 */

#include <byteswap.h>
#include <linux/bitops.h>
#include <api/fs/debugfs.h>
#include <traceevent/event-parse.h>
#include <linux/hw_breakpoint.h>
#include <linux/perf_event.h>
#include <sys/resource.h>
#include "asm/bug.h"
#include "evsel.h"
#include "evlist.h"
#include "util.h"
#include "cpumap.h"
#include "thread_map.h"
#include "target.h"
#include "perf_regs.h"
#include "debug.h"
#include "trace-event.h"

static struct {
	bool sample_id_all;
	bool exclude_guest;
	bool mmap2;
} perf_missing_features;

#define FD(e, x, y) (*(int *)xyarray__entry(e->fd, x, y))

int __perf_evsel__sample_size(u64 sample_type)
{
	u64 mask = sample_type & PERF_SAMPLE_MASK;
	int size = 0;
	int i;

	for (i = 0; i < 64; i++) {
		if (mask & (1ULL << i))
			size++;
	}

	size *= sizeof(u64);

	return size;
}

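/*
 * Example: sample_type = PERF_SAMPLE_IP | PERF_SAMPLE_TID |
 * PERF_SAMPLE_TIME sets three bits of PERF_SAMPLE_MASK, so the
 * fixed-size part of each sample is 3 * sizeof(u64) = 24 bytes.
 */
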
/**
 * __perf_evsel__calc_id_pos - calculate id_pos.
 * @sample_type: sample type
 *
 * This function returns the position of the event id (PERF_SAMPLE_ID or
 * PERF_SAMPLE_IDENTIFIER) in a sample event i.e. in the array of struct
 * sample_event.
 */
static int __perf_evsel__calc_id_pos(u64 sample_type)
{
	int idx = 0;

	if (sample_type & PERF_SAMPLE_IDENTIFIER)
		return 0;

	if (!(sample_type & PERF_SAMPLE_ID))
		return -1;

	if (sample_type & PERF_SAMPLE_IP)
		idx += 1;

	if (sample_type & PERF_SAMPLE_TID)
		idx += 1;

	if (sample_type & PERF_SAMPLE_TIME)
		idx += 1;

	if (sample_type & PERF_SAMPLE_ADDR)
		idx += 1;

	return idx;
}

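/*
 * Example: for sample_type = PERF_SAMPLE_IP | PERF_SAMPLE_TID |
 * PERF_SAMPLE_ID the sample array is { ip, tid, id, ... }, so
 * id_pos is 2.
 */
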
/**
 * __perf_evsel__calc_is_pos - calculate is_pos.
 * @sample_type: sample type
 *
 * This function returns the position (counting backwards) of the event id
 * (PERF_SAMPLE_ID or PERF_SAMPLE_IDENTIFIER) in a non-sample event i.e. if
 * sample_id_all is used there is an id sample appended to non-sample events.
 */
static int __perf_evsel__calc_is_pos(u64 sample_type)
{
	int idx = 1;

	if (sample_type & PERF_SAMPLE_IDENTIFIER)
		return 1;

	if (!(sample_type & PERF_SAMPLE_ID))
		return -1;

	if (sample_type & PERF_SAMPLE_CPU)
		idx += 1;

	if (sample_type & PERF_SAMPLE_STREAM_ID)
		idx += 1;

	return idx;
}

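/*
 * Example: for sample_type = PERF_SAMPLE_ID | PERF_SAMPLE_CPU the id
 * sample appended to a non-sample event ends { ..., id, cpu }, so
 * counting backwards from the end of the event the id is the second
 * u64, i.e. is_pos == 2.
 */
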
void perf_evsel__calc_id_pos(struct perf_evsel *evsel)
{
	evsel->id_pos = __perf_evsel__calc_id_pos(evsel->attr.sample_type);
	evsel->is_pos = __perf_evsel__calc_is_pos(evsel->attr.sample_type);
}

void hists__init(struct hists *hists)
{
	memset(hists, 0, sizeof(*hists));
	hists->entries_in_array[0] = hists->entries_in_array[1] = RB_ROOT;
	hists->entries_in = &hists->entries_in_array[0];
	hists->entries_collapsed = RB_ROOT;
	hists->entries = RB_ROOT;
	pthread_mutex_init(&hists->lock, NULL);
}

void __perf_evsel__set_sample_bit(struct perf_evsel *evsel,
				  enum perf_event_sample_format bit)
{
	if (!(evsel->attr.sample_type & bit)) {
		evsel->attr.sample_type |= bit;
		evsel->sample_size += sizeof(u64);
		perf_evsel__calc_id_pos(evsel);
	}
}

void __perf_evsel__reset_sample_bit(struct perf_evsel *evsel,
				    enum perf_event_sample_format bit)
{
	if (evsel->attr.sample_type & bit) {
		evsel->attr.sample_type &= ~bit;
		evsel->sample_size -= sizeof(u64);
		perf_evsel__calc_id_pos(evsel);
	}
}

void perf_evsel__set_sample_id(struct perf_evsel *evsel,
			       bool can_sample_identifier)
{
	if (can_sample_identifier) {
		perf_evsel__reset_sample_bit(evsel, ID);
		perf_evsel__set_sample_bit(evsel, IDENTIFIER);
	} else {
		perf_evsel__set_sample_bit(evsel, ID);
	}
	evsel->attr.read_format |= PERF_FORMAT_ID;
}

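/*
 * PERF_SAMPLE_IDENTIFIER puts the id first in a sample (and last in a
 * non-sample event), so it can be located even when evsels with
 * different sample_types share a buffer; plain PERF_SAMPLE_ID only
 * works if all evsels place the id at the same position.
 */
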
void perf_evsel__init(struct perf_evsel *evsel,
		      struct perf_event_attr *attr, int idx)
{
	evsel->idx	= idx;
	evsel->attr	= *attr;
	evsel->leader	= evsel;
	evsel->unit	= "";
	evsel->scale	= 1.0;
	INIT_LIST_HEAD(&evsel->node);
	hists__init(&evsel->hists);
	evsel->sample_size = __perf_evsel__sample_size(attr->sample_type);
	perf_evsel__calc_id_pos(evsel);
}

struct perf_evsel *perf_evsel__new_idx(struct perf_event_attr *attr, int idx)
{
	struct perf_evsel *evsel = zalloc(sizeof(*evsel));

	if (evsel != NULL)
		perf_evsel__init(evsel, attr, idx);

	return evsel;
}

struct perf_evsel *perf_evsel__newtp_idx(const char *sys, const char *name, int idx)
{
	struct perf_evsel *evsel = zalloc(sizeof(*evsel));

	if (evsel != NULL) {
		struct perf_event_attr attr = {
			.type	       = PERF_TYPE_TRACEPOINT,
			.sample_type   = (PERF_SAMPLE_RAW | PERF_SAMPLE_TIME |
					  PERF_SAMPLE_CPU | PERF_SAMPLE_PERIOD),
		};

		if (asprintf(&evsel->name, "%s:%s", sys, name) < 0)
			goto out_free;

		evsel->tp_format = trace_event__tp_format(sys, name);
		if (evsel->tp_format == NULL)
			goto out_free;

		event_attr_init(&attr);
		attr.config = evsel->tp_format->id;
		attr.sample_period = 1;
		perf_evsel__init(evsel, &attr, idx);
	}

	return evsel;

out_free:
	zfree(&evsel->name);
	free(evsel);
	return NULL;
}

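/*
 * Example: perf_evsel__newtp_idx("sched", "sched_switch", 0) creates a
 * PERF_TYPE_TRACEPOINT evsel named "sched:sched_switch" whose
 * attr.config is the tracepoint id from the event's format file.
 */
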
const char *perf_evsel__hw_names[PERF_COUNT_HW_MAX] = {
	"cycles",
	"instructions",
	"cache-references",
	"cache-misses",
	"branches",
	"branch-misses",
	"bus-cycles",
	"stalled-cycles-frontend",
	"stalled-cycles-backend",
	"ref-cycles",
};

static const char *__perf_evsel__hw_name(u64 config)
{
	if (config < PERF_COUNT_HW_MAX && perf_evsel__hw_names[config])
		return perf_evsel__hw_names[config];

	return "unknown-hardware";
}

static int perf_evsel__add_modifiers(struct perf_evsel *evsel, char *bf, size_t size)
{
	int colon = 0, r = 0;
	struct perf_event_attr *attr = &evsel->attr;
	bool exclude_guest_default = false;

#define MOD_PRINT(context, mod)	do {					\
		if (!attr->exclude_##context) {				\
			if (!colon) colon = ++r;			\
			r += scnprintf(bf + r, size - r, "%c", mod);	\
		} } while(0)

	if (attr->exclude_kernel || attr->exclude_user || attr->exclude_hv) {
		MOD_PRINT(kernel, 'k');
		MOD_PRINT(user, 'u');
		MOD_PRINT(hv, 'h');
		exclude_guest_default = true;
	}

	if (attr->precise_ip) {
		if (!colon)
			colon = ++r;
		r += scnprintf(bf + r, size - r, "%.*s", attr->precise_ip, "ppp");
		exclude_guest_default = true;
	}

	if (attr->exclude_host || attr->exclude_guest == exclude_guest_default) {
		MOD_PRINT(host, 'H');
		MOD_PRINT(guest, 'G');
	}
#undef MOD_PRINT
	if (colon)
		bf[colon - 1] = ':';
	return r;
}

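/*
 * Example: "cycles" with exclude_kernel and exclude_hv set and
 * precise_ip == 2 comes out as "cycles:upp"; MOD_PRINT reserves the
 * colon slot with "colon = ++r" and bf[colon - 1] fills it in at the
 * end.
 */
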
static int perf_evsel__hw_name(struct perf_evsel *evsel, char *bf, size_t size)
{
	int r = scnprintf(bf, size, "%s", __perf_evsel__hw_name(evsel->attr.config));
	return r + perf_evsel__add_modifiers(evsel, bf + r, size - r);
}

const char *perf_evsel__sw_names[PERF_COUNT_SW_MAX] = {
	"cpu-clock",
	"task-clock",
	"page-faults",
	"context-switches",
	"cpu-migrations",
	"minor-faults",
	"major-faults",
	"alignment-faults",
	"emulation-faults",
	"dummy",
};

static const char *__perf_evsel__sw_name(u64 config)
{
	if (config < PERF_COUNT_SW_MAX && perf_evsel__sw_names[config])
		return perf_evsel__sw_names[config];
	return "unknown-software";
}

static int perf_evsel__sw_name(struct perf_evsel *evsel, char *bf, size_t size)
{
	int r = scnprintf(bf, size, "%s", __perf_evsel__sw_name(evsel->attr.config));
	return r + perf_evsel__add_modifiers(evsel, bf + r, size - r);
}

static int __perf_evsel__bp_name(char *bf, size_t size, u64 addr, u64 type)
{
	int r;

	r = scnprintf(bf, size, "mem:0x%" PRIx64 ":", addr);

	if (type & HW_BREAKPOINT_R)
		r += scnprintf(bf + r, size - r, "r");

	if (type & HW_BREAKPOINT_W)
		r += scnprintf(bf + r, size - r, "w");

	if (type & HW_BREAKPOINT_X)
		r += scnprintf(bf + r, size - r, "x");

	return r;
}

static int perf_evsel__bp_name(struct perf_evsel *evsel, char *bf, size_t size)
{
	struct perf_event_attr *attr = &evsel->attr;
	int r = __perf_evsel__bp_name(bf, size, attr->bp_addr, attr->bp_type);
	return r + perf_evsel__add_modifiers(evsel, bf + r, size - r);
}

const char *perf_evsel__hw_cache[PERF_COUNT_HW_CACHE_MAX]
				[PERF_EVSEL__MAX_ALIASES] = {
	{ "L1-dcache", "l1-d", "l1d", "L1-data", },
	{ "L1-icache", "l1-i", "l1i", "L1-instruction", },
	{ "LLC", "L2", },
	{ "dTLB", "d-tlb", "Data-TLB", },
	{ "iTLB", "i-tlb", "Instruction-TLB", },
	{ "branch", "branches", "bpu", "btb", "bpc", },
	{ "node", },
};

const char *perf_evsel__hw_cache_op[PERF_COUNT_HW_CACHE_OP_MAX]
				   [PERF_EVSEL__MAX_ALIASES] = {
	{ "load", "loads", "read", },
	{ "store", "stores", "write", },
	{ "prefetch", "prefetches", "speculative-read", "speculative-load", },
};

const char *perf_evsel__hw_cache_result[PERF_COUNT_HW_CACHE_RESULT_MAX]
				       [PERF_EVSEL__MAX_ALIASES] = {
	{ "refs", "Reference", "ops", "access", },
	{ "misses", "miss", },
};

#define C(x)		PERF_COUNT_HW_CACHE_##x
#define CACHE_READ	(1 << C(OP_READ))
#define CACHE_WRITE	(1 << C(OP_WRITE))
#define CACHE_PREFETCH	(1 << C(OP_PREFETCH))
#define COP(x)		(1 << x)

/*
 * cache operation stat
 * L1I : Read and prefetch only
 * ITLB and BPU : Read-only
 */
static unsigned long perf_evsel__hw_cache_stat[C(MAX)] = {
	[C(L1D)]  = (CACHE_READ | CACHE_WRITE | CACHE_PREFETCH),
	[C(L1I)]  = (CACHE_READ | CACHE_PREFETCH),
	[C(LL)]   = (CACHE_READ | CACHE_WRITE | CACHE_PREFETCH),
	[C(DTLB)] = (CACHE_READ | CACHE_WRITE | CACHE_PREFETCH),
	[C(ITLB)] = (CACHE_READ),
	[C(BPU)]  = (CACHE_READ),
	[C(NODE)] = (CACHE_READ | CACHE_WRITE | CACHE_PREFETCH),
};

bool perf_evsel__is_cache_op_valid(u8 type, u8 op)
{
	if (perf_evsel__hw_cache_stat[type] & COP(op))
		return true;	/* valid */
	else
		return false;	/* invalid */
}

int __perf_evsel__hw_cache_type_op_res_name(u8 type, u8 op, u8 result,
					    char *bf, size_t size)
{
	if (result) {
		return scnprintf(bf, size, "%s-%s-%s", perf_evsel__hw_cache[type][0],
				 perf_evsel__hw_cache_op[op][0],
				 perf_evsel__hw_cache_result[result][0]);
	}

	return scnprintf(bf, size, "%s-%s", perf_evsel__hw_cache[type][0],
			 perf_evsel__hw_cache_op[op][1]);
}

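/*
 * Example: type = L1D, op = READ, result = MISS yields
 * "L1-dcache-load-misses"; with result == ACCESS (0) the op's second
 * alias is used instead, giving "L1-dcache-loads".
 */
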
static int __perf_evsel__hw_cache_name(u64 config, char *bf, size_t size)
{
	u8 op, result, type = (config >>  0) & 0xff;
	const char *err = "unknown-ext-hardware-cache-type";

	/* the *_MAX values are one past the last valid index */
	if (type >= PERF_COUNT_HW_CACHE_MAX)
		goto out_err;

	op = (config >>  8) & 0xff;
	err = "unknown-ext-hardware-cache-op";
	if (op >= PERF_COUNT_HW_CACHE_OP_MAX)
		goto out_err;

	result = (config >> 16) & 0xff;
	err = "unknown-ext-hardware-cache-result";
	if (result >= PERF_COUNT_HW_CACHE_RESULT_MAX)
		goto out_err;

	err = "invalid-cache";
	if (!perf_evsel__is_cache_op_valid(type, op))
		goto out_err;

	return __perf_evsel__hw_cache_type_op_res_name(type, op, result, bf, size);
out_err:
	return scnprintf(bf, size, "%s", err);
}

static int perf_evsel__hw_cache_name(struct perf_evsel *evsel, char *bf, size_t size)
{
	int ret = __perf_evsel__hw_cache_name(evsel->attr.config, bf, size);
	return ret + perf_evsel__add_modifiers(evsel, bf + ret, size - ret);
}

static int perf_evsel__raw_name(struct perf_evsel *evsel, char *bf, size_t size)
{
	int ret = scnprintf(bf, size, "raw 0x%" PRIx64, evsel->attr.config);
	return ret + perf_evsel__add_modifiers(evsel, bf + ret, size - ret);
}

const char *perf_evsel__name(struct perf_evsel *evsel)
{
	char bf[128];

	if (evsel->name)
		return evsel->name;

	switch (evsel->attr.type) {
	case PERF_TYPE_RAW:
		perf_evsel__raw_name(evsel, bf, sizeof(bf));
		break;

	case PERF_TYPE_HARDWARE:
		perf_evsel__hw_name(evsel, bf, sizeof(bf));
		break;

	case PERF_TYPE_HW_CACHE:
		perf_evsel__hw_cache_name(evsel, bf, sizeof(bf));
		break;

	case PERF_TYPE_SOFTWARE:
		perf_evsel__sw_name(evsel, bf, sizeof(bf));
		break;

	case PERF_TYPE_TRACEPOINT:
		scnprintf(bf, sizeof(bf), "%s", "unknown tracepoint");
		break;

	case PERF_TYPE_BREAKPOINT:
		perf_evsel__bp_name(evsel, bf, sizeof(bf));
		break;

	default:
		scnprintf(bf, sizeof(bf), "unknown attr type: %d",
			  evsel->attr.type);
		break;
	}

	evsel->name = strdup(bf);

	return evsel->name ?: "unknown";
}

const char *perf_evsel__group_name(struct perf_evsel *evsel)
{
	return evsel->group_name ?: "anon group";
}

int perf_evsel__group_desc(struct perf_evsel *evsel, char *buf, size_t size)
{
	int ret;
	struct perf_evsel *pos;
	const char *group_name = perf_evsel__group_name(evsel);

	ret = scnprintf(buf, size, "%s", group_name);

	ret += scnprintf(buf + ret, size - ret, " { %s",
			 perf_evsel__name(evsel));

	for_each_group_member(pos, evsel)
		ret += scnprintf(buf + ret, size - ret, ", %s",
				 perf_evsel__name(pos));

	ret += scnprintf(buf + ret, size - ret, " }");

	return ret;
}

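/*
 * Example: a group leader "cycles" with one member "instructions" and
 * no group name prints as "anon group { cycles, instructions }".
 */
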
/*
 * The enable_on_exec/disabled value strategy:
 *
 * 1) For any type of traced program:
 *   - all independent events and group leaders are disabled
 *   - all group members are enabled
 *
 *     Group members are ruled by group leaders. They need to
 *     be enabled, because the group scheduling relies on that.
 *
 * 2) For traced programs executed by perf:
 *   - all independent events and group leaders have
 *     enable_on_exec set
 *   - we don't specifically enable or disable any event during
 *     the record command
 *
 *     Independent events and group leaders are initially disabled
 *     and get enabled by exec. Group members are ruled by group
 *     leaders as stated in 1).
 *
 * 3) For traced programs attached by perf (pid/tid):
 *   - we specifically enable or disable all events during
 *     the record command
 *
 *     When attaching events to an already running traced program we
 *     enable/disable events specifically, as there's no
 *     initial traced exec call.
 */
void perf_evsel__config(struct perf_evsel *evsel, struct record_opts *opts)
{
	struct perf_evsel *leader = evsel->leader;
	struct perf_event_attr *attr = &evsel->attr;
	int track = !evsel->idx; /* only the first counter needs these */
	bool per_cpu = opts->target.default_per_cpu && !opts->target.per_thread;

	attr->sample_id_all = perf_missing_features.sample_id_all ? 0 : 1;
	attr->inherit	    = !opts->no_inherit;

	perf_evsel__set_sample_bit(evsel, IP);
	perf_evsel__set_sample_bit(evsel, TID);

	if (evsel->sample_read) {
		perf_evsel__set_sample_bit(evsel, READ);

		/*
		 * We need ID even in case of single event, because
		 * PERF_SAMPLE_READ processes ID-specific data.
		 */
		perf_evsel__set_sample_id(evsel, false);

		/*
		 * Apply group format only if we belong to a group
		 * with more than one member.
		 */
		if (leader->nr_members > 1) {
			attr->read_format |= PERF_FORMAT_GROUP;
			attr->inherit = 0;
		}
	}

	/*
	 * We default some events to a period of 1. But keep
	 * it a weak assumption overridable by the user.
	 */
	if (!attr->sample_period || (opts->user_freq != UINT_MAX &&
				     opts->user_interval != ULLONG_MAX)) {
		if (opts->freq) {
			perf_evsel__set_sample_bit(evsel, PERIOD);
			attr->freq		= 1;
			attr->sample_freq	= opts->freq;
		} else {
			attr->sample_period = opts->default_interval;
		}
	}

	/*
	 * Disable sampling for all group members other
	 * than the leader in case the leader 'leads' the sampling.
	 */
	if ((leader != evsel) && leader->sample_read) {
		attr->sample_freq   = 0;
		attr->sample_period = 0;
	}

	if (opts->no_samples)
		attr->sample_freq = 0;

	if (opts->inherit_stat)
		attr->inherit_stat = 1;

	if (opts->sample_address) {
		perf_evsel__set_sample_bit(evsel, ADDR);
		attr->mmap_data = track;
	}

	if (opts->call_graph_enabled) {
		perf_evsel__set_sample_bit(evsel, CALLCHAIN);

		if (opts->call_graph == CALLCHAIN_DWARF) {
			perf_evsel__set_sample_bit(evsel, REGS_USER);
			perf_evsel__set_sample_bit(evsel, STACK_USER);
			attr->sample_regs_user = PERF_REGS_MASK;
			attr->sample_stack_user = opts->stack_dump_size;
			attr->exclude_callchain_user = 1;
		}
	}

	if (target__has_cpu(&opts->target))
		perf_evsel__set_sample_bit(evsel, CPU);

	if (opts->period)
		perf_evsel__set_sample_bit(evsel, PERIOD);

	if (!perf_missing_features.sample_id_all &&
	    (opts->sample_time || !opts->no_inherit ||
	     target__has_cpu(&opts->target) || per_cpu))
		perf_evsel__set_sample_bit(evsel, TIME);

	if (opts->raw_samples) {
		perf_evsel__set_sample_bit(evsel, TIME);
		perf_evsel__set_sample_bit(evsel, RAW);
		perf_evsel__set_sample_bit(evsel, CPU);
	}

	if (opts->sample_address)
		perf_evsel__set_sample_bit(evsel, DATA_SRC);

	if (opts->no_buffering) {
		attr->watermark = 0;
		attr->wakeup_events = 1;
	}
	if (opts->branch_stack) {
		perf_evsel__set_sample_bit(evsel, BRANCH_STACK);
		attr->branch_sample_type = opts->branch_stack;
	}

	if (opts->sample_weight)
		perf_evsel__set_sample_bit(evsel, WEIGHT);

	attr->mmap = track;
	attr->comm = track;

	if (opts->sample_transaction)
		perf_evsel__set_sample_bit(evsel, TRANSACTION);

	/*
	 * XXX see the function comment above
	 *
	 * Disabling only independent events or group leaders,
	 * keeping group members enabled.
	 */
	if (perf_evsel__is_group_leader(evsel))
		attr->disabled = 1;

	/*
	 * Setting enable_on_exec for independent events and
	 * group leaders for a traced program executed by perf.
	 */
	if (target__none(&opts->target) && perf_evsel__is_group_leader(evsel) &&
	    !opts->initial_delay)
		attr->enable_on_exec = 1;
}

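/*
 * Example: a DWARF callchain request (e.g. "perf record --call-graph
 * dwarf") lands in the CALLCHAIN_DWARF branch above: every sample then
 * carries the user-level registers plus a stack_dump_size-byte copy of
 * the user stack, and exclude_callchain_user leaves the user part of
 * the unwinding to the tools.
 */
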
int perf_evsel__alloc_fd(struct perf_evsel *evsel, int ncpus, int nthreads)
{
	int cpu, thread;
	evsel->fd = xyarray__new(ncpus, nthreads, sizeof(int));

	if (evsel->fd) {
		for (cpu = 0; cpu < ncpus; cpu++) {
			for (thread = 0; thread < nthreads; thread++) {
				FD(evsel, cpu, thread) = -1;
			}
		}
	}

	return evsel->fd != NULL ? 0 : -ENOMEM;
}

static int perf_evsel__run_ioctl(struct perf_evsel *evsel, int ncpus, int nthreads,
				 int ioc, void *arg)
{
	int cpu, thread;

	for (cpu = 0; cpu < ncpus; cpu++) {
		for (thread = 0; thread < nthreads; thread++) {
			int fd = FD(evsel, cpu, thread),
			    err = ioctl(fd, ioc, arg);

			if (err)
				return err;
		}
	}

	return 0;
}

int perf_evsel__set_filter(struct perf_evsel *evsel, int ncpus, int nthreads,
			   const char *filter)
{
	return perf_evsel__run_ioctl(evsel, ncpus, nthreads,
				     PERF_EVENT_IOC_SET_FILTER,
				     (void *)filter);
}

int perf_evsel__enable(struct perf_evsel *evsel, int ncpus, int nthreads)
{
	return perf_evsel__run_ioctl(evsel, ncpus, nthreads,
				     PERF_EVENT_IOC_ENABLE,
				     0);
}

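/*
 * Example: for a "sched:sched_switch" tracepoint evsel,
 * perf_evsel__set_filter(evsel, ncpus, nthreads, "prev_pid == 0")
 * installs the kernel-side event filter on every open fd.
 */
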
int perf_evsel__alloc_id(struct perf_evsel *evsel, int ncpus, int nthreads)
{
	evsel->sample_id = xyarray__new(ncpus, nthreads, sizeof(struct perf_sample_id));
	if (evsel->sample_id == NULL)
		return -ENOMEM;

	evsel->id = zalloc(ncpus * nthreads * sizeof(u64));
	if (evsel->id == NULL) {
		xyarray__delete(evsel->sample_id);
		evsel->sample_id = NULL;
		return -ENOMEM;
	}

	return 0;
}

void perf_evsel__reset_counts(struct perf_evsel *evsel, int ncpus)
{
	memset(evsel->counts, 0, (sizeof(*evsel->counts) +
				  (ncpus * sizeof(struct perf_counts_values))));
}

int perf_evsel__alloc_counts(struct perf_evsel *evsel, int ncpus)
{
	evsel->counts = zalloc((sizeof(*evsel->counts) +
				(ncpus * sizeof(struct perf_counts_values))));
	return evsel->counts != NULL ? 0 : -ENOMEM;
}

void perf_evsel__free_fd(struct perf_evsel *evsel)
{
	xyarray__delete(evsel->fd);
	evsel->fd = NULL;
}

void perf_evsel__free_id(struct perf_evsel *evsel)
{
	xyarray__delete(evsel->sample_id);
	evsel->sample_id = NULL;
	zfree(&evsel->id);
}

void perf_evsel__close_fd(struct perf_evsel *evsel, int ncpus, int nthreads)
{
	int cpu, thread;

	for (cpu = 0; cpu < ncpus; cpu++)
		for (thread = 0; thread < nthreads; ++thread) {
			close(FD(evsel, cpu, thread));
			FD(evsel, cpu, thread) = -1;
		}
}

void perf_evsel__free_counts(struct perf_evsel *evsel)
{
	zfree(&evsel->counts);
}

void perf_evsel__exit(struct perf_evsel *evsel)
{
	assert(list_empty(&evsel->node));
	perf_evsel__free_fd(evsel);
	perf_evsel__free_id(evsel);
}

void perf_evsel__delete(struct perf_evsel *evsel)
{
	perf_evsel__exit(evsel);
	close_cgroup(evsel->cgrp);
	zfree(&evsel->group_name);
	if (evsel->tp_format)
		pevent_free_format(evsel->tp_format);
	zfree(&evsel->name);
	free(evsel);
}

static inline void compute_deltas(struct perf_evsel *evsel,
				  int cpu,
				  struct perf_counts_values *count)
{
	struct perf_counts_values tmp;

	if (!evsel->prev_raw_counts)
		return;

	if (cpu == -1) {
		tmp = evsel->prev_raw_counts->aggr;
		evsel->prev_raw_counts->aggr = *count;
	} else {
		tmp = evsel->prev_raw_counts->cpu[cpu];
		evsel->prev_raw_counts->cpu[cpu] = *count;
	}

	count->val = count->val - tmp.val;
	count->ena = count->ena - tmp.ena;
	count->run = count->run - tmp.run;
}

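/*
 * Example: in interval mode (e.g. "perf stat -I 1000"), prev_raw_counts
 * holds the totals from the previous read, so a raw reading of
 * val = 5000 after a previous total of val = 3000 is turned into a
 * 2000-event delta for the interval.
 */
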
int __perf_evsel__read_on_cpu(struct perf_evsel *evsel,
			      int cpu, int thread, bool scale)
{
	struct perf_counts_values count;
	size_t nv = scale ? 3 : 1;

	if (FD(evsel, cpu, thread) < 0)
		return -EINVAL;

	if (evsel->counts == NULL && perf_evsel__alloc_counts(evsel, cpu + 1) < 0)
		return -ENOMEM;

	if (readn(FD(evsel, cpu, thread), &count, nv * sizeof(u64)) < 0)
		return -errno;

	compute_deltas(evsel, cpu, &count);

	if (scale) {
		if (count.run == 0)
			count.val = 0;
		else if (count.run < count.ena)
			count.val = (u64)((double)count.val * count.ena / count.run + 0.5);
	} else
		count.ena = count.run = 0;

	evsel->counts->cpu[cpu] = count;
	return 0;
}

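/*
 * Example: with counter multiplexing, an event that was scheduled for
 * run = 50ms of its ena = 100ms enabled time and counted val = 1000 is
 * scaled to 1000 * 100 / 50 + 0.5 = 2000, an estimate of the full-time
 * count.
 */
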
int __perf_evsel__read(struct perf_evsel *evsel,
		       int ncpus, int nthreads, bool scale)
{
	size_t nv = scale ? 3 : 1;
	int cpu, thread;
	struct perf_counts_values *aggr = &evsel->counts->aggr, count;

	aggr->val = aggr->ena = aggr->run = 0;

	for (cpu = 0; cpu < ncpus; cpu++) {
		for (thread = 0; thread < nthreads; thread++) {
			if (FD(evsel, cpu, thread) < 0)
				continue;

			if (readn(FD(evsel, cpu, thread),
				  &count, nv * sizeof(u64)) < 0)
				return -errno;

			aggr->val += count.val;
			if (scale) {
				aggr->ena += count.ena;
				aggr->run += count.run;
			}
		}
	}

	compute_deltas(evsel, -1, aggr);

	evsel->counts->scaled = 0;
	if (scale) {
		if (aggr->run == 0) {
			evsel->counts->scaled = -1;
			aggr->val = 0;
			return 0;
		}

		if (aggr->run < aggr->ena) {
			evsel->counts->scaled = 1;
			aggr->val = (u64)((double)aggr->val * aggr->ena / aggr->run + 0.5);
		}
	} else
		aggr->ena = aggr->run = 0;

	return 0;
}

static int get_group_fd(struct perf_evsel *evsel, int cpu, int thread)
{
	struct perf_evsel *leader = evsel->leader;
	int fd;

	if (perf_evsel__is_group_leader(evsel))
		return -1;

	/*
	 * Leader must be already processed/open,
	 * if not it's a bug.
	 */
	BUG_ON(!leader->fd);

	fd = FD(leader, cpu, thread);
	BUG_ON(fd == -1);

	return fd;
}

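/*
 * The value returned here is the group_fd argument for
 * sys_perf_event_open(): -1 starts a new group (or a standalone event),
 * while a leader's fd attaches the new event to that leader's group so
 * the whole group is scheduled onto the PMU together.
 */
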
#define __PRINT_ATTR(fmt, cast, field)  \
	fprintf(fp, "  %-19s "fmt"\n", #field, cast attr->field)

#define PRINT_ATTR_U32(field)  __PRINT_ATTR("%u" , , field)
#define PRINT_ATTR_X32(field)  __PRINT_ATTR("%#x", , field)
#define PRINT_ATTR_U64(field)  __PRINT_ATTR("%" PRIu64, (uint64_t), field)
#define PRINT_ATTR_X64(field)  __PRINT_ATTR("%#"PRIx64, (uint64_t), field)

#define PRINT_ATTR2N(name1, field1, name2, field2)	\
	fprintf(fp, "  %-19s %u    %-19s %u\n",		\
		name1, attr->field1, name2, attr->field2)

#define PRINT_ATTR2(field1, field2) \
	PRINT_ATTR2N(#field1, field1, #field2, field2)

static size_t perf_event_attr__fprintf(struct perf_event_attr *attr, FILE *fp)
{
	size_t ret = 0;

	ret += fprintf(fp, "%.60s\n", graph_dotted_line);
	ret += fprintf(fp, "perf_event_attr:\n");

	ret += PRINT_ATTR_U32(type);
	ret += PRINT_ATTR_U32(size);
	ret += PRINT_ATTR_X64(config);
	ret += PRINT_ATTR_U64(sample_period);
	ret += PRINT_ATTR_U64(sample_freq);
	ret += PRINT_ATTR_X64(sample_type);
	ret += PRINT_ATTR_X64(read_format);

	ret += PRINT_ATTR2(disabled, inherit);
	ret += PRINT_ATTR2(pinned, exclusive);
	ret += PRINT_ATTR2(exclude_user, exclude_kernel);
	ret += PRINT_ATTR2(exclude_hv, exclude_idle);
	ret += PRINT_ATTR2(mmap, comm);
	ret += PRINT_ATTR2(freq, inherit_stat);
	ret += PRINT_ATTR2(enable_on_exec, task);
	ret += PRINT_ATTR2(watermark, precise_ip);
	ret += PRINT_ATTR2(mmap_data, sample_id_all);
	ret += PRINT_ATTR2(exclude_host, exclude_guest);
	ret += PRINT_ATTR2N("excl.callchain_kern", exclude_callchain_kernel,
			    "excl.callchain_user", exclude_callchain_user);
	ret += PRINT_ATTR_U32(mmap2);

	ret += PRINT_ATTR_U32(wakeup_events);
	ret += PRINT_ATTR_U32(wakeup_watermark);
	ret += PRINT_ATTR_X32(bp_type);
	ret += PRINT_ATTR_X64(bp_addr);
	ret += PRINT_ATTR_X64(config1);
	ret += PRINT_ATTR_U64(bp_len);
	ret += PRINT_ATTR_X64(config2);
	ret += PRINT_ATTR_X64(branch_sample_type);
	ret += PRINT_ATTR_X64(sample_regs_user);
	ret += PRINT_ATTR_U32(sample_stack_user);

	ret += fprintf(fp, "%.60s\n", graph_dotted_line);

	return ret;
}

static int __perf_evsel__open(struct perf_evsel *evsel, struct cpu_map *cpus,
			      struct thread_map *threads)
{
	int cpu, thread;
	unsigned long flags = 0;
	int pid = -1, err;
	enum { NO_CHANGE, SET_TO_MAX, INCREASED_MAX } set_rlimit = NO_CHANGE;

	if (evsel->fd == NULL &&
	    perf_evsel__alloc_fd(evsel, cpus->nr, threads->nr) < 0)
		return -ENOMEM;

	if (evsel->cgrp) {
		flags = PERF_FLAG_PID_CGROUP;
		pid = evsel->cgrp->fd;
	}

fallback_missing_features:
	if (perf_missing_features.mmap2)
		evsel->attr.mmap2 = 0;
	if (perf_missing_features.exclude_guest)
		evsel->attr.exclude_guest = evsel->attr.exclude_host = 0;
retry_sample_id:
	if (perf_missing_features.sample_id_all)
		evsel->attr.sample_id_all = 0;

	if (verbose >= 2)
		perf_event_attr__fprintf(&evsel->attr, stderr);

	for (cpu = 0; cpu < cpus->nr; cpu++) {

		for (thread = 0; thread < threads->nr; thread++) {
			int group_fd;

			if (!evsel->cgrp)
				pid = threads->map[thread];

			group_fd = get_group_fd(evsel, cpu, thread);
retry_open:
			pr_debug2("perf_event_open: pid %d cpu %d group_fd %d flags %#lx\n",
				  pid, cpus->map[cpu], group_fd, flags);

			FD(evsel, cpu, thread) = sys_perf_event_open(&evsel->attr,
								     pid,
								     cpus->map[cpu],
								     group_fd, flags);
			if (FD(evsel, cpu, thread) < 0) {
				err = -errno;
				pr_debug2("perf_event_open failed, error %d\n",
					  err);
				goto try_fallback;
			}
			set_rlimit = NO_CHANGE;
		}
	}

	return 0;

try_fallback:
	/*
	 * perf stat needs between 5 and 22 fds per CPU. When we run out
	 * of them try to increase the limits.
	 */
	if (err == -EMFILE && set_rlimit < INCREASED_MAX) {
		struct rlimit l;
		int old_errno = errno;

		if (getrlimit(RLIMIT_NOFILE, &l) == 0) {
			if (set_rlimit == NO_CHANGE)
				l.rlim_cur = l.rlim_max;
			else {
				l.rlim_cur = l.rlim_max + 1000;
				l.rlim_max = l.rlim_cur;
			}
			if (setrlimit(RLIMIT_NOFILE, &l) == 0) {
				set_rlimit++;
				errno = old_errno;
				goto retry_open;
			}
		}
		errno = old_errno;
	}

	if (err != -EINVAL || cpu > 0 || thread > 0)
		goto out_close;

	if (!perf_missing_features.mmap2 && evsel->attr.mmap2) {
		perf_missing_features.mmap2 = true;
		goto fallback_missing_features;
	} else if (!perf_missing_features.exclude_guest &&
		   (evsel->attr.exclude_guest || evsel->attr.exclude_host)) {
		perf_missing_features.exclude_guest = true;
		goto fallback_missing_features;
	} else if (!perf_missing_features.sample_id_all) {
		perf_missing_features.sample_id_all = true;
		goto retry_sample_id;
	}

out_close:
	do {
		while (--thread >= 0) {
			close(FD(evsel, cpu, thread));
			FD(evsel, cpu, thread) = -1;
		}
		thread = threads->nr;
	} while (--cpu >= 0);
	return err;
}

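/*
 * The fallback chain above probes kernel support one feature at a time:
 * an -EINVAL on the very first fd is retried without mmap2, then
 * without exclude_guest/exclude_host, and finally without
 * sample_id_all, recording each missing feature so that later opens
 * skip the probing.
 */
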
void perf_evsel__close(struct perf_evsel *evsel, int ncpus, int nthreads)
{
	if (evsel->fd == NULL)
		return;

	perf_evsel__close_fd(evsel, ncpus, nthreads);
	perf_evsel__free_fd(evsel);
}

static struct {
	struct cpu_map map;
	int cpus[1];
} empty_cpu_map = {
	.map.nr	= 1,
	.cpus	= { -1, },
};

static struct {
	struct thread_map map;
	int threads[1];
} empty_thread_map = {
	.map.nr	 = 1,
	.threads = { -1, },
};

int perf_evsel__open(struct perf_evsel *evsel, struct cpu_map *cpus,
		     struct thread_map *threads)
{
	if (cpus == NULL) {
		/* Work around old compiler warnings about strict aliasing */
		cpus = &empty_cpu_map.map;
	}

	if (threads == NULL)
		threads = &empty_thread_map.map;

	return __perf_evsel__open(evsel, cpus, threads);
}

int perf_evsel__open_per_cpu(struct perf_evsel *evsel,
			     struct cpu_map *cpus)
{
	return __perf_evsel__open(evsel, cpus, &empty_thread_map.map);
}

int perf_evsel__open_per_thread(struct perf_evsel *evsel,
				struct thread_map *threads)
{
	return __perf_evsel__open(evsel, &empty_cpu_map.map, threads);
}

static int perf_evsel__parse_id_sample(const struct perf_evsel *evsel,
				       const union perf_event *event,
				       struct perf_sample *sample)
{
	u64 type = evsel->attr.sample_type;
	const u64 *array = event->sample.array;
	bool swapped = evsel->needs_swap;
	union u64_swap u;

	array += ((event->header.size -
		   sizeof(event->header)) / sizeof(u64)) - 1;

	if (type & PERF_SAMPLE_IDENTIFIER) {
		sample->id = *array;
		array--;
	}

	if (type & PERF_SAMPLE_CPU) {
		u.val64 = *array;
		if (swapped) {
			/* undo swap of u64, then swap on individual u32s */
			u.val64 = bswap_64(u.val64);
			u.val32[0] = bswap_32(u.val32[0]);
		}

		sample->cpu = u.val32[0];
		array--;
	}

	if (type & PERF_SAMPLE_STREAM_ID) {
		sample->stream_id = *array;
		array--;
	}

	if (type & PERF_SAMPLE_ID) {
		sample->id = *array;
		array--;
	}

	if (type & PERF_SAMPLE_TIME) {
		sample->time = *array;
		array--;
	}

	if (type & PERF_SAMPLE_TID) {
		u.val64 = *array;
		if (swapped) {
			/* undo swap of u64, then swap on individual u32s */
			u.val64 = bswap_64(u.val64);
			u.val32[0] = bswap_32(u.val32[0]);
			u.val32[1] = bswap_32(u.val32[1]);
		}

		sample->pid = u.val32[0];
		sample->tid = u.val32[1];
		array--;
	}

	return 0;
}

static inline bool overflow(const void *endp, u16 max_size, const void *offset,
			    u64 size)
{
	return size > max_size || offset + size > endp;
}

#define OVERFLOW_CHECK(offset, size, max_size)				\
	do {								\
		if (overflow(endp, (max_size), (offset), (size)))	\
			return -EFAULT;					\
	} while (0)

#define OVERFLOW_CHECK_u64(offset) \
	OVERFLOW_CHECK(offset, sizeof(u64), sizeof(u64))

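/*
 * Both overflow() conditions matter: "size > max_size" rejects a size
 * field larger than the whole event (so the pointer addition cannot
 * wrap), and "offset + size > endp" rejects a field that would run
 * past the end of the record.
 */
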
int perf_evsel__parse_sample(struct perf_evsel *evsel, union perf_event *event,
			     struct perf_sample *data)
{
	u64 type = evsel->attr.sample_type;
	bool swapped = evsel->needs_swap;
	const u64 *array;
	u16 max_size = event->header.size;
	const void *endp = (void *)event + max_size;
	u64 sz;

	/*
	 * used for cross-endian analysis. See git commit 65014ab3
	 * for why this goofiness is needed.
	 */
	union u64_swap u;

	memset(data, 0, sizeof(*data));
	data->cpu = data->pid = data->tid = -1;
	data->stream_id = data->id = data->time = -1ULL;
	data->period = evsel->attr.sample_period;
	data->weight = 0;

	if (event->header.type != PERF_RECORD_SAMPLE) {
		if (!evsel->attr.sample_id_all)
			return 0;
		return perf_evsel__parse_id_sample(evsel, event, data);
	}

	array = event->sample.array;

	/*
	 * The evsel's sample_size is based on PERF_SAMPLE_MASK which includes
	 * up to PERF_SAMPLE_PERIOD.  After that overflow() must be used to
	 * check the format does not go past the end of the event.
	 */
	if (evsel->sample_size + sizeof(event->header) > event->header.size)
		return -EFAULT;

	data->id = -1ULL;
	if (type & PERF_SAMPLE_IDENTIFIER) {
		data->id = *array;
		array++;
	}

	if (type & PERF_SAMPLE_IP) {
		data->ip = *array;
		array++;
	}

	if (type & PERF_SAMPLE_TID) {
		u.val64 = *array;
		if (swapped) {
			/* undo swap of u64, then swap on individual u32s */
			u.val64 = bswap_64(u.val64);
			u.val32[0] = bswap_32(u.val32[0]);
			u.val32[1] = bswap_32(u.val32[1]);
		}

		data->pid = u.val32[0];
		data->tid = u.val32[1];
		array++;
	}

	if (type & PERF_SAMPLE_TIME) {
		data->time = *array;
		array++;
	}

	data->addr = 0;
	if (type & PERF_SAMPLE_ADDR) {
		data->addr = *array;
		array++;
	}

	if (type & PERF_SAMPLE_ID) {
		data->id = *array;
		array++;
	}

	if (type & PERF_SAMPLE_STREAM_ID) {
		data->stream_id = *array;
		array++;
	}

	if (type & PERF_SAMPLE_CPU) {

		u.val64 = *array;
		if (swapped) {
			/* undo swap of u64, then swap on individual u32s */
			u.val64 = bswap_64(u.val64);
			u.val32[0] = bswap_32(u.val32[0]);
		}

		data->cpu = u.val32[0];
		array++;
	}

	if (type & PERF_SAMPLE_PERIOD) {
		data->period = *array;
		array++;
	}

	if (type & PERF_SAMPLE_READ) {
		u64 read_format = evsel->attr.read_format;

		OVERFLOW_CHECK_u64(array);
		if (read_format & PERF_FORMAT_GROUP)
			data->read.group.nr = *array;
		else
			data->read.one.value = *array;

		array++;

		if (read_format & PERF_FORMAT_TOTAL_TIME_ENABLED) {
			OVERFLOW_CHECK_u64(array);
			data->read.time_enabled = *array;
			array++;
		}

		if (read_format & PERF_FORMAT_TOTAL_TIME_RUNNING) {
			OVERFLOW_CHECK_u64(array);
			data->read.time_running = *array;
			array++;
		}

		/* PERF_FORMAT_ID is forced for PERF_SAMPLE_READ */
		if (read_format & PERF_FORMAT_GROUP) {
			const u64 max_group_nr = UINT64_MAX /
					sizeof(struct sample_read_value);

			if (data->read.group.nr > max_group_nr)
				return -EFAULT;
			sz = data->read.group.nr *
			     sizeof(struct sample_read_value);
			OVERFLOW_CHECK(array, sz, max_size);
			data->read.group.values =
					(struct sample_read_value *)array;
			array = (void *)array + sz;
		} else {
			OVERFLOW_CHECK_u64(array);
			data->read.one.id = *array;
			array++;
		}
	}

	if (type & PERF_SAMPLE_CALLCHAIN) {
		const u64 max_callchain_nr = UINT64_MAX / sizeof(u64);

		OVERFLOW_CHECK_u64(array);
		data->callchain = (struct ip_callchain *)array++;
		if (data->callchain->nr > max_callchain_nr)
			return -EFAULT;
		sz = data->callchain->nr * sizeof(u64);
		OVERFLOW_CHECK(array, sz, max_size);
		array = (void *)array + sz;
	}

	if (type & PERF_SAMPLE_RAW) {
		OVERFLOW_CHECK_u64(array);
		u.val64 = *array;
		if (WARN_ONCE(swapped,
			      "Endianness of raw data not corrected!\n")) {
			/* undo swap of u64, then swap on individual u32s */
			u.val64 = bswap_64(u.val64);
			u.val32[0] = bswap_32(u.val32[0]);
			u.val32[1] = bswap_32(u.val32[1]);
		}
		data->raw_size = u.val32[0];
		array = (void *)array + sizeof(u32);

		OVERFLOW_CHECK(array, data->raw_size, max_size);
		data->raw_data = (void *)array;
		array = (void *)array + data->raw_size;
	}

	if (type & PERF_SAMPLE_BRANCH_STACK) {
		const u64 max_branch_nr = UINT64_MAX /
					  sizeof(struct branch_entry);

		OVERFLOW_CHECK_u64(array);
		data->branch_stack = (struct branch_stack *)array++;

		if (data->branch_stack->nr > max_branch_nr)
			return -EFAULT;
		sz = data->branch_stack->nr * sizeof(struct branch_entry);
		OVERFLOW_CHECK(array, sz, max_size);
		array = (void *)array + sz;
	}

	if (type & PERF_SAMPLE_REGS_USER) {
		OVERFLOW_CHECK_u64(array);
		data->user_regs.abi = *array;
		array++;

		if (data->user_regs.abi) {
			u64 mask = evsel->attr.sample_regs_user;

			sz = hweight_long(mask) * sizeof(u64);
			OVERFLOW_CHECK(array, sz, max_size);
			data->user_regs.mask = mask;
			data->user_regs.regs = (u64 *)array;
			array = (void *)array + sz;
		}
	}

	if (type & PERF_SAMPLE_STACK_USER) {
		OVERFLOW_CHECK_u64(array);
		sz = *array++;

		data->user_stack.offset = ((char *)(array - 1)
					  - (char *) event);

		if (!sz) {
			data->user_stack.size = 0;
		} else {
			OVERFLOW_CHECK(array, sz, max_size);
			data->user_stack.data = (char *)array;
			array = (void *)array + sz;
			OVERFLOW_CHECK_u64(array);
			data->user_stack.size = *array++;
			if (WARN_ONCE(data->user_stack.size > sz,
				      "user stack dump failure\n"))
				return -EFAULT;
		}
	}

	data->weight = 0;
	if (type & PERF_SAMPLE_WEIGHT) {
		OVERFLOW_CHECK_u64(array);
		data->weight = *array;
		array++;
	}

	data->data_src = PERF_MEM_DATA_SRC_NONE;
	if (type & PERF_SAMPLE_DATA_SRC) {
		OVERFLOW_CHECK_u64(array);
		data->data_src = *array;
		array++;
	}

	data->transaction = 0;
	if (type & PERF_SAMPLE_TRANSACTION) {
		OVERFLOW_CHECK_u64(array);
		data->transaction = *array;
		array++;
	}

	return 0;
}

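/*
 * Note the PERF_SAMPLE_STACK_USER layout parsed above: the kernel
 * emits { u64 size, data[size], u64 dyn_size }, where dyn_size is how
 * much of the reserved dump area was actually filled; dyn_size > size
 * can only mean a corrupt event.
 */
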
size_t perf_event__sample_event_size(const struct perf_sample *sample, u64 type,
				     u64 read_format)
{
	size_t sz, result = sizeof(struct sample_event);

	if (type & PERF_SAMPLE_IDENTIFIER)
		result += sizeof(u64);

	if (type & PERF_SAMPLE_IP)
		result += sizeof(u64);

	if (type & PERF_SAMPLE_TID)
		result += sizeof(u64);

	if (type & PERF_SAMPLE_TIME)
		result += sizeof(u64);

	if (type & PERF_SAMPLE_ADDR)
		result += sizeof(u64);

	if (type & PERF_SAMPLE_ID)
		result += sizeof(u64);

	if (type & PERF_SAMPLE_STREAM_ID)
		result += sizeof(u64);

	if (type & PERF_SAMPLE_CPU)
		result += sizeof(u64);

	if (type & PERF_SAMPLE_PERIOD)
		result += sizeof(u64);

	if (type & PERF_SAMPLE_READ) {
		result += sizeof(u64);
		if (read_format & PERF_FORMAT_TOTAL_TIME_ENABLED)
			result += sizeof(u64);
		if (read_format & PERF_FORMAT_TOTAL_TIME_RUNNING)
			result += sizeof(u64);
		/* PERF_FORMAT_ID is forced for PERF_SAMPLE_READ */
		if (read_format & PERF_FORMAT_GROUP) {
			sz = sample->read.group.nr *
			     sizeof(struct sample_read_value);
			result += sz;
		} else {
			result += sizeof(u64);
		}
	}

	if (type & PERF_SAMPLE_CALLCHAIN) {
		sz = (sample->callchain->nr + 1) * sizeof(u64);
		result += sz;
	}

	if (type & PERF_SAMPLE_RAW) {
		result += sizeof(u32);
		result += sample->raw_size;
	}

	if (type & PERF_SAMPLE_BRANCH_STACK) {
		sz = sample->branch_stack->nr * sizeof(struct branch_entry);
		sz += sizeof(u64);
		result += sz;
	}

	if (type & PERF_SAMPLE_REGS_USER) {
		if (sample->user_regs.abi) {
			result += sizeof(u64);
			sz = hweight_long(sample->user_regs.mask) * sizeof(u64);
			result += sz;
		} else {
			result += sizeof(u64);
		}
	}

	if (type & PERF_SAMPLE_STACK_USER) {
		sz = sample->user_stack.size;
		result += sizeof(u64);
		if (sz) {
			result += sz;
			result += sizeof(u64);
		}
	}

	if (type & PERF_SAMPLE_WEIGHT)
		result += sizeof(u64);

	if (type & PERF_SAMPLE_DATA_SRC)
		result += sizeof(u64);

	if (type & PERF_SAMPLE_TRANSACTION)
		result += sizeof(u64);

	return result;
}

int perf_event__synthesize_sample(union perf_event *event, u64 type,
				  u64 read_format,
				  const struct perf_sample *sample,
				  bool swapped)
{
	u64 *array;
	size_t sz;
	/*
	 * used for cross-endian analysis. See git commit 65014ab3
	 * for why this goofiness is needed.
	 */
	union u64_swap u;

	array = event->sample.array;

	if (type & PERF_SAMPLE_IDENTIFIER) {
		*array = sample->id;
		array++;
	}

	if (type & PERF_SAMPLE_IP) {
		*array = sample->ip;
		array++;
	}

	if (type & PERF_SAMPLE_TID) {
		u.val32[0] = sample->pid;
		u.val32[1] = sample->tid;
		if (swapped) {
			/*
			 * Inverse of what is done in perf_evsel__parse_sample
			 */
			u.val32[0] = bswap_32(u.val32[0]);
			u.val32[1] = bswap_32(u.val32[1]);
			u.val64 = bswap_64(u.val64);
		}

		*array = u.val64;
		array++;
	}

	if (type & PERF_SAMPLE_TIME) {
		*array = sample->time;
		array++;
	}

	if (type & PERF_SAMPLE_ADDR) {
		*array = sample->addr;
		array++;
	}

	if (type & PERF_SAMPLE_ID) {
		*array = sample->id;
		array++;
	}

	if (type & PERF_SAMPLE_STREAM_ID) {
		*array = sample->stream_id;
		array++;
	}

	if (type & PERF_SAMPLE_CPU) {
		u.val32[0] = sample->cpu;
		if (swapped) {
			/*
			 * Inverse of what is done in perf_evsel__parse_sample
			 */
			u.val32[0] = bswap_32(u.val32[0]);
			u.val64 = bswap_64(u.val64);
		}
		*array = u.val64;
		array++;
	}

	if (type & PERF_SAMPLE_PERIOD) {
		*array = sample->period;
		array++;
	}

	if (type & PERF_SAMPLE_READ) {
		if (read_format & PERF_FORMAT_GROUP)
			*array = sample->read.group.nr;
		else
			*array = sample->read.one.value;
		array++;

		if (read_format & PERF_FORMAT_TOTAL_TIME_ENABLED) {
			*array = sample->read.time_enabled;
			array++;
		}

		if (read_format & PERF_FORMAT_TOTAL_TIME_RUNNING) {
			*array = sample->read.time_running;
			array++;
		}

		/* PERF_FORMAT_ID is forced for PERF_SAMPLE_READ */
		if (read_format & PERF_FORMAT_GROUP) {
			sz = sample->read.group.nr *
			     sizeof(struct sample_read_value);
			memcpy(array, sample->read.group.values, sz);
			array = (void *)array + sz;
		} else {
			*array = sample->read.one.id;
			array++;
		}
	}

	if (type & PERF_SAMPLE_CALLCHAIN) {
		sz = (sample->callchain->nr + 1) * sizeof(u64);
		memcpy(array, sample->callchain, sz);
		array = (void *)array + sz;
	}

	if (type & PERF_SAMPLE_RAW) {
		u.val32[0] = sample->raw_size;
		if (WARN_ONCE(swapped,
			      "Endianness of raw data not corrected!\n")) {
			/*
			 * Inverse of what is done in perf_evsel__parse_sample
			 */
			u.val32[0] = bswap_32(u.val32[0]);
			u.val32[1] = bswap_32(u.val32[1]);
			u.val64 = bswap_64(u.val64);
		}
		*array = u.val64;
		array = (void *)array + sizeof(u32);

		memcpy(array, sample->raw_data, sample->raw_size);
		array = (void *)array + sample->raw_size;
	}

	if (type & PERF_SAMPLE_BRANCH_STACK) {
		sz = sample->branch_stack->nr * sizeof(struct branch_entry);
		sz += sizeof(u64);
		memcpy(array, sample->branch_stack, sz);
		array = (void *)array + sz;
	}

	if (type & PERF_SAMPLE_REGS_USER) {
		if (sample->user_regs.abi) {
			*array++ = sample->user_regs.abi;
			sz = hweight_long(sample->user_regs.mask) * sizeof(u64);
			memcpy(array, sample->user_regs.regs, sz);
			array = (void *)array + sz;
		} else {
			*array++ = 0;
		}
	}

	if (type & PERF_SAMPLE_STACK_USER) {
		sz = sample->user_stack.size;
		*array++ = sz;
		if (sz) {
			memcpy(array, sample->user_stack.data, sz);
			array = (void *)array + sz;
			*array++ = sz;
		}
	}

	if (type & PERF_SAMPLE_WEIGHT) {
		*array = sample->weight;
		array++;
	}

	if (type & PERF_SAMPLE_DATA_SRC) {
		*array = sample->data_src;
		array++;
	}

	if (type & PERF_SAMPLE_TRANSACTION) {
		*array = sample->transaction;
		array++;
	}

	return 0;
}

struct format_field *perf_evsel__field(struct perf_evsel *evsel, const char *name)
{
	return pevent_find_field(evsel->tp_format, name);
}

void *perf_evsel__rawptr(struct perf_evsel *evsel, struct perf_sample *sample,
			 const char *name)
{
	struct format_field *field = perf_evsel__field(evsel, name);
	int offset;

	if (!field)
		return NULL;

	offset = field->offset;

	if (field->flags & FIELD_IS_DYNAMIC) {
		offset = *(int *)(sample->raw_data + field->offset);
		offset &= 0xffff;
	}

	return sample->raw_data + offset;
}

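/*
 * For dynamic fields (e.g. __data_loc strings) the value stored at
 * field->offset is a descriptor whose low 16 bits are the real offset
 * of the payload inside the raw data, hence the "& 0xffff" above.
 */
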
u64 perf_evsel__intval(struct perf_evsel *evsel, struct perf_sample *sample,
		       const char *name)
{
	struct format_field *field = perf_evsel__field(evsel, name);
	void *ptr;
	u64 value;

	if (!field)
		return 0;

	ptr = sample->raw_data + field->offset;

	switch (field->size) {
	case 1:
		return *(u8 *)ptr;
	case 2:
		value = *(u16 *)ptr;
		break;
	case 4:
		value = *(u32 *)ptr;
		break;
	case 8:
		value = *(u64 *)ptr;
		break;
	default:
		return 0;
	}

	if (!evsel->needs_swap)
		return value;

	switch (field->size) {
	case 2:
		return bswap_16(value);
	case 4:
		return bswap_32(value);
	case 8:
		return bswap_64(value);
	default:
		return 0;
	}

	return 0;
}
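
/*
 * Example: for a "sched:sched_switch" sample,
 * perf_evsel__intval(evsel, sample, "prev_pid") pulls prev_pid out of
 * the raw tracepoint payload, byte-swapping it when the perf.data file
 * comes from a machine of the other endianness.
 */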

static int comma_fprintf(FILE *fp, bool *first, const char *fmt, ...)
{
	va_list args;
	int ret = 0;

	if (!*first) {
		ret += fprintf(fp, ",");
	} else {
		ret += fprintf(fp, ":");
		*first = false;
	}

	va_start(args, fmt);
	ret += vfprintf(fp, fmt, args);
	va_end(args);
	return ret;
}

static int __if_fprintf(FILE *fp, bool *first, const char *field, u64 value)
{
	if (value == 0)
		return 0;

	return comma_fprintf(fp, first, " %s: %" PRIu64, field, value);
}

#define if_print(field) printed += __if_fprintf(fp, &first, #field, evsel->attr.field)

struct bit_names {
	int bit;
	const char *name;
};

static int bits__fprintf(FILE *fp, const char *field, u64 value,
			 struct bit_names *bits, bool *first)
{
	int i = 0, printed = comma_fprintf(fp, first, " %s: ", field);
	bool first_bit = true;

	do {
		if (value & bits[i].bit) {
			printed += fprintf(fp, "%s%s", first_bit ? "" : "|", bits[i].name);
			first_bit = false;
		}
	} while (bits[++i].name != NULL);

	return printed;
}

static int sample_type__fprintf(FILE *fp, bool *first, u64 value)
{
#define bit_name(n) { PERF_SAMPLE_##n, #n }
	struct bit_names bits[] = {
		bit_name(IP), bit_name(TID), bit_name(TIME), bit_name(ADDR),
		bit_name(READ), bit_name(CALLCHAIN), bit_name(ID), bit_name(CPU),
		bit_name(PERIOD), bit_name(STREAM_ID), bit_name(RAW),
		bit_name(BRANCH_STACK), bit_name(REGS_USER), bit_name(STACK_USER),
		bit_name(IDENTIFIER),
		{ .name = NULL, }
	};
#undef bit_name
	return bits__fprintf(fp, "sample_type", value, bits, first);
}

static int read_format__fprintf(FILE *fp, bool *first, u64 value)
{
#define bit_name(n) { PERF_FORMAT_##n, #n }
	struct bit_names bits[] = {
		bit_name(TOTAL_TIME_ENABLED), bit_name(TOTAL_TIME_RUNNING),
		bit_name(ID), bit_name(GROUP),
		{ .name = NULL, }
	};
#undef bit_name
	return bits__fprintf(fp, "read_format", value, bits, first);
}

int perf_evsel__fprintf(struct perf_evsel *evsel,
			struct perf_attr_details *details, FILE *fp)
{
	bool first = true;
	int printed = 0;

	if (details->event_group) {
		struct perf_evsel *pos;

		if (!perf_evsel__is_group_leader(evsel))
			return 0;

		if (evsel->nr_members > 1)
			printed += fprintf(fp, "%s{", evsel->group_name ?: "");

		printed += fprintf(fp, "%s", perf_evsel__name(evsel));
		for_each_group_member(pos, evsel)
			printed += fprintf(fp, ",%s", perf_evsel__name(pos));

		if (evsel->nr_members > 1)
			printed += fprintf(fp, "}");
		goto out;
	}

	printed += fprintf(fp, "%s", perf_evsel__name(evsel));

	if (details->verbose || details->freq) {
		printed += comma_fprintf(fp, &first, " sample_freq=%" PRIu64,
					 (u64)evsel->attr.sample_freq);
	}

	if (details->verbose) {
		if_print(type);
		if_print(config);
		if_print(config1);
		if_print(config2);
		if_print(size);
		printed += sample_type__fprintf(fp, &first, evsel->attr.sample_type);
		if (evsel->attr.read_format)
			printed += read_format__fprintf(fp, &first, evsel->attr.read_format);
		if_print(disabled);
		if_print(inherit);
		if_print(pinned);
		if_print(exclusive);
		if_print(exclude_user);
		if_print(exclude_kernel);
		if_print(exclude_hv);
		if_print(exclude_idle);
		if_print(mmap);
		if_print(mmap2);
		if_print(comm);
		if_print(freq);
		if_print(inherit_stat);
		if_print(enable_on_exec);
		if_print(task);
		if_print(watermark);
		if_print(precise_ip);
		if_print(mmap_data);
		if_print(sample_id_all);
		if_print(exclude_host);
		if_print(exclude_guest);
		if_print(__reserved_1);
		if_print(wakeup_events);
		if_print(bp_type);
		if_print(branch_sample_type);
	}
out:
	fputc('\n', fp);
	return ++printed;
}

bool perf_evsel__fallback(struct perf_evsel *evsel, int err,
			  char *msg, size_t msgsize)
{
	if ((err == ENOENT || err == ENXIO || err == ENODEV) &&
	    evsel->attr.type == PERF_TYPE_HARDWARE &&
	    evsel->attr.config == PERF_COUNT_HW_CPU_CYCLES) {
		/*
		 * If it's cycles then fall back to hrtimer based
		 * cpu-clock-tick sw counter, which is always available even if
		 * no PMU support.
		 *
		 * PPC returns ENXIO until 2.6.37 (behavior changed with commit
		 * b0a873e).
		 */
		scnprintf(msg, msgsize, "%s",
"The cycles event is not supported, trying to fall back to cpu-clock-ticks");

		evsel->attr.type   = PERF_TYPE_SOFTWARE;
		evsel->attr.config = PERF_COUNT_SW_CPU_CLOCK;

		zfree(&evsel->name);
		return true;
	}

	return false;
}

int perf_evsel__open_strerror(struct perf_evsel *evsel, struct target *target,
			      int err, char *msg, size_t size)
{
	switch (err) {
	case EPERM:
	case EACCES:
		return scnprintf(msg, size,
		 "You may not have permission to collect %sstats.\n"
		 "Consider tweaking /proc/sys/kernel/perf_event_paranoid:\n"
		 " -1 - Not paranoid at all\n"
		 "  0 - Disallow raw tracepoint access for unpriv\n"
		 "  1 - Disallow cpu events for unpriv\n"
		 "  2 - Disallow kernel profiling for unpriv",
				 target->system_wide ? "system-wide " : "");
	case ENOENT:
		return scnprintf(msg, size, "The %s event is not supported.",
				 perf_evsel__name(evsel));
	case EMFILE:
		return scnprintf(msg, size, "%s",
			 "Too many events are opened.\n"
			 "Try again after reducing the number of events.");
	case ENODEV:
		if (target->cpu_list)
			return scnprintf(msg, size, "%s",
	 "No such device - did you specify an out-of-range profile CPU?\n");
		break;
	case EOPNOTSUPP:
		if (evsel->attr.precise_ip)
			return scnprintf(msg, size, "%s",
	"\'precise\' request may not be supported. Try removing 'p' modifier.");
#if defined(__i386__) || defined(__x86_64__)
		if (evsel->attr.type == PERF_TYPE_HARDWARE)
			return scnprintf(msg, size, "%s",
	"No hardware sampling interrupt available.\n"
	"No APIC? If so then you can boot the kernel with the \"lapic\" boot parameter to force-enable it.");
#endif
		break;
	default:
		break;
	}

	return scnprintf(msg, size,
	"The sys_perf_event_open() syscall returned with %d (%s) for event (%s).\n"
	"/bin/dmesg may provide additional information.\n"
	"No CONFIG_PERF_EVENTS=y kernel support configured?\n",
			 err, strerror(err), perf_evsel__name(evsel));
}