/*
 * builtin-record.c
 *
 * Builtin record command: Record the profile of a workload
 * (or a CPU, or a PID) into the perf.data output file - for
 * later analysis via perf report.
 */
#include "builtin.h"

#include "perf.h"

#include "util/build-id.h"
#include "util/util.h"
#include <subcmd/parse-options.h>
#include "util/parse-events.h"

#include "util/callchain.h"
#include "util/cgroup.h"
#include "util/header.h"
#include "util/event.h"
#include "util/evlist.h"
#include "util/evsel.h"
#include "util/debug.h"
#include "util/session.h"
#include "util/tool.h"
#include "util/symbol.h"
#include "util/cpumap.h"
#include "util/thread_map.h"
#include "util/data.h"
#include "util/perf_regs.h"
#include "util/auxtrace.h"
#include "util/parse-branch-options.h"
#include "util/parse-regs-options.h"
#include "util/llvm-utils.h"
#include "util/bpf-loader.h"

#include <unistd.h>
#include <sched.h>
#include <sys/mman.h>


struct record {
	struct perf_tool	tool;
	struct record_opts	opts;
	u64			bytes_written;
	struct perf_data_file	file;
	struct auxtrace_record	*itr;
	struct perf_evlist	*evlist;
	struct perf_session	*session;
	const char		*progname;
	int			realtime_prio;
	bool			no_buildid;
	bool			no_buildid_set;
	bool			no_buildid_cache;
	bool			no_buildid_cache_set;
	bool			buildid_all;
	unsigned long long	samples;
};

static int record__write(struct record *rec, void *bf, size_t size)
{
	if (perf_data_file__write(rec->session->file, bf, size) < 0) {
		pr_err("failed to write perf data, error: %m\n");
		return -1;
	}

	rec->bytes_written += size;
	return 0;
}

static int process_synthesized_event(struct perf_tool *tool,
				     union perf_event *event,
				     struct perf_sample *sample __maybe_unused,
				     struct machine *machine __maybe_unused)
{
	struct record *rec = container_of(tool, struct record, tool);
	return record__write(rec, event, event->header.size);
}

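/*
 * Drain one mmap'ed ring buffer into the output file.  The kernel only
 * advances the head pointer; everything between md->prev (our last read
 * position) and the current head is copied out, in two chunks when the data
 * wraps around the end of the buffer, after which the tail is updated via
 * perf_evlist__mmap_consume() so the kernel can reuse the space.
 */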
static int record__mmap_read(struct record *rec, int idx)
{
	struct perf_mmap *md = &rec->evlist->mmap[idx];
	u64 head = perf_mmap__read_head(md);
	u64 old = md->prev;
	unsigned char *data = md->base + page_size;
	unsigned long size;
	void *buf;
	int rc = 0;

	if (old == head)
		return 0;

	rec->samples++;

	size = head - old;

	if ((old & md->mask) + size != (head & md->mask)) {
		buf = &data[old & md->mask];
		size = md->mask + 1 - (old & md->mask);
		old += size;

		if (record__write(rec, buf, size) < 0) {
			rc = -1;
			goto out;
		}
	}

	buf = &data[old & md->mask];
	size = head - old;
	old += size;

	if (record__write(rec, buf, size) < 0) {
		rc = -1;
		goto out;
	}

	md->prev = old;
	perf_evlist__mmap_consume(rec->evlist, idx);
out:
	return rc;
}

static volatile int done;
static volatile int signr = -1;
static volatile int child_finished;
static volatile int auxtrace_snapshot_enabled;
static volatile int auxtrace_snapshot_err;
static volatile int auxtrace_record__snapshot_started;

static void sig_handler(int sig)
{
	if (sig == SIGCHLD)
		child_finished = 1;
	else
		signr = sig;

	done = 1;
}

static void record__sig_exit(void)
{
	if (signr == -1)
		return;

	signal(signr, SIG_DFL);
	raise(signr);
}

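/*
 * AUX area tracing support (used e.g. for Intel PT/BTS style hardware
 * traces).  When perf is built without HAVE_AUXTRACE_SUPPORT the helpers
 * below are replaced by empty stubs so the main record loop can call them
 * unconditionally.
 */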
#ifdef HAVE_AUXTRACE_SUPPORT

static int record__process_auxtrace(struct perf_tool *tool,
				    union perf_event *event, void *data1,
				    size_t len1, void *data2, size_t len2)
{
	struct record *rec = container_of(tool, struct record, tool);
	struct perf_data_file *file = &rec->file;
	size_t padding;
	u8 pad[8] = {0};

	if (!perf_data_file__is_pipe(file)) {
		off_t file_offset;
		int fd = perf_data_file__fd(file);
		int err;

		file_offset = lseek(fd, 0, SEEK_CUR);
		if (file_offset == -1)
			return -1;
		err = auxtrace_index__auxtrace_event(&rec->session->auxtrace_index,
						     event, file_offset);
		if (err)
			return err;
	}

	/* event.auxtrace.size includes padding, see __auxtrace_mmap__read() */
	padding = (len1 + len2) & 7;
	if (padding)
		padding = 8 - padding;

	record__write(rec, event, event->header.size);
	record__write(rec, data1, len1);
	if (len2)
		record__write(rec, data2, len2);
	record__write(rec, &pad, padding);

	return 0;
}

static int record__auxtrace_mmap_read(struct record *rec,
				      struct auxtrace_mmap *mm)
{
	int ret;

	ret = auxtrace_mmap__read(mm, rec->itr, &rec->tool,
				  record__process_auxtrace);
	if (ret < 0)
		return ret;

	if (ret)
		rec->samples++;

	return 0;
}

static int record__auxtrace_mmap_read_snapshot(struct record *rec,
					       struct auxtrace_mmap *mm)
{
	int ret;

	ret = auxtrace_mmap__read_snapshot(mm, rec->itr, &rec->tool,
					   record__process_auxtrace,
					   rec->opts.auxtrace_snapshot_size);
	if (ret < 0)
		return ret;

	if (ret)
		rec->samples++;

	return 0;
}

static int record__auxtrace_read_snapshot_all(struct record *rec)
{
	int i;
	int rc = 0;

	for (i = 0; i < rec->evlist->nr_mmaps; i++) {
		struct auxtrace_mmap *mm =
				&rec->evlist->mmap[i].auxtrace_mmap;

		if (!mm->base)
			continue;

		if (record__auxtrace_mmap_read_snapshot(rec, mm) != 0) {
			rc = -1;
			goto out;
		}
	}
out:
	return rc;
}

static void record__read_auxtrace_snapshot(struct record *rec)
{
	pr_debug("Recording AUX area tracing snapshot\n");
	if (record__auxtrace_read_snapshot_all(rec) < 0) {
		auxtrace_snapshot_err = -1;
	} else {
		auxtrace_snapshot_err = auxtrace_record__snapshot_finish(rec->itr);
		if (!auxtrace_snapshot_err)
			auxtrace_snapshot_enabled = 1;
	}
}

#else

static inline
int record__auxtrace_mmap_read(struct record *rec __maybe_unused,
			       struct auxtrace_mmap *mm __maybe_unused)
{
	return 0;
}

static inline
void record__read_auxtrace_snapshot(struct record *rec __maybe_unused)
{
}

static inline
int auxtrace_record__snapshot_start(struct auxtrace_record *itr __maybe_unused)
{
	return 0;
}

#endif

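/*
 * Configure and open every event in the evlist.  If opening an event fails,
 * perf_evsel__fallback() may retry with a more portable setup (for instance
 * falling back from a hardware to a software event); remaining failures are
 * reported via perf_evsel__open_strerror().  On success the event filters
 * are applied and the per-cpu ring buffers, plus the optional AUX area
 * buffers, are mmap'ed.
 */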
static int record__open(struct record *rec)
{
	char msg[512];
	struct perf_evsel *pos;
	struct perf_evlist *evlist = rec->evlist;
	struct perf_session *session = rec->session;
	struct record_opts *opts = &rec->opts;
	int rc = 0;

	perf_evlist__config(evlist, opts);

	evlist__for_each(evlist, pos) {
try_again:
		if (perf_evsel__open(pos, pos->cpus, pos->threads) < 0) {
			if (perf_evsel__fallback(pos, errno, msg, sizeof(msg))) {
				if (verbose)
					ui__warning("%s\n", msg);
				goto try_again;
			}

			rc = -errno;
			perf_evsel__open_strerror(pos, &opts->target,
						  errno, msg, sizeof(msg));
			ui__error("%s\n", msg);
			goto out;
		}
	}

	if (perf_evlist__apply_filters(evlist, &pos)) {
		error("failed to set filter \"%s\" on event %s with %d (%s)\n",
			pos->filter, perf_evsel__name(pos), errno,
			strerror_r(errno, msg, sizeof(msg)));
		rc = -1;
		goto out;
	}

	if (perf_evlist__mmap_ex(evlist, opts->mmap_pages, false,
				 opts->auxtrace_mmap_pages,
				 opts->auxtrace_snapshot_mode) < 0) {
		if (errno == EPERM) {
			pr_err("Permission error mapping pages.\n"
			       "Consider increasing "
			       "/proc/sys/kernel/perf_event_mlock_kb,\n"
			       "or try again with a smaller value of -m/--mmap_pages.\n"
			       "(current value: %u,%u)\n",
			       opts->mmap_pages, opts->auxtrace_mmap_pages);
			rc = -errno;
		} else {
			pr_err("failed to mmap with %d (%s)\n", errno,
				strerror_r(errno, msg, sizeof(msg)));
			rc = -errno;
		}
		goto out;
	}

	session->evlist = evlist;
	perf_session__set_id_hdr_size(session);
out:
	return rc;
}

static int process_sample_event(struct perf_tool *tool,
				union perf_event *event,
				struct perf_sample *sample,
				struct perf_evsel *evsel,
				struct machine *machine)
{
	struct record *rec = container_of(tool, struct record, tool);

	rec->samples++;

	return build_id__mark_dso_hit(tool, event, sample, evsel, machine);
}

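/*
 * Read the just-written perf.data back once so that every DSO that received
 * a sample gets marked as hit (process_sample_event -> build_id__mark_dso_hit
 * above); the build-ids of the hit DSOs can then be written into the file
 * header by perf_session__write_header().
 */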
static int process_buildids(struct record *rec)
{
	struct perf_data_file *file = &rec->file;
	struct perf_session *session = rec->session;

	if (file->size == 0)
		return 0;

	/*
	 * During this process, it'll load kernel map and replace the
	 * dso->long_name to a real pathname it found.  In this case
	 * we prefer the vmlinux path like
	 *   /lib/modules/3.16.4/build/vmlinux
	 *
	 * rather than build-id path (in debug directory).
	 *   $HOME/.debug/.build-id/f0/6e17aa50adf4d00b88925e03775de107611551
	 */
	symbol_conf.ignore_vmlinux_buildid = true;

	/*
	 * If --buildid-all is given, it marks all DSO regardless of hits,
	 * so no need to process samples.
	 */
	if (rec->buildid_all)
		rec->tool.sample = NULL;

	return perf_session__process_events(session);
}

static void perf_event__synthesize_guest_os(struct machine *machine, void *data)
{
	int err;
	struct perf_tool *tool = data;
	/*
	 * As for guest kernels, when processing the record & report
	 * subcommands we arrange the module mmaps prior to the guest kernel
	 * mmap and trigger a preload of the DSOs, because default guest
	 * module symbols are loaded from guest kallsyms instead of
	 * /lib/modules/XXX/XXX.  This avoids missing symbols when the first
	 * address is in a module instead of in the guest kernel.
	 */
	err = perf_event__synthesize_modules(tool, process_synthesized_event,
					     machine);
	if (err < 0)
		pr_err("Couldn't record guest kernel [%d]'s reference"
		       " relocation symbol.\n", machine->pid);

	/*
	 * We use _stext for guest kernels because a guest kernel's
	 * /proc/kallsyms sometimes has no _text.
	 */
	err = perf_event__synthesize_kernel_mmap(tool, process_synthesized_event,
						 machine);
	if (err < 0)
		pr_err("Couldn't record guest kernel [%d]'s reference"
		       " relocation symbol.\n", machine->pid);
}

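/*
 * PERF_RECORD_FINISHED_ROUND is a synthetic, user space only event.  It is
 * written after each pass over all ring buffers and tells the report side
 * that everything received before it can safely be re-ordered by timestamp
 * and flushed.
 */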
static struct perf_event_header finished_round_event = {
	.size = sizeof(struct perf_event_header),
	.type = PERF_RECORD_FINISHED_ROUND,
};

static int record__mmap_read_all(struct record *rec)
{
	u64 bytes_written = rec->bytes_written;
	int i;
	int rc = 0;

	for (i = 0; i < rec->evlist->nr_mmaps; i++) {
		struct auxtrace_mmap *mm = &rec->evlist->mmap[i].auxtrace_mmap;

		if (rec->evlist->mmap[i].base) {
			if (record__mmap_read(rec, i) != 0) {
				rc = -1;
				goto out;
			}
		}

		if (mm->base && !rec->opts.auxtrace_snapshot_mode &&
		    record__auxtrace_mmap_read(rec, mm) != 0) {
			rc = -1;
			goto out;
		}
	}

	/*
	 * Mark the round finished in case we wrote
	 * at least one event.
	 */
	if (bytes_written != rec->bytes_written)
		rc = record__write(rec, &finished_round_event, sizeof(finished_round_event));

out:
	return rc;
}

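/*
 * Start with every perf.data header feature enabled and clear the ones that
 * cannot apply to this session (build-ids, tracing data, branch stack, AUX
 * trace).  HEADER_STAT is cleared unconditionally here; it is meant to be set
 * by 'perf stat record' instead.
 */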
static void record__init_features(struct record *rec)
{
	struct perf_session *session = rec->session;
	int feat;

	for (feat = HEADER_FIRST_FEATURE; feat < HEADER_LAST_FEATURE; feat++)
		perf_header__set_feat(&session->header, feat);

	if (rec->no_buildid)
		perf_header__clear_feat(&session->header, HEADER_BUILD_ID);

	if (!have_tracepoints(&rec->evlist->entries))
		perf_header__clear_feat(&session->header, HEADER_TRACING_DATA);

	if (!rec->opts.branch_stack)
		perf_header__clear_feat(&session->header, HEADER_BRANCH_STACK);

	if (!rec->opts.full_auxtrace)
		perf_header__clear_feat(&session->header, HEADER_AUXTRACE);

	perf_header__clear_feat(&session->header, HEADER_STAT);
}

static volatile int workload_exec_errno;

/*
 * perf_evlist__prepare_workload will send a SIGUSR1
 * if the fork fails, since we asked by setting its
 * want_signal to true.
 */
static void workload_exec_failed_signal(int signo __maybe_unused,
					siginfo_t *info,
					void *ucontext __maybe_unused)
{
	workload_exec_errno = info->si_value.sival_int;
	done = 1;
	child_finished = 1;
}

static void snapshot_sig_handler(int sig);

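/*
 * The bulk of 'perf record': create the session and output file, fork the
 * workload (the child waits before exec'ing until we explicitly start it),
 * open and mmap the events, write the file header, synthesize the existing
 * kernel/module/thread mmap records, then loop draining the ring buffers
 * until the workload exits or the user interrupts the run.
 */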
static int __cmd_record(struct record *rec, int argc, const char **argv)
{
	int err;
	int status = 0;
	unsigned long waking = 0;
	const bool forks = argc > 0;
	struct machine *machine;
	struct perf_tool *tool = &rec->tool;
	struct record_opts *opts = &rec->opts;
	struct perf_data_file *file = &rec->file;
	struct perf_session *session;
	bool disabled = false, draining = false;
	int fd;

	rec->progname = argv[0];

	atexit(record__sig_exit);
	signal(SIGCHLD, sig_handler);
	signal(SIGINT, sig_handler);
	signal(SIGTERM, sig_handler);
	if (rec->opts.auxtrace_snapshot_mode)
		signal(SIGUSR2, snapshot_sig_handler);
	else
		signal(SIGUSR2, SIG_IGN);

	session = perf_session__new(file, false, tool);
	if (session == NULL) {
		pr_err("Perf session creation failed.\n");
		return -1;
	}

	fd = perf_data_file__fd(file);
	rec->session = session;

	record__init_features(rec);

	if (forks) {
		err = perf_evlist__prepare_workload(rec->evlist, &opts->target,
						    argv, file->is_pipe,
						    workload_exec_failed_signal);
		if (err < 0) {
			pr_err("Couldn't run the workload!\n");
			status = err;
			goto out_delete_session;
		}
	}

	if (record__open(rec) != 0) {
		err = -1;
		goto out_child;
	}

	err = bpf__apply_obj_config();
	if (err) {
		char errbuf[BUFSIZ];

		bpf__strerror_apply_obj_config(err, errbuf, sizeof(errbuf));
		pr_err("ERROR: Apply config to BPF failed: %s\n",
			 errbuf);
		goto out_child;
	}

	/*
	 * Normally perf_session__new would do this, but it doesn't have the
	 * evlist.
	 */
	if (rec->tool.ordered_events && !perf_evlist__sample_id_all(rec->evlist)) {
		pr_warning("WARNING: No sample_id_all support, falling back to unordered processing\n");
		rec->tool.ordered_events = false;
	}

	if (!rec->evlist->nr_groups)
		perf_header__clear_feat(&session->header, HEADER_GROUP_DESC);

	if (file->is_pipe) {
		err = perf_header__write_pipe(fd);
		if (err < 0)
			goto out_child;
	} else {
		err = perf_session__write_header(session, rec->evlist, fd, false);
		if (err < 0)
			goto out_child;
	}

	if (!rec->no_buildid
	    && !perf_header__has_feat(&session->header, HEADER_BUILD_ID)) {
		pr_err("Couldn't generate buildids. "
		       "Use --no-buildid to profile anyway.\n");
		err = -1;
		goto out_child;
	}

	machine = &session->machines.host;

	if (file->is_pipe) {
		err = perf_event__synthesize_attrs(tool, session,
						   process_synthesized_event);
		if (err < 0) {
			pr_err("Couldn't synthesize attrs.\n");
			goto out_child;
		}

		if (have_tracepoints(&rec->evlist->entries)) {
			/*
			 * FIXME err <= 0 here actually means that
			 * there were no tracepoints so it's not really
			 * an error, just that we don't need to
			 * synthesize anything.  We really have to
			 * return this more properly and also
			 * propagate errors that now are calling die()
			 */
			err = perf_event__synthesize_tracing_data(tool, fd, rec->evlist,
								  process_synthesized_event);
			if (err <= 0) {
				pr_err("Couldn't record tracing data.\n");
				goto out_child;
			}
			rec->bytes_written += err;
		}
	}

	if (rec->opts.full_auxtrace) {
		err = perf_event__synthesize_auxtrace_info(rec->itr, tool,
					session, process_synthesized_event);
		if (err)
			goto out_delete_session;
	}

	err = perf_event__synthesize_kernel_mmap(tool, process_synthesized_event,
						 machine);
	if (err < 0)
		pr_err("Couldn't record kernel reference relocation symbol\n"
		       "Symbol resolution may be skewed if relocation was used (e.g. kexec).\n"
		       "Check /proc/kallsyms permission or run as root.\n");

	err = perf_event__synthesize_modules(tool, process_synthesized_event,
					     machine);
	if (err < 0)
		pr_err("Couldn't record kernel module information.\n"
		       "Symbol resolution may be skewed if relocation was used (e.g. kexec).\n"
		       "Check /proc/modules permission or run as root.\n");

	if (perf_guest) {
		machines__process_guests(&session->machines,
					 perf_event__synthesize_guest_os, tool);
	}

	err = __machine__synthesize_threads(machine, tool, &opts->target, rec->evlist->threads,
					    process_synthesized_event, opts->sample_address,
					    opts->proc_map_timeout);
	if (err != 0)
		goto out_child;

	if (rec->realtime_prio) {
		struct sched_param param;

		param.sched_priority = rec->realtime_prio;
		if (sched_setscheduler(0, SCHED_FIFO, &param)) {
			pr_err("Could not set realtime priority.\n");
			err = -1;
			goto out_child;
		}
	}

	/*
	 * When perf is starting the traced process, all the events
	 * (apart from group members) have enable_on_exec=1 set,
	 * so don't spoil it by prematurely enabling them.
	 */
	if (!target__none(&opts->target) && !opts->initial_delay)
		perf_evlist__enable(rec->evlist);

	/*
	 * Let the child rip
	 */
	if (forks) {
		union perf_event *event;

		event = malloc(sizeof(event->comm) + machine->id_hdr_size);
		if (event == NULL) {
			err = -ENOMEM;
			goto out_child;
		}

		/*
		 * Some H/W events are generated before the COMM event,
		 * which is emitted during exec(), so perf script
		 * cannot see a correct process name for those events.
		 * Synthesize a COMM event to prevent that.
		 */
		perf_event__synthesize_comm(tool, event,
					    rec->evlist->workload.pid,
					    process_synthesized_event,
					    machine);
		free(event);

		perf_evlist__start_workload(rec->evlist);
	}

	if (opts->initial_delay) {
		usleep(opts->initial_delay * 1000);
		perf_evlist__enable(rec->evlist);
	}

	auxtrace_snapshot_enabled = 1;
	for (;;) {
		unsigned long long hits = rec->samples;

		if (record__mmap_read_all(rec) < 0) {
			auxtrace_snapshot_enabled = 0;
			err = -1;
			goto out_child;
		}

		if (auxtrace_record__snapshot_started) {
			auxtrace_record__snapshot_started = 0;
			if (!auxtrace_snapshot_err)
				record__read_auxtrace_snapshot(rec);
			if (auxtrace_snapshot_err) {
				pr_err("AUX area tracing snapshot failed\n");
				err = -1;
				goto out_child;
			}
		}

		if (hits == rec->samples) {
			if (done || draining)
				break;
			err = perf_evlist__poll(rec->evlist, -1);
			/*
			 * Propagate an error only if there is one; ignore a
			 * positive number of returned events and an
			 * interrupted poll.
			 */
			if (err > 0 || (err < 0 && errno == EINTR))
				err = 0;
			waking++;

			if (perf_evlist__filter_pollfd(rec->evlist, POLLERR | POLLHUP) == 0)
				draining = true;
		}

		/*
		 * When perf is starting the traced process, at the end events
		 * die with the process and we wait for that. Thus no need to
		 * disable events in this case.
		 */
		if (done && !disabled && !target__none(&opts->target)) {
			auxtrace_snapshot_enabled = 0;
			perf_evlist__disable(rec->evlist);
			disabled = true;
		}
	}
	auxtrace_snapshot_enabled = 0;

	if (forks && workload_exec_errno) {
		char msg[STRERR_BUFSIZE];
		const char *emsg = strerror_r(workload_exec_errno, msg, sizeof(msg));
		pr_err("Workload failed: %s\n", emsg);
		err = -1;
		goto out_child;
	}

	if (!quiet)
		fprintf(stderr, "[ perf record: Woken up %ld times to write data ]\n", waking);

out_child:
	if (forks) {
		int exit_status;

		if (!child_finished)
			kill(rec->evlist->workload.pid, SIGTERM);

		wait(&exit_status);

		if (err < 0)
			status = err;
		else if (WIFEXITED(exit_status))
			status = WEXITSTATUS(exit_status);
		else if (WIFSIGNALED(exit_status))
			signr = WTERMSIG(exit_status);
	} else
		status = err;

	/* this will be recalculated during process_buildids() */
	rec->samples = 0;

	if (!err && !file->is_pipe) {
		rec->session->header.data_size += rec->bytes_written;
		file->size = lseek(perf_data_file__fd(file), 0, SEEK_CUR);

		if (!rec->no_buildid) {
			process_buildids(rec);

			if (rec->buildid_all)
				dsos__hit_all(rec->session);
		}
		perf_session__write_header(rec->session, rec->evlist, fd, true);
	}

	if (!err && !quiet) {
		char samples[128];

		if (rec->samples && !rec->opts.full_auxtrace)
			scnprintf(samples, sizeof(samples),
				  " (%" PRIu64 " samples)", rec->samples);
		else
			samples[0] = '\0';

		fprintf(stderr, "[ perf record: Captured and wrote %.3f MB %s%s ]\n",
			perf_data_file__size(file) / 1024.0 / 1024.0,
			file->path, samples);
	}

out_delete_session:
	perf_session__delete(session);
	return status;
}

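/*
 * Call-graph option handling: '-g' enables call-graph recording with the
 * current default mode, while '--call-graph <mode>[,<dump size>]' selects
 * fp, dwarf or lbr explicitly via parse_callchain_record_opt().  DWARF mode
 * additionally turns on sample_address, since the unwinding is done at
 * post-processing time from the sampled stack and registers.
 */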
static void callchain_debug(void)
{
	static const char *str[CALLCHAIN_MAX] = { "NONE", "FP", "DWARF", "LBR" };

	pr_debug("callchain: type %s\n", str[callchain_param.record_mode]);

	if (callchain_param.record_mode == CALLCHAIN_DWARF)
		pr_debug("callchain: stack dump size %d\n",
			 callchain_param.dump_size);
}

int record_parse_callchain_opt(const struct option *opt,
			       const char *arg,
			       int unset)
{
	int ret;
	struct record_opts *record = (struct record_opts *)opt->value;

	record->callgraph_set = true;
	callchain_param.enabled = !unset;

	/* --no-call-graph */
	if (unset) {
		callchain_param.record_mode = CALLCHAIN_NONE;
		pr_debug("callchain: disabled\n");
		return 0;
	}

	ret = parse_callchain_record_opt(arg, &callchain_param);
	if (!ret) {
		/* Enable data address sampling for DWARF unwind. */
		if (callchain_param.record_mode == CALLCHAIN_DWARF)
			record->sample_address = true;
		callchain_debug();
	}

	return ret;
}

int record_callchain_opt(const struct option *opt,
			 const char *arg __maybe_unused,
			 int unset __maybe_unused)
{
	struct record_opts *record = (struct record_opts *)opt->value;

	record->callgraph_set = true;
	callchain_param.enabled = true;

	if (callchain_param.record_mode == CALLCHAIN_NONE)
		callchain_param.record_mode = CALLCHAIN_FP;

	callchain_debug();
	return 0;
}

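/*
 * Per-user configuration, read by the perf_config() call in cmd_record().
 * For example, in ~/.perfconfig:
 *
 *	[record]
 *		build-id = skip
 *
 * maps to "record.build-id" here; "record.call-graph" is forwarded to the
 * generic "call-graph.record-mode" handling.
 */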
static int perf_record_config(const char *var, const char *value, void *cb)
{
	struct record *rec = cb;

	if (!strcmp(var, "record.build-id")) {
		if (!strcmp(value, "cache"))
			rec->no_buildid_cache = false;
		else if (!strcmp(value, "no-cache"))
			rec->no_buildid_cache = true;
		else if (!strcmp(value, "skip"))
			rec->no_buildid = true;
		else
			return -1;
		return 0;
	}
	if (!strcmp(var, "record.call-graph"))
		var = "call-graph.record-mode"; /* fall-through */

	return perf_default_config(var, value, cb);
}

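/*
 * -k/--clockid: translate a clock name (or a raw number) into the value kept
 * in opts->clockid, which is expected to be copied into
 * perf_event_attr.clockid (with use_clockid set) when the events are
 * configured.  Unknown names fall through to the warning in parse_clockid().
 */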
struct clockid_map {
	const char *name;
	int clockid;
};

#define CLOCKID_MAP(n, c)	\
	{ .name = n, .clockid = (c), }

#define CLOCKID_END	{ .name = NULL, }


/*
 * Add the missing ones, we need to build on many distros...
 */
#ifndef CLOCK_MONOTONIC_RAW
#define CLOCK_MONOTONIC_RAW 4
#endif
#ifndef CLOCK_BOOTTIME
#define CLOCK_BOOTTIME 7
#endif
#ifndef CLOCK_TAI
#define CLOCK_TAI 11
#endif

static const struct clockid_map clockids[] = {
	/* available for all events, NMI safe */
	CLOCKID_MAP("monotonic", CLOCK_MONOTONIC),
	CLOCKID_MAP("monotonic_raw", CLOCK_MONOTONIC_RAW),

	/* available for some events */
	CLOCKID_MAP("realtime", CLOCK_REALTIME),
	CLOCKID_MAP("boottime", CLOCK_BOOTTIME),
	CLOCKID_MAP("tai", CLOCK_TAI),

	/* available for the lazy */
	CLOCKID_MAP("mono", CLOCK_MONOTONIC),
	CLOCKID_MAP("raw", CLOCK_MONOTONIC_RAW),
	CLOCKID_MAP("real", CLOCK_REALTIME),
	CLOCKID_MAP("boot", CLOCK_BOOTTIME),

	CLOCKID_END,
};

static int parse_clockid(const struct option *opt, const char *str, int unset)
{
	struct record_opts *opts = (struct record_opts *)opt->value;
	const struct clockid_map *cm;
	const char *ostr = str;

	if (unset) {
		opts->use_clockid = 0;
		return 0;
	}

	/* no arg passed */
	if (!str)
		return 0;

	/* no setting it twice */
	if (opts->use_clockid)
		return -1;

	opts->use_clockid = true;

	/* if it's a number, we're done */
	if (sscanf(str, "%d", &opts->clockid) == 1)
		return 0;

	/* allow a "CLOCK_" prefix to the name */
	if (!strncasecmp(str, "CLOCK_", 6))
		str += 6;

	for (cm = clockids; cm->name; cm++) {
		if (!strcasecmp(str, cm->name)) {
			opts->clockid = cm->clockid;
			return 0;
		}
	}

	opts->use_clockid = false;
	ui__warning("unknown clockid %s, check man page\n", ostr);
	return -1;
}

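/*
 * -m/--mmap-pages takes "pages[,pages]": the first value sizes the regular
 * per-cpu data mmaps, the optional second value sizes the AUX area tracing
 * mmaps.  Both go through __perf_evlist__parse_mmap_pages(), so
 * human-readable sizes (e.g. "512K") should be accepted as well as plain
 * page counts.
 */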
static int record__parse_mmap_pages(const struct option *opt,
				    const char *str,
				    int unset __maybe_unused)
{
	struct record_opts *opts = opt->value;
	char *s, *p;
	unsigned int mmap_pages;
	int ret;

	if (!str)
		return -EINVAL;

	s = strdup(str);
	if (!s)
		return -ENOMEM;

	p = strchr(s, ',');
	if (p)
		*p = '\0';

	if (*s) {
		ret = __perf_evlist__parse_mmap_pages(&mmap_pages, s);
		if (ret)
			goto out_free;
		opts->mmap_pages = mmap_pages;
	}

	if (!p) {
		ret = 0;
		goto out_free;
	}

	ret = __perf_evlist__parse_mmap_pages(&mmap_pages, p + 1);
	if (ret)
		goto out_free;

	opts->auxtrace_mmap_pages = mmap_pages;

out_free:
	free(s);
	return ret;
}

static const char * const __record_usage[] = {
	"perf record [<options>] [<command>]",
	"perf record [<options>] -- <command> [<options>]",
	NULL
};
const char * const *record_usage = __record_usage;

/*
 * XXX Ideally would be local to cmd_record() and passed to a record__new
 * because we need to have access to it in record__exit, that is called
 * after cmd_record() exits, but since record_options need to be accessible to
 * builtin-script, leave it here.
 *
 * At least we don't ouch it in all the other functions here directly.
 *
 * Just say no to tons of global variables, sigh.
 */
static struct record record = {
	.opts = {
		.sample_time	     = true,
		.mmap_pages	     = UINT_MAX,
		.user_freq	     = UINT_MAX,
		.user_interval	     = ULLONG_MAX,
		.freq		     = 4000,
		.target		     = {
			.uses_mmap   = true,
			.default_per_cpu = true,
		},
		.proc_map_timeout    = 500,
	},
	.tool = {
		.sample		= process_sample_event,
		.fork		= perf_event__process_fork,
		.exit		= perf_event__process_exit,
		.comm		= perf_event__process_comm,
		.mmap		= perf_event__process_mmap,
		.mmap2		= perf_event__process_mmap2,
		.ordered_events	= true,
	},
};

const char record_callchain_help[] = CALLCHAIN_RECORD_HELP
	"\n\t\t\t\tDefault: fp";

/*
 * XXX Will stay a global variable till we fix builtin-script.c to stop messing
 * with it and switch to use the library functions in perf_evlist that came
 * from builtin-record.c, i.e. use record_opts,
 * perf_evlist__prepare_workload, etc instead of fork+exec'in 'perf record',
 * using pipes, etc.
 */
struct option __record_options[] = {
	OPT_CALLBACK('e', "event", &record.evlist, "event",
		     "event selector. use 'perf list' to list available events",
		     parse_events_option),
	OPT_CALLBACK(0, "filter", &record.evlist, "filter",
		     "event filter", parse_filter),
	OPT_CALLBACK_NOOPT(0, "exclude-perf", &record.evlist,
			   NULL, "don't record events from perf itself",
			   exclude_perf),
	OPT_STRING('p', "pid", &record.opts.target.pid, "pid",
		    "record events on existing process id"),
	OPT_STRING('t', "tid", &record.opts.target.tid, "tid",
		    "record events on existing thread id"),
	OPT_INTEGER('r', "realtime", &record.realtime_prio,
		    "collect data with this RT SCHED_FIFO priority"),
	OPT_BOOLEAN(0, "no-buffering", &record.opts.no_buffering,
		    "collect data without buffering"),
	OPT_BOOLEAN('R', "raw-samples", &record.opts.raw_samples,
		    "collect raw sample records from all opened counters"),
	OPT_BOOLEAN('a', "all-cpus", &record.opts.target.system_wide,
			    "system-wide collection from all CPUs"),
	OPT_STRING('C', "cpu", &record.opts.target.cpu_list, "cpu",
		    "list of cpus to monitor"),
	OPT_U64('c', "count", &record.opts.user_interval, "event period to sample"),
	OPT_STRING('o', "output", &record.file.path, "file",
		    "output file name"),
	OPT_BOOLEAN_SET('i', "no-inherit", &record.opts.no_inherit,
			&record.opts.no_inherit_set,
			"child tasks do not inherit counters"),
	OPT_UINTEGER('F', "freq", &record.opts.user_freq, "profile at this frequency"),
	OPT_CALLBACK('m', "mmap-pages", &record.opts, "pages[,pages]",
		     "number of mmap data pages and AUX area tracing mmap pages",
		     record__parse_mmap_pages),
	OPT_BOOLEAN(0, "group", &record.opts.group,
		    "put the counters into a counter group"),
	OPT_CALLBACK_NOOPT('g', NULL, &record.opts,
			   NULL, "enables call-graph recording" ,
			   &record_callchain_opt),
	OPT_CALLBACK(0, "call-graph", &record.opts,
		     "record_mode[,record_size]", record_callchain_help,
		     &record_parse_callchain_opt),
	OPT_INCR('v', "verbose", &verbose,
		    "be more verbose (show counter open errors, etc)"),
	OPT_BOOLEAN('q', "quiet", &quiet, "don't print any message"),
	OPT_BOOLEAN('s', "stat", &record.opts.inherit_stat,
		    "per thread counts"),
	OPT_BOOLEAN('d', "data", &record.opts.sample_address, "Record the sample addresses"),
	OPT_BOOLEAN_SET('T', "timestamp", &record.opts.sample_time,
			&record.opts.sample_time_set,
			"Record the sample timestamps"),
	OPT_BOOLEAN('P', "period", &record.opts.period, "Record the sample period"),
	OPT_BOOLEAN('n', "no-samples", &record.opts.no_samples,
		    "don't sample"),
	OPT_BOOLEAN_SET('N', "no-buildid-cache", &record.no_buildid_cache,
			&record.no_buildid_cache_set,
			"do not update the buildid cache"),
	OPT_BOOLEAN_SET('B', "no-buildid", &record.no_buildid,
			&record.no_buildid_set,
			"do not collect buildids in perf.data"),
	OPT_CALLBACK('G', "cgroup", &record.evlist, "name",
		     "monitor event in cgroup name only",
		     parse_cgroups),
	OPT_UINTEGER('D', "delay", &record.opts.initial_delay,
		  "ms to wait before starting measurement after program start"),
	OPT_STRING('u', "uid", &record.opts.target.uid_str, "user",
		   "user to profile"),

	OPT_CALLBACK_NOOPT('b', "branch-any", &record.opts.branch_stack,
		     "branch any", "sample any taken branches",
		     parse_branch_stack),

	OPT_CALLBACK('j', "branch-filter", &record.opts.branch_stack,
		     "branch filter mask", "branch stack filter modes",
		     parse_branch_stack),
	OPT_BOOLEAN('W', "weight", &record.opts.sample_weight,
		    "sample by weight (on special events only)"),
	OPT_BOOLEAN(0, "transaction", &record.opts.sample_transaction,
		    "sample transaction flags (special events only)"),
	OPT_BOOLEAN(0, "per-thread", &record.opts.target.per_thread,
		    "use per-thread mmaps"),
	OPT_CALLBACK_OPTARG('I', "intr-regs", &record.opts.sample_intr_regs, NULL, "any register",
		    "sample selected machine registers on interrupt,"
		    " use -I ? to list register names", parse_regs),
	OPT_BOOLEAN(0, "running-time", &record.opts.running_time,
		    "Record running/enabled time of read (:S) events"),
	OPT_CALLBACK('k', "clockid", &record.opts,
	"clockid", "clockid to use for events, see clock_gettime()",
	parse_clockid),
	OPT_STRING_OPTARG('S', "snapshot", &record.opts.auxtrace_snapshot_opts,
			  "opts", "AUX area tracing Snapshot Mode", ""),
	OPT_UINTEGER(0, "proc-map-timeout", &record.opts.proc_map_timeout,
			"per thread proc mmap processing timeout in ms"),
	OPT_BOOLEAN(0, "switch-events", &record.opts.record_switch_events,
		    "Record context switch events"),
	OPT_BOOLEAN_FLAG(0, "all-kernel", &record.opts.all_kernel,
			 "Configure all used events to run in kernel space.",
			 PARSE_OPT_EXCLUSIVE),
	OPT_BOOLEAN_FLAG(0, "all-user", &record.opts.all_user,
			 "Configure all used events to run in user space.",
			 PARSE_OPT_EXCLUSIVE),
	OPT_STRING(0, "clang-path", &llvm_param.clang_path, "clang path",
		   "clang binary to use for compiling BPF scriptlets"),
	OPT_STRING(0, "clang-opt", &llvm_param.clang_opt, "clang options",
		   "options passed to clang when compiling BPF scriptlets"),
	OPT_STRING(0, "vmlinux", &symbol_conf.vmlinux_name,
		   "file", "vmlinux pathname"),
	OPT_BOOLEAN(0, "buildid-all", &record.buildid_all,
		    "Record build-id of all DSOs regardless of hits"),
	OPT_END()
};

struct option *record_options = __record_options;

int cmd_record(int argc, const char **argv, const char *prefix __maybe_unused)
{
	int err;
	struct record *rec = &record;
	char errbuf[BUFSIZ];

#ifndef HAVE_LIBBPF_SUPPORT
# define set_nobuild(s, l, c) set_option_nobuild(record_options, s, l, "NO_LIBBPF=1", c)
	set_nobuild('\0', "clang-path", true);
	set_nobuild('\0', "clang-opt", true);
# undef set_nobuild
#endif

#ifndef HAVE_BPF_PROLOGUE
# if !defined (HAVE_DWARF_SUPPORT)
#  define REASON  "NO_DWARF=1"
# elif !defined (HAVE_LIBBPF_SUPPORT)
#  define REASON  "NO_LIBBPF=1"
# else
#  define REASON  "this architecture doesn't support BPF prologue"
# endif
# define set_nobuild(s, l, c) set_option_nobuild(record_options, s, l, REASON, c)
	set_nobuild('\0', "vmlinux", true);
# undef set_nobuild
# undef REASON
#endif

	rec->evlist = perf_evlist__new();
	if (rec->evlist == NULL)
		return -ENOMEM;

	perf_config(perf_record_config, rec);

	argc = parse_options(argc, argv, record_options, record_usage,
			    PARSE_OPT_STOP_AT_NON_OPTION);
	if (!argc && target__none(&rec->opts.target))
		usage_with_options(record_usage, record_options);

	if (nr_cgroups && !rec->opts.target.system_wide) {
		usage_with_options_msg(record_usage, record_options,
			"cgroup monitoring only available in system-wide mode");

	}
	if (rec->opts.record_switch_events &&
	    !perf_can_record_switch_events()) {
		ui__error("kernel does not support recording context switch events\n");
		parse_options_usage(record_usage, record_options, "switch-events", 0);
		return -EINVAL;
	}

	if (!rec->itr) {
		rec->itr = auxtrace_record__init(rec->evlist, &err);
		if (err)
			return err;
	}

	err = auxtrace_parse_snapshot_options(rec->itr, &rec->opts,
					      rec->opts.auxtrace_snapshot_opts);
	if (err)
		return err;

	err = -ENOMEM;

	symbol__init(NULL);

	if (symbol_conf.kptr_restrict)
		pr_warning(
"WARNING: Kernel address maps (/proc/{kallsyms,modules}) are restricted,\n"
"check /proc/sys/kernel/kptr_restrict.\n\n"
"Samples in kernel functions may not be resolved if a suitable vmlinux\n"
"file is not found in the buildid cache or in the vmlinux path.\n\n"
"Samples in kernel modules won't be resolved at all.\n\n"
"If some relocation was applied (e.g. kexec) symbols may be misresolved\n"
"even with a suitable vmlinux or kallsyms file.\n\n");

	if (rec->no_buildid_cache || rec->no_buildid)
		disable_buildid_cache();

	if (rec->evlist->nr_entries == 0 &&
	    perf_evlist__add_default(rec->evlist) < 0) {
		pr_err("Not enough memory for event selector list\n");
		goto out_symbol_exit;
	}

	if (rec->opts.target.tid && !rec->opts.no_inherit_set)
		rec->opts.no_inherit = true;

	err = target__validate(&rec->opts.target);
	if (err) {
		target__strerror(&rec->opts.target, err, errbuf, BUFSIZ);
		ui__warning("%s", errbuf);
	}

	err = target__parse_uid(&rec->opts.target);
	if (err) {
		int saved_errno = errno;

		target__strerror(&rec->opts.target, err, errbuf, BUFSIZ);
		ui__error("%s", errbuf);

		err = -saved_errno;
		goto out_symbol_exit;
	}

	err = -ENOMEM;
	if (perf_evlist__create_maps(rec->evlist, &rec->opts.target) < 0)
		usage_with_options(record_usage, record_options);

	err = auxtrace_record__options(rec->itr, rec->evlist, &rec->opts);
	if (err)
		goto out_symbol_exit;

	/*
	 * We take all buildids when the file contains AUX area tracing data,
	 * because we do not decode the trace; that would take too long.
	 */
	if (rec->opts.full_auxtrace)
		rec->buildid_all = true;

	if (record_opts__config(&rec->opts)) {
		err = -EINVAL;
		goto out_symbol_exit;
	}

	err = __cmd_record(&record, argc, argv);
out_symbol_exit:
	perf_evlist__delete(rec->evlist);
	symbol__exit();
	auxtrace_record__free(rec->itr);
	return err;
}

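/*
 * SIGUSR2 handler used in AUX area snapshot mode (-S): each signal asks the
 * auxtrace record code to capture a snapshot, which the main loop in
 * __cmd_record() then writes out via record__read_auxtrace_snapshot().
 */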
static void snapshot_sig_handler(int sig __maybe_unused)
{
	if (!auxtrace_snapshot_enabled)
		return;
	auxtrace_snapshot_enabled = 0;
	auxtrace_snapshot_err = auxtrace_record__snapshot_start(record.itr);
	auxtrace_record__snapshot_started = 1;
}