/*
 * builtin-record.c
 *
 * Builtin record command: Record the profile of a workload
 * (or a CPU, or a PID) into the perf.data output file - for
 * later analysis via perf report.
 */
16f762a2 8#include "builtin.h"
bf9e1876
IM
9
10#include "perf.h"
11
6122e4e4 12#include "util/build-id.h"
6eda5838 13#include "util/util.h"
4b6ab94e 14#include <subcmd/parse-options.h>
8ad8db37 15#include "util/parse-events.h"
6eda5838 16
8f651eae 17#include "util/callchain.h"
f14d5707 18#include "util/cgroup.h"
7c6a1c65 19#include "util/header.h"
66e274f3 20#include "util/event.h"
361c99a6 21#include "util/evlist.h"
69aad6f1 22#include "util/evsel.h"
8f28827a 23#include "util/debug.h"
94c744b6 24#include "util/session.h"
45694aa7 25#include "util/tool.h"
8d06367f 26#include "util/symbol.h"
a12b51c4 27#include "util/cpumap.h"
fd78260b 28#include "util/thread_map.h"
f5fc1412 29#include "util/data.h"
bcc84ec6 30#include "util/perf_regs.h"
ef149c25 31#include "util/auxtrace.h"
f00898f4 32#include "util/parse-branch-options.h"
bcc84ec6 33#include "util/parse-regs-options.h"
71dc2326 34#include "util/llvm-utils.h"
8690a2a7 35#include "util/bpf-loader.h"
d8871ea7 36#include "asm/bug.h"
7c6a1c65 37
97124d5e 38#include <unistd.h>
de9ac07b 39#include <sched.h>
a41794cd 40#include <sys/mman.h>
de9ac07b 41
78da39fa 42
8c6f45a7 43struct record {
45694aa7 44 struct perf_tool tool;
b4006796 45 struct record_opts opts;
d20deb64 46 u64 bytes_written;
f5fc1412 47 struct perf_data_file file;
ef149c25 48 struct auxtrace_record *itr;
d20deb64
ACM
49 struct perf_evlist *evlist;
50 struct perf_session *session;
51 const char *progname;
d20deb64 52 int realtime_prio;
d20deb64 53 bool no_buildid;
d2db9a98 54 bool no_buildid_set;
d20deb64 55 bool no_buildid_cache;
d2db9a98 56 bool no_buildid_cache_set;
6156681b 57 bool buildid_all;
9f065194 58 unsigned long long samples;
0f82ebc4 59};
a21ca2ca 60
8c6f45a7 61static int record__write(struct record *rec, void *bf, size_t size)
f5970550 62{
cf8b2e69 63 if (perf_data_file__write(rec->session->file, bf, size) < 0) {
50a9b868
JO
64 pr_err("failed to write perf data, error: %m\n");
65 return -1;
f5970550 66 }
8d3eca20 67
cf8b2e69 68 rec->bytes_written += size;
8d3eca20 69 return 0;
f5970550
PZ
70}
71
45694aa7 72static int process_synthesized_event(struct perf_tool *tool,
d20deb64 73 union perf_event *event,
1d037ca1
IT
74 struct perf_sample *sample __maybe_unused,
75 struct machine *machine __maybe_unused)
234fbbf5 76{
8c6f45a7
ACM
77 struct record *rec = container_of(tool, struct record, tool);
78 return record__write(rec, event, event->header.size);
234fbbf5
ACM
79}
80
e5685730 81static int record__mmap_read(struct record *rec, int idx)
de9ac07b 82{
e5685730 83 struct perf_mmap *md = &rec->evlist->mmap[idx];
7b8283b5
DA
84 u64 head = perf_mmap__read_head(md);
85 u64 old = md->prev;
918512b4 86 unsigned char *data = md->base + page_size;
de9ac07b
PZ
87 unsigned long size;
88 void *buf;
8d3eca20 89 int rc = 0;
de9ac07b 90
dc82009a 91 if (old == head)
8d3eca20 92 return 0;
dc82009a 93
d20deb64 94 rec->samples++;
de9ac07b
PZ
95
96 size = head - old;
97
98 if ((old & md->mask) + size != (head & md->mask)) {
99 buf = &data[old & md->mask];
100 size = md->mask + 1 - (old & md->mask);
101 old += size;
021e9f47 102
8c6f45a7 103 if (record__write(rec, buf, size) < 0) {
8d3eca20
DA
104 rc = -1;
105 goto out;
106 }
de9ac07b
PZ
107 }
108
109 buf = &data[old & md->mask];
110 size = head - old;
111 old += size;
021e9f47 112
8c6f45a7 113 if (record__write(rec, buf, size) < 0) {
8d3eca20
DA
114 rc = -1;
115 goto out;
116 }
de9ac07b
PZ
117
118 md->prev = old;
e5685730 119 perf_evlist__mmap_consume(rec->evlist, idx);
8d3eca20
DA
120out:
121 return rc;
de9ac07b
PZ
122}
123
/* Shared state between the signal handlers and the record main loop. */
static volatile int done;
static volatile int signr = -1;
static volatile int child_finished;
static volatile int auxtrace_snapshot_enabled;
static volatile int auxtrace_snapshot_err;
static volatile int auxtrace_record__snapshot_started;

/*
 * Generic termination handler: note which signal fired (SIGCHLD means
 * the workload exited) and ask the main loop to wind down.
 */
static void sig_handler(int sig)
{
	if (sig == SIGCHLD)
		child_finished = 1;
	else
		signr = sig;

	done = 1;
}

/*
 * atexit() hook: re-raise the terminating signal with the default
 * disposition so our exit status reflects how we were killed.
 */
static void record__sig_exit(void)
{
	if (signr == -1)
		return;

	signal(signr, SIG_DFL);
	raise(signr);
}
149
e31f0d01
AH
150#ifdef HAVE_AUXTRACE_SUPPORT
151
ef149c25
AH
152static int record__process_auxtrace(struct perf_tool *tool,
153 union perf_event *event, void *data1,
154 size_t len1, void *data2, size_t len2)
155{
156 struct record *rec = container_of(tool, struct record, tool);
99fa2984 157 struct perf_data_file *file = &rec->file;
ef149c25
AH
158 size_t padding;
159 u8 pad[8] = {0};
160
99fa2984
AH
161 if (!perf_data_file__is_pipe(file)) {
162 off_t file_offset;
163 int fd = perf_data_file__fd(file);
164 int err;
165
166 file_offset = lseek(fd, 0, SEEK_CUR);
167 if (file_offset == -1)
168 return -1;
169 err = auxtrace_index__auxtrace_event(&rec->session->auxtrace_index,
170 event, file_offset);
171 if (err)
172 return err;
173 }
174
ef149c25
AH
175 /* event.auxtrace.size includes padding, see __auxtrace_mmap__read() */
176 padding = (len1 + len2) & 7;
177 if (padding)
178 padding = 8 - padding;
179
180 record__write(rec, event, event->header.size);
181 record__write(rec, data1, len1);
182 if (len2)
183 record__write(rec, data2, len2);
184 record__write(rec, &pad, padding);
185
186 return 0;
187}
188
189static int record__auxtrace_mmap_read(struct record *rec,
190 struct auxtrace_mmap *mm)
191{
192 int ret;
193
194 ret = auxtrace_mmap__read(mm, rec->itr, &rec->tool,
195 record__process_auxtrace);
196 if (ret < 0)
197 return ret;
198
199 if (ret)
200 rec->samples++;
201
202 return 0;
203}
204
2dd6d8a1
AH
205static int record__auxtrace_mmap_read_snapshot(struct record *rec,
206 struct auxtrace_mmap *mm)
207{
208 int ret;
209
210 ret = auxtrace_mmap__read_snapshot(mm, rec->itr, &rec->tool,
211 record__process_auxtrace,
212 rec->opts.auxtrace_snapshot_size);
213 if (ret < 0)
214 return ret;
215
216 if (ret)
217 rec->samples++;
218
219 return 0;
220}
221
222static int record__auxtrace_read_snapshot_all(struct record *rec)
223{
224 int i;
225 int rc = 0;
226
227 for (i = 0; i < rec->evlist->nr_mmaps; i++) {
228 struct auxtrace_mmap *mm =
229 &rec->evlist->mmap[i].auxtrace_mmap;
230
231 if (!mm->base)
232 continue;
233
234 if (record__auxtrace_mmap_read_snapshot(rec, mm) != 0) {
235 rc = -1;
236 goto out;
237 }
238 }
239out:
240 return rc;
241}
242
243static void record__read_auxtrace_snapshot(struct record *rec)
244{
245 pr_debug("Recording AUX area tracing snapshot\n");
246 if (record__auxtrace_read_snapshot_all(rec) < 0) {
247 auxtrace_snapshot_err = -1;
248 } else {
249 auxtrace_snapshot_err = auxtrace_record__snapshot_finish(rec->itr);
250 if (!auxtrace_snapshot_err)
251 auxtrace_snapshot_enabled = 1;
252 }
253}
254
e31f0d01
AH
255#else
256
257static inline
258int record__auxtrace_mmap_read(struct record *rec __maybe_unused,
259 struct auxtrace_mmap *mm __maybe_unused)
260{
261 return 0;
262}
263
2dd6d8a1
AH
264static inline
265void record__read_auxtrace_snapshot(struct record *rec __maybe_unused)
de9ac07b 266{
f7b7c26e
PZ
267}
268
2dd6d8a1
AH
269static inline
270int auxtrace_record__snapshot_start(struct auxtrace_record *itr __maybe_unused)
f7b7c26e 271{
2dd6d8a1 272 return 0;
de9ac07b
PZ
273}
274
2dd6d8a1
AH
275#endif
276
8c6f45a7 277static int record__open(struct record *rec)
dd7927f4 278{
56e52e85 279 char msg[512];
6a4bb04c 280 struct perf_evsel *pos;
d20deb64
ACM
281 struct perf_evlist *evlist = rec->evlist;
282 struct perf_session *session = rec->session;
b4006796 283 struct record_opts *opts = &rec->opts;
8d3eca20 284 int rc = 0;
dd7927f4 285
f77a9518 286 perf_evlist__config(evlist, opts);
cac21425 287
0050f7aa 288 evlist__for_each(evlist, pos) {
dd7927f4 289try_again:
d988d5ee 290 if (perf_evsel__open(pos, pos->cpus, pos->threads) < 0) {
56e52e85 291 if (perf_evsel__fallback(pos, errno, msg, sizeof(msg))) {
d6d901c2 292 if (verbose)
c0a54341 293 ui__warning("%s\n", msg);
d6d901c2
ZY
294 goto try_again;
295 }
ca6a4258 296
56e52e85
ACM
297 rc = -errno;
298 perf_evsel__open_strerror(pos, &opts->target,
299 errno, msg, sizeof(msg));
300 ui__error("%s\n", msg);
8d3eca20 301 goto out;
c171b552
LZ
302 }
303 }
a43d3f08 304
23d4aad4
ACM
305 if (perf_evlist__apply_filters(evlist, &pos)) {
306 error("failed to set filter \"%s\" on event %s with %d (%s)\n",
307 pos->filter, perf_evsel__name(pos), errno,
35550da3 308 strerror_r(errno, msg, sizeof(msg)));
8d3eca20
DA
309 rc = -1;
310 goto out;
0a102479
FW
311 }
312
ef149c25 313 if (perf_evlist__mmap_ex(evlist, opts->mmap_pages, false,
2dd6d8a1
AH
314 opts->auxtrace_mmap_pages,
315 opts->auxtrace_snapshot_mode) < 0) {
8d3eca20
DA
316 if (errno == EPERM) {
317 pr_err("Permission error mapping pages.\n"
318 "Consider increasing "
319 "/proc/sys/kernel/perf_event_mlock_kb,\n"
320 "or try again with a smaller value of -m/--mmap_pages.\n"
ef149c25
AH
321 "(current value: %u,%u)\n",
322 opts->mmap_pages, opts->auxtrace_mmap_pages);
8d3eca20 323 rc = -errno;
8d3eca20 324 } else {
35550da3
MH
325 pr_err("failed to mmap with %d (%s)\n", errno,
326 strerror_r(errno, msg, sizeof(msg)));
95c36561
WN
327 if (errno)
328 rc = -errno;
329 else
330 rc = -EINVAL;
8d3eca20
DA
331 }
332 goto out;
18e60939 333 }
0a27d7f9 334
563aecb2 335 session->evlist = evlist;
7b56cce2 336 perf_session__set_id_hdr_size(session);
8d3eca20
DA
337out:
338 return rc;
16c8a109
PZ
339}
340
e3d59112
NK
341static int process_sample_event(struct perf_tool *tool,
342 union perf_event *event,
343 struct perf_sample *sample,
344 struct perf_evsel *evsel,
345 struct machine *machine)
346{
347 struct record *rec = container_of(tool, struct record, tool);
348
349 rec->samples++;
350
351 return build_id__mark_dso_hit(tool, event, sample, evsel, machine);
352}
353
8c6f45a7 354static int process_buildids(struct record *rec)
6122e4e4 355{
f5fc1412
JO
356 struct perf_data_file *file = &rec->file;
357 struct perf_session *session = rec->session;
6122e4e4 358
457ae94a 359 if (file->size == 0)
9f591fd7
ACM
360 return 0;
361
00dc8657
NK
362 /*
363 * During this process, it'll load kernel map and replace the
364 * dso->long_name to a real pathname it found. In this case
365 * we prefer the vmlinux path like
366 * /lib/modules/3.16.4/build/vmlinux
367 *
368 * rather than build-id path (in debug directory).
369 * $HOME/.debug/.build-id/f0/6e17aa50adf4d00b88925e03775de107611551
370 */
371 symbol_conf.ignore_vmlinux_buildid = true;
372
6156681b
NK
373 /*
374 * If --buildid-all is given, it marks all DSO regardless of hits,
375 * so no need to process samples.
376 */
377 if (rec->buildid_all)
378 rec->tool.sample = NULL;
379
b7b61cbe 380 return perf_session__process_events(session);
6122e4e4
ACM
381}
382
8115d60c 383static void perf_event__synthesize_guest_os(struct machine *machine, void *data)
a1645ce1
ZY
384{
385 int err;
45694aa7 386 struct perf_tool *tool = data;
a1645ce1
ZY
387 /*
388 *As for guest kernel when processing subcommand record&report,
389 *we arrange module mmap prior to guest kernel mmap and trigger
390 *a preload dso because default guest module symbols are loaded
391 *from guest kallsyms instead of /lib/modules/XXX/XXX. This
392 *method is used to avoid symbol missing when the first addr is
393 *in module instead of in guest kernel.
394 */
45694aa7 395 err = perf_event__synthesize_modules(tool, process_synthesized_event,
743eb868 396 machine);
a1645ce1
ZY
397 if (err < 0)
398 pr_err("Couldn't record guest kernel [%d]'s reference"
23346f21 399 " relocation symbol.\n", machine->pid);
a1645ce1 400
a1645ce1
ZY
401 /*
402 * We use _stext for guest kernel because guest kernel's /proc/kallsyms
403 * have no _text sometimes.
404 */
45694aa7 405 err = perf_event__synthesize_kernel_mmap(tool, process_synthesized_event,
0ae617be 406 machine);
a1645ce1
ZY
407 if (err < 0)
408 pr_err("Couldn't record guest kernel [%d]'s reference"
23346f21 409 " relocation symbol.\n", machine->pid);
a1645ce1
ZY
410}
411
98402807
FW
412static struct perf_event_header finished_round_event = {
413 .size = sizeof(struct perf_event_header),
414 .type = PERF_RECORD_FINISHED_ROUND,
415};
416
8c6f45a7 417static int record__mmap_read_all(struct record *rec)
98402807 418{
dcabb507 419 u64 bytes_written = rec->bytes_written;
0e2e63dd 420 int i;
8d3eca20 421 int rc = 0;
98402807 422
d20deb64 423 for (i = 0; i < rec->evlist->nr_mmaps; i++) {
ef149c25
AH
424 struct auxtrace_mmap *mm = &rec->evlist->mmap[i].auxtrace_mmap;
425
8d3eca20 426 if (rec->evlist->mmap[i].base) {
e5685730 427 if (record__mmap_read(rec, i) != 0) {
8d3eca20
DA
428 rc = -1;
429 goto out;
430 }
431 }
ef149c25 432
2dd6d8a1 433 if (mm->base && !rec->opts.auxtrace_snapshot_mode &&
ef149c25
AH
434 record__auxtrace_mmap_read(rec, mm) != 0) {
435 rc = -1;
436 goto out;
437 }
98402807
FW
438 }
439
dcabb507
JO
440 /*
441 * Mark the round finished in case we wrote
442 * at least one event.
443 */
444 if (bytes_written != rec->bytes_written)
445 rc = record__write(rec, &finished_round_event, sizeof(finished_round_event));
8d3eca20
DA
446
447out:
448 return rc;
98402807
FW
449}
450
8c6f45a7 451static void record__init_features(struct record *rec)
57706abc 452{
57706abc
DA
453 struct perf_session *session = rec->session;
454 int feat;
455
456 for (feat = HEADER_FIRST_FEATURE; feat < HEADER_LAST_FEATURE; feat++)
457 perf_header__set_feat(&session->header, feat);
458
459 if (rec->no_buildid)
460 perf_header__clear_feat(&session->header, HEADER_BUILD_ID);
461
3e2be2da 462 if (!have_tracepoints(&rec->evlist->entries))
57706abc
DA
463 perf_header__clear_feat(&session->header, HEADER_TRACING_DATA);
464
465 if (!rec->opts.branch_stack)
466 perf_header__clear_feat(&session->header, HEADER_BRANCH_STACK);
ef149c25
AH
467
468 if (!rec->opts.full_auxtrace)
469 perf_header__clear_feat(&session->header, HEADER_AUXTRACE);
ffa517ad
JO
470
471 perf_header__clear_feat(&session->header, HEADER_STAT);
57706abc
DA
472}
473
e1ab48ba
WN
474static void
475record__finish_output(struct record *rec)
476{
477 struct perf_data_file *file = &rec->file;
478 int fd = perf_data_file__fd(file);
479
480 if (file->is_pipe)
481 return;
482
483 rec->session->header.data_size += rec->bytes_written;
484 file->size = lseek(perf_data_file__fd(file), 0, SEEK_CUR);
485
486 if (!rec->no_buildid) {
487 process_buildids(rec);
488
489 if (rec->buildid_all)
490 dsos__hit_all(rec->session);
491 }
492 perf_session__write_header(rec->session, rec->evlist, fd, true);
493
494 return;
495}
496
f33cbe72
ACM
497static volatile int workload_exec_errno;
498
499/*
500 * perf_evlist__prepare_workload will send a SIGUSR1
501 * if the fork fails, since we asked by setting its
502 * want_signal to true.
503 */
45604710
NK
504static void workload_exec_failed_signal(int signo __maybe_unused,
505 siginfo_t *info,
f33cbe72
ACM
506 void *ucontext __maybe_unused)
507{
508 workload_exec_errno = info->si_value.sival_int;
509 done = 1;
f33cbe72
ACM
510 child_finished = 1;
511}
512
/* SIGUSR2 handler used in AUX snapshot mode; defined later in this file. */
static void snapshot_sig_handler(int sig);
514
c45c86eb
WN
515static int record__synthesize(struct record *rec)
516{
517 struct perf_session *session = rec->session;
518 struct machine *machine = &session->machines.host;
519 struct perf_data_file *file = &rec->file;
520 struct record_opts *opts = &rec->opts;
521 struct perf_tool *tool = &rec->tool;
522 int fd = perf_data_file__fd(file);
523 int err = 0;
524
525 if (file->is_pipe) {
526 err = perf_event__synthesize_attrs(tool, session,
527 process_synthesized_event);
528 if (err < 0) {
529 pr_err("Couldn't synthesize attrs.\n");
530 goto out;
531 }
532
533 if (have_tracepoints(&rec->evlist->entries)) {
534 /*
535 * FIXME err <= 0 here actually means that
536 * there were no tracepoints so its not really
537 * an error, just that we don't need to
538 * synthesize anything. We really have to
539 * return this more properly and also
540 * propagate errors that now are calling die()
541 */
542 err = perf_event__synthesize_tracing_data(tool, fd, rec->evlist,
543 process_synthesized_event);
544 if (err <= 0) {
545 pr_err("Couldn't record tracing data.\n");
546 goto out;
547 }
548 rec->bytes_written += err;
549 }
550 }
551
552 if (rec->opts.full_auxtrace) {
553 err = perf_event__synthesize_auxtrace_info(rec->itr, tool,
554 session, process_synthesized_event);
555 if (err)
556 goto out;
557 }
558
559 err = perf_event__synthesize_kernel_mmap(tool, process_synthesized_event,
560 machine);
561 WARN_ONCE(err < 0, "Couldn't record kernel reference relocation symbol\n"
562 "Symbol resolution may be skewed if relocation was used (e.g. kexec).\n"
563 "Check /proc/kallsyms permission or run as root.\n");
564
565 err = perf_event__synthesize_modules(tool, process_synthesized_event,
566 machine);
567 WARN_ONCE(err < 0, "Couldn't record kernel module information.\n"
568 "Symbol resolution may be skewed if relocation was used (e.g. kexec).\n"
569 "Check /proc/modules permission or run as root.\n");
570
571 if (perf_guest) {
572 machines__process_guests(&session->machines,
573 perf_event__synthesize_guest_os, tool);
574 }
575
576 err = __machine__synthesize_threads(machine, tool, &opts->target, rec->evlist->threads,
577 process_synthesized_event, opts->sample_address,
578 opts->proc_map_timeout);
579out:
580 return err;
581}
582
8c6f45a7 583static int __cmd_record(struct record *rec, int argc, const char **argv)
16c8a109 584{
57706abc 585 int err;
45604710 586 int status = 0;
8b412664 587 unsigned long waking = 0;
46be604b 588 const bool forks = argc > 0;
23346f21 589 struct machine *machine;
45694aa7 590 struct perf_tool *tool = &rec->tool;
b4006796 591 struct record_opts *opts = &rec->opts;
f5fc1412 592 struct perf_data_file *file = &rec->file;
d20deb64 593 struct perf_session *session;
6dcf45ef 594 bool disabled = false, draining = false;
42aa276f 595 int fd;
de9ac07b 596
d20deb64 597 rec->progname = argv[0];
33e49ea7 598
45604710 599 atexit(record__sig_exit);
f5970550
PZ
600 signal(SIGCHLD, sig_handler);
601 signal(SIGINT, sig_handler);
804f7ac7 602 signal(SIGTERM, sig_handler);
2dd6d8a1
AH
603 if (rec->opts.auxtrace_snapshot_mode)
604 signal(SIGUSR2, snapshot_sig_handler);
605 else
606 signal(SIGUSR2, SIG_IGN);
f5970550 607
b7b61cbe 608 session = perf_session__new(file, false, tool);
94c744b6 609 if (session == NULL) {
ffa91880 610 pr_err("Perf session creation failed.\n");
a9a70bbc
ACM
611 return -1;
612 }
613
42aa276f 614 fd = perf_data_file__fd(file);
d20deb64
ACM
615 rec->session = session;
616
8c6f45a7 617 record__init_features(rec);
330aa675 618
d4db3f16 619 if (forks) {
3e2be2da 620 err = perf_evlist__prepare_workload(rec->evlist, &opts->target,
f5fc1412 621 argv, file->is_pipe,
735f7e0b 622 workload_exec_failed_signal);
35b9d88e
ACM
623 if (err < 0) {
624 pr_err("Couldn't run the workload!\n");
45604710 625 status = err;
35b9d88e 626 goto out_delete_session;
856e9660 627 }
856e9660
PZ
628 }
629
8c6f45a7 630 if (record__open(rec) != 0) {
8d3eca20 631 err = -1;
45604710 632 goto out_child;
8d3eca20 633 }
de9ac07b 634
8690a2a7
WN
635 err = bpf__apply_obj_config();
636 if (err) {
637 char errbuf[BUFSIZ];
638
639 bpf__strerror_apply_obj_config(err, errbuf, sizeof(errbuf));
640 pr_err("ERROR: Apply config to BPF failed: %s\n",
641 errbuf);
642 goto out_child;
643 }
644
cca8482c
AH
645 /*
646 * Normally perf_session__new would do this, but it doesn't have the
647 * evlist.
648 */
649 if (rec->tool.ordered_events && !perf_evlist__sample_id_all(rec->evlist)) {
650 pr_warning("WARNING: No sample_id_all support, falling back to unordered processing\n");
651 rec->tool.ordered_events = false;
652 }
653
3e2be2da 654 if (!rec->evlist->nr_groups)
a8bb559b
NK
655 perf_header__clear_feat(&session->header, HEADER_GROUP_DESC);
656
f5fc1412 657 if (file->is_pipe) {
42aa276f 658 err = perf_header__write_pipe(fd);
529870e3 659 if (err < 0)
45604710 660 goto out_child;
563aecb2 661 } else {
42aa276f 662 err = perf_session__write_header(session, rec->evlist, fd, false);
d5eed904 663 if (err < 0)
45604710 664 goto out_child;
56b03f3c
ACM
665 }
666
d3665498 667 if (!rec->no_buildid
e20960c0 668 && !perf_header__has_feat(&session->header, HEADER_BUILD_ID)) {
d3665498 669 pr_err("Couldn't generate buildids. "
e20960c0 670 "Use --no-buildid to profile anyway.\n");
8d3eca20 671 err = -1;
45604710 672 goto out_child;
e20960c0
RR
673 }
674
34ba5122 675 machine = &session->machines.host;
743eb868 676
c45c86eb
WN
677 err = record__synthesize(rec);
678 if (err < 0)
45604710 679 goto out_child;
8d3eca20 680
d20deb64 681 if (rec->realtime_prio) {
de9ac07b
PZ
682 struct sched_param param;
683
d20deb64 684 param.sched_priority = rec->realtime_prio;
de9ac07b 685 if (sched_setscheduler(0, SCHED_FIFO, &param)) {
6beba7ad 686 pr_err("Could not set realtime priority.\n");
8d3eca20 687 err = -1;
45604710 688 goto out_child;
de9ac07b
PZ
689 }
690 }
691
774cb499
JO
692 /*
693 * When perf is starting the traced process, all the events
694 * (apart from group members) have enable_on_exec=1 set,
695 * so don't spoil it by prematurely enabling them.
696 */
6619a53e 697 if (!target__none(&opts->target) && !opts->initial_delay)
3e2be2da 698 perf_evlist__enable(rec->evlist);
764e16a3 699
856e9660
PZ
700 /*
701 * Let the child rip
702 */
e803cf97 703 if (forks) {
e5bed564
NK
704 union perf_event *event;
705
706 event = malloc(sizeof(event->comm) + machine->id_hdr_size);
707 if (event == NULL) {
708 err = -ENOMEM;
709 goto out_child;
710 }
711
e803cf97
NK
712 /*
713 * Some H/W events are generated before COMM event
714 * which is emitted during exec(), so perf script
715 * cannot see a correct process name for those events.
716 * Synthesize COMM event to prevent it.
717 */
e5bed564 718 perf_event__synthesize_comm(tool, event,
e803cf97
NK
719 rec->evlist->workload.pid,
720 process_synthesized_event,
721 machine);
e5bed564 722 free(event);
e803cf97 723
3e2be2da 724 perf_evlist__start_workload(rec->evlist);
e803cf97 725 }
856e9660 726
6619a53e
AK
727 if (opts->initial_delay) {
728 usleep(opts->initial_delay * 1000);
729 perf_evlist__enable(rec->evlist);
730 }
731
2dd6d8a1 732 auxtrace_snapshot_enabled = 1;
649c48a9 733 for (;;) {
9f065194 734 unsigned long long hits = rec->samples;
de9ac07b 735
8c6f45a7 736 if (record__mmap_read_all(rec) < 0) {
2dd6d8a1 737 auxtrace_snapshot_enabled = 0;
8d3eca20 738 err = -1;
45604710 739 goto out_child;
8d3eca20 740 }
de9ac07b 741
2dd6d8a1
AH
742 if (auxtrace_record__snapshot_started) {
743 auxtrace_record__snapshot_started = 0;
744 if (!auxtrace_snapshot_err)
745 record__read_auxtrace_snapshot(rec);
746 if (auxtrace_snapshot_err) {
747 pr_err("AUX area tracing snapshot failed\n");
748 err = -1;
749 goto out_child;
750 }
751 }
752
d20deb64 753 if (hits == rec->samples) {
6dcf45ef 754 if (done || draining)
649c48a9 755 break;
f66a889d 756 err = perf_evlist__poll(rec->evlist, -1);
a515114f
JO
757 /*
758 * Propagate error, only if there's any. Ignore positive
759 * number of returned events and interrupt error.
760 */
761 if (err > 0 || (err < 0 && errno == EINTR))
45604710 762 err = 0;
8b412664 763 waking++;
6dcf45ef
ACM
764
765 if (perf_evlist__filter_pollfd(rec->evlist, POLLERR | POLLHUP) == 0)
766 draining = true;
8b412664
PZ
767 }
768
774cb499
JO
769 /*
770 * When perf is starting the traced process, at the end events
771 * die with the process and we wait for that. Thus no need to
772 * disable events in this case.
773 */
602ad878 774 if (done && !disabled && !target__none(&opts->target)) {
2dd6d8a1 775 auxtrace_snapshot_enabled = 0;
3e2be2da 776 perf_evlist__disable(rec->evlist);
2711926a
JO
777 disabled = true;
778 }
de9ac07b 779 }
2dd6d8a1 780 auxtrace_snapshot_enabled = 0;
de9ac07b 781
f33cbe72 782 if (forks && workload_exec_errno) {
35550da3 783 char msg[STRERR_BUFSIZE];
f33cbe72
ACM
784 const char *emsg = strerror_r(workload_exec_errno, msg, sizeof(msg));
785 pr_err("Workload failed: %s\n", emsg);
786 err = -1;
45604710 787 goto out_child;
f33cbe72
ACM
788 }
789
e3d59112 790 if (!quiet)
45604710 791 fprintf(stderr, "[ perf record: Woken up %ld times to write data ]\n", waking);
b44308f5 792
45604710
NK
793out_child:
794 if (forks) {
795 int exit_status;
addc2785 796
45604710
NK
797 if (!child_finished)
798 kill(rec->evlist->workload.pid, SIGTERM);
799
800 wait(&exit_status);
801
802 if (err < 0)
803 status = err;
804 else if (WIFEXITED(exit_status))
805 status = WEXITSTATUS(exit_status);
806 else if (WIFSIGNALED(exit_status))
807 signr = WTERMSIG(exit_status);
808 } else
809 status = err;
810
e3d59112
NK
811 /* this will be recalculated during process_buildids() */
812 rec->samples = 0;
813
e1ab48ba
WN
814 if (!err)
815 record__finish_output(rec);
39d17dac 816
e3d59112
NK
817 if (!err && !quiet) {
818 char samples[128];
819
ef149c25 820 if (rec->samples && !rec->opts.full_auxtrace)
e3d59112
NK
821 scnprintf(samples, sizeof(samples),
822 " (%" PRIu64 " samples)", rec->samples);
823 else
824 samples[0] = '\0';
825
826 fprintf(stderr, "[ perf record: Captured and wrote %.3f MB %s%s ]\n",
827 perf_data_file__size(file) / 1024.0 / 1024.0,
828 file->path, samples);
829 }
830
39d17dac
ACM
831out_delete_session:
832 perf_session__delete(session);
45604710 833 return status;
de9ac07b 834}
0e9b20b8 835
72a128aa 836static void callchain_debug(void)
09b0fd45 837{
aad2b21c 838 static const char *str[CALLCHAIN_MAX] = { "NONE", "FP", "DWARF", "LBR" };
a601fdff 839
72a128aa 840 pr_debug("callchain: type %s\n", str[callchain_param.record_mode]);
26d33022 841
72a128aa 842 if (callchain_param.record_mode == CALLCHAIN_DWARF)
09b0fd45 843 pr_debug("callchain: stack dump size %d\n",
72a128aa 844 callchain_param.dump_size);
09b0fd45
JO
845}
846
c421e80b 847int record_parse_callchain_opt(const struct option *opt,
09b0fd45
JO
848 const char *arg,
849 int unset)
850{
09b0fd45 851 int ret;
c421e80b 852 struct record_opts *record = (struct record_opts *)opt->value;
09b0fd45 853
c421e80b 854 record->callgraph_set = true;
72a128aa 855 callchain_param.enabled = !unset;
eb853e80 856
09b0fd45
JO
857 /* --no-call-graph */
858 if (unset) {
72a128aa 859 callchain_param.record_mode = CALLCHAIN_NONE;
09b0fd45
JO
860 pr_debug("callchain: disabled\n");
861 return 0;
862 }
863
c3a6a8c4 864 ret = parse_callchain_record_opt(arg, &callchain_param);
5c0cf224
JO
865 if (!ret) {
866 /* Enable data address sampling for DWARF unwind. */
867 if (callchain_param.record_mode == CALLCHAIN_DWARF)
868 record->sample_address = true;
72a128aa 869 callchain_debug();
5c0cf224 870 }
26d33022
JO
871
872 return ret;
873}
874
c421e80b 875int record_callchain_opt(const struct option *opt,
09b0fd45
JO
876 const char *arg __maybe_unused,
877 int unset __maybe_unused)
878{
c421e80b
KL
879 struct record_opts *record = (struct record_opts *)opt->value;
880
881 record->callgraph_set = true;
72a128aa 882 callchain_param.enabled = true;
09b0fd45 883
72a128aa
NK
884 if (callchain_param.record_mode == CALLCHAIN_NONE)
885 callchain_param.record_mode = CALLCHAIN_FP;
eb853e80 886
72a128aa 887 callchain_debug();
09b0fd45
JO
888 return 0;
889}
890
eb853e80
JO
891static int perf_record_config(const char *var, const char *value, void *cb)
892{
7a29c087
NK
893 struct record *rec = cb;
894
895 if (!strcmp(var, "record.build-id")) {
896 if (!strcmp(value, "cache"))
897 rec->no_buildid_cache = false;
898 else if (!strcmp(value, "no-cache"))
899 rec->no_buildid_cache = true;
900 else if (!strcmp(value, "skip"))
901 rec->no_buildid = true;
902 else
903 return -1;
904 return 0;
905 }
eb853e80 906 if (!strcmp(var, "record.call-graph"))
5a2e5e85 907 var = "call-graph.record-mode"; /* fall-through */
eb853e80
JO
908
909 return perf_default_config(var, value, cb);
910}
911
814c8c38
PZ
912struct clockid_map {
913 const char *name;
914 int clockid;
915};
916
917#define CLOCKID_MAP(n, c) \
918 { .name = n, .clockid = (c), }
919
920#define CLOCKID_END { .name = NULL, }
921
922
923/*
924 * Add the missing ones, we need to build on many distros...
925 */
926#ifndef CLOCK_MONOTONIC_RAW
927#define CLOCK_MONOTONIC_RAW 4
928#endif
929#ifndef CLOCK_BOOTTIME
930#define CLOCK_BOOTTIME 7
931#endif
932#ifndef CLOCK_TAI
933#define CLOCK_TAI 11
934#endif
935
936static const struct clockid_map clockids[] = {
937 /* available for all events, NMI safe */
938 CLOCKID_MAP("monotonic", CLOCK_MONOTONIC),
939 CLOCKID_MAP("monotonic_raw", CLOCK_MONOTONIC_RAW),
940
941 /* available for some events */
942 CLOCKID_MAP("realtime", CLOCK_REALTIME),
943 CLOCKID_MAP("boottime", CLOCK_BOOTTIME),
944 CLOCKID_MAP("tai", CLOCK_TAI),
945
946 /* available for the lazy */
947 CLOCKID_MAP("mono", CLOCK_MONOTONIC),
948 CLOCKID_MAP("raw", CLOCK_MONOTONIC_RAW),
949 CLOCKID_MAP("real", CLOCK_REALTIME),
950 CLOCKID_MAP("boot", CLOCK_BOOTTIME),
951
952 CLOCKID_END,
953};
954
955static int parse_clockid(const struct option *opt, const char *str, int unset)
956{
957 struct record_opts *opts = (struct record_opts *)opt->value;
958 const struct clockid_map *cm;
959 const char *ostr = str;
960
961 if (unset) {
962 opts->use_clockid = 0;
963 return 0;
964 }
965
966 /* no arg passed */
967 if (!str)
968 return 0;
969
970 /* no setting it twice */
971 if (opts->use_clockid)
972 return -1;
973
974 opts->use_clockid = true;
975
976 /* if its a number, we're done */
977 if (sscanf(str, "%d", &opts->clockid) == 1)
978 return 0;
979
980 /* allow a "CLOCK_" prefix to the name */
981 if (!strncasecmp(str, "CLOCK_", 6))
982 str += 6;
983
984 for (cm = clockids; cm->name; cm++) {
985 if (!strcasecmp(str, cm->name)) {
986 opts->clockid = cm->clockid;
987 return 0;
988 }
989 }
990
991 opts->use_clockid = false;
992 ui__warning("unknown clockid %s, check man page\n", ostr);
993 return -1;
994}
995
e9db1310
AH
996static int record__parse_mmap_pages(const struct option *opt,
997 const char *str,
998 int unset __maybe_unused)
999{
1000 struct record_opts *opts = opt->value;
1001 char *s, *p;
1002 unsigned int mmap_pages;
1003 int ret;
1004
1005 if (!str)
1006 return -EINVAL;
1007
1008 s = strdup(str);
1009 if (!s)
1010 return -ENOMEM;
1011
1012 p = strchr(s, ',');
1013 if (p)
1014 *p = '\0';
1015
1016 if (*s) {
1017 ret = __perf_evlist__parse_mmap_pages(&mmap_pages, s);
1018 if (ret)
1019 goto out_free;
1020 opts->mmap_pages = mmap_pages;
1021 }
1022
1023 if (!p) {
1024 ret = 0;
1025 goto out_free;
1026 }
1027
1028 ret = __perf_evlist__parse_mmap_pages(&mmap_pages, p + 1);
1029 if (ret)
1030 goto out_free;
1031
1032 opts->auxtrace_mmap_pages = mmap_pages;
1033
1034out_free:
1035 free(s);
1036 return ret;
1037}
1038
/* Usage strings shown by 'perf record -h'; exported via record_usage. */
static const char * const __record_usage[] = {
	"perf record [<options>] [<command>]",
	"perf record [<options>] -- <command> [<options>]",
	NULL
};
const char * const *record_usage = __record_usage;
0e9b20b8 1045
d20deb64 1046/*
8c6f45a7
ACM
 1047 * XXX Ideally would be local to cmd_record() and passed to a record__new
 1048 * because we need to have access to it in record__exit, that is called
d20deb64
ACM
 1049 * after cmd_record() exits, but since record_options need to be accessible to
 1050 * builtin-script, leave it here.
 1051 *
 1052 * At least we don't ouch it in all the other functions here directly.
 1053 *
 1054 * Just say no to tons of global variables, sigh.
 1055 */
8c6f45a7 1056static struct record record = {
d20deb64 1057	.opts = {
8affc2b8 1058		.sample_time = true,
d20deb64
ACM
		/* NOTE(review): UINT_MAX/ULLONG_MAX look like "not set by the user"
		 * sentinels resolved later (record_opts__config) -- confirm. */
 1059		.mmap_pages = UINT_MAX,
 1060		.user_freq = UINT_MAX,
 1061		.user_interval = ULLONG_MAX,
447a6013 1062		.freq = 4000, /* default sampling frequency; -F/--freq overrides via user_freq */
d1cb9fce
NK
 1063		.target = {
 1064			.uses_mmap = true,
3aa5939d 1065			.default_per_cpu = true,
d1cb9fce 1066		},
9d9cad76 1067		.proc_map_timeout = 500, /* ms; see the --proc-map-timeout option */
d20deb64 1068	},
e3d59112
NK
	/* Event-processing callbacks, wired to the generic perf_event__process_*() handlers. */
 1069	.tool = {
 1070		.sample = process_sample_event,
 1071		.fork = perf_event__process_fork,
cca8482c 1072		.exit = perf_event__process_exit,
e3d59112
NK
 1073		.comm = perf_event__process_comm,
 1074		.mmap = perf_event__process_mmap,
 1075		.mmap2 = perf_event__process_mmap2,
cca8482c 1076		.ordered_events = true,
e3d59112 1077	},
d20deb64 1078};
7865e817 1079
76a26549
NK
/*
 * Help string for the --call-graph option below; the trailing line documents
 * the default callchain record mode (frame pointers).
 */
 1080const char record_callchain_help[] = CALLCHAIN_RECORD_HELP
 1081	"\n\t\t\t\tDefault: fp";
61eaa3be 1082
d20deb64
ACM
 1083/*
 1084 * XXX Will stay a global variable till we fix builtin-script.c to stop messing
 1085 * with it and switch to use the library functions in perf_evlist that came
b4006796 1086 * from builtin-record.c, i.e. use record_opts,
d20deb64
ACM
 1087 * perf_evlist__prepare_workload, etc instead of fork+exec'in 'perf record',
 1088 * using pipes, etc.
 1089 */
/* Option table for 'perf record'; non-static so builtin-script can reuse it (see XXX above). */
e5b2c207 1090struct option __record_options[] = {
d20deb64 1091	OPT_CALLBACK('e', "event", &record.evlist, "event",
86847b62 1092		     "event selector. use 'perf list' to list available events",
f120f9d5 1093		     parse_events_option),
d20deb64 1094	OPT_CALLBACK(0, "filter", &record.evlist, "filter",
c171b552 1095		     "event filter", parse_filter),
4ba1faa1
WN
 1096	OPT_CALLBACK_NOOPT(0, "exclude-perf", &record.evlist,
 1097			   NULL, "don't record events from perf itself",
 1098			   exclude_perf),
bea03405 1099	OPT_STRING('p', "pid", &record.opts.target.pid, "pid",
d6d901c2 1100		    "record events on existing process id"),
bea03405 1101	OPT_STRING('t', "tid", &record.opts.target.tid, "tid",
d6d901c2 1102		    "record events on existing thread id"),
d20deb64 1103	OPT_INTEGER('r', "realtime", &record.realtime_prio,
0e9b20b8 1104		    "collect data with this RT SCHED_FIFO priority"),
509051ea 1105	OPT_BOOLEAN(0, "no-buffering", &record.opts.no_buffering,
acac03fa 1106		    "collect data without buffering"),
d20deb64 1107	OPT_BOOLEAN('R', "raw-samples", &record.opts.raw_samples,
daac07b2 1108		    "collect raw sample records from all opened counters"),
bea03405 1109	OPT_BOOLEAN('a', "all-cpus", &record.opts.target.system_wide,
0e9b20b8 1110			    "system-wide collection from all CPUs"),
bea03405 1111	OPT_STRING('C', "cpu", &record.opts.target.cpu_list, "cpu",
c45c6ea2 1112		    "list of cpus to monitor"),
d20deb64 1113	OPT_U64('c', "count", &record.opts.user_interval, "event period to sample"),
f5fc1412 1114	OPT_STRING('o', "output", &record.file.path, "file",
abaff32a 1115		    "output file name"),
69e7e5b0
AH
 1116	OPT_BOOLEAN_SET('i', "no-inherit", &record.opts.no_inherit,
 1117			&record.opts.no_inherit_set,
 1118			"child tasks do not inherit counters"),
d20deb64 1119	OPT_UINTEGER('F', "freq", &record.opts.user_freq, "profile at this frequency"),
e9db1310
AH
 1120	OPT_CALLBACK('m', "mmap-pages", &record.opts, "pages[,pages]",
 1121		     "number of mmap data pages and AUX area tracing mmap pages",
 1122		     record__parse_mmap_pages),
d20deb64 1123	OPT_BOOLEAN(0, "group", &record.opts.group,
43bece79 1124		    "put the counters into a counter group"),
09b0fd45
JO
 1125	OPT_CALLBACK_NOOPT('g', NULL, &record.opts,
 1126			   NULL, "enables call-graph recording" ,
 1127			   &record_callchain_opt),
 1128	OPT_CALLBACK(0, "call-graph", &record.opts,
76a26549 1129		     "record_mode[,record_size]", record_callchain_help,
09b0fd45 1130		     &record_parse_callchain_opt),
c0555642 1131	OPT_INCR('v', "verbose", &verbose,
3da297a6 1132		    "be more verbose (show counter open errors, etc)"),
b44308f5 1133	OPT_BOOLEAN('q', "quiet", &quiet, "don't print any message"),
d20deb64 1134	OPT_BOOLEAN('s', "stat", &record.opts.inherit_stat,
649c48a9 1135		    "per thread counts"),
56100321 1136	OPT_BOOLEAN('d', "data", &record.opts.sample_address, "Record the sample addresses"),
3abebc55
AH
 1137	OPT_BOOLEAN_SET('T', "timestamp", &record.opts.sample_time,
 1138			&record.opts.sample_time_set,
 1139			"Record the sample timestamps"),
56100321 1140	OPT_BOOLEAN('P', "period", &record.opts.period, "Record the sample period"),
d20deb64 1141	OPT_BOOLEAN('n', "no-samples", &record.opts.no_samples,
649c48a9 1142		    "don't sample"),
d2db9a98
WN
 1143	OPT_BOOLEAN_SET('N', "no-buildid-cache", &record.no_buildid_cache,
 1144			&record.no_buildid_cache_set,
 1145			"do not update the buildid cache"),
 1146	OPT_BOOLEAN_SET('B', "no-buildid", &record.no_buildid,
 1147			&record.no_buildid_set,
 1148			"do not collect buildids in perf.data"),
d20deb64 1149	OPT_CALLBACK('G', "cgroup", &record.evlist, "name",
023695d9
SE
 1150		     "monitor event in cgroup name only",
 1151		     parse_cgroups),
a6205a35 1152	OPT_UINTEGER('D', "delay", &record.opts.initial_delay,
6619a53e 1153		  "ms to wait before starting measurement after program start"),
bea03405
NK
 1154	OPT_STRING('u', "uid", &record.opts.target.uid_str, "user",
 1155		   "user to profile"),
a5aabdac
SE
 1156
 1157	OPT_CALLBACK_NOOPT('b', "branch-any", &record.opts.branch_stack,
 1158			   "branch any", "sample any taken branches",
 1159			   parse_branch_stack),
 1160
 1161	OPT_CALLBACK('j', "branch-filter", &record.opts.branch_stack,
 1162		     "branch filter mask", "branch stack filter modes",
bdfebd84 1163		     parse_branch_stack),
05484298
AK
 1164	OPT_BOOLEAN('W', "weight", &record.opts.sample_weight,
 1165		    "sample by weight (on special events only)"),
475eeab9
AK
 1166	OPT_BOOLEAN(0, "transaction", &record.opts.sample_transaction,
 1167		    "sample transaction flags (special events only)"),
3aa5939d
AH
 1168	OPT_BOOLEAN(0, "per-thread", &record.opts.target.per_thread,
 1169		    "use per-thread mmaps"),
bcc84ec6
SE
 1170	OPT_CALLBACK_OPTARG('I', "intr-regs", &record.opts.sample_intr_regs, NULL, "any register",
 1171		    "sample selected machine registers on interrupt,"
 1172		    " use -I ? to list register names", parse_regs),
85c273d2
AK
 1173	OPT_BOOLEAN(0, "running-time", &record.opts.running_time,
 1174		    "Record running/enabled time of read (:S) events"),
814c8c38
PZ
 1175	OPT_CALLBACK('k', "clockid", &record.opts,
 1176	"clockid", "clockid to use for events, see clock_gettime()",
 1177	parse_clockid),
2dd6d8a1
AH
 1178	OPT_STRING_OPTARG('S', "snapshot", &record.opts.auxtrace_snapshot_opts,
 1179			  "opts", "AUX area tracing Snapshot Mode", ""),
9d9cad76
KL
 1180	OPT_UINTEGER(0, "proc-map-timeout", &record.opts.proc_map_timeout,
 1181			"per thread proc mmap processing timeout in ms"),
b757bb09
AH
 1182	OPT_BOOLEAN(0, "switch-events", &record.opts.record_switch_events,
 1183		    "Record context switch events"),
85723885
JO
 1184	OPT_BOOLEAN_FLAG(0, "all-kernel", &record.opts.all_kernel,
 1185			 "Configure all used events to run in kernel space.",
 1186			 PARSE_OPT_EXCLUSIVE),
 1187	OPT_BOOLEAN_FLAG(0, "all-user", &record.opts.all_user,
 1188			 "Configure all used events to run in user space.",
 1189			 PARSE_OPT_EXCLUSIVE),
71dc2326
WN
 1190	OPT_STRING(0, "clang-path", &llvm_param.clang_path, "clang path",
 1191		   "clang binary to use for compiling BPF scriptlets"),
 1192	OPT_STRING(0, "clang-opt", &llvm_param.clang_opt, "clang options",
 1193		   "options passed to clang when compiling BPF scriptlets"),
7efe0e03
HK
 1194	OPT_STRING(0, "vmlinux", &symbol_conf.vmlinux_name,
 1195		   "file", "vmlinux pathname"),
6156681b
NK
 1196	OPT_BOOLEAN(0, "buildid-all", &record.buildid_all,
 1197		    "Record build-id of all DSOs regardless of hits"),
0e9b20b8
IM
 1198	OPT_END()
 1199};
 1200
/* Exported alias consumed outside this file (see the XXX comment above). */
e5b2c207
NK
 1201struct option *record_options = __record_options;
1202
/*
 * Entry point for 'perf record': allocate the evlist, parse the command line,
 * validate the target, configure AUX tracing and record options, then hand
 * off to __cmd_record() and release everything on the way out.
 */
1d037ca1 1203int cmd_record(int argc, const char **argv, const char *prefix __maybe_unused)
0e9b20b8 1204{
ef149c25 1205	int err;
8c6f45a7 1206	struct record *rec = &record;
16ad2ffb 1207	char errbuf[BUFSIZ];
0e9b20b8 1208
	/* Mark options as unavailable when their build-time support is missing,
	 * telling the user which build flag would enable them. */
48e1cab1
WN
 1209#ifndef HAVE_LIBBPF_SUPPORT
 1210# define set_nobuild(s, l, c) set_option_nobuild(record_options, s, l, "NO_LIBBPF=1", c)
 1211	set_nobuild('\0', "clang-path", true);
 1212	set_nobuild('\0', "clang-opt", true);
 1213# undef set_nobuild
7efe0e03
HK
 1214#endif
 1215
 1216#ifndef HAVE_BPF_PROLOGUE
 1217# if !defined (HAVE_DWARF_SUPPORT)
 1218#  define REASON  "NO_DWARF=1"
 1219# elif !defined (HAVE_LIBBPF_SUPPORT)
 1220#  define REASON  "NO_LIBBPF=1"
 1221# else
 1222#  define REASON  "this architecture doesn't support BPF prologue"
 1223# endif
 1224# define set_nobuild(s, l, c) set_option_nobuild(record_options, s, l, REASON, c)
 1225	set_nobuild('\0', "vmlinux", true);
 1226# undef set_nobuild
 1227# undef REASON
48e1cab1
WN
 1228#endif
 1229
	/* Allocate the evlist before option parsing: -e/--filter callbacks fill it. */
3e2be2da
ACM
 1230	rec->evlist = perf_evlist__new();
 1231	if (rec->evlist == NULL)
361c99a6
ACM
 1232		return -ENOMEM;
 1233
eb853e80
JO
 1234	perf_config(perf_record_config, rec);
 1235
bca647aa 1236	argc = parse_options(argc, argv, record_options, record_usage,
655000e7 1237			    PARSE_OPT_STOP_AT_NON_OPTION);
602ad878 1238	if (!argc && target__none(&rec->opts.target))
bca647aa 1239		usage_with_options(record_usage, record_options);
0e9b20b8 1240
bea03405 1241	if (nr_cgroups && !rec->opts.target.system_wide) {
c7118369
NK
 1242		usage_with_options_msg(record_usage, record_options,
 1243			"cgroup monitoring only available in system-wide mode");
 1244
023695d9 1245	}
b757bb09
AH
 1246	if (rec->opts.record_switch_events &&
 1247	    !perf_can_record_switch_events()) {
c7118369
NK
 1248		ui__error("kernel does not support recording context switch events\n");
 1249		parse_options_usage(record_usage, record_options, "switch-events", 0);
 1250		return -EINVAL;
b757bb09 1251	}
023695d9 1252
	/* Lazily create the AUX area tracing recorder.
	 * NOTE(review): rec->itr appears allowed to stay NULL when no AUX tracing
	 * is available; the auxtrace_* calls below seem to accept that -- confirm. */
ef149c25
AH
 1253	if (!rec->itr) {
 1254		rec->itr = auxtrace_record__init(rec->evlist, &err);
 1255		if (err)
 1256			return err;
 1257	}
 1258
2dd6d8a1
AH
 1259	err = auxtrace_parse_snapshot_options(rec->itr, &rec->opts,
 1260					    rec->opts.auxtrace_snapshot_opts);
 1261	if (err)
 1262		return err;
 1263
ef149c25
AH
 1264	err = -ENOMEM;
 1265
0a7e6d1b 1266	symbol__init(NULL);
baa2f6ce 1267
ec80fde7 1268	if (symbol_conf.kptr_restrict)
646aaea6
ACM
 1269		pr_warning(
 1270"WARNING: Kernel address maps (/proc/{kallsyms,modules}) are restricted,\n"
 1271"check /proc/sys/kernel/kptr_restrict.\n\n"
 1272"Samples in kernel functions may not be resolved if a suitable vmlinux\n"
 1273"file is not found in the buildid cache or in the vmlinux path.\n\n"
 1274"Samples in kernel modules won't be resolved at all.\n\n"
 1275"If some relocation was applied (e.g. kexec) symbols may be misresolved\n"
 1276"even with a suitable vmlinux or kallsyms file.\n\n");
ec80fde7 1277
d20deb64 1278	if (rec->no_buildid_cache || rec->no_buildid)
a1ac1d3c 1279		disable_buildid_cache();
655000e7 1280
	/* No events given on the command line: fall back to a default event. */
3e2be2da
ACM
 1281	if (rec->evlist->nr_entries == 0 &&
 1282	    perf_evlist__add_default(rec->evlist) < 0) {
69aad6f1
ACM
 1283		pr_err("Not enough memory for event selector list\n");
 1284		goto out_symbol_exit;
bbd36e5e 1285	}
0e9b20b8 1286
69e7e5b0
AH
 1287	if (rec->opts.target.tid && !rec->opts.no_inherit_set)
 1288		rec->opts.no_inherit = true;
 1289
	/* Target validation only warns; a uid parse failure below is fatal. */
602ad878 1290	err = target__validate(&rec->opts.target);
16ad2ffb 1291	if (err) {
602ad878 1292		target__strerror(&rec->opts.target, err, errbuf, BUFSIZ);
16ad2ffb
NK
 1293		ui__warning("%s", errbuf);
 1294	}
 1295
602ad878 1296	err = target__parse_uid(&rec->opts.target);
16ad2ffb
NK
 1297	if (err) {
		/* target__strerror() may clobber errno; save it for the return value. */
 1298		int saved_errno = errno;
4bd0f2d2 1299
602ad878 1300		target__strerror(&rec->opts.target, err, errbuf, BUFSIZ);
3780f488 1301		ui__error("%s", errbuf);
16ad2ffb
NK
 1302
 1303		err = -saved_errno;
8fa60e1f 1304		goto out_symbol_exit;
16ad2ffb 1305	}
0d37aa34 1306
16ad2ffb 1307	err = -ENOMEM;
3e2be2da 1308	if (perf_evlist__create_maps(rec->evlist, &rec->opts.target) < 0)
dd7927f4 1309		usage_with_options(record_usage, record_options);
69aad6f1 1310
ef149c25
AH
 1311	err = auxtrace_record__options(rec->itr, rec->evlist, &rec->opts);
 1312	if (err)
 1313		goto out_symbol_exit;
 1314
6156681b
NK
 1315	/*
 1316	 * We take all buildids when the file contains
 1317	 * AUX area tracing data because we do not decode the
 1318	 * trace because it would take too long.
 1319	 */
 1320	if (rec->opts.full_auxtrace)
 1321		rec->buildid_all = true;
 1322
b4006796 1323	if (record_opts__config(&rec->opts)) {
39d17dac 1324		err = -EINVAL;
03ad9747 1325		goto out_symbol_exit;
7e4ff9e3
MG
 1326	}
 1327
d20deb64 1328	err = __cmd_record(&record, argc, argv);
	/* Cleanup shared by the success and error paths. */
d65a458b 1329out_symbol_exit:
45604710 1330	perf_evlist__delete(rec->evlist);
d65a458b 1331	symbol__exit();
ef149c25 1332	auxtrace_record__free(rec->itr);
39d17dac 1333	return err;
0e9b20b8 1334}
2dd6d8a1
AH
1335
1336static void snapshot_sig_handler(int sig __maybe_unused)
1337{
1338 if (!auxtrace_snapshot_enabled)
1339 return;
1340 auxtrace_snapshot_enabled = 0;
1341 auxtrace_snapshot_err = auxtrace_record__snapshot_start(record.itr);
1342 auxtrace_record__snapshot_started = 1;
1343}