/*
 * builtin-record.c
 *
 * Builtin record command: Record the profile of a workload
 * (or a CPU, or a PID) into the perf.data output file - for
 * later analysis via perf report.
 */
#include "builtin.h"

#include "perf.h"

#include "util/build-id.h"
#include "util/util.h"
#include "util/parse-options.h"
#include "util/parse-events.h"

#include "util/callchain.h"
#include "util/cgroup.h"
#include "util/header.h"
#include "util/event.h"
#include "util/evlist.h"
#include "util/evsel.h"
#include "util/debug.h"
#include "util/session.h"
#include "util/tool.h"
#include "util/symbol.h"
#include "util/cpumap.h"
#include "util/thread_map.h"
#include "util/data.h"
#include "util/perf_regs.h"
#include "util/auxtrace.h"
#include "util/parse-branch-options.h"
#include "util/parse-regs-options.h"

#include <unistd.h>
#include <sched.h>
#include <sys/mman.h>


struct record {
	struct perf_tool	tool;
	struct record_opts	opts;
	u64			bytes_written;
	struct perf_data_file	file;
	struct auxtrace_record	*itr;
	struct perf_evlist	*evlist;
	struct perf_session	*session;
	const char		*progname;
	int			realtime_prio;
	bool			no_buildid;
	bool			no_buildid_cache;
	long			samples;
};

static int record__write(struct record *rec, void *bf, size_t size)
{
	if (perf_data_file__write(rec->session->file, bf, size) < 0) {
		pr_err("failed to write perf data, error: %m\n");
		return -1;
	}

	rec->bytes_written += size;
	return 0;
}

static int process_synthesized_event(struct perf_tool *tool,
				     union perf_event *event,
				     struct perf_sample *sample __maybe_unused,
				     struct machine *machine __maybe_unused)
{
	struct record *rec = container_of(tool, struct record, tool);
	return record__write(rec, event, event->header.size);
}

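/*
 * Drain one mmap'ed ring buffer: if the new data wraps past the end of the
 * buffer it is flushed in two chunks, then the read pointer is advanced and
 * the kernel is notified via perf_evlist__mmap_consume().
 */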
static int record__mmap_read(struct record *rec, int idx)
{
	struct perf_mmap *md = &rec->evlist->mmap[idx];
	u64 head = perf_mmap__read_head(md);
	u64 old = md->prev;
	unsigned char *data = md->base + page_size;
	unsigned long size;
	void *buf;
	int rc = 0;

	if (old == head)
		return 0;

	rec->samples++;

	size = head - old;

	if ((old & md->mask) + size != (head & md->mask)) {
		buf = &data[old & md->mask];
		size = md->mask + 1 - (old & md->mask);
		old += size;

		if (record__write(rec, buf, size) < 0) {
			rc = -1;
			goto out;
		}
	}

	buf = &data[old & md->mask];
	size = head - old;
	old += size;

	if (record__write(rec, buf, size) < 0) {
		rc = -1;
		goto out;
	}

	md->prev = old;
	perf_evlist__mmap_consume(rec->evlist, idx);
out:
	return rc;
}

static volatile int done;
static volatile int signr = -1;
static volatile int child_finished;
static volatile int auxtrace_snapshot_enabled;
static volatile int auxtrace_snapshot_err;
static volatile int auxtrace_record__snapshot_started;

static void sig_handler(int sig)
{
	if (sig == SIGCHLD)
		child_finished = 1;
	else
		signr = sig;

	done = 1;
}

static void record__sig_exit(void)
{
	if (signr == -1)
		return;

	signal(signr, SIG_DFL);
	raise(signr);
}

#ifdef HAVE_AUXTRACE_SUPPORT

static int record__process_auxtrace(struct perf_tool *tool,
				    union perf_event *event, void *data1,
				    size_t len1, void *data2, size_t len2)
{
	struct record *rec = container_of(tool, struct record, tool);
	struct perf_data_file *file = &rec->file;
	size_t padding;
	u8 pad[8] = {0};

	if (!perf_data_file__is_pipe(file)) {
		off_t file_offset;
		int fd = perf_data_file__fd(file);
		int err;

		file_offset = lseek(fd, 0, SEEK_CUR);
		if (file_offset == -1)
			return -1;
		err = auxtrace_index__auxtrace_event(&rec->session->auxtrace_index,
						     event, file_offset);
		if (err)
			return err;
	}

	/* event.auxtrace.size includes padding, see __auxtrace_mmap__read() */
	padding = (len1 + len2) & 7;
	if (padding)
		padding = 8 - padding;

	record__write(rec, event, event->header.size);
	record__write(rec, data1, len1);
	if (len2)
		record__write(rec, data2, len2);
	record__write(rec, &pad, padding);

	return 0;
}

static int record__auxtrace_mmap_read(struct record *rec,
				      struct auxtrace_mmap *mm)
{
	int ret;

	ret = auxtrace_mmap__read(mm, rec->itr, &rec->tool,
				  record__process_auxtrace);
	if (ret < 0)
		return ret;

	if (ret)
		rec->samples++;

	return 0;
}

static int record__auxtrace_mmap_read_snapshot(struct record *rec,
					       struct auxtrace_mmap *mm)
{
	int ret;

	ret = auxtrace_mmap__read_snapshot(mm, rec->itr, &rec->tool,
					   record__process_auxtrace,
					   rec->opts.auxtrace_snapshot_size);
	if (ret < 0)
		return ret;

	if (ret)
		rec->samples++;

	return 0;
}

static int record__auxtrace_read_snapshot_all(struct record *rec)
{
	int i;
	int rc = 0;

	for (i = 0; i < rec->evlist->nr_mmaps; i++) {
		struct auxtrace_mmap *mm =
				&rec->evlist->mmap[i].auxtrace_mmap;

		if (!mm->base)
			continue;

		if (record__auxtrace_mmap_read_snapshot(rec, mm) != 0) {
			rc = -1;
			goto out;
		}
	}
out:
	return rc;
}

static void record__read_auxtrace_snapshot(struct record *rec)
{
	pr_debug("Recording AUX area tracing snapshot\n");
	if (record__auxtrace_read_snapshot_all(rec) < 0) {
		auxtrace_snapshot_err = -1;
	} else {
		auxtrace_snapshot_err = auxtrace_record__snapshot_finish(rec->itr);
		if (!auxtrace_snapshot_err)
			auxtrace_snapshot_enabled = 1;
	}
}

#else

static inline
int record__auxtrace_mmap_read(struct record *rec __maybe_unused,
			       struct auxtrace_mmap *mm __maybe_unused)
{
	return 0;
}

static inline
void record__read_auxtrace_snapshot(struct record *rec __maybe_unused)
{
}

static inline
int auxtrace_record__snapshot_start(struct auxtrace_record *itr __maybe_unused)
{
	return 0;
}

#endif

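/*
 * Open all selected events, apply event filters and mmap the per-cpu ring
 * buffers (plus the AUX area buffers when requested). Events the kernel
 * cannot open are retried via perf_evsel__fallback() before giving up.
 */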
static int record__open(struct record *rec)
{
	char msg[512];
	struct perf_evsel *pos;
	struct perf_evlist *evlist = rec->evlist;
	struct perf_session *session = rec->session;
	struct record_opts *opts = &rec->opts;
	int rc = 0;

	perf_evlist__config(evlist, opts);

	evlist__for_each(evlist, pos) {
try_again:
		if (perf_evsel__open(pos, pos->cpus, pos->threads) < 0) {
			if (perf_evsel__fallback(pos, errno, msg, sizeof(msg))) {
				if (verbose)
					ui__warning("%s\n", msg);
				goto try_again;
			}

			rc = -errno;
			perf_evsel__open_strerror(pos, &opts->target,
						  errno, msg, sizeof(msg));
			ui__error("%s\n", msg);
			goto out;
		}
	}

	if (perf_evlist__apply_filters(evlist, &pos)) {
		error("failed to set filter \"%s\" on event %s with %d (%s)\n",
			pos->filter, perf_evsel__name(pos), errno,
			strerror_r(errno, msg, sizeof(msg)));
		rc = -1;
		goto out;
	}

	if (perf_evlist__mmap_ex(evlist, opts->mmap_pages, false,
				 opts->auxtrace_mmap_pages,
				 opts->auxtrace_snapshot_mode) < 0) {
		if (errno == EPERM) {
			pr_err("Permission error mapping pages.\n"
			       "Consider increasing "
			       "/proc/sys/kernel/perf_event_mlock_kb,\n"
			       "or try again with a smaller value of -m/--mmap_pages.\n"
			       "(current value: %u,%u)\n",
			       opts->mmap_pages, opts->auxtrace_mmap_pages);
			rc = -errno;
		} else {
			pr_err("failed to mmap with %d (%s)\n", errno,
				strerror_r(errno, msg, sizeof(msg)));
			rc = -errno;
		}
		goto out;
	}

	session->evlist = evlist;
	perf_session__set_id_hdr_size(session);
out:
	return rc;
}

static int process_sample_event(struct perf_tool *tool,
				union perf_event *event,
				struct perf_sample *sample,
				struct perf_evsel *evsel,
				struct machine *machine)
{
	struct record *rec = container_of(tool, struct record, tool);

	rec->samples++;

	return build_id__mark_dso_hit(tool, event, sample, evsel, machine);
}

static int process_buildids(struct record *rec)
{
	struct perf_data_file *file  = &rec->file;
	struct perf_session *session = rec->session;

	if (file->size == 0)
		return 0;

	/*
	 * During this process, it'll load kernel map and replace the
	 * dso->long_name to a real pathname it found.  In this case
	 * we prefer the vmlinux path like
	 *   /lib/modules/3.16.4/build/vmlinux
	 *
	 * rather than build-id path (in debug directory).
	 *   $HOME/.debug/.build-id/f0/6e17aa50adf4d00b88925e03775de107611551
	 */
	symbol_conf.ignore_vmlinux_buildid = true;

	return perf_session__process_events(session);
}

static void perf_event__synthesize_guest_os(struct machine *machine, void *data)
{
	int err;
	struct perf_tool *tool = data;
	/*
	 *As for guest kernel when processing subcommand record&report,
	 *we arrange module mmap prior to guest kernel mmap and trigger
	 *a preload dso because default guest module symbols are loaded
	 *from guest kallsyms instead of /lib/modules/XXX/XXX. This
	 *method is used to avoid symbol missing when the first addr is
	 *in module instead of in guest kernel.
	 */
	err = perf_event__synthesize_modules(tool, process_synthesized_event,
					     machine);
	if (err < 0)
		pr_err("Couldn't record guest kernel [%d]'s reference"
		       " relocation symbol.\n", machine->pid);

	/*
	 * We use _stext for guest kernel because guest kernel's /proc/kallsyms
	 * have no _text sometimes.
	 */
	err = perf_event__synthesize_kernel_mmap(tool, process_synthesized_event,
						 machine);
	if (err < 0)
		pr_err("Couldn't record guest kernel [%d]'s reference"
		       " relocation symbol.\n", machine->pid);
}

static struct perf_event_header finished_round_event = {
	.size = sizeof(struct perf_event_header),
	.type = PERF_RECORD_FINISHED_ROUND,
};

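/*
 * Flush every event ring buffer and, unless snapshot mode is in use, every
 * AUX area buffer. A PERF_RECORD_FINISHED_ROUND marker is appended whenever
 * this pass actually wrote data, so the report side can bound its reordering.
 */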
static int record__mmap_read_all(struct record *rec)
{
	u64 bytes_written = rec->bytes_written;
	int i;
	int rc = 0;

	for (i = 0; i < rec->evlist->nr_mmaps; i++) {
		struct auxtrace_mmap *mm = &rec->evlist->mmap[i].auxtrace_mmap;

		if (rec->evlist->mmap[i].base) {
			if (record__mmap_read(rec, i) != 0) {
				rc = -1;
				goto out;
			}
		}

		if (mm->base && !rec->opts.auxtrace_snapshot_mode &&
		    record__auxtrace_mmap_read(rec, mm) != 0) {
			rc = -1;
			goto out;
		}
	}

	/*
	 * Mark the round finished in case we wrote
	 * at least one event.
	 */
	if (bytes_written != rec->bytes_written)
		rc = record__write(rec, &finished_round_event, sizeof(finished_round_event));

out:
	return rc;
}

static void record__init_features(struct record *rec)
{
	struct perf_session *session = rec->session;
	int feat;

	for (feat = HEADER_FIRST_FEATURE; feat < HEADER_LAST_FEATURE; feat++)
		perf_header__set_feat(&session->header, feat);

	if (rec->no_buildid)
		perf_header__clear_feat(&session->header, HEADER_BUILD_ID);

	if (!have_tracepoints(&rec->evlist->entries))
		perf_header__clear_feat(&session->header, HEADER_TRACING_DATA);

	if (!rec->opts.branch_stack)
		perf_header__clear_feat(&session->header, HEADER_BRANCH_STACK);

	if (!rec->opts.full_auxtrace)
		perf_header__clear_feat(&session->header, HEADER_AUXTRACE);
}

static volatile int workload_exec_errno;

/*
 * perf_evlist__prepare_workload will send a SIGUSR1
 * if the fork fails, since we asked by setting its
 * want_signal to true.
 */
static void workload_exec_failed_signal(int signo __maybe_unused,
					siginfo_t *info,
					void *ucontext __maybe_unused)
{
	workload_exec_errno = info->si_value.sival_int;
	done = 1;
	child_finished = 1;
}

static void snapshot_sig_handler(int sig);

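/*
 * Main body of 'perf record': install signal handlers, create the session and
 * data file, synthesize the metadata events (attrs, kernel/module mmaps,
 * existing threads), then loop draining the ring buffers until the workload
 * exits or recording is interrupted, and finally finish up the file header.
 */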
static int __cmd_record(struct record *rec, int argc, const char **argv)
{
	int err;
	int status = 0;
	unsigned long waking = 0;
	const bool forks = argc > 0;
	struct machine *machine;
	struct perf_tool *tool = &rec->tool;
	struct record_opts *opts = &rec->opts;
	struct perf_data_file *file = &rec->file;
	struct perf_session *session;
	bool disabled = false, draining = false;
	int fd;

	rec->progname = argv[0];

	atexit(record__sig_exit);
	signal(SIGCHLD, sig_handler);
	signal(SIGINT, sig_handler);
	signal(SIGTERM, sig_handler);
	if (rec->opts.auxtrace_snapshot_mode)
		signal(SIGUSR2, snapshot_sig_handler);
	else
		signal(SIGUSR2, SIG_IGN);

	session = perf_session__new(file, false, tool);
	if (session == NULL) {
		pr_err("Perf session creation failed.\n");
		return -1;
	}

	fd = perf_data_file__fd(file);
	rec->session = session;

	record__init_features(rec);

	if (forks) {
		err = perf_evlist__prepare_workload(rec->evlist, &opts->target,
						    argv, file->is_pipe,
						    workload_exec_failed_signal);
		if (err < 0) {
			pr_err("Couldn't run the workload!\n");
			status = err;
			goto out_delete_session;
		}
	}

	if (record__open(rec) != 0) {
		err = -1;
		goto out_child;
	}

	/*
	 * Normally perf_session__new would do this, but it doesn't have the
	 * evlist.
	 */
	if (rec->tool.ordered_events && !perf_evlist__sample_id_all(rec->evlist)) {
		pr_warning("WARNING: No sample_id_all support, falling back to unordered processing\n");
		rec->tool.ordered_events = false;
	}

	if (!rec->evlist->nr_groups)
		perf_header__clear_feat(&session->header, HEADER_GROUP_DESC);

	if (file->is_pipe) {
		err = perf_header__write_pipe(fd);
		if (err < 0)
			goto out_child;
	} else {
		err = perf_session__write_header(session, rec->evlist, fd, false);
		if (err < 0)
			goto out_child;
	}

	if (!rec->no_buildid
	    && !perf_header__has_feat(&session->header, HEADER_BUILD_ID)) {
		pr_err("Couldn't generate buildids. "
		       "Use --no-buildid to profile anyway.\n");
		err = -1;
		goto out_child;
	}

	machine = &session->machines.host;

	if (file->is_pipe) {
		err = perf_event__synthesize_attrs(tool, session,
						   process_synthesized_event);
		if (err < 0) {
			pr_err("Couldn't synthesize attrs.\n");
			goto out_child;
		}

		if (have_tracepoints(&rec->evlist->entries)) {
			/*
			 * FIXME err <= 0 here actually means that
			 * there were no tracepoints so its not really
			 * an error, just that we don't need to
			 * synthesize anything.  We really have to
			 * return this more properly and also
			 * propagate errors that now are calling die()
			 */
			err = perf_event__synthesize_tracing_data(tool, fd, rec->evlist,
								  process_synthesized_event);
			if (err <= 0) {
				pr_err("Couldn't record tracing data.\n");
				goto out_child;
			}
			rec->bytes_written += err;
		}
	}

	if (rec->opts.full_auxtrace) {
		err = perf_event__synthesize_auxtrace_info(rec->itr, tool,
					session, process_synthesized_event);
		if (err)
			goto out_delete_session;
	}

	err = perf_event__synthesize_kernel_mmap(tool, process_synthesized_event,
						 machine);
	if (err < 0)
		pr_err("Couldn't record kernel reference relocation symbol\n"
		       "Symbol resolution may be skewed if relocation was used (e.g. kexec).\n"
		       "Check /proc/kallsyms permission or run as root.\n");

	err = perf_event__synthesize_modules(tool, process_synthesized_event,
					     machine);
	if (err < 0)
		pr_err("Couldn't record kernel module information.\n"
		       "Symbol resolution may be skewed if relocation was used (e.g. kexec).\n"
		       "Check /proc/modules permission or run as root.\n");

	if (perf_guest) {
		machines__process_guests(&session->machines,
					 perf_event__synthesize_guest_os, tool);
	}

	err = __machine__synthesize_threads(machine, tool, &opts->target, rec->evlist->threads,
					    process_synthesized_event, opts->sample_address,
					    opts->proc_map_timeout);
	if (err != 0)
		goto out_child;

	if (rec->realtime_prio) {
		struct sched_param param;

		param.sched_priority = rec->realtime_prio;
		if (sched_setscheduler(0, SCHED_FIFO, &param)) {
			pr_err("Could not set realtime priority.\n");
			err = -1;
			goto out_child;
		}
	}

	/*
	 * When perf is starting the traced process, all the events
	 * (apart from group members) have enable_on_exec=1 set,
	 * so don't spoil it by prematurely enabling them.
	 */
	if (!target__none(&opts->target) && !opts->initial_delay)
		perf_evlist__enable(rec->evlist);

	/*
	 * Let the child rip
	 */
	if (forks) {
		union perf_event event;
		/*
		 * Some H/W events are generated before COMM event
		 * which is emitted during exec(), so perf script
		 * cannot see a correct process name for those events.
		 * Synthesize COMM event to prevent it.
		 */
		perf_event__synthesize_comm(tool, &event,
					    rec->evlist->workload.pid,
					    process_synthesized_event,
					    machine);

		perf_evlist__start_workload(rec->evlist);
	}

	if (opts->initial_delay) {
		usleep(opts->initial_delay * 1000);
		perf_evlist__enable(rec->evlist);
	}

	auxtrace_snapshot_enabled = 1;
	for (;;) {
		int hits = rec->samples;

		if (record__mmap_read_all(rec) < 0) {
			auxtrace_snapshot_enabled = 0;
			err = -1;
			goto out_child;
		}

		if (auxtrace_record__snapshot_started) {
			auxtrace_record__snapshot_started = 0;
			if (!auxtrace_snapshot_err)
				record__read_auxtrace_snapshot(rec);
			if (auxtrace_snapshot_err) {
				pr_err("AUX area tracing snapshot failed\n");
				err = -1;
				goto out_child;
			}
		}

		if (hits == rec->samples) {
			if (done || draining)
				break;
			err = perf_evlist__poll(rec->evlist, -1);
			/*
			 * Propagate error, only if there's any. Ignore positive
			 * number of returned events and interrupt error.
			 */
			if (err > 0 || (err < 0 && errno == EINTR))
				err = 0;
			waking++;

			if (perf_evlist__filter_pollfd(rec->evlist, POLLERR | POLLHUP) == 0)
				draining = true;
		}

		/*
		 * When perf is starting the traced process, at the end events
		 * die with the process and we wait for that. Thus no need to
		 * disable events in this case.
		 */
		if (done && !disabled && !target__none(&opts->target)) {
			auxtrace_snapshot_enabled = 0;
			perf_evlist__disable(rec->evlist);
			disabled = true;
		}
	}
	auxtrace_snapshot_enabled = 0;

	if (forks && workload_exec_errno) {
		char msg[STRERR_BUFSIZE];
		const char *emsg = strerror_r(workload_exec_errno, msg, sizeof(msg));
		pr_err("Workload failed: %s\n", emsg);
		err = -1;
		goto out_child;
	}

	if (!quiet)
		fprintf(stderr, "[ perf record: Woken up %ld times to write data ]\n", waking);

out_child:
	if (forks) {
		int exit_status;

		if (!child_finished)
			kill(rec->evlist->workload.pid, SIGTERM);

		wait(&exit_status);

		if (err < 0)
			status = err;
		else if (WIFEXITED(exit_status))
			status = WEXITSTATUS(exit_status);
		else if (WIFSIGNALED(exit_status))
			signr = WTERMSIG(exit_status);
	} else
		status = err;

	/* this will be recalculated during process_buildids() */
	rec->samples = 0;

	if (!err && !file->is_pipe) {
		rec->session->header.data_size += rec->bytes_written;
		file->size = lseek(perf_data_file__fd(file), 0, SEEK_CUR);

		if (!rec->no_buildid) {
			process_buildids(rec);
			/*
			 * We take all buildids when the file contains
			 * AUX area tracing data because we do not decode the
			 * trace because it would take too long.
			 */
			if (rec->opts.full_auxtrace)
				dsos__hit_all(rec->session);
		}
		perf_session__write_header(rec->session, rec->evlist, fd, true);
	}

	if (!err && !quiet) {
		char samples[128];

		if (rec->samples && !rec->opts.full_auxtrace)
			scnprintf(samples, sizeof(samples),
				  " (%" PRIu64 " samples)", rec->samples);
		else
			samples[0] = '\0';

		fprintf(stderr, "[ perf record: Captured and wrote %.3f MB %s%s ]\n",
			perf_data_file__size(file) / 1024.0 / 1024.0,
			file->path, samples);
	}

out_delete_session:
	perf_session__delete(session);
	return status;
}

static void callchain_debug(void)
{
	static const char *str[CALLCHAIN_MAX] = { "NONE", "FP", "DWARF", "LBR" };

	pr_debug("callchain: type %s\n", str[callchain_param.record_mode]);

	if (callchain_param.record_mode == CALLCHAIN_DWARF)
		pr_debug("callchain: stack dump size %d\n",
			 callchain_param.dump_size);
}

int record_parse_callchain_opt(const struct option *opt,
			       const char *arg,
			       int unset)
{
	int ret;
	struct record_opts *record = (struct record_opts *)opt->value;

	record->callgraph_set = true;
	callchain_param.enabled = !unset;

	/* --no-call-graph */
	if (unset) {
		callchain_param.record_mode = CALLCHAIN_NONE;
		pr_debug("callchain: disabled\n");
		return 0;
	}

	ret = parse_callchain_record_opt(arg, &callchain_param);
	if (!ret)
		callchain_debug();

	return ret;
}

int record_callchain_opt(const struct option *opt,
			 const char *arg __maybe_unused,
			 int unset __maybe_unused)
{
	struct record_opts *record = (struct record_opts *)opt->value;

	record->callgraph_set = true;
	callchain_param.enabled = true;

	if (callchain_param.record_mode == CALLCHAIN_NONE)
		callchain_param.record_mode = CALLCHAIN_FP;

	callchain_debug();
	return 0;
}

static int perf_record_config(const char *var, const char *value, void *cb)
{
	if (!strcmp(var, "record.call-graph"))
		var = "call-graph.record-mode"; /* fall-through */

	return perf_default_config(var, value, cb);
}

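/*
 * Table for the -k/--clockid option: parse_clockid() below accepts either a
 * name from this table (an optional "CLOCK_" prefix is allowed) or a raw
 * clockid number.
 */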
struct clockid_map {
	const char *name;
	int clockid;
};

#define CLOCKID_MAP(n, c)	\
	{ .name = n, .clockid = (c), }

#define CLOCKID_END	{ .name = NULL, }


/*
 * Add the missing ones, we need to build on many distros...
 */
#ifndef CLOCK_MONOTONIC_RAW
#define CLOCK_MONOTONIC_RAW 4
#endif
#ifndef CLOCK_BOOTTIME
#define CLOCK_BOOTTIME 7
#endif
#ifndef CLOCK_TAI
#define CLOCK_TAI 11
#endif

static const struct clockid_map clockids[] = {
	/* available for all events, NMI safe */
	CLOCKID_MAP("monotonic", CLOCK_MONOTONIC),
	CLOCKID_MAP("monotonic_raw", CLOCK_MONOTONIC_RAW),

	/* available for some events */
	CLOCKID_MAP("realtime", CLOCK_REALTIME),
	CLOCKID_MAP("boottime", CLOCK_BOOTTIME),
	CLOCKID_MAP("tai", CLOCK_TAI),

	/* available for the lazy */
	CLOCKID_MAP("mono", CLOCK_MONOTONIC),
	CLOCKID_MAP("raw", CLOCK_MONOTONIC_RAW),
	CLOCKID_MAP("real", CLOCK_REALTIME),
	CLOCKID_MAP("boot", CLOCK_BOOTTIME),

	CLOCKID_END,
};

static int parse_clockid(const struct option *opt, const char *str, int unset)
{
	struct record_opts *opts = (struct record_opts *)opt->value;
	const struct clockid_map *cm;
	const char *ostr = str;

	if (unset) {
		opts->use_clockid = 0;
		return 0;
	}

	/* no arg passed */
	if (!str)
		return 0;

	/* no setting it twice */
	if (opts->use_clockid)
		return -1;

	opts->use_clockid = true;

	/* if its a number, we're done */
	if (sscanf(str, "%d", &opts->clockid) == 1)
		return 0;

	/* allow a "CLOCK_" prefix to the name */
	if (!strncasecmp(str, "CLOCK_", 6))
		str += 6;

	for (cm = clockids; cm->name; cm++) {
		if (!strcasecmp(str, cm->name)) {
			opts->clockid = cm->clockid;
			return 0;
		}
	}

	opts->use_clockid = false;
	ui__warning("unknown clockid %s, check man page\n", ostr);
	return -1;
}

static int record__parse_mmap_pages(const struct option *opt,
				    const char *str,
				    int unset __maybe_unused)
{
	struct record_opts *opts = opt->value;
	char *s, *p;
	unsigned int mmap_pages;
	int ret;

	if (!str)
		return -EINVAL;

	s = strdup(str);
	if (!s)
		return -ENOMEM;

	p = strchr(s, ',');
	if (p)
		*p = '\0';

	if (*s) {
		ret = __perf_evlist__parse_mmap_pages(&mmap_pages, s);
		if (ret)
			goto out_free;
		opts->mmap_pages = mmap_pages;
	}

	if (!p) {
		ret = 0;
		goto out_free;
	}

	ret = __perf_evlist__parse_mmap_pages(&mmap_pages, p + 1);
	if (ret)
		goto out_free;

	opts->auxtrace_mmap_pages = mmap_pages;

out_free:
	free(s);
	return ret;
}

static const char * const __record_usage[] = {
	"perf record [<options>] [<command>]",
	"perf record [<options>] -- <command> [<options>]",
	NULL
};
const char * const *record_usage = __record_usage;

/*
 * XXX Ideally would be local to cmd_record() and passed to a record__new
 * because we need to have access to it in record__exit, that is called
 * after cmd_record() exits, but since record_options need to be accessible to
 * builtin-script, leave it here.
 *
 * At least we don't ouch it in all the other functions here directly.
 *
 * Just say no to tons of global variables, sigh.
 */
static struct record record = {
	.opts = {
		.sample_time	     = true,
		.mmap_pages	     = UINT_MAX,
		.user_freq	     = UINT_MAX,
		.user_interval	     = ULLONG_MAX,
		.freq		     = 4000,
		.target		     = {
			.uses_mmap   = true,
			.default_per_cpu = true,
		},
		.proc_map_timeout    = 500,
	},
	.tool = {
		.sample		= process_sample_event,
		.fork		= perf_event__process_fork,
		.exit		= perf_event__process_exit,
		.comm		= perf_event__process_comm,
		.mmap		= perf_event__process_mmap,
		.mmap2		= perf_event__process_mmap2,
		.ordered_events	= true,
	},
};

#define CALLCHAIN_HELP "setup and enables call-graph (stack chain/backtrace) recording: "

#ifdef HAVE_DWARF_UNWIND_SUPPORT
const char record_callchain_help[] = CALLCHAIN_HELP "fp dwarf lbr";
#else
const char record_callchain_help[] = CALLCHAIN_HELP "fp lbr";
#endif

/*
 * XXX Will stay a global variable till we fix builtin-script.c to stop messing
 * with it and switch to use the library functions in perf_evlist that came
 * from builtin-record.c, i.e. use record_opts,
 * perf_evlist__prepare_workload, etc instead of fork+exec'in 'perf record',
 * using pipes, etc.
 */
struct option __record_options[] = {
	OPT_CALLBACK('e', "event", &record.evlist, "event",
		     "event selector. use 'perf list' to list available events",
		     parse_events_option),
	OPT_CALLBACK(0, "filter", &record.evlist, "filter",
		     "event filter", parse_filter),
	OPT_CALLBACK_NOOPT(0, "exclude-perf", &record.evlist,
			   NULL, "don't record events from perf itself",
			   exclude_perf),
	OPT_STRING('p', "pid", &record.opts.target.pid, "pid",
		    "record events on existing process id"),
	OPT_STRING('t', "tid", &record.opts.target.tid, "tid",
		    "record events on existing thread id"),
	OPT_INTEGER('r', "realtime", &record.realtime_prio,
		    "collect data with this RT SCHED_FIFO priority"),
	OPT_BOOLEAN(0, "no-buffering", &record.opts.no_buffering,
		    "collect data without buffering"),
	OPT_BOOLEAN('R', "raw-samples", &record.opts.raw_samples,
		    "collect raw sample records from all opened counters"),
	OPT_BOOLEAN('a', "all-cpus", &record.opts.target.system_wide,
		    "system-wide collection from all CPUs"),
	OPT_STRING('C', "cpu", &record.opts.target.cpu_list, "cpu",
		    "list of cpus to monitor"),
	OPT_U64('c', "count", &record.opts.user_interval, "event period to sample"),
	OPT_STRING('o', "output", &record.file.path, "file",
		    "output file name"),
	OPT_BOOLEAN_SET('i', "no-inherit", &record.opts.no_inherit,
			&record.opts.no_inherit_set,
			"child tasks do not inherit counters"),
	OPT_UINTEGER('F', "freq", &record.opts.user_freq, "profile at this frequency"),
	OPT_CALLBACK('m', "mmap-pages", &record.opts, "pages[,pages]",
		     "number of mmap data pages and AUX area tracing mmap pages",
		     record__parse_mmap_pages),
	OPT_BOOLEAN(0, "group", &record.opts.group,
		    "put the counters into a counter group"),
	OPT_CALLBACK_NOOPT('g', NULL, &record.opts,
			   NULL, "enables call-graph recording" ,
			   &record_callchain_opt),
	OPT_CALLBACK(0, "call-graph", &record.opts,
		     "mode[,dump_size]", record_callchain_help,
		     &record_parse_callchain_opt),
	OPT_INCR('v', "verbose", &verbose,
		    "be more verbose (show counter open errors, etc)"),
	OPT_BOOLEAN('q', "quiet", &quiet, "don't print any message"),
	OPT_BOOLEAN('s', "stat", &record.opts.inherit_stat,
		    "per thread counts"),
	OPT_BOOLEAN('d', "data", &record.opts.sample_address, "Record the sample addresses"),
	OPT_BOOLEAN_SET('T', "timestamp", &record.opts.sample_time,
			&record.opts.sample_time_set,
			"Record the sample timestamps"),
	OPT_BOOLEAN('P', "period", &record.opts.period, "Record the sample period"),
	OPT_BOOLEAN('n', "no-samples", &record.opts.no_samples,
		    "don't sample"),
	OPT_BOOLEAN('N', "no-buildid-cache", &record.no_buildid_cache,
		    "do not update the buildid cache"),
	OPT_BOOLEAN('B', "no-buildid", &record.no_buildid,
		    "do not collect buildids in perf.data"),
	OPT_CALLBACK('G', "cgroup", &record.evlist, "name",
		     "monitor event in cgroup name only",
		     parse_cgroups),
	OPT_UINTEGER('D', "delay", &record.opts.initial_delay,
		  "ms to wait before starting measurement after program start"),
	OPT_STRING('u', "uid", &record.opts.target.uid_str, "user",
		   "user to profile"),

	OPT_CALLBACK_NOOPT('b', "branch-any", &record.opts.branch_stack,
		     "branch any", "sample any taken branches",
		     parse_branch_stack),

	OPT_CALLBACK('j', "branch-filter", &record.opts.branch_stack,
		     "branch filter mask", "branch stack filter modes",
		     parse_branch_stack),
	OPT_BOOLEAN('W', "weight", &record.opts.sample_weight,
		    "sample by weight (on special events only)"),
	OPT_BOOLEAN(0, "transaction", &record.opts.sample_transaction,
		    "sample transaction flags (special events only)"),
	OPT_BOOLEAN(0, "per-thread", &record.opts.target.per_thread,
		    "use per-thread mmaps"),
	OPT_CALLBACK_OPTARG('I', "intr-regs", &record.opts.sample_intr_regs, NULL, "any register",
		    "sample selected machine registers on interrupt,"
		    " use -I ? to list register names", parse_regs),
	OPT_BOOLEAN(0, "running-time", &record.opts.running_time,
		    "Record running/enabled time of read (:S) events"),
	OPT_CALLBACK('k', "clockid", &record.opts,
	"clockid", "clockid to use for events, see clock_gettime()",
	parse_clockid),
	OPT_STRING_OPTARG('S', "snapshot", &record.opts.auxtrace_snapshot_opts,
			  "opts", "AUX area tracing Snapshot Mode", ""),
	OPT_UINTEGER(0, "proc-map-timeout", &record.opts.proc_map_timeout,
			"per thread proc mmap processing timeout in ms"),
	OPT_BOOLEAN(0, "switch-events", &record.opts.record_switch_events,
		    "Record context switch events"),
	OPT_END()
};

struct option *record_options = __record_options;

int cmd_record(int argc, const char **argv, const char *prefix __maybe_unused)
{
	int err;
	struct record *rec = &record;
	char errbuf[BUFSIZ];

	rec->evlist = perf_evlist__new();
	if (rec->evlist == NULL)
		return -ENOMEM;

	perf_config(perf_record_config, rec);

	argc = parse_options(argc, argv, record_options, record_usage,
			    PARSE_OPT_STOP_AT_NON_OPTION);
	if (!argc && target__none(&rec->opts.target))
		usage_with_options(record_usage, record_options);

	if (nr_cgroups && !rec->opts.target.system_wide) {
		ui__error("cgroup monitoring only available in"
			  " system-wide mode\n");
		usage_with_options(record_usage, record_options);
	}
	if (rec->opts.record_switch_events &&
	    !perf_can_record_switch_events()) {
		ui__error("kernel does not support recording context switch events (--switch-events option)\n");
		usage_with_options(record_usage, record_options);
	}

	if (!rec->itr) {
		rec->itr = auxtrace_record__init(rec->evlist, &err);
		if (err)
			return err;
	}

	err = auxtrace_parse_snapshot_options(rec->itr, &rec->opts,
					      rec->opts.auxtrace_snapshot_opts);
	if (err)
		return err;

	err = -ENOMEM;

	symbol__init(NULL);

	if (symbol_conf.kptr_restrict)
		pr_warning(
"WARNING: Kernel address maps (/proc/{kallsyms,modules}) are restricted,\n"
"check /proc/sys/kernel/kptr_restrict.\n\n"
"Samples in kernel functions may not be resolved if a suitable vmlinux\n"
"file is not found in the buildid cache or in the vmlinux path.\n\n"
"Samples in kernel modules won't be resolved at all.\n\n"
"If some relocation was applied (e.g. kexec) symbols may be misresolved\n"
"even with a suitable vmlinux or kallsyms file.\n\n");

	if (rec->no_buildid_cache || rec->no_buildid)
		disable_buildid_cache();

	if (rec->evlist->nr_entries == 0 &&
	    perf_evlist__add_default(rec->evlist) < 0) {
		pr_err("Not enough memory for event selector list\n");
		goto out_symbol_exit;
	}

	if (rec->opts.target.tid && !rec->opts.no_inherit_set)
		rec->opts.no_inherit = true;

	err = target__validate(&rec->opts.target);
	if (err) {
		target__strerror(&rec->opts.target, err, errbuf, BUFSIZ);
		ui__warning("%s", errbuf);
	}

	err = target__parse_uid(&rec->opts.target);
	if (err) {
		int saved_errno = errno;

		target__strerror(&rec->opts.target, err, errbuf, BUFSIZ);
		ui__error("%s", errbuf);

		err = -saved_errno;
		goto out_symbol_exit;
	}

	err = -ENOMEM;
	if (perf_evlist__create_maps(rec->evlist, &rec->opts.target) < 0)
		usage_with_options(record_usage, record_options);

	err = auxtrace_record__options(rec->itr, rec->evlist, &rec->opts);
	if (err)
		goto out_symbol_exit;

	if (record_opts__config(&rec->opts)) {
		err = -EINVAL;
		goto out_symbol_exit;
	}

	err = __cmd_record(&record, argc, argv);
out_symbol_exit:
	perf_evlist__delete(rec->evlist);
	symbol__exit();
	auxtrace_record__free(rec->itr);
	return err;
}

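/*
 * SIGUSR2 handler used in AUX area snapshot mode: it arms a snapshot, which
 * the main loop in __cmd_record() then completes via
 * record__read_auxtrace_snapshot().
 */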
static void snapshot_sig_handler(int sig __maybe_unused)
{
	if (!auxtrace_snapshot_enabled)
		return;
	auxtrace_snapshot_enabled = 0;
	auxtrace_snapshot_err = auxtrace_record__snapshot_start(record.itr);
	auxtrace_record__snapshot_started = 1;
}