Merge remote-tracking branches 'asoc/topic/mc13783', 'asoc/topic/msm8916', 'asoc...
[linux-block.git] / tools / perf / builtin-record.c
CommitLineData
b2441318 1// SPDX-License-Identifier: GPL-2.0
abaff32a 2/*
bf9e1876
IM
3 * builtin-record.c
4 *
5 * Builtin record command: Record the profile of a workload
6 * (or a CPU, or a PID) into the perf.data output file - for
7 * later analysis via perf report.
abaff32a 8 */
16f762a2 9#include "builtin.h"
bf9e1876
IM
10
11#include "perf.h"
12
6122e4e4 13#include "util/build-id.h"
6eda5838 14#include "util/util.h"
4b6ab94e 15#include <subcmd/parse-options.h>
8ad8db37 16#include "util/parse-events.h"
41840d21 17#include "util/config.h"
6eda5838 18
8f651eae 19#include "util/callchain.h"
f14d5707 20#include "util/cgroup.h"
7c6a1c65 21#include "util/header.h"
66e274f3 22#include "util/event.h"
361c99a6 23#include "util/evlist.h"
69aad6f1 24#include "util/evsel.h"
8f28827a 25#include "util/debug.h"
5d8bb1ec 26#include "util/drv_configs.h"
94c744b6 27#include "util/session.h"
45694aa7 28#include "util/tool.h"
8d06367f 29#include "util/symbol.h"
a12b51c4 30#include "util/cpumap.h"
fd78260b 31#include "util/thread_map.h"
f5fc1412 32#include "util/data.h"
bcc84ec6 33#include "util/perf_regs.h"
ef149c25 34#include "util/auxtrace.h"
46bc29b9 35#include "util/tsc.h"
f00898f4 36#include "util/parse-branch-options.h"
bcc84ec6 37#include "util/parse-regs-options.h"
71dc2326 38#include "util/llvm-utils.h"
8690a2a7 39#include "util/bpf-loader.h"
5f9cf599 40#include "util/trigger.h"
a074865e 41#include "util/perf-hooks.h"
c5e4027e 42#include "util/time-utils.h"
58db1d6e 43#include "util/units.h"
d8871ea7 44#include "asm/bug.h"
7c6a1c65 45
a43783ae 46#include <errno.h>
fd20e811 47#include <inttypes.h>
4208735d 48#include <poll.h>
97124d5e 49#include <unistd.h>
de9ac07b 50#include <sched.h>
9607ad3a 51#include <signal.h>
a41794cd 52#include <sys/mman.h>
4208735d 53#include <sys/wait.h>
2d11c650 54#include <asm/bug.h>
0693e680 55#include <linux/time64.h>
78da39fa 56
1b43b704 57struct switch_output {
dc0c6127 58 bool enabled;
1b43b704 59 bool signal;
dc0c6127 60 unsigned long size;
bfacbe3b 61 unsigned long time;
cb4e1ebb
JO
62 const char *str;
63 bool set;
1b43b704
JO
64};
65
8c6f45a7 66struct record {
45694aa7 67 struct perf_tool tool;
b4006796 68 struct record_opts opts;
d20deb64 69 u64 bytes_written;
8ceb41d7 70 struct perf_data data;
ef149c25 71 struct auxtrace_record *itr;
d20deb64
ACM
72 struct perf_evlist *evlist;
73 struct perf_session *session;
74 const char *progname;
d20deb64 75 int realtime_prio;
d20deb64 76 bool no_buildid;
d2db9a98 77 bool no_buildid_set;
d20deb64 78 bool no_buildid_cache;
d2db9a98 79 bool no_buildid_cache_set;
6156681b 80 bool buildid_all;
ecfd7a9c 81 bool timestamp_filename;
1b43b704 82 struct switch_output switch_output;
9f065194 83 unsigned long long samples;
0f82ebc4 84};
a21ca2ca 85
/* Set by SIGUSR2 handler when an AUX-area snapshot has been kicked off. */
static volatile int auxtrace_record__snapshot_started;
/* Trigger tracking the lifecycle of AUX-area snapshot requests. */
static DEFINE_TRIGGER(auxtrace_snapshot_trigger);
/* Trigger tracking when the output file should be rotated. */
static DEFINE_TRIGGER(switch_output_trigger);
89
90static bool switch_output_signal(struct record *rec)
91{
92 return rec->switch_output.signal &&
93 trigger_is_ready(&switch_output_trigger);
94}
95
96static bool switch_output_size(struct record *rec)
97{
98 return rec->switch_output.size &&
99 trigger_is_ready(&switch_output_trigger) &&
100 (rec->bytes_written >= rec->switch_output.size);
101}
102
bfacbe3b
JO
103static bool switch_output_time(struct record *rec)
104{
105 return rec->switch_output.time &&
106 trigger_is_ready(&switch_output_trigger);
107}
108
8c6f45a7 109static int record__write(struct record *rec, void *bf, size_t size)
f5970550 110{
8ceb41d7 111 if (perf_data__write(rec->session->data, bf, size) < 0) {
50a9b868
JO
112 pr_err("failed to write perf data, error: %m\n");
113 return -1;
f5970550 114 }
8d3eca20 115
cf8b2e69 116 rec->bytes_written += size;
dc0c6127
JO
117
118 if (switch_output_size(rec))
119 trigger_hit(&switch_output_trigger);
120
8d3eca20 121 return 0;
f5970550
PZ
122}
123
45694aa7 124static int process_synthesized_event(struct perf_tool *tool,
d20deb64 125 union perf_event *event,
1d037ca1
IT
126 struct perf_sample *sample __maybe_unused,
127 struct machine *machine __maybe_unused)
234fbbf5 128{
8c6f45a7
ACM
129 struct record *rec = container_of(tool, struct record, tool);
130 return record__write(rec, event, event->header.size);
234fbbf5
ACM
131}
132
d37f1586
ACM
133static int record__pushfn(void *to, void *bf, size_t size)
134{
135 struct record *rec = to;
136
137 rec->samples++;
138 return record__write(rec, bf, size);
139}
140
static volatile int done;		/* main loop exit flag, set from signal context */
static volatile int signr = -1;		/* signal to re-raise at exit, -1 = none */
static volatile int child_finished;	/* workload child reaped (SIGCHLD seen) */
c0bdc1c4 144
2dd6d8a1
AH
145static void sig_handler(int sig)
146{
147 if (sig == SIGCHLD)
148 child_finished = 1;
149 else
150 signr = sig;
151
152 done = 1;
153}
154
/* SIGSEGV handler: run recovery hooks, then dump a stack trace. */
static void sigsegv_handler(int sig)
{
	perf_hooks__recover();
	sighandler_dump_stack(sig);
}
160
2dd6d8a1
AH
161static void record__sig_exit(void)
162{
163 if (signr == -1)
164 return;
165
166 signal(signr, SIG_DFL);
167 raise(signr);
168}
169
e31f0d01
AH
170#ifdef HAVE_AUXTRACE_SUPPORT
171
ef149c25
AH
172static int record__process_auxtrace(struct perf_tool *tool,
173 union perf_event *event, void *data1,
174 size_t len1, void *data2, size_t len2)
175{
176 struct record *rec = container_of(tool, struct record, tool);
8ceb41d7 177 struct perf_data *data = &rec->data;
ef149c25
AH
178 size_t padding;
179 u8 pad[8] = {0};
180
8ceb41d7 181 if (!perf_data__is_pipe(data)) {
99fa2984 182 off_t file_offset;
8ceb41d7 183 int fd = perf_data__fd(data);
99fa2984
AH
184 int err;
185
186 file_offset = lseek(fd, 0, SEEK_CUR);
187 if (file_offset == -1)
188 return -1;
189 err = auxtrace_index__auxtrace_event(&rec->session->auxtrace_index,
190 event, file_offset);
191 if (err)
192 return err;
193 }
194
ef149c25
AH
195 /* event.auxtrace.size includes padding, see __auxtrace_mmap__read() */
196 padding = (len1 + len2) & 7;
197 if (padding)
198 padding = 8 - padding;
199
200 record__write(rec, event, event->header.size);
201 record__write(rec, data1, len1);
202 if (len2)
203 record__write(rec, data2, len2);
204 record__write(rec, &pad, padding);
205
206 return 0;
207}
208
209static int record__auxtrace_mmap_read(struct record *rec,
210 struct auxtrace_mmap *mm)
211{
212 int ret;
213
214 ret = auxtrace_mmap__read(mm, rec->itr, &rec->tool,
215 record__process_auxtrace);
216 if (ret < 0)
217 return ret;
218
219 if (ret)
220 rec->samples++;
221
222 return 0;
223}
224
2dd6d8a1
AH
225static int record__auxtrace_mmap_read_snapshot(struct record *rec,
226 struct auxtrace_mmap *mm)
227{
228 int ret;
229
230 ret = auxtrace_mmap__read_snapshot(mm, rec->itr, &rec->tool,
231 record__process_auxtrace,
232 rec->opts.auxtrace_snapshot_size);
233 if (ret < 0)
234 return ret;
235
236 if (ret)
237 rec->samples++;
238
239 return 0;
240}
241
242static int record__auxtrace_read_snapshot_all(struct record *rec)
243{
244 int i;
245 int rc = 0;
246
247 for (i = 0; i < rec->evlist->nr_mmaps; i++) {
248 struct auxtrace_mmap *mm =
249 &rec->evlist->mmap[i].auxtrace_mmap;
250
251 if (!mm->base)
252 continue;
253
254 if (record__auxtrace_mmap_read_snapshot(rec, mm) != 0) {
255 rc = -1;
256 goto out;
257 }
258 }
259out:
260 return rc;
261}
262
263static void record__read_auxtrace_snapshot(struct record *rec)
264{
265 pr_debug("Recording AUX area tracing snapshot\n");
266 if (record__auxtrace_read_snapshot_all(rec) < 0) {
5f9cf599 267 trigger_error(&auxtrace_snapshot_trigger);
2dd6d8a1 268 } else {
5f9cf599
WN
269 if (auxtrace_record__snapshot_finish(rec->itr))
270 trigger_error(&auxtrace_snapshot_trigger);
271 else
272 trigger_ready(&auxtrace_snapshot_trigger);
2dd6d8a1
AH
273 }
274}
275
e31f0d01
AH
276#else
277
278static inline
279int record__auxtrace_mmap_read(struct record *rec __maybe_unused,
280 struct auxtrace_mmap *mm __maybe_unused)
281{
282 return 0;
283}
284
2dd6d8a1
AH
285static inline
286void record__read_auxtrace_snapshot(struct record *rec __maybe_unused)
de9ac07b 287{
f7b7c26e
PZ
288}
289
2dd6d8a1
AH
290static inline
291int auxtrace_record__snapshot_start(struct auxtrace_record *itr __maybe_unused)
f7b7c26e 292{
2dd6d8a1 293 return 0;
de9ac07b
PZ
294}
295
2dd6d8a1
AH
296#endif
297
cda57a8c
WN
298static int record__mmap_evlist(struct record *rec,
299 struct perf_evlist *evlist)
300{
301 struct record_opts *opts = &rec->opts;
302 char msg[512];
303
304 if (perf_evlist__mmap_ex(evlist, opts->mmap_pages, false,
305 opts->auxtrace_mmap_pages,
306 opts->auxtrace_snapshot_mode) < 0) {
307 if (errno == EPERM) {
308 pr_err("Permission error mapping pages.\n"
309 "Consider increasing "
310 "/proc/sys/kernel/perf_event_mlock_kb,\n"
311 "or try again with a smaller value of -m/--mmap_pages.\n"
312 "(current value: %u,%u)\n",
313 opts->mmap_pages, opts->auxtrace_mmap_pages);
314 return -errno;
315 } else {
316 pr_err("failed to mmap with %d (%s)\n", errno,
c8b5f2c9 317 str_error_r(errno, msg, sizeof(msg)));
cda57a8c
WN
318 if (errno)
319 return -errno;
320 else
321 return -EINVAL;
322 }
323 }
324 return 0;
325}
326
327static int record__mmap(struct record *rec)
328{
329 return record__mmap_evlist(rec, rec->evlist);
330}
331
8c6f45a7 332static int record__open(struct record *rec)
dd7927f4 333{
d6195a6a 334 char msg[BUFSIZ];
6a4bb04c 335 struct perf_evsel *pos;
d20deb64
ACM
336 struct perf_evlist *evlist = rec->evlist;
337 struct perf_session *session = rec->session;
b4006796 338 struct record_opts *opts = &rec->opts;
5d8bb1ec 339 struct perf_evsel_config_term *err_term;
8d3eca20 340 int rc = 0;
dd7927f4 341
dffdcbdb
ACM
342 /*
343 * For initial_delay we need to add a dummy event so that we can track
344 * PERF_RECORD_MMAP while we wait for the initial delay to enable the
345 * real events, the ones asked by the user.
346 */
347 if (opts->initial_delay) {
348 if (perf_evlist__add_dummy(evlist))
349 return -ENOMEM;
350
351 pos = perf_evlist__first(evlist);
352 pos->tracking = 0;
353 pos = perf_evlist__last(evlist);
354 pos->tracking = 1;
355 pos->attr.enable_on_exec = 1;
356 }
357
e68ae9cf 358 perf_evlist__config(evlist, opts, &callchain_param);
cac21425 359
e5cadb93 360 evlist__for_each_entry(evlist, pos) {
dd7927f4 361try_again:
d988d5ee 362 if (perf_evsel__open(pos, pos->cpus, pos->threads) < 0) {
56e52e85 363 if (perf_evsel__fallback(pos, errno, msg, sizeof(msg))) {
bb963e16 364 if (verbose > 0)
c0a54341 365 ui__warning("%s\n", msg);
d6d901c2
ZY
366 goto try_again;
367 }
ca6a4258 368
56e52e85
ACM
369 rc = -errno;
370 perf_evsel__open_strerror(pos, &opts->target,
371 errno, msg, sizeof(msg));
372 ui__error("%s\n", msg);
8d3eca20 373 goto out;
c171b552
LZ
374 }
375 }
a43d3f08 376
23d4aad4 377 if (perf_evlist__apply_filters(evlist, &pos)) {
62d94b00 378 pr_err("failed to set filter \"%s\" on event %s with %d (%s)\n",
23d4aad4 379 pos->filter, perf_evsel__name(pos), errno,
c8b5f2c9 380 str_error_r(errno, msg, sizeof(msg)));
8d3eca20 381 rc = -1;
5d8bb1ec
MP
382 goto out;
383 }
384
385 if (perf_evlist__apply_drv_configs(evlist, &pos, &err_term)) {
62d94b00 386 pr_err("failed to set config \"%s\" on event %s with %d (%s)\n",
5d8bb1ec
MP
387 err_term->val.drv_cfg, perf_evsel__name(pos), errno,
388 str_error_r(errno, msg, sizeof(msg)));
389 rc = -1;
8d3eca20 390 goto out;
0a102479
FW
391 }
392
cda57a8c
WN
393 rc = record__mmap(rec);
394 if (rc)
8d3eca20 395 goto out;
0a27d7f9 396
563aecb2 397 session->evlist = evlist;
7b56cce2 398 perf_session__set_id_hdr_size(session);
8d3eca20
DA
399out:
400 return rc;
16c8a109
PZ
401}
402
e3d59112
NK
403static int process_sample_event(struct perf_tool *tool,
404 union perf_event *event,
405 struct perf_sample *sample,
406 struct perf_evsel *evsel,
407 struct machine *machine)
408{
409 struct record *rec = container_of(tool, struct record, tool);
410
411 rec->samples++;
412
413 return build_id__mark_dso_hit(tool, event, sample, evsel, machine);
414}
415
8c6f45a7 416static int process_buildids(struct record *rec)
6122e4e4 417{
8ceb41d7 418 struct perf_data *data = &rec->data;
f5fc1412 419 struct perf_session *session = rec->session;
6122e4e4 420
8ceb41d7 421 if (data->size == 0)
9f591fd7
ACM
422 return 0;
423
00dc8657
NK
424 /*
425 * During this process, it'll load kernel map and replace the
426 * dso->long_name to a real pathname it found. In this case
427 * we prefer the vmlinux path like
428 * /lib/modules/3.16.4/build/vmlinux
429 *
430 * rather than build-id path (in debug directory).
431 * $HOME/.debug/.build-id/f0/6e17aa50adf4d00b88925e03775de107611551
432 */
433 symbol_conf.ignore_vmlinux_buildid = true;
434
6156681b
NK
435 /*
436 * If --buildid-all is given, it marks all DSO regardless of hits,
437 * so no need to process samples.
438 */
439 if (rec->buildid_all)
440 rec->tool.sample = NULL;
441
b7b61cbe 442 return perf_session__process_events(session);
6122e4e4
ACM
443}
444
8115d60c 445static void perf_event__synthesize_guest_os(struct machine *machine, void *data)
a1645ce1
ZY
446{
447 int err;
45694aa7 448 struct perf_tool *tool = data;
a1645ce1
ZY
449 /*
450 *As for guest kernel when processing subcommand record&report,
451 *we arrange module mmap prior to guest kernel mmap and trigger
452 *a preload dso because default guest module symbols are loaded
453 *from guest kallsyms instead of /lib/modules/XXX/XXX. This
454 *method is used to avoid symbol missing when the first addr is
455 *in module instead of in guest kernel.
456 */
45694aa7 457 err = perf_event__synthesize_modules(tool, process_synthesized_event,
743eb868 458 machine);
a1645ce1
ZY
459 if (err < 0)
460 pr_err("Couldn't record guest kernel [%d]'s reference"
23346f21 461 " relocation symbol.\n", machine->pid);
a1645ce1 462
a1645ce1
ZY
463 /*
464 * We use _stext for guest kernel because guest kernel's /proc/kallsyms
465 * have no _text sometimes.
466 */
45694aa7 467 err = perf_event__synthesize_kernel_mmap(tool, process_synthesized_event,
0ae617be 468 machine);
a1645ce1
ZY
469 if (err < 0)
470 pr_err("Couldn't record guest kernel [%d]'s reference"
23346f21 471 " relocation symbol.\n", machine->pid);
a1645ce1
ZY
472}
473
98402807
FW
474static struct perf_event_header finished_round_event = {
475 .size = sizeof(struct perf_event_header),
476 .type = PERF_RECORD_FINISHED_ROUND,
477};
478
a4ea0ec4
WN
479static int record__mmap_read_evlist(struct record *rec, struct perf_evlist *evlist,
480 bool backward)
98402807 481{
dcabb507 482 u64 bytes_written = rec->bytes_written;
0e2e63dd 483 int i;
8d3eca20 484 int rc = 0;
a4ea0ec4 485 struct perf_mmap *maps;
98402807 486
cb21686b
WN
487 if (!evlist)
488 return 0;
ef149c25 489
b2cb615d 490 maps = backward ? evlist->backward_mmap : evlist->mmap;
a4ea0ec4
WN
491 if (!maps)
492 return 0;
493
54cc54de
WN
494 if (backward && evlist->bkw_mmap_state != BKW_MMAP_DATA_PENDING)
495 return 0;
496
cb21686b 497 for (i = 0; i < evlist->nr_mmaps; i++) {
a4ea0ec4 498 struct auxtrace_mmap *mm = &maps[i].auxtrace_mmap;
cb21686b 499
a4ea0ec4 500 if (maps[i].base) {
d37f1586 501 if (perf_mmap__push(&maps[i], evlist->overwrite, backward, rec, record__pushfn) != 0) {
8d3eca20
DA
502 rc = -1;
503 goto out;
504 }
505 }
ef149c25 506
2dd6d8a1 507 if (mm->base && !rec->opts.auxtrace_snapshot_mode &&
ef149c25
AH
508 record__auxtrace_mmap_read(rec, mm) != 0) {
509 rc = -1;
510 goto out;
511 }
98402807
FW
512 }
513
dcabb507
JO
514 /*
515 * Mark the round finished in case we wrote
516 * at least one event.
517 */
518 if (bytes_written != rec->bytes_written)
519 rc = record__write(rec, &finished_round_event, sizeof(finished_round_event));
8d3eca20 520
54cc54de
WN
521 if (backward)
522 perf_evlist__toggle_bkw_mmap(evlist, BKW_MMAP_EMPTY);
8d3eca20
DA
523out:
524 return rc;
98402807
FW
525}
526
cb21686b
WN
527static int record__mmap_read_all(struct record *rec)
528{
529 int err;
530
a4ea0ec4 531 err = record__mmap_read_evlist(rec, rec->evlist, false);
cb21686b
WN
532 if (err)
533 return err;
534
05737464 535 return record__mmap_read_evlist(rec, rec->evlist, true);
cb21686b
WN
536}
537
8c6f45a7 538static void record__init_features(struct record *rec)
57706abc 539{
57706abc
DA
540 struct perf_session *session = rec->session;
541 int feat;
542
543 for (feat = HEADER_FIRST_FEATURE; feat < HEADER_LAST_FEATURE; feat++)
544 perf_header__set_feat(&session->header, feat);
545
546 if (rec->no_buildid)
547 perf_header__clear_feat(&session->header, HEADER_BUILD_ID);
548
3e2be2da 549 if (!have_tracepoints(&rec->evlist->entries))
57706abc
DA
550 perf_header__clear_feat(&session->header, HEADER_TRACING_DATA);
551
552 if (!rec->opts.branch_stack)
553 perf_header__clear_feat(&session->header, HEADER_BRANCH_STACK);
ef149c25
AH
554
555 if (!rec->opts.full_auxtrace)
556 perf_header__clear_feat(&session->header, HEADER_AUXTRACE);
ffa517ad
JO
557
558 perf_header__clear_feat(&session->header, HEADER_STAT);
57706abc
DA
559}
560
e1ab48ba
WN
561static void
562record__finish_output(struct record *rec)
563{
8ceb41d7
JO
564 struct perf_data *data = &rec->data;
565 int fd = perf_data__fd(data);
e1ab48ba 566
8ceb41d7 567 if (data->is_pipe)
e1ab48ba
WN
568 return;
569
570 rec->session->header.data_size += rec->bytes_written;
8ceb41d7 571 data->size = lseek(perf_data__fd(data), 0, SEEK_CUR);
e1ab48ba
WN
572
573 if (!rec->no_buildid) {
574 process_buildids(rec);
575
576 if (rec->buildid_all)
577 dsos__hit_all(rec->session);
578 }
579 perf_session__write_header(rec->session, rec->evlist, fd, true);
580
581 return;
582}
583
4ea648ae 584static int record__synthesize_workload(struct record *rec, bool tail)
be7b0c9e 585{
9d6aae72
ACM
586 int err;
587 struct thread_map *thread_map;
be7b0c9e 588
4ea648ae
WN
589 if (rec->opts.tail_synthesize != tail)
590 return 0;
591
9d6aae72
ACM
592 thread_map = thread_map__new_by_tid(rec->evlist->workload.pid);
593 if (thread_map == NULL)
594 return -1;
595
596 err = perf_event__synthesize_thread_map(&rec->tool, thread_map,
be7b0c9e
WN
597 process_synthesized_event,
598 &rec->session->machines.host,
599 rec->opts.sample_address,
600 rec->opts.proc_map_timeout);
9d6aae72
ACM
601 thread_map__put(thread_map);
602 return err;
be7b0c9e
WN
603}
604
4ea648ae 605static int record__synthesize(struct record *rec, bool tail);
3c1cb7e3 606
ecfd7a9c
WN
607static int
608record__switch_output(struct record *rec, bool at_exit)
609{
8ceb41d7 610 struct perf_data *data = &rec->data;
ecfd7a9c
WN
611 int fd, err;
612
613 /* Same Size: "2015122520103046"*/
614 char timestamp[] = "InvalidTimestamp";
615
4ea648ae
WN
616 record__synthesize(rec, true);
617 if (target__none(&rec->opts.target))
618 record__synthesize_workload(rec, true);
619
ecfd7a9c
WN
620 rec->samples = 0;
621 record__finish_output(rec);
622 err = fetch_current_timestamp(timestamp, sizeof(timestamp));
623 if (err) {
624 pr_err("Failed to get current timestamp\n");
625 return -EINVAL;
626 }
627
8ceb41d7 628 fd = perf_data__switch(data, timestamp,
ecfd7a9c
WN
629 rec->session->header.data_offset,
630 at_exit);
631 if (fd >= 0 && !at_exit) {
632 rec->bytes_written = 0;
633 rec->session->header.data_size = 0;
634 }
635
636 if (!quiet)
637 fprintf(stderr, "[ perf record: Dump %s.%s ]\n",
eae8ad80 638 data->file.path, timestamp);
3c1cb7e3
WN
639
640 /* Output tracking events */
be7b0c9e 641 if (!at_exit) {
4ea648ae 642 record__synthesize(rec, false);
3c1cb7e3 643
be7b0c9e
WN
644 /*
645 * In 'perf record --switch-output' without -a,
646 * record__synthesize() in record__switch_output() won't
647 * generate tracking events because there's no thread_map
648 * in evlist. Which causes newly created perf.data doesn't
649 * contain map and comm information.
650 * Create a fake thread_map and directly call
651 * perf_event__synthesize_thread_map() for those events.
652 */
653 if (target__none(&rec->opts.target))
4ea648ae 654 record__synthesize_workload(rec, false);
be7b0c9e 655 }
ecfd7a9c
WN
656 return fd;
657}
658
f33cbe72
ACM
659static volatile int workload_exec_errno;
660
661/*
662 * perf_evlist__prepare_workload will send a SIGUSR1
663 * if the fork fails, since we asked by setting its
664 * want_signal to true.
665 */
45604710
NK
666static void workload_exec_failed_signal(int signo __maybe_unused,
667 siginfo_t *info,
f33cbe72
ACM
668 void *ucontext __maybe_unused)
669{
670 workload_exec_errno = info->si_value.sival_int;
671 done = 1;
f33cbe72
ACM
672 child_finished = 1;
673}
674
2dd6d8a1 675static void snapshot_sig_handler(int sig);
bfacbe3b 676static void alarm_sig_handler(int sig);
2dd6d8a1 677
46bc29b9
AH
678int __weak
679perf_event__synth_time_conv(const struct perf_event_mmap_page *pc __maybe_unused,
680 struct perf_tool *tool __maybe_unused,
681 perf_event__handler_t process __maybe_unused,
682 struct machine *machine __maybe_unused)
683{
684 return 0;
685}
686
ee667f94
WN
687static const struct perf_event_mmap_page *
688perf_evlist__pick_pc(struct perf_evlist *evlist)
689{
b2cb615d
WN
690 if (evlist) {
691 if (evlist->mmap && evlist->mmap[0].base)
692 return evlist->mmap[0].base;
693 if (evlist->backward_mmap && evlist->backward_mmap[0].base)
694 return evlist->backward_mmap[0].base;
695 }
ee667f94
WN
696 return NULL;
697}
698
c45628b0
WN
699static const struct perf_event_mmap_page *record__pick_pc(struct record *rec)
700{
ee667f94
WN
701 const struct perf_event_mmap_page *pc;
702
703 pc = perf_evlist__pick_pc(rec->evlist);
704 if (pc)
705 return pc;
c45628b0
WN
706 return NULL;
707}
708
4ea648ae 709static int record__synthesize(struct record *rec, bool tail)
c45c86eb
WN
710{
711 struct perf_session *session = rec->session;
712 struct machine *machine = &session->machines.host;
8ceb41d7 713 struct perf_data *data = &rec->data;
c45c86eb
WN
714 struct record_opts *opts = &rec->opts;
715 struct perf_tool *tool = &rec->tool;
8ceb41d7 716 int fd = perf_data__fd(data);
c45c86eb
WN
717 int err = 0;
718
4ea648ae
WN
719 if (rec->opts.tail_synthesize != tail)
720 return 0;
721
8ceb41d7 722 if (data->is_pipe) {
e9def1b2
DCC
723 err = perf_event__synthesize_features(
724 tool, session, rec->evlist, process_synthesized_event);
725 if (err < 0) {
726 pr_err("Couldn't synthesize features.\n");
727 return err;
728 }
729
c45c86eb
WN
730 err = perf_event__synthesize_attrs(tool, session,
731 process_synthesized_event);
732 if (err < 0) {
733 pr_err("Couldn't synthesize attrs.\n");
734 goto out;
735 }
736
737 if (have_tracepoints(&rec->evlist->entries)) {
738 /*
739 * FIXME err <= 0 here actually means that
740 * there were no tracepoints so its not really
741 * an error, just that we don't need to
742 * synthesize anything. We really have to
743 * return this more properly and also
744 * propagate errors that now are calling die()
745 */
746 err = perf_event__synthesize_tracing_data(tool, fd, rec->evlist,
747 process_synthesized_event);
748 if (err <= 0) {
749 pr_err("Couldn't record tracing data.\n");
750 goto out;
751 }
752 rec->bytes_written += err;
753 }
754 }
755
c45628b0 756 err = perf_event__synth_time_conv(record__pick_pc(rec), tool,
46bc29b9
AH
757 process_synthesized_event, machine);
758 if (err)
759 goto out;
760
c45c86eb
WN
761 if (rec->opts.full_auxtrace) {
762 err = perf_event__synthesize_auxtrace_info(rec->itr, tool,
763 session, process_synthesized_event);
764 if (err)
765 goto out;
766 }
767
b0ebd811
ACM
768 if (!perf_evlist__exclude_kernel(rec->evlist)) {
769 err = perf_event__synthesize_kernel_mmap(tool, process_synthesized_event,
770 machine);
771 WARN_ONCE(err < 0, "Couldn't record kernel reference relocation symbol\n"
772 "Symbol resolution may be skewed if relocation was used (e.g. kexec).\n"
773 "Check /proc/kallsyms permission or run as root.\n");
774
775 err = perf_event__synthesize_modules(tool, process_synthesized_event,
776 machine);
777 WARN_ONCE(err < 0, "Couldn't record kernel module information.\n"
778 "Symbol resolution may be skewed if relocation was used (e.g. kexec).\n"
779 "Check /proc/modules permission or run as root.\n");
780 }
c45c86eb
WN
781
782 if (perf_guest) {
783 machines__process_guests(&session->machines,
784 perf_event__synthesize_guest_os, tool);
785 }
786
787 err = __machine__synthesize_threads(machine, tool, &opts->target, rec->evlist->threads,
788 process_synthesized_event, opts->sample_address,
340b47f5 789 opts->proc_map_timeout, 1);
c45c86eb
WN
790out:
791 return err;
792}
793
8c6f45a7 794static int __cmd_record(struct record *rec, int argc, const char **argv)
16c8a109 795{
57706abc 796 int err;
45604710 797 int status = 0;
8b412664 798 unsigned long waking = 0;
46be604b 799 const bool forks = argc > 0;
23346f21 800 struct machine *machine;
45694aa7 801 struct perf_tool *tool = &rec->tool;
b4006796 802 struct record_opts *opts = &rec->opts;
8ceb41d7 803 struct perf_data *data = &rec->data;
d20deb64 804 struct perf_session *session;
6dcf45ef 805 bool disabled = false, draining = false;
42aa276f 806 int fd;
de9ac07b 807
d20deb64 808 rec->progname = argv[0];
33e49ea7 809
45604710 810 atexit(record__sig_exit);
f5970550
PZ
811 signal(SIGCHLD, sig_handler);
812 signal(SIGINT, sig_handler);
804f7ac7 813 signal(SIGTERM, sig_handler);
a074865e 814 signal(SIGSEGV, sigsegv_handler);
c0bdc1c4 815
f3b3614a
HB
816 if (rec->opts.record_namespaces)
817 tool->namespace_events = true;
818
dc0c6127 819 if (rec->opts.auxtrace_snapshot_mode || rec->switch_output.enabled) {
2dd6d8a1 820 signal(SIGUSR2, snapshot_sig_handler);
3c1cb7e3
WN
821 if (rec->opts.auxtrace_snapshot_mode)
822 trigger_on(&auxtrace_snapshot_trigger);
dc0c6127 823 if (rec->switch_output.enabled)
3c1cb7e3 824 trigger_on(&switch_output_trigger);
c0bdc1c4 825 } else {
2dd6d8a1 826 signal(SIGUSR2, SIG_IGN);
c0bdc1c4 827 }
f5970550 828
8ceb41d7 829 session = perf_session__new(data, false, tool);
94c744b6 830 if (session == NULL) {
ffa91880 831 pr_err("Perf session creation failed.\n");
a9a70bbc
ACM
832 return -1;
833 }
834
8ceb41d7 835 fd = perf_data__fd(data);
d20deb64
ACM
836 rec->session = session;
837
8c6f45a7 838 record__init_features(rec);
330aa675 839
d4db3f16 840 if (forks) {
3e2be2da 841 err = perf_evlist__prepare_workload(rec->evlist, &opts->target,
8ceb41d7 842 argv, data->is_pipe,
735f7e0b 843 workload_exec_failed_signal);
35b9d88e
ACM
844 if (err < 0) {
845 pr_err("Couldn't run the workload!\n");
45604710 846 status = err;
35b9d88e 847 goto out_delete_session;
856e9660 848 }
856e9660
PZ
849 }
850
8c6f45a7 851 if (record__open(rec) != 0) {
8d3eca20 852 err = -1;
45604710 853 goto out_child;
8d3eca20 854 }
de9ac07b 855
8690a2a7
WN
856 err = bpf__apply_obj_config();
857 if (err) {
858 char errbuf[BUFSIZ];
859
860 bpf__strerror_apply_obj_config(err, errbuf, sizeof(errbuf));
861 pr_err("ERROR: Apply config to BPF failed: %s\n",
862 errbuf);
863 goto out_child;
864 }
865
cca8482c
AH
866 /*
867 * Normally perf_session__new would do this, but it doesn't have the
868 * evlist.
869 */
870 if (rec->tool.ordered_events && !perf_evlist__sample_id_all(rec->evlist)) {
871 pr_warning("WARNING: No sample_id_all support, falling back to unordered processing\n");
872 rec->tool.ordered_events = false;
873 }
874
3e2be2da 875 if (!rec->evlist->nr_groups)
a8bb559b
NK
876 perf_header__clear_feat(&session->header, HEADER_GROUP_DESC);
877
8ceb41d7 878 if (data->is_pipe) {
42aa276f 879 err = perf_header__write_pipe(fd);
529870e3 880 if (err < 0)
45604710 881 goto out_child;
563aecb2 882 } else {
42aa276f 883 err = perf_session__write_header(session, rec->evlist, fd, false);
d5eed904 884 if (err < 0)
45604710 885 goto out_child;
56b03f3c
ACM
886 }
887
d3665498 888 if (!rec->no_buildid
e20960c0 889 && !perf_header__has_feat(&session->header, HEADER_BUILD_ID)) {
d3665498 890 pr_err("Couldn't generate buildids. "
e20960c0 891 "Use --no-buildid to profile anyway.\n");
8d3eca20 892 err = -1;
45604710 893 goto out_child;
e20960c0
RR
894 }
895
34ba5122 896 machine = &session->machines.host;
743eb868 897
4ea648ae 898 err = record__synthesize(rec, false);
c45c86eb 899 if (err < 0)
45604710 900 goto out_child;
8d3eca20 901
d20deb64 902 if (rec->realtime_prio) {
de9ac07b
PZ
903 struct sched_param param;
904
d20deb64 905 param.sched_priority = rec->realtime_prio;
de9ac07b 906 if (sched_setscheduler(0, SCHED_FIFO, &param)) {
6beba7ad 907 pr_err("Could not set realtime priority.\n");
8d3eca20 908 err = -1;
45604710 909 goto out_child;
de9ac07b
PZ
910 }
911 }
912
774cb499
JO
913 /*
914 * When perf is starting the traced process, all the events
915 * (apart from group members) have enable_on_exec=1 set,
916 * so don't spoil it by prematurely enabling them.
917 */
6619a53e 918 if (!target__none(&opts->target) && !opts->initial_delay)
3e2be2da 919 perf_evlist__enable(rec->evlist);
764e16a3 920
856e9660
PZ
921 /*
922 * Let the child rip
923 */
e803cf97 924 if (forks) {
e5bed564 925 union perf_event *event;
e907caf3 926 pid_t tgid;
e5bed564
NK
927
928 event = malloc(sizeof(event->comm) + machine->id_hdr_size);
929 if (event == NULL) {
930 err = -ENOMEM;
931 goto out_child;
932 }
933
e803cf97
NK
934 /*
935 * Some H/W events are generated before COMM event
936 * which is emitted during exec(), so perf script
937 * cannot see a correct process name for those events.
938 * Synthesize COMM event to prevent it.
939 */
e907caf3
HB
940 tgid = perf_event__synthesize_comm(tool, event,
941 rec->evlist->workload.pid,
942 process_synthesized_event,
943 machine);
944 free(event);
945
946 if (tgid == -1)
947 goto out_child;
948
949 event = malloc(sizeof(event->namespaces) +
950 (NR_NAMESPACES * sizeof(struct perf_ns_link_info)) +
951 machine->id_hdr_size);
952 if (event == NULL) {
953 err = -ENOMEM;
954 goto out_child;
955 }
956
957 /*
958 * Synthesize NAMESPACES event for the command specified.
959 */
960 perf_event__synthesize_namespaces(tool, event,
961 rec->evlist->workload.pid,
962 tgid, process_synthesized_event,
963 machine);
e5bed564 964 free(event);
e803cf97 965
3e2be2da 966 perf_evlist__start_workload(rec->evlist);
e803cf97 967 }
856e9660 968
6619a53e 969 if (opts->initial_delay) {
0693e680 970 usleep(opts->initial_delay * USEC_PER_MSEC);
6619a53e
AK
971 perf_evlist__enable(rec->evlist);
972 }
973
5f9cf599 974 trigger_ready(&auxtrace_snapshot_trigger);
3c1cb7e3 975 trigger_ready(&switch_output_trigger);
a074865e 976 perf_hooks__invoke_record_start();
649c48a9 977 for (;;) {
9f065194 978 unsigned long long hits = rec->samples;
de9ac07b 979
05737464
WN
980 /*
981 * rec->evlist->bkw_mmap_state is possible to be
982 * BKW_MMAP_EMPTY here: when done == true and
983 * hits != rec->samples in previous round.
984 *
985 * perf_evlist__toggle_bkw_mmap ensure we never
986 * convert BKW_MMAP_EMPTY to BKW_MMAP_DATA_PENDING.
987 */
988 if (trigger_is_hit(&switch_output_trigger) || done || draining)
989 perf_evlist__toggle_bkw_mmap(rec->evlist, BKW_MMAP_DATA_PENDING);
990
8c6f45a7 991 if (record__mmap_read_all(rec) < 0) {
5f9cf599 992 trigger_error(&auxtrace_snapshot_trigger);
3c1cb7e3 993 trigger_error(&switch_output_trigger);
8d3eca20 994 err = -1;
45604710 995 goto out_child;
8d3eca20 996 }
de9ac07b 997
2dd6d8a1
AH
998 if (auxtrace_record__snapshot_started) {
999 auxtrace_record__snapshot_started = 0;
5f9cf599 1000 if (!trigger_is_error(&auxtrace_snapshot_trigger))
2dd6d8a1 1001 record__read_auxtrace_snapshot(rec);
5f9cf599 1002 if (trigger_is_error(&auxtrace_snapshot_trigger)) {
2dd6d8a1
AH
1003 pr_err("AUX area tracing snapshot failed\n");
1004 err = -1;
1005 goto out_child;
1006 }
1007 }
1008
3c1cb7e3 1009 if (trigger_is_hit(&switch_output_trigger)) {
05737464
WN
1010 /*
1011 * If switch_output_trigger is hit, the data in
1012 * overwritable ring buffer should have been collected,
1013 * so bkw_mmap_state should be set to BKW_MMAP_EMPTY.
1014 *
1015 * If SIGUSR2 raise after or during record__mmap_read_all(),
1016 * record__mmap_read_all() didn't collect data from
1017 * overwritable ring buffer. Read again.
1018 */
1019 if (rec->evlist->bkw_mmap_state == BKW_MMAP_RUNNING)
1020 continue;
3c1cb7e3
WN
1021 trigger_ready(&switch_output_trigger);
1022
05737464
WN
1023 /*
1024 * Reenable events in overwrite ring buffer after
1025 * record__mmap_read_all(): we should have collected
1026 * data from it.
1027 */
1028 perf_evlist__toggle_bkw_mmap(rec->evlist, BKW_MMAP_RUNNING);
1029
3c1cb7e3
WN
1030 if (!quiet)
1031 fprintf(stderr, "[ perf record: dump data: Woken up %ld times ]\n",
1032 waking);
1033 waking = 0;
1034 fd = record__switch_output(rec, false);
1035 if (fd < 0) {
1036 pr_err("Failed to switch to new file\n");
1037 trigger_error(&switch_output_trigger);
1038 err = fd;
1039 goto out_child;
1040 }
bfacbe3b
JO
1041
1042 /* re-arm the alarm */
1043 if (rec->switch_output.time)
1044 alarm(rec->switch_output.time);
3c1cb7e3
WN
1045 }
1046
d20deb64 1047 if (hits == rec->samples) {
6dcf45ef 1048 if (done || draining)
649c48a9 1049 break;
f66a889d 1050 err = perf_evlist__poll(rec->evlist, -1);
a515114f
JO
1051 /*
1052 * Propagate error, only if there's any. Ignore positive
1053 * number of returned events and interrupt error.
1054 */
1055 if (err > 0 || (err < 0 && errno == EINTR))
45604710 1056 err = 0;
8b412664 1057 waking++;
6dcf45ef
ACM
1058
1059 if (perf_evlist__filter_pollfd(rec->evlist, POLLERR | POLLHUP) == 0)
1060 draining = true;
8b412664
PZ
1061 }
1062
774cb499
JO
1063 /*
1064 * When perf is starting the traced process, at the end events
1065 * die with the process and we wait for that. Thus no need to
1066 * disable events in this case.
1067 */
602ad878 1068 if (done && !disabled && !target__none(&opts->target)) {
5f9cf599 1069 trigger_off(&auxtrace_snapshot_trigger);
3e2be2da 1070 perf_evlist__disable(rec->evlist);
2711926a
JO
1071 disabled = true;
1072 }
de9ac07b 1073 }
5f9cf599 1074 trigger_off(&auxtrace_snapshot_trigger);
3c1cb7e3 1075 trigger_off(&switch_output_trigger);
de9ac07b 1076
f33cbe72 1077 if (forks && workload_exec_errno) {
35550da3 1078 char msg[STRERR_BUFSIZE];
c8b5f2c9 1079 const char *emsg = str_error_r(workload_exec_errno, msg, sizeof(msg));
f33cbe72
ACM
1080 pr_err("Workload failed: %s\n", emsg);
1081 err = -1;
45604710 1082 goto out_child;
f33cbe72
ACM
1083 }
1084
e3d59112 1085 if (!quiet)
45604710 1086 fprintf(stderr, "[ perf record: Woken up %ld times to write data ]\n", waking);
b44308f5 1087
4ea648ae
WN
1088 if (target__none(&rec->opts.target))
1089 record__synthesize_workload(rec, true);
1090
45604710
NK
1091out_child:
1092 if (forks) {
1093 int exit_status;
addc2785 1094
45604710
NK
1095 if (!child_finished)
1096 kill(rec->evlist->workload.pid, SIGTERM);
1097
1098 wait(&exit_status);
1099
1100 if (err < 0)
1101 status = err;
1102 else if (WIFEXITED(exit_status))
1103 status = WEXITSTATUS(exit_status);
1104 else if (WIFSIGNALED(exit_status))
1105 signr = WTERMSIG(exit_status);
1106 } else
1107 status = err;
1108
4ea648ae 1109 record__synthesize(rec, true);
e3d59112
NK
1110 /* this will be recalculated during process_buildids() */
1111 rec->samples = 0;
1112
ecfd7a9c
WN
1113 if (!err) {
1114 if (!rec->timestamp_filename) {
1115 record__finish_output(rec);
1116 } else {
1117 fd = record__switch_output(rec, true);
1118 if (fd < 0) {
1119 status = fd;
1120 goto out_delete_session;
1121 }
1122 }
1123 }
39d17dac 1124
a074865e
WN
1125 perf_hooks__invoke_record_end();
1126
e3d59112
NK
1127 if (!err && !quiet) {
1128 char samples[128];
ecfd7a9c
WN
1129 const char *postfix = rec->timestamp_filename ?
1130 ".<timestamp>" : "";
e3d59112 1131
ef149c25 1132 if (rec->samples && !rec->opts.full_auxtrace)
e3d59112
NK
1133 scnprintf(samples, sizeof(samples),
1134 " (%" PRIu64 " samples)", rec->samples);
1135 else
1136 samples[0] = '\0';
1137
ecfd7a9c 1138 fprintf(stderr, "[ perf record: Captured and wrote %.3f MB %s%s%s ]\n",
8ceb41d7 1139 perf_data__size(data) / 1024.0 / 1024.0,
eae8ad80 1140 data->file.path, postfix, samples);
e3d59112
NK
1141 }
1142
39d17dac
ACM
1143out_delete_session:
1144 perf_session__delete(session);
45604710 1145 return status;
de9ac07b 1146}
0e9b20b8 1147
0883e820 1148static void callchain_debug(struct callchain_param *callchain)
09b0fd45 1149{
aad2b21c 1150 static const char *str[CALLCHAIN_MAX] = { "NONE", "FP", "DWARF", "LBR" };
a601fdff 1151
0883e820 1152 pr_debug("callchain: type %s\n", str[callchain->record_mode]);
26d33022 1153
0883e820 1154 if (callchain->record_mode == CALLCHAIN_DWARF)
09b0fd45 1155 pr_debug("callchain: stack dump size %d\n",
0883e820 1156 callchain->dump_size);
09b0fd45
JO
1157}
1158
0883e820
ACM
1159int record_opts__parse_callchain(struct record_opts *record,
1160 struct callchain_param *callchain,
1161 const char *arg, bool unset)
09b0fd45 1162{
09b0fd45 1163 int ret;
0883e820 1164 callchain->enabled = !unset;
eb853e80 1165
09b0fd45
JO
1166 /* --no-call-graph */
1167 if (unset) {
0883e820 1168 callchain->record_mode = CALLCHAIN_NONE;
09b0fd45
JO
1169 pr_debug("callchain: disabled\n");
1170 return 0;
1171 }
1172
0883e820 1173 ret = parse_callchain_record_opt(arg, callchain);
5c0cf224
JO
1174 if (!ret) {
1175 /* Enable data address sampling for DWARF unwind. */
0883e820 1176 if (callchain->record_mode == CALLCHAIN_DWARF)
5c0cf224 1177 record->sample_address = true;
0883e820 1178 callchain_debug(callchain);
5c0cf224 1179 }
26d33022
JO
1180
1181 return ret;
1182}
1183
/*
 * parse-options callback for "--call-graph": opt->value is the
 * struct record_opts; the result is stored in the global callchain_param.
 */
int record_parse_callchain_opt(const struct option *opt,
			       const char *arg,
			       int unset)
{
	return record_opts__parse_callchain(opt->value, &callchain_param, arg, unset);
}
1190
c421e80b 1191int record_callchain_opt(const struct option *opt,
09b0fd45
JO
1192 const char *arg __maybe_unused,
1193 int unset __maybe_unused)
1194{
2ddd5c04 1195 struct callchain_param *callchain = opt->value;
c421e80b 1196
2ddd5c04 1197 callchain->enabled = true;
09b0fd45 1198
2ddd5c04
ACM
1199 if (callchain->record_mode == CALLCHAIN_NONE)
1200 callchain->record_mode = CALLCHAIN_FP;
eb853e80 1201
2ddd5c04 1202 callchain_debug(callchain);
09b0fd45
JO
1203 return 0;
1204}
1205
eb853e80
JO
1206static int perf_record_config(const char *var, const char *value, void *cb)
1207{
7a29c087
NK
1208 struct record *rec = cb;
1209
1210 if (!strcmp(var, "record.build-id")) {
1211 if (!strcmp(value, "cache"))
1212 rec->no_buildid_cache = false;
1213 else if (!strcmp(value, "no-cache"))
1214 rec->no_buildid_cache = true;
1215 else if (!strcmp(value, "skip"))
1216 rec->no_buildid = true;
1217 else
1218 return -1;
1219 return 0;
1220 }
eb853e80 1221 if (!strcmp(var, "record.call-graph"))
5a2e5e85 1222 var = "call-graph.record-mode"; /* fall-through */
eb853e80
JO
1223
1224 return perf_default_config(var, value, cb);
1225}
1226
/* Maps a user-supplied -k/--clockid name to the kernel clockid number. */
struct clockid_map {
	const char *name;
	int clockid;
};

#define CLOCKID_MAP(n, c)	\
	{ .name = n, .clockid = (c), }

#define CLOCKID_END	{ .name = NULL, }


/*
 * Add the missing ones, we need to build on many distros...
 */
#ifndef CLOCK_MONOTONIC_RAW
#define CLOCK_MONOTONIC_RAW 4
#endif
#ifndef CLOCK_BOOTTIME
#define CLOCK_BOOTTIME 7
#endif
#ifndef CLOCK_TAI
#define CLOCK_TAI 11
#endif

/* Lookup table used by parse_clockid(); terminated by CLOCKID_END. */
static const struct clockid_map clockids[] = {
	/* available for all events, NMI safe */
	CLOCKID_MAP("monotonic", CLOCK_MONOTONIC),
	CLOCKID_MAP("monotonic_raw", CLOCK_MONOTONIC_RAW),

	/* available for some events */
	CLOCKID_MAP("realtime", CLOCK_REALTIME),
	CLOCKID_MAP("boottime", CLOCK_BOOTTIME),
	CLOCKID_MAP("tai", CLOCK_TAI),

	/* available for the lazy */
	CLOCKID_MAP("mono", CLOCK_MONOTONIC),
	CLOCKID_MAP("raw", CLOCK_MONOTONIC_RAW),
	CLOCKID_MAP("real", CLOCK_REALTIME),
	CLOCKID_MAP("boot", CLOCK_BOOTTIME),

	CLOCKID_END,
};
1269
1270static int parse_clockid(const struct option *opt, const char *str, int unset)
1271{
1272 struct record_opts *opts = (struct record_opts *)opt->value;
1273 const struct clockid_map *cm;
1274 const char *ostr = str;
1275
1276 if (unset) {
1277 opts->use_clockid = 0;
1278 return 0;
1279 }
1280
1281 /* no arg passed */
1282 if (!str)
1283 return 0;
1284
1285 /* no setting it twice */
1286 if (opts->use_clockid)
1287 return -1;
1288
1289 opts->use_clockid = true;
1290
1291 /* if its a number, we're done */
1292 if (sscanf(str, "%d", &opts->clockid) == 1)
1293 return 0;
1294
1295 /* allow a "CLOCK_" prefix to the name */
1296 if (!strncasecmp(str, "CLOCK_", 6))
1297 str += 6;
1298
1299 for (cm = clockids; cm->name; cm++) {
1300 if (!strcasecmp(str, cm->name)) {
1301 opts->clockid = cm->clockid;
1302 return 0;
1303 }
1304 }
1305
1306 opts->use_clockid = false;
1307 ui__warning("unknown clockid %s, check man page\n", ostr);
1308 return -1;
1309}
1310
e9db1310
AH
1311static int record__parse_mmap_pages(const struct option *opt,
1312 const char *str,
1313 int unset __maybe_unused)
1314{
1315 struct record_opts *opts = opt->value;
1316 char *s, *p;
1317 unsigned int mmap_pages;
1318 int ret;
1319
1320 if (!str)
1321 return -EINVAL;
1322
1323 s = strdup(str);
1324 if (!s)
1325 return -ENOMEM;
1326
1327 p = strchr(s, ',');
1328 if (p)
1329 *p = '\0';
1330
1331 if (*s) {
1332 ret = __perf_evlist__parse_mmap_pages(&mmap_pages, s);
1333 if (ret)
1334 goto out_free;
1335 opts->mmap_pages = mmap_pages;
1336 }
1337
1338 if (!p) {
1339 ret = 0;
1340 goto out_free;
1341 }
1342
1343 ret = __perf_evlist__parse_mmap_pages(&mmap_pages, p + 1);
1344 if (ret)
1345 goto out_free;
1346
1347 opts->auxtrace_mmap_pages = mmap_pages;
1348
1349out_free:
1350 free(s);
1351 return ret;
1352}
1353
0c582449
JO
1354static void switch_output_size_warn(struct record *rec)
1355{
1356 u64 wakeup_size = perf_evlist__mmap_size(rec->opts.mmap_pages);
1357 struct switch_output *s = &rec->switch_output;
1358
1359 wakeup_size /= 2;
1360
1361 if (s->size < wakeup_size) {
1362 char buf[100];
1363
1364 unit_number__scnprintf(buf, sizeof(buf), wakeup_size);
1365 pr_warning("WARNING: switch-output data size lower than "
1366 "wakeup kernel buffer size (%s) "
1367 "expect bigger perf.data sizes\n", buf);
1368 }
1369}
1370
/*
 * Interpret the --switch-output argument, which is one of:
 *   "signal"      - switch files on SIGUSR2
 *   <N>[BKMG]     - switch files once the output crosses a size threshold
 *   <N>[smhd]     - switch files periodically on a time threshold
 *
 * The string is tried in that order; size units are matched before time
 * units, so a bare "1" parses as a size via the 'B' tag.  On success the
 * switch-output machinery is enabled and timestamped filenames are forced
 * (each output file needs a unique name).  Returns 0 on success, -1 if
 * the argument matches none of the forms.
 */
static int switch_output_setup(struct record *rec)
{
	struct switch_output *s = &rec->switch_output;
	static struct parse_tag tags_size[] = {
		{ .tag = 'B', .mult = 1 },
		{ .tag = 'K', .mult = 1 << 10 },
		{ .tag = 'M', .mult = 1 << 20 },
		{ .tag = 'G', .mult = 1 << 30 },
		{ .tag = 0 },
	};
	static struct parse_tag tags_time[] = {
		{ .tag = 's', .mult = 1 },
		{ .tag = 'm', .mult = 60 },
		{ .tag = 'h', .mult = 60*60 },
		{ .tag = 'd', .mult = 60*60*24 },
		{ .tag = 0 },
	};
	unsigned long val;

	/* --switch-output not given at all. */
	if (!s->set)
		return 0;

	if (!strcmp(s->str, "signal")) {
		s->signal = true;
		pr_debug("switch-output with SIGUSR2 signal\n");
		goto enabled;
	}

	/* parse_tag_value() returns (unsigned long)-1 on no match. */
	val = parse_tag_value(s->str, tags_size);
	if (val != (unsigned long) -1) {
		s->size = val;
		pr_debug("switch-output with %s size threshold\n", s->str);
		goto enabled;
	}

	val = parse_tag_value(s->str, tags_time);
	if (val != (unsigned long) -1) {
		s->time = val;
		pr_debug("switch-output with %s time threshold (%lu seconds)\n",
			 s->str, s->time);
		goto enabled;
	}

	return -1;

enabled:
	rec->timestamp_filename = true;
	s->enabled = true;

	/* A too-small size threshold cannot be honored precisely; warn. */
	if (s->size && !rec->opts.no_buffering)
		switch_output_size_warn(rec);

	return 0;
}
1425
e5b2c207 1426static const char * const __record_usage[] = {
9e096753
MG
1427 "perf record [<options>] [<command>]",
1428 "perf record [<options>] -- <command> [<options>]",
0e9b20b8
IM
1429 NULL
1430};
e5b2c207 1431const char * const *record_usage = __record_usage;
0e9b20b8 1432
d20deb64 1433/*
8c6f45a7
ACM
1434 * XXX Ideally would be local to cmd_record() and passed to a record__new
1435 * because we need to have access to it in record__exit, that is called
d20deb64
ACM
1436 * after cmd_record() exits, but since record_options need to be accessible to
1437 * builtin-script, leave it here.
1438 *
1439 * At least we don't ouch it in all the other functions here directly.
1440 *
1441 * Just say no to tons of global variables, sigh.
1442 */
8c6f45a7 1443static struct record record = {
d20deb64 1444 .opts = {
8affc2b8 1445 .sample_time = true,
d20deb64
ACM
1446 .mmap_pages = UINT_MAX,
1447 .user_freq = UINT_MAX,
1448 .user_interval = ULLONG_MAX,
447a6013 1449 .freq = 4000,
d1cb9fce
NK
1450 .target = {
1451 .uses_mmap = true,
3aa5939d 1452 .default_per_cpu = true,
d1cb9fce 1453 },
9d9cad76 1454 .proc_map_timeout = 500,
d20deb64 1455 },
e3d59112
NK
1456 .tool = {
1457 .sample = process_sample_event,
1458 .fork = perf_event__process_fork,
cca8482c 1459 .exit = perf_event__process_exit,
e3d59112 1460 .comm = perf_event__process_comm,
f3b3614a 1461 .namespaces = perf_event__process_namespaces,
e3d59112
NK
1462 .mmap = perf_event__process_mmap,
1463 .mmap2 = perf_event__process_mmap2,
cca8482c 1464 .ordered_events = true,
e3d59112 1465 },
d20deb64 1466};
7865e817 1467
76a26549
NK
1468const char record_callchain_help[] = CALLCHAIN_RECORD_HELP
1469 "\n\t\t\t\tDefault: fp";
61eaa3be 1470
0aab2136
WN
1471static bool dry_run;
1472
/*
 * XXX Will stay a global variable till we fix builtin-script.c to stop messing
 * with it and switch to use the library functions in perf_evlist that came
 * from builtin-record.c, i.e. use record_opts,
 * perf_evlist__prepare_workload, etc instead of fork+exec'in 'perf record',
 * using pipes, etc.
 */
static struct option __record_options[] = {
	OPT_CALLBACK('e', "event", &record.evlist, "event",
		     "event selector. use 'perf list' to list available events",
		     parse_events_option),
	OPT_CALLBACK(0, "filter", &record.evlist, "filter",
		     "event filter", parse_filter),
	OPT_CALLBACK_NOOPT(0, "exclude-perf", &record.evlist,
			   NULL, "don't record events from perf itself",
			   exclude_perf),
	OPT_STRING('p', "pid", &record.opts.target.pid, "pid",
		    "record events on existing process id"),
	OPT_STRING('t', "tid", &record.opts.target.tid, "tid",
		    "record events on existing thread id"),
	OPT_INTEGER('r', "realtime", &record.realtime_prio,
		    "collect data with this RT SCHED_FIFO priority"),
	OPT_BOOLEAN(0, "no-buffering", &record.opts.no_buffering,
		    "collect data without buffering"),
	OPT_BOOLEAN('R', "raw-samples", &record.opts.raw_samples,
		    "collect raw sample records from all opened counters"),
	OPT_BOOLEAN('a', "all-cpus", &record.opts.target.system_wide,
		    "system-wide collection from all CPUs"),
	OPT_STRING('C', "cpu", &record.opts.target.cpu_list, "cpu",
		    "list of cpus to monitor"),
	OPT_U64('c', "count", &record.opts.user_interval, "event period to sample"),
	OPT_STRING('o', "output", &record.data.file.path, "file",
		    "output file name"),
	OPT_BOOLEAN_SET('i', "no-inherit", &record.opts.no_inherit,
			&record.opts.no_inherit_set,
			"child tasks do not inherit counters"),
	OPT_BOOLEAN(0, "tail-synthesize", &record.opts.tail_synthesize,
		    "synthesize non-sample events at the end of output"),
	OPT_BOOLEAN(0, "overwrite", &record.opts.overwrite, "use overwrite mode"),
	OPT_UINTEGER('F', "freq", &record.opts.user_freq, "profile at this frequency"),
	OPT_CALLBACK('m', "mmap-pages", &record.opts, "pages[,pages]",
		     "number of mmap data pages and AUX area tracing mmap pages",
		     record__parse_mmap_pages),
	OPT_BOOLEAN(0, "group", &record.opts.group,
		    "put the counters into a counter group"),
	/* Bare -g enables call graphs; --call-graph selects the mode. */
	OPT_CALLBACK_NOOPT('g', NULL, &callchain_param,
			   NULL, "enables call-graph recording" ,
			   &record_callchain_opt),
	OPT_CALLBACK(0, "call-graph", &record.opts,
		     "record_mode[,record_size]", record_callchain_help,
		     &record_parse_callchain_opt),
	OPT_INCR('v', "verbose", &verbose,
		    "be more verbose (show counter open errors, etc)"),
	OPT_BOOLEAN('q', "quiet", &quiet, "don't print any message"),
	OPT_BOOLEAN('s', "stat", &record.opts.inherit_stat,
		    "per thread counts"),
	OPT_BOOLEAN('d', "data", &record.opts.sample_address, "Record the sample addresses"),
	OPT_BOOLEAN(0, "phys-data", &record.opts.sample_phys_addr,
		    "Record the sample physical addresses"),
	OPT_BOOLEAN(0, "sample-cpu", &record.opts.sample_cpu, "Record the sample cpu"),
	OPT_BOOLEAN_SET('T', "timestamp", &record.opts.sample_time,
			&record.opts.sample_time_set,
			"Record the sample timestamps"),
	OPT_BOOLEAN('P', "period", &record.opts.period, "Record the sample period"),
	OPT_BOOLEAN('n', "no-samples", &record.opts.no_samples,
		    "don't sample"),
	OPT_BOOLEAN_SET('N', "no-buildid-cache", &record.no_buildid_cache,
			&record.no_buildid_cache_set,
			"do not update the buildid cache"),
	OPT_BOOLEAN_SET('B', "no-buildid", &record.no_buildid,
			&record.no_buildid_set,
			"do not collect buildids in perf.data"),
	OPT_CALLBACK('G', "cgroup", &record.evlist, "name",
		     "monitor event in cgroup name only",
		     parse_cgroups),
	OPT_UINTEGER('D', "delay", &record.opts.initial_delay,
		  "ms to wait before starting measurement after program start"),
	OPT_STRING('u', "uid", &record.opts.target.uid_str, "user",
		   "user to profile"),

	OPT_CALLBACK_NOOPT('b', "branch-any", &record.opts.branch_stack,
		     "branch any", "sample any taken branches",
		     parse_branch_stack),

	OPT_CALLBACK('j', "branch-filter", &record.opts.branch_stack,
		     "branch filter mask", "branch stack filter modes",
		     parse_branch_stack),
	OPT_BOOLEAN('W', "weight", &record.opts.sample_weight,
		    "sample by weight (on special events only)"),
	OPT_BOOLEAN(0, "transaction", &record.opts.sample_transaction,
		    "sample transaction flags (special events only)"),
	OPT_BOOLEAN(0, "per-thread", &record.opts.target.per_thread,
		    "use per-thread mmaps"),
	OPT_CALLBACK_OPTARG('I', "intr-regs", &record.opts.sample_intr_regs, NULL, "any register",
		    "sample selected machine registers on interrupt,"
		    " use -I ? to list register names", parse_regs),
	OPT_CALLBACK_OPTARG(0, "user-regs", &record.opts.sample_user_regs, NULL, "any register",
		    "sample selected machine registers on interrupt,"
		    " use -I ? to list register names", parse_regs),
	OPT_BOOLEAN(0, "running-time", &record.opts.running_time,
		    "Record running/enabled time of read (:S) events"),
	OPT_CALLBACK('k', "clockid", &record.opts,
	"clockid", "clockid to use for events, see clock_gettime()",
	parse_clockid),
	OPT_STRING_OPTARG('S', "snapshot", &record.opts.auxtrace_snapshot_opts,
			  "opts", "AUX area tracing Snapshot Mode", ""),
	OPT_UINTEGER(0, "proc-map-timeout", &record.opts.proc_map_timeout,
			"per thread proc mmap processing timeout in ms"),
	OPT_BOOLEAN(0, "namespaces", &record.opts.record_namespaces,
		    "Record namespaces events"),
	OPT_BOOLEAN(0, "switch-events", &record.opts.record_switch_events,
		    "Record context switch events"),
	OPT_BOOLEAN_FLAG(0, "all-kernel", &record.opts.all_kernel,
			 "Configure all used events to run in kernel space.",
			 PARSE_OPT_EXCLUSIVE),
	OPT_BOOLEAN_FLAG(0, "all-user", &record.opts.all_user,
			 "Configure all used events to run in user space.",
			 PARSE_OPT_EXCLUSIVE),
	OPT_STRING(0, "clang-path", &llvm_param.clang_path, "clang path",
		   "clang binary to use for compiling BPF scriptlets"),
	OPT_STRING(0, "clang-opt", &llvm_param.clang_opt, "clang options",
		   "options passed to clang when compiling BPF scriptlets"),
	OPT_STRING(0, "vmlinux", &symbol_conf.vmlinux_name,
		   "file", "vmlinux pathname"),
	OPT_BOOLEAN(0, "buildid-all", &record.buildid_all,
		    "Record build-id of all DSOs regardless of hits"),
	OPT_BOOLEAN(0, "timestamp-filename", &record.timestamp_filename,
		    "append timestamp to output filename"),
	OPT_STRING_OPTARG_SET(0, "switch-output", &record.switch_output.str,
			  &record.switch_output.set, "signal,size,time",
			  "Switch output when receive SIGUSR2 or cross size,time threshold",
			  "signal"),
	OPT_BOOLEAN(0, "dry-run", &dry_run,
		    "Parse options then exit"),
	OPT_END()
};

/* Exported so builtin-script.c can reuse the same option table. */
struct option *record_options = __record_options;
1611
b0ad8ea6 1612int cmd_record(int argc, const char **argv)
0e9b20b8 1613{
ef149c25 1614 int err;
8c6f45a7 1615 struct record *rec = &record;
16ad2ffb 1616 char errbuf[BUFSIZ];
0e9b20b8 1617
48e1cab1
WN
1618#ifndef HAVE_LIBBPF_SUPPORT
1619# define set_nobuild(s, l, c) set_option_nobuild(record_options, s, l, "NO_LIBBPF=1", c)
1620 set_nobuild('\0', "clang-path", true);
1621 set_nobuild('\0', "clang-opt", true);
1622# undef set_nobuild
7efe0e03
HK
1623#endif
1624
1625#ifndef HAVE_BPF_PROLOGUE
1626# if !defined (HAVE_DWARF_SUPPORT)
1627# define REASON "NO_DWARF=1"
1628# elif !defined (HAVE_LIBBPF_SUPPORT)
1629# define REASON "NO_LIBBPF=1"
1630# else
1631# define REASON "this architecture doesn't support BPF prologue"
1632# endif
1633# define set_nobuild(s, l, c) set_option_nobuild(record_options, s, l, REASON, c)
1634 set_nobuild('\0', "vmlinux", true);
1635# undef set_nobuild
1636# undef REASON
48e1cab1
WN
1637#endif
1638
3e2be2da
ACM
1639 rec->evlist = perf_evlist__new();
1640 if (rec->evlist == NULL)
361c99a6
ACM
1641 return -ENOMEM;
1642
ecc4c561
ACM
1643 err = perf_config(perf_record_config, rec);
1644 if (err)
1645 return err;
eb853e80 1646
bca647aa 1647 argc = parse_options(argc, argv, record_options, record_usage,
655000e7 1648 PARSE_OPT_STOP_AT_NON_OPTION);
68ba3235
NK
1649 if (quiet)
1650 perf_quiet_option();
483635a9
JO
1651
1652 /* Make system wide (-a) the default target. */
602ad878 1653 if (!argc && target__none(&rec->opts.target))
483635a9 1654 rec->opts.target.system_wide = true;
0e9b20b8 1655
bea03405 1656 if (nr_cgroups && !rec->opts.target.system_wide) {
c7118369
NK
1657 usage_with_options_msg(record_usage, record_options,
1658 "cgroup monitoring only available in system-wide mode");
1659
023695d9 1660 }
b757bb09
AH
1661 if (rec->opts.record_switch_events &&
1662 !perf_can_record_switch_events()) {
c7118369
NK
1663 ui__error("kernel does not support recording context switch events\n");
1664 parse_options_usage(record_usage, record_options, "switch-events", 0);
1665 return -EINVAL;
b757bb09 1666 }
023695d9 1667
cb4e1ebb
JO
1668 if (switch_output_setup(rec)) {
1669 parse_options_usage(record_usage, record_options, "switch-output", 0);
1670 return -EINVAL;
1671 }
1672
bfacbe3b
JO
1673 if (rec->switch_output.time) {
1674 signal(SIGALRM, alarm_sig_handler);
1675 alarm(rec->switch_output.time);
1676 }
1677
ef149c25
AH
1678 if (!rec->itr) {
1679 rec->itr = auxtrace_record__init(rec->evlist, &err);
1680 if (err)
5c01ad60 1681 goto out;
ef149c25
AH
1682 }
1683
2dd6d8a1
AH
1684 err = auxtrace_parse_snapshot_options(rec->itr, &rec->opts,
1685 rec->opts.auxtrace_snapshot_opts);
1686 if (err)
5c01ad60 1687 goto out;
2dd6d8a1 1688
1b36c03e
AH
1689 /*
1690 * Allow aliases to facilitate the lookup of symbols for address
1691 * filters. Refer to auxtrace_parse_filters().
1692 */
1693 symbol_conf.allow_aliases = true;
1694
1695 symbol__init(NULL);
1696
1697 err = auxtrace_parse_filters(rec->evlist);
1698 if (err)
1699 goto out;
1700
0aab2136 1701 if (dry_run)
5c01ad60 1702 goto out;
0aab2136 1703
d7888573
WN
1704 err = bpf__setup_stdout(rec->evlist);
1705 if (err) {
1706 bpf__strerror_setup_stdout(rec->evlist, err, errbuf, sizeof(errbuf));
1707 pr_err("ERROR: Setup BPF stdout failed: %s\n",
1708 errbuf);
5c01ad60 1709 goto out;
d7888573
WN
1710 }
1711
ef149c25
AH
1712 err = -ENOMEM;
1713
b0ebd811 1714 if (symbol_conf.kptr_restrict && !perf_evlist__exclude_kernel(rec->evlist))
646aaea6
ACM
1715 pr_warning(
1716"WARNING: Kernel address maps (/proc/{kallsyms,modules}) are restricted,\n"
1717"check /proc/sys/kernel/kptr_restrict.\n\n"
1718"Samples in kernel functions may not be resolved if a suitable vmlinux\n"
1719"file is not found in the buildid cache or in the vmlinux path.\n\n"
1720"Samples in kernel modules won't be resolved at all.\n\n"
1721"If some relocation was applied (e.g. kexec) symbols may be misresolved\n"
1722"even with a suitable vmlinux or kallsyms file.\n\n");
ec80fde7 1723
0c1d46a8 1724 if (rec->no_buildid_cache || rec->no_buildid) {
a1ac1d3c 1725 disable_buildid_cache();
dc0c6127 1726 } else if (rec->switch_output.enabled) {
0c1d46a8
WN
1727 /*
1728 * In 'perf record --switch-output', disable buildid
1729 * generation by default to reduce data file switching
1730 * overhead. Still generate buildid if they are required
1731 * explicitly using
1732 *
60437ac0 1733 * perf record --switch-output --no-no-buildid \
0c1d46a8
WN
1734 * --no-no-buildid-cache
1735 *
1736 * Following code equals to:
1737 *
1738 * if ((rec->no_buildid || !rec->no_buildid_set) &&
1739 * (rec->no_buildid_cache || !rec->no_buildid_cache_set))
1740 * disable_buildid_cache();
1741 */
1742 bool disable = true;
1743
1744 if (rec->no_buildid_set && !rec->no_buildid)
1745 disable = false;
1746 if (rec->no_buildid_cache_set && !rec->no_buildid_cache)
1747 disable = false;
1748 if (disable) {
1749 rec->no_buildid = true;
1750 rec->no_buildid_cache = true;
1751 disable_buildid_cache();
1752 }
1753 }
655000e7 1754
4ea648ae
WN
1755 if (record.opts.overwrite)
1756 record.opts.tail_synthesize = true;
1757
3e2be2da 1758 if (rec->evlist->nr_entries == 0 &&
4b4cd503 1759 __perf_evlist__add_default(rec->evlist, !record.opts.no_samples) < 0) {
69aad6f1 1760 pr_err("Not enough memory for event selector list\n");
394c01ed 1761 goto out;
bbd36e5e 1762 }
0e9b20b8 1763
69e7e5b0
AH
1764 if (rec->opts.target.tid && !rec->opts.no_inherit_set)
1765 rec->opts.no_inherit = true;
1766
602ad878 1767 err = target__validate(&rec->opts.target);
16ad2ffb 1768 if (err) {
602ad878 1769 target__strerror(&rec->opts.target, err, errbuf, BUFSIZ);
16ad2ffb
NK
1770 ui__warning("%s", errbuf);
1771 }
1772
602ad878 1773 err = target__parse_uid(&rec->opts.target);
16ad2ffb
NK
1774 if (err) {
1775 int saved_errno = errno;
4bd0f2d2 1776
602ad878 1777 target__strerror(&rec->opts.target, err, errbuf, BUFSIZ);
3780f488 1778 ui__error("%s", errbuf);
16ad2ffb
NK
1779
1780 err = -saved_errno;
394c01ed 1781 goto out;
16ad2ffb 1782 }
0d37aa34 1783
23dc4f15
JO
1784 /* Enable ignoring missing threads when -u option is defined. */
1785 rec->opts.ignore_missing_thread = rec->opts.target.uid != UINT_MAX;
1786
16ad2ffb 1787 err = -ENOMEM;
3e2be2da 1788 if (perf_evlist__create_maps(rec->evlist, &rec->opts.target) < 0)
dd7927f4 1789 usage_with_options(record_usage, record_options);
69aad6f1 1790
ef149c25
AH
1791 err = auxtrace_record__options(rec->itr, rec->evlist, &rec->opts);
1792 if (err)
394c01ed 1793 goto out;
ef149c25 1794
6156681b
NK
1795 /*
1796 * We take all buildids when the file contains
1797 * AUX area tracing data because we do not decode the
1798 * trace because it would take too long.
1799 */
1800 if (rec->opts.full_auxtrace)
1801 rec->buildid_all = true;
1802
b4006796 1803 if (record_opts__config(&rec->opts)) {
39d17dac 1804 err = -EINVAL;
394c01ed 1805 goto out;
7e4ff9e3
MG
1806 }
1807
d20deb64 1808 err = __cmd_record(&record, argc, argv);
394c01ed 1809out:
45604710 1810 perf_evlist__delete(rec->evlist);
d65a458b 1811 symbol__exit();
ef149c25 1812 auxtrace_record__free(rec->itr);
39d17dac 1813 return err;
0e9b20b8 1814}
2dd6d8a1
AH
1815
1816static void snapshot_sig_handler(int sig __maybe_unused)
1817{
dc0c6127
JO
1818 struct record *rec = &record;
1819
5f9cf599
WN
1820 if (trigger_is_ready(&auxtrace_snapshot_trigger)) {
1821 trigger_hit(&auxtrace_snapshot_trigger);
1822 auxtrace_record__snapshot_started = 1;
1823 if (auxtrace_record__snapshot_start(record.itr))
1824 trigger_error(&auxtrace_snapshot_trigger);
1825 }
3c1cb7e3 1826
dc0c6127 1827 if (switch_output_signal(rec))
3c1cb7e3 1828 trigger_hit(&switch_output_trigger);
2dd6d8a1 1829}
bfacbe3b
JO
1830
1831static void alarm_sig_handler(int sig __maybe_unused)
1832{
1833 struct record *rec = &record;
1834
1835 if (switch_output_time(rec))
1836 trigger_hit(&switch_output_trigger);
1837}