Merge remote-tracking branch 'tip/perf/urgent' into perf/core
[linux-2.6-block.git] / tools / perf / builtin-record.c
CommitLineData
b2441318 1// SPDX-License-Identifier: GPL-2.0
abaff32a 2/*
bf9e1876
IM
3 * builtin-record.c
4 *
5 * Builtin record command: Record the profile of a workload
6 * (or a CPU, or a PID) into the perf.data output file - for
7 * later analysis via perf report.
abaff32a 8 */
16f762a2 9#include "builtin.h"
bf9e1876
IM
10
11#include "perf.h"
12
6122e4e4 13#include "util/build-id.h"
6eda5838 14#include "util/util.h"
4b6ab94e 15#include <subcmd/parse-options.h>
8ad8db37 16#include "util/parse-events.h"
41840d21 17#include "util/config.h"
6eda5838 18
8f651eae 19#include "util/callchain.h"
f14d5707 20#include "util/cgroup.h"
7c6a1c65 21#include "util/header.h"
66e274f3 22#include "util/event.h"
361c99a6 23#include "util/evlist.h"
69aad6f1 24#include "util/evsel.h"
8f28827a 25#include "util/debug.h"
5d8bb1ec 26#include "util/drv_configs.h"
94c744b6 27#include "util/session.h"
45694aa7 28#include "util/tool.h"
8d06367f 29#include "util/symbol.h"
a12b51c4 30#include "util/cpumap.h"
fd78260b 31#include "util/thread_map.h"
f5fc1412 32#include "util/data.h"
bcc84ec6 33#include "util/perf_regs.h"
ef149c25 34#include "util/auxtrace.h"
46bc29b9 35#include "util/tsc.h"
f00898f4 36#include "util/parse-branch-options.h"
bcc84ec6 37#include "util/parse-regs-options.h"
71dc2326 38#include "util/llvm-utils.h"
8690a2a7 39#include "util/bpf-loader.h"
5f9cf599 40#include "util/trigger.h"
a074865e 41#include "util/perf-hooks.h"
c5e4027e 42#include "util/time-utils.h"
58db1d6e 43#include "util/units.h"
d8871ea7 44#include "asm/bug.h"
7c6a1c65 45
a43783ae 46#include <errno.h>
fd20e811 47#include <inttypes.h>
67230479 48#include <locale.h>
4208735d 49#include <poll.h>
97124d5e 50#include <unistd.h>
de9ac07b 51#include <sched.h>
9607ad3a 52#include <signal.h>
a41794cd 53#include <sys/mman.h>
4208735d 54#include <sys/wait.h>
0693e680 55#include <linux/time64.h>
78da39fa 56
1b43b704 57struct switch_output {
dc0c6127 58 bool enabled;
1b43b704 59 bool signal;
dc0c6127 60 unsigned long size;
bfacbe3b 61 unsigned long time;
cb4e1ebb
JO
62 const char *str;
63 bool set;
1b43b704
JO
64};
65
8c6f45a7 66struct record {
45694aa7 67 struct perf_tool tool;
b4006796 68 struct record_opts opts;
d20deb64 69 u64 bytes_written;
8ceb41d7 70 struct perf_data data;
ef149c25 71 struct auxtrace_record *itr;
d20deb64
ACM
72 struct perf_evlist *evlist;
73 struct perf_session *session;
d20deb64 74 int realtime_prio;
d20deb64 75 bool no_buildid;
d2db9a98 76 bool no_buildid_set;
d20deb64 77 bool no_buildid_cache;
d2db9a98 78 bool no_buildid_cache_set;
6156681b 79 bool buildid_all;
ecfd7a9c 80 bool timestamp_filename;
68588baf 81 bool timestamp_boundary;
1b43b704 82 struct switch_output switch_output;
9f065194 83 unsigned long long samples;
0f82ebc4 84};
a21ca2ca 85
/* Set from the SIGUSR2 handler when an AUX trace snapshot was requested. */
static volatile int auxtrace_record__snapshot_started;
/* Triggers coordinating snapshot capture and output-file rotation. */
static DEFINE_TRIGGER(auxtrace_snapshot_trigger);
static DEFINE_TRIGGER(switch_output_trigger);
89
90static bool switch_output_signal(struct record *rec)
91{
92 return rec->switch_output.signal &&
93 trigger_is_ready(&switch_output_trigger);
94}
95
96static bool switch_output_size(struct record *rec)
97{
98 return rec->switch_output.size &&
99 trigger_is_ready(&switch_output_trigger) &&
100 (rec->bytes_written >= rec->switch_output.size);
101}
102
bfacbe3b
JO
103static bool switch_output_time(struct record *rec)
104{
105 return rec->switch_output.time &&
106 trigger_is_ready(&switch_output_trigger);
107}
108
ded2b8fe
JO
109static int record__write(struct record *rec, struct perf_mmap *map __maybe_unused,
110 void *bf, size_t size)
f5970550 111{
ded2b8fe
JO
112 struct perf_data_file *file = &rec->session->data->file;
113
114 if (perf_data_file__write(file, bf, size) < 0) {
50a9b868
JO
115 pr_err("failed to write perf data, error: %m\n");
116 return -1;
f5970550 117 }
8d3eca20 118
cf8b2e69 119 rec->bytes_written += size;
dc0c6127
JO
120
121 if (switch_output_size(rec))
122 trigger_hit(&switch_output_trigger);
123
8d3eca20 124 return 0;
f5970550
PZ
125}
126
45694aa7 127static int process_synthesized_event(struct perf_tool *tool,
d20deb64 128 union perf_event *event,
1d037ca1
IT
129 struct perf_sample *sample __maybe_unused,
130 struct machine *machine __maybe_unused)
234fbbf5 131{
8c6f45a7 132 struct record *rec = container_of(tool, struct record, tool);
ded2b8fe 133 return record__write(rec, NULL, event, event->header.size);
234fbbf5
ACM
134}
135
ded2b8fe 136static int record__pushfn(struct perf_mmap *map, void *to, void *bf, size_t size)
d37f1586
ACM
137{
138 struct record *rec = to;
139
140 rec->samples++;
ded2b8fe 141 return record__write(rec, map, bf, size);
d37f1586
ACM
142}
143
/* Main-loop exit flag, the signal that caused it, and child-exit marker.
 * All written from async signal handlers, hence volatile. */
static volatile int done;
static volatile int signr = -1;
static volatile int child_finished;
c0bdc1c4 147
2dd6d8a1
AH
148static void sig_handler(int sig)
149{
150 if (sig == SIGCHLD)
151 child_finished = 1;
152 else
153 signr = sig;
154
155 done = 1;
156}
157
/* On SIGSEGV, let perf hooks clean up, then dump a stack trace. */
static void sigsegv_handler(int sig)
{
	perf_hooks__recover();
	sighandler_dump_stack(sig);
}
163
2dd6d8a1
AH
164static void record__sig_exit(void)
165{
166 if (signr == -1)
167 return;
168
169 signal(signr, SIG_DFL);
170 raise(signr);
171}
172
e31f0d01
AH
173#ifdef HAVE_AUXTRACE_SUPPORT
174
ef149c25 175static int record__process_auxtrace(struct perf_tool *tool,
ded2b8fe 176 struct perf_mmap *map,
ef149c25
AH
177 union perf_event *event, void *data1,
178 size_t len1, void *data2, size_t len2)
179{
180 struct record *rec = container_of(tool, struct record, tool);
8ceb41d7 181 struct perf_data *data = &rec->data;
ef149c25
AH
182 size_t padding;
183 u8 pad[8] = {0};
184
8ceb41d7 185 if (!perf_data__is_pipe(data)) {
99fa2984 186 off_t file_offset;
8ceb41d7 187 int fd = perf_data__fd(data);
99fa2984
AH
188 int err;
189
190 file_offset = lseek(fd, 0, SEEK_CUR);
191 if (file_offset == -1)
192 return -1;
193 err = auxtrace_index__auxtrace_event(&rec->session->auxtrace_index,
194 event, file_offset);
195 if (err)
196 return err;
197 }
198
ef149c25
AH
199 /* event.auxtrace.size includes padding, see __auxtrace_mmap__read() */
200 padding = (len1 + len2) & 7;
201 if (padding)
202 padding = 8 - padding;
203
ded2b8fe
JO
204 record__write(rec, map, event, event->header.size);
205 record__write(rec, map, data1, len1);
ef149c25 206 if (len2)
ded2b8fe
JO
207 record__write(rec, map, data2, len2);
208 record__write(rec, map, &pad, padding);
ef149c25
AH
209
210 return 0;
211}
212
213static int record__auxtrace_mmap_read(struct record *rec,
e035f4ca 214 struct perf_mmap *map)
ef149c25
AH
215{
216 int ret;
217
e035f4ca 218 ret = auxtrace_mmap__read(map, rec->itr, &rec->tool,
ef149c25
AH
219 record__process_auxtrace);
220 if (ret < 0)
221 return ret;
222
223 if (ret)
224 rec->samples++;
225
226 return 0;
227}
228
2dd6d8a1 229static int record__auxtrace_mmap_read_snapshot(struct record *rec,
e035f4ca 230 struct perf_mmap *map)
2dd6d8a1
AH
231{
232 int ret;
233
e035f4ca 234 ret = auxtrace_mmap__read_snapshot(map, rec->itr, &rec->tool,
2dd6d8a1
AH
235 record__process_auxtrace,
236 rec->opts.auxtrace_snapshot_size);
237 if (ret < 0)
238 return ret;
239
240 if (ret)
241 rec->samples++;
242
243 return 0;
244}
245
246static int record__auxtrace_read_snapshot_all(struct record *rec)
247{
248 int i;
249 int rc = 0;
250
251 for (i = 0; i < rec->evlist->nr_mmaps; i++) {
e035f4ca 252 struct perf_mmap *map = &rec->evlist->mmap[i];
2dd6d8a1 253
e035f4ca 254 if (!map->auxtrace_mmap.base)
2dd6d8a1
AH
255 continue;
256
e035f4ca 257 if (record__auxtrace_mmap_read_snapshot(rec, map) != 0) {
2dd6d8a1
AH
258 rc = -1;
259 goto out;
260 }
261 }
262out:
263 return rc;
264}
265
266static void record__read_auxtrace_snapshot(struct record *rec)
267{
268 pr_debug("Recording AUX area tracing snapshot\n");
269 if (record__auxtrace_read_snapshot_all(rec) < 0) {
5f9cf599 270 trigger_error(&auxtrace_snapshot_trigger);
2dd6d8a1 271 } else {
5f9cf599
WN
272 if (auxtrace_record__snapshot_finish(rec->itr))
273 trigger_error(&auxtrace_snapshot_trigger);
274 else
275 trigger_ready(&auxtrace_snapshot_trigger);
2dd6d8a1
AH
276 }
277}
278
4b5ea3bd
AH
279static int record__auxtrace_init(struct record *rec)
280{
281 int err;
282
283 if (!rec->itr) {
284 rec->itr = auxtrace_record__init(rec->evlist, &err);
285 if (err)
286 return err;
287 }
288
289 err = auxtrace_parse_snapshot_options(rec->itr, &rec->opts,
290 rec->opts.auxtrace_snapshot_opts);
291 if (err)
292 return err;
293
294 return auxtrace_parse_filters(rec->evlist);
295}
296
e31f0d01
AH
297#else
298
299static inline
300int record__auxtrace_mmap_read(struct record *rec __maybe_unused,
e035f4ca 301 struct perf_mmap *map __maybe_unused)
e31f0d01
AH
302{
303 return 0;
304}
305
2dd6d8a1
AH
306static inline
307void record__read_auxtrace_snapshot(struct record *rec __maybe_unused)
de9ac07b 308{
f7b7c26e
PZ
309}
310
2dd6d8a1
AH
311static inline
312int auxtrace_record__snapshot_start(struct auxtrace_record *itr __maybe_unused)
f7b7c26e 313{
2dd6d8a1 314 return 0;
de9ac07b
PZ
315}
316
4b5ea3bd
AH
317static int record__auxtrace_init(struct record *rec __maybe_unused)
318{
319 return 0;
320}
321
2dd6d8a1
AH
322#endif
323
cda57a8c
WN
324static int record__mmap_evlist(struct record *rec,
325 struct perf_evlist *evlist)
326{
327 struct record_opts *opts = &rec->opts;
328 char msg[512];
329
7a276ff6 330 if (perf_evlist__mmap_ex(evlist, opts->mmap_pages,
cda57a8c
WN
331 opts->auxtrace_mmap_pages,
332 opts->auxtrace_snapshot_mode) < 0) {
333 if (errno == EPERM) {
334 pr_err("Permission error mapping pages.\n"
335 "Consider increasing "
336 "/proc/sys/kernel/perf_event_mlock_kb,\n"
337 "or try again with a smaller value of -m/--mmap_pages.\n"
338 "(current value: %u,%u)\n",
339 opts->mmap_pages, opts->auxtrace_mmap_pages);
340 return -errno;
341 } else {
342 pr_err("failed to mmap with %d (%s)\n", errno,
c8b5f2c9 343 str_error_r(errno, msg, sizeof(msg)));
cda57a8c
WN
344 if (errno)
345 return -errno;
346 else
347 return -EINVAL;
348 }
349 }
350 return 0;
351}
352
353static int record__mmap(struct record *rec)
354{
355 return record__mmap_evlist(rec, rec->evlist);
356}
357
8c6f45a7 358static int record__open(struct record *rec)
dd7927f4 359{
d6195a6a 360 char msg[BUFSIZ];
6a4bb04c 361 struct perf_evsel *pos;
d20deb64
ACM
362 struct perf_evlist *evlist = rec->evlist;
363 struct perf_session *session = rec->session;
b4006796 364 struct record_opts *opts = &rec->opts;
5d8bb1ec 365 struct perf_evsel_config_term *err_term;
8d3eca20 366 int rc = 0;
dd7927f4 367
d3dbf43c
ACM
368 /*
369 * For initial_delay we need to add a dummy event so that we can track
370 * PERF_RECORD_MMAP while we wait for the initial delay to enable the
371 * real events, the ones asked by the user.
372 */
373 if (opts->initial_delay) {
374 if (perf_evlist__add_dummy(evlist))
375 return -ENOMEM;
376
377 pos = perf_evlist__first(evlist);
378 pos->tracking = 0;
379 pos = perf_evlist__last(evlist);
380 pos->tracking = 1;
381 pos->attr.enable_on_exec = 1;
382 }
383
e68ae9cf 384 perf_evlist__config(evlist, opts, &callchain_param);
cac21425 385
e5cadb93 386 evlist__for_each_entry(evlist, pos) {
dd7927f4 387try_again:
d988d5ee 388 if (perf_evsel__open(pos, pos->cpus, pos->threads) < 0) {
56e52e85 389 if (perf_evsel__fallback(pos, errno, msg, sizeof(msg))) {
bb963e16 390 if (verbose > 0)
c0a54341 391 ui__warning("%s\n", msg);
d6d901c2
ZY
392 goto try_again;
393 }
ca6a4258 394
56e52e85
ACM
395 rc = -errno;
396 perf_evsel__open_strerror(pos, &opts->target,
397 errno, msg, sizeof(msg));
398 ui__error("%s\n", msg);
8d3eca20 399 goto out;
c171b552 400 }
bfd8f72c
AK
401
402 pos->supported = true;
c171b552 403 }
a43d3f08 404
23d4aad4 405 if (perf_evlist__apply_filters(evlist, &pos)) {
62d94b00 406 pr_err("failed to set filter \"%s\" on event %s with %d (%s)\n",
23d4aad4 407 pos->filter, perf_evsel__name(pos), errno,
c8b5f2c9 408 str_error_r(errno, msg, sizeof(msg)));
8d3eca20 409 rc = -1;
5d8bb1ec
MP
410 goto out;
411 }
412
413 if (perf_evlist__apply_drv_configs(evlist, &pos, &err_term)) {
62d94b00 414 pr_err("failed to set config \"%s\" on event %s with %d (%s)\n",
5d8bb1ec
MP
415 err_term->val.drv_cfg, perf_evsel__name(pos), errno,
416 str_error_r(errno, msg, sizeof(msg)));
417 rc = -1;
8d3eca20 418 goto out;
0a102479
FW
419 }
420
cda57a8c
WN
421 rc = record__mmap(rec);
422 if (rc)
8d3eca20 423 goto out;
0a27d7f9 424
563aecb2 425 session->evlist = evlist;
7b56cce2 426 perf_session__set_id_hdr_size(session);
8d3eca20
DA
427out:
428 return rc;
16c8a109
PZ
429}
430
e3d59112
NK
431static int process_sample_event(struct perf_tool *tool,
432 union perf_event *event,
433 struct perf_sample *sample,
434 struct perf_evsel *evsel,
435 struct machine *machine)
436{
437 struct record *rec = container_of(tool, struct record, tool);
438
68588baf
JY
439 if (rec->evlist->first_sample_time == 0)
440 rec->evlist->first_sample_time = sample->time;
441
442 rec->evlist->last_sample_time = sample->time;
e3d59112 443
68588baf
JY
444 if (rec->buildid_all)
445 return 0;
446
447 rec->samples++;
e3d59112
NK
448 return build_id__mark_dso_hit(tool, event, sample, evsel, machine);
449}
450
8c6f45a7 451static int process_buildids(struct record *rec)
6122e4e4 452{
8ceb41d7 453 struct perf_data *data = &rec->data;
f5fc1412 454 struct perf_session *session = rec->session;
6122e4e4 455
8ceb41d7 456 if (data->size == 0)
9f591fd7
ACM
457 return 0;
458
00dc8657
NK
459 /*
460 * During this process, it'll load kernel map and replace the
461 * dso->long_name to a real pathname it found. In this case
462 * we prefer the vmlinux path like
463 * /lib/modules/3.16.4/build/vmlinux
464 *
465 * rather than build-id path (in debug directory).
466 * $HOME/.debug/.build-id/f0/6e17aa50adf4d00b88925e03775de107611551
467 */
468 symbol_conf.ignore_vmlinux_buildid = true;
469
6156681b
NK
470 /*
471 * If --buildid-all is given, it marks all DSO regardless of hits,
68588baf
JY
472 * so no need to process samples. But if timestamp_boundary is enabled,
473 * it still needs to walk on all samples to get the timestamps of
474 * first/last samples.
6156681b 475 */
68588baf 476 if (rec->buildid_all && !rec->timestamp_boundary)
6156681b
NK
477 rec->tool.sample = NULL;
478
b7b61cbe 479 return perf_session__process_events(session);
6122e4e4
ACM
480}
481
8115d60c 482static void perf_event__synthesize_guest_os(struct machine *machine, void *data)
a1645ce1
ZY
483{
484 int err;
45694aa7 485 struct perf_tool *tool = data;
a1645ce1
ZY
486 /*
487 *As for guest kernel when processing subcommand record&report,
488 *we arrange module mmap prior to guest kernel mmap and trigger
489 *a preload dso because default guest module symbols are loaded
490 *from guest kallsyms instead of /lib/modules/XXX/XXX. This
491 *method is used to avoid symbol missing when the first addr is
492 *in module instead of in guest kernel.
493 */
45694aa7 494 err = perf_event__synthesize_modules(tool, process_synthesized_event,
743eb868 495 machine);
a1645ce1
ZY
496 if (err < 0)
497 pr_err("Couldn't record guest kernel [%d]'s reference"
23346f21 498 " relocation symbol.\n", machine->pid);
a1645ce1 499
a1645ce1
ZY
500 /*
501 * We use _stext for guest kernel because guest kernel's /proc/kallsyms
502 * have no _text sometimes.
503 */
45694aa7 504 err = perf_event__synthesize_kernel_mmap(tool, process_synthesized_event,
0ae617be 505 machine);
a1645ce1
ZY
506 if (err < 0)
507 pr_err("Couldn't record guest kernel [%d]'s reference"
23346f21 508 " relocation symbol.\n", machine->pid);
a1645ce1
ZY
509}
510
98402807
FW
511static struct perf_event_header finished_round_event = {
512 .size = sizeof(struct perf_event_header),
513 .type = PERF_RECORD_FINISHED_ROUND,
514};
515
a4ea0ec4 516static int record__mmap_read_evlist(struct record *rec, struct perf_evlist *evlist,
0b72d69a 517 bool overwrite)
98402807 518{
dcabb507 519 u64 bytes_written = rec->bytes_written;
0e2e63dd 520 int i;
8d3eca20 521 int rc = 0;
a4ea0ec4 522 struct perf_mmap *maps;
98402807 523
cb21686b
WN
524 if (!evlist)
525 return 0;
ef149c25 526
0b72d69a 527 maps = overwrite ? evlist->overwrite_mmap : evlist->mmap;
a4ea0ec4
WN
528 if (!maps)
529 return 0;
530
0b72d69a 531 if (overwrite && evlist->bkw_mmap_state != BKW_MMAP_DATA_PENDING)
54cc54de
WN
532 return 0;
533
cb21686b 534 for (i = 0; i < evlist->nr_mmaps; i++) {
e035f4ca 535 struct perf_mmap *map = &maps[i];
cb21686b 536
e035f4ca
JO
537 if (map->base) {
538 if (perf_mmap__push(map, rec, record__pushfn) != 0) {
8d3eca20
DA
539 rc = -1;
540 goto out;
541 }
542 }
ef149c25 543
e035f4ca
JO
544 if (map->auxtrace_mmap.base && !rec->opts.auxtrace_snapshot_mode &&
545 record__auxtrace_mmap_read(rec, map) != 0) {
ef149c25
AH
546 rc = -1;
547 goto out;
548 }
98402807
FW
549 }
550
dcabb507
JO
551 /*
552 * Mark the round finished in case we wrote
553 * at least one event.
554 */
555 if (bytes_written != rec->bytes_written)
ded2b8fe 556 rc = record__write(rec, NULL, &finished_round_event, sizeof(finished_round_event));
8d3eca20 557
0b72d69a 558 if (overwrite)
54cc54de 559 perf_evlist__toggle_bkw_mmap(evlist, BKW_MMAP_EMPTY);
8d3eca20
DA
560out:
561 return rc;
98402807
FW
562}
563
cb21686b
WN
564static int record__mmap_read_all(struct record *rec)
565{
566 int err;
567
a4ea0ec4 568 err = record__mmap_read_evlist(rec, rec->evlist, false);
cb21686b
WN
569 if (err)
570 return err;
571
05737464 572 return record__mmap_read_evlist(rec, rec->evlist, true);
cb21686b
WN
573}
574
8c6f45a7 575static void record__init_features(struct record *rec)
57706abc 576{
57706abc
DA
577 struct perf_session *session = rec->session;
578 int feat;
579
580 for (feat = HEADER_FIRST_FEATURE; feat < HEADER_LAST_FEATURE; feat++)
581 perf_header__set_feat(&session->header, feat);
582
583 if (rec->no_buildid)
584 perf_header__clear_feat(&session->header, HEADER_BUILD_ID);
585
3e2be2da 586 if (!have_tracepoints(&rec->evlist->entries))
57706abc
DA
587 perf_header__clear_feat(&session->header, HEADER_TRACING_DATA);
588
589 if (!rec->opts.branch_stack)
590 perf_header__clear_feat(&session->header, HEADER_BRANCH_STACK);
ef149c25
AH
591
592 if (!rec->opts.full_auxtrace)
593 perf_header__clear_feat(&session->header, HEADER_AUXTRACE);
ffa517ad
JO
594
595 perf_header__clear_feat(&session->header, HEADER_STAT);
57706abc
DA
596}
597
e1ab48ba
WN
598static void
599record__finish_output(struct record *rec)
600{
8ceb41d7
JO
601 struct perf_data *data = &rec->data;
602 int fd = perf_data__fd(data);
e1ab48ba 603
8ceb41d7 604 if (data->is_pipe)
e1ab48ba
WN
605 return;
606
607 rec->session->header.data_size += rec->bytes_written;
8ceb41d7 608 data->size = lseek(perf_data__fd(data), 0, SEEK_CUR);
e1ab48ba
WN
609
610 if (!rec->no_buildid) {
611 process_buildids(rec);
612
613 if (rec->buildid_all)
614 dsos__hit_all(rec->session);
615 }
616 perf_session__write_header(rec->session, rec->evlist, fd, true);
617
618 return;
619}
620
4ea648ae 621static int record__synthesize_workload(struct record *rec, bool tail)
be7b0c9e 622{
9d6aae72
ACM
623 int err;
624 struct thread_map *thread_map;
be7b0c9e 625
4ea648ae
WN
626 if (rec->opts.tail_synthesize != tail)
627 return 0;
628
9d6aae72
ACM
629 thread_map = thread_map__new_by_tid(rec->evlist->workload.pid);
630 if (thread_map == NULL)
631 return -1;
632
633 err = perf_event__synthesize_thread_map(&rec->tool, thread_map,
be7b0c9e
WN
634 process_synthesized_event,
635 &rec->session->machines.host,
636 rec->opts.sample_address,
637 rec->opts.proc_map_timeout);
9d6aae72
ACM
638 thread_map__put(thread_map);
639 return err;
be7b0c9e
WN
640}
641
4ea648ae 642static int record__synthesize(struct record *rec, bool tail);
3c1cb7e3 643
ecfd7a9c
WN
644static int
645record__switch_output(struct record *rec, bool at_exit)
646{
8ceb41d7 647 struct perf_data *data = &rec->data;
ecfd7a9c
WN
648 int fd, err;
649
650 /* Same Size: "2015122520103046"*/
651 char timestamp[] = "InvalidTimestamp";
652
4ea648ae
WN
653 record__synthesize(rec, true);
654 if (target__none(&rec->opts.target))
655 record__synthesize_workload(rec, true);
656
ecfd7a9c
WN
657 rec->samples = 0;
658 record__finish_output(rec);
659 err = fetch_current_timestamp(timestamp, sizeof(timestamp));
660 if (err) {
661 pr_err("Failed to get current timestamp\n");
662 return -EINVAL;
663 }
664
8ceb41d7 665 fd = perf_data__switch(data, timestamp,
ecfd7a9c
WN
666 rec->session->header.data_offset,
667 at_exit);
668 if (fd >= 0 && !at_exit) {
669 rec->bytes_written = 0;
670 rec->session->header.data_size = 0;
671 }
672
673 if (!quiet)
674 fprintf(stderr, "[ perf record: Dump %s.%s ]\n",
eae8ad80 675 data->file.path, timestamp);
3c1cb7e3
WN
676
677 /* Output tracking events */
be7b0c9e 678 if (!at_exit) {
4ea648ae 679 record__synthesize(rec, false);
3c1cb7e3 680
be7b0c9e
WN
681 /*
682 * In 'perf record --switch-output' without -a,
683 * record__synthesize() in record__switch_output() won't
684 * generate tracking events because there's no thread_map
685 * in evlist. Which causes newly created perf.data doesn't
686 * contain map and comm information.
687 * Create a fake thread_map and directly call
688 * perf_event__synthesize_thread_map() for those events.
689 */
690 if (target__none(&rec->opts.target))
4ea648ae 691 record__synthesize_workload(rec, false);
be7b0c9e 692 }
ecfd7a9c
WN
693 return fd;
694}
695
f33cbe72
ACM
696static volatile int workload_exec_errno;
697
698/*
699 * perf_evlist__prepare_workload will send a SIGUSR1
700 * if the fork fails, since we asked by setting its
701 * want_signal to true.
702 */
45604710
NK
703static void workload_exec_failed_signal(int signo __maybe_unused,
704 siginfo_t *info,
f33cbe72
ACM
705 void *ucontext __maybe_unused)
706{
707 workload_exec_errno = info->si_value.sival_int;
708 done = 1;
f33cbe72
ACM
709 child_finished = 1;
710}
711
2dd6d8a1 712static void snapshot_sig_handler(int sig);
bfacbe3b 713static void alarm_sig_handler(int sig);
2dd6d8a1 714
46bc29b9
AH
715int __weak
716perf_event__synth_time_conv(const struct perf_event_mmap_page *pc __maybe_unused,
717 struct perf_tool *tool __maybe_unused,
718 perf_event__handler_t process __maybe_unused,
719 struct machine *machine __maybe_unused)
720{
721 return 0;
722}
723
ee667f94
WN
724static const struct perf_event_mmap_page *
725perf_evlist__pick_pc(struct perf_evlist *evlist)
726{
b2cb615d
WN
727 if (evlist) {
728 if (evlist->mmap && evlist->mmap[0].base)
729 return evlist->mmap[0].base;
0b72d69a
WN
730 if (evlist->overwrite_mmap && evlist->overwrite_mmap[0].base)
731 return evlist->overwrite_mmap[0].base;
b2cb615d 732 }
ee667f94
WN
733 return NULL;
734}
735
c45628b0
WN
736static const struct perf_event_mmap_page *record__pick_pc(struct record *rec)
737{
ee667f94
WN
738 const struct perf_event_mmap_page *pc;
739
740 pc = perf_evlist__pick_pc(rec->evlist);
741 if (pc)
742 return pc;
c45628b0
WN
743 return NULL;
744}
745
4ea648ae 746static int record__synthesize(struct record *rec, bool tail)
c45c86eb
WN
747{
748 struct perf_session *session = rec->session;
749 struct machine *machine = &session->machines.host;
8ceb41d7 750 struct perf_data *data = &rec->data;
c45c86eb
WN
751 struct record_opts *opts = &rec->opts;
752 struct perf_tool *tool = &rec->tool;
8ceb41d7 753 int fd = perf_data__fd(data);
c45c86eb
WN
754 int err = 0;
755
4ea648ae
WN
756 if (rec->opts.tail_synthesize != tail)
757 return 0;
758
8ceb41d7 759 if (data->is_pipe) {
a2015516
JO
760 /*
761 * We need to synthesize events first, because some
762 * features works on top of them (on report side).
763 */
318ec184 764 err = perf_event__synthesize_attrs(tool, rec->evlist,
c45c86eb
WN
765 process_synthesized_event);
766 if (err < 0) {
767 pr_err("Couldn't synthesize attrs.\n");
768 goto out;
769 }
770
a2015516
JO
771 err = perf_event__synthesize_features(tool, session, rec->evlist,
772 process_synthesized_event);
773 if (err < 0) {
774 pr_err("Couldn't synthesize features.\n");
775 return err;
776 }
777
c45c86eb
WN
778 if (have_tracepoints(&rec->evlist->entries)) {
779 /*
780 * FIXME err <= 0 here actually means that
781 * there were no tracepoints so its not really
782 * an error, just that we don't need to
783 * synthesize anything. We really have to
784 * return this more properly and also
785 * propagate errors that now are calling die()
786 */
787 err = perf_event__synthesize_tracing_data(tool, fd, rec->evlist,
788 process_synthesized_event);
789 if (err <= 0) {
790 pr_err("Couldn't record tracing data.\n");
791 goto out;
792 }
793 rec->bytes_written += err;
794 }
795 }
796
c45628b0 797 err = perf_event__synth_time_conv(record__pick_pc(rec), tool,
46bc29b9
AH
798 process_synthesized_event, machine);
799 if (err)
800 goto out;
801
c45c86eb
WN
802 if (rec->opts.full_auxtrace) {
803 err = perf_event__synthesize_auxtrace_info(rec->itr, tool,
804 session, process_synthesized_event);
805 if (err)
806 goto out;
807 }
808
6c443954
ACM
809 if (!perf_evlist__exclude_kernel(rec->evlist)) {
810 err = perf_event__synthesize_kernel_mmap(tool, process_synthesized_event,
811 machine);
812 WARN_ONCE(err < 0, "Couldn't record kernel reference relocation symbol\n"
813 "Symbol resolution may be skewed if relocation was used (e.g. kexec).\n"
814 "Check /proc/kallsyms permission or run as root.\n");
815
816 err = perf_event__synthesize_modules(tool, process_synthesized_event,
817 machine);
818 WARN_ONCE(err < 0, "Couldn't record kernel module information.\n"
819 "Symbol resolution may be skewed if relocation was used (e.g. kexec).\n"
820 "Check /proc/modules permission or run as root.\n");
821 }
c45c86eb
WN
822
823 if (perf_guest) {
824 machines__process_guests(&session->machines,
825 perf_event__synthesize_guest_os, tool);
826 }
827
bfd8f72c
AK
828 err = perf_event__synthesize_extra_attr(&rec->tool,
829 rec->evlist,
830 process_synthesized_event,
831 data->is_pipe);
832 if (err)
833 goto out;
834
373565d2
AK
835 err = perf_event__synthesize_thread_map2(&rec->tool, rec->evlist->threads,
836 process_synthesized_event,
837 NULL);
838 if (err < 0) {
839 pr_err("Couldn't synthesize thread map.\n");
840 return err;
841 }
842
843 err = perf_event__synthesize_cpu_map(&rec->tool, rec->evlist->cpus,
844 process_synthesized_event, NULL);
845 if (err < 0) {
846 pr_err("Couldn't synthesize cpu map.\n");
847 return err;
848 }
849
c45c86eb
WN
850 err = __machine__synthesize_threads(machine, tool, &opts->target, rec->evlist->threads,
851 process_synthesized_event, opts->sample_address,
340b47f5 852 opts->proc_map_timeout, 1);
c45c86eb
WN
853out:
854 return err;
855}
856
8c6f45a7 857static int __cmd_record(struct record *rec, int argc, const char **argv)
16c8a109 858{
57706abc 859 int err;
45604710 860 int status = 0;
8b412664 861 unsigned long waking = 0;
46be604b 862 const bool forks = argc > 0;
45694aa7 863 struct perf_tool *tool = &rec->tool;
b4006796 864 struct record_opts *opts = &rec->opts;
8ceb41d7 865 struct perf_data *data = &rec->data;
d20deb64 866 struct perf_session *session;
6dcf45ef 867 bool disabled = false, draining = false;
42aa276f 868 int fd;
de9ac07b 869
45604710 870 atexit(record__sig_exit);
f5970550
PZ
871 signal(SIGCHLD, sig_handler);
872 signal(SIGINT, sig_handler);
804f7ac7 873 signal(SIGTERM, sig_handler);
a074865e 874 signal(SIGSEGV, sigsegv_handler);
c0bdc1c4 875
f3b3614a
HB
876 if (rec->opts.record_namespaces)
877 tool->namespace_events = true;
878
dc0c6127 879 if (rec->opts.auxtrace_snapshot_mode || rec->switch_output.enabled) {
2dd6d8a1 880 signal(SIGUSR2, snapshot_sig_handler);
3c1cb7e3
WN
881 if (rec->opts.auxtrace_snapshot_mode)
882 trigger_on(&auxtrace_snapshot_trigger);
dc0c6127 883 if (rec->switch_output.enabled)
3c1cb7e3 884 trigger_on(&switch_output_trigger);
c0bdc1c4 885 } else {
2dd6d8a1 886 signal(SIGUSR2, SIG_IGN);
c0bdc1c4 887 }
f5970550 888
8ceb41d7 889 session = perf_session__new(data, false, tool);
94c744b6 890 if (session == NULL) {
ffa91880 891 pr_err("Perf session creation failed.\n");
a9a70bbc
ACM
892 return -1;
893 }
894
8ceb41d7 895 fd = perf_data__fd(data);
d20deb64
ACM
896 rec->session = session;
897
8c6f45a7 898 record__init_features(rec);
330aa675 899
d4db3f16 900 if (forks) {
3e2be2da 901 err = perf_evlist__prepare_workload(rec->evlist, &opts->target,
8ceb41d7 902 argv, data->is_pipe,
735f7e0b 903 workload_exec_failed_signal);
35b9d88e
ACM
904 if (err < 0) {
905 pr_err("Couldn't run the workload!\n");
45604710 906 status = err;
35b9d88e 907 goto out_delete_session;
856e9660 908 }
856e9660
PZ
909 }
910
ad46e48c
JO
911 /*
912 * If we have just single event and are sending data
913 * through pipe, we need to force the ids allocation,
914 * because we synthesize event name through the pipe
915 * and need the id for that.
916 */
917 if (data->is_pipe && rec->evlist->nr_entries == 1)
918 rec->opts.sample_id = true;
919
8c6f45a7 920 if (record__open(rec) != 0) {
8d3eca20 921 err = -1;
45604710 922 goto out_child;
8d3eca20 923 }
de9ac07b 924
8690a2a7
WN
925 err = bpf__apply_obj_config();
926 if (err) {
927 char errbuf[BUFSIZ];
928
929 bpf__strerror_apply_obj_config(err, errbuf, sizeof(errbuf));
930 pr_err("ERROR: Apply config to BPF failed: %s\n",
931 errbuf);
932 goto out_child;
933 }
934
cca8482c
AH
935 /*
936 * Normally perf_session__new would do this, but it doesn't have the
937 * evlist.
938 */
939 if (rec->tool.ordered_events && !perf_evlist__sample_id_all(rec->evlist)) {
940 pr_warning("WARNING: No sample_id_all support, falling back to unordered processing\n");
941 rec->tool.ordered_events = false;
942 }
943
3e2be2da 944 if (!rec->evlist->nr_groups)
a8bb559b
NK
945 perf_header__clear_feat(&session->header, HEADER_GROUP_DESC);
946
8ceb41d7 947 if (data->is_pipe) {
42aa276f 948 err = perf_header__write_pipe(fd);
529870e3 949 if (err < 0)
45604710 950 goto out_child;
563aecb2 951 } else {
42aa276f 952 err = perf_session__write_header(session, rec->evlist, fd, false);
d5eed904 953 if (err < 0)
45604710 954 goto out_child;
56b03f3c
ACM
955 }
956
d3665498 957 if (!rec->no_buildid
e20960c0 958 && !perf_header__has_feat(&session->header, HEADER_BUILD_ID)) {
d3665498 959 pr_err("Couldn't generate buildids. "
e20960c0 960 "Use --no-buildid to profile anyway.\n");
8d3eca20 961 err = -1;
45604710 962 goto out_child;
e20960c0
RR
963 }
964
4ea648ae 965 err = record__synthesize(rec, false);
c45c86eb 966 if (err < 0)
45604710 967 goto out_child;
8d3eca20 968
d20deb64 969 if (rec->realtime_prio) {
de9ac07b
PZ
970 struct sched_param param;
971
d20deb64 972 param.sched_priority = rec->realtime_prio;
de9ac07b 973 if (sched_setscheduler(0, SCHED_FIFO, &param)) {
6beba7ad 974 pr_err("Could not set realtime priority.\n");
8d3eca20 975 err = -1;
45604710 976 goto out_child;
de9ac07b
PZ
977 }
978 }
979
774cb499
JO
980 /*
981 * When perf is starting the traced process, all the events
982 * (apart from group members) have enable_on_exec=1 set,
983 * so don't spoil it by prematurely enabling them.
984 */
6619a53e 985 if (!target__none(&opts->target) && !opts->initial_delay)
3e2be2da 986 perf_evlist__enable(rec->evlist);
764e16a3 987
856e9660
PZ
988 /*
989 * Let the child rip
990 */
e803cf97 991 if (forks) {
20a8a3cf 992 struct machine *machine = &session->machines.host;
e5bed564 993 union perf_event *event;
e907caf3 994 pid_t tgid;
e5bed564
NK
995
996 event = malloc(sizeof(event->comm) + machine->id_hdr_size);
997 if (event == NULL) {
998 err = -ENOMEM;
999 goto out_child;
1000 }
1001
e803cf97
NK
1002 /*
1003 * Some H/W events are generated before COMM event
1004 * which is emitted during exec(), so perf script
1005 * cannot see a correct process name for those events.
1006 * Synthesize COMM event to prevent it.
1007 */
e907caf3
HB
1008 tgid = perf_event__synthesize_comm(tool, event,
1009 rec->evlist->workload.pid,
1010 process_synthesized_event,
1011 machine);
1012 free(event);
1013
1014 if (tgid == -1)
1015 goto out_child;
1016
1017 event = malloc(sizeof(event->namespaces) +
1018 (NR_NAMESPACES * sizeof(struct perf_ns_link_info)) +
1019 machine->id_hdr_size);
1020 if (event == NULL) {
1021 err = -ENOMEM;
1022 goto out_child;
1023 }
1024
1025 /*
1026 * Synthesize NAMESPACES event for the command specified.
1027 */
1028 perf_event__synthesize_namespaces(tool, event,
1029 rec->evlist->workload.pid,
1030 tgid, process_synthesized_event,
1031 machine);
e5bed564 1032 free(event);
e803cf97 1033
3e2be2da 1034 perf_evlist__start_workload(rec->evlist);
e803cf97 1035 }
856e9660 1036
6619a53e 1037 if (opts->initial_delay) {
0693e680 1038 usleep(opts->initial_delay * USEC_PER_MSEC);
6619a53e
AK
1039 perf_evlist__enable(rec->evlist);
1040 }
1041
5f9cf599 1042 trigger_ready(&auxtrace_snapshot_trigger);
3c1cb7e3 1043 trigger_ready(&switch_output_trigger);
a074865e 1044 perf_hooks__invoke_record_start();
649c48a9 1045 for (;;) {
9f065194 1046 unsigned long long hits = rec->samples;
de9ac07b 1047
05737464
WN
1048 /*
1049 * rec->evlist->bkw_mmap_state is possible to be
1050 * BKW_MMAP_EMPTY here: when done == true and
1051 * hits != rec->samples in previous round.
1052 *
1053 * perf_evlist__toggle_bkw_mmap ensure we never
1054 * convert BKW_MMAP_EMPTY to BKW_MMAP_DATA_PENDING.
1055 */
1056 if (trigger_is_hit(&switch_output_trigger) || done || draining)
1057 perf_evlist__toggle_bkw_mmap(rec->evlist, BKW_MMAP_DATA_PENDING);
1058
8c6f45a7 1059 if (record__mmap_read_all(rec) < 0) {
5f9cf599 1060 trigger_error(&auxtrace_snapshot_trigger);
3c1cb7e3 1061 trigger_error(&switch_output_trigger);
8d3eca20 1062 err = -1;
45604710 1063 goto out_child;
8d3eca20 1064 }
de9ac07b 1065
2dd6d8a1
AH
1066 if (auxtrace_record__snapshot_started) {
1067 auxtrace_record__snapshot_started = 0;
5f9cf599 1068 if (!trigger_is_error(&auxtrace_snapshot_trigger))
2dd6d8a1 1069 record__read_auxtrace_snapshot(rec);
5f9cf599 1070 if (trigger_is_error(&auxtrace_snapshot_trigger)) {
2dd6d8a1
AH
1071 pr_err("AUX area tracing snapshot failed\n");
1072 err = -1;
1073 goto out_child;
1074 }
1075 }
1076
3c1cb7e3 1077 if (trigger_is_hit(&switch_output_trigger)) {
05737464
WN
1078 /*
1079 * If switch_output_trigger is hit, the data in
1080 * overwritable ring buffer should have been collected,
1081 * so bkw_mmap_state should be set to BKW_MMAP_EMPTY.
1082 *
1083 * If SIGUSR2 raise after or during record__mmap_read_all(),
1084 * record__mmap_read_all() didn't collect data from
1085 * overwritable ring buffer. Read again.
1086 */
1087 if (rec->evlist->bkw_mmap_state == BKW_MMAP_RUNNING)
1088 continue;
3c1cb7e3
WN
1089 trigger_ready(&switch_output_trigger);
1090
05737464
WN
1091 /*
1092 * Reenable events in overwrite ring buffer after
1093 * record__mmap_read_all(): we should have collected
1094 * data from it.
1095 */
1096 perf_evlist__toggle_bkw_mmap(rec->evlist, BKW_MMAP_RUNNING);
1097
3c1cb7e3
WN
1098 if (!quiet)
1099 fprintf(stderr, "[ perf record: dump data: Woken up %ld times ]\n",
1100 waking);
1101 waking = 0;
1102 fd = record__switch_output(rec, false);
1103 if (fd < 0) {
1104 pr_err("Failed to switch to new file\n");
1105 trigger_error(&switch_output_trigger);
1106 err = fd;
1107 goto out_child;
1108 }
bfacbe3b
JO
1109
1110 /* re-arm the alarm */
1111 if (rec->switch_output.time)
1112 alarm(rec->switch_output.time);
3c1cb7e3
WN
1113 }
1114
d20deb64 1115 if (hits == rec->samples) {
6dcf45ef 1116 if (done || draining)
649c48a9 1117 break;
f66a889d 1118 err = perf_evlist__poll(rec->evlist, -1);
a515114f
JO
1119 /*
1120 * Propagate error, only if there's any. Ignore positive
1121 * number of returned events and interrupt error.
1122 */
1123 if (err > 0 || (err < 0 && errno == EINTR))
45604710 1124 err = 0;
8b412664 1125 waking++;
6dcf45ef
ACM
1126
1127 if (perf_evlist__filter_pollfd(rec->evlist, POLLERR | POLLHUP) == 0)
1128 draining = true;
8b412664
PZ
1129 }
1130
774cb499
JO
1131 /*
1132 * When perf is starting the traced process, at the end events
1133 * die with the process and we wait for that. Thus no need to
1134 * disable events in this case.
1135 */
602ad878 1136 if (done && !disabled && !target__none(&opts->target)) {
5f9cf599 1137 trigger_off(&auxtrace_snapshot_trigger);
3e2be2da 1138 perf_evlist__disable(rec->evlist);
2711926a
JO
1139 disabled = true;
1140 }
de9ac07b 1141 }
5f9cf599 1142 trigger_off(&auxtrace_snapshot_trigger);
3c1cb7e3 1143 trigger_off(&switch_output_trigger);
de9ac07b 1144
f33cbe72 1145 if (forks && workload_exec_errno) {
35550da3 1146 char msg[STRERR_BUFSIZE];
c8b5f2c9 1147 const char *emsg = str_error_r(workload_exec_errno, msg, sizeof(msg));
f33cbe72
ACM
1148 pr_err("Workload failed: %s\n", emsg);
1149 err = -1;
45604710 1150 goto out_child;
f33cbe72
ACM
1151 }
1152
e3d59112 1153 if (!quiet)
45604710 1154 fprintf(stderr, "[ perf record: Woken up %ld times to write data ]\n", waking);
b44308f5 1155
4ea648ae
WN
1156 if (target__none(&rec->opts.target))
1157 record__synthesize_workload(rec, true);
1158
45604710
NK
1159out_child:
1160 if (forks) {
1161 int exit_status;
addc2785 1162
45604710
NK
1163 if (!child_finished)
1164 kill(rec->evlist->workload.pid, SIGTERM);
1165
1166 wait(&exit_status);
1167
1168 if (err < 0)
1169 status = err;
1170 else if (WIFEXITED(exit_status))
1171 status = WEXITSTATUS(exit_status);
1172 else if (WIFSIGNALED(exit_status))
1173 signr = WTERMSIG(exit_status);
1174 } else
1175 status = err;
1176
4ea648ae 1177 record__synthesize(rec, true);
e3d59112
NK
1178 /* this will be recalculated during process_buildids() */
1179 rec->samples = 0;
1180
ecfd7a9c
WN
1181 if (!err) {
1182 if (!rec->timestamp_filename) {
1183 record__finish_output(rec);
1184 } else {
1185 fd = record__switch_output(rec, true);
1186 if (fd < 0) {
1187 status = fd;
1188 goto out_delete_session;
1189 }
1190 }
1191 }
39d17dac 1192
a074865e
WN
1193 perf_hooks__invoke_record_end();
1194
e3d59112
NK
1195 if (!err && !quiet) {
1196 char samples[128];
ecfd7a9c
WN
1197 const char *postfix = rec->timestamp_filename ?
1198 ".<timestamp>" : "";
e3d59112 1199
ef149c25 1200 if (rec->samples && !rec->opts.full_auxtrace)
e3d59112
NK
1201 scnprintf(samples, sizeof(samples),
1202 " (%" PRIu64 " samples)", rec->samples);
1203 else
1204 samples[0] = '\0';
1205
ecfd7a9c 1206 fprintf(stderr, "[ perf record: Captured and wrote %.3f MB %s%s%s ]\n",
8ceb41d7 1207 perf_data__size(data) / 1024.0 / 1024.0,
eae8ad80 1208 data->file.path, postfix, samples);
e3d59112
NK
1209 }
1210
39d17dac
ACM
1211out_delete_session:
1212 perf_session__delete(session);
45604710 1213 return status;
de9ac07b 1214}
0e9b20b8 1215
0883e820 1216static void callchain_debug(struct callchain_param *callchain)
09b0fd45 1217{
aad2b21c 1218 static const char *str[CALLCHAIN_MAX] = { "NONE", "FP", "DWARF", "LBR" };
a601fdff 1219
0883e820 1220 pr_debug("callchain: type %s\n", str[callchain->record_mode]);
26d33022 1221
0883e820 1222 if (callchain->record_mode == CALLCHAIN_DWARF)
09b0fd45 1223 pr_debug("callchain: stack dump size %d\n",
0883e820 1224 callchain->dump_size);
09b0fd45
JO
1225}
1226
0883e820
ACM
1227int record_opts__parse_callchain(struct record_opts *record,
1228 struct callchain_param *callchain,
1229 const char *arg, bool unset)
09b0fd45 1230{
09b0fd45 1231 int ret;
0883e820 1232 callchain->enabled = !unset;
eb853e80 1233
09b0fd45
JO
1234 /* --no-call-graph */
1235 if (unset) {
0883e820 1236 callchain->record_mode = CALLCHAIN_NONE;
09b0fd45
JO
1237 pr_debug("callchain: disabled\n");
1238 return 0;
1239 }
1240
0883e820 1241 ret = parse_callchain_record_opt(arg, callchain);
5c0cf224
JO
1242 if (!ret) {
1243 /* Enable data address sampling for DWARF unwind. */
0883e820 1244 if (callchain->record_mode == CALLCHAIN_DWARF)
5c0cf224 1245 record->sample_address = true;
0883e820 1246 callchain_debug(callchain);
5c0cf224 1247 }
26d33022
JO
1248
1249 return ret;
1250}
1251
0883e820
ACM
1252int record_parse_callchain_opt(const struct option *opt,
1253 const char *arg,
1254 int unset)
1255{
1256 return record_opts__parse_callchain(opt->value, &callchain_param, arg, unset);
1257}
1258
c421e80b 1259int record_callchain_opt(const struct option *opt,
09b0fd45
JO
1260 const char *arg __maybe_unused,
1261 int unset __maybe_unused)
1262{
2ddd5c04 1263 struct callchain_param *callchain = opt->value;
c421e80b 1264
2ddd5c04 1265 callchain->enabled = true;
09b0fd45 1266
2ddd5c04
ACM
1267 if (callchain->record_mode == CALLCHAIN_NONE)
1268 callchain->record_mode = CALLCHAIN_FP;
eb853e80 1269
2ddd5c04 1270 callchain_debug(callchain);
09b0fd45
JO
1271 return 0;
1272}
1273
eb853e80
JO
1274static int perf_record_config(const char *var, const char *value, void *cb)
1275{
7a29c087
NK
1276 struct record *rec = cb;
1277
1278 if (!strcmp(var, "record.build-id")) {
1279 if (!strcmp(value, "cache"))
1280 rec->no_buildid_cache = false;
1281 else if (!strcmp(value, "no-cache"))
1282 rec->no_buildid_cache = true;
1283 else if (!strcmp(value, "skip"))
1284 rec->no_buildid = true;
1285 else
1286 return -1;
1287 return 0;
1288 }
cff17205
YX
1289 if (!strcmp(var, "record.call-graph")) {
1290 var = "call-graph.record-mode";
1291 return perf_default_config(var, value, cb);
1292 }
eb853e80 1293
cff17205 1294 return 0;
eb853e80
JO
1295}
1296
/* Maps a clockid name (as accepted by -k/--clockid) to its numeric id. */
struct clockid_map {
	const char *name;
	int clockid;
};

#define CLOCKID_MAP(n, c)	\
	{ .name = n, .clockid = (c), }

/* Terminator entry for the clockids[] table below. */
#define CLOCKID_END	{ .name = NULL, }


/*
 * Add the missing ones, we need to build on many distros...
 * (Fallback numeric values for clockids whose constants may be absent
 * from older libc headers; values match the Linux UAPI definitions.)
 */
#ifndef CLOCK_MONOTONIC_RAW
#define CLOCK_MONOTONIC_RAW 4
#endif
#ifndef CLOCK_BOOTTIME
#define CLOCK_BOOTTIME 7
#endif
#ifndef CLOCK_TAI
#define CLOCK_TAI 11
#endif

/* Name -> clockid lookup table used by parse_clockid(). */
static const struct clockid_map clockids[] = {
	/* available for all events, NMI safe */
	CLOCKID_MAP("monotonic", CLOCK_MONOTONIC),
	CLOCKID_MAP("monotonic_raw", CLOCK_MONOTONIC_RAW),

	/* available for some events */
	CLOCKID_MAP("realtime", CLOCK_REALTIME),
	CLOCKID_MAP("boottime", CLOCK_BOOTTIME),
	CLOCKID_MAP("tai", CLOCK_TAI),

	/* available for the lazy */
	CLOCKID_MAP("mono", CLOCK_MONOTONIC),
	CLOCKID_MAP("raw", CLOCK_MONOTONIC_RAW),
	CLOCKID_MAP("real", CLOCK_REALTIME),
	CLOCKID_MAP("boot", CLOCK_BOOTTIME),

	CLOCKID_END,
};
1339
1340static int parse_clockid(const struct option *opt, const char *str, int unset)
1341{
1342 struct record_opts *opts = (struct record_opts *)opt->value;
1343 const struct clockid_map *cm;
1344 const char *ostr = str;
1345
1346 if (unset) {
1347 opts->use_clockid = 0;
1348 return 0;
1349 }
1350
1351 /* no arg passed */
1352 if (!str)
1353 return 0;
1354
1355 /* no setting it twice */
1356 if (opts->use_clockid)
1357 return -1;
1358
1359 opts->use_clockid = true;
1360
1361 /* if its a number, we're done */
1362 if (sscanf(str, "%d", &opts->clockid) == 1)
1363 return 0;
1364
1365 /* allow a "CLOCK_" prefix to the name */
1366 if (!strncasecmp(str, "CLOCK_", 6))
1367 str += 6;
1368
1369 for (cm = clockids; cm->name; cm++) {
1370 if (!strcasecmp(str, cm->name)) {
1371 opts->clockid = cm->clockid;
1372 return 0;
1373 }
1374 }
1375
1376 opts->use_clockid = false;
1377 ui__warning("unknown clockid %s, check man page\n", ostr);
1378 return -1;
1379}
1380
e9db1310
AH
1381static int record__parse_mmap_pages(const struct option *opt,
1382 const char *str,
1383 int unset __maybe_unused)
1384{
1385 struct record_opts *opts = opt->value;
1386 char *s, *p;
1387 unsigned int mmap_pages;
1388 int ret;
1389
1390 if (!str)
1391 return -EINVAL;
1392
1393 s = strdup(str);
1394 if (!s)
1395 return -ENOMEM;
1396
1397 p = strchr(s, ',');
1398 if (p)
1399 *p = '\0';
1400
1401 if (*s) {
1402 ret = __perf_evlist__parse_mmap_pages(&mmap_pages, s);
1403 if (ret)
1404 goto out_free;
1405 opts->mmap_pages = mmap_pages;
1406 }
1407
1408 if (!p) {
1409 ret = 0;
1410 goto out_free;
1411 }
1412
1413 ret = __perf_evlist__parse_mmap_pages(&mmap_pages, p + 1);
1414 if (ret)
1415 goto out_free;
1416
1417 opts->auxtrace_mmap_pages = mmap_pages;
1418
1419out_free:
1420 free(s);
1421 return ret;
1422}
1423
0c582449
JO
1424static void switch_output_size_warn(struct record *rec)
1425{
1426 u64 wakeup_size = perf_evlist__mmap_size(rec->opts.mmap_pages);
1427 struct switch_output *s = &rec->switch_output;
1428
1429 wakeup_size /= 2;
1430
1431 if (s->size < wakeup_size) {
1432 char buf[100];
1433
1434 unit_number__scnprintf(buf, sizeof(buf), wakeup_size);
1435 pr_warning("WARNING: switch-output data size lower than "
1436 "wakeup kernel buffer size (%s) "
1437 "expect bigger perf.data sizes\n", buf);
1438 }
1439}
1440
cb4e1ebb
JO
1441static int switch_output_setup(struct record *rec)
1442{
1443 struct switch_output *s = &rec->switch_output;
dc0c6127
JO
1444 static struct parse_tag tags_size[] = {
1445 { .tag = 'B', .mult = 1 },
1446 { .tag = 'K', .mult = 1 << 10 },
1447 { .tag = 'M', .mult = 1 << 20 },
1448 { .tag = 'G', .mult = 1 << 30 },
1449 { .tag = 0 },
1450 };
bfacbe3b
JO
1451 static struct parse_tag tags_time[] = {
1452 { .tag = 's', .mult = 1 },
1453 { .tag = 'm', .mult = 60 },
1454 { .tag = 'h', .mult = 60*60 },
1455 { .tag = 'd', .mult = 60*60*24 },
1456 { .tag = 0 },
1457 };
dc0c6127 1458 unsigned long val;
cb4e1ebb
JO
1459
1460 if (!s->set)
1461 return 0;
1462
1463 if (!strcmp(s->str, "signal")) {
1464 s->signal = true;
1465 pr_debug("switch-output with SIGUSR2 signal\n");
dc0c6127
JO
1466 goto enabled;
1467 }
1468
1469 val = parse_tag_value(s->str, tags_size);
1470 if (val != (unsigned long) -1) {
1471 s->size = val;
1472 pr_debug("switch-output with %s size threshold\n", s->str);
1473 goto enabled;
cb4e1ebb
JO
1474 }
1475
bfacbe3b
JO
1476 val = parse_tag_value(s->str, tags_time);
1477 if (val != (unsigned long) -1) {
1478 s->time = val;
1479 pr_debug("switch-output with %s time threshold (%lu seconds)\n",
1480 s->str, s->time);
1481 goto enabled;
1482 }
1483
cb4e1ebb 1484 return -1;
dc0c6127
JO
1485
1486enabled:
1487 rec->timestamp_filename = true;
1488 s->enabled = true;
0c582449
JO
1489
1490 if (s->size && !rec->opts.no_buffering)
1491 switch_output_size_warn(rec);
1492
dc0c6127 1493 return 0;
cb4e1ebb
JO
1494}
1495
e5b2c207 1496static const char * const __record_usage[] = {
9e096753
MG
1497 "perf record [<options>] [<command>]",
1498 "perf record [<options>] -- <command> [<options>]",
0e9b20b8
IM
1499 NULL
1500};
e5b2c207 1501const char * const *record_usage = __record_usage;
0e9b20b8 1502
d20deb64 1503/*
8c6f45a7
ACM
1504 * XXX Ideally would be local to cmd_record() and passed to a record__new
1505 * because we need to have access to it in record__exit, that is called
d20deb64
ACM
1506 * after cmd_record() exits, but since record_options need to be accessible to
1507 * builtin-script, leave it here.
1508 *
1509 * At least we don't ouch it in all the other functions here directly.
1510 *
1511 * Just say no to tons of global variables, sigh.
1512 */
8c6f45a7 1513static struct record record = {
d20deb64 1514 .opts = {
8affc2b8 1515 .sample_time = true,
d20deb64
ACM
1516 .mmap_pages = UINT_MAX,
1517 .user_freq = UINT_MAX,
1518 .user_interval = ULLONG_MAX,
447a6013 1519 .freq = 4000,
d1cb9fce
NK
1520 .target = {
1521 .uses_mmap = true,
3aa5939d 1522 .default_per_cpu = true,
d1cb9fce 1523 },
9d9cad76 1524 .proc_map_timeout = 500,
d20deb64 1525 },
e3d59112
NK
1526 .tool = {
1527 .sample = process_sample_event,
1528 .fork = perf_event__process_fork,
cca8482c 1529 .exit = perf_event__process_exit,
e3d59112 1530 .comm = perf_event__process_comm,
f3b3614a 1531 .namespaces = perf_event__process_namespaces,
e3d59112
NK
1532 .mmap = perf_event__process_mmap,
1533 .mmap2 = perf_event__process_mmap2,
cca8482c 1534 .ordered_events = true,
e3d59112 1535 },
d20deb64 1536};
7865e817 1537
/* Help text for --call-graph, built from the generic callchain help string. */
const char record_callchain_help[] = CALLCHAIN_RECORD_HELP
	"\n\t\t\t\tDefault: fp";

/* Set by --dry-run: parse and validate options, then bail out early. */
static bool dry_run;
d20deb64
ACM
1543/*
1544 * XXX Will stay a global variable till we fix builtin-script.c to stop messing
1545 * with it and switch to use the library functions in perf_evlist that came
b4006796 1546 * from builtin-record.c, i.e. use record_opts,
d20deb64
ACM
1547 * perf_evlist__prepare_workload, etc instead of fork+exec'in 'perf record',
1548 * using pipes, etc.
1549 */
efd21307 1550static struct option __record_options[] = {
d20deb64 1551 OPT_CALLBACK('e', "event", &record.evlist, "event",
86847b62 1552 "event selector. use 'perf list' to list available events",
f120f9d5 1553 parse_events_option),
d20deb64 1554 OPT_CALLBACK(0, "filter", &record.evlist, "filter",
c171b552 1555 "event filter", parse_filter),
4ba1faa1
WN
1556 OPT_CALLBACK_NOOPT(0, "exclude-perf", &record.evlist,
1557 NULL, "don't record events from perf itself",
1558 exclude_perf),
bea03405 1559 OPT_STRING('p', "pid", &record.opts.target.pid, "pid",
d6d901c2 1560 "record events on existing process id"),
bea03405 1561 OPT_STRING('t', "tid", &record.opts.target.tid, "tid",
d6d901c2 1562 "record events on existing thread id"),
d20deb64 1563 OPT_INTEGER('r', "realtime", &record.realtime_prio,
0e9b20b8 1564 "collect data with this RT SCHED_FIFO priority"),
509051ea 1565 OPT_BOOLEAN(0, "no-buffering", &record.opts.no_buffering,
acac03fa 1566 "collect data without buffering"),
d20deb64 1567 OPT_BOOLEAN('R', "raw-samples", &record.opts.raw_samples,
daac07b2 1568 "collect raw sample records from all opened counters"),
bea03405 1569 OPT_BOOLEAN('a', "all-cpus", &record.opts.target.system_wide,
0e9b20b8 1570 "system-wide collection from all CPUs"),
bea03405 1571 OPT_STRING('C', "cpu", &record.opts.target.cpu_list, "cpu",
c45c6ea2 1572 "list of cpus to monitor"),
d20deb64 1573 OPT_U64('c', "count", &record.opts.user_interval, "event period to sample"),
eae8ad80 1574 OPT_STRING('o', "output", &record.data.file.path, "file",
abaff32a 1575 "output file name"),
69e7e5b0
AH
1576 OPT_BOOLEAN_SET('i', "no-inherit", &record.opts.no_inherit,
1577 &record.opts.no_inherit_set,
1578 "child tasks do not inherit counters"),
4ea648ae
WN
1579 OPT_BOOLEAN(0, "tail-synthesize", &record.opts.tail_synthesize,
1580 "synthesize non-sample events at the end of output"),
626a6b78 1581 OPT_BOOLEAN(0, "overwrite", &record.opts.overwrite, "use overwrite mode"),
b09c2364
ACM
1582 OPT_BOOLEAN(0, "strict-freq", &record.opts.strict_freq,
1583 "Fail if the specified frequency can't be used"),
67230479
ACM
1584 OPT_CALLBACK('F', "freq", &record.opts, "freq or 'max'",
1585 "profile at this frequency",
1586 record__parse_freq),
e9db1310
AH
1587 OPT_CALLBACK('m', "mmap-pages", &record.opts, "pages[,pages]",
1588 "number of mmap data pages and AUX area tracing mmap pages",
1589 record__parse_mmap_pages),
d20deb64 1590 OPT_BOOLEAN(0, "group", &record.opts.group,
43bece79 1591 "put the counters into a counter group"),
2ddd5c04 1592 OPT_CALLBACK_NOOPT('g', NULL, &callchain_param,
09b0fd45
JO
1593 NULL, "enables call-graph recording" ,
1594 &record_callchain_opt),
1595 OPT_CALLBACK(0, "call-graph", &record.opts,
76a26549 1596 "record_mode[,record_size]", record_callchain_help,
09b0fd45 1597 &record_parse_callchain_opt),
c0555642 1598 OPT_INCR('v', "verbose", &verbose,
3da297a6 1599 "be more verbose (show counter open errors, etc)"),
b44308f5 1600 OPT_BOOLEAN('q', "quiet", &quiet, "don't print any message"),
d20deb64 1601 OPT_BOOLEAN('s', "stat", &record.opts.inherit_stat,
649c48a9 1602 "per thread counts"),
56100321 1603 OPT_BOOLEAN('d', "data", &record.opts.sample_address, "Record the sample addresses"),
3b0a5daa
KL
1604 OPT_BOOLEAN(0, "phys-data", &record.opts.sample_phys_addr,
1605 "Record the sample physical addresses"),
b6f35ed7 1606 OPT_BOOLEAN(0, "sample-cpu", &record.opts.sample_cpu, "Record the sample cpu"),
3abebc55
AH
1607 OPT_BOOLEAN_SET('T', "timestamp", &record.opts.sample_time,
1608 &record.opts.sample_time_set,
1609 "Record the sample timestamps"),
f290aa1f
JO
1610 OPT_BOOLEAN_SET('P', "period", &record.opts.period, &record.opts.period_set,
1611 "Record the sample period"),
d20deb64 1612 OPT_BOOLEAN('n', "no-samples", &record.opts.no_samples,
649c48a9 1613 "don't sample"),
d2db9a98
WN
1614 OPT_BOOLEAN_SET('N', "no-buildid-cache", &record.no_buildid_cache,
1615 &record.no_buildid_cache_set,
1616 "do not update the buildid cache"),
1617 OPT_BOOLEAN_SET('B', "no-buildid", &record.no_buildid,
1618 &record.no_buildid_set,
1619 "do not collect buildids in perf.data"),
d20deb64 1620 OPT_CALLBACK('G', "cgroup", &record.evlist, "name",
023695d9
SE
1621 "monitor event in cgroup name only",
1622 parse_cgroups),
a6205a35 1623 OPT_UINTEGER('D', "delay", &record.opts.initial_delay,
6619a53e 1624 "ms to wait before starting measurement after program start"),
bea03405
NK
1625 OPT_STRING('u', "uid", &record.opts.target.uid_str, "user",
1626 "user to profile"),
a5aabdac
SE
1627
1628 OPT_CALLBACK_NOOPT('b', "branch-any", &record.opts.branch_stack,
1629 "branch any", "sample any taken branches",
1630 parse_branch_stack),
1631
1632 OPT_CALLBACK('j', "branch-filter", &record.opts.branch_stack,
1633 "branch filter mask", "branch stack filter modes",
bdfebd84 1634 parse_branch_stack),
05484298
AK
1635 OPT_BOOLEAN('W', "weight", &record.opts.sample_weight,
1636 "sample by weight (on special events only)"),
475eeab9
AK
1637 OPT_BOOLEAN(0, "transaction", &record.opts.sample_transaction,
1638 "sample transaction flags (special events only)"),
3aa5939d
AH
1639 OPT_BOOLEAN(0, "per-thread", &record.opts.target.per_thread,
1640 "use per-thread mmaps"),
bcc84ec6
SE
1641 OPT_CALLBACK_OPTARG('I', "intr-regs", &record.opts.sample_intr_regs, NULL, "any register",
1642 "sample selected machine registers on interrupt,"
1643 " use -I ? to list register names", parse_regs),
84c41742
AK
1644 OPT_CALLBACK_OPTARG(0, "user-regs", &record.opts.sample_user_regs, NULL, "any register",
1645 "sample selected machine registers on interrupt,"
1646 " use -I ? to list register names", parse_regs),
85c273d2
AK
1647 OPT_BOOLEAN(0, "running-time", &record.opts.running_time,
1648 "Record running/enabled time of read (:S) events"),
814c8c38
PZ
1649 OPT_CALLBACK('k', "clockid", &record.opts,
1650 "clockid", "clockid to use for events, see clock_gettime()",
1651 parse_clockid),
2dd6d8a1
AH
1652 OPT_STRING_OPTARG('S', "snapshot", &record.opts.auxtrace_snapshot_opts,
1653 "opts", "AUX area tracing Snapshot Mode", ""),
9d9cad76
KL
1654 OPT_UINTEGER(0, "proc-map-timeout", &record.opts.proc_map_timeout,
1655 "per thread proc mmap processing timeout in ms"),
f3b3614a
HB
1656 OPT_BOOLEAN(0, "namespaces", &record.opts.record_namespaces,
1657 "Record namespaces events"),
b757bb09
AH
1658 OPT_BOOLEAN(0, "switch-events", &record.opts.record_switch_events,
1659 "Record context switch events"),
85723885
JO
1660 OPT_BOOLEAN_FLAG(0, "all-kernel", &record.opts.all_kernel,
1661 "Configure all used events to run in kernel space.",
1662 PARSE_OPT_EXCLUSIVE),
1663 OPT_BOOLEAN_FLAG(0, "all-user", &record.opts.all_user,
1664 "Configure all used events to run in user space.",
1665 PARSE_OPT_EXCLUSIVE),
71dc2326
WN
1666 OPT_STRING(0, "clang-path", &llvm_param.clang_path, "clang path",
1667 "clang binary to use for compiling BPF scriptlets"),
1668 OPT_STRING(0, "clang-opt", &llvm_param.clang_opt, "clang options",
1669 "options passed to clang when compiling BPF scriptlets"),
7efe0e03
HK
1670 OPT_STRING(0, "vmlinux", &symbol_conf.vmlinux_name,
1671 "file", "vmlinux pathname"),
6156681b
NK
1672 OPT_BOOLEAN(0, "buildid-all", &record.buildid_all,
1673 "Record build-id of all DSOs regardless of hits"),
ecfd7a9c
WN
1674 OPT_BOOLEAN(0, "timestamp-filename", &record.timestamp_filename,
1675 "append timestamp to output filename"),
68588baf
JY
1676 OPT_BOOLEAN(0, "timestamp-boundary", &record.timestamp_boundary,
1677 "Record timestamp boundary (time of first/last samples)"),
cb4e1ebb 1678 OPT_STRING_OPTARG_SET(0, "switch-output", &record.switch_output.str,
bfacbe3b
JO
1679 &record.switch_output.set, "signal,size,time",
1680 "Switch output when receive SIGUSR2 or cross size,time threshold",
dc0c6127 1681 "signal"),
0aab2136
WN
1682 OPT_BOOLEAN(0, "dry-run", &dry_run,
1683 "Parse options then exit"),
0e9b20b8
IM
1684 OPT_END()
1685};
1686
/* Exported (non-static) so builtin-script.c can reuse the option table. */
struct option *record_options = __record_options;
1688
b0ad8ea6 1689int cmd_record(int argc, const char **argv)
0e9b20b8 1690{
ef149c25 1691 int err;
8c6f45a7 1692 struct record *rec = &record;
16ad2ffb 1693 char errbuf[BUFSIZ];
0e9b20b8 1694
67230479
ACM
1695 setlocale(LC_ALL, "");
1696
48e1cab1
WN
1697#ifndef HAVE_LIBBPF_SUPPORT
1698# define set_nobuild(s, l, c) set_option_nobuild(record_options, s, l, "NO_LIBBPF=1", c)
1699 set_nobuild('\0', "clang-path", true);
1700 set_nobuild('\0', "clang-opt", true);
1701# undef set_nobuild
7efe0e03
HK
1702#endif
1703
1704#ifndef HAVE_BPF_PROLOGUE
1705# if !defined (HAVE_DWARF_SUPPORT)
1706# define REASON "NO_DWARF=1"
1707# elif !defined (HAVE_LIBBPF_SUPPORT)
1708# define REASON "NO_LIBBPF=1"
1709# else
1710# define REASON "this architecture doesn't support BPF prologue"
1711# endif
1712# define set_nobuild(s, l, c) set_option_nobuild(record_options, s, l, REASON, c)
1713 set_nobuild('\0', "vmlinux", true);
1714# undef set_nobuild
1715# undef REASON
48e1cab1
WN
1716#endif
1717
3e2be2da
ACM
1718 rec->evlist = perf_evlist__new();
1719 if (rec->evlist == NULL)
361c99a6
ACM
1720 return -ENOMEM;
1721
ecc4c561
ACM
1722 err = perf_config(perf_record_config, rec);
1723 if (err)
1724 return err;
eb853e80 1725
bca647aa 1726 argc = parse_options(argc, argv, record_options, record_usage,
655000e7 1727 PARSE_OPT_STOP_AT_NON_OPTION);
68ba3235
NK
1728 if (quiet)
1729 perf_quiet_option();
483635a9
JO
1730
1731 /* Make system wide (-a) the default target. */
602ad878 1732 if (!argc && target__none(&rec->opts.target))
483635a9 1733 rec->opts.target.system_wide = true;
0e9b20b8 1734
bea03405 1735 if (nr_cgroups && !rec->opts.target.system_wide) {
c7118369
NK
1736 usage_with_options_msg(record_usage, record_options,
1737 "cgroup monitoring only available in system-wide mode");
1738
023695d9 1739 }
b757bb09
AH
1740 if (rec->opts.record_switch_events &&
1741 !perf_can_record_switch_events()) {
c7118369
NK
1742 ui__error("kernel does not support recording context switch events\n");
1743 parse_options_usage(record_usage, record_options, "switch-events", 0);
1744 return -EINVAL;
b757bb09 1745 }
023695d9 1746
cb4e1ebb
JO
1747 if (switch_output_setup(rec)) {
1748 parse_options_usage(record_usage, record_options, "switch-output", 0);
1749 return -EINVAL;
1750 }
1751
bfacbe3b
JO
1752 if (rec->switch_output.time) {
1753 signal(SIGALRM, alarm_sig_handler);
1754 alarm(rec->switch_output.time);
1755 }
1756
1b36c03e
AH
1757 /*
1758 * Allow aliases to facilitate the lookup of symbols for address
1759 * filters. Refer to auxtrace_parse_filters().
1760 */
1761 symbol_conf.allow_aliases = true;
1762
1763 symbol__init(NULL);
1764
4b5ea3bd 1765 err = record__auxtrace_init(rec);
1b36c03e
AH
1766 if (err)
1767 goto out;
1768
0aab2136 1769 if (dry_run)
5c01ad60 1770 goto out;
0aab2136 1771
d7888573
WN
1772 err = bpf__setup_stdout(rec->evlist);
1773 if (err) {
1774 bpf__strerror_setup_stdout(rec->evlist, err, errbuf, sizeof(errbuf));
1775 pr_err("ERROR: Setup BPF stdout failed: %s\n",
1776 errbuf);
5c01ad60 1777 goto out;
d7888573
WN
1778 }
1779
ef149c25
AH
1780 err = -ENOMEM;
1781
6c443954 1782 if (symbol_conf.kptr_restrict && !perf_evlist__exclude_kernel(rec->evlist))
646aaea6
ACM
1783 pr_warning(
1784"WARNING: Kernel address maps (/proc/{kallsyms,modules}) are restricted,\n"
1785"check /proc/sys/kernel/kptr_restrict.\n\n"
1786"Samples in kernel functions may not be resolved if a suitable vmlinux\n"
1787"file is not found in the buildid cache or in the vmlinux path.\n\n"
1788"Samples in kernel modules won't be resolved at all.\n\n"
1789"If some relocation was applied (e.g. kexec) symbols may be misresolved\n"
1790"even with a suitable vmlinux or kallsyms file.\n\n");
ec80fde7 1791
0c1d46a8 1792 if (rec->no_buildid_cache || rec->no_buildid) {
a1ac1d3c 1793 disable_buildid_cache();
dc0c6127 1794 } else if (rec->switch_output.enabled) {
0c1d46a8
WN
1795 /*
1796 * In 'perf record --switch-output', disable buildid
1797 * generation by default to reduce data file switching
1798 * overhead. Still generate buildid if they are required
1799 * explicitly using
1800 *
60437ac0 1801 * perf record --switch-output --no-no-buildid \
0c1d46a8
WN
1802 * --no-no-buildid-cache
1803 *
1804 * Following code equals to:
1805 *
1806 * if ((rec->no_buildid || !rec->no_buildid_set) &&
1807 * (rec->no_buildid_cache || !rec->no_buildid_cache_set))
1808 * disable_buildid_cache();
1809 */
1810 bool disable = true;
1811
1812 if (rec->no_buildid_set && !rec->no_buildid)
1813 disable = false;
1814 if (rec->no_buildid_cache_set && !rec->no_buildid_cache)
1815 disable = false;
1816 if (disable) {
1817 rec->no_buildid = true;
1818 rec->no_buildid_cache = true;
1819 disable_buildid_cache();
1820 }
1821 }
655000e7 1822
4ea648ae
WN
1823 if (record.opts.overwrite)
1824 record.opts.tail_synthesize = true;
1825
3e2be2da 1826 if (rec->evlist->nr_entries == 0 &&
4b4cd503 1827 __perf_evlist__add_default(rec->evlist, !record.opts.no_samples) < 0) {
69aad6f1 1828 pr_err("Not enough memory for event selector list\n");
394c01ed 1829 goto out;
bbd36e5e 1830 }
0e9b20b8 1831
69e7e5b0
AH
1832 if (rec->opts.target.tid && !rec->opts.no_inherit_set)
1833 rec->opts.no_inherit = true;
1834
602ad878 1835 err = target__validate(&rec->opts.target);
16ad2ffb 1836 if (err) {
602ad878 1837 target__strerror(&rec->opts.target, err, errbuf, BUFSIZ);
c3dec27b 1838 ui__warning("%s\n", errbuf);
16ad2ffb
NK
1839 }
1840
602ad878 1841 err = target__parse_uid(&rec->opts.target);
16ad2ffb
NK
1842 if (err) {
1843 int saved_errno = errno;
4bd0f2d2 1844
602ad878 1845 target__strerror(&rec->opts.target, err, errbuf, BUFSIZ);
3780f488 1846 ui__error("%s", errbuf);
16ad2ffb
NK
1847
1848 err = -saved_errno;
394c01ed 1849 goto out;
16ad2ffb 1850 }
0d37aa34 1851
ca800068
MZ
1852 /* Enable ignoring missing threads when -u/-p option is defined. */
1853 rec->opts.ignore_missing_thread = rec->opts.target.uid != UINT_MAX || rec->opts.target.pid;
23dc4f15 1854
16ad2ffb 1855 err = -ENOMEM;
3e2be2da 1856 if (perf_evlist__create_maps(rec->evlist, &rec->opts.target) < 0)
dd7927f4 1857 usage_with_options(record_usage, record_options);
69aad6f1 1858
ef149c25
AH
1859 err = auxtrace_record__options(rec->itr, rec->evlist, &rec->opts);
1860 if (err)
394c01ed 1861 goto out;
ef149c25 1862
6156681b
NK
1863 /*
1864 * We take all buildids when the file contains
1865 * AUX area tracing data because we do not decode the
1866 * trace because it would take too long.
1867 */
1868 if (rec->opts.full_auxtrace)
1869 rec->buildid_all = true;
1870
b4006796 1871 if (record_opts__config(&rec->opts)) {
39d17dac 1872 err = -EINVAL;
394c01ed 1873 goto out;
7e4ff9e3
MG
1874 }
1875
d20deb64 1876 err = __cmd_record(&record, argc, argv);
394c01ed 1877out:
45604710 1878 perf_evlist__delete(rec->evlist);
d65a458b 1879 symbol__exit();
ef149c25 1880 auxtrace_record__free(rec->itr);
39d17dac 1881 return err;
0e9b20b8 1882}
/*
 * Signal handler shared by two features (presumably installed for SIGUSR2
 * elsewhere in this file — the --switch-output code refers to SIGUSR2):
 *  - AUX area snapshot mode: if the snapshot trigger is armed, mark it hit
 *    and start a snapshot; a failed start moves the trigger to the error
 *    state, which the main loop in __cmd_record() reports.
 *  - --switch-output=signal: mark the switch-output trigger hit so the
 *    main loop rotates to a new perf.data file.
 */
static void snapshot_sig_handler(int sig __maybe_unused)
{
	struct record *rec = &record;

	if (trigger_is_ready(&auxtrace_snapshot_trigger)) {
		trigger_hit(&auxtrace_snapshot_trigger);
		auxtrace_record__snapshot_started = 1;
		if (auxtrace_record__snapshot_start(record.itr))
			trigger_error(&auxtrace_snapshot_trigger);
	}

	if (switch_output_signal(rec))
		trigger_hit(&switch_output_trigger);
}
bfacbe3b
JO
1898
1899static void alarm_sig_handler(int sig __maybe_unused)
1900{
1901 struct record *rec = &record;
1902
1903 if (switch_output_time(rec))
1904 trigger_hit(&switch_output_trigger);
1905}