/*
 * builtin-record.c
 *
 * Builtin record command: Record the profile of a workload
 * (or a CPU, or a PID) into the perf.data output file - for
 * later analysis via perf report.
 */
#include "builtin.h"

#include "perf.h"

#include "util/build-id.h"
#include "util/util.h"
#include "util/parse-options.h"
#include "util/parse-events.h"

#include "util/callchain.h"
#include "util/cgroup.h"
#include "util/header.h"
#include "util/event.h"
#include "util/evlist.h"
#include "util/evsel.h"
#include "util/debug.h"
#include "util/session.h"
#include "util/tool.h"
#include "util/symbol.h"
#include "util/cpumap.h"
#include "util/thread_map.h"
#include "util/data.h"

#include <unistd.h>
#include <sched.h>
#include <sys/mman.h>

struct record {
	struct perf_tool	tool;
	struct record_opts	opts;
	u64			bytes_written;
	struct perf_data_file	file;
	struct perf_evlist	*evlist;
	struct perf_session	*session;
	const char		*progname;
	int			realtime_prio;
	bool			no_buildid;
	bool			no_buildid_cache;
	long			samples;
};

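/*
 * Append raw bytes to the perf.data output.  bytes_written is what
 * later sizes the data section in the file header and tells
 * record__mmap_read_all() whether a finished-round marker is needed.
 */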
static int record__write(struct record *rec, void *bf, size_t size)
{
	if (perf_data_file__write(rec->session->file, bf, size) < 0) {
		pr_err("failed to write perf data, error: %m\n");
		return -1;
	}

	rec->bytes_written += size;
	return 0;
}

static int process_synthesized_event(struct perf_tool *tool,
				     union perf_event *event,
				     struct perf_sample *sample __maybe_unused,
				     struct machine *machine __maybe_unused)
{
	struct record *rec = container_of(tool, struct record, tool);
	return record__write(rec, event, event->header.size);
}

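/*
 * Drain one mmap'ed ring buffer into the output file.  The kernel has
 * written events between our last read position ('old', cached in
 * md->prev) and 'head'; when that range wraps past the end of the
 * buffer it is copied out in two chunks, using md->mask to stay within
 * the power-of-two sized data area that starts one page after md->base.
 */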
static int record__mmap_read(struct record *rec, int idx)
{
	struct perf_mmap *md = &rec->evlist->mmap[idx];
	unsigned int head = perf_mmap__read_head(md);
	unsigned int old = md->prev;
	unsigned char *data = md->base + page_size;
	unsigned long size;
	void *buf;
	int rc = 0;

	if (old == head)
		return 0;

	rec->samples++;

	size = head - old;

	if ((old & md->mask) + size != (head & md->mask)) {
		buf = &data[old & md->mask];
		size = md->mask + 1 - (old & md->mask);
		old += size;

		if (record__write(rec, buf, size) < 0) {
			rc = -1;
			goto out;
		}
	}

	buf = &data[old & md->mask];
	size = head - old;
	old += size;

	if (record__write(rec, buf, size) < 0) {
		rc = -1;
		goto out;
	}

	md->prev = old;
	perf_evlist__mmap_consume(rec->evlist, idx);
out:
	return rc;
}

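/*
 * Flags shared with the signal handlers: 'done' ends the main event
 * loop, 'child_finished' notes that the forked workload has exited,
 * and 'signr' remembers a fatal signal so record__sig_exit() can
 * re-raise it once we are done writing perf.data.
 */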
static volatile int done = 0;
static volatile int signr = -1;
static volatile int child_finished = 0;

static void sig_handler(int sig)
{
	if (sig == SIGCHLD)
		child_finished = 1;
	else
		signr = sig;

	done = 1;
}

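/*
 * Registered via atexit() in __cmd_record(): restore the default
 * disposition and re-raise the signal that stopped us, so the caller
 * sees the real cause of death instead of a normal exit.
 */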
static void record__sig_exit(void)
{
	if (signr == -1)
		return;

	signal(signr, SIG_DFL);
	raise(signr);
}

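/*
 * Open every event in the evlist, retrying through
 * perf_evsel__fallback() when the initial open fails, then apply the
 * event filters and mmap the ring buffers.
 */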
static int record__open(struct record *rec)
{
	char msg[512];
	struct perf_evsel *pos;
	struct perf_evlist *evlist = rec->evlist;
	struct perf_session *session = rec->session;
	struct record_opts *opts = &rec->opts;
	int rc = 0;

	perf_evlist__config(evlist, opts);

	evlist__for_each(evlist, pos) {
try_again:
		if (perf_evsel__open(pos, evlist->cpus, evlist->threads) < 0) {
			if (perf_evsel__fallback(pos, errno, msg, sizeof(msg))) {
				if (verbose)
					ui__warning("%s\n", msg);
				goto try_again;
			}

			rc = -errno;
			perf_evsel__open_strerror(pos, &opts->target,
						  errno, msg, sizeof(msg));
			ui__error("%s\n", msg);
			goto out;
		}
	}

	if (perf_evlist__apply_filters(evlist)) {
		error("failed to set filter with %d (%s)\n", errno,
		      strerror_r(errno, msg, sizeof(msg)));
		rc = -1;
		goto out;
	}

	if (perf_evlist__mmap(evlist, opts->mmap_pages, false) < 0) {
		if (errno == EPERM) {
			pr_err("Permission error mapping pages.\n"
			       "Consider increasing "
			       "/proc/sys/kernel/perf_event_mlock_kb,\n"
			       "or try again with a smaller value of -m/--mmap_pages.\n"
			       "(current value: %u)\n", opts->mmap_pages);
			rc = -errno;
		} else {
			pr_err("failed to mmap with %d (%s)\n", errno,
			       strerror_r(errno, msg, sizeof(msg)));
			rc = -errno;
		}
		goto out;
	}

	session->evlist = evlist;
	perf_session__set_id_hdr_size(session);
out:
	return rc;
}

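/*
 * Walk the data we just recorded with build_id__mark_dso_hit_ops, so
 * that only the DSOs that were actually hit end up with their
 * build-ids in the header.
 */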
static int process_buildids(struct record *rec)
{
	struct perf_data_file *file = &rec->file;
	struct perf_session *session = rec->session;
	u64 start = session->header.data_offset;

	u64 size = lseek(file->fd, 0, SEEK_CUR);
	if (size == 0)
		return 0;

	return __perf_session__process_events(session, start,
					      size - start,
					      size, &build_id__mark_dso_hit_ops);
}

static void perf_event__synthesize_guest_os(struct machine *machine, void *data)
{
	int err;
	struct perf_tool *tool = data;
	/*
	 * As for guest kernels, when processing the record & report
	 * subcommands we synthesize the module mmaps before the guest
	 * kernel mmap and trigger a DSO preload, because by default
	 * guest module symbols are loaded from guest kallsyms instead
	 * of /lib/modules/XXX/XXX.  This avoids missing symbols when
	 * the first sampled address falls in a module rather than in
	 * the guest kernel itself.
	 */
	err = perf_event__synthesize_modules(tool, process_synthesized_event,
					     machine);
	if (err < 0)
		pr_err("Couldn't record guest kernel [%d]'s reference"
		       " relocation symbol.\n", machine->pid);

	/*
	 * We use _stext for the guest kernel because the guest kernel's
	 * /proc/kallsyms sometimes has no _text symbol.
	 */
	err = perf_event__synthesize_kernel_mmap(tool, process_synthesized_event,
						 machine);
	if (err < 0)
		pr_err("Couldn't record guest kernel [%d]'s reference"
		       " relocation symbol.\n", machine->pid);
}

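/*
 * PERF_RECORD_FINISHED_ROUND is a synthetic, header-only event:
 * everything written before it is known to be older than anything
 * written after it, which is what lets the consumer flush and reorder
 * events in bounded batches.
 */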
static struct perf_event_header finished_round_event = {
	.size = sizeof(struct perf_event_header),
	.type = PERF_RECORD_FINISHED_ROUND,
};

static int record__mmap_read_all(struct record *rec)
{
	u64 bytes_written = rec->bytes_written;
	int i;
	int rc = 0;

	for (i = 0; i < rec->evlist->nr_mmaps; i++) {
		if (rec->evlist->mmap[i].base) {
			if (record__mmap_read(rec, i) != 0) {
				rc = -1;
				goto out;
			}
		}
	}

	/*
	 * Mark the round finished in case we wrote
	 * at least one event.
	 */
	if (bytes_written != rec->bytes_written)
		rc = record__write(rec, &finished_round_event, sizeof(finished_round_event));

out:
	return rc;
}

static void record__init_features(struct record *rec)
{
	struct perf_session *session = rec->session;
	int feat;

	for (feat = HEADER_FIRST_FEATURE; feat < HEADER_LAST_FEATURE; feat++)
		perf_header__set_feat(&session->header, feat);

	if (rec->no_buildid)
		perf_header__clear_feat(&session->header, HEADER_BUILD_ID);

	if (!have_tracepoints(&rec->evlist->entries))
		perf_header__clear_feat(&session->header, HEADER_TRACING_DATA);

	if (!rec->opts.branch_stack)
		perf_header__clear_feat(&session->header, HEADER_BRANCH_STACK);
}

static volatile int workload_exec_errno;

/*
 * perf_evlist__prepare_workload will send a SIGUSR1 if the fork fails,
 * since we asked for it by setting its want_signal to true.
 */
static void workload_exec_failed_signal(int signo __maybe_unused,
					siginfo_t *info,
					void *ucontext __maybe_unused)
{
	workload_exec_errno = info->si_value.sival_int;
	done = 1;
	child_finished = 1;
}

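/*
 * The actual recording session: set up the perf_session and output
 * header, synthesize the existing kernel/module/thread state, enable
 * the events (and kick off the forked workload, if any), then loop
 * draining the mmap buffers until told to stop, and finally fix up
 * the header with the real data size and build-ids.
 */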
static int __cmd_record(struct record *rec, int argc, const char **argv)
{
	int err;
	int status = 0;
	unsigned long waking = 0;
	const bool forks = argc > 0;
	struct machine *machine;
	struct perf_tool *tool = &rec->tool;
	struct record_opts *opts = &rec->opts;
	struct perf_data_file *file = &rec->file;
	struct perf_session *session;
	bool disabled = false, draining = false;

	rec->progname = argv[0];

	atexit(record__sig_exit);
	signal(SIGCHLD, sig_handler);
	signal(SIGINT, sig_handler);
	signal(SIGTERM, sig_handler);

	session = perf_session__new(file, false, NULL);
	if (session == NULL) {
		pr_err("Perf session creation failed.\n");
		return -1;
	}

	rec->session = session;

	record__init_features(rec);

	if (forks) {
		err = perf_evlist__prepare_workload(rec->evlist, &opts->target,
						    argv, file->is_pipe,
						    workload_exec_failed_signal);
		if (err < 0) {
			pr_err("Couldn't run the workload!\n");
			status = err;
			goto out_delete_session;
		}
	}

	if (record__open(rec) != 0) {
		err = -1;
		goto out_child;
	}

	if (!rec->evlist->nr_groups)
		perf_header__clear_feat(&session->header, HEADER_GROUP_DESC);

	if (file->is_pipe) {
		err = perf_header__write_pipe(file->fd);
		if (err < 0)
			goto out_child;
	} else {
		err = perf_session__write_header(session, rec->evlist,
						 file->fd, false);
		if (err < 0)
			goto out_child;
	}

	if (!rec->no_buildid
	    && !perf_header__has_feat(&session->header, HEADER_BUILD_ID)) {
		pr_err("Couldn't generate buildids. "
		       "Use --no-buildid to profile anyway.\n");
		err = -1;
		goto out_child;
	}

	machine = &session->machines.host;

	if (file->is_pipe) {
		err = perf_event__synthesize_attrs(tool, session,
						   process_synthesized_event);
		if (err < 0) {
			pr_err("Couldn't synthesize attrs.\n");
			goto out_child;
		}

		if (have_tracepoints(&rec->evlist->entries)) {
			/*
			 * FIXME err <= 0 here actually means that
			 * there were no tracepoints so it's not really
			 * an error, just that we don't need to
			 * synthesize anything.  We really have to
			 * return this more properly and also
			 * propagate errors that now are calling die()
			 */
			err = perf_event__synthesize_tracing_data(tool, file->fd, rec->evlist,
								  process_synthesized_event);
			if (err <= 0) {
				pr_err("Couldn't record tracing data.\n");
				goto out_child;
			}
			rec->bytes_written += err;
		}
	}

	err = perf_event__synthesize_kernel_mmap(tool, process_synthesized_event,
						 machine);
	if (err < 0)
		pr_err("Couldn't record kernel reference relocation symbol\n"
		       "Symbol resolution may be skewed if relocation was used (e.g. kexec).\n"
		       "Check /proc/kallsyms permission or run as root.\n");

	err = perf_event__synthesize_modules(tool, process_synthesized_event,
					     machine);
	if (err < 0)
		pr_err("Couldn't record kernel module information.\n"
		       "Symbol resolution may be skewed if relocation was used (e.g. kexec).\n"
		       "Check /proc/modules permission or run as root.\n");

	if (perf_guest) {
		machines__process_guests(&session->machines,
					 perf_event__synthesize_guest_os, tool);
	}

	err = __machine__synthesize_threads(machine, tool, &opts->target, rec->evlist->threads,
					    process_synthesized_event, opts->sample_address);
	if (err != 0)
		goto out_child;

	if (rec->realtime_prio) {
		struct sched_param param;

		param.sched_priority = rec->realtime_prio;
		if (sched_setscheduler(0, SCHED_FIFO, &param)) {
			pr_err("Could not set realtime priority.\n");
			err = -1;
			goto out_child;
		}
	}

	/*
	 * When perf is starting the traced process, all the events
	 * (apart from group members) have enable_on_exec=1 set,
	 * so don't spoil it by prematurely enabling them.
	 */
	if (!target__none(&opts->target) && !opts->initial_delay)
		perf_evlist__enable(rec->evlist);

	/*
	 * Let the child rip
	 */
	if (forks)
		perf_evlist__start_workload(rec->evlist);

	if (opts->initial_delay) {
		usleep(opts->initial_delay * 1000);
		perf_evlist__enable(rec->evlist);
	}

	for (;;) {
		int hits = rec->samples;

		if (record__mmap_read_all(rec) < 0) {
			err = -1;
			goto out_child;
		}

		if (hits == rec->samples) {
			if (done || draining)
				break;
			err = perf_evlist__poll(rec->evlist, -1);
			/*
			 * Propagate error, only if there's any. Ignore positive
			 * number of returned events and interrupt error.
			 */
			if (err > 0 || (err < 0 && errno == EINTR))
				err = 0;
			waking++;

			if (perf_evlist__filter_pollfd(rec->evlist, POLLERR | POLLHUP) == 0)
				draining = true;
		}

		/*
		 * When perf is starting the traced process, at the end events
		 * die with the process and we wait for that. Thus no need to
		 * disable events in this case.
		 */
		if (done && !disabled && !target__none(&opts->target)) {
			perf_evlist__disable(rec->evlist);
			disabled = true;
		}
	}

	if (forks && workload_exec_errno) {
		char msg[STRERR_BUFSIZE];
		const char *emsg = strerror_r(workload_exec_errno, msg, sizeof(msg));
		pr_err("Workload failed: %s\n", emsg);
		err = -1;
		goto out_child;
	}

	if (!quiet) {
		fprintf(stderr, "[ perf record: Woken up %ld times to write data ]\n", waking);

		/*
		 * Approximate RIP event size: 24 bytes.
		 */
		fprintf(stderr,
			"[ perf record: Captured and wrote %.3f MB %s (~%" PRIu64 " samples) ]\n",
			(double)rec->bytes_written / 1024.0 / 1024.0,
			file->path,
			rec->bytes_written / 24);
	}

out_child:
	if (forks) {
		int exit_status;

		if (!child_finished)
			kill(rec->evlist->workload.pid, SIGTERM);

		wait(&exit_status);

		if (err < 0)
			status = err;
		else if (WIFEXITED(exit_status))
			status = WEXITSTATUS(exit_status);
		else if (WIFSIGNALED(exit_status))
			signr = WTERMSIG(exit_status);
	} else
		status = err;

	if (!err && !file->is_pipe) {
		rec->session->header.data_size += rec->bytes_written;

		if (!rec->no_buildid)
			process_buildids(rec);
		perf_session__write_header(rec->session, rec->evlist,
					   file->fd, true);
	}

out_delete_session:
	perf_session__delete(session);
	return status;
}

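/*
 * -b/--branch-any and -j/--branch-filter share parse_branch_stack():
 * the argument is a comma separated list of the branch_modes[] names
 * below, and when only a privilege level (u, k, hv) is given the
 * filter defaults to PERF_SAMPLE_BRANCH_ANY.
 */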
#define BRANCH_OPT(n, m) \
	{ .name = n, .mode = (m) }

#define BRANCH_END { .name = NULL }

struct branch_mode {
	const char *name;
	int mode;
};

static const struct branch_mode branch_modes[] = {
	BRANCH_OPT("u", PERF_SAMPLE_BRANCH_USER),
	BRANCH_OPT("k", PERF_SAMPLE_BRANCH_KERNEL),
	BRANCH_OPT("hv", PERF_SAMPLE_BRANCH_HV),
	BRANCH_OPT("any", PERF_SAMPLE_BRANCH_ANY),
	BRANCH_OPT("any_call", PERF_SAMPLE_BRANCH_ANY_CALL),
	BRANCH_OPT("any_ret", PERF_SAMPLE_BRANCH_ANY_RETURN),
	BRANCH_OPT("ind_call", PERF_SAMPLE_BRANCH_IND_CALL),
	BRANCH_OPT("abort_tx", PERF_SAMPLE_BRANCH_ABORT_TX),
	BRANCH_OPT("in_tx", PERF_SAMPLE_BRANCH_IN_TX),
	BRANCH_OPT("no_tx", PERF_SAMPLE_BRANCH_NO_TX),
	BRANCH_OPT("cond", PERF_SAMPLE_BRANCH_COND),
	BRANCH_END
};

static int
parse_branch_stack(const struct option *opt, const char *str, int unset)
{
#define ONLY_PLM \
	(PERF_SAMPLE_BRANCH_USER	|\
	 PERF_SAMPLE_BRANCH_KERNEL	|\
	 PERF_SAMPLE_BRANCH_HV)

	uint64_t *mode = (uint64_t *)opt->value;
	const struct branch_mode *br;
	char *s, *os = NULL, *p;
	int ret = -1;

	if (unset)
		return 0;

	/*
	 * cannot set it twice, -b + --branch-filter for instance
	 */
	if (*mode)
		return -1;

	/* str may be NULL in case no arg is passed to -b */
	if (str) {
		/* because str is read-only */
		s = os = strdup(str);
		if (!s)
			return -1;

		for (;;) {
			p = strchr(s, ',');
			if (p)
				*p = '\0';

			for (br = branch_modes; br->name; br++) {
				if (!strcasecmp(s, br->name))
					break;
			}
			if (!br->name) {
				ui__warning("unknown branch filter %s,"
					    " check man page\n", s);
				goto error;
			}

			*mode |= br->mode;

			if (!p)
				break;

			s = p + 1;
		}
	}
	ret = 0;

	/* default to any branch */
	if ((*mode & ~ONLY_PLM) == 0) {
		*mode = PERF_SAMPLE_BRANCH_ANY;
	}
error:
	free(os);
	return ret;
}

static void callchain_debug(void)
{
	static const char *str[CALLCHAIN_MAX] = { "NONE", "FP", "DWARF" };

	pr_debug("callchain: type %s\n", str[callchain_param.record_mode]);

	if (callchain_param.record_mode == CALLCHAIN_DWARF)
		pr_debug("callchain: stack dump size %d\n",
			 callchain_param.dump_size);
}

int record_parse_callchain_opt(const struct option *opt __maybe_unused,
			       const char *arg,
			       int unset)
{
	int ret;

	callchain_param.enabled = !unset;

	/* --no-call-graph */
	if (unset) {
		callchain_param.record_mode = CALLCHAIN_NONE;
		pr_debug("callchain: disabled\n");
		return 0;
	}

	ret = parse_callchain_record_opt(arg);
	if (!ret)
		callchain_debug();

	return ret;
}

int record_callchain_opt(const struct option *opt __maybe_unused,
			 const char *arg __maybe_unused,
			 int unset __maybe_unused)
{
	callchain_param.enabled = true;

	if (callchain_param.record_mode == CALLCHAIN_NONE)
		callchain_param.record_mode = CALLCHAIN_FP;

	callchain_debug();
	return 0;
}

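/*
 * perfconfig hook: the legacy "record.call-graph" key is remapped to
 * "call-graph.record-mode" so both spellings configure the same knob
 * before falling through to the default handling.
 */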
static int perf_record_config(const char *var, const char *value, void *cb)
{
	if (!strcmp(var, "record.call-graph"))
		var = "call-graph.record-mode"; /* fall-through */

	return perf_default_config(var, value, cb);
}

static const char * const record_usage[] = {
	"perf record [<options>] [<command>]",
	"perf record [<options>] -- <command> [<options>]",
	NULL
};

/*
 * XXX Ideally would be local to cmd_record() and passed to a record__new
 * because we need to have access to it in record__exit, that is called
 * after cmd_record() exits, but since record_options need to be accessible to
 * builtin-script, leave it here.
 *
 * At least we don't touch it in all the other functions here directly.
 *
 * Just say no to tons of global variables, sigh.
 */
static struct record record = {
	.opts = {
		.sample_time	     = true,
		.mmap_pages	     = UINT_MAX,
		.user_freq	     = UINT_MAX,
		.user_interval	     = ULLONG_MAX,
		.freq		     = 4000,
		.target		     = {
			.uses_mmap   = true,
			.default_per_cpu = true,
		},
	},
};

#define CALLCHAIN_HELP "setup and enable call-graph (stack chain/backtrace) recording: "

#ifdef HAVE_DWARF_UNWIND_SUPPORT
const char record_callchain_help[] = CALLCHAIN_HELP "fp dwarf";
#else
const char record_callchain_help[] = CALLCHAIN_HELP "fp";
#endif

/*
 * XXX Will stay a global variable till we fix builtin-script.c to stop messing
 * with it and switch to use the library functions in perf_evlist that came
 * from builtin-record.c, i.e. use record_opts,
 * perf_evlist__prepare_workload, etc instead of fork+exec'in 'perf record',
 * using pipes, etc.
 */
const struct option record_options[] = {
	OPT_CALLBACK('e', "event", &record.evlist, "event",
		     "event selector. use 'perf list' to list available events",
		     parse_events_option),
	OPT_CALLBACK(0, "filter", &record.evlist, "filter",
		     "event filter", parse_filter),
	OPT_STRING('p', "pid", &record.opts.target.pid, "pid",
		   "record events on existing process id"),
	OPT_STRING('t', "tid", &record.opts.target.tid, "tid",
		   "record events on existing thread id"),
	OPT_INTEGER('r', "realtime", &record.realtime_prio,
		    "collect data with this RT SCHED_FIFO priority"),
	OPT_BOOLEAN(0, "no-buffering", &record.opts.no_buffering,
		    "collect data without buffering"),
	OPT_BOOLEAN('R', "raw-samples", &record.opts.raw_samples,
		    "collect raw sample records from all opened counters"),
	OPT_BOOLEAN('a', "all-cpus", &record.opts.target.system_wide,
		    "system-wide collection from all CPUs"),
	OPT_STRING('C', "cpu", &record.opts.target.cpu_list, "cpu",
		   "list of cpus to monitor"),
	OPT_U64('c', "count", &record.opts.user_interval, "event period to sample"),
	OPT_STRING('o', "output", &record.file.path, "file",
		   "output file name"),
	OPT_BOOLEAN_SET('i', "no-inherit", &record.opts.no_inherit,
			&record.opts.no_inherit_set,
			"child tasks do not inherit counters"),
	OPT_UINTEGER('F', "freq", &record.opts.user_freq, "profile at this frequency"),
	OPT_CALLBACK('m', "mmap-pages", &record.opts.mmap_pages, "pages",
		     "number of mmap data pages",
		     perf_evlist__parse_mmap_pages),
	OPT_BOOLEAN(0, "group", &record.opts.group,
		    "put the counters into a counter group"),
	OPT_CALLBACK_NOOPT('g', NULL, &record.opts,
			   NULL, "enables call-graph recording",
			   &record_callchain_opt),
	OPT_CALLBACK(0, "call-graph", &record.opts,
		     "mode[,dump_size]", record_callchain_help,
		     &record_parse_callchain_opt),
	OPT_INCR('v', "verbose", &verbose,
		 "be more verbose (show counter open errors, etc)"),
	OPT_BOOLEAN('q', "quiet", &quiet, "don't print any message"),
	OPT_BOOLEAN('s', "stat", &record.opts.inherit_stat,
		    "per thread counts"),
	OPT_BOOLEAN('d', "data", &record.opts.sample_address,
		    "Sample addresses"),
	OPT_BOOLEAN('T', "timestamp", &record.opts.sample_time, "Sample timestamps"),
	OPT_BOOLEAN('P', "period", &record.opts.period, "Sample period"),
	OPT_BOOLEAN('n', "no-samples", &record.opts.no_samples,
		    "don't sample"),
	OPT_BOOLEAN('N', "no-buildid-cache", &record.no_buildid_cache,
		    "do not update the buildid cache"),
	OPT_BOOLEAN('B', "no-buildid", &record.no_buildid,
		    "do not collect buildids in perf.data"),
	OPT_CALLBACK('G', "cgroup", &record.evlist, "name",
		     "monitor event in cgroup name only",
		     parse_cgroups),
	OPT_UINTEGER('D', "delay", &record.opts.initial_delay,
		     "ms to wait before starting measurement after program start"),
	OPT_STRING('u', "uid", &record.opts.target.uid_str, "user",
		   "user to profile"),

	OPT_CALLBACK_NOOPT('b', "branch-any", &record.opts.branch_stack,
			   "branch any", "sample any taken branches",
			   parse_branch_stack),

	OPT_CALLBACK('j', "branch-filter", &record.opts.branch_stack,
		     "branch filter mask", "branch stack filter modes",
		     parse_branch_stack),
	OPT_BOOLEAN('W', "weight", &record.opts.sample_weight,
		    "sample by weight (on special events only)"),
	OPT_BOOLEAN(0, "transaction", &record.opts.sample_transaction,
		    "sample transaction flags (special events only)"),
	OPT_BOOLEAN(0, "per-thread", &record.opts.target.per_thread,
		    "use per-thread mmaps"),
	OPT_END()
};

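/*
 * Entry point for 'perf record': parse the options, validate and
 * resolve the target (pid/tid/cpu/uid), create the evlist maps and
 * then hand the real work over to __cmd_record().
 */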
int cmd_record(int argc, const char **argv, const char *prefix __maybe_unused)
{
	int err = -ENOMEM;
	struct record *rec = &record;
	char errbuf[BUFSIZ];

	rec->evlist = perf_evlist__new();
	if (rec->evlist == NULL)
		return -ENOMEM;

	perf_config(perf_record_config, rec);

	argc = parse_options(argc, argv, record_options, record_usage,
			     PARSE_OPT_STOP_AT_NON_OPTION);
	if (!argc && target__none(&rec->opts.target))
		usage_with_options(record_usage, record_options);

	if (nr_cgroups && !rec->opts.target.system_wide) {
		ui__error("cgroup monitoring only available in"
			  " system-wide mode\n");
		usage_with_options(record_usage, record_options);
	}

	symbol__init(NULL);

	if (symbol_conf.kptr_restrict)
		pr_warning(
"WARNING: Kernel address maps (/proc/{kallsyms,modules}) are restricted,\n"
"check /proc/sys/kernel/kptr_restrict.\n\n"
"Samples in kernel functions may not be resolved if a suitable vmlinux\n"
"file is not found in the buildid cache or in the vmlinux path.\n\n"
"Samples in kernel modules won't be resolved at all.\n\n"
"If some relocation was applied (e.g. kexec) symbols may be misresolved\n"
"even with a suitable vmlinux or kallsyms file.\n\n");

	if (rec->no_buildid_cache || rec->no_buildid)
		disable_buildid_cache();

	if (rec->evlist->nr_entries == 0 &&
	    perf_evlist__add_default(rec->evlist) < 0) {
		pr_err("Not enough memory for event selector list\n");
		goto out_symbol_exit;
	}

	if (rec->opts.target.tid && !rec->opts.no_inherit_set)
		rec->opts.no_inherit = true;

	err = target__validate(&rec->opts.target);
	if (err) {
		target__strerror(&rec->opts.target, err, errbuf, BUFSIZ);
		ui__warning("%s", errbuf);
	}

	err = target__parse_uid(&rec->opts.target);
	if (err) {
		int saved_errno = errno;

		target__strerror(&rec->opts.target, err, errbuf, BUFSIZ);
		ui__error("%s", errbuf);

		err = -saved_errno;
		goto out_symbol_exit;
	}

	err = -ENOMEM;
	if (perf_evlist__create_maps(rec->evlist, &rec->opts.target) < 0)
		usage_with_options(record_usage, record_options);

	if (record_opts__config(&rec->opts)) {
		err = -EINVAL;
		goto out_symbol_exit;
	}

	err = __cmd_record(&record, argc, argv);
out_symbol_exit:
	perf_evlist__delete(rec->evlist);
	symbol__exit();
	return err;
}