perf tools: Introduce perf_callchain_config()
tools/perf/builtin-record.c

/*
 * builtin-record.c
 *
 * Builtin record command: Record the profile of a workload
 * (or a CPU, or a PID) into the perf.data output file - for
 * later analysis via perf report.
 */
#include "builtin.h"

#include "perf.h"

#include "util/build-id.h"
#include "util/util.h"
#include "util/parse-options.h"
#include "util/parse-events.h"

#include "util/header.h"
#include "util/event.h"
#include "util/evlist.h"
#include "util/evsel.h"
#include "util/debug.h"
#include "util/session.h"
#include "util/tool.h"
#include "util/symbol.h"
#include "util/cpumap.h"
#include "util/thread_map.h"
#include "util/data.h"

#include <unistd.h>
#include <sched.h>
#include <sys/mman.h>
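
/*
 * Per-invocation state of 'perf record': the tool callbacks, the parsed
 * record_opts, the output perf.data file and the event list being recorded.
 */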
struct record {
	struct perf_tool	tool;
	struct record_opts	opts;
	u64			bytes_written;
	struct perf_data_file	file;
	struct perf_evlist	*evlist;
	struct perf_session	*session;
	const char		*progname;
	int			realtime_prio;
	bool			no_buildid;
	bool			no_buildid_cache;
	long			samples;
};
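
/*
 * All data destined for perf.data (samples drained from the mmap rings as
 * well as synthesized metadata events) goes through record__write(), which
 * also keeps the running byte count used for the final status line.
 */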
static int record__write(struct record *rec, void *bf, size_t size)
{
	if (perf_data_file__write(rec->session->file, bf, size) < 0) {
		pr_err("failed to write perf data, error: %m\n");
		return -1;
	}

	rec->bytes_written += size;
	return 0;
}

static int process_synthesized_event(struct perf_tool *tool,
				     union perf_event *event,
				     struct perf_sample *sample __maybe_unused,
				     struct machine *machine __maybe_unused)
{
	struct record *rec = container_of(tool, struct record, tool);
	return record__write(rec, event, event->header.size);
}
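
/*
 * Drain one mmap ring buffer: copy everything between the old tail and the
 * current head into perf.data.  When the region wraps around the end of the
 * ring, it is written in two chunks.
 */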
static int record__mmap_read(struct record *rec, int idx)
{
	struct perf_mmap *md = &rec->evlist->mmap[idx];
	unsigned int head = perf_mmap__read_head(md);
	unsigned int old = md->prev;
	unsigned char *data = md->base + page_size;
	unsigned long size;
	void *buf;
	int rc = 0;

	if (old == head)
		return 0;

	rec->samples++;

	size = head - old;

	if ((old & md->mask) + size != (head & md->mask)) {
		buf = &data[old & md->mask];
		size = md->mask + 1 - (old & md->mask);
		old += size;

		if (record__write(rec, buf, size) < 0) {
			rc = -1;
			goto out;
		}
	}

	buf = &data[old & md->mask];
	size = head - old;
	old += size;

	if (record__write(rec, buf, size) < 0) {
		rc = -1;
		goto out;
	}

	md->prev = old;
	perf_evlist__mmap_consume(rec->evlist, idx);
out:
	return rc;
}
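
/*
 * Set from the signal handlers below and polled by the main loop in
 * __cmd_record().
 */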
static volatile int done = 0;
static volatile int signr = -1;
static volatile int child_finished = 0;

static void sig_handler(int sig)
{
	if (sig == SIGCHLD)
		child_finished = 1;
	else
		signr = sig;

	done = 1;
}

static void record__sig_exit(void)
{
	if (signr == -1)
		return;

	signal(signr, SIG_DFL);
	raise(signr);
}
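
/*
 * Apply the record options to each evsel, open the counters (falling back to
 * a more basic event if the requested one is not supported), attach any
 * event filters and mmap the ring buffers.
 */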
static int record__open(struct record *rec)
{
	char msg[512];
	struct perf_evsel *pos;
	struct perf_evlist *evlist = rec->evlist;
	struct perf_session *session = rec->session;
	struct record_opts *opts = &rec->opts;
	int rc = 0;

	perf_evlist__config(evlist, opts);

	evlist__for_each(evlist, pos) {
try_again:
		if (perf_evsel__open(pos, evlist->cpus, evlist->threads) < 0) {
			if (perf_evsel__fallback(pos, errno, msg, sizeof(msg))) {
				if (verbose)
					ui__warning("%s\n", msg);
				goto try_again;
			}

			rc = -errno;
			perf_evsel__open_strerror(pos, &opts->target,
						  errno, msg, sizeof(msg));
			ui__error("%s\n", msg);
			goto out;
		}
	}

	if (perf_evlist__apply_filters(evlist)) {
		error("failed to set filter with %d (%s)\n", errno,
			strerror_r(errno, msg, sizeof(msg)));
		rc = -1;
		goto out;
	}

	if (perf_evlist__mmap(evlist, opts->mmap_pages, false) < 0) {
		if (errno == EPERM) {
			pr_err("Permission error mapping pages.\n"
			       "Consider increasing "
			       "/proc/sys/kernel/perf_event_mlock_kb,\n"
			       "or try again with a smaller value of -m/--mmap_pages.\n"
			       "(current value: %u)\n", opts->mmap_pages);
			rc = -errno;
		} else {
			pr_err("failed to mmap with %d (%s)\n", errno,
				strerror_r(errno, msg, sizeof(msg)));
			rc = -errno;
		}
		goto out;
	}

	session->evlist = evlist;
	perf_session__set_id_hdr_size(session);
out:
	return rc;
}
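
/*
 * Post-process the recorded data so that only the DSOs actually hit by
 * samples get their build-ids stored in the perf.data header.
 */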
static int process_buildids(struct record *rec)
{
	struct perf_data_file *file = &rec->file;
	struct perf_session *session = rec->session;
	u64 start = session->header.data_offset;

	u64 size = lseek(file->fd, 0, SEEK_CUR);
	if (size == 0)
		return 0;

	return __perf_session__process_events(session, start,
					      size - start,
					      size, &build_id__mark_dso_hit_ops);
}

static void perf_event__synthesize_guest_os(struct machine *machine, void *data)
{
	int err;
	struct perf_tool *tool = data;
	/*
	 * When processing the record & report subcommands for a guest kernel,
	 * we arrange the module mmaps prior to the guest kernel mmap and
	 * trigger a preload of the dso, because guest module symbols are by
	 * default loaded from guest kallsyms instead of /lib/modules/XXX/XXX.
	 * This avoids missing symbols when the first address falls in a
	 * module instead of in the guest kernel.
	 */
	err = perf_event__synthesize_modules(tool, process_synthesized_event,
					     machine);
	if (err < 0)
		pr_err("Couldn't record guest kernel [%d]'s reference"
		       " relocation symbol.\n", machine->pid);

	/*
	 * We use _stext for the guest kernel because the guest kernel's
	 * /proc/kallsyms sometimes has no _text.
	 */
	err = perf_event__synthesize_kernel_mmap(tool, process_synthesized_event,
						 machine);
	if (err < 0)
		pr_err("Couldn't record guest kernel [%d]'s reference"
		       " relocation symbol.\n", machine->pid);
}
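
/*
 * PERF_RECORD_FINISHED_ROUND is emitted after every pass over the mmap rings
 * that wrote at least one event; the report side uses it as a flush point
 * when re-sorting events by time.
 */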
static struct perf_event_header finished_round_event = {
	.size = sizeof(struct perf_event_header),
	.type = PERF_RECORD_FINISHED_ROUND,
};

static int record__mmap_read_all(struct record *rec)
{
	u64 bytes_written = rec->bytes_written;
	int i;
	int rc = 0;

	for (i = 0; i < rec->evlist->nr_mmaps; i++) {
		if (rec->evlist->mmap[i].base) {
			if (record__mmap_read(rec, i) != 0) {
				rc = -1;
				goto out;
			}
		}
	}

	/*
	 * Mark the round finished in case we wrote
	 * at least one event.
	 */
	if (bytes_written != rec->bytes_written)
		rc = record__write(rec, &finished_round_event, sizeof(finished_round_event));

out:
	return rc;
}

static void record__init_features(struct record *rec)
{
	struct perf_session *session = rec->session;
	int feat;

	for (feat = HEADER_FIRST_FEATURE; feat < HEADER_LAST_FEATURE; feat++)
		perf_header__set_feat(&session->header, feat);

	if (rec->no_buildid)
		perf_header__clear_feat(&session->header, HEADER_BUILD_ID);

	if (!have_tracepoints(&rec->evlist->entries))
		perf_header__clear_feat(&session->header, HEADER_TRACING_DATA);

	if (!rec->opts.branch_stack)
		perf_header__clear_feat(&session->header, HEADER_BRANCH_STACK);
}

static volatile int workload_exec_errno;

/*
 * perf_evlist__prepare_workload will send a SIGUSR1
 * if the fork fails, since we asked by setting its
 * want_signal to true.
 */
static void workload_exec_failed_signal(int signo __maybe_unused,
					siginfo_t *info,
					void *ucontext __maybe_unused)
{
	workload_exec_errno = info->si_value.sival_int;
	done = 1;
	child_finished = 1;
}
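
/*
 * Main body of the record command: create the session, fork the workload (if
 * one was given on the command line), open the events, synthesize the
 * side-band metadata events (kernel mmap, modules, existing threads), then
 * loop draining the mmap rings until the workload exits or the user
 * interrupts the run, and finally fix up the perf.data header.
 */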
static int __cmd_record(struct record *rec, int argc, const char **argv)
{
	int err;
	int status = 0;
	unsigned long waking = 0;
	const bool forks = argc > 0;
	struct machine *machine;
	struct perf_tool *tool = &rec->tool;
	struct record_opts *opts = &rec->opts;
	struct perf_data_file *file = &rec->file;
	struct perf_session *session;
	bool disabled = false, draining = false;

	rec->progname = argv[0];

	atexit(record__sig_exit);
	signal(SIGCHLD, sig_handler);
	signal(SIGINT, sig_handler);
	signal(SIGTERM, sig_handler);

	session = perf_session__new(file, false, NULL);
	if (session == NULL) {
		pr_err("Perf session creation failed.\n");
		return -1;
	}

	rec->session = session;

	record__init_features(rec);

	if (forks) {
		err = perf_evlist__prepare_workload(rec->evlist, &opts->target,
						    argv, file->is_pipe,
						    workload_exec_failed_signal);
		if (err < 0) {
			pr_err("Couldn't run the workload!\n");
			status = err;
			goto out_delete_session;
		}
	}

	if (record__open(rec) != 0) {
		err = -1;
		goto out_child;
	}

	if (!rec->evlist->nr_groups)
		perf_header__clear_feat(&session->header, HEADER_GROUP_DESC);

	if (file->is_pipe) {
		err = perf_header__write_pipe(file->fd);
		if (err < 0)
			goto out_child;
	} else {
		err = perf_session__write_header(session, rec->evlist,
						 file->fd, false);
		if (err < 0)
			goto out_child;
	}

	if (!rec->no_buildid
	    && !perf_header__has_feat(&session->header, HEADER_BUILD_ID)) {
		pr_err("Couldn't generate buildids. "
		       "Use --no-buildid to profile anyway.\n");
		err = -1;
		goto out_child;
	}

	machine = &session->machines.host;

	if (file->is_pipe) {
		err = perf_event__synthesize_attrs(tool, session,
						   process_synthesized_event);
		if (err < 0) {
			pr_err("Couldn't synthesize attrs.\n");
			goto out_child;
		}

		if (have_tracepoints(&rec->evlist->entries)) {
			/*
			 * FIXME err <= 0 here actually means that
			 * there were no tracepoints so it's not really
			 * an error, just that we don't need to
			 * synthesize anything.  We really have to
			 * return this more properly and also
			 * propagate errors that now are calling die()
			 */
			err = perf_event__synthesize_tracing_data(tool, file->fd, rec->evlist,
								  process_synthesized_event);
			if (err <= 0) {
				pr_err("Couldn't record tracing data.\n");
				goto out_child;
			}
			rec->bytes_written += err;
		}
	}

	err = perf_event__synthesize_kernel_mmap(tool, process_synthesized_event,
						 machine);
	if (err < 0)
		pr_err("Couldn't record kernel reference relocation symbol\n"
		       "Symbol resolution may be skewed if relocation was used (e.g. kexec).\n"
		       "Check /proc/kallsyms permission or run as root.\n");

	err = perf_event__synthesize_modules(tool, process_synthesized_event,
					     machine);
	if (err < 0)
		pr_err("Couldn't record kernel module information.\n"
		       "Symbol resolution may be skewed if relocation was used (e.g. kexec).\n"
		       "Check /proc/modules permission or run as root.\n");

	if (perf_guest) {
		machines__process_guests(&session->machines,
					 perf_event__synthesize_guest_os, tool);
	}

	err = __machine__synthesize_threads(machine, tool, &opts->target, rec->evlist->threads,
					    process_synthesized_event, opts->sample_address);
	if (err != 0)
		goto out_child;

	if (rec->realtime_prio) {
		struct sched_param param;

		param.sched_priority = rec->realtime_prio;
		if (sched_setscheduler(0, SCHED_FIFO, &param)) {
			pr_err("Could not set realtime priority.\n");
			err = -1;
			goto out_child;
		}
	}

	/*
	 * When perf is starting the traced process, all the events
	 * (apart from group members) have enable_on_exec=1 set,
	 * so don't spoil it by prematurely enabling them.
	 */
	if (!target__none(&opts->target) && !opts->initial_delay)
		perf_evlist__enable(rec->evlist);

	/*
	 * Let the child rip
	 */
	if (forks)
		perf_evlist__start_workload(rec->evlist);

	if (opts->initial_delay) {
		usleep(opts->initial_delay * 1000);
		perf_evlist__enable(rec->evlist);
	}

	for (;;) {
		int hits = rec->samples;

		if (record__mmap_read_all(rec) < 0) {
			err = -1;
			goto out_child;
		}

		if (hits == rec->samples) {
			if (done || draining)
				break;
			err = perf_evlist__poll(rec->evlist, -1);
			/*
			 * Propagate error, only if there's any. Ignore positive
			 * number of returned events and interrupt error.
			 */
			if (err > 0 || (err < 0 && errno == EINTR))
				err = 0;
			waking++;

			if (perf_evlist__filter_pollfd(rec->evlist, POLLERR | POLLHUP) == 0)
				draining = true;
		}

		/*
		 * When perf is starting the traced process, at the end events
		 * die with the process and we wait for that. Thus no need to
		 * disable events in this case.
		 */
		if (done && !disabled && !target__none(&opts->target)) {
			perf_evlist__disable(rec->evlist);
			disabled = true;
		}
	}

	if (forks && workload_exec_errno) {
		char msg[STRERR_BUFSIZE];
		const char *emsg = strerror_r(workload_exec_errno, msg, sizeof(msg));
		pr_err("Workload failed: %s\n", emsg);
		err = -1;
		goto out_child;
	}

	if (!quiet) {
		fprintf(stderr, "[ perf record: Woken up %ld times to write data ]\n", waking);

		/*
		 * Approximate RIP event size: 24 bytes.
		 */
		fprintf(stderr,
			"[ perf record: Captured and wrote %.3f MB %s (~%" PRIu64 " samples) ]\n",
			(double)rec->bytes_written / 1024.0 / 1024.0,
			file->path,
			rec->bytes_written / 24);
	}

out_child:
	if (forks) {
		int exit_status;

		if (!child_finished)
			kill(rec->evlist->workload.pid, SIGTERM);

		wait(&exit_status);

		if (err < 0)
			status = err;
		else if (WIFEXITED(exit_status))
			status = WEXITSTATUS(exit_status);
		else if (WIFSIGNALED(exit_status))
			signr = WTERMSIG(exit_status);
	} else
		status = err;

	if (!err && !file->is_pipe) {
		rec->session->header.data_size += rec->bytes_written;

		if (!rec->no_buildid)
			process_buildids(rec);
		perf_session__write_header(rec->session, rec->evlist,
					   file->fd, true);
	}

out_delete_session:
	perf_session__delete(session);
	return status;
}

#define BRANCH_OPT(n, m) \
	{ .name = n, .mode = (m) }

#define BRANCH_END { .name = NULL }

struct branch_mode {
	const char *name;
	int mode;
};

static const struct branch_mode branch_modes[] = {
	BRANCH_OPT("u", PERF_SAMPLE_BRANCH_USER),
	BRANCH_OPT("k", PERF_SAMPLE_BRANCH_KERNEL),
	BRANCH_OPT("hv", PERF_SAMPLE_BRANCH_HV),
	BRANCH_OPT("any", PERF_SAMPLE_BRANCH_ANY),
	BRANCH_OPT("any_call", PERF_SAMPLE_BRANCH_ANY_CALL),
	BRANCH_OPT("any_ret", PERF_SAMPLE_BRANCH_ANY_RETURN),
	BRANCH_OPT("ind_call", PERF_SAMPLE_BRANCH_IND_CALL),
	BRANCH_OPT("abort_tx", PERF_SAMPLE_BRANCH_ABORT_TX),
	BRANCH_OPT("in_tx", PERF_SAMPLE_BRANCH_IN_TX),
	BRANCH_OPT("no_tx", PERF_SAMPLE_BRANCH_NO_TX),
	BRANCH_OPT("cond", PERF_SAMPLE_BRANCH_COND),
	BRANCH_END
};
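
/*
 * Parse the comma-separated list given to -b/-j (e.g. "any_call,u,k") into a
 * PERF_SAMPLE_BRANCH_* bitmask, defaulting to "any" when only privilege
 * levels were specified.
 */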
static int
parse_branch_stack(const struct option *opt, const char *str, int unset)
{
#define ONLY_PLM \
	(PERF_SAMPLE_BRANCH_USER	|\
	 PERF_SAMPLE_BRANCH_KERNEL	|\
	 PERF_SAMPLE_BRANCH_HV)

	uint64_t *mode = (uint64_t *)opt->value;
	const struct branch_mode *br;
	char *s, *os = NULL, *p;
	int ret = -1;

	if (unset)
		return 0;

	/*
	 * cannot set it twice, -b + --branch-filter for instance
	 */
	if (*mode)
		return -1;

	/* str may be NULL in case no arg is passed to -b */
	if (str) {
		/* because str is read-only */
		s = os = strdup(str);
		if (!s)
			return -1;

		for (;;) {
			p = strchr(s, ',');
			if (p)
				*p = '\0';

			for (br = branch_modes; br->name; br++) {
				if (!strcasecmp(s, br->name))
					break;
			}
			if (!br->name) {
				ui__warning("unknown branch filter %s,"
					    " check man page\n", s);
				goto error;
			}

			*mode |= br->mode;

			if (!p)
				break;

			s = p + 1;
		}
	}
	ret = 0;

	/* default to any branch */
	if ((*mode & ~ONLY_PLM) == 0) {
		*mode = PERF_SAMPLE_BRANCH_ANY;
	}
error:
	free(os);
	return ret;
}
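
/*
 * Option handling for call-graph recording: --call-graph takes an explicit
 * mode ("fp" or, when available, "dwarf[,dump_size]"), while bare -g enables
 * recording with the current mode, defaulting to frame pointers.
 */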
static void callchain_debug(void)
{
	static const char *str[CALLCHAIN_MAX] = { "NONE", "FP", "DWARF" };

	pr_debug("callchain: type %s\n", str[callchain_param.record_mode]);

	if (callchain_param.record_mode == CALLCHAIN_DWARF)
		pr_debug("callchain: stack dump size %d\n",
			 callchain_param.dump_size);
}

int record_parse_callchain_opt(const struct option *opt __maybe_unused,
			       const char *arg,
			       int unset)
{
	int ret;

	callchain_param.enabled = !unset;

	/* --no-call-graph */
	if (unset) {
		callchain_param.record_mode = CALLCHAIN_NONE;
		pr_debug("callchain: disabled\n");
		return 0;
	}

	ret = parse_callchain_record_opt(arg);
	if (!ret)
		callchain_debug();

	return ret;
}

int record_callchain_opt(const struct option *opt __maybe_unused,
			 const char *arg __maybe_unused,
			 int unset __maybe_unused)
{
	callchain_param.enabled = true;

	if (callchain_param.record_mode == CALLCHAIN_NONE)
		callchain_param.record_mode = CALLCHAIN_FP;

	callchain_debug();
	return 0;
}
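
/*
 * Handle 'perf record' specific variables from the perfconfig file; currently
 * only 'record.call-graph', everything else falls through to the defaults.
 */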
static int perf_record_config(const char *var, const char *value, void *cb)
{
	if (!strcmp(var, "record.call-graph"))
		return parse_callchain_record_opt(value);

	return perf_default_config(var, value, cb);
}

static const char * const record_usage[] = {
	"perf record [<options>] [<command>]",
	"perf record [<options>] -- <command> [<options>]",
	NULL
};
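
/*
 * Illustrative invocations (the profiled commands are just examples):
 *
 *	perf record ./myprog			# profile a workload with the default event
 *	perf record -g -- ./myprog --arg	# also capture call graphs
 *	perf record --call-graph dwarf ./myprog	# DWARF-based unwinding, if supported
 *	perf record -a sleep 5			# system-wide for the duration of 'sleep 5'
 */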

/*
 * XXX Ideally would be local to cmd_record() and passed to a record__new
 * because we need to have access to it in record__exit, that is called
 * after cmd_record() exits, but since record_options need to be accessible to
 * builtin-script, leave it here.
 *
 * At least we don't touch it in all the other functions here directly.
 *
 * Just say no to tons of global variables, sigh.
 */
static struct record record = {
	.opts = {
		.sample_time	     = true,
		.mmap_pages	     = UINT_MAX,
		.user_freq	     = UINT_MAX,
		.user_interval	     = ULLONG_MAX,
		.freq		     = 4000,
		.target		     = {
			.uses_mmap   = true,
			.default_per_cpu = true,
		},
	},
};

#define CALLCHAIN_HELP "setup and enables call-graph (stack chain/backtrace) recording: "

#ifdef HAVE_DWARF_UNWIND_SUPPORT
const char record_callchain_help[] = CALLCHAIN_HELP "fp dwarf";
#else
const char record_callchain_help[] = CALLCHAIN_HELP "fp";
#endif

/*
 * XXX Will stay a global variable till we fix builtin-script.c to stop messing
 * with it and switch to use the library functions in perf_evlist that came
 * from builtin-record.c, i.e. use record_opts,
 * perf_evlist__prepare_workload, etc instead of fork+exec'in 'perf record',
 * using pipes, etc.
 */
const struct option record_options[] = {
	OPT_CALLBACK('e', "event", &record.evlist, "event",
		     "event selector. use 'perf list' to list available events",
		     parse_events_option),
	OPT_CALLBACK(0, "filter", &record.evlist, "filter",
		     "event filter", parse_filter),
	OPT_STRING('p', "pid", &record.opts.target.pid, "pid",
		    "record events on existing process id"),
	OPT_STRING('t', "tid", &record.opts.target.tid, "tid",
		    "record events on existing thread id"),
	OPT_INTEGER('r', "realtime", &record.realtime_prio,
		    "collect data with this RT SCHED_FIFO priority"),
	OPT_BOOLEAN(0, "no-buffering", &record.opts.no_buffering,
		    "collect data without buffering"),
	OPT_BOOLEAN('R', "raw-samples", &record.opts.raw_samples,
		    "collect raw sample records from all opened counters"),
	OPT_BOOLEAN('a', "all-cpus", &record.opts.target.system_wide,
		    "system-wide collection from all CPUs"),
	OPT_STRING('C', "cpu", &record.opts.target.cpu_list, "cpu",
		    "list of cpus to monitor"),
	OPT_U64('c', "count", &record.opts.user_interval, "event period to sample"),
	OPT_STRING('o', "output", &record.file.path, "file",
		    "output file name"),
	OPT_BOOLEAN_SET('i', "no-inherit", &record.opts.no_inherit,
			&record.opts.no_inherit_set,
			"child tasks do not inherit counters"),
	OPT_UINTEGER('F', "freq", &record.opts.user_freq, "profile at this frequency"),
	OPT_CALLBACK('m', "mmap-pages", &record.opts.mmap_pages, "pages",
		     "number of mmap data pages",
		     perf_evlist__parse_mmap_pages),
	OPT_BOOLEAN(0, "group", &record.opts.group,
		    "put the counters into a counter group"),
	OPT_CALLBACK_NOOPT('g', NULL, &record.opts,
			   NULL, "enables call-graph recording",
			   &record_callchain_opt),
	OPT_CALLBACK(0, "call-graph", &record.opts,
		     "mode[,dump_size]", record_callchain_help,
		     &record_parse_callchain_opt),
	OPT_INCR('v', "verbose", &verbose,
		    "be more verbose (show counter open errors, etc)"),
	OPT_BOOLEAN('q', "quiet", &quiet, "don't print any message"),
	OPT_BOOLEAN('s', "stat", &record.opts.inherit_stat,
		    "per thread counts"),
	OPT_BOOLEAN('d', "data", &record.opts.sample_address,
		    "Sample addresses"),
	OPT_BOOLEAN('T', "timestamp", &record.opts.sample_time, "Sample timestamps"),
	OPT_BOOLEAN('P', "period", &record.opts.period, "Sample period"),
	OPT_BOOLEAN('n', "no-samples", &record.opts.no_samples,
		    "don't sample"),
	OPT_BOOLEAN('N', "no-buildid-cache", &record.no_buildid_cache,
		    "do not update the buildid cache"),
	OPT_BOOLEAN('B', "no-buildid", &record.no_buildid,
		    "do not collect buildids in perf.data"),
	OPT_CALLBACK('G', "cgroup", &record.evlist, "name",
		     "monitor event in cgroup name only",
		     parse_cgroups),
	OPT_UINTEGER('D', "delay", &record.opts.initial_delay,
		     "ms to wait before starting measurement after program start"),
	OPT_STRING('u', "uid", &record.opts.target.uid_str, "user",
		   "user to profile"),

	OPT_CALLBACK_NOOPT('b', "branch-any", &record.opts.branch_stack,
			   "branch any", "sample any taken branches",
			   parse_branch_stack),

	OPT_CALLBACK('j', "branch-filter", &record.opts.branch_stack,
		     "branch filter mask", "branch stack filter modes",
		     parse_branch_stack),
	OPT_BOOLEAN('W', "weight", &record.opts.sample_weight,
		    "sample by weight (on special events only)"),
	OPT_BOOLEAN(0, "transaction", &record.opts.sample_transaction,
		    "sample transaction flags (special events only)"),
	OPT_BOOLEAN(0, "per-thread", &record.opts.target.per_thread,
		    "use per-thread mmaps"),
	OPT_END()
};
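
/*
 * Entry point for 'perf record': parse the options, validate the target
 * (pid/tid/cpu/uid), create the thread/cpu maps and hand over to
 * __cmd_record().
 */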
int cmd_record(int argc, const char **argv, const char *prefix __maybe_unused)
{
	int err = -ENOMEM;
	struct record *rec = &record;
	char errbuf[BUFSIZ];

	rec->evlist = perf_evlist__new();
	if (rec->evlist == NULL)
		return -ENOMEM;

	perf_config(perf_record_config, rec);

	argc = parse_options(argc, argv, record_options, record_usage,
			    PARSE_OPT_STOP_AT_NON_OPTION);
	if (!argc && target__none(&rec->opts.target))
		usage_with_options(record_usage, record_options);

	if (nr_cgroups && !rec->opts.target.system_wide) {
		ui__error("cgroup monitoring only available in"
			  " system-wide mode\n");
		usage_with_options(record_usage, record_options);
	}

	symbol__init(NULL);

	if (symbol_conf.kptr_restrict)
		pr_warning(
"WARNING: Kernel address maps (/proc/{kallsyms,modules}) are restricted,\n"
"check /proc/sys/kernel/kptr_restrict.\n\n"
"Samples in kernel functions may not be resolved if a suitable vmlinux\n"
"file is not found in the buildid cache or in the vmlinux path.\n\n"
"Samples in kernel modules won't be resolved at all.\n\n"
"If some relocation was applied (e.g. kexec) symbols may be misresolved\n"
"even with a suitable vmlinux or kallsyms file.\n\n");

	if (rec->no_buildid_cache || rec->no_buildid)
		disable_buildid_cache();

	if (rec->evlist->nr_entries == 0 &&
	    perf_evlist__add_default(rec->evlist) < 0) {
		pr_err("Not enough memory for event selector list\n");
		goto out_symbol_exit;
	}

	if (rec->opts.target.tid && !rec->opts.no_inherit_set)
		rec->opts.no_inherit = true;

	err = target__validate(&rec->opts.target);
	if (err) {
		target__strerror(&rec->opts.target, err, errbuf, BUFSIZ);
		ui__warning("%s", errbuf);
	}

	err = target__parse_uid(&rec->opts.target);
	if (err) {
		int saved_errno = errno;

		target__strerror(&rec->opts.target, err, errbuf, BUFSIZ);
		ui__error("%s", errbuf);

		err = -saved_errno;
		goto out_symbol_exit;
	}

	err = -ENOMEM;
	if (perf_evlist__create_maps(rec->evlist, &rec->opts.target) < 0)
		usage_with_options(record_usage, record_options);

	if (record_opts__config(&rec->opts)) {
		err = -EINVAL;
		goto out_symbol_exit;
	}

	err = __cmd_record(&record, argc, argv);
out_symbol_exit:
	perf_evlist__delete(rec->evlist);
	symbol__exit();
	return err;
}