perf trace: Move call to symbol__init() after creating session
[linux-2.6-block.git] / tools / perf / builtin-record.c
CommitLineData
abaff32a 1/*
bf9e1876
IM
2 * builtin-record.c
3 *
4 * Builtin record command: Record the profile of a workload
5 * (or a CPU, or a PID) into the perf.data output file - for
6 * later analysis via perf report.
abaff32a 7 */
16f762a2 8#include "builtin.h"
bf9e1876
IM
9
10#include "perf.h"
11
6122e4e4 12#include "util/build-id.h"
6eda5838 13#include "util/util.h"
0e9b20b8 14#include "util/parse-options.h"
8ad8db37 15#include "util/parse-events.h"
6eda5838 16
7c6a1c65 17#include "util/header.h"
66e274f3 18#include "util/event.h"
361c99a6 19#include "util/evlist.h"
69aad6f1 20#include "util/evsel.h"
8f28827a 21#include "util/debug.h"
94c744b6 22#include "util/session.h"
45694aa7 23#include "util/tool.h"
8d06367f 24#include "util/symbol.h"
a12b51c4 25#include "util/cpumap.h"
fd78260b 26#include "util/thread_map.h"
f5fc1412 27#include "util/data.h"
7c6a1c65 28
97124d5e 29#include <unistd.h>
de9ac07b 30#include <sched.h>
a41794cd 31#include <sys/mman.h>
de9ac07b 32
78da39fa 33
8c6f45a7 34struct record {
45694aa7 35 struct perf_tool tool;
b4006796 36 struct record_opts opts;
d20deb64 37 u64 bytes_written;
f5fc1412 38 struct perf_data_file file;
d20deb64
ACM
39 struct perf_evlist *evlist;
40 struct perf_session *session;
41 const char *progname;
d20deb64 42 int realtime_prio;
d20deb64
ACM
43 bool no_buildid;
44 bool no_buildid_cache;
d20deb64 45 long samples;
0f82ebc4 46};
a21ca2ca 47
8c6f45a7 48static int record__write(struct record *rec, void *bf, size_t size)
f5970550 49{
cf8b2e69 50 if (perf_data_file__write(rec->session->file, bf, size) < 0) {
50a9b868
JO
51 pr_err("failed to write perf data, error: %m\n");
52 return -1;
f5970550 53 }
8d3eca20 54
cf8b2e69 55 rec->bytes_written += size;
8d3eca20 56 return 0;
f5970550
PZ
57}
58
45694aa7 59static int process_synthesized_event(struct perf_tool *tool,
d20deb64 60 union perf_event *event,
1d037ca1
IT
61 struct perf_sample *sample __maybe_unused,
62 struct machine *machine __maybe_unused)
234fbbf5 63{
8c6f45a7
ACM
64 struct record *rec = container_of(tool, struct record, tool);
65 return record__write(rec, event, event->header.size);
234fbbf5
ACM
66}
67
8c6f45a7 68static int record__mmap_read(struct record *rec, struct perf_mmap *md)
de9ac07b 69{
744bd8aa 70 unsigned int head = perf_mmap__read_head(md);
de9ac07b 71 unsigned int old = md->prev;
918512b4 72 unsigned char *data = md->base + page_size;
de9ac07b
PZ
73 unsigned long size;
74 void *buf;
8d3eca20 75 int rc = 0;
de9ac07b 76
dc82009a 77 if (old == head)
8d3eca20 78 return 0;
dc82009a 79
d20deb64 80 rec->samples++;
de9ac07b
PZ
81
82 size = head - old;
83
84 if ((old & md->mask) + size != (head & md->mask)) {
85 buf = &data[old & md->mask];
86 size = md->mask + 1 - (old & md->mask);
87 old += size;
021e9f47 88
8c6f45a7 89 if (record__write(rec, buf, size) < 0) {
8d3eca20
DA
90 rc = -1;
91 goto out;
92 }
de9ac07b
PZ
93 }
94
95 buf = &data[old & md->mask];
96 size = head - old;
97 old += size;
021e9f47 98
8c6f45a7 99 if (record__write(rec, buf, size) < 0) {
8d3eca20
DA
100 rc = -1;
101 goto out;
102 }
de9ac07b
PZ
103
104 md->prev = old;
115d2d89 105 perf_mmap__write_tail(md, old);
8d3eca20
DA
106
107out:
108 return rc;
de9ac07b
PZ
109}
110
static volatile int done = 0;		/* set by any terminating signal */
static volatile int signr = -1;		/* signal to re-raise at exit */
static volatile int child_finished = 0;	/* SIGCHLD seen for the workload */

/*
 * Common handler for SIGINT/SIGTERM/SIGCHLD: record what happened and
 * ask the main loop to wind down.
 */
static void sig_handler(int sig)
{
	if (sig == SIGCHLD)
		child_finished = 1;
	else
		signr = sig;

	done = 1;
}
124
45604710 125static void record__sig_exit(void)
f7b7c26e 126{
45604710 127 if (signr == -1)
f7b7c26e
PZ
128 return;
129
130 signal(signr, SIG_DFL);
45604710 131 raise(signr);
de9ac07b
PZ
132}
133
8c6f45a7 134static int record__open(struct record *rec)
dd7927f4 135{
56e52e85 136 char msg[512];
6a4bb04c 137 struct perf_evsel *pos;
d20deb64
ACM
138 struct perf_evlist *evlist = rec->evlist;
139 struct perf_session *session = rec->session;
b4006796 140 struct record_opts *opts = &rec->opts;
8d3eca20 141 int rc = 0;
dd7927f4 142
f77a9518 143 perf_evlist__config(evlist, opts);
cac21425 144
0050f7aa 145 evlist__for_each(evlist, pos) {
dd7927f4 146try_again:
6a4bb04c 147 if (perf_evsel__open(pos, evlist->cpus, evlist->threads) < 0) {
56e52e85 148 if (perf_evsel__fallback(pos, errno, msg, sizeof(msg))) {
d6d901c2 149 if (verbose)
c0a54341 150 ui__warning("%s\n", msg);
d6d901c2
ZY
151 goto try_again;
152 }
ca6a4258 153
56e52e85
ACM
154 rc = -errno;
155 perf_evsel__open_strerror(pos, &opts->target,
156 errno, msg, sizeof(msg));
157 ui__error("%s\n", msg);
8d3eca20 158 goto out;
c171b552
LZ
159 }
160 }
a43d3f08 161
1491a632 162 if (perf_evlist__apply_filters(evlist)) {
0a102479
FW
163 error("failed to set filter with %d (%s)\n", errno,
164 strerror(errno));
8d3eca20
DA
165 rc = -1;
166 goto out;
0a102479
FW
167 }
168
18e60939 169 if (perf_evlist__mmap(evlist, opts->mmap_pages, false) < 0) {
8d3eca20
DA
170 if (errno == EPERM) {
171 pr_err("Permission error mapping pages.\n"
172 "Consider increasing "
173 "/proc/sys/kernel/perf_event_mlock_kb,\n"
174 "or try again with a smaller value of -m/--mmap_pages.\n"
53653d70 175 "(current value: %u)\n", opts->mmap_pages);
8d3eca20 176 rc = -errno;
8d3eca20
DA
177 } else {
178 pr_err("failed to mmap with %d (%s)\n", errno, strerror(errno));
179 rc = -errno;
180 }
181 goto out;
18e60939 182 }
0a27d7f9 183
563aecb2 184 session->evlist = evlist;
7b56cce2 185 perf_session__set_id_hdr_size(session);
8d3eca20
DA
186out:
187 return rc;
16c8a109
PZ
188}
189
8c6f45a7 190static int process_buildids(struct record *rec)
6122e4e4 191{
f5fc1412
JO
192 struct perf_data_file *file = &rec->file;
193 struct perf_session *session = rec->session;
7ab75cff 194 u64 start = session->header.data_offset;
6122e4e4 195
f5fc1412 196 u64 size = lseek(file->fd, 0, SEEK_CUR);
9f591fd7
ACM
197 if (size == 0)
198 return 0;
199
7ab75cff
DA
200 return __perf_session__process_events(session, start,
201 size - start,
6122e4e4
ACM
202 size, &build_id__mark_dso_hit_ops);
203}
204
8115d60c 205static void perf_event__synthesize_guest_os(struct machine *machine, void *data)
a1645ce1
ZY
206{
207 int err;
45694aa7 208 struct perf_tool *tool = data;
a1645ce1
ZY
209 /*
210 *As for guest kernel when processing subcommand record&report,
211 *we arrange module mmap prior to guest kernel mmap and trigger
212 *a preload dso because default guest module symbols are loaded
213 *from guest kallsyms instead of /lib/modules/XXX/XXX. This
214 *method is used to avoid symbol missing when the first addr is
215 *in module instead of in guest kernel.
216 */
45694aa7 217 err = perf_event__synthesize_modules(tool, process_synthesized_event,
743eb868 218 machine);
a1645ce1
ZY
219 if (err < 0)
220 pr_err("Couldn't record guest kernel [%d]'s reference"
23346f21 221 " relocation symbol.\n", machine->pid);
a1645ce1 222
a1645ce1
ZY
223 /*
224 * We use _stext for guest kernel because guest kernel's /proc/kallsyms
225 * have no _text sometimes.
226 */
45694aa7 227 err = perf_event__synthesize_kernel_mmap(tool, process_synthesized_event,
0ae617be 228 machine);
a1645ce1
ZY
229 if (err < 0)
230 pr_err("Couldn't record guest kernel [%d]'s reference"
23346f21 231 " relocation symbol.\n", machine->pid);
a1645ce1
ZY
232}
233
98402807
FW
234static struct perf_event_header finished_round_event = {
235 .size = sizeof(struct perf_event_header),
236 .type = PERF_RECORD_FINISHED_ROUND,
237};
238
8c6f45a7 239static int record__mmap_read_all(struct record *rec)
98402807 240{
dcabb507 241 u64 bytes_written = rec->bytes_written;
0e2e63dd 242 int i;
8d3eca20 243 int rc = 0;
98402807 244
d20deb64 245 for (i = 0; i < rec->evlist->nr_mmaps; i++) {
8d3eca20 246 if (rec->evlist->mmap[i].base) {
8c6f45a7 247 if (record__mmap_read(rec, &rec->evlist->mmap[i]) != 0) {
8d3eca20
DA
248 rc = -1;
249 goto out;
250 }
251 }
98402807
FW
252 }
253
dcabb507
JO
254 /*
255 * Mark the round finished in case we wrote
256 * at least one event.
257 */
258 if (bytes_written != rec->bytes_written)
259 rc = record__write(rec, &finished_round_event, sizeof(finished_round_event));
8d3eca20
DA
260
261out:
262 return rc;
98402807
FW
263}
264
8c6f45a7 265static void record__init_features(struct record *rec)
57706abc 266{
57706abc
DA
267 struct perf_session *session = rec->session;
268 int feat;
269
270 for (feat = HEADER_FIRST_FEATURE; feat < HEADER_LAST_FEATURE; feat++)
271 perf_header__set_feat(&session->header, feat);
272
273 if (rec->no_buildid)
274 perf_header__clear_feat(&session->header, HEADER_BUILD_ID);
275
3e2be2da 276 if (!have_tracepoints(&rec->evlist->entries))
57706abc
DA
277 perf_header__clear_feat(&session->header, HEADER_TRACING_DATA);
278
279 if (!rec->opts.branch_stack)
280 perf_header__clear_feat(&session->header, HEADER_BRANCH_STACK);
281}
282
f33cbe72
ACM
283static volatile int workload_exec_errno;
284
285/*
286 * perf_evlist__prepare_workload will send a SIGUSR1
287 * if the fork fails, since we asked by setting its
288 * want_signal to true.
289 */
45604710
NK
290static void workload_exec_failed_signal(int signo __maybe_unused,
291 siginfo_t *info,
f33cbe72
ACM
292 void *ucontext __maybe_unused)
293{
294 workload_exec_errno = info->si_value.sival_int;
295 done = 1;
f33cbe72
ACM
296 child_finished = 1;
297}
298
8c6f45a7 299static int __cmd_record(struct record *rec, int argc, const char **argv)
16c8a109 300{
57706abc 301 int err;
45604710 302 int status = 0;
8b412664 303 unsigned long waking = 0;
46be604b 304 const bool forks = argc > 0;
23346f21 305 struct machine *machine;
45694aa7 306 struct perf_tool *tool = &rec->tool;
b4006796 307 struct record_opts *opts = &rec->opts;
f5fc1412 308 struct perf_data_file *file = &rec->file;
d20deb64 309 struct perf_session *session;
2711926a 310 bool disabled = false;
de9ac07b 311
d20deb64 312 rec->progname = argv[0];
33e49ea7 313
45604710 314 atexit(record__sig_exit);
f5970550
PZ
315 signal(SIGCHLD, sig_handler);
316 signal(SIGINT, sig_handler);
804f7ac7 317 signal(SIGTERM, sig_handler);
f5970550 318
f5fc1412 319 session = perf_session__new(file, false, NULL);
94c744b6 320 if (session == NULL) {
ffa91880 321 pr_err("Perf session creation failed.\n");
a9a70bbc
ACM
322 return -1;
323 }
324
d20deb64
ACM
325 rec->session = session;
326
8c6f45a7 327 record__init_features(rec);
330aa675 328
d4db3f16 329 if (forks) {
3e2be2da 330 err = perf_evlist__prepare_workload(rec->evlist, &opts->target,
f5fc1412 331 argv, file->is_pipe,
735f7e0b 332 workload_exec_failed_signal);
35b9d88e
ACM
333 if (err < 0) {
334 pr_err("Couldn't run the workload!\n");
45604710 335 status = err;
35b9d88e 336 goto out_delete_session;
856e9660 337 }
856e9660
PZ
338 }
339
8c6f45a7 340 if (record__open(rec) != 0) {
8d3eca20 341 err = -1;
45604710 342 goto out_child;
8d3eca20 343 }
de9ac07b 344
3e2be2da 345 if (!rec->evlist->nr_groups)
a8bb559b
NK
346 perf_header__clear_feat(&session->header, HEADER_GROUP_DESC);
347
f5fc1412
JO
348 if (file->is_pipe) {
349 err = perf_header__write_pipe(file->fd);
529870e3 350 if (err < 0)
45604710 351 goto out_child;
563aecb2 352 } else {
3e2be2da 353 err = perf_session__write_header(session, rec->evlist,
f5fc1412 354 file->fd, false);
d5eed904 355 if (err < 0)
45604710 356 goto out_child;
56b03f3c
ACM
357 }
358
d3665498 359 if (!rec->no_buildid
e20960c0 360 && !perf_header__has_feat(&session->header, HEADER_BUILD_ID)) {
d3665498 361 pr_err("Couldn't generate buildids. "
e20960c0 362 "Use --no-buildid to profile anyway.\n");
8d3eca20 363 err = -1;
45604710 364 goto out_child;
e20960c0
RR
365 }
366
34ba5122 367 machine = &session->machines.host;
743eb868 368
f5fc1412 369 if (file->is_pipe) {
45694aa7 370 err = perf_event__synthesize_attrs(tool, session,
d20deb64 371 process_synthesized_event);
2c46dbb5
TZ
372 if (err < 0) {
373 pr_err("Couldn't synthesize attrs.\n");
45604710 374 goto out_child;
2c46dbb5 375 }
cd19a035 376
3e2be2da 377 if (have_tracepoints(&rec->evlist->entries)) {
63e0c771
TZ
378 /*
379 * FIXME err <= 0 here actually means that
380 * there were no tracepoints so its not really
381 * an error, just that we don't need to
382 * synthesize anything. We really have to
383 * return this more properly and also
384 * propagate errors that now are calling die()
385 */
3e2be2da 386 err = perf_event__synthesize_tracing_data(tool, file->fd, rec->evlist,
743eb868 387 process_synthesized_event);
63e0c771
TZ
388 if (err <= 0) {
389 pr_err("Couldn't record tracing data.\n");
45604710 390 goto out_child;
63e0c771 391 }
f34b9001 392 rec->bytes_written += err;
63e0c771 393 }
2c46dbb5
TZ
394 }
395
45694aa7 396 err = perf_event__synthesize_kernel_mmap(tool, process_synthesized_event,
0ae617be 397 machine);
c1a3a4b9
ACM
398 if (err < 0)
399 pr_err("Couldn't record kernel reference relocation symbol\n"
400 "Symbol resolution may be skewed if relocation was used (e.g. kexec).\n"
401 "Check /proc/kallsyms permission or run as root.\n");
b7cece76 402
45694aa7 403 err = perf_event__synthesize_modules(tool, process_synthesized_event,
743eb868 404 machine);
c1a3a4b9
ACM
405 if (err < 0)
406 pr_err("Couldn't record kernel module information.\n"
407 "Symbol resolution may be skewed if relocation was used (e.g. kexec).\n"
408 "Check /proc/modules permission or run as root.\n");
409
7e383de4 410 if (perf_guest) {
876650e6
ACM
411 machines__process_guests(&session->machines,
412 perf_event__synthesize_guest_os, tool);
7e383de4 413 }
7c6a1c65 414
3e2be2da 415 err = __machine__synthesize_threads(machine, tool, &opts->target, rec->evlist->threads,
a33fbd56 416 process_synthesized_event, opts->sample_address);
8d3eca20 417 if (err != 0)
45604710 418 goto out_child;
8d3eca20 419
d20deb64 420 if (rec->realtime_prio) {
de9ac07b
PZ
421 struct sched_param param;
422
d20deb64 423 param.sched_priority = rec->realtime_prio;
de9ac07b 424 if (sched_setscheduler(0, SCHED_FIFO, &param)) {
6beba7ad 425 pr_err("Could not set realtime priority.\n");
8d3eca20 426 err = -1;
45604710 427 goto out_child;
de9ac07b
PZ
428 }
429 }
430
774cb499
JO
431 /*
432 * When perf is starting the traced process, all the events
433 * (apart from group members) have enable_on_exec=1 set,
434 * so don't spoil it by prematurely enabling them.
435 */
6619a53e 436 if (!target__none(&opts->target) && !opts->initial_delay)
3e2be2da 437 perf_evlist__enable(rec->evlist);
764e16a3 438
856e9660
PZ
439 /*
440 * Let the child rip
441 */
735f7e0b 442 if (forks)
3e2be2da 443 perf_evlist__start_workload(rec->evlist);
856e9660 444
6619a53e
AK
445 if (opts->initial_delay) {
446 usleep(opts->initial_delay * 1000);
447 perf_evlist__enable(rec->evlist);
448 }
449
649c48a9 450 for (;;) {
d20deb64 451 int hits = rec->samples;
de9ac07b 452
8c6f45a7 453 if (record__mmap_read_all(rec) < 0) {
8d3eca20 454 err = -1;
45604710 455 goto out_child;
8d3eca20 456 }
de9ac07b 457
d20deb64 458 if (hits == rec->samples) {
649c48a9
PZ
459 if (done)
460 break;
3e2be2da 461 err = poll(rec->evlist->pollfd, rec->evlist->nr_fds, -1);
a515114f
JO
462 /*
463 * Propagate error, only if there's any. Ignore positive
464 * number of returned events and interrupt error.
465 */
466 if (err > 0 || (err < 0 && errno == EINTR))
45604710 467 err = 0;
8b412664
PZ
468 waking++;
469 }
470
774cb499
JO
471 /*
472 * When perf is starting the traced process, at the end events
473 * die with the process and we wait for that. Thus no need to
474 * disable events in this case.
475 */
602ad878 476 if (done && !disabled && !target__none(&opts->target)) {
3e2be2da 477 perf_evlist__disable(rec->evlist);
2711926a
JO
478 disabled = true;
479 }
de9ac07b
PZ
480 }
481
f33cbe72
ACM
482 if (forks && workload_exec_errno) {
483 char msg[512];
484 const char *emsg = strerror_r(workload_exec_errno, msg, sizeof(msg));
485 pr_err("Workload failed: %s\n", emsg);
486 err = -1;
45604710 487 goto out_child;
f33cbe72
ACM
488 }
489
45604710
NK
490 if (!quiet) {
491 fprintf(stderr, "[ perf record: Woken up %ld times to write data ]\n", waking);
b44308f5 492
45604710
NK
493 /*
494 * Approximate RIP event size: 24 bytes.
495 */
496 fprintf(stderr,
497 "[ perf record: Captured and wrote %.3f MB %s (~%" PRIu64 " samples) ]\n",
498 (double)rec->bytes_written / 1024.0 / 1024.0,
499 file->path,
500 rec->bytes_written / 24);
501 }
8b412664 502
45604710
NK
503out_child:
504 if (forks) {
505 int exit_status;
addc2785 506
45604710
NK
507 if (!child_finished)
508 kill(rec->evlist->workload.pid, SIGTERM);
509
510 wait(&exit_status);
511
512 if (err < 0)
513 status = err;
514 else if (WIFEXITED(exit_status))
515 status = WEXITSTATUS(exit_status);
516 else if (WIFSIGNALED(exit_status))
517 signr = WTERMSIG(exit_status);
518 } else
519 status = err;
520
521 if (!err && !file->is_pipe) {
522 rec->session->header.data_size += rec->bytes_written;
523
524 if (!rec->no_buildid)
525 process_buildids(rec);
526 perf_session__write_header(rec->session, rec->evlist,
527 file->fd, true);
528 }
39d17dac
ACM
529
530out_delete_session:
531 perf_session__delete(session);
45604710 532 return status;
de9ac07b 533}
0e9b20b8 534
bdfebd84
RAV
535#define BRANCH_OPT(n, m) \
536 { .name = n, .mode = (m) }
537
538#define BRANCH_END { .name = NULL }
539
540struct branch_mode {
541 const char *name;
542 int mode;
543};
544
545static const struct branch_mode branch_modes[] = {
546 BRANCH_OPT("u", PERF_SAMPLE_BRANCH_USER),
547 BRANCH_OPT("k", PERF_SAMPLE_BRANCH_KERNEL),
548 BRANCH_OPT("hv", PERF_SAMPLE_BRANCH_HV),
549 BRANCH_OPT("any", PERF_SAMPLE_BRANCH_ANY),
550 BRANCH_OPT("any_call", PERF_SAMPLE_BRANCH_ANY_CALL),
551 BRANCH_OPT("any_ret", PERF_SAMPLE_BRANCH_ANY_RETURN),
552 BRANCH_OPT("ind_call", PERF_SAMPLE_BRANCH_IND_CALL),
0126d493
AK
553 BRANCH_OPT("abort_tx", PERF_SAMPLE_BRANCH_ABORT_TX),
554 BRANCH_OPT("in_tx", PERF_SAMPLE_BRANCH_IN_TX),
555 BRANCH_OPT("no_tx", PERF_SAMPLE_BRANCH_NO_TX),
0fffa5df 556 BRANCH_OPT("cond", PERF_SAMPLE_BRANCH_COND),
bdfebd84
RAV
557 BRANCH_END
558};
559
560static int
a5aabdac 561parse_branch_stack(const struct option *opt, const char *str, int unset)
bdfebd84
RAV
562{
563#define ONLY_PLM \
564 (PERF_SAMPLE_BRANCH_USER |\
565 PERF_SAMPLE_BRANCH_KERNEL |\
566 PERF_SAMPLE_BRANCH_HV)
567
568 uint64_t *mode = (uint64_t *)opt->value;
569 const struct branch_mode *br;
a5aabdac 570 char *s, *os = NULL, *p;
bdfebd84
RAV
571 int ret = -1;
572
a5aabdac
SE
573 if (unset)
574 return 0;
bdfebd84 575
a5aabdac
SE
576 /*
577 * cannot set it twice, -b + --branch-filter for instance
578 */
579 if (*mode)
bdfebd84
RAV
580 return -1;
581
a5aabdac
SE
582 /* str may be NULL in case no arg is passed to -b */
583 if (str) {
584 /* because str is read-only */
585 s = os = strdup(str);
586 if (!s)
587 return -1;
588
589 for (;;) {
590 p = strchr(s, ',');
591 if (p)
592 *p = '\0';
593
594 for (br = branch_modes; br->name; br++) {
595 if (!strcasecmp(s, br->name))
596 break;
597 }
598 if (!br->name) {
599 ui__warning("unknown branch filter %s,"
600 " check man page\n", s);
601 goto error;
602 }
bdfebd84 603
a5aabdac 604 *mode |= br->mode;
bdfebd84 605
a5aabdac
SE
606 if (!p)
607 break;
bdfebd84 608
a5aabdac
SE
609 s = p + 1;
610 }
bdfebd84
RAV
611 }
612 ret = 0;
613
a5aabdac 614 /* default to any branch */
bdfebd84 615 if ((*mode & ~ONLY_PLM) == 0) {
a5aabdac 616 *mode = PERF_SAMPLE_BRANCH_ANY;
bdfebd84
RAV
617 }
618error:
619 free(os);
620 return ret;
621}
622
#ifdef HAVE_DWARF_UNWIND_SUPPORT
/*
 * Parse a dwarf stack-dump size from @str: must consume the whole
 * string, is rounded up to a u64 multiple, and must land in
 * (0, round_down(USHRT_MAX, 8)].  On success stores it in *_size and
 * returns 0; otherwise prints an error and returns -1.
 */
static int get_stack_size(char *str, unsigned long *_size)
{
	unsigned long max_size = round_down(USHRT_MAX, sizeof(u64));
	unsigned long size;
	char *endptr;

	size = strtoul(str, &endptr, 0);

	if (*endptr == '\0') {	/* whole string was numeric */
		size = round_up(size, sizeof(u64));
		if (size && size <= max_size) {
			*_size = size;
			return 0;
		}
	}

	pr_err("callchain: Incorrect stack dump size (max %ld): %s\n",
	       max_size, str);
	return -1;
}
#endif /* HAVE_DWARF_UNWIND_SUPPORT */
26d33022 650
b4006796 651int record_parse_callchain(const char *arg, struct record_opts *opts)
26d33022 652{
26d33022
JO
653 char *tok, *name, *saveptr = NULL;
654 char *buf;
655 int ret = -1;
656
26d33022
JO
657 /* We need buffer that we know we can write to. */
658 buf = malloc(strlen(arg) + 1);
659 if (!buf)
660 return -ENOMEM;
661
662 strcpy(buf, arg);
663
664 tok = strtok_r((char *)buf, ",", &saveptr);
665 name = tok ? : (char *)buf;
666
667 do {
668 /* Framepointer style */
669 if (!strncmp(name, "fp", sizeof("fp"))) {
670 if (!strtok_r(NULL, ",", &saveptr)) {
c5ff78c3 671 opts->call_graph = CALLCHAIN_FP;
26d33022
JO
672 ret = 0;
673 } else
674 pr_err("callchain: No more arguments "
675 "needed for -g fp\n");
676 break;
677
9ff125d1 678#ifdef HAVE_DWARF_UNWIND_SUPPORT
26d33022
JO
679 /* Dwarf style */
680 } else if (!strncmp(name, "dwarf", sizeof("dwarf"))) {
61eaa3be
ACM
681 const unsigned long default_stack_dump_size = 8192;
682
26d33022 683 ret = 0;
c5ff78c3
ACM
684 opts->call_graph = CALLCHAIN_DWARF;
685 opts->stack_dump_size = default_stack_dump_size;
26d33022
JO
686
687 tok = strtok_r(NULL, ",", &saveptr);
688 if (tok) {
689 unsigned long size = 0;
690
691 ret = get_stack_size(tok, &size);
c5ff78c3 692 opts->stack_dump_size = size;
26d33022 693 }
9ff125d1 694#endif /* HAVE_DWARF_UNWIND_SUPPORT */
26d33022 695 } else {
09b0fd45 696 pr_err("callchain: Unknown --call-graph option "
26d33022
JO
697 "value: %s\n", arg);
698 break;
699 }
700
701 } while (0);
702
703 free(buf);
09b0fd45
JO
704 return ret;
705}
706
b4006796 707static void callchain_debug(struct record_opts *opts)
09b0fd45 708{
a601fdff
JO
709 static const char *str[CALLCHAIN_MAX] = { "NONE", "FP", "DWARF" };
710
711 pr_debug("callchain: type %s\n", str[opts->call_graph]);
26d33022 712
09b0fd45
JO
713 if (opts->call_graph == CALLCHAIN_DWARF)
714 pr_debug("callchain: stack dump size %d\n",
715 opts->stack_dump_size);
716}
717
718int record_parse_callchain_opt(const struct option *opt,
719 const char *arg,
720 int unset)
721{
b4006796 722 struct record_opts *opts = opt->value;
09b0fd45
JO
723 int ret;
724
eb853e80
JO
725 opts->call_graph_enabled = !unset;
726
09b0fd45
JO
727 /* --no-call-graph */
728 if (unset) {
729 opts->call_graph = CALLCHAIN_NONE;
730 pr_debug("callchain: disabled\n");
731 return 0;
732 }
733
734 ret = record_parse_callchain(arg, opts);
26d33022 735 if (!ret)
09b0fd45 736 callchain_debug(opts);
26d33022
JO
737
738 return ret;
739}
740
09b0fd45
JO
741int record_callchain_opt(const struct option *opt,
742 const char *arg __maybe_unused,
743 int unset __maybe_unused)
744{
b4006796 745 struct record_opts *opts = opt->value;
09b0fd45 746
eb853e80
JO
747 opts->call_graph_enabled = !unset;
748
09b0fd45
JO
749 if (opts->call_graph == CALLCHAIN_NONE)
750 opts->call_graph = CALLCHAIN_FP;
751
752 callchain_debug(opts);
753 return 0;
754}
755
eb853e80
JO
756static int perf_record_config(const char *var, const char *value, void *cb)
757{
758 struct record *rec = cb;
759
760 if (!strcmp(var, "record.call-graph"))
761 return record_parse_callchain(value, &rec->opts);
762
763 return perf_default_config(var, value, cb);
764}
765
0e9b20b8 766static const char * const record_usage[] = {
9e096753
MG
767 "perf record [<options>] [<command>]",
768 "perf record [<options>] -- <command> [<options>]",
0e9b20b8
IM
769 NULL
770};
771
d20deb64 772/*
8c6f45a7
ACM
773 * XXX Ideally would be local to cmd_record() and passed to a record__new
774 * because we need to have access to it in record__exit, that is called
d20deb64
ACM
775 * after cmd_record() exits, but since record_options need to be accessible to
776 * builtin-script, leave it here.
777 *
778 * At least we don't ouch it in all the other functions here directly.
779 *
780 * Just say no to tons of global variables, sigh.
781 */
8c6f45a7 782static struct record record = {
d20deb64 783 .opts = {
8affc2b8 784 .sample_time = true,
d20deb64
ACM
785 .mmap_pages = UINT_MAX,
786 .user_freq = UINT_MAX,
787 .user_interval = ULLONG_MAX,
447a6013 788 .freq = 4000,
d1cb9fce
NK
789 .target = {
790 .uses_mmap = true,
3aa5939d 791 .default_per_cpu = true,
d1cb9fce 792 },
d20deb64 793 },
d20deb64 794};
7865e817 795
#define CALLCHAIN_HELP "setup and enables call-graph (stack chain/backtrace) recording: "

/* Advertise dwarf mode only when the unwind support was built in. */
#ifdef HAVE_DWARF_UNWIND_SUPPORT
const char record_callchain_help[] = CALLCHAIN_HELP "fp dwarf";
#else
const char record_callchain_help[] = CALLCHAIN_HELP "fp";
#endif
803
d20deb64
ACM
804/*
805 * XXX Will stay a global variable till we fix builtin-script.c to stop messing
806 * with it and switch to use the library functions in perf_evlist that came
b4006796 807 * from builtin-record.c, i.e. use record_opts,
d20deb64
ACM
808 * perf_evlist__prepare_workload, etc instead of fork+exec'in 'perf record',
809 * using pipes, etc.
810 */
bca647aa 811const struct option record_options[] = {
d20deb64 812 OPT_CALLBACK('e', "event", &record.evlist, "event",
86847b62 813 "event selector. use 'perf list' to list available events",
f120f9d5 814 parse_events_option),
d20deb64 815 OPT_CALLBACK(0, "filter", &record.evlist, "filter",
c171b552 816 "event filter", parse_filter),
bea03405 817 OPT_STRING('p', "pid", &record.opts.target.pid, "pid",
d6d901c2 818 "record events on existing process id"),
bea03405 819 OPT_STRING('t', "tid", &record.opts.target.tid, "tid",
d6d901c2 820 "record events on existing thread id"),
d20deb64 821 OPT_INTEGER('r', "realtime", &record.realtime_prio,
0e9b20b8 822 "collect data with this RT SCHED_FIFO priority"),
509051ea 823 OPT_BOOLEAN(0, "no-buffering", &record.opts.no_buffering,
acac03fa 824 "collect data without buffering"),
d20deb64 825 OPT_BOOLEAN('R', "raw-samples", &record.opts.raw_samples,
daac07b2 826 "collect raw sample records from all opened counters"),
bea03405 827 OPT_BOOLEAN('a', "all-cpus", &record.opts.target.system_wide,
0e9b20b8 828 "system-wide collection from all CPUs"),
bea03405 829 OPT_STRING('C', "cpu", &record.opts.target.cpu_list, "cpu",
c45c6ea2 830 "list of cpus to monitor"),
d20deb64 831 OPT_U64('c', "count", &record.opts.user_interval, "event period to sample"),
f5fc1412 832 OPT_STRING('o', "output", &record.file.path, "file",
abaff32a 833 "output file name"),
69e7e5b0
AH
834 OPT_BOOLEAN_SET('i', "no-inherit", &record.opts.no_inherit,
835 &record.opts.no_inherit_set,
836 "child tasks do not inherit counters"),
d20deb64 837 OPT_UINTEGER('F', "freq", &record.opts.user_freq, "profile at this frequency"),
994a1f78
JO
838 OPT_CALLBACK('m', "mmap-pages", &record.opts.mmap_pages, "pages",
839 "number of mmap data pages",
840 perf_evlist__parse_mmap_pages),
d20deb64 841 OPT_BOOLEAN(0, "group", &record.opts.group,
43bece79 842 "put the counters into a counter group"),
09b0fd45
JO
843 OPT_CALLBACK_NOOPT('g', NULL, &record.opts,
844 NULL, "enables call-graph recording" ,
845 &record_callchain_opt),
846 OPT_CALLBACK(0, "call-graph", &record.opts,
847 "mode[,dump_size]", record_callchain_help,
848 &record_parse_callchain_opt),
c0555642 849 OPT_INCR('v', "verbose", &verbose,
3da297a6 850 "be more verbose (show counter open errors, etc)"),
b44308f5 851 OPT_BOOLEAN('q', "quiet", &quiet, "don't print any message"),
d20deb64 852 OPT_BOOLEAN('s', "stat", &record.opts.inherit_stat,
649c48a9 853 "per thread counts"),
d20deb64 854 OPT_BOOLEAN('d', "data", &record.opts.sample_address,
4bba828d 855 "Sample addresses"),
d20deb64 856 OPT_BOOLEAN('T', "timestamp", &record.opts.sample_time, "Sample timestamps"),
3e76ac78 857 OPT_BOOLEAN('P', "period", &record.opts.period, "Sample period"),
d20deb64 858 OPT_BOOLEAN('n', "no-samples", &record.opts.no_samples,
649c48a9 859 "don't sample"),
d20deb64 860 OPT_BOOLEAN('N', "no-buildid-cache", &record.no_buildid_cache,
a1ac1d3c 861 "do not update the buildid cache"),
d20deb64 862 OPT_BOOLEAN('B', "no-buildid", &record.no_buildid,
baa2f6ce 863 "do not collect buildids in perf.data"),
d20deb64 864 OPT_CALLBACK('G', "cgroup", &record.evlist, "name",
023695d9
SE
865 "monitor event in cgroup name only",
866 parse_cgroups),
a6205a35 867 OPT_UINTEGER('D', "delay", &record.opts.initial_delay,
6619a53e 868 "ms to wait before starting measurement after program start"),
bea03405
NK
869 OPT_STRING('u', "uid", &record.opts.target.uid_str, "user",
870 "user to profile"),
a5aabdac
SE
871
872 OPT_CALLBACK_NOOPT('b', "branch-any", &record.opts.branch_stack,
873 "branch any", "sample any taken branches",
874 parse_branch_stack),
875
876 OPT_CALLBACK('j', "branch-filter", &record.opts.branch_stack,
877 "branch filter mask", "branch stack filter modes",
bdfebd84 878 parse_branch_stack),
05484298
AK
879 OPT_BOOLEAN('W', "weight", &record.opts.sample_weight,
880 "sample by weight (on special events only)"),
475eeab9
AK
881 OPT_BOOLEAN(0, "transaction", &record.opts.sample_transaction,
882 "sample transaction flags (special events only)"),
3aa5939d
AH
883 OPT_BOOLEAN(0, "per-thread", &record.opts.target.per_thread,
884 "use per-thread mmaps"),
0e9b20b8
IM
885 OPT_END()
886};
887
1d037ca1 888int cmd_record(int argc, const char **argv, const char *prefix __maybe_unused)
0e9b20b8 889{
69aad6f1 890 int err = -ENOMEM;
8c6f45a7 891 struct record *rec = &record;
16ad2ffb 892 char errbuf[BUFSIZ];
0e9b20b8 893
3e2be2da
ACM
894 rec->evlist = perf_evlist__new();
895 if (rec->evlist == NULL)
361c99a6
ACM
896 return -ENOMEM;
897
eb853e80
JO
898 perf_config(perf_record_config, rec);
899
bca647aa 900 argc = parse_options(argc, argv, record_options, record_usage,
655000e7 901 PARSE_OPT_STOP_AT_NON_OPTION);
602ad878 902 if (!argc && target__none(&rec->opts.target))
bca647aa 903 usage_with_options(record_usage, record_options);
0e9b20b8 904
bea03405 905 if (nr_cgroups && !rec->opts.target.system_wide) {
3780f488
NK
906 ui__error("cgroup monitoring only available in"
907 " system-wide mode\n");
023695d9
SE
908 usage_with_options(record_usage, record_options);
909 }
910
655000e7 911 symbol__init();
baa2f6ce 912
ec80fde7 913 if (symbol_conf.kptr_restrict)
646aaea6
ACM
914 pr_warning(
915"WARNING: Kernel address maps (/proc/{kallsyms,modules}) are restricted,\n"
916"check /proc/sys/kernel/kptr_restrict.\n\n"
917"Samples in kernel functions may not be resolved if a suitable vmlinux\n"
918"file is not found in the buildid cache or in the vmlinux path.\n\n"
919"Samples in kernel modules won't be resolved at all.\n\n"
920"If some relocation was applied (e.g. kexec) symbols may be misresolved\n"
921"even with a suitable vmlinux or kallsyms file.\n\n");
ec80fde7 922
d20deb64 923 if (rec->no_buildid_cache || rec->no_buildid)
a1ac1d3c 924 disable_buildid_cache();
655000e7 925
3e2be2da
ACM
926 if (rec->evlist->nr_entries == 0 &&
927 perf_evlist__add_default(rec->evlist) < 0) {
69aad6f1
ACM
928 pr_err("Not enough memory for event selector list\n");
929 goto out_symbol_exit;
bbd36e5e 930 }
0e9b20b8 931
69e7e5b0
AH
932 if (rec->opts.target.tid && !rec->opts.no_inherit_set)
933 rec->opts.no_inherit = true;
934
602ad878 935 err = target__validate(&rec->opts.target);
16ad2ffb 936 if (err) {
602ad878 937 target__strerror(&rec->opts.target, err, errbuf, BUFSIZ);
16ad2ffb
NK
938 ui__warning("%s", errbuf);
939 }
940
602ad878 941 err = target__parse_uid(&rec->opts.target);
16ad2ffb
NK
942 if (err) {
943 int saved_errno = errno;
4bd0f2d2 944
602ad878 945 target__strerror(&rec->opts.target, err, errbuf, BUFSIZ);
3780f488 946 ui__error("%s", errbuf);
16ad2ffb
NK
947
948 err = -saved_errno;
8fa60e1f 949 goto out_symbol_exit;
16ad2ffb 950 }
0d37aa34 951
16ad2ffb 952 err = -ENOMEM;
3e2be2da 953 if (perf_evlist__create_maps(rec->evlist, &rec->opts.target) < 0)
dd7927f4 954 usage_with_options(record_usage, record_options);
69aad6f1 955
b4006796 956 if (record_opts__config(&rec->opts)) {
39d17dac 957 err = -EINVAL;
03ad9747 958 goto out_symbol_exit;
7e4ff9e3
MG
959 }
960
d20deb64 961 err = __cmd_record(&record, argc, argv);
d65a458b 962out_symbol_exit:
45604710 963 perf_evlist__delete(rec->evlist);
d65a458b 964 symbol__exit();
39d17dac 965 return err;
0e9b20b8 966}