/*
 * builtin-record.c
 *
 * Builtin record command: Record the profile of a workload
 * (or a CPU, or a PID) into the perf.data output file - for
 * later analysis via perf report.
 */
#include "builtin.h"

#include "perf.h"

#include "util/build-id.h"
#include "util/util.h"
#include "util/parse-options.h"
#include "util/parse-events.h"

#include "util/header.h"
#include "util/event.h"
#include "util/evlist.h"
#include "util/evsel.h"
#include "util/debug.h"
#include "util/session.h"
#include "util/tool.h"
#include "util/symbol.h"
#include "util/cpumap.h"
#include "util/thread_map.h"
#include "util/data.h"

#include <unistd.h>
#include <sched.h>
#include <sys/mman.h>

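/*
 * Fallback: emulate on_exit() with atexit() on libcs that do not provide it
 * (HAVE_ON_EXIT_SUPPORT unset), stashing the exit code via the exit() wrapper
 * macro below.
 */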
#ifndef HAVE_ON_EXIT_SUPPORT
#ifndef ATEXIT_MAX
#define ATEXIT_MAX 32
#endif
static int __on_exit_count = 0;
typedef void (*on_exit_func_t) (int, void *);
static on_exit_func_t __on_exit_funcs[ATEXIT_MAX];
static void *__on_exit_args[ATEXIT_MAX];
static int __exitcode = 0;
static void __handle_on_exit_funcs(void);
static int on_exit(on_exit_func_t function, void *arg);
#define exit(x) (exit)(__exitcode = (x))

static int on_exit(on_exit_func_t function, void *arg)
{
	if (__on_exit_count == ATEXIT_MAX)
		return -ENOMEM;
	else if (__on_exit_count == 0)
		atexit(__handle_on_exit_funcs);
	__on_exit_funcs[__on_exit_count] = function;
	__on_exit_args[__on_exit_count++] = arg;
	return 0;
}

static void __handle_on_exit_funcs(void)
{
	int i;
	for (i = 0; i < __on_exit_count; i++)
		__on_exit_funcs[i] (__exitcode, __on_exit_args[i]);
}
#endif

struct record {
	struct perf_tool	tool;
	struct record_opts	opts;
	u64			bytes_written;
	struct perf_data_file	file;
	struct perf_evlist	*evlist;
	struct perf_session	*session;
	const char		*progname;
	int			realtime_prio;
	bool			no_buildid;
	bool			no_buildid_cache;
	long			samples;
};

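/* Append raw bytes to the output file and account them in bytes_written. */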
static int record__write(struct record *rec, void *bf, size_t size)
{
	if (perf_data_file__write(rec->session->file, bf, size) < 0) {
		pr_err("failed to write perf data, error: %m\n");
		return -1;
	}

	rec->bytes_written += size;
	return 0;
}

static int process_synthesized_event(struct perf_tool *tool,
				     union perf_event *event,
				     struct perf_sample *sample __maybe_unused,
				     struct machine *machine __maybe_unused)
{
	struct record *rec = container_of(tool, struct record, tool);
	return record__write(rec, event, event->header.size);
}

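/*
 * Drain one mmap ring buffer: copy everything between the old tail and the
 * current head to the output file, splitting the copy in two when the data
 * wraps around the end of the buffer, then publish the new tail.
 */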
static int record__mmap_read(struct record *rec, struct perf_mmap *md)
{
	unsigned int head = perf_mmap__read_head(md);
	unsigned int old = md->prev;
	unsigned char *data = md->base + page_size;
	unsigned long size;
	void *buf;
	int rc = 0;

	if (old == head)
		return 0;

	rec->samples++;

	size = head - old;

	if ((old & md->mask) + size != (head & md->mask)) {
		buf = &data[old & md->mask];
		size = md->mask + 1 - (old & md->mask);
		old += size;

		if (record__write(rec, buf, size) < 0) {
			rc = -1;
			goto out;
		}
	}

	buf = &data[old & md->mask];
	size = head - old;
	old += size;

	if (record__write(rec, buf, size) < 0) {
		rc = -1;
		goto out;
	}

	md->prev = old;
	perf_mmap__write_tail(md, old);

out:
	return rc;
}

static volatile int done = 0;
static volatile int signr = -1;
static volatile int child_finished = 0;

static void sig_handler(int sig)
{
	if (sig == SIGCHLD)
		child_finished = 1;

	done = 1;
	signr = sig;
}

static void record__sig_exit(int exit_status __maybe_unused, void *arg)
{
	struct record *rec = arg;
	int status;

	if (rec->evlist->workload.pid > 0) {
		if (!child_finished)
			kill(rec->evlist->workload.pid, SIGTERM);

		wait(&status);
		if (WIFSIGNALED(status))
			psignal(WTERMSIG(status), rec->progname);
	}

	if (signr == -1 || signr == SIGUSR1)
		return;

	signal(signr, SIG_DFL);
}

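/*
 * Configure and open all events in the evlist, falling back to simpler
 * attributes when the kernel rejects them, then apply event filters and
 * mmap the ring buffers.
 */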
static int record__open(struct record *rec)
{
	char msg[512];
	struct perf_evsel *pos;
	struct perf_evlist *evlist = rec->evlist;
	struct perf_session *session = rec->session;
	struct record_opts *opts = &rec->opts;
	int rc = 0;

	perf_evlist__config(evlist, opts);

	list_for_each_entry(pos, &evlist->entries, node) {
try_again:
		if (perf_evsel__open(pos, evlist->cpus, evlist->threads) < 0) {
			if (perf_evsel__fallback(pos, errno, msg, sizeof(msg))) {
				if (verbose)
					ui__warning("%s\n", msg);
				goto try_again;
			}

			rc = -errno;
			perf_evsel__open_strerror(pos, &opts->target,
						  errno, msg, sizeof(msg));
			ui__error("%s\n", msg);
			goto out;
		}
	}

	if (perf_evlist__apply_filters(evlist)) {
		error("failed to set filter with %d (%s)\n", errno,
			strerror(errno));
		rc = -1;
		goto out;
	}

	if (perf_evlist__mmap(evlist, opts->mmap_pages, false) < 0) {
		if (errno == EPERM) {
			pr_err("Permission error mapping pages.\n"
			       "Consider increasing "
			       "/proc/sys/kernel/perf_event_mlock_kb,\n"
			       "or try again with a smaller value of -m/--mmap_pages.\n"
			       "(current value: %u)\n", opts->mmap_pages);
			rc = -errno;
		} else {
			pr_err("failed to mmap with %d (%s)\n", errno, strerror(errno));
			rc = -errno;
		}
		goto out;
	}

	session->evlist = evlist;
	perf_session__set_id_hdr_size(session);
out:
	return rc;
}

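/* Walk the recorded events so that DSOs that got hits have their build-ids marked. */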
static int process_buildids(struct record *rec)
{
	struct perf_data_file *file = &rec->file;
	struct perf_session *session = rec->session;
	u64 start = session->header.data_offset;

	u64 size = lseek(file->fd, 0, SEEK_CUR);
	if (size == 0)
		return 0;

	return __perf_session__process_events(session, start,
					      size - start,
					      size, &build_id__mark_dso_hit_ops);
}

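/*
 * on_exit() handler: on a successful (non-pipe) run, finalize the data size,
 * collect build-ids and rewrite the perf.data header.
 */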
static void record__exit(int status, void *arg)
{
	struct record *rec = arg;
	struct perf_data_file *file = &rec->file;

	if (status != 0)
		return;

	if (!file->is_pipe) {
		rec->session->header.data_size += rec->bytes_written;

		if (!rec->no_buildid)
			process_buildids(rec);
		perf_session__write_header(rec->session, rec->evlist,
					   file->fd, true);
		perf_session__delete(rec->session);
		perf_evlist__delete(rec->evlist);
		symbol__exit();
	}
}

static void perf_event__synthesize_guest_os(struct machine *machine, void *data)
{
	int err;
	struct perf_tool *tool = data;
	/*
	 * For the guest kernel, when processing the record & report
	 * subcommands, we synthesize the module mmaps before the guest
	 * kernel mmap and trigger a DSO preload, because guest module
	 * symbols are loaded from the guest kallsyms instead of
	 * /lib/modules/XXX/XXX. This avoids missing symbols when the
	 * first address falls in a module rather than in the guest kernel.
	 */
	err = perf_event__synthesize_modules(tool, process_synthesized_event,
					     machine);
	if (err < 0)
		pr_err("Couldn't record guest kernel [%d]'s reference"
		       " relocation symbol.\n", machine->pid);

	/*
	 * We use _stext for the guest kernel because the guest kernel's
	 * /proc/kallsyms sometimes has no _text symbol.
	 */
	err = perf_event__synthesize_kernel_mmap(tool, process_synthesized_event,
						 machine, "_text");
	if (err < 0)
		err = perf_event__synthesize_kernel_mmap(tool, process_synthesized_event,
							 machine, "_stext");
	if (err < 0)
		pr_err("Couldn't record guest kernel [%d]'s reference"
		       " relocation symbol.\n", machine->pid);
}

static struct perf_event_header finished_round_event = {
	.size = sizeof(struct perf_event_header),
	.type = PERF_RECORD_FINISHED_ROUND,
};

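/*
 * Flush every mmapped ring buffer and, when tracing data is being recorded,
 * emit a PERF_RECORD_FINISHED_ROUND marker so that later processing can
 * safely reorder and flush the events seen so far.
 */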
static int record__mmap_read_all(struct record *rec)
{
	int i;
	int rc = 0;

	for (i = 0; i < rec->evlist->nr_mmaps; i++) {
		if (rec->evlist->mmap[i].base) {
			if (record__mmap_read(rec, &rec->evlist->mmap[i]) != 0) {
				rc = -1;
				goto out;
			}
		}
	}

	if (perf_header__has_feat(&rec->session->header, HEADER_TRACING_DATA))
		rc = record__write(rec, &finished_round_event, sizeof(finished_round_event));

out:
	return rc;
}

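/* Start with every header feature set, then clear the ones that do not apply. */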
static void record__init_features(struct record *rec)
{
	struct perf_session *session = rec->session;
	int feat;

	for (feat = HEADER_FIRST_FEATURE; feat < HEADER_LAST_FEATURE; feat++)
		perf_header__set_feat(&session->header, feat);

	if (rec->no_buildid)
		perf_header__clear_feat(&session->header, HEADER_BUILD_ID);

	if (!have_tracepoints(&rec->evlist->entries))
		perf_header__clear_feat(&session->header, HEADER_TRACING_DATA);

	if (!rec->opts.branch_stack)
		perf_header__clear_feat(&session->header, HEADER_BRANCH_STACK);
}

static volatile int workload_exec_errno;

/*
 * perf_evlist__prepare_workload will send a SIGUSR1
 * if the fork fails, since we asked by setting its
 * want_signal to true.
 */
static void workload_exec_failed_signal(int signo, siginfo_t *info,
					void *ucontext __maybe_unused)
{
	workload_exec_errno = info->si_value.sival_int;
	done = 1;
	signr = signo;
	child_finished = 1;
}

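/*
 * Main record loop: set up the session and output header, synthesize the
 * pre-existing kernel/module/thread state, kick off the workload and then
 * poll the mmaps until the workload exits or we get interrupted.
 */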
static int __cmd_record(struct record *rec, int argc, const char **argv)
{
	int err;
	unsigned long waking = 0;
	const bool forks = argc > 0;
	struct machine *machine;
	struct perf_tool *tool = &rec->tool;
	struct record_opts *opts = &rec->opts;
	struct perf_data_file *file = &rec->file;
	struct perf_session *session;
	bool disabled = false;

	rec->progname = argv[0];

	on_exit(record__sig_exit, rec);
	signal(SIGCHLD, sig_handler);
	signal(SIGINT, sig_handler);
	signal(SIGTERM, sig_handler);

	session = perf_session__new(file, false, NULL);
	if (session == NULL) {
		pr_err("Not enough memory for reading perf file header\n");
		return -1;
	}

	rec->session = session;

	record__init_features(rec);

	if (forks) {
		err = perf_evlist__prepare_workload(rec->evlist, &opts->target,
						    argv, file->is_pipe,
						    workload_exec_failed_signal);
		if (err < 0) {
			pr_err("Couldn't run the workload!\n");
			goto out_delete_session;
		}
	}

	if (record__open(rec) != 0) {
		err = -1;
		goto out_delete_session;
	}

	if (!rec->evlist->nr_groups)
		perf_header__clear_feat(&session->header, HEADER_GROUP_DESC);

	/*
	 * perf_session__delete(session) will be called at record__exit()
	 */
	on_exit(record__exit, rec);

	if (file->is_pipe) {
		err = perf_header__write_pipe(file->fd);
		if (err < 0)
			goto out_delete_session;
	} else {
		err = perf_session__write_header(session, rec->evlist,
						 file->fd, false);
		if (err < 0)
			goto out_delete_session;
	}

	if (!rec->no_buildid
	    && !perf_header__has_feat(&session->header, HEADER_BUILD_ID)) {
		pr_err("Couldn't generate buildids. "
		       "Use --no-buildid to profile anyway.\n");
		err = -1;
		goto out_delete_session;
	}

	machine = &session->machines.host;

	if (file->is_pipe) {
		err = perf_event__synthesize_attrs(tool, session,
						   process_synthesized_event);
		if (err < 0) {
			pr_err("Couldn't synthesize attrs.\n");
			goto out_delete_session;
		}

		if (have_tracepoints(&rec->evlist->entries)) {
			/*
			 * FIXME err <= 0 here actually means that
			 * there were no tracepoints, so it's not really
			 * an error, just that we don't need to
			 * synthesize anything. We really have to
			 * return this more properly and also
			 * propagate errors that now are calling die()
			 */
			err = perf_event__synthesize_tracing_data(tool, file->fd, rec->evlist,
								  process_synthesized_event);
			if (err <= 0) {
				pr_err("Couldn't record tracing data.\n");
				goto out_delete_session;
			}
			rec->bytes_written += err;
		}
	}

	err = perf_event__synthesize_kernel_mmap(tool, process_synthesized_event,
						 machine, "_text");
	if (err < 0)
		err = perf_event__synthesize_kernel_mmap(tool, process_synthesized_event,
							 machine, "_stext");
	if (err < 0)
		pr_err("Couldn't record kernel reference relocation symbol\n"
		       "Symbol resolution may be skewed if relocation was used (e.g. kexec).\n"
		       "Check /proc/kallsyms permission or run as root.\n");

	err = perf_event__synthesize_modules(tool, process_synthesized_event,
					     machine);
	if (err < 0)
		pr_err("Couldn't record kernel module information.\n"
		       "Symbol resolution may be skewed if relocation was used (e.g. kexec).\n"
		       "Check /proc/modules permission or run as root.\n");

	if (perf_guest) {
		machines__process_guests(&session->machines,
					 perf_event__synthesize_guest_os, tool);
	}

	err = __machine__synthesize_threads(machine, tool, &opts->target, rec->evlist->threads,
					    process_synthesized_event, opts->sample_address);
	if (err != 0)
		goto out_delete_session;

	if (rec->realtime_prio) {
		struct sched_param param;

		param.sched_priority = rec->realtime_prio;
		if (sched_setscheduler(0, SCHED_FIFO, &param)) {
			pr_err("Could not set realtime priority.\n");
			err = -1;
			goto out_delete_session;
		}
	}

	/*
	 * When perf is starting the traced process, all the events
	 * (apart from group members) have enable_on_exec=1 set,
	 * so don't spoil it by prematurely enabling them.
	 */
	if (!target__none(&opts->target))
		perf_evlist__enable(rec->evlist);

	/*
	 * Let the child rip
	 */
	if (forks)
		perf_evlist__start_workload(rec->evlist);

	for (;;) {
		int hits = rec->samples;

		if (record__mmap_read_all(rec) < 0) {
			err = -1;
			goto out_delete_session;
		}

		if (hits == rec->samples) {
			if (done)
				break;
			err = poll(rec->evlist->pollfd, rec->evlist->nr_fds, -1);
			waking++;
		}

		/*
		 * When perf is starting the traced process, at the end events
		 * die with the process and we wait for that. Thus no need to
		 * disable events in this case.
		 */
		if (done && !disabled && !target__none(&opts->target)) {
			perf_evlist__disable(rec->evlist);
			disabled = true;
		}
	}

	if (forks && workload_exec_errno) {
		char msg[512];
		const char *emsg = strerror_r(workload_exec_errno, msg, sizeof(msg));
		pr_err("Workload failed: %s\n", emsg);
		err = -1;
		goto out_delete_session;
	}

	if (quiet || signr == SIGUSR1)
		return 0;

	fprintf(stderr, "[ perf record: Woken up %ld times to write data ]\n", waking);

	/*
	 * Approximate RIP event size: 24 bytes.
	 */
	fprintf(stderr,
		"[ perf record: Captured and wrote %.3f MB %s (~%" PRIu64 " samples) ]\n",
		(double)rec->bytes_written / 1024.0 / 1024.0,
		file->path,
		rec->bytes_written / 24);

	return 0;

out_delete_session:
	perf_session__delete(session);
	return err;
}

#define BRANCH_OPT(n, m) \
	{ .name = n, .mode = (m) }

#define BRANCH_END { .name = NULL }

struct branch_mode {
	const char *name;
	int mode;
};

static const struct branch_mode branch_modes[] = {
	BRANCH_OPT("u", PERF_SAMPLE_BRANCH_USER),
	BRANCH_OPT("k", PERF_SAMPLE_BRANCH_KERNEL),
	BRANCH_OPT("hv", PERF_SAMPLE_BRANCH_HV),
	BRANCH_OPT("any", PERF_SAMPLE_BRANCH_ANY),
	BRANCH_OPT("any_call", PERF_SAMPLE_BRANCH_ANY_CALL),
	BRANCH_OPT("any_ret", PERF_SAMPLE_BRANCH_ANY_RETURN),
	BRANCH_OPT("ind_call", PERF_SAMPLE_BRANCH_IND_CALL),
	BRANCH_OPT("abort_tx", PERF_SAMPLE_BRANCH_ABORT_TX),
	BRANCH_OPT("in_tx", PERF_SAMPLE_BRANCH_IN_TX),
	BRANCH_OPT("no_tx", PERF_SAMPLE_BRANCH_NO_TX),
	BRANCH_END
};

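/*
 * Parse the comma separated branch filter list passed to -b/-j, OR the
 * matching PERF_SAMPLE_BRANCH_* bits into the option value and default to
 * "any" when only privilege level bits were given.
 */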
static int
parse_branch_stack(const struct option *opt, const char *str, int unset)
{
#define ONLY_PLM \
	(PERF_SAMPLE_BRANCH_USER	|\
	 PERF_SAMPLE_BRANCH_KERNEL	|\
	 PERF_SAMPLE_BRANCH_HV)

	uint64_t *mode = (uint64_t *)opt->value;
	const struct branch_mode *br;
	char *s, *os = NULL, *p;
	int ret = -1;

	if (unset)
		return 0;

	/*
	 * cannot set it twice, -b + --branch-filter for instance
	 */
	if (*mode)
		return -1;

	/* str may be NULL in case no arg is passed to -b */
	if (str) {
		/* because str is read-only */
		s = os = strdup(str);
		if (!s)
			return -1;

		for (;;) {
			p = strchr(s, ',');
			if (p)
				*p = '\0';

			for (br = branch_modes; br->name; br++) {
				if (!strcasecmp(s, br->name))
					break;
			}
			if (!br->name) {
				ui__warning("unknown branch filter %s,"
					    " check man page\n", s);
				goto error;
			}

			*mode |= br->mode;

			if (!p)
				break;

			s = p + 1;
		}
	}
	ret = 0;

	/* default to any branch */
	if ((*mode & ~ONLY_PLM) == 0) {
		*mode = PERF_SAMPLE_BRANCH_ANY;
	}
error:
	free(os);
	return ret;
}

#ifdef HAVE_LIBUNWIND_SUPPORT
static int get_stack_size(char *str, unsigned long *_size)
{
	char *endptr;
	unsigned long size;
	unsigned long max_size = round_down(USHRT_MAX, sizeof(u64));

	size = strtoul(str, &endptr, 0);

	do {
		if (*endptr)
			break;

		size = round_up(size, sizeof(u64));
		if (!size || size > max_size)
			break;

		*_size = size;
		return 0;

	} while (0);

	pr_err("callchain: Incorrect stack dump size (max %ld): %s\n",
	       max_size, str);
	return -1;
}
#endif /* HAVE_LIBUNWIND_SUPPORT */

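/*
 * Parse the --call-graph argument: "fp" selects frame-pointer based
 * callchains; with libunwind support, "dwarf[,dump_size]" selects DWARF
 * unwinding with an optional user stack dump size.
 */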
int record_parse_callchain(const char *arg, struct record_opts *opts)
{
	char *tok, *name, *saveptr = NULL;
	char *buf;
	int ret = -1;

	/* We need a buffer that we know we can write to. */
	buf = malloc(strlen(arg) + 1);
	if (!buf)
		return -ENOMEM;

	strcpy(buf, arg);

	tok = strtok_r((char *)buf, ",", &saveptr);
	name = tok ? : (char *)buf;

	do {
		/* Framepointer style */
		if (!strncmp(name, "fp", sizeof("fp"))) {
			if (!strtok_r(NULL, ",", &saveptr)) {
				opts->call_graph = CALLCHAIN_FP;
				ret = 0;
			} else
				pr_err("callchain: No more arguments "
				       "needed for -g fp\n");
			break;

#ifdef HAVE_LIBUNWIND_SUPPORT
		/* Dwarf style */
		} else if (!strncmp(name, "dwarf", sizeof("dwarf"))) {
			const unsigned long default_stack_dump_size = 8192;

			ret = 0;
			opts->call_graph = CALLCHAIN_DWARF;
			opts->stack_dump_size = default_stack_dump_size;

			tok = strtok_r(NULL, ",", &saveptr);
			if (tok) {
				unsigned long size = 0;

				ret = get_stack_size(tok, &size);
				opts->stack_dump_size = size;
			}
#endif /* HAVE_LIBUNWIND_SUPPORT */
		} else {
			pr_err("callchain: Unknown --call-graph option "
			       "value: %s\n", arg);
			break;
		}

	} while (0);

	free(buf);
	return ret;
}

static void callchain_debug(struct record_opts *opts)
{
	pr_debug("callchain: type %d\n", opts->call_graph);

	if (opts->call_graph == CALLCHAIN_DWARF)
		pr_debug("callchain: stack dump size %d\n",
			 opts->stack_dump_size);
}

int record_parse_callchain_opt(const struct option *opt,
			       const char *arg,
			       int unset)
{
	struct record_opts *opts = opt->value;
	int ret;

	/* --no-call-graph */
	if (unset) {
		opts->call_graph = CALLCHAIN_NONE;
		pr_debug("callchain: disabled\n");
		return 0;
	}

	ret = record_parse_callchain(arg, opts);
	if (!ret)
		callchain_debug(opts);

	return ret;
}

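/*
 * -g takes no argument here: enable callchain recording with the
 * frame-pointer default unless --call-graph already chose a mode.
 */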
int record_callchain_opt(const struct option *opt,
			 const char *arg __maybe_unused,
			 int unset __maybe_unused)
{
	struct record_opts *opts = opt->value;

	if (opts->call_graph == CALLCHAIN_NONE)
		opts->call_graph = CALLCHAIN_FP;

	callchain_debug(opts);
	return 0;
}

static const char * const record_usage[] = {
	"perf record [<options>] [<command>]",
	"perf record [<options>] -- <command> [<options>]",
	NULL
};

/*
 * XXX Ideally this would be local to cmd_record() and passed to a record__new,
 * because we need to have access to it in record__exit(), which is called
 * after cmd_record() exits, but since record_options needs to be accessible to
 * builtin-script, leave it here.
 *
 * At least we don't touch it in all the other functions here directly.
 *
 * Just say no to tons of global variables, sigh.
 */
static struct record record = {
	.opts = {
		.mmap_pages	     = UINT_MAX,
		.user_freq	     = UINT_MAX,
		.user_interval	     = ULLONG_MAX,
		.freq		     = 4000,
		.target		     = {
			.uses_mmap   = true,
			.default_per_cpu = true,
		},
	},
};

#define CALLCHAIN_HELP "set up and enable call-graph (stack chain/backtrace) recording: "

#ifdef HAVE_LIBUNWIND_SUPPORT
const char record_callchain_help[] = CALLCHAIN_HELP "fp dwarf";
#else
const char record_callchain_help[] = CALLCHAIN_HELP "fp";
#endif

/*
 * XXX Will stay a global variable till we fix builtin-script.c to stop messing
 * with it and switch to using the library functions in perf_evlist that came
 * from builtin-record.c, i.e. use record_opts,
 * perf_evlist__prepare_workload, etc instead of fork+exec'ing 'perf record',
 * using pipes, etc.
 */
const struct option record_options[] = {
	OPT_CALLBACK('e', "event", &record.evlist, "event",
		     "event selector. use 'perf list' to list available events",
		     parse_events_option),
	OPT_CALLBACK(0, "filter", &record.evlist, "filter",
		     "event filter", parse_filter),
	OPT_STRING('p', "pid", &record.opts.target.pid, "pid",
		   "record events on existing process id"),
	OPT_STRING('t', "tid", &record.opts.target.tid, "tid",
		   "record events on existing thread id"),
	OPT_INTEGER('r', "realtime", &record.realtime_prio,
		    "collect data with this RT SCHED_FIFO priority"),
	OPT_BOOLEAN('D', "no-delay", &record.opts.no_delay,
		    "collect data without buffering"),
	OPT_BOOLEAN('R', "raw-samples", &record.opts.raw_samples,
		    "collect raw sample records from all opened counters"),
	OPT_BOOLEAN('a', "all-cpus", &record.opts.target.system_wide,
		    "system-wide collection from all CPUs"),
	OPT_STRING('C', "cpu", &record.opts.target.cpu_list, "cpu",
		   "list of cpus to monitor"),
	OPT_U64('c', "count", &record.opts.user_interval, "event period to sample"),
	OPT_STRING('o', "output", &record.file.path, "file",
		   "output file name"),
	OPT_BOOLEAN_SET('i', "no-inherit", &record.opts.no_inherit,
			&record.opts.no_inherit_set,
			"child tasks do not inherit counters"),
	OPT_UINTEGER('F', "freq", &record.opts.user_freq, "profile at this frequency"),
	OPT_CALLBACK('m', "mmap-pages", &record.opts.mmap_pages, "pages",
		     "number of mmap data pages",
		     perf_evlist__parse_mmap_pages),
	OPT_BOOLEAN(0, "group", &record.opts.group,
		    "put the counters into a counter group"),
	OPT_CALLBACK_NOOPT('g', NULL, &record.opts,
			   NULL, "enable call-graph recording",
			   &record_callchain_opt),
	OPT_CALLBACK(0, "call-graph", &record.opts,
		     "mode[,dump_size]", record_callchain_help,
		     &record_parse_callchain_opt),
	OPT_INCR('v', "verbose", &verbose,
		 "be more verbose (show counter open errors, etc)"),
	OPT_BOOLEAN('q', "quiet", &quiet, "don't print any message"),
	OPT_BOOLEAN('s', "stat", &record.opts.inherit_stat,
		    "per thread counts"),
	OPT_BOOLEAN('d', "data", &record.opts.sample_address,
		    "Sample addresses"),
	OPT_BOOLEAN('T', "timestamp", &record.opts.sample_time, "Sample timestamps"),
	OPT_BOOLEAN('P', "period", &record.opts.period, "Sample period"),
	OPT_BOOLEAN('n', "no-samples", &record.opts.no_samples,
		    "don't sample"),
	OPT_BOOLEAN('N', "no-buildid-cache", &record.no_buildid_cache,
		    "do not update the buildid cache"),
	OPT_BOOLEAN('B', "no-buildid", &record.no_buildid,
		    "do not collect buildids in perf.data"),
	OPT_CALLBACK('G', "cgroup", &record.evlist, "name",
		     "monitor event in cgroup name only",
		     parse_cgroups),
	OPT_STRING('u', "uid", &record.opts.target.uid_str, "user",
		   "user to profile"),

	OPT_CALLBACK_NOOPT('b', "branch-any", &record.opts.branch_stack,
			   "branch any", "sample any taken branches",
			   parse_branch_stack),

	OPT_CALLBACK('j', "branch-filter", &record.opts.branch_stack,
		     "branch filter mask", "branch stack filter modes",
		     parse_branch_stack),
	OPT_BOOLEAN('W', "weight", &record.opts.sample_weight,
		    "sample by weight (on special events only)"),
	OPT_BOOLEAN(0, "transaction", &record.opts.sample_transaction,
		    "sample transaction flags (special events only)"),
	OPT_BOOLEAN(0, "per-thread", &record.opts.target.per_thread,
		    "use per-thread mmaps"),
	OPT_END()
};

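/*
 * Entry point for "perf record": parse the command line, validate the
 * target, build the evlist and its maps, then hand off to __cmd_record().
 */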
int cmd_record(int argc, const char **argv, const char *prefix __maybe_unused)
{
	int err = -ENOMEM;
	struct record *rec = &record;
	char errbuf[BUFSIZ];

	rec->evlist = perf_evlist__new();
	if (rec->evlist == NULL)
		return -ENOMEM;

	argc = parse_options(argc, argv, record_options, record_usage,
			    PARSE_OPT_STOP_AT_NON_OPTION);
	if (!argc && target__none(&rec->opts.target))
		usage_with_options(record_usage, record_options);

	if (nr_cgroups && !rec->opts.target.system_wide) {
		ui__error("cgroup monitoring only available in"
			  " system-wide mode\n");
		usage_with_options(record_usage, record_options);
	}

	symbol__init();

	if (symbol_conf.kptr_restrict)
		pr_warning(
"WARNING: Kernel address maps (/proc/{kallsyms,modules}) are restricted,\n"
"check /proc/sys/kernel/kptr_restrict.\n\n"
"Samples in kernel functions may not be resolved if a suitable vmlinux\n"
"file is not found in the buildid cache or in the vmlinux path.\n\n"
"Samples in kernel modules won't be resolved at all.\n\n"
"If some relocation was applied (e.g. kexec) symbols may be misresolved\n"
"even with a suitable vmlinux or kallsyms file.\n\n");

	if (rec->no_buildid_cache || rec->no_buildid)
		disable_buildid_cache();

	if (rec->evlist->nr_entries == 0 &&
	    perf_evlist__add_default(rec->evlist) < 0) {
		pr_err("Not enough memory for event selector list\n");
		goto out_symbol_exit;
	}

	if (rec->opts.target.tid && !rec->opts.no_inherit_set)
		rec->opts.no_inherit = true;

	err = target__validate(&rec->opts.target);
	if (err) {
		target__strerror(&rec->opts.target, err, errbuf, BUFSIZ);
		ui__warning("%s", errbuf);
	}

	err = target__parse_uid(&rec->opts.target);
	if (err) {
		int saved_errno = errno;

		target__strerror(&rec->opts.target, err, errbuf, BUFSIZ);
		ui__error("%s", errbuf);

		err = -saved_errno;
		goto out_symbol_exit;
	}

	err = -ENOMEM;
	if (perf_evlist__create_maps(rec->evlist, &rec->opts.target) < 0)
		usage_with_options(record_usage, record_options);

	if (record_opts__config(&rec->opts)) {
		err = -EINVAL;
		goto out_free_fd;
	}

	err = __cmd_record(&record, argc, argv);

	perf_evlist__munmap(rec->evlist);
	perf_evlist__close(rec->evlist);
out_free_fd:
	perf_evlist__delete_maps(rec->evlist);
out_symbol_exit:
	symbol__exit();
	return err;
}