/*
 * builtin-stat.c
 *
 * Builtin stat command: Give a precise performance counters summary
 * overview about any workload, CPU or specific PID.
 *
 * Sample output:

   $ perf stat ./hackbench 10

  Time: 0.118

  Performance counter stats for './hackbench 10':

      1708.761321 task-clock                #   11.037 CPUs utilized
           41,190 context-switches          #    0.024 M/sec
            6,735 CPU-migrations            #    0.004 M/sec
           17,318 page-faults               #    0.010 M/sec
    5,205,202,243 cycles                    #    3.046 GHz
    3,856,436,920 stalled-cycles-frontend   #   74.09% frontend cycles idle
    1,600,790,871 stalled-cycles-backend    #   30.75% backend cycles idle
    2,603,501,247 instructions              #    0.50  insns per cycle
                                            #    1.48  stalled cycles per insn
      484,357,498 branches                  #  283.455 M/sec
        6,388,934 branch-misses             #    1.32% of all branches

      0.154822978 seconds time elapsed

 *
 * Copyright (C) 2008-2011, Red Hat Inc, Ingo Molnar <mingo@redhat.com>
 *
 * Improvements and fixes by:
 *
 *   Arjan van de Ven <arjan@linux.intel.com>
 *   Yanmin Zhang <yanmin.zhang@intel.com>
 *   Wu Fengguang <fengguang.wu@intel.com>
 *   Mike Galbraith <efault@gmx.de>
 *   Paul Mackerras <paulus@samba.org>
 *   Jaswinder Singh Rajput <jaswinder@kernel.org>
 *
 * Released under the GPL v2. (and only v2, not any later version)
 */

#include "perf.h"
#include "builtin.h"
#include "util/cgroup.h"
#include "util/util.h"
#include <subcmd/parse-options.h>
#include "util/parse-events.h"
#include "util/pmu.h"
#include "util/event.h"
#include "util/evlist.h"
#include "util/evsel.h"
#include "util/debug.h"
#include "util/color.h"
#include "util/stat.h"
#include "util/header.h"
#include "util/cpumap.h"
#include "util/thread.h"
#include "util/thread_map.h"
#include "util/counts.h"
#include "util/session.h"

#include <stdlib.h>
#include <sys/prctl.h>
#include <locale.h>

#define DEFAULT_SEPARATOR	" "
#define CNTR_NOT_SUPPORTED	"<not supported>"
#define CNTR_NOT_COUNTED	"<not counted>"

static void print_counters(struct timespec *ts, int argc, const char **argv);

/* Default events used for perf stat -T */
static const char *transaction_attrs = {
	"task-clock,"
	"{"
	"instructions,"
	"cycles,"
	"cpu/cycles-t/,"
	"cpu/tx-start/,"
	"cpu/el-start/,"
	"cpu/cycles-ct/"
	"}"
};

/* More limited version when the CPU does not have all events. */
static const char *transaction_limited_attrs = {
	"task-clock,"
	"{"
	"instructions,"
	"cycles,"
	"cpu/cycles-t/,"
	"cpu/tx-start/"
	"}"
};

static struct perf_evlist *evsel_list;

static struct target target = {
	.uid = UINT_MAX,
};

typedef int (*aggr_get_id_t)(struct cpu_map *m, int cpu);

static int run_count = 1;
static bool no_inherit = false;
static volatile pid_t child_pid = -1;
static bool null_run = false;
static int detailed_run = 0;
static bool transaction_run;
static bool big_num = true;
static int big_num_opt = -1;
static const char *csv_sep = NULL;
static bool csv_output = false;
static bool group = false;
static const char *pre_cmd = NULL;
static const char *post_cmd = NULL;
static bool sync_run = false;
static unsigned int initial_delay = 0;
static unsigned int unit_width = 4; /* strlen("unit") */
static bool forever = false;
static struct timespec ref_time;
static struct cpu_map *aggr_map;
static aggr_get_id_t aggr_get_id;
static bool append_file;
static const char *output_name;
static int output_fd;

struct perf_stat {
	bool record;
	struct perf_data_file file;
	struct perf_session *session;
	u64 bytes_written;
};

static struct perf_stat perf_stat;
#define STAT_RECORD perf_stat.record

static volatile int done = 0;

static struct perf_stat_config stat_config = {
	.aggr_mode	= AGGR_GLOBAL,
	.scale		= true,
};

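/*
 * Compute r = a - b for two timespecs, borrowing one second from
 * tv_sec when the nanosecond field would otherwise underflow.
 */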
static inline void diff_timespec(struct timespec *r, struct timespec *a,
				 struct timespec *b)
{
	r->tv_sec = a->tv_sec - b->tv_sec;
	if (a->tv_nsec < b->tv_nsec) {
		r->tv_nsec = a->tv_nsec + 1000000000L - b->tv_nsec;
		r->tv_sec--;
	} else {
		r->tv_nsec = a->tv_nsec - b->tv_nsec;
	}
}

static void perf_stat__reset_stats(void)
{
	perf_evlist__reset_stats(evsel_list);
	perf_stat__reset_shadow_stats();
}

static int create_perf_stat_counter(struct perf_evsel *evsel)
{
	struct perf_event_attr *attr = &evsel->attr;

	if (stat_config.scale)
		attr->read_format = PERF_FORMAT_TOTAL_TIME_ENABLED |
				    PERF_FORMAT_TOTAL_TIME_RUNNING;

	attr->inherit = !no_inherit;

	/*
	 * Some events get initialized with sample_(period/type) set,
	 * like tracepoints. Clear it up for counting.
	 */
	attr->sample_period = 0;
	/*
	 * Setting sample_type to PERF_SAMPLE_IDENTIFIER should be harmless
	 * and avoids confusing messages from older tools.
	 */
	attr->sample_type = PERF_SAMPLE_IDENTIFIER;

	/*
	 * Disable all counters initially; they will be enabled
	 * either manually by us or by the kernel via enable_on_exec
	 * set later.
	 */
	if (perf_evsel__is_group_leader(evsel)) {
		attr->disabled = 1;

		/*
		 * In case of initial_delay we enable tracee
		 * events manually.
		 */
		if (target__none(&target) && !initial_delay)
			attr->enable_on_exec = 1;
	}

	if (target__has_cpu(&target))
		return perf_evsel__open_per_cpu(evsel, perf_evsel__cpus(evsel));

	return perf_evsel__open_per_thread(evsel, evsel_list->threads);
}

/*
 * Does the counter have nsecs as a unit?
 */
static inline int nsec_counter(struct perf_evsel *evsel)
{
	if (perf_evsel__match(evsel, SOFTWARE, SW_CPU_CLOCK) ||
	    perf_evsel__match(evsel, SOFTWARE, SW_TASK_CLOCK))
		return 1;

	return 0;
}

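/*
 * Write a synthesized side-band event into the perf.data file that
 * 'perf stat record' is producing, accounting the bytes written.
 */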
static int process_synthesized_event(struct perf_tool *tool __maybe_unused,
				     union perf_event *event,
				     struct perf_sample *sample __maybe_unused,
				     struct machine *machine __maybe_unused)
{
	if (perf_data_file__write(&perf_stat.file, event, event->header.size) < 0) {
		pr_err("failed to write perf data, error: %m\n");
		return -1;
	}

	perf_stat.bytes_written += event->header.size;
	return 0;
}

/*
 * Read out the results of a single counter:
 * do not aggregate counts across CPUs in system-wide mode
 */
static int read_counter(struct perf_evsel *counter)
{
	int nthreads = thread_map__nr(evsel_list->threads);
	int ncpus = perf_evsel__nr_cpus(counter);
	int cpu, thread;

	if (!counter->supported)
		return -ENOENT;

	if (counter->system_wide)
		nthreads = 1;

	for (thread = 0; thread < nthreads; thread++) {
		for (cpu = 0; cpu < ncpus; cpu++) {
			struct perf_counts_values *count;

			count = perf_counts(counter->counts, cpu, thread);
			if (perf_evsel__read(counter, cpu, thread, count))
				return -1;
		}
	}

	return 0;
}

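/*
 * Read and process all counters; when close_counters is true this is
 * the final read of the run, so the counter fds are closed afterwards.
 */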
static void read_counters(bool close_counters)
{
	struct perf_evsel *counter;

	evlist__for_each(evsel_list, counter) {
		if (read_counter(counter))
			pr_debug("failed to read counter %s\n", counter->name);

		if (perf_stat_process_counter(&stat_config, counter))
			pr_warning("failed to process counter %s\n", counter->name);

		if (close_counters) {
			perf_evsel__close_fd(counter, perf_evsel__nr_cpus(counter),
					     thread_map__nr(evsel_list->threads));
		}
	}
}

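/*
 * One -I interval tick: read the counters and print a line of output
 * stamped with the time elapsed since ref_time.
 */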
static void process_interval(void)
{
	struct timespec ts, rs;

	read_counters(false);

	clock_gettime(CLOCK_MONOTONIC, &ts);
	diff_timespec(&rs, &ts, &ref_time);

	print_counters(&rs, 0, NULL);
}

static void enable_counters(void)
{
	if (initial_delay)
		usleep(initial_delay * 1000);

	/*
	 * We need to enable counters only if:
	 * - we don't have a tracee (we are attaching to an existing task or cpu)
	 * - we have an initial delay configured
	 */
	if (!target__none(&target) || initial_delay)
		perf_evlist__enable(evsel_list);
}

static volatile int workload_exec_errno;

/*
 * perf_evlist__prepare_workload will send a SIGUSR1
 * if the fork fails, since we asked by setting its
 * want_signal to true.
 */
static void workload_exec_failed_signal(int signo __maybe_unused, siginfo_t *info,
					void *ucontext __maybe_unused)
{
	workload_exec_errno = info->si_value.sival_int;
}

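/*
 * Synthesize the non-sample records a later 'perf stat report' needs to
 * interpret the data: the thread map, the cpu map and the stat config.
 */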
static int perf_stat_synthesize_config(void)
{
	int err;

	err = perf_event__synthesize_thread_map2(NULL, evsel_list->threads,
						 process_synthesized_event,
						 NULL);
	if (err < 0) {
		pr_err("Couldn't synthesize thread map.\n");
		return err;
	}

	err = perf_event__synthesize_cpu_map(NULL, evsel_list->cpus,
					     process_synthesized_event, NULL);
	if (err < 0) {
		pr_err("Couldn't synthesize cpu map.\n");
		return err;
	}

	err = perf_event__synthesize_stat_config(NULL, &stat_config,
						 process_synthesized_event, NULL);
	if (err < 0) {
		pr_err("Couldn't synthesize config.\n");
		return err;
	}

	return 0;
}

static int __run_perf_stat(int argc, const char **argv)
{
	int interval = stat_config.interval;
	char msg[512];
	unsigned long long t0, t1;
	struct perf_evsel *counter;
	struct timespec ts;
	size_t l;
	int status = 0;
	const bool forks = (argc > 0);

	if (interval) {
		ts.tv_sec = interval / 1000;
		ts.tv_nsec = (interval % 1000) * 1000000;
	} else {
		ts.tv_sec = 1;
		ts.tv_nsec = 0;
	}

	if (forks) {
		if (perf_evlist__prepare_workload(evsel_list, &target, argv, false,
						  workload_exec_failed_signal) < 0) {
			perror("failed to prepare workload");
			return -1;
		}
		child_pid = evsel_list->workload.pid;
	}

	if (group)
		perf_evlist__set_leader(evsel_list);

	evlist__for_each(evsel_list, counter) {
		if (create_perf_stat_counter(counter) < 0) {
			/*
			 * PPC returns ENXIO for HW counters until 2.6.37
			 * (behavior changed with commit b0a873e).
			 */
			if (errno == EINVAL || errno == ENOSYS ||
			    errno == ENOENT || errno == EOPNOTSUPP ||
			    errno == ENXIO) {
				if (verbose)
					ui__warning("%s event is not supported by the kernel.\n",
						    perf_evsel__name(counter));
				counter->supported = false;

				if ((counter->leader != counter) ||
				    !(counter->leader->nr_members > 1))
					continue;
			}

			perf_evsel__open_strerror(counter, &target,
						  errno, msg, sizeof(msg));
			ui__error("%s\n", msg);

			if (child_pid != -1)
				kill(child_pid, SIGTERM);

			return -1;
		}
		counter->supported = true;

		l = strlen(counter->unit);
		if (l > unit_width)
			unit_width = l;
	}

	if (perf_evlist__apply_filters(evsel_list, &counter)) {
		error("failed to set filter \"%s\" on event %s with %d (%s)\n",
			counter->filter, perf_evsel__name(counter), errno,
			strerror_r(errno, msg, sizeof(msg)));
		return -1;
	}

	if (STAT_RECORD) {
		int err, fd = perf_data_file__fd(&perf_stat.file);

		err = perf_session__write_header(perf_stat.session, evsel_list,
						 fd, false);
		if (err < 0)
			return err;

		err = perf_stat_synthesize_config();
		if (err < 0)
			return err;
	}

	/*
	 * Enable counters and exec the command:
	 */
	t0 = rdclock();
	clock_gettime(CLOCK_MONOTONIC, &ref_time);

	if (forks) {
		perf_evlist__start_workload(evsel_list);
		enable_counters();

		if (interval) {
			while (!waitpid(child_pid, &status, WNOHANG)) {
				nanosleep(&ts, NULL);
				process_interval();
			}
		}
		wait(&status);

		if (workload_exec_errno) {
			const char *emsg = strerror_r(workload_exec_errno, msg, sizeof(msg));
			pr_err("Workload failed: %s\n", emsg);
			return -1;
		}

		if (WIFSIGNALED(status))
			psignal(WTERMSIG(status), argv[0]);
	} else {
		enable_counters();
		while (!done) {
			nanosleep(&ts, NULL);
			if (interval)
				process_interval();
		}
	}

	t1 = rdclock();

	update_stats(&walltime_nsecs_stats, t1 - t0);

	read_counters(true);

	return WEXITSTATUS(status);
}

static int run_perf_stat(int argc, const char **argv)
{
	int ret;

	if (pre_cmd) {
		ret = system(pre_cmd);
		if (ret)
			return ret;
	}

	if (sync_run)
		sync();

	ret = __run_perf_stat(argc, argv);
	if (ret)
		return ret;

	if (post_cmd) {
		ret = system(post_cmd);
		if (ret)
			return ret;
	}

	return ret;
}

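/*
 * Show how long the counter was actually running relative to the time
 * it was enabled; anything below 100% means the kernel had to
 * multiplex the counter with others on the same PMU.
 */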
static void print_running(u64 run, u64 ena)
{
	if (csv_output) {
		fprintf(stat_config.output, "%s%" PRIu64 "%s%.2f",
			csv_sep,
			run,
			csv_sep,
			ena ? 100.0 * run / ena : 100.0);
	} else if (run != ena) {
		fprintf(stat_config.output, " (%.2f%%)", 100.0 * run / ena);
	}
}

static void print_noise_pct(double total, double avg)
{
	double pct = rel_stddev_stats(total, avg);

	if (csv_output)
		fprintf(stat_config.output, "%s%.2f%%", csv_sep, pct);
	else if (pct)
		fprintf(stat_config.output, " ( +-%6.2f%% )", pct);
}

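/*
 * Print the run-to-run variation (stddev as a percentage of the mean)
 * when the workload was repeated with -r.
 */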
static void print_noise(struct perf_evsel *evsel, double avg)
{
	struct perf_stat_evsel *ps;

	if (run_count == 1)
		return;

	ps = evsel->priv;
	print_noise_pct(stddev_stats(&ps->res_stats[0]), avg);
}

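/* Print the aggregation prefix (socket, core, CPU or thread id) for one line. */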
static void aggr_printout(struct perf_evsel *evsel, int id, int nr)
{
	switch (stat_config.aggr_mode) {
	case AGGR_CORE:
		fprintf(stat_config.output, "S%d-C%*d%s%*d%s",
			cpu_map__id_to_socket(id),
			csv_output ? 0 : -8,
			cpu_map__id_to_cpu(id),
			csv_sep,
			csv_output ? 0 : 4,
			nr,
			csv_sep);
		break;
	case AGGR_SOCKET:
		fprintf(stat_config.output, "S%*d%s%*d%s",
			csv_output ? 0 : -5,
			id,
			csv_sep,
			csv_output ? 0 : 4,
			nr,
			csv_sep);
		break;
	case AGGR_NONE:
		fprintf(stat_config.output, "CPU%*d%s",
			csv_output ? 0 : -4,
			perf_evsel__cpus(evsel)->map[id], csv_sep);
		break;
	case AGGR_THREAD:
		fprintf(stat_config.output, "%*s-%*d%s",
			csv_output ? 0 : 16,
			thread_map__comm(evsel->threads, id),
			csv_output ? 0 : -8,
			thread_map__pid(evsel->threads, id),
			csv_sep);
		break;
	case AGGR_GLOBAL:
	case AGGR_UNSET:
	default:
		break;
	}
}

static void nsec_printout(int id, int nr, struct perf_evsel *evsel, double avg)
{
	FILE *output = stat_config.output;
	double msecs = avg / 1e6;
	const char *fmt_v, *fmt_n;
	char name[25];

	fmt_v = csv_output ? "%.6f%s" : "%18.6f%s";
	fmt_n = csv_output ? "%s" : "%-25s";

	aggr_printout(evsel, id, nr);

	scnprintf(name, sizeof(name), "%s%s",
		  perf_evsel__name(evsel), csv_output ? "" : " (msec)");

	fprintf(output, fmt_v, msecs, csv_sep);

	if (csv_output)
		fprintf(output, "%s%s", evsel->unit, csv_sep);
	else
		fprintf(output, "%-*s%s", unit_width, evsel->unit, csv_sep);

	fprintf(output, fmt_n, name);

	if (evsel->cgrp)
		fprintf(output, "%s%s", csv_sep, evsel->cgrp->name);
}

static void abs_printout(int id, int nr, struct perf_evsel *evsel, double avg)
{
	FILE *output = stat_config.output;
	double sc = evsel->scale;
	const char *fmt;

	if (csv_output) {
		fmt = sc != 1.0 ? "%.2f%s" : "%.0f%s";
	} else {
		if (big_num)
			fmt = sc != 1.0 ? "%'18.2f%s" : "%'18.0f%s";
		else
			fmt = sc != 1.0 ? "%18.2f%s" : "%18.0f%s";
	}

	aggr_printout(evsel, id, nr);

	fprintf(output, fmt, avg, csv_sep);

	if (evsel->unit)
		fprintf(output, "%-*s%s",
			csv_output ? 0 : unit_width,
			evsel->unit, csv_sep);

	fprintf(output, "%-*s", csv_output ? 0 : 25, perf_evsel__name(evsel));

	if (evsel->cgrp)
		fprintf(output, "%s%s", csv_sep, evsel->cgrp->name);
}

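/*
 * Print one counter value, picking the msec layout for nsec-based
 * counters and the plain layout otherwise, then append the shadow
 * stats (IPC, GHz, ...) computed from related counters.
 */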
static void printout(int id, int nr, struct perf_evsel *counter, double uval)
{
	int cpu = cpu_map__id_to_cpu(id);

	if (stat_config.aggr_mode == AGGR_GLOBAL)
		cpu = 0;

	if (nsec_counter(counter))
		nsec_printout(id, nr, counter, uval);
	else
		abs_printout(id, nr, counter, uval);

	if (!csv_output && !stat_config.interval)
		perf_stat__print_shadow_stats(stat_config.output, counter,
					      uval, cpu,
					      stat_config.aggr_mode);
}

static void print_aggr(char *prefix)
{
	FILE *output = stat_config.output;
	struct perf_evsel *counter;
	int cpu, s, s2, id, nr;
	double uval;
	u64 ena, run, val;

	if (!(aggr_map || aggr_get_id))
		return;

	for (s = 0; s < aggr_map->nr; s++) {
		id = aggr_map->map[s];
		evlist__for_each(evsel_list, counter) {
			val = ena = run = 0;
			nr = 0;
			for (cpu = 0; cpu < perf_evsel__nr_cpus(counter); cpu++) {
				s2 = aggr_get_id(perf_evsel__cpus(counter), cpu);
				if (s2 != id)
					continue;
				val += perf_counts(counter->counts, cpu, 0)->val;
				ena += perf_counts(counter->counts, cpu, 0)->ena;
				run += perf_counts(counter->counts, cpu, 0)->run;
				nr++;
			}
			if (prefix)
				fprintf(output, "%s", prefix);

			if (run == 0 || ena == 0) {
				aggr_printout(counter, id, nr);

				fprintf(output, "%*s%s",
					csv_output ? 0 : 18,
					counter->supported ? CNTR_NOT_COUNTED : CNTR_NOT_SUPPORTED,
					csv_sep);

				fprintf(output, "%-*s%s",
					csv_output ? 0 : unit_width,
					counter->unit, csv_sep);

				fprintf(output, "%*s",
					csv_output ? 0 : -25,
					perf_evsel__name(counter));

				if (counter->cgrp)
					fprintf(output, "%s%s",
						csv_sep, counter->cgrp->name);

				print_running(run, ena);
				fputc('\n', output);
				continue;
			}
			uval = val * counter->scale;
			printout(id, nr, counter, uval);
			if (!csv_output)
				print_noise(counter, 1.0);

			print_running(run, ena);
			fputc('\n', output);
		}
	}
}

static void print_aggr_thread(struct perf_evsel *counter, char *prefix)
{
	FILE *output = stat_config.output;
	int nthreads = thread_map__nr(counter->threads);
	int ncpus = cpu_map__nr(counter->cpus);
	int cpu, thread;
	double uval;

	for (thread = 0; thread < nthreads; thread++) {
		u64 ena = 0, run = 0, val = 0;

		for (cpu = 0; cpu < ncpus; cpu++) {
			val += perf_counts(counter->counts, cpu, thread)->val;
			ena += perf_counts(counter->counts, cpu, thread)->ena;
			run += perf_counts(counter->counts, cpu, thread)->run;
		}

		if (prefix)
			fprintf(output, "%s", prefix);

		uval = val * counter->scale;
		printout(thread, 0, counter, uval);

		if (!csv_output)
			print_noise(counter, 1.0);

		print_running(run, ena);
		fputc('\n', output);
	}
}

/*
 * Print out the results of a single counter:
 * aggregated counts in system-wide mode
 */
static void print_counter_aggr(struct perf_evsel *counter, char *prefix)
{
	FILE *output = stat_config.output;
	struct perf_stat_evsel *ps = counter->priv;
	double avg = avg_stats(&ps->res_stats[0]);
	int scaled = counter->counts->scaled;
	double uval;
	double avg_enabled, avg_running;

	avg_enabled = avg_stats(&ps->res_stats[1]);
	avg_running = avg_stats(&ps->res_stats[2]);

	if (prefix)
		fprintf(output, "%s", prefix);

	if (scaled == -1 || !counter->supported) {
		fprintf(output, "%*s%s",
			csv_output ? 0 : 18,
			counter->supported ? CNTR_NOT_COUNTED : CNTR_NOT_SUPPORTED,
			csv_sep);
		fprintf(output, "%-*s%s",
			csv_output ? 0 : unit_width,
			counter->unit, csv_sep);
		fprintf(output, "%*s",
			csv_output ? 0 : -25,
			perf_evsel__name(counter));

		if (counter->cgrp)
			fprintf(output, "%s%s", csv_sep, counter->cgrp->name);

		print_running(avg_running, avg_enabled);
		fputc('\n', output);
		return;
	}

	uval = avg * counter->scale;
	printout(-1, 0, counter, uval);

	print_noise(counter, avg);

	print_running(avg_running, avg_enabled);
	fprintf(output, "\n");
}

/*
 * Print out the results of a single counter:
 * does not use aggregated count in system-wide
 */
static void print_counter(struct perf_evsel *counter, char *prefix)
{
	FILE *output = stat_config.output;
	u64 ena, run, val;
	double uval;
	int cpu;

	for (cpu = 0; cpu < perf_evsel__nr_cpus(counter); cpu++) {
		val = perf_counts(counter->counts, cpu, 0)->val;
		ena = perf_counts(counter->counts, cpu, 0)->ena;
		run = perf_counts(counter->counts, cpu, 0)->run;

		if (prefix)
			fprintf(output, "%s", prefix);

		if (run == 0 || ena == 0) {
			fprintf(output, "CPU%*d%s%*s%s",
				csv_output ? 0 : -4,
				perf_evsel__cpus(counter)->map[cpu], csv_sep,
				csv_output ? 0 : 18,
				counter->supported ? CNTR_NOT_COUNTED : CNTR_NOT_SUPPORTED,
				csv_sep);

			fprintf(output, "%-*s%s",
				csv_output ? 0 : unit_width,
				counter->unit, csv_sep);

			fprintf(output, "%*s",
				csv_output ? 0 : -25,
				perf_evsel__name(counter));

			if (counter->cgrp)
				fprintf(output, "%s%s",
					csv_sep, counter->cgrp->name);

			print_running(run, ena);
			fputc('\n', output);
			continue;
		}

		uval = val * counter->scale;
		printout(cpu, 0, counter, uval);
		if (!csv_output)
			print_noise(counter, 1.0);
		print_running(run, ena);

		fputc('\n', output);
	}
}

static void print_interval(char *prefix, struct timespec *ts)
{
	FILE *output = stat_config.output;
	static int num_print_interval;

	sprintf(prefix, "%6lu.%09lu%s", ts->tv_sec, ts->tv_nsec, csv_sep);

	if (num_print_interval == 0 && !csv_output) {
		switch (stat_config.aggr_mode) {
		case AGGR_SOCKET:
			fprintf(output, "#           time socket cpus             counts %*s events\n", unit_width, "unit");
			break;
		case AGGR_CORE:
			fprintf(output, "#           time core         cpus             counts %*s events\n", unit_width, "unit");
			break;
		case AGGR_NONE:
			fprintf(output, "#           time CPU                counts %*s events\n", unit_width, "unit");
			break;
		case AGGR_THREAD:
			fprintf(output, "#           time             comm-pid                  counts %*s events\n", unit_width, "unit");
			break;
		case AGGR_GLOBAL:
		default:
			fprintf(output, "#           time             counts %*s events\n", unit_width, "unit");
		case AGGR_UNSET:
			break;
		}
	}

	if (++num_print_interval == 25)
		num_print_interval = 0;
}

static void print_header(int argc, const char **argv)
{
	FILE *output = stat_config.output;
	int i;

	fflush(stdout);

	if (!csv_output) {
		fprintf(output, "\n");
		fprintf(output, " Performance counter stats for ");
		if (target.system_wide)
			fprintf(output, "\'system wide");
		else if (target.cpu_list)
			fprintf(output, "\'CPU(s) %s", target.cpu_list);
		else if (!target__has_task(&target)) {
			fprintf(output, "\'%s", argv[0]);
			for (i = 1; i < argc; i++)
				fprintf(output, " %s", argv[i]);
		} else if (target.pid)
			fprintf(output, "process id \'%s", target.pid);
		else
			fprintf(output, "thread id \'%s", target.tid);

		fprintf(output, "\'");
		if (run_count > 1)
			fprintf(output, " (%d runs)", run_count);
		fprintf(output, ":\n\n");
	}
}

static void print_footer(void)
{
	FILE *output = stat_config.output;

	if (!null_run)
		fprintf(output, "\n");
	fprintf(output, " %17.9f seconds time elapsed",
			avg_stats(&walltime_nsecs_stats)/1e9);
	if (run_count > 1) {
		fprintf(output, " ");
		print_noise_pct(stddev_stats(&walltime_nsecs_stats),
				avg_stats(&walltime_nsecs_stats));
	}
	fprintf(output, "\n\n");
}

static void print_counters(struct timespec *ts, int argc, const char **argv)
{
	int interval = stat_config.interval;
	struct perf_evsel *counter;
	char buf[64], *prefix = NULL;

	if (interval)
		print_interval(prefix = buf, ts);
	else
		print_header(argc, argv);

	switch (stat_config.aggr_mode) {
	case AGGR_CORE:
	case AGGR_SOCKET:
		print_aggr(prefix);
		break;
	case AGGR_THREAD:
		evlist__for_each(evsel_list, counter)
			print_aggr_thread(counter, prefix);
		break;
	case AGGR_GLOBAL:
		evlist__for_each(evsel_list, counter)
			print_counter_aggr(counter, prefix);
		break;
	case AGGR_NONE:
		evlist__for_each(evsel_list, counter)
			print_counter(counter, prefix);
		break;
	case AGGR_UNSET:
	default:
		break;
	}

	if (!interval && !csv_output)
		print_footer();

	fflush(stat_config.output);
}

static volatile int signr = -1;

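/*
 * SIGINT/SIGCHLD handler: ask the counting loop to terminate and
 * remember the signal so sig_atexit() can re-raise it on exit.
 */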
static void skip_signal(int signo)
{
	if ((child_pid == -1) || stat_config.interval)
		done = 1;

	signr = signo;
	/*
	 * render child_pid harmless:
	 * we won't send SIGTERM to a random
	 * process in case of a race condition
	 * and fast PID recycling
	 */
	child_pid = -1;
}

static void sig_atexit(void)
{
	sigset_t set, oset;

	/*
	 * avoid a race condition with the SIGCHLD handler
	 * in skip_signal(), which modifies child_pid;
	 * the goal is to avoid sending SIGTERM to a random
	 * process
	 */
	sigemptyset(&set);
	sigaddset(&set, SIGCHLD);
	sigprocmask(SIG_BLOCK, &set, &oset);

	if (child_pid != -1)
		kill(child_pid, SIGTERM);

	sigprocmask(SIG_SETMASK, &oset, NULL);

	if (signr == -1)
		return;

	signal(signr, SIG_DFL);
	kill(getpid(), signr);
}

static int stat__set_big_num(const struct option *opt __maybe_unused,
			     const char *s __maybe_unused, int unset)
{
	big_num_opt = unset ? 0 : 1;
	return 0;
}

static const struct option stat_options[] = {
	OPT_BOOLEAN('T', "transaction", &transaction_run,
		    "hardware transaction statistics"),
	OPT_CALLBACK('e', "event", &evsel_list, "event",
		     "event selector. use 'perf list' to list available events",
		     parse_events_option),
	OPT_CALLBACK(0, "filter", &evsel_list, "filter",
		     "event filter", parse_filter),
	OPT_BOOLEAN('i', "no-inherit", &no_inherit,
		    "child tasks do not inherit counters"),
	OPT_STRING('p', "pid", &target.pid, "pid",
		   "stat events on existing process id"),
	OPT_STRING('t', "tid", &target.tid, "tid",
		   "stat events on existing thread id"),
	OPT_BOOLEAN('a', "all-cpus", &target.system_wide,
		    "system-wide collection from all CPUs"),
	OPT_BOOLEAN('g', "group", &group,
		    "put the counters into a counter group"),
	OPT_BOOLEAN('c', "scale", &stat_config.scale, "scale/normalize counters"),
	OPT_INCR('v', "verbose", &verbose,
		 "be more verbose (show counter open errors, etc)"),
	OPT_INTEGER('r', "repeat", &run_count,
		    "repeat command and print average + stddev (max: 100, forever: 0)"),
	OPT_BOOLEAN('n', "null", &null_run,
		    "null run - don't start any counters"),
	OPT_INCR('d', "detailed", &detailed_run,
		 "detailed run - start a lot of events"),
	OPT_BOOLEAN('S', "sync", &sync_run,
		    "call sync() before starting a run"),
	OPT_CALLBACK_NOOPT('B', "big-num", NULL, NULL,
			   "print large numbers with thousands' separators",
			   stat__set_big_num),
	OPT_STRING('C', "cpu", &target.cpu_list, "cpu",
		   "list of cpus to monitor in system-wide"),
	OPT_SET_UINT('A', "no-aggr", &stat_config.aggr_mode,
		     "disable CPU count aggregation", AGGR_NONE),
	OPT_STRING('x', "field-separator", &csv_sep, "separator",
		   "print counts with custom separator"),
	OPT_CALLBACK('G', "cgroup", &evsel_list, "name",
		     "monitor event in cgroup name only", parse_cgroups),
	OPT_STRING('o', "output", &output_name, "file", "output file name"),
	OPT_BOOLEAN(0, "append", &append_file, "append to the output file"),
	OPT_INTEGER(0, "log-fd", &output_fd,
		    "log output to fd, instead of stderr"),
	OPT_STRING(0, "pre", &pre_cmd, "command",
		   "command to run prior to the measured command"),
	OPT_STRING(0, "post", &post_cmd, "command",
		   "command to run after the measured command"),
	OPT_UINTEGER('I', "interval-print", &stat_config.interval,
		     "print counts at regular interval in ms (>= 10)"),
	OPT_SET_UINT(0, "per-socket", &stat_config.aggr_mode,
		     "aggregate counts per processor socket", AGGR_SOCKET),
	OPT_SET_UINT(0, "per-core", &stat_config.aggr_mode,
		     "aggregate counts per physical processor core", AGGR_CORE),
	OPT_SET_UINT(0, "per-thread", &stat_config.aggr_mode,
		     "aggregate counts per thread", AGGR_THREAD),
	OPT_UINTEGER('D', "delay", &initial_delay,
		     "ms to wait before starting measurement after program start"),
	OPT_END()
};
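
/*
 * Illustrative invocations for the options above (the workload names
 * are placeholders, not part of this tool):
 *
 *   perf stat -e cycles,instructions -r 5 ./my_benchmark
 *   perf stat -a --per-socket -I 1000 sleep 10
 *   perf stat record -o stat.data ./my_benchmark
 */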

static int perf_stat__get_socket(struct cpu_map *map, int cpu)
{
	return cpu_map__get_socket(map, cpu, NULL);
}

static int perf_stat__get_core(struct cpu_map *map, int cpu)
{
	return cpu_map__get_core(map, cpu, NULL);
}

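/* Highest cpu number in the map; used to size the cpus_aggr_map cache. */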
static int cpu_map__get_max(struct cpu_map *map)
{
	int i, max = -1;

	for (i = 0; i < map->nr; i++) {
		if (map->map[i] > max)
			max = map->map[i];
	}

	return max;
}

static struct cpu_map *cpus_aggr_map;

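/*
 * Translate a cpu map entry to its aggregation id (socket or core),
 * caching the result in cpus_aggr_map so each cpu is resolved only once.
 */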
static int perf_stat__get_aggr(aggr_get_id_t get_id, struct cpu_map *map, int idx)
{
	int cpu;

	if (idx >= map->nr)
		return -1;

	cpu = map->map[idx];

	if (cpus_aggr_map->map[cpu] == -1)
		cpus_aggr_map->map[cpu] = get_id(map, idx);

	return cpus_aggr_map->map[cpu];
}

static int perf_stat__get_socket_cached(struct cpu_map *map, int idx)
{
	return perf_stat__get_aggr(perf_stat__get_socket, map, idx);
}

static int perf_stat__get_core_cached(struct cpu_map *map, int idx)
{
	return perf_stat__get_aggr(perf_stat__get_core, map, idx);
}

static int perf_stat_init_aggr_mode(void)
{
	int nr;

	switch (stat_config.aggr_mode) {
	case AGGR_SOCKET:
		if (cpu_map__build_socket_map(evsel_list->cpus, &aggr_map)) {
			perror("cannot build socket map");
			return -1;
		}
		aggr_get_id = perf_stat__get_socket_cached;
		break;
	case AGGR_CORE:
		if (cpu_map__build_core_map(evsel_list->cpus, &aggr_map)) {
			perror("cannot build core map");
			return -1;
		}
		aggr_get_id = perf_stat__get_core_cached;
		break;
	case AGGR_NONE:
	case AGGR_GLOBAL:
	case AGGR_THREAD:
	case AGGR_UNSET:
	default:
		break;
	}

	/*
	 * The evsel_list->cpus is the base we operate on,
	 * taking the highest cpu number to be the size of
	 * the aggregation translate cpumap.
	 */
	nr = cpu_map__get_max(evsel_list->cpus);
	cpus_aggr_map = cpu_map__empty_new(nr + 1);
	return cpus_aggr_map ? 0 : -ENOMEM;
}

static void perf_stat__exit_aggr_mode(void)
{
	cpu_map__put(aggr_map);
	cpu_map__put(cpus_aggr_map);
	aggr_map = NULL;
	cpus_aggr_map = NULL;
}

/*
 * Add default attributes, if there were no attributes specified or
 * if -d/--detailed, -d -d or -d -d -d is used:
 */
static int add_default_attributes(void)
{
	struct perf_event_attr default_attrs[] = {

	{ .type = PERF_TYPE_SOFTWARE, .config = PERF_COUNT_SW_TASK_CLOCK },
	{ .type = PERF_TYPE_SOFTWARE, .config = PERF_COUNT_SW_CONTEXT_SWITCHES },
	{ .type = PERF_TYPE_SOFTWARE, .config = PERF_COUNT_SW_CPU_MIGRATIONS },
	{ .type = PERF_TYPE_SOFTWARE, .config = PERF_COUNT_SW_PAGE_FAULTS },

	{ .type = PERF_TYPE_HARDWARE, .config = PERF_COUNT_HW_CPU_CYCLES },
	{ .type = PERF_TYPE_HARDWARE, .config = PERF_COUNT_HW_STALLED_CYCLES_FRONTEND },
	{ .type = PERF_TYPE_HARDWARE, .config = PERF_COUNT_HW_STALLED_CYCLES_BACKEND },
	{ .type = PERF_TYPE_HARDWARE, .config = PERF_COUNT_HW_INSTRUCTIONS },
	{ .type = PERF_TYPE_HARDWARE, .config = PERF_COUNT_HW_BRANCH_INSTRUCTIONS },
	{ .type = PERF_TYPE_HARDWARE, .config = PERF_COUNT_HW_BRANCH_MISSES },

};

/*
 * Detailed stats (-d), covering the L1 and last level data caches:
 */
	struct perf_event_attr detailed_attrs[] = {

	{ .type = PERF_TYPE_HW_CACHE,
	  .config =
		PERF_COUNT_HW_CACHE_L1D << 0 |
		(PERF_COUNT_HW_CACHE_OP_READ << 8) |
		(PERF_COUNT_HW_CACHE_RESULT_ACCESS << 16) },

	{ .type = PERF_TYPE_HW_CACHE,
	  .config =
		PERF_COUNT_HW_CACHE_L1D << 0 |
		(PERF_COUNT_HW_CACHE_OP_READ << 8) |
		(PERF_COUNT_HW_CACHE_RESULT_MISS << 16) },

	{ .type = PERF_TYPE_HW_CACHE,
	  .config =
		PERF_COUNT_HW_CACHE_LL << 0 |
		(PERF_COUNT_HW_CACHE_OP_READ << 8) |
		(PERF_COUNT_HW_CACHE_RESULT_ACCESS << 16) },

	{ .type = PERF_TYPE_HW_CACHE,
	  .config =
		PERF_COUNT_HW_CACHE_LL << 0 |
		(PERF_COUNT_HW_CACHE_OP_READ << 8) |
		(PERF_COUNT_HW_CACHE_RESULT_MISS << 16) },
};

/*
 * Very detailed stats (-d -d), covering the instruction cache and the TLB caches:
 */
	struct perf_event_attr very_detailed_attrs[] = {

	{ .type = PERF_TYPE_HW_CACHE,
	  .config =
		PERF_COUNT_HW_CACHE_L1I << 0 |
		(PERF_COUNT_HW_CACHE_OP_READ << 8) |
		(PERF_COUNT_HW_CACHE_RESULT_ACCESS << 16) },

	{ .type = PERF_TYPE_HW_CACHE,
	  .config =
		PERF_COUNT_HW_CACHE_L1I << 0 |
		(PERF_COUNT_HW_CACHE_OP_READ << 8) |
		(PERF_COUNT_HW_CACHE_RESULT_MISS << 16) },

	{ .type = PERF_TYPE_HW_CACHE,
	  .config =
		PERF_COUNT_HW_CACHE_DTLB << 0 |
		(PERF_COUNT_HW_CACHE_OP_READ << 8) |
		(PERF_COUNT_HW_CACHE_RESULT_ACCESS << 16) },

	{ .type = PERF_TYPE_HW_CACHE,
	  .config =
		PERF_COUNT_HW_CACHE_DTLB << 0 |
		(PERF_COUNT_HW_CACHE_OP_READ << 8) |
		(PERF_COUNT_HW_CACHE_RESULT_MISS << 16) },

	{ .type = PERF_TYPE_HW_CACHE,
	  .config =
		PERF_COUNT_HW_CACHE_ITLB << 0 |
		(PERF_COUNT_HW_CACHE_OP_READ << 8) |
		(PERF_COUNT_HW_CACHE_RESULT_ACCESS << 16) },

	{ .type = PERF_TYPE_HW_CACHE,
	  .config =
		PERF_COUNT_HW_CACHE_ITLB << 0 |
		(PERF_COUNT_HW_CACHE_OP_READ << 8) |
		(PERF_COUNT_HW_CACHE_RESULT_MISS << 16) },

};

/*
 * Very, very detailed stats (-d -d -d), adding prefetch events:
 */
	struct perf_event_attr very_very_detailed_attrs[] = {

	{ .type = PERF_TYPE_HW_CACHE,
	  .config =
		PERF_COUNT_HW_CACHE_L1D << 0 |
		(PERF_COUNT_HW_CACHE_OP_PREFETCH << 8) |
		(PERF_COUNT_HW_CACHE_RESULT_ACCESS << 16) },

	{ .type = PERF_TYPE_HW_CACHE,
	  .config =
		PERF_COUNT_HW_CACHE_L1D << 0 |
		(PERF_COUNT_HW_CACHE_OP_PREFETCH << 8) |
		(PERF_COUNT_HW_CACHE_RESULT_MISS << 16) },
};

	/* Set attrs if no event is selected and !null_run: */
	if (null_run)
		return 0;

	if (transaction_run) {
		int err;
		if (pmu_have_event("cpu", "cycles-ct") &&
		    pmu_have_event("cpu", "el-start"))
			err = parse_events(evsel_list, transaction_attrs, NULL);
		else
			err = parse_events(evsel_list, transaction_limited_attrs, NULL);
		if (err) {
			fprintf(stderr, "Cannot set up transaction events\n");
			return -1;
		}
		return 0;
	}

	if (!evsel_list->nr_entries) {
		if (perf_evlist__add_default_attrs(evsel_list, default_attrs) < 0)
			return -1;
	}

	/* Detailed events get appended to the event list: */

	if (detailed_run < 1)
		return 0;

	/* Append detailed run extra attributes: */
	if (perf_evlist__add_default_attrs(evsel_list, detailed_attrs) < 0)
		return -1;

	if (detailed_run < 2)
		return 0;

	/* Append very detailed run extra attributes: */
	if (perf_evlist__add_default_attrs(evsel_list, very_detailed_attrs) < 0)
		return -1;

	if (detailed_run < 3)
		return 0;

	/* Append very, very detailed run extra attributes: */
	return perf_evlist__add_default_attrs(evsel_list, very_very_detailed_attrs);
}

static const char * const record_usage[] = {
	"perf stat record [<options>]",
	NULL,
};

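/*
 * Start from all header features and clear the ones a counting session
 * never generates: build ids, tracing data, branch stacks and auxtrace.
 */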
static void init_features(struct perf_session *session)
{
	int feat;

	for (feat = HEADER_FIRST_FEATURE; feat < HEADER_LAST_FEATURE; feat++)
		perf_header__set_feat(&session->header, feat);

	perf_header__clear_feat(&session->header, HEADER_BUILD_ID);
	perf_header__clear_feat(&session->header, HEADER_TRACING_DATA);
	perf_header__clear_feat(&session->header, HEADER_BRANCH_STACK);
	perf_header__clear_feat(&session->header, HEADER_AUXTRACE);
}

static int __cmd_record(int argc, const char **argv)
{
	struct perf_session *session;
	struct perf_data_file *file = &perf_stat.file;

	argc = parse_options(argc, argv, stat_options, record_usage,
			     PARSE_OPT_STOP_AT_NON_OPTION);

	if (output_name)
		file->path = output_name;

	session = perf_session__new(file, false, NULL);
	if (session == NULL) {
		pr_err("Perf session creation failed.\n");
		return -1;
	}

	/* No pipe support ATM */
	if (perf_stat.file.is_pipe)
		return -EINVAL;

	init_features(session);

	session->evlist = evsel_list;
	perf_stat.session = session;
	perf_stat.record = true;
	return argc;
}

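/*
 * Entry point for 'perf stat': parse options, set up the output stream
 * and event list, run the workload run_count times (or forever) and
 * print and/or record the results.
 */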
int cmd_stat(int argc, const char **argv, const char *prefix __maybe_unused)
{
	const char * const stat_usage[] = {
		"perf stat [<options>] [<command>]",
		NULL
	};
	int status = -EINVAL, run_idx;
	const char *mode;
	FILE *output = stderr;
	unsigned int interval;
	const char * const stat_subcommands[] = { "record" };

	setlocale(LC_ALL, "");

	evsel_list = perf_evlist__new();
	if (evsel_list == NULL)
		return -ENOMEM;

	argc = parse_options_subcommand(argc, argv, stat_options, stat_subcommands,
					(const char **) stat_usage,
					PARSE_OPT_STOP_AT_NON_OPTION);

	if (argc && !strncmp(argv[0], "rec", 3)) {
		argc = __cmd_record(argc, argv);
		if (argc < 0)
			return -1;
	}

	interval = stat_config.interval;

	/*
	 * For the record command the -o option is already taken care of.
	 */
	if (!STAT_RECORD && output_name && strcmp(output_name, "-"))
		output = NULL;

	if (output_name && output_fd) {
		fprintf(stderr, "cannot use both --output and --log-fd\n");
		parse_options_usage(stat_usage, stat_options, "o", 1);
		parse_options_usage(NULL, stat_options, "log-fd", 0);
		goto out;
	}

	if (output_fd < 0) {
		fprintf(stderr, "argument to --log-fd must be > 0\n");
		parse_options_usage(stat_usage, stat_options, "log-fd", 0);
		goto out;
	}

	if (!output) {
		struct timespec tm;
		mode = append_file ? "a" : "w";

		output = fopen(output_name, mode);
		if (!output) {
			perror("failed to create output file");
			return -1;
		}
		clock_gettime(CLOCK_REALTIME, &tm);
		fprintf(output, "# started on %s\n", ctime(&tm.tv_sec));
	} else if (output_fd > 0) {
		mode = append_file ? "a" : "w";
		output = fdopen(output_fd, mode);
		if (!output) {
			perror("Failed opening logfd");
			return -errno;
		}
	}

	stat_config.output = output;

	if (csv_sep) {
		csv_output = true;
		if (!strcmp(csv_sep, "\\t"))
			csv_sep = "\t";
	} else
		csv_sep = DEFAULT_SEPARATOR;

	/*
	 * let the spreadsheet do the pretty-printing
	 */
	if (csv_output) {
		/* User explicitly passed -B? */
		if (big_num_opt == 1) {
			fprintf(stderr, "-B option not supported with -x\n");
			parse_options_usage(stat_usage, stat_options, "B", 1);
			parse_options_usage(NULL, stat_options, "x", 1);
			goto out;
		} else /* Nope, so disable big number formatting */
			big_num = false;
	} else if (big_num_opt == 0) /* User passed --no-big-num */
		big_num = false;

	if (!argc && target__none(&target))
		usage_with_options(stat_usage, stat_options);

	if (run_count < 0) {
		pr_err("Run count must be a positive number\n");
		parse_options_usage(stat_usage, stat_options, "r", 1);
		goto out;
	} else if (run_count == 0) {
		forever = true;
		run_count = 1;
	}

	if ((stat_config.aggr_mode == AGGR_THREAD) && !target__has_task(&target)) {
		fprintf(stderr, "The --per-thread option is only available "
			"when monitoring via -p -t options.\n");
		parse_options_usage(NULL, stat_options, "p", 1);
		parse_options_usage(NULL, stat_options, "t", 1);
		goto out;
	}

	/*
	 * no_aggr and cgroup are for system-wide only;
	 * --per-thread is aggregated per thread, we don't mix it with cpu mode
	 */
	if (((stat_config.aggr_mode != AGGR_GLOBAL &&
	      stat_config.aggr_mode != AGGR_THREAD) || nr_cgroups) &&
	    !target__has_cpu(&target)) {
		fprintf(stderr, "both cgroup and no-aggregation "
			"modes only available in system-wide mode\n");

		parse_options_usage(stat_usage, stat_options, "G", 1);
		parse_options_usage(NULL, stat_options, "A", 1);
		parse_options_usage(NULL, stat_options, "a", 1);
		goto out;
	}

	if (add_default_attributes())
		goto out;

	target__validate(&target);

	if (perf_evlist__create_maps(evsel_list, &target) < 0) {
		if (target__has_task(&target)) {
			pr_err("Problems finding threads of monitor\n");
			parse_options_usage(stat_usage, stat_options, "p", 1);
			parse_options_usage(NULL, stat_options, "t", 1);
		} else if (target__has_cpu(&target)) {
			perror("failed to parse CPUs map");
			parse_options_usage(stat_usage, stat_options, "C", 1);
			parse_options_usage(NULL, stat_options, "a", 1);
		}
		goto out;
	}

	/*
	 * Initialize thread_map with comm names,
	 * so we could print it out on output.
	 */
	if (stat_config.aggr_mode == AGGR_THREAD)
		thread_map__read_comms(evsel_list->threads);

	if (interval && interval < 100) {
		if (interval < 10) {
			pr_err("print interval must be >= 10ms\n");
			parse_options_usage(stat_usage, stat_options, "I", 1);
			goto out;
		} else
			pr_warning("print interval < 100ms. "
				   "The overhead percentage could be high in some cases. "
				   "Please proceed with caution.\n");
	}

	if (perf_evlist__alloc_stats(evsel_list, interval))
		goto out;

	if (perf_stat_init_aggr_mode())
		goto out;

	/*
	 * We don't want to block the signals - that would cause
	 * child tasks to inherit that and Ctrl-C would not work.
	 * What we want is for Ctrl-C to work in the exec()-ed
	 * task, but being ignored by perf stat itself:
	 */
	atexit(sig_atexit);
	if (!forever)
		signal(SIGINT, skip_signal);
	signal(SIGCHLD, skip_signal);
	signal(SIGALRM, skip_signal);
	signal(SIGABRT, skip_signal);

	status = 0;
	for (run_idx = 0; forever || run_idx < run_count; run_idx++) {
		if (run_count != 1 && verbose)
			fprintf(output, "[ perf stat: executing run #%d ... ]\n",
				run_idx + 1);

		status = run_perf_stat(argc, argv);
		if (forever && status != -1) {
			print_counters(NULL, argc, argv);
			perf_stat__reset_stats();
		}
	}

	if (!forever && status != -1 && !interval)
		print_counters(NULL, argc, argv);

	if (STAT_RECORD) {
		/*
		 * We synthesize the kernel mmap record just so that older tools
		 * don't emit warnings about not being able to resolve symbols
		 * due to /proc/sys/kernel/kptr_restrict settings and instead provide
		 * a saner message about no samples being in the perf.data file.
		 *
		 * This also serves to suppress a warning about f_header.data.size == 0
		 * in header.c at the moment 'perf stat record' gets introduced, which
		 * is not really needed once we start adding the stat specific PERF_RECORD_
		 * records, but the need to suppress the kptr_restrict messages in older
		 * tools remains -acme
		 */
		int fd = perf_data_file__fd(&perf_stat.file);
		int err = perf_event__synthesize_kernel_mmap((void *)&perf_stat,
							     process_synthesized_event,
							     &perf_stat.session->machines.host);
		if (err) {
			pr_warning("Couldn't synthesize the kernel mmap record, harmless, "
				   "older tools may produce warnings about this file.\n");
		}

		perf_stat.session->header.data_size += perf_stat.bytes_written;
		perf_session__write_header(perf_stat.session, evsel_list, fd, true);

		perf_session__delete(perf_stat.session);
	}

	perf_stat__exit_aggr_mode();
	perf_evlist__free_stats(evsel_list);
out:
	perf_evlist__delete(evsel_list);
	return status;
}