/*
 * builtin-stat.c
 *
 * Builtin stat command: give a precise performance counter summary
 * overview of any workload, CPU or specific PID.
 *
 * Sample output:

   $ perf stat ./hackbench 10

  Performance counter stats for './hackbench 10':

       1708.761321 task-clock                #   11.037 CPUs utilized
            41,190 context-switches          #    0.024 M/sec
             6,735 CPU-migrations            #    0.004 M/sec
            17,318 page-faults               #    0.010 M/sec
     5,205,202,243 cycles                    #    3.046 GHz
     3,856,436,920 stalled-cycles-frontend   #   74.09% frontend cycles idle
     1,600,790,871 stalled-cycles-backend    #   30.75% backend cycles idle
     2,603,501,247 instructions              #    0.50  insns per cycle
                                             #    1.48  stalled cycles per insn
       484,357,498 branches                  #  283.455 M/sec
         6,388,934 branch-misses             #    1.32% of all branches

        0.154822978 seconds time elapsed
 *
 * Copyright (C) 2008-2011, Red Hat Inc, Ingo Molnar <mingo@redhat.com>
 *
 * Improvements and fixes by:
 *
 *   Arjan van de Ven <arjan@linux.intel.com>
 *   Yanmin Zhang <yanmin.zhang@intel.com>
 *   Wu Fengguang <fengguang.wu@intel.com>
 *   Mike Galbraith <efault@gmx.de>
 *   Paul Mackerras <paulus@samba.org>
 *   Jaswinder Singh Rajput <jaswinder@kernel.org>
 *
 * Released under the GPL v2. (and only v2, not any later version)
 */
#include "perf.h"
#include "builtin.h"
#include "util/util.h"
#include "util/parse-options.h"
#include "util/parse-events.h"
#include "util/event.h"
#include "util/evlist.h"
#include "util/evsel.h"
#include "util/debug.h"
#include "util/color.h"
#include "util/stat.h"
#include "util/header.h"
#include "util/cpumap.h"
#include "util/thread.h"
#include "util/thread_map.h"

#include <stdlib.h>
#include <sys/prctl.h>
#include <locale.h>
#define DEFAULT_SEPARATOR	" "
#define CNTR_NOT_SUPPORTED	"<not supported>"
#define CNTR_NOT_COUNTED	"<not counted>"
static void print_stat(int argc, const char **argv);
static void print_counter_aggr(struct perf_evsel *counter, char *prefix);
static void print_counter(struct perf_evsel *counter, char *prefix);
static void print_aggr(char *prefix);
static struct perf_evlist *evsel_list;

static struct perf_target target = {
	.uid = UINT_MAX,
};

enum aggr_mode {
	AGGR_NONE,
	AGGR_GLOBAL,
	AGGR_SOCKET,
};
static int run_count = 1;
static bool no_inherit = false;
static bool scale = true;
static enum aggr_mode aggr_mode = AGGR_GLOBAL;
static pid_t child_pid = -1;
static bool null_run = false;
static int detailed_run = 0;
static bool big_num = true;
static int big_num_opt = -1;
static const char *csv_sep = NULL;
static bool csv_output = false;
static bool group = false;
static FILE *output = NULL;
static int output_fd;
static const char *pre_cmd = NULL;
static const char *post_cmd = NULL;
static bool sync_run = false;
static unsigned int interval = 0;
static bool forever = false;
static struct timespec ref_time;
static struct cpu_map *aggr_map;
static int (*aggr_get_id)(struct cpu_map *m, int cpu);
static volatile int done = 0;

struct perf_stat {
	struct stats res_stats[3];
};
static inline void diff_timespec(struct timespec *r, struct timespec *a,
				 struct timespec *b)
{
	r->tv_sec = a->tv_sec - b->tv_sec;
	if (a->tv_nsec < b->tv_nsec) {
		r->tv_nsec = a->tv_nsec + 1000000000L - b->tv_nsec;
		r->tv_sec--;
	} else {
		r->tv_nsec = a->tv_nsec - b->tv_nsec;
	}
}
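
/*
 * Worked example: a = 2.000000100s, b = 1.999999900s. tv_nsec would
 * underflow, so a second is borrowed:
 *
 *	r->tv_sec  = (2 - 1) - 1                  = 0
 *	r->tv_nsec = 100 + 1000000000 - 999999900 = 200
 *
 * i.e. a 200ns difference, as expected.
 */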
static inline struct cpu_map *perf_evsel__cpus(struct perf_evsel *evsel)
{
	return (evsel->cpus && !target.cpu_list) ? evsel->cpus : evsel_list->cpus;
}

static inline int perf_evsel__nr_cpus(struct perf_evsel *evsel)
{
	return perf_evsel__cpus(evsel)->nr;
}
static void perf_evsel__reset_stat_priv(struct perf_evsel *evsel)
{
	memset(evsel->priv, 0, sizeof(struct perf_stat));
}

static int perf_evsel__alloc_stat_priv(struct perf_evsel *evsel)
{
	evsel->priv = zalloc(sizeof(struct perf_stat));
	return evsel->priv == NULL ? -ENOMEM : 0;
}

static void perf_evsel__free_stat_priv(struct perf_evsel *evsel)
{
	free(evsel->priv);
	evsel->priv = NULL;
}
static int perf_evsel__alloc_prev_raw_counts(struct perf_evsel *evsel)
{
	void *addr;
	size_t sz;

	sz = sizeof(*evsel->counts) +
	     (perf_evsel__nr_cpus(evsel) * sizeof(struct perf_counts_values));

	addr = zalloc(sz);
	if (!addr)
		return -ENOMEM;

	evsel->prev_raw_counts = addr;

	return 0;
}

static void perf_evsel__free_prev_raw_counts(struct perf_evsel *evsel)
{
	free(evsel->prev_raw_counts);
	evsel->prev_raw_counts = NULL;
}
static void perf_evlist__free_stats(struct perf_evlist *evlist)
{
	struct perf_evsel *evsel;

	list_for_each_entry(evsel, &evlist->entries, node) {
		perf_evsel__free_stat_priv(evsel);
		perf_evsel__free_counts(evsel);
		perf_evsel__free_prev_raw_counts(evsel);
	}
}

static int perf_evlist__alloc_stats(struct perf_evlist *evlist, bool alloc_raw)
{
	struct perf_evsel *evsel;

	list_for_each_entry(evsel, &evlist->entries, node) {
		if (perf_evsel__alloc_stat_priv(evsel) < 0 ||
		    perf_evsel__alloc_counts(evsel, perf_evsel__nr_cpus(evsel)) < 0 ||
		    (alloc_raw && perf_evsel__alloc_prev_raw_counts(evsel) < 0))
			goto out_free;
	}

	return 0;

out_free:
	perf_evlist__free_stats(evlist);
	return -1;
}
static struct stats runtime_nsecs_stats[MAX_NR_CPUS];
static struct stats runtime_cycles_stats[MAX_NR_CPUS];
static struct stats runtime_stalled_cycles_front_stats[MAX_NR_CPUS];
static struct stats runtime_stalled_cycles_back_stats[MAX_NR_CPUS];
static struct stats runtime_branches_stats[MAX_NR_CPUS];
static struct stats runtime_cacherefs_stats[MAX_NR_CPUS];
static struct stats runtime_l1_dcache_stats[MAX_NR_CPUS];
static struct stats runtime_l1_icache_stats[MAX_NR_CPUS];
static struct stats runtime_ll_cache_stats[MAX_NR_CPUS];
static struct stats runtime_itlb_cache_stats[MAX_NR_CPUS];
static struct stats runtime_dtlb_cache_stats[MAX_NR_CPUS];
static struct stats walltime_nsecs_stats;
static void perf_stat__reset_stats(struct perf_evlist *evlist)
{
	struct perf_evsel *evsel;

	list_for_each_entry(evsel, &evlist->entries, node) {
		perf_evsel__reset_stat_priv(evsel);
		perf_evsel__reset_counts(evsel, perf_evsel__nr_cpus(evsel));
	}

	memset(runtime_nsecs_stats, 0, sizeof(runtime_nsecs_stats));
	memset(runtime_cycles_stats, 0, sizeof(runtime_cycles_stats));
	memset(runtime_stalled_cycles_front_stats, 0, sizeof(runtime_stalled_cycles_front_stats));
	memset(runtime_stalled_cycles_back_stats, 0, sizeof(runtime_stalled_cycles_back_stats));
	memset(runtime_branches_stats, 0, sizeof(runtime_branches_stats));
	memset(runtime_cacherefs_stats, 0, sizeof(runtime_cacherefs_stats));
	memset(runtime_l1_dcache_stats, 0, sizeof(runtime_l1_dcache_stats));
	memset(runtime_l1_icache_stats, 0, sizeof(runtime_l1_icache_stats));
	memset(runtime_ll_cache_stats, 0, sizeof(runtime_ll_cache_stats));
	memset(runtime_itlb_cache_stats, 0, sizeof(runtime_itlb_cache_stats));
	memset(runtime_dtlb_cache_stats, 0, sizeof(runtime_dtlb_cache_stats));
	memset(&walltime_nsecs_stats, 0, sizeof(walltime_nsecs_stats));
}
static int create_perf_stat_counter(struct perf_evsel *evsel)
{
	struct perf_event_attr *attr = &evsel->attr;

	if (scale)
		attr->read_format = PERF_FORMAT_TOTAL_TIME_ENABLED |
				    PERF_FORMAT_TOTAL_TIME_RUNNING;

	attr->inherit = !no_inherit;

	if (perf_target__has_cpu(&target))
		return perf_evsel__open_per_cpu(evsel, perf_evsel__cpus(evsel));

	if (!perf_target__has_task(&target) &&
	    perf_evsel__is_group_leader(evsel)) {
		attr->disabled = 1;
		attr->enable_on_exec = 1;
	}

	return perf_evsel__open_per_thread(evsel, evsel_list->threads);
}
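
/*
 * Note on the enable_on_exec path above: for a forked workload the
 * counters are created disabled and the kernel arms them when the
 * child calls exec(), so perf's own fork/startup code is not measured.
 * A rough equivalent with the raw syscall interface (sketch):
 *
 *	struct perf_event_attr attr = {
 *		.type		= PERF_TYPE_HARDWARE,
 *		.config		= PERF_COUNT_HW_CPU_CYCLES,
 *		.disabled	= 1,
 *		.enable_on_exec	= 1,
 *	};
 *	fd = perf_event_open(&attr, child_pid, -1, -1, 0);
 */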
/*
 * Does the counter have nsecs as a unit?
 */
static inline int nsec_counter(struct perf_evsel *evsel)
{
	if (perf_evsel__match(evsel, SOFTWARE, SW_CPU_CLOCK) ||
	    perf_evsel__match(evsel, SOFTWARE, SW_TASK_CLOCK))
		return 1;

	return 0;
}
/*
 * Update various tracking values we maintain to print
 * more semantic information such as miss/hit ratios,
 * instruction rates, etc:
 */
static void update_shadow_stats(struct perf_evsel *counter, u64 *count)
{
	if (perf_evsel__match(counter, SOFTWARE, SW_TASK_CLOCK))
		update_stats(&runtime_nsecs_stats[0], count[0]);
	else if (perf_evsel__match(counter, HARDWARE, HW_CPU_CYCLES))
		update_stats(&runtime_cycles_stats[0], count[0]);
	else if (perf_evsel__match(counter, HARDWARE, HW_STALLED_CYCLES_FRONTEND))
		update_stats(&runtime_stalled_cycles_front_stats[0], count[0]);
	else if (perf_evsel__match(counter, HARDWARE, HW_STALLED_CYCLES_BACKEND))
		update_stats(&runtime_stalled_cycles_back_stats[0], count[0]);
	else if (perf_evsel__match(counter, HARDWARE, HW_BRANCH_INSTRUCTIONS))
		update_stats(&runtime_branches_stats[0], count[0]);
	else if (perf_evsel__match(counter, HARDWARE, HW_CACHE_REFERENCES))
		update_stats(&runtime_cacherefs_stats[0], count[0]);
	else if (perf_evsel__match(counter, HW_CACHE, HW_CACHE_L1D))
		update_stats(&runtime_l1_dcache_stats[0], count[0]);
	else if (perf_evsel__match(counter, HW_CACHE, HW_CACHE_L1I))
		update_stats(&runtime_l1_icache_stats[0], count[0]);
	else if (perf_evsel__match(counter, HW_CACHE, HW_CACHE_LL))
		update_stats(&runtime_ll_cache_stats[0], count[0]);
	else if (perf_evsel__match(counter, HW_CACHE, HW_CACHE_DTLB))
		update_stats(&runtime_dtlb_cache_stats[0], count[0]);
	else if (perf_evsel__match(counter, HW_CACHE, HW_CACHE_ITLB))
		update_stats(&runtime_itlb_cache_stats[0], count[0]);
}
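
/*
 * The [0] index above is the single aggregated slot: as wired up here,
 * shadow stats are only tracked for the aggregate count. They feed the
 * derived ratios printed later, e.g. in abs_printout():
 *
 *	IPC          = instructions / cycles
 *	branch-miss% = branch-misses / branches * 100.0
 */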
/*
 * Read out the results of a single counter:
 * aggregate counts across CPUs in system-wide mode
 */
static int read_counter_aggr(struct perf_evsel *counter)
{
	struct perf_stat *ps = counter->priv;
	u64 *count = counter->counts->aggr.values;
	int i;

	if (__perf_evsel__read(counter, perf_evsel__nr_cpus(counter),
			       thread_map__nr(evsel_list->threads), scale) < 0)
		return -1;

	for (i = 0; i < 3; i++)
		update_stats(&ps->res_stats[i], count[i]);

	if (verbose) {
		fprintf(output, "%s: %" PRIu64 " %" PRIu64 " %" PRIu64 "\n",
			perf_evsel__name(counter), count[0], count[1], count[2]);
	}

	/*
	 * Save the full runtime - to allow normalization during printout:
	 */
	update_shadow_stats(counter, count);

	return 0;
}
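
/*
 * count[0..2] correspond to the PERF_FORMAT_TOTAL_TIME_ENABLED/_RUNNING
 * read format requested in create_perf_stat_counter(): raw value, time
 * enabled and time running. When the PMU had to multiplex events, the
 * scaled estimate is roughly (sketch, assuming scale is set):
 *
 *	scaled = raw * (double)time_enabled / time_running;
 */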
/*
 * Read out the results of a single counter:
 * do not aggregate counts across CPUs in system-wide mode
 */
static int read_counter(struct perf_evsel *counter)
{
	u64 *count;
	int cpu;

	for (cpu = 0; cpu < perf_evsel__nr_cpus(counter); cpu++) {
		if (__perf_evsel__read_on_cpu(counter, cpu, 0, scale) < 0)
			return -1;

		count = counter->counts->cpu[cpu].values;

		update_shadow_stats(counter, count);
	}

	return 0;
}
static void print_interval(void)
{
	static int num_print_interval;
	struct perf_evsel *counter;
	struct perf_stat *ps;
	struct timespec ts, rs;
	char prefix[64];

	if (aggr_mode == AGGR_GLOBAL) {
		list_for_each_entry(counter, &evsel_list->entries, node) {
			ps = counter->priv;
			memset(ps->res_stats, 0, sizeof(ps->res_stats));
			read_counter_aggr(counter);
		}
	} else {
		list_for_each_entry(counter, &evsel_list->entries, node) {
			ps = counter->priv;
			memset(ps->res_stats, 0, sizeof(ps->res_stats));
			read_counter(counter);
		}
	}

	clock_gettime(CLOCK_MONOTONIC, &ts);
	diff_timespec(&rs, &ts, &ref_time);
	sprintf(prefix, "%6lu.%09lu%s", rs.tv_sec, rs.tv_nsec, csv_sep);

	if (num_print_interval == 0 && !csv_output) {
		switch (aggr_mode) {
		case AGGR_SOCKET:
			fprintf(output, "#           time socket cpus             counts events\n");
			break;
		case AGGR_NONE:
			fprintf(output, "#           time CPU                 counts events\n");
			break;
		case AGGR_GLOBAL:
		default:
			fprintf(output, "#           time             counts events\n");
		}
	}

	if (++num_print_interval == 25)
		num_print_interval = 0;

	switch (aggr_mode) {
	case AGGR_SOCKET:
		print_aggr(prefix);
		break;
	case AGGR_NONE:
		list_for_each_entry(counter, &evsel_list->entries, node)
			print_counter(counter, prefix);
		break;
	case AGGR_GLOBAL:
	default:
		list_for_each_entry(counter, &evsel_list->entries, node)
			print_counter_aggr(counter, prefix);
	}

	fflush(output);
}
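
/*
 * Illustrative -I output in the default AGGR_GLOBAL mode (values
 * invented):
 *
 *	#           time             counts events
 *	     1.000103609         74,183,606 cycles
 *	     2.000268674         73,901,396 cycles
 */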
static int __run_perf_stat(int argc, const char **argv)
{
	char msg[512];
	unsigned long long t0, t1;
	struct perf_evsel *counter;
	struct timespec ts;
	int status = 0;
	const bool forks = (argc > 0);

	if (interval) {
		ts.tv_sec  = interval / 1000;
		ts.tv_nsec = (interval % 1000) * 1000000;
	} else {
		ts.tv_sec  = 1;
		ts.tv_nsec = 0;
	}
	if (forks) {
		if (perf_evlist__prepare_workload(evsel_list, &target, argv,
						  false, false) < 0) {
			perror("failed to prepare workload");
			return -1;
		}
		child_pid = evsel_list->workload.pid;
	}

	if (group)
		perf_evlist__set_leader(evsel_list);
	list_for_each_entry(counter, &evsel_list->entries, node) {
		if (create_perf_stat_counter(counter) < 0) {
			/*
			 * PPC returns ENXIO for HW counters until 2.6.37
			 * (behavior changed with commit b0a873e).
			 */
			if (errno == EINVAL || errno == ENOSYS ||
			    errno == ENOENT || errno == EOPNOTSUPP ||
			    errno == ENXIO) {
				if (verbose)
					ui__warning("%s event is not supported by the kernel.\n",
						    perf_evsel__name(counter));
				counter->supported = false;
				continue;
			}

			perf_evsel__open_strerror(counter, &target,
						  errno, msg, sizeof(msg));
			ui__error("%s\n", msg);

			if (child_pid != -1)
				kill(child_pid, SIGTERM);

			return -1;
		}
		counter->supported = true;
	}
	if (perf_evlist__apply_filters(evsel_list)) {
		error("failed to set filter with %d (%s)\n", errno,
			strerror(errno));
		return -1;
	}

	/*
	 * Enable counters and exec the command:
	 */
	t0 = rdclock();
	clock_gettime(CLOCK_MONOTONIC, &ref_time);

	if (forks) {
		perf_evlist__start_workload(evsel_list);
		if (interval) {
			while (!waitpid(child_pid, &status, WNOHANG)) {
				nanosleep(&ts, NULL);
				print_interval();
			}
		}
		wait(&status);

		if (WIFSIGNALED(status))
			psignal(WTERMSIG(status), argv[0]);
	} else {
		while (!done) {
			nanosleep(&ts, NULL);
			if (interval)
				print_interval();
		}
	}
	t1 = rdclock();

	update_stats(&walltime_nsecs_stats, t1 - t0);

	if (aggr_mode == AGGR_GLOBAL) {
		list_for_each_entry(counter, &evsel_list->entries, node) {
			read_counter_aggr(counter);
			perf_evsel__close_fd(counter, perf_evsel__nr_cpus(counter),
					     thread_map__nr(evsel_list->threads));
		}
	} else {
		list_for_each_entry(counter, &evsel_list->entries, node) {
			read_counter(counter);
			perf_evsel__close_fd(counter, perf_evsel__nr_cpus(counter), 1);
		}
	}

	return WEXITSTATUS(status);
}
static int run_perf_stat(int argc __maybe_unused, const char **argv)
{
	int ret;

	if (pre_cmd) {
		ret = system(pre_cmd);
		if (ret)
			return ret;
	}

	if (sync_run)
		sync();

	ret = __run_perf_stat(argc, argv);
	if (ret)
		return ret;

	if (post_cmd) {
		ret = system(post_cmd);
		if (ret)
			return ret;
	}

	return ret;
}
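
/*
 * Example (illustrative): flush the page cache before each of three
 * runs so they all start cold:
 *
 *	perf stat -r 3 --pre 'sync; echo 3 > /proc/sys/vm/drop_caches' -- make
 */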
static void print_noise_pct(double total, double avg)
{
	double pct = rel_stddev_stats(total, avg);

	if (csv_output)
		fprintf(output, "%s%.2f%%", csv_sep, pct);
	else if (pct)
		fprintf(output, "  ( +-%6.2f%% )", pct);
}
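
/*
 * rel_stddev_stats() yields the relative standard deviation in percent,
 * i.e. roughly 100.0 * stddev / avg, which is what the " ( +-  x.xx% )"
 * column after each count shows for repeated (-r) runs.
 */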
static void print_noise(struct perf_evsel *evsel, double avg)
{
	struct perf_stat *ps;

	if (run_count == 1)
		return;

	ps = evsel->priv;
	print_noise_pct(stddev_stats(&ps->res_stats[0]), avg);
}
static void aggr_printout(struct perf_evsel *evsel, int cpu, int nr)
{
	switch (aggr_mode) {
	case AGGR_SOCKET:
		/* in AGGR_SOCKET mode the "cpu" argument carries the socket id */
		fprintf(output, "S%*d%s%*d%s",
			csv_output ? 0 : -5,
			cpu,
			csv_sep,
			csv_output ? 0 : 4,
			nr,
			csv_sep);
		break;
	case AGGR_NONE:
		fprintf(output, "CPU%*d%s",
			csv_output ? 0 : -4,
			perf_evsel__cpus(evsel)->map[cpu], csv_sep);
		break;
	case AGGR_GLOBAL:
	default:
		break;
	}
}
static void nsec_printout(int cpu, int nr, struct perf_evsel *evsel, double avg)
{
	double msecs = avg / 1e6;
	const char *fmt = csv_output ? "%.6f%s%s" : "%18.6f%s%-25s";

	aggr_printout(evsel, cpu, nr);

	fprintf(output, fmt, msecs, csv_sep, perf_evsel__name(evsel));

	if (evsel->cgrp)
		fprintf(output, "%s%s", csv_sep, evsel->cgrp->name);

	if (csv_output || interval)
		return;

	if (perf_evsel__match(evsel, SOFTWARE, SW_TASK_CLOCK))
		fprintf(output, " # %8.3f CPUs utilized          ",
			avg / avg_stats(&walltime_nsecs_stats));
	else
		fprintf(output, "                                   ");
}
/* used for get_ratio_color() */
enum grc_type {
	GRC_STALLED_CYCLES_FE,
	GRC_STALLED_CYCLES_BE,
	GRC_CACHE_MISSES,
	GRC_MAX_NR
};

static const char *get_ratio_color(enum grc_type type, double ratio)
{
	static const double grc_table[GRC_MAX_NR][3] = {
		[GRC_STALLED_CYCLES_FE] = { 50.0, 30.0, 10.0 },
		[GRC_STALLED_CYCLES_BE] = { 75.0, 50.0, 20.0 },
		[GRC_CACHE_MISSES]	= { 20.0, 10.0,  5.0 },
	};
	const char *color = PERF_COLOR_NORMAL;

	if (ratio > grc_table[type][0])
		color = PERF_COLOR_RED;
	else if (ratio > grc_table[type][1])
		color = PERF_COLOR_MAGENTA;
	else if (ratio > grc_table[type][2])
		color = PERF_COLOR_YELLOW;

	return color;
}
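
/*
 * Example: a 55% frontend-stall ratio exceeds the 50.0 entry of
 * GRC_STALLED_CYCLES_FE, so it is printed red; 35% would be magenta,
 * 15% yellow, and anything at or below 10% keeps the normal color.
 */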
static void print_stalled_cycles_frontend(int cpu,
					  struct perf_evsel *evsel
					  __maybe_unused, double avg)
{
	double total, ratio = 0.0;
	const char *color;

	total = avg_stats(&runtime_cycles_stats[cpu]);

	if (total)
		ratio = avg / total * 100.0;

	color = get_ratio_color(GRC_STALLED_CYCLES_FE, ratio);

	fprintf(output, " #  ");
	color_fprintf(output, color, "%6.2f%%", ratio);
	fprintf(output, " frontend cycles idle   ");
}
static void print_stalled_cycles_backend(int cpu,
					 struct perf_evsel *evsel
					 __maybe_unused, double avg)
{
	double total, ratio = 0.0;
	const char *color;

	total = avg_stats(&runtime_cycles_stats[cpu]);

	if (total)
		ratio = avg / total * 100.0;

	color = get_ratio_color(GRC_STALLED_CYCLES_BE, ratio);

	fprintf(output, " #  ");
	color_fprintf(output, color, "%6.2f%%", ratio);
	fprintf(output, " backend  cycles idle   ");
}
static void print_branch_misses(int cpu,
				struct perf_evsel *evsel __maybe_unused,
				double avg)
{
	double total, ratio = 0.0;
	const char *color;

	total = avg_stats(&runtime_branches_stats[cpu]);

	if (total)
		ratio = avg / total * 100.0;

	color = get_ratio_color(GRC_CACHE_MISSES, ratio);

	fprintf(output, " #  ");
	color_fprintf(output, color, "%6.2f%%", ratio);
	fprintf(output, " of all branches        ");
}
static void print_l1_dcache_misses(int cpu,
				   struct perf_evsel *evsel __maybe_unused,
				   double avg)
{
	double total, ratio = 0.0;
	const char *color;

	total = avg_stats(&runtime_l1_dcache_stats[cpu]);

	if (total)
		ratio = avg / total * 100.0;

	color = get_ratio_color(GRC_CACHE_MISSES, ratio);

	fprintf(output, " #  ");
	color_fprintf(output, color, "%6.2f%%", ratio);
	fprintf(output, " of all L1-dcache hits  ");
}
static void print_l1_icache_misses(int cpu,
				   struct perf_evsel *evsel __maybe_unused,
				   double avg)
{
	double total, ratio = 0.0;
	const char *color;

	total = avg_stats(&runtime_l1_icache_stats[cpu]);

	if (total)
		ratio = avg / total * 100.0;

	color = get_ratio_color(GRC_CACHE_MISSES, ratio);

	fprintf(output, " #  ");
	color_fprintf(output, color, "%6.2f%%", ratio);
	fprintf(output, " of all L1-icache hits  ");
}
static void print_dtlb_cache_misses(int cpu,
				    struct perf_evsel *evsel __maybe_unused,
				    double avg)
{
	double total, ratio = 0.0;
	const char *color;

	total = avg_stats(&runtime_dtlb_cache_stats[cpu]);

	if (total)
		ratio = avg / total * 100.0;

	color = get_ratio_color(GRC_CACHE_MISSES, ratio);

	fprintf(output, " #  ");
	color_fprintf(output, color, "%6.2f%%", ratio);
	fprintf(output, " of all dTLB cache hits ");
}
static void print_itlb_cache_misses(int cpu,
				    struct perf_evsel *evsel __maybe_unused,
				    double avg)
{
	double total, ratio = 0.0;
	const char *color;

	total = avg_stats(&runtime_itlb_cache_stats[cpu]);

	if (total)
		ratio = avg / total * 100.0;

	color = get_ratio_color(GRC_CACHE_MISSES, ratio);

	fprintf(output, " #  ");
	color_fprintf(output, color, "%6.2f%%", ratio);
	fprintf(output, " of all iTLB cache hits ");
}
static void print_ll_cache_misses(int cpu,
				  struct perf_evsel *evsel __maybe_unused,
				  double avg)
{
	double total, ratio = 0.0;
	const char *color;

	total = avg_stats(&runtime_ll_cache_stats[cpu]);

	if (total)
		ratio = avg / total * 100.0;

	color = get_ratio_color(GRC_CACHE_MISSES, ratio);

	fprintf(output, " #  ");
	color_fprintf(output, color, "%6.2f%%", ratio);
	fprintf(output, " of all LL-cache hits   ");
}
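
/*
 * All of the print_*_misses() helpers above follow the same pattern:
 * the miss count is put in relation to the access count recorded by
 * update_shadow_stats(), i.e.
 *
 *	ratio = misses / accesses * 100.0;
 *
 * and only the color thresholds and the printed label differ.
 */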
static void abs_printout(int cpu, int nr, struct perf_evsel *evsel, double avg)
{
	double total, ratio = 0.0;
	const char *fmt;

	if (csv_output)
		fmt = "%.0f%s%s";
	else if (big_num)
		fmt = "%'18.0f%s%-25s";
	else
		fmt = "%18.0f%s%-25s";

	aggr_printout(evsel, cpu, nr);

	if (aggr_mode == AGGR_GLOBAL)
		cpu = 0;

	fprintf(output, fmt, avg, csv_sep, perf_evsel__name(evsel));

	if (evsel->cgrp)
		fprintf(output, "%s%s", csv_sep, evsel->cgrp->name);

	if (csv_output || interval)
		return;

	if (perf_evsel__match(evsel, HARDWARE, HW_INSTRUCTIONS)) {
		total = avg_stats(&runtime_cycles_stats[cpu]);
		if (total)
			ratio = avg / total;

		fprintf(output, " #   %5.2f  insns per cycle        ", ratio);

		total = avg_stats(&runtime_stalled_cycles_front_stats[cpu]);
		total = max(total, avg_stats(&runtime_stalled_cycles_back_stats[cpu]));

		if (total && avg) {
			ratio = total / avg;
			fprintf(output, "\n                                             #   %5.2f  stalled cycles per insn", ratio);
		}
	} else if (perf_evsel__match(evsel, HARDWARE, HW_BRANCH_MISSES) &&
			runtime_branches_stats[cpu].n != 0) {
		print_branch_misses(cpu, evsel, avg);
	} else if (
		evsel->attr.type == PERF_TYPE_HW_CACHE &&
		evsel->attr.config ==  ( PERF_COUNT_HW_CACHE_L1D |
					((PERF_COUNT_HW_CACHE_OP_READ) << 8) |
					((PERF_COUNT_HW_CACHE_RESULT_MISS) << 16)) &&
			runtime_l1_dcache_stats[cpu].n != 0) {
		print_l1_dcache_misses(cpu, evsel, avg);
	} else if (
		evsel->attr.type == PERF_TYPE_HW_CACHE &&
		evsel->attr.config ==  ( PERF_COUNT_HW_CACHE_L1I |
					((PERF_COUNT_HW_CACHE_OP_READ) << 8) |
					((PERF_COUNT_HW_CACHE_RESULT_MISS) << 16)) &&
			runtime_l1_icache_stats[cpu].n != 0) {
		print_l1_icache_misses(cpu, evsel, avg);
	} else if (
		evsel->attr.type == PERF_TYPE_HW_CACHE &&
		evsel->attr.config ==  ( PERF_COUNT_HW_CACHE_DTLB |
					((PERF_COUNT_HW_CACHE_OP_READ) << 8) |
					((PERF_COUNT_HW_CACHE_RESULT_MISS) << 16)) &&
			runtime_dtlb_cache_stats[cpu].n != 0) {
		print_dtlb_cache_misses(cpu, evsel, avg);
	} else if (
		evsel->attr.type == PERF_TYPE_HW_CACHE &&
		evsel->attr.config ==  ( PERF_COUNT_HW_CACHE_ITLB |
					((PERF_COUNT_HW_CACHE_OP_READ) << 8) |
					((PERF_COUNT_HW_CACHE_RESULT_MISS) << 16)) &&
			runtime_itlb_cache_stats[cpu].n != 0) {
		print_itlb_cache_misses(cpu, evsel, avg);
	} else if (
		evsel->attr.type == PERF_TYPE_HW_CACHE &&
		evsel->attr.config ==  ( PERF_COUNT_HW_CACHE_LL |
					((PERF_COUNT_HW_CACHE_OP_READ) << 8) |
					((PERF_COUNT_HW_CACHE_RESULT_MISS) << 16)) &&
			runtime_ll_cache_stats[cpu].n != 0) {
		print_ll_cache_misses(cpu, evsel, avg);
	} else if (perf_evsel__match(evsel, HARDWARE, HW_CACHE_MISSES) &&
			runtime_cacherefs_stats[cpu].n != 0) {
		total = avg_stats(&runtime_cacherefs_stats[cpu]);

		if (total)
			ratio = avg * 100 / total;

		fprintf(output, " # %8.3f %% of all cache refs    ", ratio);

	} else if (perf_evsel__match(evsel, HARDWARE, HW_STALLED_CYCLES_FRONTEND)) {
		print_stalled_cycles_frontend(cpu, evsel, avg);
	} else if (perf_evsel__match(evsel, HARDWARE, HW_STALLED_CYCLES_BACKEND)) {
		print_stalled_cycles_backend(cpu, evsel, avg);
	} else if (perf_evsel__match(evsel, HARDWARE, HW_CPU_CYCLES)) {
		total = avg_stats(&runtime_nsecs_stats[cpu]);

		if (total)
			ratio = 1.0 * avg / total;

		fprintf(output, " # %8.3f GHz                    ", ratio);
	} else if (runtime_nsecs_stats[cpu].n != 0) {
		char unit = 'M';

		total = avg_stats(&runtime_nsecs_stats[cpu]);

		if (total)
			ratio = 1000.0 * avg / total;
		if (ratio < 0.001) {
			ratio *= 1000;
			unit = 'K';
		}

		fprintf(output, " # %8.3f %c/sec                  ", ratio, unit);
	} else {
		fprintf(output, "                                   ");
	}
}
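
/*
 * The HW_CACHE comparisons in abs_printout() decode attr->config using
 * the perf_event_open() cache event encoding:
 *
 *	config = (perf_hw_cache_id) |
 *		 (perf_hw_cache_op_id << 8) |
 *		 (perf_hw_cache_op_result_id << 16);
 *
 * e.g. L1-dcache read misses are PERF_COUNT_HW_CACHE_L1D |
 * (PERF_COUNT_HW_CACHE_OP_READ << 8) |
 * (PERF_COUNT_HW_CACHE_RESULT_MISS << 16).
 */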
static void print_aggr(char *prefix)
{
	struct perf_evsel *counter;
	int cpu, s, s2, id, nr;
	u64 ena, run, val;

	if (!(aggr_map || aggr_get_id))
		return;

	for (s = 0; s < aggr_map->nr; s++) {
		id = aggr_map->map[s];
		list_for_each_entry(counter, &evsel_list->entries, node) {
			val = ena = run = 0;
			nr = 0;
			for (cpu = 0; cpu < perf_evsel__nr_cpus(counter); cpu++) {
				s2 = aggr_get_id(evsel_list->cpus, cpu);
				if (s2 != id)
					continue;
				val += counter->counts->cpu[cpu].val;
				ena += counter->counts->cpu[cpu].ena;
				run += counter->counts->cpu[cpu].run;
				nr++;
			}
			if (prefix)
				fprintf(output, "%s", prefix);

			if (run == 0 || ena == 0) {
				aggr_printout(counter, cpu, nr);

				fprintf(output, "%*s%s%*s",
					csv_output ? 0 : 18,
					counter->supported ? CNTR_NOT_COUNTED : CNTR_NOT_SUPPORTED,
					csv_sep,
					csv_output ? 0 : -24,
					perf_evsel__name(counter));

				if (counter->cgrp)
					fprintf(output, "%s%s",
						csv_sep, counter->cgrp->name);

				fputc('\n', output);
				continue;
			}

			if (nsec_counter(counter))
				nsec_printout(id, nr, counter, val);
			else
				abs_printout(id, nr, counter, val);

			if (!csv_output)
				print_noise(counter, 1.0);

			if (run != ena)
				fprintf(output, "  (%.2f%%)",
					100.0 * run / ena);
			fputc('\n', output);
		}
	}
}
/*
 * Print out the results of a single counter:
 * aggregated counts in system-wide mode
 */
static void print_counter_aggr(struct perf_evsel *counter, char *prefix)
{
	struct perf_stat *ps = counter->priv;
	double avg = avg_stats(&ps->res_stats[0]);
	int scaled = counter->counts->scaled;

	if (prefix)
		fprintf(output, "%s", prefix);

	if (scaled == -1) {
		fprintf(output, "%*s%s%*s",
			csv_output ? 0 : 18,
			counter->supported ? CNTR_NOT_COUNTED : CNTR_NOT_SUPPORTED,
			csv_sep,
			csv_output ? 0 : -24,
			perf_evsel__name(counter));

		if (counter->cgrp)
			fprintf(output, "%s%s", csv_sep, counter->cgrp->name);

		fputc('\n', output);
		return;
	}

	if (nsec_counter(counter))
		nsec_printout(-1, 0, counter, avg);
	else
		abs_printout(-1, 0, counter, avg);

	print_noise(counter, avg);

	if (csv_output) {
		fputc('\n', output);
		return;
	}

	if (scaled) {
		double avg_enabled, avg_running;

		avg_enabled = avg_stats(&ps->res_stats[1]);
		avg_running = avg_stats(&ps->res_stats[2]);

		fprintf(output, " [%5.2f%%]", 100 * avg_running / avg_enabled);
	}
	fprintf(output, "\n");
}
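
/*
 * The trailing "[xx.xx%]" printed above for scaled counters is the
 * fraction of enabled time the event was actually on the PMU, e.g.
 * " [81.32%]" (value invented) means the kernel multiplexed the event
 * and the count shown is an extrapolation.
 */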
/*
 * Print out the results of a single counter:
 * does not use aggregated counts in system-wide mode
 */
static void print_counter(struct perf_evsel *counter, char *prefix)
{
	u64 ena, run, val;
	int cpu;

	for (cpu = 0; cpu < perf_evsel__nr_cpus(counter); cpu++) {
		val = counter->counts->cpu[cpu].val;
		ena = counter->counts->cpu[cpu].ena;
		run = counter->counts->cpu[cpu].run;

		if (prefix)
			fprintf(output, "%s", prefix);

		if (run == 0 || ena == 0) {
			fprintf(output, "CPU%*d%s%*s%s%*s",
				csv_output ? 0 : -4,
				perf_evsel__cpus(counter)->map[cpu], csv_sep,
				csv_output ? 0 : 18,
				counter->supported ? CNTR_NOT_COUNTED : CNTR_NOT_SUPPORTED,
				csv_sep,
				csv_output ? 0 : -24,
				perf_evsel__name(counter));

			if (counter->cgrp)
				fprintf(output, "%s%s",
					csv_sep, counter->cgrp->name);

			fputc('\n', output);
			continue;
		}

		if (nsec_counter(counter))
			nsec_printout(cpu, 0, counter, val);
		else
			abs_printout(cpu, 0, counter, val);

		if (!csv_output)
			print_noise(counter, 1.0);

		if (run != ena)
			fprintf(output, " (%.2f%%)",
				100.0 * run / ena);

		fputc('\n', output);
	}
}
static void print_stat(int argc, const char **argv)
{
	struct perf_evsel *counter;
	int i;

	fflush(stdout);

	if (!csv_output) {
		fprintf(output, "\n");
		fprintf(output, " Performance counter stats for ");
		if (!perf_target__has_task(&target)) {
			fprintf(output, "\'%s", argv[0]);
			for (i = 1; i < argc; i++)
				fprintf(output, " %s", argv[i]);
		} else if (target.pid)
			fprintf(output, "process id \'%s", target.pid);
		else
			fprintf(output, "thread id \'%s", target.tid);

		fprintf(output, "\'");
		if (run_count > 1)
			fprintf(output, " (%d runs)", run_count);
		fprintf(output, ":\n\n");
	}

	switch (aggr_mode) {
	case AGGR_SOCKET:
		print_aggr(NULL);
		break;
	case AGGR_GLOBAL:
		list_for_each_entry(counter, &evsel_list->entries, node)
			print_counter_aggr(counter, NULL);
		break;
	case AGGR_NONE:
		list_for_each_entry(counter, &evsel_list->entries, node)
			print_counter(counter, NULL);
		break;
	default:
		break;
	}

	if (!csv_output) {
		if (!null_run)
			fprintf(output, "\n");
		fprintf(output, " %17.9f seconds time elapsed",
				avg_stats(&walltime_nsecs_stats)/1e9);
		if (run_count > 1) {
			fprintf(output, "                                        ");
			print_noise_pct(stddev_stats(&walltime_nsecs_stats),
					avg_stats(&walltime_nsecs_stats));
		}
		fprintf(output, "\n\n");
	}
}
static volatile int signr = -1;

static void skip_signal(int signo)
{
	if ((child_pid == -1) || interval)
		done = 1;

	signr = signo;
}

static void sig_atexit(void)
{
	if (child_pid != -1)
		kill(child_pid, SIGTERM);

	if (signr == -1)
		return;

	signal(signr, SIG_DFL);
	kill(getpid(), signr);
}
static int stat__set_big_num(const struct option *opt __maybe_unused,
			     const char *s __maybe_unused, int unset)
{
	big_num_opt = unset ? 0 : 1;
	return 0;
}
static int perf_stat_init_aggr_mode(void)
{
	switch (aggr_mode) {
	case AGGR_SOCKET:
		if (cpu_map__build_socket_map(evsel_list->cpus, &aggr_map)) {
			perror("cannot build socket map");
			return -1;
		}
		aggr_get_id = cpu_map__get_socket;
		break;
	case AGGR_NONE:
	case AGGR_GLOBAL:
	default:
		break;
	}

	return 0;
}
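
/*
 * With --per-socket, aggr_get_id() maps a CPU index to its socket id
 * and print_aggr() sums the per-cpu counts whose socket matches, so a
 * line such as (illustrative):
 *
 *	S0        8          21,251,340 cycles
 *
 * covers all 8 CPUs of socket 0.
 */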
/*
 * Add default attributes, if there were no attributes specified or
 * if -d/--detailed, -d -d or -d -d -d is used:
 */
static int add_default_attributes(void)
{
	struct perf_event_attr default_attrs[] = {

  { .type = PERF_TYPE_SOFTWARE, .config = PERF_COUNT_SW_TASK_CLOCK },
  { .type = PERF_TYPE_SOFTWARE, .config = PERF_COUNT_SW_CONTEXT_SWITCHES },
  { .type = PERF_TYPE_SOFTWARE, .config = PERF_COUNT_SW_CPU_MIGRATIONS },
  { .type = PERF_TYPE_SOFTWARE, .config = PERF_COUNT_SW_PAGE_FAULTS },

  { .type = PERF_TYPE_HARDWARE, .config = PERF_COUNT_HW_CPU_CYCLES },
  { .type = PERF_TYPE_HARDWARE, .config = PERF_COUNT_HW_STALLED_CYCLES_FRONTEND },
  { .type = PERF_TYPE_HARDWARE, .config = PERF_COUNT_HW_STALLED_CYCLES_BACKEND },
  { .type = PERF_TYPE_HARDWARE, .config = PERF_COUNT_HW_INSTRUCTIONS },
  { .type = PERF_TYPE_HARDWARE, .config = PERF_COUNT_HW_BRANCH_INSTRUCTIONS },
  { .type = PERF_TYPE_HARDWARE, .config = PERF_COUNT_HW_BRANCH_MISSES },

	};
	/*
	 * Detailed stats (-d), covering the L1 and last level data caches:
	 */
	struct perf_event_attr detailed_attrs[] = {

  { .type = PERF_TYPE_HW_CACHE,
    .config =
	 PERF_COUNT_HW_CACHE_L1D << 0 |
	(PERF_COUNT_HW_CACHE_OP_READ << 8) |
	(PERF_COUNT_HW_CACHE_RESULT_ACCESS << 16) },

  { .type = PERF_TYPE_HW_CACHE,
    .config =
	 PERF_COUNT_HW_CACHE_L1D << 0 |
	(PERF_COUNT_HW_CACHE_OP_READ << 8) |
	(PERF_COUNT_HW_CACHE_RESULT_MISS << 16) },

  { .type = PERF_TYPE_HW_CACHE,
    .config =
	 PERF_COUNT_HW_CACHE_LL << 0 |
	(PERF_COUNT_HW_CACHE_OP_READ << 8) |
	(PERF_COUNT_HW_CACHE_RESULT_ACCESS << 16) },

  { .type = PERF_TYPE_HW_CACHE,
    .config =
	 PERF_COUNT_HW_CACHE_LL << 0 |
	(PERF_COUNT_HW_CACHE_OP_READ << 8) |
	(PERF_COUNT_HW_CACHE_RESULT_MISS << 16) },

	};
	/*
	 * Very detailed stats (-d -d), covering the instruction cache and the TLB caches:
	 */
	struct perf_event_attr very_detailed_attrs[] = {

  { .type = PERF_TYPE_HW_CACHE,
    .config =
	 PERF_COUNT_HW_CACHE_L1I << 0 |
	(PERF_COUNT_HW_CACHE_OP_READ << 8) |
	(PERF_COUNT_HW_CACHE_RESULT_ACCESS << 16) },

  { .type = PERF_TYPE_HW_CACHE,
    .config =
	 PERF_COUNT_HW_CACHE_L1I << 0 |
	(PERF_COUNT_HW_CACHE_OP_READ << 8) |
	(PERF_COUNT_HW_CACHE_RESULT_MISS << 16) },

  { .type = PERF_TYPE_HW_CACHE,
    .config =
	 PERF_COUNT_HW_CACHE_DTLB << 0 |
	(PERF_COUNT_HW_CACHE_OP_READ << 8) |
	(PERF_COUNT_HW_CACHE_RESULT_ACCESS << 16) },

  { .type = PERF_TYPE_HW_CACHE,
    .config =
	 PERF_COUNT_HW_CACHE_DTLB << 0 |
	(PERF_COUNT_HW_CACHE_OP_READ << 8) |
	(PERF_COUNT_HW_CACHE_RESULT_MISS << 16) },

  { .type = PERF_TYPE_HW_CACHE,
    .config =
	 PERF_COUNT_HW_CACHE_ITLB << 0 |
	(PERF_COUNT_HW_CACHE_OP_READ << 8) |
	(PERF_COUNT_HW_CACHE_RESULT_ACCESS << 16) },

  { .type = PERF_TYPE_HW_CACHE,
    .config =
	 PERF_COUNT_HW_CACHE_ITLB << 0 |
	(PERF_COUNT_HW_CACHE_OP_READ << 8) |
	(PERF_COUNT_HW_CACHE_RESULT_MISS << 16) },

	};
	/*
	 * Very, very detailed stats (-d -d -d), adding prefetch events:
	 */
	struct perf_event_attr very_very_detailed_attrs[] = {

  { .type = PERF_TYPE_HW_CACHE,
    .config =
	 PERF_COUNT_HW_CACHE_L1D << 0 |
	(PERF_COUNT_HW_CACHE_OP_PREFETCH << 8) |
	(PERF_COUNT_HW_CACHE_RESULT_ACCESS << 16) },

  { .type = PERF_TYPE_HW_CACHE,
    .config =
	 PERF_COUNT_HW_CACHE_L1D << 0 |
	(PERF_COUNT_HW_CACHE_OP_PREFETCH << 8) |
	(PERF_COUNT_HW_CACHE_RESULT_MISS << 16) },

	};
	/* Set attrs if no event is selected and !null_run: */
	if (null_run)
		return 0;

	if (!evsel_list->nr_entries) {
		if (perf_evlist__add_default_attrs(evsel_list, default_attrs) < 0)
			return -1;
	}

	/* Detailed events get appended to the event list: */
	if (detailed_run < 1)
		return 0;

	/* Append detailed run extra attributes: */
	if (perf_evlist__add_default_attrs(evsel_list, detailed_attrs) < 0)
		return -1;

	if (detailed_run < 2)
		return 0;

	/* Append very detailed run extra attributes: */
	if (perf_evlist__add_default_attrs(evsel_list, very_detailed_attrs) < 0)
		return -1;

	if (detailed_run < 3)
		return 0;

	/* Append very, very detailed run extra attributes: */
	return perf_evlist__add_default_attrs(evsel_list, very_very_detailed_attrs);
}
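
/*
 * Cumulative effect of the detail levels, as wired up above:
 *
 *	perf stat          ./cmd  - default_attrs only
 *	perf stat -d       ./cmd  - plus L1d/LLC read accesses and misses
 *	perf stat -d -d    ./cmd  - plus L1i/dTLB/iTLB accesses and misses
 *	perf stat -d -d -d ./cmd  - plus L1d prefetches and prefetch misses
 */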
int cmd_stat(int argc, const char **argv, const char *prefix __maybe_unused)
{
	bool append_file = false;
	const char *output_name = NULL;
	const struct option options[] = {
	OPT_CALLBACK('e', "event", &evsel_list, "event",
		     "event selector. use 'perf list' to list available events",
		     parse_events_option),
	OPT_CALLBACK(0, "filter", &evsel_list, "filter",
		     "event filter", parse_filter),
	OPT_BOOLEAN('i', "no-inherit", &no_inherit,
		    "child tasks do not inherit counters"),
	OPT_STRING('p', "pid", &target.pid, "pid",
		   "stat events on existing process id"),
	OPT_STRING('t', "tid", &target.tid, "tid",
		   "stat events on existing thread id"),
	OPT_BOOLEAN('a', "all-cpus", &target.system_wide,
		    "system-wide collection from all CPUs"),
	OPT_BOOLEAN('g', "group", &group,
		    "put the counters into a counter group"),
	OPT_BOOLEAN('c', "scale", &scale, "scale/normalize counters"),
	OPT_INCR('v', "verbose", &verbose,
		 "be more verbose (show counter open errors, etc)"),
	OPT_INTEGER('r', "repeat", &run_count,
		    "repeat command and print average + stddev (max: 100, forever: 0)"),
	OPT_BOOLEAN('n', "null", &null_run,
		    "null run - don't start any counters"),
	OPT_INCR('d', "detailed", &detailed_run,
		 "detailed run - start a lot of events"),
	OPT_BOOLEAN('S', "sync", &sync_run,
		    "call sync() before starting a run"),
	OPT_CALLBACK_NOOPT('B', "big-num", NULL, NULL,
			   "print large numbers with thousands\' separators",
			   stat__set_big_num),
	OPT_STRING('C', "cpu", &target.cpu_list, "cpu",
		   "list of cpus to monitor in system-wide"),
	OPT_SET_UINT('A', "no-aggr", &aggr_mode,
		     "disable CPU count aggregation", AGGR_NONE),
	OPT_STRING('x', "field-separator", &csv_sep, "separator",
		   "print counts with custom separator"),
	OPT_CALLBACK('G', "cgroup", &evsel_list, "name",
		     "monitor event in cgroup name only", parse_cgroups),
	OPT_STRING('o', "output", &output_name, "file", "output file name"),
	OPT_BOOLEAN(0, "append", &append_file, "append to the output file"),
	OPT_INTEGER(0, "log-fd", &output_fd,
		    "log output to fd, instead of stderr"),
	OPT_STRING(0, "pre", &pre_cmd, "command",
		   "command to run prior to the measured command"),
	OPT_STRING(0, "post", &post_cmd, "command",
		   "command to run after the measured command"),
	OPT_UINTEGER('I', "interval-print", &interval,
		     "print counts at regular interval in ms (>= 100)"),
	OPT_SET_UINT(0, "per-socket", &aggr_mode,
		     "aggregate counts per processor socket", AGGR_SOCKET),
	OPT_END()
	};
	const char * const stat_usage[] = {
		"perf stat [<options>] [<command>]",
		NULL
	};
	int status = -ENOMEM, run_idx;
	const char *mode;

	setlocale(LC_ALL, "");

	evsel_list = perf_evlist__new();
	if (evsel_list == NULL)
		return -ENOMEM;

	argc = parse_options(argc, argv, options, stat_usage,
		PARSE_OPT_STOP_AT_NON_OPTION);
	output = stderr;
	if (output_name && strcmp(output_name, "-"))
		output = NULL;

	if (output_name && output_fd) {
		fprintf(stderr, "cannot use both --output and --log-fd\n");
		usage_with_options(stat_usage, options);
	}

	if (output_fd < 0) {
		fprintf(stderr, "argument to --log-fd must be > 0\n");
		usage_with_options(stat_usage, options);
	}
	if (!output) {
		struct timespec tm;
		mode = append_file ? "a" : "w";

		output = fopen(output_name, mode);
		if (!output) {
			perror("failed to create output file");
			return -1;
		}
		clock_gettime(CLOCK_REALTIME, &tm);
		fprintf(output, "# started on %s\n", ctime(&tm.tv_sec));
	} else if (output_fd > 0) {
		mode = append_file ? "a" : "w";
		output = fdopen(output_fd, mode);
		if (!output) {
			perror("Failed opening logfd");
			return -errno;
		}
	}
	if (csv_sep) {
		csv_output = true;
		if (!strcmp(csv_sep, "\\t"))
			csv_sep = "\t";
	} else
		csv_sep = DEFAULT_SEPARATOR;

	/*
	 * let the spreadsheet do the pretty-printing
	 */
	if (csv_output) {
		/* User explicitly passed -B? */
		if (big_num_opt == 1) {
			fprintf(stderr, "-B option not supported with -x\n");
			usage_with_options(stat_usage, options);
		} else /* Nope, so disable big number formatting */
			big_num = false;
	} else if (big_num_opt == 0) /* User passed --no-big-num */
		big_num = false;
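
/*
 * Illustrative -x, output, one "count,event" pair per line (values
 * invented):
 *
 *	1708.761321,task-clock
 *	41190,context-switches
 *	6735,CPU-migrations
 */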
	if (!argc && !perf_target__has_task(&target))
		usage_with_options(stat_usage, options);
	if (run_count < 0) {
		usage_with_options(stat_usage, options);
	} else if (run_count == 0) {
		forever = true;
		run_count = 1;
	}
	/* no_aggr, cgroup are for system-wide only */
	if ((aggr_mode != AGGR_GLOBAL || nr_cgroups)
	     && !perf_target__has_cpu(&target)) {
		fprintf(stderr, "both cgroup and no-aggregation "
			"modes only available in system-wide mode\n");

		usage_with_options(stat_usage, options);
		return -1;
	}
	if (add_default_attributes())
		goto out;

	perf_target__validate(&target);

	if (perf_evlist__create_maps(evsel_list, &target) < 0) {
		if (perf_target__has_task(&target))
			pr_err("Problems finding threads of monitor\n");
		if (perf_target__has_cpu(&target))
			perror("failed to parse CPUs map");

		usage_with_options(stat_usage, options);
		return -1;
	}

	if (interval && interval < 100) {
		pr_err("print interval must be >= 100ms\n");
		usage_with_options(stat_usage, options);
	}

	if (perf_evlist__alloc_stats(evsel_list, interval))
		goto out_free_maps;

	if (perf_stat_init_aggr_mode())
		goto out;
	/*
	 * We don't want to block the signals - that would cause
	 * child tasks to inherit that and Ctrl-C would not work.
	 * What we want is for Ctrl-C to work in the exec()-ed
	 * task, but being ignored by perf stat itself:
	 */
	atexit(sig_atexit);
	signal(SIGINT,  skip_signal);
	signal(SIGCHLD, skip_signal);
	signal(SIGALRM, skip_signal);
	signal(SIGABRT, skip_signal);
	status = 0;
	for (run_idx = 0; forever || run_idx < run_count; run_idx++) {
		if (run_count != 1 && verbose)
			fprintf(output, "[ perf stat: executing run #%d ... ]\n",
				run_idx + 1);

		status = run_perf_stat(argc, argv);
		if (forever && status != -1) {
			print_stat(argc, argv);
			perf_stat__reset_stats(evsel_list);
		}
	}

	if (!forever && status != -1 && !interval)
		print_stat(argc, argv);
	perf_evlist__free_stats(evsel_list);
out_free_maps:
	perf_evlist__delete_maps(evsel_list);
out:
	perf_evlist__delete(evsel_list);
	return status;
}