// SPDX-License-Identifier: GPL-2.0
#include <stdio.h>
#include "evsel.h"
#include "stat.h"
#include "color.h"
#include "pmu.h"
#include "rblist.h"
#include "evlist.h"
#include "expr.h"
#include "metricgroup.h"
#include <linux/zalloc.h>
/*
 * AGGR_GLOBAL: Use CPU 0
 * AGGR_SOCKET: Use first CPU of socket
 * AGGR_DIE: Use first CPU of die
 * AGGR_CORE: Use first CPU of core
 * AGGR_NONE: Use matching CPU
 * AGGR_THREAD: Not supported?
 */
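/*
 * Editor's note (illustrative, not from the upstream comment): under
 * AGGR_SOCKET, for example, counts from all CPUs of a socket are kept
 * under the socket's first CPU, so shadow-stat lookups for any CPU on
 * that socket resolve to the value saved for that representative CPU.
 */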
static bool have_frontend_stalled;

struct runtime_stat rt_stat;
struct stats walltime_nsecs_stats;
struct saved_value {
	struct rb_node rb_node;
	struct perf_evsel *evsel;
	enum stat_type type;
	int ctx;
	int cpu;
	struct runtime_stat *stat;
	struct stats stats;
};
static int saved_value_cmp(struct rb_node *rb_node, const void *entry)
{
	struct saved_value *a = container_of(rb_node,
					     struct saved_value,
					     rb_node);
	const struct saved_value *b = entry;

	if (a->cpu != b->cpu)
		return a->cpu - b->cpu;

	/*
	 * Previously the rbtree was used to link generic metrics.
	 * The keys were evsel/cpu. Now the rbtree is extended to support
	 * per-thread shadow stats. For shadow stats case, the keys
	 * are cpu/type/ctx/stat (evsel is NULL). For generic metrics
	 * case, the keys are still evsel/cpu (type/ctx/stat are 0 or NULL).
	 */
	if (a->type != b->type)
		return a->type - b->type;

	if (a->ctx != b->ctx)
		return a->ctx - b->ctx;

	if (a->evsel == NULL && b->evsel == NULL) {
		if (a->stat == b->stat)
			return 0;

		if ((char *)a->stat < (char *)b->stat)
			return -1;

		return 1;
	}

	if (a->evsel == b->evsel)
		return 0;
	if ((char *)a->evsel < (char *)b->evsel)
		return -1;
	return +1;
}
static struct rb_node *saved_value_new(struct rblist *rblist __maybe_unused,
				       const void *entry)
{
	struct saved_value *nd = malloc(sizeof(struct saved_value));

	if (!nd)
		return NULL;
	memcpy(nd, entry, sizeof(struct saved_value));
	return &nd->rb_node;
}
static void saved_value_delete(struct rblist *rblist __maybe_unused,
			       struct rb_node *rb_node)
{
	struct saved_value *v;

	BUG_ON(!rb_node);
	v = container_of(rb_node, struct saved_value, rb_node);
	free(v);
}
static struct saved_value *saved_value_lookup(struct perf_evsel *evsel,
					      int cpu,
					      bool create,
					      enum stat_type type,
					      int ctx,
					      struct runtime_stat *st)
{
	struct rblist *rblist;
	struct rb_node *nd;
	struct saved_value dm = {
		.cpu = cpu,
		.evsel = evsel,
		.type = type,
		.ctx = ctx,
		.stat = st,
	};

	rblist = &st->value_list;

	nd = rblist__find(rblist, &dm);
	if (nd)
		return container_of(nd, struct saved_value, rb_node);
	if (create) {
		rblist__add_node(rblist, &dm);
		nd = rblist__find(rblist, &dm);
		if (nd)
			return container_of(nd, struct saved_value, rb_node);
	}
	return NULL;
}
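/*
 * Editor's sketch (not in the upstream file): the two key shapes that
 * saved_value_cmp() distinguishes, as used by the callers below:
 *
 *	// shadow stat, keyed on cpu/type/ctx/stat, evsel is NULL:
 *	v = saved_value_lookup(NULL, cpu, true, STAT_CYCLES, ctx, st);
 *
 *	// generic metric, keyed on evsel/cpu, type/ctx left at 0:
 *	v = saved_value_lookup(counter, cpu, true, STAT_NONE, 0, st);
 */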
void runtime_stat__init(struct runtime_stat *st)
{
	struct rblist *rblist = &st->value_list;

	rblist__init(rblist);
	rblist->node_cmp = saved_value_cmp;
	rblist->node_new = saved_value_new;
	rblist->node_delete = saved_value_delete;
}
void runtime_stat__exit(struct runtime_stat *st)
{
	rblist__exit(&st->value_list);
}
void perf_stat__init_shadow_stats(void)
{
	have_frontend_stalled = pmu_have_event("cpu", "stalled-cycles-frontend");
	runtime_stat__init(&rt_stat);
}
static int evsel_context(struct perf_evsel *evsel)
{
	int ctx = 0;

	if (evsel->attr.exclude_kernel)
		ctx |= CTX_BIT_KERNEL;
	if (evsel->attr.exclude_user)
		ctx |= CTX_BIT_USER;
	if (evsel->attr.exclude_hv)
		ctx |= CTX_BIT_HV;
	if (evsel->attr.exclude_host)
		ctx |= CTX_BIT_HOST;
	if (evsel->attr.exclude_idle)
		ctx |= CTX_BIT_IDLE;

	return ctx;
}
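/*
 * Example (editor's note): an event opened with attr.exclude_kernel and
 * attr.exclude_hv set yields ctx = CTX_BIT_KERNEL | CTX_BIT_HV, keeping
 * its shadow stats separate from events using other exclusion masks.
 */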
static void reset_stat(struct runtime_stat *st)
{
	struct rblist *rblist;
	struct rb_node *pos, *next;

	rblist = &st->value_list;
	next = rb_first_cached(&rblist->entries);
	while (next) {
		pos = next;
		next = rb_next(pos);
		memset(&container_of(pos, struct saved_value, rb_node)->stats,
		       0,
		       sizeof(struct stats));
	}
}
void perf_stat__reset_shadow_stats(void)
{
	reset_stat(&rt_stat);
	memset(&walltime_nsecs_stats, 0, sizeof(walltime_nsecs_stats));
}
void perf_stat__reset_shadow_per_stat(struct runtime_stat *st)
{
	reset_stat(st);
}
static void update_runtime_stat(struct runtime_stat *st,
				enum stat_type type,
				int ctx, int cpu, u64 count)
{
	struct saved_value *v = saved_value_lookup(NULL, cpu, true,
						   type, ctx, st);

	if (v)
		update_stats(&v->stats, count);
}
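/*
 * Illustrative usage sketch (editor's addition, not in the upstream
 * file): a caller-owned runtime_stat must be initialized before the
 * first lookup and torn down afterwards. All names below exist in this
 * file; the values are made up.
 */
static void __maybe_unused shadow_stat_usage_example(void)
{
	struct runtime_stat st;

	runtime_stat__init(&st);
	/* record one cycles sample for CPU 0 with no exclusion bits */
	update_runtime_stat(&st, STAT_CYCLES, 0, 0, 1000000);
	runtime_stat__exit(&st);
}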
/*
 * Update various tracking values we maintain to print
 * more semantic information such as miss/hit ratios,
 * instruction rates, etc:
 */
void perf_stat__update_shadow_stats(struct perf_evsel *counter, u64 count,
				    int cpu, struct runtime_stat *st)
{
	int ctx = evsel_context(counter);
	u64 count_ns = count;

	count *= counter->scale;

	if (perf_evsel__is_clock(counter))
		update_runtime_stat(st, STAT_NSECS, 0, cpu, count_ns);
	else if (perf_evsel__match(counter, HARDWARE, HW_CPU_CYCLES))
		update_runtime_stat(st, STAT_CYCLES, ctx, cpu, count);
	else if (perf_stat_evsel__is(counter, CYCLES_IN_TX))
		update_runtime_stat(st, STAT_CYCLES_IN_TX, ctx, cpu, count);
	else if (perf_stat_evsel__is(counter, TRANSACTION_START))
		update_runtime_stat(st, STAT_TRANSACTION, ctx, cpu, count);
	else if (perf_stat_evsel__is(counter, ELISION_START))
		update_runtime_stat(st, STAT_ELISION, ctx, cpu, count);
	else if (perf_stat_evsel__is(counter, TOPDOWN_TOTAL_SLOTS))
		update_runtime_stat(st, STAT_TOPDOWN_TOTAL_SLOTS,
				    ctx, cpu, count);
	else if (perf_stat_evsel__is(counter, TOPDOWN_SLOTS_ISSUED))
		update_runtime_stat(st, STAT_TOPDOWN_SLOTS_ISSUED,
				    ctx, cpu, count);
	else if (perf_stat_evsel__is(counter, TOPDOWN_SLOTS_RETIRED))
		update_runtime_stat(st, STAT_TOPDOWN_SLOTS_RETIRED,
				    ctx, cpu, count);
	else if (perf_stat_evsel__is(counter, TOPDOWN_FETCH_BUBBLES))
		update_runtime_stat(st, STAT_TOPDOWN_FETCH_BUBBLES,
				    ctx, cpu, count);
	else if (perf_stat_evsel__is(counter, TOPDOWN_RECOVERY_BUBBLES))
		update_runtime_stat(st, STAT_TOPDOWN_RECOVERY_BUBBLES,
				    ctx, cpu, count);
	else if (perf_evsel__match(counter, HARDWARE, HW_STALLED_CYCLES_FRONTEND))
		update_runtime_stat(st, STAT_STALLED_CYCLES_FRONT,
				    ctx, cpu, count);
	else if (perf_evsel__match(counter, HARDWARE, HW_STALLED_CYCLES_BACKEND))
		update_runtime_stat(st, STAT_STALLED_CYCLES_BACK,
				    ctx, cpu, count);
	else if (perf_evsel__match(counter, HARDWARE, HW_BRANCH_INSTRUCTIONS))
		update_runtime_stat(st, STAT_BRANCHES, ctx, cpu, count);
	else if (perf_evsel__match(counter, HARDWARE, HW_CACHE_REFERENCES))
		update_runtime_stat(st, STAT_CACHEREFS, ctx, cpu, count);
	else if (perf_evsel__match(counter, HW_CACHE, HW_CACHE_L1D))
		update_runtime_stat(st, STAT_L1_DCACHE, ctx, cpu, count);
	else if (perf_evsel__match(counter, HW_CACHE, HW_CACHE_L1I))
		update_runtime_stat(st, STAT_L1_ICACHE, ctx, cpu, count);
	else if (perf_evsel__match(counter, HW_CACHE, HW_CACHE_LL))
		update_runtime_stat(st, STAT_LL_CACHE, ctx, cpu, count);
	else if (perf_evsel__match(counter, HW_CACHE, HW_CACHE_DTLB))
		update_runtime_stat(st, STAT_DTLB_CACHE, ctx, cpu, count);
	else if (perf_evsel__match(counter, HW_CACHE, HW_CACHE_ITLB))
		update_runtime_stat(st, STAT_ITLB_CACHE, ctx, cpu, count);
	else if (perf_stat_evsel__is(counter, SMI_NUM))
		update_runtime_stat(st, STAT_SMI_NUM, ctx, cpu, count);
	else if (perf_stat_evsel__is(counter, APERF))
		update_runtime_stat(st, STAT_APERF, ctx, cpu, count);

	if (counter->collect_stat) {
		struct saved_value *v = saved_value_lookup(counter, cpu, true,
							   STAT_NONE, 0, st);
		update_stats(&v->stats, count);
	}
}
/* used for get_ratio_color() */
enum grc_type {
	GRC_STALLED_CYCLES_FE,
	GRC_STALLED_CYCLES_BE,
	GRC_CACHE_MISSES,
	GRC_MAX_NR
};

static const char *get_ratio_color(enum grc_type type, double ratio)
{
	static const double grc_table[GRC_MAX_NR][3] = {
		[GRC_STALLED_CYCLES_FE] = { 50.0, 30.0, 10.0 },
		[GRC_STALLED_CYCLES_BE] = { 75.0, 50.0, 20.0 },
		[GRC_CACHE_MISSES]	= { 20.0, 10.0, 5.0 },
	};
	const char *color = PERF_COLOR_NORMAL;

	if (ratio > grc_table[type][0])
		color = PERF_COLOR_RED;
	else if (ratio > grc_table[type][1])
		color = PERF_COLOR_MAGENTA;
	else if (ratio > grc_table[type][2])
		color = PERF_COLOR_YELLOW;

	return color;
}
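/*
 * Example (editor's note): a 35.0 frontend-stall ratio exceeds the 30.0
 * threshold but not the 50.0 one in grc_table[GRC_STALLED_CYCLES_FE], so
 * get_ratio_color(GRC_STALLED_CYCLES_FE, 35.0) returns PERF_COLOR_MAGENTA.
 */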
static struct perf_evsel *perf_stat__find_event(struct perf_evlist *evsel_list,
						const char *name)
{
	struct perf_evsel *c2;

	evlist__for_each_entry (evsel_list, c2) {
		if (!strcasecmp(c2->name, name) && !c2->collect_stat)
			return c2;
	}
	return NULL;
}
/* Mark MetricExpr target events and link events using them to them. */
void perf_stat__collect_metric_expr(struct perf_evlist *evsel_list)
{
	struct perf_evsel *counter, *leader, **metric_events, *oc;
	bool found;
	const char **metric_names;
	int i;
	int num_metric_names;

	evlist__for_each_entry(evsel_list, counter) {
		bool invalid = false;

		leader = counter->leader;
		if (!counter->metric_expr)
			continue;
		metric_events = counter->metric_events;
		if (!metric_events) {
			if (expr__find_other(counter->metric_expr, counter->name,
						&metric_names, &num_metric_names) < 0)
				continue;

			metric_events = calloc(sizeof(struct perf_evsel *),
					       num_metric_names + 1);
			if (!metric_events)
				return;
			counter->metric_events = metric_events;
		}

		for (i = 0; i < num_metric_names; i++) {
			found = false;
			if (leader) {
				/* Search in group */
				for_each_group_member (oc, leader) {
					if (!strcasecmp(oc->name, metric_names[i]) &&
						!oc->collect_stat) {
						found = true;
						break;
					}
				}
			}
			if (!found) {
				/* Search ignoring groups */
				oc = perf_stat__find_event(evsel_list, metric_names[i]);
			}
			if (!oc) {
				/* Deduping one is good enough to handle duplicated PMUs. */
				static char *printed;

				/*
				 * Adding events automatically would be difficult, because
				 * it would risk creating groups that are not schedulable.
				 * perf stat doesn't understand all the scheduling constraints
				 * of events. So we ask the user instead to add the missing
				 * events.
				 */
				if (!printed || strcasecmp(printed, metric_names[i])) {
					fprintf(stderr,
						"Add %s event to groups to get metric expression for %s\n",
						metric_names[i],
						counter->name);
					printed = strdup(metric_names[i]);
				}
				invalid = true;
				continue;
			}
			metric_events[i] = oc;
			oc->collect_stat = true;
		}
		metric_events[i] = NULL;
		if (invalid) {
			free(metric_events);
			counter->metric_events = NULL;
			counter->metric_expr = NULL;
		}
	}
}
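/*
 * Example (editor's sketch, hypothetical event names): for a JSON event
 * carrying MetricExpr = "l2_misses / l2_requests", expr__find_other()
 * extracts "l2_misses" and "l2_requests"; each name must match another
 * event on the command line, which is then stored in metric_events[] and
 * flagged with collect_stat so generic_metric() can read its averages.
 */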
static double runtime_stat_avg(struct runtime_stat *st,
			       enum stat_type type, int ctx, int cpu)
{
	struct saved_value *v;

	v = saved_value_lookup(NULL, cpu, false, type, ctx, st);
	if (!v)
		return 0.0;

	return avg_stats(&v->stats);
}
static double runtime_stat_n(struct runtime_stat *st,
			     enum stat_type type, int ctx, int cpu)
{
	struct saved_value *v;

	v = saved_value_lookup(NULL, cpu, false, type, ctx, st);
	if (!v)
		return 0.0;

	return v->stats.n;
}
static void print_stalled_cycles_frontend(struct perf_stat_config *config,
					  int cpu,
					  struct perf_evsel *evsel, double avg,
					  struct perf_stat_output_ctx *out,
					  struct runtime_stat *st)
{
	double total, ratio = 0.0;
	const char *color;
	int ctx = evsel_context(evsel);

	total = runtime_stat_avg(st, STAT_CYCLES, ctx, cpu);

	if (total)
		ratio = avg / total * 100.0;

	color = get_ratio_color(GRC_STALLED_CYCLES_FE, ratio);

	if (ratio)
		out->print_metric(config, out->ctx, color, "%7.2f%%", "frontend cycles idle",
				  ratio);
	else
		out->print_metric(config, out->ctx, NULL, NULL, "frontend cycles idle", 0);
}
static void print_stalled_cycles_backend(struct perf_stat_config *config,
					 int cpu,
					 struct perf_evsel *evsel, double avg,
					 struct perf_stat_output_ctx *out,
					 struct runtime_stat *st)
{
	double total, ratio = 0.0;
	const char *color;
	int ctx = evsel_context(evsel);

	total = runtime_stat_avg(st, STAT_CYCLES, ctx, cpu);

	if (total)
		ratio = avg / total * 100.0;

	color = get_ratio_color(GRC_STALLED_CYCLES_BE, ratio);

	out->print_metric(config, out->ctx, color, "%7.2f%%", "backend cycles idle", ratio);
}
static void print_branch_misses(struct perf_stat_config *config,
				int cpu,
				struct perf_evsel *evsel,
				double avg,
				struct perf_stat_output_ctx *out,
				struct runtime_stat *st)
{
	double total, ratio = 0.0;
	const char *color;
	int ctx = evsel_context(evsel);

	total = runtime_stat_avg(st, STAT_BRANCHES, ctx, cpu);

	if (total)
		ratio = avg / total * 100.0;

	color = get_ratio_color(GRC_CACHE_MISSES, ratio);

	out->print_metric(config, out->ctx, color, "%7.2f%%", "of all branches", ratio);
}
static void print_l1_dcache_misses(struct perf_stat_config *config,
				   int cpu,
				   struct perf_evsel *evsel,
				   double avg,
				   struct perf_stat_output_ctx *out,
				   struct runtime_stat *st)
{
	double total, ratio = 0.0;
	const char *color;
	int ctx = evsel_context(evsel);

	total = runtime_stat_avg(st, STAT_L1_DCACHE, ctx, cpu);

	if (total)
		ratio = avg / total * 100.0;

	color = get_ratio_color(GRC_CACHE_MISSES, ratio);

	out->print_metric(config, out->ctx, color, "%7.2f%%", "of all L1-dcache hits", ratio);
}
static void print_l1_icache_misses(struct perf_stat_config *config,
				   int cpu,
				   struct perf_evsel *evsel,
				   double avg,
				   struct perf_stat_output_ctx *out,
				   struct runtime_stat *st)
{
	double total, ratio = 0.0;
	const char *color;
	int ctx = evsel_context(evsel);

	total = runtime_stat_avg(st, STAT_L1_ICACHE, ctx, cpu);

	if (total)
		ratio = avg / total * 100.0;

	color = get_ratio_color(GRC_CACHE_MISSES, ratio);
	out->print_metric(config, out->ctx, color, "%7.2f%%", "of all L1-icache hits", ratio);
}
static void print_dtlb_cache_misses(struct perf_stat_config *config,
				    int cpu,
				    struct perf_evsel *evsel,
				    double avg,
				    struct perf_stat_output_ctx *out,
				    struct runtime_stat *st)
{
	double total, ratio = 0.0;
	const char *color;
	int ctx = evsel_context(evsel);

	total = runtime_stat_avg(st, STAT_DTLB_CACHE, ctx, cpu);

	if (total)
		ratio = avg / total * 100.0;

	color = get_ratio_color(GRC_CACHE_MISSES, ratio);
	out->print_metric(config, out->ctx, color, "%7.2f%%", "of all dTLB cache hits", ratio);
}
static void print_itlb_cache_misses(struct perf_stat_config *config,
				    int cpu,
				    struct perf_evsel *evsel,
				    double avg,
				    struct perf_stat_output_ctx *out,
				    struct runtime_stat *st)
{
	double total, ratio = 0.0;
	const char *color;
	int ctx = evsel_context(evsel);

	total = runtime_stat_avg(st, STAT_ITLB_CACHE, ctx, cpu);

	if (total)
		ratio = avg / total * 100.0;

	color = get_ratio_color(GRC_CACHE_MISSES, ratio);
	out->print_metric(config, out->ctx, color, "%7.2f%%", "of all iTLB cache hits", ratio);
}
static void print_ll_cache_misses(struct perf_stat_config *config,
				  int cpu,
				  struct perf_evsel *evsel,
				  double avg,
				  struct perf_stat_output_ctx *out,
				  struct runtime_stat *st)
{
	double total, ratio = 0.0;
	const char *color;
	int ctx = evsel_context(evsel);

	total = runtime_stat_avg(st, STAT_LL_CACHE, ctx, cpu);

	if (total)
		ratio = avg / total * 100.0;

	color = get_ratio_color(GRC_CACHE_MISSES, ratio);
	out->print_metric(config, out->ctx, color, "%7.2f%%", "of all LL-cache hits", ratio);
}
/*
 * High level "TopDown" CPU core pipeline bottleneck break down.
 *
 * Basic concept following
 * Yasin, A Top Down Method for Performance analysis and Counter architecture
 * ISPASS14
 *
 * The CPU pipeline is divided into 4 areas that can be bottlenecks:
 *
 * Frontend -> Backend -> Retiring
 * BadSpeculation in addition means out of order execution that is thrown away
 * (for example branch mispredictions)
 * Frontend is instruction decoding.
 * Backend is execution, like computation and accessing data in memory
 * Retiring is good execution that is not directly bottlenecked
 *
 * The formulas are computed in slots.
 * A slot is an entry in the pipeline each for the pipeline width
 * (for example a 4-wide pipeline has 4 slots for each cycle)
 *
 * Formulas:
 * BadSpeculation = ((SlotsIssued - SlotsRetired) + RecoveryBubbles) /
 *			TotalSlots
 * Retiring = SlotsRetired / TotalSlots
 * FrontendBound = FetchBubbles / TotalSlots
 * BackendBound = 1.0 - BadSpeculation - Retiring - FrontendBound
 *
 * The kernel provides the mapping to the low level CPU events and any scaling
 * needed for the CPU pipeline width, for example:
 *
 * TotalSlots = Cycles * 4
 *
 * The scaling factor is communicated in the sysfs unit.
 *
 * In some cases the CPU may not be able to measure all the formulas due to
 * missing events. In this case multiple formulas are combined where possible.
 *
 * Full TopDown supports more levels to sub-divide each area: for example
 * BackendBound into computing bound and memory bound. For now we only
 * support Level 1 TopDown.
 */
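/*
 * Worked example (editor's addition, made-up numbers): on a 4-wide
 * pipeline with Cycles = 1000, TotalSlots = 4000. Given SlotsIssued =
 * 2000, SlotsRetired = 1500, RecoveryBubbles = 100, FetchBubbles = 800:
 *
 *	BadSpeculation = (2000 - 1500 + 100) / 4000	= 0.15
 *	Retiring       = 1500 / 4000			= 0.375
 *	FrontendBound  = 800 / 4000			= 0.20
 *	BackendBound   = 1.0 - 0.15 - 0.375 - 0.20	= 0.275
 */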
/* Round small negative values caused by measurement noise up to zero. */
static double sanitize_val(double x)
{
	if (x < 0 && x >= -0.02)
		return 0.0;
	return x;
}
static double td_total_slots(int ctx, int cpu, struct runtime_stat *st)
{
	return runtime_stat_avg(st, STAT_TOPDOWN_TOTAL_SLOTS, ctx, cpu);
}
static double td_bad_spec(int ctx, int cpu, struct runtime_stat *st)
{
	double bad_spec = 0;
	double total_slots;
	double total;

	total = runtime_stat_avg(st, STAT_TOPDOWN_SLOTS_ISSUED, ctx, cpu) -
		runtime_stat_avg(st, STAT_TOPDOWN_SLOTS_RETIRED, ctx, cpu) +
		runtime_stat_avg(st, STAT_TOPDOWN_RECOVERY_BUBBLES, ctx, cpu);

	total_slots = td_total_slots(ctx, cpu, st);
	if (total_slots)
		bad_spec = total / total_slots;
	return sanitize_val(bad_spec);
}
static double td_retiring(int ctx, int cpu, struct runtime_stat *st)
{
	double retiring = 0;
	double total_slots = td_total_slots(ctx, cpu, st);
	double ret_slots = runtime_stat_avg(st, STAT_TOPDOWN_SLOTS_RETIRED,
					    ctx, cpu);

	if (total_slots)
		retiring = ret_slots / total_slots;
	return retiring;
}
static double td_fe_bound(int ctx, int cpu, struct runtime_stat *st)
{
	double fe_bound = 0;
	double total_slots = td_total_slots(ctx, cpu, st);
	double fetch_bub = runtime_stat_avg(st, STAT_TOPDOWN_FETCH_BUBBLES,
					    ctx, cpu);

	if (total_slots)
		fe_bound = fetch_bub / total_slots;
	return fe_bound;
}
static double td_be_bound(int ctx, int cpu, struct runtime_stat *st)
{
	double sum = (td_fe_bound(ctx, cpu, st) +
		      td_bad_spec(ctx, cpu, st) +
		      td_retiring(ctx, cpu, st));
	if (sum == 0)
		return 0;
	return sanitize_val(1.0 - sum);
}
static void print_smi_cost(struct perf_stat_config *config,
			   int cpu, struct perf_evsel *evsel,
			   struct perf_stat_output_ctx *out,
			   struct runtime_stat *st)
{
	double smi_num, aperf, cycles, cost = 0.0;
	int ctx = evsel_context(evsel);
	const char *color = NULL;

	smi_num = runtime_stat_avg(st, STAT_SMI_NUM, ctx, cpu);
	aperf = runtime_stat_avg(st, STAT_APERF, ctx, cpu);
	cycles = runtime_stat_avg(st, STAT_CYCLES, ctx, cpu);

	if ((cycles == 0) || (aperf == 0))
		return;

	if (smi_num)
		cost = (aperf - cycles) / aperf * 100.00;

	if (cost > 10)
		color = PERF_COLOR_RED;
	out->print_metric(config, out->ctx, color, "%8.1f%%", "SMI cycles%", cost);
	out->print_metric(config, out->ctx, NULL, "%4.0f", "SMI#", smi_num);
}
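/*
 * Example (editor's note, made-up numbers): with aperf = 1100000 and
 * cycles = 1000000 while smi_num is non-zero, the SMI cost is
 * (1100000 - 1000000) / 1100000 * 100 ~= 9.1%, printed without color;
 * anything above 10% would be highlighted in red.
 */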
static void generic_metric(struct perf_stat_config *config,
			   const char *metric_expr,
			   struct perf_evsel **metric_events,
			   char *name,
			   const char *metric_name,
			   double avg,
			   int cpu,
			   struct perf_stat_output_ctx *out,
			   struct runtime_stat *st)
{
	print_metric_t print_metric = out->print_metric;
	struct parse_ctx pctx;
	double ratio;
	int i;
	void *ctxp = out->ctx;
	char *n, *pn;

	expr__ctx_init(&pctx);
	expr__add_id(&pctx, name, avg);
	for (i = 0; metric_events[i]; i++) {
		struct saved_value *v;
		struct stats *stats;
		double scale;

		if (!strcmp(metric_events[i]->name, "duration_time")) {
			stats = &walltime_nsecs_stats;
			scale = 1e-9;
		} else {
			v = saved_value_lookup(metric_events[i], cpu, false,
					       STAT_NONE, 0, st);
			if (!v)
				break;
			stats = &v->stats;
			scale = 1.0;
		}

		n = strdup(metric_events[i]->name);
		if (!n)
			return;
		/*
		 * This display code with --no-merge adds [cpu] postfixes.
		 * These are not supported by the parser. Remove everything
		 * after the space.
		 */
		pn = strchr(n, ' ');
		if (pn)
			*pn = 0;
		expr__add_id(&pctx, n, avg_stats(stats)*scale);
	}
	if (!metric_events[i]) {
		const char *p = metric_expr;

		if (expr__parse(&ratio, &pctx, &p) == 0)
			print_metric(config, ctxp, NULL, "%8.1f",
				metric_name ?
				metric_name :
				out->force_header ? name : "",
				ratio);
		else
			print_metric(config, ctxp, NULL, NULL,
				     out->force_header ?
				     (metric_name ? metric_name : name) : "", 0);
	} else
		print_metric(config, ctxp, NULL, NULL, "", 0);

	for (i = 1; i < pctx.num_ids; i++)
		zfree(&pctx.ids[i].name);
}
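/*
 * Example (editor's sketch): for metric_expr "instructions / cycles",
 * the loop above seeds the parser context with the saved averages of
 * both events, expr__parse() then evaluates the division, and the
 * result is printed under metric_name (for instance "IPC").
 */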
void perf_stat__print_shadow_stats(struct perf_stat_config *config,
				   struct perf_evsel *evsel,
				   double avg, int cpu,
				   struct perf_stat_output_ctx *out,
				   struct rblist *metric_events,
				   struct runtime_stat *st)
{
	void *ctxp = out->ctx;
	print_metric_t print_metric = out->print_metric;
	double total, ratio = 0.0, total2;
	const char *color = NULL;
	int ctx = evsel_context(evsel);
	struct metric_event *me;
	int num = 1;
	if (perf_evsel__match(evsel, HARDWARE, HW_INSTRUCTIONS)) {
		total = runtime_stat_avg(st, STAT_CYCLES, ctx, cpu);

		if (total) {
			ratio = avg / total;
			print_metric(config, ctxp, NULL, "%7.2f ",
					"insn per cycle", ratio);
		} else {
			print_metric(config, ctxp, NULL, NULL, "insn per cycle", 0);
		}

		total = runtime_stat_avg(st, STAT_STALLED_CYCLES_FRONT,
					 ctx, cpu);

		total = max(total, runtime_stat_avg(st,
						    STAT_STALLED_CYCLES_BACK,
						    ctx, cpu));

		if (total && avg) {
			out->new_line(config, ctxp);
			ratio = total / avg;
			print_metric(config, ctxp, NULL, "%7.2f ",
					"stalled cycles per insn",
					ratio);
		} else if (have_frontend_stalled) {
			print_metric(config, ctxp, NULL, NULL,
				     "stalled cycles per insn", 0);
		}
	} else if (perf_evsel__match(evsel, HARDWARE, HW_BRANCH_MISSES)) {
		if (runtime_stat_n(st, STAT_BRANCHES, ctx, cpu) != 0)
			print_branch_misses(config, cpu, evsel, avg, out, st);
		else
			print_metric(config, ctxp, NULL, NULL, "of all branches", 0);
	} else if (
		evsel->attr.type == PERF_TYPE_HW_CACHE &&
		evsel->attr.config == ( PERF_COUNT_HW_CACHE_L1D |
					((PERF_COUNT_HW_CACHE_OP_READ) << 8) |
					((PERF_COUNT_HW_CACHE_RESULT_MISS) << 16))) {

		if (runtime_stat_n(st, STAT_L1_DCACHE, ctx, cpu) != 0)
			print_l1_dcache_misses(config, cpu, evsel, avg, out, st);
		else
			print_metric(config, ctxp, NULL, NULL, "of all L1-dcache hits", 0);
	} else if (
		evsel->attr.type == PERF_TYPE_HW_CACHE &&
		evsel->attr.config == ( PERF_COUNT_HW_CACHE_L1I |
					((PERF_COUNT_HW_CACHE_OP_READ) << 8) |
					((PERF_COUNT_HW_CACHE_RESULT_MISS) << 16))) {

		if (runtime_stat_n(st, STAT_L1_ICACHE, ctx, cpu) != 0)
			print_l1_icache_misses(config, cpu, evsel, avg, out, st);
		else
			print_metric(config, ctxp, NULL, NULL, "of all L1-icache hits", 0);
	} else if (
		evsel->attr.type == PERF_TYPE_HW_CACHE &&
		evsel->attr.config == ( PERF_COUNT_HW_CACHE_DTLB |
					((PERF_COUNT_HW_CACHE_OP_READ) << 8) |
					((PERF_COUNT_HW_CACHE_RESULT_MISS) << 16))) {

		if (runtime_stat_n(st, STAT_DTLB_CACHE, ctx, cpu) != 0)
			print_dtlb_cache_misses(config, cpu, evsel, avg, out, st);
		else
			print_metric(config, ctxp, NULL, NULL, "of all dTLB cache hits", 0);
	} else if (
		evsel->attr.type == PERF_TYPE_HW_CACHE &&
		evsel->attr.config == ( PERF_COUNT_HW_CACHE_ITLB |
					((PERF_COUNT_HW_CACHE_OP_READ) << 8) |
					((PERF_COUNT_HW_CACHE_RESULT_MISS) << 16))) {

		if (runtime_stat_n(st, STAT_ITLB_CACHE, ctx, cpu) != 0)
			print_itlb_cache_misses(config, cpu, evsel, avg, out, st);
		else
			print_metric(config, ctxp, NULL, NULL, "of all iTLB cache hits", 0);
	} else if (
		evsel->attr.type == PERF_TYPE_HW_CACHE &&
		evsel->attr.config == ( PERF_COUNT_HW_CACHE_LL |
					((PERF_COUNT_HW_CACHE_OP_READ) << 8) |
					((PERF_COUNT_HW_CACHE_RESULT_MISS) << 16))) {

		if (runtime_stat_n(st, STAT_LL_CACHE, ctx, cpu) != 0)
			print_ll_cache_misses(config, cpu, evsel, avg, out, st);
		else
			print_metric(config, ctxp, NULL, NULL, "of all LL-cache hits", 0);
	} else if (perf_evsel__match(evsel, HARDWARE, HW_CACHE_MISSES)) {
		total = runtime_stat_avg(st, STAT_CACHEREFS, ctx, cpu);

		if (total)
			ratio = avg * 100 / total;

		if (runtime_stat_n(st, STAT_CACHEREFS, ctx, cpu) != 0)
			print_metric(config, ctxp, NULL, "%8.3f %%",
				     "of all cache refs", ratio);
		else
			print_metric(config, ctxp, NULL, NULL, "of all cache refs", 0);
	} else if (perf_evsel__match(evsel, HARDWARE, HW_STALLED_CYCLES_FRONTEND)) {
		print_stalled_cycles_frontend(config, cpu, evsel, avg, out, st);
	} else if (perf_evsel__match(evsel, HARDWARE, HW_STALLED_CYCLES_BACKEND)) {
		print_stalled_cycles_backend(config, cpu, evsel, avg, out, st);
	} else if (perf_evsel__match(evsel, HARDWARE, HW_CPU_CYCLES)) {
		total = runtime_stat_avg(st, STAT_NSECS, 0, cpu);

		if (total) {
			ratio = avg / total;
			print_metric(config, ctxp, NULL, "%8.3f", "GHz", ratio);
		} else {
			print_metric(config, ctxp, NULL, NULL, "GHz", 0);
		}
	} else if (perf_stat_evsel__is(evsel, CYCLES_IN_TX)) {
		total = runtime_stat_avg(st, STAT_CYCLES, ctx, cpu);

		if (total)
			print_metric(config, ctxp, NULL,
				"%7.2f%%", "transactional cycles",
				100.0 * (avg / total));
		else
			print_metric(config, ctxp, NULL, NULL, "transactional cycles",
				     0);
	} else if (perf_stat_evsel__is(evsel, CYCLES_IN_TX_CP)) {
		total = runtime_stat_avg(st, STAT_CYCLES, ctx, cpu);
		total2 = runtime_stat_avg(st, STAT_CYCLES_IN_TX, ctx, cpu);

		if (total2 < avg)
			total2 = avg;
		if (total)
			print_metric(config, ctxp, NULL, "%7.2f%%", "aborted cycles",
				100.0 * ((total2-avg) / total));
		else
			print_metric(config, ctxp, NULL, NULL, "aborted cycles", 0);
	} else if (perf_stat_evsel__is(evsel, TRANSACTION_START)) {
		total = runtime_stat_avg(st, STAT_CYCLES_IN_TX,
					 ctx, cpu);

		if (avg)
			ratio = total / avg;

		if (runtime_stat_n(st, STAT_CYCLES_IN_TX, ctx, cpu) != 0)
			print_metric(config, ctxp, NULL, "%8.0f",
				     "cycles / transaction", ratio);
		else
			print_metric(config, ctxp, NULL, NULL, "cycles / transaction",
				     0);
	} else if (perf_stat_evsel__is(evsel, ELISION_START)) {
		total = runtime_stat_avg(st, STAT_CYCLES_IN_TX,
					 ctx, cpu);

		if (avg)
			ratio = total / avg;

		print_metric(config, ctxp, NULL, "%8.0f", "cycles / elision", ratio);
	} else if (perf_evsel__is_clock(evsel)) {
		if ((ratio = avg_stats(&walltime_nsecs_stats)) != 0)
			print_metric(config, ctxp, NULL, "%8.3f", "CPUs utilized",
				     avg / (ratio * evsel->scale));
		else
			print_metric(config, ctxp, NULL, NULL, "CPUs utilized", 0);
	} else if (perf_stat_evsel__is(evsel, TOPDOWN_FETCH_BUBBLES)) {
		double fe_bound = td_fe_bound(ctx, cpu, st);

		if (fe_bound > 0.2)
			color = PERF_COLOR_RED;
		print_metric(config, ctxp, color, "%8.1f%%", "frontend bound",
				fe_bound * 100.);
	} else if (perf_stat_evsel__is(evsel, TOPDOWN_SLOTS_RETIRED)) {
		double retiring = td_retiring(ctx, cpu, st);

		if (retiring > 0.7)
			color = PERF_COLOR_GREEN;
		print_metric(config, ctxp, color, "%8.1f%%", "retiring",
				retiring * 100.);
	} else if (perf_stat_evsel__is(evsel, TOPDOWN_RECOVERY_BUBBLES)) {
		double bad_spec = td_bad_spec(ctx, cpu, st);

		if (bad_spec > 0.1)
			color = PERF_COLOR_RED;
		print_metric(config, ctxp, color, "%8.1f%%", "bad speculation",
				bad_spec * 100.);
	} else if (perf_stat_evsel__is(evsel, TOPDOWN_SLOTS_ISSUED)) {
		double be_bound = td_be_bound(ctx, cpu, st);
		const char *name = "backend bound";
		static int have_recovery_bubbles = -1;

		/* In case the CPU does not support topdown-recovery-bubbles */
		if (have_recovery_bubbles < 0)
			have_recovery_bubbles = pmu_have_event("cpu",
					"topdown-recovery-bubbles");
		if (!have_recovery_bubbles)
			name = "backend bound/bad spec";

		if (be_bound > 0.2)
			color = PERF_COLOR_RED;
		if (td_total_slots(ctx, cpu, st) > 0)
			print_metric(config, ctxp, color, "%8.1f%%", name,
					be_bound * 100.);
		else
			print_metric(config, ctxp, NULL, NULL, name, 0);
	} else if (evsel->metric_expr) {
		generic_metric(config, evsel->metric_expr, evsel->metric_events, evsel->name,
				evsel->metric_name, avg, cpu, out, st);
	} else if (runtime_stat_n(st, STAT_NSECS, 0, cpu) != 0) {
		char unit = 'M';
		char unit_buf[10];

		total = runtime_stat_avg(st, STAT_NSECS, 0, cpu);

		if (total)
			ratio = 1000.0 * avg / total;
		if (ratio < 0.001) {
			ratio *= 1000;
			unit = 'K';
		}
		snprintf(unit_buf, sizeof(unit_buf), "%c/sec", unit);
		print_metric(config, ctxp, NULL, "%8.3f", unit_buf, ratio);
	} else if (perf_stat_evsel__is(evsel, SMI_NUM)) {
		print_smi_cost(config, cpu, evsel, out, st);
	} else {
		num = 0;
	}
	if ((me = metricgroup__lookup(metric_events, evsel, false)) != NULL) {
		struct metric_expr *mexp;

		list_for_each_entry (mexp, &me->head, nd) {
			if (num++ > 0)
				out->new_line(config, ctxp);
			generic_metric(config, mexp->metric_expr, mexp->metric_events,
					evsel->name, mexp->metric_name,
					avg, cpu, out, st);
		}
	}
	if (num == 0)
		print_metric(config, ctxp, NULL, NULL, NULL, 0);
}