// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * Common Performance counter support functions for PowerISA v2.07 processors.
 *
 * Copyright 2009 Paul Mackerras, IBM Corporation.
 * Copyright 2013 Michael Ellerman, IBM Corporation.
 * Copyright 2016 Madhavan Srinivasan, IBM Corporation.
 */
#include "isa207-common.h"

PMU_FORMAT_ATTR(event,          "config:0-49");
PMU_FORMAT_ATTR(pmcxsel,        "config:0-7");
PMU_FORMAT_ATTR(mark,           "config:8");
PMU_FORMAT_ATTR(combine,        "config:11");
PMU_FORMAT_ATTR(unit,           "config:12-15");
PMU_FORMAT_ATTR(pmc,            "config:16-19");
PMU_FORMAT_ATTR(cache_sel,      "config:20-23");
PMU_FORMAT_ATTR(sample_mode,    "config:24-28");
PMU_FORMAT_ATTR(thresh_sel,     "config:29-31");
PMU_FORMAT_ATTR(thresh_stop,    "config:32-35");
PMU_FORMAT_ATTR(thresh_start,   "config:36-39");
PMU_FORMAT_ATTR(thresh_cmp,     "config:40-49");
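
/*
 * These format fields are exported to userspace via
 * /sys/bus/event_source/devices/<pmu>/format, letting tools assemble raw
 * event codes field by field. Illustrative usage (the event value is just
 * an example, not taken from this file):
 *
 *   perf stat -e 'cpu/event=0x100f2/' -- some_workload
 */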

struct attribute *isa207_pmu_format_attr[] = {
        &format_attr_event.attr,
        &format_attr_pmcxsel.attr,
        &format_attr_mark.attr,
        &format_attr_combine.attr,
        &format_attr_unit.attr,
        &format_attr_pmc.attr,
        &format_attr_cache_sel.attr,
        &format_attr_sample_mode.attr,
        &format_attr_thresh_sel.attr,
        &format_attr_thresh_stop.attr,
        &format_attr_thresh_start.attr,
        &format_attr_thresh_cmp.attr,
        NULL,
};

struct attribute_group isa207_pmu_format_group = {
        .name = "format",
        .attrs = isa207_pmu_format_attr,
};

static inline bool event_is_fab_match(u64 event)
{
        /* Only check pmc, unit and pmcxsel, ignore the edge bit (0) */
        event &= 0xff0fe;

        /* PM_MRK_FAB_RSP_MATCH & PM_MRK_FAB_RSP_MATCH_CYC */
        return (event == 0x30056 || event == 0x4f052);
}
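
/*
 * Decoding 0x30056 with the format fields above gives pmc (config:16-19)
 * = 3 and pmcxsel (config:0-7) = 0x56; 0x4f052 gives pmc = 4, unit = 0xf,
 * pmcxsel = 0x52. The 0xff0fe mask keeps exactly those fields, minus the
 * edge bit (bit 0).
 */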

static bool is_event_valid(u64 event)
{
        u64 valid_mask = EVENT_VALID_MASK;

        if (cpu_has_feature(CPU_FTR_ARCH_31))
                valid_mask = p10_EVENT_VALID_MASK;
        else if (cpu_has_feature(CPU_FTR_ARCH_300))
                valid_mask = p9_EVENT_VALID_MASK;

        return !(event & ~valid_mask);
}
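
/*
 * Note: the valid mask is per CPU generation; any config bit a given
 * generation does not define causes the event to be rejected before any
 * constraint checking is done.
 */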

static inline bool is_event_marked(u64 event)
{
        if (event & EVENT_IS_MARKED)
                return true;

        return false;
}

static unsigned long sdar_mod_val(u64 event)
{
        if (cpu_has_feature(CPU_FTR_ARCH_31))
                return p10_SDAR_MODE(event);

        return p9_SDAR_MODE(event);
}

static void mmcra_sdar_mode(u64 event, unsigned long *mmcra)
{
        /*
         * MMCRA[SDAR_MODE] specifies how the SDAR should be updated in
         * continuous sampling mode.
         *
         * In case of Power8:
         * MMCRA[SDAR_MODE] will be programmed as "0b01" for continuous
         * sampling mode and will be unchanged when setting MMCRA[63]
         * (Marked events).
         *
         * In case of Power9/Power10:
         * Marked event: MMCRA[SDAR_MODE] will be set to 0b00 ('No Updates'),
         *               or if the group already has any marked events.
         * For the rest:
         *      MMCRA[SDAR_MODE] will be set from the event code.
         *      If sdar_mode from the event is zero, default to 0b01. Hardware
         *      requires that we set a non-zero value.
         */
        if (cpu_has_feature(CPU_FTR_ARCH_300)) {
                if (is_event_marked(event) || (*mmcra & MMCRA_SAMPLE_ENABLE))
                        *mmcra &= MMCRA_SDAR_MODE_NO_UPDATES;
                else if (sdar_mod_val(event))
                        *mmcra |= sdar_mod_val(event) << MMCRA_SDAR_MODE_SHIFT;
                else
                        *mmcra |= MMCRA_SDAR_MODE_DCACHE;
        } else
                *mmcra |= MMCRA_SDAR_MODE_TLB;
}

static u64 thresh_cmp_val(u64 value)
{
        if (cpu_has_feature(CPU_FTR_ARCH_300))
                return value << p9_MMCRA_THR_CMP_SHIFT;

        return value << MMCRA_THR_CMP_SHIFT;
}

static unsigned long combine_from_event(u64 event)
{
        if (cpu_has_feature(CPU_FTR_ARCH_300))
                return p9_EVENT_COMBINE(event);

        return EVENT_COMBINE(event);
}

static unsigned long combine_shift(unsigned long pmc)
{
        if (cpu_has_feature(CPU_FTR_ARCH_300))
                return p9_MMCR1_COMBINE_SHIFT(pmc);

        return MMCR1_COMBINE_SHIFT(pmc);
}

static inline bool event_is_threshold(u64 event)
{
        return (event >> EVENT_THR_SEL_SHIFT) & EVENT_THR_SEL_MASK;
}

static bool is_thresh_cmp_valid(u64 event)
{
        unsigned int cmp, exp;

        /*
         * Check the mantissa upper two bits are not zero, unless the
         * exponent is also zero. See the THRESH_CMP_MANTISSA doc.
         * Power10: thresh_cmp is replaced by l2_l3 event select.
         */
        if (cpu_has_feature(CPU_FTR_ARCH_31))
                return false;

        cmp = (event >> EVENT_THR_CMP_SHIFT) & EVENT_THR_CMP_MASK;
        exp = cmp >> 7;

        if (exp && (cmp & 0x60) == 0)
                return false;

        return true;
}
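
/*
 * Worked example for the check above: cmp = 0b0101000000 splits into
 * exp = 0b010 (cmp >> 7) and mantissa top bits 0b10 (cmp & 0x60), so it
 * is accepted; cmp = 0b0100010000 has exp = 0b010 but zero in the top
 * two mantissa bits, so it is rejected.
 */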

static unsigned int dc_ic_rld_quad_l1_sel(u64 event)
{
        unsigned int cache;

        cache = (event >> EVENT_CACHE_SEL_SHIFT) & MMCR1_DC_IC_QUAL_MASK;
        return cache;
}
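
/*
 * Translate the SIER-reported data source (idx from the LDST field,
 * sub_idx from the DATA_SRC field) into the generic perf_mem_data_src
 * encoding. The PH()/PM() helpers (from isa207-common.h) tag the level
 * as a hit or a miss respectively.
 */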

static inline u64 isa207_find_source(u64 idx, u32 sub_idx)
{
        u64 ret = PERF_MEM_NA;

        switch(idx) {
        case 0:
                /* Nothing to do */
                break;
        case 1:
                ret = PH(LVL, L1);
                break;
        case 2:
                ret = PH(LVL, L2);
                break;
        case 3:
                ret = PH(LVL, L3);
                break;
        case 4:
                if (sub_idx <= 1)
                        ret = PH(LVL, LOC_RAM);
                else if (sub_idx > 1 && sub_idx <= 2)
                        ret = PH(LVL, REM_RAM1);
                else
                        ret = PH(LVL, REM_RAM2);
                ret |= P(SNOOP, HIT);
                break;
        case 5:
                ret = PH(LVL, REM_CCE1);
                if ((sub_idx == 0) || (sub_idx == 2) || (sub_idx == 4))
                        ret |= P(SNOOP, HIT);
                else if ((sub_idx == 1) || (sub_idx == 3) || (sub_idx == 5))
                        ret |= P(SNOOP, HITM);
                break;
        case 6:
                ret = PH(LVL, REM_CCE2);
                if ((sub_idx == 0) || (sub_idx == 2))
                        ret |= P(SNOOP, HIT);
                else if ((sub_idx == 1) || (sub_idx == 3))
                        ret |= P(SNOOP, HITM);
                break;
        case 7:
                ret = PM(LVL, L1);
                break;
        }

        return ret;
}

void isa207_get_mem_data_src(union perf_mem_data_src *dsrc, u32 flags,
                             struct pt_regs *regs)
{
        u64 idx;
        u32 sub_idx;
        u64 sier;
        u64 val;

        /* Skip if no SIER support */
        if (!(flags & PPMU_HAS_SIER)) {
                dsrc->val = 0;
                return;
        }

        sier = mfspr(SPRN_SIER);
        val = (sier & ISA207_SIER_TYPE_MASK) >> ISA207_SIER_TYPE_SHIFT;
        if (val == 1 || val == 2) {
                idx = (sier & ISA207_SIER_LDST_MASK) >> ISA207_SIER_LDST_SHIFT;
                sub_idx = (sier & ISA207_SIER_DATA_SRC_MASK) >> ISA207_SIER_DATA_SRC_SHIFT;

                dsrc->val = isa207_find_source(idx, sub_idx);
                dsrc->val |= (val == 1) ? P(OP, LOAD) : P(OP, STORE);
        }
}
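
/*
 * The value filled into dsrc above is reported through
 * PERF_SAMPLE_DATA_SRC, e.g. for consumption by "perf mem record" /
 * "perf mem report" (mentioned for illustration).
 */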

void isa207_get_mem_weight(u64 *weight)
{
        u64 mmcra = mfspr(SPRN_MMCRA);
        u64 exp = MMCRA_THR_CTR_EXP(mmcra);
        u64 mantissa = MMCRA_THR_CTR_MANT(mmcra);
        u64 sier = mfspr(SPRN_SIER);
        u64 val = (sier & ISA207_SIER_TYPE_MASK) >> ISA207_SIER_TYPE_SHIFT;

        if (val == 0 || val == 7)
                *weight = 0;
        else
                *weight = mantissa << (2 * exp);
}
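
/*
 * Build the (mask, value) constraint pair for one event. As a rough
 * sketch of the mechanism: the powerpc perf core adds up the value
 * fields of all events in a group and checks the sums against each
 * mask, so shared resources are encoded as small counting fields, e.g.
 * each programmable-PMC event adds CNST_NC_VAL to a shared field that
 * overflows once more events are requested than PMC1-4 can hold. The
 * field layouts themselves live in isa207-common.h.
 */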

int isa207_get_constraint(u64 event, unsigned long *maskp, unsigned long *valp)
{
        unsigned int unit, pmc, cache, ebb;
        unsigned long mask, value;

        mask = value = 0;

        if (!is_event_valid(event))
                return -1;

        pmc  = (event >> EVENT_PMC_SHIFT)  & EVENT_PMC_MASK;
        unit = (event >> EVENT_UNIT_SHIFT) & EVENT_UNIT_MASK;
        if (cpu_has_feature(CPU_FTR_ARCH_31))
                cache = (event >> EVENT_CACHE_SEL_SHIFT) &
                        p10_EVENT_CACHE_SEL_MASK;
        else
                cache = (event >> EVENT_CACHE_SEL_SHIFT) &
                        EVENT_CACHE_SEL_MASK;
        ebb  = (event >> EVENT_EBB_SHIFT)  & EVENT_EBB_MASK;
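
        /*
         * 0x500fa and 0x600f4 below are the raw codes for PM_RUN_INST_CMPL
         * and PM_RUN_CYC, the only events allowed on PMC5 and PMC6 (the
         * names are per the alternatives table later in this file).
         */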
        if (pmc) {
                u64 base_event;

                if (pmc > 6)
                        return -1;

                /* Ignore Linux defined bits when checking event below */
                base_event = event & ~EVENT_LINUX_MASK;

                if (pmc >= 5 && base_event != 0x500fa &&
                                base_event != 0x600f4)
                        return -1;

                mask  |= CNST_PMC_MASK(pmc);
                value |= CNST_PMC_VAL(pmc);

                /*
                 * PMC5 and PMC6 are used to count cycles and instructions and
                 * they do not support most of the constraint bits. Add a check
                 * to exclude PMC5/6 from most of the constraints except for
                 * EBB/BHRB.
                 */
                if (pmc >= 5)
                        goto ebb_bhrb;
        }

        if (pmc <= 4) {
                /*
                 * Add to number of counters in use. Note this includes events with
                 * a PMC of 0 - they still need a PMC, it's just assigned later.
                 * Don't count events on PMC 5 & 6, there is only one valid event
                 * on each of those counters, and they are handled above.
                 */
                mask  |= CNST_NC_MASK;
                value |= CNST_NC_VAL;
        }

        if (unit >= 6 && unit <= 9) {
                if (cpu_has_feature(CPU_FTR_ARCH_31) && (unit == 6)) {
                        mask |= CNST_L2L3_GROUP_MASK;
                        value |= CNST_L2L3_GROUP_VAL(event >> p10_L2L3_EVENT_SHIFT);
                } else if (cpu_has_feature(CPU_FTR_ARCH_300)) {
                        mask  |= CNST_CACHE_GROUP_MASK;
                        value |= CNST_CACHE_GROUP_VAL(event & 0xff);

                        mask |= CNST_CACHE_PMC4_MASK;
                        if (pmc == 4)
                                value |= CNST_CACHE_PMC4_VAL;
                } else if (cache & 0x7) {
                        /*
                         * L2/L3 events contain a cache selector field, which is
                         * supposed to be programmed into MMCRC. However MMCRC is only
                         * HV writable, and there is no API for guest kernels to modify
                         * it. The solution is for the hypervisor to initialise the
                         * field to zeroes, and for us to only ever allow events that
                         * have a cache selector of zero. The bank selector (bit 3) is
                         * irrelevant, as long as the rest of the value is 0.
                         */
                        return -1;
                }

        } else if (cpu_has_feature(CPU_FTR_ARCH_300) || (event & EVENT_IS_L1)) {
                mask  |= CNST_L1_QUAL_MASK;
                value |= CNST_L1_QUAL_VAL(cache);
        }

        if (cpu_has_feature(CPU_FTR_ARCH_31)) {
                mask |= CNST_RADIX_SCOPE_GROUP_MASK;
                value |= CNST_RADIX_SCOPE_GROUP_VAL(event >> p10_EVENT_RADIX_SCOPE_QUAL_SHIFT);
        }

        if (is_event_marked(event)) {
                mask  |= CNST_SAMPLE_MASK;
                value |= CNST_SAMPLE_VAL(event >> EVENT_SAMPLE_SHIFT);
        }

        if (cpu_has_feature(CPU_FTR_ARCH_300)) {
                if (event_is_threshold(event) && is_thresh_cmp_valid(event)) {
                        mask  |= CNST_THRESH_MASK;
                        value |= CNST_THRESH_VAL(event >> EVENT_THRESH_SHIFT);
                }
        } else {
                /*
                 * Special case for PM_MRK_FAB_RSP_MATCH and PM_MRK_FAB_RSP_MATCH_CYC,
                 * the threshold control bits are used for the match value.
                 */
                if (event_is_fab_match(event)) {
                        mask  |= CNST_FAB_MATCH_MASK;
                        value |= CNST_FAB_MATCH_VAL(event >> EVENT_THR_CTL_SHIFT);
                } else {
                        if (!is_thresh_cmp_valid(event))
                                return -1;

                        mask  |= CNST_THRESH_MASK;
                        value |= CNST_THRESH_VAL(event >> EVENT_THRESH_SHIFT);
                }
        }

ebb_bhrb:
        if (!pmc && ebb)
                /* EBB events must specify the PMC */
                return -1;

        if (event & EVENT_WANTS_BHRB) {
                if (!ebb)
                        /* Only EBB events can request BHRB */
                        return -1;

                mask  |= CNST_IFM_MASK;
                value |= CNST_IFM_VAL(event >> EVENT_IFM_SHIFT);
        }

        /*
         * All events must agree on EBB, either all request it or none.
         * EBB events are pinned & exclusive, so this should never actually
         * hit, but we leave it as a fallback in case.
         */
        mask  |= CNST_EBB_VAL(ebb);
        value |= CNST_EBB_MASK;

        *maskp = mask;
        *valp = value;

        return 0;
}

int isa207_compute_mmcr(u64 event[], int n_ev,
                        unsigned int hwc[], struct mmcr_regs *mmcr,
                        struct perf_event *pevents[])
{
        unsigned long mmcra, mmcr1, mmcr2, unit, combine, psel, cache, val;
        unsigned long mmcr3;
        unsigned int pmc, pmc_inuse;
        int i;

        pmc_inuse = 0;

        /* First pass to count resource use */
        for (i = 0; i < n_ev; ++i) {
                pmc = (event[i] >> EVENT_PMC_SHIFT) & EVENT_PMC_MASK;
                if (pmc)
                        pmc_inuse |= 1 << pmc;
        }

        mmcra = mmcr1 = mmcr2 = mmcr3 = 0;

        /*
         * Disable bhrb unless explicitly requested
         * by setting MMCRA (BHRBRD) bit.
         */
        if (cpu_has_feature(CPU_FTR_ARCH_31))
                mmcra |= MMCRA_BHRB_DISABLE;

        /* Second pass: assign PMCs, set all MMCR1 fields */
        for (i = 0; i < n_ev; ++i) {
                pmc     = (event[i] >> EVENT_PMC_SHIFT) & EVENT_PMC_MASK;
                unit    = (event[i] >> EVENT_UNIT_SHIFT) & EVENT_UNIT_MASK;
                combine = combine_from_event(event[i]);
                psel    = event[i] & EVENT_PSEL_MASK;

                if (!pmc) {
                        for (pmc = 1; pmc <= 4; ++pmc) {
                                if (!(pmc_inuse & (1 << pmc)))
                                        break;
                        }

                        pmc_inuse |= 1 << pmc;
                }

                if (pmc <= 4) {
                        mmcr1 |= unit << MMCR1_UNIT_SHIFT(pmc);
                        mmcr1 |= combine << combine_shift(pmc);
                        mmcr1 |= psel << MMCR1_PMCSEL_SHIFT(pmc);
                }
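
                /*
                 * MMCR1 carries a unit/combine/pmcsel triple per
                 * programmable counter; the *_SHIFT(pmc) helpers select
                 * the field slot for the (1-based) PMC chosen above.
                 */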

                /* In continuous sampling mode, update SDAR on TLB miss */
                mmcra_sdar_mode(event[i], &mmcra);

                if (cpu_has_feature(CPU_FTR_ARCH_300)) {
                        cache = dc_ic_rld_quad_l1_sel(event[i]);
                        mmcr1 |= (cache) << MMCR1_DC_IC_QUAL_SHIFT;
                } else {
                        if (event[i] & EVENT_IS_L1) {
                                cache = dc_ic_rld_quad_l1_sel(event[i]);
                                mmcr1 |= (cache) << MMCR1_DC_IC_QUAL_SHIFT;
                        }
                }

                /* Set RADIX_SCOPE_QUAL bit */
                if (cpu_has_feature(CPU_FTR_ARCH_31)) {
                        val = (event[i] >> p10_EVENT_RADIX_SCOPE_QUAL_SHIFT) &
                                p10_EVENT_RADIX_SCOPE_QUAL_MASK;
                        mmcr1 |= val << p10_MMCR1_RADIX_SCOPE_QUAL_SHIFT;
                }

                if (is_event_marked(event[i])) {
                        mmcra |= MMCRA_SAMPLE_ENABLE;

                        val = (event[i] >> EVENT_SAMPLE_SHIFT) & EVENT_SAMPLE_MASK;
                        if (val) {
                                mmcra |= (val & 3) << MMCRA_SAMP_MODE_SHIFT;
                                mmcra |= (val >> 2) << MMCRA_SAMP_ELIG_SHIFT;
                        }
                }

                /*
                 * For PM_MRK_FAB_RSP_MATCH and PM_MRK_FAB_RSP_MATCH_CYC,
                 * the threshold bits are used for the match value.
                 */
                if (!cpu_has_feature(CPU_FTR_ARCH_300) && event_is_fab_match(event[i])) {
                        mmcr1 |= ((event[i] >> EVENT_THR_CTL_SHIFT) &
                                  EVENT_THR_CTL_MASK) << MMCR1_FAB_SHIFT;
                } else {
                        val = (event[i] >> EVENT_THR_CTL_SHIFT) & EVENT_THR_CTL_MASK;
                        mmcra |= val << MMCRA_THR_CTL_SHIFT;
                        val = (event[i] >> EVENT_THR_SEL_SHIFT) & EVENT_THR_SEL_MASK;
                        mmcra |= val << MMCRA_THR_SEL_SHIFT;
                        if (!cpu_has_feature(CPU_FTR_ARCH_31)) {
                                val = (event[i] >> EVENT_THR_CMP_SHIFT) &
                                        EVENT_THR_CMP_MASK;
                                mmcra |= thresh_cmp_val(val);
                        }
                }

                if (cpu_has_feature(CPU_FTR_ARCH_31) && (unit == 6)) {
                        val = (event[i] >> p10_L2L3_EVENT_SHIFT) &
                                p10_EVENT_L2L3_SEL_MASK;
                        mmcr2 |= val << p10_L2L3_SEL_SHIFT;
                }

                if (event[i] & EVENT_WANTS_BHRB) {
                        val = (event[i] >> EVENT_IFM_SHIFT) & EVENT_IFM_MASK;
                        mmcra |= val << MMCRA_IFM_SHIFT;
                }

                /* Set MMCRA (BHRBRD) to 0 if there is a user request for BHRB */
                if (cpu_has_feature(CPU_FTR_ARCH_31) &&
                                (has_branch_stack(pevents[i]) || (event[i] & EVENT_WANTS_BHRB)))
                        mmcra &= ~MMCRA_BHRB_DISABLE;

                if (pevents[i]->attr.exclude_user)
                        mmcr2 |= MMCR2_FCP(pmc);

                if (pevents[i]->attr.exclude_hv)
                        mmcr2 |= MMCR2_FCH(pmc);
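
                /*
                 * FCP/FCS/FCH freeze the counter in problem (user),
                 * supervisor and hypervisor state respectively. A kernel
                 * running bare metal executes in HV state, so excluding
                 * the kernel there means freezing in hypervisor state.
                 */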
                if (pevents[i]->attr.exclude_kernel) {
                        if (cpu_has_feature(CPU_FTR_HVMODE))
                                mmcr2 |= MMCR2_FCH(pmc);
                        else
                                mmcr2 |= MMCR2_FCS(pmc);
                }

                if (cpu_has_feature(CPU_FTR_ARCH_31)) {
                        if (pmc <= 4) {
                                val = (event[i] >> p10_EVENT_MMCR3_SHIFT) &
                                        p10_EVENT_MMCR3_MASK;
                                mmcr3 |= val << MMCR3_SHIFT(pmc);
                        }
                }

                hwc[i] = pmc - 1;
        }

        /* Return MMCRx values */
        mmcr->mmcr0 = 0;

        /* pmc_inuse is 1-based */
        if (pmc_inuse & 2)
                mmcr->mmcr0 = MMCR0_PMC1CE;
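
        /*
         * Reading the masks: bit k of pmc_inuse stands for PMCk, so 0x2
         * is PMC1, 0x7c covers PMC2-6, and 0x60 covers PMC5-6.
         */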

        if (pmc_inuse & 0x7c)
                mmcr->mmcr0 |= MMCR0_PMCjCE;

        /* If we're not using PMC 5 or 6, freeze them */
        if (!(pmc_inuse & 0x60))
                mmcr->mmcr0 |= MMCR0_FC56;

        mmcr->mmcr1 = mmcr1;
        mmcr->mmcra = mmcra;
        mmcr->mmcr2 = mmcr2;
        mmcr->mmcr3 = mmcr3;

        return 0;
}
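
/*
 * Blank out the event selector for the given counter. Note that pmc is
 * 0-based here (hence pmc + 1 for the 1-based PMCSEL field), unlike the
 * 1-based numbering used in isa207_compute_mmcr() above.
 */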
void isa207_disable_pmc(unsigned int pmc, struct mmcr_regs *mmcr)
{
        if (pmc <= 3)
                mmcr->mmcr1 &= ~(0xffUL << MMCR1_PMCSEL_SHIFT(pmc + 1));
}
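
/*
 * Look up an event in an alternatives table. Each row of ev_alt lists
 * mutually equivalent event codes; the rows are expected to be sorted by
 * their first column, which is what makes the early
 * "event < ev_alt[i][0]" cut-off below safe.
 */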
static int find_alternative(u64 event, const unsigned int ev_alt[][MAX_ALT], int size)
{
        int i, j;

        for (i = 0; i < size; ++i) {
                if (event < ev_alt[i][0])
                        break;

                for (j = 0; j < MAX_ALT && ev_alt[i][j]; ++j)
                        if (event == ev_alt[i][j])
                                return i;
        }

        return -1;
}

int isa207_get_alternatives(u64 event, u64 alt[], int size, unsigned int flags,
                            const unsigned int ev_alt[][MAX_ALT])
{
        int i, j, num_alt = 0;
        u64 alt_event;

        alt[num_alt++] = event;
        i = find_alternative(event, ev_alt, size);
        if (i >= 0) {
                /* Filter out the original event, it's already in alt[0] */
                for (j = 0; j < MAX_ALT; ++j) {
                        alt_event = ev_alt[i][j];
                        if (alt_event && alt_event != event)
                                alt[num_alt++] = alt_event;
                }
        }

        if (flags & PPMU_ONLY_COUNT_RUN) {
                /*
                 * We're only counting in RUN state, so PM_CYC is equivalent to
                 * PM_RUN_CYC and PM_INST_CMPL === PM_RUN_INST_CMPL.
                 */
                j = num_alt;
                for (i = 0; i < num_alt; ++i) {
                        switch (alt[i]) {
                        case 0x1e:                      /* PM_CYC */
                                alt[j++] = 0x600f4;     /* PM_RUN_CYC */
                                break;
                        case 0x600f4:                   /* PM_RUN_CYC */
                                alt[j++] = 0x1e;
                                break;
                        case 0x2:                       /* PM_INST_CMPL */
                                alt[j++] = 0x500fa;     /* PM_RUN_INST_CMPL */