// SPDX-License-Identifier: GPL-2.0-only
/*
 * ARMv8 PMUv3 Performance Events handling code.
 *
 * Copyright (C) 2012 ARM Limited
 * Author: Will Deacon <will.deacon@arm.com>
 *
 * This code is based heavily on the ARMv7 perf event code.
 */

#include <asm/irq_regs.h>
#include <asm/perf_event.h>
#include <asm/sysreg.h>
#include <asm/virt.h>

#include <clocksource/arm_arch_timer.h>

#include <linux/acpi.h>
#include <linux/clocksource.h>
#include <linux/kvm_host.h>
#include <linux/of.h>
#include <linux/perf/arm_pmu.h>
#include <linux/platform_device.h>
#include <linux/sched_clock.h>
#include <linux/smp.h>

/* ARMv8 Cortex-A53 specific event types. */
#define ARMV8_A53_PERFCTR_PREF_LINEFILL				0xC2

/* ARMv8 Cavium ThunderX specific event types. */
#define ARMV8_THUNDER_PERFCTR_L1D_CACHE_MISS_ST			0xE9
#define ARMV8_THUNDER_PERFCTR_L1D_CACHE_PREF_ACCESS		0xEA
#define ARMV8_THUNDER_PERFCTR_L1D_CACHE_PREF_MISS		0xEB
#define ARMV8_THUNDER_PERFCTR_L1I_CACHE_PREF_ACCESS		0xEC
#define ARMV8_THUNDER_PERFCTR_L1I_CACHE_PREF_MISS		0xED

/*
 * ARMv8 architecturally defined events; not all of these may be
 * supported on any given implementation. Unsupported events will be
 * disabled at run-time based on the PMCEID registers.
 */
static const unsigned armv8_pmuv3_perf_map[PERF_COUNT_HW_MAX] = {
	PERF_MAP_ALL_UNSUPPORTED,
	[PERF_COUNT_HW_CPU_CYCLES]		= ARMV8_PMUV3_PERFCTR_CPU_CYCLES,
	[PERF_COUNT_HW_INSTRUCTIONS]		= ARMV8_PMUV3_PERFCTR_INST_RETIRED,
	[PERF_COUNT_HW_CACHE_REFERENCES]	= ARMV8_PMUV3_PERFCTR_L1D_CACHE,
	[PERF_COUNT_HW_CACHE_MISSES]		= ARMV8_PMUV3_PERFCTR_L1D_CACHE_REFILL,
	[PERF_COUNT_HW_BRANCH_INSTRUCTIONS]	= ARMV8_PMUV3_PERFCTR_PC_WRITE_RETIRED,
	[PERF_COUNT_HW_BRANCH_MISSES]		= ARMV8_PMUV3_PERFCTR_BR_MIS_PRED,
	[PERF_COUNT_HW_BUS_CYCLES]		= ARMV8_PMUV3_PERFCTR_BUS_CYCLES,
	[PERF_COUNT_HW_STALLED_CYCLES_FRONTEND]	= ARMV8_PMUV3_PERFCTR_STALL_FRONTEND,
	[PERF_COUNT_HW_STALLED_CYCLES_BACKEND]	= ARMV8_PMUV3_PERFCTR_STALL_BACKEND,
};

static const unsigned armv8_pmuv3_perf_cache_map[PERF_COUNT_HW_CACHE_MAX]
						[PERF_COUNT_HW_CACHE_OP_MAX]
						[PERF_COUNT_HW_CACHE_RESULT_MAX] = {
	PERF_CACHE_MAP_ALL_UNSUPPORTED,

	[C(L1D)][C(OP_READ)][C(RESULT_ACCESS)]	= ARMV8_PMUV3_PERFCTR_L1D_CACHE,
	[C(L1D)][C(OP_READ)][C(RESULT_MISS)]	= ARMV8_PMUV3_PERFCTR_L1D_CACHE_REFILL,

	[C(L1I)][C(OP_READ)][C(RESULT_ACCESS)]	= ARMV8_PMUV3_PERFCTR_L1I_CACHE,
	[C(L1I)][C(OP_READ)][C(RESULT_MISS)]	= ARMV8_PMUV3_PERFCTR_L1I_CACHE_REFILL,

	[C(DTLB)][C(OP_READ)][C(RESULT_MISS)]	= ARMV8_PMUV3_PERFCTR_L1D_TLB_REFILL,
	[C(DTLB)][C(OP_READ)][C(RESULT_ACCESS)]	= ARMV8_PMUV3_PERFCTR_L1D_TLB,

	[C(ITLB)][C(OP_READ)][C(RESULT_MISS)]	= ARMV8_PMUV3_PERFCTR_L1I_TLB_REFILL,
	[C(ITLB)][C(OP_READ)][C(RESULT_ACCESS)]	= ARMV8_PMUV3_PERFCTR_L1I_TLB,

	[C(LL)][C(OP_READ)][C(RESULT_MISS)]	= ARMV8_PMUV3_PERFCTR_LL_CACHE_MISS_RD,
	[C(LL)][C(OP_READ)][C(RESULT_ACCESS)]	= ARMV8_PMUV3_PERFCTR_LL_CACHE_RD,

	[C(BPU)][C(OP_READ)][C(RESULT_ACCESS)]	= ARMV8_PMUV3_PERFCTR_BR_PRED,
	[C(BPU)][C(OP_READ)][C(RESULT_MISS)]	= ARMV8_PMUV3_PERFCTR_BR_MIS_PRED,
};

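/*
 * For reference (generic perf behaviour, not specific to this driver):
 * a tool requests a cache event by packing the three indices used above
 * into attr.config, e.g. an L1-D read-miss event is
 *
 *	PERF_COUNT_HW_CACHE_L1D |
 *	(PERF_COUNT_HW_CACHE_OP_READ << 8) |
 *	(PERF_COUNT_HW_CACHE_RESULT_MISS << 16)
 *
 * which the table above resolves to ARMV8_PMUV3_PERFCTR_L1D_CACHE_REFILL.
 */
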
static const unsigned armv8_a53_perf_cache_map[PERF_COUNT_HW_CACHE_MAX]
					      [PERF_COUNT_HW_CACHE_OP_MAX]
					      [PERF_COUNT_HW_CACHE_RESULT_MAX] = {
	PERF_CACHE_MAP_ALL_UNSUPPORTED,

	[C(L1D)][C(OP_PREFETCH)][C(RESULT_MISS)] = ARMV8_A53_PERFCTR_PREF_LINEFILL,

	[C(NODE)][C(OP_READ)][C(RESULT_ACCESS)]	= ARMV8_IMPDEF_PERFCTR_BUS_ACCESS_RD,
	[C(NODE)][C(OP_WRITE)][C(RESULT_ACCESS)] = ARMV8_IMPDEF_PERFCTR_BUS_ACCESS_WR,
};

static const unsigned armv8_a57_perf_cache_map[PERF_COUNT_HW_CACHE_MAX]
					      [PERF_COUNT_HW_CACHE_OP_MAX]
					      [PERF_COUNT_HW_CACHE_RESULT_MAX] = {
	PERF_CACHE_MAP_ALL_UNSUPPORTED,

	[C(L1D)][C(OP_READ)][C(RESULT_ACCESS)]	= ARMV8_IMPDEF_PERFCTR_L1D_CACHE_RD,
	[C(L1D)][C(OP_READ)][C(RESULT_MISS)]	= ARMV8_IMPDEF_PERFCTR_L1D_CACHE_REFILL_RD,
	[C(L1D)][C(OP_WRITE)][C(RESULT_ACCESS)]	= ARMV8_IMPDEF_PERFCTR_L1D_CACHE_WR,
	[C(L1D)][C(OP_WRITE)][C(RESULT_MISS)]	= ARMV8_IMPDEF_PERFCTR_L1D_CACHE_REFILL_WR,

	[C(DTLB)][C(OP_READ)][C(RESULT_MISS)]	= ARMV8_IMPDEF_PERFCTR_L1D_TLB_REFILL_RD,
	[C(DTLB)][C(OP_WRITE)][C(RESULT_MISS)]	= ARMV8_IMPDEF_PERFCTR_L1D_TLB_REFILL_WR,

	[C(NODE)][C(OP_READ)][C(RESULT_ACCESS)]	= ARMV8_IMPDEF_PERFCTR_BUS_ACCESS_RD,
	[C(NODE)][C(OP_WRITE)][C(RESULT_ACCESS)] = ARMV8_IMPDEF_PERFCTR_BUS_ACCESS_WR,
};

static const unsigned armv8_a73_perf_cache_map[PERF_COUNT_HW_CACHE_MAX]
					      [PERF_COUNT_HW_CACHE_OP_MAX]
					      [PERF_COUNT_HW_CACHE_RESULT_MAX] = {
	PERF_CACHE_MAP_ALL_UNSUPPORTED,

	[C(L1D)][C(OP_READ)][C(RESULT_ACCESS)]	= ARMV8_IMPDEF_PERFCTR_L1D_CACHE_RD,
	[C(L1D)][C(OP_WRITE)][C(RESULT_ACCESS)]	= ARMV8_IMPDEF_PERFCTR_L1D_CACHE_WR,
};

static const unsigned armv8_thunder_perf_cache_map[PERF_COUNT_HW_CACHE_MAX]
						  [PERF_COUNT_HW_CACHE_OP_MAX]
						  [PERF_COUNT_HW_CACHE_RESULT_MAX] = {
	PERF_CACHE_MAP_ALL_UNSUPPORTED,

	[C(L1D)][C(OP_READ)][C(RESULT_ACCESS)]	= ARMV8_IMPDEF_PERFCTR_L1D_CACHE_RD,
	[C(L1D)][C(OP_READ)][C(RESULT_MISS)]	= ARMV8_IMPDEF_PERFCTR_L1D_CACHE_REFILL_RD,
	[C(L1D)][C(OP_WRITE)][C(RESULT_ACCESS)]	= ARMV8_IMPDEF_PERFCTR_L1D_CACHE_WR,
	[C(L1D)][C(OP_WRITE)][C(RESULT_MISS)]	= ARMV8_THUNDER_PERFCTR_L1D_CACHE_MISS_ST,
	[C(L1D)][C(OP_PREFETCH)][C(RESULT_ACCESS)] = ARMV8_THUNDER_PERFCTR_L1D_CACHE_PREF_ACCESS,
	[C(L1D)][C(OP_PREFETCH)][C(RESULT_MISS)] = ARMV8_THUNDER_PERFCTR_L1D_CACHE_PREF_MISS,

	[C(L1I)][C(OP_PREFETCH)][C(RESULT_ACCESS)] = ARMV8_THUNDER_PERFCTR_L1I_CACHE_PREF_ACCESS,
	[C(L1I)][C(OP_PREFETCH)][C(RESULT_MISS)] = ARMV8_THUNDER_PERFCTR_L1I_CACHE_PREF_MISS,

	[C(DTLB)][C(OP_READ)][C(RESULT_ACCESS)]	= ARMV8_IMPDEF_PERFCTR_L1D_TLB_RD,
	[C(DTLB)][C(OP_READ)][C(RESULT_MISS)]	= ARMV8_IMPDEF_PERFCTR_L1D_TLB_REFILL_RD,
	[C(DTLB)][C(OP_WRITE)][C(RESULT_ACCESS)] = ARMV8_IMPDEF_PERFCTR_L1D_TLB_WR,
	[C(DTLB)][C(OP_WRITE)][C(RESULT_MISS)]	= ARMV8_IMPDEF_PERFCTR_L1D_TLB_REFILL_WR,
};

static const unsigned armv8_vulcan_perf_cache_map[PERF_COUNT_HW_CACHE_MAX]
						 [PERF_COUNT_HW_CACHE_OP_MAX]
						 [PERF_COUNT_HW_CACHE_RESULT_MAX] = {
	PERF_CACHE_MAP_ALL_UNSUPPORTED,

	[C(L1D)][C(OP_READ)][C(RESULT_ACCESS)]	= ARMV8_IMPDEF_PERFCTR_L1D_CACHE_RD,
	[C(L1D)][C(OP_READ)][C(RESULT_MISS)]	= ARMV8_IMPDEF_PERFCTR_L1D_CACHE_REFILL_RD,
	[C(L1D)][C(OP_WRITE)][C(RESULT_ACCESS)]	= ARMV8_IMPDEF_PERFCTR_L1D_CACHE_WR,
	[C(L1D)][C(OP_WRITE)][C(RESULT_MISS)]	= ARMV8_IMPDEF_PERFCTR_L1D_CACHE_REFILL_WR,

	[C(DTLB)][C(OP_READ)][C(RESULT_ACCESS)]	= ARMV8_IMPDEF_PERFCTR_L1D_TLB_RD,
	[C(DTLB)][C(OP_WRITE)][C(RESULT_ACCESS)] = ARMV8_IMPDEF_PERFCTR_L1D_TLB_WR,
	[C(DTLB)][C(OP_READ)][C(RESULT_MISS)]	= ARMV8_IMPDEF_PERFCTR_L1D_TLB_REFILL_RD,
	[C(DTLB)][C(OP_WRITE)][C(RESULT_MISS)]	= ARMV8_IMPDEF_PERFCTR_L1D_TLB_REFILL_WR,

	[C(NODE)][C(OP_READ)][C(RESULT_ACCESS)]	= ARMV8_IMPDEF_PERFCTR_BUS_ACCESS_RD,
	[C(NODE)][C(OP_WRITE)][C(RESULT_ACCESS)] = ARMV8_IMPDEF_PERFCTR_BUS_ACCESS_WR,
};
static ssize_t
armv8pmu_events_sysfs_show(struct device *dev,
			   struct device_attribute *attr, char *page)
{
	struct perf_pmu_events_attr *pmu_attr;

	pmu_attr = container_of(attr, struct perf_pmu_events_attr, attr);

	return sprintf(page, "event=0x%04llx\n", pmu_attr->id);
}

#define ARMV8_EVENT_ATTR(name, config)						\
	PMU_EVENT_ATTR_ID(name, armv8pmu_events_sysfs_show, config)

static struct attribute *armv8_pmuv3_event_attrs[] = {
	ARMV8_EVENT_ATTR(sw_incr, ARMV8_PMUV3_PERFCTR_SW_INCR),
	ARMV8_EVENT_ATTR(l1i_cache_refill, ARMV8_PMUV3_PERFCTR_L1I_CACHE_REFILL),
	ARMV8_EVENT_ATTR(l1i_tlb_refill, ARMV8_PMUV3_PERFCTR_L1I_TLB_REFILL),
	ARMV8_EVENT_ATTR(l1d_cache_refill, ARMV8_PMUV3_PERFCTR_L1D_CACHE_REFILL),
	ARMV8_EVENT_ATTR(l1d_cache, ARMV8_PMUV3_PERFCTR_L1D_CACHE),
	ARMV8_EVENT_ATTR(l1d_tlb_refill, ARMV8_PMUV3_PERFCTR_L1D_TLB_REFILL),
	ARMV8_EVENT_ATTR(ld_retired, ARMV8_PMUV3_PERFCTR_LD_RETIRED),
	ARMV8_EVENT_ATTR(st_retired, ARMV8_PMUV3_PERFCTR_ST_RETIRED),
	ARMV8_EVENT_ATTR(inst_retired, ARMV8_PMUV3_PERFCTR_INST_RETIRED),
	ARMV8_EVENT_ATTR(exc_taken, ARMV8_PMUV3_PERFCTR_EXC_TAKEN),
	ARMV8_EVENT_ATTR(exc_return, ARMV8_PMUV3_PERFCTR_EXC_RETURN),
	ARMV8_EVENT_ATTR(cid_write_retired, ARMV8_PMUV3_PERFCTR_CID_WRITE_RETIRED),
	ARMV8_EVENT_ATTR(pc_write_retired, ARMV8_PMUV3_PERFCTR_PC_WRITE_RETIRED),
	ARMV8_EVENT_ATTR(br_immed_retired, ARMV8_PMUV3_PERFCTR_BR_IMMED_RETIRED),
	ARMV8_EVENT_ATTR(br_return_retired, ARMV8_PMUV3_PERFCTR_BR_RETURN_RETIRED),
	ARMV8_EVENT_ATTR(unaligned_ldst_retired, ARMV8_PMUV3_PERFCTR_UNALIGNED_LDST_RETIRED),
	ARMV8_EVENT_ATTR(br_mis_pred, ARMV8_PMUV3_PERFCTR_BR_MIS_PRED),
	ARMV8_EVENT_ATTR(cpu_cycles, ARMV8_PMUV3_PERFCTR_CPU_CYCLES),
	ARMV8_EVENT_ATTR(br_pred, ARMV8_PMUV3_PERFCTR_BR_PRED),
	ARMV8_EVENT_ATTR(mem_access, ARMV8_PMUV3_PERFCTR_MEM_ACCESS),
	ARMV8_EVENT_ATTR(l1i_cache, ARMV8_PMUV3_PERFCTR_L1I_CACHE),
	ARMV8_EVENT_ATTR(l1d_cache_wb, ARMV8_PMUV3_PERFCTR_L1D_CACHE_WB),
	ARMV8_EVENT_ATTR(l2d_cache, ARMV8_PMUV3_PERFCTR_L2D_CACHE),
	ARMV8_EVENT_ATTR(l2d_cache_refill, ARMV8_PMUV3_PERFCTR_L2D_CACHE_REFILL),
	ARMV8_EVENT_ATTR(l2d_cache_wb, ARMV8_PMUV3_PERFCTR_L2D_CACHE_WB),
	ARMV8_EVENT_ATTR(bus_access, ARMV8_PMUV3_PERFCTR_BUS_ACCESS),
	ARMV8_EVENT_ATTR(memory_error, ARMV8_PMUV3_PERFCTR_MEMORY_ERROR),
	ARMV8_EVENT_ATTR(inst_spec, ARMV8_PMUV3_PERFCTR_INST_SPEC),
	ARMV8_EVENT_ATTR(ttbr_write_retired, ARMV8_PMUV3_PERFCTR_TTBR_WRITE_RETIRED),
	ARMV8_EVENT_ATTR(bus_cycles, ARMV8_PMUV3_PERFCTR_BUS_CYCLES),
	/* Don't expose the chain event in /sys, since it's useless in isolation */
	ARMV8_EVENT_ATTR(l1d_cache_allocate, ARMV8_PMUV3_PERFCTR_L1D_CACHE_ALLOCATE),
	ARMV8_EVENT_ATTR(l2d_cache_allocate, ARMV8_PMUV3_PERFCTR_L2D_CACHE_ALLOCATE),
	ARMV8_EVENT_ATTR(br_retired, ARMV8_PMUV3_PERFCTR_BR_RETIRED),
	ARMV8_EVENT_ATTR(br_mis_pred_retired, ARMV8_PMUV3_PERFCTR_BR_MIS_PRED_RETIRED),
	ARMV8_EVENT_ATTR(stall_frontend, ARMV8_PMUV3_PERFCTR_STALL_FRONTEND),
	ARMV8_EVENT_ATTR(stall_backend, ARMV8_PMUV3_PERFCTR_STALL_BACKEND),
	ARMV8_EVENT_ATTR(l1d_tlb, ARMV8_PMUV3_PERFCTR_L1D_TLB),
	ARMV8_EVENT_ATTR(l1i_tlb, ARMV8_PMUV3_PERFCTR_L1I_TLB),
	ARMV8_EVENT_ATTR(l2i_cache, ARMV8_PMUV3_PERFCTR_L2I_CACHE),
	ARMV8_EVENT_ATTR(l2i_cache_refill, ARMV8_PMUV3_PERFCTR_L2I_CACHE_REFILL),
	ARMV8_EVENT_ATTR(l3d_cache_allocate, ARMV8_PMUV3_PERFCTR_L3D_CACHE_ALLOCATE),
	ARMV8_EVENT_ATTR(l3d_cache_refill, ARMV8_PMUV3_PERFCTR_L3D_CACHE_REFILL),
	ARMV8_EVENT_ATTR(l3d_cache, ARMV8_PMUV3_PERFCTR_L3D_CACHE),
	ARMV8_EVENT_ATTR(l3d_cache_wb, ARMV8_PMUV3_PERFCTR_L3D_CACHE_WB),
	ARMV8_EVENT_ATTR(l2d_tlb_refill, ARMV8_PMUV3_PERFCTR_L2D_TLB_REFILL),
	ARMV8_EVENT_ATTR(l2i_tlb_refill, ARMV8_PMUV3_PERFCTR_L2I_TLB_REFILL),
	ARMV8_EVENT_ATTR(l2d_tlb, ARMV8_PMUV3_PERFCTR_L2D_TLB),
	ARMV8_EVENT_ATTR(l2i_tlb, ARMV8_PMUV3_PERFCTR_L2I_TLB),
	ARMV8_EVENT_ATTR(remote_access, ARMV8_PMUV3_PERFCTR_REMOTE_ACCESS),
	ARMV8_EVENT_ATTR(ll_cache, ARMV8_PMUV3_PERFCTR_LL_CACHE),
	ARMV8_EVENT_ATTR(ll_cache_miss, ARMV8_PMUV3_PERFCTR_LL_CACHE_MISS),
	ARMV8_EVENT_ATTR(dtlb_walk, ARMV8_PMUV3_PERFCTR_DTLB_WALK),
	ARMV8_EVENT_ATTR(itlb_walk, ARMV8_PMUV3_PERFCTR_ITLB_WALK),
	ARMV8_EVENT_ATTR(ll_cache_rd, ARMV8_PMUV3_PERFCTR_LL_CACHE_RD),
	ARMV8_EVENT_ATTR(ll_cache_miss_rd, ARMV8_PMUV3_PERFCTR_LL_CACHE_MISS_RD),
	ARMV8_EVENT_ATTR(remote_access_rd, ARMV8_PMUV3_PERFCTR_REMOTE_ACCESS_RD),
	ARMV8_EVENT_ATTR(l1d_cache_lmiss_rd, ARMV8_PMUV3_PERFCTR_L1D_CACHE_LMISS_RD),
	ARMV8_EVENT_ATTR(op_retired, ARMV8_PMUV3_PERFCTR_OP_RETIRED),
	ARMV8_EVENT_ATTR(op_spec, ARMV8_PMUV3_PERFCTR_OP_SPEC),
	ARMV8_EVENT_ATTR(stall, ARMV8_PMUV3_PERFCTR_STALL),
	ARMV8_EVENT_ATTR(stall_slot_backend, ARMV8_PMUV3_PERFCTR_STALL_SLOT_BACKEND),
	ARMV8_EVENT_ATTR(stall_slot_frontend, ARMV8_PMUV3_PERFCTR_STALL_SLOT_FRONTEND),
	ARMV8_EVENT_ATTR(stall_slot, ARMV8_PMUV3_PERFCTR_STALL_SLOT),
	ARMV8_EVENT_ATTR(sample_pop, ARMV8_SPE_PERFCTR_SAMPLE_POP),
	ARMV8_EVENT_ATTR(sample_feed, ARMV8_SPE_PERFCTR_SAMPLE_FEED),
	ARMV8_EVENT_ATTR(sample_filtrate, ARMV8_SPE_PERFCTR_SAMPLE_FILTRATE),
	ARMV8_EVENT_ATTR(sample_collision, ARMV8_SPE_PERFCTR_SAMPLE_COLLISION),
	ARMV8_EVENT_ATTR(cnt_cycles, ARMV8_AMU_PERFCTR_CNT_CYCLES),
	ARMV8_EVENT_ATTR(stall_backend_mem, ARMV8_AMU_PERFCTR_STALL_BACKEND_MEM),
	ARMV8_EVENT_ATTR(l1i_cache_lmiss, ARMV8_PMUV3_PERFCTR_L1I_CACHE_LMISS),
	ARMV8_EVENT_ATTR(l2d_cache_lmiss_rd, ARMV8_PMUV3_PERFCTR_L2D_CACHE_LMISS_RD),
	ARMV8_EVENT_ATTR(l2i_cache_lmiss, ARMV8_PMUV3_PERFCTR_L2I_CACHE_LMISS),
	ARMV8_EVENT_ATTR(l3d_cache_lmiss_rd, ARMV8_PMUV3_PERFCTR_L3D_CACHE_LMISS_RD),
	ARMV8_EVENT_ATTR(trb_wrap, ARMV8_PMUV3_PERFCTR_TRB_WRAP),
	ARMV8_EVENT_ATTR(trb_trig, ARMV8_PMUV3_PERFCTR_TRB_TRIG),
	ARMV8_EVENT_ATTR(trcextout0, ARMV8_PMUV3_PERFCTR_TRCEXTOUT0),
	ARMV8_EVENT_ATTR(trcextout1, ARMV8_PMUV3_PERFCTR_TRCEXTOUT1),
	ARMV8_EVENT_ATTR(trcextout2, ARMV8_PMUV3_PERFCTR_TRCEXTOUT2),
	ARMV8_EVENT_ATTR(trcextout3, ARMV8_PMUV3_PERFCTR_TRCEXTOUT3),
	ARMV8_EVENT_ATTR(cti_trigout4, ARMV8_PMUV3_PERFCTR_CTI_TRIGOUT4),
	ARMV8_EVENT_ATTR(cti_trigout5, ARMV8_PMUV3_PERFCTR_CTI_TRIGOUT5),
	ARMV8_EVENT_ATTR(cti_trigout6, ARMV8_PMUV3_PERFCTR_CTI_TRIGOUT6),
	ARMV8_EVENT_ATTR(cti_trigout7, ARMV8_PMUV3_PERFCTR_CTI_TRIGOUT7),
	ARMV8_EVENT_ATTR(ldst_align_lat, ARMV8_PMUV3_PERFCTR_LDST_ALIGN_LAT),
	ARMV8_EVENT_ATTR(ld_align_lat, ARMV8_PMUV3_PERFCTR_LD_ALIGN_LAT),
	ARMV8_EVENT_ATTR(st_align_lat, ARMV8_PMUV3_PERFCTR_ST_ALIGN_LAT),
	ARMV8_EVENT_ATTR(mem_access_checked, ARMV8_MTE_PERFCTR_MEM_ACCESS_CHECKED),
	ARMV8_EVENT_ATTR(mem_access_checked_rd, ARMV8_MTE_PERFCTR_MEM_ACCESS_CHECKED_RD),
	ARMV8_EVENT_ATTR(mem_access_checked_wr, ARMV8_MTE_PERFCTR_MEM_ACCESS_CHECKED_WR),
	NULL,
};

static umode_t
armv8pmu_event_attr_is_visible(struct kobject *kobj,
			       struct attribute *attr, int unused)
{
	struct device *dev = kobj_to_dev(kobj);
	struct pmu *pmu = dev_get_drvdata(dev);
	struct arm_pmu *cpu_pmu = container_of(pmu, struct arm_pmu, pmu);
	struct perf_pmu_events_attr *pmu_attr;

	pmu_attr = container_of(attr, struct perf_pmu_events_attr, attr.attr);

	if (pmu_attr->id < ARMV8_PMUV3_MAX_COMMON_EVENTS &&
	    test_bit(pmu_attr->id, cpu_pmu->pmceid_bitmap))
		return attr->mode;

	if (pmu_attr->id >= ARMV8_PMUV3_EXT_COMMON_EVENT_BASE) {
		u64 id = pmu_attr->id - ARMV8_PMUV3_EXT_COMMON_EVENT_BASE;

		if (id < ARMV8_PMUV3_MAX_COMMON_EVENTS &&
		    test_bit(id, cpu_pmu->pmceid_ext_bitmap))
			return attr->mode;
	}

	return 0;
}

static const struct attribute_group armv8_pmuv3_events_attr_group = {
	.name = "events",
	.attrs = armv8_pmuv3_event_attrs,
	.is_visible = armv8pmu_event_attr_is_visible,
};

PMU_FORMAT_ATTR(event, "config:0-15");
PMU_FORMAT_ATTR(long, "config1:0");
PMU_FORMAT_ATTR(rdpmc, "config1:1");

static int sysctl_perf_user_access __read_mostly;

static inline bool armv8pmu_event_is_64bit(struct perf_event *event)
{
	return event->attr.config1 & 0x1;
}

static inline bool armv8pmu_event_want_user_access(struct perf_event *event)
{
	return event->attr.config1 & 0x2;
}

static struct attribute *armv8_pmuv3_format_attrs[] = {
	&format_attr_event.attr,
	&format_attr_long.attr,
	&format_attr_rdpmc.attr,
	NULL,
};

static const struct attribute_group armv8_pmuv3_format_attr_group = {
	.name = "format",
	.attrs = armv8_pmuv3_format_attrs,
};

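/*
 * Example use of the "format" strings above (illustrative invocation
 * only; 0x11 is the architectural CPU_CYCLES event number):
 *
 *	perf stat -e armv8_pmuv3/event=0x11,long=1,rdpmc=1/ -- <workload>
 *
 * "event" selects attr.config bits 0-15, "long" requests a 64-bit
 * counter via attr.config1 bit 0, and "rdpmc" requests user-space
 * counter access via attr.config1 bit 1.
 */
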
static ssize_t slots_show(struct device *dev, struct device_attribute *attr,
			  char *page)
{
	struct pmu *pmu = dev_get_drvdata(dev);
	struct arm_pmu *cpu_pmu = container_of(pmu, struct arm_pmu, pmu);
	u32 slots = cpu_pmu->reg_pmmir & ARMV8_PMU_SLOTS_MASK;

	return sysfs_emit(page, "0x%08x\n", slots);
}

static DEVICE_ATTR_RO(slots);

static ssize_t bus_slots_show(struct device *dev, struct device_attribute *attr,
			      char *page)
{
	struct pmu *pmu = dev_get_drvdata(dev);
	struct arm_pmu *cpu_pmu = container_of(pmu, struct arm_pmu, pmu);
	u32 bus_slots = (cpu_pmu->reg_pmmir >> ARMV8_PMU_BUS_SLOTS_SHIFT)
			& ARMV8_PMU_BUS_SLOTS_MASK;

	return sysfs_emit(page, "0x%08x\n", bus_slots);
}

static DEVICE_ATTR_RO(bus_slots);

static ssize_t bus_width_show(struct device *dev, struct device_attribute *attr,
			      char *page)
{
	struct pmu *pmu = dev_get_drvdata(dev);
	struct arm_pmu *cpu_pmu = container_of(pmu, struct arm_pmu, pmu);
	u32 bus_width = (cpu_pmu->reg_pmmir >> ARMV8_PMU_BUS_WIDTH_SHIFT)
			& ARMV8_PMU_BUS_WIDTH_MASK;
	u32 val = 0;

	/*
	 * Encoded as Log2(number of bytes), plus one. Valid encodings are
	 * 3..12, so e.g. an encoding of 3 reads back as 1 << 2 = 4 bytes.
	 */
	if (bus_width > 2 && bus_width < 13)
		val = 1 << (bus_width - 1);

	return sysfs_emit(page, "0x%08x\n", val);
}

static DEVICE_ATTR_RO(bus_width);

static struct attribute *armv8_pmuv3_caps_attrs[] = {
	&dev_attr_slots.attr,
	&dev_attr_bus_slots.attr,
	&dev_attr_bus_width.attr,
	NULL,
};

static const struct attribute_group armv8_pmuv3_caps_attr_group = {
	.name = "caps",
	.attrs = armv8_pmuv3_caps_attrs,
};

/*
 * Perf Events' indices
 */
#define	ARMV8_IDX_CYCLE_COUNTER		0
#define	ARMV8_IDX_COUNTER0		1
#define	ARMV8_IDX_CYCLE_COUNTER_USER	32

/*
 * We unconditionally enable ARMv8.5-PMU long event counter support
 * (64-bit events) where supported. Indicate if this arm_pmu has long
 * event counter support.
 */
static bool armv8pmu_has_long_event(struct arm_pmu *cpu_pmu)
{
	return (cpu_pmu->pmuver >= ID_AA64DFR0_EL1_PMUVer_V3P5);
}

static inline bool armv8pmu_event_has_user_read(struct perf_event *event)
{
	return event->hw.flags & PERF_EVENT_FLAG_USER_READ_CNT;
}

/*
 * We must chain two programmable counters for 64 bit events,
 * except when we have allocated the 64bit cycle counter (for CPU
 * cycles event) or when user space counter access is enabled.
 */
static inline bool armv8pmu_event_is_chained(struct perf_event *event)
{
	int idx = event->hw.idx;
	struct arm_pmu *cpu_pmu = to_arm_pmu(event->pmu);

	return !armv8pmu_event_has_user_read(event) &&
	       armv8pmu_event_is_64bit(event) &&
	       !armv8pmu_has_long_event(cpu_pmu) &&
	       (idx != ARMV8_IDX_CYCLE_COUNTER);
}

/*
 * ARMv8 low level PMU access
 */

/*
 * Perf Event to low level counters mapping
 */
#define	ARMV8_IDX_TO_COUNTER(x)	\
	(((x) - ARMV8_IDX_COUNTER0) & ARMV8_PMU_COUNTER_MASK)

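/*
 * For example, ARMV8_IDX_TO_COUNTER(ARMV8_IDX_COUNTER0) is hardware
 * event counter 0 (PMEVCNTR0_EL0). The cycle counter (idx 0) never goes
 * through this mapping; it is accessed directly via PMCCNTR_EL0.
 */
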
/*
 * The PMEVCNTR<n>_EL0 and PMEVTYPER<n>_EL0 register names must be
 * compile-time constants, so dispatch on the counter number with a
 * switch that expands to one direct sysreg access per register.
 */

#define PMEVN_CASE(n, case_macro) \
	case n: case_macro(n); break

#define PMEVN_SWITCH(x, case_macro)				\
	do {							\
		switch (x) {					\
		PMEVN_CASE(0,  case_macro);			\
		PMEVN_CASE(1,  case_macro);			\
		PMEVN_CASE(2,  case_macro);			\
		PMEVN_CASE(3,  case_macro);			\
		PMEVN_CASE(4,  case_macro);			\
		PMEVN_CASE(5,  case_macro);			\
		PMEVN_CASE(6,  case_macro);			\
		PMEVN_CASE(7,  case_macro);			\
		PMEVN_CASE(8,  case_macro);			\
		PMEVN_CASE(9,  case_macro);			\
		PMEVN_CASE(10, case_macro);			\
		PMEVN_CASE(11, case_macro);			\
		PMEVN_CASE(12, case_macro);			\
		PMEVN_CASE(13, case_macro);			\
		PMEVN_CASE(14, case_macro);			\
		PMEVN_CASE(15, case_macro);			\
		PMEVN_CASE(16, case_macro);			\
		PMEVN_CASE(17, case_macro);			\
		PMEVN_CASE(18, case_macro);			\
		PMEVN_CASE(19, case_macro);			\
		PMEVN_CASE(20, case_macro);			\
		PMEVN_CASE(21, case_macro);			\
		PMEVN_CASE(22, case_macro);			\
		PMEVN_CASE(23, case_macro);			\
		PMEVN_CASE(24, case_macro);			\
		PMEVN_CASE(25, case_macro);			\
		PMEVN_CASE(26, case_macro);			\
		PMEVN_CASE(27, case_macro);			\
		PMEVN_CASE(28, case_macro);			\
		PMEVN_CASE(29, case_macro);			\
		PMEVN_CASE(30, case_macro);			\
		default: WARN(1, "Invalid PMEV* index\n");	\
		}						\
	} while (0)

#define RETURN_READ_PMEVCNTRN(n) \
	return read_sysreg(pmevcntr##n##_el0)
static unsigned long read_pmevcntrn(int n)
{
	PMEVN_SWITCH(n, RETURN_READ_PMEVCNTRN);
	return 0;
}

#define WRITE_PMEVCNTRN(n) \
	write_sysreg(val, pmevcntr##n##_el0)
static void write_pmevcntrn(int n, unsigned long val)
{
	PMEVN_SWITCH(n, WRITE_PMEVCNTRN);
}

#define WRITE_PMEVTYPERN(n) \
	write_sysreg(val, pmevtyper##n##_el0)
static void write_pmevtypern(int n, unsigned long val)
{
	PMEVN_SWITCH(n, WRITE_PMEVTYPERN);
}

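/*
 * For instance, read_pmevcntrn(3) resolves through PMEVN_SWITCH() to the
 * "case 3" arm and compiles to a direct read of pmevcntr3_el0; there is
 * no run-time-indexed form of these system registers to use instead.
 */
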
static inline u32 armv8pmu_pmcr_read(void)
{
	return read_sysreg(pmcr_el0);
}

static inline void armv8pmu_pmcr_write(u32 val)
{
	val &= ARMV8_PMU_PMCR_MASK;
	isb();
	write_sysreg(val, pmcr_el0);
}

static inline int armv8pmu_has_overflowed(u32 pmovsr)
{
	return pmovsr & ARMV8_PMU_OVERFLOWED_MASK;
}

static inline int armv8pmu_counter_has_overflowed(u32 pmnc, int idx)
{
	return pmnc & BIT(ARMV8_IDX_TO_COUNTER(idx));
}

static inline u64 armv8pmu_read_evcntr(int idx)
{
	u32 counter = ARMV8_IDX_TO_COUNTER(idx);

	return read_pmevcntrn(counter);
}

static inline u64 armv8pmu_read_hw_counter(struct perf_event *event)
{
	int idx = event->hw.idx;
	u64 val = armv8pmu_read_evcntr(idx);

	if (armv8pmu_event_is_chained(event))
		val = (val << 32) | armv8pmu_read_evcntr(idx - 1);
	return val;
}

/*
 * The cycle counter is always a 64-bit counter. When ARMV8_PMU_PMCR_LP
 * is set the event counters also become 64-bit counters. Unless the
 * user has requested a long counter (attr.config1) then we want to
 * interrupt upon 32-bit overflow - we achieve this by applying a bias.
 */
static bool armv8pmu_event_needs_bias(struct perf_event *event)
{
	struct arm_pmu *cpu_pmu = to_arm_pmu(event->pmu);
	struct hw_perf_event *hwc = &event->hw;
	int idx = hwc->idx;

	if (armv8pmu_event_is_64bit(event))
		return false;

	if (armv8pmu_has_long_event(cpu_pmu) ||
	    idx == ARMV8_IDX_CYCLE_COUNTER)
		return true;

	return false;
}

static u64 armv8pmu_bias_long_counter(struct perf_event *event, u64 value)
{
	if (armv8pmu_event_needs_bias(event))
		value |= GENMASK(63, 32);

	return value;
}

static u64 armv8pmu_unbias_long_counter(struct perf_event *event, u64 value)
{
	if (armv8pmu_event_needs_bias(event))
		value &= ~GENMASK(63, 32);

	return value;
}

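/*
 * Worked example of the bias: to take an interrupt after 0x1000 counts
 * of a biased event, the period code writes 0xfffffffffffff000 rather
 * than 0xfffff000. The 64-bit counter then wraps after exactly 0x1000
 * increments, raising the overflow interrupt at the 32-bit cadence, and
 * armv8pmu_unbias_long_counter() masks the bias back off on read.
 */
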
static u64 armv8pmu_read_counter(struct perf_event *event)
{
	struct hw_perf_event *hwc = &event->hw;
	int idx = hwc->idx;
	u64 value;

	if (idx == ARMV8_IDX_CYCLE_COUNTER)
		value = read_sysreg(pmccntr_el0);
	else
		value = armv8pmu_read_hw_counter(event);

	return armv8pmu_unbias_long_counter(event, value);
}

static inline void armv8pmu_write_evcntr(int idx, u64 value)
{
	u32 counter = ARMV8_IDX_TO_COUNTER(idx);

	write_pmevcntrn(counter, value);
}

static inline void armv8pmu_write_hw_counter(struct perf_event *event,
					     u64 value)
{
	int idx = event->hw.idx;

	if (armv8pmu_event_is_chained(event)) {
		armv8pmu_write_evcntr(idx, upper_32_bits(value));
		armv8pmu_write_evcntr(idx - 1, lower_32_bits(value));
	} else {
		armv8pmu_write_evcntr(idx, value);
	}
}

static void armv8pmu_write_counter(struct perf_event *event, u64 value)
{
	struct hw_perf_event *hwc = &event->hw;
	int idx = hwc->idx;

	value = armv8pmu_bias_long_counter(event, value);

	if (idx == ARMV8_IDX_CYCLE_COUNTER)
		write_sysreg(value, pmccntr_el0);
	else
		armv8pmu_write_hw_counter(event, value);
}

static inline void armv8pmu_write_evtype(int idx, u32 val)
{
	u32 counter = ARMV8_IDX_TO_COUNTER(idx);

	val &= ARMV8_PMU_EVTYPE_MASK;
	write_pmevtypern(counter, val);
}

static inline void armv8pmu_write_event_type(struct perf_event *event)
{
	struct hw_perf_event *hwc = &event->hw;
	int idx = hwc->idx;

	/*
	 * For chained events, the low counter is programmed to count
	 * the event of interest and the high counter is programmed
	 * with the CHAIN event code with filters set to count at all ELs.
	 */
	if (armv8pmu_event_is_chained(event)) {
		u32 chain_evt = ARMV8_PMUV3_PERFCTR_CHAIN |
				ARMV8_PMU_INCLUDE_EL2;

		armv8pmu_write_evtype(idx - 1, hwc->config_base);
		armv8pmu_write_evtype(idx, chain_evt);
	} else {
		if (idx == ARMV8_IDX_CYCLE_COUNTER)
			write_sysreg(hwc->config_base, pmccfiltr_el0);
		else
			armv8pmu_write_evtype(idx, hwc->config_base);
	}
}

static u32 armv8pmu_event_cnten_mask(struct perf_event *event)
{
	int counter = ARMV8_IDX_TO_COUNTER(event->hw.idx);
	u32 mask = BIT(counter);

	if (armv8pmu_event_is_chained(event))
		mask |= BIT(counter - 1);
	return mask;
}

static inline void armv8pmu_enable_counter(u32 mask)
{
	/*
	 * Make sure event configuration register writes are visible before we
	 * enable the counter.
	 */
	isb();
	write_sysreg(mask, pmcntenset_el0);
}

static inline void armv8pmu_enable_event_counter(struct perf_event *event)
{
	struct perf_event_attr *attr = &event->attr;
	u32 mask = armv8pmu_event_cnten_mask(event);

	kvm_set_pmu_events(mask, attr);

	/* We rely on the hypervisor switch code to enable guest counters */
	if (!kvm_pmu_counter_deferred(attr))
		armv8pmu_enable_counter(mask);
}

static inline void armv8pmu_disable_counter(u32 mask)
{
	write_sysreg(mask, pmcntenclr_el0);
	/*
	 * Make sure the effects of disabling the counter are visible before we
	 * start configuring the event.
	 */
	isb();
}

static inline void armv8pmu_disable_event_counter(struct perf_event *event)
{
	struct perf_event_attr *attr = &event->attr;
	u32 mask = armv8pmu_event_cnten_mask(event);

	kvm_clr_pmu_events(mask);

	/* We rely on the hypervisor switch code to disable guest counters */
	if (!kvm_pmu_counter_deferred(attr))
		armv8pmu_disable_counter(mask);
}

static inline void armv8pmu_enable_intens(u32 mask)
{
	write_sysreg(mask, pmintenset_el1);
}

static inline void armv8pmu_enable_event_irq(struct perf_event *event)
{
	u32 counter = ARMV8_IDX_TO_COUNTER(event->hw.idx);

	armv8pmu_enable_intens(BIT(counter));
}

static inline void armv8pmu_disable_intens(u32 mask)
{
	write_sysreg(mask, pmintenclr_el1);
	isb();
	/* Clear the overflow flag in case an interrupt is pending. */
	write_sysreg(mask, pmovsclr_el0);
	isb();
}

static inline void armv8pmu_disable_event_irq(struct perf_event *event)
{
	u32 counter = ARMV8_IDX_TO_COUNTER(event->hw.idx);

	armv8pmu_disable_intens(BIT(counter));
}

static inline u32 armv8pmu_getreset_flags(void)
{
	u32 value;

	/* Read */
	value = read_sysreg(pmovsclr_el0);

	/* Write to clear flags */
	value &= ARMV8_PMU_OVSR_MASK;
	write_sysreg(value, pmovsclr_el0);

	return value;
}

static void armv8pmu_disable_user_access(void)
{
	write_sysreg(0, pmuserenr_el0);
}

static void armv8pmu_enable_user_access(struct arm_pmu *cpu_pmu)
{
	int i;
	struct pmu_hw_events *cpuc = this_cpu_ptr(cpu_pmu->hw_events);

	/* Clear any unused counters to avoid leaking their contents */
	for_each_clear_bit(i, cpuc->used_mask, cpu_pmu->num_events) {
		if (i == ARMV8_IDX_CYCLE_COUNTER)
			write_sysreg(0, pmccntr_el0);
		else
			armv8pmu_write_evcntr(i, 0);
	}

	write_sysreg(0, pmuserenr_el0);
	write_sysreg(ARMV8_PMU_USERENR_ER | ARMV8_PMU_USERENR_CR, pmuserenr_el0);
}

static void armv8pmu_enable_event(struct perf_event *event)
{
	/*
	 * Enable counter and interrupt, and set the counter to count
	 * the event that we're interested in.
	 */

	/* Disable counter */
	armv8pmu_disable_event_counter(event);

	/* Configure event */
	armv8pmu_write_event_type(event);

	/* Enable interrupt for this counter */
	armv8pmu_enable_event_irq(event);

	/* Enable counter */
	armv8pmu_enable_event_counter(event);
}

static void armv8pmu_disable_event(struct perf_event *event)
{
	/* Disable counter */
	armv8pmu_disable_event_counter(event);

	/* Disable interrupt for this counter */
	armv8pmu_disable_event_irq(event);
}

static void armv8pmu_start(struct arm_pmu *cpu_pmu)
{
	struct perf_event_context *ctx;
	int nr_user = 0;

	ctx = perf_cpu_task_ctx();
	if (ctx)
		nr_user = ctx->nr_user;

	if (sysctl_perf_user_access && nr_user)
		armv8pmu_enable_user_access(cpu_pmu);
	else
		armv8pmu_disable_user_access();

	/* Enable all counters */
	armv8pmu_pmcr_write(armv8pmu_pmcr_read() | ARMV8_PMU_PMCR_E);
}

static void armv8pmu_stop(struct arm_pmu *cpu_pmu)
{
	/* Disable all counters */
	armv8pmu_pmcr_write(armv8pmu_pmcr_read() & ~ARMV8_PMU_PMCR_E);
}

static irqreturn_t armv8pmu_handle_irq(struct arm_pmu *cpu_pmu)
{
	u32 pmovsr;
	struct perf_sample_data data;
	struct pmu_hw_events *cpuc = this_cpu_ptr(cpu_pmu->hw_events);
	struct pt_regs *regs;
	int idx;

	/* Get and reset the IRQ flags */
	pmovsr = armv8pmu_getreset_flags();

	/* Did an overflow occur? */
	if (!armv8pmu_has_overflowed(pmovsr))
		return IRQ_NONE;

	/* Handle the counter(s) overflow(s) */
	regs = get_irq_regs();

	/*
	 * Stop the PMU while processing the counter overflows
	 * to prevent skews in group events.
	 */
	armv8pmu_stop(cpu_pmu);
	for (idx = 0; idx < cpu_pmu->num_events; ++idx) {
		struct perf_event *event = cpuc->events[idx];
		struct hw_perf_event *hwc;

		/* Ignore if we don't have an event. */
		if (!event)
			continue;

		/*
		 * We have a single interrupt for all counters. Check that
		 * each counter has overflowed before we process it.
		 */
		if (!armv8pmu_counter_has_overflowed(pmovsr, idx))
			continue;

		hwc = &event->hw;
		armpmu_event_update(event);
		perf_sample_data_init(&data, 0, hwc->last_period);
		if (!armpmu_event_set_period(event))
			continue;

		/*
		 * Perf event overflow will queue the processing of the event as
		 * an irq_work which will be taken care of in the handling of
		 * IPI_IRQ_WORK.
		 */
		if (perf_event_overflow(event, &data, regs))
			cpu_pmu->disable(event);
	}
	armv8pmu_start(cpu_pmu);

	return IRQ_HANDLED;
}

static int armv8pmu_get_single_idx(struct pmu_hw_events *cpuc,
				   struct arm_pmu *cpu_pmu)
{
	int idx;

	for (idx = ARMV8_IDX_COUNTER0; idx < cpu_pmu->num_events; idx++) {
		if (!test_and_set_bit(idx, cpuc->used_mask))
			return idx;
	}
	return -EAGAIN;
}

static int armv8pmu_get_chain_idx(struct pmu_hw_events *cpuc,
				  struct arm_pmu *cpu_pmu)
{
	int idx;

	/*
	 * Chaining requires two consecutive event counters, where
	 * the lower idx must be even.
	 */
	for (idx = ARMV8_IDX_COUNTER0 + 1; idx < cpu_pmu->num_events; idx += 2) {
		if (!test_and_set_bit(idx, cpuc->used_mask)) {
			/* Check if the preceding even counter is available */
			if (!test_and_set_bit(idx - 1, cpuc->used_mask))
				return idx;
			/* Release the odd counter */
			clear_bit(idx, cpuc->used_mask);
		}
	}
	return -EAGAIN;
}

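/*
 * Example: with six event counters, the loop above tries idx pairs
 * (2,1), (4,3), ... which map to hardware counter pairs (1,0), (3,2),
 * ... The even hardware counter counts the event of interest and the
 * odd one is programmed with the CHAIN event to extend it to 64 bits.
 */
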
static int armv8pmu_get_event_idx(struct pmu_hw_events *cpuc,
				  struct perf_event *event)
{
	struct arm_pmu *cpu_pmu = to_arm_pmu(event->pmu);
	struct hw_perf_event *hwc = &event->hw;
	unsigned long evtype = hwc->config_base & ARMV8_PMU_EVTYPE_EVENT;

	/* Always prefer to place a cycle counter into the cycle counter. */
	if (evtype == ARMV8_PMUV3_PERFCTR_CPU_CYCLES) {
		if (!test_and_set_bit(ARMV8_IDX_CYCLE_COUNTER, cpuc->used_mask))
			return ARMV8_IDX_CYCLE_COUNTER;
		else if (armv8pmu_event_is_64bit(event) &&
			 armv8pmu_event_want_user_access(event) &&
			 !armv8pmu_has_long_event(cpu_pmu))
			return -EAGAIN;
	}

	/* Otherwise use the event counters */
	if (armv8pmu_event_is_chained(event))
		return armv8pmu_get_chain_idx(cpuc, cpu_pmu);
	else
		return armv8pmu_get_single_idx(cpuc, cpu_pmu);
}

static void armv8pmu_clear_event_idx(struct pmu_hw_events *cpuc,
				     struct perf_event *event)
{
	int idx = event->hw.idx;

	clear_bit(idx, cpuc->used_mask);
	if (armv8pmu_event_is_chained(event))
		clear_bit(idx - 1, cpuc->used_mask);
}

static int armv8pmu_user_event_idx(struct perf_event *event)
{
	if (!sysctl_perf_user_access || !armv8pmu_event_has_user_read(event))
		return 0;

	/*
	 * We remap the cycle counter index to 32 to
	 * match the offset applied to the rest of
	 * the counter indices.
	 */
	if (event->hw.idx == ARMV8_IDX_CYCLE_COUNTER)
		return ARMV8_IDX_CYCLE_COUNTER_USER;

	return event->hw.idx;
}

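/*
 * A minimal sketch of the matching user-space read loop (assumes the
 * event's mmap page is mapped as "pc" and cap_user_rdpmc is advertised;
 * read_hw_counter() stands in for an mrs of PMCCNTR_EL0 or
 * PMEVCNTR<n>_EL0 and is not a real API):
 *
 *	do {
 *		seq = READ_ONCE(pc->lock);
 *		barrier();
 *		idx = pc->index;	// 0 means not readable from EL0
 *		count = pc->offset;
 *		if (idx)		// idx - 1 is the hardware counter
 *			count += read_hw_counter(idx - 1);
 *		barrier();
 *	} while (READ_ONCE(pc->lock) != seq);
 */
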
/*
 * Add an event filter to a given event.
 */
static int armv8pmu_set_event_filter(struct hw_perf_event *event,
				     struct perf_event_attr *attr)
{
	unsigned long config_base = 0;

	if (attr->exclude_idle)
		return -EPERM;

	/*
	 * If we're running in hyp mode, then we *are* the hypervisor.
	 * Therefore we ignore exclude_hv in this configuration, since
	 * there's no hypervisor to sample anyway. This is consistent
	 * with other architectures (x86 and Power).
	 */
	if (is_kernel_in_hyp_mode()) {
		if (!attr->exclude_kernel && !attr->exclude_host)
			config_base |= ARMV8_PMU_INCLUDE_EL2;
		if (attr->exclude_guest)
			config_base |= ARMV8_PMU_EXCLUDE_EL1;
		if (attr->exclude_host)
			config_base |= ARMV8_PMU_EXCLUDE_EL0;
	} else {
		if (!attr->exclude_hv && !attr->exclude_host)
			config_base |= ARMV8_PMU_INCLUDE_EL2;
	}

	/*
	 * Filter out !VHE kernels and guest kernels
	 */
	if (attr->exclude_kernel)
		config_base |= ARMV8_PMU_EXCLUDE_EL1;

	if (attr->exclude_user)
		config_base |= ARMV8_PMU_EXCLUDE_EL0;

	/*
	 * Install the filter into config_base as this is used to
	 * construct the event type.
	 */
	event->config_base = config_base;

	return 0;
}

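/*
 * For example, a "perf stat -e cycles:u" request arrives here with
 * exclude_kernel (and typically exclude_hv) set, so config_base ends up
 * carrying ARMV8_PMU_EXCLUDE_EL1 and the event counts at EL0 only.
 */
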
static bool armv8pmu_filter(struct pmu *pmu, int cpu)
{
	struct arm_pmu *armpmu = to_arm_pmu(pmu);

	return !cpumask_test_cpu(cpu, &armpmu->supported_cpus);
}

static void armv8pmu_reset(void *info)
{
	struct arm_pmu *cpu_pmu = (struct arm_pmu *)info;
	u32 pmcr;

	/* The counter and interrupt enable registers are unknown at reset. */
	armv8pmu_disable_counter(U32_MAX);
	armv8pmu_disable_intens(U32_MAX);

	/* Clear the counters we flip at guest entry/exit */
	kvm_clr_pmu_events(U32_MAX);

	/*
	 * Initialize & Reset PMNC. Request overflow interrupt for
	 * 64 bit cycle counter but cheat in armv8pmu_write_counter().
	 */
	pmcr = ARMV8_PMU_PMCR_P | ARMV8_PMU_PMCR_C | ARMV8_PMU_PMCR_LC;

	/* Enable long event counter support where available */
	if (armv8pmu_has_long_event(cpu_pmu))
		pmcr |= ARMV8_PMU_PMCR_LP;

	armv8pmu_pmcr_write(pmcr);
}

static int __armv8_pmuv3_map_event(struct perf_event *event,
				   const unsigned (*extra_event_map)
						  [PERF_COUNT_HW_MAX],
				   const unsigned (*extra_cache_map)
						  [PERF_COUNT_HW_CACHE_MAX]
						  [PERF_COUNT_HW_CACHE_OP_MAX]
						  [PERF_COUNT_HW_CACHE_RESULT_MAX])
{
	int hw_event_id;
	struct arm_pmu *armpmu = to_arm_pmu(event->pmu);

	hw_event_id = armpmu_map_event(event, &armv8_pmuv3_perf_map,
				       &armv8_pmuv3_perf_cache_map,
				       ARMV8_PMU_EVTYPE_EVENT);

	if (armv8pmu_event_is_64bit(event))
		event->hw.flags |= ARMPMU_EVT_64BIT;

	/*
	 * User events must be allocated into a single counter, and so
	 * must not be chained.
	 *
	 * Most 64-bit events require long counter support, but 64-bit
	 * CPU_CYCLES events can be placed into the dedicated cycle
	 * counter when this is free.
	 */
	if (armv8pmu_event_want_user_access(event)) {
		if (!(event->attach_state & PERF_ATTACH_TASK))
			return -EINVAL;
		if (armv8pmu_event_is_64bit(event) &&
		    (hw_event_id != ARMV8_PMUV3_PERFCTR_CPU_CYCLES) &&
		    !armv8pmu_has_long_event(armpmu))
			return -EOPNOTSUPP;

		event->hw.flags |= PERF_EVENT_FLAG_USER_READ_CNT;
	}

	/* Only expose micro/arch events supported by this PMU */
	if ((hw_event_id > 0) && (hw_event_id < ARMV8_PMUV3_MAX_COMMON_EVENTS)
	    && test_bit(hw_event_id, armpmu->pmceid_bitmap)) {
		return hw_event_id;
	}

	return armpmu_map_event(event, extra_event_map, extra_cache_map,
				ARMV8_PMU_EVTYPE_EVENT);
}

static int armv8_pmuv3_map_event(struct perf_event *event)
{
	return __armv8_pmuv3_map_event(event, NULL, NULL);
}

static int armv8_a53_map_event(struct perf_event *event)
{
	return __armv8_pmuv3_map_event(event, NULL, &armv8_a53_perf_cache_map);
}

static int armv8_a57_map_event(struct perf_event *event)
{
	return __armv8_pmuv3_map_event(event, NULL, &armv8_a57_perf_cache_map);
}

static int armv8_a73_map_event(struct perf_event *event)
{
	return __armv8_pmuv3_map_event(event, NULL, &armv8_a73_perf_cache_map);
}

static int armv8_thunder_map_event(struct perf_event *event)
{
	return __armv8_pmuv3_map_event(event, NULL,
				       &armv8_thunder_perf_cache_map);
}

static int armv8_vulcan_map_event(struct perf_event *event)
{
	return __armv8_pmuv3_map_event(event, NULL,
				       &armv8_vulcan_perf_cache_map);
}

struct armv8pmu_probe_info {
	struct arm_pmu *pmu;
	bool present;
};

static void __armv8pmu_probe_pmu(void *info)
{
	struct armv8pmu_probe_info *probe = info;
	struct arm_pmu *cpu_pmu = probe->pmu;
	u64 dfr0;
	u64 pmceid_raw[2];
	u32 pmceid[2];
	int pmuver;

	dfr0 = read_sysreg(id_aa64dfr0_el1);
	pmuver = cpuid_feature_extract_unsigned_field(dfr0,
			ID_AA64DFR0_EL1_PMUVer_SHIFT);
	if (pmuver == ID_AA64DFR0_EL1_PMUVer_IMP_DEF ||
	    pmuver == ID_AA64DFR0_EL1_PMUVer_NI)
		return;

	cpu_pmu->pmuver = pmuver;
	probe->present = true;

	/* Read the nb of CNTx counters supported from PMNC */
	cpu_pmu->num_events = (armv8pmu_pmcr_read() >> ARMV8_PMU_PMCR_N_SHIFT)
		& ARMV8_PMU_PMCR_N_MASK;

	/* Add the CPU cycles counter */
	cpu_pmu->num_events += 1;

	pmceid[0] = pmceid_raw[0] = read_sysreg(pmceid0_el0);
	pmceid[1] = pmceid_raw[1] = read_sysreg(pmceid1_el0);

	bitmap_from_arr32(cpu_pmu->pmceid_bitmap,
			  pmceid, ARMV8_PMUV3_MAX_COMMON_EVENTS);

	pmceid[0] = pmceid_raw[0] >> 32;
	pmceid[1] = pmceid_raw[1] >> 32;

	bitmap_from_arr32(cpu_pmu->pmceid_ext_bitmap,
			  pmceid, ARMV8_PMUV3_MAX_COMMON_EVENTS);

	/* store PMMIR_EL1 register for sysfs */
	if (pmuver >= ID_AA64DFR0_EL1_PMUVer_V3P4 && (pmceid_raw[1] & BIT(31)))
		cpu_pmu->reg_pmmir = read_cpuid(PMMIR_EL1);
	else
		cpu_pmu->reg_pmmir = 0;
}

static int armv8pmu_probe_pmu(struct arm_pmu *cpu_pmu)
{
	struct armv8pmu_probe_info probe = {
		.pmu = cpu_pmu,
		.present = false,
	};
	int ret;

	ret = smp_call_function_any(&cpu_pmu->supported_cpus,
				    __armv8pmu_probe_pmu,
				    &probe, 1);
	if (ret)
		return ret;

	return probe.present ? 0 : -ENODEV;
}

static void armv8pmu_disable_user_access_ipi(void *unused)
{
	armv8pmu_disable_user_access();
}

static int armv8pmu_proc_user_access_handler(struct ctl_table *table, int write,
		void *buffer, size_t *lenp, loff_t *ppos)
{
	int ret = proc_dointvec_minmax(table, write, buffer, lenp, ppos);

	if (ret || !write || sysctl_perf_user_access)
		return ret;

	on_each_cpu(armv8pmu_disable_user_access_ipi, NULL, 1);
	return 0;
}

static struct ctl_table armv8_pmu_sysctl_table[] = {
	{
		.procname	= "perf_user_access",
		.data		= &sysctl_perf_user_access,
		.maxlen		= sizeof(unsigned int),
		.mode		= 0644,
		.proc_handler	= armv8pmu_proc_user_access_handler,
		.extra1		= SYSCTL_ZERO,
		.extra2		= SYSCTL_ONE,
	},
	{ }
};

static void armv8_pmu_register_sysctl_table(void)
{
	static u32 tbl_registered = 0;

	if (!cmpxchg_relaxed(&tbl_registered, 0, 1))
		register_sysctl("kernel", armv8_pmu_sysctl_table);
}

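/*
 * Usage note: user-space counter access is opt-in at run time, e.g.
 *
 *	# sysctl kernel.perf_user_access=1
 *
 * Flipping it back to 0 IPIs all CPUs to clear PMUSERENR_EL0 so that no
 * stale user-space access rights survive the sysctl change.
 */
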
static int armv8_pmu_init(struct arm_pmu *cpu_pmu, char *name,
			  int (*map_event)(struct perf_event *event),
			  const struct attribute_group *events,
			  const struct attribute_group *format,
			  const struct attribute_group *caps)
{
	int ret = armv8pmu_probe_pmu(cpu_pmu);

	if (ret)
		return ret;

	cpu_pmu->handle_irq		= armv8pmu_handle_irq;
	cpu_pmu->enable			= armv8pmu_enable_event;
	cpu_pmu->disable		= armv8pmu_disable_event;
	cpu_pmu->read_counter		= armv8pmu_read_counter;
	cpu_pmu->write_counter		= armv8pmu_write_counter;
	cpu_pmu->get_event_idx		= armv8pmu_get_event_idx;
	cpu_pmu->clear_event_idx	= armv8pmu_clear_event_idx;
	cpu_pmu->start			= armv8pmu_start;
	cpu_pmu->stop			= armv8pmu_stop;
	cpu_pmu->reset			= armv8pmu_reset;
	cpu_pmu->set_event_filter	= armv8pmu_set_event_filter;
	cpu_pmu->filter			= armv8pmu_filter;

	cpu_pmu->pmu.event_idx		= armv8pmu_user_event_idx;

	cpu_pmu->name			= name;
	cpu_pmu->map_event		= map_event;
	cpu_pmu->attr_groups[ARMPMU_ATTR_GROUP_EVENTS] = events ?
			events : &armv8_pmuv3_events_attr_group;
	cpu_pmu->attr_groups[ARMPMU_ATTR_GROUP_FORMATS] = format ?
			format : &armv8_pmuv3_format_attr_group;
	cpu_pmu->attr_groups[ARMPMU_ATTR_GROUP_CAPS] = caps ?
			caps : &armv8_pmuv3_caps_attr_group;

	armv8_pmu_register_sysctl_table();
	return 0;
}

static int armv8_pmu_init_nogroups(struct arm_pmu *cpu_pmu, char *name,
				   int (*map_event)(struct perf_event *event))
{
	return armv8_pmu_init(cpu_pmu, name, map_event, NULL, NULL, NULL);
}

#define PMUV3_INIT_SIMPLE(name)						\
static int name##_pmu_init(struct arm_pmu *cpu_pmu)			\
{									\
	return armv8_pmu_init_nogroups(cpu_pmu, #name, armv8_pmuv3_map_event);\
}

PMUV3_INIT_SIMPLE(armv8_pmuv3)

PMUV3_INIT_SIMPLE(armv8_cortex_a34)
PMUV3_INIT_SIMPLE(armv8_cortex_a55)
PMUV3_INIT_SIMPLE(armv8_cortex_a65)
PMUV3_INIT_SIMPLE(armv8_cortex_a75)
PMUV3_INIT_SIMPLE(armv8_cortex_a76)
PMUV3_INIT_SIMPLE(armv8_cortex_a77)
PMUV3_INIT_SIMPLE(armv8_cortex_a78)
PMUV3_INIT_SIMPLE(armv9_cortex_a510)
PMUV3_INIT_SIMPLE(armv9_cortex_a710)
PMUV3_INIT_SIMPLE(armv8_cortex_x1)
PMUV3_INIT_SIMPLE(armv9_cortex_x2)
PMUV3_INIT_SIMPLE(armv8_neoverse_e1)
PMUV3_INIT_SIMPLE(armv8_neoverse_n1)
PMUV3_INIT_SIMPLE(armv9_neoverse_n2)
PMUV3_INIT_SIMPLE(armv8_neoverse_v1)

PMUV3_INIT_SIMPLE(armv8_nvidia_carmel)
PMUV3_INIT_SIMPLE(armv8_nvidia_denver)

static int armv8_a35_pmu_init(struct arm_pmu *cpu_pmu)
{
	return armv8_pmu_init_nogroups(cpu_pmu, "armv8_cortex_a35",
				       armv8_a53_map_event);
}

static int armv8_a53_pmu_init(struct arm_pmu *cpu_pmu)
{
	return armv8_pmu_init_nogroups(cpu_pmu, "armv8_cortex_a53",
				       armv8_a53_map_event);
}

static int armv8_a57_pmu_init(struct arm_pmu *cpu_pmu)
{
	return armv8_pmu_init_nogroups(cpu_pmu, "armv8_cortex_a57",
				       armv8_a57_map_event);
}

static int armv8_a72_pmu_init(struct arm_pmu *cpu_pmu)
{
	return armv8_pmu_init_nogroups(cpu_pmu, "armv8_cortex_a72",
				       armv8_a57_map_event);
}

static int armv8_a73_pmu_init(struct arm_pmu *cpu_pmu)
{
	return armv8_pmu_init_nogroups(cpu_pmu, "armv8_cortex_a73",
				       armv8_a73_map_event);
}

static int armv8_thunder_pmu_init(struct arm_pmu *cpu_pmu)
{
	return armv8_pmu_init_nogroups(cpu_pmu, "armv8_cavium_thunder",
				       armv8_thunder_map_event);
}

static int armv8_vulcan_pmu_init(struct arm_pmu *cpu_pmu)
{
	return armv8_pmu_init_nogroups(cpu_pmu, "armv8_brcm_vulcan",
				       armv8_vulcan_map_event);
}

static const struct of_device_id armv8_pmu_of_device_ids[] = {
	{.compatible = "arm,armv8-pmuv3",	.data = armv8_pmuv3_pmu_init},
	{.compatible = "arm,cortex-a34-pmu",	.data = armv8_cortex_a34_pmu_init},
	{.compatible = "arm,cortex-a35-pmu",	.data = armv8_a35_pmu_init},
	{.compatible = "arm,cortex-a53-pmu",	.data = armv8_a53_pmu_init},
	{.compatible = "arm,cortex-a55-pmu",	.data = armv8_cortex_a55_pmu_init},
	{.compatible = "arm,cortex-a57-pmu",	.data = armv8_a57_pmu_init},
	{.compatible = "arm,cortex-a65-pmu",	.data = armv8_cortex_a65_pmu_init},
	{.compatible = "arm,cortex-a72-pmu",	.data = armv8_a72_pmu_init},
	{.compatible = "arm,cortex-a73-pmu",	.data = armv8_a73_pmu_init},
	{.compatible = "arm,cortex-a75-pmu",	.data = armv8_cortex_a75_pmu_init},
	{.compatible = "arm,cortex-a76-pmu",	.data = armv8_cortex_a76_pmu_init},
	{.compatible = "arm,cortex-a77-pmu",	.data = armv8_cortex_a77_pmu_init},
	{.compatible = "arm,cortex-a78-pmu",	.data = armv8_cortex_a78_pmu_init},
	{.compatible = "arm,cortex-a510-pmu",	.data = armv9_cortex_a510_pmu_init},
	{.compatible = "arm,cortex-a710-pmu",	.data = armv9_cortex_a710_pmu_init},
	{.compatible = "arm,cortex-x1-pmu",	.data = armv8_cortex_x1_pmu_init},
	{.compatible = "arm,cortex-x2-pmu",	.data = armv9_cortex_x2_pmu_init},
	{.compatible = "arm,neoverse-e1-pmu",	.data = armv8_neoverse_e1_pmu_init},
	{.compatible = "arm,neoverse-n1-pmu",	.data = armv8_neoverse_n1_pmu_init},
	{.compatible = "arm,neoverse-n2-pmu",	.data = armv9_neoverse_n2_pmu_init},
	{.compatible = "arm,neoverse-v1-pmu",	.data = armv8_neoverse_v1_pmu_init},
	{.compatible = "cavium,thunder-pmu",	.data = armv8_thunder_pmu_init},
	{.compatible = "brcm,vulcan-pmu",	.data = armv8_vulcan_pmu_init},
	{.compatible = "nvidia,carmel-pmu",	.data = armv8_nvidia_carmel_pmu_init},
	{.compatible = "nvidia,denver-pmu",	.data = armv8_nvidia_denver_pmu_init},
	{},
};

static int armv8_pmu_device_probe(struct platform_device *pdev)
{
	return arm_pmu_device_probe(pdev, armv8_pmu_of_device_ids, NULL);
}

static struct platform_driver armv8_pmu_driver = {
	.driver		= {
		.name	= ARMV8_PMU_PDEV_NAME,
		.of_match_table = armv8_pmu_of_device_ids,
		.suppress_bind_attrs = true,
	},
	.probe		= armv8_pmu_device_probe,
};

static int __init armv8_pmu_driver_init(void)
{
	if (acpi_disabled)
		return platform_driver_register(&armv8_pmu_driver);
	else
		return arm_pmu_acpi_probe(armv8_pmuv3_pmu_init);
}
device_initcall(armv8_pmu_driver_init)

void arch_perf_update_userpage(struct perf_event *event,
			       struct perf_event_mmap_page *userpg, u64 now)
{
	struct clock_read_data *rd;
	unsigned int seq;
	u64 ns;

	userpg->cap_user_time = 0;
	userpg->cap_user_time_zero = 0;
	userpg->cap_user_time_short = 0;
	userpg->cap_user_rdpmc = armv8pmu_event_has_user_read(event);

	if (userpg->cap_user_rdpmc) {
		if (event->hw.flags & ARMPMU_EVT_64BIT)
			userpg->pmc_width = 64;
		else
			userpg->pmc_width = 32;
	}

	do {
		rd = sched_clock_read_begin(&seq);

		if (rd->read_sched_clock != arch_timer_read_counter)
			return;

		userpg->time_mult = rd->mult;
		userpg->time_shift = rd->shift;
		userpg->time_zero = rd->epoch_ns;
		userpg->time_cycles = rd->epoch_cyc;
		userpg->time_mask = rd->sched_clock_mask;

		/*
		 * Subtract the cycle base, such that software that
		 * doesn't know about cap_user_time_short still 'works'
		 * assuming no wraps.
		 */
		ns = mul_u64_u32_shr(rd->epoch_cyc, rd->mult, rd->shift);
		userpg->time_zero -= ns;

	} while (sched_clock_read_retry(seq));

	userpg->time_offset = userpg->time_zero - now;

	/*
	 * time_shift is not expected to be greater than 31 due to
	 * the original published conversion algorithm shifting a
	 * 32-bit value (now specifies a 64-bit value) - refer
	 * perf_event_mmap_page documentation in perf_event.h.
	 */
	if (userpg->time_shift == 32) {
		userpg->time_shift = 31;
		userpg->time_mult >>= 1;
	}

	/*
	 * Internal timekeeping for enabled/running/stopped times
	 * is always computed with the sched_clock.
	 */
	userpg->cap_user_time = 1;
	userpg->cap_user_time_zero = 1;
	userpg->cap_user_time_short = 1;
}