arch/arm/kernel/perf_event_v7.c
1/*
2 * ARMv7 Cortex-A8 and Cortex-A9 Performance Events handling code.
3 *
4 * ARMv7 support: Jean Pihet <jpihet@mvista.com>
5 * 2010 (c) MontaVista Software, LLC.
6 *
7 * Copied from ARMv6 code, with the low level code inspired
8 * by the ARMv7 Oprofile code.
9 *
10 * Cortex-A8 has up to 4 configurable performance counters and
11 * a single cycle counter.
12 * Cortex-A9 has up to 31 configurable performance counters and
13 * a single cycle counter.
14 *
15 * All counters can be enabled/disabled and IRQ masked separately. The cycle
16 * counter and all 4 performance counters together can be reset separately.
17 */
18
19#ifdef CONFIG_CPU_V7
21#include <asm/cp15.h>
22#include <asm/vfp.h>
23#include "../vfp/vfpinstr.h"
24
25/*
26 * Common ARMv7 event types
27 *
28 * Note: An implementation may not be able to count all of these events
29 * but the encodings are considered to be `reserved' in the case that
30 * they are not available.
31 */
43eab878 32enum armv7_perf_types {
33 ARMV7_PERFCTR_PMNC_SW_INCR = 0x00,
34 ARMV7_PERFCTR_L1_ICACHE_REFILL = 0x01,
35 ARMV7_PERFCTR_ITLB_REFILL = 0x02,
36 ARMV7_PERFCTR_L1_DCACHE_REFILL = 0x03,
37 ARMV7_PERFCTR_L1_DCACHE_ACCESS = 0x04,
38 ARMV7_PERFCTR_DTLB_REFILL = 0x05,
39 ARMV7_PERFCTR_MEM_READ = 0x06,
40 ARMV7_PERFCTR_MEM_WRITE = 0x07,
41 ARMV7_PERFCTR_INSTR_EXECUTED = 0x08,
42 ARMV7_PERFCTR_EXC_TAKEN = 0x09,
43 ARMV7_PERFCTR_EXC_EXECUTED = 0x0A,
44 ARMV7_PERFCTR_CID_WRITE = 0x0B,
45
46 /*
47 * ARMV7_PERFCTR_PC_WRITE is equivalent to HW_BRANCH_INSTRUCTIONS.
43eab878 48 * It counts:
4d301512 49 * - all (taken) branch instructions,
50 * - instructions that explicitly write the PC,
51 * - exception generating instructions.
52 */
53 ARMV7_PERFCTR_PC_WRITE = 0x0C,
54 ARMV7_PERFCTR_PC_IMM_BRANCH = 0x0D,
55 ARMV7_PERFCTR_PC_PROC_RETURN = 0x0E,
56 ARMV7_PERFCTR_MEM_UNALIGNED_ACCESS = 0x0F,
57 ARMV7_PERFCTR_PC_BRANCH_MIS_PRED = 0x10,
58 ARMV7_PERFCTR_CLOCK_CYCLES = 0x11,
59 ARMV7_PERFCTR_PC_BRANCH_PRED = 0x12,
60
61 /* These events are defined by the PMUv2 supplement (ARM DDI 0457A). */
62 ARMV7_PERFCTR_MEM_ACCESS = 0x13,
63 ARMV7_PERFCTR_L1_ICACHE_ACCESS = 0x14,
64 ARMV7_PERFCTR_L1_DCACHE_WB = 0x15,
65 ARMV7_PERFCTR_L2_CACHE_ACCESS = 0x16,
66 ARMV7_PERFCTR_L2_CACHE_REFILL = 0x17,
67 ARMV7_PERFCTR_L2_CACHE_WB = 0x18,
68 ARMV7_PERFCTR_BUS_ACCESS = 0x19,
69 ARMV7_PERFCTR_MEM_ERROR = 0x1A,
70 ARMV7_PERFCTR_INSTR_SPEC = 0x1B,
71 ARMV7_PERFCTR_TTBR_WRITE = 0x1C,
72 ARMV7_PERFCTR_BUS_CYCLES = 0x1D,
73
74 ARMV7_PERFCTR_CPU_CYCLES = 0xFF
75};
76
77/* ARMv7 Cortex-A8 specific event types */
78enum armv7_a8_perf_types {
79 ARMV7_A8_PERFCTR_L2_CACHE_ACCESS = 0x43,
80 ARMV7_A8_PERFCTR_L2_CACHE_REFILL = 0x44,
81 ARMV7_A8_PERFCTR_L1_ICACHE_ACCESS = 0x50,
0445e7a5 82 ARMV7_A8_PERFCTR_STALL_ISIDE = 0x56,
83};
84
85/* ARMv7 Cortex-A9 specific event types */
86enum armv7_a9_perf_types {
4d301512 87 ARMV7_A9_PERFCTR_INSTR_CORE_RENAME = 0x68,
88 ARMV7_A9_PERFCTR_STALL_ICACHE = 0x60,
89 ARMV7_A9_PERFCTR_STALL_DISPATCH = 0x66,
90};
91
92/* ARMv7 Cortex-A5 specific event types */
93enum armv7_a5_perf_types {
94 ARMV7_A5_PERFCTR_PREFETCH_LINEFILL = 0xc2,
95 ARMV7_A5_PERFCTR_PREFETCH_LINEFILL_DROP = 0xc3,
96};
97
98/* ARMv7 Cortex-A15 specific event types */
99enum armv7_a15_perf_types {
100 ARMV7_A15_PERFCTR_L1_DCACHE_ACCESS_READ = 0x40,
101 ARMV7_A15_PERFCTR_L1_DCACHE_ACCESS_WRITE = 0x41,
102 ARMV7_A15_PERFCTR_L1_DCACHE_REFILL_READ = 0x42,
103 ARMV7_A15_PERFCTR_L1_DCACHE_REFILL_WRITE = 0x43,
105 ARMV7_A15_PERFCTR_DTLB_REFILL_L1_READ = 0x4C,
106 ARMV7_A15_PERFCTR_DTLB_REFILL_L1_WRITE = 0x4D,
108 ARMV7_A15_PERFCTR_L2_CACHE_ACCESS_READ = 0x50,
109 ARMV7_A15_PERFCTR_L2_CACHE_ACCESS_WRITE = 0x51,
110 ARMV7_A15_PERFCTR_L2_CACHE_REFILL_READ = 0x52,
111 ARMV7_A15_PERFCTR_L2_CACHE_REFILL_WRITE = 0x53,
4d301512 113 ARMV7_A15_PERFCTR_PC_WRITE_SPEC = 0x76,
114};
115
116/* ARMv7 Cortex-A12 specific event types */
117enum armv7_a12_perf_types {
118 ARMV7_A12_PERFCTR_L1_DCACHE_ACCESS_READ = 0x40,
119 ARMV7_A12_PERFCTR_L1_DCACHE_ACCESS_WRITE = 0x41,
120
121 ARMV7_A12_PERFCTR_L2_CACHE_ACCESS_READ = 0x50,
122 ARMV7_A12_PERFCTR_L2_CACHE_ACCESS_WRITE = 0x51,
123
124 ARMV7_A12_PERFCTR_PC_WRITE_SPEC = 0x76,
125
126 ARMV7_A12_PERFCTR_PF_TLB_REFILL = 0xe7,
127};
128
129/* ARMv7 Krait specific event types */
130enum krait_perf_types {
131 KRAIT_PMRESR0_GROUP0 = 0xcc,
132 KRAIT_PMRESR1_GROUP0 = 0xd0,
133 KRAIT_PMRESR2_GROUP0 = 0xd4,
134 KRAIT_VPMRESR0_GROUP0 = 0xd8,
135
136 KRAIT_PERFCTR_L1_ICACHE_ACCESS = 0x10011,
137 KRAIT_PERFCTR_L1_ICACHE_MISS = 0x10010,
138
139 KRAIT_PERFCTR_L1_ITLB_ACCESS = 0x12222,
140 KRAIT_PERFCTR_L1_DTLB_ACCESS = 0x12210,
141};
142
143/* ARMv7 Scorpion specific event types */
144enum scorpion_perf_types {
145 SCORPION_LPM0_GROUP0 = 0x4c,
146 SCORPION_LPM1_GROUP0 = 0x50,
147 SCORPION_LPM2_GROUP0 = 0x54,
148 SCORPION_L2LPM_GROUP0 = 0x58,
149 SCORPION_VLPM_GROUP0 = 0x5c,
150
151 SCORPION_ICACHE_ACCESS = 0x10053,
152 SCORPION_ICACHE_MISS = 0x10052,
153
154 SCORPION_DTLB_ACCESS = 0x12013,
155 SCORPION_DTLB_MISS = 0x12012,
156
157 SCORPION_ITLB_MISS = 0x12021,
158};
159
160/*
161 * Cortex-A8 HW events mapping
162 *
163 * The hardware events that we support. We do support cache operations but
 164 * we have Harvard caches and no way to combine instruction and data
165 * accesses/misses in hardware.
166 */
167static const unsigned armv7_a8_perf_map[PERF_COUNT_HW_MAX] = {
6b7658ec 168 PERF_MAP_ALL_UNSUPPORTED,
169 [PERF_COUNT_HW_CPU_CYCLES] = ARMV7_PERFCTR_CPU_CYCLES,
170 [PERF_COUNT_HW_INSTRUCTIONS] = ARMV7_PERFCTR_INSTR_EXECUTED,
171 [PERF_COUNT_HW_CACHE_REFERENCES] = ARMV7_PERFCTR_L1_DCACHE_ACCESS,
172 [PERF_COUNT_HW_CACHE_MISSES] = ARMV7_PERFCTR_L1_DCACHE_REFILL,
173 [PERF_COUNT_HW_BRANCH_INSTRUCTIONS] = ARMV7_PERFCTR_PC_WRITE,
174 [PERF_COUNT_HW_BRANCH_MISSES] = ARMV7_PERFCTR_PC_BRANCH_MIS_PRED,
0445e7a5 175 [PERF_COUNT_HW_STALLED_CYCLES_FRONTEND] = ARMV7_A8_PERFCTR_STALL_ISIDE,
176};
177
178static const unsigned armv7_a8_perf_cache_map[PERF_COUNT_HW_CACHE_MAX]
179 [PERF_COUNT_HW_CACHE_OP_MAX]
180 [PERF_COUNT_HW_CACHE_RESULT_MAX] = {
181 PERF_CACHE_MAP_ALL_UNSUPPORTED,
182
183 /*
184 * The performance counters don't differentiate between read and write
185 * accesses/misses so this isn't strictly correct, but it's the best we
186 * can do. Writes and reads get combined.
187 */
188 [C(L1D)][C(OP_READ)][C(RESULT_ACCESS)] = ARMV7_PERFCTR_L1_DCACHE_ACCESS,
189 [C(L1D)][C(OP_READ)][C(RESULT_MISS)] = ARMV7_PERFCTR_L1_DCACHE_REFILL,
190 [C(L1D)][C(OP_WRITE)][C(RESULT_ACCESS)] = ARMV7_PERFCTR_L1_DCACHE_ACCESS,
191 [C(L1D)][C(OP_WRITE)][C(RESULT_MISS)] = ARMV7_PERFCTR_L1_DCACHE_REFILL,
192
193 [C(L1I)][C(OP_READ)][C(RESULT_ACCESS)] = ARMV7_A8_PERFCTR_L1_ICACHE_ACCESS,
194 [C(L1I)][C(OP_READ)][C(RESULT_MISS)] = ARMV7_PERFCTR_L1_ICACHE_REFILL,
195
196 [C(LL)][C(OP_READ)][C(RESULT_ACCESS)] = ARMV7_A8_PERFCTR_L2_CACHE_ACCESS,
197 [C(LL)][C(OP_READ)][C(RESULT_MISS)] = ARMV7_A8_PERFCTR_L2_CACHE_REFILL,
198 [C(LL)][C(OP_WRITE)][C(RESULT_ACCESS)] = ARMV7_A8_PERFCTR_L2_CACHE_ACCESS,
199 [C(LL)][C(OP_WRITE)][C(RESULT_MISS)] = ARMV7_A8_PERFCTR_L2_CACHE_REFILL,
200
201 [C(DTLB)][C(OP_READ)][C(RESULT_MISS)] = ARMV7_PERFCTR_DTLB_REFILL,
202 [C(DTLB)][C(OP_WRITE)][C(RESULT_MISS)] = ARMV7_PERFCTR_DTLB_REFILL,
203
204 [C(ITLB)][C(OP_READ)][C(RESULT_MISS)] = ARMV7_PERFCTR_ITLB_REFILL,
205 [C(ITLB)][C(OP_WRITE)][C(RESULT_MISS)] = ARMV7_PERFCTR_ITLB_REFILL,
206
207 [C(BPU)][C(OP_READ)][C(RESULT_ACCESS)] = ARMV7_PERFCTR_PC_BRANCH_PRED,
208 [C(BPU)][C(OP_READ)][C(RESULT_MISS)] = ARMV7_PERFCTR_PC_BRANCH_MIS_PRED,
209 [C(BPU)][C(OP_WRITE)][C(RESULT_ACCESS)] = ARMV7_PERFCTR_PC_BRANCH_PRED,
210 [C(BPU)][C(OP_WRITE)][C(RESULT_MISS)] = ARMV7_PERFCTR_PC_BRANCH_MIS_PRED,
211};
212
213/*
214 * Cortex-A9 HW events mapping
215 */
216static const unsigned armv7_a9_perf_map[PERF_COUNT_HW_MAX] = {
6b7658ec 217 PERF_MAP_ALL_UNSUPPORTED,
218 [PERF_COUNT_HW_CPU_CYCLES] = ARMV7_PERFCTR_CPU_CYCLES,
219 [PERF_COUNT_HW_INSTRUCTIONS] = ARMV7_A9_PERFCTR_INSTR_CORE_RENAME,
220 [PERF_COUNT_HW_CACHE_REFERENCES] = ARMV7_PERFCTR_L1_DCACHE_ACCESS,
221 [PERF_COUNT_HW_CACHE_MISSES] = ARMV7_PERFCTR_L1_DCACHE_REFILL,
222 [PERF_COUNT_HW_BRANCH_INSTRUCTIONS] = ARMV7_PERFCTR_PC_WRITE,
223 [PERF_COUNT_HW_BRANCH_MISSES] = ARMV7_PERFCTR_PC_BRANCH_MIS_PRED,
224 [PERF_COUNT_HW_STALLED_CYCLES_FRONTEND] = ARMV7_A9_PERFCTR_STALL_ICACHE,
225 [PERF_COUNT_HW_STALLED_CYCLES_BACKEND] = ARMV7_A9_PERFCTR_STALL_DISPATCH,
226};
227
228static const unsigned armv7_a9_perf_cache_map[PERF_COUNT_HW_CACHE_MAX]
229 [PERF_COUNT_HW_CACHE_OP_MAX]
230 [PERF_COUNT_HW_CACHE_RESULT_MAX] = {
231 PERF_CACHE_MAP_ALL_UNSUPPORTED,
232
233 /*
234 * The performance counters don't differentiate between read and write
235 * accesses/misses so this isn't strictly correct, but it's the best we
236 * can do. Writes and reads get combined.
237 */
238 [C(L1D)][C(OP_READ)][C(RESULT_ACCESS)] = ARMV7_PERFCTR_L1_DCACHE_ACCESS,
239 [C(L1D)][C(OP_READ)][C(RESULT_MISS)] = ARMV7_PERFCTR_L1_DCACHE_REFILL,
240 [C(L1D)][C(OP_WRITE)][C(RESULT_ACCESS)] = ARMV7_PERFCTR_L1_DCACHE_ACCESS,
241 [C(L1D)][C(OP_WRITE)][C(RESULT_MISS)] = ARMV7_PERFCTR_L1_DCACHE_REFILL,
242
243 [C(L1I)][C(OP_READ)][C(RESULT_MISS)] = ARMV7_PERFCTR_L1_ICACHE_REFILL,
244
245 [C(DTLB)][C(OP_READ)][C(RESULT_MISS)] = ARMV7_PERFCTR_DTLB_REFILL,
246 [C(DTLB)][C(OP_WRITE)][C(RESULT_MISS)] = ARMV7_PERFCTR_DTLB_REFILL,
247
248 [C(ITLB)][C(OP_READ)][C(RESULT_MISS)] = ARMV7_PERFCTR_ITLB_REFILL,
249 [C(ITLB)][C(OP_WRITE)][C(RESULT_MISS)] = ARMV7_PERFCTR_ITLB_REFILL,
250
251 [C(BPU)][C(OP_READ)][C(RESULT_ACCESS)] = ARMV7_PERFCTR_PC_BRANCH_PRED,
252 [C(BPU)][C(OP_READ)][C(RESULT_MISS)] = ARMV7_PERFCTR_PC_BRANCH_MIS_PRED,
253 [C(BPU)][C(OP_WRITE)][C(RESULT_ACCESS)] = ARMV7_PERFCTR_PC_BRANCH_PRED,
254 [C(BPU)][C(OP_WRITE)][C(RESULT_MISS)] = ARMV7_PERFCTR_PC_BRANCH_MIS_PRED,
255};
256
257/*
258 * Cortex-A5 HW events mapping
259 */
260static const unsigned armv7_a5_perf_map[PERF_COUNT_HW_MAX] = {
6b7658ec 261 PERF_MAP_ALL_UNSUPPORTED,
262 [PERF_COUNT_HW_CPU_CYCLES] = ARMV7_PERFCTR_CPU_CYCLES,
263 [PERF_COUNT_HW_INSTRUCTIONS] = ARMV7_PERFCTR_INSTR_EXECUTED,
264 [PERF_COUNT_HW_CACHE_REFERENCES] = ARMV7_PERFCTR_L1_DCACHE_ACCESS,
265 [PERF_COUNT_HW_CACHE_MISSES] = ARMV7_PERFCTR_L1_DCACHE_REFILL,
266 [PERF_COUNT_HW_BRANCH_INSTRUCTIONS] = ARMV7_PERFCTR_PC_WRITE,
267 [PERF_COUNT_HW_BRANCH_MISSES] = ARMV7_PERFCTR_PC_BRANCH_MIS_PRED,
268};
269
270static const unsigned armv7_a5_perf_cache_map[PERF_COUNT_HW_CACHE_MAX]
271 [PERF_COUNT_HW_CACHE_OP_MAX]
272 [PERF_COUNT_HW_CACHE_RESULT_MAX] = {
273 PERF_CACHE_MAP_ALL_UNSUPPORTED,
274
275 [C(L1D)][C(OP_READ)][C(RESULT_ACCESS)] = ARMV7_PERFCTR_L1_DCACHE_ACCESS,
276 [C(L1D)][C(OP_READ)][C(RESULT_MISS)] = ARMV7_PERFCTR_L1_DCACHE_REFILL,
277 [C(L1D)][C(OP_WRITE)][C(RESULT_ACCESS)] = ARMV7_PERFCTR_L1_DCACHE_ACCESS,
278 [C(L1D)][C(OP_WRITE)][C(RESULT_MISS)] = ARMV7_PERFCTR_L1_DCACHE_REFILL,
279 [C(L1D)][C(OP_PREFETCH)][C(RESULT_ACCESS)] = ARMV7_A5_PERFCTR_PREFETCH_LINEFILL,
280 [C(L1D)][C(OP_PREFETCH)][C(RESULT_MISS)] = ARMV7_A5_PERFCTR_PREFETCH_LINEFILL_DROP,
281
282 [C(L1I)][C(OP_READ)][C(RESULT_ACCESS)] = ARMV7_PERFCTR_L1_ICACHE_ACCESS,
283 [C(L1I)][C(OP_READ)][C(RESULT_MISS)] = ARMV7_PERFCTR_L1_ICACHE_REFILL,
284 /*
285 * The prefetch counters don't differentiate between the I side and the
286 * D side.
287 */
288 [C(L1I)][C(OP_PREFETCH)][C(RESULT_ACCESS)] = ARMV7_A5_PERFCTR_PREFETCH_LINEFILL,
289 [C(L1I)][C(OP_PREFETCH)][C(RESULT_MISS)] = ARMV7_A5_PERFCTR_PREFETCH_LINEFILL_DROP,
290
291 [C(DTLB)][C(OP_READ)][C(RESULT_MISS)] = ARMV7_PERFCTR_DTLB_REFILL,
292 [C(DTLB)][C(OP_WRITE)][C(RESULT_MISS)] = ARMV7_PERFCTR_DTLB_REFILL,
293
294 [C(ITLB)][C(OP_READ)][C(RESULT_MISS)] = ARMV7_PERFCTR_ITLB_REFILL,
295 [C(ITLB)][C(OP_WRITE)][C(RESULT_MISS)] = ARMV7_PERFCTR_ITLB_REFILL,
296
297 [C(BPU)][C(OP_READ)][C(RESULT_ACCESS)] = ARMV7_PERFCTR_PC_BRANCH_PRED,
298 [C(BPU)][C(OP_READ)][C(RESULT_MISS)] = ARMV7_PERFCTR_PC_BRANCH_MIS_PRED,
299 [C(BPU)][C(OP_WRITE)][C(RESULT_ACCESS)] = ARMV7_PERFCTR_PC_BRANCH_PRED,
300 [C(BPU)][C(OP_WRITE)][C(RESULT_MISS)] = ARMV7_PERFCTR_PC_BRANCH_MIS_PRED,
301};
302
303/*
304 * Cortex-A15 HW events mapping
305 */
306static const unsigned armv7_a15_perf_map[PERF_COUNT_HW_MAX] = {
6b7658ec 307 PERF_MAP_ALL_UNSUPPORTED,
308 [PERF_COUNT_HW_CPU_CYCLES] = ARMV7_PERFCTR_CPU_CYCLES,
309 [PERF_COUNT_HW_INSTRUCTIONS] = ARMV7_PERFCTR_INSTR_EXECUTED,
310 [PERF_COUNT_HW_CACHE_REFERENCES] = ARMV7_PERFCTR_L1_DCACHE_ACCESS,
311 [PERF_COUNT_HW_CACHE_MISSES] = ARMV7_PERFCTR_L1_DCACHE_REFILL,
312 [PERF_COUNT_HW_BRANCH_INSTRUCTIONS] = ARMV7_A15_PERFCTR_PC_WRITE_SPEC,
313 [PERF_COUNT_HW_BRANCH_MISSES] = ARMV7_PERFCTR_PC_BRANCH_MIS_PRED,
314 [PERF_COUNT_HW_BUS_CYCLES] = ARMV7_PERFCTR_BUS_CYCLES,
315};
316
317static const unsigned armv7_a15_perf_cache_map[PERF_COUNT_HW_CACHE_MAX]
318 [PERF_COUNT_HW_CACHE_OP_MAX]
319 [PERF_COUNT_HW_CACHE_RESULT_MAX] = {
320 PERF_CACHE_MAP_ALL_UNSUPPORTED,
321
322 [C(L1D)][C(OP_READ)][C(RESULT_ACCESS)] = ARMV7_A15_PERFCTR_L1_DCACHE_ACCESS_READ,
323 [C(L1D)][C(OP_READ)][C(RESULT_MISS)] = ARMV7_A15_PERFCTR_L1_DCACHE_REFILL_READ,
324 [C(L1D)][C(OP_WRITE)][C(RESULT_ACCESS)] = ARMV7_A15_PERFCTR_L1_DCACHE_ACCESS_WRITE,
325 [C(L1D)][C(OP_WRITE)][C(RESULT_MISS)] = ARMV7_A15_PERFCTR_L1_DCACHE_REFILL_WRITE,
326
327 /*
328 * Not all performance counters differentiate between read and write
329 * accesses/misses so we're not always strictly correct, but it's the
330 * best we can do. Writes and reads get combined in these cases.
331 */
332 [C(L1I)][C(OP_READ)][C(RESULT_ACCESS)] = ARMV7_PERFCTR_L1_ICACHE_ACCESS,
333 [C(L1I)][C(OP_READ)][C(RESULT_MISS)] = ARMV7_PERFCTR_L1_ICACHE_REFILL,
334
335 [C(LL)][C(OP_READ)][C(RESULT_ACCESS)] = ARMV7_A15_PERFCTR_L2_CACHE_ACCESS_READ,
336 [C(LL)][C(OP_READ)][C(RESULT_MISS)] = ARMV7_A15_PERFCTR_L2_CACHE_REFILL_READ,
337 [C(LL)][C(OP_WRITE)][C(RESULT_ACCESS)] = ARMV7_A15_PERFCTR_L2_CACHE_ACCESS_WRITE,
338 [C(LL)][C(OP_WRITE)][C(RESULT_MISS)] = ARMV7_A15_PERFCTR_L2_CACHE_REFILL_WRITE,
339
340 [C(DTLB)][C(OP_READ)][C(RESULT_MISS)] = ARMV7_A15_PERFCTR_DTLB_REFILL_L1_READ,
341 [C(DTLB)][C(OP_WRITE)][C(RESULT_MISS)] = ARMV7_A15_PERFCTR_DTLB_REFILL_L1_WRITE,
342
343 [C(ITLB)][C(OP_READ)][C(RESULT_MISS)] = ARMV7_PERFCTR_ITLB_REFILL,
344 [C(ITLB)][C(OP_WRITE)][C(RESULT_MISS)] = ARMV7_PERFCTR_ITLB_REFILL,
345
346 [C(BPU)][C(OP_READ)][C(RESULT_ACCESS)] = ARMV7_PERFCTR_PC_BRANCH_PRED,
347 [C(BPU)][C(OP_READ)][C(RESULT_MISS)] = ARMV7_PERFCTR_PC_BRANCH_MIS_PRED,
348 [C(BPU)][C(OP_WRITE)][C(RESULT_ACCESS)] = ARMV7_PERFCTR_PC_BRANCH_PRED,
349 [C(BPU)][C(OP_WRITE)][C(RESULT_MISS)] = ARMV7_PERFCTR_PC_BRANCH_MIS_PRED,
350};
351
352/*
353 * Cortex-A7 HW events mapping
354 */
355static const unsigned armv7_a7_perf_map[PERF_COUNT_HW_MAX] = {
6b7658ec 356 PERF_MAP_ALL_UNSUPPORTED,
357 [PERF_COUNT_HW_CPU_CYCLES] = ARMV7_PERFCTR_CPU_CYCLES,
358 [PERF_COUNT_HW_INSTRUCTIONS] = ARMV7_PERFCTR_INSTR_EXECUTED,
359 [PERF_COUNT_HW_CACHE_REFERENCES] = ARMV7_PERFCTR_L1_DCACHE_ACCESS,
360 [PERF_COUNT_HW_CACHE_MISSES] = ARMV7_PERFCTR_L1_DCACHE_REFILL,
361 [PERF_COUNT_HW_BRANCH_INSTRUCTIONS] = ARMV7_PERFCTR_PC_WRITE,
362 [PERF_COUNT_HW_BRANCH_MISSES] = ARMV7_PERFCTR_PC_BRANCH_MIS_PRED,
363 [PERF_COUNT_HW_BUS_CYCLES] = ARMV7_PERFCTR_BUS_CYCLES,
364};
365
366static const unsigned armv7_a7_perf_cache_map[PERF_COUNT_HW_CACHE_MAX]
367 [PERF_COUNT_HW_CACHE_OP_MAX]
368 [PERF_COUNT_HW_CACHE_RESULT_MAX] = {
369 PERF_CACHE_MAP_ALL_UNSUPPORTED,
370
371 /*
372 * The performance counters don't differentiate between read and write
373 * accesses/misses so this isn't strictly correct, but it's the best we
374 * can do. Writes and reads get combined.
375 */
376 [C(L1D)][C(OP_READ)][C(RESULT_ACCESS)] = ARMV7_PERFCTR_L1_DCACHE_ACCESS,
377 [C(L1D)][C(OP_READ)][C(RESULT_MISS)] = ARMV7_PERFCTR_L1_DCACHE_REFILL,
378 [C(L1D)][C(OP_WRITE)][C(RESULT_ACCESS)] = ARMV7_PERFCTR_L1_DCACHE_ACCESS,
379 [C(L1D)][C(OP_WRITE)][C(RESULT_MISS)] = ARMV7_PERFCTR_L1_DCACHE_REFILL,
380
381 [C(L1I)][C(OP_READ)][C(RESULT_ACCESS)] = ARMV7_PERFCTR_L1_ICACHE_ACCESS,
382 [C(L1I)][C(OP_READ)][C(RESULT_MISS)] = ARMV7_PERFCTR_L1_ICACHE_REFILL,
383
384 [C(LL)][C(OP_READ)][C(RESULT_ACCESS)] = ARMV7_PERFCTR_L2_CACHE_ACCESS,
385 [C(LL)][C(OP_READ)][C(RESULT_MISS)] = ARMV7_PERFCTR_L2_CACHE_REFILL,
386 [C(LL)][C(OP_WRITE)][C(RESULT_ACCESS)] = ARMV7_PERFCTR_L2_CACHE_ACCESS,
387 [C(LL)][C(OP_WRITE)][C(RESULT_MISS)] = ARMV7_PERFCTR_L2_CACHE_REFILL,
388
389 [C(DTLB)][C(OP_READ)][C(RESULT_MISS)] = ARMV7_PERFCTR_DTLB_REFILL,
390 [C(DTLB)][C(OP_WRITE)][C(RESULT_MISS)] = ARMV7_PERFCTR_DTLB_REFILL,
391
392 [C(ITLB)][C(OP_READ)][C(RESULT_MISS)] = ARMV7_PERFCTR_ITLB_REFILL,
393 [C(ITLB)][C(OP_WRITE)][C(RESULT_MISS)] = ARMV7_PERFCTR_ITLB_REFILL,
394
395 [C(BPU)][C(OP_READ)][C(RESULT_ACCESS)] = ARMV7_PERFCTR_PC_BRANCH_PRED,
396 [C(BPU)][C(OP_READ)][C(RESULT_MISS)] = ARMV7_PERFCTR_PC_BRANCH_MIS_PRED,
397 [C(BPU)][C(OP_WRITE)][C(RESULT_ACCESS)] = ARMV7_PERFCTR_PC_BRANCH_PRED,
398 [C(BPU)][C(OP_WRITE)][C(RESULT_MISS)] = ARMV7_PERFCTR_PC_BRANCH_MIS_PRED,
399};
400
401/*
402 * Cortex-A12 HW events mapping
403 */
404static const unsigned armv7_a12_perf_map[PERF_COUNT_HW_MAX] = {
6b7658ec 405 PERF_MAP_ALL_UNSUPPORTED,
406 [PERF_COUNT_HW_CPU_CYCLES] = ARMV7_PERFCTR_CPU_CYCLES,
407 [PERF_COUNT_HW_INSTRUCTIONS] = ARMV7_PERFCTR_INSTR_EXECUTED,
408 [PERF_COUNT_HW_CACHE_REFERENCES] = ARMV7_PERFCTR_L1_DCACHE_ACCESS,
409 [PERF_COUNT_HW_CACHE_MISSES] = ARMV7_PERFCTR_L1_DCACHE_REFILL,
410 [PERF_COUNT_HW_BRANCH_INSTRUCTIONS] = ARMV7_A12_PERFCTR_PC_WRITE_SPEC,
411 [PERF_COUNT_HW_BRANCH_MISSES] = ARMV7_PERFCTR_PC_BRANCH_MIS_PRED,
412 [PERF_COUNT_HW_BUS_CYCLES] = ARMV7_PERFCTR_BUS_CYCLES,
413};
414
415static const unsigned armv7_a12_perf_cache_map[PERF_COUNT_HW_CACHE_MAX]
416 [PERF_COUNT_HW_CACHE_OP_MAX]
417 [PERF_COUNT_HW_CACHE_RESULT_MAX] = {
418 PERF_CACHE_MAP_ALL_UNSUPPORTED,
419
420 [C(L1D)][C(OP_READ)][C(RESULT_ACCESS)] = ARMV7_A12_PERFCTR_L1_DCACHE_ACCESS_READ,
421 [C(L1D)][C(OP_READ)][C(RESULT_MISS)] = ARMV7_PERFCTR_L1_DCACHE_REFILL,
422 [C(L1D)][C(OP_WRITE)][C(RESULT_ACCESS)] = ARMV7_A12_PERFCTR_L1_DCACHE_ACCESS_WRITE,
423 [C(L1D)][C(OP_WRITE)][C(RESULT_MISS)] = ARMV7_PERFCTR_L1_DCACHE_REFILL,
424
425 /*
426 * Not all performance counters differentiate between read and write
427 * accesses/misses so we're not always strictly correct, but it's the
428 * best we can do. Writes and reads get combined in these cases.
429 */
430 [C(L1I)][C(OP_READ)][C(RESULT_ACCESS)] = ARMV7_PERFCTR_L1_ICACHE_ACCESS,
431 [C(L1I)][C(OP_READ)][C(RESULT_MISS)] = ARMV7_PERFCTR_L1_ICACHE_REFILL,
432
433 [C(LL)][C(OP_READ)][C(RESULT_ACCESS)] = ARMV7_A12_PERFCTR_L2_CACHE_ACCESS_READ,
434 [C(LL)][C(OP_READ)][C(RESULT_MISS)] = ARMV7_PERFCTR_L2_CACHE_REFILL,
435 [C(LL)][C(OP_WRITE)][C(RESULT_ACCESS)] = ARMV7_A12_PERFCTR_L2_CACHE_ACCESS_WRITE,
436 [C(LL)][C(OP_WRITE)][C(RESULT_MISS)] = ARMV7_PERFCTR_L2_CACHE_REFILL,
437
438 [C(DTLB)][C(OP_READ)][C(RESULT_MISS)] = ARMV7_PERFCTR_DTLB_REFILL,
439 [C(DTLB)][C(OP_WRITE)][C(RESULT_MISS)] = ARMV7_PERFCTR_DTLB_REFILL,
440 [C(DTLB)][C(OP_PREFETCH)][C(RESULT_MISS)] = ARMV7_A12_PERFCTR_PF_TLB_REFILL,
441
442 [C(ITLB)][C(OP_READ)][C(RESULT_MISS)] = ARMV7_PERFCTR_ITLB_REFILL,
443 [C(ITLB)][C(OP_WRITE)][C(RESULT_MISS)] = ARMV7_PERFCTR_ITLB_REFILL,
444
445 [C(BPU)][C(OP_READ)][C(RESULT_ACCESS)] = ARMV7_PERFCTR_PC_BRANCH_PRED,
446 [C(BPU)][C(OP_READ)][C(RESULT_MISS)] = ARMV7_PERFCTR_PC_BRANCH_MIS_PRED,
447 [C(BPU)][C(OP_WRITE)][C(RESULT_ACCESS)] = ARMV7_PERFCTR_PC_BRANCH_PRED,
448 [C(BPU)][C(OP_WRITE)][C(RESULT_MISS)] = ARMV7_PERFCTR_PC_BRANCH_MIS_PRED,
449};
450
451/*
452 * Krait HW events mapping
453 */
454static const unsigned krait_perf_map[PERF_COUNT_HW_MAX] = {
6b7658ec 455 PERF_MAP_ALL_UNSUPPORTED,
456 [PERF_COUNT_HW_CPU_CYCLES] = ARMV7_PERFCTR_CPU_CYCLES,
457 [PERF_COUNT_HW_INSTRUCTIONS] = ARMV7_PERFCTR_INSTR_EXECUTED,
458 [PERF_COUNT_HW_BRANCH_INSTRUCTIONS] = ARMV7_PERFCTR_PC_WRITE,
459 [PERF_COUNT_HW_BRANCH_MISSES] = ARMV7_PERFCTR_PC_BRANCH_MIS_PRED,
460 [PERF_COUNT_HW_BUS_CYCLES] = ARMV7_PERFCTR_CLOCK_CYCLES,
461};
462
463static const unsigned krait_perf_map_no_branch[PERF_COUNT_HW_MAX] = {
6b7658ec 464 PERF_MAP_ALL_UNSUPPORTED,
465 [PERF_COUNT_HW_CPU_CYCLES] = ARMV7_PERFCTR_CPU_CYCLES,
466 [PERF_COUNT_HW_INSTRUCTIONS] = ARMV7_PERFCTR_INSTR_EXECUTED,
467 [PERF_COUNT_HW_BRANCH_MISSES] = ARMV7_PERFCTR_PC_BRANCH_MIS_PRED,
468 [PERF_COUNT_HW_BUS_CYCLES] = ARMV7_PERFCTR_CLOCK_CYCLES,
469};
470
471static const unsigned krait_perf_cache_map[PERF_COUNT_HW_CACHE_MAX]
472 [PERF_COUNT_HW_CACHE_OP_MAX]
473 [PERF_COUNT_HW_CACHE_RESULT_MAX] = {
474 PERF_CACHE_MAP_ALL_UNSUPPORTED,
475
476 /*
477 * The performance counters don't differentiate between read and write
478 * accesses/misses so this isn't strictly correct, but it's the best we
479 * can do. Writes and reads get combined.
480 */
481 [C(L1D)][C(OP_READ)][C(RESULT_ACCESS)] = ARMV7_PERFCTR_L1_DCACHE_ACCESS,
482 [C(L1D)][C(OP_READ)][C(RESULT_MISS)] = ARMV7_PERFCTR_L1_DCACHE_REFILL,
483 [C(L1D)][C(OP_WRITE)][C(RESULT_ACCESS)] = ARMV7_PERFCTR_L1_DCACHE_ACCESS,
484 [C(L1D)][C(OP_WRITE)][C(RESULT_MISS)] = ARMV7_PERFCTR_L1_DCACHE_REFILL,
485
486 [C(L1I)][C(OP_READ)][C(RESULT_ACCESS)] = KRAIT_PERFCTR_L1_ICACHE_ACCESS,
487 [C(L1I)][C(OP_READ)][C(RESULT_MISS)] = KRAIT_PERFCTR_L1_ICACHE_MISS,
488
489 [C(DTLB)][C(OP_READ)][C(RESULT_ACCESS)] = KRAIT_PERFCTR_L1_DTLB_ACCESS,
490 [C(DTLB)][C(OP_WRITE)][C(RESULT_ACCESS)] = KRAIT_PERFCTR_L1_DTLB_ACCESS,
491
492 [C(ITLB)][C(OP_READ)][C(RESULT_ACCESS)] = KRAIT_PERFCTR_L1_ITLB_ACCESS,
493 [C(ITLB)][C(OP_WRITE)][C(RESULT_ACCESS)] = KRAIT_PERFCTR_L1_ITLB_ACCESS,
494
495 [C(BPU)][C(OP_READ)][C(RESULT_ACCESS)] = ARMV7_PERFCTR_PC_BRANCH_PRED,
496 [C(BPU)][C(OP_READ)][C(RESULT_MISS)] = ARMV7_PERFCTR_PC_BRANCH_MIS_PRED,
497 [C(BPU)][C(OP_WRITE)][C(RESULT_ACCESS)] = ARMV7_PERFCTR_PC_BRANCH_PRED,
498 [C(BPU)][C(OP_WRITE)][C(RESULT_MISS)] = ARMV7_PERFCTR_PC_BRANCH_MIS_PRED,
499};
500
501/*
502 * Scorpion HW events mapping
503 */
504static const unsigned scorpion_perf_map[PERF_COUNT_HW_MAX] = {
505 PERF_MAP_ALL_UNSUPPORTED,
506 [PERF_COUNT_HW_CPU_CYCLES] = ARMV7_PERFCTR_CPU_CYCLES,
507 [PERF_COUNT_HW_INSTRUCTIONS] = ARMV7_PERFCTR_INSTR_EXECUTED,
508 [PERF_COUNT_HW_BRANCH_INSTRUCTIONS] = ARMV7_PERFCTR_PC_WRITE,
509 [PERF_COUNT_HW_BRANCH_MISSES] = ARMV7_PERFCTR_PC_BRANCH_MIS_PRED,
510 [PERF_COUNT_HW_BUS_CYCLES] = ARMV7_PERFCTR_CLOCK_CYCLES,
511};
512
513static const unsigned scorpion_perf_cache_map[PERF_COUNT_HW_CACHE_MAX]
514 [PERF_COUNT_HW_CACHE_OP_MAX]
515 [PERF_COUNT_HW_CACHE_RESULT_MAX] = {
516 PERF_CACHE_MAP_ALL_UNSUPPORTED,
517 /*
518 * The performance counters don't differentiate between read and write
519 * accesses/misses so this isn't strictly correct, but it's the best we
520 * can do. Writes and reads get combined.
521 */
522 [C(L1D)][C(OP_READ)][C(RESULT_ACCESS)] = ARMV7_PERFCTR_L1_DCACHE_ACCESS,
523 [C(L1D)][C(OP_READ)][C(RESULT_MISS)] = ARMV7_PERFCTR_L1_DCACHE_REFILL,
524 [C(L1D)][C(OP_WRITE)][C(RESULT_ACCESS)] = ARMV7_PERFCTR_L1_DCACHE_ACCESS,
525 [C(L1D)][C(OP_WRITE)][C(RESULT_MISS)] = ARMV7_PERFCTR_L1_DCACHE_REFILL,
526 [C(L1I)][C(OP_READ)][C(RESULT_ACCESS)] = SCORPION_ICACHE_ACCESS,
527 [C(L1I)][C(OP_READ)][C(RESULT_MISS)] = SCORPION_ICACHE_MISS,
528 /*
 529 * Only ITLB misses and DTLB refills are supported. If users want the
 530 * DTLB refill misses, a raw counter must be used.
531 */
532 [C(DTLB)][C(OP_READ)][C(RESULT_ACCESS)] = SCORPION_DTLB_ACCESS,
533 [C(DTLB)][C(OP_READ)][C(RESULT_MISS)] = SCORPION_DTLB_MISS,
534 [C(DTLB)][C(OP_WRITE)][C(RESULT_ACCESS)] = SCORPION_DTLB_ACCESS,
535 [C(DTLB)][C(OP_WRITE)][C(RESULT_MISS)] = SCORPION_DTLB_MISS,
536 [C(ITLB)][C(OP_READ)][C(RESULT_MISS)] = SCORPION_ITLB_MISS,
537 [C(ITLB)][C(OP_WRITE)][C(RESULT_MISS)] = SCORPION_ITLB_MISS,
538 [C(BPU)][C(OP_READ)][C(RESULT_ACCESS)] = ARMV7_PERFCTR_PC_BRANCH_PRED,
539 [C(BPU)][C(OP_READ)][C(RESULT_MISS)] = ARMV7_PERFCTR_PC_BRANCH_MIS_PRED,
540 [C(BPU)][C(OP_WRITE)][C(RESULT_ACCESS)] = ARMV7_PERFCTR_PC_BRANCH_PRED,
541 [C(BPU)][C(OP_WRITE)][C(RESULT_MISS)] = ARMV7_PERFCTR_PC_BRANCH_MIS_PRED,
542};
543
43eab878 544/*
c691bb62 545 * Perf Events' indices
43eab878 546 */
547#define ARMV7_IDX_CYCLE_COUNTER 0
548#define ARMV7_IDX_COUNTER0 1
549#define ARMV7_IDX_COUNTER_LAST(cpu_pmu) \
550 (ARMV7_IDX_CYCLE_COUNTER + cpu_pmu->num_events - 1)
551
552#define ARMV7_MAX_COUNTERS 32
553#define ARMV7_COUNTER_MASK (ARMV7_MAX_COUNTERS - 1)
554
555/*
c691bb62 556 * ARMv7 low level PMNC access
43eab878 557 */
558
559/*
c691bb62 560 * Perf Event to low level counters mapping
43eab878 561 */
562#define ARMV7_IDX_TO_COUNTER(x) \
563 (((x) - ARMV7_IDX_COUNTER0) & ARMV7_COUNTER_MASK)
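/*
 * A quick worked example of the mapping above: ARMV7_IDX_TO_COUNTER(1)
 * is 0 and ARMV7_IDX_TO_COUNTER(2) is 1, so perf index N drives event
 * counter PMN(N - 1). For ARMV7_IDX_CYCLE_COUNTER the result is
 * (0 - 1) & 0x1f == 31, which is the bit the cycle counter occupies in
 * the enable, interrupt-enable and overflow registers used below.
 */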
564
565/*
566 * Per-CPU PMNC: config reg
567 */
568#define ARMV7_PMNC_E (1 << 0) /* Enable all counters */
569#define ARMV7_PMNC_P (1 << 1) /* Reset all counters */
570#define ARMV7_PMNC_C (1 << 2) /* Cycle counter reset */
571#define ARMV7_PMNC_D (1 << 3) /* CCNT counts every 64th cpu cycle */
572#define ARMV7_PMNC_X (1 << 4) /* Export to ETM */
 573#define ARMV7_PMNC_DP (1 << 5) /* Disable CCNT if non-invasive debug */
574#define ARMV7_PMNC_N_SHIFT 11 /* Number of counters supported */
575#define ARMV7_PMNC_N_MASK 0x1f
576#define ARMV7_PMNC_MASK 0x3f /* Mask for writable bits */
577
578/*
43eab878 579 * FLAG: counters overflow flag status reg
43eab878 580 */
581#define ARMV7_FLAG_MASK 0xffffffff /* Mask for writable bits */
582#define ARMV7_OVERFLOWED_MASK ARMV7_FLAG_MASK
583
584/*
a505addc 585 * PMXEVTYPER: Event selection reg
43eab878 586 */
f2fe09b0 587#define ARMV7_EVTYPE_MASK 0xc80000ff /* Mask for writable bits */
a505addc 588#define ARMV7_EVTYPE_EVENT 0xff /* Mask for EVENT bits */
589
590/*
a505addc 591 * Event filters for PMUv2
43eab878 592 */
593#define ARMV7_EXCLUDE_PL1 (1 << 31)
594#define ARMV7_EXCLUDE_USER (1 << 30)
595#define ARMV7_INCLUDE_HYP (1 << 27)
6330aae7 597static inline u32 armv7_pmnc_read(void)
598{
599 u32 val;
600 asm volatile("mrc p15, 0, %0, c9, c12, 0" : "=r"(val));
601 return val;
602}
603
6330aae7 604static inline void armv7_pmnc_write(u32 val)
605{
606 val &= ARMV7_PMNC_MASK;
d25d3b4c 607 isb();
608 asm volatile("mcr p15, 0, %0, c9, c12, 0" : : "r"(val));
609}
610
6330aae7 611static inline int armv7_pmnc_has_overflowed(u32 pmnc)
612{
613 return pmnc & ARMV7_OVERFLOWED_MASK;
614}
615
7279adbd 616static inline int armv7_pmnc_counter_valid(struct arm_pmu *cpu_pmu, int idx)
c691bb62 617{
618 return idx >= ARMV7_IDX_CYCLE_COUNTER &&
619 idx <= ARMV7_IDX_COUNTER_LAST(cpu_pmu);
620}
621
622static inline int armv7_pmnc_counter_has_overflowed(u32 pmnc, int idx)
43eab878 623{
7279adbd 624 return pmnc & BIT(ARMV7_IDX_TO_COUNTER(idx));
625}
626
cb6eb108 627static inline void armv7_pmnc_select_counter(int idx)
43eab878 628{
7279adbd 629 u32 counter = ARMV7_IDX_TO_COUNTER(idx);
c691bb62 630 asm volatile("mcr p15, 0, %0, c9, c12, 5" : : "r" (counter));
d25d3b4c 631 isb();
632}
633
ed6f2a52 634static inline u32 armv7pmu_read_counter(struct perf_event *event)
43eab878 635{
7279adbd 636 struct arm_pmu *cpu_pmu = to_arm_pmu(event->pmu);
637 struct hw_perf_event *hwc = &event->hw;
638 int idx = hwc->idx;
6330aae7 639 u32 value = 0;
cb6eb108 641 if (!armv7_pmnc_counter_valid(cpu_pmu, idx)) {
642 pr_err("CPU%u reading wrong counter %d\n",
643 smp_processor_id(), idx);
cb6eb108 644 } else if (idx == ARMV7_IDX_CYCLE_COUNTER) {
c691bb62 645 asm volatile("mrc p15, 0, %0, c9, c13, 0" : "=r" (value));
cb6eb108 646 } else {
647 armv7_pmnc_select_counter(idx);
c691bb62 648 asm volatile("mrc p15, 0, %0, c9, c13, 2" : "=r" (value));
cb6eb108 649 }
650
651 return value;
652}
653
ed6f2a52 654static inline void armv7pmu_write_counter(struct perf_event *event, u32 value)
43eab878 655{
7279adbd 656 struct arm_pmu *cpu_pmu = to_arm_pmu(event->pmu);
657 struct hw_perf_event *hwc = &event->hw;
658 int idx = hwc->idx;
659
cb6eb108 660 if (!armv7_pmnc_counter_valid(cpu_pmu, idx)) {
661 pr_err("CPU%u writing wrong counter %d\n",
662 smp_processor_id(), idx);
cb6eb108 663 } else if (idx == ARMV7_IDX_CYCLE_COUNTER) {
c691bb62 664 asm volatile("mcr p15, 0, %0, c9, c13, 0" : : "r" (value));
cb6eb108 665 } else {
666 armv7_pmnc_select_counter(idx);
c691bb62 667 asm volatile("mcr p15, 0, %0, c9, c13, 2" : : "r" (value));
cb6eb108 668 }
669}
670
25e29c7c 671static inline void armv7_pmnc_write_evtsel(int idx, u32 val)
43eab878 672{
cb6eb108 673 armv7_pmnc_select_counter(idx);
674 val &= ARMV7_EVTYPE_MASK;
675 asm volatile("mcr p15, 0, %0, c9, c13, 1" : : "r" (val));
676}
677
cb6eb108 678static inline void armv7_pmnc_enable_counter(int idx)
43eab878 679{
7279adbd 680 u32 counter = ARMV7_IDX_TO_COUNTER(idx);
c691bb62 681 asm volatile("mcr p15, 0, %0, c9, c12, 1" : : "r" (BIT(counter)));
682}
683
cb6eb108 684static inline void armv7_pmnc_disable_counter(int idx)
43eab878 685{
7279adbd 686 u32 counter = ARMV7_IDX_TO_COUNTER(idx);
c691bb62 687 asm volatile("mcr p15, 0, %0, c9, c12, 2" : : "r" (BIT(counter)));
688}
689
cb6eb108 690static inline void armv7_pmnc_enable_intens(int idx)
43eab878 691{
7279adbd 692 u32 counter = ARMV7_IDX_TO_COUNTER(idx);
c691bb62 693 asm volatile("mcr p15, 0, %0, c9, c14, 1" : : "r" (BIT(counter)));
694}
695
cb6eb108 696static inline void armv7_pmnc_disable_intens(int idx)
43eab878 697{
7279adbd 698 u32 counter = ARMV7_IDX_TO_COUNTER(idx);
c691bb62 699 asm volatile("mcr p15, 0, %0, c9, c14, 2" : : "r" (BIT(counter)));
700 isb();
701 /* Clear the overflow flag in case an interrupt is pending. */
702 asm volatile("mcr p15, 0, %0, c9, c12, 3" : : "r" (BIT(counter)));
703 isb();
704}
705
706static inline u32 armv7_pmnc_getreset_flags(void)
707{
708 u32 val;
709
710 /* Read */
711 asm volatile("mrc p15, 0, %0, c9, c12, 3" : "=r" (val));
712
713 /* Write to clear flags */
714 val &= ARMV7_FLAG_MASK;
715 asm volatile("mcr p15, 0, %0, c9, c12, 3" : : "r" (val));
716
717 return val;
718}
719
720#ifdef DEBUG
7279adbd 721static void armv7_pmnc_dump_regs(struct arm_pmu *cpu_pmu)
722{
723 u32 val;
724 unsigned int cnt;
725
52a5566e 726 pr_info("PMNC registers dump:\n");
727
728 asm volatile("mrc p15, 0, %0, c9, c12, 0" : "=r" (val));
52a5566e 729 pr_info("PMNC =0x%08x\n", val);
730
731 asm volatile("mrc p15, 0, %0, c9, c12, 1" : "=r" (val));
52a5566e 732 pr_info("CNTENS=0x%08x\n", val);
733
734 asm volatile("mrc p15, 0, %0, c9, c14, 1" : "=r" (val));
52a5566e 735 pr_info("INTENS=0x%08x\n", val);
736
737 asm volatile("mrc p15, 0, %0, c9, c12, 3" : "=r" (val));
52a5566e 738 pr_info("FLAGS =0x%08x\n", val);
739
740 asm volatile("mrc p15, 0, %0, c9, c12, 5" : "=r" (val));
52a5566e 741 pr_info("SELECT=0x%08x\n", val);
742
743 asm volatile("mrc p15, 0, %0, c9, c13, 0" : "=r" (val));
52a5566e 744 pr_info("CCNT =0x%08x\n", val);
746 for (cnt = ARMV7_IDX_COUNTER0;
747 cnt <= ARMV7_IDX_COUNTER_LAST(cpu_pmu); cnt++) {
748 armv7_pmnc_select_counter(cnt);
749 asm volatile("mrc p15, 0, %0, c9, c13, 2" : "=r" (val));
52a5566e 750 pr_info("CNT[%d] count =0x%08x\n",
c691bb62 751 ARMV7_IDX_TO_COUNTER(cnt), val);
43eab878 752 asm volatile("mrc p15, 0, %0, c9, c13, 1" : "=r" (val));
52a5566e 753 pr_info("CNT[%d] evtsel=0x%08x\n",
c691bb62 754 ARMV7_IDX_TO_COUNTER(cnt), val);
755 }
756}
757#endif
758
ed6f2a52 759static void armv7pmu_enable_event(struct perf_event *event)
760{
761 unsigned long flags;
762 struct hw_perf_event *hwc = &event->hw;
763 struct arm_pmu *cpu_pmu = to_arm_pmu(event->pmu);
11679250 764 struct pmu_hw_events *events = this_cpu_ptr(cpu_pmu->hw_events);
ed6f2a52 765 int idx = hwc->idx;
767 if (!armv7_pmnc_counter_valid(cpu_pmu, idx)) {
768 pr_err("CPU%u enabling wrong PMNC counter IRQ enable %d\n",
769 smp_processor_id(), idx);
770 return;
771 }
772
773 /*
774 * Enable counter and interrupt, and set the counter to count
775 * the event that we're interested in.
776 */
0f78d2d5 777 raw_spin_lock_irqsave(&events->pmu_lock, flags);
778
779 /*
780 * Disable counter
781 */
782 armv7_pmnc_disable_counter(idx);
783
784 /*
785 * Set event (if destined for PMNx counters)
786 * We only need to set the event for the cycle counter if we
787 * have the ability to perform event filtering.
43eab878 788 */
513c99ce 789 if (cpu_pmu->set_event_filter || idx != ARMV7_IDX_CYCLE_COUNTER)
790 armv7_pmnc_write_evtsel(idx, hwc->config_base);
791
792 /*
793 * Enable interrupt for this counter
794 */
795 armv7_pmnc_enable_intens(idx);
796
797 /*
798 * Enable counter
799 */
800 armv7_pmnc_enable_counter(idx);
801
0f78d2d5 802 raw_spin_unlock_irqrestore(&events->pmu_lock, flags);
803}
804
ed6f2a52 805static void armv7pmu_disable_event(struct perf_event *event)
806{
807 unsigned long flags;
808 struct hw_perf_event *hwc = &event->hw;
809 struct arm_pmu *cpu_pmu = to_arm_pmu(event->pmu);
11679250 810 struct pmu_hw_events *events = this_cpu_ptr(cpu_pmu->hw_events);
ed6f2a52 811 int idx = hwc->idx;
813 if (!armv7_pmnc_counter_valid(cpu_pmu, idx)) {
814 pr_err("CPU%u disabling wrong PMNC counter IRQ enable %d\n",
815 smp_processor_id(), idx);
816 return;
817 }
818
819 /*
820 * Disable counter and interrupt
821 */
0f78d2d5 822 raw_spin_lock_irqsave(&events->pmu_lock, flags);
823
824 /*
825 * Disable counter
826 */
827 armv7_pmnc_disable_counter(idx);
828
829 /*
830 * Disable interrupt for this counter
831 */
832 armv7_pmnc_disable_intens(idx);
833
0f78d2d5 834 raw_spin_unlock_irqrestore(&events->pmu_lock, flags);
835}
836
837static irqreturn_t armv7pmu_handle_irq(int irq_num, void *dev)
838{
6330aae7 839 u32 pmnc;
43eab878 840 struct perf_sample_data data;
ed6f2a52 841 struct arm_pmu *cpu_pmu = (struct arm_pmu *)dev;
11679250 842 struct pmu_hw_events *cpuc = this_cpu_ptr(cpu_pmu->hw_events);
843 struct pt_regs *regs;
844 int idx;
845
846 /*
847 * Get and reset the IRQ flags
848 */
849 pmnc = armv7_pmnc_getreset_flags();
850
851 /*
852 * Did an overflow occur?
853 */
854 if (!armv7_pmnc_has_overflowed(pmnc))
855 return IRQ_NONE;
856
857 /*
858 * Handle the counter(s) overflow(s)
859 */
860 regs = get_irq_regs();
861
8be3f9a2 862 for (idx = 0; idx < cpu_pmu->num_events; ++idx) {
863 struct perf_event *event = cpuc->events[idx];
864 struct hw_perf_event *hwc;
865
866 /* Ignore if we don't have an event. */
867 if (!event)
868 continue;
869
870 /*
871 * We have a single interrupt for all counters. Check that
872 * each counter has overflowed before we process it.
873 */
874 if (!armv7_pmnc_counter_has_overflowed(pmnc, idx))
875 continue;
876
877 hwc = &event->hw;
ed6f2a52 878 armpmu_event_update(event);
fd0d000b 879 perf_sample_data_init(&data, 0, hwc->last_period);
ed6f2a52 880 if (!armpmu_event_set_period(event))
881 continue;
882
a8b0ca17 883 if (perf_event_overflow(event, &data, regs))
ed6f2a52 884 cpu_pmu->disable(event);
885 }
886
887 /*
888 * Handle the pending perf events.
889 *
890 * Note: this call *must* be run with interrupts disabled. For
891 * platforms that can have the PMU interrupts raised as an NMI, this
892 * will not work.
893 */
894 irq_work_run();
895
896 return IRQ_HANDLED;
897}
898
ed6f2a52 899static void armv7pmu_start(struct arm_pmu *cpu_pmu)
900{
901 unsigned long flags;
11679250 902 struct pmu_hw_events *events = this_cpu_ptr(cpu_pmu->hw_events);
0f78d2d5 904 raw_spin_lock_irqsave(&events->pmu_lock, flags);
905 /* Enable all counters */
906 armv7_pmnc_write(armv7_pmnc_read() | ARMV7_PMNC_E);
0f78d2d5 907 raw_spin_unlock_irqrestore(&events->pmu_lock, flags);
908}
909
ed6f2a52 910static void armv7pmu_stop(struct arm_pmu *cpu_pmu)
911{
912 unsigned long flags;
11679250 913 struct pmu_hw_events *events = this_cpu_ptr(cpu_pmu->hw_events);
0f78d2d5 915 raw_spin_lock_irqsave(&events->pmu_lock, flags);
916 /* Disable all counters */
917 armv7_pmnc_write(armv7_pmnc_read() & ~ARMV7_PMNC_E);
0f78d2d5 918 raw_spin_unlock_irqrestore(&events->pmu_lock, flags);
919}
920
8be3f9a2 921static int armv7pmu_get_event_idx(struct pmu_hw_events *cpuc,
ed6f2a52 922 struct perf_event *event)
923{
924 int idx;
925 struct arm_pmu *cpu_pmu = to_arm_pmu(event->pmu);
926 struct hw_perf_event *hwc = &event->hw;
927 unsigned long evtype = hwc->config_base & ARMV7_EVTYPE_EVENT;
928
 929 /* Always place a cycle counter event into the cycle counter. */
a505addc 930 if (evtype == ARMV7_PERFCTR_CPU_CYCLES) {
c691bb62 931 if (test_and_set_bit(ARMV7_IDX_CYCLE_COUNTER, cpuc->used_mask))
932 return -EAGAIN;
933
934 return ARMV7_IDX_CYCLE_COUNTER;
935 }
937 /*
938 * For anything other than a cycle counter, try and use
 939 * the event counters
940 */
8be3f9a2 941 for (idx = ARMV7_IDX_COUNTER0; idx < cpu_pmu->num_events; ++idx) {
942 if (!test_and_set_bit(idx, cpuc->used_mask))
943 return idx;
43eab878 944 }
945
946 /* The counters are all in use. */
947 return -EAGAIN;
948}
949
950/*
951 * Add an event filter to a given event. This will only work for PMUv2 PMUs.
952 */
953static int armv7pmu_set_event_filter(struct hw_perf_event *event,
954 struct perf_event_attr *attr)
955{
956 unsigned long config_base = 0;
957
958 if (attr->exclude_idle)
959 return -EPERM;
960 if (attr->exclude_user)
961 config_base |= ARMV7_EXCLUDE_USER;
962 if (attr->exclude_kernel)
963 config_base |= ARMV7_EXCLUDE_PL1;
964 if (!attr->exclude_hv)
965 config_base |= ARMV7_INCLUDE_HYP;
966
967 /*
968 * Install the filter into config_base as this is used to
969 * construct the event type.
970 */
971 event->config_base = config_base;
972
973 return 0;
974}
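/*
 * Note on how the filter reaches the hardware: armv7_pmnc_write_evtsel()
 * masks with ARMV7_EVTYPE_MASK (0xc80000ff), which keeps bits 31, 30 and
 * 27 as well as the low event number, so a config_base carrying e.g.
 * ARMV7_EXCLUDE_USER ends up as bit 30 of PMXEVTYPER when
 * armv7pmu_enable_event() programs the counter.
 */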
975
976static void armv7pmu_reset(void *info)
977{
ed6f2a52 978 struct arm_pmu *cpu_pmu = (struct arm_pmu *)info;
8be3f9a2 979 u32 idx, nb_cnt = cpu_pmu->num_events;
980
981 /* The counter and interrupt enable registers are unknown at reset. */
982 for (idx = ARMV7_IDX_CYCLE_COUNTER; idx < nb_cnt; ++idx) {
983 armv7_pmnc_disable_counter(idx);
984 armv7_pmnc_disable_intens(idx);
985 }
986
987 /* Initialize & Reset PMNC: C and P bits */
988 armv7_pmnc_write(ARMV7_PMNC_P | ARMV7_PMNC_C);
989}
990
991static int armv7_a8_map_event(struct perf_event *event)
992{
6dbc0029 993 return armpmu_map_event(event, &armv7_a8_perf_map,
994 &armv7_a8_perf_cache_map, 0xFF);
995}
996
997static int armv7_a9_map_event(struct perf_event *event)
998{
6dbc0029 999 return armpmu_map_event(event, &armv7_a9_perf_map,
1000 &armv7_a9_perf_cache_map, 0xFF);
1001}
1002
1003static int armv7_a5_map_event(struct perf_event *event)
1004{
6dbc0029 1005 return armpmu_map_event(event, &armv7_a5_perf_map,
1006 &armv7_a5_perf_cache_map, 0xFF);
1007}
1008
1009static int armv7_a15_map_event(struct perf_event *event)
1010{
6dbc0029 1011 return armpmu_map_event(event, &armv7_a15_perf_map,
1012 &armv7_a15_perf_cache_map, 0xFF);
1013}
1014
1015static int armv7_a7_map_event(struct perf_event *event)
1016{
6dbc0029 1017 return armpmu_map_event(event, &armv7_a7_perf_map,
1018 &armv7_a7_perf_cache_map, 0xFF);
1019}
1020
1021static int armv7_a12_map_event(struct perf_event *event)
1022{
1023 return armpmu_map_event(event, &armv7_a12_perf_map,
1024 &armv7_a12_perf_cache_map, 0xFF);
1025}
1026
1027static int krait_map_event(struct perf_event *event)
1028{
1029 return armpmu_map_event(event, &krait_perf_map,
1030 &krait_perf_cache_map, 0xFFFFF);
1031}
1032
1033static int krait_map_event_no_branch(struct perf_event *event)
1034{
1035 return armpmu_map_event(event, &krait_perf_map_no_branch,
1036 &krait_perf_cache_map, 0xFFFFF);
1037}
1038
1039static int scorpion_map_event(struct perf_event *event)
1040{
1041 return armpmu_map_event(event, &scorpion_perf_map,
1042 &scorpion_perf_cache_map, 0xFFFFF);
1043}
1044
1045static void armv7pmu_init(struct arm_pmu *cpu_pmu)
1046{
1047 cpu_pmu->handle_irq = armv7pmu_handle_irq;
1048 cpu_pmu->enable = armv7pmu_enable_event;
1049 cpu_pmu->disable = armv7pmu_disable_event;
1050 cpu_pmu->read_counter = armv7pmu_read_counter;
1051 cpu_pmu->write_counter = armv7pmu_write_counter;
1052 cpu_pmu->get_event_idx = armv7pmu_get_event_idx;
1053 cpu_pmu->start = armv7pmu_start;
1054 cpu_pmu->stop = armv7pmu_stop;
1055 cpu_pmu->reset = armv7pmu_reset;
1056 cpu_pmu->max_period = (1LLU << 32) - 1;
1057};
1058
0e3038d1 1059static void armv7_read_num_pmnc_events(void *info)
43eab878 1060{
0e3038d1 1061 int *nb_cnt = info;
43eab878 1063 /* Read the nb of CNTx counters supported from PMNC */
0e3038d1 1064 *nb_cnt = (armv7_pmnc_read() >> ARMV7_PMNC_N_SHIFT) & ARMV7_PMNC_N_MASK;
1066 /* Add the CPU cycles counter */
1067 *nb_cnt += 1;
1068}
1069
1070static int armv7_probe_num_events(struct arm_pmu *arm_pmu)
1071{
1072 return smp_call_function_any(&arm_pmu->supported_cpus,
1073 armv7_read_num_pmnc_events,
1074 &arm_pmu->num_events, 1);
1075}
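/*
 * Example: a PMU whose PMNC reports N == 6 ends up with num_events == 7
 * here: six PMNx event counters plus the dedicated cycle counter at
 * ARMV7_IDX_CYCLE_COUNTER.
 */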
1076
351a102d 1077static int armv7_a8_pmu_init(struct arm_pmu *cpu_pmu)
43eab878 1078{
513c99ce 1079 armv7pmu_init(cpu_pmu);
3d1ff755 1080 cpu_pmu->name = "armv7_cortex_a8";
513c99ce 1081 cpu_pmu->map_event = armv7_a8_map_event;
0e3038d1 1082 return armv7_probe_num_events(cpu_pmu);
1083}
1084
351a102d 1085static int armv7_a9_pmu_init(struct arm_pmu *cpu_pmu)
43eab878 1086{
513c99ce 1087 armv7pmu_init(cpu_pmu);
3d1ff755 1088 cpu_pmu->name = "armv7_cortex_a9";
513c99ce 1089 cpu_pmu->map_event = armv7_a9_map_event;
0e3038d1 1090 return armv7_probe_num_events(cpu_pmu);
43eab878 1091}
351a102d 1093static int armv7_a5_pmu_init(struct arm_pmu *cpu_pmu)
0c205cbe 1094{
513c99ce 1095 armv7pmu_init(cpu_pmu);
3d1ff755 1096 cpu_pmu->name = "armv7_cortex_a5";
513c99ce 1097 cpu_pmu->map_event = armv7_a5_map_event;
0e3038d1 1098 return armv7_probe_num_events(cpu_pmu);
0c205cbe 1099}
351a102d 1101static int armv7_a15_pmu_init(struct arm_pmu *cpu_pmu)
14abd038 1102{
513c99ce 1103 armv7pmu_init(cpu_pmu);
3d1ff755 1104 cpu_pmu->name = "armv7_cortex_a15";
513c99ce 1105 cpu_pmu->map_event = armv7_a15_map_event;
513c99ce 1106 cpu_pmu->set_event_filter = armv7pmu_set_event_filter;
0e3038d1 1107 return armv7_probe_num_events(cpu_pmu);
14abd038 1108}
351a102d 1110static int armv7_a7_pmu_init(struct arm_pmu *cpu_pmu)
d33c88c6 1111{
513c99ce 1112 armv7pmu_init(cpu_pmu);
3d1ff755 1113 cpu_pmu->name = "armv7_cortex_a7";
513c99ce 1114 cpu_pmu->map_event = armv7_a7_map_event;
513c99ce 1115 cpu_pmu->set_event_filter = armv7pmu_set_event_filter;
0e3038d1 1116 return armv7_probe_num_events(cpu_pmu);
d33c88c6 1117}
1119static int armv7_a12_pmu_init(struct arm_pmu *cpu_pmu)
1120{
1121 armv7pmu_init(cpu_pmu);
3d1ff755 1122 cpu_pmu->name = "armv7_cortex_a12";
8e781f65 1123 cpu_pmu->map_event = armv7_a12_map_event;
8e781f65 1124 cpu_pmu->set_event_filter = armv7pmu_set_event_filter;
0e3038d1 1125 return armv7_probe_num_events(cpu_pmu);
1126}
1127
1128static int armv7_a17_pmu_init(struct arm_pmu *cpu_pmu)
1129{
0e3038d1 1130 int ret = armv7_a12_pmu_init(cpu_pmu);
3d1ff755 1131 cpu_pmu->name = "armv7_cortex_a17";
0e3038d1 1132 return ret;
1133}
1134
1135/*
1136 * Krait Performance Monitor Region Event Selection Register (PMRESRn)
1137 *
1138 * 31 30 24 16 8 0
1139 * +--------------------------------+
1140 * PMRESR0 | EN | CC | CC | CC | CC | N = 1, R = 0
1141 * +--------------------------------+
1142 * PMRESR1 | EN | CC | CC | CC | CC | N = 1, R = 1
1143 * +--------------------------------+
1144 * PMRESR2 | EN | CC | CC | CC | CC | N = 1, R = 2
1145 * +--------------------------------+
1146 * VPMRESR0 | EN | CC | CC | CC | CC | N = 2, R = ?
1147 * +--------------------------------+
1148 * EN | G=3 | G=2 | G=1 | G=0
1149 *
1150 * Event Encoding:
1151 *
1152 * hwc->config_base = 0xNRCCG
1153 *
1154 * N = prefix, 1 for Krait CPU (PMRESRn), 2 for Venum VFP (VPMRESR)
1155 * R = region register
1156 * CC = class of events the group G is choosing from
1157 * G = group or particular event
1158 *
1159 * Example: 0x12021 is a Krait CPU event in PMRESR2's group 1 with code 2
1160 *
1161 * A region (R) corresponds to a piece of the CPU (execution unit, instruction
1162 * unit, etc.) while the event code (CC) corresponds to a particular class of
1163 * events (interrupts for example). An event code is broken down into
1164 * groups (G) that can be mapped into the PMU (irq, fiqs, and irq+fiqs for
1165 * example).
1166 */
1167
1168#define KRAIT_EVENT (1 << 16)
1169#define VENUM_EVENT (2 << 16)
1170#define KRAIT_EVENT_MASK (KRAIT_EVENT | VENUM_EVENT)
1171#define PMRESRn_EN BIT(31)
1172
1173#define EVENT_REGION(event) (((event) >> 12) & 0xf) /* R */
1174#define EVENT_GROUP(event) ((event) & 0xf) /* G */
1175#define EVENT_CODE(event) (((event) >> 4) & 0xff) /* CC */
1176#define EVENT_VENUM(event) (!!(event & VENUM_EVENT)) /* N=2 */
1177#define EVENT_CPU(event) (!!(event & KRAIT_EVENT)) /* N=1 */
1178
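/*
 * Decoding the 0x12021 example from the comment above with these macros:
 *   EVENT_REGION(0x12021) == 2      (PMRESR2)
 *   EVENT_CODE(0x12021)   == 0x02
 *   EVENT_GROUP(0x12021)  == 1
 *   EVENT_CPU(0x12021)    is true   (N == 1, a Krait CPU event)
 */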
1179static u32 krait_read_pmresrn(int n)
1180{
1181 u32 val;
1182
1183 switch (n) {
1184 case 0:
1185 asm volatile("mrc p15, 1, %0, c9, c15, 0" : "=r" (val));
1186 break;
1187 case 1:
1188 asm volatile("mrc p15, 1, %0, c9, c15, 1" : "=r" (val));
1189 break;
1190 case 2:
1191 asm volatile("mrc p15, 1, %0, c9, c15, 2" : "=r" (val));
1192 break;
1193 default:
1194 BUG(); /* Should be validated in krait_pmu_get_event_idx() */
1195 }
1196
1197 return val;
1198}
1199
1200static void krait_write_pmresrn(int n, u32 val)
1201{
1202 switch (n) {
1203 case 0:
1204 asm volatile("mcr p15, 1, %0, c9, c15, 0" : : "r" (val));
1205 break;
1206 case 1:
1207 asm volatile("mcr p15, 1, %0, c9, c15, 1" : : "r" (val));
1208 break;
1209 case 2:
1210 asm volatile("mcr p15, 1, %0, c9, c15, 2" : : "r" (val));
1211 break;
1212 default:
1213 BUG(); /* Should be validated in krait_pmu_get_event_idx() */
1214 }
1215}
1216
65bab451 1217static u32 venum_read_pmresr(void)
1218{
1219 u32 val;
1220 asm volatile("mrc p10, 7, %0, c11, c0, 0" : "=r" (val));
1221 return val;
1222}
1223
65bab451 1224static void venum_write_pmresr(u32 val)
1225{
1226 asm volatile("mcr p10, 7, %0, c11, c0, 0" : : "r" (val));
1227}
1228
65bab451 1229static void venum_pre_pmresr(u32 *venum_orig_val, u32 *fp_orig_val)
1230{
1231 u32 venum_new_val;
1232 u32 fp_new_val;
1233
1234 BUG_ON(preemptible());
1235 /* CPACR Enable CP10 and CP11 access */
1236 *venum_orig_val = get_copro_access();
1237 venum_new_val = *venum_orig_val | CPACC_SVC(10) | CPACC_SVC(11);
1238 set_copro_access(venum_new_val);
1239
1240 /* Enable FPEXC */
1241 *fp_orig_val = fmrx(FPEXC);
1242 fp_new_val = *fp_orig_val | FPEXC_EN;
1243 fmxr(FPEXC, fp_new_val);
1244}
1245
65bab451 1246static void venum_post_pmresr(u32 venum_orig_val, u32 fp_orig_val)
1247{
1248 BUG_ON(preemptible());
1249 /* Restore FPEXC */
1250 fmxr(FPEXC, fp_orig_val);
1251 isb();
1252 /* Restore CPACR */
1253 set_copro_access(venum_orig_val);
1254}
1255
1256static u32 krait_get_pmresrn_event(unsigned int region)
1257{
1258 static const u32 pmresrn_table[] = { KRAIT_PMRESR0_GROUP0,
1259 KRAIT_PMRESR1_GROUP0,
1260 KRAIT_PMRESR2_GROUP0 };
1261 return pmresrn_table[region];
1262}
1263
1264static void krait_evt_setup(int idx, u32 config_base)
1265{
1266 u32 val;
1267 u32 mask;
1268 u32 vval, fval;
1269 unsigned int region = EVENT_REGION(config_base);
1270 unsigned int group = EVENT_GROUP(config_base);
1271 unsigned int code = EVENT_CODE(config_base);
b7aafe99 1272 unsigned int group_shift;
65bab451 1273 bool venum_event = EVENT_VENUM(config_base);
1274
1275 group_shift = group * 8;
1276 mask = 0xff << group_shift;
1277
1278 /* Configure evtsel for the region and group */
1279 if (venum_event)
1280 val = KRAIT_VPMRESR0_GROUP0;
1281 else
1282 val = krait_get_pmresrn_event(region);
1283 val += group;
1284 /* Mix in mode-exclusion bits */
1285 val |= config_base & (ARMV7_EXCLUDE_USER | ARMV7_EXCLUDE_PL1);
1286 armv7_pmnc_write_evtsel(idx, val);
1287
b7aafe99 1288 if (venum_event) {
1289 venum_pre_pmresr(&vval, &fval);
1290 val = venum_read_pmresr();
1291 val &= ~mask;
1292 val |= code << group_shift;
1293 val |= PMRESRn_EN;
1294 venum_write_pmresr(val);
1295 venum_post_pmresr(vval, fval);
1296 } else {
1297 val = krait_read_pmresrn(region);
1298 val &= ~mask;
1299 val |= code << group_shift;
1300 val |= PMRESRn_EN;
1301 krait_write_pmresrn(region, val);
1302 }
1303}
1304
65bab451 1305static u32 clear_pmresrn_group(u32 val, int group)
1306{
1307 u32 mask;
1308 int group_shift;
1309
1310 group_shift = group * 8;
1311 mask = 0xff << group_shift;
1312 val &= ~mask;
1313
1314 /* Don't clear enable bit if entire region isn't disabled */
1315 if (val & ~PMRESRn_EN)
1316 return val |= PMRESRn_EN;
1317
1318 return 0;
1319}
1320
1321static void krait_clearpmu(u32 config_base)
1322{
1323 u32 val;
1324 u32 vval, fval;
1325 unsigned int region = EVENT_REGION(config_base);
1326 unsigned int group = EVENT_GROUP(config_base);
1327 bool venum_event = EVENT_VENUM(config_base);
1328
1329 if (venum_event) {
1330 venum_pre_pmresr(&vval, &fval);
1331 val = venum_read_pmresr();
1332 val = clear_pmresrn_group(val, group);
1333 venum_write_pmresr(val);
1334 venum_post_pmresr(vval, fval);
1335 } else {
1336 val = krait_read_pmresrn(region);
65bab451 1337 val = clear_pmresrn_group(val, group);
1338 krait_write_pmresrn(region, val);
1339 }
1340}
1341
1342static void krait_pmu_disable_event(struct perf_event *event)
1343{
1344 unsigned long flags;
1345 struct hw_perf_event *hwc = &event->hw;
1346 int idx = hwc->idx;
037e79aa 1347 struct arm_pmu *cpu_pmu = to_arm_pmu(event->pmu);
11679250 1348 struct pmu_hw_events *events = this_cpu_ptr(cpu_pmu->hw_events);
1349
1350 /* Disable counter and interrupt */
1351 raw_spin_lock_irqsave(&events->pmu_lock, flags);
1352
1353 /* Disable counter */
1354 armv7_pmnc_disable_counter(idx);
1355
1356 /*
1357 * Clear pmresr code (if destined for PMNx counters)
1358 */
1359 if (hwc->config_base & KRAIT_EVENT_MASK)
1360 krait_clearpmu(hwc->config_base);
1361
1362 /* Disable interrupt for this counter */
1363 armv7_pmnc_disable_intens(idx);
1364
1365 raw_spin_unlock_irqrestore(&events->pmu_lock, flags);
1366}
1367
1368static void krait_pmu_enable_event(struct perf_event *event)
1369{
1370 unsigned long flags;
1371 struct hw_perf_event *hwc = &event->hw;
1372 int idx = hwc->idx;
037e79aa 1373 struct arm_pmu *cpu_pmu = to_arm_pmu(event->pmu);
11679250 1374 struct pmu_hw_events *events = this_cpu_ptr(cpu_pmu->hw_events);
1375
1376 /*
1377 * Enable counter and interrupt, and set the counter to count
1378 * the event that we're interested in.
1379 */
1380 raw_spin_lock_irqsave(&events->pmu_lock, flags);
1381
1382 /* Disable counter */
1383 armv7_pmnc_disable_counter(idx);
1384
1385 /*
1386 * Set event (if destined for PMNx counters)
1387 * We set the event for the cycle counter because we
1388 * have the ability to perform event filtering.
1389 */
1390 if (hwc->config_base & KRAIT_EVENT_MASK)
1391 krait_evt_setup(idx, hwc->config_base);
1392 else
1393 armv7_pmnc_write_evtsel(idx, hwc->config_base);
1394
1395 /* Enable interrupt for this counter */
1396 armv7_pmnc_enable_intens(idx);
1397
1398 /* Enable counter */
1399 armv7_pmnc_enable_counter(idx);
1400
1401 raw_spin_unlock_irqrestore(&events->pmu_lock, flags);
1402}
1403
1404static void krait_pmu_reset(void *info)
1405{
1406 u32 vval, fval;
1407 struct arm_pmu *cpu_pmu = info;
1408 u32 idx, nb_cnt = cpu_pmu->num_events;
1409
1410 armv7pmu_reset(info);
1411
1412 /* Clear all pmresrs */
1413 krait_write_pmresrn(0, 0);
1414 krait_write_pmresrn(1, 0);
1415 krait_write_pmresrn(2, 0);
1416
1417 venum_pre_pmresr(&vval, &fval);
1418 venum_write_pmresr(0);
1419 venum_post_pmresr(vval, fval);
1420
1421 /* Reset PMxEVNCTCR to sane default */
1422 for (idx = ARMV7_IDX_CYCLE_COUNTER; idx < nb_cnt; ++idx) {
1423 armv7_pmnc_select_counter(idx);
1424 asm volatile("mcr p15, 0, %0, c9, c15, 0" : : "r" (0));
1425 }
1426
1427}
1428
1429static int krait_event_to_bit(struct perf_event *event, unsigned int region,
1430 unsigned int group)
1431{
1432 int bit;
1433 struct hw_perf_event *hwc = &event->hw;
1434 struct arm_pmu *cpu_pmu = to_arm_pmu(event->pmu);
1435
1436 if (hwc->config_base & VENUM_EVENT)
1437 bit = KRAIT_VPMRESR0_GROUP0;
1438 else
1439 bit = krait_get_pmresrn_event(region);
1440 bit -= krait_get_pmresrn_event(0);
1441 bit += group;
1442 /*
1443 * Lower bits are reserved for use by the counters (see
1444 * armv7pmu_get_event_idx() for more info)
1445 */
1446 bit += ARMV7_IDX_COUNTER_LAST(cpu_pmu) + 1;
1447
1448 return bit;
1449}
1450
1451/*
1452 * We check for column exclusion constraints here.
 1453 * Two events can't use the same group within a PMRESR register.
1454 */
1455static int krait_pmu_get_event_idx(struct pmu_hw_events *cpuc,
1456 struct perf_event *event)
1457{
1458 int idx;
6a78371a 1459 int bit = -1;
b7aafe99 1460 struct hw_perf_event *hwc = &event->hw;
1461 unsigned int region = EVENT_REGION(hwc->config_base);
1462 unsigned int code = EVENT_CODE(hwc->config_base);
1463 unsigned int group = EVENT_GROUP(hwc->config_base);
1464 bool venum_event = EVENT_VENUM(hwc->config_base);
1465 bool krait_event = EVENT_CPU(hwc->config_base);
65bab451 1467 if (venum_event || krait_event) {
1468 /* Ignore invalid events */
1469 if (group > 3 || region > 2)
1470 return -EINVAL;
65bab451 1471 if (venum_event && (code & 0xe0))
1472 return -EINVAL;
1473
1474 bit = krait_event_to_bit(event, region, group);
1475 if (test_and_set_bit(bit, cpuc->used_mask))
1476 return -EAGAIN;
1477 }
1478
1479 idx = armv7pmu_get_event_idx(cpuc, event);
6a78371a 1480 if (idx < 0 && bit >= 0)
1481 clear_bit(bit, cpuc->used_mask);
1482
1483 return idx;
1484}
1485
1486static void krait_pmu_clear_event_idx(struct pmu_hw_events *cpuc,
1487 struct perf_event *event)
1488{
1489 int bit;
1490 struct hw_perf_event *hwc = &event->hw;
1491 unsigned int region = EVENT_REGION(hwc->config_base);
1492 unsigned int group = EVENT_GROUP(hwc->config_base);
1493 bool venum_event = EVENT_VENUM(hwc->config_base);
1494 bool krait_event = EVENT_CPU(hwc->config_base);
65bab451 1496 if (venum_event || krait_event) {
1497 bit = krait_event_to_bit(event, region, group);
1498 clear_bit(bit, cpuc->used_mask);
1499 }
1500}
1501
1502static int krait_pmu_init(struct arm_pmu *cpu_pmu)
1503{
1504 armv7pmu_init(cpu_pmu);
3d1ff755 1505 cpu_pmu->name = "armv7_krait";
1506 /* Some early versions of Krait don't support PC write events */
1507 if (of_property_read_bool(cpu_pmu->plat_device->dev.of_node,
1508 "qcom,no-pc-write"))
1509 cpu_pmu->map_event = krait_map_event_no_branch;
1510 else
1511 cpu_pmu->map_event = krait_map_event;
2a3391cd 1512 cpu_pmu->set_event_filter = armv7pmu_set_event_filter;
1513 cpu_pmu->reset = krait_pmu_reset;
1514 cpu_pmu->enable = krait_pmu_enable_event;
1515 cpu_pmu->disable = krait_pmu_disable_event;
1516 cpu_pmu->get_event_idx = krait_pmu_get_event_idx;
1517 cpu_pmu->clear_event_idx = krait_pmu_clear_event_idx;
0e3038d1 1518 return armv7_probe_num_events(cpu_pmu);
2a3391cd 1519}
341e42c4
SB
1520
1521/*
1522 * Scorpion Local Performance Monitor Register (LPMn)
1523 *
1524 * 31 30 24 16 8 0
1525 * +--------------------------------+
1526 * LPM0 | EN | CC | CC | CC | CC | N = 1, R = 0
1527 * +--------------------------------+
1528 * LPM1 | EN | CC | CC | CC | CC | N = 1, R = 1
1529 * +--------------------------------+
1530 * LPM2 | EN | CC | CC | CC | CC | N = 1, R = 2
1531 * +--------------------------------+
1532 * L2LPM | EN | CC | CC | CC | CC | N = 1, R = 3
1533 * +--------------------------------+
1534 * VLPM | EN | CC | CC | CC | CC | N = 2, R = ?
1535 * +--------------------------------+
1536 * EN | G=3 | G=2 | G=1 | G=0
1537 *
1538 *
1539 * Event Encoding:
1540 *
1541 * hwc->config_base = 0xNRCCG
1542 *
1543 * N = prefix, 1 for Scorpion CPU (LPMn/L2LPM), 2 for Venum VFP (VLPM)
1544 * R = region register
1545 * CC = class of events the group G is choosing from
1546 * G = group or particular event
1547 *
1548 * Example: 0x12021 is a Scorpion CPU event in LPM2's group 1 with code 2
1549 *
1550 * A region (R) corresponds to a piece of the CPU (execution unit, instruction
1551 * unit, etc.) while the event code (CC) corresponds to a particular class of
1552 * events (interrupts for example). An event code is broken down into
1553 * groups (G) that can be mapped into the PMU (irq, fiqs, and irq+fiqs for
1554 * example).
1555 */
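/*
 * Editor's note, working the example above by hand. The EVENT_* extraction
 * macros used below are defined earlier in this file; the shift values here
 * are implied by the 0xNRCCG layout, not quoted from those macros:
 *
 *	config_base = 0x12021
 *	N  = (0x12021 >> 16) & 0xf  = 1		Scorpion CPU event (LPMn/L2LPM)
 *	R  = (0x12021 >> 12) & 0xf  = 2		region LPM2
 *	CC = (0x12021 >>  4) & 0xff = 0x02	event class/code
 *	G  =  0x12021        & 0xf  = 1		group 1
 */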
1556
1557static u32 scorpion_read_pmresrn(int n)
1558{
1559 u32 val;
1560
1561 switch (n) {
1562 case 0:
1563 asm volatile("mrc p15, 0, %0, c15, c0, 0" : "=r" (val));
1564 break;
1565 case 1:
1566 asm volatile("mrc p15, 1, %0, c15, c0, 0" : "=r" (val));
1567 break;
1568 case 2:
1569 asm volatile("mrc p15, 2, %0, c15, c0, 0" : "=r" (val));
1570 break;
1571 case 3:
1572 asm volatile("mrc p15, 3, %0, c15, c2, 0" : "=r" (val));
1573 break;
1574 default:
1575 BUG(); /* Should be validated in scorpion_pmu_get_event_idx() */
1576 }
1577
1578 return val;
1579}
1580
1581static void scorpion_write_pmresrn(int n, u32 val)
1582{
1583 switch (n) {
1584 case 0:
1585 asm volatile("mcr p15, 0, %0, c15, c0, 0" : : "r" (val));
1586 break;
1587 case 1:
1588 asm volatile("mcr p15, 1, %0, c15, c0, 0" : : "r" (val));
1589 break;
1590 case 2:
1591 asm volatile("mcr p15, 2, %0, c15, c0, 0" : : "r" (val));
1592 break;
1593 case 3:
1594 asm volatile("mcr p15, 3, %0, c15, c2, 0" : : "r" (val));
1595 break;
1596 default:
1597 BUG(); /* Should be validated in scorpion_pmu_get_event_idx() */
1598 }
1599}
1600
1601static u32 scorpion_get_pmresrn_event(unsigned int region)
1602{
1603 static const u32 pmresrn_table[] = { SCORPION_LPM0_GROUP0,
1604 SCORPION_LPM1_GROUP0,
1605 SCORPION_LPM2_GROUP0,
1606 SCORPION_L2LPM_GROUP0 };
1607 return pmresrn_table[region];
1608}
1609
1610static void scorpion_evt_setup(int idx, u32 config_base)
1611{
1612 u32 val;
1613 u32 mask;
1614 u32 vval, fval;
1615 unsigned int region = EVENT_REGION(config_base);
1616 unsigned int group = EVENT_GROUP(config_base);
1617 unsigned int code = EVENT_CODE(config_base);
1618 unsigned int group_shift;
1619 bool venum_event = EVENT_VENUM(config_base);
1620
1621 group_shift = group * 8;
1622 mask = 0xff << group_shift;
1623
1624 /* Configure evtsel for the region and group */
1625 if (venum_event)
1626 val = SCORPION_VLPM_GROUP0;
1627 else
1628 val = scorpion_get_pmresrn_event(region);
1629 val += group;
1630 /* Mix in mode-exclusion bits */
1631 val |= config_base & (ARMV7_EXCLUDE_USER | ARMV7_EXCLUDE_PL1);
1632 armv7_pmnc_write_evtsel(idx, val);
1633
1634 asm volatile("mcr p15, 0, %0, c9, c15, 0" : : "r" (0));
1635
1636 if (venum_event) {
1637 venum_pre_pmresr(&vval, &fval);
1638 val = venum_read_pmresr();
1639 val &= ~mask;
1640 val |= code << group_shift;
1641 val |= PMRESRn_EN;
1642 venum_write_pmresr(val);
1643 venum_post_pmresr(vval, fval);
1644 } else {
1645 val = scorpion_read_pmresrn(region);
1646 val &= ~mask;
1647 val |= code << group_shift;
1648 val |= PMRESRn_EN;
1649 scorpion_write_pmresrn(region, val);
1650 }
1651}
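/*
 * Editor's illustration: for the example config_base 0x12021 (region 2,
 * code 0x02, group 1), the function above programs counter idx's evtsel
 * with SCORPION_LPM2_GROUP0 + 1 (plus any mode-exclusion bits), then
 * read-modify-writes LPM2 so that bits [15:8] hold 0x02 with PMRESRn_EN
 * set. A second event requesting region 2, group 1 would overwrite that
 * byte, which is exactly the collision scorpion_pmu_get_event_idx() below
 * rejects via used_mask.
 */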
1652
1653static void scorpion_clearpmu(u32 config_base)
1654{
1655 u32 val;
1656 u32 vval, fval;
1657 unsigned int region = EVENT_REGION(config_base);
1658 unsigned int group = EVENT_GROUP(config_base);
1659 bool venum_event = EVENT_VENUM(config_base);
1660
1661 if (venum_event) {
1662 venum_pre_pmresr(&vval, &fval);
1663 val = venum_read_pmresr();
1664 val = clear_pmresrn_group(val, group);
1665 venum_write_pmresr(val);
1666 venum_post_pmresr(vval, fval);
1667 } else {
1668 val = scorpion_read_pmresrn(region);
1669 val = clear_pmresrn_group(val, group);
1670 scorpion_write_pmresrn(region, val);
1671 }
1672}
1673
1674static void scorpion_pmu_disable_event(struct perf_event *event)
1675{
1676 unsigned long flags;
1677 struct hw_perf_event *hwc = &event->hw;
1678 int idx = hwc->idx;
1679 struct arm_pmu *cpu_pmu = to_arm_pmu(event->pmu);
1680 struct pmu_hw_events *events = this_cpu_ptr(cpu_pmu->hw_events);
1681
1682 /* Disable counter and interrupt */
1683 raw_spin_lock_irqsave(&events->pmu_lock, flags);
1684
1685 /* Disable counter */
1686 armv7_pmnc_disable_counter(idx);
1687
1688 /*
1689 * Clear pmresr code (if destined for PMNx counters)
1690 */
1691 if (hwc->config_base & KRAIT_EVENT_MASK)
1692 scorpion_clearpmu(hwc->config_base);
1693
1694 /* Disable interrupt for this counter */
1695 armv7_pmnc_disable_intens(idx);
1696
1697 raw_spin_unlock_irqrestore(&events->pmu_lock, flags);
1698}
1699
1700static void scorpion_pmu_enable_event(struct perf_event *event)
1701{
1702 unsigned long flags;
1703 struct hw_perf_event *hwc = &event->hw;
1704 int idx = hwc->idx;
1705 struct arm_pmu *cpu_pmu = to_arm_pmu(event->pmu);
1706 struct pmu_hw_events *events = this_cpu_ptr(cpu_pmu->hw_events);
1707
1708 /*
1709 * Enable counter and interrupt, and set the counter to count
1710 * the event that we're interested in.
1711 */
1712 raw_spin_lock_irqsave(&events->pmu_lock, flags);
1713
1714 /* Disable counter */
1715 armv7_pmnc_disable_counter(idx);
1716
1717 /*
1718 * Set event (if destined for PMNx counters)
1719 * We don't set the event for the cycle counter because we
1720 * don't have the ability to perform event filtering.
1721 */
1722 if (hwc->config_base & KRAIT_EVENT_MASK)
1723 scorpion_evt_setup(idx, hwc->config_base);
1724 else if (idx != ARMV7_IDX_CYCLE_COUNTER)
1725 armv7_pmnc_write_evtsel(idx, hwc->config_base);
1726
1727 /* Enable interrupt for this counter */
1728 armv7_pmnc_enable_intens(idx);
1729
1730 /* Enable counter */
1731 armv7_pmnc_enable_counter(idx);
1732
1733 raw_spin_unlock_irqrestore(&events->pmu_lock, flags);
1734}
1735
1736static void scorpion_pmu_reset(void *info)
1737{
1738 u32 vval, fval;
1739 struct arm_pmu *cpu_pmu = info;
1740 u32 idx, nb_cnt = cpu_pmu->num_events;
1741
1742 armv7pmu_reset(info);
1743
1744 /* Clear all pmresrs */
1745 scorpion_write_pmresrn(0, 0);
1746 scorpion_write_pmresrn(1, 0);
1747 scorpion_write_pmresrn(2, 0);
1748 scorpion_write_pmresrn(3, 0);
1749
1750 venum_pre_pmresr(&vval, &fval);
1751 venum_write_pmresr(0);
1752 venum_post_pmresr(vval, fval);
1753
1754	/* Reset PMxEVCNTCR to sane default */
1755 for (idx = ARMV7_IDX_CYCLE_COUNTER; idx < nb_cnt; ++idx) {
1756 armv7_pmnc_select_counter(idx);
1757 asm volatile("mcr p15, 0, %0, c9, c15, 0" : : "r" (0));
1758 }
1759}
1760
1761static int scorpion_event_to_bit(struct perf_event *event, unsigned int region,
1762 unsigned int group)
1763{
1764 int bit;
1765 struct hw_perf_event *hwc = &event->hw;
1766 struct arm_pmu *cpu_pmu = to_arm_pmu(event->pmu);
1767
1768 if (hwc->config_base & VENUM_EVENT)
1769 bit = SCORPION_VLPM_GROUP0;
1770 else
1771 bit = scorpion_get_pmresrn_event(region);
1772 bit -= scorpion_get_pmresrn_event(0);
1773 bit += group;
1774 /*
1775 * Lower bits are reserved for use by the counters (see
1776 * armv7pmu_get_event_idx() for more info)
1777 */
1778 bit += ARMV7_IDX_COUNTER_LAST(cpu_pmu) + 1;
1779
1780 return bit;
1781}
1782
1783/*
1784 * We check for column exclusion constraints here.
1785 * Two events can't use the same group within a pmresr register.
1786 */
1787static int scorpion_pmu_get_event_idx(struct pmu_hw_events *cpuc,
1788 struct perf_event *event)
1789{
1790 int idx;
1791 int bit = -1;
1792 struct hw_perf_event *hwc = &event->hw;
1793 unsigned int region = EVENT_REGION(hwc->config_base);
1794 unsigned int group = EVENT_GROUP(hwc->config_base);
1795 bool venum_event = EVENT_VENUM(hwc->config_base);
1796 bool scorpion_event = EVENT_CPU(hwc->config_base);
1797
1798 if (venum_event || scorpion_event) {
1799 /* Ignore invalid events */
1800 if (group > 3 || region > 3)
1801 return -EINVAL;
1802
1803 bit = scorpion_event_to_bit(event, region, group);
1804 if (test_and_set_bit(bit, cpuc->used_mask))
1805 return -EAGAIN;
1806 }
1807
1808 idx = armv7pmu_get_event_idx(cpuc, event);
1809 if (idx < 0 && bit >= 0)
1810 clear_bit(bit, cpuc->used_mask);
1811
1812 return idx;
1813}
1814
1815static void scorpion_pmu_clear_event_idx(struct pmu_hw_events *cpuc,
1816 struct perf_event *event)
1817{
1818 int bit;
1819 struct hw_perf_event *hwc = &event->hw;
1820 unsigned int region = EVENT_REGION(hwc->config_base);
1821 unsigned int group = EVENT_GROUP(hwc->config_base);
1822 bool venum_event = EVENT_VENUM(hwc->config_base);
1823 bool scorpion_event = EVENT_CPU(hwc->config_base);
1824
1825 if (venum_event || scorpion_event) {
1826 bit = scorpion_event_to_bit(event, region, group);
1827 clear_bit(bit, cpuc->used_mask);
1828 }
1829}
1830
1831static int scorpion_pmu_init(struct arm_pmu *cpu_pmu)
1832{
1833 armv7pmu_init(cpu_pmu);
1834 cpu_pmu->name = "armv7_scorpion";
1835 cpu_pmu->map_event = scorpion_map_event;
341e42c4
SB
1836 cpu_pmu->reset = scorpion_pmu_reset;
1837 cpu_pmu->enable = scorpion_pmu_enable_event;
1838 cpu_pmu->disable = scorpion_pmu_disable_event;
1839 cpu_pmu->get_event_idx = scorpion_pmu_get_event_idx;
1840 cpu_pmu->clear_event_idx = scorpion_pmu_clear_event_idx;
0e3038d1 1841 return armv7_probe_num_events(cpu_pmu);
341e42c4
SB
1842}
1843
1844static int scorpion_mp_pmu_init(struct arm_pmu *cpu_pmu)
1845{
1846 armv7pmu_init(cpu_pmu);
1847 cpu_pmu->name = "armv7_scorpion_mp";
1848 cpu_pmu->map_event = scorpion_map_event;
341e42c4
SB
1849 cpu_pmu->reset = scorpion_pmu_reset;
1850 cpu_pmu->enable = scorpion_pmu_enable_event;
1851 cpu_pmu->disable = scorpion_pmu_disable_event;
1852 cpu_pmu->get_event_idx = scorpion_pmu_get_event_idx;
1853 cpu_pmu->clear_event_idx = scorpion_pmu_clear_event_idx;
0e3038d1 1854 return armv7_probe_num_events(cpu_pmu);
341e42c4 1855}
43eab878 1856#else
513c99ce 1857static inline int armv7_a8_pmu_init(struct arm_pmu *cpu_pmu)
43eab878 1858{
513c99ce 1859 return -ENODEV;
43eab878
WD
1860}
1861
513c99ce 1862static inline int armv7_a9_pmu_init(struct arm_pmu *cpu_pmu)
43eab878 1863{
513c99ce 1864 return -ENODEV;
43eab878 1865}
0c205cbe 1866
513c99ce 1867static inline int armv7_a5_pmu_init(struct arm_pmu *cpu_pmu)
0c205cbe 1868{
513c99ce 1869 return -ENODEV;
0c205cbe 1870}
14abd038 1871
513c99ce 1872static inline int armv7_a15_pmu_init(struct arm_pmu *cpu_pmu)
14abd038 1873{
513c99ce 1874 return -ENODEV;
14abd038 1875}
d33c88c6 1876
513c99ce 1877static inline int armv7_a7_pmu_init(struct arm_pmu *cpu_pmu)
d33c88c6 1878{
513c99ce 1879 return -ENODEV;
d33c88c6 1880}
2a3391cd 1881
8e781f65
AT
1882static inline int armv7_a12_pmu_init(struct arm_pmu *cpu_pmu)
1883{
1884 return -ENODEV;
1885}
1886
03eff46c
WD
1887static inline int armv7_a17_pmu_init(struct arm_pmu *cpu_pmu)
1888{
1889 return -ENODEV;
1890}
1891
2a3391cd
SB
1892static inline int krait_pmu_init(struct arm_pmu *cpu_pmu)
1893{
1894 return -ENODEV;
1895}
341e42c4
SB
1896
1897static inline int scorpion_pmu_init(struct arm_pmu *cpu_pmu)
1898{
1899 return -ENODEV;
1900}
1901
1902static inline int scorpion_mp_pmu_init(struct arm_pmu *cpu_pmu)
1903{
1904 return -ENODEV;
1905}
43eab878 1906#endif /* CONFIG_CPU_V7 */