/*
 * ARMv6 Performance counter handling code.
 *
 * Copyright (C) 2009 picoChip Designs, Ltd., Jamie Iles
 *
 * ARMv6 has 2 configurable performance counters and a single cycle counter.
 * They all share a single reset bit but can be written to zero so we can use
 * that for a reset.
 *
 * The counters can't be individually enabled or disabled so when we remove
 * one event and replace it with another we could get spurious counts from the
 * wrong event. However, we can take advantage of the fact that the
 * performance counters can export events to the event bus, and the event bus
 * itself can be monitored. This requires that we *don't* export the events to
 * the event bus. The procedure for disabling a configurable counter is:
 *   - change the counter to count the ETMEXTOUT[0] signal (0x20). This
 *     effectively stops the counter from counting.
 *   - disable the counter's interrupt generation (each counter has its
 *     own interrupt enable bit).
 * Once stopped, the counter value can be written as 0 to reset.
 *
 * To enable a counter:
 *   - enable the counter's interrupt generation.
 *   - set the new event type.
 *
 * Note: the dedicated cycle counter only counts cycles and can't be
 * enabled/disabled independently of the others. When we want to disable the
 * cycle counter, we have to just disable the interrupt reporting and start
 * ignoring that counter. When re-enabling, we have to reset the value and
 * enable the interrupt.
 */

#if defined(CONFIG_CPU_V6) || defined(CONFIG_CPU_V6K)
enum armv6_perf_types {
        ARMV6_PERFCTR_ICACHE_MISS       = 0x0,
        ARMV6_PERFCTR_IBUF_STALL        = 0x1,
        ARMV6_PERFCTR_DDEP_STALL        = 0x2,
        ARMV6_PERFCTR_ITLB_MISS         = 0x3,
        ARMV6_PERFCTR_DTLB_MISS         = 0x4,
        ARMV6_PERFCTR_BR_EXEC           = 0x5,
        ARMV6_PERFCTR_BR_MISPREDICT     = 0x6,
        ARMV6_PERFCTR_INSTR_EXEC        = 0x7,
        ARMV6_PERFCTR_DCACHE_HIT        = 0x9,
        ARMV6_PERFCTR_DCACHE_ACCESS     = 0xA,
        ARMV6_PERFCTR_DCACHE_MISS       = 0xB,
        ARMV6_PERFCTR_DCACHE_WBACK      = 0xC,
        ARMV6_PERFCTR_SW_PC_CHANGE      = 0xD,
        ARMV6_PERFCTR_MAIN_TLB_MISS     = 0xF,
        ARMV6_PERFCTR_EXPL_D_ACCESS     = 0x10,
        ARMV6_PERFCTR_LSU_FULL_STALL    = 0x11,
        ARMV6_PERFCTR_WBUF_DRAINED      = 0x12,
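        /*
         * 0xFF is not a hardware event number; this driver uses it to
         * select the dedicated cycle counter (see armv6pmu_get_event_idx()).
         */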
        ARMV6_PERFCTR_CPU_CYCLES        = 0xFF,
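        /*
         * 0x20 is the ETMEXTOUT[0] event described in the comment at the
         * top of this file: counting it effectively stops a counter.
         */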
        ARMV6_PERFCTR_NOP               = 0x20,
};

enum armv6_counters {
        ARMV6_CYCLE_COUNTER = 0,
        ARMV6_COUNTER0,
        ARMV6_COUNTER1,
};

/*
 * The hardware events that we support. We do support cache operations but
 * we have Harvard caches and no way to combine instruction and data
 * accesses/misses in hardware.
 */
static const unsigned armv6_perf_map[PERF_COUNT_HW_MAX] = {
        [PERF_COUNT_HW_CPU_CYCLES]          = ARMV6_PERFCTR_CPU_CYCLES,
        [PERF_COUNT_HW_INSTRUCTIONS]        = ARMV6_PERFCTR_INSTR_EXEC,
        [PERF_COUNT_HW_CACHE_REFERENCES]    = HW_OP_UNSUPPORTED,
        [PERF_COUNT_HW_CACHE_MISSES]        = HW_OP_UNSUPPORTED,
        [PERF_COUNT_HW_BRANCH_INSTRUCTIONS] = ARMV6_PERFCTR_BR_EXEC,
        [PERF_COUNT_HW_BRANCH_MISSES]       = ARMV6_PERFCTR_BR_MISPREDICT,
        [PERF_COUNT_HW_BUS_CYCLES]          = HW_OP_UNSUPPORTED,
        [PERF_COUNT_HW_STALLED_CYCLES_FRONTEND] = ARMV6_PERFCTR_IBUF_STALL,
        [PERF_COUNT_HW_STALLED_CYCLES_BACKEND]  = ARMV6_PERFCTR_LSU_FULL_STALL,
};

static const unsigned armv6_perf_cache_map[PERF_COUNT_HW_CACHE_MAX]
                                          [PERF_COUNT_HW_CACHE_OP_MAX]
                                          [PERF_COUNT_HW_CACHE_RESULT_MAX] = {
        [C(L1D)] = {
                /*
                 * The performance counters don't differentiate between read
                 * and write accesses/misses so this isn't strictly correct,
                 * but it's the best we can do. Writes and reads get
                 * combined.
                 */
                [C(OP_READ)] = {
                        [C(RESULT_ACCESS)] = ARMV6_PERFCTR_DCACHE_ACCESS,
                        [C(RESULT_MISS)]   = ARMV6_PERFCTR_DCACHE_MISS,
                },
                [C(OP_WRITE)] = {
                        [C(RESULT_ACCESS)] = ARMV6_PERFCTR_DCACHE_ACCESS,
                        [C(RESULT_MISS)]   = ARMV6_PERFCTR_DCACHE_MISS,
                },
                [C(OP_PREFETCH)] = {
                        [C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED,
                        [C(RESULT_MISS)]   = CACHE_OP_UNSUPPORTED,
                },
        },
        [C(L1I)] = {
                [C(OP_READ)] = {
                        [C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED,
                        [C(RESULT_MISS)]   = ARMV6_PERFCTR_ICACHE_MISS,
                },
                [C(OP_WRITE)] = {
                        [C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED,
                        [C(RESULT_MISS)]   = ARMV6_PERFCTR_ICACHE_MISS,
                },
                [C(OP_PREFETCH)] = {
                        [C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED,
                        [C(RESULT_MISS)]   = CACHE_OP_UNSUPPORTED,
                },
        },
        [C(LL)] = {
                [C(OP_READ)] = {
                        [C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED,
                        [C(RESULT_MISS)]   = CACHE_OP_UNSUPPORTED,
                },
                [C(OP_WRITE)] = {
                        [C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED,
                        [C(RESULT_MISS)]   = CACHE_OP_UNSUPPORTED,
                },
                [C(OP_PREFETCH)] = {
                        [C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED,
                        [C(RESULT_MISS)]   = CACHE_OP_UNSUPPORTED,
                },
        },
        [C(DTLB)] = {
                /*
                 * The ARM performance counters can count micro DTLB misses,
                 * micro ITLB misses and main TLB misses. There isn't an event
                 * for TLB misses, so use the micro misses here and if users
                 * want the main TLB misses they can use a raw counter.
                 */
                [C(OP_READ)] = {
                        [C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED,
                        [C(RESULT_MISS)]   = ARMV6_PERFCTR_DTLB_MISS,
                },
                [C(OP_WRITE)] = {
                        [C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED,
                        [C(RESULT_MISS)]   = ARMV6_PERFCTR_DTLB_MISS,
                },
                [C(OP_PREFETCH)] = {
                        [C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED,
                        [C(RESULT_MISS)]   = CACHE_OP_UNSUPPORTED,
                },
        },
        [C(ITLB)] = {
                [C(OP_READ)] = {
                        [C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED,
                        [C(RESULT_MISS)]   = ARMV6_PERFCTR_ITLB_MISS,
                },
                [C(OP_WRITE)] = {
                        [C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED,
                        [C(RESULT_MISS)]   = ARMV6_PERFCTR_ITLB_MISS,
                },
                [C(OP_PREFETCH)] = {
                        [C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED,
                        [C(RESULT_MISS)]   = CACHE_OP_UNSUPPORTED,
                },
        },
        [C(BPU)] = {
                [C(OP_READ)] = {
                        [C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED,
                        [C(RESULT_MISS)]   = CACHE_OP_UNSUPPORTED,
                },
                [C(OP_WRITE)] = {
                        [C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED,
                        [C(RESULT_MISS)]   = CACHE_OP_UNSUPPORTED,
                },
                [C(OP_PREFETCH)] = {
                        [C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED,
                        [C(RESULT_MISS)]   = CACHE_OP_UNSUPPORTED,
                },
        },
        [C(NODE)] = {
                [C(OP_READ)] = {
                        [C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED,
                        [C(RESULT_MISS)]   = CACHE_OP_UNSUPPORTED,
                },
                [C(OP_WRITE)] = {
                        [C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED,
                        [C(RESULT_MISS)]   = CACHE_OP_UNSUPPORTED,
                },
                [C(OP_PREFETCH)] = {
                        [C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED,
                        [C(RESULT_MISS)]   = CACHE_OP_UNSUPPORTED,
                },
        },
};

enum armv6mpcore_perf_types {
        ARMV6MPCORE_PERFCTR_ICACHE_MISS     = 0x0,
        ARMV6MPCORE_PERFCTR_IBUF_STALL      = 0x1,
        ARMV6MPCORE_PERFCTR_DDEP_STALL      = 0x2,
        ARMV6MPCORE_PERFCTR_ITLB_MISS       = 0x3,
        ARMV6MPCORE_PERFCTR_DTLB_MISS       = 0x4,
        ARMV6MPCORE_PERFCTR_BR_EXEC         = 0x5,
        ARMV6MPCORE_PERFCTR_BR_NOTPREDICT   = 0x6,
        ARMV6MPCORE_PERFCTR_BR_MISPREDICT   = 0x7,
        ARMV6MPCORE_PERFCTR_INSTR_EXEC      = 0x8,
        ARMV6MPCORE_PERFCTR_DCACHE_RDACCESS = 0xA,
        ARMV6MPCORE_PERFCTR_DCACHE_RDMISS   = 0xB,
        ARMV6MPCORE_PERFCTR_DCACHE_WRACCESS = 0xC,
        ARMV6MPCORE_PERFCTR_DCACHE_WRMISS   = 0xD,
        ARMV6MPCORE_PERFCTR_DCACHE_EVICTION = 0xE,
        ARMV6MPCORE_PERFCTR_SW_PC_CHANGE    = 0xF,
        ARMV6MPCORE_PERFCTR_MAIN_TLB_MISS   = 0x10,
        ARMV6MPCORE_PERFCTR_EXPL_MEM_ACCESS = 0x11,
        ARMV6MPCORE_PERFCTR_LSU_FULL_STALL  = 0x12,
        ARMV6MPCORE_PERFCTR_WBUF_DRAINED    = 0x13,
        ARMV6MPCORE_PERFCTR_CPU_CYCLES      = 0xFF,
};

/*
 * The hardware events that we support. We do support cache operations but
 * we have Harvard caches and no way to combine instruction and data
 * accesses/misses in hardware.
 */
static const unsigned armv6mpcore_perf_map[PERF_COUNT_HW_MAX] = {
        [PERF_COUNT_HW_CPU_CYCLES]          = ARMV6MPCORE_PERFCTR_CPU_CYCLES,
        [PERF_COUNT_HW_INSTRUCTIONS]        = ARMV6MPCORE_PERFCTR_INSTR_EXEC,
        [PERF_COUNT_HW_CACHE_REFERENCES]    = HW_OP_UNSUPPORTED,
        [PERF_COUNT_HW_CACHE_MISSES]        = HW_OP_UNSUPPORTED,
        [PERF_COUNT_HW_BRANCH_INSTRUCTIONS] = ARMV6MPCORE_PERFCTR_BR_EXEC,
        [PERF_COUNT_HW_BRANCH_MISSES]       = ARMV6MPCORE_PERFCTR_BR_MISPREDICT,
        [PERF_COUNT_HW_BUS_CYCLES]          = HW_OP_UNSUPPORTED,
        [PERF_COUNT_HW_STALLED_CYCLES_FRONTEND] = ARMV6MPCORE_PERFCTR_IBUF_STALL,
        [PERF_COUNT_HW_STALLED_CYCLES_BACKEND]  = ARMV6MPCORE_PERFCTR_LSU_FULL_STALL,
};

static const unsigned armv6mpcore_perf_cache_map[PERF_COUNT_HW_CACHE_MAX]
                                        [PERF_COUNT_HW_CACHE_OP_MAX]
                                        [PERF_COUNT_HW_CACHE_RESULT_MAX] = {
        [C(L1D)] = {
                [C(OP_READ)] = {
                        [C(RESULT_ACCESS)] =
                                ARMV6MPCORE_PERFCTR_DCACHE_RDACCESS,
                        [C(RESULT_MISS)]   =
                                ARMV6MPCORE_PERFCTR_DCACHE_RDMISS,
                },
                [C(OP_WRITE)] = {
                        [C(RESULT_ACCESS)] =
                                ARMV6MPCORE_PERFCTR_DCACHE_WRACCESS,
                        [C(RESULT_MISS)]   =
                                ARMV6MPCORE_PERFCTR_DCACHE_WRMISS,
                },
                [C(OP_PREFETCH)] = {
                        [C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED,
                        [C(RESULT_MISS)]   = CACHE_OP_UNSUPPORTED,
                },
        },
        [C(L1I)] = {
                [C(OP_READ)] = {
                        [C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED,
                        [C(RESULT_MISS)]   = ARMV6MPCORE_PERFCTR_ICACHE_MISS,
                },
                [C(OP_WRITE)] = {
                        [C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED,
                        [C(RESULT_MISS)]   = ARMV6MPCORE_PERFCTR_ICACHE_MISS,
                },
                [C(OP_PREFETCH)] = {
                        [C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED,
                        [C(RESULT_MISS)]   = CACHE_OP_UNSUPPORTED,
                },
        },
        [C(LL)] = {
                [C(OP_READ)] = {
                        [C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED,
                        [C(RESULT_MISS)]   = CACHE_OP_UNSUPPORTED,
                },
                [C(OP_WRITE)] = {
                        [C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED,
                        [C(RESULT_MISS)]   = CACHE_OP_UNSUPPORTED,
                },
                [C(OP_PREFETCH)] = {
                        [C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED,
                        [C(RESULT_MISS)]   = CACHE_OP_UNSUPPORTED,
                },
        },
        [C(DTLB)] = {
                /*
                 * The ARM performance counters can count micro DTLB misses,
                 * micro ITLB misses and main TLB misses. There isn't an event
                 * for TLB misses, so use the micro misses here and if users
                 * want the main TLB misses they can use a raw counter.
                 */
                [C(OP_READ)] = {
                        [C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED,
                        [C(RESULT_MISS)]   = ARMV6MPCORE_PERFCTR_DTLB_MISS,
                },
                [C(OP_WRITE)] = {
                        [C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED,
                        [C(RESULT_MISS)]   = ARMV6MPCORE_PERFCTR_DTLB_MISS,
                },
                [C(OP_PREFETCH)] = {
                        [C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED,
                        [C(RESULT_MISS)]   = CACHE_OP_UNSUPPORTED,
                },
        },
        [C(ITLB)] = {
                [C(OP_READ)] = {
                        [C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED,
                        [C(RESULT_MISS)]   = ARMV6MPCORE_PERFCTR_ITLB_MISS,
                },
                [C(OP_WRITE)] = {
                        [C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED,
                        [C(RESULT_MISS)]   = ARMV6MPCORE_PERFCTR_ITLB_MISS,
                },
                [C(OP_PREFETCH)] = {
                        [C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED,
                        [C(RESULT_MISS)]   = CACHE_OP_UNSUPPORTED,
                },
        },
        [C(BPU)] = {
                [C(OP_READ)] = {
                        [C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED,
                        [C(RESULT_MISS)]   = CACHE_OP_UNSUPPORTED,
                },
                [C(OP_WRITE)] = {
                        [C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED,
                        [C(RESULT_MISS)]   = CACHE_OP_UNSUPPORTED,
                },
                [C(OP_PREFETCH)] = {
                        [C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED,
                        [C(RESULT_MISS)]   = CACHE_OP_UNSUPPORTED,
                },
        },
        [C(NODE)] = {
                [C(OP_READ)] = {
                        [C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED,
                        [C(RESULT_MISS)]   = CACHE_OP_UNSUPPORTED,
                },
                [C(OP_WRITE)] = {
                        [C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED,
                        [C(RESULT_MISS)]   = CACHE_OP_UNSUPPORTED,
                },
                [C(OP_PREFETCH)] = {
                        [C(RESULT_ACCESS)] = CACHE_OP_UNSUPPORTED,
                        [C(RESULT_MISS)]   = CACHE_OP_UNSUPPORTED,
                },
        },
};

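/*
 * The control and counter registers live in coprocessor 15 and are accessed
 * with MRC/MCR through CRn=c15, CRm=c12: opcode2 0 selects the control
 * register (PMCR), 1 the cycle counter and 2/3 the two event counters.
 */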
static inline unsigned long
armv6_pmcr_read(void)
{
        u32 val;
        asm volatile("mrc p15, 0, %0, c15, c12, 0" : "=r"(val));
        return val;
}

static inline void
armv6_pmcr_write(unsigned long val)
{
        asm volatile("mcr p15, 0, %0, c15, c12, 0" : : "r"(val));
}

#define ARMV6_PMCR_ENABLE               (1 << 0)
#define ARMV6_PMCR_CTR01_RESET          (1 << 1)
#define ARMV6_PMCR_CCOUNT_RESET         (1 << 2)
#define ARMV6_PMCR_CCOUNT_DIV           (1 << 3)
#define ARMV6_PMCR_COUNT0_IEN           (1 << 4)
#define ARMV6_PMCR_COUNT1_IEN           (1 << 5)
#define ARMV6_PMCR_CCOUNT_IEN           (1 << 6)
#define ARMV6_PMCR_COUNT0_OVERFLOW      (1 << 8)
#define ARMV6_PMCR_COUNT1_OVERFLOW      (1 << 9)
#define ARMV6_PMCR_CCOUNT_OVERFLOW      (1 << 10)
#define ARMV6_PMCR_EVT_COUNT0_SHIFT     20
#define ARMV6_PMCR_EVT_COUNT0_MASK      (0xFF << ARMV6_PMCR_EVT_COUNT0_SHIFT)
#define ARMV6_PMCR_EVT_COUNT1_SHIFT     12
#define ARMV6_PMCR_EVT_COUNT1_MASK      (0xFF << ARMV6_PMCR_EVT_COUNT1_SHIFT)
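
/*
 * PMCR layout, as implied by the definitions above:
 *
 *   [27:20]  event number for counter 0
 *   [19:12]  event number for counter 1
 *   [10:8]   overflow flags (CCNT, counter 1, counter 0)
 *   [6:4]    interrupt enables (CCNT, counter 1, counter 0)
 *   [3]      cycle counter divider
 *   [2]      cycle counter reset
 *   [1]      event counter reset
 *   [0]      global enable
 */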

#define ARMV6_PMCR_OVERFLOWED_MASK \
        (ARMV6_PMCR_COUNT0_OVERFLOW | ARMV6_PMCR_COUNT1_OVERFLOW | \
         ARMV6_PMCR_CCOUNT_OVERFLOW)

static inline int
armv6_pmcr_has_overflowed(unsigned long pmcr)
{
        return pmcr & ARMV6_PMCR_OVERFLOWED_MASK;
}

static inline int
armv6_pmcr_counter_has_overflowed(unsigned long pmcr,
                                  enum armv6_counters counter)
{
        int ret = 0;

        if (ARMV6_CYCLE_COUNTER == counter)
                ret = pmcr & ARMV6_PMCR_CCOUNT_OVERFLOW;
        else if (ARMV6_COUNTER0 == counter)
                ret = pmcr & ARMV6_PMCR_COUNT0_OVERFLOW;
        else if (ARMV6_COUNTER1 == counter)
                ret = pmcr & ARMV6_PMCR_COUNT1_OVERFLOW;
        else
                WARN_ONCE(1, "invalid counter number (%d)\n", counter);

        return ret;
}

static inline u32
armv6pmu_read_counter(int counter)
{
        unsigned long value = 0;

        if (ARMV6_CYCLE_COUNTER == counter)
                asm volatile("mrc p15, 0, %0, c15, c12, 1" : "=r"(value));
        else if (ARMV6_COUNTER0 == counter)
                asm volatile("mrc p15, 0, %0, c15, c12, 2" : "=r"(value));
        else if (ARMV6_COUNTER1 == counter)
                asm volatile("mrc p15, 0, %0, c15, c12, 3" : "=r"(value));
        else
                WARN_ONCE(1, "invalid counter number (%d)\n", counter);

        return value;
}

static inline void
armv6pmu_write_counter(int counter,
                       u32 value)
{
        if (ARMV6_CYCLE_COUNTER == counter)
                asm volatile("mcr p15, 0, %0, c15, c12, 1" : : "r"(value));
        else if (ARMV6_COUNTER0 == counter)
                asm volatile("mcr p15, 0, %0, c15, c12, 2" : : "r"(value));
        else if (ARMV6_COUNTER1 == counter)
                asm volatile("mcr p15, 0, %0, c15, c12, 3" : : "r"(value));
        else
                WARN_ONCE(1, "invalid counter number (%d)\n", counter);
}

static void
armv6pmu_enable_event(struct hw_perf_event *hwc,
                      int idx)
{
        unsigned long val, mask, evt, flags;
        struct pmu_hw_events *events = cpu_pmu->get_hw_events();

        if (ARMV6_CYCLE_COUNTER == idx) {
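                /*
                 * The cycle counter has no event selection field; only its
                 * interrupt enable needs to be managed here.
                 */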
                mask = 0;
                evt  = ARMV6_PMCR_CCOUNT_IEN;
        } else if (ARMV6_COUNTER0 == idx) {
                mask = ARMV6_PMCR_EVT_COUNT0_MASK;
                evt  = (hwc->config_base << ARMV6_PMCR_EVT_COUNT0_SHIFT) |
                       ARMV6_PMCR_COUNT0_IEN;
        } else if (ARMV6_COUNTER1 == idx) {
                mask = ARMV6_PMCR_EVT_COUNT1_MASK;
                evt  = (hwc->config_base << ARMV6_PMCR_EVT_COUNT1_SHIFT) |
                       ARMV6_PMCR_COUNT1_IEN;
        } else {
                WARN_ONCE(1, "invalid counter number (%d)\n", idx);
                return;
        }

        /*
         * Mask out the current event and set the counter to count the event
         * that we're interested in.
         */
        raw_spin_lock_irqsave(&events->pmu_lock, flags);
        val = armv6_pmcr_read();
        val &= ~mask;
        val |= evt;
        armv6_pmcr_write(val);
        raw_spin_unlock_irqrestore(&events->pmu_lock, flags);
}

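/*
 * A counter is treated as active when its interrupt enable bit is set;
 * both disable_event implementations below clear that bit.
 */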
static int counter_is_active(unsigned long pmcr, int idx)
{
        unsigned long mask = 0;
        if (idx == ARMV6_CYCLE_COUNTER)
                mask = ARMV6_PMCR_CCOUNT_IEN;
        else if (idx == ARMV6_COUNTER0)
                mask = ARMV6_PMCR_COUNT0_IEN;
        else if (idx == ARMV6_COUNTER1)
                mask = ARMV6_PMCR_COUNT1_IEN;

        if (mask)
                return pmcr & mask;

        WARN_ONCE(1, "invalid counter number (%d)\n", idx);
        return 0;
}

static irqreturn_t
armv6pmu_handle_irq(int irq_num,
                    void *dev)
{
        unsigned long pmcr = armv6_pmcr_read();
        struct perf_sample_data data;
        struct pmu_hw_events *cpuc;
        struct pt_regs *regs;
        int idx;

        if (!armv6_pmcr_has_overflowed(pmcr))
                return IRQ_NONE;

        regs = get_irq_regs();

        /*
         * The interrupts are cleared by writing the overflow flags back to
         * the control register. All of the other bits don't have any effect
         * if they are rewritten, so write the whole value back.
         */
        armv6_pmcr_write(pmcr);

        perf_sample_data_init(&data, 0);

        cpuc = &__get_cpu_var(cpu_hw_events);
        for (idx = 0; idx < cpu_pmu->num_events; ++idx) {
                struct perf_event *event = cpuc->events[idx];
                struct hw_perf_event *hwc;

                if (!counter_is_active(pmcr, idx))
                        continue;

                /*
                 * We have a single interrupt for all counters. Check that
                 * each counter has overflowed before we process it.
                 */
                if (!armv6_pmcr_counter_has_overflowed(pmcr, idx))
                        continue;

                hwc = &event->hw;
                armpmu_event_update(event, hwc, idx);
                data.period = event->hw.last_period;
                if (!armpmu_event_set_period(event, hwc, idx))
                        continue;

                if (perf_event_overflow(event, &data, regs))
                        cpu_pmu->disable(hwc, idx);
        }

        /*
         * Handle the pending perf events.
         *
         * Note: this call *must* be run with interrupts disabled. For
         * platforms that can have the PMU interrupts raised as an NMI, this
         * will not work.
         */
        irq_work_run();

        return IRQ_HANDLED;
}

static void
armv6pmu_start(void)
{
        unsigned long flags, val;
        struct pmu_hw_events *events = cpu_pmu->get_hw_events();

        raw_spin_lock_irqsave(&events->pmu_lock, flags);
        val = armv6_pmcr_read();
        val |= ARMV6_PMCR_ENABLE;
        armv6_pmcr_write(val);
        raw_spin_unlock_irqrestore(&events->pmu_lock, flags);
}

static void
armv6pmu_stop(void)
{
        unsigned long flags, val;
        struct pmu_hw_events *events = cpu_pmu->get_hw_events();

        raw_spin_lock_irqsave(&events->pmu_lock, flags);
        val = armv6_pmcr_read();
        val &= ~ARMV6_PMCR_ENABLE;
        armv6_pmcr_write(val);
        raw_spin_unlock_irqrestore(&events->pmu_lock, flags);
}

static int
armv6pmu_get_event_idx(struct pmu_hw_events *cpuc,
                       struct hw_perf_event *event)
{
        /* Always place a cycle counter into the cycle counter. */
        if (ARMV6_PERFCTR_CPU_CYCLES == event->config_base) {
                if (test_and_set_bit(ARMV6_CYCLE_COUNTER, cpuc->used_mask))
                        return -EAGAIN;

                return ARMV6_CYCLE_COUNTER;
        } else {
                /*
                 * For anything other than a cycle counter, try to use
                 * counter0 and counter1.
                 */
                if (!test_and_set_bit(ARMV6_COUNTER1, cpuc->used_mask))
                        return ARMV6_COUNTER1;

                if (!test_and_set_bit(ARMV6_COUNTER0, cpuc->used_mask))
                        return ARMV6_COUNTER0;

                /* The counters are all in use. */
                return -EAGAIN;
        }
}

static void
armv6pmu_disable_event(struct hw_perf_event *hwc,
                       int idx)
{
        unsigned long val, mask, evt, flags;
        struct pmu_hw_events *events = cpu_pmu->get_hw_events();

        if (ARMV6_CYCLE_COUNTER == idx) {
                mask = ARMV6_PMCR_CCOUNT_IEN;
                evt  = 0;
        } else if (ARMV6_COUNTER0 == idx) {
                mask = ARMV6_PMCR_COUNT0_IEN | ARMV6_PMCR_EVT_COUNT0_MASK;
                evt  = ARMV6_PERFCTR_NOP << ARMV6_PMCR_EVT_COUNT0_SHIFT;
        } else if (ARMV6_COUNTER1 == idx) {
                mask = ARMV6_PMCR_COUNT1_IEN | ARMV6_PMCR_EVT_COUNT1_MASK;
                evt  = ARMV6_PERFCTR_NOP << ARMV6_PMCR_EVT_COUNT1_SHIFT;
        } else {
                WARN_ONCE(1, "invalid counter number (%d)\n", idx);
                return;
        }

        /*
         * Mask out the current event and set the counter to count the number
         * of ETM bus signal assertion cycles. The external reporting should
         * be disabled and so this should never increment.
         */
        raw_spin_lock_irqsave(&events->pmu_lock, flags);
        val = armv6_pmcr_read();
        val &= ~mask;
        val |= evt;
        armv6_pmcr_write(val);
        raw_spin_unlock_irqrestore(&events->pmu_lock, flags);
}

static void
armv6mpcore_pmu_disable_event(struct hw_perf_event *hwc,
                              int idx)
{
        unsigned long val, mask, flags, evt = 0;
        struct pmu_hw_events *events = cpu_pmu->get_hw_events();

        if (ARMV6_CYCLE_COUNTER == idx) {
                mask = ARMV6_PMCR_CCOUNT_IEN;
        } else if (ARMV6_COUNTER0 == idx) {
                mask = ARMV6_PMCR_COUNT0_IEN;
        } else if (ARMV6_COUNTER1 == idx) {
                mask = ARMV6_PMCR_COUNT1_IEN;
        } else {
                WARN_ONCE(1, "invalid counter number (%d)\n", idx);
                return;
        }

        /*
         * Unlike UP ARMv6, we don't have a way of stopping the counters. We
         * simply disable the interrupt reporting.
         */
        raw_spin_lock_irqsave(&events->pmu_lock, flags);
        val = armv6_pmcr_read();
        val &= ~mask;
        val |= evt;
        armv6_pmcr_write(val);
        raw_spin_unlock_irqrestore(&events->pmu_lock, flags);
}

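/*
 * The 0xFF passed to map_cpu_event() below is the raw event mask; it matches
 * the width of the 8-bit PMCR EvtCountN fields defined above.
 */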
static int armv6_map_event(struct perf_event *event)
{
        return map_cpu_event(event, &armv6_perf_map,
                             &armv6_perf_cache_map, 0xFF);
}

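/*
 * num_events covers the cycle counter plus the two event counters; all
 * three are 32 bits wide, hence the max_period of 2^32 - 1.
 */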
static struct arm_pmu armv6pmu = {
        .id             = ARM_PERF_PMU_ID_V6,
        .name           = "v6",
        .handle_irq     = armv6pmu_handle_irq,
        .enable         = armv6pmu_enable_event,
        .disable        = armv6pmu_disable_event,
        .read_counter   = armv6pmu_read_counter,
        .write_counter  = armv6pmu_write_counter,
        .get_event_idx  = armv6pmu_get_event_idx,
        .start          = armv6pmu_start,
        .stop           = armv6pmu_stop,
        .map_event      = armv6_map_event,
        .num_events     = 3,
        .max_period     = (1LLU << 32) - 1,
};

static struct arm_pmu *__init armv6pmu_init(void)
{
        return &armv6pmu;
}

/*
 * ARMv6mpcore is almost identical to single core ARMv6 with the exception
 * that some of the events have different enumerations and that there is no
 * *hack* to stop the programmable counters. To stop the counters we simply
 * disable the interrupt reporting and update the event. When unthrottling we
 * reset the period and enable the interrupt reporting.
 */

static int armv6mpcore_map_event(struct perf_event *event)
{
        return map_cpu_event(event, &armv6mpcore_perf_map,
                             &armv6mpcore_perf_cache_map, 0xFF);
}

static struct arm_pmu armv6mpcore_pmu = {
        .id             = ARM_PERF_PMU_ID_V6MP,
        .name           = "v6mpcore",
        .handle_irq     = armv6pmu_handle_irq,
        .enable         = armv6pmu_enable_event,
        .disable        = armv6mpcore_pmu_disable_event,
        .read_counter   = armv6pmu_read_counter,
        .write_counter  = armv6pmu_write_counter,
        .get_event_idx  = armv6pmu_get_event_idx,
        .start          = armv6pmu_start,
        .stop           = armv6pmu_stop,
        .map_event      = armv6mpcore_map_event,
        .num_events     = 3,
        .max_period     = (1LLU << 32) - 1,
};

static struct arm_pmu *__init armv6mpcore_pmu_init(void)
{
        return &armv6mpcore_pmu;
}
#else
static struct arm_pmu *__init armv6pmu_init(void)
{
        return NULL;
}

static struct arm_pmu *__init armv6mpcore_pmu_init(void)
{
        return NULL;
}
#endif  /* CONFIG_CPU_V6 || CONFIG_CPU_V6K */