/* Performance event support for sparc64.
 *
 * Copyright (C) 2009 David S. Miller <davem@davemloft.net>
 *
 * This code is based almost entirely upon the x86 perf event
 * code, which is:
 *
 *  Copyright (C) 2008 Thomas Gleixner <tglx@linutronix.de>
 *  Copyright (C) 2008-2009 Red Hat, Inc., Ingo Molnar
 *  Copyright (C) 2009 Jaswinder Singh Rajput
 *  Copyright (C) 2009 Advanced Micro Devices, Inc., Robert Richter
 *  Copyright (C) 2008-2009 Red Hat, Inc., Peter Zijlstra <pzijlstr@redhat.com>
 */

#include <linux/perf_event.h>
#include <linux/kprobes.h>
#include <linux/kernel.h>
#include <linux/kdebug.h>
#include <linux/mutex.h>

#include <asm/cpudata.h>
#include <asm/atomic.h>
#include <asm/nmi.h>
#include <asm/pcr.h>

/* Sparc64 chips have two performance counters, 32-bits each, with
 * overflow interrupts generated on transition from 0xffffffff to 0.
 * The counters are accessed in one go using a 64-bit register.
 *
 * Both counters are controlled using a single control register.  The
 * only way to stop all sampling is to clear all of the context (user,
 * supervisor, hypervisor) sampling enable bits.  But these bits apply
 * to both counters, thus the two counters can't be enabled/disabled
 * individually.
 *
 * The control register has two event fields, one for each of the two
 * counters.  It's thus nearly impossible to have one counter going
 * while keeping the other one stopped.  Therefore it is possible to
 * get overflow interrupts for counters not currently "in use" and
 * that condition must be checked in the overflow interrupt handler.
 *
 * So we use a hack, in that we program inactive counters with the
 * "sw_count0" and "sw_count1" events.  These count how many times
 * the instruction "sethi %hi(0xfc000), %g0" is executed.  It's an
 * unusual way to encode a NOP and therefore will not trigger in
 * normal code.
 */
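/* A sketch of the resulting %pic layout (upper counter in bits 63:32,
 * lower counter in bits 31:0), as read_pmc()/write_pmc() below assume:
 *
 *   63                 32 31                  0
 *  +---------------------+---------------------+
 *  |  PIC_UPPER counter  |  PIC_LOWER counter  |
 *  +---------------------+---------------------+
 */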

#define MAX_HWEVENTS            2
#define MAX_PERIOD              ((1UL << 32) - 1)

#define PIC_UPPER_INDEX         0
#define PIC_LOWER_INDEX         1

struct cpu_hw_events {
        struct perf_event *events[MAX_HWEVENTS];
        unsigned long used_mask[BITS_TO_LONGS(MAX_HWEVENTS)];
        unsigned long active_mask[BITS_TO_LONGS(MAX_HWEVENTS)];
        int enabled;
};
DEFINE_PER_CPU(struct cpu_hw_events, cpu_hw_events) = { .enabled = 1, };

struct perf_event_map {
        u16 encoding;
        u8 pic_mask;
#define PIC_NONE        0x00
#define PIC_UPPER       0x01
#define PIC_LOWER       0x02
};

static unsigned long perf_event_encode(const struct perf_event_map *pmap)
{
        return ((unsigned long) pmap->encoding << 16) | pmap->pic_mask;
}

static void perf_event_decode(unsigned long val, u16 *enc, u8 *msk)
{
        *msk = val & 0xff;
        *enc = val >> 16;
}
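/* Layout packed by the two helpers above: the 16-bit event encoding
 * occupies bits 16-31 and the PIC availability mask the low byte, so
 * e.g. encoding 0x09 with PIC_LOWER packs to 0x00090002.
 */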

#define C(x) PERF_COUNT_HW_CACHE_##x

#define CACHE_OP_UNSUPPORTED    0xfffe
#define CACHE_OP_NONSENSE       0xffff

typedef struct perf_event_map cache_map_t
                        [PERF_COUNT_HW_CACHE_MAX]
                        [PERF_COUNT_HW_CACHE_OP_MAX]
                        [PERF_COUNT_HW_CACHE_RESULT_MAX];

struct sparc_pmu {
        const struct perf_event_map *(*event_map)(int);
        const cache_map_t *cache_map;
        int max_events;
        int upper_shift;
        int lower_shift;
        int event_mask;
        int hv_bit;
        int irq_bit;
        int upper_nop;
        int lower_nop;
};

static const struct perf_event_map ultra3_perfmon_event_map[] = {
        [PERF_COUNT_HW_CPU_CYCLES] = { 0x0000, PIC_UPPER | PIC_LOWER },
        [PERF_COUNT_HW_INSTRUCTIONS] = { 0x0001, PIC_UPPER | PIC_LOWER },
        [PERF_COUNT_HW_CACHE_REFERENCES] = { 0x0009, PIC_LOWER },
        [PERF_COUNT_HW_CACHE_MISSES] = { 0x0009, PIC_UPPER },
};

static const struct perf_event_map *ultra3_event_map(int event_id)
{
        return &ultra3_perfmon_event_map[event_id];
}

static const cache_map_t ultra3_cache_map = {
[C(L1D)] = {
        [C(OP_READ)] = {
                [C(RESULT_ACCESS)] = { 0x09, PIC_LOWER, },
                [C(RESULT_MISS)] = { 0x09, PIC_UPPER, },
        },
        [C(OP_WRITE)] = {
                [C(RESULT_ACCESS)] = { 0x0a, PIC_LOWER },
                [C(RESULT_MISS)] = { 0x0a, PIC_UPPER },
        },
        [C(OP_PREFETCH)] = {
                [C(RESULT_ACCESS)] = { CACHE_OP_UNSUPPORTED },
                [C(RESULT_MISS)] = { CACHE_OP_UNSUPPORTED },
        },
},
[C(L1I)] = {
        [C(OP_READ)] = {
                [C(RESULT_ACCESS)] = { 0x09, PIC_LOWER, },
                [C(RESULT_MISS)] = { 0x09, PIC_UPPER, },
        },
        [C(OP_WRITE)] = {
                [C(RESULT_ACCESS)] = { CACHE_OP_NONSENSE },
                [C(RESULT_MISS)] = { CACHE_OP_NONSENSE },
        },
        [C(OP_PREFETCH)] = {
                [C(RESULT_ACCESS)] = { CACHE_OP_UNSUPPORTED },
                [C(RESULT_MISS)] = { CACHE_OP_UNSUPPORTED },
        },
},
[C(LL)] = {
        [C(OP_READ)] = {
                [C(RESULT_ACCESS)] = { 0x0c, PIC_LOWER, },
                [C(RESULT_MISS)] = { 0x0c, PIC_UPPER, },
        },
        [C(OP_WRITE)] = {
                [C(RESULT_ACCESS)] = { 0x0c, PIC_LOWER },
                [C(RESULT_MISS)] = { 0x0c, PIC_UPPER },
        },
        [C(OP_PREFETCH)] = {
                [C(RESULT_ACCESS)] = { CACHE_OP_UNSUPPORTED },
                [C(RESULT_MISS)] = { CACHE_OP_UNSUPPORTED },
        },
},
[C(DTLB)] = {
        [C(OP_READ)] = {
                [C(RESULT_ACCESS)] = { CACHE_OP_UNSUPPORTED },
                [C(RESULT_MISS)] = { 0x12, PIC_UPPER, },
        },
        [C(OP_WRITE)] = {
                [C(RESULT_ACCESS)] = { CACHE_OP_UNSUPPORTED },
                [C(RESULT_MISS)] = { CACHE_OP_UNSUPPORTED },
        },
        [C(OP_PREFETCH)] = {
                [C(RESULT_ACCESS)] = { CACHE_OP_UNSUPPORTED },
                [C(RESULT_MISS)] = { CACHE_OP_UNSUPPORTED },
        },
},
[C(ITLB)] = {
        [C(OP_READ)] = {
                [C(RESULT_ACCESS)] = { CACHE_OP_UNSUPPORTED },
                [C(RESULT_MISS)] = { 0x11, PIC_UPPER, },
        },
        [C(OP_WRITE)] = {
                [C(RESULT_ACCESS)] = { CACHE_OP_UNSUPPORTED },
                [C(RESULT_MISS)] = { CACHE_OP_UNSUPPORTED },
        },
        [C(OP_PREFETCH)] = {
                [C(RESULT_ACCESS)] = { CACHE_OP_UNSUPPORTED },
                [C(RESULT_MISS)] = { CACHE_OP_UNSUPPORTED },
        },
},
[C(BPU)] = {
        [C(OP_READ)] = {
                [C(RESULT_ACCESS)] = { CACHE_OP_UNSUPPORTED },
                [C(RESULT_MISS)] = { CACHE_OP_UNSUPPORTED },
        },
        [C(OP_WRITE)] = {
                [C(RESULT_ACCESS)] = { CACHE_OP_UNSUPPORTED },
                [C(RESULT_MISS)] = { CACHE_OP_UNSUPPORTED },
        },
        [C(OP_PREFETCH)] = {
                [C(RESULT_ACCESS)] = { CACHE_OP_UNSUPPORTED },
                [C(RESULT_MISS)] = { CACHE_OP_UNSUPPORTED },
        },
},
};

static const struct sparc_pmu ultra3_pmu = {
        .event_map      = ultra3_event_map,
        .cache_map      = &ultra3_cache_map,
        .max_events     = ARRAY_SIZE(ultra3_perfmon_event_map),
        .upper_shift    = 11,
        .lower_shift    = 4,
        .event_mask     = 0x3f,
        .upper_nop      = 0x1c,
        .lower_nop      = 0x14,
};

/* Niagara1 is very limited.  The upper PIC is hard-locked to count
 * only instructions, so it is free running which creates all kinds of
 * problems.  Some hardware designs make one wonder if the creator
 * even looked at how this stuff gets used by software.
 */
static const struct perf_event_map niagara1_perfmon_event_map[] = {
        [PERF_COUNT_HW_CPU_CYCLES] = { 0x00, PIC_UPPER },
        [PERF_COUNT_HW_INSTRUCTIONS] = { 0x00, PIC_UPPER },
        [PERF_COUNT_HW_CACHE_REFERENCES] = { 0, PIC_NONE },
        [PERF_COUNT_HW_CACHE_MISSES] = { 0x03, PIC_LOWER },
};

static const struct perf_event_map *niagara1_event_map(int event_id)
{
        return &niagara1_perfmon_event_map[event_id];
}

static const cache_map_t niagara1_cache_map = {
[C(L1D)] = {
        [C(OP_READ)] = {
                [C(RESULT_ACCESS)] = { CACHE_OP_UNSUPPORTED },
                [C(RESULT_MISS)] = { 0x03, PIC_LOWER, },
        },
        [C(OP_WRITE)] = {
                [C(RESULT_ACCESS)] = { CACHE_OP_UNSUPPORTED },
                [C(RESULT_MISS)] = { 0x03, PIC_LOWER, },
        },
        [C(OP_PREFETCH)] = {
                [C(RESULT_ACCESS)] = { CACHE_OP_UNSUPPORTED },
                [C(RESULT_MISS)] = { CACHE_OP_UNSUPPORTED },
        },
},
[C(L1I)] = {
        [C(OP_READ)] = {
                [C(RESULT_ACCESS)] = { 0x00, PIC_UPPER },
                [C(RESULT_MISS)] = { 0x02, PIC_LOWER, },
        },
        [C(OP_WRITE)] = {
                [C(RESULT_ACCESS)] = { CACHE_OP_NONSENSE },
                [C(RESULT_MISS)] = { CACHE_OP_NONSENSE },
        },
        [C(OP_PREFETCH)] = {
                [C(RESULT_ACCESS)] = { CACHE_OP_UNSUPPORTED },
                [C(RESULT_MISS)] = { CACHE_OP_UNSUPPORTED },
        },
},
[C(LL)] = {
        [C(OP_READ)] = {
                [C(RESULT_ACCESS)] = { CACHE_OP_UNSUPPORTED },
                [C(RESULT_MISS)] = { 0x07, PIC_LOWER, },
        },
        [C(OP_WRITE)] = {
                [C(RESULT_ACCESS)] = { CACHE_OP_UNSUPPORTED },
                [C(RESULT_MISS)] = { 0x07, PIC_LOWER, },
        },
        [C(OP_PREFETCH)] = {
                [C(RESULT_ACCESS)] = { CACHE_OP_UNSUPPORTED },
                [C(RESULT_MISS)] = { CACHE_OP_UNSUPPORTED },
        },
},
[C(DTLB)] = {
        [C(OP_READ)] = {
                [C(RESULT_ACCESS)] = { CACHE_OP_UNSUPPORTED },
                [C(RESULT_MISS)] = { 0x05, PIC_LOWER, },
        },
        [C(OP_WRITE)] = {
                [C(RESULT_ACCESS)] = { CACHE_OP_UNSUPPORTED },
                [C(RESULT_MISS)] = { CACHE_OP_UNSUPPORTED },
        },
        [C(OP_PREFETCH)] = {
                [C(RESULT_ACCESS)] = { CACHE_OP_UNSUPPORTED },
                [C(RESULT_MISS)] = { CACHE_OP_UNSUPPORTED },
        },
},
[C(ITLB)] = {
        [C(OP_READ)] = {
                [C(RESULT_ACCESS)] = { CACHE_OP_UNSUPPORTED },
                [C(RESULT_MISS)] = { 0x04, PIC_LOWER, },
        },
        [C(OP_WRITE)] = {
                [C(RESULT_ACCESS)] = { CACHE_OP_UNSUPPORTED },
                [C(RESULT_MISS)] = { CACHE_OP_UNSUPPORTED },
        },
        [C(OP_PREFETCH)] = {
                [C(RESULT_ACCESS)] = { CACHE_OP_UNSUPPORTED },
                [C(RESULT_MISS)] = { CACHE_OP_UNSUPPORTED },
        },
},
[C(BPU)] = {
        [C(OP_READ)] = {
                [C(RESULT_ACCESS)] = { CACHE_OP_UNSUPPORTED },
                [C(RESULT_MISS)] = { CACHE_OP_UNSUPPORTED },
        },
        [C(OP_WRITE)] = {
                [C(RESULT_ACCESS)] = { CACHE_OP_UNSUPPORTED },
                [C(RESULT_MISS)] = { CACHE_OP_UNSUPPORTED },
        },
        [C(OP_PREFETCH)] = {
                [C(RESULT_ACCESS)] = { CACHE_OP_UNSUPPORTED },
                [C(RESULT_MISS)] = { CACHE_OP_UNSUPPORTED },
        },
},
};

static const struct sparc_pmu niagara1_pmu = {
        .event_map      = niagara1_event_map,
        .cache_map      = &niagara1_cache_map,
        .max_events     = ARRAY_SIZE(niagara1_perfmon_event_map),
        .upper_shift    = 0,
        .lower_shift    = 4,
        .event_mask     = 0x7,
        .upper_nop      = 0x0,
        .lower_nop      = 0x0,
};

static const struct perf_event_map niagara2_perfmon_event_map[] = {
        [PERF_COUNT_HW_CPU_CYCLES] = { 0x02ff, PIC_UPPER | PIC_LOWER },
        [PERF_COUNT_HW_INSTRUCTIONS] = { 0x02ff, PIC_UPPER | PIC_LOWER },
        [PERF_COUNT_HW_CACHE_REFERENCES] = { 0x0208, PIC_UPPER | PIC_LOWER },
        [PERF_COUNT_HW_CACHE_MISSES] = { 0x0302, PIC_UPPER | PIC_LOWER },
        [PERF_COUNT_HW_BRANCH_INSTRUCTIONS] = { 0x0201, PIC_UPPER | PIC_LOWER },
        [PERF_COUNT_HW_BRANCH_MISSES] = { 0x0202, PIC_UPPER | PIC_LOWER },
};

static const struct perf_event_map *niagara2_event_map(int event_id)
{
        return &niagara2_perfmon_event_map[event_id];
}

static const cache_map_t niagara2_cache_map = {
[C(L1D)] = {
        [C(OP_READ)] = {
                [C(RESULT_ACCESS)] = { 0x0208, PIC_UPPER | PIC_LOWER, },
                [C(RESULT_MISS)] = { 0x0302, PIC_UPPER | PIC_LOWER, },
        },
        [C(OP_WRITE)] = {
                [C(RESULT_ACCESS)] = { 0x0210, PIC_UPPER | PIC_LOWER, },
                [C(RESULT_MISS)] = { 0x0302, PIC_UPPER | PIC_LOWER, },
        },
        [C(OP_PREFETCH)] = {
                [C(RESULT_ACCESS)] = { CACHE_OP_UNSUPPORTED },
                [C(RESULT_MISS)] = { CACHE_OP_UNSUPPORTED },
        },
},
[C(L1I)] = {
        [C(OP_READ)] = {
                [C(RESULT_ACCESS)] = { 0x02ff, PIC_UPPER | PIC_LOWER, },
                [C(RESULT_MISS)] = { 0x0301, PIC_UPPER | PIC_LOWER, },
        },
        [C(OP_WRITE)] = {
                [C(RESULT_ACCESS)] = { CACHE_OP_NONSENSE },
                [C(RESULT_MISS)] = { CACHE_OP_NONSENSE },
        },
        [C(OP_PREFETCH)] = {
                [C(RESULT_ACCESS)] = { CACHE_OP_UNSUPPORTED },
                [C(RESULT_MISS)] = { CACHE_OP_UNSUPPORTED },
        },
},
[C(LL)] = {
        [C(OP_READ)] = {
                [C(RESULT_ACCESS)] = { 0x0208, PIC_UPPER | PIC_LOWER, },
                [C(RESULT_MISS)] = { 0x0330, PIC_UPPER | PIC_LOWER, },
        },
        [C(OP_WRITE)] = {
                [C(RESULT_ACCESS)] = { 0x0210, PIC_UPPER | PIC_LOWER, },
                [C(RESULT_MISS)] = { 0x0320, PIC_UPPER | PIC_LOWER, },
        },
        [C(OP_PREFETCH)] = {
                [C(RESULT_ACCESS)] = { CACHE_OP_UNSUPPORTED },
                [C(RESULT_MISS)] = { CACHE_OP_UNSUPPORTED },
        },
},
[C(DTLB)] = {
        [C(OP_READ)] = {
                [C(RESULT_ACCESS)] = { CACHE_OP_UNSUPPORTED },
                [C(RESULT_MISS)] = { 0x0b08, PIC_UPPER | PIC_LOWER, },
        },
        [C(OP_WRITE)] = {
                [C(RESULT_ACCESS)] = { CACHE_OP_UNSUPPORTED },
                [C(RESULT_MISS)] = { CACHE_OP_UNSUPPORTED },
        },
        [C(OP_PREFETCH)] = {
                [C(RESULT_ACCESS)] = { CACHE_OP_UNSUPPORTED },
                [C(RESULT_MISS)] = { CACHE_OP_UNSUPPORTED },
        },
},
[C(ITLB)] = {
        [C(OP_READ)] = {
                [C(RESULT_ACCESS)] = { CACHE_OP_UNSUPPORTED },
                [C(RESULT_MISS)] = { 0x0b04, PIC_UPPER | PIC_LOWER, },
        },
        [C(OP_WRITE)] = {
                [C(RESULT_ACCESS)] = { CACHE_OP_UNSUPPORTED },
                [C(RESULT_MISS)] = { CACHE_OP_UNSUPPORTED },
        },
        [C(OP_PREFETCH)] = {
                [C(RESULT_ACCESS)] = { CACHE_OP_UNSUPPORTED },
                [C(RESULT_MISS)] = { CACHE_OP_UNSUPPORTED },
        },
},
[C(BPU)] = {
        [C(OP_READ)] = {
                [C(RESULT_ACCESS)] = { CACHE_OP_UNSUPPORTED },
                [C(RESULT_MISS)] = { CACHE_OP_UNSUPPORTED },
        },
        [C(OP_WRITE)] = {
                [C(RESULT_ACCESS)] = { CACHE_OP_UNSUPPORTED },
                [C(RESULT_MISS)] = { CACHE_OP_UNSUPPORTED },
        },
        [C(OP_PREFETCH)] = {
                [C(RESULT_ACCESS)] = { CACHE_OP_UNSUPPORTED },
                [C(RESULT_MISS)] = { CACHE_OP_UNSUPPORTED },
        },
},
};

static const struct sparc_pmu niagara2_pmu = {
        .event_map      = niagara2_event_map,
        .cache_map      = &niagara2_cache_map,
        .max_events     = ARRAY_SIZE(niagara2_perfmon_event_map),
        .upper_shift    = 19,
        .lower_shift    = 6,
        .event_mask     = 0xfff,
        .hv_bit         = 0x8,
        .irq_bit        = 0x30,
        .upper_nop      = 0x220,
        .lower_nop      = 0x220,
};

static const struct sparc_pmu *sparc_pmu __read_mostly;

static u64 event_encoding(u64 event_id, int idx)
{
        if (idx == PIC_UPPER_INDEX)
                event_id <<= sparc_pmu->upper_shift;
        else
                event_id <<= sparc_pmu->lower_shift;
        return event_id;
}

static u64 mask_for_index(int idx)
{
        return event_encoding(sparc_pmu->event_mask, idx);
}

static u64 nop_for_index(int idx)
{
        return event_encoding(idx == PIC_UPPER_INDEX ?
                              sparc_pmu->upper_nop :
                              sparc_pmu->lower_nop, idx);
}
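/* Worked example, assuming the ultra3 settings above (event_mask 0x3f,
 * upper_shift 11, lower_shift 4):
 *
 *   mask_for_index(PIC_UPPER_INDEX) == 0x3f << 11 == 0x1f800
 *   mask_for_index(PIC_LOWER_INDEX) == 0x3f <<  4 == 0x003f0
 *
 * Each mask selects exactly one counter's event field in the PCR.
 */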

static inline void sparc_pmu_enable_event(struct hw_perf_event *hwc,
                                          int idx)
{
        u64 val, mask = mask_for_index(idx);

        val = pcr_ops->read();
        pcr_ops->write((val & ~mask) | hwc->config);
}

static inline void sparc_pmu_disable_event(struct hw_perf_event *hwc,
                                           int idx)
{
        u64 mask = mask_for_index(idx);
        u64 nop = nop_for_index(idx);
        u64 val = pcr_ops->read();

        pcr_ops->write((val & ~mask) | nop);
}

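/* Re-enable sampling by OR'ing each active event's enable bits
 * (hwc->config_base) back into the PCR; see __hw_perf_event_init()
 * below for how 'config' and 'config_base' divide that work.
 */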
void hw_perf_enable(void)
{
        struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
        u64 val;
        int i;

        if (cpuc->enabled)
                return;

        cpuc->enabled = 1;
        barrier();

        val = pcr_ops->read();

        for (i = 0; i < MAX_HWEVENTS; i++) {
                struct perf_event *cp = cpuc->events[i];
                struct hw_perf_event *hwc;

                if (!cp)
                        continue;
                hwc = &cp->hw;
                val |= hwc->config_base;
        }

        pcr_ops->write(val);
}

void hw_perf_disable(void)
{
        struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
        u64 val;

        if (!cpuc->enabled)
                return;

        cpuc->enabled = 0;

        val = pcr_ops->read();
        val &= ~(PCR_UTRACE | PCR_STRACE |
                 sparc_pmu->hv_bit | sparc_pmu->irq_bit);
        pcr_ops->write(val);
}

static u32 read_pmc(int idx)
{
        u64 val;

        read_pic(val);
        if (idx == PIC_UPPER_INDEX)
                val >>= 32;

        return val & 0xffffffff;
}

static void write_pmc(int idx, u64 val)
{
        u64 shift, mask, pic;

        shift = 0;
        if (idx == PIC_UPPER_INDEX)
                shift = 32;

        mask = ((u64) 0xffffffff) << shift;
        val <<= shift;

        read_pic(pic);
        pic &= ~mask;
        pic |= val;
        write_pic(pic);
}

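/* sparc_perf_event_set_period() below programs the counter with the
 * negated period, so that the hardware overflows (0xffffffff -> 0)
 * after exactly "left" more events.  Worked example: left == 1000
 * writes 0xfffffc18 (2^32 - 1000) into the 32-bit counter.
 */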
static int sparc_perf_event_set_period(struct perf_event *event,
                                       struct hw_perf_event *hwc, int idx)
{
        s64 left = atomic64_read(&hwc->period_left);
        s64 period = hwc->sample_period;
        int ret = 0;

        if (unlikely(left <= -period)) {
                left = period;
                atomic64_set(&hwc->period_left, left);
                hwc->last_period = period;
                ret = 1;
        }

        if (unlikely(left <= 0)) {
                left += period;
                atomic64_set(&hwc->period_left, left);
                hwc->last_period = period;
                ret = 1;
        }
        if (left > MAX_PERIOD)
                left = MAX_PERIOD;

        atomic64_set(&hwc->prev_count, (u64)-left);

        write_pmc(idx, (u64)(-left) & 0xffffffff);

        perf_event_update_userpage(event);

        return ret;
}

static int sparc_pmu_enable(struct perf_event *event)
{
        struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
        struct hw_perf_event *hwc = &event->hw;
        int idx = hwc->idx;

        if (test_and_set_bit(idx, cpuc->used_mask))
                return -EAGAIN;

        sparc_pmu_disable_event(hwc, idx);

        cpuc->events[idx] = event;
        set_bit(idx, cpuc->active_mask);

        sparc_perf_event_set_period(event, hwc, idx);
        sparc_pmu_enable_event(hwc, idx);
        perf_event_update_userpage(event);
        return 0;
}

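/* A worked example of the shift-by-32 trick used below, which computes
 * the counter delta modulo 2^32 so that a 32-bit wrap is harmless:
 * with prev == 0xfffffff0 and new == 0x00000010 after a wrap,
 * (new << 32) - (prev << 32) == 0x20 << 32 (mod 2^64), and the
 * arithmetic right shift by 32 recovers delta == 0x20.
 */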
static u64 sparc_perf_event_update(struct perf_event *event,
                                   struct hw_perf_event *hwc, int idx)
{
        int shift = 64 - 32;
        u64 prev_raw_count, new_raw_count;
        s64 delta;

again:
        prev_raw_count = atomic64_read(&hwc->prev_count);
        new_raw_count = read_pmc(idx);

        if (atomic64_cmpxchg(&hwc->prev_count, prev_raw_count,
                             new_raw_count) != prev_raw_count)
                goto again;

        delta = (new_raw_count << shift) - (prev_raw_count << shift);
        delta >>= shift;

        atomic64_add(delta, &event->count);
        atomic64_sub(delta, &hwc->period_left);

        return new_raw_count;
}

static void sparc_pmu_disable(struct perf_event *event)
{
        struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
        struct hw_perf_event *hwc = &event->hw;
        int idx = hwc->idx;

        clear_bit(idx, cpuc->active_mask);
        sparc_pmu_disable_event(hwc, idx);

        barrier();

        sparc_perf_event_update(event, hwc, idx);
        cpuc->events[idx] = NULL;
        clear_bit(idx, cpuc->used_mask);

        perf_event_update_userpage(event);
}

static void sparc_pmu_read(struct perf_event *event)
{
        struct hw_perf_event *hwc = &event->hw;
        sparc_perf_event_update(event, hwc, hwc->idx);
}

static void sparc_pmu_unthrottle(struct perf_event *event)
{
        struct hw_perf_event *hwc = &event->hw;
        sparc_pmu_enable_event(hwc, hwc->idx);
}

static atomic_t active_events = ATOMIC_INIT(0);
static DEFINE_MUTEX(pmc_grab_mutex);

void perf_event_grab_pmc(void)
{
        if (atomic_inc_not_zero(&active_events))
                return;

        mutex_lock(&pmc_grab_mutex);
        if (atomic_read(&active_events) == 0) {
                if (atomic_read(&nmi_active) > 0) {
                        on_each_cpu(stop_nmi_watchdog, NULL, 1);
                        BUG_ON(atomic_read(&nmi_active) != 0);
                }
                atomic_inc(&active_events);
        }
        mutex_unlock(&pmc_grab_mutex);
}

void perf_event_release_pmc(void)
{
        if (atomic_dec_and_mutex_lock(&active_events, &pmc_grab_mutex)) {
                if (atomic_read(&nmi_active) == 0)
                        on_each_cpu(start_nmi_watchdog, NULL, 1);
                mutex_unlock(&pmc_grab_mutex);
        }
}

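/* PERF_TYPE_HW_CACHE events arrive packed per the generic perf ABI:
 *
 *   attr->config == cache_type | (cache_op << 8) | (cache_result << 16)
 *
 * e.g. L1D read misses == C(L1D) | (C(OP_READ) << 8) | (C(RESULT_MISS) << 16).
 */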
static const struct perf_event_map *sparc_map_cache_event(u64 config)
{
        unsigned int cache_type, cache_op, cache_result;
        const struct perf_event_map *pmap;

        if (!sparc_pmu->cache_map)
                return ERR_PTR(-ENOENT);

        cache_type = (config >> 0) & 0xff;
        if (cache_type >= PERF_COUNT_HW_CACHE_MAX)
                return ERR_PTR(-EINVAL);

        cache_op = (config >> 8) & 0xff;
        if (cache_op >= PERF_COUNT_HW_CACHE_OP_MAX)
                return ERR_PTR(-EINVAL);

        cache_result = (config >> 16) & 0xff;
        if (cache_result >= PERF_COUNT_HW_CACHE_RESULT_MAX)
                return ERR_PTR(-EINVAL);

        pmap = &((*sparc_pmu->cache_map)[cache_type][cache_op][cache_result]);

        if (pmap->encoding == CACHE_OP_UNSUPPORTED)
                return ERR_PTR(-ENOENT);

        if (pmap->encoding == CACHE_OP_NONSENSE)
                return ERR_PTR(-EINVAL);

        return pmap;
}

static void hw_perf_event_destroy(struct perf_event *event)
{
        perf_event_release_pmc();
}

/* Make sure all events can be scheduled into the hardware at
 * the same time.  This is simplified by the fact that we only
 * need to support 2 simultaneous HW events.
 */
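/* For example: two events both restricted to PIC_UPPER conflict,
 * while one event fixed to PIC_UPPER plus one that can run on either
 * counter is schedulable.
 */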
static int sparc_check_constraints(unsigned long *events, int n_ev)
{
        if (n_ev <= perf_max_events) {
                u8 msk1, msk2;
                u16 dummy;

                if (n_ev == 1)
                        return 0;
                BUG_ON(n_ev != 2);
                perf_event_decode(events[0], &dummy, &msk1);
                perf_event_decode(events[1], &dummy, &msk2);

                /* If both events can go on any counter, OK. */
                if (msk1 == (PIC_UPPER | PIC_LOWER) &&
                    msk2 == (PIC_UPPER | PIC_LOWER))
                        return 0;

                /* If one event is limited to a specific counter,
                 * and the other can go on both, OK.
                 */
                if ((msk1 == PIC_UPPER || msk1 == PIC_LOWER) &&
                    msk2 == (PIC_UPPER | PIC_LOWER))
                        return 0;
                if ((msk2 == PIC_UPPER || msk2 == PIC_LOWER) &&
                    msk1 == (PIC_UPPER | PIC_LOWER))
                        return 0;

                /* If the events are fixed to different counters, OK. */
                if ((msk1 == PIC_UPPER && msk2 == PIC_LOWER) ||
                    (msk1 == PIC_LOWER && msk2 == PIC_UPPER))
                        return 0;

                /* Otherwise, there is a conflict. */
        }

        return -1;
}

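/* Since a single PCR controls the user/supervisor/hypervisor enable
 * bits for both counters (see the comment at the top of this file),
 * events scheduled together must agree on their exclude_* settings.
 */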
static int check_excludes(struct perf_event **evts, int n_prev, int n_new)
{
        int eu = 0, ek = 0, eh = 0;
        struct perf_event *event;
        int i, n, first;

        n = n_prev + n_new;
        if (n <= 1)
                return 0;

        first = 1;
        for (i = 0; i < n; i++) {
                event = evts[i];
                if (first) {
                        eu = event->attr.exclude_user;
                        ek = event->attr.exclude_kernel;
                        eh = event->attr.exclude_hv;
                        first = 0;
                } else if (event->attr.exclude_user != eu ||
                           event->attr.exclude_kernel != ek ||
                           event->attr.exclude_hv != eh) {
                        return -EAGAIN;
                }
        }

        return 0;
}

static int collect_events(struct perf_event *group, int max_count,
                          struct perf_event *evts[], unsigned long *events)
{
        struct perf_event *event;
        int n = 0;

        if (!is_software_event(group)) {
                if (n >= max_count)
                        return -1;
                evts[n] = group;
                events[n++] = group->hw.event_base;
        }
        list_for_each_entry(event, &group->sibling_list, group_entry) {
                if (!is_software_event(event) &&
                    event->state != PERF_EVENT_STATE_OFF) {
                        if (n >= max_count)
                                return -1;
                        evts[n] = event;
                        events[n++] = event->hw.event_base;
                }
        }
        return n;
}

static int __hw_perf_event_init(struct perf_event *event)
{
        struct perf_event_attr *attr = &event->attr;
        struct perf_event *evts[MAX_HWEVENTS];
        struct hw_perf_event *hwc = &event->hw;
        unsigned long events[MAX_HWEVENTS];
        const struct perf_event_map *pmap;
        u64 enc;
        int n;

        if (atomic_read(&nmi_active) < 0)
                return -ENODEV;

        if (attr->type == PERF_TYPE_HARDWARE) {
                if (attr->config >= sparc_pmu->max_events)
                        return -EINVAL;
                pmap = sparc_pmu->event_map(attr->config);
        } else if (attr->type == PERF_TYPE_HW_CACHE) {
                pmap = sparc_map_cache_event(attr->config);
                if (IS_ERR(pmap))
                        return PTR_ERR(pmap);
        } else
                return -EOPNOTSUPP;

        /* We save the enable bits in the config_base.  So to
         * turn off sampling just write 'config', and to enable
         * things write 'config | config_base'.
         */
        hwc->config_base = sparc_pmu->irq_bit;
        if (!attr->exclude_user)
                hwc->config_base |= PCR_UTRACE;
        if (!attr->exclude_kernel)
                hwc->config_base |= PCR_STRACE;
        if (!attr->exclude_hv)
                hwc->config_base |= sparc_pmu->hv_bit;

        hwc->event_base = perf_event_encode(pmap);

        enc = pmap->encoding;

        n = 0;
        if (event->group_leader != event) {
                n = collect_events(event->group_leader,
                                   perf_max_events - 1,
                                   evts, events);
                if (n < 0)
                        return -EINVAL;
        }
        events[n] = hwc->event_base;
        evts[n] = event;

        if (check_excludes(evts, n, 1))
                return -EINVAL;

        if (sparc_check_constraints(events, n + 1))
                return -EINVAL;

        /* Try to do all error checking before this point, as unwinding
         * state after grabbing the PMC is difficult.
         */
        perf_event_grab_pmc();
        event->destroy = hw_perf_event_destroy;

        if (!hwc->sample_period) {
                hwc->sample_period = MAX_PERIOD;
                hwc->last_period = hwc->sample_period;
                atomic64_set(&hwc->period_left, hwc->sample_period);
        }

        if (pmap->pic_mask & PIC_UPPER) {
                hwc->idx = PIC_UPPER_INDEX;
                enc <<= sparc_pmu->upper_shift;
        } else {
                hwc->idx = PIC_LOWER_INDEX;
                enc <<= sparc_pmu->lower_shift;
        }

        hwc->config |= enc;
        return 0;
}

static const struct pmu pmu = {
        .enable         = sparc_pmu_enable,
        .disable        = sparc_pmu_disable,
        .read           = sparc_pmu_read,
        .unthrottle     = sparc_pmu_unthrottle,
};

const struct pmu *hw_perf_event_init(struct perf_event *event)
{
        int err = __hw_perf_event_init(event);

        if (err)
                return ERR_PTR(err);
        return &pmu;
}

void perf_event_print_debug(void)
{
        unsigned long flags;
        u64 pcr, pic;
        int cpu;

        if (!sparc_pmu)
                return;

        local_irq_save(flags);

        cpu = smp_processor_id();

        pcr = pcr_ops->read();
        read_pic(pic);

        pr_info("\n");
        pr_info("CPU#%d: PCR[%016llx] PIC[%016llx]\n",
                cpu, pcr, pic);

        local_irq_restore(flags);
}

static int __kprobes perf_event_nmi_handler(struct notifier_block *self,
                                            unsigned long cmd, void *__args)
{
        struct die_args *args = __args;
        struct perf_sample_data data;
        struct cpu_hw_events *cpuc;
        struct pt_regs *regs;
        int idx;

        if (!atomic_read(&active_events))
                return NOTIFY_DONE;

        switch (cmd) {
        case DIE_NMI:
                break;

        default:
                return NOTIFY_DONE;
        }

        regs = args->regs;

        data.addr = 0;

        cpuc = &__get_cpu_var(cpu_hw_events);
        for (idx = 0; idx < MAX_HWEVENTS; idx++) {
                struct perf_event *event = cpuc->events[idx];
                struct hw_perf_event *hwc;
                u64 val;

                if (!test_bit(idx, cpuc->active_mask))
                        continue;
                hwc = &event->hw;
                val = sparc_perf_event_update(event, hwc, idx);
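                /* Counters are programmed with the negated period (see
                 * sparc_perf_event_set_period), so a value with bit 31
                 * still set is taken to mean this counter has not yet
                 * wrapped past zero: no overflow to handle here.
                 */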
                if (val & (1ULL << 31))
                        continue;

                data.period = event->hw.last_period;
                if (!sparc_perf_event_set_period(event, hwc, idx))
                        continue;

                if (perf_event_overflow(event, 1, &data, regs))
                        sparc_pmu_disable_event(hwc, idx);
        }

        return NOTIFY_STOP;
}

static __read_mostly struct notifier_block perf_event_nmi_notifier = {
        .notifier_call          = perf_event_nmi_handler,
};

static bool __init supported_pmu(void)
{
        if (!strcmp(sparc_pmu_type, "ultra3") ||
            !strcmp(sparc_pmu_type, "ultra3+") ||
            !strcmp(sparc_pmu_type, "ultra3i") ||
            !strcmp(sparc_pmu_type, "ultra4+")) {
                sparc_pmu = &ultra3_pmu;
                return true;
        }
        if (!strcmp(sparc_pmu_type, "niagara")) {
                sparc_pmu = &niagara1_pmu;
                return true;
        }
        if (!strcmp(sparc_pmu_type, "niagara2")) {
                sparc_pmu = &niagara2_pmu;
                return true;
        }
        return false;
}

void __init init_hw_perf_events(void)
{
        pr_info("Performance events: ");

        if (!supported_pmu()) {
                pr_cont("No support for PMU type '%s'\n", sparc_pmu_type);
                return;
        }

        pr_cont("Supported PMU type is '%s'\n", sparc_pmu_type);

        /* All sparc64 PMUs currently have 2 events.  But this simple
         * driver only supports one active event at a time.
         */
        perf_max_events = 1;

        register_die_notifier(&perf_event_nmi_notifier);
}