/* Performance counter support for sparc64.
 *
 * Copyright (C) 2009 David S. Miller <davem@davemloft.net>
 *
 * This code is based almost entirely upon the x86 perf counter
 * code, which is:
 *
 * Copyright (C) 2008 Thomas Gleixner <tglx@linutronix.de>
 * Copyright (C) 2008-2009 Red Hat, Inc., Ingo Molnar
 * Copyright (C) 2009 Jaswinder Singh Rajput
 * Copyright (C) 2009 Advanced Micro Devices, Inc., Robert Richter
 * Copyright (C) 2008-2009 Red Hat, Inc., Peter Zijlstra <pzijlstr@redhat.com>
 */

#include <linux/perf_counter.h>
#include <linux/kprobes.h>
#include <linux/kernel.h>
#include <linux/kdebug.h>
#include <linux/mutex.h>

#include <asm/cpudata.h>
#include <asm/atomic.h>
#include <asm/nmi.h>
#include <asm/pcr.h>

/* Sparc64 chips have two performance counters, 32-bits each, with
 * overflow interrupts generated on transition from 0xffffffff to 0.
 * The counters are accessed in one go using a single 64-bit register.
 *
 * Both counters are controlled using a single control register.  The
 * only way to stop all sampling is to clear all of the context (user,
 * supervisor, hypervisor) sampling enable bits.  But these bits apply
 * to both counters, thus the two counters can't be enabled/disabled
 * individually.
 *
 * The control register has two event fields, one for each of the two
 * counters.  It's thus nearly impossible to have one counter going
 * while keeping the other one stopped.  Therefore it is possible to
 * get overflow interrupts for counters not currently "in use" and
 * that condition must be checked in the overflow interrupt handler.
 *
 * So we use a hack, in that we program inactive counters with the
 * "sw_count0" and "sw_count1" events.  These count how many times
 * the instruction "sethi %hi(0xfc000), %g0" is executed.  It's an
 * unusual way to encode a NOP and therefore will not trigger in
 * normal code.
 */

#define MAX_HWCOUNTERS		2
#define MAX_PERIOD		((1UL << 32) - 1)

#define PIC_UPPER_INDEX		0
#define PIC_LOWER_INDEX		1

#define PIC_UPPER_NOP		0x1c
#define PIC_LOWER_NOP		0x14

struct cpu_hw_counters {
	struct perf_counter	*counters[MAX_HWCOUNTERS];
	unsigned long		used_mask[BITS_TO_LONGS(MAX_HWCOUNTERS)];
	unsigned long		active_mask[BITS_TO_LONGS(MAX_HWCOUNTERS)];
	int			enabled;
};
static DEFINE_PER_CPU(struct cpu_hw_counters, cpu_hw_counters) = { .enabled = 1, };

struct perf_event_map {
	u16 encoding;
	u8 pic_mask;
#define PIC_NONE	0x00
#define PIC_UPPER	0x01
#define PIC_LOWER	0x02
};

struct sparc_pmu {
	const struct perf_event_map	*(*event_map)(int);
	int				max_events;
	int				upper_shift;
	int				lower_shift;
	int				event_mask;
};

static const struct perf_event_map ultra3i_perfmon_event_map[] = {
	[PERF_COUNT_HW_CPU_CYCLES] = { 0x0000, PIC_UPPER | PIC_LOWER },
	[PERF_COUNT_HW_INSTRUCTIONS] = { 0x0001, PIC_UPPER | PIC_LOWER },
	[PERF_COUNT_HW_CACHE_REFERENCES] = { 0x0009, PIC_LOWER },
	[PERF_COUNT_HW_CACHE_MISSES] = { 0x0009, PIC_UPPER },
};

static const struct perf_event_map *ultra3i_event_map(int event)
{
	return &ultra3i_perfmon_event_map[event];
}

static const struct sparc_pmu ultra3i_pmu = {
	.event_map = ultra3i_event_map,
	.max_events = ARRAY_SIZE(ultra3i_perfmon_event_map),
	.upper_shift = 11,
	.lower_shift = 4,
	.event_mask = 0x3f,
};

static const struct sparc_pmu *sparc_pmu __read_mostly;

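/* Shift an event encoding into the PCR field for the given counter.
 * With the ultra3i values above (upper_shift == 11), for example, the
 * cache-miss encoding 0x0009 lands in the upper counter's event field
 * as 0x0009 << 11 == 0x4800.
 */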
static u64 event_encoding(u64 event, int idx)
{
	if (idx == PIC_UPPER_INDEX)
		event <<= sparc_pmu->upper_shift;
	else
		event <<= sparc_pmu->lower_shift;
	return event;
}

static u64 mask_for_index(int idx)
{
	return event_encoding(sparc_pmu->event_mask, idx);
}

static u64 nop_for_index(int idx)
{
	return event_encoding(idx == PIC_UPPER_INDEX ?
			      PIC_UPPER_NOP : PIC_LOWER_NOP, idx);
}

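/* Enabling a counter installs its event encoding into the PCR field
 * for that counter.  "Disabling" cannot actually stop the hardware
 * (see the comment at the top of this file), so it instead swaps in
 * the sw_count NOP event, which should never count in normal code.
 */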
static inline void sparc_pmu_enable_counter(struct hw_perf_counter *hwc,
					    int idx)
{
	u64 val, mask = mask_for_index(idx);

	val = pcr_ops->read();
	pcr_ops->write((val & ~mask) | hwc->config);
}

static inline void sparc_pmu_disable_counter(struct hw_perf_counter *hwc,
					     int idx)
{
	u64 mask = mask_for_index(idx);
	u64 nop = nop_for_index(idx);
	u64 val = pcr_ops->read();

	pcr_ops->write((val & ~mask) | nop);
}

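/* Start and stop the whole PMU.  hw_perf_enable() ORs the trace-enable
 * bits of every installed counter (saved in hwc->config_base) into the
 * PCR; hw_perf_disable() clears the user and supervisor trace bits,
 * which stops sampling on both counters at once.
 */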
void hw_perf_enable(void)
{
	struct cpu_hw_counters *cpuc = &__get_cpu_var(cpu_hw_counters);
	u64 val;
	int i;

	if (cpuc->enabled)
		return;

	cpuc->enabled = 1;
	barrier();

	val = pcr_ops->read();

	for (i = 0; i < MAX_HWCOUNTERS; i++) {
		struct perf_counter *cp = cpuc->counters[i];
		struct hw_perf_counter *hwc;

		if (!cp)
			continue;
		hwc = &cp->hw;
		val |= hwc->config_base;
	}

	pcr_ops->write(val);
}

void hw_perf_disable(void)
{
	struct cpu_hw_counters *cpuc = &__get_cpu_var(cpu_hw_counters);
	u64 val;

	if (!cpuc->enabled)
		return;

	cpuc->enabled = 0;

	val = pcr_ops->read();
	val &= ~(PCR_UTRACE | PCR_STRACE);
	pcr_ops->write(val);
}

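/* Both 32-bit counters live in the single %pic register, the upper
 * counter in bits 63:32 and the lower in bits 31:0.  A read selects
 * the appropriate half; a write must read-modify-write so that the
 * other counter's value is preserved.
 */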
static u32 read_pmc(int idx)
{
	u64 val;

	read_pic(val);
	if (idx == PIC_UPPER_INDEX)
		val >>= 32;

	return val & 0xffffffff;
}

static void write_pmc(int idx, u64 val)
{
	u64 shift, mask, pic;

	shift = 0;
	if (idx == PIC_UPPER_INDEX)
		shift = 32;

	mask = ((u64) 0xffffffff) << shift;
	val <<= shift;

	read_pic(pic);
	pic &= ~mask;
	pic |= val;
	write_pic(pic);
}

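/* Program the counter so it overflows after "left" more events: since
 * the overflow interrupt fires on the 0xffffffff -> 0 transition, we
 * write the two's complement of the remaining period.  For example,
 * left == 1000 writes 0xfffffc18, which wraps after exactly 1000
 * increments.
 */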
static int sparc_perf_counter_set_period(struct perf_counter *counter,
					 struct hw_perf_counter *hwc, int idx)
{
	s64 left = atomic64_read(&hwc->period_left);
	s64 period = hwc->sample_period;
	int ret = 0;

	if (unlikely(left <= -period)) {
		left = period;
		atomic64_set(&hwc->period_left, left);
		hwc->last_period = period;
		ret = 1;
	}

	if (unlikely(left <= 0)) {
		left += period;
		atomic64_set(&hwc->period_left, left);
		hwc->last_period = period;
		ret = 1;
	}
	if (left > MAX_PERIOD)
		left = MAX_PERIOD;

	atomic64_set(&hwc->prev_count, (u64)-left);

	write_pmc(idx, (u64)(-left) & 0xffffffff);

	perf_counter_update_userpage(counter);

	return ret;
}

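/* Install a counter onto the hardware.  Each event maps to one fixed
 * counter slot (hwc->idx was chosen at init time), so if that slot is
 * already claimed in used_mask all we can do is return -EAGAIN.
 */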
static int sparc_pmu_enable(struct perf_counter *counter)
{
	struct cpu_hw_counters *cpuc = &__get_cpu_var(cpu_hw_counters);
	struct hw_perf_counter *hwc = &counter->hw;
	int idx = hwc->idx;

	if (test_and_set_bit(idx, cpuc->used_mask))
		return -EAGAIN;

	sparc_pmu_disable_counter(hwc, idx);

	cpuc->counters[idx] = counter;
	set_bit(idx, cpuc->active_mask);

	sparc_perf_counter_set_period(counter, hwc, idx);
	sparc_pmu_enable_counter(hwc, idx);
	perf_counter_update_userpage(counter);
	return 0;
}

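/* Fold the hardware count into the generic counter.  The cmpxchg loop
 * guards against racing with an NMI on this CPU; shifting both values
 * up by 32 bits and back down sign-extends the difference so that a
 * wrapped counter (e.g. 0xfffffff0 -> 0x00000010) still yields the
 * correct positive delta of 0x20.
 */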
static u64 sparc_perf_counter_update(struct perf_counter *counter,
				     struct hw_perf_counter *hwc, int idx)
{
	int shift = 64 - 32;
	u64 prev_raw_count, new_raw_count;
	s64 delta;

again:
	prev_raw_count = atomic64_read(&hwc->prev_count);
	new_raw_count = read_pmc(idx);

	if (atomic64_cmpxchg(&hwc->prev_count, prev_raw_count,
			     new_raw_count) != prev_raw_count)
		goto again;

	delta = (new_raw_count << shift) - (prev_raw_count << shift);
	delta >>= shift;

	atomic64_add(delta, &counter->count);
	atomic64_sub(delta, &hwc->period_left);

	return new_raw_count;
}

static void sparc_pmu_disable(struct perf_counter *counter)
{
	struct cpu_hw_counters *cpuc = &__get_cpu_var(cpu_hw_counters);
	struct hw_perf_counter *hwc = &counter->hw;
	int idx = hwc->idx;

	clear_bit(idx, cpuc->active_mask);
	sparc_pmu_disable_counter(hwc, idx);

	barrier();

	sparc_perf_counter_update(counter, hwc, idx);
	cpuc->counters[idx] = NULL;
	clear_bit(idx, cpuc->used_mask);

	perf_counter_update_userpage(counter);
}

static void sparc_pmu_read(struct perf_counter *counter)
{
	struct hw_perf_counter *hwc = &counter->hw;
	sparc_perf_counter_update(counter, hwc, hwc->idx);
}

static void sparc_pmu_unthrottle(struct perf_counter *counter)
{
	struct hw_perf_counter *hwc = &counter->hw;
	sparc_pmu_enable_counter(hwc, hwc->idx);
}

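/* The performance counter hardware is shared with the NMI watchdog:
 * the first counter to become active must steal the PMC away from the
 * watchdog, and the last one released hands it back.
 */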
static atomic_t active_counters = ATOMIC_INIT(0);
static DEFINE_MUTEX(pmc_grab_mutex);

void perf_counter_grab_pmc(void)
{
	if (atomic_inc_not_zero(&active_counters))
		return;

	mutex_lock(&pmc_grab_mutex);
	if (atomic_read(&active_counters) == 0) {
		if (atomic_read(&nmi_active) > 0) {
			on_each_cpu(stop_nmi_watchdog, NULL, 1);
			BUG_ON(atomic_read(&nmi_active) != 0);
		}
		atomic_inc(&active_counters);
	}
	mutex_unlock(&pmc_grab_mutex);
}

void perf_counter_release_pmc(void)
{
	if (atomic_dec_and_mutex_lock(&active_counters, &pmc_grab_mutex)) {
		if (atomic_read(&nmi_active) == 0)
			on_each_cpu(start_nmi_watchdog, NULL, 1);
		mutex_unlock(&pmc_grab_mutex);
	}
}

static void hw_perf_counter_destroy(struct perf_counter *counter)
{
	perf_counter_release_pmc();
}

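/* Translate a generic hardware event into our PCR configuration: map
 * the event code through the PMU's event table, pick the upper or
 * lower counter from the pic_mask, and shift the encoding into place.
 */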
static int __hw_perf_counter_init(struct perf_counter *counter)
{
	struct perf_counter_attr *attr = &counter->attr;
	struct hw_perf_counter *hwc = &counter->hw;
	const struct perf_event_map *pmap;
	u64 enc;

	if (atomic_read(&nmi_active) < 0)
		return -ENODEV;

	if (attr->type != PERF_TYPE_HARDWARE)
		return -EOPNOTSUPP;

	if (attr->config >= sparc_pmu->max_events)
		return -EINVAL;

	perf_counter_grab_pmc();
	counter->destroy = hw_perf_counter_destroy;

	/* We save the enable bits in the config_base.  So to
	 * turn off sampling just write 'config', and to enable
	 * things write 'config | config_base'.
	 */
	hwc->config_base = 0;
	if (!attr->exclude_user)
		hwc->config_base |= PCR_UTRACE;
	if (!attr->exclude_kernel)
		hwc->config_base |= PCR_STRACE;

	if (!hwc->sample_period) {
		hwc->sample_period = MAX_PERIOD;
		hwc->last_period = hwc->sample_period;
		atomic64_set(&hwc->period_left, hwc->sample_period);
	}

	pmap = sparc_pmu->event_map(attr->config);

	enc = pmap->encoding;
	if (pmap->pic_mask & PIC_UPPER) {
		hwc->idx = PIC_UPPER_INDEX;
		enc <<= sparc_pmu->upper_shift;
	} else {
		hwc->idx = PIC_LOWER_INDEX;
		enc <<= sparc_pmu->lower_shift;
	}

	hwc->config |= enc;
	return 0;
}

static const struct pmu pmu = {
	.enable		= sparc_pmu_enable,
	.disable	= sparc_pmu_disable,
	.read		= sparc_pmu_read,
	.unthrottle	= sparc_pmu_unthrottle,
};

const struct pmu *hw_perf_counter_init(struct perf_counter *counter)
{
	int err = __hw_perf_counter_init(counter);

	if (err)
		return ERR_PTR(err);
	return &pmu;
}

void perf_counter_print_debug(void)
{
	unsigned long flags;
	u64 pcr, pic;
	int cpu;

	if (!sparc_pmu)
		return;

	local_irq_save(flags);

	cpu = smp_processor_id();

	pcr = pcr_ops->read();
	read_pic(pic);

	pr_info("\n");
	pr_info("CPU#%d: PCR[%016llx] PIC[%016llx]\n",
		cpu, pcr, pic);

	local_irq_restore(flags);
}

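/* Overflow NMI handler.  An idle counter can still tick (it is merely
 * running the NOP event), so only counters in active_mask are
 * serviced.  Bit 31 of the freshly read value tells us whether the
 * counter actually wrapped: after an overflow it restarts counting up
 * from near zero, so bit 31 will be clear.
 */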
static int __kprobes perf_counter_nmi_handler(struct notifier_block *self,
					      unsigned long cmd, void *__args)
{
	struct die_args *args = __args;
	struct perf_sample_data data;
	struct cpu_hw_counters *cpuc;
	struct pt_regs *regs;
	int idx;

	if (!atomic_read(&active_counters))
		return NOTIFY_DONE;

	switch (cmd) {
	case DIE_NMI:
		break;

	default:
		return NOTIFY_DONE;
	}

	regs = args->regs;

	data.regs = regs;
	data.addr = 0;

	cpuc = &__get_cpu_var(cpu_hw_counters);
	for (idx = 0; idx < MAX_HWCOUNTERS; idx++) {
		struct perf_counter *counter = cpuc->counters[idx];
		struct hw_perf_counter *hwc;
		u64 val;

		if (!test_bit(idx, cpuc->active_mask))
			continue;
		hwc = &counter->hw;
		val = sparc_perf_counter_update(counter, hwc, idx);
		if (val & (1ULL << 31))
			continue;

		data.period = counter->hw.last_period;
		if (!sparc_perf_counter_set_period(counter, hwc, idx))
			continue;

		if (perf_counter_overflow(counter, 1, &data))
			sparc_pmu_disable_counter(hwc, idx);
	}

	return NOTIFY_STOP;
}

static __read_mostly struct notifier_block perf_counter_nmi_notifier = {
	.notifier_call		= perf_counter_nmi_handler,
};

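/* Match sparc_pmu_type against the PMUs this driver knows about.
 * Only the UltraSPARC-IIIi ("ultra3i") is recognized so far.
 */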
static bool __init supported_pmu(void)
{
	if (!strcmp(sparc_pmu_type, "ultra3i")) {
		sparc_pmu = &ultra3i_pmu;
		return true;
	}
	return false;
}

void __init init_hw_perf_counters(void)
{
	pr_info("Performance counters: ");

	if (!supported_pmu()) {
		pr_cont("No support for PMU type '%s'\n", sparc_pmu_type);
		return;
	}

	pr_cont("Supported PMU type is '%s'\n", sparc_pmu_type);

	/* All sparc64 PMUs currently have 2 counters.  But this simple
	 * driver only supports one active counter at a time.
	 */
	perf_max_counters = 1;

	register_die_notifier(&perf_counter_nmi_notifier);
}