/* Performance counter support for sparc64.
 *
 * Copyright (C) 2009 David S. Miller <davem@davemloft.net>
 *
 * This code is based almost entirely upon the x86 perf counter
 * code, which is:
 *
 *  Copyright (C) 2008 Thomas Gleixner <tglx@linutronix.de>
 *  Copyright (C) 2008-2009 Red Hat, Inc., Ingo Molnar
 *  Copyright (C) 2009 Jaswinder Singh Rajput
 *  Copyright (C) 2009 Advanced Micro Devices, Inc., Robert Richter
 *  Copyright (C) 2008-2009 Red Hat, Inc., Peter Zijlstra <pzijlstr@redhat.com>
 */

#include <linux/perf_counter.h>
#include <linux/kprobes.h>
#include <linux/kernel.h>
#include <linux/kdebug.h>
#include <linux/mutex.h>

#include <asm/cpudata.h>
#include <asm/atomic.h>
#include <asm/nmi.h>
#include <asm/pcr.h>

/* Sparc64 chips have two performance counters, 32-bits each, with
 * overflow interrupts generated on transition from 0xffffffff to 0.
 * The counters are accessed in one go using a 64-bit register.
 *
 * Both counters are controlled using a single control register.  The
 * only way to stop all sampling is to clear all of the context (user,
 * supervisor, hypervisor) sampling enable bits.  But these bits apply
 * to both counters, thus the two counters can't be enabled/disabled
 * individually.
 *
 * The control register has two event fields, one for each of the two
 * counters.  It's thus nearly impossible to have one counter going
 * while keeping the other one stopped.  Therefore it is possible to
 * get overflow interrupts for counters not currently "in use" and
 * that condition must be checked in the overflow interrupt handler.
 *
 * So we use a hack, in that we program inactive counters with the
 * "sw_count0" and "sw_count1" events.  These count how many times
 * the instruction "sethi %hi(0xfc000), %g0" is executed.  It's an
 * unusual way to encode a NOP and therefore will not trigger in
 * normal code.
 */

#define MAX_HWCOUNTERS		2
#define MAX_PERIOD		((1UL << 32) - 1)

#define PIC_UPPER_INDEX		0
#define PIC_LOWER_INDEX		1

#define PIC_UPPER_NOP		0x1c
#define PIC_LOWER_NOP		0x14

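/* Per-cpu bookkeeping: which perf_counter owns each hardware counter
 * slot, which slots are allocated (used_mask), and which are currently
 * counting (active_mask).
 */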
struct cpu_hw_counters {
	struct perf_counter	*counters[MAX_HWCOUNTERS];
	unsigned long		used_mask[BITS_TO_LONGS(MAX_HWCOUNTERS)];
	unsigned long		active_mask[BITS_TO_LONGS(MAX_HWCOUNTERS)];
	int			enabled;
};
DEFINE_PER_CPU(struct cpu_hw_counters, cpu_hw_counters) = { .enabled = 1, };

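/* An event map entry gives the raw PCR encoding of a generic hardware
 * event and a mask of which counter(s) can count it.
 */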
struct perf_event_map {
	u16	encoding;
	u8	pic_mask;
#define PIC_UPPER	0x01
#define PIC_LOWER	0x02
};

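/* Per-chip PMU description: the event translation table plus the
 * layout of the PCR event fields and enable bits.
 */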
struct sparc_pmu {
	const struct perf_event_map	*(*event_map)(int);
	int				max_events;
	int				upper_shift;
	int				lower_shift;
	int				event_mask;
	int				hv_bit;
	int				irq_bit;
};

static const struct perf_event_map ultra3i_perfmon_event_map[] = {
	[PERF_COUNT_HW_CPU_CYCLES] = { 0x0000, PIC_UPPER | PIC_LOWER },
	[PERF_COUNT_HW_INSTRUCTIONS] = { 0x0001, PIC_UPPER | PIC_LOWER },
	[PERF_COUNT_HW_CACHE_REFERENCES] = { 0x0009, PIC_LOWER },
	[PERF_COUNT_HW_CACHE_MISSES] = { 0x0009, PIC_UPPER },
};

static const struct perf_event_map *ultra3i_event_map(int event)
{
	return &ultra3i_perfmon_event_map[event];
}

static const struct sparc_pmu ultra3i_pmu = {
	.event_map	= ultra3i_event_map,
	.max_events	= ARRAY_SIZE(ultra3i_perfmon_event_map),
	.upper_shift	= 11,
	.lower_shift	= 4,
	.event_mask	= 0x3f,
};

static const struct sparc_pmu *sparc_pmu __read_mostly;

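/* Shift an event encoding (or field mask) into position for the
 * selected counter's PCR event field.
 */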
static u64 event_encoding(u64 event, int idx)
{
	if (idx == PIC_UPPER_INDEX)
		event <<= sparc_pmu->upper_shift;
	else
		event <<= sparc_pmu->lower_shift;
	return event;
}

static u64 mask_for_index(int idx)
{
	return event_encoding(sparc_pmu->event_mask, idx);
}

static u64 nop_for_index(int idx)
{
	return event_encoding(idx == PIC_UPPER_INDEX ?
			      PIC_UPPER_NOP : PIC_LOWER_NOP, idx);
}

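/* Program a single counter's event field in the PCR.  "Disabling" a
 * counter really means programming it with the NOP event, see the
 * comment at the top of this file.
 */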
static inline void sparc_pmu_enable_counter(struct hw_perf_counter *hwc,
					    int idx)
{
	u64 val, mask = mask_for_index(idx);

	val = pcr_ops->read();
	pcr_ops->write((val & ~mask) | hwc->config);
}

static inline void sparc_pmu_disable_counter(struct hw_perf_counter *hwc,
					     int idx)
{
	u64 mask = mask_for_index(idx);
	u64 nop = nop_for_index(idx);
	u64 val = pcr_ops->read();

	pcr_ops->write((val & ~mask) | nop);
}

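/* Global enable/disable entry points used by the core perf_counter
 * code.  All of the enable bits are kept in config_base, so enabling
 * just ORs each active counter's config_base into the PCR.
 */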
void hw_perf_enable(void)
{
	struct cpu_hw_counters *cpuc = &__get_cpu_var(cpu_hw_counters);
	u64 val;
	int i;

	if (cpuc->enabled)
		return;
	cpuc->enabled = 1;

	val = pcr_ops->read();
	for (i = 0; i < MAX_HWCOUNTERS; i++) {
		struct perf_counter *cp = cpuc->counters[i];
		struct hw_perf_counter *hwc;

		if (!cp)
			continue;
		hwc = &cp->hw;
		val |= hwc->config_base;
	}
	pcr_ops->write(val);
}

void hw_perf_disable(void)
{
	struct cpu_hw_counters *cpuc = &__get_cpu_var(cpu_hw_counters);
	u64 val;

	if (!cpuc->enabled)
		return;
	cpuc->enabled = 0;

	val = pcr_ops->read();
	val &= ~(PCR_UTRACE | PCR_STRACE |
		 sparc_pmu->hv_bit | sparc_pmu->irq_bit);
	pcr_ops->write(val);
}

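/* Both 32-bit counters live in the single 64-bit %pic register;
 * read_pmc()/write_pmc() access only the selected half.
 */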
static u32 read_pmc(int idx)
{
	u64 val;

	read_pic(val);
	if (idx == PIC_UPPER_INDEX)
		val >>= 32;

	return val & 0xffffffff;
}

static void write_pmc(int idx, u64 val)
{
	u64 shift, mask, pic;

	shift = 0;
	if (idx == PIC_UPPER_INDEX)
		shift = 32;

	mask = ((u64) 0xffffffff) << shift;
	val <<= shift;

	read_pic(pic);
	pic = (pic & ~mask) | val;
	write_pic(pic);
}

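/* Arm the counter so that it overflows after the sampling period
 * expires: the hardware interrupts on the 0xffffffff -> 0 transition,
 * so the negated remaining period is written to the counter.
 */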
static int sparc_perf_counter_set_period(struct perf_counter *counter,
					 struct hw_perf_counter *hwc, int idx)
{
	s64 left = atomic64_read(&hwc->period_left);
	s64 period = hwc->sample_period;
	int ret = 0;

	if (unlikely(left <= -period)) {
		left = period;
		atomic64_set(&hwc->period_left, left);
		hwc->last_period = period;
		ret = 1;
	}

	if (unlikely(left <= 0)) {
		left += period;
		atomic64_set(&hwc->period_left, left);
		hwc->last_period = period;
		ret = 1;
	}
	if (left > MAX_PERIOD)
		left = MAX_PERIOD;

	atomic64_set(&hwc->prev_count, (u64)-left);

	write_pmc(idx, (u64)(-left) & 0xffffffff);

	perf_counter_update_userpage(counter);

	return ret;
}

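/* ->enable() hook: claim the counter slot, program the period and
 * start the counter.
 */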
static int sparc_pmu_enable(struct perf_counter *counter)
{
	struct cpu_hw_counters *cpuc = &__get_cpu_var(cpu_hw_counters);
	struct hw_perf_counter *hwc = &counter->hw;
	int idx = hwc->idx;

	if (test_and_set_bit(idx, cpuc->used_mask))
		return -EAGAIN;

	sparc_pmu_disable_counter(hwc, idx);

	cpuc->counters[idx] = counter;
	set_bit(idx, cpuc->active_mask);

	sparc_perf_counter_set_period(counter, hwc, idx);
	sparc_pmu_enable_counter(hwc, idx);
	perf_counter_update_userpage(counter);
	return 0;
}

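/* Fold the delta since the last read into counter->count.  The
 * cmpxchg loop on prev_count guards against a racing update from NMI
 * context.
 */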
static u64 sparc_perf_counter_update(struct perf_counter *counter,
				     struct hw_perf_counter *hwc, int idx)
{
	int shift = 64 - 32;
	u64 prev_raw_count, new_raw_count;
	s64 delta;

again:
	prev_raw_count = atomic64_read(&hwc->prev_count);
	new_raw_count = read_pmc(idx);

	if (atomic64_cmpxchg(&hwc->prev_count, prev_raw_count,
			     new_raw_count) != prev_raw_count)
		goto again;

	delta = (new_raw_count << shift) - (prev_raw_count << shift);
	delta >>= shift;

	atomic64_add(delta, &counter->count);
	atomic64_sub(delta, &hwc->period_left);

	return new_raw_count;
}

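/* ->disable() hook: stop the counter, fold in the final count and
 * release the slot.
 */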
static void sparc_pmu_disable(struct perf_counter *counter)
{
	struct cpu_hw_counters *cpuc = &__get_cpu_var(cpu_hw_counters);
	struct hw_perf_counter *hwc = &counter->hw;
	int idx = hwc->idx;

	clear_bit(idx, cpuc->active_mask);
	sparc_pmu_disable_counter(hwc, idx);

	barrier();

	sparc_perf_counter_update(counter, hwc, idx);
	cpuc->counters[idx] = NULL;
	clear_bit(idx, cpuc->used_mask);

	perf_counter_update_userpage(counter);
}

static void sparc_pmu_read(struct perf_counter *counter)
{
	struct hw_perf_counter *hwc = &counter->hw;

	sparc_perf_counter_update(counter, hwc, hwc->idx);
}

static void sparc_pmu_unthrottle(struct perf_counter *counter)
{
	struct hw_perf_counter *hwc = &counter->hw;

	sparc_pmu_enable_counter(hwc, hwc->idx);
}

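/* The hardware counters are shared with the NMI watchdog, so the
 * watchdog is stopped on all cpus before the first counter is created
 * and restarted after the last one is destroyed.
 */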
static atomic_t active_counters = ATOMIC_INIT(0);
static DEFINE_MUTEX(pmc_grab_mutex);

void perf_counter_grab_pmc(void)
{
	if (atomic_inc_not_zero(&active_counters))
		return;

	mutex_lock(&pmc_grab_mutex);
	if (atomic_read(&active_counters) == 0) {
		if (atomic_read(&nmi_active) > 0) {
			on_each_cpu(stop_nmi_watchdog, NULL, 1);
			BUG_ON(atomic_read(&nmi_active) != 0);
		}
		atomic_inc(&active_counters);
	}
	mutex_unlock(&pmc_grab_mutex);
}

void perf_counter_release_pmc(void)
{
	if (atomic_dec_and_mutex_lock(&active_counters, &pmc_grab_mutex)) {
		if (atomic_read(&nmi_active) == 0)
			on_each_cpu(start_nmi_watchdog, NULL, 1);
		mutex_unlock(&pmc_grab_mutex);
	}
}

static void hw_perf_counter_destroy(struct perf_counter *counter)
{
	perf_counter_release_pmc();
}

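/* Validate the requested event, reserve the counter hardware and
 * translate the generic attributes into a PCR configuration.
 */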
static int __hw_perf_counter_init(struct perf_counter *counter)
{
	struct perf_counter_attr *attr = &counter->attr;
	struct hw_perf_counter *hwc = &counter->hw;
	const struct perf_event_map *pmap;
	u64 enc;

	if (atomic_read(&nmi_active) < 0)
		return -ENODEV;

	if (attr->type != PERF_TYPE_HARDWARE)
		return -EOPNOTSUPP;

	if (attr->config >= sparc_pmu->max_events)
		return -EINVAL;

	perf_counter_grab_pmc();
	counter->destroy = hw_perf_counter_destroy;

	/* We save the enable bits in the config_base.  So to
	 * turn off sampling just write 'config', and to enable
	 * things write 'config | config_base'.
	 */
	hwc->config_base = sparc_pmu->irq_bit;
	if (!attr->exclude_user)
		hwc->config_base |= PCR_UTRACE;
	if (!attr->exclude_kernel)
		hwc->config_base |= PCR_STRACE;
	if (!attr->exclude_hv)
		hwc->config_base |= sparc_pmu->hv_bit;

	if (!hwc->sample_period) {
		hwc->sample_period = MAX_PERIOD;
		hwc->last_period = hwc->sample_period;
		atomic64_set(&hwc->period_left, hwc->sample_period);
	}

	pmap = sparc_pmu->event_map(attr->config);

	enc = pmap->encoding;
	if (pmap->pic_mask & PIC_UPPER) {
		hwc->idx = PIC_UPPER_INDEX;
		enc <<= sparc_pmu->upper_shift;
	} else {
		hwc->idx = PIC_LOWER_INDEX;
		enc <<= sparc_pmu->lower_shift;
	}

	hwc->config |= enc;
	return 0;
}

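/* Method table handed back to the core perf_counter code. */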
static const struct pmu pmu = {
	.enable		= sparc_pmu_enable,
	.disable	= sparc_pmu_disable,
	.read		= sparc_pmu_read,
	.unthrottle	= sparc_pmu_unthrottle,
};

const struct pmu *hw_perf_counter_init(struct perf_counter *counter)
{
	int err = __hw_perf_counter_init(counter);

	if (err)
		return ERR_PTR(err);
	return &pmu;
}

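/* Dump the raw PCR and PIC values for the current cpu. */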
void perf_counter_print_debug(void)
{
	unsigned long flags;
	u64 pcr, pic;
	int cpu;

	if (!sparc_pmu)
		return;

	local_irq_save(flags);
	cpu = smp_processor_id();

	pcr = pcr_ops->read();
	read_pic(pic);

	pr_info("CPU#%d: PCR[%016llx] PIC[%016llx]\n",
		cpu, pcr, pic);

	local_irq_restore(flags);
}

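/* Counter overflow is delivered via the NMI die notifier chain.  A
 * counter whose value still has bit 31 set has not wrapped since it
 * was last programmed and is skipped; see the comment at the top of
 * this file about spurious overflow interrupts.
 */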
static int __kprobes perf_counter_nmi_handler(struct notifier_block *self,
					      unsigned long cmd, void *__args)
{
	struct die_args *args = __args;
	struct perf_sample_data data;
	struct cpu_hw_counters *cpuc;
	struct pt_regs *regs;
	int idx;

	if (!atomic_read(&active_counters))
		return NOTIFY_DONE;

	if (cmd != DIE_NMI)
		return NOTIFY_DONE;

	regs = args->regs;

	data.regs = regs;
	data.addr = 0;

	cpuc = &__get_cpu_var(cpu_hw_counters);
	for (idx = 0; idx < MAX_HWCOUNTERS; idx++) {
		struct perf_counter *counter = cpuc->counters[idx];
		struct hw_perf_counter *hwc;
		u64 val;

		if (!test_bit(idx, cpuc->active_mask))
			continue;
		hwc = &counter->hw;
		val = sparc_perf_counter_update(counter, hwc, idx);
		if (val & (1ULL << 31))
			continue;

		data.period = counter->hw.last_period;
		if (!sparc_perf_counter_set_period(counter, hwc, idx))
			continue;

		if (perf_counter_overflow(counter, 1, &data))
			sparc_pmu_disable_counter(hwc, idx);
	}

	return NOTIFY_STOP;
}

static __read_mostly struct notifier_block perf_counter_nmi_notifier = {
	.notifier_call		= perf_counter_nmi_handler,
};

static bool __init supported_pmu(void)
{
	if (!strcmp(sparc_pmu_type, "ultra3i")) {
		sparc_pmu = &ultra3i_pmu;
		return true;
	}
	return false;
}

void __init init_hw_perf_counters(void)
{
	pr_info("Performance counters: ");

	if (!supported_pmu()) {
		pr_cont("No support for PMU type '%s'\n", sparc_pmu_type);
		return;
	}

	pr_cont("Supported PMU type is '%s'\n", sparc_pmu_type);

	/* All sparc64 PMUs currently have 2 counters.  But this simple
	 * driver only supports one active counter at a time.
	 */
	perf_max_counters = 1;

	register_die_notifier(&perf_counter_nmi_notifier);
}