/* Performance event support for sparc64.
 *
 * Copyright (C) 2009 David S. Miller <davem@davemloft.net>
 *
 * This code is based almost entirely upon the x86 perf event
 * code, which is:
 *
 *  Copyright (C) 2008 Thomas Gleixner <tglx@linutronix.de>
 *  Copyright (C) 2008-2009 Red Hat, Inc., Ingo Molnar
 *  Copyright (C) 2009 Jaswinder Singh Rajput
 *  Copyright (C) 2009 Advanced Micro Devices, Inc., Robert Richter
 *  Copyright (C) 2008-2009 Red Hat, Inc., Peter Zijlstra <pzijlstr@redhat.com>
 */
#include <linux/perf_event.h>
#include <linux/kprobes.h>
#include <linux/kernel.h>
#include <linux/kdebug.h>
#include <linux/mutex.h>

#include <asm/cpudata.h>
#include <asm/atomic.h>
#include <asm/nmi.h>
#include <asm/pcr.h>
/* Sparc64 chips have two performance counters, 32-bits each, with
 * overflow interrupts generated on transition from 0xffffffff to 0.
 * The counters are accessed in one go using a 64-bit register.
 *
 * Both counters are controlled using a single control register.  The
 * only way to stop all sampling is to clear all of the context (user,
 * supervisor, hypervisor) sampling enable bits.  But these bits apply
 * to both counters, thus the two counters can't be enabled/disabled
 * individually.
 *
 * The control register has two event fields, one for each of the two
 * counters.  It's thus nearly impossible to have one counter going
 * while keeping the other one stopped.  Therefore it is possible to
 * get overflow interrupts for counters not currently "in use" and
 * that condition must be checked in the overflow interrupt handler.
 *
 * So we use a hack, in that we program inactive counters with the
 * "sw_count0" and "sw_count1" events.  These count how many times
 * the instruction "sethi %hi(0xfc000), %g0" is executed.  It's an
 * unusual way to encode a NOP and therefore will not trigger in
 * normal code.
 */
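/* Illustrative sketch only (not code from this driver): with the scheme
 * above, pointing the upper counter at some event while parking the
 * lower counter on its NOP event amounts to roughly:
 *
 *	u64 val = pcr_ops->read();
 *	val &= ~(mask_for_index(PIC_UPPER_INDEX) |
 *		 mask_for_index(PIC_LOWER_INDEX));
 *	val |= event_encoding(enc, PIC_UPPER_INDEX) |
 *	       nop_for_index(PIC_LOWER_INDEX) |
 *	       PCR_UTRACE | PCR_STRACE;
 *	pcr_ops->write(val);
 *
 * where "enc" stands in for a raw event encoding and the helpers used
 * are the ones defined further down in this file.
 */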
#define MAX_HWEVENTS			2
#define MAX_PERIOD			((1UL << 32) - 1)

#define PIC_UPPER_INDEX			0
#define PIC_LOWER_INDEX			1
struct cpu_hw_events {
	struct perf_event	*events[MAX_HWEVENTS];
	unsigned long		used_mask[BITS_TO_LONGS(MAX_HWEVENTS)];
	unsigned long		active_mask[BITS_TO_LONGS(MAX_HWEVENTS)];
	int			enabled;
};
DEFINE_PER_CPU(struct cpu_hw_events, cpu_hw_events) = { .enabled = 1, };
struct perf_event_map {
	u16	encoding;
	u8	pic_mask;
#define PIC_NONE	0x00
#define PIC_UPPER	0x01
#define PIC_LOWER	0x02
};
static unsigned long perf_event_encode(const struct perf_event_map *pmap)
{
	return ((unsigned long) pmap->encoding << 16) | pmap->pic_mask;
}

static void perf_event_decode(unsigned long val, u16 *enc, u8 *msk)
{
	*msk = val & 0xff;
	*enc = val >> 16;
}
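/* The packed value produced by perf_event_encode() is what gets stored
 * in hwc->event_base and later taken apart again by
 * sparc_check_constraints() via perf_event_decode().  For example, an
 * event with encoding 0x09 that is restricted to the lower counter
 * packs to (0x09 << 16) | PIC_LOWER == 0x90002.
 */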
#define C(x) PERF_COUNT_HW_CACHE_##x

#define CACHE_OP_UNSUPPORTED	0xfffe
#define CACHE_OP_NONSENSE	0xffff
typedef struct perf_event_map cache_map_t
				[PERF_COUNT_HW_CACHE_MAX]
				[PERF_COUNT_HW_CACHE_OP_MAX]
				[PERF_COUNT_HW_CACHE_RESULT_MAX];
struct sparc_pmu {
	const struct perf_event_map	*(*event_map)(int);
	const cache_map_t		*cache_map;
	int				max_events;
	int				upper_shift;
	int				lower_shift;
	int				event_mask;
	int				hv_bit;
	int				irq_bit;
	int				upper_nop;
	int				lower_nop;
};
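/* Each supported chip supplies one of these descriptors: a translation
 * from the generic perf hardware event IDs to raw encodings, an
 * optional cache event map, and the chip-specific PCR layout (event
 * field shifts and mask, plus the "NOP" encodings used to park an idle
 * counter).
 */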
static const struct perf_event_map ultra3_perfmon_event_map[] = {
	[PERF_COUNT_HW_CPU_CYCLES] = { 0x0000, PIC_UPPER | PIC_LOWER },
	[PERF_COUNT_HW_INSTRUCTIONS] = { 0x0001, PIC_UPPER | PIC_LOWER },
	[PERF_COUNT_HW_CACHE_REFERENCES] = { 0x0009, PIC_LOWER },
	[PERF_COUNT_HW_CACHE_MISSES] = { 0x0009, PIC_UPPER },
};

static const struct perf_event_map *ultra3_event_map(int event_id)
{
	return &ultra3_perfmon_event_map[event_id];
}
static const cache_map_t ultra3_cache_map = {
[C(RESULT_ACCESS)] = { 0x09, PIC_LOWER, },
[C(RESULT_MISS)] = { 0x09, PIC_UPPER, },
[C(RESULT_ACCESS)] = { 0x0a, PIC_LOWER },
[C(RESULT_MISS)] = { 0x0a, PIC_UPPER },
[C(RESULT_ACCESS)] = { CACHE_OP_UNSUPPORTED },
[C(RESULT_MISS)] = { CACHE_OP_UNSUPPORTED },
[C(RESULT_ACCESS)] = { 0x09, PIC_LOWER, },
[C(RESULT_MISS)] = { 0x09, PIC_UPPER, },
[ C(RESULT_ACCESS) ] = { CACHE_OP_NONSENSE },
[ C(RESULT_MISS) ] = { CACHE_OP_NONSENSE },
[ C(OP_PREFETCH) ] = {
[ C(RESULT_ACCESS) ] = { CACHE_OP_UNSUPPORTED },
[ C(RESULT_MISS) ] = { CACHE_OP_UNSUPPORTED },
[C(RESULT_ACCESS)] = { 0x0c, PIC_LOWER, },
[C(RESULT_MISS)] = { 0x0c, PIC_UPPER, },
[C(RESULT_ACCESS)] = { 0x0c, PIC_LOWER },
[C(RESULT_MISS)] = { 0x0c, PIC_UPPER },
[C(RESULT_ACCESS)] = { CACHE_OP_UNSUPPORTED },
[C(RESULT_MISS)] = { CACHE_OP_UNSUPPORTED },
[C(RESULT_ACCESS)] = { CACHE_OP_UNSUPPORTED },
[C(RESULT_MISS)] = { 0x12, PIC_UPPER, },
[ C(RESULT_ACCESS) ] = { CACHE_OP_UNSUPPORTED },
[ C(RESULT_MISS) ] = { CACHE_OP_UNSUPPORTED },
[ C(OP_PREFETCH) ] = {
[ C(RESULT_ACCESS) ] = { CACHE_OP_UNSUPPORTED },
[ C(RESULT_MISS) ] = { CACHE_OP_UNSUPPORTED },
[C(RESULT_ACCESS)] = { CACHE_OP_UNSUPPORTED },
[C(RESULT_MISS)] = { 0x11, PIC_UPPER, },
[ C(RESULT_ACCESS) ] = { CACHE_OP_UNSUPPORTED },
[ C(RESULT_MISS) ] = { CACHE_OP_UNSUPPORTED },
[ C(OP_PREFETCH) ] = {
[ C(RESULT_ACCESS) ] = { CACHE_OP_UNSUPPORTED },
[ C(RESULT_MISS) ] = { CACHE_OP_UNSUPPORTED },
[C(RESULT_ACCESS)] = { CACHE_OP_UNSUPPORTED },
[C(RESULT_MISS)] = { CACHE_OP_UNSUPPORTED },
[ C(RESULT_ACCESS) ] = { CACHE_OP_UNSUPPORTED },
[ C(RESULT_MISS) ] = { CACHE_OP_UNSUPPORTED },
[ C(OP_PREFETCH) ] = {
[ C(RESULT_ACCESS) ] = { CACHE_OP_UNSUPPORTED },
[ C(RESULT_MISS) ] = { CACHE_OP_UNSUPPORTED },
static const struct sparc_pmu ultra3_pmu = {
	.event_map	= ultra3_event_map,
	.cache_map	= &ultra3_cache_map,
	.max_events	= ARRAY_SIZE(ultra3_perfmon_event_map),
/* Niagara1 is very limited.  The upper PIC is hard-locked to count
 * only instructions, so it is free running which creates all kinds of
 * problems.  Some hardware designs make one wonder if the creator
 * even looked at how this stuff gets used by software.
 */
static const struct perf_event_map niagara1_perfmon_event_map[] = {
	[PERF_COUNT_HW_CPU_CYCLES] = { 0x00, PIC_UPPER },
	[PERF_COUNT_HW_INSTRUCTIONS] = { 0x00, PIC_UPPER },
	[PERF_COUNT_HW_CACHE_REFERENCES] = { 0, PIC_NONE },
	[PERF_COUNT_HW_CACHE_MISSES] = { 0x03, PIC_LOWER },
};

static const struct perf_event_map *niagara1_event_map(int event_id)
{
	return &niagara1_perfmon_event_map[event_id];
}
static const cache_map_t niagara1_cache_map = {
[C(RESULT_ACCESS)] = { CACHE_OP_UNSUPPORTED },
[C(RESULT_MISS)] = { 0x03, PIC_LOWER, },
[C(RESULT_ACCESS)] = { CACHE_OP_UNSUPPORTED },
[C(RESULT_MISS)] = { 0x03, PIC_LOWER, },
[C(RESULT_ACCESS)] = { CACHE_OP_UNSUPPORTED },
[C(RESULT_MISS)] = { CACHE_OP_UNSUPPORTED },
[C(RESULT_ACCESS)] = { 0x00, PIC_UPPER },
[C(RESULT_MISS)] = { 0x02, PIC_LOWER, },
[ C(RESULT_ACCESS) ] = { CACHE_OP_NONSENSE },
[ C(RESULT_MISS) ] = { CACHE_OP_NONSENSE },
[ C(OP_PREFETCH) ] = {
[ C(RESULT_ACCESS) ] = { CACHE_OP_UNSUPPORTED },
[ C(RESULT_MISS) ] = { CACHE_OP_UNSUPPORTED },
[C(RESULT_ACCESS)] = { CACHE_OP_UNSUPPORTED },
[C(RESULT_MISS)] = { 0x07, PIC_LOWER, },
[C(RESULT_ACCESS)] = { CACHE_OP_UNSUPPORTED },
[C(RESULT_MISS)] = { 0x07, PIC_LOWER, },
[C(RESULT_ACCESS)] = { CACHE_OP_UNSUPPORTED },
[C(RESULT_MISS)] = { CACHE_OP_UNSUPPORTED },
[C(RESULT_ACCESS)] = { CACHE_OP_UNSUPPORTED },
[C(RESULT_MISS)] = { 0x05, PIC_LOWER, },
[ C(RESULT_ACCESS) ] = { CACHE_OP_UNSUPPORTED },
[ C(RESULT_MISS) ] = { CACHE_OP_UNSUPPORTED },
[ C(OP_PREFETCH) ] = {
[ C(RESULT_ACCESS) ] = { CACHE_OP_UNSUPPORTED },
[ C(RESULT_MISS) ] = { CACHE_OP_UNSUPPORTED },
[C(RESULT_ACCESS)] = { CACHE_OP_UNSUPPORTED },
[C(RESULT_MISS)] = { 0x04, PIC_LOWER, },
[ C(RESULT_ACCESS) ] = { CACHE_OP_UNSUPPORTED },
[ C(RESULT_MISS) ] = { CACHE_OP_UNSUPPORTED },
[ C(OP_PREFETCH) ] = {
[ C(RESULT_ACCESS) ] = { CACHE_OP_UNSUPPORTED },
[ C(RESULT_MISS) ] = { CACHE_OP_UNSUPPORTED },
[C(RESULT_ACCESS)] = { CACHE_OP_UNSUPPORTED },
[C(RESULT_MISS)] = { CACHE_OP_UNSUPPORTED },
[ C(RESULT_ACCESS) ] = { CACHE_OP_UNSUPPORTED },
[ C(RESULT_MISS) ] = { CACHE_OP_UNSUPPORTED },
[ C(OP_PREFETCH) ] = {
[ C(RESULT_ACCESS) ] = { CACHE_OP_UNSUPPORTED },
[ C(RESULT_MISS) ] = { CACHE_OP_UNSUPPORTED },
static const struct sparc_pmu niagara1_pmu = {
	.event_map	= niagara1_event_map,
	.cache_map	= &niagara1_cache_map,
	.max_events	= ARRAY_SIZE(niagara1_perfmon_event_map),
static const struct perf_event_map niagara2_perfmon_event_map[] = {
	[PERF_COUNT_HW_CPU_CYCLES] = { 0x02ff, PIC_UPPER | PIC_LOWER },
	[PERF_COUNT_HW_INSTRUCTIONS] = { 0x02ff, PIC_UPPER | PIC_LOWER },
	[PERF_COUNT_HW_CACHE_REFERENCES] = { 0x0208, PIC_UPPER | PIC_LOWER },
	[PERF_COUNT_HW_CACHE_MISSES] = { 0x0302, PIC_UPPER | PIC_LOWER },
	[PERF_COUNT_HW_BRANCH_INSTRUCTIONS] = { 0x0201, PIC_UPPER | PIC_LOWER },
	[PERF_COUNT_HW_BRANCH_MISSES] = { 0x0202, PIC_UPPER | PIC_LOWER },
};

static const struct perf_event_map *niagara2_event_map(int event_id)
{
	return &niagara2_perfmon_event_map[event_id];
}
static const cache_map_t niagara2_cache_map = {
[C(RESULT_ACCESS)] = { 0x0208, PIC_UPPER | PIC_LOWER, },
[C(RESULT_MISS)] = { 0x0302, PIC_UPPER | PIC_LOWER, },
[C(RESULT_ACCESS)] = { 0x0210, PIC_UPPER | PIC_LOWER, },
[C(RESULT_MISS)] = { 0x0302, PIC_UPPER | PIC_LOWER, },
[C(RESULT_ACCESS)] = { CACHE_OP_UNSUPPORTED },
[C(RESULT_MISS)] = { CACHE_OP_UNSUPPORTED },
[C(RESULT_ACCESS)] = { 0x02ff, PIC_UPPER | PIC_LOWER, },
[C(RESULT_MISS)] = { 0x0301, PIC_UPPER | PIC_LOWER, },
[ C(RESULT_ACCESS) ] = { CACHE_OP_NONSENSE },
[ C(RESULT_MISS) ] = { CACHE_OP_NONSENSE },
[ C(OP_PREFETCH) ] = {
[ C(RESULT_ACCESS) ] = { CACHE_OP_UNSUPPORTED },
[ C(RESULT_MISS) ] = { CACHE_OP_UNSUPPORTED },
[C(RESULT_ACCESS)] = { 0x0208, PIC_UPPER | PIC_LOWER, },
[C(RESULT_MISS)] = { 0x0330, PIC_UPPER | PIC_LOWER, },
[C(RESULT_ACCESS)] = { 0x0210, PIC_UPPER | PIC_LOWER, },
[C(RESULT_MISS)] = { 0x0320, PIC_UPPER | PIC_LOWER, },
[C(RESULT_ACCESS)] = { CACHE_OP_UNSUPPORTED },
[C(RESULT_MISS)] = { CACHE_OP_UNSUPPORTED },
[C(RESULT_ACCESS)] = { CACHE_OP_UNSUPPORTED },
[C(RESULT_MISS)] = { 0x0b08, PIC_UPPER | PIC_LOWER, },
[ C(RESULT_ACCESS) ] = { CACHE_OP_UNSUPPORTED },
[ C(RESULT_MISS) ] = { CACHE_OP_UNSUPPORTED },
[ C(OP_PREFETCH) ] = {
[ C(RESULT_ACCESS) ] = { CACHE_OP_UNSUPPORTED },
[ C(RESULT_MISS) ] = { CACHE_OP_UNSUPPORTED },
[C(RESULT_ACCESS)] = { CACHE_OP_UNSUPPORTED },
[C(RESULT_MISS)] = { 0xb04, PIC_UPPER | PIC_LOWER, },
[ C(RESULT_ACCESS) ] = { CACHE_OP_UNSUPPORTED },
[ C(RESULT_MISS) ] = { CACHE_OP_UNSUPPORTED },
[ C(OP_PREFETCH) ] = {
[ C(RESULT_ACCESS) ] = { CACHE_OP_UNSUPPORTED },
[ C(RESULT_MISS) ] = { CACHE_OP_UNSUPPORTED },
[C(RESULT_ACCESS)] = { CACHE_OP_UNSUPPORTED },
[C(RESULT_MISS)] = { CACHE_OP_UNSUPPORTED },
[ C(RESULT_ACCESS) ] = { CACHE_OP_UNSUPPORTED },
[ C(RESULT_MISS) ] = { CACHE_OP_UNSUPPORTED },
[ C(OP_PREFETCH) ] = {
[ C(RESULT_ACCESS) ] = { CACHE_OP_UNSUPPORTED },
[ C(RESULT_MISS) ] = { CACHE_OP_UNSUPPORTED },
static const struct sparc_pmu niagara2_pmu = {
	.event_map	= niagara2_event_map,
	.cache_map	= &niagara2_cache_map,
	.max_events	= ARRAY_SIZE(niagara2_perfmon_event_map),

static const struct sparc_pmu *sparc_pmu __read_mostly;
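/* The helpers below place a raw event encoding into the PCR field of
 * the chosen counter: event_encoding() shifts the value to the chip's
 * upper or lower event field, mask_for_index() builds the mask covering
 * that field, and nop_for_index() returns the chip's NOP event used to
 * park an idle counter (see the comment at the top of this file).
 */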
static u64 event_encoding(u64 event_id, int idx)
{
	if (idx == PIC_UPPER_INDEX)
		event_id <<= sparc_pmu->upper_shift;
	else
		event_id <<= sparc_pmu->lower_shift;
	return event_id;
}

static u64 mask_for_index(int idx)
{
	return event_encoding(sparc_pmu->event_mask, idx);
}

static u64 nop_for_index(int idx)
{
	return event_encoding(idx == PIC_UPPER_INDEX ?
			      sparc_pmu->upper_nop :
			      sparc_pmu->lower_nop, idx);
}
static inline void sparc_pmu_enable_event(struct hw_perf_event *hwc, int idx)
{
	u64 val, mask = mask_for_index(idx);

	val = pcr_ops->read();
	pcr_ops->write((val & ~mask) | hwc->config);
}

static inline void sparc_pmu_disable_event(struct hw_perf_event *hwc, int idx)
{
	u64 mask = mask_for_index(idx);
	u64 nop = nop_for_index(idx);
	u64 val = pcr_ops->read();

	pcr_ops->write((val & ~mask) | nop);
}
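/* Note that "disabling" a single event does not stop its counter; the
 * counter's event field is simply rewritten with the chip's NOP
 * encoding, per the scheme described at the top of this file.  Sampling
 * as a whole is stopped and restarted by hw_perf_disable() and
 * hw_perf_enable() below, which clear and restore the
 * user/supervisor/hypervisor trace enable bits in the PCR.
 */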
void hw_perf_enable(void)
	struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);

	val = pcr_ops->read();

	for (i = 0; i < MAX_HWEVENTS; i++) {
		struct perf_event *cp = cpuc->events[i];
		struct hw_perf_event *hwc;

		val |= hwc->config_base;
void hw_perf_disable(void)
	struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);

	val = pcr_ops->read();
	val &= ~(PCR_UTRACE | PCR_STRACE |
		 sparc_pmu->hv_bit | sparc_pmu->irq_bit);
static u32 read_pmc(int idx)
	if (idx == PIC_UPPER_INDEX)
	return val & 0xffffffff;

static void write_pmc(int idx, u64 val)
	u64 shift, mask, pic;

	if (idx == PIC_UPPER_INDEX)
	mask = ((u64) 0xffffffff) << shift;
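/* Arm the counter so that it wraps from 0xffffffff to 0 (raising the
 * overflow interrupt) after roughly sample_period more events: the
 * counter is seeded with -left truncated to 32 bits, while period_left
 * tracks how much of the current period is still outstanding.
 */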
static int sparc_perf_event_set_period(struct perf_event *event,
				       struct hw_perf_event *hwc, int idx)
	s64 left = atomic64_read(&hwc->period_left);
	s64 period = hwc->sample_period;

	if (unlikely(left <= -period)) {
		atomic64_set(&hwc->period_left, left);
		hwc->last_period = period;

	if (unlikely(left <= 0)) {
		atomic64_set(&hwc->period_left, left);
		hwc->last_period = period;

	if (left > MAX_PERIOD)

	atomic64_set(&hwc->prev_count, (u64)-left);

	write_pmc(idx, (u64)(-left) & 0xffffffff);

	perf_event_update_userpage(event);
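/* ->enable() callback: claim the counter slot chosen at init time
 * (hwc->idx), quiesce it with the NOP event, then program the sampling
 * period and the real event encoding.
 */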
static int sparc_pmu_enable(struct perf_event *event)
	struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
	struct hw_perf_event *hwc = &event->hw;

	if (test_and_set_bit(idx, cpuc->used_mask))

	sparc_pmu_disable_event(hwc, idx);

	cpuc->events[idx] = event;
	set_bit(idx, cpuc->active_mask);

	sparc_perf_event_set_period(event, hwc, idx);
	sparc_pmu_enable_event(hwc, idx);
	perf_event_update_userpage(event);
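/* Fold the counter's current raw value into event->count.  The cmpxchg
 * on prev_count closes the race with the overflow NMI, which may be
 * updating the same event concurrently.
 */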
static u64 sparc_perf_event_update(struct perf_event *event,
				   struct hw_perf_event *hwc, int idx)
	u64 prev_raw_count, new_raw_count;

	prev_raw_count = atomic64_read(&hwc->prev_count);
	new_raw_count = read_pmc(idx);

	if (atomic64_cmpxchg(&hwc->prev_count, prev_raw_count,
			     new_raw_count) != prev_raw_count)

	delta = (new_raw_count << shift) - (prev_raw_count << shift);

	atomic64_add(delta, &event->count);
	atomic64_sub(delta, &hwc->period_left);

	return new_raw_count;
static void sparc_pmu_disable(struct perf_event *event)
	struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
	struct hw_perf_event *hwc = &event->hw;

	clear_bit(idx, cpuc->active_mask);
	sparc_pmu_disable_event(hwc, idx);

	sparc_perf_event_update(event, hwc, idx);
	cpuc->events[idx] = NULL;
	clear_bit(idx, cpuc->used_mask);

	perf_event_update_userpage(event);
static void sparc_pmu_read(struct perf_event *event)
{
	struct hw_perf_event *hwc = &event->hw;

	sparc_perf_event_update(event, hwc, hwc->idx);
}
static void sparc_pmu_unthrottle(struct perf_event *event)
{
	struct hw_perf_event *hwc = &event->hw;

	sparc_pmu_enable_event(hwc, hwc->idx);
}
static atomic_t active_events = ATOMIC_INIT(0);
static DEFINE_MUTEX(pmc_grab_mutex);
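/* The performance counters are shared with the NMI watchdog.  The first
 * perf event to be created stops the watchdog on all cpus, and the last
 * one to go away hands the counters back to it.
 */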
void perf_event_grab_pmc(void)
{
	if (atomic_inc_not_zero(&active_events))
		return;

	mutex_lock(&pmc_grab_mutex);
	if (atomic_read(&active_events) == 0) {
		if (atomic_read(&nmi_active) > 0) {
			on_each_cpu(stop_nmi_watchdog, NULL, 1);
			BUG_ON(atomic_read(&nmi_active) != 0);
		}
		atomic_inc(&active_events);
	}
	mutex_unlock(&pmc_grab_mutex);
}
void perf_event_release_pmc(void)
{
	if (atomic_dec_and_mutex_lock(&active_events, &pmc_grab_mutex)) {
		if (atomic_read(&nmi_active) == 0)
			on_each_cpu(start_nmi_watchdog, NULL, 1);
		mutex_unlock(&pmc_grab_mutex);
	}
}
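/* PERF_TYPE_HW_CACHE configs encode the cache type, operation and
 * result in the low three bytes of attr->config.  Translate that into
 * the chip's cache_map entry, rejecting combinations this chip cannot
 * count.
 */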
static const struct perf_event_map *sparc_map_cache_event(u64 config)
{
	unsigned int cache_type, cache_op, cache_result;
	const struct perf_event_map *pmap;

	if (!sparc_pmu->cache_map)
		return ERR_PTR(-ENOENT);

	cache_type = (config >> 0) & 0xff;
	if (cache_type >= PERF_COUNT_HW_CACHE_MAX)
		return ERR_PTR(-EINVAL);

	cache_op = (config >> 8) & 0xff;
	if (cache_op >= PERF_COUNT_HW_CACHE_OP_MAX)
		return ERR_PTR(-EINVAL);

	cache_result = (config >> 16) & 0xff;
	if (cache_result >= PERF_COUNT_HW_CACHE_RESULT_MAX)
		return ERR_PTR(-EINVAL);

	pmap = &((*sparc_pmu->cache_map)[cache_type][cache_op][cache_result]);

	if (pmap->encoding == CACHE_OP_UNSUPPORTED)
		return ERR_PTR(-ENOENT);

	if (pmap->encoding == CACHE_OP_NONSENSE)
		return ERR_PTR(-EINVAL);

	return pmap;
}
static void hw_perf_event_destroy(struct perf_event *event)
{
	perf_event_release_pmc();
}
/* Make sure all events can be scheduled into the hardware at
 * the same time.  This is simplified by the fact that we only
 * need to support 2 simultaneous HW events.
 */
static int sparc_check_constraints(unsigned long *events, int n_ev)
	if (n_ev <= perf_max_events) {
		perf_event_decode(events[0], &dummy, &msk1);
		perf_event_decode(events[1], &dummy, &msk2);

		/* If both events can go on any counter, OK. */
		if (msk1 == (PIC_UPPER | PIC_LOWER) &&
		    msk2 == (PIC_UPPER | PIC_LOWER))

		/* If one event is limited to a specific counter,
		 * and the other can go on both, OK.
		 */
		if ((msk1 == PIC_UPPER || msk1 == PIC_LOWER) &&
		    msk2 == (PIC_UPPER | PIC_LOWER))
		if ((msk2 == PIC_UPPER || msk2 == PIC_LOWER) &&
		    msk1 == (PIC_UPPER | PIC_LOWER))

		/* If the events are fixed to different counters, OK. */
		if ((msk1 == PIC_UPPER && msk2 == PIC_LOWER) ||
		    (msk1 == PIC_LOWER && msk2 == PIC_UPPER))

		/* Otherwise, there is a conflict. */
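/* Because the user/supervisor/hypervisor enable bits in the PCR are
 * global, every event scheduled on this PMU must agree on its
 * exclude_user, exclude_kernel and exclude_hv settings; check_excludes()
 * enforces that across a group of events.
 */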
static int check_excludes(struct perf_event **evts, int n_prev, int n_new)
	int eu = 0, ek = 0, eh = 0;
	struct perf_event *event;

	for (i = 0; i < n; i++) {
			eu = event->attr.exclude_user;
			ek = event->attr.exclude_kernel;
			eh = event->attr.exclude_hv;
		} else if (event->attr.exclude_user != eu ||
			   event->attr.exclude_kernel != ek ||
			   event->attr.exclude_hv != eh) {
static int collect_events(struct perf_event *group, int max_count,
			  struct perf_event *evts[], unsigned long *events)
	struct perf_event *event;

	if (!is_software_event(group)) {
		events[n++] = group->hw.event_base;

	list_for_each_entry(event, &group->sibling_list, group_entry) {
		if (!is_software_event(event) &&
		    event->state != PERF_EVENT_STATE_OFF) {
			events[n++] = event->hw.event_base;
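/* Translate a generic perf event into this PMU's hardware configuration
 * and check it against the other events already in its group.
 */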
static int __hw_perf_event_init(struct perf_event *event)
	struct perf_event_attr *attr = &event->attr;
	struct perf_event *evts[MAX_HWEVENTS];
	struct hw_perf_event *hwc = &event->hw;
	unsigned long events[MAX_HWEVENTS];
	const struct perf_event_map *pmap;

	if (atomic_read(&nmi_active) < 0)

	if (attr->type == PERF_TYPE_HARDWARE) {
		if (attr->config >= sparc_pmu->max_events)
		pmap = sparc_pmu->event_map(attr->config);
	} else if (attr->type == PERF_TYPE_HW_CACHE) {
		pmap = sparc_map_cache_event(attr->config);
			return PTR_ERR(pmap);

	/* We save the enable bits in the config_base.  So to
	 * turn off sampling just write 'config', and to enable
	 * things write 'config | config_base'.
	 */
	hwc->config_base = sparc_pmu->irq_bit;
	if (!attr->exclude_user)
		hwc->config_base |= PCR_UTRACE;
	if (!attr->exclude_kernel)
		hwc->config_base |= PCR_STRACE;
	if (!attr->exclude_hv)
		hwc->config_base |= sparc_pmu->hv_bit;

	hwc->event_base = perf_event_encode(pmap);

	enc = pmap->encoding;

	if (event->group_leader != event) {
		n = collect_events(event->group_leader,
		events[n] = hwc->event_base;

	if (check_excludes(evts, n, 1))
	if (sparc_check_constraints(events, n + 1))

	/* Try to do all error checking before this point, as unwinding
	 * state after grabbing the PMC is difficult.
	 */
	perf_event_grab_pmc();
	event->destroy = hw_perf_event_destroy;

	if (!hwc->sample_period) {
		hwc->sample_period = MAX_PERIOD;
		hwc->last_period = hwc->sample_period;
		atomic64_set(&hwc->period_left, hwc->sample_period);

	if (pmap->pic_mask & PIC_UPPER) {
		hwc->idx = PIC_UPPER_INDEX;
		enc <<= sparc_pmu->upper_shift;
	} else {
		hwc->idx = PIC_LOWER_INDEX;
		enc <<= sparc_pmu->lower_shift;
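/* The struct pmu below is the set of callbacks the generic perf core
 * uses to start, stop, read and unthrottle events on this PMU.
 */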
static const struct pmu pmu = {
	.enable		= sparc_pmu_enable,
	.disable	= sparc_pmu_disable,
	.read		= sparc_pmu_read,
	.unthrottle	= sparc_pmu_unthrottle,
};

const struct pmu *hw_perf_event_init(struct perf_event *event)
{
	int err = __hw_perf_event_init(event);
	if (err)
		return ERR_PTR(err);
	return &pmu;
}
void perf_event_print_debug(void)
	local_irq_save(flags);

	cpu = smp_processor_id();

	pcr = pcr_ops->read();

	pr_info("CPU#%d: PCR[%016llx] PIC[%016llx]\n",

	local_irq_restore(flags);
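/* Counter overflow arrives as an NMI, via the same die-notifier chain
 * used by the NMI watchdog.  For each active counter the handler folds
 * in the current count; a value with bit 31 still set has not wrapped
 * yet and is skipped, otherwise the period is re-armed and the sample
 * is handed to perf_event_overflow().
 */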
static int __kprobes perf_event_nmi_handler(struct notifier_block *self,
					    unsigned long cmd, void *__args)
	struct die_args *args = __args;
	struct perf_sample_data data;
	struct cpu_hw_events *cpuc;
	struct pt_regs *regs;

	if (!atomic_read(&active_events))

	cpuc = &__get_cpu_var(cpu_hw_events);
	for (idx = 0; idx < MAX_HWEVENTS; idx++) {
		struct perf_event *event = cpuc->events[idx];
		struct hw_perf_event *hwc;

		if (!test_bit(idx, cpuc->active_mask))
		val = sparc_perf_event_update(event, hwc, idx);
		if (val & (1ULL << 31))
		data.period = event->hw.last_period;
		if (!sparc_perf_event_set_period(event, hwc, idx))
		if (perf_event_overflow(event, 1, &data, regs))
			sparc_pmu_disable_event(hwc, idx);
static __read_mostly struct notifier_block perf_event_nmi_notifier = {
	.notifier_call	= perf_event_nmi_handler,
};
static bool __init supported_pmu(void)
{
	if (!strcmp(sparc_pmu_type, "ultra3") ||
	    !strcmp(sparc_pmu_type, "ultra3+") ||
	    !strcmp(sparc_pmu_type, "ultra3i") ||
	    !strcmp(sparc_pmu_type, "ultra4+")) {
		sparc_pmu = &ultra3_pmu;
		return true;
	}
	if (!strcmp(sparc_pmu_type, "niagara")) {
		sparc_pmu = &niagara1_pmu;
		return true;
	}
	if (!strcmp(sparc_pmu_type, "niagara2")) {
		sparc_pmu = &niagara2_pmu;
		return true;
	}
	return false;
}
void __init init_hw_perf_events(void)
{
	pr_info("Performance events: ");

	if (!supported_pmu()) {
		pr_cont("No support for PMU type '%s'\n", sparc_pmu_type);
		return;
	}

	pr_cont("Supported PMU type is '%s'\n", sparc_pmu_type);

	/* All sparc64 PMUs currently have 2 events.  But this simple
	 * driver only supports one active event at a time.
	 */
	perf_max_events = 1;

	register_die_notifier(&perf_event_nmi_notifier);
}