/*
 * ARM performance counter support.
 *
 * Copyright (C) 2009 picoChip Designs, Ltd., Jamie Iles
 * Copyright (C) 2010 ARM Ltd., Will Deacon <will.deacon@arm.com>
 *
 * This code is based on the sparc64 perf event code, which is in turn based
 * on the x86 code. Callchain code is based on the ARM OProfile backtrace
 * code.
 */
#define pr_fmt(fmt) "hw perfevents: " fmt

#include <linux/interrupt.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/perf_event.h>
#include <linux/platform_device.h>
#include <linux/spinlock.h>
#include <linux/uaccess.h>

#include <asm/cputype.h>
#include <asm/irq_regs.h>
#include <asm/pmu.h>
#include <asm/stacktrace.h>
static struct platform_device *pmu_device;

/*
 * Hardware lock to serialize accesses to PMU registers. Needed for the
 * read/modify/write sequences.
 */
static DEFINE_RAW_SPINLOCK(pmu_lock);

/*
 * ARMv6 supports a maximum of 3 events, starting from index 0. If we add
 * another platform that supports more, we need to increase this to be the
 * largest of all platforms.
 *
 * ARMv7 supports up to 32 events:
 *  cycle counter CCNT + 31 event counters CNT0..CNT30.
 *  Cortex-A8 has 1+4 counters, Cortex-A9 has 1+6 counters.
 */
#define ARMPMU_MAX_HWEVENTS	32

/* The events for a given CPU. */
struct cpu_hw_events {
        /*
         * The events that are active on the CPU for the given index.
         */
        struct perf_event *events[ARMPMU_MAX_HWEVENTS];

        /*
         * A 1 bit for an index indicates that the counter is being used for
         * an event. A 0 means that the counter can be used.
         */
        unsigned long used_mask[BITS_TO_LONGS(ARMPMU_MAX_HWEVENTS)];

        /*
         * A 1 bit for an index indicates that the counter is actively being
         * used.
         */
        unsigned long active_mask[BITS_TO_LONGS(ARMPMU_MAX_HWEVENTS)];
};
static DEFINE_PER_CPU(struct cpu_hw_events, cpu_hw_events);
struct arm_pmu {
        enum arm_perf_pmu_ids id;
        cpumask_t   active_irqs;
        const char  *name;
        irqreturn_t (*handle_irq)(int irq_num, void *dev);
        void        (*enable)(struct hw_perf_event *evt, int idx);
        void        (*disable)(struct hw_perf_event *evt, int idx);
        int         (*get_event_idx)(struct cpu_hw_events *cpuc,
                                     struct hw_perf_event *hwc);
        u32         (*read_counter)(int idx);
        void        (*write_counter)(int idx, u32 val);
        void        (*start)(void);
        void        (*stop)(void);
        void        (*reset)(void *);
        const unsigned (*cache_map)[PERF_COUNT_HW_CACHE_MAX]
                                   [PERF_COUNT_HW_CACHE_OP_MAX]
                                   [PERF_COUNT_HW_CACHE_RESULT_MAX];
        const unsigned (*event_map)[PERF_COUNT_HW_MAX];
        u32         raw_event_mask;
        int         num_events;
        u64         max_period;
};

/* Set at runtime when we know what CPU type we are. */
static struct arm_pmu *armpmu;
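
/*
 * Illustrative sketch (not part of the original file): a PMU backend's init
 * routine is expected to hand back a statically allocated arm_pmu describing
 * its hardware. All "hypothetical_pmu_*" names below are made up for the
 * example; only the field names come from the structure above.
 *
 *	static struct arm_pmu hypothetical_pmu = {
 *		.id		= ARM_PERF_PMU_ID_V6,
 *		.name		= "v6",
 *		.handle_irq	= hypothetical_pmu_handle_irq,
 *		.enable		= hypothetical_pmu_enable_event,
 *		.disable	= hypothetical_pmu_disable_event,
 *		.read_counter	= hypothetical_pmu_read_counter,
 *		.write_counter	= hypothetical_pmu_write_counter,
 *		.get_event_idx	= hypothetical_pmu_get_event_idx,
 *		.num_events	= 3,
 *		.max_period	= (1LLU << 32) - 1,
 *	};
 *
 *	static struct arm_pmu *__init hypothetical_pmu_init(void)
 *	{
 *		return &hypothetical_pmu;
 *	}
 */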
enum arm_perf_pmu_ids
armpmu_get_pmu_id(void)
{
        int id = -ENODEV;

        if (armpmu != NULL)
                id = armpmu->id;

        return id;
}
EXPORT_SYMBOL_GPL(armpmu_get_pmu_id);

int
armpmu_get_max_events(void)
{
        int max_events = 0;

        if (armpmu != NULL)
                max_events = armpmu->num_events;

        return max_events;
}
EXPORT_SYMBOL_GPL(armpmu_get_max_events);

int perf_num_counters(void)
{
        return armpmu_get_max_events();
}
EXPORT_SYMBOL_GPL(perf_num_counters);

#define HW_OP_UNSUPPORTED	0xFFFF

#define C(_x) \
	PERF_COUNT_HW_CACHE_##_x

#define CACHE_OP_UNSUPPORTED	0xFFFF
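
/*
 * Example (illustrative, not from the original source): perf encodes a
 * hardware cache event as a three-byte tuple packed into attr.config, which
 * armpmu_map_cache_event() below unpacks. An L1 data-cache read miss would
 * be requested as:
 *
 *	config = (PERF_COUNT_HW_CACHE_L1D         <<  0) |
 *		 (PERF_COUNT_HW_CACHE_OP_READ     <<  8) |
 *		 (PERF_COUNT_HW_CACHE_RESULT_MISS << 16);
 *
 * and is mapped through (*armpmu->cache_map)[L1D][READ][MISS] to the
 * PMU-specific event number, or CACHE_OP_UNSUPPORTED.
 */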
static int
armpmu_map_cache_event(u64 config)
{
        unsigned int cache_type, cache_op, cache_result, ret;

        cache_type = (config >>  0) & 0xff;
        if (cache_type >= PERF_COUNT_HW_CACHE_MAX)
                return -EINVAL;

        cache_op = (config >>  8) & 0xff;
        if (cache_op >= PERF_COUNT_HW_CACHE_OP_MAX)
                return -EINVAL;

        cache_result = (config >> 16) & 0xff;
        if (cache_result >= PERF_COUNT_HW_CACHE_RESULT_MAX)
                return -EINVAL;

        ret = (int)(*armpmu->cache_map)[cache_type][cache_op][cache_result];

        if (ret == CACHE_OP_UNSUPPORTED)
                return -ENOENT;

        return ret;
}

static int
armpmu_map_event(u64 config)
{
        int mapping = (*armpmu->event_map)[config];
        return mapping == HW_OP_UNSUPPORTED ? -EOPNOTSUPP : mapping;
}

static int
armpmu_map_raw_event(u64 config)
{
        return (int)(config & armpmu->raw_event_mask);
}

static int
armpmu_event_set_period(struct perf_event *event,
                        struct hw_perf_event *hwc,
                        int idx)
{
        s64 left = local64_read(&hwc->period_left);
        s64 period = hwc->sample_period;
        int ret = 0;

        if (unlikely(left <= -period)) {
                left = period;
                local64_set(&hwc->period_left, left);
                hwc->last_period = period;
                ret = 1;
        }

        if (unlikely(left <= 0)) {
                left += period;
                local64_set(&hwc->period_left, left);
                hwc->last_period = period;
                ret = 1;
        }

        if (left > (s64)armpmu->max_period)
                left = armpmu->max_period;

        local64_set(&hwc->prev_count, (u64)-left);

        armpmu->write_counter(idx, (u64)(-left) & 0xffffffff);

        perf_event_update_userpage(event);

        return ret;
}
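
/*
 * Worked example (illustrative): with a sample_period of 1,000,000 the
 * counter above is programmed with (u64)-left = 0xFFF0BDC0, so it overflows
 * and raises the PMU interrupt after exactly 1,000,000 increments.
 * prev_count is seeded with the same value so that the next call to
 * armpmu_event_update() computes the correct delta.
 */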
static u64
armpmu_event_update(struct perf_event *event,
                    struct hw_perf_event *hwc,
                    int idx, int overflow)
{
        u64 delta, prev_raw_count, new_raw_count;

again:
        prev_raw_count = local64_read(&hwc->prev_count);
        new_raw_count = armpmu->read_counter(idx);

        if (local64_cmpxchg(&hwc->prev_count, prev_raw_count,
                            new_raw_count) != prev_raw_count)
                goto again;

        new_raw_count &= armpmu->max_period;
        prev_raw_count &= armpmu->max_period;

        if (overflow)
                delta = armpmu->max_period - prev_raw_count + new_raw_count + 1;
        else
                delta = new_raw_count - prev_raw_count;

        local64_add(delta, &event->count);
        local64_sub(delta, &hwc->period_left);

        return new_raw_count;
}
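
/*
 * Worked example (illustrative) of the overflow case above, assuming a
 * 32-bit counter (max_period == 0xffffffff): if prev_raw_count was
 * 0xfffffff0 and the counter wrapped to a new_raw_count of 0x10, then
 *	delta = 0xffffffff - 0xfffffff0 + 0x10 + 1 = 0x20,
 * i.e. the 32 events counted across the wrap are credited correctly.
 */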
static void
armpmu_read(struct perf_event *event)
{
        struct hw_perf_event *hwc = &event->hw;

        /* Don't read disabled counters! */
        if (hwc->idx < 0)
                return;

        armpmu_event_update(event, hwc, hwc->idx, 0);
}

static void
armpmu_stop(struct perf_event *event, int flags)
{
        struct hw_perf_event *hwc = &event->hw;

        /*
         * ARM pmu always has to update the counter, so ignore
         * PERF_EF_UPDATE, see comments in armpmu_start().
         */
        if (!(hwc->state & PERF_HES_STOPPED)) {
                armpmu->disable(hwc, hwc->idx);
                barrier(); /* why? */
                armpmu_event_update(event, hwc, hwc->idx, 0);
                hwc->state |= PERF_HES_STOPPED | PERF_HES_UPTODATE;
        }
}

static void
armpmu_start(struct perf_event *event, int flags)
{
        struct hw_perf_event *hwc = &event->hw;

        /*
         * ARM pmu always has to reprogram the period, so ignore
         * PERF_EF_RELOAD, see the comment below.
         */
        if (flags & PERF_EF_RELOAD)
                WARN_ON_ONCE(!(hwc->state & PERF_HES_UPTODATE));

        hwc->state = 0;
        /*
         * Set the period again. Some counters can't be stopped, so when we
         * were stopped we simply disabled the IRQ source and the counter
         * may have been left counting. If we don't do this step then we may
         * get an interrupt too soon or *way* too late if the overflow has
         * happened since disabling.
         */
        armpmu_event_set_period(event, hwc, hwc->idx);
        armpmu->enable(hwc, hwc->idx);
}
static void
armpmu_del(struct perf_event *event, int flags)
{
        struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
        struct hw_perf_event *hwc = &event->hw;
        int idx = hwc->idx;

        clear_bit(idx, cpuc->active_mask);
        armpmu_stop(event, PERF_EF_UPDATE);
        cpuc->events[idx] = NULL;
        clear_bit(idx, cpuc->used_mask);

        perf_event_update_userpage(event);
}

static int
armpmu_add(struct perf_event *event, int flags)
{
        struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
        struct hw_perf_event *hwc = &event->hw;
        int idx;
        int err = 0;

        perf_pmu_disable(event->pmu);

        /* If we don't have space for the counter then finish early. */
        idx = armpmu->get_event_idx(cpuc, hwc);
        if (idx < 0) {
                err = idx;
                goto out;
        }

        /*
         * If there is an event in the counter we are going to use then make
         * sure it is disabled.
         */
        event->hw.idx = idx;
        armpmu->disable(hwc, idx);
        cpuc->events[idx] = event;
        set_bit(idx, cpuc->active_mask);

        hwc->state = PERF_HES_STOPPED | PERF_HES_UPTODATE;
        if (flags & PERF_EF_START)
                armpmu_start(event, PERF_EF_RELOAD);

        /* Propagate our changes to the userspace mapping. */
        perf_event_update_userpage(event);

out:
        perf_pmu_enable(event->pmu);
        return err;
}
static struct pmu pmu;

static int
validate_event(struct cpu_hw_events *cpuc,
               struct perf_event *event)
{
        struct hw_perf_event fake_event = event->hw;

        if (event->pmu != &pmu || event->state <= PERF_EVENT_STATE_OFF)
                return 1;

        return armpmu->get_event_idx(cpuc, &fake_event) >= 0;
}

static int
validate_group(struct perf_event *event)
{
        struct perf_event *sibling, *leader = event->group_leader;
        struct cpu_hw_events fake_pmu;

        memset(&fake_pmu, 0, sizeof(fake_pmu));

        if (!validate_event(&fake_pmu, leader))
                return -ENOSPC;

        list_for_each_entry(sibling, &leader->sibling_list, group_entry) {
                if (!validate_event(&fake_pmu, sibling))
                        return -ENOSPC;
        }

        if (!validate_event(&fake_pmu, event))
                return -ENOSPC;

        return 0;
}

static irqreturn_t armpmu_platform_irq(int irq, void *dev)
{
        struct arm_pmu_platdata *plat = dev_get_platdata(&pmu_device->dev);

        return plat->handle_irq(irq, dev, armpmu->handle_irq);
}
static void
armpmu_release_hardware(void)
{
        int i, irq, irqs;

        irqs = min(pmu_device->num_resources, num_possible_cpus());

        for (i = 0; i < irqs; ++i) {
                if (!cpumask_test_and_clear_cpu(i, &armpmu->active_irqs))
                        continue;
                irq = platform_get_irq(pmu_device, i);
                if (irq >= 0)
                        free_irq(irq, NULL);
        }

        release_pmu(ARM_PMU_DEVICE_CPU);
}

static int
armpmu_reserve_hardware(void)
{
        struct arm_pmu_platdata *plat;
        irq_handler_t handle_irq;
        int i, err, irq, irqs;

        err = reserve_pmu(ARM_PMU_DEVICE_CPU);
        if (err) {
                pr_warning("unable to reserve pmu\n");
                return err;
        }

        plat = dev_get_platdata(&pmu_device->dev);
        if (plat && plat->handle_irq)
                handle_irq = armpmu_platform_irq;
        else
                handle_irq = armpmu->handle_irq;

        irqs = min(pmu_device->num_resources, num_possible_cpus());
        if (irqs < 1) {
                pr_err("no irqs for PMUs defined\n");
                return -ENODEV;
        }

        for (i = 0; i < irqs; ++i) {
                err = 0;
                irq = platform_get_irq(pmu_device, i);
                if (irq < 0)
                        continue;

                /*
                 * If we have a single PMU interrupt that we can't shift,
                 * assume that we're running on a uniprocessor machine and
                 * continue. Otherwise, continue without this interrupt.
                 */
                if (irq_set_affinity(irq, cpumask_of(i)) && irqs > 1) {
                        pr_warning("unable to set irq affinity (irq=%d, cpu=%u)\n",
                                   irq, i);
                        continue;
                }

                err = request_irq(irq, handle_irq,
                                  IRQF_DISABLED | IRQF_NOBALANCING,
                                  "arm-pmu", NULL);
                if (err) {
                        pr_err("unable to request IRQ%d for ARM PMU counters\n",
                               irq);
                        armpmu_release_hardware();
                        return err;
                }

                cpumask_set_cpu(i, &armpmu->active_irqs);
        }

        return 0;
}

static atomic_t active_events = ATOMIC_INIT(0);
static DEFINE_MUTEX(pmu_reserve_mutex);
static void
hw_perf_event_destroy(struct perf_event *event)
{
        if (atomic_dec_and_mutex_lock(&active_events, &pmu_reserve_mutex)) {
                armpmu_release_hardware();
                mutex_unlock(&pmu_reserve_mutex);
        }
}

static int
__hw_perf_event_init(struct perf_event *event)
{
        struct hw_perf_event *hwc = &event->hw;
        int mapping, err;

        /* Decode the generic type into an ARM event identifier. */
        if (PERF_TYPE_HARDWARE == event->attr.type) {
                mapping = armpmu_map_event(event->attr.config);
        } else if (PERF_TYPE_HW_CACHE == event->attr.type) {
                mapping = armpmu_map_cache_event(event->attr.config);
        } else if (PERF_TYPE_RAW == event->attr.type) {
                mapping = armpmu_map_raw_event(event->attr.config);
        } else {
                pr_debug("event type %x not supported\n", event->attr.type);
                return -EOPNOTSUPP;
        }

        if (mapping < 0) {
                pr_debug("event %x:%llx not supported\n", event->attr.type,
                         event->attr.config);
                return mapping;
        }

        /*
         * Check whether we need to exclude the counter from certain modes.
         * The ARM performance counters are on all of the time so if someone
         * has asked us for some excludes then we have to fail.
         */
        if (event->attr.exclude_kernel || event->attr.exclude_user ||
            event->attr.exclude_hv || event->attr.exclude_idle) {
                pr_debug("ARM performance counters do not support "
                         "mode exclusion\n");
                return -EPERM;
        }

        /*
         * We don't assign an index until we actually place the event onto
         * hardware. Use -1 to signify that we haven't decided where to put it
         * yet. For SMP systems, each core has its own PMU so we can't do any
         * clever allocation or constraints checking at this point.
         */
        hwc->idx = -1;

        /*
         * Store the event encoding into the config_base field. config and
         * event_base are unused as the only 2 things we need to know are
         * the event mapping and the counter to use. The counter to use is
         * also the index and the config_base is the event type.
         */
        hwc->config_base = (unsigned long)mapping;

        if (!hwc->sample_period) {
                hwc->sample_period = armpmu->max_period;
                hwc->last_period = hwc->sample_period;
                local64_set(&hwc->period_left, hwc->sample_period);
        }

        err = 0;
        if (event->group_leader != event) {
                err = validate_group(event);
                if (err)
                        return -EINVAL;
        }

        return err;
}
static int armpmu_event_init(struct perf_event *event)
{
        int err = 0;

        switch (event->attr.type) {
        case PERF_TYPE_RAW:
        case PERF_TYPE_HARDWARE:
        case PERF_TYPE_HW_CACHE:
                break;

        default:
                return -ENOENT;
        }

        if (!armpmu)
                return -ENODEV;

        event->destroy = hw_perf_event_destroy;

        if (!atomic_inc_not_zero(&active_events)) {
                mutex_lock(&pmu_reserve_mutex);
                if (atomic_read(&active_events) == 0) {
                        err = armpmu_reserve_hardware();
                }

                if (!err)
                        atomic_inc(&active_events);
                mutex_unlock(&pmu_reserve_mutex);
        }

        if (err)
                return err;

        err = __hw_perf_event_init(event);
        if (err)
                hw_perf_event_destroy(event);

        return err;
}
static void armpmu_enable(struct pmu *pmu)
{
        /* Enable all of the perf events on hardware. */
        int idx, enabled = 0;
        struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);

        if (!armpmu)
                return;

        for (idx = 0; idx < armpmu->num_events; ++idx) {
                struct perf_event *event = cpuc->events[idx];

                if (!event)
                        continue;

                armpmu->enable(&event->hw, idx);
                enabled = 1;
        }

        if (enabled)
                armpmu->start();
}

static void armpmu_disable(struct pmu *pmu)
{
        if (armpmu)
                armpmu->stop();
}

static struct pmu pmu = {
        .pmu_enable     = armpmu_enable,
        .pmu_disable    = armpmu_disable,
        .event_init     = armpmu_event_init,
        .add            = armpmu_add,
        .del            = armpmu_del,
        .start          = armpmu_start,
        .stop           = armpmu_stop,
        .read           = armpmu_read,
};
/* Include the PMU-specific implementations. */
#include "perf_event_xscale.c"
#include "perf_event_v6.c"
#include "perf_event_v7.c"

/*
 * Ensure the PMU has sane values out of reset.
 * This requires SMP to be available, so exists as a separate initcall.
 */
static int __init
armpmu_reset(void)
{
        if (armpmu && armpmu->reset)
                return on_each_cpu(armpmu->reset, NULL, 1);
        return 0;
}
arch_initcall(armpmu_reset);

/*
 * PMU platform driver and devicetree bindings.
 */
static struct of_device_id armpmu_of_device_ids[] = {
        {.compatible = "arm,cortex-a9-pmu"},
        {.compatible = "arm,cortex-a8-pmu"},
        {.compatible = "arm,arm1136-pmu"},
        {.compatible = "arm,arm1176-pmu"},
        {},
};

static struct platform_device_id armpmu_plat_device_ids[] = {
        {.name = "arm-pmu"},
        {},
};

static int __devinit armpmu_device_probe(struct platform_device *pdev)
{
        pmu_device = pdev;
        return 0;
}

static struct platform_driver armpmu_driver = {
        .driver         = {
                .name   = "arm-pmu",
                .of_match_table = armpmu_of_device_ids,
        },
        .probe          = armpmu_device_probe,
        .id_table       = armpmu_plat_device_ids,
};

static int __init register_pmu_driver(void)
{
        return platform_driver_register(&armpmu_driver);
}
device_initcall(register_pmu_driver);
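
/*
 * Example (illustrative, not from the original file) of a devicetree node
 * that the of_match_table above would bind against; the interrupt cells are
 * an assumed GIC specifier with one PMU IRQ per core:
 *
 *	pmu {
 *		compatible = "arm,cortex-a9-pmu";
 *		interrupts = <0 100 4>, <0 101 4>;
 *	};
 */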
/*
 * CPU PMU identification and registration.
 */
static int __init
init_hw_perf_events(void)
{
        unsigned long cpuid = read_cpuid_id();
        unsigned long implementor = (cpuid & 0xFF000000) >> 24;
        unsigned long part_number = (cpuid & 0xFFF0);
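
        /*
         * Illustrative example (not in the original source): a Cortex-A9
         * r3p0 reports a MIDR of 0x413fc090, which decodes to
         * implementor 0x41 (ARM Ltd) and part_number 0xC090, selecting
         * armv7_a9_pmu_init() below.
         */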
        /* ARM Ltd CPUs. */
        if (0x41 == implementor) {
                switch (part_number) {
                case 0xB360:    /* ARM1136 */
                case 0xB560:    /* ARM1156 */
                case 0xB760:    /* ARM1176 */
                        armpmu = armv6pmu_init();
                        break;
                case 0xB020:    /* ARM11mpcore */
                        armpmu = armv6mpcore_pmu_init();
                        break;
                case 0xC080:    /* Cortex-A8 */
                        armpmu = armv7_a8_pmu_init();
                        break;
                case 0xC090:    /* Cortex-A9 */
                        armpmu = armv7_a9_pmu_init();
                        break;
                case 0xC050:    /* Cortex-A5 */
                        armpmu = armv7_a5_pmu_init();
                        break;
                case 0xC0F0:    /* Cortex-A15 */
                        armpmu = armv7_a15_pmu_init();
                        break;
                }
        /* Intel CPUs [xscale]. */
        } else if (0x69 == implementor) {
                part_number = (cpuid >> 13) & 0x7;
                switch (part_number) {
                case 1:
                        armpmu = xscale1pmu_init();
                        break;
                case 2:
                        armpmu = xscale2pmu_init();
                        break;
                }
        }

        if (armpmu) {
                pr_info("enabled with %s PMU driver, %d counters available\n",
                        armpmu->name, armpmu->num_events);
        } else {
                pr_info("no hardware support available\n");
        }

        perf_pmu_register(&pmu, "cpu", PERF_TYPE_RAW);

        return 0;
}
early_initcall(init_hw_perf_events);
/*
 * Callchain handling code.
 */

/*
 * The registers we're interested in are at the end of the variable
 * length saved register structure. The fp points at the end of this
 * structure so the address of this struct is:
 * (struct frame_tail *)(xxx->fp)-1
 *
 * This code has been adapted from the ARM OProfile support.
 */
struct frame_tail {
        struct frame_tail __user *fp;
        unsigned long sp;
        unsigned long lr;
} __attribute__((packed));
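
/*
 * Illustrative note (not from the original source), assuming the APCS frame
 * layout produced by gcc with frame pointers enabled. A function prologue
 * such as
 *
 *	mov	ip, sp
 *	push	{fp, ip, lr, pc}
 *	sub	fp, ip, #4
 *
 * leaves fp pointing at the saved pc slot, so the saved {fp, sp, lr} triple
 * sits immediately below it, i.e. at (struct frame_tail *)fp - 1, which is
 * exactly how user_backtrace() below walks the chain.
 */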
/*
 * Get the return address for a single stackframe and return a pointer to the
 * next frame tail.
 */
static struct frame_tail __user *
user_backtrace(struct frame_tail __user *tail,
               struct perf_callchain_entry *entry)
{
        struct frame_tail buftail;

        /* Also check accessibility of one struct frame_tail beyond */
        if (!access_ok(VERIFY_READ, tail, sizeof(buftail)))
                return NULL;
        if (__copy_from_user_inatomic(&buftail, tail, sizeof(buftail)))
                return NULL;

        perf_callchain_store(entry, buftail.lr);

        /*
         * Frame pointers should strictly progress back up the stack
         * (towards higher addresses).
         */
        if (tail + 1 >= buftail.fp)
                return NULL;

        return buftail.fp - 1;
}

void
perf_callchain_user(struct perf_callchain_entry *entry, struct pt_regs *regs)
{
        struct frame_tail __user *tail;

        tail = (struct frame_tail __user *)regs->ARM_fp - 1;

        while ((entry->nr < PERF_MAX_STACK_DEPTH) &&
               tail && !((unsigned long)tail & 0x3))
                tail = user_backtrace(tail, entry);
}
/*
 * Gets called by walk_stackframe() for every stackframe. This will be called
 * whilst unwinding the stackframe and is like a subroutine return so we use
 * the PC.
 */
static int
callchain_trace(struct stackframe *fr,
                void *data)
{
        struct perf_callchain_entry *entry = data;
        perf_callchain_store(entry, fr->pc);
        return 0;
}

void
perf_callchain_kernel(struct perf_callchain_entry *entry, struct pt_regs *regs)
{
        struct stackframe fr;

        fr.fp = regs->ARM_fp;
        fr.sp = regs->ARM_sp;
        fr.lr = regs->ARM_lr;
        fr.pc = regs->ARM_pc;
        walk_stackframe(&fr, callchain_trace, entry);
}