/*
 * Performance event support - powerpc architecture code
 *
 * Copyright 2008-2009 Paul Mackerras, IBM Corporation.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 */
#include <linux/kernel.h>
#include <linux/sched.h>
#include <linux/perf_event.h>
#include <linux/percpu.h>
#include <linux/hardirq.h>
#include <asm/reg.h>
#include <asm/pmc.h>
#include <asm/machdep.h>
#include <asm/firmware.h>
#include <asm/ptrace.h>

struct cpu_hw_events {
	int n_events;
	int n_percpu;
	int disabled;
	int n_added;
	int n_limited;
	u8  pmcs_enabled;
	struct perf_event *event[MAX_HWEVENTS];
	u64 events[MAX_HWEVENTS];
	unsigned int flags[MAX_HWEVENTS];
	unsigned long mmcr[3];
	struct perf_event *limited_counter[MAX_LIMITED_HWCOUNTERS];
	u8 limited_hwidx[MAX_LIMITED_HWCOUNTERS];
	u64 alternatives[MAX_HWEVENTS][MAX_EVENT_ALTERNATIVES];
	unsigned long amasks[MAX_HWEVENTS][MAX_EVENT_ALTERNATIVES];
	unsigned long avalues[MAX_HWEVENTS][MAX_EVENT_ALTERNATIVES];
};
DEFINE_PER_CPU(struct cpu_hw_events, cpu_hw_events);

struct power_pmu *ppmu;

/*
 * Normally, to ignore kernel events we set the FCS (freeze counters
 * in supervisor mode) bit in MMCR0, but if the kernel runs with the
 * hypervisor bit set in the MSR, or if we are running on a processor
 * where the hypervisor bit is forced to 1 (as on Apple G5 processors),
 * then we need to use the FCHV bit to ignore kernel events.
 */
static unsigned int freeze_events_kernel = MMCR0_FCS;

/*
 * 32-bit doesn't have MMCRA but does have an MMCR2,
 * and a few other names are different.
 */
#ifdef CONFIG_PPC32

#define MMCR0_FCHV		0
#define MMCR0_PMCjCE		MMCR0_PMCnCE

#define SPRN_MMCRA		SPRN_MMCR2
#define MMCRA_SAMPLE_ENABLE	0

static inline unsigned long perf_ip_adjust(struct pt_regs *regs)
{
	return 0;
}
static inline void perf_get_data_addr(struct pt_regs *regs, u64 *addrp) { }
static inline u32 perf_get_misc_flags(struct pt_regs *regs)
{
	return 0;
}
static inline void perf_read_regs(struct pt_regs *regs) { }
static inline int perf_intr_is_nmi(struct pt_regs *regs)
{
	return 0;
}

#endif /* CONFIG_PPC32 */

/*
 * Things that are specific to 64-bit implementations.
 */
#ifdef CONFIG_PPC64

static inline unsigned long perf_ip_adjust(struct pt_regs *regs)
{
	unsigned long mmcra = regs->dsisr;

	if ((mmcra & MMCRA_SAMPLE_ENABLE) && !(ppmu->flags & PPMU_ALT_SIPR)) {
		unsigned long slot = (mmcra & MMCRA_SLOT) >> MMCRA_SLOT_SHIFT;
		if (slot > 1)
			return 4 * (slot - 1);
	}
	return 0;
}
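
/*
 * Worked example (illustrative): if sampling is enabled and the MMCRA
 * slot field reads 3, the sampled instruction is two instructions
 * (8 bytes) past the address in SIAR, so the function above returns
 * 4 * (3 - 1) = 8 for the caller to add to the SIAR value.
 */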

/*
 * The user wants a data address recorded.
 * If we're not doing instruction sampling, give them the SDAR
 * (sampled data address).  If we are doing instruction sampling, then
 * only give them the SDAR if it corresponds to the instruction
 * pointed to by SIAR; this is indicated by the [POWER6_]MMCRA_SDSYNC
 * bit in MMCRA.
 */
static inline void perf_get_data_addr(struct pt_regs *regs, u64 *addrp)
{
	unsigned long mmcra = regs->dsisr;
	unsigned long sdsync = (ppmu->flags & PPMU_ALT_SIPR) ?
		POWER6_MMCRA_SDSYNC : MMCRA_SDSYNC;

	if (!(mmcra & MMCRA_SAMPLE_ENABLE) || (mmcra & sdsync))
		*addrp = mfspr(SPRN_SDAR);
}

static inline u32 perf_get_misc_flags(struct pt_regs *regs)
{
	unsigned long mmcra = regs->dsisr;
	unsigned long sihv = MMCRA_SIHV;
	unsigned long sipr = MMCRA_SIPR;

	if (TRAP(regs) != 0xf00)
		return 0;	/* not a PMU interrupt */

	if (ppmu->flags & PPMU_ALT_SIPR) {
		sihv = POWER6_MMCRA_SIHV;
		sipr = POWER6_MMCRA_SIPR;
	}

	/* PR has priority over HV, so order below is important */
	if (mmcra & sipr)
		return PERF_RECORD_MISC_USER;
	if ((mmcra & sihv) && (freeze_events_kernel != MMCR0_FCHV))
		return PERF_RECORD_MISC_HYPERVISOR;
	return PERF_RECORD_MISC_KERNEL;
}
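
/*
 * Note: MMCRA_SIPR and MMCRA_SIHV snapshot the MSR PR and HV bits at
 * the sampled instruction, which may differ from the MSR bits at the
 * time of the interrupt; that is why they are used above rather than
 * looking at regs->msr.
 */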

/*
 * Overload regs->dsisr to store MMCRA so we only need to read it once
 * on each interrupt.
 */
static inline void perf_read_regs(struct pt_regs *regs)
{
	regs->dsisr = mfspr(SPRN_MMCRA);
}

/*
 * If interrupts were soft-disabled when a PMU interrupt occurs, treat
 * it as an NMI.
 */
static inline int perf_intr_is_nmi(struct pt_regs *regs)
{
	return !regs->softe;
}

#endif /* CONFIG_PPC64 */

static void perf_event_interrupt(struct pt_regs *regs);

void perf_event_print_debug(void)
{
}

/*
 * Read one performance monitor counter (PMC).
 */
static unsigned long read_pmc(int idx)
{
	unsigned long val;

	switch (idx) {
	case 1:
		val = mfspr(SPRN_PMC1);
		break;
	case 2:
		val = mfspr(SPRN_PMC2);
		break;
	case 3:
		val = mfspr(SPRN_PMC3);
		break;
	case 4:
		val = mfspr(SPRN_PMC4);
		break;
	case 5:
		val = mfspr(SPRN_PMC5);
		break;
	case 6:
		val = mfspr(SPRN_PMC6);
		break;
#ifdef CONFIG_PPC64
	case 7:
		val = mfspr(SPRN_PMC7);
		break;
	case 8:
		val = mfspr(SPRN_PMC8);
		break;
#endif /* CONFIG_PPC64 */
	default:
		printk(KERN_ERR "oops trying to read PMC%d\n", idx);
		val = 0;
	}
	return val;
}

/*
 * Write one PMC.
 */
static void write_pmc(int idx, unsigned long val)
{
	switch (idx) {
	case 1:
		mtspr(SPRN_PMC1, val);
		break;
	case 2:
		mtspr(SPRN_PMC2, val);
		break;
	case 3:
		mtspr(SPRN_PMC3, val);
		break;
	case 4:
		mtspr(SPRN_PMC4, val);
		break;
	case 5:
		mtspr(SPRN_PMC5, val);
		break;
	case 6:
		mtspr(SPRN_PMC6, val);
		break;
#ifdef CONFIG_PPC64
	case 7:
		mtspr(SPRN_PMC7, val);
		break;
	case 8:
		mtspr(SPRN_PMC8, val);
		break;
#endif /* CONFIG_PPC64 */
	default:
		printk(KERN_ERR "oops trying to write PMC%d\n", idx);
	}
}

/*
 * Check if a set of events can all go on the PMU at once.
 * If they can't, this will look at alternative codes for the events
 * and see if any combination of alternative codes is feasible.
 * The feasible set is returned in event_id[].
 */
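/*
 * Illustrative sketch of the constraint encoding (the actual layout is
 * defined by each back-end, not here): suppose a back-end reserves a
 * 2-bit field of the constraint value to count users of a unit that at
 * most two events may share.  Each such event then contributes 1 to
 * that field via ppmu->add_fields, and ppmu->test_adder is chosen so
 * that a third user carries into a bit covered by the event's mask,
 * which makes the feasibility test below reject the combination.
 */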
static int power_check_constraints(struct cpu_hw_events *cpuhw,
				   u64 event_id[], unsigned int cflags[],
				   int n_ev)
{
	unsigned long mask, value, nv;
	unsigned long smasks[MAX_HWEVENTS], svalues[MAX_HWEVENTS];
	int n_alt[MAX_HWEVENTS], choice[MAX_HWEVENTS];
	int i, j;
	unsigned long addf = ppmu->add_fields;
	unsigned long tadd = ppmu->test_adder;

	if (n_ev > ppmu->n_counter)
		return -1;

	/* First see if the events will go on as-is */
	for (i = 0; i < n_ev; ++i) {
		if ((cflags[i] & PPMU_LIMITED_PMC_REQD)
		    && !ppmu->limited_pmc_event(event_id[i])) {
			ppmu->get_alternatives(event_id[i], cflags[i],
					       cpuhw->alternatives[i]);
			event_id[i] = cpuhw->alternatives[i][0];
		}
		if (ppmu->get_constraint(event_id[i], &cpuhw->amasks[i][0],
					 &cpuhw->avalues[i][0]))
			return -1;
	}
	value = mask = 0;
	for (i = 0; i < n_ev; ++i) {
		nv = (value | cpuhw->avalues[i][0]) +
			(value & cpuhw->avalues[i][0] & addf);
		if ((((nv + tadd) ^ value) & mask) != 0 ||
		    (((nv + tadd) ^ cpuhw->avalues[i][0]) &
		     cpuhw->amasks[i][0]) != 0)
			break;
		value = nv;
		mask |= cpuhw->amasks[i][0];
	}
	if (i == n_ev)
		return 0;	/* all OK */

	/* doesn't work, gather alternatives... */
	if (!ppmu->get_alternatives)
		return -1;
	for (i = 0; i < n_ev; ++i) {
		choice[i] = 0;
		n_alt[i] = ppmu->get_alternatives(event_id[i], cflags[i],
						  cpuhw->alternatives[i]);
		for (j = 1; j < n_alt[i]; ++j)
			ppmu->get_constraint(cpuhw->alternatives[i][j],
					     &cpuhw->amasks[i][j],
					     &cpuhw->avalues[i][j]);
	}

	/* enumerate all possibilities and see if any will work */
	i = 0;
	j = -1;
	value = mask = nv = 0;
	while (1) {
		if (j >= 0) {
			/* we're backtracking, restore context */
			value = svalues[i];
			mask = smasks[i];
			j = choice[i];
		}
		/*
		 * See if any alternative k for event_id i,
		 * where k > j, will satisfy the constraints.
		 */
		while (++j < n_alt[i]) {
			nv = (value | cpuhw->avalues[i][j]) +
				(value & cpuhw->avalues[i][j] & addf);
			if ((((nv + tadd) ^ value) & mask) == 0 &&
			    (((nv + tadd) ^ cpuhw->avalues[i][j])
			     & cpuhw->amasks[i][j]) == 0)
				break;
		}
		if (j >= n_alt[i]) {
			/*
			 * No feasible alternative, backtrack
			 * to event_id i-1 and continue enumerating its
			 * alternatives from where we got up to.
			 */
			if (--i < 0)
				return -1;
		} else {
			/*
			 * Found a feasible alternative for event_id i,
			 * remember where we got up to with this event_id,
			 * go on to the next event_id, and start with
			 * the first alternative for it.
			 */
			choice[i] = j;
			svalues[i] = value;
			smasks[i] = mask;
			value = nv;
			mask |= cpuhw->amasks[i][j];
			if (++i >= n_ev)
				break;
			j = -1;
		}
	}

	/* OK, we have a feasible combination, tell the caller the solution */
	for (i = 0; i < n_ev; ++i)
		event_id[i] = cpuhw->alternatives[i][choice[i]];
	return 0;
}
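
/*
 * Note: the enumeration above is a depth-first search over each
 * event's alternatives, so its worst case is the product of the
 * n_alt[] values; MAX_EVENT_ALTERNATIVES keeps each factor small.
 */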

/*
 * Check if newly-added events have consistent settings for
 * exclude_{user,kernel,hv} with each other and any previously
 * added events.
 */
static int check_excludes(struct perf_event **ctrs, unsigned int cflags[],
			  int n_prev, int n_new)
{
	int eu = 0, ek = 0, eh = 0;
	int i, n, first;
	struct perf_event *event;

	n = n_prev + n_new;
	if (n <= 1)
		return 0;

	first = 1;
	for (i = 0; i < n; ++i) {
		if (cflags[i] & PPMU_LIMITED_PMC_OK) {
			cflags[i] &= ~PPMU_LIMITED_PMC_REQD;
			continue;
		}
		event = ctrs[i];
		if (first) {
			eu = event->attr.exclude_user;
			ek = event->attr.exclude_kernel;
			eh = event->attr.exclude_hv;
			first = 0;
		} else if (event->attr.exclude_user != eu ||
			   event->attr.exclude_kernel != ek ||
			   event->attr.exclude_hv != eh) {
			return -EAGAIN;
		}
	}

	if (eu || ek || eh)
		for (i = 0; i < n; ++i)
			if (cflags[i] & PPMU_LIMITED_PMC_OK)
				cflags[i] |= PPMU_LIMITED_PMC_REQD;

	return 0;
}

static void power_pmu_read(struct perf_event *event)
{
	s64 val, delta, prev;

	if (!event->hw.idx)
		return;
	/*
	 * Performance monitor interrupts come even when interrupts
	 * are soft-disabled, as long as interrupts are hard-enabled.
	 * Therefore we treat them like NMIs.
	 */
	do {
		prev = atomic64_read(&event->hw.prev_count);
		barrier();
		val = read_pmc(event->hw.idx);
	} while (atomic64_cmpxchg(&event->hw.prev_count, prev, val) != prev);

	/* The counters are only 32 bits wide */
	delta = (val - prev) & 0xfffffffful;
	atomic64_add(delta, &event->count);
	atomic64_sub(delta, &event->hw.period_left);
}
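
/*
 * Example of the wraparound handling above: if prev = 0xfffffff0 and
 * the 32-bit PMC has wrapped around to val = 0x00000010, then
 * (val - prev) masked to 32 bits yields delta = 0x20, the true count.
 */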

/*
 * On some machines, PMC5 and PMC6 can't be written, don't respect
 * the freeze conditions, and don't generate interrupts.  This tells
 * us if `event' is using such a PMC.
 */
static int is_limited_pmc(int pmcnum)
{
	return (ppmu->flags & PPMU_LIMITED_PMC5_6)
		&& (pmcnum == 5 || pmcnum == 6);
}
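
/*
 * For reference (not exhaustive): back-ends such as POWER5+ and POWER6
 * set PPMU_LIMITED_PMC5_6 because their PMC5 and PMC6 are hard-wired
 * to count instructions completed and run cycles respectively.
 */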

static void freeze_limited_counters(struct cpu_hw_events *cpuhw,
				    unsigned long pmc5, unsigned long pmc6)
{
	struct perf_event *event;
	u64 val, prev, delta;
	int i;

	for (i = 0; i < cpuhw->n_limited; ++i) {
		event = cpuhw->limited_counter[i];
		if (!event->hw.idx)
			continue;
		val = (event->hw.idx == 5) ? pmc5 : pmc6;
		prev = atomic64_read(&event->hw.prev_count);
		event->hw.idx = 0;
		delta = (val - prev) & 0xfffffffful;
		atomic64_add(delta, &event->count);
	}
}

static void thaw_limited_counters(struct cpu_hw_events *cpuhw,
				  unsigned long pmc5, unsigned long pmc6)
{
	struct perf_event *event;
	u64 val;
	int i;

	for (i = 0; i < cpuhw->n_limited; ++i) {
		event = cpuhw->limited_counter[i];
		event->hw.idx = cpuhw->limited_hwidx[i];
		val = (event->hw.idx == 5) ? pmc5 : pmc6;
		atomic64_set(&event->hw.prev_count, val);
		perf_event_update_userpage(event);
	}
}

/*
 * Since limited events don't respect the freeze conditions, we
 * have to read them immediately after freezing or unfreezing the
 * other events.  We try to keep the values from the limited
 * events as consistent as possible by keeping the delay (in
 * cycles and instructions) between freezing/unfreezing and reading
 * the limited events as small and consistent as possible.
 * Therefore, if any limited events are in use, we read them
 * both, and always in the same order, to minimize variability,
 * and do it inside the same asm that writes MMCR0.
 */
static void write_mmcr0(struct cpu_hw_events *cpuhw, unsigned long mmcr0)
{
	unsigned long pmc5, pmc6;

	if (!cpuhw->n_limited) {
		mtspr(SPRN_MMCR0, mmcr0);
		return;
	}

	/*
	 * Write MMCR0, then read PMC5 and PMC6 immediately.
	 * To ensure we don't get a performance monitor interrupt
	 * between writing MMCR0 and freezing/thawing the limited
	 * events, we first write MMCR0 with the event overflow
	 * interrupt enable bits turned off.
	 */
	asm volatile("mtspr %3,%2; mfspr %0,%4; mfspr %1,%5"
		     : "=&r" (pmc5), "=&r" (pmc6)
		     : "r" (mmcr0 & ~(MMCR0_PMC1CE | MMCR0_PMCjCE)),
		       "i" (SPRN_MMCR0),
		       "i" (SPRN_PMC5), "i" (SPRN_PMC6));
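
	/*
	 * Operand map for the asm above: %0 and %1 receive PMC5 and PMC6
	 * (the "=&r" earlyclobbers keep them out of the input registers),
	 * %2 is the MMCR0 value with the overflow-interrupt enables
	 * masked off, and %3-%5 are the immediate SPR numbers for the
	 * mtspr/mfspr instructions.
	 */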

	if (mmcr0 & MMCR0_FC)
		freeze_limited_counters(cpuhw, pmc5, pmc6);
	else
		thaw_limited_counters(cpuhw, pmc5, pmc6);

	/*
	 * Write the full MMCR0 including the event overflow interrupt
	 * enable bits, if necessary.
	 */
	if (mmcr0 & (MMCR0_PMC1CE | MMCR0_PMCjCE))
		mtspr(SPRN_MMCR0, mmcr0);
}

/*
 * Disable all events to prevent PMU interrupts and to allow
 * events to be added or removed.
 */
void hw_perf_disable(void)
{
	struct cpu_hw_events *cpuhw;
	unsigned long flags;

	if (!ppmu)
		return;
	local_irq_save(flags);
	cpuhw = &__get_cpu_var(cpu_hw_events);

	if (!cpuhw->disabled) {
		cpuhw->disabled = 1;
		cpuhw->n_added = 0;

		/*
		 * Check if we ever enabled the PMU on this cpu.
		 */
		if (!cpuhw->pmcs_enabled) {
			ppc_enable_pmcs();
			cpuhw->pmcs_enabled = 1;
		}

		/*
		 * Disable instruction sampling if it was enabled
		 */
		if (cpuhw->mmcr[2] & MMCRA_SAMPLE_ENABLE) {
			mtspr(SPRN_MMCRA,
			      cpuhw->mmcr[2] & ~MMCRA_SAMPLE_ENABLE);
			mb();
		}

		/*
		 * Set the 'freeze counters' bit.
		 * The barrier is to make sure the mtspr has been
		 * executed and the PMU has frozen the events
		 * before we return.
		 */
		write_mmcr0(cpuhw, mfspr(SPRN_MMCR0) | MMCR0_FC);
		mb();
	}
	local_irq_restore(flags);
}

/*
 * Re-enable all events if disable == 0.
 * If we were previously disabled and events were added, then
 * put the new config on the PMU.
 */
void hw_perf_enable(void)
{
	struct perf_event *event;
	struct cpu_hw_events *cpuhw;
	unsigned long flags;
	long i;
	unsigned long val;
	s64 left;
	unsigned int hwc_index[MAX_HWEVENTS];
	int n_lim;
	int idx;

	if (!ppmu)
		return;
	local_irq_save(flags);
	cpuhw = &__get_cpu_var(cpu_hw_events);
	if (!cpuhw->disabled) {
		local_irq_restore(flags);
		return;
	}
	cpuhw->disabled = 0;

	/*
	 * If we didn't change anything, or only removed events,
	 * no need to recalculate MMCR* settings and reset the PMCs.
	 * Just reenable the PMU with the current MMCR* settings
	 * (possibly updated for removal of events).
	 */
	if (!cpuhw->n_added) {
		mtspr(SPRN_MMCRA, cpuhw->mmcr[2] & ~MMCRA_SAMPLE_ENABLE);
		mtspr(SPRN_MMCR1, cpuhw->mmcr[1]);
		if (cpuhw->n_events == 0)
			ppc_set_pmu_inuse(0);
		goto out_enable;
	}

	/*
	 * Compute MMCR* values for the new set of events
	 */
	if (ppmu->compute_mmcr(cpuhw->events, cpuhw->n_events, hwc_index,
			       cpuhw->mmcr)) {
		/* shouldn't ever get here */
		printk(KERN_ERR "oops compute_mmcr failed\n");
		goto out;
	}

	/*
	 * Add in MMCR0 freeze bits corresponding to the
	 * attr.exclude_* bits for the first event.
	 * We have already checked that all events have the
	 * same values for these bits as the first event.
	 */
	event = cpuhw->event[0];
	if (event->attr.exclude_user)
		cpuhw->mmcr[0] |= MMCR0_FCP;
	if (event->attr.exclude_kernel)
		cpuhw->mmcr[0] |= freeze_events_kernel;
	if (event->attr.exclude_hv)
		cpuhw->mmcr[0] |= MMCR0_FCHV;

	/*
	 * Write the new configuration to MMCR* with the freeze
	 * bit set and set the hardware events to their initial values.
	 * Then unfreeze the events.
	 */
	ppc_set_pmu_inuse(1);
	mtspr(SPRN_MMCRA, cpuhw->mmcr[2] & ~MMCRA_SAMPLE_ENABLE);
	mtspr(SPRN_MMCR1, cpuhw->mmcr[1]);
	mtspr(SPRN_MMCR0, (cpuhw->mmcr[0] & ~(MMCR0_PMC1CE | MMCR0_PMCjCE))
				| MMCR0_FC);

	/*
	 * Read off any pre-existing events that need to move
	 * to another PMC.
	 */
	for (i = 0; i < cpuhw->n_events; ++i) {
		event = cpuhw->event[i];
		if (event->hw.idx && event->hw.idx != hwc_index[i] + 1) {
			power_pmu_read(event);
			write_pmc(event->hw.idx, 0);
			event->hw.idx = 0;
		}
	}

	/*
	 * Initialize the PMCs for all the new and moved events.
	 */
	cpuhw->n_limited = n_lim = 0;
	for (i = 0; i < cpuhw->n_events; ++i) {
		event = cpuhw->event[i];
		if (event->hw.idx)
			continue;
		idx = hwc_index[i] + 1;
		if (is_limited_pmc(idx)) {
			cpuhw->limited_counter[n_lim] = event;
			cpuhw->limited_hwidx[n_lim] = idx;
			++n_lim;
			continue;
		}
		val = 0;
		if (event->hw.sample_period) {
			left = atomic64_read(&event->hw.period_left);
			if (left < 0x80000000L)
				val = 0x80000000L - left;
		}
		atomic64_set(&event->hw.prev_count, val);
		event->hw.idx = idx;
		write_pmc(idx, val);
		perf_event_update_userpage(event);
	}
	cpuhw->n_limited = n_lim;
	cpuhw->mmcr[0] |= MMCR0_PMXE | MMCR0_FCECE;

 out_enable:
	mb();
	write_mmcr0(cpuhw, cpuhw->mmcr[0]);

	/*
	 * Enable instruction sampling if necessary
	 */
	if (cpuhw->mmcr[2] & MMCRA_SAMPLE_ENABLE) {
		mb();
		mtspr(SPRN_MMCRA, cpuhw->mmcr[2]);
	}

 out:
	local_irq_restore(flags);
}

static int collect_events(struct perf_event *group, int max_count,
			  struct perf_event *ctrs[], u64 *events,
			  unsigned int *flags)
{
	int n = 0;
	struct perf_event *event;

	if (!is_software_event(group)) {
		if (n >= max_count)
			return -1;
		ctrs[n] = group;
		flags[n] = group->hw.event_base;
		events[n++] = group->hw.config;
	}
	list_for_each_entry(event, &group->sibling_list, group_entry) {
		if (!is_software_event(event) &&
		    event->state != PERF_EVENT_STATE_OFF) {
			if (n >= max_count)
				return -1;
			ctrs[n] = event;
			flags[n] = event->hw.event_base;
			events[n++] = event->hw.config;
		}
	}
	return n;
}

static void event_sched_in(struct perf_event *event)
{
	event->state = PERF_EVENT_STATE_ACTIVE;
	event->oncpu = smp_processor_id();
	event->tstamp_running += event->ctx->time - event->tstamp_stopped;
	if (is_software_event(event))
		event->pmu->enable(event);
}

/*
 * Called to enable a whole group of events.
 * Returns 1 if the group was enabled, or -EAGAIN if it could not be.
 * Assumes the caller has disabled interrupts and has
 * frozen the PMU with hw_perf_save_disable.
 */
int hw_perf_group_sched_in(struct perf_event *group_leader,
	       struct perf_cpu_context *cpuctx,
	       struct perf_event_context *ctx)
{
	struct cpu_hw_events *cpuhw;
	long i, n, n0;
	struct perf_event *sub;

	if (!ppmu)
		return 0;
	cpuhw = &__get_cpu_var(cpu_hw_events);
	n0 = cpuhw->n_events;
	n = collect_events(group_leader, ppmu->n_counter - n0,
			   &cpuhw->event[n0], &cpuhw->events[n0],
			   &cpuhw->flags[n0]);
	if (n < 0)
		return -EAGAIN;
	if (check_excludes(cpuhw->event, cpuhw->flags, n0, n))
		return -EAGAIN;
	i = power_check_constraints(cpuhw, cpuhw->events, cpuhw->flags, n + n0);
	if (i < 0)
		return -EAGAIN;
	cpuhw->n_events = n0 + n;
	cpuhw->n_added += n;

	/*
	 * OK, this group can go on; update event states etc.,
	 * and enable any software events
	 */
	for (i = n0; i < n0 + n; ++i)
		cpuhw->event[i]->hw.config = cpuhw->events[i];
	cpuctx->active_oncpu += n;
	n = 1;
	event_sched_in(group_leader);
	list_for_each_entry(sub, &group_leader->sibling_list, group_entry) {
		if (sub->state != PERF_EVENT_STATE_OFF) {
			event_sched_in(sub);
			++n;
		}
	}
	ctx->nr_active += n;

	return 1;
}

/*
 * Add an event to the PMU.
 * If all events are not already frozen, then we disable and
 * re-enable the PMU in order to get hw_perf_enable to do the
 * actual work of reconfiguring the PMU.
 */
static int power_pmu_enable(struct perf_event *event)
{
	struct cpu_hw_events *cpuhw;
	unsigned long flags;
	int n0;
	int ret = -EAGAIN;

	local_irq_save(flags);
	perf_disable();

	/*
	 * Add the event to the list (if there is room)
	 * and check whether the total set is still feasible.
	 */
	cpuhw = &__get_cpu_var(cpu_hw_events);
	n0 = cpuhw->n_events;
	if (n0 >= ppmu->n_counter)
		goto out;
	cpuhw->event[n0] = event;
	cpuhw->events[n0] = event->hw.config;
	cpuhw->flags[n0] = event->hw.event_base;
	if (check_excludes(cpuhw->event, cpuhw->flags, n0, 1))
		goto out;
	if (power_check_constraints(cpuhw, cpuhw->events, cpuhw->flags, n0 + 1))
		goto out;

	event->hw.config = cpuhw->events[n0];
	++cpuhw->n_events;
	++cpuhw->n_added;

	ret = 0;
 out:
	perf_enable();
	local_irq_restore(flags);
	return ret;
}

/*
 * Remove an event from the PMU.
 */
static void power_pmu_disable(struct perf_event *event)
{
	struct cpu_hw_events *cpuhw;
	long i;
	unsigned long flags;

	local_irq_save(flags);
	perf_disable();

	power_pmu_read(event);

	cpuhw = &__get_cpu_var(cpu_hw_events);
	for (i = 0; i < cpuhw->n_events; ++i) {
		if (event == cpuhw->event[i]) {
			while (++i < cpuhw->n_events)
				cpuhw->event[i-1] = cpuhw->event[i];
			--cpuhw->n_events;
			ppmu->disable_pmc(event->hw.idx - 1, cpuhw->mmcr);
			if (event->hw.idx) {
				write_pmc(event->hw.idx, 0);
				event->hw.idx = 0;
			}
			perf_event_update_userpage(event);
			break;
		}
	}
	for (i = 0; i < cpuhw->n_limited; ++i)
		if (event == cpuhw->limited_counter[i])
			break;
	if (i < cpuhw->n_limited) {
		while (++i < cpuhw->n_limited) {
			cpuhw->limited_counter[i-1] = cpuhw->limited_counter[i];
			cpuhw->limited_hwidx[i-1] = cpuhw->limited_hwidx[i];
		}
		--cpuhw->n_limited;
	}
	if (cpuhw->n_events == 0) {
		/* disable exceptions if no events are running */
		cpuhw->mmcr[0] &= ~(MMCR0_PMXE | MMCR0_FCECE);
	}

	perf_enable();
	local_irq_restore(flags);
}

/*
 * Re-enable interrupts on an event after they were throttled
 * because they were coming too fast.
 */
static void power_pmu_unthrottle(struct perf_event *event)
{
	s64 val, left;
	unsigned long flags;

	if (!event->hw.idx || !event->hw.sample_period)
		return;
	local_irq_save(flags);
	perf_disable();
	power_pmu_read(event);
	left = event->hw.sample_period;
	event->hw.last_period = left;
	val = 0;
	if (left < 0x80000000L)
		val = 0x80000000L - left;
	write_pmc(event->hw.idx, val);
	atomic64_set(&event->hw.prev_count, val);
	atomic64_set(&event->hw.period_left, left);
	perf_event_update_userpage(event);
	perf_enable();
	local_irq_restore(flags);
}
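
/*
 * Note on the 0x80000000 arithmetic used above (and in hw_perf_enable
 * and record_and_restart): the PMCs raise an exception when bit 31
 * becomes set, i.e. when they go negative as 32-bit values, so arming
 * a counter with 0x80000000 - left makes it overflow after another
 * `left' events, which is how sample periods are implemented here.
 */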

struct pmu power_pmu = {
	.enable		= power_pmu_enable,
	.disable	= power_pmu_disable,
	.read		= power_pmu_read,
	.unthrottle	= power_pmu_unthrottle,
};

/*
 * Return 1 if we might be able to put event on a limited PMC,
 * or 0 if not.
 * An event can only go on a limited PMC if it counts something
 * that a limited PMC can count, doesn't require interrupts, and
 * doesn't exclude any processor mode.
 */
static int can_go_on_limited_pmc(struct perf_event *event, u64 ev,
				 unsigned int flags)
{
	int n;
	u64 alt[MAX_EVENT_ALTERNATIVES];

	if (event->attr.exclude_user
	    || event->attr.exclude_kernel
	    || event->attr.exclude_hv
	    || event->attr.sample_period)
		return 0;

	if (ppmu->limited_pmc_event(ev))
		return 1;

	/*
	 * The requested event_id isn't on a limited PMC already;
	 * see if any alternative code goes on a limited PMC.
	 */
	if (!ppmu->get_alternatives)
		return 0;

	flags |= PPMU_LIMITED_PMC_OK | PPMU_LIMITED_PMC_REQD;
	n = ppmu->get_alternatives(ev, flags, alt);

	return n > 0;
}

/*
 * Find an alternative event_id that goes on a normal PMC, if possible,
 * and return the event_id code, or 0 if there is no such alternative.
 * (Note: event_id code 0 is "don't count" on all machines.)
 */
static u64 normal_pmc_alternative(u64 ev, unsigned long flags)
{
	u64 alt[MAX_EVENT_ALTERNATIVES];
	int n;

	flags &= ~(PPMU_LIMITED_PMC_OK | PPMU_LIMITED_PMC_REQD);
	n = ppmu->get_alternatives(ev, flags, alt);
	if (!n)
		return 0;
	return alt[0];
}

/* Number of perf_events counting hardware events */
static atomic_t num_events;
/* Used to avoid races in calling reserve/release_pmc_hardware */
static DEFINE_MUTEX(pmc_reserve_mutex);

/*
 * Release the PMU if this is the last perf_event.
 */
static void hw_perf_event_destroy(struct perf_event *event)
{
	if (!atomic_add_unless(&num_events, -1, 1)) {
		mutex_lock(&pmc_reserve_mutex);
		if (atomic_dec_return(&num_events) == 0)
			release_pmc_hardware();
		mutex_unlock(&pmc_reserve_mutex);
	}
}

/*
 * Translate a generic cache event_id config to a raw event_id code.
 */
static int hw_perf_cache_event(u64 config, u64 *eventp)
{
	unsigned long type, op, result;
	int ev;

	if (!ppmu->cache_events)
		return -EINVAL;

	/* unpack config */
	type = config & 0xff;
	op = (config >> 8) & 0xff;
	result = (config >> 16) & 0xff;

	if (type >= PERF_COUNT_HW_CACHE_MAX ||
	    op >= PERF_COUNT_HW_CACHE_OP_MAX ||
	    result >= PERF_COUNT_HW_CACHE_RESULT_MAX)
		return -EINVAL;

	ev = (*ppmu->cache_events)[type][op][result];
	if (ev == 0)
		return -EOPNOTSUPP;
	if (ev == -1)
		return -EINVAL;
	*eventp = ev;
	return 0;
}
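
/*
 * Encoding example for the unpacking above: a config of
 * (PERF_COUNT_HW_CACHE_RESULT_MISS << 16) |
 * (PERF_COUNT_HW_CACHE_OP_READ << 8) | PERF_COUNT_HW_CACHE_L1D,
 * i.e. 0x10000, requests L1 data-cache read misses; the back-end's
 * cache_events table maps that triple to a raw event code.
 */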

const struct pmu *hw_perf_event_init(struct perf_event *event)
{
	u64 ev;
	unsigned long flags;
	struct perf_event *ctrs[MAX_HWEVENTS];
	u64 events[MAX_HWEVENTS];
	unsigned int cflags[MAX_HWEVENTS];
	int n;
	int err;
	struct cpu_hw_events *cpuhw;

	if (!ppmu)
		return ERR_PTR(-ENXIO);
	switch (event->attr.type) {
	case PERF_TYPE_HARDWARE:
		ev = event->attr.config;
		if (ev >= ppmu->n_generic || ppmu->generic_events[ev] == 0)
			return ERR_PTR(-EOPNOTSUPP);
		ev = ppmu->generic_events[ev];
		break;
	case PERF_TYPE_HW_CACHE:
		err = hw_perf_cache_event(event->attr.config, &ev);
		if (err)
			return ERR_PTR(err);
		break;
	case PERF_TYPE_RAW:
		ev = event->attr.config;
		break;
	default:
		return ERR_PTR(-EINVAL);
	}
	event->hw.config_base = ev;
	event->hw.idx = 0;

	/*
	 * If we are not running on a hypervisor, force the
	 * exclude_hv bit to 0 so that we don't care what
	 * the user set it to.
	 */
	if (!firmware_has_feature(FW_FEATURE_LPAR))
		event->attr.exclude_hv = 0;

	/*
	 * If this is a per-task event, then we can use
	 * PM_RUN_* events interchangeably with their non RUN_*
	 * equivalents, e.g. PM_RUN_CYC instead of PM_CYC.
	 * XXX we should check if the task is an idle task.
	 */
	flags = 0;
	if (event->ctx->task)
		flags |= PPMU_ONLY_COUNT_RUN;

	/*
	 * If this machine has limited events, check whether this
	 * event_id could go on a limited event.
	 */
	if (ppmu->flags & PPMU_LIMITED_PMC5_6) {
		if (can_go_on_limited_pmc(event, ev, flags)) {
			flags |= PPMU_LIMITED_PMC_OK;
		} else if (ppmu->limited_pmc_event(ev)) {
			/*
			 * The requested event_id is on a limited PMC,
			 * but we can't use a limited PMC; see if any
			 * alternative goes on a normal PMC.
			 */
			ev = normal_pmc_alternative(ev, flags);
			if (!ev)
				return ERR_PTR(-EINVAL);
		}
	}

	/*
	 * If this is in a group, check if it can go on with all the
	 * other hardware events in the group.  We assume the event
	 * hasn't been linked into its leader's sibling list at this point.
	 */
	n = 0;
	if (event->group_leader != event) {
		n = collect_events(event->group_leader, ppmu->n_counter - 1,
				   ctrs, events, cflags);
		if (n < 0)
			return ERR_PTR(-EINVAL);
	}
	events[n] = ev;
	ctrs[n] = event;
	cflags[n] = flags;
	if (check_excludes(ctrs, cflags, n, 1))
		return ERR_PTR(-EINVAL);

	cpuhw = &get_cpu_var(cpu_hw_events);
	err = power_check_constraints(cpuhw, events, cflags, n + 1);
	put_cpu_var(cpu_hw_events);
	if (err)
		return ERR_PTR(-EINVAL);

	event->hw.config = events[n];
	event->hw.event_base = cflags[n];
	event->hw.last_period = event->hw.sample_period;
	atomic64_set(&event->hw.period_left, event->hw.last_period);

	/*
	 * See if we need to reserve the PMU.
	 * If no events are currently in use, then we have to take a
	 * mutex to ensure that we don't race with another task doing
	 * reserve_pmc_hardware or release_pmc_hardware.
	 */
	err = 0;
	if (!atomic_inc_not_zero(&num_events)) {
		mutex_lock(&pmc_reserve_mutex);
		if (atomic_read(&num_events) == 0 &&
		    reserve_pmc_hardware(perf_event_interrupt))
			err = -EBUSY;
		else
			atomic_inc(&num_events);
		mutex_unlock(&pmc_reserve_mutex);
	}
	event->destroy = hw_perf_event_destroy;

	if (err)
		return ERR_PTR(err);
	return &power_pmu;
}

/*
 * A counter has overflowed; update its count and record
 * things if requested.  Note that interrupts are hard-disabled
 * here so there is no possibility of being interrupted.
 */
static void record_and_restart(struct perf_event *event, unsigned long val,
			       struct pt_regs *regs, int nmi)
{
	u64 period = event->hw.sample_period;
	s64 prev, delta, left;
	int record = 0;

	/* we don't have to worry about interrupts here */
	prev = atomic64_read(&event->hw.prev_count);
	delta = (val - prev) & 0xfffffffful;
	atomic64_add(delta, &event->count);

	/*
	 * See if the total period for this event has expired,
	 * and update for the next period.
	 */
	val = 0;
	left = atomic64_read(&event->hw.period_left) - delta;
	if (period) {
		if (left <= 0) {
			left += period;
			if (left <= 0)
				left = period;
			record = 1;
		}
		if (left < 0x80000000LL)
			val = 0x80000000LL - left;
	}

	/*
	 * Finally record data if requested.
	 */
	if (record) {
		struct perf_sample_data data;

		perf_sample_data_init(&data, ~0ULL);
		data.period = event->hw.last_period;

		if (event->attr.sample_type & PERF_SAMPLE_ADDR)
			perf_get_data_addr(regs, &data.addr);

		if (perf_event_overflow(event, nmi, &data, regs)) {
			/*
			 * Interrupts are coming too fast - throttle them
			 * by setting the event to 0, so it will be
			 * at least 2^30 cycles until the next interrupt
			 * (assuming each event counts at most 2 counts
			 * per cycle).
			 */
			val = 0;
			left = ~0ULL >> 1;
		}
	}

	write_pmc(event->hw.idx, val);
	atomic64_set(&event->hw.prev_count, val);
	atomic64_set(&event->hw.period_left, left);
	perf_event_update_userpage(event);
}

/*
 * Called from generic code to get the misc flags (i.e. processor mode)
 * for an event_id.
 */
unsigned long perf_misc_flags(struct pt_regs *regs)
{
	u32 flags = perf_get_misc_flags(regs);

	if (flags)
		return flags;
	return user_mode(regs) ? PERF_RECORD_MISC_USER :
		PERF_RECORD_MISC_KERNEL;
}

/*
 * Called from generic code to get the instruction pointer
 * for an event_id.
 */
unsigned long perf_instruction_pointer(struct pt_regs *regs)
{
	unsigned long ip;

	if (TRAP(regs) != 0xf00)
		return regs->nip;	/* not a PMU interrupt */

	ip = mfspr(SPRN_SIAR) + perf_ip_adjust(regs);
	return ip;
}

/*
 * Performance monitor interrupt stuff
 */
static void perf_event_interrupt(struct pt_regs *regs)
{
	int i;
	struct cpu_hw_events *cpuhw = &__get_cpu_var(cpu_hw_events);
	struct perf_event *event;
	unsigned long val;
	int found = 0;
	int nmi;

	if (cpuhw->n_limited)
		freeze_limited_counters(cpuhw, mfspr(SPRN_PMC5),
					mfspr(SPRN_PMC6));

	perf_read_regs(regs);

	nmi = perf_intr_is_nmi(regs);
	if (nmi)
		nmi_enter();
	else
		irq_enter();

	for (i = 0; i < cpuhw->n_events; ++i) {
		event = cpuhw->event[i];
		if (!event->hw.idx || is_limited_pmc(event->hw.idx))
			continue;
		val = read_pmc(event->hw.idx);
		if ((int)val < 0) {
			/* event has overflowed */
			found = 1;
			record_and_restart(event, val, regs, nmi);
		}
	}

	/*
	 * In case we didn't find and reset the event that caused
	 * the interrupt, scan all events and reset any that are
	 * negative, to avoid getting continual interrupts.
	 * Any that we processed in the previous loop will not be negative.
	 */
	if (!found) {
		for (i = 0; i < ppmu->n_counter; ++i) {
			if (is_limited_pmc(i + 1))
				continue;
			val = read_pmc(i + 1);
			if ((int)val < 0)
				write_pmc(i + 1, 0);
		}
	}

	/*
	 * Reset MMCR0 to its normal value.  This will set PMXE and
	 * clear FC (freeze counters) and PMAO (perf mon alert occurred)
	 * and thus allow interrupts to occur again.
	 * XXX might want to use MSR.PM to keep the events frozen until
	 * we get back out of this interrupt.
	 */
	write_mmcr0(cpuhw, cpuhw->mmcr[0]);

	if (nmi)
		nmi_exit();
	else
		irq_exit();
}

static void power_pmu_setup(int cpu)
{
	struct cpu_hw_events *cpuhw = &per_cpu(cpu_hw_events, cpu);

	if (!ppmu)
		return;
	memset(cpuhw, 0, sizeof(*cpuhw));
	cpuhw->mmcr[0] = MMCR0_FC;
}

static int __cpuinit
power_pmu_notifier(struct notifier_block *self, unsigned long action, void *hcpu)
{
	unsigned int cpu = (long)hcpu;

	switch (action & ~CPU_TASKS_FROZEN) {
	case CPU_UP_PREPARE:
		power_pmu_setup(cpu);
		break;

	default:
		break;
	}

	return NOTIFY_OK;
}

int register_power_pmu(struct power_pmu *pmu)
{
	if (ppmu)
		return -EBUSY;		/* something's already registered */

	ppmu = pmu;
	pr_info("%s performance monitor hardware support registered\n",
		pmu->name);

#ifdef CONFIG_PPC64
	/*
	 * Use FCHV to ignore kernel events if MSR.HV is set.
	 */
	if (mfmsr() & MSR_HV)
		freeze_events_kernel = MMCR0_FCHV;
#endif /* CONFIG_PPC64 */

	perf_cpu_notifier(power_pmu_notifier);

	return 0;
}
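
/*
 * Usage sketch (hypothetical back-end, not part of this file): a CPU
 * support module fills in a struct power_pmu and registers it from an
 * initcall, roughly:
 *
 *	static struct power_pmu mycpu_pmu = {
 *		.name         = "MYCPU",
 *		.n_counter    = 6,
 *		.compute_mmcr = mycpu_compute_mmcr,
 *		(plus get_constraint, get_alternatives, disable_pmc, ...)
 *	};
 *
 *	static int __init init_mycpu_pmu(void)
 *	{
 *		return register_power_pmu(&mycpu_pmu);
 *	}
 *	arch_initcall(init_mycpu_pmu);
 */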