/*
 * Performance events - AMD IBS
 *
 * Copyright (C) 2011 Advanced Micro Devices, Inc., Robert Richter
 *
 * For licensing details see kernel-base/COPYING
 */

#include <linux/perf_event.h>
#include <linux/module.h>
#include <linux/pci.h>
#include <linux/ptrace.h>
#include <linux/syscore_ops.h>

#include <asm/apic.h>

#include "../perf_event.h"

static u32 ibs_caps;

#if defined(CONFIG_PERF_EVENTS) && defined(CONFIG_CPU_SUP_AMD)

#include <linux/kprobes.h>
#include <linux/hardirq.h>

#include <asm/nmi.h>

#define IBS_FETCH_CONFIG_MASK	(IBS_FETCH_RAND_EN | IBS_FETCH_MAX_CNT)
#define IBS_OP_CONFIG_MASK	IBS_OP_MAX_CNT

enum ibs_states {
	IBS_ENABLED	= 0,
	IBS_STARTED	= 1,
	IBS_STOPPING	= 2,

	IBS_MAX_STATES,
};

struct cpu_perf_ibs {
	struct perf_event	*event;
	unsigned long		state[BITS_TO_LONGS(IBS_MAX_STATES)];
};

struct perf_ibs {
	struct pmu			pmu;
	unsigned int			msr;
	u64				config_mask;
	u64				cnt_mask;
	u64				enable_mask;
	u64				valid_mask;
	u64				max_period;
	unsigned long			offset_mask[1];
	int				offset_max;
	struct cpu_perf_ibs __percpu	*pcpu;

	struct attribute		**format_attrs;
	struct attribute_group		format_group;
	const struct attribute_group	*attr_groups[2];

	u64				(*get_count)(u64 config);
};

struct perf_ibs_data {
	u32		size;
	union {
		u32	data[0];	/* data buffer starts here */
		u32	caps;
	};
	u64		regs[MSR_AMD64_IBS_REG_COUNT_MAX];
};

static int
perf_event_set_period(struct hw_perf_event *hwc, u64 min, u64 max, u64 *hw_period)
{
	s64 left = local64_read(&hwc->period_left);
	s64 period = hwc->sample_period;
	int overflow = 0;

	/*
	 * If we are way outside a reasonable range then just skip forward:
	 */
	if (unlikely(left <= -period)) {
		left = period;
		local64_set(&hwc->period_left, left);
		hwc->last_period = period;
		overflow = 1;
	}

	if (unlikely(left < (s64)min)) {
		left += period;
		local64_set(&hwc->period_left, left);
		hwc->last_period = period;
		overflow = 1;
	}

	/*
	 * If the hw period that triggers the sw overflow is too short
	 * we might hit the irq handler. This biases the results.
	 * Thus we shorten the next-to-last period and set the last
	 * period to the max period.
	 */
	if (left > max) {
		left -= max;
		if (left > max)
			left = max;
		else if (left < min)
			left = min;
	}

	*hw_period = (u64)left;

	return overflow;
}
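
/*
 * For illustration: with min = 0x10 and a remaining period_left of max + 0x10,
 * the clamping above programs a short hw period of 0x10 first, so that the
 * following period can run at the full max length, as described in the comment.
 */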

static int
perf_event_try_update(struct perf_event *event, u64 new_raw_count, int width)
{
	struct hw_perf_event *hwc = &event->hw;
	int shift = 64 - width;
	u64 prev_raw_count;
	u64 delta;

	/*
	 * Careful: an NMI might modify the previous event value.
	 *
	 * Our tactic to handle this is to first atomically read and
	 * exchange a new raw count - then add that new-prev delta
	 * count to the generic event atomically:
	 */
	prev_raw_count = local64_read(&hwc->prev_count);
	if (local64_cmpxchg(&hwc->prev_count, prev_raw_count,
			new_raw_count) != prev_raw_count)
		return 0;

	/*
	 * Now we have the new raw value and have updated the prev
	 * timestamp already. We can now calculate the elapsed delta
	 * (event-)time and add that to the generic event.
	 *
	 * Careful, not all hw sign-extends above the physical width
	 * of the count.
	 */
	delta = (new_raw_count << shift) - (prev_raw_count << shift);
	delta >>= shift;

	local64_add(delta, &event->count);
	local64_sub(delta, &hwc->period_left);

	return 1;
}
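
/*
 * Note that the IBS code always calls perf_event_try_update() with width = 64
 * (see perf_ibs_event_update()), so shift is 0 and delta reduces to
 * new_raw_count - prev_raw_count; the shift dance only matters for counters
 * narrower than 64 bits that do not sign-extend.
 */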

static struct perf_ibs perf_ibs_fetch;
static struct perf_ibs perf_ibs_op;

static struct perf_ibs *get_ibs_pmu(int type)
{
	if (perf_ibs_fetch.pmu.type == type)
		return &perf_ibs_fetch;
	if (perf_ibs_op.pmu.type == type)
		return &perf_ibs_op;
	return NULL;
}

/*
 * Use IBS for precise event sampling:
 *
 *  perf record -a -e cpu-cycles:p ...    # use ibs op counting cycle count
 *  perf record -a -e r076:p ...          # same as -e cpu-cycles:p
 *  perf record -a -e r0C1:p ...          # use ibs op counting micro-ops
 *
 * IbsOpCntCtl (bit 19) of IBS Execution Control Register (IbsOpCtl,
 * MSRC001_1033) is used to select either cycle or micro-ops counting
 * mode.
 *
 * The rip of IBS samples has skid 0. Thus, IBS supports precise
 * levels 1 and 2 and the PERF_EFLAGS_EXACT is set. In rare cases the
 * rip is invalid when IBS was not able to record the rip correctly.
 * We clear PERF_EFLAGS_EXACT and take the rip from pt_regs then.
 */
static int perf_ibs_precise_event(struct perf_event *event, u64 *config)
{
	switch (event->attr.precise_ip) {
	case 0:
		return -ENOENT;
	case 1:
	case 2:
		break;
	default:
		return -EOPNOTSUPP;
	}

	switch (event->attr.type) {
	case PERF_TYPE_HARDWARE:
		switch (event->attr.config) {
		case PERF_COUNT_HW_CPU_CYCLES:
			*config = 0;
			return 0;
		}
		break;
	case PERF_TYPE_RAW:
		switch (event->attr.config) {
		case 0x0076:
			*config = 0;
			return 0;
		case 0x00C1:
			*config = IBS_OP_CNT_CTL;
			return 0;
		}
		break;
	default:
		return -ENOENT;
	}

	return -EOPNOTSUPP;
}

static const struct perf_event_attr ibs_notsupp = {
	.exclude_user	= 1,
	.exclude_kernel	= 1,
	.exclude_hv	= 1,
	.exclude_idle	= 1,
	.exclude_host	= 1,
	.exclude_guest	= 1,
};

static int perf_ibs_init(struct perf_event *event)
{
	struct hw_perf_event *hwc = &event->hw;
	struct perf_ibs *perf_ibs;
	u64 max_cnt, config;
	int ret;

	perf_ibs = get_ibs_pmu(event->attr.type);
	if (perf_ibs) {
		config = event->attr.config;
	} else {
		perf_ibs = &perf_ibs_op;
		ret = perf_ibs_precise_event(event, &config);
		if (ret)
			return ret;
	}

	if (event->pmu != &perf_ibs->pmu)
		return -ENOENT;

	if (perf_flags(&event->attr) & perf_flags(&ibs_notsupp))
		return -EINVAL;

	if (config & ~perf_ibs->config_mask)
		return -EINVAL;

	if (hwc->sample_period) {
		if (config & perf_ibs->cnt_mask)
			/* raw max_cnt may not be set */
			return -EINVAL;
		if (!event->attr.sample_freq && hwc->sample_period & 0x0f)
			/*
			 * lower 4 bits cannot be set in ibs max cnt,
			 * but allowing it in case we adjust the
			 * sample period to set a frequency.
			 */
			return -EINVAL;
		hwc->sample_period &= ~0x0FULL;
		if (!hwc->sample_period)
			hwc->sample_period = 0x10;
	} else {
		max_cnt = config & perf_ibs->cnt_mask;
		config &= ~perf_ibs->cnt_mask;
		event->attr.sample_period = max_cnt << 4;
		hwc->sample_period = event->attr.sample_period;
	}

	if (!hwc->sample_period)
		return -EINVAL;

	/*
	 * If we modify hwc->sample_period, we also need to update
	 * hwc->last_period and hwc->period_left.
	 */
	hwc->last_period = hwc->sample_period;
	local64_set(&hwc->period_left, hwc->sample_period);

	hwc->config_base = perf_ibs->msr;
	hwc->config = config;

	return 0;
}
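
/*
 * Period handling in short: the hardware counts in units of 16 (the low 4 bits
 * of the max count are not implemented), so e.g. a requested sample_period of
 * 0x12345 is trimmed to 0x12340 above and later programmed as a max count of
 * 0x1234 (period >> 4); periods below 0x10 are raised to 0x10.
 */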

static int perf_ibs_set_period(struct perf_ibs *perf_ibs,
			       struct hw_perf_event *hwc, u64 *period)
{
	int overflow;

	/* ignore lower 4 bits in min count: */
	overflow = perf_event_set_period(hwc, 1<<4, perf_ibs->max_period, period);
	local64_set(&hwc->prev_count, 0);

	return overflow;
}

static u64 get_ibs_fetch_count(u64 config)
{
	return (config & IBS_FETCH_CNT) >> 12;
}

static u64 get_ibs_op_count(u64 config)
{
	u64 count = 0;

	if (config & IBS_OP_VAL)
		count += (config & IBS_OP_MAX_CNT) << 4; /* cnt rolled over */

	if (ibs_caps & IBS_CAPS_RDWROPCNT)
		count += (config & IBS_OP_CUR_CNT) >> 32;

	return count;
}
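
/*
 * In other words: if the counter already rolled over (IBS_OP_VAL is set), one
 * full period (the programmed max count scaled by 16) has elapsed; on hardware
 * with IBS_CAPS_RDWROPCNT the current count from the upper half of IbsOpCtl is
 * added on top to cover the partially elapsed period.
 */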

static void
perf_ibs_event_update(struct perf_ibs *perf_ibs, struct perf_event *event,
		      u64 *config)
{
	u64 count = perf_ibs->get_count(*config);

	/*
	 * Set width to 64 since we do not overflow on max width but
	 * instead on max count. In perf_ibs_set_period() we clear
	 * prev count manually on overflow.
	 */
	while (!perf_event_try_update(event, count, 64)) {
		rdmsrl(event->hw.config_base, *config);
		count = perf_ibs->get_count(*config);
	}
}

static inline void perf_ibs_enable_event(struct perf_ibs *perf_ibs,
					 struct hw_perf_event *hwc, u64 config)
{
	wrmsrl(hwc->config_base, hwc->config | config | perf_ibs->enable_mask);
}

/*
 * Erratum #420 Instruction-Based Sampling Engine May Generate
 * Interrupt that Cannot Be Cleared:
 *
 * Must clear counter mask first, then clear the enable bit. See
 * Revision Guide for AMD Family 10h Processors, Publication #41322.
 */
static inline void perf_ibs_disable_event(struct perf_ibs *perf_ibs,
					  struct hw_perf_event *hwc, u64 config)
{
	config &= ~perf_ibs->cnt_mask;
	wrmsrl(hwc->config_base, config);
	config &= ~perf_ibs->enable_mask;
	wrmsrl(hwc->config_base, config);
}

/*
 * We cannot restore the ibs pmu state, so we always need to update
 * the event while stopping it and then reset the state when starting
 * again. Thus, we ignore the PERF_EF_RELOAD and PERF_EF_UPDATE flags in
 * perf_ibs_start()/perf_ibs_stop() and instead always do it.
 */
static void perf_ibs_start(struct perf_event *event, int flags)
{
	struct hw_perf_event *hwc = &event->hw;
	struct perf_ibs *perf_ibs = container_of(event->pmu, struct perf_ibs, pmu);
	struct cpu_perf_ibs *pcpu = this_cpu_ptr(perf_ibs->pcpu);
	u64 period;

	if (WARN_ON_ONCE(!(hwc->state & PERF_HES_STOPPED)))
		return;

	WARN_ON_ONCE(!(hwc->state & PERF_HES_UPTODATE));
	hwc->state = 0;

	perf_ibs_set_period(perf_ibs, hwc, &period);
	/*
	 * Set STARTED before enabling the hardware, such that
	 * a subsequent NMI must observe it. Then clear STOPPING
	 * such that we don't consume NMIs by accident.
	 */
	set_bit(IBS_STARTED, pcpu->state);
	clear_bit(IBS_STOPPING, pcpu->state);
	perf_ibs_enable_event(perf_ibs, hwc, period >> 4);

	perf_event_update_userpage(event);
}

static void perf_ibs_stop(struct perf_event *event, int flags)
{
	struct hw_perf_event *hwc = &event->hw;
	struct perf_ibs *perf_ibs = container_of(event->pmu, struct perf_ibs, pmu);
	struct cpu_perf_ibs *pcpu = this_cpu_ptr(perf_ibs->pcpu);
	u64 config;
	int stopping;

	stopping = test_bit(IBS_STARTED, pcpu->state);

	if (!stopping && (hwc->state & PERF_HES_UPTODATE))
		return;

	rdmsrl(hwc->config_base, config);

	if (stopping) {
		/*
		 * Set STOPPING before disabling the hardware, such that it
		 * must be visible to NMIs the moment we clear the EN bit,
		 * at which point we can generate an !VALID sample which
		 * we need to consume.
		 */
		set_bit(IBS_STOPPING, pcpu->state);
		perf_ibs_disable_event(perf_ibs, hwc, config);
		/*
		 * Clear STARTED after disabling the hardware; if it were
		 * cleared before, an NMI hitting after the clear but before
		 * clearing the EN bit might think it a spurious NMI and not
		 * handle it.
		 *
		 * Clearing it after, however, creates the problem of the NMI
		 * handler seeing STARTED but not having a valid sample.
		 */
		clear_bit(IBS_STARTED, pcpu->state);
		WARN_ON_ONCE(hwc->state & PERF_HES_STOPPED);
		hwc->state |= PERF_HES_STOPPED;
	}

	if (hwc->state & PERF_HES_UPTODATE)
		return;

	/*
	 * Clear valid bit to not count rollovers on update, rollovers
	 * are only updated in the irq handler.
	 */
	config &= ~perf_ibs->valid_mask;

	perf_ibs_event_update(perf_ibs, event, &config);
	hwc->state |= PERF_HES_UPTODATE;
}
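
/*
 * The resulting ordering, in short: the start path does
 * set STARTED -> clear STOPPING -> set EN, while the stop path does
 * set STOPPING -> clear EN -> clear STARTED, so the NMI handler can always
 * attribute the interrupt to either a running or a just-stopped event.
 */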

static int perf_ibs_add(struct perf_event *event, int flags)
{
	struct perf_ibs *perf_ibs = container_of(event->pmu, struct perf_ibs, pmu);
	struct cpu_perf_ibs *pcpu = this_cpu_ptr(perf_ibs->pcpu);

	if (test_and_set_bit(IBS_ENABLED, pcpu->state))
		return -ENOSPC;

	event->hw.state = PERF_HES_UPTODATE | PERF_HES_STOPPED;

	pcpu->event = event;

	if (flags & PERF_EF_START)
		perf_ibs_start(event, PERF_EF_RELOAD);

	return 0;
}

static void perf_ibs_del(struct perf_event *event, int flags)
{
	struct perf_ibs *perf_ibs = container_of(event->pmu, struct perf_ibs, pmu);
	struct cpu_perf_ibs *pcpu = this_cpu_ptr(perf_ibs->pcpu);

	if (!test_and_clear_bit(IBS_ENABLED, pcpu->state))
		return;

	perf_ibs_stop(event, PERF_EF_UPDATE);

	pcpu->event = NULL;

	perf_event_update_userpage(event);
}

static void perf_ibs_read(struct perf_event *event) { }

PMU_FORMAT_ATTR(rand_en,	"config:57");
PMU_FORMAT_ATTR(cnt_ctl,	"config:19");

static struct attribute *ibs_fetch_format_attrs[] = {
	&format_attr_rand_en.attr,
	NULL,
};

static struct attribute *ibs_op_format_attrs[] = {
	NULL,	/* &format_attr_cnt_ctl.attr if IBS_CAPS_OPCNT */
	NULL,
};

static struct perf_ibs perf_ibs_fetch = {
	.pmu = {
		.task_ctx_nr	= perf_invalid_context,

		.event_init	= perf_ibs_init,
		.add		= perf_ibs_add,
		.del		= perf_ibs_del,
		.start		= perf_ibs_start,
		.stop		= perf_ibs_stop,
		.read		= perf_ibs_read,
	},
	.msr			= MSR_AMD64_IBSFETCHCTL,
	.config_mask		= IBS_FETCH_CONFIG_MASK,
	.cnt_mask		= IBS_FETCH_MAX_CNT,
	.enable_mask		= IBS_FETCH_ENABLE,
	.valid_mask		= IBS_FETCH_VAL,
	.max_period		= IBS_FETCH_MAX_CNT << 4,
	.offset_mask		= { MSR_AMD64_IBSFETCH_REG_MASK },
	.offset_max		= MSR_AMD64_IBSFETCH_REG_COUNT,
	.format_attrs		= ibs_fetch_format_attrs,

	.get_count		= get_ibs_fetch_count,
};

static struct perf_ibs perf_ibs_op = {
	.pmu = {
		.task_ctx_nr	= perf_invalid_context,

		.event_init	= perf_ibs_init,
		.add		= perf_ibs_add,
		.del		= perf_ibs_del,
		.start		= perf_ibs_start,
		.stop		= perf_ibs_stop,
		.read		= perf_ibs_read,
	},
	.msr			= MSR_AMD64_IBSOPCTL,
	.config_mask		= IBS_OP_CONFIG_MASK,
	.cnt_mask		= IBS_OP_MAX_CNT,
	.enable_mask		= IBS_OP_ENABLE,
	.valid_mask		= IBS_OP_VAL,
	.max_period		= IBS_OP_MAX_CNT << 4,
	.offset_mask		= { MSR_AMD64_IBSOP_REG_MASK },
	.offset_max		= MSR_AMD64_IBSOP_REG_COUNT,
	.format_attrs		= ibs_op_format_attrs,

	.get_count		= get_ibs_op_count,
};
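
/*
 * For illustration, these two PMUs register as "ibs_fetch" and "ibs_op" and
 * expose the format attributes above, so sampling can be requested e.g. as
 *
 *  perf record -a -e ibs_op// ...              # IBS execution sampling
 *  perf record -a -e ibs_op/cnt_ctl=1/ ...     # count micro-ops instead of cycles
 *                                              #  (only if IBS_CAPS_OPCNT is present)
 *  perf record -a -e ibs_fetch/rand_en=1/ ...  # fetch sampling with randomized period
 */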

static int perf_ibs_handle_irq(struct perf_ibs *perf_ibs, struct pt_regs *iregs)
{
	struct cpu_perf_ibs *pcpu = this_cpu_ptr(perf_ibs->pcpu);
	struct perf_event *event = pcpu->event;
	struct hw_perf_event *hwc = &event->hw;
	struct perf_sample_data data;
	struct perf_raw_record raw;
	struct pt_regs regs;
	struct perf_ibs_data ibs_data;
	int offset, size, check_rip, offset_max, throttle = 0;
	unsigned int msr;
	u64 *buf, *config, period;

	if (!test_bit(IBS_STARTED, pcpu->state)) {
fail:
		/*
		 * Catch spurious interrupts after stopping IBS: After
		 * disabling IBS there could still be incoming NMIs
		 * with samples that even have the valid bit cleared.
		 * Mark all these NMIs as handled.
		 */
		if (test_and_clear_bit(IBS_STOPPING, pcpu->state))
			return 1;

		return 0;
	}

	msr = hwc->config_base;
	buf = ibs_data.regs;
	rdmsrl(msr, *buf);
	if (!(*buf++ & perf_ibs->valid_mask))
		goto fail;

	config = &ibs_data.regs[0];
	perf_ibs_event_update(perf_ibs, event, config);
	perf_sample_data_init(&data, 0, hwc->last_period);
	if (!perf_ibs_set_period(perf_ibs, hwc, &period))
		goto out;	/* no sw counter overflow */

	ibs_data.caps = ibs_caps;
	size = 1;
	offset = 1;
	check_rip = (perf_ibs == &perf_ibs_op && (ibs_caps & IBS_CAPS_RIPINVALIDCHK));
	if (event->attr.sample_type & PERF_SAMPLE_RAW)
		offset_max = perf_ibs->offset_max;
	else if (check_rip)
		offset_max = 2;
	else
		offset_max = 1;
	do {
		rdmsrl(msr + offset, *buf++);
		size++;
		offset = find_next_bit(perf_ibs->offset_mask,
				       perf_ibs->offset_max,
				       offset + 1);
	} while (offset < offset_max);
	if (event->attr.sample_type & PERF_SAMPLE_RAW) {
		/*
		 * Read IbsBrTarget and IbsOpData4 separately
		 * depending on their availability.
		 * Can't add to offset_max as they are staggered.
		 */
		if (ibs_caps & IBS_CAPS_BRNTRGT) {
			rdmsrl(MSR_AMD64_IBSBRTARGET, *buf++);
			size++;
		}
		if (ibs_caps & IBS_CAPS_OPDATA4) {
			rdmsrl(MSR_AMD64_IBSOPDATA4, *buf++);
			size++;
		}
	}
	ibs_data.size = sizeof(u64) * size;

	regs = *iregs;
	if (check_rip && (ibs_data.regs[2] & IBS_RIP_INVALID)) {
		regs.flags &= ~PERF_EFLAGS_EXACT;
	} else {
		set_linear_ip(&regs, ibs_data.regs[1]);
		regs.flags |= PERF_EFLAGS_EXACT;
	}

	if (event->attr.sample_type & PERF_SAMPLE_RAW) {
		raw.size = sizeof(u32) + ibs_data.size;
		raw.data = ibs_data.data;
		data.raw = &raw;
	}

	throttle = perf_event_overflow(event, &data, &regs);
out:
	if (throttle)
		perf_ibs_stop(event, 0);
	else
		perf_ibs_enable_event(perf_ibs, hwc, period >> 4);

	perf_event_update_userpage(event);

	return 1;
}
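
/*
 * When PERF_SAMPLE_RAW is requested, the raw sample assembled above starts
 * with the 32-bit ibs_caps word (the union in struct perf_ibs_data overlays
 * it with the data buffer), followed by the IBS register values read into
 * ibs_data.regs, beginning with the control register.
 */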

static int
perf_ibs_nmi_handler(unsigned int cmd, struct pt_regs *regs)
{
	u64 stamp = sched_clock();
	int handled = 0;

	handled += perf_ibs_handle_irq(&perf_ibs_fetch, regs);
	handled += perf_ibs_handle_irq(&perf_ibs_op, regs);

	if (handled)
		inc_irq_stat(apic_perf_irqs);

	perf_sample_event_took(sched_clock() - stamp);

	return handled;
}
NOKPROBE_SYMBOL(perf_ibs_nmi_handler);

static __init int perf_ibs_pmu_init(struct perf_ibs *perf_ibs, char *name)
{
	struct cpu_perf_ibs __percpu *pcpu;
	int ret;

	pcpu = alloc_percpu(struct cpu_perf_ibs);
	if (!pcpu)
		return -ENOMEM;

	perf_ibs->pcpu = pcpu;

	/* register attributes */
	if (perf_ibs->format_attrs[0]) {
		memset(&perf_ibs->format_group, 0, sizeof(perf_ibs->format_group));
		perf_ibs->format_group.name	= "format";
		perf_ibs->format_group.attrs	= perf_ibs->format_attrs;

		memset(&perf_ibs->attr_groups, 0, sizeof(perf_ibs->attr_groups));
		perf_ibs->attr_groups[0]	= &perf_ibs->format_group;
		perf_ibs->pmu.attr_groups	= perf_ibs->attr_groups;
	}

	ret = perf_pmu_register(&perf_ibs->pmu, name, -1);
	if (ret) {
		perf_ibs->pcpu = NULL;
		free_percpu(pcpu);
	}

	return ret;
}

static __init int perf_event_ibs_init(void)
{
	struct attribute **attr = ibs_op_format_attrs;

	if (!ibs_caps)
		return -ENODEV;	/* ibs not supported by the cpu */

	perf_ibs_pmu_init(&perf_ibs_fetch, "ibs_fetch");

	if (ibs_caps & IBS_CAPS_OPCNT) {
		perf_ibs_op.config_mask |= IBS_OP_CNT_CTL;
		*attr++ = &format_attr_cnt_ctl.attr;
	}
	perf_ibs_pmu_init(&perf_ibs_op, "ibs_op");

	register_nmi_handler(NMI_LOCAL, perf_ibs_nmi_handler, 0, "perf_ibs");
	pr_info("perf: AMD IBS detected (0x%08x)\n", ibs_caps);

	return 0;
}

#else /* defined(CONFIG_PERF_EVENTS) && defined(CONFIG_CPU_SUP_AMD) */

static __init int perf_event_ibs_init(void) { return 0; }

#endif

/* IBS - apic initialization, for perf and oprofile */

static __init u32 __get_ibs_caps(void)
{
	u32 caps;
	unsigned int max_level;

	if (!boot_cpu_has(X86_FEATURE_IBS))
		return 0;

	/* check IBS cpuid feature flags */
	max_level = cpuid_eax(0x80000000);
	if (max_level < IBS_CPUID_FEATURES)
		return IBS_CAPS_DEFAULT;

	caps = cpuid_eax(IBS_CPUID_FEATURES);
	if (!(caps & IBS_CAPS_AVAIL))
		/* cpuid flags not valid */
		return IBS_CAPS_DEFAULT;

	return caps;
}

u32 get_ibs_caps(void)
{
	return ibs_caps;
}

EXPORT_SYMBOL(get_ibs_caps);

static inline int get_eilvt(int offset)
{
	return !setup_APIC_eilvt(offset, 0, APIC_EILVT_MSG_NMI, 1);
}

static inline int put_eilvt(int offset)
{
	return !setup_APIC_eilvt(offset, 0, 0, 1);
}
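
/*
 * setup_APIC_eilvt() returns 0 when the requested extended-LVT slot could be
 * claimed, so get_eilvt() reserves an offset (masked, NMI message type) and
 * put_eilvt() releases it again; a non-zero return typically means the slot
 * is already in use with conflicting settings.
 */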

/*
 * Check and reserve APIC extended interrupt LVT offset for IBS if available.
 */
static inline int ibs_eilvt_valid(void)
{
	int offset;
	u64 val;
	int valid = 0;

	preempt_disable();

	rdmsrl(MSR_AMD64_IBSCTL, val);
	offset = val & IBSCTL_LVT_OFFSET_MASK;

	if (!(val & IBSCTL_LVT_OFFSET_VALID)) {
		pr_err(FW_BUG "cpu %d, invalid IBS interrupt offset %d (MSR%08X=0x%016llx)\n",
		       smp_processor_id(), offset, MSR_AMD64_IBSCTL, val);
		goto out;
	}

	if (!get_eilvt(offset)) {
		pr_err(FW_BUG "cpu %d, IBS interrupt offset %d not available (MSR%08X=0x%016llx)\n",
		       smp_processor_id(), offset, MSR_AMD64_IBSCTL, val);
		goto out;
	}

	valid = 1;
out:
	preempt_enable();

	return valid;
}

static int setup_ibs_ctl(int ibs_eilvt_off)
{
	struct pci_dev *cpu_cfg;
	int nodes;
	u32 value = 0;

	nodes = 0;
	cpu_cfg = NULL;
	do {
		cpu_cfg = pci_get_device(PCI_VENDOR_ID_AMD,
					 PCI_DEVICE_ID_AMD_10H_NB_MISC,
					 cpu_cfg);
		if (!cpu_cfg)
			break;
		++nodes;
		pci_write_config_dword(cpu_cfg, IBSCTL, ibs_eilvt_off
				       | IBSCTL_LVT_OFFSET_VALID);
		pci_read_config_dword(cpu_cfg, IBSCTL, &value);
		if (value != (ibs_eilvt_off | IBSCTL_LVT_OFFSET_VALID)) {
			pci_dev_put(cpu_cfg);
			pr_debug("Failed to setup IBS LVT offset, IBSCTL = 0x%08x\n",
				 value);
			return -EINVAL;
		}
	} while (1);

	if (!nodes) {
		pr_debug("No CPU node configured for IBS\n");
		return -ENODEV;
	}

	return 0;
}

/*
 * This runs only on the current cpu. We try to find an LVT offset and
 * setup the local APIC. For this we must disable preemption. On
 * success we initialize all nodes with this offset. This then updates
 * the offset in the per-node IBS_CTL MSR. The per-core APIC setup of
 * the IBS interrupt vector is handled by perf_ibs_cpu_notifier, which
 * uses the new offset.
 */
static void force_ibs_eilvt_setup(void)
{
	int offset;
	int ret;

	preempt_disable();
	/* find the next free available EILVT entry, skip offset 0 */
	for (offset = 1; offset < APIC_EILVT_NR_MAX; offset++) {
		if (get_eilvt(offset))
			break;
	}
	preempt_enable();

	if (offset == APIC_EILVT_NR_MAX) {
		pr_debug("No EILVT entry available\n");
		return;
	}

	ret = setup_ibs_ctl(offset);
	if (ret)
		goto out;

	if (!ibs_eilvt_valid())
		goto out;

	pr_info("IBS: LVT offset %d assigned\n", offset);

	return;
out:
	preempt_disable();
	put_eilvt(offset);
	preempt_enable();
	return;
}

static void ibs_eilvt_setup(void)
{
	/*
	 * Force LVT offset assignment for family 10h: The offsets are
	 * not assigned by the BIOS for this family, so the OS is
	 * responsible for doing it. If the OS assignment fails, fall
	 * back to the BIOS settings and try to set it up from there.
	 */
	if (boot_cpu_data.x86 == 0x10)
		force_ibs_eilvt_setup();
}

static inline int get_ibs_lvt_offset(void)
{
	u64 val;

	rdmsrl(MSR_AMD64_IBSCTL, val);
	if (!(val & IBSCTL_LVT_OFFSET_VALID))
		return -EINVAL;

	return val & IBSCTL_LVT_OFFSET_MASK;
}

static void setup_APIC_ibs(void *dummy)
{
	int offset;

	offset = get_ibs_lvt_offset();
	if (offset < 0)
		goto failed;

	if (!setup_APIC_eilvt(offset, 0, APIC_EILVT_MSG_NMI, 0))
		return;
failed:
	pr_warn("perf: IBS APIC setup failed on cpu #%d\n",
		smp_processor_id());
}

static void clear_APIC_ibs(void *dummy)
{
	int offset;

	offset = get_ibs_lvt_offset();
	if (offset >= 0)
		setup_APIC_eilvt(offset, 0, APIC_EILVT_MSG_FIX, 1);
}

#ifdef CONFIG_PM

static int perf_ibs_suspend(void)
{
	clear_APIC_ibs(NULL);
	return 0;
}

static void perf_ibs_resume(void)
{
	ibs_eilvt_setup();
	setup_APIC_ibs(NULL);
}

static struct syscore_ops perf_ibs_syscore_ops = {
	.resume		= perf_ibs_resume,
	.suspend	= perf_ibs_suspend,
};

static void perf_ibs_pm_init(void)
{
	register_syscore_ops(&perf_ibs_syscore_ops);
}

#else

static inline void perf_ibs_pm_init(void) { }

#endif

static int
perf_ibs_cpu_notifier(struct notifier_block *self, unsigned long action, void *hcpu)
{
	switch (action & ~CPU_TASKS_FROZEN) {
	case CPU_STARTING:
		setup_APIC_ibs(NULL);
		break;
	case CPU_DYING:
		clear_APIC_ibs(NULL);
		break;
	default:
		break;
	}

	return NOTIFY_OK;
}

static __init int amd_ibs_init(void)
{
	u32 caps;
	int ret = -EINVAL;

	caps = __get_ibs_caps();
	if (!caps)
		return -ENODEV;	/* ibs not supported by the cpu */

	ibs_eilvt_setup();

	if (!ibs_eilvt_valid())
		goto out;

	perf_ibs_pm_init();
	cpu_notifier_register_begin();
	ibs_caps = caps;
	/* make ibs_caps visible to other cpus: */
	smp_mb();
	smp_call_function(setup_APIC_ibs, NULL, 1);
	__perf_cpu_notifier(perf_ibs_cpu_notifier);
	cpu_notifier_register_done();

	ret = perf_event_ibs_init();
out:
	if (ret)
		pr_err("Failed to setup IBS, %d\n", ret);
	return ret;
}

/* Since we need the pci subsystem to init ibs we can't do this earlier: */
device_initcall(amd_ibs_init);