/*
 *  (c) 2005-2016 Advanced Micro Devices, Inc.
 *  Your use of this code is subject to the terms and conditions of the
 *  GNU general public license version 2. See "COPYING" or
 *  http://www.gnu.org/licenses/gpl.html
 *
 *  Written by Jacob Shin - AMD, Inc.
 *  Maintained by: Borislav Petkov <bp@alien8.de>
 *
 *  All MC4_MISCi registers are shared between cores on a node.
 */
#include <linux/interrupt.h>
#include <linux/notifier.h>
#include <linux/kobject.h>
#include <linux/percpu.h>
#include <linux/errno.h>
#include <linux/sched.h>
#include <linux/sysfs.h>
#include <linux/slab.h>
#include <linux/init.h>
#include <linux/cpu.h>
#include <linux/smp.h>

#include <asm/amd_nb.h>
#include <asm/apic.h>
#include <asm/mce.h>
#include <asm/msr.h>
#include <asm/trace/irq_vectors.h>
#define NR_BLOCKS         5
#define THRESHOLD_MAX     0xFFF
#define INT_TYPE_APIC     0x00020000
#define MASK_VALID_HI     0x80000000
#define MASK_CNTP_HI      0x40000000
#define MASK_LOCKED_HI    0x20000000
#define MASK_LVTOFF_HI    0x00F00000
#define MASK_COUNT_EN_HI  0x00080000
#define MASK_INT_TYPE_HI  0x00060000
#define MASK_OVERFLOW_HI  0x00010000
#define MASK_ERR_COUNT_HI 0x00000FFF
#define MASK_BLKPTR_LO    0xFF000000
#define MCG_XBLK_ADDR     0xC0000400
/* Deferred error settings */
#define MSR_CU_DEF_ERR		0xC0000410
#define MASK_DEF_LVTOFF		0x000000F0
#define MASK_DEF_INT_TYPE	0x00000006
#define DEF_LVT_OFF		0x2
#define DEF_INT_TYPE_APIC	0x2
/* Threshold LVT offset is at MSR0xC0000410[15:12] */
#define SMCA_THR_LVT_OFF	0xF000

static const char * const th_names[] = {
	"load_store",
	"insn_fetch",
	"combined_unit",
	"",
	"northbridge",
	"execution_unit",
};
/* Define HWID to IP type mappings for Scalable MCA */
struct amd_hwid amd_hwids[] = {
	[SMCA_F17H_CORE]	= { "f17h_core",	0xB0 },
	[SMCA_DF]		= { "data_fabric",	0x2E },
	[SMCA_UMC]		= { "umc",		0x96 },
	[SMCA_PB]		= { "param_block",	0x5 },
	[SMCA_PSP]		= { "psp",		0xFF },
	[SMCA_SMU]		= { "smu",		0x1 },
};
EXPORT_SYMBOL_GPL(amd_hwids);
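
/*
 * Under Scalable MCA, each bank's MCA_IPID register carries a HardwareID
 * identifying the IP block that owns the bank; the table above maps those
 * HWIDs to human-readable names.
 */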
const char * const amd_core_mcablock_names[] = {
	[SMCA_LS]	= "load_store",
	[SMCA_IF]	= "insn_fetch",
	[SMCA_L2_CACHE]	= "l2_cache",
	[SMCA_DE]	= "decode_unit",
	[SMCA_EX]	= "execution_unit",
	[SMCA_FP]	= "floating_point",
	[SMCA_L3_CACHE]	= "l3_cache",
};
EXPORT_SYMBOL_GPL(amd_core_mcablock_names);
const char * const amd_df_mcablock_names[] = {
	[SMCA_CS]	= "coherent_slave",
};
EXPORT_SYMBOL_GPL(amd_df_mcablock_names);
static DEFINE_PER_CPU(struct threshold_bank **, threshold_banks);
static DEFINE_PER_CPU(unsigned char, bank_map);	/* see which banks are on */

static void amd_threshold_interrupt(void);
static void amd_deferred_error_interrupt(void);

static void default_deferred_error_interrupt(void)
{
	pr_err("Unexpected deferred interrupt at vector %x\n", DEFERRED_ERROR_VECTOR);
}

void (*deferred_error_int_vector)(void) = default_deferred_error_interrupt;
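
/*
 * deferred_error_int_vector is a function pointer so the vector can run the
 * stub above until deferred_error_interrupt_enable() has validated the APIC
 * LVT offset, after which it is switched to amd_deferred_error_interrupt().
 */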
struct thresh_restart {
	struct threshold_block	*b;
	int			reset;
	int			set_lvt_off;
	int			lvt_off;
	u16			old_limit;
};

static inline bool is_shared_bank(int bank)
{
	/*
	 * Scalable MCA provides for only one core to have access to the MSRs of
	 * a shared bank.
	 */
	if (mce_flags.smca)
		return false;

	/* Bank 4 is for northbridge reporting and is thus shared */
	return (bank == 4);
}
static const char *bank4_names(const struct threshold_block *b)
{
	switch (b->address) {
	/* MSR4_MISC0 */
	case 0x00000413:
		return "dram";

	case 0xc0000408:
		return "ht_links";

	case 0xc0000409:
		return "l3_cache";

	default:
		WARN(1, "Funny MSR: 0x%08x\n", b->address);
		return "";
	}
};
static bool lvt_interrupt_supported(unsigned int bank, u32 msr_high_bits)
{
	/*
	 * bank 4 supports APIC LVT interrupts implicitly since forever.
	 */
	if (bank == 4)
		return true;

	/*
	 * IntP: interrupt present; if this bit is set, the thresholding
	 * bank can generate APIC LVT interrupts
	 */
	return msr_high_bits & BIT(28);
}
static int lvt_off_valid(struct threshold_block *b, int apic, u32 lo, u32 hi)
{
	int msr = (hi & MASK_LVTOFF_HI) >> 20;

	if (apic < 0) {
		pr_err(FW_BUG "cpu %d, failed to setup threshold interrupt "
		       "for bank %d, block %d (MSR%08X=0x%x%08x)\n", b->cpu,
		       b->bank, b->block, b->address, hi, lo);
		return 0;
	}

	if (apic != msr) {
		/*
		 * On SMCA CPUs, LVT offset is programmed at a different MSR, and
		 * the BIOS provides the value. The original field where LVT offset
		 * was set is reserved. Return early here:
		 */
		if (mce_flags.smca)
			return 0;

		pr_err(FW_BUG "cpu %d, invalid threshold interrupt offset %d "
		       "for bank %d, block %d (MSR%08X=0x%x%08x)\n",
		       b->cpu, apic, b->bank, b->block, b->address, hi, lo);
		return 0;
	}

	return 1;
};
/* Reprogram MCx_MISC MSR behind this threshold bank. */
static void threshold_restart_bank(void *_tr)
{
	struct thresh_restart *tr = _tr;
	u32 hi, lo;

	rdmsr(tr->b->address, lo, hi);

	if (tr->b->threshold_limit < (hi & THRESHOLD_MAX))
		tr->reset = 1;	/* limit cannot be lower than err count */

	if (tr->reset) {		/* reset err count and overflow bit */
		hi =
		    (hi & ~(MASK_ERR_COUNT_HI | MASK_OVERFLOW_HI)) |
		    (THRESHOLD_MAX - tr->b->threshold_limit);
	} else if (tr->old_limit) {	/* change limit w/o reset */
		int new_count = (hi & THRESHOLD_MAX) +
		    (tr->old_limit - tr->b->threshold_limit);

		hi = (hi & ~MASK_ERR_COUNT_HI) |
		    (new_count & THRESHOLD_MAX);
	}

	/* clear IntType */
	hi &= ~MASK_INT_TYPE_HI;

	if (!tr->b->interrupt_capable)
		goto done;

	if (tr->set_lvt_off) {
		if (lvt_off_valid(tr->b, tr->lvt_off, lo, hi)) {
			/* set new lvt offset */
			hi &= ~MASK_LVTOFF_HI;
			hi |= tr->lvt_off << 20;
		}
	}

	if (tr->b->interrupt_enable)
		hi |= INT_TYPE_APIC;

done:
	hi |= MASK_COUNT_EN_HI;
	wrmsr(tr->b->address, lo, hi);
}
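
/*
 * Note on the encoding above: the hardware counter counts up and raises the
 * threshold interrupt when it overflows past THRESHOLD_MAX, so a limit of N
 * is programmed as a start value of THRESHOLD_MAX - N. show_error_count()
 * below applies the inverse transformation when reporting the count.
 */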
static void mce_threshold_block_init(struct threshold_block *b, int offset)
{
	struct thresh_restart tr = {
		.b		= b,
		.set_lvt_off	= 1,
		.lvt_off	= offset,
	};

	b->threshold_limit	= THRESHOLD_MAX;
	threshold_restart_bank(&tr);
};
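
/*
 * The two helpers below follow the same pattern: program a new APIC extended
 * interrupt LVT (EILVT) entry only if none has been reserved yet, and return
 * the offset that ends up in use (or the already-reserved one).
 */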
static int setup_APIC_mce_threshold(int reserved, int new)
{
	if (reserved < 0 && !setup_APIC_eilvt(new, THRESHOLD_APIC_VECTOR,
					      APIC_EILVT_MSG_FIX, 0))
		return new;

	return reserved;
}
static int setup_APIC_deferred_error(int reserved, int new)
{
	if (reserved < 0 && !setup_APIC_eilvt(new, DEFERRED_ERROR_VECTOR,
					      APIC_EILVT_MSG_FIX, 0))
		return new;

	return reserved;
}
static void deferred_error_interrupt_enable(struct cpuinfo_x86 *c)
{
	u32 low = 0, high = 0;
	int def_offset = -1, def_new;

	if (rdmsr_safe(MSR_CU_DEF_ERR, &low, &high))
		return;

	def_new = (low & MASK_DEF_LVTOFF) >> 4;
	if (!(low & MASK_DEF_LVTOFF)) {
		pr_err(FW_BUG "Your BIOS is not setting up LVT offset 0x2 for deferred error IRQs correctly.\n");
		def_new = DEF_LVT_OFF;
		low = (low & ~MASK_DEF_LVTOFF) | (DEF_LVT_OFF << 4);
	}

	def_offset = setup_APIC_deferred_error(def_offset, def_new);
	if ((def_offset == def_new) &&
	    (deferred_error_int_vector != amd_deferred_error_interrupt))
		deferred_error_int_vector = amd_deferred_error_interrupt;

	low = (low & ~MASK_DEF_INT_TYPE) | DEF_INT_TYPE_APIC;
	wrmsr(MSR_CU_DEF_ERR, low, high);
}
static u32 get_block_address(u32 current_addr, u32 low, u32 high,
			     unsigned int bank, unsigned int block)
{
	u32 addr = 0, offset = 0;

	if (mce_flags.smca) {
		if (!block) {
			addr = MSR_AMD64_SMCA_MCx_MISC(bank);
		} else {
			/*
			 * For SMCA enabled processors, BLKPTR field of the
			 * first MISC register (MCx_MISC0) indicates presence of
			 * additional MISC register set (MISC1-4).
			 */
			u32 low, high;

			if (rdmsr_safe(MSR_AMD64_SMCA_MCx_CONFIG(bank), &low, &high))
				return addr;

			if (!(low & MCI_CONFIG_MCAX))
				return addr;

			if (!rdmsr_safe(MSR_AMD64_SMCA_MCx_MISC(bank), &low, &high) &&
			    (low & MASK_BLKPTR_LO))
				addr = MSR_AMD64_SMCA_MCx_MISCy(bank, block - 1);
		}
		return addr;
	}

	/* Fall back to method we used for older processors: */
	switch (block) {
	case 0:
		addr = msr_ops.misc(bank);
		break;
	case 1:
		offset = ((low & MASK_BLKPTR_LO) >> 21);
		if (offset)
			addr = MCG_XBLK_ADDR + offset;
		break;
	default:
		addr = ++current_addr;
	}
	return addr;
}
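
/*
 * Each bank can expose up to NR_BLOCKS thresholding blocks: block 0 lives at
 * MCx_MISC itself, block 1 at MCG_XBLK_ADDR plus the BLKPTR offset read from
 * block 0, and any further blocks at consecutive addresses after that.
 */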
static int
prepare_threshold_block(unsigned int bank, unsigned int block, u32 addr,
			int offset, u32 misc_high)
{
	unsigned int cpu = smp_processor_id();
	u32 smca_low, smca_high, smca_addr;
	struct threshold_block b;
	int new;

	if (!block)
		per_cpu(bank_map, cpu) |= (1 << bank);

	memset(&b, 0, sizeof(b));

	b.cpu			= cpu;
	b.bank			= bank;
	b.block			= block;
	b.address		= addr;
	b.interrupt_capable	= lvt_interrupt_supported(bank, misc_high);

	if (!b.interrupt_capable)
		goto done;

	b.interrupt_enable = 1;

	if (!mce_flags.smca) {
		new = (misc_high & MASK_LVTOFF_HI) >> 20;
		goto set_offset;
	}

	smca_addr = MSR_AMD64_SMCA_MCx_CONFIG(bank);

	if (!rdmsr_safe(smca_addr, &smca_low, &smca_high)) {
		/*
		 * OS is required to set the MCAX bit to acknowledge that it is
		 * now using the new MSR ranges and new registers under each
		 * bank. It also means that the OS will configure deferred
		 * errors in the new MCx_CONFIG register. If the bit is not set,
		 * uncorrectable errors will cause a system panic.
		 *
		 * MCA_CONFIG[MCAX] is bit 32 (bit 0 in the high portion of
		 * the MSR).
		 */
		smca_high |= BIT(0);

		/*
		 * SMCA logs Deferred Error information in MCA_DE{STAT,ADDR}
		 * registers with the option of additionally logging to
		 * MCA_{STATUS,ADDR} if MCA_CONFIG[LogDeferredInMcaStat] is set.
		 *
		 * This bit is usually set by BIOS to retain the old behavior
		 * for OSes that don't use the new registers. Linux supports the
		 * new registers so let's disable that additional logging here.
		 *
		 * MCA_CONFIG[LogDeferredInMcaStat] is bit 34 (bit 2 in the high
		 * portion of the MSR).
		 */
		smca_high &= ~BIT(2);

		wrmsr(smca_addr, smca_low, smca_high);
	}

	/* Gather LVT offset for thresholding: */
	if (rdmsr_safe(MSR_CU_DEF_ERR, &smca_low, &smca_high))
		goto out;

	new = (smca_low & SMCA_THR_LVT_OFF) >> 12;

set_offset:
	offset = setup_APIC_mce_threshold(offset, new);

	if ((offset == new) && (mce_threshold_vector != amd_threshold_interrupt))
		mce_threshold_vector = amd_threshold_interrupt;

done:
	mce_threshold_block_init(&b, offset);

out:
	return offset;
}
/* cpu init entry point, called from mce.c with preempt off */
void mce_amd_feature_init(struct cpuinfo_x86 *c)
{
	u32 low = 0, high = 0, address = 0;
	unsigned int bank, block;
	int offset = -1;

	for (bank = 0; bank < mca_cfg.banks; ++bank) {
		for (block = 0; block < NR_BLOCKS; ++block) {
			address = get_block_address(address, low, high, bank, block);
			if (!address)
				break;

			if (rdmsr_safe(address, &low, &high))
				break;

			if (!(high & MASK_VALID_HI))
				continue;

			if (!(high & MASK_CNTP_HI) ||
			     (high & MASK_LOCKED_HI))
				continue;

			offset = prepare_threshold_block(bank, block, address, offset, high);
		}
	}

	if (mce_flags.succor)
		deferred_error_interrupt_enable(c);
}
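
/*
 * At this point every valid, unlocked thresholding block on this CPU has been
 * recorded in bank_map and programmed via prepare_threshold_block(); the
 * sysfs objects for those blocks are created later by threshold_create_device().
 */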
static void
__log_error(unsigned int bank, bool deferred_err, bool threshold_err, u64 misc)
{
	u32 msr_status = msr_ops.status(bank);
	u32 msr_addr = msr_ops.addr(bank);
	struct mce m;
	u64 status;

	WARN_ON_ONCE(deferred_err && threshold_err);

	if (deferred_err && mce_flags.smca) {
		msr_status = MSR_AMD64_SMCA_MCx_DESTAT(bank);
		msr_addr = MSR_AMD64_SMCA_MCx_DEADDR(bank);
	}

	rdmsrl(msr_status, status);

	if (!(status & MCI_STATUS_VAL))
		return;

	mce_setup(&m);

	m.status = status;
	m.bank = bank;

	if (threshold_err)
		m.misc = misc;

	if (m.status & MCI_STATUS_ADDRV)
		rdmsrl(msr_addr, m.addr);

	mce_log(&m);

	wrmsrl(msr_status, 0);
}
static inline void __smp_deferred_error_interrupt(void)
{
	inc_irq_stat(irq_deferred_error_count);
	deferred_error_int_vector();
}

asmlinkage __visible void smp_deferred_error_interrupt(void)
{
	entering_irq();
	__smp_deferred_error_interrupt();
	exiting_ack_irq();
}

asmlinkage __visible void smp_trace_deferred_error_interrupt(void)
{
	entering_irq();
	trace_deferred_error_apic_entry(DEFERRED_ERROR_VECTOR);
	__smp_deferred_error_interrupt();
	trace_deferred_error_apic_exit(DEFERRED_ERROR_VECTOR);
	exiting_ack_irq();
}
/* APIC interrupt handler for deferred errors */
static void amd_deferred_error_interrupt(void)
{
	unsigned int bank;
	u32 msr_status;
	u64 status;

	for (bank = 0; bank < mca_cfg.banks; ++bank) {
		msr_status = (mce_flags.smca) ? MSR_AMD64_SMCA_MCx_DESTAT(bank)
					      : msr_ops.status(bank);

		rdmsrl(msr_status, status);

		if (!(status & MCI_STATUS_VAL) ||
		    !(status & MCI_STATUS_DEFERRED))
			continue;

		__log_error(bank, true, false, 0);
		break;
	}
}
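
/*
 * Only the first bank found with a valid deferred error is logged per
 * interrupt; its DESTAT (or MCx_STATUS) register is cleared by __log_error().
 */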
/*
 * APIC Interrupt Handler
 */

/*
 * The threshold interrupt handler services THRESHOLD_APIC_VECTOR. The
 * interrupt fires when error_count reaches threshold_limit, and the handler
 * then simply logs the event via mcelog with a software-defined bank number.
 */
static void amd_threshold_interrupt(void)
{
	u32 low = 0, high = 0, address = 0;
	int cpu = smp_processor_id();
	unsigned int bank, block;

	/* assume first bank caused it */
	for (bank = 0; bank < mca_cfg.banks; ++bank) {
		if (!(per_cpu(bank_map, cpu) & (1 << bank)))
			continue;
		for (block = 0; block < NR_BLOCKS; ++block) {
			address = get_block_address(address, low, high, bank, block);
			if (!address)
				break;

			if (rdmsr_safe(address, &low, &high))
				break;

			if (!(high & MASK_VALID_HI)) {
				if (block)
					continue;
				else
					break;
			}

			if (!(high & MASK_CNTP_HI) ||
			     (high & MASK_LOCKED_HI))
				continue;

			/*
			 * Log the machine check that caused the threshold
			 * event.
			 */
			if (high & MASK_OVERFLOW_HI)
				goto log;
		}
	}
	return;

log:
	__log_error(bank, false, true, ((u64)high << 32) | low);
}
struct threshold_attr {
	struct attribute attr;
	ssize_t (*show) (struct threshold_block *, char *);
	ssize_t (*store) (struct threshold_block *, const char *, size_t count);
};
#define SHOW_FIELDS(name)						\
static ssize_t show_ ## name(struct threshold_block *b, char *buf)	\
{									\
	return sprintf(buf, "%lu\n", (unsigned long) b->name);		\
}
SHOW_FIELDS(interrupt_enable)
SHOW_FIELDS(threshold_limit)
static ssize_t
store_interrupt_enable(struct threshold_block *b, const char *buf, size_t size)
{
	struct thresh_restart tr;
	unsigned long new;

	if (!b->interrupt_capable)
		return -EINVAL;

	if (kstrtoul(buf, 0, &new) < 0)
		return -EINVAL;

	b->interrupt_enable = !!new;

	memset(&tr, 0, sizeof(tr));
	tr.b = b;

	smp_call_function_single(b->cpu, threshold_restart_bank, &tr, 1);

	return size;
}
static ssize_t
store_threshold_limit(struct threshold_block *b, const char *buf, size_t size)
{
	struct thresh_restart tr;
	unsigned long new;

	if (kstrtoul(buf, 0, &new) < 0)
		return -EINVAL;

	if (new > THRESHOLD_MAX)
		new = THRESHOLD_MAX;
	if (new < 1)
		new = 1;

	memset(&tr, 0, sizeof(tr));
	tr.old_limit = b->threshold_limit;
	b->threshold_limit = new;
	tr.b = b;

	smp_call_function_single(b->cpu, threshold_restart_bank, &tr, 1);

	return size;
}
static ssize_t show_error_count(struct threshold_block *b, char *buf)
{
	u32 lo, hi;

	rdmsr_on_cpu(b->cpu, b->address, &lo, &hi);

	return sprintf(buf, "%u\n", ((hi & THRESHOLD_MAX) -
				     (THRESHOLD_MAX - b->threshold_limit)));
}
static struct threshold_attr error_count = {
	.attr = {.name = __stringify(error_count), .mode = 0444 },
	.show = show_error_count,
};
#define RW_ATTR(val)							\
static struct threshold_attr val = {					\
	.attr	= {.name = __stringify(val), .mode = 0644 },		\
	.show	= show_## val,						\
	.store	= store_## val,						\
};

RW_ATTR(interrupt_enable);
RW_ATTR(threshold_limit);
static struct attribute *default_attrs[] = {
	&threshold_limit.attr,
	&error_count.attr,
	NULL,	/* possibly interrupt_enable if supported, see below */
	NULL,
};
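
/*
 * These attributes surface under the per-CPU machinecheck device, e.g.
 * (illustrative paths; the directory names come from th_names[] and, for
 * bank 4 blocks, bank4_names()):
 *
 *   /sys/devices/system/machinecheck/machinecheck0/northbridge/dram/error_count
 *   /sys/devices/system/machinecheck/machinecheck0/northbridge/dram/threshold_limit
 *
 * Writing threshold_limit rearms the counter so the interrupt fires after
 * that many further errors.
 */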
#define to_block(k)	container_of(k, struct threshold_block, kobj)
#define to_attr(a)	container_of(a, struct threshold_attr, attr)

static ssize_t show(struct kobject *kobj, struct attribute *attr, char *buf)
{
	struct threshold_block *b = to_block(kobj);
	struct threshold_attr *a = to_attr(attr);
	ssize_t ret;

	ret = a->show ? a->show(b, buf) : -EIO;

	return ret;
}

static ssize_t store(struct kobject *kobj, struct attribute *attr,
		     const char *buf, size_t count)
{
	struct threshold_block *b = to_block(kobj);
	struct threshold_attr *a = to_attr(attr);
	ssize_t ret;

	ret = a->store ? a->store(b, buf, count) : -EIO;

	return ret;
}
static const struct sysfs_ops threshold_ops = {
	.show			= show,
	.store			= store,
};

static struct kobj_type threshold_ktype = {
	.sysfs_ops		= &threshold_ops,
	.default_attrs		= default_attrs,
};
static int allocate_threshold_blocks(unsigned int cpu, unsigned int bank,
				     unsigned int block, u32 address)
{
	struct threshold_block *b = NULL;
	u32 low, high;
	int err;

	if ((bank >= mca_cfg.banks) || (block >= NR_BLOCKS))
		return 0;

	if (rdmsr_safe_on_cpu(cpu, address, &low, &high))
		return 0;

	if (!(high & MASK_VALID_HI)) {
		if (block)
			goto recurse;
		else
			return 0;
	}

	if (!(high & MASK_CNTP_HI) ||
	     (high & MASK_LOCKED_HI))
		goto recurse;

	b = kzalloc(sizeof(struct threshold_block), GFP_KERNEL);
	if (!b)
		return -ENOMEM;

	b->block		= block;
	b->bank			= bank;
	b->cpu			= cpu;
	b->address		= address;
	b->interrupt_enable	= 0;
	b->interrupt_capable	= lvt_interrupt_supported(bank, high);
	b->threshold_limit	= THRESHOLD_MAX;

	if (b->interrupt_capable) {
		threshold_ktype.default_attrs[2] = &interrupt_enable.attr;
		b->interrupt_enable = 1;
	} else {
		threshold_ktype.default_attrs[2] = NULL;
	}

	INIT_LIST_HEAD(&b->miscj);

	if (per_cpu(threshold_banks, cpu)[bank]->blocks) {
		list_add(&b->miscj,
			 &per_cpu(threshold_banks, cpu)[bank]->blocks->miscj);
	} else {
		per_cpu(threshold_banks, cpu)[bank]->blocks = b;
	}

	err = kobject_init_and_add(&b->kobj, &threshold_ktype,
				   per_cpu(threshold_banks, cpu)[bank]->kobj,
				   (bank == 4 ? bank4_names(b) : th_names[bank]));
	if (err)
		goto out_free;
recurse:
	address = get_block_address(address, low, high, bank, ++block);
	if (!address)
		return 0;

	err = allocate_threshold_blocks(cpu, bank, block, address);
	if (err)
		goto out_free;

	if (b)
		kobject_uevent(&b->kobj, KOBJ_ADD);

	return err;

out_free:
	if (b) {
		kobject_put(&b->kobj);
		list_del(&b->miscj);
		kfree(b);
	}
	return err;
}
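
/*
 * allocate_threshold_blocks() recurses across the MISCj chain discovered by
 * get_block_address(), linking each per-block kobject onto blocks->miscj and
 * emitting the KOBJ_ADD uevents only once the whole chain has been built.
 */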
static int __threshold_add_blocks(struct threshold_bank *b)
{
	struct list_head *head = &b->blocks->miscj;
	struct threshold_block *pos = NULL;
	struct threshold_block *tmp = NULL;
	int err = 0;

	err = kobject_add(&b->blocks->kobj, b->kobj, b->blocks->kobj.name);
	if (err)
		return err;

	list_for_each_entry_safe(pos, tmp, head, miscj) {

		err = kobject_add(&pos->kobj, b->kobj, pos->kobj.name);
		if (err) {
			list_for_each_entry_safe_reverse(pos, tmp, head, miscj)
				kobject_del(&pos->kobj);

			return err;
		}
	}
	return err;
}
static int threshold_create_bank(unsigned int cpu, unsigned int bank)
{
	struct device *dev = per_cpu(mce_device, cpu);
	struct amd_northbridge *nb = NULL;
	struct threshold_bank *b = NULL;
	const char *name = th_names[bank];
	int err = 0;

	if (is_shared_bank(bank)) {
		nb = node_to_amd_nb(amd_get_nb_id(cpu));

		/* threshold descriptor already initialized on this node? */
		if (nb && nb->bank4) {
			/* yes, use it */
			b = nb->bank4;
			err = kobject_add(b->kobj, &dev->kobj, name);
			if (err)
				goto out;

			per_cpu(threshold_banks, cpu)[bank] = b;
			atomic_inc(&b->cpus);

			err = __threshold_add_blocks(b);

			goto out;
		}
	}

	b = kzalloc(sizeof(struct threshold_bank), GFP_KERNEL);
	if (!b) {
		err = -ENOMEM;
		goto out;
	}

	b->kobj = kobject_create_and_add(name, &dev->kobj);
	if (!b->kobj) {
		err = -EINVAL;
		goto out_free;
	}

	per_cpu(threshold_banks, cpu)[bank] = b;

	if (is_shared_bank(bank)) {
		atomic_set(&b->cpus, 1);

		/* nb is already initialized, see above */
		if (nb) {
			WARN_ON(nb->bank4);
			nb->bank4 = b;
		}
	}

	err = allocate_threshold_blocks(cpu, bank, 0, MSR_IA32_MCx_MISC(bank));
	if (!err)
		goto out;

out_free:
	kfree(b);

out:
	return err;
}
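
/*
 * For shared (northbridge) banks, the threshold_bank created by the first
 * CPU on a node is cached in amd_northbridge::bank4 and reused: subsequent
 * CPUs only kobject_add() links to the existing blocks and bump b->cpus.
 */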
/* create dir/files for all valid threshold banks */
static int threshold_create_device(unsigned int cpu)
{
	unsigned int bank;
	struct threshold_bank **bp;
	int err = 0;

	bp = kzalloc(sizeof(struct threshold_bank *) * mca_cfg.banks,
		     GFP_KERNEL);
	if (!bp)
		return -ENOMEM;

	per_cpu(threshold_banks, cpu) = bp;

	for (bank = 0; bank < mca_cfg.banks; ++bank) {
		if (!(per_cpu(bank_map, cpu) & (1 << bank)))
			continue;
		err = threshold_create_bank(cpu, bank);
		if (err)
			return err;
	}

	return err;
}
static void deallocate_threshold_block(unsigned int cpu,
				       unsigned int bank)
{
	struct threshold_block *pos = NULL;
	struct threshold_block *tmp = NULL;
	struct threshold_bank *head = per_cpu(threshold_banks, cpu)[bank];

	if (!head)
		return;

	list_for_each_entry_safe(pos, tmp, &head->blocks->miscj, miscj) {
		kobject_put(&pos->kobj);
		list_del(&pos->miscj);
		kfree(pos);
	}

	kfree(per_cpu(threshold_banks, cpu)[bank]->blocks);
	per_cpu(threshold_banks, cpu)[bank]->blocks = NULL;
}
static void __threshold_remove_blocks(struct threshold_bank *b)
{
	struct threshold_block *pos = NULL;
	struct threshold_block *tmp = NULL;

	kobject_del(b->kobj);

	list_for_each_entry_safe(pos, tmp, &b->blocks->miscj, miscj)
		kobject_del(&pos->kobj);
}
static void threshold_remove_bank(unsigned int cpu, int bank)
{
	struct amd_northbridge *nb;
	struct threshold_bank *b;

	b = per_cpu(threshold_banks, cpu)[bank];
	if (!b)
		return;

	if (!b->blocks)
		goto free_out;

	if (is_shared_bank(bank)) {
		if (!atomic_dec_and_test(&b->cpus)) {
			__threshold_remove_blocks(b);
			per_cpu(threshold_banks, cpu)[bank] = NULL;
			return;
		} else {
			/*
			 * the last CPU on this node using the shared bank is
			 * going away, remove that bank now.
			 */
			nb = node_to_amd_nb(amd_get_nb_id(cpu));
			nb->bank4 = NULL;
		}
	}

	deallocate_threshold_block(cpu, bank);

free_out:
	kobject_del(b->kobj);
	kobject_put(b->kobj);
	kfree(b);
	per_cpu(threshold_banks, cpu)[bank] = NULL;
}
static void threshold_remove_device(unsigned int cpu)
{
	unsigned int bank;

	for (bank = 0; bank < mca_cfg.banks; ++bank) {
		if (!(per_cpu(bank_map, cpu) & (1 << bank)))
			continue;
		threshold_remove_bank(cpu, bank);
	}
	kfree(per_cpu(threshold_banks, cpu));
}
/* get notified when a cpu comes on/off */
static void
amd_64_threshold_cpu_callback(unsigned long action, unsigned int cpu)
{
	switch (action) {
	case CPU_ONLINE:
	case CPU_ONLINE_FROZEN:
		threshold_create_device(cpu);
		break;
	case CPU_DEAD:
	case CPU_DEAD_FROZEN:
		threshold_remove_device(cpu);
		break;
	default:
		break;
	}
}
static __init int threshold_init_device(void)
{
	unsigned lcpu = 0;

	/* to hit CPUs online before the notifier is up */
	for_each_online_cpu(lcpu) {
		int err = threshold_create_device(lcpu);

		if (err)
			return err;
	}
	threshold_cpu_callback = amd_64_threshold_cpu_callback;

	return 0;
}
/*
 * There are three functions which need to be _initcall'ed in a logical
 * sequence:
 *
 * 1. xen_late_init_mcelog
 * 2. mcheck_init_device
 * 3. threshold_init_device
 *
 * xen_late_init_mcelog must register xen_mce_chrdev_device before
 * native mce_chrdev_device registration if running under the xen platform.
 *
 * mcheck_init_device must run before threshold_init_device so that
 * mce_device is initialized; otherwise a NULL pointer dereference
 * would cause a panic.
 *
 * So we use the following _initcalls:
 * 1. device_initcall(xen_late_init_mcelog);
 * 2. device_initcall_sync(mcheck_init_device);
 * 3. late_initcall(threshold_init_device);
 *
 * When running under xen, the initcall order is 1, 2, 3;
 * on baremetal, 1 is skipped and only 2 and 3 run.
 */
late_initcall(threshold_init_device);