/*
 * Intel specific MCE features.
 * Copyright 2004 Zwane Mwaikambo <zwane@linuxpower.ca>
 * Copyright (C) 2008, 2009 Intel Corporation
 * Author: Andi Kleen
 */

#include <linux/gfp.h>
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/percpu.h>
#include <linux/sched.h>
#include <asm/apic.h>
#include <asm/processor.h>
#include <asm/msr.h>
#include <asm/mce.h>

#include "mce-internal.h"

/*
 * Support for Intel Corrected Machine Check Interrupts (CMCI). This allows
 * the CPU to raise an interrupt when a corrected machine check happens.
 * Normally we pick those up using a regular polling timer.
 * Also supports reliable discovery of shared banks.
 */

static DEFINE_PER_CPU(mce_banks_t, mce_banks_owned);

/*
 * cmci_discover_lock protects against parallel discovery attempts
 * which could race against each other.
 */
static DEFINE_RAW_SPINLOCK(cmci_discover_lock);

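/*
 * CMCI storm handling: if more than CMCI_STORM_THRESHOLD interrupts
 * arrive on a CPU within CMCI_STORM_INTERVAL, CMCI is turned off on
 * that CPU and corrected events are picked up by polling the owned
 * banks every CMCI_POLL_INTERVAL until the storm subsides.
 */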
#define CMCI_THRESHOLD		1
#define CMCI_POLL_INTERVAL	(30 * HZ)
#define CMCI_STORM_INTERVAL	(1 * HZ)
#define CMCI_STORM_THRESHOLD	15

static DEFINE_PER_CPU(unsigned long, cmci_time_stamp);
static DEFINE_PER_CPU(unsigned int, cmci_storm_cnt);
static DEFINE_PER_CPU(unsigned int, cmci_storm_state);

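/*
 * Per-CPU storm state machine: NONE means normal interrupt mode,
 * ACTIVE means CMCI is disabled and the owned banks are polled,
 * SUBSIDED means the poll on this CPU has gone quiet and we are
 * waiting for the remaining CPUs before re-enabling interrupts.
 */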
enum {
	CMCI_STORM_NONE,
	CMCI_STORM_ACTIVE,
	CMCI_STORM_SUBSIDED,
};

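/* Number of CPUs currently in CMCI storm (poll) mode */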
static atomic_t cmci_storm_on_cpus;

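/*
 * Check whether CMCI is usable on this CPU and report the number of
 * implemented MCE banks through *banks.
 */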
static int cmci_supported(int *banks)
{
	u64 cap;

	if (mce_cmci_disabled || mce_ignore_ce)
		return 0;

	/*
	 * Vendor check is not strictly needed, but the initial
	 * initialization is vendor keyed and this
	 * makes sure none of the backdoors are entered otherwise.
	 */
	if (boot_cpu_data.x86_vendor != X86_VENDOR_INTEL)
		return 0;
	if (!cpu_has_apic || lapic_get_maxlvt() < 6)
		return 0;
	rdmsrl(MSR_IA32_MCG_CAP, cap);
	*banks = min_t(unsigned, MAX_NR_BANKS, cap & 0xff);
	return !!(cap & MCG_CMCI_P);
}

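/*
 * Poll the CMCI banks owned by this CPU, but only while a storm is in
 * progress; in normal operation the threshold interrupt handles them.
 */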
void mce_intel_cmci_poll(void)
{
	if (__this_cpu_read(cmci_storm_state) == CMCI_STORM_NONE)
		return;
	machine_check_poll(MCP_TIMESTAMP, &__get_cpu_var(mce_banks_owned));
}

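/*
 * A CPU is going away: forget its contribution to the storm count and
 * reset its storm state.
 */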
void mce_intel_hcpu_update(unsigned long cpu)
{
	if (per_cpu(cmci_storm_state, cpu) == CMCI_STORM_ACTIVE)
		atomic_dec(&cmci_storm_on_cpus);

	per_cpu(cmci_storm_state, cpu) = CMCI_STORM_NONE;
}

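/*
 * Called from the periodic MCE timer to pick the next interval and to
 * drive the storm state machine back from polling to interrupt mode
 * once all CPUs have seen their storms subside.
 */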
unsigned long mce_intel_adjust_timer(unsigned long interval)
{
	int r;

	if (interval < CMCI_POLL_INTERVAL)
		return interval;

	switch (__this_cpu_read(cmci_storm_state)) {
	case CMCI_STORM_ACTIVE:
		/*
		 * We switch back to interrupt mode once the poll timer has
		 * silenced itself. That means no events recorded and the
		 * timer interval is back to our poll interval.
		 */
		__this_cpu_write(cmci_storm_state, CMCI_STORM_SUBSIDED);
		r = atomic_sub_return(1, &cmci_storm_on_cpus);
		if (r == 0)
			pr_notice("CMCI storm subsided: switching to interrupt mode\n");
		/* FALLTHROUGH */

	case CMCI_STORM_SUBSIDED:
		/*
		 * We wait for all cpus to go back to SUBSIDED
		 * state. When that happens we switch back to
		 * interrupt mode.
		 */
		if (!atomic_read(&cmci_storm_on_cpus)) {
			__this_cpu_write(cmci_storm_state, CMCI_STORM_NONE);
			cmci_reenable();
			cmci_recheck();
		}
		return CMCI_POLL_INTERVAL;
	default:
		/*
		 * We have shiny weather. Let the poll do whatever it
		 * thinks.
		 */
		return interval;
	}
}

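/*
 * Storm detection: count interrupts arriving within CMCI_STORM_INTERVAL.
 * Once the count exceeds CMCI_STORM_THRESHOLD, disable CMCI on this CPU,
 * account it in cmci_storm_on_cpus and kick the poll timer. Returns true
 * if the caller should skip further processing of this interrupt.
 */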
static bool cmci_storm_detect(void)
{
	unsigned int cnt = __this_cpu_read(cmci_storm_cnt);
	unsigned long ts = __this_cpu_read(cmci_time_stamp);
	unsigned long now = jiffies;
	int r;

	if (__this_cpu_read(cmci_storm_state) != CMCI_STORM_NONE)
		return true;

	if (time_before_eq(now, ts + CMCI_STORM_INTERVAL)) {
		cnt++;
	} else {
		cnt = 1;
		__this_cpu_write(cmci_time_stamp, now);
	}
	__this_cpu_write(cmci_storm_cnt, cnt);

	if (cnt <= CMCI_STORM_THRESHOLD)
		return false;

	cmci_clear();
	__this_cpu_write(cmci_storm_state, CMCI_STORM_ACTIVE);
	r = atomic_add_return(1, &cmci_storm_on_cpus);
	mce_timer_kick(CMCI_POLL_INTERVAL);

	if (r == 1)
		pr_notice("CMCI storm detected: switching to poll mode\n");
	return true;
}

/*
 * The interrupt handler. This is called on every event.
 * Just call the poller directly to log any events.
 * This could in theory increase the threshold under high load,
 * but doesn't for now.
 */
static void intel_threshold_interrupt(void)
{
	if (cmci_storm_detect())
		return;
	machine_check_poll(MCP_TIMESTAMP, &__get_cpu_var(mce_banks_owned));
	mce_notify_irq();
}

/*
 * Enable CMCI (Corrected Machine Check Interrupt) for available MCE banks
 * on this CPU. Use the algorithm recommended in the SDM to discover shared
 * banks.
 */
static void cmci_discover(int banks)
{
	unsigned long *owned = (void *)&__get_cpu_var(mce_banks_owned);
	unsigned long flags;
	int i;

	raw_spin_lock_irqsave(&cmci_discover_lock, flags);
	for (i = 0; i < banks; i++) {
		u64 val;

		if (test_bit(i, owned))
			continue;

		rdmsrl(MSR_IA32_MCx_CTL2(i), val);

		/* Already owned by someone else? */
		if (val & MCI_CTL2_CMCI_EN) {
			clear_bit(i, owned);
			__clear_bit(i, __get_cpu_var(mce_poll_banks));
			continue;
		}

		val &= ~MCI_CTL2_CMCI_THRESHOLD_MASK;
		val |= MCI_CTL2_CMCI_EN | CMCI_THRESHOLD;
		wrmsrl(MSR_IA32_MCx_CTL2(i), val);
		rdmsrl(MSR_IA32_MCx_CTL2(i), val);

		/* Did the enable bit stick? -- the bank supports CMCI */
		if (val & MCI_CTL2_CMCI_EN) {
			set_bit(i, owned);
			__clear_bit(i, __get_cpu_var(mce_poll_banks));
		} else {
			WARN_ON(!test_bit(i, __get_cpu_var(mce_poll_banks)));
		}
	}
	raw_spin_unlock_irqrestore(&cmci_discover_lock, flags);
}

/*
 * Just in case we missed an event during initialization, check
 * all the CMCI owned banks.
 */
void cmci_recheck(void)
{
	unsigned long flags;
	int banks;

	if (!mce_available(__this_cpu_ptr(&cpu_info)) || !cmci_supported(&banks))
		return;
	local_irq_save(flags);
	machine_check_poll(MCP_TIMESTAMP, &__get_cpu_var(mce_banks_owned));
	local_irq_restore(flags);
}

/*
 * Disable CMCI on this CPU for all banks it owns when it goes down.
 * This allows other CPUs to claim the banks on rediscovery.
 */
void cmci_clear(void)
{
	unsigned long flags;
	int i;
	int banks;
	u64 val;

	if (!cmci_supported(&banks))
		return;
	raw_spin_lock_irqsave(&cmci_discover_lock, flags);
	for (i = 0; i < banks; i++) {
		if (!test_bit(i, __get_cpu_var(mce_banks_owned)))
			continue;
		/* Disable CMCI */
		rdmsrl(MSR_IA32_MCx_CTL2(i), val);
		val &= ~(MCI_CTL2_CMCI_EN|MCI_CTL2_CMCI_THRESHOLD_MASK);
		wrmsrl(MSR_IA32_MCx_CTL2(i), val);
		__clear_bit(i, __get_cpu_var(mce_banks_owned));
	}
	raw_spin_unlock_irqrestore(&cmci_discover_lock, flags);
}

/*
 * After a CPU went down, cycle through all the others and rediscover.
 * Must run in process context.
 */
void cmci_rediscover(int dying)
{
	int banks;
	int cpu;
	cpumask_var_t old;

	if (!cmci_supported(&banks))
		return;
	if (!alloc_cpumask_var(&old, GFP_KERNEL))
		return;
	cpumask_copy(old, &current->cpus_allowed);

	for_each_online_cpu(cpu) {
		if (cpu == dying)
			continue;
		if (set_cpus_allowed_ptr(current, cpumask_of(cpu)))
			continue;
		/* Recheck banks in case CPUs don't all have the same */
		if (cmci_supported(&banks))
			cmci_discover(banks);
	}

	set_cpus_allowed_ptr(current, old);
	free_cpumask_var(old);
}

/*
 * Reenable CMCI on this CPU in case a CPU down failed.
 */
void cmci_reenable(void)
{
	int banks;
	if (cmci_supported(&banks))
		cmci_discover(banks);
}

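/*
 * Set up the CMCI threshold interrupt vector and claim the banks owned
 * by this CPU.
 */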
static void intel_init_cmci(void)
{
	int banks;

	if (!cmci_supported(&banks))
		return;

	mce_threshold_vector = intel_threshold_interrupt;
	cmci_discover(banks);
	/*
	 * For CPU #0 this runs with a still disabled APIC, but that's
	 * OK because only the vector is set up. We still do another
	 * check for the banks later for CPU #0 just to make sure
	 * we don't miss any events.
	 */
	apic_write(APIC_LVTCMCI, THRESHOLD_APIC_VECTOR|APIC_DM_FIXED);
	cmci_recheck();
}

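/* Intel specific MCE feature initialization: thermal monitoring and CMCI */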
void mce_intel_feature_init(struct cpuinfo_x86 *c)
{
	intel_init_thermal(c);
	intel_init_cmci();
}