irqchips: Replace __this_cpu_ptr uses
[linux-2.6-block.git] arch/x86/kernel/cpu/mcheck/mce_intel.c

/*
 * Intel specific MCE features.
 * Copyright 2004 Zwane Mwaikambo <zwane@linuxpower.ca>
 * Copyright (C) 2008, 2009 Intel Corporation
 * Author: Andi Kleen
 */

#include <linux/gfp.h>
#include <linux/interrupt.h>
#include <linux/percpu.h>
#include <linux/sched.h>
#include <linux/cpumask.h>
#include <asm/apic.h>
#include <asm/processor.h>
#include <asm/msr.h>
#include <asm/mce.h>

#include "mce-internal.h"

/*
 * Support for Intel Corrected Machine Check Interrupts. This allows
 * the CPU to raise an interrupt when a corrected machine check happened.
 * Normally we pick those up using a regular polling timer.
 * Also supports reliable discovery of shared banks.
 */

/*
 * CMCI can be delivered to multiple cpus that share a machine check bank
 * so we need to designate a single cpu to process errors logged in each bank
 * in the interrupt handler (otherwise we would have many races and potential
 * double reporting of the same error).
 * Note that this can change when a cpu is offlined or brought online since
 * some MCA banks are shared across cpus. When a cpu is offlined, cmci_clear()
 * disables CMCI on all banks owned by the cpu and clears this bitfield. At
 * this point, cmci_rediscover() kicks in and a different cpu may end up
 * taking ownership of some of the shared MCA banks that were previously
 * owned by the offlined cpu.
 */
static DEFINE_PER_CPU(mce_banks_t, mce_banks_owned);

/*
 * cmci_discover_lock protects against parallel discovery attempts
 * which could race against each other.
 */
static DEFINE_RAW_SPINLOCK(cmci_discover_lock);

#define CMCI_THRESHOLD		1
#define CMCI_POLL_INTERVAL	(30 * HZ)
#define CMCI_STORM_INTERVAL	(1 * HZ)
#define CMCI_STORM_THRESHOLD	15

static DEFINE_PER_CPU(unsigned long, cmci_time_stamp);
static DEFINE_PER_CPU(unsigned int, cmci_storm_cnt);
static DEFINE_PER_CPU(unsigned int, cmci_storm_state);

enum {
	CMCI_STORM_NONE,
	CMCI_STORM_ACTIVE,
	CMCI_STORM_SUBSIDED,
};

static atomic_t cmci_storm_on_cpus;
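
/*
 * Storm handling overview: cmci_storm_cnt counts CMCIs on this CPU within
 * a CMCI_STORM_INTERVAL wide window (timed via cmci_time_stamp). Once the
 * count exceeds CMCI_STORM_THRESHOLD, the CPU disables CMCI on its owned
 * banks, goes to CMCI_STORM_ACTIVE and falls back to polling every
 * CMCI_POLL_INTERVAL. cmci_storm_on_cpus tracks how many CPUs are still
 * storming, so interrupt mode is only restored once the last one subsided.
 */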
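
/*
 * Is CMCI usable here? Requires an Intel CPU that advertises MCG_CAP.CMCI_P
 * and a local APIC exposing the CMCI LVT entry (maxlvt >= 6), and CMCI must
 * not be disabled via mca_cfg. On success *banks is set to the number of
 * MCA banks, capped at MAX_NR_BANKS.
 */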
static int cmci_supported(int *banks)
{
	u64 cap;

	if (mca_cfg.cmci_disabled || mca_cfg.ignore_ce)
		return 0;

	/*
	 * Vendor check is not strictly needed, but the initial
	 * initialization is vendor keyed and this makes sure none
	 * of the backdoors are entered otherwise.
	 */
	if (boot_cpu_data.x86_vendor != X86_VENDOR_INTEL)
		return 0;
	if (!cpu_has_apic || lapic_get_maxlvt() < 6)
		return 0;
	rdmsrl(MSR_IA32_MCG_CAP, cap);
	*banks = min_t(unsigned, MAX_NR_BANKS, cap & 0xff);
	return !!(cap & MCG_CMCI_P);
}

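/*
 * Poll timer hook: while this CPU is in CMCI storm mode its banks have
 * interrupts disabled, so scan the banks it owns from the timer instead.
 */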
void mce_intel_cmci_poll(void)
{
	if (__this_cpu_read(cmci_storm_state) == CMCI_STORM_NONE)
		return;
	machine_check_poll(MCP_TIMESTAMP, &__get_cpu_var(mce_banks_owned));
}

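/*
 * CPU hotplug callback: a CPU that goes away while storming must drop out
 * of the global storm count, or the remaining CPUs could never switch back
 * to interrupt mode.
 */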
void mce_intel_hcpu_update(unsigned long cpu)
{
	if (per_cpu(cmci_storm_state, cpu) == CMCI_STORM_ACTIVE)
		atomic_dec(&cmci_storm_on_cpus);

	per_cpu(cmci_storm_state, cpu) = CMCI_STORM_NONE;
}

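/*
 * Called from the MCE timer to pick the next poll interval and to manage
 * the ACTIVE -> SUBSIDED -> NONE transitions out of a CMCI storm.
 */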
unsigned long mce_intel_adjust_timer(unsigned long interval)
{
	int r;

	if (interval < CMCI_POLL_INTERVAL)
		return interval;

	switch (__this_cpu_read(cmci_storm_state)) {
	case CMCI_STORM_ACTIVE:
		/*
		 * We switch back to interrupt mode once the poll timer has
		 * silenced itself. That means no events recorded and the
		 * timer interval is back to our poll interval.
		 */
		__this_cpu_write(cmci_storm_state, CMCI_STORM_SUBSIDED);
		r = atomic_sub_return(1, &cmci_storm_on_cpus);
		if (r == 0)
			pr_notice("CMCI storm subsided: switching to interrupt mode\n");
		/* FALLTHROUGH */

	case CMCI_STORM_SUBSIDED:
		/*
		 * We wait for all cpus to go back to SUBSIDED
		 * state. When that happens we switch back to
		 * interrupt mode.
		 */
		if (!atomic_read(&cmci_storm_on_cpus)) {
			__this_cpu_write(cmci_storm_state, CMCI_STORM_NONE);
			cmci_reenable();
			cmci_recheck();
		}
		return CMCI_POLL_INTERVAL;
	default:
		/*
		 * We have shiny weather. Let the poll do whatever it
		 * thinks.
		 */
		return interval;
	}
}

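/*
 * Storm entry: turn off CMCI delivery on every bank this CPU owns, so the
 * corrected-error flood is handled by polling instead of interrupts. Only
 * MCI_CTL2_CMCI_EN is cleared; the ownership bits stay set.
 */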
static void cmci_storm_disable_banks(void)
{
	unsigned long flags, *owned;
	int bank;
	u64 val;

	raw_spin_lock_irqsave(&cmci_discover_lock, flags);
	owned = __get_cpu_var(mce_banks_owned);
	for_each_set_bit(bank, owned, MAX_NR_BANKS) {
		rdmsrl(MSR_IA32_MCx_CTL2(bank), val);
		val &= ~MCI_CTL2_CMCI_EN;
		wrmsrl(MSR_IA32_MCx_CTL2(bank), val);
	}
	raw_spin_unlock_irqrestore(&cmci_discover_lock, flags);
}

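/*
 * Per-CPU storm detection, run from the CMCI handler: count interrupts
 * within a CMCI_STORM_INTERVAL wide window; above CMCI_STORM_THRESHOLD,
 * declare a storm, disable the owned banks and kick the poll timer.
 * Returns true if this event is (already) being handled in storm mode,
 * i.e. the caller should not poll the banks itself.
 */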
static bool cmci_storm_detect(void)
{
	unsigned int cnt = __this_cpu_read(cmci_storm_cnt);
	unsigned long ts = __this_cpu_read(cmci_time_stamp);
	unsigned long now = jiffies;
	int r;

	if (__this_cpu_read(cmci_storm_state) != CMCI_STORM_NONE)
		return true;

	if (time_before_eq(now, ts + CMCI_STORM_INTERVAL)) {
		cnt++;
	} else {
		cnt = 1;
		__this_cpu_write(cmci_time_stamp, now);
	}
	__this_cpu_write(cmci_storm_cnt, cnt);

	if (cnt <= CMCI_STORM_THRESHOLD)
		return false;

	cmci_storm_disable_banks();
	__this_cpu_write(cmci_storm_state, CMCI_STORM_ACTIVE);
	r = atomic_add_return(1, &cmci_storm_on_cpus);
	mce_timer_kick(CMCI_POLL_INTERVAL);

	if (r == 1)
		pr_notice("CMCI storm detected: switching to poll mode\n");
	return true;
}

/*
 * The interrupt handler. This is called on every event.
 * Just call the poller directly to log any events.
 * This could in theory increase the threshold under high load,
 * but doesn't for now.
 */
static void intel_threshold_interrupt(void)
{
	if (cmci_storm_detect())
		return;
	machine_check_poll(MCP_TIMESTAMP, &__get_cpu_var(mce_banks_owned));
	mce_notify_irq();
}

/*
 * Enable CMCI (Corrected Machine Check Interrupt) for available MCE banks
 * on this CPU. Use the algorithm recommended in the SDM to discover shared
 * banks.
 */
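/*
 * Per-bank protocol: read MCi_CTL2; if MCI_CTL2_CMCI_EN is already set,
 * another CPU owns this (shared) bank. Otherwise set the enable bit and a
 * threshold, write the MSR and read it back: the enable bit sticks only on
 * banks that implement CMCI, and the CPU whose write sticks becomes the
 * designated owner. Banks owned for CMCI are removed from this CPU's
 * mce_poll_banks mask.
 */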
static void cmci_discover(int banks)
{
	unsigned long *owned = (void *)&__get_cpu_var(mce_banks_owned);
	unsigned long flags;
	int i;
	int bios_wrong_thresh = 0;

	raw_spin_lock_irqsave(&cmci_discover_lock, flags);
	for (i = 0; i < banks; i++) {
		u64 val;
		int bios_zero_thresh = 0;

		if (test_bit(i, owned))
			continue;

		/* Skip banks in firmware first mode */
		if (test_bit(i, mce_banks_ce_disabled))
			continue;

		rdmsrl(MSR_IA32_MCx_CTL2(i), val);

		/* Already owned by someone else? */
		if (val & MCI_CTL2_CMCI_EN) {
			clear_bit(i, owned);
			__clear_bit(i, __get_cpu_var(mce_poll_banks));
			continue;
		}

		if (!mca_cfg.bios_cmci_threshold) {
			val &= ~MCI_CTL2_CMCI_THRESHOLD_MASK;
			val |= CMCI_THRESHOLD;
		} else if (!(val & MCI_CTL2_CMCI_THRESHOLD_MASK)) {
			/*
			 * If bios_cmci_threshold boot option was specified
			 * but the threshold is zero, we'll try to initialize
			 * it to 1.
			 */
			bios_zero_thresh = 1;
			val |= CMCI_THRESHOLD;
		}

		val |= MCI_CTL2_CMCI_EN;
		wrmsrl(MSR_IA32_MCx_CTL2(i), val);
		rdmsrl(MSR_IA32_MCx_CTL2(i), val);

		/* Did the enable bit stick? -- the bank supports CMCI */
		if (val & MCI_CTL2_CMCI_EN) {
			set_bit(i, owned);
			__clear_bit(i, __get_cpu_var(mce_poll_banks));
			/*
			 * We are able to set thresholds for some banks that
			 * had a threshold of 0. This means the BIOS has not
			 * set the thresholds properly or does not work with
			 * this boot option. Note down now and report later.
			 */
			if (mca_cfg.bios_cmci_threshold && bios_zero_thresh &&
					(val & MCI_CTL2_CMCI_THRESHOLD_MASK))
				bios_wrong_thresh = 1;
		} else {
			WARN_ON(!test_bit(i, __get_cpu_var(mce_poll_banks)));
		}
	}
	raw_spin_unlock_irqrestore(&cmci_discover_lock, flags);
	if (mca_cfg.bios_cmci_threshold && bios_wrong_thresh) {
		pr_info_once(
			"bios_cmci_threshold: Some banks do not have valid thresholds set\n");
		pr_info_once(
			"bios_cmci_threshold: Make sure your BIOS supports this boot option\n");
	}
}

/*
 * Just in case we missed an event during initialization, check
 * all the CMCI owned banks.
 */
void cmci_recheck(void)
{
	unsigned long flags;
	int banks;

	if (!mce_available(__this_cpu_ptr(&cpu_info)) || !cmci_supported(&banks))
		return;
	local_irq_save(flags);
	machine_check_poll(MCP_TIMESTAMP, &__get_cpu_var(mce_banks_owned));
	local_irq_restore(flags);
}

/* Caller must hold cmci_discover_lock */
static void __cmci_disable_bank(int bank)
{
	u64 val;

	if (!test_bit(bank, __get_cpu_var(mce_banks_owned)))
		return;
	rdmsrl(MSR_IA32_MCx_CTL2(bank), val);
	val &= ~MCI_CTL2_CMCI_EN;
	wrmsrl(MSR_IA32_MCx_CTL2(bank), val);
	__clear_bit(bank, __get_cpu_var(mce_banks_owned));
}

/*
 * Disable CMCI on this CPU for all banks it owns when it goes down.
 * This allows other CPUs to claim the banks on rediscovery.
 */
void cmci_clear(void)
{
	unsigned long flags;
	int i;
	int banks;

	if (!cmci_supported(&banks))
		return;
	raw_spin_lock_irqsave(&cmci_discover_lock, flags);
	for (i = 0; i < banks; i++)
		__cmci_disable_bank(i);
	raw_spin_unlock_irqrestore(&cmci_discover_lock, flags);
}

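/*
 * on_each_cpu() callback: every CPU re-runs discovery so that banks
 * released by a departed CPU pick up a new owner.
 */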
static void cmci_rediscover_work_func(void *arg)
{
	int banks;

	/* Recheck banks in case CPUs don't all have the same set */
	if (cmci_supported(&banks))
		cmci_discover(banks);
}

/* After a CPU went down, cycle through all the others and rediscover */
void cmci_rediscover(void)
{
	int banks;

	if (!cmci_supported(&banks))
		return;

	on_each_cpu(cmci_rediscover_work_func, NULL, 1);
}

/*
 * Reenable CMCI on this CPU in case a CPU down failed.
 */
void cmci_reenable(void)
{
	int banks;

	if (cmci_supported(&banks))
		cmci_discover(banks);
}

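/*
 * Disable CMCI for a single bank on this CPU, typically because the bank
 * was switched to firmware-first handling (cf. mce_banks_ce_disabled in
 * cmci_discover() above) and the kernel should no longer react to its
 * corrected errors.
 */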
void cmci_disable_bank(int bank)
{
	int banks;
	unsigned long flags;

	if (!cmci_supported(&banks))
		return;

	raw_spin_lock_irqsave(&cmci_discover_lock, flags);
	__cmci_disable_bank(bank);
	raw_spin_unlock_irqrestore(&cmci_discover_lock, flags);
}

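/*
 * Hook the CMCI handler into the machine check threshold vector, claim
 * banks via discovery and unmask the local APIC LVT CMCI entry.
 */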
static void intel_init_cmci(void)
{
	int banks;

	if (!cmci_supported(&banks))
		return;

	mce_threshold_vector = intel_threshold_interrupt;
	cmci_discover(banks);
	/*
	 * For CPU #0 this runs with the APIC still disabled, but that's
	 * ok because only the vector is set up. We still do another
	 * check for the banks later for CPU #0 just to make sure
	 * not to miss any events.
	 */
	apic_write(APIC_LVTCMCI, THRESHOLD_APIC_VECTOR|APIC_DM_FIXED);
	cmci_recheck();
}

void mce_intel_feature_init(struct cpuinfo_x86 *c)
{
	intel_init_thermal(c);
	intel_init_cmci();
}