/*
 * Intel SMP support routines.
 *
 * (c) 1995 Alan Cox, Building #3 <alan@lxorguk.ukuu.org.uk>
 * (c) 1998-99, 2000, 2009 Ingo Molnar <mingo@redhat.com>
 * (c) 2002,2003 Andi Kleen, SuSE Labs.
 *
 * i386 and x86_64 integration by Glauber Costa <gcosta@redhat.com>
 *
 * This code is released under the GNU General Public License version 2 or
 * later.
 */

#include <linux/init.h>

#include <linux/mm.h>
#include <linux/delay.h>
#include <linux/spinlock.h>
#include <linux/export.h>
#include <linux/kernel_stat.h>
#include <linux/mc146818rtc.h>
#include <linux/cache.h>
#include <linux/interrupt.h>
#include <linux/cpu.h>
#include <linux/gfp.h>

#include <asm/mtrr.h>
#include <asm/tlbflush.h>
#include <asm/mmu_context.h>
#include <asm/proto.h>
#include <asm/apic.h>
#include <asm/nmi.h>
#include <asm/mce.h>
#include <asm/trace/irq_vectors.h>
#include <asm/kexec.h>
#include <asm/virtext.h>

/*
 * Some notes on x86 processor bugs affecting SMP operation:
 *
 * Pentium, Pentium Pro, II, III (and all CPUs) have bugs.
 * The Linux implications for SMP are handled as follows:
 *
 * Pentium III / [Xeon]
 *	None of the E1AP-E3AP errata are visible to the user.
 *
 *	E1AP.	see PII A1AP
 *	E2AP.	see PII A2AP
 *	E3AP.	see PII A3AP
 *
 * Pentium II / [Xeon]
 *	None of the A1AP-A3AP errata are visible to the user.
 *
 *	A1AP.	see PPro 1AP
 *	A2AP.	see PPro 2AP
 *	A3AP.	see PPro 7AP
 *
 * Pentium Pro
 *	None of 1AP-9AP errata are visible to the normal user,
 *	except occasional delivery of 'spurious interrupt' as trap #15.
 *	This is very rare and a non-problem.
 *
 *	1AP.	Linux maps APIC as non-cacheable
 *	2AP.	worked around in hardware
 *	3AP.	fixed in C0 and above steppings microcode update.
 *		Linux does not use excessive STARTUP_IPIs.
 *	4AP.	worked around in hardware
 *	5AP.	symmetric IO mode (normal Linux operation) not affected.
 *		'noapic' mode has vector 0xf filled out properly.
 *	6AP.	'noapic' mode might be affected - fixed in later steppings
 *	7AP.	We do not assume writes to the LVT deasserting IRQs
 *	8AP.	We do not enable low power mode (deep sleep) during MP bootup
 *	9AP.	We do not use mixed mode
 *
 * Pentium
 *	There is a marginal case where REP MOVS on 100MHz SMP
 *	machines with B stepping processors can fail. XXX should provide
 *	an L1cache=Writethrough or L1cache=off option.
 *
 *	B stepping CPUs may hang. There are hardware work arounds
 *	for this. We warn about it in case your board doesn't have the work
 *	arounds. Basically that's so I can tell anyone with a B stepping
 *	CPU and SMP problems "tough".
 *
 *	Specific items [From Pentium Processor Specification Update]
 *
 *	1AP.	Linux doesn't use remote read
 *	2AP.	Linux doesn't trust APIC errors
 *	3AP.	We work around this
 *	4AP.	Linux never generated 3 interrupts of the same priority
 *		to cause a lost local interrupt.
 *	5AP.	Remote read is never used
 *	6AP.	not affected - worked around in hardware
 *	7AP.	not affected - worked around in hardware
 *	8AP.	worked around in hardware - we get explicit CS errors if not
 *	9AP.	only 'noapic' mode affected. Might generate spurious
 *		interrupts, we log only the first one and count the
 *		rest silently.
 *	10AP.	not affected - worked around in hardware
 *	11AP.	Linux reads the APIC between writes to avoid this, as per
 *		the documentation. Make sure you preserve this as it affects
 *		the C stepping chips too.
 *	12AP.	not affected - worked around in hardware
 *	13AP.	not affected - worked around in hardware
 *	14AP.	we always deassert INIT during bootup
 *	15AP.	not affected - worked around in hardware
 *	16AP.	not affected - worked around in hardware
 *	17AP.	not affected - worked around in hardware
 *	18AP.	not affected - worked around in hardware
 *	19AP.	not affected - worked around in BIOS
 *
 * If this sounds worrying, believe me these bugs are either ___RARE___,
 * or are signal timing bugs worked around in hardware and there's
 * almost nothing of note with C stepping upwards.
 */

static atomic_t stopping_cpu = ATOMIC_INIT(-1);
static bool smp_no_nmi_ipi = false;

/*
 * This function sends a 'reschedule' IPI to another CPU.
 * It goes straight through and wastes no time serializing
 * anything. Worst case is that we lose a reschedule ...
 */
static void native_smp_send_reschedule(int cpu)
{
	if (unlikely(cpu_is_offline(cpu))) {
		WARN(1, "sched: Unexpected reschedule of offline CPU#%d!\n", cpu);
		return;
	}
	apic->send_IPI(cpu, RESCHEDULE_VECTOR);
}
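
/*
 * Illustrative sketch, not part of the original file: callers never invoke
 * native_smp_send_reschedule() directly; they go through the smp_ops hook,
 * roughly like the wrapper below. The real wrapper lives in <asm/smp.h>;
 * this copy is shown only as an example.
 */
#if 0
static inline void smp_send_reschedule(int cpu)
{
	smp_ops.smp_send_reschedule(cpu);	/* native or paravirt hook */
}
#endif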

void native_send_call_func_single_ipi(int cpu)
{
	apic->send_IPI(cpu, CALL_FUNCTION_SINGLE_VECTOR);
}

void native_send_call_func_ipi(const struct cpumask *mask)
{
	cpumask_var_t allbutself;

	if (!alloc_cpumask_var(&allbutself, GFP_ATOMIC)) {
		apic->send_IPI_mask(mask, CALL_FUNCTION_VECTOR);
		return;
	}

	cpumask_copy(allbutself, cpu_online_mask);
	cpumask_clear_cpu(smp_processor_id(), allbutself);

	if (cpumask_equal(mask, allbutself) &&
	    cpumask_equal(cpu_online_mask, cpu_callout_mask))
		apic->send_IPI_allbutself(CALL_FUNCTION_VECTOR);
	else
		apic->send_IPI_mask(mask, CALL_FUNCTION_VECTOR);

	free_cpumask_var(allbutself);
}
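
/*
 * Illustrative sketch, not part of the original file: the two senders above
 * are reached from the generic cross-call API. A typical caller looks
 * roughly like this; remote_work() and kick_cpu() are hypothetical names
 * used only for illustration.
 */
#if 0
static void remote_work(void *info)
{
	/* runs on the target CPU in hard-IRQ context */
}

static void kick_cpu(int cpu)
{
	/* on native hardware this ends up in native_send_call_func_single_ipi() */
	smp_call_function_single(cpu, remote_work, NULL, 1);
}
#endif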

static int smp_stop_nmi_callback(unsigned int val, struct pt_regs *regs)
{
	/* We are registered on stopping cpu too, avoid spurious NMI */
	if (raw_smp_processor_id() == atomic_read(&stopping_cpu))
		return NMI_HANDLED;

	cpu_emergency_vmxoff();
	stop_this_cpu(NULL);

	return NMI_HANDLED;
}

/*
 * This function calls the 'stop' function on all other CPUs in the system.
 */

asmlinkage __visible void smp_reboot_interrupt(void)
{
	ipi_entering_ack_irq();
	cpu_emergency_vmxoff();
	stop_this_cpu(NULL);
	irq_exit();
}

static void native_stop_other_cpus(int wait)
{
	unsigned long flags;
	unsigned long timeout;

	if (reboot_force)
		return;

	/*
	 * Use our own vector here because smp_call_function
	 * does lots of things not suitable in a panic situation.
	 */

	/*
	 * We start by using the REBOOT_VECTOR irq.
	 * The irq is treated as a sync point to allow critical
	 * regions of code on other cpus to release their spin locks
	 * and re-enable irqs. Jumping straight to an NMI might
	 * accidentally cause deadlocks with further shutdown/panic
	 * code. By syncing, we give the cpus up to one second to
	 * finish their work before we force them off with the NMI.
	 */
	if (num_online_cpus() > 1) {
		/* did someone beat us here? */
		if (atomic_cmpxchg(&stopping_cpu, -1, safe_smp_processor_id()) != -1)
			return;

		/* sync above data before sending IRQ */
		wmb();

		apic->send_IPI_allbutself(REBOOT_VECTOR);

		/*
		 * Don't wait longer than a second if the caller
		 * didn't ask us to wait.
		 */
		timeout = USEC_PER_SEC;
		while (num_online_cpus() > 1 && (wait || timeout--))
			udelay(1);
	}

	/* if the REBOOT_VECTOR didn't work, try with the NMI */
	if ((num_online_cpus() > 1) && (!smp_no_nmi_ipi)) {
		if (register_nmi_handler(NMI_LOCAL, smp_stop_nmi_callback,
					 NMI_FLAG_FIRST, "smp_stop"))
			/* Note: we ignore failures here */
			/* Hope the REBOOT_VECTOR is good enough */
			goto finish;

		/* sync above data before sending IRQ */
		wmb();

		pr_emerg("Shutting down cpus with NMI\n");

		apic->send_IPI_allbutself(NMI_VECTOR);

		/*
		 * Don't wait longer than 10 ms if the caller
		 * didn't ask us to wait.
		 */
		timeout = USEC_PER_MSEC * 10;
		while (num_online_cpus() > 1 && (wait || timeout--))
			udelay(1);
	}

finish:
	local_irq_save(flags);
	disable_local_APIC();
	mcheck_cpu_clear(this_cpu_ptr(&cpu_info));
	local_irq_restore(flags);
}
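
/*
 * Illustrative sketch, not part of the original file: reboot and panic paths
 * reach native_stop_other_cpus() through the smp_ops hook, roughly like the
 * wrapper below. The real wrapper lives in <asm/smp.h>; this copy is shown
 * only as an example.
 */
#if 0
static inline void stop_other_cpus(void)
{
	smp_ops.stop_other_cpus(1);	/* wait for the other CPUs to stop */
}
#endif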

/*
 * Reschedule callback. KVM uses this interrupt to force a cpu out of
 * guest mode.
 */
__visible void __irq_entry smp_reschedule_interrupt(struct pt_regs *regs)
{
	ack_APIC_irq();
	inc_irq_stat(irq_resched_count);
	kvm_set_cpu_l1tf_flush_l1d();

	if (trace_resched_ipi_enabled()) {
		/*
		 * scheduler_ipi() might call irq_enter() as well, but
		 * nested calls are fine.
		 */
		irq_enter();
		trace_reschedule_entry(RESCHEDULE_VECTOR);
		scheduler_ipi();
		trace_reschedule_exit(RESCHEDULE_VECTOR);
		irq_exit();
		return;
	}
	scheduler_ipi();
}

__visible void __irq_entry smp_call_function_interrupt(struct pt_regs *regs)
{
	ipi_entering_ack_irq();
	trace_call_function_entry(CALL_FUNCTION_VECTOR);
	inc_irq_stat(irq_call_count);
	generic_smp_call_function_interrupt();
	trace_call_function_exit(CALL_FUNCTION_VECTOR);
	exiting_irq();
}

__visible void __irq_entry smp_call_function_single_interrupt(struct pt_regs *r)
{
	ipi_entering_ack_irq();
	trace_call_function_single_entry(CALL_FUNCTION_SINGLE_VECTOR);
	inc_irq_stat(irq_call_count);
	generic_smp_call_function_single_interrupt();
	trace_call_function_single_exit(CALL_FUNCTION_SINGLE_VECTOR);
	exiting_irq();
}

static int __init nonmi_ipi_setup(char *str)
{
	smp_no_nmi_ipi = true;
	return 1;
}

__setup("nonmi_ipi", nonmi_ipi_setup);
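
/*
 * Usage note, added for illustration: booting with "nonmi_ipi" on the
 * kernel command line sets smp_no_nmi_ipi above, so native_stop_other_cpus()
 * never escalates from the REBOOT_VECTOR IPI to the NMI shootdown.
 */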

struct smp_ops smp_ops = {
	.smp_prepare_boot_cpu	= native_smp_prepare_boot_cpu,
	.smp_prepare_cpus	= native_smp_prepare_cpus,
	.smp_cpus_done		= native_smp_cpus_done,

	.stop_other_cpus	= native_stop_other_cpus,
#if defined(CONFIG_KEXEC_CORE)
	.crash_stop_other_cpus	= kdump_nmi_shootdown_cpus,
#endif
	.smp_send_reschedule	= native_smp_send_reschedule,

	.cpu_up			= native_cpu_up,
	.cpu_die		= native_cpu_die,
	.cpu_disable		= native_cpu_disable,
	.play_dead		= native_play_dead,

	.send_call_func_ipi	= native_send_call_func_ipi,
	.send_call_func_single_ipi = native_send_call_func_single_ipi,
};
EXPORT_SYMBOL_GPL(smp_ops);
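
/*
 * Illustrative sketch, not part of the original file: paravirtualized guests
 * override these hooks during early boot. The shape is roughly as below; the
 * xen_* names follow arch/x86/xen/smp.c but this function is hypothetical
 * and shown only as an example.
 */
#if 0
void __init example_override_smp_ops(void)
{
	smp_ops.smp_send_reschedule	  = xen_smp_send_reschedule;
	smp_ops.send_call_func_ipi	  = xen_smp_send_call_function_ipi;
	smp_ops.send_call_func_single_ipi = xen_smp_send_call_function_single_ipi;
}
#endif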