/*
 * x86 SMP booting functions
 *
 * (c) 1995 Alan Cox, Building #3 <alan@lxorguk.ukuu.org.uk>
 * (c) 1998, 1999, 2000, 2009 Ingo Molnar <mingo@redhat.com>
 * Copyright 2001 Andi Kleen, SuSE Labs.
 *
 * Much of the core SMP work is based on previous work by Thomas Radke, to
 * whom a great many thanks are extended.
 *
 * Thanks to Intel for making available several different Pentium,
 * Pentium Pro and Pentium-II/Xeon MP machines.
 * Original development of Linux SMP code supported by Caldera.
 *
 * This code is released under the GNU General Public License version 2 or
 * later.
 *
 * Fixes
 *	Felix Koop	:	NR_CPUS used properly
 *	Jose Renau	:	Handle single CPU case.
 *	Alan Cox	:	By repeated request 8) - Total BogoMIPS report.
 *	Greg Wright	:	Fix for kernel stacks panic.
 *	Erich Boleyn	:	MP v1.4 and additional changes.
 *	Matthias Sattler	:	Changes for 2.1 kernel map.
 *	Michel Lespinasse	:	Changes for 2.1 kernel map.
 *	Michael Chastain	:	Change trampoline.S to gnu as.
 *	Alan Cox	:	Dumb bug: 'B' step PPro's are fine
 *	Ingo Molnar	:	Added APIC timers, based on code
 *				from Jose Renau
 *	Ingo Molnar	:	various cleanups and rewrites
 *	Tigran Aivazian	:	fixed "0.00 in /proc/uptime on SMP" bug.
 *	Maciej W. Rozycki	:	Bits for genuine 82489DX APICs
 *	Andi Kleen	:	Changed for SMP boot into long mode.
 *	Martin J. Bligh	:	Added support for multi-quad systems
 *	Dave Jones	:	Report invalid combinations of Athlon CPUs.
 *	Rusty Russell	:	Hacked into shape for new "hotplug" boot process.
 *	Andi Kleen	:	Converted to new state machine.
 *	Ashok Raj	:	CPU hotplug support
 *	Glauber Costa	:	i386 and x86_64 integration
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/init.h>
#include <linux/smp.h>
#include <linux/module.h>
#include <linux/sched.h>
#include <linux/percpu.h>
#include <linux/bootmem.h>
#include <linux/err.h>
#include <linux/nmi.h>
#include <linux/tboot.h>
#include <linux/stackprotector.h>
#include <linux/gfp.h>
#include <linux/cpuidle.h>

#include <asm/acpi.h>
#include <asm/desc.h>
#include <asm/nmi.h>
#include <asm/irq.h>
#include <asm/idle.h>
#include <asm/realmode.h>
#include <asm/cpu.h>
#include <asm/numa.h>
#include <asm/pgtable.h>
#include <asm/tlbflush.h>
#include <asm/mtrr.h>
#include <asm/mwait.h>
#include <asm/apic.h>
#include <asm/io_apic.h>
#include <asm/i387.h>
#include <asm/fpu-internal.h>
#include <asm/setup.h>
#include <asm/uv/uv.h>
#include <linux/mc146818rtc.h>
#include <asm/i8259.h>
#include <asm/misc.h>

/* Number of siblings per CPU package */
int smp_num_siblings = 1;
EXPORT_SYMBOL(smp_num_siblings);

/* Last level cache ID of each logical CPU */
DEFINE_PER_CPU_READ_MOSTLY(u16, cpu_llc_id) = BAD_APICID;

/* representing HT siblings of each logical CPU */
DEFINE_PER_CPU_READ_MOSTLY(cpumask_var_t, cpu_sibling_map);
EXPORT_PER_CPU_SYMBOL(cpu_sibling_map);

/* representing HT and core siblings of each logical CPU */
DEFINE_PER_CPU_READ_MOSTLY(cpumask_var_t, cpu_core_map);
EXPORT_PER_CPU_SYMBOL(cpu_core_map);

DEFINE_PER_CPU_READ_MOSTLY(cpumask_var_t, cpu_llc_shared_map);

/* Per CPU bogomips and other parameters */
DEFINE_PER_CPU_READ_MOSTLY(struct cpuinfo_x86, cpu_info);
EXPORT_PER_CPU_SYMBOL(cpu_info);

atomic_t init_deasserted;

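/*
 * The warm-reset path: writing 0xA to CMOS register 0xF (the BIOS shutdown
 * status byte) makes the BIOS resume from a reset by jumping through the
 * vector stored at 40:67 in the BIOS data area. The trampoline
 * segment:offset is patched into that vector below; this is how APs whose
 * APIC predates the STARTUP IPI (82489DX) are pointed at the boot code.
 */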
static inline void smpboot_setup_warm_reset_vector(unsigned long start_eip)
{
	unsigned long flags;

	spin_lock_irqsave(&rtc_lock, flags);
	CMOS_WRITE(0xa, 0xf);
	spin_unlock_irqrestore(&rtc_lock, flags);
	local_flush_tlb();
	pr_debug("1.\n");
	*((volatile unsigned short *)phys_to_virt(TRAMPOLINE_PHYS_HIGH)) =
							start_eip >> 4;
	pr_debug("2.\n");
	*((volatile unsigned short *)phys_to_virt(TRAMPOLINE_PHYS_LOW)) =
							start_eip & 0xf;
	pr_debug("3.\n");
}

static inline void smpboot_restore_warm_reset_vector(void)
{
	unsigned long flags;

	/*
	 * Install writable page 0 entry to set BIOS data area.
	 */
	local_flush_tlb();

	/*
	 * Paranoid: Set warm reset code and vector here back
	 * to default values.
	 */
	spin_lock_irqsave(&rtc_lock, flags);
	CMOS_WRITE(0, 0xf);
	spin_unlock_irqrestore(&rtc_lock, flags);

	*((volatile u32 *)phys_to_virt(TRAMPOLINE_PHYS_LOW)) = 0;
}

/*
 * Report back to the Boot Processor during boot time or to the caller
 * processor during CPU online.
 */
static void smp_callin(void)
{
	int cpuid, phys_id;

	/*
	 * If woken up by an INIT in an 82489DX configuration
	 * we may get here before an INIT-deassert IPI reaches
	 * our local APIC.  We have to wait for the IPI or we'll
	 * lock up on an APIC access.
	 *
	 * Since CPU0 is not woken up by INIT, it doesn't wait for the IPI.
	 */
	cpuid = smp_processor_id();
	if (apic->wait_for_init_deassert && cpuid)
		while (!atomic_read(&init_deasserted))
			cpu_relax();

	/*
	 * (This works even if the APIC is not enabled.)
	 */
	phys_id = read_apic_id();

	/*
	 * The boot CPU has finished the init stage and is spinning
	 * on callin_map until we finish. We are free to set up this
	 * CPU, first the APIC. (This is probably redundant on most
	 * boards.)
	 */
	apic_ap_setup();

	/*
	 * Need to setup vector mappings before we enable interrupts.
	 */
	setup_vector_irq(smp_processor_id());

	/*
	 * Save our processor parameters. Note: this information
	 * is needed for clock calibration.
	 */
	smp_store_cpu_info(cpuid);

	/*
	 * Get our bogomips.
	 * Update loops_per_jiffy in cpu_data. Previous call to
	 * smp_store_cpu_info() stored a value that is close but not as
	 * accurate as the value just calculated.
	 */
	calibrate_delay();
	cpu_data(cpuid).loops_per_jiffy = loops_per_jiffy;
	pr_debug("Stack at about %p\n", &cpuid);

	/*
	 * This must be done before setting cpu_online_mask
	 * or calling notify_cpu_starting.
	 */
	set_cpu_sibling_map(raw_smp_processor_id());
	wmb();

	notify_cpu_starting(cpuid);

	/*
	 * Allow the master to continue.
	 */
	cpumask_set_cpu(cpuid, cpu_callin_mask);
}

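/*
 * State for waking up CPU0 (the BSP) via NMI rather than INIT/SIPI when it
 * is brought back online after a soft offline; see wakeup_cpu_via_init_nmi().
 */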
static int cpu0_logical_apicid;
static int enable_start_cpu0;
/*
 * Activate a secondary processor.
 */
static void notrace start_secondary(void *unused)
{
	/*
	 * Don't put *anything* before cpu_init(), SMP booting is so
	 * fragile that we want to limit the things done here to the
	 * most necessary things.
	 */
	cpu_init();
	x86_cpuinit.early_percpu_clock_init();
	preempt_disable();
	smp_callin();

	enable_start_cpu0 = 0;

#ifdef CONFIG_X86_32
	/* switch away from the initial page table */
	load_cr3(swapper_pg_dir);
	__flush_tlb_all();
#endif

	/* otherwise gcc will move up smp_processor_id before the cpu_init */
	barrier();
	/*
	 * Check TSC synchronization with the BP:
	 */
	check_tsc_sync_target();

	/*
	 * Enable the espfix hack for this CPU
	 */
#ifdef CONFIG_X86_ESPFIX64
	init_espfix_ap();
#endif

	/*
	 * We need to hold vector_lock so that the set of online cpus
	 * does not change while we are assigning vectors to cpus.  Holding
	 * this lock ensures we don't half assign or remove an irq from a cpu.
	 */
	lock_vector_lock();
	set_cpu_online(smp_processor_id(), true);
	unlock_vector_lock();
	cpu_set_state_online(smp_processor_id());
	x86_platform.nmi_init();

	/* enable local interrupts */
	local_irq_enable();

	/* to prevent fake stack check failure in clock setup */
	boot_init_stack_canary();

	x86_cpuinit.setup_percpu_clockev();

	wmb();
	cpu_startup_entry(CPUHP_ONLINE);
}

void __init smp_store_boot_cpu_info(void)
{
	int id = 0; /* CPU 0 */
	struct cpuinfo_x86 *c = &cpu_data(id);

	*c = boot_cpu_data;
	c->cpu_index = id;
}

/*
 * The bootstrap kernel entry code has set these up. Save them for
 * a given CPU
 */
void smp_store_cpu_info(int id)
{
	struct cpuinfo_x86 *c = &cpu_data(id);

	*c = boot_cpu_data;
	c->cpu_index = id;
	/*
	 * During boot time, CPU0 has this setup already. Save the info when
	 * bringing up AP or offlined CPU0.
	 */
	identify_secondary_cpu(c);
}

static bool
topology_same_node(struct cpuinfo_x86 *c, struct cpuinfo_x86 *o)
{
	int cpu1 = c->cpu_index, cpu2 = o->cpu_index;

	return (cpu_to_node(cpu1) == cpu_to_node(cpu2));
}

static bool
topology_sane(struct cpuinfo_x86 *c, struct cpuinfo_x86 *o, const char *name)
{
	int cpu1 = c->cpu_index, cpu2 = o->cpu_index;

	return !WARN_ONCE(!topology_same_node(c, o),
		"sched: CPU #%d's %s-sibling CPU #%d is not on the same node! "
		"[node: %d != %d]. Ignoring dependency.\n",
		cpu1, name, cpu2, cpu_to_node(cpu1), cpu_to_node(cpu2));
}

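/*
 * Link two CPUs symmetrically in one of the topology masks (sibling,
 * llc_shared or core): each CPU is set in the other's mask.
 */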
#define link_mask(_m, c1, c2)						\
do {									\
	cpumask_set_cpu((c1), cpu_##_m##_mask(c2));			\
	cpumask_set_cpu((c2), cpu_##_m##_mask(c1));			\
} while (0)

static bool match_smt(struct cpuinfo_x86 *c, struct cpuinfo_x86 *o)
{
	if (cpu_has_topoext) {
		int cpu1 = c->cpu_index, cpu2 = o->cpu_index;

		if (c->phys_proc_id == o->phys_proc_id &&
		    per_cpu(cpu_llc_id, cpu1) == per_cpu(cpu_llc_id, cpu2) &&
		    c->compute_unit_id == o->compute_unit_id)
			return topology_sane(c, o, "smt");

	} else if (c->phys_proc_id == o->phys_proc_id &&
		   c->cpu_core_id == o->cpu_core_id) {
		return topology_sane(c, o, "smt");
	}

	return false;
}

static bool match_llc(struct cpuinfo_x86 *c, struct cpuinfo_x86 *o)
{
	int cpu1 = c->cpu_index, cpu2 = o->cpu_index;

	if (per_cpu(cpu_llc_id, cpu1) != BAD_APICID &&
	    per_cpu(cpu_llc_id, cpu1) == per_cpu(cpu_llc_id, cpu2))
		return topology_sane(c, o, "llc");

	return false;
}

/*
 * Unlike the other levels, we do not enforce keeping a
 * multicore group inside a NUMA node.  If this happens, we will
 * discard the MC level of the topology later.
 */
static bool match_die(struct cpuinfo_x86 *c, struct cpuinfo_x86 *o)
{
	if (c->phys_proc_id == o->phys_proc_id)
		return true;
	return false;
}

static struct sched_domain_topology_level numa_inside_package_topology[] = {
#ifdef CONFIG_SCHED_SMT
	{ cpu_smt_mask, cpu_smt_flags, SD_INIT_NAME(SMT) },
#endif
#ifdef CONFIG_SCHED_MC
	{ cpu_coregroup_mask, cpu_core_flags, SD_INIT_NAME(MC) },
#endif
	{ NULL, },
};
/*
 * set_sched_topology() sets the topology internal to a CPU.  The
 * NUMA topologies are layered on top of it to build the full
 * system topology.
 *
 * If NUMA nodes are observed to occur within a CPU package, this
 * function should be called. It forces the sched domain code to
 * only use the SMT level for the CPU portion of the topology.
 * This essentially falls back to relying on NUMA information
 * from the SRAT table to describe the entire system topology
 * (except for hyperthreads).
 */
static void primarily_use_numa_for_topology(void)
{
	set_sched_topology(numa_inside_package_topology);
}

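/*
 * Runs on each CPU as it is brought up: records the CPU in
 * cpu_sibling_setup_mask and links its SMT/LLC/core masks against every
 * CPU already recorded there, keeping booted_cores consistent per package.
 */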
void set_cpu_sibling_map(int cpu)
{
	bool has_smt = smp_num_siblings > 1;
	bool has_mp = has_smt || boot_cpu_data.x86_max_cores > 1;
	struct cpuinfo_x86 *c = &cpu_data(cpu);
	struct cpuinfo_x86 *o;
	int i;

	cpumask_set_cpu(cpu, cpu_sibling_setup_mask);

	if (!has_mp) {
		cpumask_set_cpu(cpu, cpu_sibling_mask(cpu));
		cpumask_set_cpu(cpu, cpu_llc_shared_mask(cpu));
		cpumask_set_cpu(cpu, cpu_core_mask(cpu));
		c->booted_cores = 1;
		return;
	}

	for_each_cpu(i, cpu_sibling_setup_mask) {
		o = &cpu_data(i);

		if ((i == cpu) || (has_smt && match_smt(c, o)))
			link_mask(sibling, cpu, i);

		if ((i == cpu) || (has_mp && match_llc(c, o)))
			link_mask(llc_shared, cpu, i);

	}

	/*
	 * This needs a separate iteration over the cpus because we rely on all
	 * cpu_sibling_mask links to be set-up.
	 */
	for_each_cpu(i, cpu_sibling_setup_mask) {
		o = &cpu_data(i);

		if ((i == cpu) || (has_mp && match_die(c, o))) {
			link_mask(core, cpu, i);

			/*
			 * Does this new cpu bringup a new core?
			 */
			if (cpumask_weight(cpu_sibling_mask(cpu)) == 1) {
				/*
				 * for each core in package, increment
				 * the booted_cores for this new cpu
				 */
				if (cpumask_first(cpu_sibling_mask(i)) == i)
					c->booted_cores++;
				/*
				 * increment the core count for all
				 * the other cpus in this package
				 */
				if (i != cpu)
					cpu_data(i).booted_cores++;
			} else if (i != cpu && !c->booted_cores)
				c->booted_cores = cpu_data(i).booted_cores;
		}
		if (match_die(c, o) && !topology_same_node(c, o))
			primarily_use_numa_for_topology();
	}
}

/* maps the cpu to the sched domain representing multi-core */
const struct cpumask *cpu_coregroup_mask(int cpu)
{
	return cpu_llc_shared_mask(cpu);
}

static void impress_friends(void)
{
	int cpu;
	unsigned long bogosum = 0;
	/*
	 * Allow the user to impress friends.
	 */
	pr_debug("Before bogomips\n");
	for_each_possible_cpu(cpu)
		if (cpumask_test_cpu(cpu, cpu_callout_mask))
			bogosum += cpu_data(cpu).loops_per_jiffy;
	pr_info("Total of %d processors activated (%lu.%02lu BogoMIPS)\n",
		num_online_cpus(),
		bogosum/(500000/HZ),
		(bogosum/(5000/HZ))%100);

	pr_debug("Before bogocount - setting activated=1\n");
}

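/*
 * Debug helper: read the ID, VERSION and SPIV registers of a remote APIC
 * via remote-read (REMRD) ICR cycles and print the results.
 */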
void __inquire_remote_apic(int apicid)
{
	unsigned i, regs[] = { APIC_ID >> 4, APIC_LVR >> 4, APIC_SPIV >> 4 };
	const char * const names[] = { "ID", "VERSION", "SPIV" };
	int timeout;
	u32 status;

	pr_info("Inquiring remote APIC 0x%x...\n", apicid);

	for (i = 0; i < ARRAY_SIZE(regs); i++) {
		pr_info("... APIC 0x%x %s: ", apicid, names[i]);

		/*
		 * Wait for idle.
		 */
		status = safe_apic_wait_icr_idle();
		if (status)
			pr_cont("a previous APIC delivery may have failed\n");

		apic_icr_write(APIC_DM_REMRD | regs[i], apicid);

		timeout = 0;
		do {
			udelay(100);
			status = apic_read(APIC_ICR) & APIC_ICR_RR_MASK;
		} while (status == APIC_ICR_RR_INPROG && timeout++ < 1000);

		switch (status) {
		case APIC_ICR_RR_VALID:
			status = apic_read(APIC_RRR);
			pr_cont("%08x\n", status);
			break;
		default:
			pr_cont("failed\n");
		}
	}
}

/*
 * The Multiprocessor Specification 1.4 (1997) example code suggests
 * that there should be a 10ms delay between the BSP asserting INIT
 * and de-asserting INIT, when starting a remote processor.
 * But that slows boot and resume on modern processors, which include
 * many cores and don't require that delay.
 *
 * Cmdline "cpu_init_udelay=" is available to over-ride this delay.
 * Modern processor families are quirked to remove the delay entirely.
 */
#define UDELAY_10MS_DEFAULT 10000

static unsigned int init_udelay = UDELAY_10MS_DEFAULT;

static int __init cpu_init_udelay(char *str)
{
	get_option(&str, &init_udelay);

	return 0;
}
early_param("cpu_init_udelay", cpu_init_udelay);

static void __init smp_quirk_init_udelay(void)
{
	/* if cmdline changed it from default, leave it alone */
	if (init_udelay != UDELAY_10MS_DEFAULT)
		return;

	/* if modern processor, use no delay */
	if (((boot_cpu_data.x86_vendor == X86_VENDOR_INTEL) && (boot_cpu_data.x86 == 6)) ||
	    ((boot_cpu_data.x86_vendor == X86_VENDOR_AMD) && (boot_cpu_data.x86 >= 0xF)))
		init_udelay = 0;
}

/*
 * Poke the other CPU in the eye via NMI to wake it up. Remember that the normal
 * INIT, INIT, STARTUP sequence will reset the chip hard for us, and this
 * won't ... remember to clear down the APIC, etc later.
 */
int
wakeup_secondary_cpu_via_nmi(int apicid, unsigned long start_eip)
{
	unsigned long send_status, accept_status = 0;
	int maxlvt;

	/* Target chip */
	/* Boot on the stack */
	/* Kick the second */
	apic_icr_write(APIC_DM_NMI | apic->dest_logical, apicid);

	pr_debug("Waiting for send to finish...\n");
	send_status = safe_apic_wait_icr_idle();

	/*
	 * Give the other CPU some time to accept the IPI.
	 */
	udelay(200);
	if (APIC_INTEGRATED(apic_version[boot_cpu_physical_apicid])) {
		maxlvt = lapic_get_maxlvt();
		if (maxlvt > 3)			/* Due to the Pentium erratum 3AP.  */
			apic_write(APIC_ESR, 0);
		accept_status = (apic_read(APIC_ESR) & 0xEF);
	}
	pr_debug("NMI sent\n");

	if (send_status)
		pr_err("APIC never delivered???\n");
	if (accept_status)
		pr_err("APIC delivery error (%lx)\n", accept_status);

	return (send_status | accept_status);
}

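/*
 * The MP-spec "universal start-up algorithm": assert a level-triggered
 * INIT, wait, de-assert INIT, then (on integrated APICs) send up to two
 * STARTUP IPIs whose vector field carries the page number of the real-mode
 * entry point (start_eip >> 12). 82489DX parts have no STARTUP IPI and
 * instead come up through the warm-reset vector set up earlier.
 */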
static int
wakeup_secondary_cpu_via_init(int phys_apicid, unsigned long start_eip)
{
	unsigned long send_status = 0, accept_status = 0;
	int maxlvt, num_starts, j;

	maxlvt = lapic_get_maxlvt();

	/*
	 * Be paranoid about clearing APIC errors.
	 */
	if (APIC_INTEGRATED(apic_version[phys_apicid])) {
		if (maxlvt > 3)		/* Due to the Pentium erratum 3AP.  */
			apic_write(APIC_ESR, 0);
		apic_read(APIC_ESR);
	}

	pr_debug("Asserting INIT\n");

	/*
	 * Turn INIT on target chip
	 */
	/*
	 * Send IPI
	 */
	apic_icr_write(APIC_INT_LEVELTRIG | APIC_INT_ASSERT | APIC_DM_INIT,
		       phys_apicid);

	pr_debug("Waiting for send to finish...\n");
	send_status = safe_apic_wait_icr_idle();

	mdelay(init_udelay);

	pr_debug("Deasserting INIT\n");

	/* Target chip */
	/* Send IPI */
	apic_icr_write(APIC_INT_LEVELTRIG | APIC_DM_INIT, phys_apicid);

	pr_debug("Waiting for send to finish...\n");
	send_status = safe_apic_wait_icr_idle();

	mb();
	atomic_set(&init_deasserted, 1);

	/*
	 * Should we send STARTUP IPIs ?
	 *
	 * Determine this based on the APIC version.
	 * If we don't have an integrated APIC, don't send the STARTUP IPIs.
	 */
	if (APIC_INTEGRATED(apic_version[phys_apicid]))
		num_starts = 2;
	else
		num_starts = 0;

	/*
	 * Paravirt / VMI wants a startup IPI hook here to set up the
	 * target processor state.
	 */
	startup_ipi_hook(phys_apicid, (unsigned long) start_secondary,
			 stack_start);

	/*
	 * Run STARTUP IPI loop.
	 */
	pr_debug("#startup loops: %d\n", num_starts);

	for (j = 1; j <= num_starts; j++) {
		pr_debug("Sending STARTUP #%d\n", j);
		if (maxlvt > 3)		/* Due to the Pentium erratum 3AP.  */
			apic_write(APIC_ESR, 0);
		apic_read(APIC_ESR);
		pr_debug("After apic_write\n");

		/*
		 * STARTUP IPI
		 */

		/* Target chip */
		/* Boot on the stack */
		/* Kick the second */
		apic_icr_write(APIC_DM_STARTUP | (start_eip >> 12),
			       phys_apicid);

		/*
		 * Give the other CPU some time to accept the IPI.
		 */
		udelay(300);

		pr_debug("Startup point 1\n");

		pr_debug("Waiting for send to finish...\n");
		send_status = safe_apic_wait_icr_idle();

		/*
		 * Give the other CPU some time to accept the IPI.
		 */
		udelay(200);

		if (maxlvt > 3)		/* Due to the Pentium erratum 3AP.  */
			apic_write(APIC_ESR, 0);
		accept_status = (apic_read(APIC_ESR) & 0xEF);
		if (send_status || accept_status)
			break;
	}
	pr_debug("After Startup\n");

	if (send_status)
		pr_err("APIC never delivered???\n");
	if (accept_status)
		pr_err("APIC delivery error (%lx)\n", accept_status);

	return (send_status | accept_status);
}

void smp_announce(void)
{
	int num_nodes = num_online_nodes();

	printk(KERN_INFO "x86: Booted up %d node%s, %d CPUs\n",
	       num_nodes, (num_nodes > 1 ? "s" : ""), num_online_cpus());
}

/* reduce the number of lines printed when booting a large cpu count system */
static void announce_cpu(int cpu, int apicid)
{
	static int current_node = -1;
	int node = early_cpu_to_node(cpu);
	static int width, node_width;

	if (!width)
		width = num_digits(num_possible_cpus()) + 1; /* + '#' sign */

	if (!node_width)
		node_width = num_digits(num_possible_nodes()) + 1; /* + '#' */

	if (cpu == 1)
		printk(KERN_INFO "x86: Booting SMP configuration:\n");

	if (system_state == SYSTEM_BOOTING) {
		if (node != current_node) {
			if (current_node > (-1))
				pr_cont("\n");
			current_node = node;

			printk(KERN_INFO ".... node %*s#%d, CPUs: ",
			       node_width - num_digits(node), " ", node);
		}

		/* Add padding for the BSP */
		if (cpu == 1)
			pr_cont("%*s", width + 1, " ");

		pr_cont("%*s#%d", width - num_digits(cpu), " ", cpu);

	} else
		pr_info("Booting Node %d Processor %d APIC 0x%x\n",
			node, cpu, apicid);
}

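/*
 * NMI handler for waking up CPU0: claim the NMI only while CPU0 is offline
 * and a wakeup has been requested, so normal NMIs are left alone.
 */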
static int wakeup_cpu0_nmi(unsigned int cmd, struct pt_regs *regs)
{
	int cpu;

	cpu = smp_processor_id();
	if (cpu == 0 && !cpu_online(cpu) && enable_start_cpu0)
		return NMI_HANDLED;

	return NMI_DONE;
}

/*
 * Wake up AP by INIT, INIT, STARTUP sequence.
 *
 * Instead of waiting for STARTUP after INITs, BSP will execute the BIOS
 * boot-strap code which is not a desired behavior for waking up BSP. To
 * avoid the boot-strap code, wake up CPU0 by NMI instead.
 *
 * This works to wake up soft offlined CPU0 only. If CPU0 is hard offlined
 * (i.e. physically hot removed and then hot added), NMI won't wake it up.
 * We'll change this code in the future to wake up hard offlined CPU0 if
 * real platform and request are available.
 */
static int
wakeup_cpu_via_init_nmi(int cpu, unsigned long start_ip, int apicid,
	       int *cpu0_nmi_registered)
{
	int id;
	int boot_error;

	preempt_disable();

	/*
	 * Wake up AP by INIT, INIT, STARTUP sequence.
	 */
	if (cpu) {
		boot_error = wakeup_secondary_cpu_via_init(apicid, start_ip);
		goto out;
	}

	/*
	 * Wake up BSP by nmi.
	 *
	 * Register a NMI handler to help wake up CPU0.
	 */
	boot_error = register_nmi_handler(NMI_LOCAL,
					  wakeup_cpu0_nmi, 0, "wake_cpu0");

	if (!boot_error) {
		enable_start_cpu0 = 1;
		*cpu0_nmi_registered = 1;
		if (apic->dest_logical == APIC_DEST_LOGICAL)
			id = cpu0_logical_apicid;
		else
			id = apicid;
		boot_error = wakeup_secondary_cpu_via_nmi(id, start_ip);
	}

out:
	preempt_enable();

	return boot_error;
}

void common_cpu_up(unsigned int cpu, struct task_struct *idle)
{
	/* Just in case we booted with a single CPU. */
	alternatives_enable_smp();

	per_cpu(current_task, cpu) = idle;

#ifdef CONFIG_X86_32
	/* Stack for startup_32 can be just as for start_secondary onwards */
	irq_ctx_init(cpu);
	per_cpu(cpu_current_top_of_stack, cpu) =
		(unsigned long)task_stack_page(idle) + THREAD_SIZE;
#else
	clear_tsk_thread_flag(idle, TIF_FORK);
	initial_gs = per_cpu_offset(cpu);
#endif
}

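/*
 * Bring one AP out of reset: aim the real-mode trampoline at
 * start_secondary(), kick the CPU, then run the handshake: the AP marks
 * itself in cpu_initialized_mask and waits until we answer via
 * cpu_callout_mask, finally reporting in through cpu_callin_mask from
 * smp_callin().
 */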
/*
 * NOTE - on most systems this is a PHYSICAL apic ID, but on multiquad
 * (ie clustered apic addressing mode), this is a LOGICAL apic ID.
 * Returns zero if CPU booted OK, else error code from
 * ->wakeup_secondary_cpu.
 */
static int do_boot_cpu(int apicid, int cpu, struct task_struct *idle)
{
	volatile u32 *trampoline_status =
		(volatile u32 *) __va(real_mode_header->trampoline_status);
	/* start_ip had better be page-aligned! */
	unsigned long start_ip = real_mode_header->trampoline_start;

	unsigned long boot_error = 0;
	int cpu0_nmi_registered = 0;
	unsigned long timeout;

	idle->thread.sp = (unsigned long) (((struct pt_regs *)
			  (THREAD_SIZE + task_stack_page(idle))) - 1);

	early_gdt_descr.address = (unsigned long)get_cpu_gdt_table(cpu);
	initial_code = (unsigned long)start_secondary;
	stack_start = idle->thread.sp;

	/* So we see what's up */
	announce_cpu(cpu, apicid);

	/*
	 * This grunge runs the startup process for
	 * the targeted processor.
	 */

	atomic_set(&init_deasserted, 0);

	if (get_uv_system_type() != UV_NON_UNIQUE_APIC) {

		pr_debug("Setting warm reset code and vector.\n");

		smpboot_setup_warm_reset_vector(start_ip);
		/*
		 * Be paranoid about clearing APIC errors.
		 */
		if (APIC_INTEGRATED(apic_version[boot_cpu_physical_apicid])) {
			apic_write(APIC_ESR, 0);
			apic_read(APIC_ESR);
		}
	}

	/*
	 * AP might wait on cpu_callout_mask in cpu_init() with
	 * cpu_initialized_mask set if previous attempt to online
	 * it timed-out. Clear cpu_initialized_mask so that after
	 * INIT/SIPI it could start with a clean state.
	 */
	cpumask_clear_cpu(cpu, cpu_initialized_mask);
	smp_mb();

	/*
	 * Wake up a CPU in different cases:
	 * - Use the method in the APIC driver if it's defined
	 * Otherwise,
	 * - Use an INIT boot APIC message for APs or NMI for BSP.
	 */
	if (apic->wakeup_secondary_cpu)
		boot_error = apic->wakeup_secondary_cpu(apicid, start_ip);
	else
		boot_error = wakeup_cpu_via_init_nmi(cpu, start_ip, apicid,
						     &cpu0_nmi_registered);

	if (!boot_error) {
		/*
		 * Wait 10s total for a response from AP
		 */
		boot_error = -1;
		timeout = jiffies + 10*HZ;
		while (time_before(jiffies, timeout)) {
			if (cpumask_test_cpu(cpu, cpu_initialized_mask)) {
				/*
				 * Tell AP to proceed with initialization
				 */
				cpumask_set_cpu(cpu, cpu_callout_mask);
				boot_error = 0;
				break;
			}
			udelay(100);
			schedule();
		}
	}

	if (!boot_error) {
		/*
		 * Wait till AP completes initial initialization
		 */
		while (!cpumask_test_cpu(cpu, cpu_callin_mask)) {
			/*
			 * Allow other tasks to run while we wait for the
			 * AP to come online. This also gives a chance
			 * for the MTRR work (triggered by the AP coming online)
			 * to be completed in the stop machine context.
			 */
			udelay(100);
			schedule();
		}
	}

	/* mark "stuck" area as not stuck */
	*trampoline_status = 0;

	if (get_uv_system_type() != UV_NON_UNIQUE_APIC) {
		/*
		 * Cleanup possible dangling ends...
		 */
		smpboot_restore_warm_reset_vector();
	}
	/*
	 * Clean up the nmi handler. Do this after the callin and callout sync
	 * to avoid impact of possible long unregister time.
	 */
	if (cpu0_nmi_registered)
		unregister_nmi_handler(NMI_LOCAL, "wake_cpu0");

	return boot_error;
}

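/*
 * Arch hook for onlining one CPU, called by the generic hotplug code with
 * the idle task that was created for the new CPU.
 */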
int native_cpu_up(unsigned int cpu, struct task_struct *tidle)
{
	int apicid = apic->cpu_present_to_apicid(cpu);
	unsigned long flags;
	int err;

	WARN_ON(irqs_disabled());

	pr_debug("++++++++++++++++++++=_---CPU UP %u\n", cpu);

	if (apicid == BAD_APICID ||
	    !physid_isset(apicid, phys_cpu_present_map) ||
	    !apic->apic_id_valid(apicid)) {
		pr_err("%s: bad cpu %d\n", __func__, cpu);
		return -EINVAL;
	}

	/*
	 * Already booted CPU?
	 */
	if (cpumask_test_cpu(cpu, cpu_callin_mask)) {
		pr_debug("do_boot_cpu %d Already started\n", cpu);
		return -ENOSYS;
	}

	/*
	 * Save current MTRR state in case it was changed since early boot
	 * (e.g. by the ACPI SMI) to initialize new CPUs with MTRRs in sync:
	 */
	mtrr_save_state();

	/* x86 CPUs take themselves offline, so delayed offline is OK. */
	err = cpu_check_up_prepare(cpu);
	if (err && err != -EBUSY)
		return err;

	/* the FPU context is blank, nobody can own it */
	__cpu_disable_lazy_restore(cpu);

	common_cpu_up(cpu, tidle);

	err = do_boot_cpu(apicid, cpu, tidle);
	if (err) {
		pr_err("do_boot_cpu failed(%d) to wakeup CPU#%u\n", err, cpu);
		return -EIO;
	}

	/*
	 * Check TSC synchronization with the AP (keep irqs disabled
	 * while doing so):
	 */
	local_irq_save(flags);
	check_tsc_sync_source(cpu);
	local_irq_restore(flags);

	while (!cpu_online(cpu)) {
		cpu_relax();
		touch_nmi_watchdog();
	}

	return 0;
}

/**
 * arch_disable_smp_support() - disables SMP support for x86 at runtime
 */
void arch_disable_smp_support(void)
{
	disable_ioapic_support();
}

/*
 * Fall back to non SMP mode after errors.
 *
 * RED-PEN audit/test this more. I bet there is more state messed up here.
 */
static __init void disable_smp(void)
{
	pr_info("SMP disabled\n");

	disable_ioapic_support();

	init_cpu_present(cpumask_of(0));
	init_cpu_possible(cpumask_of(0));

	if (smp_found_config)
		physid_set_mask_of_physid(boot_cpu_physical_apicid, &phys_cpu_present_map);
	else
		physid_set_mask_of_physid(0, &phys_cpu_present_map);
	cpumask_set_cpu(0, cpu_sibling_mask(0));
	cpumask_set_cpu(0, cpu_core_mask(0));
}

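/* Return codes of smp_sanity_check(), acted upon in native_smp_prepare_cpus(). */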
enum {
	SMP_OK,
	SMP_NO_CONFIG,
	SMP_NO_APIC,
	SMP_FORCE_UP,
};

/*
 * Various sanity checks.
 */
static int __init smp_sanity_check(unsigned max_cpus)
{
	preempt_disable();

#if !defined(CONFIG_X86_BIGSMP) && defined(CONFIG_X86_32)
	if (def_to_bigsmp && nr_cpu_ids > 8) {
		unsigned int cpu;
		unsigned nr;

		pr_warn("More than 8 CPUs detected - skipping them\n"
			"Use CONFIG_X86_BIGSMP\n");

		nr = 0;
		for_each_present_cpu(cpu) {
			if (nr >= 8)
				set_cpu_present(cpu, false);
			nr++;
		}

		nr = 0;
		for_each_possible_cpu(cpu) {
			if (nr >= 8)
				set_cpu_possible(cpu, false);
			nr++;
		}

		nr_cpu_ids = 8;
	}
#endif

	if (!physid_isset(hard_smp_processor_id(), phys_cpu_present_map)) {
		pr_warn("weird, boot CPU (#%d) not listed by the BIOS\n",
			hard_smp_processor_id());

		physid_set(hard_smp_processor_id(), phys_cpu_present_map);
	}

	/*
	 * If we couldn't find an SMP configuration at boot time,
	 * get out of here now!
	 */
	if (!smp_found_config && !acpi_lapic) {
		preempt_enable();
		pr_notice("SMP motherboard not detected\n");
		return SMP_NO_CONFIG;
	}

	/*
	 * Should not be necessary because the MP table should list the boot
	 * CPU too, but we do it for the sake of robustness anyway.
	 */
	if (!apic->check_phys_apicid_present(boot_cpu_physical_apicid)) {
		pr_notice("weird, boot CPU (#%d) not listed by the BIOS\n",
			  boot_cpu_physical_apicid);
		physid_set(hard_smp_processor_id(), phys_cpu_present_map);
	}
	preempt_enable();

	/*
	 * If we couldn't find a local APIC, then get out of here now!
	 */
	if (APIC_INTEGRATED(apic_version[boot_cpu_physical_apicid]) &&
	    !cpu_has_apic) {
		if (!disable_apic) {
			pr_err("BIOS bug, local APIC #%d not detected!...\n",
			       boot_cpu_physical_apicid);
			pr_err("... forcing use of dummy APIC emulation (tell your hw vendor)\n");
		}
		return SMP_NO_APIC;
	}

	/*
	 * If SMP should be disabled, then really disable it!
	 */
	if (!max_cpus) {
		pr_info("SMP mode deactivated\n");
		return SMP_FORCE_UP;
	}

	return SMP_OK;
}

static void __init smp_cpu_index_default(void)
{
	int i;
	struct cpuinfo_x86 *c;

	for_each_possible_cpu(i) {
		c = &cpu_data(i);
		/* mark all to hotplug */
		c->cpu_index = nr_cpu_ids;
	}
}

/*
 * Prepare for SMP bootup.  The MP table or ACPI has been read
 * earlier.  Just do some sanity checking here and enable APIC mode.
 */
void __init native_smp_prepare_cpus(unsigned int max_cpus)
{
	unsigned int i;

	smp_cpu_index_default();

	/*
	 * Setup boot CPU information
	 */
	smp_store_boot_cpu_info(); /* Final full version of the data */
	cpumask_copy(cpu_callin_mask, cpumask_of(0));
	mb();

	current_thread_info()->cpu = 0;  /* needed? */
	for_each_possible_cpu(i) {
		zalloc_cpumask_var(&per_cpu(cpu_sibling_map, i), GFP_KERNEL);
		zalloc_cpumask_var(&per_cpu(cpu_core_map, i), GFP_KERNEL);
		zalloc_cpumask_var(&per_cpu(cpu_llc_shared_map, i), GFP_KERNEL);
	}
	set_cpu_sibling_map(0);

	switch (smp_sanity_check(max_cpus)) {
	case SMP_NO_CONFIG:
		disable_smp();
		if (APIC_init_uniprocessor())
			pr_notice("Local APIC not detected. Using dummy APIC emulation.\n");
		return;
	case SMP_NO_APIC:
		disable_smp();
		return;
	case SMP_FORCE_UP:
		disable_smp();
		apic_bsp_setup(false);
		return;
	case SMP_OK:
		break;
	}

	default_setup_apic_routing();

	if (read_apic_id() != boot_cpu_physical_apicid) {
		panic("Boot APIC ID in local APIC unexpected (%d vs %d)",
		      read_apic_id(), boot_cpu_physical_apicid);
		/* Or can we switch back to PIC here? */
	}

	cpu0_logical_apicid = apic_bsp_setup(false);

	pr_info("CPU%d: ", 0);
	print_cpu_info(&cpu_data(0));

	if (is_uv_system())
		uv_system_init();

	set_mtrr_aps_delayed_init();

	smp_quirk_init_udelay();
}

void arch_enable_nonboot_cpus_begin(void)
{
	set_mtrr_aps_delayed_init();
}

void arch_enable_nonboot_cpus_end(void)
{
	mtrr_aps_init();
}

/*
 * Early setup to make printk work.
 */
void __init native_smp_prepare_boot_cpu(void)
{
	int me = smp_processor_id();
	switch_to_new_gdt(me);
	/* already set me in cpu_online_mask in boot_cpu_init() */
	cpumask_set_cpu(me, cpu_callout_mask);
	cpu_set_state_online(me);
}

void __init native_smp_cpus_done(unsigned int max_cpus)
{
	pr_debug("Boot done\n");

	nmi_selftest();
	impress_friends();
	setup_ioapic_dest();
	mtrr_aps_init();
}

static int __initdata setup_possible_cpus = -1;
static int __init _setup_possible_cpus(char *str)
{
	get_option(&str, &setup_possible_cpus);
	return 0;
}
early_param("possible_cpus", _setup_possible_cpus);


/*
 * cpu_possible_mask should be static, it cannot change as cpu's
 * are onlined, or offlined. The reason is per-cpu data-structures
 * are allocated by some modules at init time, and don't expect to
 * do this dynamically on cpu arrival/departure.
 * cpu_present_mask on the other hand can change dynamically.
 * In case when cpu_hotplug is not compiled, then we resort to current
 * behaviour, which is cpu_possible == cpu_present.
 * - Ashok Raj
 *
 * Three ways to find out the number of additional hotplug CPUs:
 * - If the BIOS specified disabled CPUs in ACPI/mptables use that.
 * - The user can overwrite it with possible_cpus=NUM
 * - Otherwise don't reserve additional CPUs.
 * We do this because additional CPUs waste a lot of memory.
 * -AK
 */
__init void prefill_possible_map(void)
{
	int i, possible;

	/* no processor from mptable or madt */
	if (!num_processors)
		num_processors = 1;

	i = setup_max_cpus ?: 1;
	if (setup_possible_cpus == -1) {
		possible = num_processors;
#ifdef CONFIG_HOTPLUG_CPU
		if (setup_max_cpus)
			possible += disabled_cpus;
#else
		if (possible > i)
			possible = i;
#endif
	} else
		possible = setup_possible_cpus;

	total_cpus = max_t(int, possible, num_processors + disabled_cpus);

	/* nr_cpu_ids could be reduced via nr_cpus= */
	if (possible > nr_cpu_ids) {
		pr_warn("%d Processors exceeds NR_CPUS limit of %d\n",
			possible, nr_cpu_ids);
		possible = nr_cpu_ids;
	}

#ifdef CONFIG_HOTPLUG_CPU
	if (!setup_max_cpus)
#endif
	if (possible > i) {
		pr_warn("%d Processors exceeds max_cpus limit of %u\n",
			possible, setup_max_cpus);
		possible = i;
	}

	pr_info("Allowing %d CPUs, %d hotplug CPUs\n",
		possible, max_t(int, possible - num_processors, 0));

	for (i = 0; i < possible; i++)
		set_cpu_possible(i, true);
	for (; i < NR_CPUS; i++)
		set_cpu_possible(i, false);

	nr_cpu_ids = possible;
}

#ifdef CONFIG_HOTPLUG_CPU

static void remove_siblinginfo(int cpu)
{
	int sibling;
	struct cpuinfo_x86 *c = &cpu_data(cpu);

	for_each_cpu(sibling, cpu_core_mask(cpu)) {
		cpumask_clear_cpu(cpu, cpu_core_mask(sibling));
		/*
		 * last thread sibling in this cpu core going down
		 */
		if (cpumask_weight(cpu_sibling_mask(cpu)) == 1)
			cpu_data(sibling).booted_cores--;
	}

	for_each_cpu(sibling, cpu_sibling_mask(cpu))
		cpumask_clear_cpu(cpu, cpu_sibling_mask(sibling));
	for_each_cpu(sibling, cpu_llc_shared_mask(cpu))
		cpumask_clear_cpu(cpu, cpu_llc_shared_mask(sibling));
	cpumask_clear(cpu_llc_shared_mask(cpu));
	cpumask_clear(cpu_sibling_mask(cpu));
	cpumask_clear(cpu_core_mask(cpu));
	c->phys_proc_id = 0;
	c->cpu_core_id = 0;
	cpumask_clear_cpu(cpu, cpu_sibling_setup_mask);
}

static void __ref remove_cpu_from_maps(int cpu)
{
	set_cpu_online(cpu, false);
	cpumask_clear_cpu(cpu, cpu_callout_mask);
	cpumask_clear_cpu(cpu, cpu_callin_mask);
	/* was set by cpu_init() */
	cpumask_clear_cpu(cpu, cpu_initialized_mask);
	numa_remove_cpu(cpu);
}

void cpu_disable_common(void)
{
	int cpu = smp_processor_id();

	remove_siblinginfo(cpu);

	/* It's now safe to remove this processor from the online map */
	lock_vector_lock();
	remove_cpu_from_maps(cpu);
	unlock_vector_lock();
	fixup_irqs();
}

int native_cpu_disable(void)
{
	int ret;

	ret = check_irq_vectors_for_cpu_disable();
	if (ret)
		return ret;

	clear_local_APIC();
	cpu_disable_common();

	return 0;
}

int common_cpu_die(unsigned int cpu)
{
	int ret = 0;

	/* We don't do anything here: idle task is faking death itself. */

	/* They ack this in play_dead() by setting CPU_DEAD */
	if (cpu_wait_death(cpu, 5)) {
		if (system_state == SYSTEM_RUNNING)
			pr_info("CPU %u is now offline\n", cpu);
	} else {
		pr_err("CPU %u didn't die...\n", cpu);
		ret = -1;
	}

	return ret;
}

void native_cpu_die(unsigned int cpu)
{
	common_cpu_die(cpu);
}

void play_dead_common(void)
{
	idle_task_exit();
	reset_lazy_tlbstate();
	amd_e400_remove_cpu(raw_smp_processor_id());

	/* Ack it */
	(void)cpu_report_death();

	/*
	 * With physical CPU hotplug, we should halt the cpu
	 */
	local_irq_disable();
}

static bool wakeup_cpu0(void)
{
	if (smp_processor_id() == 0 && enable_start_cpu0)
		return true;

	return false;
}

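/*
 * CPUID leaf 5 enumerates in EDX, four bits per C-state, how many MWAIT
 * sub-states each C-state supports. mwait_play_dead() below picks the
 * deepest enumerated C-state and its last sub-state and encodes them as
 * the MWAIT hint (C-state in EAX bits 7:4, sub-state in bits 3:0).
 */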
/*
 * We need to flush the caches before going to sleep, lest we have
 * dirty data in our caches when we come back up.
 */
static inline void mwait_play_dead(void)
{
	unsigned int eax, ebx, ecx, edx;
	unsigned int highest_cstate = 0;
	unsigned int highest_subcstate = 0;
	void *mwait_ptr;
	int i;

	if (!this_cpu_has(X86_FEATURE_MWAIT))
		return;
	if (!this_cpu_has(X86_FEATURE_CLFLUSH))
		return;
	if (__this_cpu_read(cpu_info.cpuid_level) < CPUID_MWAIT_LEAF)
		return;

	eax = CPUID_MWAIT_LEAF;
	ecx = 0;
	native_cpuid(&eax, &ebx, &ecx, &edx);

	/*
	 * eax will be 0 if EDX enumeration is not valid.
	 * Initialized below to cstate, sub_cstate value when EDX is valid.
	 */
	if (!(ecx & CPUID5_ECX_EXTENSIONS_SUPPORTED)) {
		eax = 0;
	} else {
		edx >>= MWAIT_SUBSTATE_SIZE;
		for (i = 0; i < 7 && edx; i++, edx >>= MWAIT_SUBSTATE_SIZE) {
			if (edx & MWAIT_SUBSTATE_MASK) {
				highest_cstate = i;
				highest_subcstate = edx & MWAIT_SUBSTATE_MASK;
			}
		}
		eax = (highest_cstate << MWAIT_SUBSTATE_SIZE) |
			(highest_subcstate - 1);
	}

	/*
	 * This should be a memory location in a cache line which is
	 * unlikely to be touched by other processors.  The actual
	 * content is immaterial as it is not actually modified in any way.
	 */
	mwait_ptr = &current_thread_info()->flags;

	wbinvd();

	while (1) {
		/*
		 * The CLFLUSH is a workaround for erratum AAI65 for
		 * the Xeon 7400 series.  It's not clear it is actually
		 * needed, but it should be harmless in either case.
		 * The WBINVD is insufficient due to the spurious-wakeup
		 * case where we return around the loop.
		 */
		mb();
		clflush(mwait_ptr);
		mb();
		__monitor(mwait_ptr, 0, 0);
		mb();
		__mwait(eax, 0);
		/*
		 * If NMI wants to wake up CPU0, start CPU0.
		 */
		if (wakeup_cpu0())
			start_cpu0();
	}
}

static inline void hlt_play_dead(void)
{
	if (__this_cpu_read(cpu_info.x86) >= 4)
		wbinvd();

	while (1) {
		native_halt();
		/*
		 * If NMI wants to wake up CPU0, start CPU0.
		 */
		if (wakeup_cpu0())
			start_cpu0();
	}
}

void native_play_dead(void)
{
	play_dead_common();
	tboot_shutdown(TB_SHUTDOWN_WFS);

	mwait_play_dead();	/* Only returns on failure */
	if (cpuidle_play_dead())
		hlt_play_dead();
}

#else /* ... !CONFIG_HOTPLUG_CPU */
int native_cpu_disable(void)
{
	return -ENOSYS;
}

void native_cpu_die(unsigned int cpu)
{
	/* We said "no" in __cpu_disable */
	BUG();
}

void native_play_dead(void)
{
	BUG();
}

#endif