#include <linux/init.h>
#include <linux/smp.h>
#include <linux/module.h>
#include <linux/sched.h>
#include <linux/percpu.h>
#include <linux/bootmem.h>
#include <linux/err.h>
#include <linux/nmi.h>

#include <asm/desc.h>
#include <asm/nmi.h>
#include <asm/irq.h>
#include <asm/smp.h>
#include <asm/cpu.h>
#include <asm/numa.h>
#include <asm/pgtable.h>
#include <asm/tlbflush.h>
#include <asm/mtrr.h>
#include <asm/vmi.h>
#include <linux/mc146818rtc.h>

#include <mach_apic.h>
#include <mach_wakecpu.h>
#include <smpboot_hooks.h>

/* State of each CPU */
DEFINE_PER_CPU(int, cpu_state) = { 0 };

/*
 * Store all idle threads, this can be reused instead of creating
 * a new thread. Also avoids complicated thread destroy functionality
 * for idle threads.
 */
#ifdef CONFIG_HOTPLUG_CPU
/*
 * Needed only for CONFIG_HOTPLUG_CPU because __cpuinitdata is
 * removed after init for !CONFIG_HOTPLUG_CPU.
 */
static DEFINE_PER_CPU(struct task_struct *, idle_thread_array);
#define get_idle_for_cpu(x)	(per_cpu(idle_thread_array, x))
#define set_idle_for_cpu(x, p)	(per_cpu(idle_thread_array, x) = (p))
#else
struct task_struct *idle_thread_array[NR_CPUS] __cpuinitdata;
#define get_idle_for_cpu(x)	(idle_thread_array[(x)])
#define set_idle_for_cpu(x, p)	(idle_thread_array[(x)] = (p))
#endif

/* Number of siblings per CPU package */
int smp_num_siblings = 1;
EXPORT_SYMBOL(smp_num_siblings);

/* Last level cache ID of each logical CPU */
DEFINE_PER_CPU(u16, cpu_llc_id) = BAD_APICID;

/* bitmap of online cpus */
cpumask_t cpu_online_map __read_mostly;
EXPORT_SYMBOL(cpu_online_map);

cpumask_t cpu_callin_map;
cpumask_t cpu_callout_map;
cpumask_t cpu_possible_map;
EXPORT_SYMBOL(cpu_possible_map);

/* representing HT siblings of each logical CPU */
DEFINE_PER_CPU(cpumask_t, cpu_sibling_map);
EXPORT_PER_CPU_SYMBOL(cpu_sibling_map);

/* representing HT and core siblings of each logical CPU */
DEFINE_PER_CPU(cpumask_t, cpu_core_map);
EXPORT_PER_CPU_SYMBOL(cpu_core_map);

/* Per CPU bogomips and other parameters */
DEFINE_PER_CPU_SHARED_ALIGNED(struct cpuinfo_x86, cpu_info);
EXPORT_PER_CPU_SYMBOL(cpu_info);

static atomic_t init_deasserted;

/*
 * Correct as-is for x86_64; harmless on 32-bit, where smp_alloc_memory()
 * overwrites it with a freshly allocated low page.
 */
unsigned char *trampoline_base = __va(SMP_TRAMPOLINE_BASE);

/* representing cpus for which sibling maps can be computed */
static cpumask_t cpu_sibling_setup_map;

/* Set if we find a B stepping CPU */
int __cpuinitdata smp_b_stepping;

#if defined(CONFIG_NUMA) && defined(CONFIG_X86_32)

/* which logical CPUs are on which nodes */
cpumask_t node_to_cpumask_map[MAX_NUMNODES] __read_mostly =
				{ [0 ... MAX_NUMNODES-1] = CPU_MASK_NONE };
EXPORT_SYMBOL(node_to_cpumask_map);
/* which node each logical CPU is on */
int cpu_to_node_map[NR_CPUS] __read_mostly = { [0 ... NR_CPUS-1] = 0 };
EXPORT_SYMBOL(cpu_to_node_map);

/* set up a mapping between cpu and node. */
static void map_cpu_to_node(int cpu, int node)
{
	printk(KERN_INFO "Mapping cpu %d to node %d\n", cpu, node);
	cpu_set(cpu, node_to_cpumask_map[node]);
	cpu_to_node_map[cpu] = node;
}

/* undo a mapping between cpu and node. */
static void unmap_cpu_to_node(int cpu)
{
	int node;

	printk(KERN_INFO "Unmapping cpu %d from all nodes\n", cpu);
	for (node = 0; node < MAX_NUMNODES; node++)
		cpu_clear(cpu, node_to_cpumask_map[node]);
	cpu_to_node_map[cpu] = 0;
}
#else /* !(CONFIG_NUMA && CONFIG_X86_32) */
#define map_cpu_to_node(cpu, node)	({})
#define unmap_cpu_to_node(cpu)	({})
#endif

#ifdef CONFIG_X86_32
u8 cpu_2_logical_apicid[NR_CPUS] __read_mostly =
					{ [0 ... NR_CPUS-1] = BAD_APICID };

void map_cpu_to_logical_apicid(void)
{
	int cpu = smp_processor_id();
	int apicid = logical_smp_processor_id();
	int node = apicid_to_node(apicid);

	if (!node_online(node))
		node = first_online_node;

	cpu_2_logical_apicid[cpu] = apicid;
	map_cpu_to_node(cpu, node);
}

void unmap_cpu_to_logical_apicid(int cpu)
{
	cpu_2_logical_apicid[cpu] = BAD_APICID;
	unmap_cpu_to_node(cpu);
}
#else
#define unmap_cpu_to_logical_apicid(cpu)	do {} while (0)
#define map_cpu_to_logical_apicid()	do {} while (0)
#endif

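/*
 * AP/BSP handshake, in summary: the BSP sets an AP's bit in cpu_callout_map
 * to say "go ahead", and the AP answers by setting its own bit in
 * cpu_callin_map once it has calibrated and stored its cpu data. Each side
 * spins on the other's map with a timeout (see also do_boot_cpu() below).
 */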
/*
 * Report back to the Boot Processor.
 * Running on AP.
 */
void __cpuinit smp_callin(void)
{
	int cpuid, phys_id;
	unsigned long timeout;

	/*
	 * If woken up by an INIT in an 82489DX configuration
	 * we may get here before an INIT-deassert IPI reaches
	 * our local APIC. We have to wait for the IPI or we'll
	 * lock up on an APIC access.
	 */
	wait_for_init_deassert(&init_deasserted);

	/*
	 * (This works even if the APIC is not enabled.)
	 */
	phys_id = GET_APIC_ID(apic_read(APIC_ID));
	cpuid = smp_processor_id();
	if (cpu_isset(cpuid, cpu_callin_map)) {
		panic("%s: phys CPU#%d, CPU#%d already present??\n", __func__,
					phys_id, cpuid);
	}
	Dprintk("CPU#%d (phys ID: %d) waiting for CALLOUT\n", cpuid, phys_id);

	/*
	 * STARTUP IPIs are fragile beasts as they might sometimes
	 * trigger some glue motherboard logic. Complete APIC bus
	 * silence for 1 second, this overestimates the time the
	 * boot CPU is spending to send the up to 2 STARTUP IPIs
	 * by a factor of two. This should be enough.
	 */

	/*
	 * Waiting 2s total for startup (udelay is not yet working)
	 */
	timeout = jiffies + 2*HZ;
	while (time_before(jiffies, timeout)) {
		/*
		 * Has the boot CPU finished its STARTUP sequence?
		 */
		if (cpu_isset(cpuid, cpu_callout_map))
			break;
		cpu_relax();
	}

	if (!time_before(jiffies, timeout)) {
		panic("%s: CPU%d started up but did not get a callout!\n",
		      __func__, cpuid);
	}

	/*
	 * the boot CPU has finished the init stage and is spinning
	 * on callin_map until we finish. We are free to set up this
	 * CPU, first the APIC. (this is probably redundant on most
	 * boards)
	 */

	Dprintk("CALLIN, before setup_local_APIC().\n");
	smp_callin_clear_local_apic();
	setup_local_APIC();
	end_local_APIC_setup();
	map_cpu_to_logical_apicid();

	/*
	 * Get our bogomips.
	 *
	 * Need to enable IRQs because it can take longer and then
	 * the NMI watchdog might kill us.
	 */
	local_irq_enable();
	calibrate_delay();
	local_irq_disable();
	Dprintk("Stack at about %p\n", &cpuid);

	/*
	 * Save our processor parameters
	 */
	smp_store_cpu_info(cpuid);

	/*
	 * Allow the master to continue.
	 */
	cpu_set(cpuid, cpu_callin_map);
}

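/*
 * start_secondary() is the C entry point for an AP: do_boot_cpu() points
 * the new idle task's thread.ip (32-bit) or initial_code (64-bit) here,
 * and the trampoline jumps to it once protected mode is up.
 */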
/*
 * Activate a secondary processor.
 */
void __cpuinit start_secondary(void *unused)
{
	/*
	 * Don't put *anything* before cpu_init(), SMP booting is so
	 * fragile that we want to limit the things done here to the
	 * most necessary things.
	 */
#ifdef CONFIG_VMI
	vmi_bringup();
#endif
	cpu_init();
	preempt_disable();
	smp_callin();

	/* otherwise gcc will move up smp_processor_id before the cpu_init */
	barrier();
	/*
	 * Check TSC synchronization with the BP:
	 */
	check_tsc_sync_target();

	if (nmi_watchdog == NMI_IO_APIC) {
		disable_8259A_irq(0);
		enable_NMI_through_LVT0();
		enable_8259A_irq(0);
	}

	/* This must be done before setting cpu_online_map */
	set_cpu_sibling_map(raw_smp_processor_id());
	wmb();

	/*
	 * We need to hold call_lock, so there is no inconsistency
	 * between the time smp_call_function() determines number of
	 * IPI recipients, and the time when the determination is made
	 * for which cpus receive the IPI. Holding this
	 * lock helps us to not include this cpu in a currently in progress
	 * smp_call_function().
	 */
	lock_ipi_call_lock();
#ifdef CONFIG_X86_64
	spin_lock(&vector_lock);

	/* Setup the per cpu irq handling data structures */
	__setup_vector_irq(smp_processor_id());
	/*
	 * Allow the master to continue.
	 */
	spin_unlock(&vector_lock);
#endif
	cpu_set(smp_processor_id(), cpu_online_map);
	unlock_ipi_call_lock();
	per_cpu(cpu_state, smp_processor_id()) = CPU_ONLINE;

	setup_secondary_clock();

	wmb();
	cpu_idle();
}

#ifdef CONFIG_X86_32
/*
 * Everything has been set up for the secondary
 * CPUs - they just need to reload everything
 * from the task structure.
 * This function must not return.
 */
void __devinit initialize_secondary(void)
{
	/*
	 * We don't actually need to load the full TSS,
	 * basically just the stack pointer and the ip.
	 */

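	/*
	 * Switch to the idle task's kernel stack and jump to its saved ip;
	 * do_boot_cpu() set thread.ip to start_secondary, so this does not
	 * return.
	 */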
	asm volatile(
		"movl %0,%%esp\n\t"
		"jmp *%1"
		:
		:"m" (current->thread.sp), "m" (current->thread.ip));
}
#endif

static void __cpuinit smp_apply_quirks(struct cpuinfo_x86 *c)
{
#ifdef CONFIG_X86_32
	/*
	 * Mask B, Pentium, but not Pentium MMX
	 */
	if (c->x86_vendor == X86_VENDOR_INTEL &&
	    c->x86 == 5 &&
	    c->x86_mask >= 1 && c->x86_mask <= 4 &&
	    c->x86_model <= 3)
		/*
		 * Remember we have B step Pentia with bugs
		 */
		smp_b_stepping = 1;

	/*
	 * Certain Athlons might work (for various values of 'work') in SMP
	 * but they are not certified as MP capable.
	 */
	if ((c->x86_vendor == X86_VENDOR_AMD) && (c->x86 == 6)) {

		if (num_possible_cpus() == 1)
			goto valid_k7;

		/* Athlon 660/661 is valid. */
		if ((c->x86_model == 6) && ((c->x86_mask == 0) ||
		    (c->x86_mask == 1)))
			goto valid_k7;

		/* Duron 670 is valid */
		if ((c->x86_model == 7) && (c->x86_mask == 0))
			goto valid_k7;

		/*
		 * Athlon 662, Duron 671, and Athlon >model 7 have the MP
		 * capability bit. It's worth noting that the A5 stepping
		 * (662) of some Athlon XPs have the MP bit set.
		 * See http://www.heise.de/newsticker/data/jow-18.10.01-000
		 * for more.
		 */
		if (((c->x86_model == 6) && (c->x86_mask >= 2)) ||
		    ((c->x86_model == 7) && (c->x86_mask >= 1)) ||
		    (c->x86_model > 7))
			if (cpu_has_mp)
				goto valid_k7;

		/* If we get here, not a certified SMP capable AMD system. */
		add_taint(TAINT_UNSAFE_SMP);
	}

valid_k7:
	;
#endif
}

void smp_checks(void)
{
	if (smp_b_stepping)
		printk(KERN_WARNING "WARNING: SMP operation may be unreliable "
		       "with B stepping processors.\n");

	/*
	 * Don't taint if we are running SMP kernel on a single non-MP
	 * approved Athlon
	 */
	if (tainted & TAINT_UNSAFE_SMP) {
		if (num_online_cpus())
			printk(KERN_INFO "WARNING: This combination of AMD "
			       "processors is not suitable for SMP.\n");
		else
			tainted &= ~TAINT_UNSAFE_SMP;
	}
}

/*
 * The bootstrap kernel entry code has set these up. Save them for
 * a given CPU.
 */
void __cpuinit smp_store_cpu_info(int id)
{
	struct cpuinfo_x86 *c = &cpu_data(id);

	*c = boot_cpu_data;
	c->cpu_index = id;
	if (id != 0)
		identify_secondary_cpu(c);
	smp_apply_quirks(c);
}

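/*
 * Illustration (hypothetical box): with two HT-enabled dual-core packages,
 * CPU0's cpu_sibling_map ends up holding CPU0 and its HT twin, its
 * cpu_core_map holds all four logical CPUs of its package, and
 * llc_shared_map holds the CPUs it shares a last-level cache with.
 */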
void __cpuinit set_cpu_sibling_map(int cpu)
{
	int i;
	struct cpuinfo_x86 *c = &cpu_data(cpu);

	cpu_set(cpu, cpu_sibling_setup_map);

	if (smp_num_siblings > 1) {
		for_each_cpu_mask(i, cpu_sibling_setup_map) {
			if (c->phys_proc_id == cpu_data(i).phys_proc_id &&
			    c->cpu_core_id == cpu_data(i).cpu_core_id) {
				cpu_set(i, per_cpu(cpu_sibling_map, cpu));
				cpu_set(cpu, per_cpu(cpu_sibling_map, i));
				cpu_set(i, per_cpu(cpu_core_map, cpu));
				cpu_set(cpu, per_cpu(cpu_core_map, i));
				cpu_set(i, c->llc_shared_map);
				cpu_set(cpu, cpu_data(i).llc_shared_map);
			}
		}
	} else {
		cpu_set(cpu, per_cpu(cpu_sibling_map, cpu));
	}

	cpu_set(cpu, c->llc_shared_map);

	if (current_cpu_data.x86_max_cores == 1) {
		per_cpu(cpu_core_map, cpu) = per_cpu(cpu_sibling_map, cpu);
		c->booted_cores = 1;
		return;
	}

	for_each_cpu_mask(i, cpu_sibling_setup_map) {
		if (per_cpu(cpu_llc_id, cpu) != BAD_APICID &&
		    per_cpu(cpu_llc_id, cpu) == per_cpu(cpu_llc_id, i)) {
			cpu_set(i, c->llc_shared_map);
			cpu_set(cpu, cpu_data(i).llc_shared_map);
		}
		if (c->phys_proc_id == cpu_data(i).phys_proc_id) {
			cpu_set(i, per_cpu(cpu_core_map, cpu));
			cpu_set(cpu, per_cpu(cpu_core_map, i));
			/*
			 * Does this new cpu bringup a new core?
			 */
			if (cpus_weight(per_cpu(cpu_sibling_map, cpu)) == 1) {
				/*
				 * for each core in package, increment
				 * the booted_cores for this new cpu
				 */
				if (first_cpu(per_cpu(cpu_sibling_map, i)) == i)
					c->booted_cores++;
				/*
				 * increment the core count for all
				 * the other cpus in this package
				 */
				if (i != cpu)
					cpu_data(i).booted_cores++;
			} else if (i != cpu && !c->booted_cores)
				c->booted_cores = cpu_data(i).booted_cores;
		}
	}
}

/* maps the cpu to the sched domain representing multi-core */
cpumask_t cpu_coregroup_map(int cpu)
{
	struct cpuinfo_x86 *c = &cpu_data(cpu);
	/*
	 * For perf, we return last level cache shared map.
	 * And for power savings, we return cpu_core_map
	 */
	if (sched_mc_power_savings || sched_smt_power_savings)
		return per_cpu(cpu_core_map, cpu);
	else
		return c->llc_shared_map;
}

/*
 * Currently trivial. Write the real->protected mode
 * bootstrap into the page concerned. The caller
 * has made sure it's suitably aligned.
 */
unsigned long __cpuinit setup_trampoline(void)
{
	memcpy(trampoline_base, trampoline_data,
	       trampoline_end - trampoline_data);
	return virt_to_phys(trampoline_base);
}
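
/*
 * The physical address returned above becomes start_ip in do_boot_cpu();
 * a STARTUP IPI carries (start_eip >> 12) as the real-mode start page, so
 * the trampoline must be page-aligned and live below 1MB.
 */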

#ifdef CONFIG_X86_32
/*
 * We are called very early to get the low memory for the
 * SMP bootup trampoline page.
 */
void __init smp_alloc_memory(void)
{
	trampoline_base = alloc_bootmem_low_pages(PAGE_SIZE);
	/*
	 * Has to be in very low memory so we can execute
	 * real-mode AP code.
	 */
	if (__pa(trampoline_base) >= 0x9F000)
		BUG();
}
#endif

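/*
 * The BogoMIPS printing below uses integer math: BogoMIPS = lpj * HZ / 500000,
 * so bogosum/(500000/HZ) is the integer part and (bogosum/(5000/HZ)) % 100
 * the two decimals. E.g. (hypothetical) bogosum = 2,000,000 at HZ = 250
 * prints "1000.00", since 2000000/2000 = 1000. This assumes HZ divides
 * 500000 evenly, as the common HZ choices do.
 */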
void impress_friends(void)
{
	int cpu;
	unsigned long bogosum = 0;
	/*
	 * Allow the user to impress friends.
	 */
	Dprintk("Before bogomips.\n");
	for_each_possible_cpu(cpu)
		if (cpu_isset(cpu, cpu_callout_map))
			bogosum += cpu_data(cpu).loops_per_jiffy;
	printk(KERN_INFO
		"Total of %d processors activated (%lu.%02lu BogoMIPS).\n",
		num_online_cpus(),
		bogosum/(500000/HZ),
		(bogosum/(5000/HZ))%100);

	Dprintk("Before bogocount - setting activated=1.\n");
}

static inline void __inquire_remote_apic(int apicid)
{
	unsigned i, regs[] = { APIC_ID >> 4, APIC_LVR >> 4, APIC_SPIV >> 4 };
	char *names[] = { "ID", "VERSION", "SPIV" };
	int timeout;
	u32 status;

	printk(KERN_INFO "Inquiring remote APIC #%d...\n", apicid);

	for (i = 0; i < ARRAY_SIZE(regs); i++) {
		printk(KERN_INFO "... APIC #%d %s: ", apicid, names[i]);

		/*
		 * Wait for idle.
		 */
		status = safe_apic_wait_icr_idle();
		if (status)
			printk(KERN_CONT
			       "a previous APIC delivery may have failed\n");

		apic_write_around(APIC_ICR2, SET_APIC_DEST_FIELD(apicid));
		apic_write_around(APIC_ICR, APIC_DM_REMRD | regs[i]);

		timeout = 0;
		do {
			udelay(100);
			status = apic_read(APIC_ICR) & APIC_ICR_RR_MASK;
		} while (status == APIC_ICR_RR_INPROG && timeout++ < 1000);

		switch (status) {
		case APIC_ICR_RR_VALID:
			status = apic_read(APIC_RRR);
			printk(KERN_CONT "%08x\n", status);
			break;
		default:
			printk(KERN_CONT "failed\n");
		}
	}
}

#ifdef WAKE_SECONDARY_VIA_NMI
/*
 * Poke the other CPU in the eye via NMI to wake it up. Remember that the
 * normal INIT, INIT, STARTUP sequence will reset the chip hard for us, and
 * this won't ... remember to clear down the APIC, etc later.
 */
static int __devinit
wakeup_secondary_cpu(int logical_apicid, unsigned long start_eip)
{
	unsigned long send_status, accept_status = 0;
	int maxlvt;

	/* Target chip */
	apic_write_around(APIC_ICR2, SET_APIC_DEST_FIELD(logical_apicid));

	/* Boot on the stack */
	/* Kick the second */
	apic_write_around(APIC_ICR, APIC_DM_NMI | APIC_DEST_LOGICAL);

	Dprintk("Waiting for send to finish...\n");
	send_status = safe_apic_wait_icr_idle();

	/*
	 * Give the other CPU some time to accept the IPI.
	 */
	udelay(200);
	/*
	 * Due to the Pentium erratum 3AP.
	 */
	maxlvt = lapic_get_maxlvt();
	if (maxlvt > 3) {
		apic_read_around(APIC_SPIV);
		apic_write(APIC_ESR, 0);
	}
	accept_status = (apic_read(APIC_ESR) & 0xEF);
	Dprintk("NMI sent.\n");

	if (send_status)
		printk(KERN_ERR "APIC never delivered???\n");
	if (accept_status)
		printk(KERN_ERR "APIC delivery error (%lx).\n", accept_status);

	return (send_status | accept_status);
}
#endif	/* WAKE_SECONDARY_VIA_NMI */

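/*
 * The wakeup below follows the MP-spec "universal start-up algorithm":
 * assert INIT (level-triggered), wait, deassert INIT, then send up to two
 * STARTUP IPIs whose vector field carries the page number of the real-mode
 * entry point (start_eip >> 12).
 */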
#ifdef WAKE_SECONDARY_VIA_INIT
static int __devinit
wakeup_secondary_cpu(int phys_apicid, unsigned long start_eip)
{
	unsigned long send_status, accept_status = 0;
	int maxlvt, num_starts, j;

	/*
	 * Be paranoid about clearing APIC errors.
	 */
	if (APIC_INTEGRATED(apic_version[phys_apicid])) {
		apic_read_around(APIC_SPIV);
		apic_write(APIC_ESR, 0);
		apic_read(APIC_ESR);
	}

	Dprintk("Asserting INIT.\n");

	/*
	 * Turn INIT on target chip
	 */
	apic_write_around(APIC_ICR2, SET_APIC_DEST_FIELD(phys_apicid));

	/*
	 * Send IPI
	 */
	apic_write_around(APIC_ICR, APIC_INT_LEVELTRIG | APIC_INT_ASSERT
				| APIC_DM_INIT);

	Dprintk("Waiting for send to finish...\n");
	send_status = safe_apic_wait_icr_idle();

	mdelay(10);

	Dprintk("Deasserting INIT.\n");

	/* Target chip */
	apic_write_around(APIC_ICR2, SET_APIC_DEST_FIELD(phys_apicid));

	/* Send IPI */
	apic_write_around(APIC_ICR, APIC_INT_LEVELTRIG | APIC_DM_INIT);

	Dprintk("Waiting for send to finish...\n");
	send_status = safe_apic_wait_icr_idle();

	mb();
	atomic_set(&init_deasserted, 1);

	/*
	 * Should we send STARTUP IPIs?
	 *
	 * Determine this based on the APIC version.
	 * If we don't have an integrated APIC, don't send the STARTUP IPIs.
	 */
	if (APIC_INTEGRATED(apic_version[phys_apicid]))
		num_starts = 2;
	else
		num_starts = 0;

	/*
	 * Paravirt / VMI wants a startup IPI hook here to set up the
	 * target processor state.
	 */
	startup_ipi_hook(phys_apicid, (unsigned long) start_secondary,
#ifdef CONFIG_X86_64
			 (unsigned long)init_rsp);
#else
			 (unsigned long)stack_start.sp);
#endif

	/*
	 * Run STARTUP IPI loop.
	 */
	Dprintk("#startup loops: %d.\n", num_starts);

	maxlvt = lapic_get_maxlvt();

	for (j = 1; j <= num_starts; j++) {
		Dprintk("Sending STARTUP #%d.\n", j);
		apic_read_around(APIC_SPIV);
		apic_write(APIC_ESR, 0);
		apic_read(APIC_ESR);
		Dprintk("After apic_write.\n");

		/*
		 * STARTUP IPI
		 */

		/* Target chip */
		apic_write_around(APIC_ICR2, SET_APIC_DEST_FIELD(phys_apicid));

		/* Boot on the stack */
		/* Kick the second */
		apic_write_around(APIC_ICR, APIC_DM_STARTUP
					| (start_eip >> 12));

		/*
		 * Give the other CPU some time to accept the IPI.
		 */
		udelay(300);

		Dprintk("Startup point 1.\n");

		Dprintk("Waiting for send to finish...\n");
		send_status = safe_apic_wait_icr_idle();

		/*
		 * Give the other CPU some time to accept the IPI.
		 */
		udelay(200);
		/*
		 * Due to the Pentium erratum 3AP.
		 */
		if (maxlvt > 3) {
			apic_read_around(APIC_SPIV);
			apic_write(APIC_ESR, 0);
		}
		accept_status = (apic_read(APIC_ESR) & 0xEF);
		if (send_status || accept_status)
			break;
	}
	Dprintk("After Startup.\n");

	if (send_status)
		printk(KERN_ERR "APIC never delivered???\n");
	if (accept_status)
		printk(KERN_ERR "APIC delivery error (%lx).\n", accept_status);

	return (send_status | accept_status);
}
#endif	/* WAKE_SECONDARY_VIA_INIT */

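/*
 * Bundles a fork_idle() request: 'work' is queued to keventd, 'done'
 * signals completion back to do_boot_cpu(), and 'idle' carries the new
 * task (or an ERR_PTR on failure).
 */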
struct create_idle {
	struct work_struct work;
	struct task_struct *idle;
	struct completion done;
	int cpu;
};

static void __cpuinit do_fork_idle(struct work_struct *work)
{
	struct create_idle *c_idle =
		container_of(work, struct create_idle, work);

	c_idle->idle = fork_idle(c_idle->cpu);
	complete(&c_idle->done);
}

/*
 * NOTE - on most systems this is a PHYSICAL apic ID, but on multiquad
 * (i.e. clustered apic addressing mode), this is a LOGICAL apic ID.
 * Returns zero if CPU booted OK, else error code from wakeup_secondary_cpu.
 */
static int __cpuinit do_boot_cpu(int apicid, int cpu)
{
	unsigned long boot_error = 0;
	int timeout;
	unsigned long start_ip;
	unsigned short nmi_high = 0, nmi_low = 0;
	struct create_idle c_idle = {
		.cpu = cpu,
		.done = COMPLETION_INITIALIZER_ONSTACK(c_idle.done),
	};
	INIT_WORK(&c_idle.work, do_fork_idle);
#ifdef CONFIG_X86_64
	/* allocate memory for gdts of secondary cpus. Hotplug is considered */
	if (!cpu_gdt_descr[cpu].address &&
	    !(cpu_gdt_descr[cpu].address = get_zeroed_page(GFP_KERNEL))) {
		printk(KERN_ERR "Failed to allocate GDT for CPU %d\n", cpu);
		return -1;
	}

	/* Allocate node local memory for AP pdas */
	if (cpu_pda(cpu) == &boot_cpu_pda[cpu]) {
		struct x8664_pda *newpda, *pda;
		int node = cpu_to_node(cpu);
		pda = cpu_pda(cpu);
		newpda = kmalloc_node(sizeof(struct x8664_pda), GFP_ATOMIC,
				      node);
		if (newpda) {
			memcpy(newpda, pda, sizeof(struct x8664_pda));
			cpu_pda(cpu) = newpda;
		} else
			printk(KERN_ERR
		"Could not allocate node local PDA for CPU %d on node %d\n",
				cpu, node);
	}
#endif

	alternatives_smp_switch(1);

	c_idle.idle = get_idle_for_cpu(cpu);

	/*
	 * We can't use kernel_thread since we must avoid
	 * rescheduling the child.
	 */
	if (c_idle.idle) {
		c_idle.idle->thread.sp = (unsigned long) (((struct pt_regs *)
			(THREAD_SIZE + task_stack_page(c_idle.idle))) - 1);
		init_idle(c_idle.idle, cpu);
		goto do_rest;
	}

	if (!keventd_up() || current_is_keventd())
		c_idle.work.func(&c_idle.work);
	else {
		schedule_work(&c_idle.work);
		wait_for_completion(&c_idle.done);
	}

	if (IS_ERR(c_idle.idle)) {
		printk(KERN_ERR "failed fork for CPU %d\n", cpu);
		return PTR_ERR(c_idle.idle);
	}

	set_idle_for_cpu(cpu, c_idle.idle);
do_rest:
#ifdef CONFIG_X86_32
	per_cpu(current_task, cpu) = c_idle.idle;
	init_gdt(cpu);
	early_gdt_descr.address = (unsigned long)get_cpu_gdt_table(cpu);
	c_idle.idle->thread.ip = (unsigned long) start_secondary;
	/* Stack for startup_32 can be just as for start_secondary onwards */
	stack_start.sp = (void *) c_idle.idle->thread.sp;
	irq_ctx_init(cpu);
#else
	cpu_pda(cpu)->pcurrent = c_idle.idle;
	init_rsp = c_idle.idle->thread.sp;
	load_sp0(&per_cpu(init_tss, cpu), &c_idle.idle->thread);
	initial_code = (unsigned long)start_secondary;
	clear_tsk_thread_flag(c_idle.idle, TIF_FORK);
#endif

	/* start_ip had better be page-aligned! */
	start_ip = setup_trampoline();

	/* So we see what's up */
	printk(KERN_INFO "Booting processor %d/%d ip %lx\n",
	       cpu, apicid, start_ip);

	/*
	 * This grunge runs the startup process for
	 * the targeted processor.
	 */

	atomic_set(&init_deasserted, 0);

	Dprintk("Setting warm reset code and vector.\n");

	store_NMI_vector(&nmi_high, &nmi_low);

	smpboot_setup_warm_reset_vector(start_ip);
	/*
	 * Be paranoid about clearing APIC errors.
	 */
	apic_write(APIC_ESR, 0);
	apic_read(APIC_ESR);

	/*
	 * Starting actual IPI sequence...
	 */
	boot_error = wakeup_secondary_cpu(apicid, start_ip);

	if (!boot_error) {
		/*
		 * allow APs to start initializing.
		 */
		Dprintk("Before Callout %d.\n", cpu);
		cpu_set(cpu, cpu_callout_map);
		Dprintk("After Callout %d.\n", cpu);

		/*
		 * Wait 5s total for a response
		 */
		for (timeout = 0; timeout < 50000; timeout++) {
			if (cpu_isset(cpu, cpu_callin_map))
				break;	/* It has booted */
			udelay(100);
		}

		if (cpu_isset(cpu, cpu_callin_map)) {
			/* number CPUs logically, starting from 1 (BSP is 0) */
			Dprintk("OK.\n");
			printk(KERN_INFO "CPU%d: ", cpu);
			print_cpu_info(&cpu_data(cpu));
			Dprintk("CPU has booted.\n");
		} else {
			boot_error = 1;
			if (*((volatile unsigned char *)trampoline_base)
					== 0xA5)
				/* trampoline started but...? */
				printk(KERN_ERR "Stuck ??\n");
			else
				/* trampoline code not run */
				printk(KERN_ERR "Not responding.\n");
			inquire_remote_apic(apicid);
		}
	}

	if (boot_error) {
		/* Try to put things back the way they were before ... */
		unmap_cpu_to_logical_apicid(cpu);
#ifdef CONFIG_X86_64
		clear_node_cpumask(cpu); /* was set by numa_add_cpu */
#endif
		cpu_clear(cpu, cpu_callout_map); /* was set by do_boot_cpu() */
		cpu_clear(cpu, cpu_initialized); /* was set by cpu_init() */
		cpu_clear(cpu, cpu_possible_map);
		cpu_clear(cpu, cpu_present_map);
		per_cpu(x86_cpu_to_apicid, cpu) = BAD_APICID;
	}

	/* mark "stuck" area as not stuck */
	*((volatile unsigned long *)trampoline_base) = 0;

	return boot_error;
}

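/*
 * native_cpu_up(): validate the apicid, save MTRR state so the AP starts
 * in sync, (on 32-bit) clone the low identity mapping the trampoline needs,
 * then do_boot_cpu() and spin until the AP appears in cpu_online_map.
 * check_tsc_sync_source() here pairs with check_tsc_sync_target() run by
 * the AP in start_secondary().
 */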
int __cpuinit native_cpu_up(unsigned int cpu)
{
	int apicid = cpu_present_to_apicid(cpu);
	unsigned long flags;
	int err;

	WARN_ON(irqs_disabled());

	Dprintk("++++++++++++++++++++=_---CPU UP %u\n", cpu);

	if (apicid == BAD_APICID || apicid == boot_cpu_physical_apicid ||
	    !physid_isset(apicid, phys_cpu_present_map)) {
		printk(KERN_ERR "%s: bad cpu %d\n", __func__, cpu);
		return -EINVAL;
	}

	/*
	 * Already booted CPU?
	 */
	if (cpu_isset(cpu, cpu_callin_map)) {
		Dprintk("do_boot_cpu %d Already started\n", cpu);
		return -ENOSYS;
	}

	/*
	 * Save current MTRR state in case it was changed since early boot
	 * (e.g. by the ACPI SMI) to initialize new CPUs with MTRRs in sync:
	 */
	mtrr_save_state();

	per_cpu(cpu_state, cpu) = CPU_UP_PREPARE;

#ifdef CONFIG_X86_32
	/* init low mem mapping */
	clone_pgd_range(swapper_pg_dir, swapper_pg_dir + USER_PGD_PTRS,
			min_t(unsigned long, KERNEL_PGD_PTRS, USER_PGD_PTRS));
	flush_tlb_all();
#endif

	err = do_boot_cpu(apicid, cpu);
	if (err < 0) {
		Dprintk("do_boot_cpu failed %d\n", err);
		return err;
	}

	/*
	 * Check TSC synchronization with the AP (keep irqs disabled
	 * while doing so):
	 */
	local_irq_save(flags);
	check_tsc_sync_source(cpu);
	local_irq_restore(flags);

	while (!cpu_isset(cpu, cpu_online_map)) {
		cpu_relax();
		touch_nmi_watchdog();
	}

	return 0;
}

/*
 * Early setup to make printk work.
 */
void __init native_smp_prepare_boot_cpu(void)
{
	int me = smp_processor_id();
#ifdef CONFIG_X86_32
	init_gdt(me);
	switch_to_new_gdt();
#endif
	/* already set me in cpu_online_map in boot_cpu_init() */
	cpu_set(me, cpu_callout_map);
	per_cpu(cpu_state, me) = CPU_ONLINE;
}

#ifdef CONFIG_HOTPLUG_CPU
void remove_siblinginfo(int cpu)
{
	int sibling;
	struct cpuinfo_x86 *c = &cpu_data(cpu);

	for_each_cpu_mask(sibling, per_cpu(cpu_core_map, cpu)) {
		cpu_clear(cpu, per_cpu(cpu_core_map, sibling));
		/*
		 * last thread sibling in this cpu core going down
		 */
		if (cpus_weight(per_cpu(cpu_sibling_map, cpu)) == 1)
			cpu_data(sibling).booted_cores--;
	}

	for_each_cpu_mask(sibling, per_cpu(cpu_sibling_map, cpu))
		cpu_clear(cpu, per_cpu(cpu_sibling_map, sibling));
	cpus_clear(per_cpu(cpu_sibling_map, cpu));
	cpus_clear(per_cpu(cpu_core_map, cpu));
	c->phys_proc_id = 0;
	c->cpu_core_id = 0;
	cpu_clear(cpu, cpu_sibling_setup_map);
}

int additional_cpus __initdata = -1;

static __init int setup_additional_cpus(char *s)
{
	return s && get_option(&s, &additional_cpus) ? 0 : -EINVAL;
}
early_param("additional_cpus", setup_additional_cpus);

/*
 * cpu_possible_map should be static: it cannot change as cpus
 * are onlined or offlined. The reason is that per-cpu data structures
 * are allocated by some modules at init time, which don't expect to
 * do this dynamically on cpu arrival/departure.
 * cpu_present_map, on the other hand, can change dynamically.
 * If cpu hotplug is not compiled in, we resort to the current
 * behaviour, which is cpu_possible == cpu_present.
 * - Ashok Raj
 *
 * Three ways to find out the number of additional hotplug CPUs:
 * - If the BIOS specified disabled CPUs in ACPI/mptables use that.
 * - The user can override it with additional_cpus=NUM
 * - Otherwise don't reserve additional CPUs.
 * We do this because additional CPUs waste a lot of memory.
 * -AK
 */
__init void prefill_possible_map(void)
{
	int i;
	int possible;

	if (additional_cpus == -1) {
		if (disabled_cpus > 0)
			additional_cpus = disabled_cpus;
		else
			additional_cpus = 0;
	}
	possible = num_processors + additional_cpus;
	if (possible > NR_CPUS)
		possible = NR_CPUS;

	printk(KERN_INFO "SMP: Allowing %d CPUs, %d hotplug CPUs\n",
		possible, max_t(int, possible - num_processors, 0));

	for (i = 0; i < possible; i++)
		cpu_set(i, cpu_possible_map);
}
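
/*
 * Example (hypothetical): a board that enumerates 2 CPUs with 2 more
 * disabled in the BIOS gets possible = 2 + 2 = 4 above, logged as
 * "SMP: Allowing 4 CPUs, 2 hotplug CPUs".
 */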

static void __ref remove_cpu_from_maps(int cpu)
{
	cpu_clear(cpu, cpu_online_map);
#ifdef CONFIG_X86_64
	cpu_clear(cpu, cpu_callout_map);
	cpu_clear(cpu, cpu_callin_map);
	/* was set by cpu_init() */
	clear_bit(cpu, (unsigned long *)&cpu_initialized);
	clear_node_cpumask(cpu);
#endif
}

int __cpu_disable(void)
{
	int cpu = smp_processor_id();

	/*
	 * Perhaps use cpufreq to drop frequency, but that could go
	 * into generic code.
	 *
	 * We won't take down the boot processor on i386 due to some
	 * interrupts only being able to be serviced by the BSP.
	 * Especially so if we're not using an IOAPIC	-zwane
	 */
	if (cpu == 0)
		return -EBUSY;

	if (nmi_watchdog == NMI_LOCAL_APIC)
		stop_apic_nmi_watchdog(NULL);
	clear_local_APIC();

	/*
	 * HACK:
	 * Allow any queued timer interrupts to get serviced.
	 * This is only a temporary solution until we cleanup
	 * fixup_irqs as we do for IA64.
	 */
	local_irq_enable();
	mdelay(1);

	local_irq_disable();
	remove_siblinginfo(cpu);

	/* It's now safe to remove this processor from the online map */
	remove_cpu_from_maps(cpu);
	fixup_irqs(cpu_online_map);
	return 0;
}

void __cpu_die(unsigned int cpu)
{
	/* We don't do anything here: idle task is faking death itself. */
	unsigned int i;

	for (i = 0; i < 10; i++) {
		/* They ack this in play_dead by setting CPU_DEAD */
		if (per_cpu(cpu_state, cpu) == CPU_DEAD) {
			printk(KERN_INFO "CPU %d is now offline\n", cpu);
			if (num_online_cpus() == 1)
				alternatives_smp_switch(0);
			return;
		}
		msleep(100);
	}
	printk(KERN_ERR "CPU %u didn't die...\n", cpu);
}
#else /* ... !CONFIG_HOTPLUG_CPU */
int __cpu_disable(void)
{
	return -ENOSYS;
}

void __cpu_die(unsigned int cpu)
{
	/* We said "no" in __cpu_disable */
	BUG();
}
#endif

/*
 * If the BIOS enumerates physical processors before logical,
 * maxcpus=N at enumeration-time can be used to disable HT.
 */
static int __init parse_maxcpus(char *arg)
{
	extern unsigned int maxcpus;

	maxcpus = simple_strtoul(arg, NULL, 0);
	return 0;
}
early_param("maxcpus", parse_maxcpus);