#include <linux/init.h>
#include <linux/smp.h>
#include <linux/module.h>
#include <linux/sched.h>
#include <linux/percpu.h>
#include <linux/bootmem.h>
#include <linux/err.h>
#include <linux/nmi.h>

#include <asm/desc.h>
#include <asm/nmi.h>
#include <asm/irq.h>
#include <asm/smp.h>
#include <asm/cpu.h>
#include <asm/numa.h>
#include <asm/pgtable.h>
#include <asm/tlbflush.h>
#include <asm/mtrr.h>
#include <linux/mc146818rtc.h>

#include <mach_apic.h>
#include <mach_wakecpu.h>
#include <smpboot_hooks.h>

/*
 * Store all idle threads: these can be reused instead of creating a new
 * thread.  This also avoids complicated thread-destroy functionality for
 * idle threads.
 */
#ifdef CONFIG_HOTPLUG_CPU
/*
 * Needed only for CONFIG_HOTPLUG_CPU because __cpuinitdata is
 * removed after init for !CONFIG_HOTPLUG_CPU.
 */
static DEFINE_PER_CPU(struct task_struct *, idle_thread_array);
#define get_idle_for_cpu(x)	(per_cpu(idle_thread_array, x))
#define set_idle_for_cpu(x, p)	(per_cpu(idle_thread_array, x) = (p))
#else
struct task_struct *idle_thread_array[NR_CPUS] __cpuinitdata;
#define get_idle_for_cpu(x)	(idle_thread_array[(x)])
#define set_idle_for_cpu(x, p)	(idle_thread_array[(x)] = (p))
#endif

/* Number of siblings per CPU package */
int smp_num_siblings = 1;
EXPORT_SYMBOL(smp_num_siblings);

/* Last level cache ID of each logical CPU */
DEFINE_PER_CPU(u16, cpu_llc_id) = BAD_APICID;

/* bitmap of online cpus */
cpumask_t cpu_online_map __read_mostly;
EXPORT_SYMBOL(cpu_online_map);

cpumask_t cpu_callin_map;
cpumask_t cpu_callout_map;
cpumask_t cpu_possible_map;
EXPORT_SYMBOL(cpu_possible_map);

/* representing HT siblings of each logical CPU */
DEFINE_PER_CPU(cpumask_t, cpu_sibling_map);
EXPORT_PER_CPU_SYMBOL(cpu_sibling_map);

/* representing HT and core siblings of each logical CPU */
DEFINE_PER_CPU(cpumask_t, cpu_core_map);
EXPORT_PER_CPU_SYMBOL(cpu_core_map);

/* Per CPU bogomips and other parameters */
DEFINE_PER_CPU_SHARED_ALIGNED(struct cpuinfo_x86, cpu_info);
EXPORT_PER_CPU_SYMBOL(cpu_info);

static atomic_t init_deasserted;

/* Correct for x86_64; harmless on x86_32, where smp_alloc_memory() overwrites it */
unsigned char *trampoline_base = __va(SMP_TRAMPOLINE_BASE);

/* representing cpus for which sibling maps can be computed */
static cpumask_t cpu_sibling_setup_map;

/* Set if we find a B stepping CPU */
int __cpuinitdata smp_b_stepping;

#if defined(CONFIG_NUMA) && defined(CONFIG_X86_32)

/* which logical CPUs are on which nodes */
cpumask_t node_to_cpumask_map[MAX_NUMNODES] __read_mostly =
				{ [0 ... MAX_NUMNODES-1] = CPU_MASK_NONE };
EXPORT_SYMBOL(node_to_cpumask_map);
/* which node each logical CPU is on */
int cpu_to_node_map[NR_CPUS] __read_mostly = { [0 ... NR_CPUS-1] = 0 };
EXPORT_SYMBOL(cpu_to_node_map);

/* set up a mapping between cpu and node. */
static void map_cpu_to_node(int cpu, int node)
{
	printk(KERN_INFO "Mapping cpu %d to node %d\n", cpu, node);
	cpu_set(cpu, node_to_cpumask_map[node]);
	cpu_to_node_map[cpu] = node;
}

/* undo a mapping between cpu and node. */
static void unmap_cpu_to_node(int cpu)
{
	int node;

	printk(KERN_INFO "Unmapping cpu %d from all nodes\n", cpu);
	for (node = 0; node < MAX_NUMNODES; node++)
		cpu_clear(cpu, node_to_cpumask_map[node]);
	cpu_to_node_map[cpu] = 0;
}
#else /* !(CONFIG_NUMA && CONFIG_X86_32) */
#define map_cpu_to_node(cpu, node)	({})
#define unmap_cpu_to_node(cpu)	({})
#endif

#ifdef CONFIG_X86_32
u8 cpu_2_logical_apicid[NR_CPUS] __read_mostly =
					{ [0 ... NR_CPUS-1] = BAD_APICID };

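/*
 * Record the logical APIC ID of the calling CPU and map the CPU onto
 * its NUMA node, falling back to the first online node if needed.
 */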
void map_cpu_to_logical_apicid(void)
{
	int cpu = smp_processor_id();
	int apicid = logical_smp_processor_id();
	int node = apicid_to_node(apicid);

	if (!node_online(node))
		node = first_online_node;

	cpu_2_logical_apicid[cpu] = apicid;
	map_cpu_to_node(cpu, node);
}

void unmap_cpu_to_logical_apicid(int cpu)
{
	cpu_2_logical_apicid[cpu] = BAD_APICID;
	unmap_cpu_to_node(cpu);
}
#else
#define unmap_cpu_to_logical_apicid(cpu)	do {} while (0)
#define map_cpu_to_logical_apicid()	do {} while (0)
#endif

/*
 * Report back to the Boot Processor.
 * Running on AP.
 */
void __cpuinit smp_callin(void)
{
	int cpuid, phys_id;
	unsigned long timeout;

	/*
	 * If woken up by an INIT in an 82489DX configuration
	 * we may get here before an INIT-deassert IPI reaches
	 * our local APIC.  We have to wait for the IPI or we'll
	 * lock up on an APIC access.
	 */
	wait_for_init_deassert(&init_deasserted);

	/*
	 * (This works even if the APIC is not enabled.)
	 */
	phys_id = GET_APIC_ID(apic_read(APIC_ID));
	cpuid = smp_processor_id();
	if (cpu_isset(cpuid, cpu_callin_map)) {
		panic("%s: phys CPU#%d, CPU#%d already present??\n", __func__,
					phys_id, cpuid);
	}
	Dprintk("CPU#%d (phys ID: %d) waiting for CALLOUT\n", cpuid, phys_id);

	/*
	 * STARTUP IPIs are fragile beasts as they might sometimes
	 * trigger some glue motherboard logic.  Complete APIC bus
	 * silence for 1 second, this overestimates the time the
	 * boot CPU is spending to send the up to 2 STARTUP IPIs
	 * by a factor of two.  This should be enough.
	 */

	/*
	 * Waiting 2s total for startup (udelay is not yet working)
	 */
	timeout = jiffies + 2*HZ;
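	/* time_before() handles jiffies wraparound safely */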
	while (time_before(jiffies, timeout)) {
		/*
		 * Has the boot CPU finished its STARTUP sequence?
		 */
		if (cpu_isset(cpuid, cpu_callout_map))
			break;
		cpu_relax();
	}

	if (!time_before(jiffies, timeout)) {
		panic("%s: CPU%d started up but did not get a callout!\n",
			__func__, cpuid);
	}

	/*
	 * the boot CPU has finished the init stage and is spinning
	 * on callin_map until we finish. We are free to set up this
	 * CPU, first the APIC. (this is probably redundant on most
	 * boards)
	 */

	Dprintk("CALLIN, before setup_local_APIC().\n");
	smp_callin_clear_local_apic();
	setup_local_APIC();
	end_local_APIC_setup();
	map_cpu_to_logical_apicid();

	/*
	 * Get our bogomips.
	 *
	 * Need to enable IRQs because it can take longer and then
	 * the NMI watchdog might kill us.
	 */
	local_irq_enable();
	calibrate_delay();
	local_irq_disable();
	Dprintk("Stack at about %p\n", &cpuid);

	/*
	 * Save our processor parameters
	 */
	smp_store_cpu_info(cpuid);

	/*
	 * Allow the master to continue.
	 */
	cpu_set(cpuid, cpu_callin_map);
}

static void __cpuinit smp_apply_quirks(struct cpuinfo_x86 *c)
{
#ifdef CONFIG_X86_32
	/*
	 * Mask B, Pentium, but not Pentium MMX
	 */
	if (c->x86_vendor == X86_VENDOR_INTEL &&
	    c->x86 == 5 &&
	    c->x86_mask >= 1 && c->x86_mask <= 4 &&
	    c->x86_model <= 3)
		/*
		 * Remember we have B step Pentia with bugs
		 */
		smp_b_stepping = 1;

	/*
	 * Certain Athlons might work (for various values of 'work') in SMP
	 * but they are not certified as MP capable.
	 */
	if ((c->x86_vendor == X86_VENDOR_AMD) && (c->x86 == 6)) {

		if (num_possible_cpus() == 1)
			goto valid_k7;

		/* Athlon 660/661 is valid. */
		if ((c->x86_model == 6) && ((c->x86_mask == 0) ||
		    (c->x86_mask == 1)))
			goto valid_k7;

		/* Duron 670 is valid */
		if ((c->x86_model == 7) && (c->x86_mask == 0))
			goto valid_k7;

		/*
		 * Athlon 662, Duron 671, and Athlon >model 7 have the MP
		 * capability bit.  It's worth noting that the A5 stepping
		 * (662) of some Athlon XPs has the MP bit set.
		 * See http://www.heise.de/newsticker/data/jow-18.10.01-000
		 * for more.
		 */
		if (((c->x86_model == 6) && (c->x86_mask >= 2)) ||
		    ((c->x86_model == 7) && (c->x86_mask >= 1)) ||
		    (c->x86_model > 7))
			if (cpu_has_mp)
				goto valid_k7;

		/* If we get here, not a certified SMP capable AMD system. */
		add_taint(TAINT_UNSAFE_SMP);
	}

valid_k7:
	;
#endif
}

void smp_checks(void)
{
	if (smp_b_stepping)
		printk(KERN_WARNING "WARNING: SMP operation may be unreliable"
				    " with B stepping processors.\n");

	/*
	 * Don't taint if we are running SMP kernel on a single non-MP
	 * approved Athlon
	 */
	if (tainted & TAINT_UNSAFE_SMP) {
		if (num_online_cpus())
			printk(KERN_INFO "WARNING: This combination of AMD"
				" processors is not suitable for SMP.\n");
		else
			tainted &= ~TAINT_UNSAFE_SMP;
	}
}

/*
 * The bootstrap kernel entry code has set these up.  Save them for
 * a given CPU
 */

void __cpuinit smp_store_cpu_info(int id)
{
	struct cpuinfo_x86 *c = &cpu_data(id);

	*c = boot_cpu_data;
	c->cpu_index = id;
	if (id != 0)
		identify_secondary_cpu(c);
	smp_apply_quirks(c);
}

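/*
 * Record @cpu's siblings: update the per-cpu HT-sibling and core maps,
 * the last-level-cache sharing masks, and the booted_cores counts of
 * every CPU already registered in cpu_sibling_setup_map.
 */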
void __cpuinit set_cpu_sibling_map(int cpu)
{
	int i;
	struct cpuinfo_x86 *c = &cpu_data(cpu);

	cpu_set(cpu, cpu_sibling_setup_map);

	if (smp_num_siblings > 1) {
		for_each_cpu_mask(i, cpu_sibling_setup_map) {
			if (c->phys_proc_id == cpu_data(i).phys_proc_id &&
			    c->cpu_core_id == cpu_data(i).cpu_core_id) {
				cpu_set(i, per_cpu(cpu_sibling_map, cpu));
				cpu_set(cpu, per_cpu(cpu_sibling_map, i));
				cpu_set(i, per_cpu(cpu_core_map, cpu));
				cpu_set(cpu, per_cpu(cpu_core_map, i));
				cpu_set(i, c->llc_shared_map);
				cpu_set(cpu, cpu_data(i).llc_shared_map);
			}
		}
	} else {
		cpu_set(cpu, per_cpu(cpu_sibling_map, cpu));
	}

	cpu_set(cpu, c->llc_shared_map);

	if (current_cpu_data.x86_max_cores == 1) {
		per_cpu(cpu_core_map, cpu) = per_cpu(cpu_sibling_map, cpu);
		c->booted_cores = 1;
		return;
	}

	for_each_cpu_mask(i, cpu_sibling_setup_map) {
		if (per_cpu(cpu_llc_id, cpu) != BAD_APICID &&
		    per_cpu(cpu_llc_id, cpu) == per_cpu(cpu_llc_id, i)) {
			cpu_set(i, c->llc_shared_map);
			cpu_set(cpu, cpu_data(i).llc_shared_map);
		}
		if (c->phys_proc_id == cpu_data(i).phys_proc_id) {
			cpu_set(i, per_cpu(cpu_core_map, cpu));
			cpu_set(cpu, per_cpu(cpu_core_map, i));
			/*
			 * Does this new cpu bring up a new core?
			 */
			if (cpus_weight(per_cpu(cpu_sibling_map, cpu)) == 1) {
				/*
				 * for each core in package, increment
				 * the booted_cores for this new cpu
				 */
				if (first_cpu(per_cpu(cpu_sibling_map, i)) == i)
					c->booted_cores++;
				/*
				 * increment the core count for all
				 * the other cpus in this package
				 */
				if (i != cpu)
					cpu_data(i).booted_cores++;
			} else if (i != cpu && !c->booted_cores)
				c->booted_cores = cpu_data(i).booted_cores;
		}
	}
}

/* maps the cpu to the sched domain representing multi-core */
cpumask_t cpu_coregroup_map(int cpu)
{
	struct cpuinfo_x86 *c = &cpu_data(cpu);
	/*
	 * For performance we return the last-level-cache shared map;
	 * for power savings we return cpu_core_map.
	 */
	if (sched_mc_power_savings || sched_smt_power_savings)
		return per_cpu(cpu_core_map, cpu);
	else
		return c->llc_shared_map;
}

/*
 * Currently trivial.  Write the real->protected mode
 * bootstrap into the page concerned.  The caller
 * has made sure it's suitably aligned.
 */

unsigned long __cpuinit setup_trampoline(void)
{
	memcpy(trampoline_base, trampoline_data,
	       trampoline_end - trampoline_data);
	return virt_to_phys(trampoline_base);
}

#ifdef CONFIG_X86_32
/*
 * We are called very early to get the low memory for the
 * SMP bootup trampoline page.
 */
void __init smp_alloc_memory(void)
{
	trampoline_base = alloc_bootmem_low_pages(PAGE_SIZE);
	/*
	 * Has to be in very low memory so we can execute
	 * real-mode AP code.
	 */
	if (__pa(trampoline_base) >= 0x9F000)
		BUG();
}
#endif

void impress_friends(void)
{
	int cpu;
	unsigned long bogosum = 0;
	/*
	 * Allow the user to impress friends.
	 */
	Dprintk("Before bogomips.\n");
	for_each_possible_cpu(cpu)
		if (cpu_isset(cpu, cpu_callout_map))
			bogosum += cpu_data(cpu).loops_per_jiffy;
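	/*
	 * One BogoMIPS is 500000 delay-loop iterations per second, and
	 * loops_per_jiffy counts iterations per tick, so the integer part
	 * is bogosum/(500000/HZ) and the two decimal digits come from
	 * (bogosum/(5000/HZ)) % 100.
	 */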
	printk(KERN_INFO
		"Total of %d processors activated (%lu.%02lu BogoMIPS).\n",
		num_online_cpus(),
		bogosum/(500000/HZ),
		(bogosum/(5000/HZ))%100);

	Dprintk("Before bogocount - setting activated=1.\n");
}

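/*
 * Debug helper: use remote-read ICR commands to dump the ID, version
 * and spurious-interrupt registers of another CPU's APIC.
 */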
static inline void __inquire_remote_apic(int apicid)
{
	unsigned i, regs[] = { APIC_ID >> 4, APIC_LVR >> 4, APIC_SPIV >> 4 };
	char *names[] = { "ID", "VERSION", "SPIV" };
	int timeout;
	u32 status;

	printk(KERN_INFO "Inquiring remote APIC #%d...\n", apicid);

	for (i = 0; i < ARRAY_SIZE(regs); i++) {
		printk(KERN_INFO "... APIC #%d %s: ", apicid, names[i]);

		/*
		 * Wait for idle.
		 */
		status = safe_apic_wait_icr_idle();
		if (status)
			printk(KERN_CONT
			       "a previous APIC delivery may have failed\n");

		apic_write_around(APIC_ICR2, SET_APIC_DEST_FIELD(apicid));
		apic_write_around(APIC_ICR, APIC_DM_REMRD | regs[i]);

		timeout = 0;
		do {
			udelay(100);
			status = apic_read(APIC_ICR) & APIC_ICR_RR_MASK;
		} while (status == APIC_ICR_RR_INPROG && timeout++ < 1000);

		switch (status) {
		case APIC_ICR_RR_VALID:
			status = apic_read(APIC_RRR);
			printk(KERN_CONT "%08x\n", status);
			break;
		default:
			printk(KERN_CONT "failed\n");
		}
	}
}

#ifdef WAKE_SECONDARY_VIA_NMI
/*
 * Poke the other CPU in the eye via NMI to wake it up.  Remember that the
 * normal INIT, INIT, STARTUP sequence will reset the chip hard for us, and
 * this won't ... remember to clear down the APIC, etc later.
 */
static int __devinit
wakeup_secondary_cpu(int logical_apicid, unsigned long start_eip)
{
	unsigned long send_status, accept_status = 0;
	int maxlvt;

	/* Target chip */
	apic_write_around(APIC_ICR2, SET_APIC_DEST_FIELD(logical_apicid));

	/* Boot on the stack */
	/* Kick the second */
	apic_write_around(APIC_ICR, APIC_DM_NMI | APIC_DEST_LOGICAL);

	Dprintk("Waiting for send to finish...\n");
	send_status = safe_apic_wait_icr_idle();

	/*
	 * Give the other CPU some time to accept the IPI.
	 */
	udelay(200);
	/*
	 * Due to the Pentium erratum 3AP.
	 */
	maxlvt = lapic_get_maxlvt();
	if (maxlvt > 3) {
		apic_read_around(APIC_SPIV);
		apic_write(APIC_ESR, 0);
	}
	accept_status = (apic_read(APIC_ESR) & 0xEF);
	Dprintk("NMI sent.\n");

	if (send_status)
		printk(KERN_ERR "APIC never delivered???\n");
	if (accept_status)
		printk(KERN_ERR "APIC delivery error (%lx).\n", accept_status);

	return (send_status | accept_status);
}
#endif	/* WAKE_SECONDARY_VIA_NMI */

extern void start_secondary(void *unused);
#ifdef WAKE_SECONDARY_VIA_INIT
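/*
 * Wake up an AP with the MP-spec universal start-up algorithm: assert
 * INIT, wait, deassert INIT, then (on integrated APICs only) send up to
 * two STARTUP IPIs whose vector field carries the page number of the
 * trampoline, i.e. start_eip >> 12.
 */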
static int __devinit
wakeup_secondary_cpu(int phys_apicid, unsigned long start_eip)
{
	unsigned long send_status, accept_status = 0;
	int maxlvt, num_starts, j;

	/*
	 * Be paranoid about clearing APIC errors.
	 */
	if (APIC_INTEGRATED(apic_version[phys_apicid])) {
		apic_read_around(APIC_SPIV);
		apic_write(APIC_ESR, 0);
		apic_read(APIC_ESR);
	}

	Dprintk("Asserting INIT.\n");

	/*
	 * Turn INIT on target chip
	 */
	apic_write_around(APIC_ICR2, SET_APIC_DEST_FIELD(phys_apicid));

	/*
	 * Send IPI
	 */
	apic_write_around(APIC_ICR, APIC_INT_LEVELTRIG | APIC_INT_ASSERT
				| APIC_DM_INIT);

	Dprintk("Waiting for send to finish...\n");
	send_status = safe_apic_wait_icr_idle();

	mdelay(10);

	Dprintk("Deasserting INIT.\n");

	/* Target chip */
	apic_write_around(APIC_ICR2, SET_APIC_DEST_FIELD(phys_apicid));

	/* Send IPI */
	apic_write_around(APIC_ICR, APIC_INT_LEVELTRIG | APIC_DM_INIT);

	Dprintk("Waiting for send to finish...\n");
	send_status = safe_apic_wait_icr_idle();

	mb();
	atomic_set(&init_deasserted, 1);

	/*
	 * Should we send STARTUP IPIs ?
	 *
	 * Determine this based on the APIC version.
	 * If we don't have an integrated APIC, don't send the STARTUP IPIs.
	 */
	if (APIC_INTEGRATED(apic_version[phys_apicid]))
		num_starts = 2;
	else
		num_starts = 0;

	/*
	 * Paravirt / VMI wants a startup IPI hook here to set up the
	 * target processor state.
	 */
	startup_ipi_hook(phys_apicid, (unsigned long) start_secondary,
#ifdef CONFIG_X86_64
			 (unsigned long)init_rsp);
#else
			 (unsigned long)stack_start.sp);
#endif

	/*
	 * Run STARTUP IPI loop.
	 */
	Dprintk("#startup loops: %d.\n", num_starts);

	maxlvt = lapic_get_maxlvt();

	for (j = 1; j <= num_starts; j++) {
		Dprintk("Sending STARTUP #%d.\n", j);
		apic_read_around(APIC_SPIV);
		apic_write(APIC_ESR, 0);
		apic_read(APIC_ESR);
		Dprintk("After apic_write.\n");

		/*
		 * STARTUP IPI
		 */

		/* Target chip */
		apic_write_around(APIC_ICR2, SET_APIC_DEST_FIELD(phys_apicid));

		/* Boot on the stack */
		/* Kick the second */
		apic_write_around(APIC_ICR, APIC_DM_STARTUP
					| (start_eip >> 12));

		/*
		 * Give the other CPU some time to accept the IPI.
		 */
		udelay(300);

		Dprintk("Startup point 1.\n");

		Dprintk("Waiting for send to finish...\n");
		send_status = safe_apic_wait_icr_idle();

		/*
		 * Give the other CPU some time to accept the IPI.
		 */
		udelay(200);
		/*
		 * Due to the Pentium erratum 3AP.
		 */
		if (maxlvt > 3) {
			apic_read_around(APIC_SPIV);
			apic_write(APIC_ESR, 0);
		}
		accept_status = (apic_read(APIC_ESR) & 0xEF);
		if (send_status || accept_status)
			break;
	}
	Dprintk("After Startup.\n");

	if (send_status)
		printk(KERN_ERR "APIC never delivered???\n");
	if (accept_status)
		printk(KERN_ERR "APIC delivery error (%lx).\n", accept_status);

	return (send_status | accept_status);
}
#endif	/* WAKE_SECONDARY_VIA_INIT */

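/*
 * Idle threads are created via keventd (see do_fork_idle() below) so
 * that fork_idle() runs from keventd's kernel-thread context rather
 * than from whichever task happened to call cpu_up().
 */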
struct create_idle {
	struct work_struct work;
	struct task_struct *idle;
	struct completion done;
	int cpu;
};

static void __cpuinit do_fork_idle(struct work_struct *work)
{
	struct create_idle *c_idle =
		container_of(work, struct create_idle, work);

	c_idle->idle = fork_idle(c_idle->cpu);
	complete(&c_idle->done);
}

static int __cpuinit do_boot_cpu(int apicid, int cpu)
/*
 * NOTE - on most systems this is a PHYSICAL apic ID, but on multiquad
 * (ie clustered apic addressing mode), this is a LOGICAL apic ID.
 * Returns zero if CPU booted OK, else error code from wakeup_secondary_cpu.
 */
{
	unsigned long boot_error = 0;
	int timeout;
	unsigned long start_ip;
	unsigned short nmi_high = 0, nmi_low = 0;
	struct create_idle c_idle = {
		.cpu = cpu,
		.done = COMPLETION_INITIALIZER_ONSTACK(c_idle.done),
	};
	INIT_WORK(&c_idle.work, do_fork_idle);
#ifdef CONFIG_X86_64
	/* allocate memory for gdts of secondary cpus. Hotplug is considered */
	if (!cpu_gdt_descr[cpu].address &&
		!(cpu_gdt_descr[cpu].address = get_zeroed_page(GFP_KERNEL))) {
		printk(KERN_ERR "Failed to allocate GDT for CPU %d\n", cpu);
		return -1;
	}

	/* Allocate node local memory for AP pdas */
	if (cpu_pda(cpu) == &boot_cpu_pda[cpu]) {
		struct x8664_pda *newpda, *pda;
		int node = cpu_to_node(cpu);
		pda = cpu_pda(cpu);
		newpda = kmalloc_node(sizeof(struct x8664_pda), GFP_ATOMIC,
				      node);
		if (newpda) {
			memcpy(newpda, pda, sizeof(struct x8664_pda));
			cpu_pda(cpu) = newpda;
		} else
			printk(KERN_ERR
		"Could not allocate node local PDA for CPU %d on node %d\n",
				cpu, node);
	}
#endif

	alternatives_smp_switch(1);

	c_idle.idle = get_idle_for_cpu(cpu);

	/*
	 * We can't use kernel_thread() since we must avoid rescheduling
	 * the child.
	 */
	if (c_idle.idle) {
		c_idle.idle->thread.sp = (unsigned long) (((struct pt_regs *)
			(THREAD_SIZE + task_stack_page(c_idle.idle))) - 1);
		init_idle(c_idle.idle, cpu);
		goto do_rest;
	}

	if (!keventd_up() || current_is_keventd())
		c_idle.work.func(&c_idle.work);
	else {
		schedule_work(&c_idle.work);
		wait_for_completion(&c_idle.done);
	}

	if (IS_ERR(c_idle.idle)) {
		printk(KERN_ERR "failed fork for CPU %d\n", cpu);
		return PTR_ERR(c_idle.idle);
	}

	set_idle_for_cpu(cpu, c_idle.idle);
do_rest:
#ifdef CONFIG_X86_32
	per_cpu(current_task, cpu) = c_idle.idle;
	init_gdt(cpu);
	early_gdt_descr.address = (unsigned long)get_cpu_gdt_table(cpu);
	c_idle.idle->thread.ip = (unsigned long) start_secondary;
	/* Stack for startup_32 can be just as for start_secondary onwards */
	stack_start.sp = (void *) c_idle.idle->thread.sp;
	irq_ctx_init(cpu);
#else
	cpu_pda(cpu)->pcurrent = c_idle.idle;
	init_rsp = c_idle.idle->thread.sp;
	load_sp0(&per_cpu(init_tss, cpu), &c_idle.idle->thread);
	initial_code = (unsigned long)start_secondary;
	clear_tsk_thread_flag(c_idle.idle, TIF_FORK);
#endif

	/* start_ip had better be page-aligned! */
	start_ip = setup_trampoline();

	/* So we see what's up */
	printk(KERN_INFO "Booting processor %d/%d ip %lx\n",
	       cpu, apicid, start_ip);

	/*
	 * This grunge runs the startup process for
	 * the targeted processor.
	 */

	atomic_set(&init_deasserted, 0);

	Dprintk("Setting warm reset code and vector.\n");

	store_NMI_vector(&nmi_high, &nmi_low);

	smpboot_setup_warm_reset_vector(start_ip);
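	/*
	 * (The warm-reset vector in the BIOS data area now points at
	 * start_ip, so a CPU coming out of INIT via warm reset jumps
	 * straight into the trampoline.)
	 */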
	/*
	 * Be paranoid about clearing APIC errors.
	 */
	apic_write(APIC_ESR, 0);
	apic_read(APIC_ESR);

	/*
	 * Starting actual IPI sequence...
	 */
	boot_error = wakeup_secondary_cpu(apicid, start_ip);

	if (!boot_error) {
		/*
		 * allow APs to start initializing.
		 */
		Dprintk("Before Callout %d.\n", cpu);
		cpu_set(cpu, cpu_callout_map);
		Dprintk("After Callout %d.\n", cpu);

		/*
		 * Wait 5s total for a response
		 */
		for (timeout = 0; timeout < 50000; timeout++) {
			if (cpu_isset(cpu, cpu_callin_map))
				break;	/* It has booted */
			udelay(100);
		}

		if (cpu_isset(cpu, cpu_callin_map)) {
			/* number CPUs logically, starting from 1 (BSP is 0) */
			Dprintk("OK.\n");
			printk(KERN_INFO "CPU%d: ", cpu);
			print_cpu_info(&cpu_data(cpu));
			Dprintk("CPU has booted.\n");
		} else {
			boot_error = 1;
			if (*((volatile unsigned char *)trampoline_base)
					== 0xA5)
				/* trampoline started but...? */
				printk(KERN_ERR "Stuck ??\n");
			else
				/* trampoline code not run */
				printk(KERN_ERR "Not responding.\n");
			inquire_remote_apic(apicid);
		}
	}

	if (boot_error) {
		/* Try to put things back the way they were before ... */
		unmap_cpu_to_logical_apicid(cpu);
#ifdef CONFIG_X86_64
		clear_node_cpumask(cpu); /* was set by numa_add_cpu */
#endif
		cpu_clear(cpu, cpu_callout_map); /* was set by do_boot_cpu() */
		cpu_clear(cpu, cpu_initialized); /* was set by cpu_init() */
		cpu_clear(cpu, cpu_possible_map);
		cpu_clear(cpu, cpu_present_map);
		per_cpu(x86_cpu_to_apicid, cpu) = BAD_APICID;
	}

	/* mark "stuck" area as not stuck */
	*((volatile unsigned long *)trampoline_base) = 0;

	return boot_error;
}

int __cpuinit native_cpu_up(unsigned int cpu)
{
	int apicid = cpu_present_to_apicid(cpu);
	unsigned long flags;
	int err;

	WARN_ON(irqs_disabled());

	Dprintk("++++++++++++++++++++=_---CPU UP %u\n", cpu);

	if (apicid == BAD_APICID || apicid == boot_cpu_physical_apicid ||
	    !physid_isset(apicid, phys_cpu_present_map)) {
		printk(KERN_ERR "%s: bad cpu %d\n", __func__, cpu);
		return -EINVAL;
	}

	/*
	 * Already booted CPU?
	 */
	if (cpu_isset(cpu, cpu_callin_map)) {
		Dprintk("do_boot_cpu %d Already started\n", cpu);
		return -ENOSYS;
	}

	/*
	 * Save current MTRR state in case it was changed since early boot
	 * (e.g. by the ACPI SMI) to initialize new CPUs with MTRRs in sync:
	 */
	mtrr_save_state();

	per_cpu(cpu_state, cpu) = CPU_UP_PREPARE;

#ifdef CONFIG_X86_32
	/* init low mem mapping */
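	/*
	 * (The AP briefly runs at low identity-mapped addresses once
	 * paging is enabled, so the kernel PGD entries are mirrored into
	 * the low, user, slots of swapper_pg_dir during bring-up.)
	 */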
	clone_pgd_range(swapper_pg_dir, swapper_pg_dir + USER_PGD_PTRS,
			min_t(unsigned long, KERNEL_PGD_PTRS, USER_PGD_PTRS));
	flush_tlb_all();
#endif

	err = do_boot_cpu(apicid, cpu);
	if (err < 0) {
		Dprintk("do_boot_cpu failed %d\n", err);
		return err;
	}

	/*
	 * Check TSC synchronization with the AP (keep irqs disabled
	 * while doing so):
	 */
	local_irq_save(flags);
	check_tsc_sync_source(cpu);
	local_irq_restore(flags);

	while (!cpu_isset(cpu, cpu_online_map)) {
		cpu_relax();
		touch_nmi_watchdog();
	}

	return 0;
}

#ifdef CONFIG_HOTPLUG_CPU
void remove_siblinginfo(int cpu)
{
	int sibling;
	struct cpuinfo_x86 *c = &cpu_data(cpu);

	for_each_cpu_mask(sibling, per_cpu(cpu_core_map, cpu)) {
		cpu_clear(cpu, per_cpu(cpu_core_map, sibling));
		/*
		 * last thread sibling in this cpu core going down
		 */
		if (cpus_weight(per_cpu(cpu_sibling_map, cpu)) == 1)
			cpu_data(sibling).booted_cores--;
	}

	for_each_cpu_mask(sibling, per_cpu(cpu_sibling_map, cpu))
		cpu_clear(cpu, per_cpu(cpu_sibling_map, sibling));
	cpus_clear(per_cpu(cpu_sibling_map, cpu));
	cpus_clear(per_cpu(cpu_core_map, cpu));
	c->phys_proc_id = 0;
	c->cpu_core_id = 0;
	cpu_clear(cpu, cpu_sibling_setup_map);
}

int additional_cpus __initdata = -1;

static __init int setup_additional_cpus(char *s)
{
	return s && get_option(&s, &additional_cpus) ? 0 : -EINVAL;
}
early_param("additional_cpus", setup_additional_cpus);
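/* e.g. booting with "additional_cpus=2" reserves two extra possible CPUs */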

/*
 * cpu_possible_map should be static: it cannot change as CPUs
 * are onlined or offlined.  The reason is that per-cpu data structures
 * are allocated by some modules at init time, and they don't expect to
 * do this dynamically on cpu arrival/departure.
 * cpu_present_map, on the other hand, can change dynamically.
 * In case cpu_hotplug is not compiled in, we resort to the current
 * behaviour, which is cpu_possible == cpu_present.
 * - Ashok Raj
 *
 * Three ways to find out the number of additional hotplug CPUs:
 * - If the BIOS specified disabled CPUs in ACPI/mptables use that.
 * - The user can overwrite it with additional_cpus=NUM
 * - Otherwise don't reserve additional CPUs.
 * We do this because additional CPUs waste a lot of memory.
 * -AK
 */
__init void prefill_possible_map(void)
{
	int i;
	int possible;

	if (additional_cpus == -1) {
		if (disabled_cpus > 0)
			additional_cpus = disabled_cpus;
		else
			additional_cpus = 0;
	}
	possible = num_processors + additional_cpus;
	if (possible > NR_CPUS)
		possible = NR_CPUS;

	printk(KERN_INFO "SMP: Allowing %d CPUs, %d hotplug CPUs\n",
		possible, max_t(int, possible - num_processors, 0));

	for (i = 0; i < possible; i++)
		cpu_set(i, cpu_possible_map);
}

static void __ref remove_cpu_from_maps(int cpu)
{
	cpu_clear(cpu, cpu_online_map);
#ifdef CONFIG_X86_64
	cpu_clear(cpu, cpu_callout_map);
	cpu_clear(cpu, cpu_callin_map);
	/* was set by cpu_init() */
	clear_bit(cpu, (unsigned long *)&cpu_initialized);
	clear_node_cpumask(cpu);
#endif
}

int __cpu_disable(void)
{
	int cpu = smp_processor_id();

	/*
	 * Perhaps use cpufreq to drop frequency, but that could go
	 * into generic code.
	 *
	 * We won't take down the boot processor on i386 due to some
	 * interrupts only being able to be serviced by the BSP.
	 * Especially so if we're not using an IOAPIC	-zwane
	 */
	if (cpu == 0)
		return -EBUSY;

	if (nmi_watchdog == NMI_LOCAL_APIC)
		stop_apic_nmi_watchdog(NULL);
	clear_local_APIC();

	/*
	 * HACK:
	 * Allow any queued timer interrupts to get serviced
	 * This is only a temporary solution until we cleanup
	 * fixup_irqs as we do for IA64.
	 */
	local_irq_enable();
	mdelay(1);

	local_irq_disable();
	remove_siblinginfo(cpu);

	/* It's now safe to remove this processor from the online map */
	remove_cpu_from_maps(cpu);
	fixup_irqs(cpu_online_map);
	return 0;
}

void __cpu_die(unsigned int cpu)
{
	/* We don't do anything here: idle task is faking death itself. */
	unsigned int i;

	for (i = 0; i < 10; i++) {
		/* They ack this in play_dead by setting CPU_DEAD */
		if (per_cpu(cpu_state, cpu) == CPU_DEAD) {
			printk(KERN_INFO "CPU %d is now offline\n", cpu);
			if (1 == num_online_cpus())
				alternatives_smp_switch(0);
			return;
		}
		msleep(100);
	}
	printk(KERN_ERR "CPU %u didn't die...\n", cpu);
}
#else /* ... !CONFIG_HOTPLUG_CPU */
int __cpu_disable(void)
{
	return -ENOSYS;
}

void __cpu_die(unsigned int cpu)
{
	/* We said "no" in __cpu_disable */
	BUG();
}
#endif

/*
 * If the BIOS enumerates physical processors before logical,
 * maxcpus=N at enumeration-time can be used to disable HT.
 */
static int __init parse_maxcpus(char *arg)
{
	extern unsigned int maxcpus;

	maxcpus = simple_strtoul(arg, NULL, 0);
	return 0;
}
early_param("maxcpus", parse_maxcpus);