x86: call identify_secondary_cpu in smp_store_cpu_info
[linux-2.6-block.git] arch/x86/kernel/smpboot.c
#include <linux/init.h>
#include <linux/smp.h>
#include <linux/module.h>
#include <linux/sched.h>
#include <linux/percpu.h>
#include <linux/bootmem.h>

#include <asm/nmi.h>
#include <asm/irq.h>
#include <asm/smp.h>
#include <asm/cpu.h>
#include <asm/numa.h>

/* Number of siblings per CPU package */
int smp_num_siblings = 1;
EXPORT_SYMBOL(smp_num_siblings);

/* Last level cache ID of each logical CPU */
DEFINE_PER_CPU(u16, cpu_llc_id) = BAD_APICID;

/* bitmap of online cpus */
cpumask_t cpu_online_map __read_mostly;
EXPORT_SYMBOL(cpu_online_map);

cpumask_t cpu_callin_map;
cpumask_t cpu_callout_map;
cpumask_t cpu_possible_map;
EXPORT_SYMBOL(cpu_possible_map);

/* representing HT siblings of each logical CPU */
DEFINE_PER_CPU(cpumask_t, cpu_sibling_map);
EXPORT_PER_CPU_SYMBOL(cpu_sibling_map);

/* representing HT and core siblings of each logical CPU */
DEFINE_PER_CPU(cpumask_t, cpu_core_map);
EXPORT_PER_CPU_SYMBOL(cpu_core_map);

/* Per CPU bogomips and other parameters */
DEFINE_PER_CPU_SHARED_ALIGNED(struct cpuinfo_x86, cpu_info);
EXPORT_PER_CPU_SYMBOL(cpu_info);

/* ready for x86_64, no harm for x86, since it will overwrite after alloc */
unsigned char *trampoline_base = __va(SMP_TRAMPOLINE_BASE);

/* representing cpus for which sibling maps can be computed */
static cpumask_t cpu_sibling_setup_map;

void __cpuinit set_cpu_sibling_map(int cpu)
{
        int i;
        struct cpuinfo_x86 *c = &cpu_data(cpu);

        cpu_set(cpu, cpu_sibling_setup_map);

        if (smp_num_siblings > 1) {
                for_each_cpu_mask(i, cpu_sibling_setup_map) {
                        if (c->phys_proc_id == cpu_data(i).phys_proc_id &&
                            c->cpu_core_id == cpu_data(i).cpu_core_id) {
                                cpu_set(i, per_cpu(cpu_sibling_map, cpu));
                                cpu_set(cpu, per_cpu(cpu_sibling_map, i));
                                cpu_set(i, per_cpu(cpu_core_map, cpu));
                                cpu_set(cpu, per_cpu(cpu_core_map, i));
                                cpu_set(i, c->llc_shared_map);
                                cpu_set(cpu, cpu_data(i).llc_shared_map);
                        }
                }
        } else {
                cpu_set(cpu, per_cpu(cpu_sibling_map, cpu));
        }

        cpu_set(cpu, c->llc_shared_map);

        if (current_cpu_data.x86_max_cores == 1) {
                per_cpu(cpu_core_map, cpu) = per_cpu(cpu_sibling_map, cpu);
                c->booted_cores = 1;
                return;
        }

        for_each_cpu_mask(i, cpu_sibling_setup_map) {
                if (per_cpu(cpu_llc_id, cpu) != BAD_APICID &&
                    per_cpu(cpu_llc_id, cpu) == per_cpu(cpu_llc_id, i)) {
                        cpu_set(i, c->llc_shared_map);
                        cpu_set(cpu, cpu_data(i).llc_shared_map);
                }
                if (c->phys_proc_id == cpu_data(i).phys_proc_id) {
                        cpu_set(i, per_cpu(cpu_core_map, cpu));
                        cpu_set(cpu, per_cpu(cpu_core_map, i));
                        /*
                         * Does this new cpu bringup a new core?
                         */
                        if (cpus_weight(per_cpu(cpu_sibling_map, cpu)) == 1) {
                                /*
                                 * for each core in package, increment
                                 * the booted_cores for this new cpu
                                 */
                                if (first_cpu(per_cpu(cpu_sibling_map, i)) == i)
                                        c->booted_cores++;
                                /*
                                 * increment the core count for all
                                 * the other cpus in this package
                                 */
                                if (i != cpu)
                                        cpu_data(i).booted_cores++;
                        } else if (i != cpu && !c->booted_cores)
                                c->booted_cores = cpu_data(i).booted_cores;
                }
        }
}
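
/*
 * Worked example (hypothetical single-package box with two cores and
 * HT, logical cpus 0-3 where {0,1} and {2,3} are thread siblings):
 * once all four cpus have passed through set_cpu_sibling_map(),
 *
 *     per_cpu(cpu_sibling_map, 0) == { 0, 1 }
 *     per_cpu(cpu_core_map, 0)    == { 0, 1, 2, 3 }
 *     cpu_data(0).booted_cores    == 2
 *
 * llc_shared_map is filled in the same way, keyed off cpu_llc_id, so
 * it reflects which cpus share the last level cache.
 */
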
/* maps the cpu to the sched domain representing multi-core */
cpumask_t cpu_coregroup_map(int cpu)
{
        struct cpuinfo_x86 *c = &cpu_data(cpu);
        /*
         * For performance, we return the last level cache shared map.
         * For power savings, we return cpu_core_map.
         */
        if (sched_mc_power_savings || sched_smt_power_savings)
                return per_cpu(cpu_core_map, cpu);
        else
                return c->llc_shared_map;
}
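
/*
 * Illustration: with both power-savings knobs clear, a cpu is grouped
 * only with the cpus it shares a last level cache with, which favours
 * cache affinity; with sched_mc_power_savings or sched_smt_power_savings
 * set, the group widens to the whole package (cpu_core_map) so the
 * scheduler can consolidate load onto fewer packages.
 */
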
/*
 * Currently trivial. Write the real->protected mode
 * bootstrap into the page concerned. The caller
 * has made sure it's suitably aligned.
 */

unsigned long __cpuinit setup_trampoline(void)
{
        memcpy(trampoline_base, trampoline_data,
                trampoline_end - trampoline_data);
        return virt_to_phys(trampoline_base);
}
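
/*
 * Typical caller, sketched: the cpu bringup code copies the trampoline
 * and hands the returned physical address to the AP as its real-mode
 * start address, roughly
 *
 *     start_eip = setup_trampoline();
 *     ...
 *     wakeup_secondary_cpu(apicid, start_eip);
 *
 * (helper name as in the 32-bit do_boot_cpu() path; shown for
 *  illustration only)
 */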

#ifdef CONFIG_X86_32
/*
 * We are called very early to get the low memory for the
 * SMP bootup trampoline page.
 */
void __init smp_alloc_memory(void)
{
        trampoline_base = alloc_bootmem_low_pages(PAGE_SIZE);
        /*
         * Has to be in very low memory so we can execute
         * real-mode AP code.
         */
        if (__pa(trampoline_base) >= 0x9F000)
                BUG();
}
#endif

#ifdef CONFIG_HOTPLUG_CPU
void remove_siblinginfo(int cpu)
{
        int sibling;
        struct cpuinfo_x86 *c = &cpu_data(cpu);

        for_each_cpu_mask(sibling, per_cpu(cpu_core_map, cpu)) {
                cpu_clear(cpu, per_cpu(cpu_core_map, sibling));
                /*
                 * last thread sibling in this cpu core going down
                 */
                if (cpus_weight(per_cpu(cpu_sibling_map, cpu)) == 1)
                        cpu_data(sibling).booted_cores--;
        }

        for_each_cpu_mask(sibling, per_cpu(cpu_sibling_map, cpu))
                cpu_clear(cpu, per_cpu(cpu_sibling_map, sibling));
        cpus_clear(per_cpu(cpu_sibling_map, cpu));
        cpus_clear(per_cpu(cpu_core_map, cpu));
        c->phys_proc_id = 0;
        c->cpu_core_id = 0;
        cpu_clear(cpu, cpu_sibling_setup_map);
}
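
/*
 * remove_siblinginfo() is the teardown counterpart of
 * set_cpu_sibling_map(): the dying cpu is dropped from every
 * core/sibling mask it was added to, and booted_cores on the remaining
 * cpus of the package is decremented once the last thread sibling of a
 * core goes away.
 */
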
int additional_cpus __initdata = -1;

static __init int setup_additional_cpus(char *s)
{
        return s && get_option(&s, &additional_cpus) ? 0 : -EINVAL;
}
early_param("additional_cpus", setup_additional_cpus);

/*
 * cpu_possible_map should be static: it cannot change as cpus are
 * onlined or offlined. The reason is that per-cpu data structures
 * are allocated by some modules at init time, and they don't expect
 * to handle this dynamically on cpu arrival/departure.
 * cpu_present_map, on the other hand, can change dynamically.
 * When cpu hotplug is not compiled in, we resort to the current
 * behaviour, which is cpu_possible == cpu_present.
 * - Ashok Raj
 *
 * Three ways to find out the number of additional hotplug CPUs:
 * - If the BIOS specified disabled CPUs in ACPI/mptables, use that.
 * - The user can overwrite it with additional_cpus=NUM.
 * - Otherwise don't reserve additional CPUs.
 * We do this because additional CPUs waste a lot of memory.
 * -AK
 */
__init void prefill_possible_map(void)
{
        int i;
        int possible;

        if (additional_cpus == -1) {
                if (disabled_cpus > 0)
                        additional_cpus = disabled_cpus;
                else
                        additional_cpus = 0;
        }
        possible = num_processors + additional_cpus;
        if (possible > NR_CPUS)
                possible = NR_CPUS;

        printk(KERN_INFO "SMP: Allowing %d CPUs, %d hotplug CPUs\n",
                possible, max_t(int, possible - num_processors, 0));

        for (i = 0; i < possible; i++)
                cpu_set(i, cpu_possible_map);
}
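
/*
 * Worked example (hypothetical): a machine that boots with
 * num_processors == 4 and another 4 cpus marked disabled in
 * ACPI/mptables, with no additional_cpus= override, ends up with
 *
 *     additional_cpus = disabled_cpus = 4
 *     possible        = 4 + 4 = 8    (capped at NR_CPUS)
 *
 * logs something like "SMP: Allowing 8 CPUs, 4 hotplug CPUs", and sets
 * cpus 0-7 in cpu_possible_map.
 */
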
static void __ref remove_cpu_from_maps(int cpu)
{
        cpu_clear(cpu, cpu_online_map);
#ifdef CONFIG_X86_64
        cpu_clear(cpu, cpu_callout_map);
        cpu_clear(cpu, cpu_callin_map);
        /* was set by cpu_init() */
        clear_bit(cpu, (unsigned long *)&cpu_initialized);
        clear_node_cpumask(cpu);
#endif
}

int __cpu_disable(void)
{
        int cpu = smp_processor_id();

        /*
         * Perhaps use cpufreq to drop frequency, but that could go
         * into generic code.
         *
         * We won't take down the boot processor on i386 because some
         * interrupts can only be serviced by the BSP, especially if
         * we're not using an IOAPIC. -zwane
         */
        if (cpu == 0)
                return -EBUSY;

        if (nmi_watchdog == NMI_LOCAL_APIC)
                stop_apic_nmi_watchdog(NULL);
        clear_local_APIC();

        /*
         * HACK:
         * Allow any queued timer interrupts to get serviced.
         * This is only a temporary solution until we clean up
         * fixup_irqs as we do for IA64.
         */
        local_irq_enable();
        mdelay(1);

        local_irq_disable();
        remove_siblinginfo(cpu);

        /* It's now safe to remove this processor from the online map */
        remove_cpu_from_maps(cpu);
        fixup_irqs(cpu_online_map);
        return 0;
}
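
/*
 * Rough calling sequence, as driven by the generic hotplug core
 * (sketch, not a verbatim quote of kernel/cpu.c):
 *
 *     err = __cpu_disable();      // runs on the cpu going down
 *     if (!err)
 *             __cpu_die(cpu);     // runs on another cpu, waits below
 *                                 // for play_dead() to post CPU_DEAD
 */
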
void __cpu_die(unsigned int cpu)
{
        /* We don't do anything here: idle task is faking death itself. */
        unsigned int i;

        for (i = 0; i < 10; i++) {
                /* They ack this in play_dead by setting CPU_DEAD */
                if (per_cpu(cpu_state, cpu) == CPU_DEAD) {
                        printk(KERN_INFO "CPU %d is now offline\n", cpu);
                        if (1 == num_online_cpus())
                                alternatives_smp_switch(0);
                        return;
                }
                msleep(100);
        }
        printk(KERN_ERR "CPU %u didn't die...\n", cpu);
}
#else /* ... !CONFIG_HOTPLUG_CPU */
int __cpu_disable(void)
{
        return -ENOSYS;
}

void __cpu_die(unsigned int cpu)
{
        /* We said "no" in __cpu_disable */
        BUG();
}
#endif

/*
 * If the BIOS enumerates physical processors before logical,
 * maxcpus=N at enumeration-time can be used to disable HT.
 */
static int __init parse_maxcpus(char *arg)
{
        extern unsigned int maxcpus;

        maxcpus = simple_strtoul(arg, NULL, 0);
        return 0;
}
early_param("maxcpus", parse_maxcpus);
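
/*
 * Example: booting with "maxcpus=2" on a hypothetical two-package HT
 * system whose BIOS enumerates the two physical processors first brings
 * up one cpu per package and leaves the sibling threads offline,
 * effectively disabling HT, as the comment above describes.
 */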