x86: get rid of cpucount
[linux-2.6-block.git] arch/x86/kernel/smpboot.c
#include <linux/init.h>
#include <linux/smp.h>
#include <linux/module.h>
#include <linux/sched.h>
#include <linux/percpu.h>
#include <linux/bootmem.h>

#include <asm/nmi.h>
#include <asm/irq.h>
#include <asm/smp.h>
#include <asm/cpu.h>
#include <asm/numa.h>

/* Number of siblings per CPU package */
int smp_num_siblings = 1;
EXPORT_SYMBOL(smp_num_siblings);

/* Last level cache ID of each logical CPU */
DEFINE_PER_CPU(u16, cpu_llc_id) = BAD_APICID;

/* bitmap of online cpus */
cpumask_t cpu_online_map __read_mostly;
EXPORT_SYMBOL(cpu_online_map);

cpumask_t cpu_callin_map;
cpumask_t cpu_callout_map;
cpumask_t cpu_possible_map;
EXPORT_SYMBOL(cpu_possible_map);

/* representing HT siblings of each logical CPU */
DEFINE_PER_CPU(cpumask_t, cpu_sibling_map);
EXPORT_PER_CPU_SYMBOL(cpu_sibling_map);

/* representing HT and core siblings of each logical CPU */
DEFINE_PER_CPU(cpumask_t, cpu_core_map);
EXPORT_PER_CPU_SYMBOL(cpu_core_map);

/* Per CPU bogomips and other parameters */
DEFINE_PER_CPU_SHARED_ALIGNED(struct cpuinfo_x86, cpu_info);
EXPORT_PER_CPU_SYMBOL(cpu_info);

/* Ready for x86_64; no harm for x86_32, since it will be overwritten after alloc. */
unsigned char *trampoline_base = __va(SMP_TRAMPOLINE_BASE);

/* representing cpus for which sibling maps can be computed */
static cpumask_t cpu_sibling_setup_map;

#ifdef CONFIG_X86_32
/* Set if we find a B stepping CPU */
int __cpuinitdata smp_b_stepping;
#endif

static void __cpuinit smp_apply_quirks(struct cpuinfo_x86 *c)
{
#ifdef CONFIG_X86_32
        /*
         * Mask B, Pentium, but not Pentium MMX
         */
        if (c->x86_vendor == X86_VENDOR_INTEL &&
            c->x86 == 5 &&
            c->x86_mask >= 1 && c->x86_mask <= 4 &&
            c->x86_model <= 3)
                /*
                 * Remember we have B step Pentia with bugs
                 */
                smp_b_stepping = 1;

        /*
         * Certain Athlons might work (for various values of 'work') in SMP
         * but they are not certified as MP capable.
         */
        if ((c->x86_vendor == X86_VENDOR_AMD) && (c->x86 == 6)) {

                if (num_possible_cpus() == 1)
                        goto valid_k7;

                /* Athlon 660/661 is valid. */
                if ((c->x86_model == 6) && ((c->x86_mask == 0) ||
                    (c->x86_mask == 1)))
                        goto valid_k7;

                /* Duron 670 is valid */
                if ((c->x86_model == 7) && (c->x86_mask == 0))
                        goto valid_k7;

                /*
                 * Athlon 662, Duron 671, and Athlon >model 7 have capability
                 * bit. It's worth noting that the A5 stepping (662) of some
                 * Athlon XP's have the MP bit set.
                 * See http://www.heise.de/newsticker/data/jow-18.10.01-000 for
                 * more.
                 */
                if (((c->x86_model == 6) && (c->x86_mask >= 2)) ||
                    ((c->x86_model == 7) && (c->x86_mask >= 1)) ||
                    (c->x86_model > 7))
                        if (cpu_has_mp)
                                goto valid_k7;

                /* If we get here, not a certified SMP capable AMD system. */
                add_taint(TAINT_UNSAFE_SMP);
        }

valid_k7:
        ;
#endif
}

/*
 * The bootstrap kernel entry code has set these up. Save them for
 * a given CPU
 */

void __cpuinit smp_store_cpu_info(int id)
{
        struct cpuinfo_x86 *c = &cpu_data(id);

        *c = boot_cpu_data;
        c->cpu_index = id;
        if (id != 0)
                identify_secondary_cpu(c);
        smp_apply_quirks(c);
}

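/*
 * Record the topology relationships for this cpu: thread siblings in
 * cpu_sibling_map, same-package CPUs in cpu_core_map, CPUs sharing the
 * last level cache in llc_shared_map, and keep booted_cores consistent.
 */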
void __cpuinit set_cpu_sibling_map(int cpu)
{
        int i;
        struct cpuinfo_x86 *c = &cpu_data(cpu);

        cpu_set(cpu, cpu_sibling_setup_map);

        if (smp_num_siblings > 1) {
                for_each_cpu_mask(i, cpu_sibling_setup_map) {
                        if (c->phys_proc_id == cpu_data(i).phys_proc_id &&
                            c->cpu_core_id == cpu_data(i).cpu_core_id) {
                                cpu_set(i, per_cpu(cpu_sibling_map, cpu));
                                cpu_set(cpu, per_cpu(cpu_sibling_map, i));
                                cpu_set(i, per_cpu(cpu_core_map, cpu));
                                cpu_set(cpu, per_cpu(cpu_core_map, i));
                                cpu_set(i, c->llc_shared_map);
                                cpu_set(cpu, cpu_data(i).llc_shared_map);
                        }
                }
        } else {
                cpu_set(cpu, per_cpu(cpu_sibling_map, cpu));
        }

        cpu_set(cpu, c->llc_shared_map);

        if (current_cpu_data.x86_max_cores == 1) {
                per_cpu(cpu_core_map, cpu) = per_cpu(cpu_sibling_map, cpu);
                c->booted_cores = 1;
                return;
        }

        for_each_cpu_mask(i, cpu_sibling_setup_map) {
                if (per_cpu(cpu_llc_id, cpu) != BAD_APICID &&
                    per_cpu(cpu_llc_id, cpu) == per_cpu(cpu_llc_id, i)) {
                        cpu_set(i, c->llc_shared_map);
                        cpu_set(cpu, cpu_data(i).llc_shared_map);
                }
                if (c->phys_proc_id == cpu_data(i).phys_proc_id) {
                        cpu_set(i, per_cpu(cpu_core_map, cpu));
                        cpu_set(cpu, per_cpu(cpu_core_map, i));
                        /*
                         * Does this new cpu bring up a new core?
                         */
                        if (cpus_weight(per_cpu(cpu_sibling_map, cpu)) == 1) {
                                /*
                                 * For each core in the package, increment
                                 * booted_cores for this new cpu.
                                 */
                                if (first_cpu(per_cpu(cpu_sibling_map, i)) == i)
                                        c->booted_cores++;
                                /*
                                 * Increment the core count for all
                                 * the other cpus in this package.
                                 */
                                if (i != cpu)
                                        cpu_data(i).booted_cores++;
                        } else if (i != cpu && !c->booted_cores)
                                c->booted_cores = cpu_data(i).booted_cores;
                }
        }
}

/* maps the cpu to the sched domain representing multi-core */
cpumask_t cpu_coregroup_map(int cpu)
{
        struct cpuinfo_x86 *c = &cpu_data(cpu);
        /*
         * For performance, we return the last level cache shared map;
         * for power savings, we return cpu_core_map.
         */
        if (sched_mc_power_savings || sched_smt_power_savings)
                return per_cpu(cpu_core_map, cpu);
        else
                return c->llc_shared_map;
}

/*
 * Currently trivial. Write the real->protected mode
 * bootstrap into the page concerned. The caller
 * has made sure it's suitably aligned.
 */

unsigned long __cpuinit setup_trampoline(void)
{
        memcpy(trampoline_base, trampoline_data,
               trampoline_end - trampoline_data);
        return virt_to_phys(trampoline_base);
}

#ifdef CONFIG_X86_32
/*
 * We are called very early to get the low memory for the
 * SMP bootup trampoline page.
 */
void __init smp_alloc_memory(void)
{
        trampoline_base = alloc_bootmem_low_pages(PAGE_SIZE);
        /*
         * Has to be in very low memory so we can execute
         * real-mode AP code.
         */
        if (__pa(trampoline_base) >= 0x9F000)
                BUG();
}
#endif

#ifdef CONFIG_HOTPLUG_CPU
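/*
 * Undo set_cpu_sibling_map() for a CPU that is going offline: drop it
 * from the sibling/core maps of the remaining CPUs and fix up their
 * booted_cores counts.
 */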
void remove_siblinginfo(int cpu)
{
        int sibling;
        struct cpuinfo_x86 *c = &cpu_data(cpu);

        for_each_cpu_mask(sibling, per_cpu(cpu_core_map, cpu)) {
                cpu_clear(cpu, per_cpu(cpu_core_map, sibling));
                /*
                 * last thread sibling in this cpu core going down
                 */
                if (cpus_weight(per_cpu(cpu_sibling_map, cpu)) == 1)
                        cpu_data(sibling).booted_cores--;
        }

        for_each_cpu_mask(sibling, per_cpu(cpu_sibling_map, cpu))
                cpu_clear(cpu, per_cpu(cpu_sibling_map, sibling));
        cpus_clear(per_cpu(cpu_sibling_map, cpu));
        cpus_clear(per_cpu(cpu_core_map, cpu));
        c->phys_proc_id = 0;
        c->cpu_core_id = 0;
        cpu_clear(cpu, cpu_sibling_setup_map);
}

int additional_cpus __initdata = -1;

static __init int setup_additional_cpus(char *s)
{
        return s && get_option(&s, &additional_cpus) ? 0 : -EINVAL;
}
early_param("additional_cpus", setup_additional_cpus);

/*
 * cpu_possible_map should be static: it cannot change as CPUs are
 * onlined or offlined. The reason is that per-cpu data structures
 * are allocated by some modules at init time and don't expect to
 * do this dynamically on CPU arrival/departure.
 * cpu_present_map, on the other hand, can change dynamically.
 * When CPU hotplug is not compiled in, we fall back to the current
 * behaviour, which is cpu_possible == cpu_present.
 * - Ashok Raj
 *
 * Three ways to find out the number of additional hotplug CPUs:
 * - If the BIOS specified disabled CPUs in ACPI/mptables, use that.
 * - The user can override it with additional_cpus=NUM.
 * - Otherwise don't reserve additional CPUs.
 * We do this because additional CPUs waste a lot of memory.
 * -AK
 */
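/*
 * Illustrative example (not from the original source): on a box with
 * num_processors == 4 enabled CPUs, disabled_cpus == 4 listed by the
 * BIOS, and no additional_cpus= override, additional_cpus becomes 4 and
 * possible = 4 + 4 = 8 (clamped to NR_CPUS), so bits 0-7 get set in
 * cpu_possible_map below.
 */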
__init void prefill_possible_map(void)
{
        int i;
        int possible;

        if (additional_cpus == -1) {
                if (disabled_cpus > 0)
                        additional_cpus = disabled_cpus;
                else
                        additional_cpus = 0;
        }
        possible = num_processors + additional_cpus;
        if (possible > NR_CPUS)
                possible = NR_CPUS;

        printk(KERN_INFO "SMP: Allowing %d CPUs, %d hotplug CPUs\n",
                possible, max_t(int, possible - num_processors, 0));

        for (i = 0; i < possible; i++)
                cpu_set(i, cpu_possible_map);
}

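/*
 * Take a dying CPU out of the global CPU bitmaps so nothing will try to
 * send it work; on 64-bit also clear its cpu_initialized bit and NUMA
 * node mapping.
 */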
static void __ref remove_cpu_from_maps(int cpu)
{
        cpu_clear(cpu, cpu_online_map);
#ifdef CONFIG_X86_64
        cpu_clear(cpu, cpu_callout_map);
        cpu_clear(cpu, cpu_callin_map);
        /* was set by cpu_init() */
        clear_bit(cpu, (unsigned long *)&cpu_initialized);
        clear_node_cpumask(cpu);
#endif
}

int __cpu_disable(void)
{
        int cpu = smp_processor_id();

        /*
         * Perhaps use cpufreq to drop frequency, but that could go
         * into generic code.
         *
         * We won't take down the boot processor on i386 due to some
         * interrupts only being able to be serviced by the BSP.
         * Especially so if we're not using an IOAPIC -zwane
         */
        if (cpu == 0)
                return -EBUSY;

        if (nmi_watchdog == NMI_LOCAL_APIC)
                stop_apic_nmi_watchdog(NULL);
        clear_local_APIC();

        /*
         * HACK:
         * Allow any queued timer interrupts to get serviced.
         * This is only a temporary solution until we clean up
         * fixup_irqs as we do for IA64.
         */
        local_irq_enable();
        mdelay(1);

        local_irq_disable();
        remove_siblinginfo(cpu);

        /* It's now safe to remove this processor from the online map */
        remove_cpu_from_maps(cpu);
        fixup_irqs(cpu_online_map);
        return 0;
}

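/*
 * Wait for the offlined CPU to mark itself CPU_DEAD in play_dead(),
 * polling up to ten times at 100 ms intervals; if only one CPU remains
 * online afterwards, switch the SMP alternatives back to their UP
 * variants.
 */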
void __cpu_die(unsigned int cpu)
{
        /* We don't do anything here: idle task is faking death itself. */
        unsigned int i;

        for (i = 0; i < 10; i++) {
                /* They ack this in play_dead by setting CPU_DEAD */
                if (per_cpu(cpu_state, cpu) == CPU_DEAD) {
                        printk(KERN_INFO "CPU %d is now offline\n", cpu);
                        if (1 == num_online_cpus())
                                alternatives_smp_switch(0);
                        return;
                }
                msleep(100);
        }
        printk(KERN_ERR "CPU %u didn't die...\n", cpu);
}
#else /* ... !CONFIG_HOTPLUG_CPU */
int __cpu_disable(void)
{
        return -ENOSYS;
}

void __cpu_die(unsigned int cpu)
{
        /* We said "no" in __cpu_disable */
        BUG();
}
#endif

/*
 * If the BIOS enumerates physical processors before logical,
 * maxcpus=N at enumeration-time can be used to disable HT.
 */
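/*
 * Illustrative example (not from the original source): on a two-socket
 * HyperThreading system whose BIOS enumerates both physical packages
 * first, booting with "maxcpus=2" brings up one thread per package,
 * effectively disabling HT.
 */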
static int __init parse_maxcpus(char *arg)
{
        extern unsigned int maxcpus;

        maxcpus = simple_strtoul(arg, NULL, 0);
        return 0;
}
early_param("maxcpus", parse_maxcpus);