x86: do not clear cpu_online_map
arch/x86/kernel/smpboot.c
#include <linux/init.h>
#include <linux/smp.h>
#include <linux/module.h>
#include <linux/sched.h>

/* Number of siblings per CPU package */
int smp_num_siblings = 1;
EXPORT_SYMBOL(smp_num_siblings);

/* Last level cache ID of each logical CPU */
DEFINE_PER_CPU(u16, cpu_llc_id) = BAD_APICID;

/* bitmap of online cpus */
cpumask_t cpu_online_map __read_mostly;
EXPORT_SYMBOL(cpu_online_map);

cpumask_t cpu_callin_map;
cpumask_t cpu_callout_map;
cpumask_t cpu_possible_map;
EXPORT_SYMBOL(cpu_possible_map);

/* representing HT siblings of each logical CPU */
DEFINE_PER_CPU(cpumask_t, cpu_sibling_map);
EXPORT_PER_CPU_SYMBOL(cpu_sibling_map);

/* representing HT and core siblings of each logical CPU */
DEFINE_PER_CPU(cpumask_t, cpu_core_map);
EXPORT_PER_CPU_SYMBOL(cpu_core_map);

/* Per CPU bogomips and other parameters */
DEFINE_PER_CPU_SHARED_ALIGNED(struct cpuinfo_x86, cpu_info);
EXPORT_PER_CPU_SYMBOL(cpu_info);

/* representing cpus for which sibling maps can be computed */
static cpumask_t cpu_sibling_setup_map;

void __cpuinit set_cpu_sibling_map(int cpu)
{
	int i;
	struct cpuinfo_x86 *c = &cpu_data(cpu);

	cpu_set(cpu, cpu_sibling_setup_map);

	if (smp_num_siblings > 1) {
		for_each_cpu_mask(i, cpu_sibling_setup_map) {
			if (c->phys_proc_id == cpu_data(i).phys_proc_id &&
			    c->cpu_core_id == cpu_data(i).cpu_core_id) {
				cpu_set(i, per_cpu(cpu_sibling_map, cpu));
				cpu_set(cpu, per_cpu(cpu_sibling_map, i));
				cpu_set(i, per_cpu(cpu_core_map, cpu));
				cpu_set(cpu, per_cpu(cpu_core_map, i));
				cpu_set(i, c->llc_shared_map);
				cpu_set(cpu, cpu_data(i).llc_shared_map);
			}
		}
	} else {
		cpu_set(cpu, per_cpu(cpu_sibling_map, cpu));
	}

	cpu_set(cpu, c->llc_shared_map);

	if (current_cpu_data.x86_max_cores == 1) {
		per_cpu(cpu_core_map, cpu) = per_cpu(cpu_sibling_map, cpu);
		c->booted_cores = 1;
		return;
	}

	for_each_cpu_mask(i, cpu_sibling_setup_map) {
		if (per_cpu(cpu_llc_id, cpu) != BAD_APICID &&
		    per_cpu(cpu_llc_id, cpu) == per_cpu(cpu_llc_id, i)) {
			cpu_set(i, c->llc_shared_map);
			cpu_set(cpu, cpu_data(i).llc_shared_map);
		}
		if (c->phys_proc_id == cpu_data(i).phys_proc_id) {
			cpu_set(i, per_cpu(cpu_core_map, cpu));
			cpu_set(cpu, per_cpu(cpu_core_map, i));
			/*
			 * Does this new cpu bring up a new core?
			 */
			if (cpus_weight(per_cpu(cpu_sibling_map, cpu)) == 1) {
				/*
				 * for each core in package, increment
				 * the booted_cores for this new cpu
				 */
				if (first_cpu(per_cpu(cpu_sibling_map, i)) == i)
					c->booted_cores++;
				/*
				 * increment the core count for all
				 * the other cpus in this package
				 */
				if (i != cpu)
					cpu_data(i).booted_cores++;
			} else if (i != cpu && !c->booted_cores)
				c->booted_cores = cpu_data(i).booted_cores;
		}
	}
}

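/*
 * Illustrative sketch (not part of the original file): the secondary
 * CPU bringup path is expected to publish its cpuinfo first and then
 * derive the topology masks, roughly as below. The helper name is
 * hypothetical; only smp_store_cpu_info() and set_cpu_sibling_map()
 * are real.
 */
#if 0	/* example only */
static void __cpuinit example_bringup_topology(void)
{
	int cpu = smp_processor_id();

	smp_store_cpu_info(cpu);	/* fill cpu_data(cpu) for this CPU */
	set_cpu_sibling_map(cpu);	/* then compute sibling/core masks */
}
#endif
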
/* maps the cpu to the sched domain representing multi-core */
cpumask_t cpu_coregroup_map(int cpu)
{
	struct cpuinfo_x86 *c = &cpu_data(cpu);
	/*
	 * For performance, we return the last level cache shared map.
	 * For power savings, we return cpu_core_map.
	 */
	if (sched_mc_power_savings || sched_smt_power_savings)
		return per_cpu(cpu_core_map, cpu);
	else
		return c->llc_shared_map;
}

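/*
 * Illustrative sketch (example only): a caller building the multi-core
 * scheduler domain would consume the returned mask roughly as below;
 * the assertion merely documents that a CPU is always in its own group.
 */
#if 0	/* example only */
static void example_coregroup(int cpu)
{
	cpumask_t span = cpu_coregroup_map(cpu);

	BUG_ON(!cpu_isset(cpu, span));	/* cpu belongs to its own group */
}
#endif
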
#ifdef CONFIG_HOTPLUG_CPU
void remove_siblinginfo(int cpu)
{
	int sibling;
	struct cpuinfo_x86 *c = &cpu_data(cpu);

	for_each_cpu_mask(sibling, per_cpu(cpu_core_map, cpu)) {
		cpu_clear(cpu, per_cpu(cpu_core_map, sibling));
		/*
		 * last thread sibling in this cpu core going down
		 */
		if (cpus_weight(per_cpu(cpu_sibling_map, cpu)) == 1)
			cpu_data(sibling).booted_cores--;
	}

	for_each_cpu_mask(sibling, per_cpu(cpu_sibling_map, cpu))
		cpu_clear(cpu, per_cpu(cpu_sibling_map, sibling));
	cpus_clear(per_cpu(cpu_sibling_map, cpu));
	cpus_clear(per_cpu(cpu_core_map, cpu));
	c->phys_proc_id = 0;
	c->cpu_core_id = 0;
	cpu_clear(cpu, cpu_sibling_setup_map);
}
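
/*
 * Illustrative sketch (example only): the hotplug offline path is
 * expected to tear the topology down before the CPU disappears from
 * the online map, roughly as below. The helper name is hypothetical.
 */
#if 0	/* example only */
static void example_offline_topology(int cpu)
{
	remove_siblinginfo(cpu);	/* drop cpu from sibling/core masks */
	/* ...architecture code then removes cpu from cpu_online_map */
}
#endif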

int additional_cpus __initdata = -1;

static __init int setup_additional_cpus(char *s)
{
	return s && get_option(&s, &additional_cpus) ? 0 : -EINVAL;
}
early_param("additional_cpus", setup_additional_cpus);
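
/*
 * Usage (illustrative): booting with "additional_cpus=2" on the kernel
 * command line reserves room in cpu_possible_map for two hot-pluggable
 * CPUs beyond the processors enumerated at boot.
 */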

/*
 * cpu_possible_map should be static; it cannot change as CPUs are
 * onlined or offlined. The reason is that per-cpu data structures
 * are allocated by some modules at init time, and they don't expect
 * to do this dynamically on cpu arrival/departure.
 * cpu_present_map, on the other hand, can change dynamically.
 * If CPU hotplug is not compiled in, we resort to the current
 * behaviour, which is cpu_possible == cpu_present.
 * - Ashok Raj
 *
 * Three ways to find out the number of additional hotplug CPUs:
 * - If the BIOS specified disabled CPUs in ACPI/mptables, use that.
 * - The user can override it with additional_cpus=NUM.
 * - Otherwise don't reserve additional CPUs.
 * We do this because additional CPUs waste a lot of memory.
 * -AK
 */
__init void prefill_possible_map(void)
{
	int i;
	int possible;

	if (additional_cpus == -1) {
		if (disabled_cpus > 0)
			additional_cpus = disabled_cpus;
		else
			additional_cpus = 0;
	}
	possible = num_processors + additional_cpus;
	if (possible > NR_CPUS)
		possible = NR_CPUS;

	printk(KERN_INFO "SMP: Allowing %d CPUs, %d hotplug CPUs\n",
		possible, max_t(int, possible - num_processors, 0));

	for (i = 0; i < possible; i++)
		cpu_set(i, cpu_possible_map);
}
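
/*
 * Worked example (illustrative): with num_processors == 4,
 * disabled_cpus == 2 and no "additional_cpus=" override, additional_cpus
 * becomes 2, so possible == 6 and CPUs 0-5 are set in cpu_possible_map
 * (always capped at NR_CPUS).
 */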
#endif