#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/init.h>
#include <linux/bootmem.h>
#include <linux/percpu.h>
#include <linux/kexec.h>
#include <linux/crash_dump.h>
#include <linux/smp.h>
#include <linux/topology.h>
#include <asm/sections.h>
#include <asm/processor.h>
#include <asm/setup.h>
#include <asm/mpspec.h>
#include <asm/apicdef.h>
#include <asm/highmem.h>
#include <asm/proto.h>
#include <asm/cpumask.h>

#ifdef CONFIG_DEBUG_PER_CPU_MAPS
# define DBG(x...) printk(KERN_DEBUG x)
#else
# define DBG(x...)
#endif

#ifdef CONFIG_X86_LOCAL_APIC
unsigned int num_processors;
unsigned disabled_cpus __cpuinitdata;
/* Processor that is doing the boot up */
unsigned int boot_cpu_physical_apicid = -1U;
EXPORT_SYMBOL(boot_cpu_physical_apicid);
unsigned int max_physical_apicid;

/* Bitmask of physically existing CPUs */
physid_mask_t phys_cpu_present_map;
#endif

/*
 * Map cpu index to physical APIC ID
 */
DEFINE_EARLY_PER_CPU(u16, x86_cpu_to_apicid, BAD_APICID);
DEFINE_EARLY_PER_CPU(u16, x86_bios_cpu_apicid, BAD_APICID);
EXPORT_EARLY_PER_CPU_SYMBOL(x86_cpu_to_apicid);
EXPORT_EARLY_PER_CPU_SYMBOL(x86_bios_cpu_apicid);
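
/*
 * For reference: DEFINE_EARLY_PER_CPU() pairs a regular percpu variable
 * with a static boot-time array that is usable before the percpu areas
 * exist.  A rough sketch of the expansion for the first map above (see
 * asm/percpu.h for the authoritative macro):
 */
#if 0	/* illustrative sketch only, not compiled */
DEFINE_PER_CPU(u16, x86_cpu_to_apicid) = BAD_APICID;
u16 x86_cpu_to_apicid_early_map[NR_CPUS] __initdata =
		{ [0 ... NR_CPUS - 1] = BAD_APICID };
u16 *x86_cpu_to_apicid_early_ptr = x86_cpu_to_apicid_early_map;
#endif
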
#if defined(CONFIG_NUMA) && defined(CONFIG_X86_64)
#define X86_64_NUMA 1	/* (used later) */

/*
 * Map cpu index to node index
 */
DEFINE_EARLY_PER_CPU(int, x86_cpu_to_node_map, NUMA_NO_NODE);
EXPORT_EARLY_PER_CPU_SYMBOL(x86_cpu_to_node_map);

/*
 * Which logical CPUs are on which nodes
 */
cpumask_t *node_to_cpumask_map;
EXPORT_SYMBOL(node_to_cpumask_map);

/*
 * Setup node_to_cpumask_map
 */
static void __init setup_node_to_cpumask_map(void);

#else
static inline void setup_node_to_cpumask_map(void) { }
#endif

#ifdef CONFIG_X86_64
void __cpuinit load_pda_offset(int cpu)
{
	/* Memory clobbers used to order pda/percpu accesses */
	mb();
	wrmsrl(MSR_GS_BASE, cpu_pda(cpu));
	mb();
}

#endif /* CONFIG_X86_64 */

#ifdef CONFIG_X86_64

/* correctly size the local cpu masks */
static void __init setup_cpu_local_masks(void)
{
	alloc_bootmem_cpumask_var(&cpu_initialized_mask);
	alloc_bootmem_cpumask_var(&cpu_callin_mask);
	alloc_bootmem_cpumask_var(&cpu_callout_mask);
	alloc_bootmem_cpumask_var(&cpu_sibling_setup_mask);
}

#else /* CONFIG_X86_32 */

static inline void setup_cpu_local_masks(void)
{
}

#endif /* CONFIG_X86_32 */
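
/*
 * Background (a sketch of the usual cpumask_var_t contract, not the
 * verbatim kernel definitions): with CONFIG_CPUMASK_OFFSTACK=y the type
 * is a pointer and needs real storage before first use, which is why
 * the masks above are allocated from bootmem; without it the type is a
 * one-element array and the allocation degenerates to a no-op.
 */
#if 0	/* illustrative sketch only, not compiled */
#ifdef CONFIG_CPUMASK_OFFSTACK
typedef struct cpumask *cpumask_var_t;
#else
typedef struct cpumask cpumask_var_t[1];
#endif
#endif
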
#ifdef CONFIG_HAVE_SETUP_PER_CPU_AREA
/*
 * Copy data used in early init routines from the initial arrays to the
 * per cpu data areas.  These arrays then become expendable and the
 * *_early_ptr's are zeroed indicating that the static arrays are gone.
 */
static void __init setup_per_cpu_maps(void)
{
	int cpu;

	for_each_possible_cpu(cpu) {
		per_cpu(x86_cpu_to_apicid, cpu) =
				early_per_cpu_map(x86_cpu_to_apicid, cpu);
		per_cpu(x86_bios_cpu_apicid, cpu) =
				early_per_cpu_map(x86_bios_cpu_apicid, cpu);
#ifdef X86_64_NUMA
		per_cpu(x86_cpu_to_node_map, cpu) =
				early_per_cpu_map(x86_cpu_to_node_map, cpu);
#endif
	}

	/* indicate the early static arrays will soon be gone */
	early_per_cpu_ptr(x86_cpu_to_apicid) = NULL;
	early_per_cpu_ptr(x86_bios_cpu_apicid) = NULL;
#ifdef X86_64_NUMA
	early_per_cpu_ptr(x86_cpu_to_node_map) = NULL;
#endif
}
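
/*
 * Why zeroing the *_early_ptr's is enough to retire the static arrays:
 * the early_per_cpu() accessor dispatches on that pointer.  Roughly (a
 * sketch; the real macro lives in asm/percpu.h):
 */
#if 0	/* illustrative sketch only, not compiled */
#define early_per_cpu(_name, _cpu)				\
	*(early_per_cpu_ptr(_name) ?				\
		&early_per_cpu_ptr(_name)[_cpu] :		\
		&per_cpu(_name, _cpu))
#endif
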
#ifdef CONFIG_X86_32
/*
 * Great future not-so-futuristic plan: make i386 and x86_64 do it
 * the same way
 */
unsigned long __per_cpu_offset[NR_CPUS] __read_mostly;
EXPORT_SYMBOL(__per_cpu_offset);
#endif

/*
 * Great future plan:
 * Declare PDA itself and support (irqstack, tss, pgd) as per cpu data.
 * Always point %gs to its beginning
 */
void __init setup_per_cpu_areas(void)
{
	ssize_t size, old_size;
	char *ptr;
	int cpu;
	unsigned long align = 1;

	/* Copy section for each CPU (we discard the original) */
	old_size = PERCPU_ENOUGH_ROOM;
	align = max_t(unsigned long, PAGE_SIZE, align);
	size = roundup(old_size, align);

	pr_info("NR_CPUS:%d nr_cpumask_bits:%d nr_cpu_ids:%d nr_node_ids:%d\n",
		NR_CPUS, nr_cpumask_bits, nr_cpu_ids, nr_node_ids);

	pr_info("PERCPU: Allocating %zd bytes of per cpu data\n", size);

	for_each_possible_cpu(cpu) {
#ifndef CONFIG_NEED_MULTIPLE_NODES
		ptr = __alloc_bootmem(size, align,
				      __pa(MAX_DMA_ADDRESS));
#else
		int node = early_cpu_to_node(cpu);
		if (!node_online(node) || !NODE_DATA(node)) {
			ptr = __alloc_bootmem(size, align,
					      __pa(MAX_DMA_ADDRESS));
			pr_info("cpu %d has no node %d or node-local memory\n",
				cpu, node);
			pr_debug("per cpu data for cpu%d at %016lx\n",
				 cpu, __pa(ptr));
		} else {
			ptr = __alloc_bootmem_node(NODE_DATA(node), size, align,
						   __pa(MAX_DMA_ADDRESS));
			pr_debug("per cpu data for cpu%d on node%d at %016lx\n",
				 cpu, node, __pa(ptr));
		}
#endif

		memcpy(ptr, __per_cpu_load, __per_cpu_end - __per_cpu_start);
#ifdef CONFIG_X86_64
		cpu_pda(cpu) = (void *)ptr;

		/*
		 * CPU0 modified pda in the init data area, reload pda
		 * offset for CPU0 and clear the area for others.
		 */
		if (cpu == 0)
			load_pda_offset(0);
		else
			memset(cpu_pda(cpu), 0, sizeof(*cpu_pda(cpu)));
#endif
		per_cpu_offset(cpu) = ptr - __per_cpu_start;

		DBG("PERCPU: cpu %4d %p\n", cpu, ptr);
	}

	/* Setup percpu data maps */
	setup_per_cpu_maps();

	/* Setup node to cpumask map */
	setup_node_to_cpumask_map();

	/* Setup cpu initialized, callin, callout masks */
	setup_cpu_local_masks();
}
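
/*
 * After the loop above, per_cpu_offset(cpu) holds the distance from the
 * linked .data.percpu section to that cpu's private copy.  A hypothetical
 * accessor built on nothing but that offset would look roughly like this
 * (the real per_cpu() macro adds compiler-visibility tricks on top):
 */
#if 0	/* illustrative sketch only, not compiled */
#define my_per_cpu(var, cpu) \
	(*(typeof(var) *)((char *)&(var) + per_cpu_offset(cpu)))
#endif
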
#endif

#ifdef X86_64_NUMA

/*
 * Allocate node_to_cpumask_map based on number of available nodes
 * Requires node_possible_map to be valid.
 *
 * Note: node_to_cpumask() is not valid until after this is done.
 * (Use CONFIG_DEBUG_PER_CPU_MAPS to check this.)
 */
static void __init setup_node_to_cpumask_map(void)
{
	unsigned int node, num = 0;
	cpumask_t *map;

	/* setup nr_node_ids if not done yet */
	if (nr_node_ids == MAX_NUMNODES) {
		for_each_node_mask(node, node_possible_map)
			num = node;
		nr_node_ids = num + 1;
	}

	/* allocate the map */
	map = alloc_bootmem_low(nr_node_ids * sizeof(cpumask_t));
	DBG("node_to_cpumask_map at %p for %d nodes\n", map, nr_node_ids);

	pr_debug("Node to cpumask map at %p for %d nodes\n",
		 map, nr_node_ids);

	/* node_to_cpumask() will now work */
	node_to_cpumask_map = map;
}
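
/*
 * Worked example for the nr_node_ids computation above: with
 * node_possible_map = { 0, 1, 3 }, the loop leaves num == 3, so
 * nr_node_ids becomes 4 and four cpumask_t slots are allocated.  The map
 * is indexed by node id, so a slot exists even for the absent node 2.
 */
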
void __cpuinit numa_set_node(int cpu, int node)
{
	int *cpu_to_node_map = early_per_cpu_ptr(x86_cpu_to_node_map);

	/* early setting, no percpu area yet */
	if (cpu_to_node_map) {
		cpu_to_node_map[cpu] = node;
		return;
	}

#ifdef CONFIG_DEBUG_PER_CPU_MAPS
	if (cpu >= nr_cpu_ids || !per_cpu_offset(cpu)) {
		printk(KERN_ERR "numa_set_node: invalid cpu# (%d)\n", cpu);
		dump_stack();
		return;
	}
#endif
	per_cpu(x86_cpu_to_node_map, cpu) = node;

	if (node != NUMA_NO_NODE)
		cpu_pda(cpu)->nodenumber = node;
}

void __cpuinit numa_clear_node(int cpu)
{
	numa_set_node(cpu, NUMA_NO_NODE);
}
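
/*
 * A hypothetical call-site sketch (not from this file): NUMA topology
 * discovery typically maps each cpu's APIC id to a node and records it
 * here, assuming an apicid_to_node[] table like the one the x86-64 NUMA
 * code keeps:
 */
#if 0	/* illustrative sketch only, not compiled */
static void __init record_cpu_node(int cpu, int apicid)
{
	numa_set_node(cpu, apicid_to_node[apicid]);
}
#endif
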
#ifndef CONFIG_DEBUG_PER_CPU_MAPS

void __cpuinit numa_add_cpu(int cpu)
{
	cpu_set(cpu, node_to_cpumask_map[early_cpu_to_node(cpu)]);
}

void __cpuinit numa_remove_cpu(int cpu)
{
	cpu_clear(cpu, node_to_cpumask_map[early_cpu_to_node(cpu)]);
}

#else /* CONFIG_DEBUG_PER_CPU_MAPS */

/*
 * --------- debug versions of the numa functions ---------
 */
static void __cpuinit numa_set_cpumask(int cpu, int enable)
{
	int node = early_cpu_to_node(cpu);
	cpumask_t *mask;
	char buf[64];

	if (node_to_cpumask_map == NULL) {
		printk(KERN_ERR "node_to_cpumask_map NULL\n");
		dump_stack();
		return;
	}

	mask = &node_to_cpumask_map[node];
	if (enable)
		cpu_set(cpu, *mask);
	else
		cpu_clear(cpu, *mask);

	cpulist_scnprintf(buf, sizeof(buf), mask);
	printk(KERN_DEBUG "%s cpu %d node %d: mask now %s\n",
		enable ? "numa_add_cpu" : "numa_remove_cpu", cpu, node, buf);
}

void __cpuinit numa_add_cpu(int cpu)
{
	numa_set_cpumask(cpu, 1);
}

void __cpuinit numa_remove_cpu(int cpu)
{
	numa_set_cpumask(cpu, 0);
}

int cpu_to_node(int cpu)
{
	if (early_per_cpu_ptr(x86_cpu_to_node_map)) {
		printk(KERN_WARNING
			"cpu_to_node(%d): usage too early!\n", cpu);
		dump_stack();
		return early_per_cpu_ptr(x86_cpu_to_node_map)[cpu];
	}
	return per_cpu(x86_cpu_to_node_map, cpu);
}
EXPORT_SYMBOL(cpu_to_node);

/*
 * Same as cpu_to_node() but usable if called before the per_cpu areas
 * are set up.
 */
int early_cpu_to_node(int cpu)
{
	if (early_per_cpu_ptr(x86_cpu_to_node_map))
		return early_per_cpu_ptr(x86_cpu_to_node_map)[cpu];

	if (!per_cpu_offset(cpu)) {
		printk(KERN_WARNING
			"early_cpu_to_node(%d): no per_cpu area!\n", cpu);
		dump_stack();
		return NUMA_NO_NODE;
	}
	return per_cpu(x86_cpu_to_node_map, cpu);
}

/* empty cpumask */
static const cpumask_t cpu_mask_none;

/*
 * Returns a pointer to the bitmask of CPUs on Node 'node'.
 */
const cpumask_t *cpumask_of_node(int node)
{
	if (node_to_cpumask_map == NULL) {
		printk(KERN_WARNING
			"cpumask_of_node(%d): no node_to_cpumask_map!\n",
			node);
		dump_stack();
		return (const cpumask_t *)&cpu_online_map;
	}
	if (node >= nr_node_ids) {
		printk(KERN_WARNING
			"cpumask_of_node(%d): node >= nr_node_ids(%d)\n",
			node, nr_node_ids);
		dump_stack();
		return &cpu_mask_none;
	}
	return &node_to_cpumask_map[node];
}
EXPORT_SYMBOL(cpumask_of_node);

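/*
 * Usage sketch (hypothetical caller, not from this file): prefer the
 * pointer form over node_to_cpumask() so the mask is not copied by value:
 */
#if 0	/* illustrative sketch only, not compiled */
static void __init print_node_cpus(int node)
{
	int cpu;

	for_each_cpu_mask_nr(cpu, *cpumask_of_node(node))
		pr_info("cpu %d is on node %d\n", cpu, node);
}
#endif
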
/*
 * Returns a bitmask of CPUs on Node 'node'.
 *
 * Side note: this function creates the returned cpumask on the stack,
 * so with a high NR_CPUS count, excessive stack space is used.  The
 * node_to_cpumask_ptr function should be used whenever possible.
 */
cpumask_t node_to_cpumask(int node)
{
	if (node_to_cpumask_map == NULL) {
		printk(KERN_WARNING
			"node_to_cpumask(%d): no node_to_cpumask_map!\n", node);
		dump_stack();
		return cpu_online_map;
	}
	if (node >= nr_node_ids) {
		printk(KERN_WARNING
			"node_to_cpumask(%d): node >= nr_node_ids(%d)\n",
			node, nr_node_ids);
		dump_stack();
		return cpu_mask_none;
	}
	return node_to_cpumask_map[node];
}
EXPORT_SYMBOL(node_to_cpumask);

/*
 * --------- end of debug versions of the numa functions ---------
 */

#endif /* CONFIG_DEBUG_PER_CPU_MAPS */

#endif /* X86_64_NUMA */