/* Common code for 32 and 64-bit NUMA */
#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/string.h>
#include <linux/init.h>
#include <linux/bootmem.h>
#include <linux/memblock.h>
#include <linux/mmzone.h>
#include <linux/ctype.h>
#include <linux/module.h>
#include <linux/nodemask.h>
#include <linux/sched.h>
#include <linux/topology.h>

#include <asm/e820.h>
#include <asm/proto.h>
#include <asm/dma.h>
#include <asm/acpi.h>
#include <asm/amd_nb.h>

#include "numa_internal.h"

int __initdata numa_off;
nodemask_t numa_nodes_parsed __initdata;

struct pglist_data *node_data[MAX_NUMNODES] __read_mostly;
EXPORT_SYMBOL(node_data);

static struct numa_meminfo numa_meminfo
#ifndef CONFIG_MEMORY_HOTPLUG
__initdata
#endif
;

static int numa_distance_cnt;
static u8 *numa_distance;

static __init int numa_setup(char *opt)
{
	if (!opt)
		return -EINVAL;
	if (!strncmp(opt, "off", 3))
		numa_off = 1;
#ifdef CONFIG_NUMA_EMU
	if (!strncmp(opt, "fake=", 5))
		numa_emu_cmdline(opt + 5);
#endif
#ifdef CONFIG_ACPI_NUMA
	if (!strncmp(opt, "noacpi", 6))
		acpi_numa = -1;
#endif
	return 0;
}
early_param("numa", numa_setup);
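
/*
 * Illustrative kernel command lines this parser accepts: "numa=off"
 * disables NUMA entirely, "numa=fake=<arg>" (with CONFIG_NUMA_EMU, e.g.
 * "numa=fake=4") hands the rest of the string to the NUMA-emulation
 * code, and "numa=noacpi" (with CONFIG_ACPI_NUMA) suppresses SRAT
 * parsing.  Anything else is silently ignored.
 */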

/*
 * apicid, cpu, node mappings
 */
s16 __apicid_to_node[MAX_LOCAL_APIC] __cpuinitdata = {
	[0 ... MAX_LOCAL_APIC-1] = NUMA_NO_NODE
};

int __cpuinit numa_cpu_node(int cpu)
{
	int apicid = early_per_cpu(x86_cpu_to_apicid, cpu);

	if (apicid != BAD_APICID)
		return __apicid_to_node[apicid];
	return NUMA_NO_NODE;
}
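
/*
 * Resolution above is two-step: cpu -> APIC ID (x86_cpu_to_apicid) ->
 * node (__apicid_to_node[]), the latter filled in by the platform NUMA
 * code via set_apicid_to_node(), e.g. while parsing the ACPI SRAT.
 */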

cpumask_var_t node_to_cpumask_map[MAX_NUMNODES];
EXPORT_SYMBOL(node_to_cpumask_map);

/*
 * Map cpu index to node index
 */
DEFINE_EARLY_PER_CPU(int, x86_cpu_to_node_map, NUMA_NO_NODE);
EXPORT_EARLY_PER_CPU_SYMBOL(x86_cpu_to_node_map);

void __cpuinit numa_set_node(int cpu, int node)
{
	int *cpu_to_node_map = early_per_cpu_ptr(x86_cpu_to_node_map);

	/* early setting, no percpu area yet */
	if (cpu_to_node_map) {
		cpu_to_node_map[cpu] = node;
		return;
	}

#ifdef CONFIG_DEBUG_PER_CPU_MAPS
	if (cpu >= nr_cpu_ids || !cpu_possible(cpu)) {
		printk(KERN_ERR "numa_set_node: invalid cpu# (%d)\n", cpu);
		dump_stack();
		return;
	}
#endif
	per_cpu(x86_cpu_to_node_map, cpu) = node;

	if (node != NUMA_NO_NODE)
		set_cpu_numa_node(cpu, node);
}

void __cpuinit numa_clear_node(int cpu)
{
	numa_set_node(cpu, NUMA_NO_NODE);
}

/*
 * Allocate node_to_cpumask_map based on number of available nodes
 * Requires node_possible_map to be valid.
 *
 * Note: node_to_cpumask() is not valid until after this is done.
 * (Use CONFIG_DEBUG_PER_CPU_MAPS to check this.)
 */
void __init setup_node_to_cpumask_map(void)
{
	unsigned int node, num = 0;

	/* setup nr_node_ids if not done yet */
	if (nr_node_ids == MAX_NUMNODES) {
		for_each_node_mask(node, node_possible_map)
			num = node;
		nr_node_ids = num + 1;
	}

	/* allocate the map */
	for (node = 0; node < nr_node_ids; node++)
		alloc_bootmem_cpumask_var(&node_to_cpumask_map[node]);

	/* cpumask_of_node() will now work */
	pr_debug("Node to cpumask map for %d nodes\n", nr_node_ids);
}
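
/*
 * Example: with node_possible_map = {0, 2}, the loop above leaves
 * num == 2, so nr_node_ids becomes 3 (highest possible node + 1) and
 * cpumasks are allocated for nodes 0..2, including the hole at node 1.
 */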

static int __init numa_add_memblk_to(int nid, u64 start, u64 end,
				     struct numa_meminfo *mi)
{
	/* ignore zero length blks */
	if (start == end)
		return 0;

	/* whine about and ignore invalid blks */
	if (start > end || nid < 0 || nid >= MAX_NUMNODES) {
		pr_warning("NUMA: Warning: invalid memblk node %d (%Lx-%Lx)\n",
			   nid, start, end);
		return 0;
	}

	if (mi->nr_blks >= NR_NODE_MEMBLKS) {
		pr_err("NUMA: too many memblk ranges\n");
		return -EINVAL;
	}

	mi->blk[mi->nr_blks].start = start;
	mi->blk[mi->nr_blks].end = end;
	mi->blk[mi->nr_blks].nid = nid;
	mi->nr_blks++;
	return 0;
}

/**
 * numa_remove_memblk_from - Remove one numa_memblk from a numa_meminfo
 * @idx: Index of memblk to remove
 * @mi: numa_meminfo to remove memblk from
 *
 * Remove @idx'th numa_memblk from @mi by shifting @mi->blk[] and
 * decrementing @mi->nr_blks.
 */
void __init numa_remove_memblk_from(int idx, struct numa_meminfo *mi)
{
	mi->nr_blks--;
	memmove(&mi->blk[idx], &mi->blk[idx + 1],
		(mi->nr_blks - idx) * sizeof(mi->blk[0]));
}
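
/*
 * E.g. removing idx 1 from blocks {A, B, C} memmove()s C down one slot,
 * leaving {A, C} with nr_blks == 2.  Forward-iterating callers compensate
 * with i-- after removal (see numa_cleanup_meminfo() below).
 */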

#ifdef CONFIG_X86_64
/**
 * numa_add_memblk - Add one numa_memblk to numa_meminfo
 * @nid: NUMA node ID of the new memblk
 * @start: Start address of the new memblk
 * @end: End address of the new memblk
 *
 * Add a new memblk to the default numa_meminfo.
 *
 * RETURNS:
 * 0 on success, -errno on failure.
 */
int __init numa_add_memblk(int nid, u64 start, u64 end)
{
	return numa_add_memblk_to(nid, start, end, &numa_meminfo);
}
#endif
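
/*
 * Illustrative call, e.g. from a platform enumeration path: describing
 * the first 4GB of RAM as node 0 would be
 *
 *	numa_add_memblk(0, 0, 0x100000000ULL);
 *
 * Ranges are half-open [start, end); zero-length and malformed blocks
 * are dropped by numa_add_memblk_to() above.
 */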

/* Initialize bootmem allocator for a node */
static void __init setup_node_bootmem(int nid, u64 start, u64 end)
{
	const u64 nd_low = PFN_PHYS(MAX_DMA_PFN);
	const u64 nd_high = PFN_PHYS(max_pfn_mapped);
	const size_t nd_size = roundup(sizeof(pg_data_t), PAGE_SIZE);
	u64 nd_pa;
	int tnid;

	/*
	 * Don't confuse VM with a node that doesn't have the
	 * minimum amount of memory:
	 */
	if (end && (end - start) < NODE_MIN_SIZE)
		return;

	start = roundup(start, ZONE_ALIGN);

	printk(KERN_INFO "Initmem setup node %d %016Lx-%016Lx\n",
	       nid, start, end);

	/*
	 * Try to allocate node data on local node and then fall back to
	 * all nodes. Never allocate in DMA zone.
	 */
	nd_pa = memblock_x86_find_in_range_node(nid, nd_low, nd_high,
						nd_size, SMP_CACHE_BYTES);
	if (nd_pa == MEMBLOCK_ERROR)
		nd_pa = memblock_find_in_range(nd_low, nd_high,
					       nd_size, SMP_CACHE_BYTES);
	if (nd_pa == MEMBLOCK_ERROR) {
		pr_err("Cannot find %zu bytes in node %d\n", nd_size, nid);
		return;
	}
	memblock_x86_reserve_range(nd_pa, nd_pa + nd_size, "NODE_DATA");

	/* report and initialize */
	printk(KERN_INFO "  NODE_DATA [%016Lx - %016Lx]\n",
	       nd_pa, nd_pa + nd_size - 1);
	tnid = early_pfn_to_nid(nd_pa >> PAGE_SHIFT);
	if (tnid != nid)
		printk(KERN_INFO "    NODE_DATA(%d) on node %d\n", nid, tnid);

	node_data[nid] = __va(nd_pa);
	memset(NODE_DATA(nid), 0, sizeof(pg_data_t));
	NODE_DATA(nid)->node_id = nid;
	NODE_DATA(nid)->node_start_pfn = start >> PAGE_SHIFT;
	NODE_DATA(nid)->node_spanned_pages = (end - start) >> PAGE_SHIFT;

	node_set_online(nid);
}
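
/*
 * Note the fallback order above: the pg_data_t is placed on the node it
 * describes when possible, else anywhere in [MAX_DMA_PFN, max_pfn_mapped).
 * A NODE_DATA that lands on a foreign node still works; the
 * "NODE_DATA(%d) on node %d" line records that case.
 */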

/**
 * numa_cleanup_meminfo - Cleanup a numa_meminfo
 * @mi: numa_meminfo to clean up
 *
 * Sanitize @mi by merging and removing unnecessary memblks.  Also check
 * for conflicts and clear unused memblks.
 *
 * RETURNS:
 * 0 on success, -errno on failure.
 */
int __init numa_cleanup_meminfo(struct numa_meminfo *mi)
{
	const u64 low = 0;
	const u64 high = PFN_PHYS(max_pfn);
	int i, j, k;

	for (i = 0; i < mi->nr_blks; i++) {
		struct numa_memblk *bi = &mi->blk[i];

		/* make sure all blocks are inside the limits */
		bi->start = max(bi->start, low);
		bi->end = min(bi->end, high);

		/* and there's no empty block */
		if (bi->start >= bi->end) {
			numa_remove_memblk_from(i--, mi);
			continue;
		}

		for (j = i + 1; j < mi->nr_blks; j++) {
			struct numa_memblk *bj = &mi->blk[j];
			u64 start, end;

			/*
			 * See whether there are overlapping blocks. Whine
			 * about but allow overlaps of the same nid. They
			 * will be merged below.
			 */
			if (bi->end > bj->start && bi->start < bj->end) {
				if (bi->nid != bj->nid) {
					pr_err("NUMA: node %d (%Lx-%Lx) overlaps with node %d (%Lx-%Lx)\n",
					       bi->nid, bi->start, bi->end,
					       bj->nid, bj->start, bj->end);
					return -EINVAL;
				}
				pr_warning("NUMA: Warning: node %d (%Lx-%Lx) overlaps with itself (%Lx-%Lx)\n",
					   bi->nid, bi->start, bi->end,
					   bj->start, bj->end);
			}

			/*
			 * Join together blocks on the same node, holes
			 * between which don't overlap with memory on other
			 * nodes.
			 */
			if (bi->nid != bj->nid)
				continue;
			start = max(min(bi->start, bj->start), low);
			end = min(max(bi->end, bj->end), high);
			for (k = 0; k < mi->nr_blks; k++) {
				struct numa_memblk *bk = &mi->blk[k];

				if (bi->nid == bk->nid)
					continue;
				if (start < bk->end && end > bk->start)
					break;
			}
			if (k < mi->nr_blks)
				continue;
			printk(KERN_INFO "NUMA: Node %d [%Lx,%Lx) + [%Lx,%Lx) -> [%Lx,%Lx)\n",
			       bi->nid, bi->start, bi->end, bj->start, bj->end,
			       start, end);
			bi->start = start;
			bi->end = end;
			numa_remove_memblk_from(j--, mi);
		}
	}

	for (i = mi->nr_blks; i < ARRAY_SIZE(mi->blk); i++) {
		mi->blk[i].start = mi->blk[i].end = 0;
		mi->blk[i].nid = NUMA_NO_NODE;
	}

	return 0;
}
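
/*
 * Example: node 0 blocks [0x0,0x1000) and [0x2000,0x3000) merge into
 * [0x0,0x3000) only if no other node owns memory in the hole
 * [0x1000,0x2000); same-nid overlaps merge with a warning, while
 * different-nid overlaps are treated as a fatal config error (-EINVAL).
 */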

/*
 * Set the nodes which have memory in @mi in *@nodemask.
 */
static void __init numa_nodemask_from_meminfo(nodemask_t *nodemask,
					      const struct numa_meminfo *mi)
{
	int i;

	for (i = 0; i < ARRAY_SIZE(mi->blk); i++)
		if (mi->blk[i].start != mi->blk[i].end &&
		    mi->blk[i].nid != NUMA_NO_NODE)
			node_set(mi->blk[i].nid, *nodemask);
}

/**
 * numa_reset_distance - Reset NUMA distance table
 *
 * The current table is freed.  The next numa_set_distance() call will
 * create a new one.
 */
void __init numa_reset_distance(void)
{
	size_t size = numa_distance_cnt * numa_distance_cnt * sizeof(numa_distance[0]);

	/* numa_distance could be 1LU marking allocation failure, test cnt */
	if (numa_distance_cnt)
		memblock_x86_free_range(__pa(numa_distance),
					__pa(numa_distance) + size);
	numa_distance_cnt = 0;
	numa_distance = NULL;		/* enable table creation */
}

static int __init numa_alloc_distance(void)
{
	nodemask_t nodes_parsed;
	size_t size;
	int i, j, cnt = 0;
	u64 phys;

	/* size the new table and allocate it */
	nodes_parsed = numa_nodes_parsed;
	numa_nodemask_from_meminfo(&nodes_parsed, &numa_meminfo);

	for_each_node_mask(i, nodes_parsed)
		cnt = i;
	cnt++;
	size = cnt * cnt * sizeof(numa_distance[0]);

	phys = memblock_find_in_range(0, PFN_PHYS(max_pfn_mapped),
				      size, PAGE_SIZE);
	if (phys == MEMBLOCK_ERROR) {
		pr_warning("NUMA: Warning: can't allocate distance table!\n");
		/* don't retry until explicitly reset */
		numa_distance = (void *)1LU;
		return -ENOMEM;
	}
	memblock_x86_reserve_range(phys, phys + size, "NUMA DIST");

	numa_distance = __va(phys);
	numa_distance_cnt = cnt;

	/* fill with the default distances */
	for (i = 0; i < cnt; i++)
		for (j = 0; j < cnt; j++)
			numa_distance[i * cnt + j] = i == j ?
				LOCAL_DISTANCE : REMOTE_DISTANCE;
	printk(KERN_DEBUG "NUMA: Initialized distance table, cnt=%d\n", cnt);

	return 0;
}
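
/*
 * The table is a flat cnt x cnt byte matrix indexed as
 * numa_distance[from * cnt + to].  For cnt == 2 the defaults above give:
 *
 *	{ LOCAL_DISTANCE,  REMOTE_DISTANCE,
 *	  REMOTE_DISTANCE, LOCAL_DISTANCE }
 *
 * i.e. 10 on the diagonal and 20 elsewhere, matching the usual ACPI
 * SLIT conventions.
 */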

#ifdef CONFIG_X86_64
/**
 * numa_set_distance - Set NUMA distance from one NUMA to another
 * @from: the 'from' node to set distance
 * @to: the 'to' node to set distance
 * @distance: NUMA distance
 *
 * Set the distance from node @from to @to to @distance.  If the distance
 * table doesn't exist, one which is large enough to accommodate all the
 * currently known nodes will be created.
 *
 * If such a table cannot be allocated, a warning is printed and further
 * calls are ignored until the distance table is reset with
 * numa_reset_distance().
 *
 * If @from or @to is higher than the highest known node at the time of
 * table creation or @distance doesn't make sense, the call is ignored.
 * This is to allow simplification of specific NUMA config implementations.
 */
void __init numa_set_distance(int from, int to, int distance)
{
	if (!numa_distance && numa_alloc_distance() < 0)
		return;

	if (from >= numa_distance_cnt || to >= numa_distance_cnt) {
		printk_once(KERN_DEBUG "NUMA: Debug: distance out of bound, from=%d to=%d distance=%d\n",
			    from, to, distance);
		return;
	}

	if ((u8)distance != distance ||
	    (from == to && distance != LOCAL_DISTANCE)) {
		pr_warn_once("NUMA: Warning: invalid distance parameter, from=%d to=%d distance=%d\n",
			     from, to, distance);
		return;
	}

	numa_distance[from * numa_distance_cnt + to] = distance;
}
#endif
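
/*
 * Example: a SLIT entry of 21 between nodes 0 and 1 is recorded with
 * numa_set_distance(0, 1, 21).  Values must fit in a u8, and a node's
 * distance to itself must stay LOCAL_DISTANCE; anything else is
 * rejected with a one-time warning.
 */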

int __node_distance(int from, int to)
{
	if (from >= numa_distance_cnt || to >= numa_distance_cnt)
		return from == to ? LOCAL_DISTANCE : REMOTE_DISTANCE;
	return numa_distance[from * numa_distance_cnt + to];
}
EXPORT_SYMBOL(__node_distance);

/*
 * Sanity check to catch more bad NUMA configurations (they are amazingly
 * common).  Make sure the nodes cover all memory.
 */
static bool __init numa_meminfo_cover_memory(const struct numa_meminfo *mi)
{
	u64 numaram, e820ram;
	int i;

	numaram = 0;
	for (i = 0; i < mi->nr_blks; i++) {
		u64 s = mi->blk[i].start >> PAGE_SHIFT;
		u64 e = mi->blk[i].end >> PAGE_SHIFT;
		numaram += e - s;
		numaram -= __absent_pages_in_range(mi->blk[i].nid, s, e);
		if ((s64)numaram < 0)
			numaram = 0;
	}

	e820ram = max_pfn - (memblock_x86_hole_size(0,
					PFN_PHYS(max_pfn)) >> PAGE_SHIFT);
	/* We seem to lose 3 pages somewhere. Allow 1M of slack. */
	if ((s64)(e820ram - numaram) >= (1 << (20 - PAGE_SHIFT))) {
		printk(KERN_ERR "NUMA: nodes only cover %LuMB of your %LuMB e820 RAM. Not used.\n",
		       (numaram << PAGE_SHIFT) >> 20,
		       (e820ram << PAGE_SHIFT) >> 20);
		return false;
	}
	return true;
}
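
/*
 * The slack above, 1 << (20 - PAGE_SHIFT), is 1MB expressed in pages
 * (256 pages with 4K pages), so the node maps may undercover e820 RAM
 * by just under 1MB before the whole NUMA config is rejected.
 */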

static int __init numa_register_memblks(struct numa_meminfo *mi)
{
	int i, nid;

	/* Account for nodes with cpus and no memory */
	node_possible_map = numa_nodes_parsed;
	numa_nodemask_from_meminfo(&node_possible_map, mi);
	if (WARN_ON(nodes_empty(node_possible_map)))
		return -EINVAL;

	for (i = 0; i < mi->nr_blks; i++)
		memblock_x86_register_active_regions(mi->blk[i].nid,
					mi->blk[i].start >> PAGE_SHIFT,
					mi->blk[i].end >> PAGE_SHIFT);

	/* for out of order entries */
	sort_node_map();
	if (!numa_meminfo_cover_memory(mi))
		return -EINVAL;

	/* Finally register nodes. */
	for_each_node_mask(nid, node_possible_map) {
		u64 start = PFN_PHYS(max_pfn);
		u64 end = 0;

		for (i = 0; i < mi->nr_blks; i++) {
			if (nid != mi->blk[i].nid)
				continue;
			start = min(mi->blk[i].start, start);
			end = max(mi->blk[i].end, end);
		}

		if (start < end)
			setup_node_bootmem(nid, start, end);
	}

	return 0;
}

/*
 * There are unfortunately some poorly designed mainboards around that
 * only connect memory to a single CPU. This breaks the 1:1 cpu->node
 * mapping. To avoid this fill in the mapping for all possible CPUs,
 * as the number of CPUs is not known yet. We round robin the existing
 * nodes.
 */
void __init numa_init_array(void)
{
	int rr, i;

	rr = first_node(node_online_map);
	for (i = 0; i < nr_cpu_ids; i++) {
		if (early_cpu_to_node(i) != NUMA_NO_NODE)
			continue;
		numa_set_node(i, rr);
		rr = next_node(rr, node_online_map);
		if (rr == MAX_NUMNODES)
			rr = first_node(node_online_map);
	}
}
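
/*
 * Example: with nodes {0, 1} online and four still-unmapped CPUs, the
 * round robin above assigns CPUs 0..3 to nodes 0, 1, 0, 1.
 */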

static int __init numa_init(int (*init_func)(void))
{
	int i;
	int ret;

	for (i = 0; i < MAX_LOCAL_APIC; i++)
		set_apicid_to_node(i, NUMA_NO_NODE);

	nodes_clear(numa_nodes_parsed);
	nodes_clear(node_possible_map);
	nodes_clear(node_online_map);
	memset(&numa_meminfo, 0, sizeof(numa_meminfo));
	remove_all_active_ranges();
	numa_reset_distance();

	ret = init_func();
	if (ret < 0)
		return ret;
	ret = numa_cleanup_meminfo(&numa_meminfo);
	if (ret < 0)
		return ret;

	numa_emulation(&numa_meminfo, numa_distance_cnt);

	ret = numa_register_memblks(&numa_meminfo);
	if (ret < 0)
		return ret;

	for (i = 0; i < nr_cpu_ids; i++) {
		int nid = early_cpu_to_node(i);

		if (nid == NUMA_NO_NODE)
			continue;
		if (!node_online(nid))
			numa_clear_node(i);
	}
	numa_init_array();
	return 0;
}
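
/*
 * The sequence above resets all NUMA state before calling init_func(),
 * so a failed attempt leaves nothing stale behind for the next
 * numa_init() call from x86_numa_init() below; CPUs left pointing at
 * offline nodes are cleared before numa_init_array() round-robins them
 * onto online ones.
 */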

/**
 * dummy_numa_init - Fallback dummy NUMA init
 *
 * Used if there's no underlying NUMA architecture, NUMA initialization
 * fails, or NUMA is disabled on the command line.
 *
 * Must online at least one node and add memory blocks that cover all
 * allowed memory.  This function must not fail.
 */
static int __init dummy_numa_init(void)
{
	printk(KERN_INFO "%s\n",
	       numa_off ? "NUMA turned off" : "No NUMA configuration found");
	printk(KERN_INFO "Faking a node at %016Lx-%016Lx\n",
	       0LLU, PFN_PHYS(max_pfn));

	node_set(0, numa_nodes_parsed);
	numa_add_memblk(0, 0, PFN_PHYS(max_pfn));

	return 0;
}

/**
 * x86_numa_init - Initialize NUMA
 *
 * Try each configured NUMA initialization method until one succeeds.  The
 * last fallback is a dummy single node config encompassing whole memory,
 * which never fails.
 */
void __init x86_numa_init(void)
{
	if (!numa_off) {
#ifdef CONFIG_ACPI_NUMA
		if (!numa_init(x86_acpi_numa_init))
			return;
#endif
#ifdef CONFIG_AMD_NUMA
		if (!numa_init(amd_numa_init))
			return;
#endif
	}

	numa_init(dummy_numa_init);
}

static __init int find_near_online_node(int node)
{
	int n, val;
	int min_val = INT_MAX;
	int best_node = -1;

	for_each_online_node(n) {
		val = node_distance(node, n);

		if (val < min_val) {
			min_val = val;
			best_node = n;
		}
	}

	return best_node;
}

/*
 * Setup early cpu_to_node.
 *
 * Populate cpu_to_node[] only if the x86_cpu_to_apicid[] and
 * apicid_to_node[] tables have valid entries for a CPU.
 * This means we skip cpu_to_node[] initialisation for NUMA
 * emulation and the faked node case (when running a kernel compiled
 * for NUMA on a non-NUMA box), which is OK as cpu_to_node[]
 * is already initialized in a round robin manner at numa_init_array(),
 * prior to this call, and this initialization is good enough
 * for the fake NUMA cases.
 *
 * Called before the per_cpu areas are setup.
 */
void __init init_cpu_to_node(void)
{
	int cpu;
	u16 *cpu_to_apicid = early_per_cpu_ptr(x86_cpu_to_apicid);

	BUG_ON(cpu_to_apicid == NULL);

	for_each_possible_cpu(cpu) {
		int node = numa_cpu_node(cpu);

		if (node == NUMA_NO_NODE)
			continue;
		if (!node_online(node))
			node = find_near_online_node(node);
		numa_set_node(cpu, node);
	}
}

#ifndef CONFIG_DEBUG_PER_CPU_MAPS

# ifndef CONFIG_NUMA_EMU
void __cpuinit numa_add_cpu(int cpu)
{
	cpumask_set_cpu(cpu, node_to_cpumask_map[early_cpu_to_node(cpu)]);
}

void __cpuinit numa_remove_cpu(int cpu)
{
	cpumask_clear_cpu(cpu, node_to_cpumask_map[early_cpu_to_node(cpu)]);
}
# endif	/* !CONFIG_NUMA_EMU */

#else	/* !CONFIG_DEBUG_PER_CPU_MAPS */

int __cpu_to_node(int cpu)
{
	if (early_per_cpu_ptr(x86_cpu_to_node_map)) {
		printk(KERN_WARNING
			"cpu_to_node(%d): usage too early!\n", cpu);
		dump_stack();
		return early_per_cpu_ptr(x86_cpu_to_node_map)[cpu];
	}
	return per_cpu(x86_cpu_to_node_map, cpu);
}
EXPORT_SYMBOL(__cpu_to_node);

/*
 * Same function as cpu_to_node() but used if called before the
 * per_cpu areas are setup.
 */
int early_cpu_to_node(int cpu)
{
	if (early_per_cpu_ptr(x86_cpu_to_node_map))
		return early_per_cpu_ptr(x86_cpu_to_node_map)[cpu];

	if (!cpu_possible(cpu)) {
		printk(KERN_WARNING
			"early_cpu_to_node(%d): no per_cpu area!\n", cpu);
		dump_stack();
		return NUMA_NO_NODE;
	}
	return per_cpu(x86_cpu_to_node_map, cpu);
}

void debug_cpumask_set_cpu(int cpu, int node, bool enable)
{
	struct cpumask *mask;
	char buf[64];

	if (node == NUMA_NO_NODE) {
		/* early_cpu_to_node() already emits a warning and trace */
		return;
	}
	mask = node_to_cpumask_map[node];
	if (!mask) {
		pr_err("node_to_cpumask_map[%i] NULL\n", node);
		dump_stack();
		return;
	}

	if (enable)
		cpumask_set_cpu(cpu, mask);
	else
		cpumask_clear_cpu(cpu, mask);

	cpulist_scnprintf(buf, sizeof(buf), mask);
	printk(KERN_DEBUG "%s cpu %d node %d: mask now %s\n",
	       enable ? "numa_add_cpu" : "numa_remove_cpu",
	       cpu, node, buf);
	return;
}
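
/*
 * Illustrative debug output (hypothetical CPU/node values):
 *
 *	numa_add_cpu cpu 3 node 1: mask now 1,3
 */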

# ifndef CONFIG_NUMA_EMU
static void __cpuinit numa_set_cpumask(int cpu, bool enable)
{
	debug_cpumask_set_cpu(cpu, early_cpu_to_node(cpu), enable);
}

void __cpuinit numa_add_cpu(int cpu)
{
	numa_set_cpumask(cpu, true);
}

void __cpuinit numa_remove_cpu(int cpu)
{
	numa_set_cpumask(cpu, false);
}
# endif	/* !CONFIG_NUMA_EMU */

/*
 * Returns a pointer to the bitmask of CPUs on Node 'node'.
 */
const struct cpumask *cpumask_of_node(int node)
{
	if (node >= nr_node_ids) {
		printk(KERN_WARNING
			"cpumask_of_node(%d): node > nr_node_ids(%d)\n",
			node, nr_node_ids);
		dump_stack();
		return cpu_none_mask;
	}
	if (node_to_cpumask_map[node] == NULL) {
		printk(KERN_WARNING
			"cpumask_of_node(%d): no node_to_cpumask_map!\n",
			node);
		dump_stack();
		return cpu_online_mask;
	}
	return node_to_cpumask_map[node];
}
EXPORT_SYMBOL(cpumask_of_node);

#endif	/* !CONFIG_DEBUG_PER_CPU_MAPS */

#if defined(CONFIG_X86_64) && defined(CONFIG_MEMORY_HOTPLUG)
int memory_add_physaddr_to_nid(u64 start)
{
	struct numa_meminfo *mi = &numa_meminfo;
	int nid = mi->blk[0].nid;
	int i;

	for (i = 0; i < mi->nr_blks; i++)
		if (mi->blk[i].start <= start && mi->blk[i].end > start)
			nid = mi->blk[i].nid;
	return nid;
}
EXPORT_SYMBOL_GPL(memory_add_physaddr_to_nid);
#endif
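
/*
 * Example: if node 1 owns the block [4G, 8G) and a DIMM is hot-added at
 * physical address 5G, memory_add_physaddr_to_nid(0x140000000ULL)
 * returns 1; addresses not covered by any block fall back to the first
 * block's nid.
 */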