[PATCH] Have x86_64 use add_active_range() and free_area_init_nodes
[linux-block.git] / arch / x86_64 / mm / srat.c

/*
 * ACPI 3.0 based NUMA setup
 * Copyright 2004 Andi Kleen, SuSE Labs.
 *
 * Reads the ACPI SRAT table to figure out what memory belongs to which CPUs.
 *
 * Called from acpi_numa_init while reading the SRAT and SLIT tables.
 * Assumes all memory regions belonging to a single proximity domain
 * are in one chunk. Holes between them will be included in the node.
 */

#include <linux/kernel.h>
#include <linux/acpi.h>
#include <linux/mmzone.h>
#include <linux/bitmap.h>
#include <linux/module.h>
#include <linux/topology.h>
#include <linux/bootmem.h>
#include <linux/mm.h>
#include <asm/proto.h>
#include <asm/numa.h>
#include <asm/e820.h>

int acpi_numa __initdata;

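/*
 * Hotadd SRAT ranges only need to be reserved at boot when the ACPI
 * memory hotplug driver is built (as module or built-in) but the
 * kernel lacks CONFIG_MEMORY_HOTPLUG; with real hotplug support the
 * ranges are added at runtime instead.
 */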
#if (defined(CONFIG_ACPI_HOTPLUG_MEMORY) || \
	defined(CONFIG_ACPI_HOTPLUG_MEMORY_MODULE)) \
		&& !defined(CONFIG_MEMORY_HOTPLUG)
#define RESERVE_HOTADD 1
#endif

static struct acpi_table_slit *acpi_slit;

static nodemask_t nodes_parsed __initdata;
static struct bootnode nodes[MAX_NUMNODES] __initdata;
static struct bootnode nodes_add[MAX_NUMNODES] __initdata;
static int found_add_area __initdata;
int hotadd_percent __initdata = 0;
#ifndef RESERVE_HOTADD
#define hotadd_percent 0	/* Ignore all settings */
#endif

/* Too small nodes confuse the VM badly. Usually they result
   from BIOS bugs. */
#define NODE_MIN_SIZE (4*1024*1024)

static __init int setup_node(int pxm)
{
	return acpi_map_pxm_to_node(pxm);
}

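/*
 * Check whether [start, end) overlaps any node already parsed from the
 * SRAT. Returns the index of the conflicting node, or -1 if there is
 * no overlap. An exact duplicate of an existing node also counts as a
 * conflict; the caller decides whether a node conflicting with itself
 * is fatal.
 */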
static __init int conflicting_nodes(unsigned long start, unsigned long end)
{
	int i;
	for_each_node_mask(i, nodes_parsed) {
		struct bootnode *nd = &nodes[i];
		if (nd->start == nd->end)
			continue;
		if (nd->end > start && nd->start < end)
			return i;
		if (nd->end == end && nd->start == start)
			return i;
	}
	return -1;
}

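/*
 * Clamp node i to [start, end). A node entirely outside the range
 * collapses to an empty (start == end) node. Skipped once a hotadd
 * area has been found, because hotadd nodes may legitimately extend
 * beyond the memory currently present.
 */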
static __init void cutoff_node(int i, unsigned long start, unsigned long end)
{
	struct bootnode *nd = &nodes[i];

	if (found_add_area)
		return;

	if (nd->start < start) {
		nd->start = start;
		if (nd->end < nd->start)
			nd->start = nd->end;
	}
	if (nd->end > end) {
		nd->end = end;
		if (nd->start > nd->end)
			nd->start = nd->end;
	}
}

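/*
 * Throw away all SRAT-derived state and fall back to non-NUMA setup.
 * Everything filled in so far (APIC mappings, node ranges, active
 * regions) is reset so later code sees a clean slate.
 */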
static __init void bad_srat(void)
{
	int i;
	printk(KERN_ERR "SRAT: SRAT not used.\n");
	acpi_numa = -1;
	found_add_area = 0;
	for (i = 0; i < MAX_LOCAL_APIC; i++)
		apicid_to_node[i] = NUMA_NO_NODE;
	/* Clear both the parsed nodes and any hotadd state */
	for (i = 0; i < MAX_NUMNODES; i++) {
		nodes[i].start = nodes[i].end = 0;
		nodes_add[i].start = nodes_add[i].end = 0;
	}
	remove_all_active_ranges();
}

static __init inline int srat_disabled(void)
{
	return numa_off || acpi_numa < 0;
}

/*
 * A lot of BIOSes fill in 10 (= no distance) everywhere. This messes
 * up the NUMA heuristics, which want the local node to have a smaller
 * distance than the others.
 * Do some quick checks here and only use the SLIT if it passes.
 */
static __init int slit_valid(struct acpi_table_slit *slit)
{
	int i, j;
	int d = slit->localities;
	for (i = 0; i < d; i++) {
		for (j = 0; j < d; j++) {
			u8 val = slit->entry[d*i + j];
			if (i == j) {
				if (val != 10)
					return 0;
			} else if (val <= 10)
				return 0;
		}
	}
	return 1;
}
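
/*
 * Example of the layout slit_valid() expects, for two nodes
 * (row-major, slit->localities == 2):
 *
 *	entry[] = { 10, 20,
 *		    20, 10 };
 *
 * Diagonal entries must be exactly 10 (local); off-diagonal entries
 * must be strictly greater. A table of all 10s, as some BIOSes ship,
 * fails the check and is discarded.
 */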

/* Callback for SLIT parsing */
void __init acpi_numa_slit_init(struct acpi_table_slit *slit)
{
	if (!slit_valid(slit)) {
		printk(KERN_INFO "ACPI: SLIT table looks invalid. Not used.\n");
		return;
	}
	acpi_slit = slit;
}

/* Callback for Proximity Domain -> LAPIC mapping */
void __init
acpi_numa_processor_affinity_init(struct acpi_table_processor_affinity *pa)
{
	int pxm, node;
	if (srat_disabled())
		return;
	if (pa->header.length != sizeof(struct acpi_table_processor_affinity)) {
		bad_srat();
		return;
	}
	if (pa->flags.enabled == 0)
		return;
	pxm = pa->proximity_domain;
	node = setup_node(pxm);
	if (node < 0) {
		printk(KERN_ERR "SRAT: Too many proximity domains %x\n", pxm);
		bad_srat();
		return;
	}
	apicid_to_node[pa->apic_id] = node;
	acpi_numa = 1;
	printk(KERN_INFO "SRAT: PXM %u -> APIC %u -> Node %u\n",
	       pxm, pa->apic_id, node);
}

#ifdef RESERVE_HOTADD
/*
 * Protect against too large hotadd areas that would fill up memory.
 */
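/*
 * The limit below is on the memmap (struct page array) the hotadd
 * area will eventually need, not on the area itself. Rough numbers,
 * assuming sizeof(struct page) is about 56 bytes on a typical x86_64
 * config (the exact size depends on config options): a 4GB hotadd
 * area is 1M pages and thus needs roughly 56MB of memmap, which must
 * fit within hotadd_percent percent of present RAM.
 */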
static int hotadd_enough_memory(struct bootnode *nd)
{
	static unsigned long allocated;
	static unsigned long last_area_end;
	unsigned long pages = (nd->end - nd->start) >> PAGE_SHIFT;
	long mem = pages * sizeof(struct page);
	unsigned long addr;
	unsigned long allowed;
	unsigned long oldpages = pages;

	if (mem < 0)
		return 0;
	allowed = (end_pfn - absent_pages_in_range(0, end_pfn)) * PAGE_SIZE;
	allowed = (allowed / 100) * hotadd_percent;
	if (allocated + mem > allowed) {
		unsigned long range;
		/* Give them at least part of their hotadd memory up to
		   hotadd_percent. It would be better to spread the limit out
		   over multiple hotplug areas, but that is too complicated
		   right now. */
		if (allocated >= allowed)
			return 0;
		range = allowed - allocated;
		pages = (range / PAGE_SIZE);
		mem = pages * sizeof(struct page);
		nd->end = nd->start + range;
	}
	/* Not completely foolproof, but a good sanity check */
	addr = find_e820_area(last_area_end, end_pfn<<PAGE_SHIFT, mem);
	if (addr == -1UL)
		return 0;
	if (pages != oldpages)
		printk(KERN_NOTICE "SRAT: Hotadd area limited to %lu bytes\n",
			pages << PAGE_SHIFT);
	last_area_end = addr + mem;
	allocated += mem;
	return 1;
}

/*
 * It is fine to add this area to the nodes data; it will be used later.
 * This code supports one contiguous hot-add area per node.
 */
static int reserve_hotadd(int node, unsigned long start, unsigned long end)
{
	unsigned long s_pfn = start >> PAGE_SHIFT;
	unsigned long e_pfn = end >> PAGE_SHIFT;
	int changed = 0;
	struct bootnode *nd = &nodes_add[node];

	/* I had some trouble with strange memory hotadd regions breaking
	   the boot. Be very strict here and reject anything unexpected.
	   If you want working memory hotadd write correct SRATs.

	   The node size check is a basic sanity check to guard against
	   mistakes */
	if ((signed long)(end - start) < NODE_MIN_SIZE) {
		printk(KERN_ERR "SRAT: Hotplug area too small\n");
		return -1;
	}

	/* This check might be a bit too strict, but I'm keeping it for now. */
	if (absent_pages_in_range(s_pfn, e_pfn) != e_pfn - s_pfn) {
		printk(KERN_ERR "SRAT: Hotplug area has existing memory\n");
		return -1;
	}

	if (!hotadd_enough_memory(&nodes_add[node])) {
		printk(KERN_ERR "SRAT: Hotplug area too large\n");
		return -1;
	}

	/* Looks good */

	found_add_area = 1;
	if (nd->start == nd->end) {
		nd->start = start;
		nd->end = end;
		changed = 1;
	} else {
		if (nd->start == end) {
			nd->start = start;
			changed = 1;
		}
		if (nd->end == start) {
			nd->end = end;
			changed = 1;
		}
		if (!changed)
			printk(KERN_ERR "SRAT: Hotplug zone not contiguous. Partly ignored\n");
	}

	if ((nd->end >> PAGE_SHIFT) > end_pfn)
		end_pfn = nd->end >> PAGE_SHIFT;

	if (changed)
		printk(KERN_INFO "SRAT: hot plug zone found %Lx - %Lx\n", nd->start, nd->end);
	return 0;
}
#endif

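/*
 * Each SRAT memory affinity structure is validated below before it is
 * merged into the node map: length and enable checks first, then the
 * hotplug filter, then overlap detection against already parsed nodes.
 * Only after that is the range merged and registered as an active
 * region.
 */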
/* Callback for parsing of the Proximity Domain <-> Memory Area mappings */
void __init
acpi_numa_memory_affinity_init(struct acpi_table_memory_affinity *ma)
{
	struct bootnode *nd, oldnode;
	unsigned long start, end;
	int node, pxm;
	int i;

	if (srat_disabled())
		return;
	if (ma->header.length != sizeof(struct acpi_table_memory_affinity)) {
		bad_srat();
		return;
	}
	if (ma->flags.enabled == 0)
		return;
	if (ma->flags.hot_pluggable && hotadd_percent == 0)
		return;
	start = ma->base_addr_lo | ((u64)ma->base_addr_hi << 32);
	end = start + (ma->length_lo | ((u64)ma->length_hi << 32));
	pxm = ma->proximity_domain;
	node = setup_node(pxm);
	if (node < 0) {
		printk(KERN_ERR "SRAT: Too many proximity domains.\n");
		bad_srat();
		return;
	}
	i = conflicting_nodes(start, end);
	if (i == node) {
		printk(KERN_WARNING
		"SRAT: Warning: PXM %d (%lx-%lx) overlaps with itself (%Lx-%Lx)\n",
			pxm, start, end, nodes[i].start, nodes[i].end);
	} else if (i >= 0) {
		printk(KERN_ERR
		       "SRAT: PXM %d (%lx-%lx) overlaps with PXM %d (%Lx-%Lx)\n",
		       pxm, start, end, node_to_pxm(i),
		       nodes[i].start, nodes[i].end);
		bad_srat();
		return;
	}
	nd = &nodes[node];
	oldnode = *nd;
	if (!node_test_and_set(node, nodes_parsed)) {
		nd->start = start;
		nd->end = end;
	} else {
		if (start < nd->start)
			nd->start = start;
		if (nd->end < end)
			nd->end = end;
	}

	printk(KERN_INFO "SRAT: Node %u PXM %u %Lx-%Lx\n", node, pxm,
	       nd->start, nd->end);
	e820_register_active_regions(node, nd->start >> PAGE_SHIFT,
				     nd->end >> PAGE_SHIFT);

#ifdef RESERVE_HOTADD
	if (ma->flags.hot_pluggable && reserve_hotadd(node, start, end) < 0) {
		/* Ignore hotadd region. Undo damage */
		printk(KERN_NOTICE "SRAT: Hotplug region ignored\n");
		*nd = oldnode;
		if ((nd->start | nd->end) == 0)
			node_clear(node, nodes_parsed);
	}
#endif
}

/* Sanity check to catch more bad SRATs (they are amazingly common).
   Make sure the PXMs cover all memory. */
static int nodes_cover_memory(void)
{
	int i;
	unsigned long pxmram, e820ram;

	pxmram = 0;
	for_each_node_mask(i, nodes_parsed) {
		unsigned long s = nodes[i].start >> PAGE_SHIFT;
		unsigned long e = nodes[i].end >> PAGE_SHIFT;
		pxmram += e - s;
		pxmram -= absent_pages_in_range(s, e);
		pxmram -= nodes_add[i].end - nodes_add[i].start;
		if ((long)pxmram < 0)
			pxmram = 0;
	}

	e820ram = end_pfn - absent_pages_in_range(0, end_pfn);
	/* We seem to lose 3 pages somewhere. Allow a bit of slack. */
	if ((long)(e820ram - pxmram) >= 1*1024*1024) {
		printk(KERN_ERR
		"SRAT: PXMs only cover %luMB of your %luMB e820 RAM. Not used.\n",
			(pxmram << PAGE_SHIFT) >> 20,
			(e820ram << PAGE_SHIFT) >> 20);
		return 0;
	}
	return 1;
}

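/*
 * Forget a node that turned out to be unusable: drop it from the
 * parsed mask and sever any APIC -> node mappings pointing at it.
 */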
static void unparse_node(int node)
{
	int i;
	node_clear(node, nodes_parsed);
	for (i = 0; i < MAX_LOCAL_APIC; i++) {
		if (apicid_to_node[i] == node)
			apicid_to_node[i] = NUMA_NO_NODE;
	}
}

void __init acpi_numa_arch_fixup(void) {}

/* Use the information discovered above to actually set up the nodes. */
int __init acpi_scan_nodes(unsigned long start, unsigned long end)
{
	int i;

	/* First clean up the node list */
	for (i = 0; i < MAX_NUMNODES; i++) {
		cutoff_node(i, start, end);
		if ((nodes[i].end - nodes[i].start) < NODE_MIN_SIZE) {
			unparse_node(i);
			node_set_offline(i);
		}
	}

	if (acpi_numa <= 0)
		return -1;

	if (!nodes_cover_memory()) {
		bad_srat();
		return -1;
	}

	memnode_shift = compute_hash_shift(nodes, MAX_NUMNODES);
	if (memnode_shift < 0) {
		printk(KERN_ERR
		     "SRAT: No NUMA node hash function found. Contact maintainer\n");
		bad_srat();
		return -1;
	}

	/* Finally register nodes */
	for_each_node_mask(i, nodes_parsed)
		setup_node_bootmem(i, nodes[i].start, nodes[i].end);
	/* Try again in case setup_node_bootmem missed one due
	   to missing bootmem */
	for_each_node_mask(i, nodes_parsed)
		if (!node_online(i))
			setup_node_bootmem(i, nodes[i].start, nodes[i].end);

	for (i = 0; i < NR_CPUS; i++) {
		if (cpu_to_node[i] == NUMA_NO_NODE)
			continue;
		if (!node_isset(cpu_to_node[i], nodes_parsed))
			numa_set_node(i, NUMA_NO_NODE);
	}
	numa_init_array();
	return 0;
}

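/*
 * Reserve a node's hotadd range in its bootmem so nothing else
 * allocates from it before the memory is actually plugged in. The
 * cost printed is the memmap that has to be pre-allocated for the
 * area: (area size in pages) * sizeof(struct page).
 */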
void __init srat_reserve_add_area(int nodeid)
{
	if (found_add_area && nodes_add[nodeid].end) {
		u64 total_mb;

		printk(KERN_INFO "SRAT: Reserving hot-add memory space "
				"for node %d at %Lx-%Lx\n",
			nodeid, nodes_add[nodeid].start, nodes_add[nodeid].end);
		total_mb = (nodes_add[nodeid].end - nodes_add[nodeid].start)
					>> PAGE_SHIFT;
		total_mb *= sizeof(struct page);
		total_mb >>= 20;
		printk(KERN_INFO "SRAT: This will cost you %Lu MB of "
				"pre-allocated memory.\n", (unsigned long long)total_mb);
		reserve_bootmem_node(NODE_DATA(nodeid), nodes_add[nodeid].start,
			       nodes_add[nodeid].end - nodes_add[nodeid].start);
	}
}

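/*
 * Distance between two nodes in SLIT units (10 == local). Without a
 * SLIT we assume a flat topology: 10 for the node itself, 20 for any
 * remote node. With a SLIT the table is indexed by proximity domain,
 * e.g. for PXMs 0 and 1, entry[localities * 0 + 1] is the 0 -> 1
 * distance.
 */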
int __node_distance(int a, int b)
{
	int index;

	if (!acpi_slit)
		return a == b ? 10 : 20;
	index = acpi_slit->localities * node_to_pxm(a);
	return acpi_slit->entry[index + node_to_pxm(b)];
}

EXPORT_SYMBOL(__node_distance);