[PATCH] sparsemem memory model
[linux-2.6-block.git] / arch/i386/mm/discontig.c
/*
 * Written by: Patricia Gaughen <gone@us.ibm.com>, IBM Corporation
 * August 2002: added remote node KVA remap - Martin J. Bligh
 *
 * Copyright (C) 2002, IBM Corp.
 *
 * All rights reserved.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
 * NON INFRINGEMENT.  See the GNU General Public License for more
 * details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
 */

#include <linux/config.h>
#include <linux/mm.h>
#include <linux/bootmem.h>
#include <linux/mmzone.h>
#include <linux/highmem.h>
#include <linux/initrd.h>
#include <linux/nodemask.h>
#include <asm/e820.h>
#include <asm/setup.h>
#include <asm/mmzone.h>
#include <bios_ebda.h>

struct pglist_data *node_data[MAX_NUMNODES];
bootmem_data_t node0_bdata;

/*
 * numa interface - we expect the numa architecture specific code to have
 * populated the following initialisation.
 *
 * 1) node_online_map - the map of all nodes configured (online) in the system
 * 2) physnode_map    - the mapping between a pfn and owning node
 * 3) node_start_pfn  - the starting page frame number for a node
 * 4) node_end_pfn    - the ending page frame number for a node
 */

/*
 * physnode_map keeps track of the physical memory layout of a generic
 * numa node at a 256Mb granularity: each element of the array represents
 * 256Mb of memory and holds the id of the node that owns it.  So, if the
 * first gig is on node 0 and the second gig is on node 1, physnode_map
 * will contain:
 *
 *     physnode_map[0-3] = 0;
 *     physnode_map[4-7] = 1;
 *     physnode_map[8- ] = -1;
 */
s8 physnode_map[MAX_ELEMENTS] = { [0 ... (MAX_ELEMENTS - 1)] = -1};

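/*
 * For orientation, a minimal sketch of the consumer side of this table:
 * the pfn-to-node lookup simply indexes physnode_map at 256Mb granularity,
 * mirroring the pfn_to_nid() helper in <asm/mmzone.h>.  The function name
 * below is purely illustrative and not part of this file's interface.
 */
static inline int pfn_to_nid_sketch(unsigned long pfn)
{
        return (int)physnode_map[pfn / PAGES_PER_ELEMENT];
}
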
void memory_present(int nid, unsigned long start, unsigned long end)
{
        unsigned long pfn;

        printk(KERN_INFO "Node: %d, start_pfn: %ld, end_pfn: %ld\n",
                        nid, start, end);
        printk(KERN_DEBUG " Setting physnode_map array to node %d for pfns:\n", nid);
        printk(KERN_DEBUG " ");
        for (pfn = start; pfn < end; pfn += PAGES_PER_ELEMENT) {
                physnode_map[pfn / PAGES_PER_ELEMENT] = nid;
                printk("%ld ", pfn);
        }
        printk("\n");
}

unsigned long node_memmap_size_bytes(int nid, unsigned long start_pfn,
                                     unsigned long end_pfn)
{
        unsigned long nr_pages = end_pfn - start_pfn;

        if (!nr_pages)
                return 0;

        return (nr_pages + 1) * sizeof(struct page);
}

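/*
 * Rough scale of what the helper above reports (illustrative figures only,
 * assuming 4k pages and a struct page of roughly 32 bytes on a typical
 * configuration): a 256Mb node spans 65536 pages, so its mem_map needs
 * about 65536 * 32 bytes = 2Mb, which is why the KVA remap areas below
 * are sized and rounded in pmd-size chunks.
 */
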
unsigned long node_start_pfn[MAX_NUMNODES];
unsigned long node_end_pfn[MAX_NUMNODES];

extern unsigned long find_max_low_pfn(void);
extern void find_max_pfn(void);
extern void one_highpage_init(struct page *, int, int);

extern struct e820map e820;
extern unsigned long init_pg_tables_end;
extern unsigned long highend_pfn, highstart_pfn;
extern unsigned long max_low_pfn;
extern unsigned long totalram_pages;
extern unsigned long totalhigh_pages;

#define LARGE_PAGE_BYTES (PTRS_PER_PTE * PAGE_SIZE)

unsigned long node_remap_start_pfn[MAX_NUMNODES];
unsigned long node_remap_size[MAX_NUMNODES];
unsigned long node_remap_offset[MAX_NUMNODES];
void *node_remap_start_vaddr[MAX_NUMNODES];
void set_pmd_pfn(unsigned long vaddr, unsigned long pfn, pgprot_t flags);

void *node_remap_end_vaddr[MAX_NUMNODES];
void *node_remap_alloc_vaddr[MAX_NUMNODES];

/*
 * FLAT - support for basic PC memory model with discontig enabled, essentially
 *        a single node with all available processors in it with a flat
 *        memory map.
 */
int __init get_memcfg_numa_flat(void)
{
        printk("NUMA - single node, flat memory mode\n");

        /* Run the memory configuration and find the top of memory. */
        find_max_pfn();
        node_start_pfn[0] = 0;
        node_end_pfn[0] = max_pfn;
        memory_present(0, 0, max_pfn);

        /* Indicate there is one node available. */
        nodes_clear(node_online_map);
        node_set_online(0);
        return 1;
}

/*
 * Find the highest page frame number we have available for the node
 */
static void __init find_max_pfn_node(int nid)
{
        if (node_end_pfn[nid] > max_pfn)
                node_end_pfn[nid] = max_pfn;
        /*
         * if a user has given mem=XXXX, then we need to make sure
         * that the node _starts_ before that, too, not just ends
         */
        if (node_start_pfn[nid] > max_pfn)
                node_start_pfn[nid] = max_pfn;
        if (node_start_pfn[nid] > node_end_pfn[nid])
                BUG();
}

/* Find the owning node for a pfn. */
int early_pfn_to_nid(unsigned long pfn)
{
        int nid;

        for_each_node(nid) {
                if (node_end_pfn[nid] == 0)
                        break;
                if (node_start_pfn[nid] <= pfn && node_end_pfn[nid] >= pfn)
                        return nid;
        }

        return 0;
}

/*
 * Allocate memory for the pg_data_t for this node via a crude pre-bootmem
 * method.  For node zero take this from the bottom of memory, for
 * subsequent nodes place them at node_remap_start_vaddr which contains
 * node local data in physically node local memory.  See setup_memory()
 * for details.
 */
static void __init allocate_pgdat(int nid)
{
        if (nid && node_has_online_mem(nid))
                NODE_DATA(nid) = (pg_data_t *)node_remap_start_vaddr[nid];
        else {
                NODE_DATA(nid) = (pg_data_t *)(__va(min_low_pfn << PAGE_SHIFT));
                min_low_pfn += PFN_UP(sizeof(pg_data_t));
        }
}

void *alloc_remap(int nid, unsigned long size)
{
        void *allocation = node_remap_alloc_vaddr[nid];

        size = ALIGN(size, L1_CACHE_BYTES);

        if (!allocation || (allocation + size) >= node_remap_end_vaddr[nid])
                return 0;

        node_remap_alloc_vaddr[nid] += size;
        memset(allocation, 0, size);

        return allocation;
}

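/*
 * A minimal usage sketch of the allocator above (the caller shape is
 * assumed for illustration, not taken from this file): per-node users try
 * the node-local remap window first and fall back to bootmem when the
 * window is exhausted or was never set up for that node.
 */
static inline void *alloc_node_local_sketch(int nid, unsigned long size)
{
        void *ptr = alloc_remap(nid, size);     /* node-local KVA window */

        if (!ptr)                               /* window full or absent */
                ptr = alloc_bootmem_node(NODE_DATA(nid), size);
        return ptr;
}
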
void __init remap_numa_kva(void)
{
        void *vaddr;
        unsigned long pfn;
        int node;

        for_each_online_node(node) {
                for (pfn=0; pfn < node_remap_size[node]; pfn += PTRS_PER_PTE) {
                        vaddr = node_remap_start_vaddr[node]+(pfn<<PAGE_SHIFT);
                        set_pmd_pfn((ulong) vaddr,
                                node_remap_start_pfn[node] + pfn,
                                PAGE_KERNEL_LARGE);
                }
        }
}

static unsigned long calculate_numa_remap_pages(void)
{
        int nid;
        unsigned long size, reserve_pages = 0;
        unsigned long pfn;

        for_each_online_node(nid) {
                /*
                 * The acpi/srat node info can show hot-add memory zones
                 * where memory could be added but not currently present.
                 */
                if (node_start_pfn[nid] > max_pfn)
                        continue;
                if (node_end_pfn[nid] > max_pfn)
                        node_end_pfn[nid] = max_pfn;

                /* ensure the remap includes space for the pgdat. */
                size = node_remap_size[nid] + sizeof(pg_data_t);

                /* convert size to large (pmd size) pages, rounding up */
                size = (size + LARGE_PAGE_BYTES - 1) / LARGE_PAGE_BYTES;
                /* now the roundup is correct, convert to PAGE_SIZE pages */
                size = size * PTRS_PER_PTE;
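                /*
                 * Worked example (illustrative numbers only): without PAE,
                 * PTRS_PER_PTE is 1024, so LARGE_PAGE_BYTES is 4Mb; a node
                 * needing, say, 2.5Mb of mem_map plus pg_data_t rounds up
                 * to one 4Mb chunk, i.e. 1024 4k pages of remap space.
                 */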

                /*
                 * Validate the region we are allocating only contains valid
                 * pages.
                 */
                for (pfn = node_end_pfn[nid] - size;
                     pfn < node_end_pfn[nid]; pfn++)
                        if (!page_is_ram(pfn))
                                break;

                if (pfn != node_end_pfn[nid])
                        size = 0;

                printk("Reserving %ld pages of KVA for lmem_map of node %d\n",
                                size, nid);
                node_remap_size[nid] = size;
                node_remap_offset[nid] = reserve_pages;
                reserve_pages += size;
                printk("Shrinking node %d from %ld pages to %ld pages\n",
                        nid, node_end_pfn[nid], node_end_pfn[nid] - size);
                node_end_pfn[nid] -= size;
                node_remap_start_pfn[nid] = node_end_pfn[nid];
        }
        printk("Reserving total of %ld pages for numa KVA remap\n",
                        reserve_pages);
        return reserve_pages;
}

extern void setup_bootmem_allocator(void);
unsigned long __init setup_memory(void)
{
        int nid;
        unsigned long system_start_pfn, system_max_low_pfn;
        unsigned long reserve_pages;

        /*
         * When mapping a NUMA machine we allocate the node_mem_map arrays
         * from node local memory.  They are then mapped directly into KVA
         * between zone normal and vmalloc space.  Calculate the size of
         * this space and use it to adjust the boundary between ZONE_NORMAL
         * and ZONE_HIGHMEM.
         */
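        /*
         * Rough picture of the resulting kernel virtual layout (illustrative
         * only; exact boundaries depend on the machine and on reserve_pages):
         *
         *   direct-mapped lowmem ...... up to max_low_pfn, shrunk by
         *                               reserve_pages
         *   per-node remap windows .... node_remap_size[] pages each, holding
         *                               the node's pg_data_t and mem_map
         *   highmem / vmalloc space ... above the remap windows
         */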
        find_max_pfn();
        get_memcfg_numa();

        reserve_pages = calculate_numa_remap_pages();

        /* partially used pages are not usable - thus round upwards */
        system_start_pfn = min_low_pfn = PFN_UP(init_pg_tables_end);

        system_max_low_pfn = max_low_pfn = find_max_low_pfn() - reserve_pages;
        printk("reserve_pages = %ld find_max_low_pfn() ~ %ld\n",
                        reserve_pages, max_low_pfn + reserve_pages);
        printk("max_pfn = %ld\n", max_pfn);
#ifdef CONFIG_HIGHMEM
        highstart_pfn = highend_pfn = max_pfn;
        if (max_pfn > system_max_low_pfn)
                highstart_pfn = system_max_low_pfn;
        printk(KERN_NOTICE "%ldMB HIGHMEM available.\n",
               pages_to_mb(highend_pfn - highstart_pfn));
#endif
        printk(KERN_NOTICE "%ldMB LOWMEM available.\n",
                        pages_to_mb(system_max_low_pfn));
        printk("min_low_pfn = %ld, max_low_pfn = %ld, highstart_pfn = %ld\n",
                        min_low_pfn, max_low_pfn, highstart_pfn);

        printk("Low memory ends at vaddr %08lx\n",
                        (ulong) pfn_to_kaddr(max_low_pfn));
        for_each_online_node(nid) {
                node_remap_start_vaddr[nid] = pfn_to_kaddr(
                                highstart_pfn + node_remap_offset[nid]);
                /* Init the node remap allocator */
                node_remap_end_vaddr[nid] = node_remap_start_vaddr[nid] +
                        (node_remap_size[nid] * PAGE_SIZE);
                node_remap_alloc_vaddr[nid] = node_remap_start_vaddr[nid] +
                        ALIGN(sizeof(pg_data_t), PAGE_SIZE);

                allocate_pgdat(nid);
                printk("node %d will remap to vaddr %08lx - %08lx\n", nid,
                        (ulong) node_remap_start_vaddr[nid],
                        (ulong) pfn_to_kaddr(highstart_pfn
                                + node_remap_offset[nid] + node_remap_size[nid]));
        }
        printk("High memory starts at vaddr %08lx\n",
                        (ulong) pfn_to_kaddr(highstart_pfn));
        vmalloc_earlyreserve = reserve_pages * PAGE_SIZE;
        for_each_online_node(nid)
                find_max_pfn_node(nid);

        memset(NODE_DATA(0), 0, sizeof(struct pglist_data));
        NODE_DATA(0)->bdata = &node0_bdata;
        setup_bootmem_allocator();
        return max_low_pfn;
}

void __init zone_sizes_init(void)
{
        int nid;

        /*
         * Insert nodes into pgdat_list backward so they appear in order.
         * Clobber node 0's links and NULL out pgdat_list before starting.
         */
        pgdat_list = NULL;
        for (nid = MAX_NUMNODES - 1; nid >= 0; nid--) {
                if (!node_online(nid))
                        continue;
                NODE_DATA(nid)->pgdat_next = pgdat_list;
                pgdat_list = NODE_DATA(nid);
        }

        for_each_online_node(nid) {
                unsigned long zones_size[MAX_NR_ZONES] = {0, 0, 0};
                unsigned long *zholes_size;
                unsigned int max_dma;

                unsigned long low = max_low_pfn;
                unsigned long start = node_start_pfn[nid];
                unsigned long high = node_end_pfn[nid];

                max_dma = virt_to_phys((char *)MAX_DMA_ADDRESS) >> PAGE_SHIFT;

                if (node_has_online_mem(nid)) {
                        if (start > low) {
#ifdef CONFIG_HIGHMEM
                                BUG_ON(start > high);
                                zones_size[ZONE_HIGHMEM] = high - start;
#endif
                        } else {
                                if (low < max_dma)
                                        zones_size[ZONE_DMA] = low;
                                else {
                                        BUG_ON(max_dma > low);
                                        BUG_ON(low > high);
                                        zones_size[ZONE_DMA] = max_dma;
                                        zones_size[ZONE_NORMAL] = low - max_dma;
#ifdef CONFIG_HIGHMEM
                                        zones_size[ZONE_HIGHMEM] = high - low;
#endif
                                }
                        }
                }

                zholes_size = get_zholes_size(nid);

                free_area_init_node(nid, NODE_DATA(nid), zones_size, start,
                                zholes_size);
        }
        return;
}

void __init set_highmem_pages_init(int bad_ppro)
{
#ifdef CONFIG_HIGHMEM
        struct zone *zone;

        for_each_zone(zone) {
                unsigned long node_pfn, node_high_size, zone_start_pfn;
                struct page *zone_mem_map;

                if (!is_highmem(zone))
                        continue;

                printk("Initializing %s for node %d\n", zone->name,
                        zone->zone_pgdat->node_id);

                node_high_size = zone->spanned_pages;
                zone_mem_map = zone->zone_mem_map;
                zone_start_pfn = zone->zone_start_pfn;

                for (node_pfn = 0; node_pfn < node_high_size; node_pfn++) {
                        one_highpage_init((struct page *)(zone_mem_map + node_pfn),
                                          zone_start_pfn + node_pfn, bad_ppro);
                }
        }
        totalram_pages += totalhigh_pages;
#endif
}