/*
 * arch/sh/mm/numa.c - Multiple node support for SH machines
 *
 * Copyright (C) 2007 Paul Mundt
 *
 * This file is subject to the terms and conditions of the GNU General Public
 * License. See the file "COPYING" in the main directory of this archive
 * for more details.
 */
#include <linux/module.h>
#include <linux/bootmem.h>
#include <linux/lmb.h>
#include <linux/mm.h>
#include <linux/numa.h>
#include <linux/pfn.h>
#include <asm/sections.h>

struct pglist_data *node_data[MAX_NUMNODES] __read_mostly;
EXPORT_SYMBOL_GPL(node_data);

/*
 * On SH machines the conventional approach is to stash system RAM
 * in node 0, and other memory blocks into node 1 and up, ordered by
 * latency. Each node's pgdat is node-local at the beginning of the node,
 * immediately followed by the node mem map.
 */
void __init setup_memory(void)
{
        unsigned long free_pfn = PFN_UP(__pa(_end));
        u64 base = min_low_pfn << PAGE_SHIFT;
        u64 size = (max_low_pfn << PAGE_SHIFT) - base;

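        /* Hand all of low memory over to LMB as the initial region */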
        lmb_add(base, size);

        /* Reserve the LMB regions used by the kernel, initrd, etc.. */
        lmb_reserve(__MEMORY_START + CONFIG_ZERO_PAGE_OFFSET,
                    (PFN_PHYS(free_pfn) + PAGE_SIZE - 1) -
                    (__MEMORY_START + CONFIG_ZERO_PAGE_OFFSET));

        /*
         * Reserve physical pages below CONFIG_ZERO_PAGE_OFFSET.
         */
        if (CONFIG_ZERO_PAGE_OFFSET != 0)
                lmb_reserve(__MEMORY_START, CONFIG_ZERO_PAGE_OFFSET);

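        /* Finalize the LMB accounting and dump the region layout for debugging */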
        lmb_analyze();
        lmb_dump_all();

        /*
         * Node 0 sets up its pgdat at the first available pfn,
         * and bumps it up before setting up the bootmem allocator.
         */
        NODE_DATA(0) = pfn_to_kaddr(free_pfn);
        memset(NODE_DATA(0), 0, sizeof(struct pglist_data));
        free_pfn += PFN_UP(sizeof(struct pglist_data));
        NODE_DATA(0)->bdata = &bootmem_node_data[0];

        /* Set up node 0 */
        setup_bootmem_allocator(free_pfn);

        /* Give the platforms a chance to hook up their nodes */
        plat_mem_setup();
}

void __init setup_bootmem_node(int nid, unsigned long start, unsigned long end)
{
        unsigned long bootmap_pages;
        unsigned long start_pfn, end_pfn;
        unsigned long bootmem_paddr;

        /* Don't allow bogus node assignment */
        BUG_ON(nid >= MAX_NUMNODES || nid <= 0);

        start_pfn = start >> PAGE_SHIFT;
        end_pfn = end >> PAGE_SHIFT;

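        /* Bolt a kernel mapping over the new range so it is directly addressable */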
        pmb_bolt_mapping((unsigned long)__va(start), start, end - start,
                         PAGE_KERNEL);

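        /* Register the node's memory with LMB and the early active range map */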
        lmb_add(start, end - start);

        __add_active_range(nid, start_pfn, end_pfn);

        /* Node-local pgdat */
        NODE_DATA(nid) = __va(lmb_alloc_base(sizeof(struct pglist_data),
                                             SMP_CACHE_BYTES, end));
        memset(NODE_DATA(nid), 0, sizeof(struct pglist_data));

        NODE_DATA(nid)->bdata = &bootmem_node_data[nid];
        NODE_DATA(nid)->node_start_pfn = start_pfn;
        NODE_DATA(nid)->node_spanned_pages = end_pfn - start_pfn;

        /* Node-local bootmap */
        bootmap_pages = bootmem_bootmap_pages(end_pfn - start_pfn);
        bootmem_paddr = lmb_alloc_base(bootmap_pages << PAGE_SHIFT,
                                       PAGE_SIZE, end);
        init_bootmem_node(NODE_DATA(nid), bootmem_paddr >> PAGE_SHIFT,
                          start_pfn, end_pfn);

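        /* Release the node's usable pages to its bootmem allocator */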
        free_bootmem_with_active_regions(nid, end_pfn);

        /* Reserve the pgdat and bootmap space with the bootmem allocator */
        reserve_bootmem_node(NODE_DATA(nid), start_pfn << PAGE_SHIFT,
                             sizeof(struct pglist_data), BOOTMEM_DEFAULT);
        reserve_bootmem_node(NODE_DATA(nid), bootmem_paddr,
                             bootmap_pages << PAGE_SHIFT, BOOTMEM_DEFAULT);

        /* It's up */
        node_set_online(nid);

        /* Kick sparsemem */
        sparse_memory_present_with_active_regions(nid);
}
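
/*
 * Example: how a platform might hook up a secondary memory node.  This is
 * a hypothetical sketch only, not part of this file: real boards provide
 * their own plat_mem_setup() elsewhere and call setup_bootmem_node() with
 * their board-specific block addresses.  The node id and physical range
 * below are made-up values for illustration.
 */
#if 0
void __init plat_mem_setup(void)
{
        /*
         * Register a hypothetical 64MB block of slower off-chip RAM at
         * physical 0x18000000 as node 1.  setup_bootmem_node() bolts a
         * mapping over it, hands it to LMB, sets up the node-local
         * pgdat and bootmap, and marks the node online.
         */
        setup_bootmem_node(1, 0x18000000, 0x18000000 + (64 << 20));
}
#endif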