Commit | Line | Data |
---|---|---|
1da177e4 LT |
1 | /* |
2 | * Written by Pat Gaughen (gone@us.ibm.com) Mar 2002 | |
3 | * | |
4 | */ | |
5 | ||
6 | #ifndef _ASM_MMZONE_H_ | |
7 | #define _ASM_MMZONE_H_ | |
8 | ||
9 | #include <asm/smp.h> | |
10 | ||
8ff8b27b | 11 | #ifdef CONFIG_NUMA |
05b79bdc AW |
12 | extern struct pglist_data *node_data[]; |
13 | #define NODE_DATA(nid) (node_data[nid]) | |
1da177e4 | 14 | |
8ff8b27b DJ |
15 | #ifdef CONFIG_X86_NUMAQ |
16 | #include <asm/numaq.h> | |
17 | #else /* summit or generic arch */ | |
18 | #include <asm/srat.h> | |
19 | #endif | |
1da177e4 | 20 | |
05b79bdc AW |
extern int get_memcfg_numa_flat(void );
/*
 * This allows any one NUMA architecture to be compiled
 * for, and still fall back to the flat function if it
 * fails.
 */
static inline void get_memcfg_numa(void)
{
#ifdef CONFIG_X86_NUMAQ
	/* NUMA-Q: memory layout comes from the quad hardware description. */
	if (get_memcfg_numaq())
		return;
#elif defined(CONFIG_ACPI_SRAT)
	/* Summit/generic arch: memory layout comes from the ACPI SRAT table. */
	if (get_memcfg_from_srat())
		return;
#endif

	/* Arch-specific probe failed (or none configured): flat single node. */
	get_memcfg_numa_flat();
}
39 | ||
e8af300c DJ |
40 | extern int early_pfn_to_nid(unsigned long pfn); |
41 | ||
8ff8b27b DJ |
42 | #else /* !CONFIG_NUMA */ |
43 | #define get_memcfg_numa get_memcfg_numa_flat | |
44 | #define get_zholes_size(n) (0) | |
05b79bdc AW |
45 | #endif /* CONFIG_NUMA */ |
46 | ||
47 | #ifdef CONFIG_DISCONTIGMEM | |
1da177e4 LT |
48 | |
49 | /* | |
50 | * generic node memory support, the following assumptions apply: | |
51 | * | |
52 | * 1) memory comes in 256Mb contigious chunks which are either present or not | |
53 | * 2) we will not have more than 64Gb in total | |
54 | * | |
55 | * for now assume that 64Gb is max amount of RAM for whole system | |
56 | * 64Gb / 4096bytes/page = 16777216 pages | |
57 | */ | |
58 | #define MAX_NR_PAGES 16777216 | |
59 | #define MAX_ELEMENTS 256 | |
60 | #define PAGES_PER_ELEMENT (MAX_NR_PAGES/MAX_ELEMENTS) | |
61 | ||
62 | extern s8 physnode_map[]; | |
63 | ||
/*
 * Map a page frame number to the node that owns it.  Under NUMA the
 * lookup goes through the physnode_map table (one entry per 256MB
 * chunk of physical memory); without NUMA everything is node 0.
 */
static inline int pfn_to_nid(unsigned long pfn)
{
#ifdef CONFIG_NUMA
	unsigned long element = pfn / PAGES_PER_ELEMENT;

	return (int)physnode_map[element];
#else
	return 0;
#endif
}
72 | ||
/* Offset of a pfn within its owning node's mem_map array. */
#define node_localnr(pfn, nid)	((pfn) - node_data[nid]->node_start_pfn)

/*
 * Following are macros that each numa implementation must define.
 */

/* First pfn spanned by a node. */
#define node_start_pfn(nid)	(NODE_DATA(nid)->node_start_pfn)
/* One past the last pfn spanned by a node (GCC statement expression). */
#define node_end_pfn(nid)						\
({									\
	pg_data_t *__pgdat = NODE_DATA(nid);				\
	__pgdat->node_start_pfn + __pgdat->node_spanned_pages;		\
})
85 | ||
1da177e4 LT |
/* XXX: FIXME -- wli */
#define kern_addr_valid(kaddr)	(0)

/* pfn -> struct page: find the owning node, then index its mem_map by
 * the pfn's node-local offset.  Statement expression so pfn is
 * evaluated only once. */
#define pfn_to_page(pfn)						\
({									\
	unsigned long __pfn = pfn;					\
	int __node  = pfn_to_nid(__pfn);				\
	&NODE_DATA(__node)->node_mem_map[node_localnr(__pfn,__node)];	\
})

/* struct page -> pfn: offset of the page within its zone's mem_map
 * plus the zone's starting pfn. */
#define page_to_pfn(pg)							\
({									\
	struct page *__page = pg;					\
	struct zone *__zone = page_zone(__page);			\
	(unsigned long)(__page - __zone->zone_mem_map)			\
		+ __zone->zone_start_pfn;				\
})
103 | ||
104 | #ifdef CONFIG_X86_NUMAQ /* we have contiguous memory on NUMA-Q */ | |
105 | #define pfn_valid(pfn) ((pfn) < num_physpages) | |
106 | #else | |
/*
 * A pfn is valid when its node lookup succeeds (physnode_map yields a
 * real node rather than -1) and the pfn lies below that node's end.
 *
 * Take the pfn as unsigned long, consistent with pfn_to_nid() and the
 * other pfn interfaces in this file: the original `int pfn` compared a
 * signed int against node_end_pfn()'s unsigned long and could not
 * safely represent the full PAE pfn range documented above.
 */
static inline int pfn_valid(unsigned long pfn)
{
	int nid = pfn_to_nid(pfn);

	if (nid >= 0)
		return (pfn < node_end_pfn(nid));
	return 0;
}
05b79bdc AW |
115 | #endif /* CONFIG_X86_NUMAQ */ |
116 | ||
117 | #endif /* CONFIG_DISCONTIGMEM */ | |
118 | ||
#ifdef CONFIG_NEED_MULTIPLE_NODES

/*
 * Following are macros that are specific to this numa platform.
 *
 * With multiple nodes the generic bootmem interfaces are redirected to
 * node 0's bootmem data; the *_node variants likewise ignore the pgdat
 * argument they are handed and always allocate from node 0.
 */
#define reserve_bootmem(addr, size) \
	reserve_bootmem_node(NODE_DATA(0), (addr), (size))
#define alloc_bootmem(x) \
	__alloc_bootmem_node(NODE_DATA(0), (x), SMP_CACHE_BYTES, __pa(MAX_DMA_ADDRESS))
#define alloc_bootmem_low(x) \
	__alloc_bootmem_node(NODE_DATA(0), (x), SMP_CACHE_BYTES, 0)
#define alloc_bootmem_pages(x) \
	__alloc_bootmem_node(NODE_DATA(0), (x), PAGE_SIZE, __pa(MAX_DMA_ADDRESS))
#define alloc_bootmem_low_pages(x) \
	__alloc_bootmem_node(NODE_DATA(0), (x), PAGE_SIZE, 0)
#define alloc_bootmem_node(ignore, x) \
	__alloc_bootmem_node(NODE_DATA(0), (x), SMP_CACHE_BYTES, __pa(MAX_DMA_ADDRESS))
#define alloc_bootmem_pages_node(ignore, x) \
	__alloc_bootmem_node(NODE_DATA(0), (x), PAGE_SIZE, __pa(MAX_DMA_ADDRESS))
#define alloc_bootmem_low_pages_node(ignore, x) \
	__alloc_bootmem_node(NODE_DATA(0), (x), PAGE_SIZE, 0)

#endif /* CONFIG_NEED_MULTIPLE_NODES */
b159d43f | 142 | |
1da177e4 | 143 | #endif /* _ASM_MMZONE_H_ */ |