/*
 * sparse memory mappings.
 */
#include <linux/config.h>
#include <linux/mm.h>
#include <linux/mmzone.h>
#include <linux/bootmem.h>
#include <linux/module.h>
#include <linux/spinlock.h>
#include <asm/dma.h>

/*
 * Permanent SPARSEMEM data:
 *
 * 1) mem_section - memory sections, mem_map's for valid memory
 */
#ifdef CONFIG_SPARSEMEM_EXTREME
struct mem_section *mem_section[NR_SECTION_ROOTS]
        ____cacheline_maxaligned_in_smp;
#else
struct mem_section mem_section[NR_SECTION_ROOTS][SECTIONS_PER_ROOT]
        ____cacheline_maxaligned_in_smp;
#endif
EXPORT_SYMBOL(mem_section);
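
/*
 * Aside (not part of the original file): a minimal sketch of how a
 * section number indexes into the structure above.  Under
 * SPARSEMEM_EXTREME, mem_section is an array of root pointers, each
 * covering SECTIONS_PER_ROOT sections and allocated only on demand; in
 * the flat case SECTIONS_PER_ROOT is 1 and the same arithmetic
 * degenerates to a direct index.  The helper name is hypothetical; the
 * real lookup is __nr_to_section() in linux/mmzone.h.
 */
#if 0
static struct mem_section *example_nr_to_section(unsigned long nr)
{
        unsigned long root = SECTION_NR_TO_ROOT(nr); /* nr / SECTIONS_PER_ROOT */

        if (!mem_section[root])                      /* root never allocated */
                return NULL;
        return &mem_section[root][nr & SECTION_ROOT_MASK];
}
#endif
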
#ifdef CONFIG_SPARSEMEM_EXTREME
static struct mem_section *sparse_index_alloc(int nid)
{
        struct mem_section *section = NULL;
        unsigned long array_size = SECTIONS_PER_ROOT *
                                   sizeof(struct mem_section);

        section = alloc_bootmem_node(NODE_DATA(nid), array_size);

        if (section)
                memset(section, 0, array_size);

        return section;
}

static int sparse_index_init(unsigned long section_nr, int nid)
{
        static spinlock_t index_init_lock = SPIN_LOCK_UNLOCKED;
        unsigned long root = SECTION_NR_TO_ROOT(section_nr);
        struct mem_section *section;
        int ret = 0;

        if (mem_section[root])
                return -EEXIST;

        section = sparse_index_alloc(nid);
        /*
         * This lock keeps two concurrent callers from installing
         * separate allocations for the same root index.
         */
        spin_lock(&index_init_lock);

        if (mem_section[root]) {
                ret = -EEXIST;
                goto out;
        }

        mem_section[root] = section;
out:
        spin_unlock(&index_init_lock);
        return ret;
}
#else /* !SPARSEMEM_EXTREME */
static inline int sparse_index_init(unsigned long section_nr, int nid)
{
        return 0;
}
#endif
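
/*
 * Aside (not part of the original file): sparse_index_init() above uses
 * a check / allocate / lock / re-check sequence so that the potentially
 * slow bootmem allocation happens outside the spinlock.  A stripped-down
 * sketch of the same pattern, with hypothetical names:
 */
#if 0
static void *slot;      /* shared; starts out NULL */
static spinlock_t slot_lock = SPIN_LOCK_UNLOCKED;

static int slot_init(void)
{
        void *new;

        if (slot)                       /* unlocked fast path */
                return -EEXIST;
        new = slot_alloc();             /* hypothetical allocator; may be slow */
        spin_lock(&slot_lock);
        if (slot) {                     /* re-check: another caller may have won */
                spin_unlock(&slot_lock);
                return -EEXIST;
        }
        slot = new;
        spin_unlock(&slot_lock);
        return 0;
}
#endif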

/*
 * Although written for the SPARSEMEM_EXTREME case, this happens
 * to also work for the flat array case because
 * NR_SECTION_ROOTS==NR_MEM_SECTIONS.
 */
int __section_nr(struct mem_section* ms)
{
        unsigned long root_nr;
        struct mem_section* root;

        for (root_nr = 0; root_nr < NR_SECTION_ROOTS; root_nr++) {
                root = __nr_to_section(root_nr * SECTIONS_PER_ROOT);

                if (!root)
                        continue;

                if ((ms >= root) && (ms < (root + SECTIONS_PER_ROOT)))
                        break;
        }

        return (root_nr * SECTIONS_PER_ROOT) + (ms - root);
}
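
/*
 * Aside (not part of the original file): __section_nr() is the inverse
 * of __nr_to_section(), so for any present section the two compose to
 * the identity.  A hypothetical sanity check:
 */
#if 0
static void example_section_nr_roundtrip(unsigned long pnum)
{
        struct mem_section *ms = __nr_to_section(pnum);

        if (ms)         /* only meaningful if the root is allocated */
                BUG_ON(__section_nr(ms) != pnum);
}
#endif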

/* Record a memory area against a node. */
void memory_present(int nid, unsigned long start, unsigned long end)
{
        unsigned long pfn;

        start &= PAGE_SECTION_MASK;
        for (pfn = start; pfn < end; pfn += PAGES_PER_SECTION) {
                unsigned long section = pfn_to_section_nr(pfn);
                struct mem_section *ms;

                sparse_index_init(section, nid);

                ms = __nr_to_section(section);
                if (!ms->section_mem_map)
                        ms->section_mem_map = SECTION_MARKED_PRESENT;
        }
}

/*
 * Only used by the i386 NUMA architectures, but relatively
 * generic code.
 */
unsigned long __init node_memmap_size_bytes(int nid, unsigned long start_pfn,
                                            unsigned long end_pfn)
{
        unsigned long pfn;
        unsigned long nr_pages = 0;

        for (pfn = start_pfn; pfn < end_pfn; pfn += PAGES_PER_SECTION) {
                if (nid != early_pfn_to_nid(pfn))
                        continue;

                if (pfn_valid(pfn))
                        nr_pages += PAGES_PER_SECTION;
        }

        return nr_pages * sizeof(struct page);
}
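
/*
 * Aside (not part of the original file): a hypothetical caller, sketching
 * how boot code might size a per-node mem_map reservation.  The pfn range
 * and the use made of the result are made up for illustration.
 */
#if 0
static void __init example_reserve_node_memmap(int nid)
{
        /* bytes of struct page backing needed for pfns [0x10000, 0x20000) */
        unsigned long bytes = node_memmap_size_bytes(nid, 0x10000, 0x20000);

        /* ... reserve 'bytes' of node-local memory for the mem_map ... */
}
#endif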

/*
 * Subtle: we encode the section's start pfn into its mem_map pointer.
 * Treating the stored value as a struct page pointer,
 * 'section_mem_map + pfn' is then the struct page for that pfn, and
 * 'page - section_mem_map' recovers a page's actual physical page
 * frame number.
 */
static unsigned long sparse_encode_mem_map(struct page *mem_map, unsigned long pnum)
{
        return (unsigned long)(mem_map - (section_nr_to_pfn(pnum)));
}

/*
 * We need this if we ever free the mem_maps.  While not implemented yet,
 * this function is included for parity with its sibling.
 */
static __attribute__((unused))
struct page *sparse_decode_mem_map(unsigned long coded_mem_map, unsigned long pnum)
{
        return ((struct page *)coded_mem_map) + section_nr_to_pfn(pnum);
}
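
/*
 * Aside (not part of the original file): a hypothetical round-trip check
 * for the encode/decode pair above.  Any pointer value works, since both
 * directions are pure arithmetic.
 */
#if 0
static void example_encode_decode_roundtrip(void)
{
        struct page *map = (struct page *)PAGE_OFFSET;  /* arbitrary address */
        unsigned long pnum = 1;
        unsigned long coded = sparse_encode_mem_map(map, pnum);

        /* decoding must return exactly the pointer that was encoded */
        BUG_ON(sparse_decode_mem_map(coded, pnum) != map);
}
#endif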

static int sparse_init_one_section(struct mem_section *ms,
                unsigned long pnum, struct page *mem_map)
{
        if (!valid_section(ms))
                return -EINVAL;

        ms->section_mem_map |= sparse_encode_mem_map(mem_map, pnum);

        return 1;
}

static struct page *sparse_early_mem_map_alloc(unsigned long pnum)
{
        struct page *map;
        int nid = early_pfn_to_nid(section_nr_to_pfn(pnum));
        struct mem_section *ms = __nr_to_section(pnum);

        map = alloc_remap(nid, sizeof(struct page) * PAGES_PER_SECTION);
        if (map)
                return map;

        map = alloc_bootmem_node(NODE_DATA(nid),
                        sizeof(struct page) * PAGES_PER_SECTION);
        if (map)
                return map;

        printk(KERN_WARNING "%s: allocation failed\n", __FUNCTION__);
        ms->section_mem_map = 0;
        return NULL;
}

/*
 * Allocate the accumulated non-linear sections, allocate a mem_map
 * for each and record the physical to section mapping.
 */
void sparse_init(void)
{
        unsigned long pnum;
        struct page *map;

        for (pnum = 0; pnum < NR_MEM_SECTIONS; pnum++) {
                if (!valid_section_nr(pnum))
                        continue;

                map = sparse_early_mem_map_alloc(pnum);
                if (!map)
                        continue;
                sparse_init_one_section(__nr_to_section(pnum), pnum, map);
        }
}
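
/*
 * Aside (not part of the original file): the expected boot-time calling
 * sequence, sketched with a made-up single-node layout.  Architecture
 * code first reports every present range with memory_present(), then a
 * single sparse_init() call allocates the mem_maps.
 */
#if 0
static void __init example_sparse_boot(void)
{
        /* node 0: pfns [0, 0x80000), i.e. 2GB with 4K pages */
        memory_present(0, 0, 0x80000);

        sparse_init();
}
#endif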

/*
 * returns the number of sections whose mem_maps were properly
 * set.  If this is <=0, then that means that the passed-in
 * map was not consumed and must be freed.
 */
int sparse_add_one_section(unsigned long start_pfn, int nr_pages, struct page *map)
{
        struct mem_section *ms = __pfn_to_section(start_pfn);

        if (ms->section_mem_map & SECTION_MARKED_PRESENT)
                return -EEXIST;

        ms->section_mem_map |= SECTION_MARKED_PRESENT;

        return sparse_init_one_section(ms, pfn_to_section_nr(start_pfn), map);
}
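
/*
 * Aside (not part of the original file): a hypothetical memory-hotplug
 * caller.  The new section's mem_map is allocated up front and, per the
 * comment above, must be freed again if sparse_add_one_section() does
 * not consume it (return value <= 0).  The allocation and free helpers
 * here are placeholders.
 */
#if 0
static int example_hotadd_section(unsigned long start_pfn)
{
        struct page *map = alloc_section_memmap();      /* hypothetical */
        int ret;

        if (!map)
                return -ENOMEM;

        ret = sparse_add_one_section(start_pfn, PAGES_PER_SECTION, map);
        if (ret <= 0)
                free_section_memmap(map);               /* hypothetical */

        return ret;
}
#endif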
225 | } |