/*
 * sparse memory mappings.
 */
#include <linux/mm.h>
#include <linux/slab.h>
#include <linux/mmzone.h>
#include <linux/bootmem.h>
#include <linux/highmem.h>
#include <linux/export.h>
#include <linux/spinlock.h>
#include <linux/vmalloc.h>
#include "internal.h"
#include <asm/dma.h>
#include <asm/pgalloc.h>
#include <asm/pgtable.h>

/*
 * Permanent SPARSEMEM data:
 *
 * 1) mem_section	- memory sections, mem_map's for valid memory
 */
#ifdef CONFIG_SPARSEMEM_EXTREME
struct mem_section *mem_section[NR_SECTION_ROOTS]
	____cacheline_internodealigned_in_smp;
#else
struct mem_section mem_section[NR_SECTION_ROOTS][SECTIONS_PER_ROOT]
	____cacheline_internodealigned_in_smp;
#endif
EXPORT_SYMBOL(mem_section);

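/*
 * A section number is split into a root index and an offset within that
 * root: mem_section[SECTION_NR_TO_ROOT(nr)] covers SECTIONS_PER_ROOT
 * consecutive sections.  With SPARSEMEM_EXTREME the roots are pointers
 * filled in on demand (see sparse_index_init() below); in the flat case
 * the whole table is a static two-dimensional array.
 */
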
#ifdef NODE_NOT_IN_PAGE_FLAGS
/*
 * If we did not store the node number in the page then we have to
 * do a lookup in the section_to_node_table in order to find which
 * node the page belongs to.
 */
#if MAX_NUMNODES <= 256
static u8 section_to_node_table[NR_MEM_SECTIONS] __cacheline_aligned;
#else
static u16 section_to_node_table[NR_MEM_SECTIONS] __cacheline_aligned;
#endif

int page_to_nid(const struct page *page)
{
	return section_to_node_table[page_to_section(page)];
}
EXPORT_SYMBOL(page_to_nid);

static void set_section_nid(unsigned long section_nr, int nid)
{
	section_to_node_table[section_nr] = nid;
}
#else /* !NODE_NOT_IN_PAGE_FLAGS */
static inline void set_section_nid(unsigned long section_nr, int nid)
{
}
#endif

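/*
 * With SPARSEMEM_EXTREME the second level of mem_section[] is allocated
 * lazily, one root (SECTIONS_PER_ROOT entries) at a time, so only roots
 * that cover present memory consume space.
 */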
#ifdef CONFIG_SPARSEMEM_EXTREME
static struct mem_section noinline __init_refok *sparse_index_alloc(int nid)
{
	struct mem_section *section = NULL;
	unsigned long array_size = SECTIONS_PER_ROOT *
				   sizeof(struct mem_section);

	if (slab_is_available()) {
		if (node_state(nid, N_HIGH_MEMORY))
			section = kzalloc_node(array_size, GFP_KERNEL, nid);
		else
			section = kzalloc(array_size, GFP_KERNEL);
	} else {
		section = alloc_bootmem_node(NODE_DATA(nid), array_size);
	}

	return section;
}

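/*
 * Note that sparse_index_init() allocates before taking the lock, so if
 * two callers race on the same root the loser's allocation is leaked
 * and -EEXIST is returned, which callers treat as success.
 */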
static int __meminit sparse_index_init(unsigned long section_nr, int nid)
{
	static DEFINE_SPINLOCK(index_init_lock);
	unsigned long root = SECTION_NR_TO_ROOT(section_nr);
	struct mem_section *section;
	int ret = 0;

	if (mem_section[root])
		return -EEXIST;

	section = sparse_index_alloc(nid);
	if (!section)
		return -ENOMEM;
	/*
	 * This lock keeps two different sections from
	 * reallocating for the same index
	 */
	spin_lock(&index_init_lock);

	if (mem_section[root]) {
		ret = -EEXIST;
		goto out;
	}

	mem_section[root] = section;
out:
	spin_unlock(&index_init_lock);
	return ret;
}
#else /* !SPARSEMEM_EXTREME */
static inline int sparse_index_init(unsigned long section_nr, int nid)
{
	return 0;
}
#endif

/*
 * Although written for the SPARSEMEM_EXTREME case, this happens
 * to also work for the flat array case because
 * NR_SECTION_ROOTS==NR_MEM_SECTIONS.
 */
int __section_nr(struct mem_section* ms)
{
	unsigned long root_nr;
	struct mem_section* root;

	for (root_nr = 0; root_nr < NR_SECTION_ROOTS; root_nr++) {
		root = __nr_to_section(root_nr * SECTIONS_PER_ROOT);
		if (!root)
			continue;

		if ((ms >= root) && (ms < (root + SECTIONS_PER_ROOT)))
			break;
	}

	return (root_nr * SECTIONS_PER_ROOT) + (ms - root);
}

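/*
 * __section_nr() is the inverse of __nr_to_section(): for any section
 * whose root is allocated, __section_nr(__nr_to_section(nr)) == nr.
 * Under SPARSEMEM_EXTREME this costs a linear scan over the roots.
 */
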
/*
 * During early boot, before section_mem_map is used for an actual
 * mem_map, we use section_mem_map to store the section's NUMA
 * node. This keeps us from having to use another data structure. The
 * node information is cleared just before we store the real mem_map.
 */
static inline unsigned long sparse_encode_early_nid(int nid)
{
	return (nid << SECTION_NID_SHIFT);
}

static inline int sparse_early_nid(struct mem_section *section)
{
	return (section->section_mem_map >> SECTION_NID_SHIFT);
}

/* Validate the physical addressing limitations of the model */
void __meminit mminit_validate_memmodel_limits(unsigned long *start_pfn,
						unsigned long *end_pfn)
{
	unsigned long max_sparsemem_pfn = 1UL << (MAX_PHYSMEM_BITS-PAGE_SHIFT);

	/*
	 * Sanity checks - do not allow an architecture to pass
	 * in larger pfns than the maximum scope of sparsemem:
	 */
	if (*start_pfn > max_sparsemem_pfn) {
		mminit_dprintk(MMINIT_WARNING, "pfnvalidation",
			"Start of range %lu -> %lu exceeds SPARSEMEM max %lu\n",
			*start_pfn, *end_pfn, max_sparsemem_pfn);
		WARN_ON_ONCE(1);
		*start_pfn = max_sparsemem_pfn;
		*end_pfn = max_sparsemem_pfn;
	} else if (*end_pfn > max_sparsemem_pfn) {
		mminit_dprintk(MMINIT_WARNING, "pfnvalidation",
			"End of range %lu -> %lu exceeds SPARSEMEM max %lu\n",
			*start_pfn, *end_pfn, max_sparsemem_pfn);
		WARN_ON_ONCE(1);
		*end_pfn = max_sparsemem_pfn;
	}
}

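/*
 * For example, assuming x86_64's MAX_PHYSMEM_BITS of 46 and a 4K page
 * (PAGE_SHIFT of 12), max_sparsemem_pfn above is 1UL << 34, i.e. 64TB
 * of physical address space; anything beyond that is clamped.
 */
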
/* Record a memory area against a node. */
void __init memory_present(int nid, unsigned long start, unsigned long end)
{
	unsigned long pfn;

	start &= PAGE_SECTION_MASK;
	mminit_validate_memmodel_limits(&start, &end);
	for (pfn = start; pfn < end; pfn += PAGES_PER_SECTION) {
		unsigned long section = pfn_to_section_nr(pfn);
		struct mem_section *ms;

		sparse_index_init(section, nid);
		set_section_nid(section, nid);

		ms = __nr_to_section(section);
		if (!ms->section_mem_map)
			ms->section_mem_map = sparse_encode_early_nid(nid) |
							SECTION_MARKED_PRESENT;
	}
}

/*
 * Only used by the i386 NUMA architectures, but relatively
 * generic code.
 */
unsigned long __init node_memmap_size_bytes(int nid, unsigned long start_pfn,
						  unsigned long end_pfn)
{
	unsigned long pfn;
	unsigned long nr_pages = 0;

	mminit_validate_memmodel_limits(&start_pfn, &end_pfn);
	for (pfn = start_pfn; pfn < end_pfn; pfn += PAGES_PER_SECTION) {
		if (nid != early_pfn_to_nid(pfn))
			continue;

		if (pfn_present(pfn))
			nr_pages += PAGES_PER_SECTION;
	}

	return nr_pages * sizeof(struct page);
}

/*
 * Subtle, we encode the real pfn into the mem_map such that
 * the identity pfn - section_mem_map will return the actual
 * physical page frame number.
 */
static unsigned long sparse_encode_mem_map(struct page *mem_map, unsigned long pnum)
{
	return (unsigned long)(mem_map - (section_nr_to_pfn(pnum)));
}

/*
 * Decode mem_map from the coded memmap
 */
struct page *sparse_decode_mem_map(unsigned long coded_mem_map, unsigned long pnum)
{
	/* mask off the extra low bits of information */
	coded_mem_map &= SECTION_MAP_MASK;
	return ((struct page *)coded_mem_map) + section_nr_to_pfn(pnum);
}

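/*
 * The round trip is exact:
 *   sparse_decode_mem_map(sparse_encode_mem_map(map, pnum), pnum) == map
 * and for any pfn inside section pnum, the encoded value plus pfn
 * points at that pfn's struct page.
 */
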
static int __meminit sparse_init_one_section(struct mem_section *ms,
		unsigned long pnum, struct page *mem_map,
		unsigned long *pageblock_bitmap)
{
	if (!present_section(ms))
		return -EINVAL;

	ms->section_mem_map &= ~SECTION_MAP_MASK;
	ms->section_mem_map |= sparse_encode_mem_map(mem_map, pnum) |
							SECTION_HAS_MEM_MAP;
	ms->pageblock_flags = pageblock_bitmap;

	return 1;
}

unsigned long usemap_size(void)
{
	unsigned long size_bytes;
	size_bytes = roundup(SECTION_BLOCKFLAGS_BITS, 8) / 8;
	size_bytes = roundup(size_bytes, sizeof(unsigned long));
	return size_bytes;
}

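/*
 * usemap_size() rounds the per-section pageblock flag bits up to whole
 * bytes and then to an unsigned long boundary.  The result is tiny (24
 * bytes on x86_64, as the comment in sparse_init() notes), which is why
 * usemaps are batched per node below instead of being allocated one
 * page each.
 */
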
#ifdef CONFIG_MEMORY_HOTPLUG
static unsigned long *__kmalloc_section_usemap(void)
{
	return kmalloc(usemap_size(), GFP_KERNEL);
}
#endif /* CONFIG_MEMORY_HOTPLUG */

#ifdef CONFIG_MEMORY_HOTREMOVE
static unsigned long * __init
sparse_early_usemaps_alloc_pgdat_section(struct pglist_data *pgdat,
					 unsigned long size)
{
	unsigned long goal, limit;
	unsigned long *p;
	int nid;
	/*
	 * A page may contain usemaps for other sections preventing the
	 * page being freed and making a section unremovable while
	 * other sections referencing the usemap remain active. Similarly,
	 * a pgdat can prevent a section being removed. If section A
	 * contains a pgdat and section B contains the usemap, both
	 * sections become inter-dependent. This allocates usemaps
	 * from the same section as the pgdat where possible to avoid
	 * this problem.
	 */
	goal = __pa(pgdat) & (PAGE_SECTION_MASK << PAGE_SHIFT);
	limit = goal + (1UL << PA_SECTION_SHIFT);
	nid = early_pfn_to_nid(goal >> PAGE_SHIFT);
again:
	p = ___alloc_bootmem_node_nopanic(NODE_DATA(nid), size,
					  SMP_CACHE_BYTES, goal, limit);
	if (!p && limit) {
		limit = 0;
		goto again;
	}
	return p;
}

static void __init check_usemap_section_nr(int nid, unsigned long *usemap)
{
	unsigned long usemap_snr, pgdat_snr;
	static unsigned long old_usemap_snr = NR_MEM_SECTIONS;
	static unsigned long old_pgdat_snr = NR_MEM_SECTIONS;
	struct pglist_data *pgdat = NODE_DATA(nid);
	int usemap_nid;

	usemap_snr = pfn_to_section_nr(__pa(usemap) >> PAGE_SHIFT);
	pgdat_snr = pfn_to_section_nr(__pa(pgdat) >> PAGE_SHIFT);
	if (usemap_snr == pgdat_snr)
		return;

	if (old_usemap_snr == usemap_snr && old_pgdat_snr == pgdat_snr)
		/* skip redundant message */
		return;

	old_usemap_snr = usemap_snr;
	old_pgdat_snr = pgdat_snr;

	usemap_nid = sparse_early_nid(__nr_to_section(usemap_snr));
	if (usemap_nid != nid) {
		printk(KERN_INFO
		       "node %d must be removed before remove section %ld\n",
		       nid, usemap_snr);
		return;
	}
	/*
	 * There is a circular dependency. Some platforms allow
	 * un-removable sections because they will just gather other
	 * removable sections for dynamic partitioning. Just notify the
	 * un-removable section's number here.
	 */
	printk(KERN_INFO "Section %ld and %ld (node %d)", usemap_snr,
	       pgdat_snr, nid);
	printk(KERN_CONT
	       " have a circular dependency on usemap and pgdat allocations\n");
}
#else
static unsigned long * __init
sparse_early_usemaps_alloc_pgdat_section(struct pglist_data *pgdat,
					 unsigned long size)
{
	return alloc_bootmem_node_nopanic(pgdat, size);
}

static void __init check_usemap_section_nr(int nid, unsigned long *usemap)
{
}
#endif /* CONFIG_MEMORY_HOTREMOVE */

static void __init sparse_early_usemaps_alloc_node(unsigned long **usemap_map,
				 unsigned long pnum_begin,
				 unsigned long pnum_end,
				 unsigned long usemap_count, int nodeid)
{
	void *usemap;
	unsigned long pnum;
	int size = usemap_size();

	usemap = sparse_early_usemaps_alloc_pgdat_section(NODE_DATA(nodeid),
							  size * usemap_count);
	if (!usemap) {
		printk(KERN_WARNING "%s: allocation failed\n", __func__);
		return;
	}

	for (pnum = pnum_begin; pnum < pnum_end; pnum++) {
		if (!present_section_nr(pnum))
			continue;
		usemap_map[pnum] = usemap;
		usemap += size;
		check_usemap_section_nr(nodeid, usemap_map[pnum]);
	}
}

#ifndef CONFIG_SPARSEMEM_VMEMMAP
struct page __init *sparse_mem_map_populate(unsigned long pnum, int nid)
{
	struct page *map;
	unsigned long size;

	map = alloc_remap(nid, sizeof(struct page) * PAGES_PER_SECTION);
	if (map)
		return map;

	size = PAGE_ALIGN(sizeof(struct page) * PAGES_PER_SECTION);
	map = __alloc_bootmem_node_high(NODE_DATA(nid), size,
					 PAGE_SIZE, __pa(MAX_DMA_ADDRESS));
	return map;
}
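/*
 * Like sparse_mem_map_populate(), but for a whole node at once: try
 * alloc_remap(), then a single bootmem allocation covering every
 * present section, and finally fall back to per-section allocation
 * for whatever is left.
 */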
void __init sparse_mem_maps_populate_node(struct page **map_map,
					  unsigned long pnum_begin,
					  unsigned long pnum_end,
					  unsigned long map_count, int nodeid)
{
	void *map;
	unsigned long pnum;
	unsigned long size = sizeof(struct page) * PAGES_PER_SECTION;

	map = alloc_remap(nodeid, size * map_count);
	if (map) {
		for (pnum = pnum_begin; pnum < pnum_end; pnum++) {
			if (!present_section_nr(pnum))
				continue;
			map_map[pnum] = map;
			map += size;
		}
		return;
	}

	size = PAGE_ALIGN(size);
	map = __alloc_bootmem_node_high(NODE_DATA(nodeid), size * map_count,
					 PAGE_SIZE, __pa(MAX_DMA_ADDRESS));
	if (map) {
		for (pnum = pnum_begin; pnum < pnum_end; pnum++) {
			if (!present_section_nr(pnum))
				continue;
			map_map[pnum] = map;
			map += size;
		}
		return;
	}

	/* fallback */
	for (pnum = pnum_begin; pnum < pnum_end; pnum++) {
		struct mem_section *ms;

		if (!present_section_nr(pnum))
			continue;
		map_map[pnum] = sparse_mem_map_populate(pnum, nodeid);
		if (map_map[pnum])
			continue;
		ms = __nr_to_section(pnum);
		printk(KERN_ERR "%s: sparsemem memory map backing failed "
			"some memory will not be available.\n", __func__);
		ms->section_mem_map = 0;
	}
}
#endif /* !CONFIG_SPARSEMEM_VMEMMAP */

#ifdef CONFIG_SPARSEMEM_ALLOC_MEM_MAP_TOGETHER
static void __init sparse_early_mem_maps_alloc_node(struct page **map_map,
				 unsigned long pnum_begin,
				 unsigned long pnum_end,
				 unsigned long map_count, int nodeid)
{
	sparse_mem_maps_populate_node(map_map, pnum_begin, pnum_end,
					 map_count, nodeid);
}
#else
static struct page __init *sparse_early_mem_map_alloc(unsigned long pnum)
{
	struct page *map;
	struct mem_section *ms = __nr_to_section(pnum);
	int nid = sparse_early_nid(ms);

	map = sparse_mem_map_populate(pnum, nid);
	if (map)
		return map;

	printk(KERN_ERR "%s: sparsemem memory map backing failed "
			"some memory will not be available.\n", __func__);
	ms->section_mem_map = 0;
	return NULL;
}
#endif

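/*
 * vmemmap_populate_print_last() is a weak no-op; architectures that
 * batch their "populated range" printouts override it so the final
 * message can be flushed once sparse_init() is done calling into them.
 */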
void __attribute__((weak)) __meminit vmemmap_populate_print_last(void)
{
}

/*
 * Allocate the accumulated non-linear sections, allocate a mem_map
 * for each and record the physical to section mapping.
 */
void __init sparse_init(void)
{
	unsigned long pnum;
	struct page *map;
	unsigned long *usemap;
	unsigned long **usemap_map;
	int size;
	int nodeid_begin = 0;
	unsigned long pnum_begin = 0;
	unsigned long usemap_count;
#ifdef CONFIG_SPARSEMEM_ALLOC_MEM_MAP_TOGETHER
	unsigned long map_count;
	int size2;
	struct page **map_map;
#endif

	/* Setup pageblock_order for HUGETLB_PAGE_SIZE_VARIABLE */
	set_pageblock_order();

	/*
	 * Each mem_map is allocated with a big page (2M on x86_64) while
	 * each usemap is far smaller than a page (24 bytes).  Allocating
	 * a 2M-aligned map and then 24 bytes of usemap in turn pushes
	 * the next map one more 2M along, so on a big system memory
	 * ends up riddled with holes.  Instead, try to allocate the 2M
	 * maps contiguously.  usemap_map is allocated first because
	 * powerpc needs to call sparse_init_one_section() right after
	 * each sparse_early_mem_map_alloc().
	 */
	size = sizeof(unsigned long *) * NR_MEM_SECTIONS;
	usemap_map = alloc_bootmem(size);
	if (!usemap_map)
		panic("can not allocate usemap_map\n");

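	/*
	 * Walk the sections to find, for each node, its run of present
	 * sections, then allocate that node's usemaps in one batch.
	 */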
	for (pnum = 0; pnum < NR_MEM_SECTIONS; pnum++) {
		struct mem_section *ms;

		if (!present_section_nr(pnum))
			continue;
		ms = __nr_to_section(pnum);
		nodeid_begin = sparse_early_nid(ms);
		pnum_begin = pnum;
		break;
	}
	usemap_count = 1;
	for (pnum = pnum_begin + 1; pnum < NR_MEM_SECTIONS; pnum++) {
		struct mem_section *ms;
		int nodeid;

		if (!present_section_nr(pnum))
			continue;
		ms = __nr_to_section(pnum);
		nodeid = sparse_early_nid(ms);
		if (nodeid == nodeid_begin) {
			usemap_count++;
			continue;
		}
		/* ok, we need to take care of sections from pnum_begin to pnum - 1 */
		sparse_early_usemaps_alloc_node(usemap_map, pnum_begin, pnum,
						 usemap_count, nodeid_begin);
		/* new start, update count etc. */
		nodeid_begin = nodeid;
		pnum_begin = pnum;
		usemap_count = 1;
	}
	/* ok, last chunk */
	sparse_early_usemaps_alloc_node(usemap_map, pnum_begin, NR_MEM_SECTIONS,
					 usemap_count, nodeid_begin);

#ifdef CONFIG_SPARSEMEM_ALLOC_MEM_MAP_TOGETHER
	size2 = sizeof(struct page *) * NR_MEM_SECTIONS;
	map_map = alloc_bootmem(size2);
	if (!map_map)
		panic("can not allocate map_map\n");

	for (pnum = 0; pnum < NR_MEM_SECTIONS; pnum++) {
		struct mem_section *ms;

		if (!present_section_nr(pnum))
			continue;
		ms = __nr_to_section(pnum);
		nodeid_begin = sparse_early_nid(ms);
		pnum_begin = pnum;
		break;
	}
	map_count = 1;
	for (pnum = pnum_begin + 1; pnum < NR_MEM_SECTIONS; pnum++) {
		struct mem_section *ms;
		int nodeid;

		if (!present_section_nr(pnum))
			continue;
		ms = __nr_to_section(pnum);
		nodeid = sparse_early_nid(ms);
		if (nodeid == nodeid_begin) {
			map_count++;
			continue;
		}
		/* ok, we need to take care of sections from pnum_begin to pnum - 1 */
		sparse_early_mem_maps_alloc_node(map_map, pnum_begin, pnum,
						 map_count, nodeid_begin);
		/* new start, update count etc. */
		nodeid_begin = nodeid;
		pnum_begin = pnum;
		map_count = 1;
	}
	/* ok, last chunk */
	sparse_early_mem_maps_alloc_node(map_map, pnum_begin, NR_MEM_SECTIONS,
					 map_count, nodeid_begin);
#endif

	for (pnum = 0; pnum < NR_MEM_SECTIONS; pnum++) {
		if (!present_section_nr(pnum))
			continue;

		usemap = usemap_map[pnum];
		if (!usemap)
			continue;

#ifdef CONFIG_SPARSEMEM_ALLOC_MEM_MAP_TOGETHER
		map = map_map[pnum];
#else
		map = sparse_early_mem_map_alloc(pnum);
#endif
		if (!map)
			continue;

		sparse_init_one_section(__nr_to_section(pnum), pnum, map,
					usemap);
	}

	vmemmap_populate_print_last();

#ifdef CONFIG_SPARSEMEM_ALLOC_MEM_MAP_TOGETHER
	free_bootmem(__pa(map_map), size2);
#endif
	free_bootmem(__pa(usemap_map), size);
}

#ifdef CONFIG_MEMORY_HOTPLUG
#ifdef CONFIG_SPARSEMEM_VMEMMAP
static inline struct page *kmalloc_section_memmap(unsigned long pnum, int nid,
						  unsigned long nr_pages)
{
	/* This will make the necessary allocations eventually. */
	return sparse_mem_map_populate(pnum, nid);
}
static void __kfree_section_memmap(struct page *memmap, unsigned long nr_pages)
{
	return; /* XXX: Not implemented yet */
}
static void free_map_bootmem(struct page *page, unsigned long nr_pages)
{
}
#else
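/*
 * Hotplug-time memmap allocation: prefer physically contiguous pages,
 * falling back to vmalloc() when the high-order allocation fails
 * (hence the is_vmalloc_addr() check when freeing below).
 */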
static struct page *__kmalloc_section_memmap(unsigned long nr_pages)
{
	struct page *page, *ret;
	unsigned long memmap_size = sizeof(struct page) * nr_pages;

	page = alloc_pages(GFP_KERNEL|__GFP_NOWARN, get_order(memmap_size));
	if (page)
		goto got_map_page;

	ret = vmalloc(memmap_size);
	if (ret)
		goto got_map_ptr;

	return NULL;
got_map_page:
	ret = (struct page *)pfn_to_kaddr(page_to_pfn(page));
got_map_ptr:
	memset(ret, 0, memmap_size);

	return ret;
}

static inline struct page *kmalloc_section_memmap(unsigned long pnum, int nid,
						  unsigned long nr_pages)
{
	return __kmalloc_section_memmap(nr_pages);
}

static void __kfree_section_memmap(struct page *memmap, unsigned long nr_pages)
{
	if (is_vmalloc_addr(memmap))
		vfree(memmap);
	else
		free_pages((unsigned long)memmap,
			   get_order(sizeof(struct page) * nr_pages));
}

static void free_map_bootmem(struct page *page, unsigned long nr_pages)
{
	unsigned long maps_section_nr, removing_section_nr, i;
	unsigned long magic;

	for (i = 0; i < nr_pages; i++, page++) {
		magic = (unsigned long) page->lru.next;

		BUG_ON(magic == NODE_INFO);

		maps_section_nr = pfn_to_section_nr(page_to_pfn(page));
		removing_section_nr = page->private;

		/*
		 * When this function is called, the removing section is
		 * in a logically offlined state, meaning all of its pages
		 * are isolated from the page allocator. If the removing
		 * section's memmap is placed on the same section, it must
		 * not be freed: the page allocator could otherwise hand
		 * it out again, only for it to be removed physically
		 * soon after.
		 */
		if (maps_section_nr != removing_section_nr)
			put_page_bootmem(page);
	}
}
#endif /* CONFIG_SPARSEMEM_VMEMMAP */

static void free_section_usemap(struct page *memmap, unsigned long *usemap)
{
	struct page *usemap_page;
	unsigned long nr_pages;

	if (!usemap)
		return;

	usemap_page = virt_to_page(usemap);
	/*
	 * Check to see if allocation came from hot-plug-add
	 */
	if (PageSlab(usemap_page)) {
		kfree(usemap);
		if (memmap)
			__kfree_section_memmap(memmap, PAGES_PER_SECTION);
		return;
	}

	/*
	 * The usemap came from bootmem. This is packed with other usemaps
	 * on the section which has pgdat at boot time. Just keep it as is now.
	 */

	if (memmap) {
		struct page *memmap_page;
		memmap_page = virt_to_page(memmap);

		nr_pages = PAGE_ALIGN(PAGES_PER_SECTION * sizeof(struct page))
			>> PAGE_SHIFT;

		free_map_bootmem(memmap_page, nr_pages);
	}
}

/*
 * Returns the number of sections whose mem_maps were properly
 * set. If this is <= 0, then the passed-in map was not
 * consumed and must be freed.
 */
int __meminit sparse_add_one_section(struct zone *zone, unsigned long start_pfn,
				     int nr_pages)
{
	unsigned long section_nr = pfn_to_section_nr(start_pfn);
	struct pglist_data *pgdat = zone->zone_pgdat;
	struct mem_section *ms;
	struct page *memmap;
	unsigned long *usemap;
	unsigned long flags;
	int ret;

	/*
	 * No locking here: sparse_index_init() does its own locking,
	 * and it does a kmalloc that may sleep, so it cannot run under
	 * the resize lock.
	 */
	ret = sparse_index_init(section_nr, pgdat->node_id);
	if (ret < 0 && ret != -EEXIST)
		return ret;
	memmap = kmalloc_section_memmap(section_nr, pgdat->node_id, nr_pages);
	if (!memmap)
		return -ENOMEM;
	usemap = __kmalloc_section_usemap();
	if (!usemap) {
		__kfree_section_memmap(memmap, nr_pages);
		return -ENOMEM;
	}

	pgdat_resize_lock(pgdat, &flags);

	ms = __pfn_to_section(start_pfn);
	if (ms->section_mem_map & SECTION_MARKED_PRESENT) {
		ret = -EEXIST;
		goto out;
	}

	ms->section_mem_map |= SECTION_MARKED_PRESENT;

	ret = sparse_init_one_section(ms, section_nr, memmap, usemap);

out:
	pgdat_resize_unlock(pgdat, &flags);
	if (ret <= 0) {
		kfree(usemap);
		__kfree_section_memmap(memmap, nr_pages);
	}
	return ret;
}

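/*
 * Tear down one section: clear its mem_map and usemap pointers, then
 * free them.  free_section_usemap() distinguishes hotplug-time (slab)
 * allocations from boot-time bootmem ones.
 */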
void sparse_remove_one_section(struct zone *zone, struct mem_section *ms)
{
	struct page *memmap = NULL;
	unsigned long *usemap = NULL;

	if (ms->section_mem_map) {
		usemap = ms->pageblock_flags;
		memmap = sparse_decode_mem_map(ms->section_mem_map,
						__section_nr(ms));
		ms->section_mem_map = 0;
		ms->pageblock_flags = NULL;
	}

	free_section_usemap(memmap, usemap);
}
#endif