/*
 * sparse memory mappings.
 */
#include <linux/mm.h>
#include <linux/slab.h>
#include <linux/mmzone.h>
#include <linux/bootmem.h>
#include <linux/compiler.h>
#include <linux/highmem.h>
#include <linux/export.h>
#include <linux/spinlock.h>
#include <linux/vmalloc.h>

#include "internal.h"
#include <asm/dma.h>
#include <asm/pgalloc.h>
#include <asm/pgtable.h>

/*
 * Permanent SPARSEMEM data:
 *
 * 1) mem_section	- memory sections, mem_map's for valid memory
 */
#ifdef CONFIG_SPARSEMEM_EXTREME
struct mem_section *mem_section[NR_SECTION_ROOTS]
	____cacheline_internodealigned_in_smp;
#else
struct mem_section mem_section[NR_SECTION_ROOTS][SECTIONS_PER_ROOT]
	____cacheline_internodealigned_in_smp;
#endif
EXPORT_SYMBOL(mem_section);

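/*
 * Illustrative sketch (editor's note, not part of the original file):
 * with SPARSEMEM_EXTREME the table above is two-level, so looking up
 * section nr walks root then slot, essentially what the
 * __nr_to_section() helper in include/linux/mmzone.h does:
 *
 *	if (!mem_section[SECTION_NR_TO_ROOT(nr)])
 *		return NULL;
 *	return &mem_section[SECTION_NR_TO_ROOT(nr)][nr & SECTION_ROOT_MASK];
 */
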
#ifdef NODE_NOT_IN_PAGE_FLAGS
/*
 * If we did not store the node number in the page then we have to
 * do a lookup in the section_to_node_table in order to find which
 * node the page belongs to.
 */
#if MAX_NUMNODES <= 256
static u8 section_to_node_table[NR_MEM_SECTIONS] __cacheline_aligned;
#else
static u16 section_to_node_table[NR_MEM_SECTIONS] __cacheline_aligned;
#endif

int page_to_nid(const struct page *page)
{
	return section_to_node_table[page_to_section(page)];
}
EXPORT_SYMBOL(page_to_nid);

static void set_section_nid(unsigned long section_nr, int nid)
{
	section_to_node_table[section_nr] = nid;
}
#else /* !NODE_NOT_IN_PAGE_FLAGS */
static inline void set_section_nid(unsigned long section_nr, int nid)
{
}
#endif

#ifdef CONFIG_SPARSEMEM_EXTREME
static noinline struct mem_section __ref *sparse_index_alloc(int nid)
{
	struct mem_section *section = NULL;
	unsigned long array_size = SECTIONS_PER_ROOT *
				   sizeof(struct mem_section);

	if (slab_is_available()) {
		if (node_state(nid, N_HIGH_MEMORY))
			section = kzalloc_node(array_size, GFP_KERNEL, nid);
		else
			section = kzalloc(array_size, GFP_KERNEL);
	} else {
		section = memblock_virt_alloc_node(array_size, nid);
	}

	return section;
}

static int __meminit sparse_index_init(unsigned long section_nr, int nid)
{
	unsigned long root = SECTION_NR_TO_ROOT(section_nr);
	struct mem_section *section;

	if (mem_section[root])
		return -EEXIST;

	section = sparse_index_alloc(nid);
	if (!section)
		return -ENOMEM;

	mem_section[root] = section;

	return 0;
}
#else /* !SPARSEMEM_EXTREME */
static inline int sparse_index_init(unsigned long section_nr, int nid)
{
	return 0;
}
#endif

#ifdef CONFIG_SPARSEMEM_EXTREME
int __section_nr(struct mem_section* ms)
{
	unsigned long root_nr;
	struct mem_section* root;

	for (root_nr = 0; root_nr < NR_SECTION_ROOTS; root_nr++) {
		root = __nr_to_section(root_nr * SECTIONS_PER_ROOT);
		if (!root)
			continue;

		if ((ms >= root) && (ms < (root + SECTIONS_PER_ROOT)))
			break;
	}

	VM_BUG_ON(root_nr == NR_SECTION_ROOTS);

	return (root_nr * SECTIONS_PER_ROOT) + (ms - root);
}
#else
int __section_nr(struct mem_section* ms)
{
	return (int)(ms - mem_section[0]);
}
#endif

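/*
 * Editor's note: the SPARSEMEM_EXTREME variant above must scan the
 * roots to find which dynamically allocated block contains *ms, an
 * O(NR_SECTION_ROOTS) reverse lookup; the flat variant is plain
 * pointer arithmetic into the static array.
 */
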
/*
 * During early boot, before section_mem_map is used for an actual
 * mem_map, we use section_mem_map to store the section's NUMA
 * node. This keeps us from having to use another data structure. The
 * node information is cleared just before we store the real mem_map.
 */
static inline unsigned long sparse_encode_early_nid(int nid)
{
	return (nid << SECTION_NID_SHIFT);
}

static inline int sparse_early_nid(struct mem_section *section)
{
	return (section->section_mem_map >> SECTION_NID_SHIFT);
}

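/*
 * Example (editor's illustration): after memory_present() below runs,
 * a present section on node 2 holds
 * (2 << SECTION_NID_SHIFT) | SECTION_MARKED_PRESENT in section_mem_map.
 * sparse_init_one_section() later clears everything above the flag
 * bits (ms->section_mem_map &= ~SECTION_MAP_MASK), wiping the nid,
 * before OR-ing in the encoded mem_map pointer.
 */
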
/* Validate the physical addressing limitations of the model */
void __meminit mminit_validate_memmodel_limits(unsigned long *start_pfn,
						unsigned long *end_pfn)
{
	unsigned long max_sparsemem_pfn = 1UL << (MAX_PHYSMEM_BITS-PAGE_SHIFT);

	/*
	 * Sanity checks - do not allow an architecture to pass
	 * in larger pfns than the maximum scope of sparsemem:
	 */
	if (*start_pfn > max_sparsemem_pfn) {
		mminit_dprintk(MMINIT_WARNING, "pfnvalidation",
			"Start of range %lu -> %lu exceeds SPARSEMEM max %lu\n",
			*start_pfn, *end_pfn, max_sparsemem_pfn);
		WARN_ON_ONCE(1);
		*start_pfn = max_sparsemem_pfn;
		*end_pfn = max_sparsemem_pfn;
	} else if (*end_pfn > max_sparsemem_pfn) {
		mminit_dprintk(MMINIT_WARNING, "pfnvalidation",
			"End of range %lu -> %lu exceeds SPARSEMEM max %lu\n",
			*start_pfn, *end_pfn, max_sparsemem_pfn);
		WARN_ON_ONCE(1);
		*end_pfn = max_sparsemem_pfn;
	}
}

/*
 * There are a number of times that we loop over NR_MEM_SECTIONS,
 * looking for section_present() on each. But, when we have very
 * large physical address spaces, NR_MEM_SECTIONS can also be
 * very large which makes the loops quite long.
 *
 * Keeping track of this gives us an easy way to break out of
 * those loops early.
 */
int __highest_present_section_nr;
static void section_mark_present(struct mem_section *ms)
{
	int section_nr = __section_nr(ms);

	if (section_nr > __highest_present_section_nr)
		__highest_present_section_nr = section_nr;

	ms->section_mem_map |= SECTION_MARKED_PRESENT;
}

static inline int next_present_section_nr(int section_nr)
{
	do {
		section_nr++;
		if (present_section_nr(section_nr))
			return section_nr;
	} while ((section_nr < NR_MEM_SECTIONS) &&
		 (section_nr <= __highest_present_section_nr));

	return -1;
}
#define for_each_present_section_nr(start, section_nr)		\
	for (section_nr = next_present_section_nr(start-1);	\
	     ((section_nr >= 0) &&				\
	      (section_nr < NR_MEM_SECTIONS) &&			\
	      (section_nr <= __highest_present_section_nr));	\
	     section_nr = next_present_section_nr(section_nr))

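/*
 * Usage sketch (editor's note): callers such as sparse_init() below
 * use the macro to visit only the sections that memory_present()
 * marked, instead of all NR_MEM_SECTIONS, e.g.
 *
 *	unsigned long pnum;
 *
 *	for_each_present_section_nr(0, pnum)
 *		do_something(__nr_to_section(pnum));
 *
 * where do_something() is a placeholder, not a real helper.
 */
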
/* Record a memory area against a node. */
void __init memory_present(int nid, unsigned long start, unsigned long end)
{
	unsigned long pfn;

	start &= PAGE_SECTION_MASK;
	mminit_validate_memmodel_limits(&start, &end);
	for (pfn = start; pfn < end; pfn += PAGES_PER_SECTION) {
		unsigned long section = pfn_to_section_nr(pfn);
		struct mem_section *ms;

		sparse_index_init(section, nid);
		set_section_nid(section, nid);

		ms = __nr_to_section(section);
		if (!ms->section_mem_map) {
			ms->section_mem_map = sparse_encode_early_nid(nid);
			section_mark_present(ms);
		}
	}
}

/*
 * Only used by the i386 NUMA architectures, but relatively
 * generic code.
 */
unsigned long __init node_memmap_size_bytes(int nid, unsigned long start_pfn,
						     unsigned long end_pfn)
{
	unsigned long pfn;
	unsigned long nr_pages = 0;

	mminit_validate_memmodel_limits(&start_pfn, &end_pfn);
	for (pfn = start_pfn; pfn < end_pfn; pfn += PAGES_PER_SECTION) {
		if (nid != early_pfn_to_nid(pfn))
			continue;

		if (pfn_present(pfn))
			nr_pages += PAGES_PER_SECTION;
	}

	return nr_pages * sizeof(struct page);
}

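/*
 * Example (editor's note, assuming PAGES_PER_SECTION == 32768 and a
 * 64-byte struct page, as on a common x86_64 config): a node with two
 * present sections reports 2 * 32768 * 64 bytes == 4 MiB of memmap.
 */
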
/*
 * Subtle: we encode the real pfn into the mem_map such that the
 * identity (page - section_mem_map) returns that page's actual
 * physical page frame number.
 */
static unsigned long sparse_encode_mem_map(struct page *mem_map, unsigned long pnum)
{
	return (unsigned long)(mem_map - (section_nr_to_pfn(pnum)));
}

/*
 * Decode mem_map from the coded memmap
 */
struct page *sparse_decode_mem_map(unsigned long coded_mem_map, unsigned long pnum)
{
	/* mask off the extra low bits of information */
	coded_mem_map &= SECTION_MAP_MASK;
	return ((struct page *)coded_mem_map) + section_nr_to_pfn(pnum);
}

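/*
 * Worked example (editor's note): if section pnum starts at pfn P and
 * its mem_map lives at virtual base M, the encoded value is M - P (in
 * struct page units). pfn_to_page(pfn) then reduces to encoded_base +
 * pfn and page_to_pfn(page) to page - encoded_base; decode masks the
 * flag bits off and adds P back to recover M.
 */
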
static int __meminit sparse_init_one_section(struct mem_section *ms,
		unsigned long pnum, struct page *mem_map,
		unsigned long *pageblock_bitmap)
{
	if (!present_section(ms))
		return -EINVAL;

	ms->section_mem_map &= ~SECTION_MAP_MASK;
	ms->section_mem_map |= sparse_encode_mem_map(mem_map, pnum) |
							SECTION_HAS_MEM_MAP;
	ms->pageblock_flags = pageblock_bitmap;

	return 1;
}

unsigned long usemap_size(void)
{
	return BITS_TO_LONGS(SECTION_BLOCKFLAGS_BITS) * sizeof(unsigned long);
}

#ifdef CONFIG_MEMORY_HOTPLUG
static unsigned long *__kmalloc_section_usemap(void)
{
	return kmalloc(usemap_size(), GFP_KERNEL);
}
#endif /* CONFIG_MEMORY_HOTPLUG */

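/*
 * Sizing example (editor's note, assuming a common x86_64 config with
 * 128 MiB sections, 2 MiB pageblocks and NR_PAGEBLOCK_BITS == 4):
 * 64 pageblocks * 4 bits = 256 bits, so usemap_size() is 4 longs,
 * i.e. 32 bytes per section -- far smaller than a page, which is why
 * sparse_init() batches these allocations per node.
 */
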
#ifdef CONFIG_MEMORY_HOTREMOVE
static unsigned long * __init
sparse_early_usemaps_alloc_pgdat_section(struct pglist_data *pgdat,
					 unsigned long size)
{
	unsigned long goal, limit;
	unsigned long *p;
	int nid;
	/*
	 * A page may contain usemaps for other sections preventing the
	 * page being freed and making a section unremovable while
	 * other sections referencing the usemap remain active. Similarly,
	 * a pgdat can prevent a section being removed. If section A
	 * contains a pgdat and section B contains the usemap, both
	 * sections become inter-dependent. This allocates usemaps
	 * from the same section as the pgdat where possible to avoid
	 * this problem.
	 */
	goal = __pa(pgdat) & (PAGE_SECTION_MASK << PAGE_SHIFT);
	limit = goal + (1UL << PA_SECTION_SHIFT);
	nid = early_pfn_to_nid(goal >> PAGE_SHIFT);
again:
	p = memblock_virt_alloc_try_nid_nopanic(size,
						SMP_CACHE_BYTES, goal, limit,
						nid);
	if (!p && limit) {
		limit = 0;
		goto again;
	}
	return p;
}

static void __init check_usemap_section_nr(int nid, unsigned long *usemap)
{
	unsigned long usemap_snr, pgdat_snr;
	static unsigned long old_usemap_snr = NR_MEM_SECTIONS;
	static unsigned long old_pgdat_snr = NR_MEM_SECTIONS;
	struct pglist_data *pgdat = NODE_DATA(nid);
	int usemap_nid;

	usemap_snr = pfn_to_section_nr(__pa(usemap) >> PAGE_SHIFT);
	pgdat_snr = pfn_to_section_nr(__pa(pgdat) >> PAGE_SHIFT);
	if (usemap_snr == pgdat_snr)
		return;

	if (old_usemap_snr == usemap_snr && old_pgdat_snr == pgdat_snr)
		/* skip redundant message */
		return;

	old_usemap_snr = usemap_snr;
	old_pgdat_snr = pgdat_snr;

	usemap_nid = sparse_early_nid(__nr_to_section(usemap_snr));
	if (usemap_nid != nid) {
		pr_info("node %d must be removed before removing section %ld\n",
			nid, usemap_snr);
		return;
	}
	/*
	 * There is a circular dependency.  Some platforms allow
	 * un-removable sections because they will just gather other
	 * removable sections for dynamic partitioning.  Just report
	 * the un-removable section's number here.
	 */
	pr_info("Section %ld and %ld (node %d) have a circular dependency on usemap and pgdat allocations\n",
		usemap_snr, pgdat_snr, nid);
}
#else
static unsigned long * __init
sparse_early_usemaps_alloc_pgdat_section(struct pglist_data *pgdat,
					 unsigned long size)
{
	return memblock_virt_alloc_node_nopanic(size, pgdat->node_id);
}

static void __init check_usemap_section_nr(int nid, unsigned long *usemap)
{
}
#endif /* CONFIG_MEMORY_HOTREMOVE */

static void __init sparse_early_usemaps_alloc_node(void *data,
				 unsigned long pnum_begin,
				 unsigned long pnum_end,
				 unsigned long usemap_count, int nodeid)
{
	void *usemap;
	unsigned long pnum;
	unsigned long **usemap_map = (unsigned long **)data;
	int size = usemap_size();

	usemap = sparse_early_usemaps_alloc_pgdat_section(NODE_DATA(nodeid),
							  size * usemap_count);
	if (!usemap) {
		pr_warn("%s: allocation failed\n", __func__);
		return;
	}

	for (pnum = pnum_begin; pnum < pnum_end; pnum++) {
		if (!present_section_nr(pnum))
			continue;
		usemap_map[pnum] = usemap;
		usemap += size;
		check_usemap_section_nr(nodeid, usemap_map[pnum]);
	}
}

#ifndef CONFIG_SPARSEMEM_VMEMMAP
struct page __init *sparse_mem_map_populate(unsigned long pnum, int nid)
{
	struct page *map;
	unsigned long size;

	map = alloc_remap(nid, sizeof(struct page) * PAGES_PER_SECTION);
	if (map)
		return map;

	size = PAGE_ALIGN(sizeof(struct page) * PAGES_PER_SECTION);
	map = memblock_virt_alloc_try_nid(size,
					  PAGE_SIZE, __pa(MAX_DMA_ADDRESS),
					  BOOTMEM_ALLOC_ACCESSIBLE, nid);
	return map;
}
void __init sparse_mem_maps_populate_node(struct page **map_map,
					  unsigned long pnum_begin,
					  unsigned long pnum_end,
					  unsigned long map_count, int nodeid)
{
	void *map;
	unsigned long pnum;
	unsigned long size = sizeof(struct page) * PAGES_PER_SECTION;

	map = alloc_remap(nodeid, size * map_count);
	if (map) {
		for (pnum = pnum_begin; pnum < pnum_end; pnum++) {
			if (!present_section_nr(pnum))
				continue;
			map_map[pnum] = map;
			map += size;
		}
		return;
	}

	size = PAGE_ALIGN(size);
	map = memblock_virt_alloc_try_nid(size * map_count,
					  PAGE_SIZE, __pa(MAX_DMA_ADDRESS),
					  BOOTMEM_ALLOC_ACCESSIBLE, nodeid);
	if (map) {
		for (pnum = pnum_begin; pnum < pnum_end; pnum++) {
			if (!present_section_nr(pnum))
				continue;
			map_map[pnum] = map;
			map += size;
		}
		return;
	}

	/* fallback */
	for (pnum = pnum_begin; pnum < pnum_end; pnum++) {
		struct mem_section *ms;

		if (!present_section_nr(pnum))
			continue;
		map_map[pnum] = sparse_mem_map_populate(pnum, nodeid);
		if (map_map[pnum])
			continue;
		ms = __nr_to_section(pnum);
		pr_err("%s: sparsemem memory map backing failed, some memory will not be available\n",
		       __func__);
		ms->section_mem_map = 0;
	}
}
#endif /* !CONFIG_SPARSEMEM_VMEMMAP */

#ifdef CONFIG_SPARSEMEM_ALLOC_MEM_MAP_TOGETHER
static void __init sparse_early_mem_maps_alloc_node(void *data,
				 unsigned long pnum_begin,
				 unsigned long pnum_end,
				 unsigned long map_count, int nodeid)
{
	struct page **map_map = (struct page **)data;
	sparse_mem_maps_populate_node(map_map, pnum_begin, pnum_end,
				      map_count, nodeid);
}
#else
static struct page __init *sparse_early_mem_map_alloc(unsigned long pnum)
{
	struct page *map;
	struct mem_section *ms = __nr_to_section(pnum);
	int nid = sparse_early_nid(ms);

	map = sparse_mem_map_populate(pnum, nid);
	if (map)
		return map;

	pr_err("%s: sparsemem memory map backing failed, some memory will not be available\n",
	       __func__);
	ms->section_mem_map = 0;
	return NULL;
}
#endif

void __weak __meminit vmemmap_populate_print_last(void)
{
}

/**
 * alloc_usemap_and_memmap - memory allocation for pageblock flags and vmemmap
 * @alloc_func: function to allocate usemap or memmap for a range of sections
 * @data: usemap_map for pageblock flags or map_map for vmemmap
 */
static void __init alloc_usemap_and_memmap(void (*alloc_func)
					(void *, unsigned long, unsigned long,
					unsigned long, int), void *data)
{
	unsigned long pnum;
	unsigned long map_count;
	int nodeid_begin = 0;
	unsigned long pnum_begin = 0;

	for_each_present_section_nr(0, pnum) {
		struct mem_section *ms;

		ms = __nr_to_section(pnum);
		nodeid_begin = sparse_early_nid(ms);
		pnum_begin = pnum;
		break;
	}
	map_count = 1;
	for_each_present_section_nr(pnum_begin + 1, pnum) {
		struct mem_section *ms;
		int nodeid;

		ms = __nr_to_section(pnum);
		nodeid = sparse_early_nid(ms);
		if (nodeid == nodeid_begin) {
			map_count++;
			continue;
		}
		/* ok, we need to take care of from pnum_begin to pnum - 1 */
		alloc_func(data, pnum_begin, pnum,
			   map_count, nodeid_begin);
		/* new start, update count etc. */
		nodeid_begin = nodeid;
		pnum_begin = pnum;
		map_count = 1;
	}
	/* ok, last chunk */
	alloc_func(data, pnum_begin, NR_MEM_SECTIONS,
		   map_count, nodeid_begin);
}

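/*
 * Editor's note: the walk above batches runs of present sections that
 * share a node; e.g. present sections {0,1} on node 0 followed by
 * {2,3} on node 1 yield exactly two alloc_func() calls, each covering
 * a whole run, so each node's usemaps (or memmaps) come from one
 * contiguous per-node allocation.
 */
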
/*
 * Allocate the accumulated non-linear sections, allocate a mem_map
 * for each and record the physical to section mapping.
 */
void __init sparse_init(void)
{
	unsigned long pnum;
	struct page *map;
	unsigned long *usemap;
	unsigned long **usemap_map;
	int size;
#ifdef CONFIG_SPARSEMEM_ALLOC_MEM_MAP_TOGETHER
	int size2;
	struct page **map_map;
#endif

	/* see include/linux/mmzone.h 'struct mem_section' definition */
	BUILD_BUG_ON(!is_power_of_2(sizeof(struct mem_section)));

	/* Setup pageblock_order for HUGETLB_PAGE_SIZE_VARIABLE */
	set_pageblock_order();

	/*
	 * Each mem_map is allocated with a big page (2M on 64-bit
	 * x86), while each usemap is much smaller than one page
	 * (roughly 24 bytes).  Interleaving one 2M-aligned mem_map
	 * allocation with one tiny usemap allocation pushes the next
	 * mem_map out to the following 2M boundary, so on a big system
	 * the memory ends up with a lot of holes.  Instead, try to
	 * keep the 2M mem_map pages contiguous by allocating all of
	 * the usemaps up front.
	 *
	 * powerpc needs to call sparse_init_one_section right after
	 * each sparse_early_mem_map_alloc, so allocate usemap_map
	 * first.
	 */
	size = sizeof(unsigned long *) * NR_MEM_SECTIONS;
	usemap_map = memblock_virt_alloc(size, 0);
	if (!usemap_map)
		panic("can not allocate usemap_map\n");
	alloc_usemap_and_memmap(sparse_early_usemaps_alloc_node,
				(void *)usemap_map);

#ifdef CONFIG_SPARSEMEM_ALLOC_MEM_MAP_TOGETHER
	size2 = sizeof(struct page *) * NR_MEM_SECTIONS;
	map_map = memblock_virt_alloc(size2, 0);
	if (!map_map)
		panic("can not allocate map_map\n");
	alloc_usemap_and_memmap(sparse_early_mem_maps_alloc_node,
				(void *)map_map);
#endif

	for_each_present_section_nr(0, pnum) {
		usemap = usemap_map[pnum];
		if (!usemap)
			continue;

#ifdef CONFIG_SPARSEMEM_ALLOC_MEM_MAP_TOGETHER
		map = map_map[pnum];
#else
		map = sparse_early_mem_map_alloc(pnum);
#endif
		if (!map)
			continue;

		sparse_init_one_section(__nr_to_section(pnum), pnum, map,
					usemap);
	}

	vmemmap_populate_print_last();

#ifdef CONFIG_SPARSEMEM_ALLOC_MEM_MAP_TOGETHER
	memblock_free_early(__pa(map_map), size2);
#endif
	memblock_free_early(__pa(usemap_map), size);
}

#ifdef CONFIG_MEMORY_HOTPLUG
#ifdef CONFIG_SPARSEMEM_VMEMMAP
static inline struct page *kmalloc_section_memmap(unsigned long pnum, int nid)
{
	/* This will make the necessary allocations eventually. */
	return sparse_mem_map_populate(pnum, nid);
}
static void __kfree_section_memmap(struct page *memmap)
{
	unsigned long start = (unsigned long)memmap;
	unsigned long end = (unsigned long)(memmap + PAGES_PER_SECTION);

	vmemmap_free(start, end);
}
#ifdef CONFIG_MEMORY_HOTREMOVE
static void free_map_bootmem(struct page *memmap)
{
	unsigned long start = (unsigned long)memmap;
	unsigned long end = (unsigned long)(memmap + PAGES_PER_SECTION);

	vmemmap_free(start, end);
}
#endif /* CONFIG_MEMORY_HOTREMOVE */
#else
static struct page *__kmalloc_section_memmap(void)
{
	struct page *page, *ret;
	unsigned long memmap_size = sizeof(struct page) * PAGES_PER_SECTION;

	page = alloc_pages(GFP_KERNEL|__GFP_NOWARN, get_order(memmap_size));
	if (page)
		goto got_map_page;

	ret = vmalloc(memmap_size);
	if (ret)
		goto got_map_ptr;

	return NULL;
got_map_page:
	ret = (struct page *)pfn_to_kaddr(page_to_pfn(page));
got_map_ptr:

	return ret;
}

static inline struct page *kmalloc_section_memmap(unsigned long pnum, int nid)
{
	return __kmalloc_section_memmap();
}

static void __kfree_section_memmap(struct page *memmap)
{
	if (is_vmalloc_addr(memmap))
		vfree(memmap);
	else
		free_pages((unsigned long)memmap,
			   get_order(sizeof(struct page) * PAGES_PER_SECTION));
}

#ifdef CONFIG_MEMORY_HOTREMOVE
static void free_map_bootmem(struct page *memmap)
{
	unsigned long maps_section_nr, removing_section_nr, i;
	unsigned long magic, nr_pages;
	struct page *page = virt_to_page(memmap);

	nr_pages = PAGE_ALIGN(PAGES_PER_SECTION * sizeof(struct page))
		>> PAGE_SHIFT;

	for (i = 0; i < nr_pages; i++, page++) {
		magic = (unsigned long) page->freelist;

		BUG_ON(magic == NODE_INFO);

		maps_section_nr = pfn_to_section_nr(page_to_pfn(page));
		removing_section_nr = page_private(page);

		/*
		 * When this function is called, the section being
		 * removed has already been logically offlined, so all
		 * of its pages are isolated from the page allocator.
		 * If the section's memmap is placed on that same
		 * section, it must not be freed here: otherwise the
		 * page allocator could hand it out again, even though
		 * the backing memory is about to be removed physically.
		 */
		if (maps_section_nr != removing_section_nr)
			put_page_bootmem(page);
	}
}
#endif /* CONFIG_MEMORY_HOTREMOVE */
#endif /* CONFIG_SPARSEMEM_VMEMMAP */

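/*
 * Editor's note: with CONFIG_SPARSEMEM_VMEMMAP a hot-added memmap goes
 * through the vmemmap populate/free paths above; without it,
 * __kmalloc_section_memmap() first tries a physically contiguous
 * high-order page allocation and falls back to vmalloc(), and
 * __kfree_section_memmap() mirrors that split via is_vmalloc_addr().
 */
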
/*
 * returns the number of sections whose mem_maps were properly
 * set. If this is <=0, then that means that the passed-in
 * map was not consumed and must be freed.
 */
int __meminit sparse_add_one_section(struct zone *zone, unsigned long start_pfn)
{
	unsigned long section_nr = pfn_to_section_nr(start_pfn);
	struct pglist_data *pgdat = zone->zone_pgdat;
	struct mem_section *ms;
	struct page *memmap;
	unsigned long *usemap;
	unsigned long flags;
	int ret;

	/*
	 * No locking needed here: sparse_index_init() does its own
	 * locking, and it allocates memory, so it may sleep.
	 */
	ret = sparse_index_init(section_nr, pgdat->node_id);
	if (ret < 0 && ret != -EEXIST)
		return ret;
	memmap = kmalloc_section_memmap(section_nr, pgdat->node_id);
	if (!memmap)
		return -ENOMEM;
	usemap = __kmalloc_section_usemap();
	if (!usemap) {
		__kfree_section_memmap(memmap);
		return -ENOMEM;
	}

	pgdat_resize_lock(pgdat, &flags);

	ms = __pfn_to_section(start_pfn);
	if (ms->section_mem_map & SECTION_MARKED_PRESENT) {
		ret = -EEXIST;
		goto out;
	}

	memset(memmap, 0, sizeof(struct page) * PAGES_PER_SECTION);

	section_mark_present(ms);

	ret = sparse_init_one_section(ms, section_nr, memmap, usemap);

out:
	pgdat_resize_unlock(pgdat, &flags);
	if (ret <= 0) {
		kfree(usemap);
		__kfree_section_memmap(memmap);
	}
	return ret;
}
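
/*
 * Usage sketch (editor's illustration, abbreviated from the memory
 * hotplug core): new memory is added one section at a time, roughly
 *
 *	for (pfn = start_pfn; pfn < end_pfn; pfn += PAGES_PER_SECTION)
 *		ret = sparse_add_one_section(zone, pfn);
 *
 * with -EEXIST treated as "section already present" rather than an
 * error.
 */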

#ifdef CONFIG_MEMORY_HOTREMOVE
#ifdef CONFIG_MEMORY_FAILURE
static void clear_hwpoisoned_pages(struct page *memmap, int nr_pages)
{
	int i;

	if (!memmap)
		return;

	for (i = 0; i < nr_pages; i++) {
		if (PageHWPoison(&memmap[i])) {
			atomic_long_sub(1, &num_poisoned_pages);
			ClearPageHWPoison(&memmap[i]);
		}
	}
}
#else
static inline void clear_hwpoisoned_pages(struct page *memmap, int nr_pages)
{
}
#endif

static void free_section_usemap(struct page *memmap, unsigned long *usemap)
{
	struct page *usemap_page;

	if (!usemap)
		return;

	usemap_page = virt_to_page(usemap);
	/*
	 * Check to see if allocation came from hot-plug-add
	 */
	if (PageSlab(usemap_page) || PageCompound(usemap_page)) {
		kfree(usemap);
		if (memmap)
			__kfree_section_memmap(memmap);
		return;
	}

	/*
	 * The usemap came from bootmem. This is packed with other usemaps
	 * on the section which has pgdat at boot time. Just keep it as is now.
	 */

	if (memmap)
		free_map_bootmem(memmap);
}

void sparse_remove_one_section(struct zone *zone, struct mem_section *ms,
		unsigned long map_offset)
{
	struct page *memmap = NULL;
	unsigned long *usemap = NULL, flags;
	struct pglist_data *pgdat = zone->zone_pgdat;

	pgdat_resize_lock(pgdat, &flags);
	if (ms->section_mem_map) {
		usemap = ms->pageblock_flags;
		memmap = sparse_decode_mem_map(ms->section_mem_map,
						__section_nr(ms));
		ms->section_mem_map = 0;
		ms->pageblock_flags = NULL;
	}
	pgdat_resize_unlock(pgdat, &flags);

	clear_hwpoisoned_pages(memmap + map_offset,
			       PAGES_PER_SECTION - map_offset);
	free_section_usemap(memmap, usemap);
}
#endif /* CONFIG_MEMORY_HOTREMOVE */
#endif /* CONFIG_MEMORY_HOTPLUG */