/*
 * sparse memory mappings.
 */
#include <linux/mm.h>
#include <linux/slab.h>
#include <linux/mmzone.h>
#include <linux/bootmem.h>
#include <linux/compiler.h>
#include <linux/highmem.h>
#include <linux/export.h>
#include <linux/spinlock.h>
#include <linux/vmalloc.h>

#include "internal.h"
#include <asm/dma.h>
#include <asm/pgalloc.h>
#include <asm/pgtable.h>

/*
 * Permanent SPARSEMEM data:
 *
 * 1) mem_section - memory sections, mem_map's for valid memory
 */
#ifdef CONFIG_SPARSEMEM_EXTREME
struct mem_section **mem_section;
#else
struct mem_section mem_section[NR_SECTION_ROOTS][SECTIONS_PER_ROOT]
	____cacheline_internodealigned_in_smp;
#endif
EXPORT_SYMBOL(mem_section);

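/*
 * Note: under SPARSEMEM_EXTREME the root table above holds pointers,
 * so only roots that contain present sections need backing memory and
 * a lookup goes through two levels; __nr_to_section(nr) (see
 * linux/mmzone.h) is roughly
 *
 *	mem_section[SECTION_NR_TO_ROOT(nr)][nr & SECTION_ROOT_MASK]
 *
 * whereas the static variant reserves every root up front.
 */
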
#ifdef NODE_NOT_IN_PAGE_FLAGS
/*
 * If we did not store the node number in the page then we have to
 * do a lookup in the section_to_node_table in order to find which
 * node the page belongs to.
 */
#if MAX_NUMNODES <= 256
static u8 section_to_node_table[NR_MEM_SECTIONS] __cacheline_aligned;
#else
static u16 section_to_node_table[NR_MEM_SECTIONS] __cacheline_aligned;
#endif

int page_to_nid(const struct page *page)
{
	return section_to_node_table[page_to_section(page)];
}
EXPORT_SYMBOL(page_to_nid);

static void set_section_nid(unsigned long section_nr, int nid)
{
	section_to_node_table[section_nr] = nid;
}
#else /* !NODE_NOT_IN_PAGE_FLAGS */
static inline void set_section_nid(unsigned long section_nr, int nid)
{
}
#endif

#ifdef CONFIG_SPARSEMEM_EXTREME
static noinline struct mem_section __ref *sparse_index_alloc(int nid)
{
	struct mem_section *section = NULL;
	unsigned long array_size = SECTIONS_PER_ROOT *
				   sizeof(struct mem_section);

	if (slab_is_available())
		section = kzalloc_node(array_size, GFP_KERNEL, nid);
	else
		section = memblock_virt_alloc_node(array_size, nid);

	return section;
}

static int __meminit sparse_index_init(unsigned long section_nr, int nid)
{
	unsigned long root = SECTION_NR_TO_ROOT(section_nr);
	struct mem_section *section;

	if (mem_section[root])
		return -EEXIST;

	section = sparse_index_alloc(nid);
	if (!section)
		return -ENOMEM;

	mem_section[root] = section;

	return 0;
}
#else /* !SPARSEMEM_EXTREME */
static inline int sparse_index_init(unsigned long section_nr, int nid)
{
	return 0;
}
#endif

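/*
 * __section_nr() is the reverse mapping from a mem_section pointer to
 * its section number.  Under SPARSEMEM_EXTREME the roots are allocated
 * dynamically, so there is no arithmetic relation between the pointer
 * and the section number and the roots must be scanned; the static
 * layout can derive it directly from the array offset.
 */
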
#ifdef CONFIG_SPARSEMEM_EXTREME
int __section_nr(struct mem_section *ms)
{
	unsigned long root_nr;
	struct mem_section *root = NULL;

	for (root_nr = 0; root_nr < NR_SECTION_ROOTS; root_nr++) {
		root = __nr_to_section(root_nr * SECTIONS_PER_ROOT);
		if (!root)
			continue;

		if ((ms >= root) && (ms < (root + SECTIONS_PER_ROOT)))
			break;
	}

	VM_BUG_ON(!root);

	return (root_nr * SECTIONS_PER_ROOT) + (ms - root);
}
#else
int __section_nr(struct mem_section *ms)
{
	return (int)(ms - mem_section[0]);
}
#endif

/*
 * During early boot, before section_mem_map is used for an actual
 * mem_map, we use section_mem_map to store the section's NUMA
 * node. This keeps us from having to use another data structure. The
 * node information is cleared just before we store the real mem_map.
 */
static inline unsigned long sparse_encode_early_nid(int nid)
{
	return (nid << SECTION_NID_SHIFT);
}

static inline int sparse_early_nid(struct mem_section *section)
{
	return (section->section_mem_map >> SECTION_NID_SHIFT);
}

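/*
 * Sketch of the encoding (the exact bit layout lives in linux/mmzone.h):
 * the low SECTION_NID_SHIFT bits of section_mem_map are reserved for
 * the PRESENT/HAS_MEM_MAP/ONLINE flags, so storing node 3 sets
 * section_mem_map to (3 << SECTION_NID_SHIFT) and sparse_early_nid()
 * recovers 3 by shifting back down.
 */
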
/* Validate the physical addressing limitations of the model */
void __meminit mminit_validate_memmodel_limits(unsigned long *start_pfn,
						unsigned long *end_pfn)
{
	unsigned long max_sparsemem_pfn = 1UL << (MAX_PHYSMEM_BITS-PAGE_SHIFT);

	/*
	 * Sanity checks - do not allow an architecture to pass
	 * in larger pfns than the maximum scope of sparsemem:
	 */
	if (*start_pfn > max_sparsemem_pfn) {
		mminit_dprintk(MMINIT_WARNING, "pfnvalidation",
			"Start of range %lu -> %lu exceeds SPARSEMEM max %lu\n",
			*start_pfn, *end_pfn, max_sparsemem_pfn);
		WARN_ON_ONCE(1);
		*start_pfn = max_sparsemem_pfn;
		*end_pfn = max_sparsemem_pfn;
	} else if (*end_pfn > max_sparsemem_pfn) {
		mminit_dprintk(MMINIT_WARNING, "pfnvalidation",
			"End of range %lu -> %lu exceeds SPARSEMEM max %lu\n",
			*start_pfn, *end_pfn, max_sparsemem_pfn);
		WARN_ON_ONCE(1);
		*end_pfn = max_sparsemem_pfn;
	}
}

/*
 * There are a number of times that we loop over NR_MEM_SECTIONS,
 * looking for section_present() on each. But, when we have very
 * large physical address spaces, NR_MEM_SECTIONS can also be
 * very large, which makes the loops quite long.
 *
 * Keeping track of this gives us an easy way to break out of
 * those loops early.
 */
int __highest_present_section_nr;
static void section_mark_present(struct mem_section *ms)
{
	int section_nr = __section_nr(ms);

	if (section_nr > __highest_present_section_nr)
		__highest_present_section_nr = section_nr;

	ms->section_mem_map |= SECTION_MARKED_PRESENT;
}

static inline int next_present_section_nr(int section_nr)
{
	do {
		section_nr++;
		if (present_section_nr(section_nr))
			return section_nr;
	} while ((section_nr < NR_MEM_SECTIONS) &&
		 (section_nr <= __highest_present_section_nr));

	return -1;
}
#define for_each_present_section_nr(start, section_nr)		\
	for (section_nr = next_present_section_nr(start-1);	\
	     ((section_nr >= 0) &&				\
	      (section_nr < NR_MEM_SECTIONS) &&			\
	      (section_nr <= __highest_present_section_nr));	\
	     section_nr = next_present_section_nr(section_nr))

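/*
 * Usage sketch: for_each_present_section_nr(0, pnum) visits every
 * present section in ascending order.  Termination comes from
 * next_present_section_nr() returning -1 once the walk passes
 * __highest_present_section_nr; the extra bound checks in the macro
 * are defensive.
 */
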
/* Record a memory area against a node. */
void __init memory_present(int nid, unsigned long start, unsigned long end)
{
	unsigned long pfn;

	start &= PAGE_SECTION_MASK;
	mminit_validate_memmodel_limits(&start, &end);
	for (pfn = start; pfn < end; pfn += PAGES_PER_SECTION) {
		unsigned long section = pfn_to_section_nr(pfn);
		struct mem_section *ms;

		sparse_index_init(section, nid);
		set_section_nid(section, nid);

		ms = __nr_to_section(section);
		if (!ms->section_mem_map) {
			ms->section_mem_map = sparse_encode_early_nid(nid) |
							SECTION_IS_ONLINE;
			section_mark_present(ms);
		}
	}
}

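/*
 * memory_present() is typically called once per memory range early in
 * boot by arch setup code (or via
 * sparse_memory_present_with_active_regions()), so by the time
 * sparse_init() runs every present section is marked and carries its
 * early node id.
 */
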
/*
 * Only used by the i386 NUMA architectures, but relatively
 * generic code.
 */
unsigned long __init node_memmap_size_bytes(int nid, unsigned long start_pfn,
						     unsigned long end_pfn)
{
	unsigned long pfn;
	unsigned long nr_pages = 0;

	mminit_validate_memmodel_limits(&start_pfn, &end_pfn);
	for (pfn = start_pfn; pfn < end_pfn; pfn += PAGES_PER_SECTION) {
		if (nid != early_pfn_to_nid(pfn))
			continue;

		if (pfn_present(pfn))
			nr_pages += PAGES_PER_SECTION;
	}

	return nr_pages * sizeof(struct page);
}

/*
 * Subtle: we encode the real pfn into the mem_map such that the
 * identity "page - section_mem_map == pfn" holds, i.e. subtracting
 * the encoded value from a struct page pointer yields the actual
 * physical page frame number.
 */
static unsigned long sparse_encode_mem_map(struct page *mem_map, unsigned long pnum)
{
	return (unsigned long)(mem_map - (section_nr_to_pfn(pnum)));
}

/*
 * Decode mem_map from the coded memmap
 */
struct page *sparse_decode_mem_map(unsigned long coded_mem_map, unsigned long pnum)
{
	/* mask off the extra low bits of information */
	coded_mem_map &= SECTION_MAP_MASK;
	return ((struct page *)coded_mem_map) + section_nr_to_pfn(pnum);
}

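/*
 * Worked example (sketch): if a section's mem_map starts at virtual
 * address V and the section's first pfn is S, we store V minus S
 * struct-page-sized slots.  pfn_to_page() can then simply add an
 * *absolute* pfn to the decoded base and land on the right struct
 * page, with no per-lookup subtraction of the section start;
 * sparse_decode_mem_map() adds section_nr_to_pfn(pnum) back to
 * recover the real mem_map address.
 */
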
static int __meminit sparse_init_one_section(struct mem_section *ms,
		unsigned long pnum, struct page *mem_map,
		unsigned long *pageblock_bitmap)
{
	if (!present_section(ms))
		return -EINVAL;

	ms->section_mem_map &= ~SECTION_MAP_MASK;
	ms->section_mem_map |= sparse_encode_mem_map(mem_map, pnum) |
							SECTION_HAS_MEM_MAP;
	ms->pageblock_flags = pageblock_bitmap;

	return 1;
}

unsigned long usemap_size(void)
{
	return BITS_TO_LONGS(SECTION_BLOCKFLAGS_BITS) * sizeof(unsigned long);
}

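/*
 * A section's "usemap" is the bitmap backing its pageblock flags
 * (for example, each pageblock's migratetype); usemap_size() above is
 * simply that bitmap rounded up to whole unsigned longs.
 */
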
#ifdef CONFIG_MEMORY_HOTPLUG
static unsigned long *__kmalloc_section_usemap(void)
{
	return kmalloc(usemap_size(), GFP_KERNEL);
}
#endif /* CONFIG_MEMORY_HOTPLUG */

#ifdef CONFIG_MEMORY_HOTREMOVE
static unsigned long * __init
sparse_early_usemaps_alloc_pgdat_section(struct pglist_data *pgdat,
					 unsigned long size)
{
	unsigned long goal, limit;
	unsigned long *p;
	int nid;
	/*
	 * A page may contain usemaps for other sections preventing the
	 * page being freed and making a section unremovable while
	 * other sections referencing the usemap remain active. Similarly,
	 * a pgdat can prevent a section being removed. If section A
	 * contains a pgdat and section B contains the usemap, both
	 * sections become inter-dependent. This allocates usemaps
	 * from the same section as the pgdat where possible to avoid
	 * this problem.
	 */
	goal = __pa(pgdat) & (PAGE_SECTION_MASK << PAGE_SHIFT);
	limit = goal + (1UL << PA_SECTION_SHIFT);
	nid = early_pfn_to_nid(goal >> PAGE_SHIFT);
again:
	p = memblock_virt_alloc_try_nid_nopanic(size,
						SMP_CACHE_BYTES, goal, limit,
						nid);
	if (!p && limit) {
		limit = 0;
		goto again;
	}
	return p;
}

static void __init check_usemap_section_nr(int nid, unsigned long *usemap)
{
	unsigned long usemap_snr, pgdat_snr;
	static unsigned long old_usemap_snr;
	static unsigned long old_pgdat_snr;
	struct pglist_data *pgdat = NODE_DATA(nid);
	int usemap_nid;

	/* First call */
	if (!old_usemap_snr) {
		old_usemap_snr = NR_MEM_SECTIONS;
		old_pgdat_snr = NR_MEM_SECTIONS;
	}

	usemap_snr = pfn_to_section_nr(__pa(usemap) >> PAGE_SHIFT);
	pgdat_snr = pfn_to_section_nr(__pa(pgdat) >> PAGE_SHIFT);
	if (usemap_snr == pgdat_snr)
		return;

	if (old_usemap_snr == usemap_snr && old_pgdat_snr == pgdat_snr)
		/* skip redundant message */
		return;

	old_usemap_snr = usemap_snr;
	old_pgdat_snr = pgdat_snr;

	usemap_nid = sparse_early_nid(__nr_to_section(usemap_snr));
	if (usemap_nid != nid) {
		pr_info("node %d must be removed before remove section %ld\n",
			nid, usemap_snr);
		return;
	}
	/*
	 * There is a circular dependency.
	 * Some platforms allow un-removable sections because they will just
	 * gather other removable sections for dynamic partitioning.
	 * Just notify the un-removable section's number here.
	 */
	pr_info("Section %ld and %ld (node %d) have a circular dependency on usemap and pgdat allocations\n",
		usemap_snr, pgdat_snr, nid);
}
#else
static unsigned long * __init
sparse_early_usemaps_alloc_pgdat_section(struct pglist_data *pgdat,
					 unsigned long size)
{
	return memblock_virt_alloc_node_nopanic(size, pgdat->node_id);
}

static void __init check_usemap_section_nr(int nid, unsigned long *usemap)
{
}
#endif /* CONFIG_MEMORY_HOTREMOVE */

static void __init sparse_early_usemaps_alloc_node(void *data,
				 unsigned long pnum_begin,
				 unsigned long pnum_end,
				 unsigned long usemap_count, int nodeid)
{
	void *usemap;
	unsigned long pnum;
	unsigned long **usemap_map = (unsigned long **)data;
	int size = usemap_size();

	usemap = sparse_early_usemaps_alloc_pgdat_section(NODE_DATA(nodeid),
							  size * usemap_count);
	if (!usemap) {
		pr_warn("%s: allocation failed\n", __func__);
		return;
	}

	for (pnum = pnum_begin; pnum < pnum_end; pnum++) {
		if (!present_section_nr(pnum))
			continue;
		usemap_map[pnum] = usemap;
		usemap += size;
		check_usemap_section_nr(nodeid, usemap_map[pnum]);
	}
}

#ifndef CONFIG_SPARSEMEM_VMEMMAP
struct page __init *sparse_mem_map_populate(unsigned long pnum, int nid)
{
	struct page *map;
	unsigned long size;

	map = alloc_remap(nid, sizeof(struct page) * PAGES_PER_SECTION);
	if (map)
		return map;

	size = PAGE_ALIGN(sizeof(struct page) * PAGES_PER_SECTION);
	map = memblock_virt_alloc_try_nid(size,
					  PAGE_SIZE, __pa(MAX_DMA_ADDRESS),
					  BOOTMEM_ALLOC_ACCESSIBLE, nid);
	return map;
}
void __init sparse_mem_maps_populate_node(struct page **map_map,
					  unsigned long pnum_begin,
					  unsigned long pnum_end,
					  unsigned long map_count, int nodeid)
{
	void *map;
	unsigned long pnum;
	unsigned long size = sizeof(struct page) * PAGES_PER_SECTION;

	map = alloc_remap(nodeid, size * map_count);
	if (map) {
		for (pnum = pnum_begin; pnum < pnum_end; pnum++) {
			if (!present_section_nr(pnum))
				continue;
			map_map[pnum] = map;
			map += size;
		}
		return;
	}

	size = PAGE_ALIGN(size);
	map = memblock_virt_alloc_try_nid(size * map_count,
					  PAGE_SIZE, __pa(MAX_DMA_ADDRESS),
					  BOOTMEM_ALLOC_ACCESSIBLE, nodeid);
	if (map) {
		for (pnum = pnum_begin; pnum < pnum_end; pnum++) {
			if (!present_section_nr(pnum))
				continue;
			map_map[pnum] = map;
			map += size;
		}
		return;
	}

	/* fallback */
	for (pnum = pnum_begin; pnum < pnum_end; pnum++) {
		struct mem_section *ms;

		if (!present_section_nr(pnum))
			continue;
		map_map[pnum] = sparse_mem_map_populate(pnum, nodeid);
		if (map_map[pnum])
			continue;
		ms = __nr_to_section(pnum);
		pr_err("%s: sparsemem memory map backing failed some memory will not be available\n",
		       __func__);
		ms->section_mem_map = 0;
	}
}
#endif /* !CONFIG_SPARSEMEM_VMEMMAP */

#ifdef CONFIG_SPARSEMEM_ALLOC_MEM_MAP_TOGETHER
static void __init sparse_early_mem_maps_alloc_node(void *data,
				 unsigned long pnum_begin,
				 unsigned long pnum_end,
				 unsigned long map_count, int nodeid)
{
	struct page **map_map = (struct page **)data;
	sparse_mem_maps_populate_node(map_map, pnum_begin, pnum_end,
				      map_count, nodeid);
}
#else
static struct page __init *sparse_early_mem_map_alloc(unsigned long pnum)
{
	struct page *map;
	struct mem_section *ms = __nr_to_section(pnum);
	int nid = sparse_early_nid(ms);

	map = sparse_mem_map_populate(pnum, nid);
	if (map)
		return map;

	pr_err("%s: sparsemem memory map backing failed some memory will not be available\n",
	       __func__);
	ms->section_mem_map = 0;
	return NULL;
}
#endif

void __weak __meminit vmemmap_populate_print_last(void)
{
}

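/*
 * Helper for sparse_init(): walk every present section, batch the
 * sections that sit on the same node into one run, and invoke
 * alloc_func() once per run so each node's usemaps or mem_maps can be
 * allocated together.  Note the first loop below only locates the
 * first present section (hence the unconditional break).
 */
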
/**
 * alloc_usemap_and_memmap - memory allocation for pageblock flags and vmemmap
 * @alloc_func: function that allocates for a run of sections on one node
 * @data: usemap_map for pageblock flags or map_map for vmemmap
 */
static void __init alloc_usemap_and_memmap(void (*alloc_func)
				(void *, unsigned long, unsigned long,
				unsigned long, int), void *data)
{
	unsigned long pnum;
	unsigned long map_count;
	int nodeid_begin = 0;
	unsigned long pnum_begin = 0;

	for_each_present_section_nr(0, pnum) {
		struct mem_section *ms;

		ms = __nr_to_section(pnum);
		nodeid_begin = sparse_early_nid(ms);
		pnum_begin = pnum;
		break;
	}
	map_count = 1;
	for_each_present_section_nr(pnum_begin + 1, pnum) {
		struct mem_section *ms;
		int nodeid;

		ms = __nr_to_section(pnum);
		nodeid = sparse_early_nid(ms);
		if (nodeid == nodeid_begin) {
			map_count++;
			continue;
		}
		/* ok, we need to take care of sections pnum_begin to pnum - 1 */
		alloc_func(data, pnum_begin, pnum,
			   map_count, nodeid_begin);
		/* new start, update count etc. */
		nodeid_begin = nodeid;
		pnum_begin = pnum;
		map_count = 1;
	}
	/* ok, last chunk */
	alloc_func(data, pnum_begin, NR_MEM_SECTIONS,
		   map_count, nodeid_begin);
}

/*
 * Allocate the accumulated non-linear sections, allocate a mem_map
 * for each and record the physical to section mapping.
 */
void __init sparse_init(void)
{
	unsigned long pnum;
	struct page *map;
	unsigned long *usemap;
	unsigned long **usemap_map;
	int size;
#ifdef CONFIG_SPARSEMEM_ALLOC_MEM_MAP_TOGETHER
	int size2;
	struct page **map_map;
#endif

	/* see include/linux/mmzone.h 'struct mem_section' definition */
	BUILD_BUG_ON(!is_power_of_2(sizeof(struct mem_section)));

	/* Setup pageblock_order for HUGETLB_PAGE_SIZE_VARIABLE */
	set_pageblock_order();

	/*
	 * Each mem_map uses a big page (2M on 64-bit x86) while a usemap
	 * is much smaller (about 24 bytes). Allocating 2M (2M-aligned)
	 * and then 24 bytes in turn makes the next 2M slip to one more
	 * 2M later, so on a big system the memory ends up with a lot of
	 * holes. Here we try to allocate the 2M pages contiguously.
	 *
	 * powerpc needs to call sparse_init_one_section() right after each
	 * sparse_early_mem_map_alloc(), so allocate usemap_map first.
	 */
	size = sizeof(unsigned long *) * NR_MEM_SECTIONS;
	usemap_map = memblock_virt_alloc(size, 0);
	if (!usemap_map)
		panic("can not allocate usemap_map\n");
	alloc_usemap_and_memmap(sparse_early_usemaps_alloc_node,
				(void *)usemap_map);

#ifdef CONFIG_SPARSEMEM_ALLOC_MEM_MAP_TOGETHER
	size2 = sizeof(struct page *) * NR_MEM_SECTIONS;
	map_map = memblock_virt_alloc(size2, 0);
	if (!map_map)
		panic("can not allocate map_map\n");
	alloc_usemap_and_memmap(sparse_early_mem_maps_alloc_node,
				(void *)map_map);
#endif

	for_each_present_section_nr(0, pnum) {
		usemap = usemap_map[pnum];
		if (!usemap)
			continue;

#ifdef CONFIG_SPARSEMEM_ALLOC_MEM_MAP_TOGETHER
		map = map_map[pnum];
#else
		map = sparse_early_mem_map_alloc(pnum);
#endif
		if (!map)
			continue;

		sparse_init_one_section(__nr_to_section(pnum), pnum, map,
					usemap);
	}

	vmemmap_populate_print_last();

#ifdef CONFIG_SPARSEMEM_ALLOC_MEM_MAP_TOGETHER
	memblock_free_early(__pa(map_map), size2);
#endif
	memblock_free_early(__pa(usemap_map), size);
}

#ifdef CONFIG_MEMORY_HOTPLUG

/* Mark all memory sections within the pfn range as online */
void online_mem_sections(unsigned long start_pfn, unsigned long end_pfn)
{
	unsigned long pfn;

	for (pfn = start_pfn; pfn < end_pfn; pfn += PAGES_PER_SECTION) {
		unsigned long section_nr = pfn_to_section_nr(pfn);
		struct mem_section *ms;

		/* onlining code should never touch invalid ranges */
		if (WARN_ON(!valid_section_nr(section_nr)))
			continue;

		ms = __nr_to_section(section_nr);
		ms->section_mem_map |= SECTION_IS_ONLINE;
	}
}

#ifdef CONFIG_MEMORY_HOTREMOVE
/* Mark all memory sections within the pfn range as offline */
void offline_mem_sections(unsigned long start_pfn, unsigned long end_pfn)
{
	unsigned long pfn;

	for (pfn = start_pfn; pfn < end_pfn; pfn += PAGES_PER_SECTION) {
		unsigned long section_nr = pfn_to_section_nr(pfn);
		struct mem_section *ms;

		/*
		 * TODO this needs some double checking. Offlining code makes
		 * sure to check pfn_valid but those checks might be just bogus
		 */
		if (WARN_ON(!valid_section_nr(section_nr)))
			continue;

		ms = __nr_to_section(section_nr);
		ms->section_mem_map &= ~SECTION_IS_ONLINE;
	}
}
#endif

#ifdef CONFIG_SPARSEMEM_VMEMMAP
static inline struct page *kmalloc_section_memmap(unsigned long pnum, int nid)
{
	/* This will make the necessary allocations eventually. */
	return sparse_mem_map_populate(pnum, nid);
}
static void __kfree_section_memmap(struct page *memmap)
{
	unsigned long start = (unsigned long)memmap;
	unsigned long end = (unsigned long)(memmap + PAGES_PER_SECTION);

	vmemmap_free(start, end);
}
#ifdef CONFIG_MEMORY_HOTREMOVE
static void free_map_bootmem(struct page *memmap)
{
	unsigned long start = (unsigned long)memmap;
	unsigned long end = (unsigned long)(memmap + PAGES_PER_SECTION);

	vmemmap_free(start, end);
}
#endif /* CONFIG_MEMORY_HOTREMOVE */
#else
static struct page *__kmalloc_section_memmap(void)
{
	struct page *page, *ret;
	unsigned long memmap_size = sizeof(struct page) * PAGES_PER_SECTION;

	page = alloc_pages(GFP_KERNEL|__GFP_NOWARN, get_order(memmap_size));
	if (page)
		goto got_map_page;

	ret = vmalloc(memmap_size);
	if (ret)
		goto got_map_ptr;

	return NULL;
got_map_page:
	ret = (struct page *)pfn_to_kaddr(page_to_pfn(page));
got_map_ptr:

	return ret;
}

static inline struct page *kmalloc_section_memmap(unsigned long pnum, int nid)
{
	return __kmalloc_section_memmap();
}

static void __kfree_section_memmap(struct page *memmap)
{
	if (is_vmalloc_addr(memmap))
		vfree(memmap);
	else
		free_pages((unsigned long)memmap,
			   get_order(sizeof(struct page) * PAGES_PER_SECTION));
}

#ifdef CONFIG_MEMORY_HOTREMOVE
static void free_map_bootmem(struct page *memmap)
{
	unsigned long maps_section_nr, removing_section_nr, i;
	unsigned long magic, nr_pages;
	struct page *page = virt_to_page(memmap);

	nr_pages = PAGE_ALIGN(PAGES_PER_SECTION * sizeof(struct page))
		>> PAGE_SHIFT;

	for (i = 0; i < nr_pages; i++, page++) {
		magic = (unsigned long) page->freelist;

		BUG_ON(magic == NODE_INFO);

		maps_section_nr = pfn_to_section_nr(page_to_pfn(page));
		removing_section_nr = page_private(page);

		/*
		 * When this function is called, the removing section is
		 * in a logically offlined state. This means all its pages
		 * are isolated from the page allocator. If the removing
		 * section's memmap is placed on the same section, it must
		 * not be freed: the page allocator could otherwise hand it
		 * out again, and it will be removed physically soon.
		 */
		if (maps_section_nr != removing_section_nr)
			put_page_bootmem(page);
	}
}
#endif /* CONFIG_MEMORY_HOTREMOVE */
#endif /* CONFIG_SPARSEMEM_VMEMMAP */

/*
 * returns the number of sections whose mem_maps were properly
 * set. If this is <=0, then that means that the passed-in
 * map was not consumed and must be freed.
 */
int __meminit sparse_add_one_section(struct pglist_data *pgdat, unsigned long start_pfn)
{
	unsigned long section_nr = pfn_to_section_nr(start_pfn);
	struct mem_section *ms;
	struct page *memmap;
	unsigned long *usemap;
	unsigned long flags;
	int ret;

	/*
	 * No locking here: sparse_index_init() does its own
	 * locking, and it may allocate with kmalloc().
	 */
	ret = sparse_index_init(section_nr, pgdat->node_id);
	if (ret < 0 && ret != -EEXIST)
		return ret;
	memmap = kmalloc_section_memmap(section_nr, pgdat->node_id);
	if (!memmap)
		return -ENOMEM;
	usemap = __kmalloc_section_usemap();
	if (!usemap) {
		__kfree_section_memmap(memmap);
		return -ENOMEM;
	}

	pgdat_resize_lock(pgdat, &flags);

	ms = __pfn_to_section(start_pfn);
	if (ms->section_mem_map & SECTION_MARKED_PRESENT) {
		ret = -EEXIST;
		goto out;
	}

	memset(memmap, 0, sizeof(struct page) * PAGES_PER_SECTION);

	section_mark_present(ms);

	ret = sparse_init_one_section(ms, section_nr, memmap, usemap);

out:
	pgdat_resize_unlock(pgdat, &flags);
	if (ret <= 0) {
		kfree(usemap);
		__kfree_section_memmap(memmap);
	}
	return ret;
}

#ifdef CONFIG_MEMORY_HOTREMOVE
#ifdef CONFIG_MEMORY_FAILURE
static void clear_hwpoisoned_pages(struct page *memmap, int nr_pages)
{
	int i;

	if (!memmap)
		return;

	for (i = 0; i < nr_pages; i++) {
		if (PageHWPoison(&memmap[i])) {
			atomic_long_sub(1, &num_poisoned_pages);
			ClearPageHWPoison(&memmap[i]);
		}
	}
}
#else
static inline void clear_hwpoisoned_pages(struct page *memmap, int nr_pages)
{
}
#endif

static void free_section_usemap(struct page *memmap, unsigned long *usemap)
{
	struct page *usemap_page;

	if (!usemap)
		return;

	usemap_page = virt_to_page(usemap);
	/*
	 * Check to see if allocation came from hot-plug-add
	 */
	if (PageSlab(usemap_page) || PageCompound(usemap_page)) {
		kfree(usemap);
		if (memmap)
			__kfree_section_memmap(memmap);
		return;
	}

	/*
	 * The usemap came from bootmem. This is packed with other usemaps
	 * on the section which has pgdat at boot time. Just keep it as is now.
	 */

	if (memmap)
		free_map_bootmem(memmap);
}

void sparse_remove_one_section(struct zone *zone, struct mem_section *ms,
		unsigned long map_offset)
{
	struct page *memmap = NULL;
	unsigned long *usemap = NULL, flags;
	struct pglist_data *pgdat = zone->zone_pgdat;

	pgdat_resize_lock(pgdat, &flags);
	if (ms->section_mem_map) {
		usemap = ms->pageblock_flags;
		memmap = sparse_decode_mem_map(ms->section_mem_map,
						__section_nr(ms));
		ms->section_mem_map = 0;
		ms->pageblock_flags = NULL;
	}
	pgdat_resize_unlock(pgdat, &flags);

	clear_hwpoisoned_pages(memmap + map_offset,
			       PAGES_PER_SECTION - map_offset);
	free_section_usemap(memmap, usemap);
}
#endif /* CONFIG_MEMORY_HOTREMOVE */
#endif /* CONFIG_MEMORY_HOTPLUG */