mm/sparse: only sub-section aligned range would be populated
// SPDX-License-Identifier: GPL-2.0
/*
 * sparse memory mappings.
 */
#include <linux/mm.h>
#include <linux/slab.h>
#include <linux/mmzone.h>
#include <linux/memblock.h>
#include <linux/compiler.h>
#include <linux/highmem.h>
#include <linux/export.h>
#include <linux/spinlock.h>
#include <linux/vmalloc.h>
#include <linux/swap.h>
#include <linux/swapops.h>

#include "internal.h"
#include <asm/dma.h>

/*
 * Permanent SPARSEMEM data:
 *
 * 1) mem_section	- memory sections, mem_map's for valid memory
 */
#ifdef CONFIG_SPARSEMEM_EXTREME
struct mem_section **mem_section;
#else
struct mem_section mem_section[NR_SECTION_ROOTS][SECTIONS_PER_ROOT]
	____cacheline_internodealigned_in_smp;
#endif
EXPORT_SYMBOL(mem_section);

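/*
 * Illustrative note (editorial, not in the original source): with
 * typical x86_64 values -- SECTION_SIZE_BITS = 27, PAGE_SHIFT = 12 --
 * a section covers 1 << 27 bytes = 128MB, i.e. 32768 pages, and
 * SECTIONS_PER_ROOT is PAGE_SIZE / sizeof(struct mem_section). A
 * section number is then resolved in two steps, roughly:
 *
 *	root  = SECTION_NR_TO_ROOT(nr);		// nr / SECTIONS_PER_ROOT
 *	entry = nr & SECTION_ROOT_MASK;		// nr % SECTIONS_PER_ROOT
 *	ms    = &mem_section[root][entry];
 *
 * The exact constants are architecture-specific; the numbers above are
 * only an example.
 */
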
#ifdef NODE_NOT_IN_PAGE_FLAGS
/*
 * If we did not store the node number in the page then we have to
 * do a lookup in the section_to_node_table in order to find which
 * node the page belongs to.
 */
#if MAX_NUMNODES <= 256
static u8 section_to_node_table[NR_MEM_SECTIONS] __cacheline_aligned;
#else
static u16 section_to_node_table[NR_MEM_SECTIONS] __cacheline_aligned;
#endif

int page_to_nid(const struct page *page)
{
	return section_to_node_table[page_to_section(page)];
}
EXPORT_SYMBOL(page_to_nid);

static void set_section_nid(unsigned long section_nr, int nid)
{
	section_to_node_table[section_nr] = nid;
}
#else /* !NODE_NOT_IN_PAGE_FLAGS */
static inline void set_section_nid(unsigned long section_nr, int nid)
{
}
#endif

#ifdef CONFIG_SPARSEMEM_EXTREME
static noinline struct mem_section __ref *sparse_index_alloc(int nid)
{
	struct mem_section *section = NULL;
	unsigned long array_size = SECTIONS_PER_ROOT *
				   sizeof(struct mem_section);

	if (slab_is_available()) {
		section = kzalloc_node(array_size, GFP_KERNEL, nid);
	} else {
		section = memblock_alloc_node(array_size, SMP_CACHE_BYTES,
					      nid);
		if (!section)
			panic("%s: Failed to allocate %lu bytes nid=%d\n",
			      __func__, array_size, nid);
	}

	return section;
}

static int __meminit sparse_index_init(unsigned long section_nr, int nid)
{
	unsigned long root = SECTION_NR_TO_ROOT(section_nr);
	struct mem_section *section;

	/*
	 * An existing section is possible in the sub-section hotplug
	 * case. First hot-add instantiates, follow-on hot-add reuses
	 * the existing section.
	 *
	 * The mem_hotplug_lock resolves the apparent race below.
	 */
	if (mem_section[root])
		return 0;

	section = sparse_index_alloc(nid);
	if (!section)
		return -ENOMEM;

	mem_section[root] = section;

	return 0;
}
#else /* !SPARSEMEM_EXTREME */
static inline int sparse_index_init(unsigned long section_nr, int nid)
{
	return 0;
}
#endif

#ifdef CONFIG_SPARSEMEM_EXTREME
unsigned long __section_nr(struct mem_section *ms)
{
	unsigned long root_nr;
	struct mem_section *root = NULL;

	for (root_nr = 0; root_nr < NR_SECTION_ROOTS; root_nr++) {
		root = __nr_to_section(root_nr * SECTIONS_PER_ROOT);
		if (!root)
			continue;

		if ((ms >= root) && (ms < (root + SECTIONS_PER_ROOT)))
			break;
	}

	VM_BUG_ON(!root);

	return (root_nr * SECTIONS_PER_ROOT) + (ms - root);
}
#else
unsigned long __section_nr(struct mem_section *ms)
{
	return (unsigned long)(ms - mem_section[0]);
}
#endif

/*
 * During early boot, before section_mem_map is used for an actual
 * mem_map, we use section_mem_map to store the section's NUMA
 * node. This keeps us from having to use another data structure. The
 * node information is cleared just before we store the real mem_map.
 */
static inline unsigned long sparse_encode_early_nid(int nid)
{
	return (nid << SECTION_NID_SHIFT);
}

static inline int sparse_early_nid(struct mem_section *section)
{
	return (section->section_mem_map >> SECTION_NID_SHIFT);
}

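/*
 * Illustrative note (editorial): the pair above simply round-trips the
 * node id through the section_mem_map word while no real mem_map is
 * installed yet, e.g.:
 *
 *	ms->section_mem_map = sparse_encode_early_nid(nid);
 *	...
 *	int nid = sparse_early_nid(ms);	  // recovers the same nid
 */
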
/* Validate the physical addressing limitations of the model */
void __meminit mminit_validate_memmodel_limits(unsigned long *start_pfn,
						unsigned long *end_pfn)
{
	unsigned long max_sparsemem_pfn = 1UL << (MAX_PHYSMEM_BITS-PAGE_SHIFT);

	/*
	 * Sanity checks - do not allow an architecture to pass
	 * in larger pfns than the maximum scope of sparsemem:
	 */
	if (*start_pfn > max_sparsemem_pfn) {
		mminit_dprintk(MMINIT_WARNING, "pfnvalidation",
			"Start of range %lu -> %lu exceeds SPARSEMEM max %lu\n",
			*start_pfn, *end_pfn, max_sparsemem_pfn);
		WARN_ON_ONCE(1);
		*start_pfn = max_sparsemem_pfn;
		*end_pfn = max_sparsemem_pfn;
	} else if (*end_pfn > max_sparsemem_pfn) {
		mminit_dprintk(MMINIT_WARNING, "pfnvalidation",
			"End of range %lu -> %lu exceeds SPARSEMEM max %lu\n",
			*start_pfn, *end_pfn, max_sparsemem_pfn);
		WARN_ON_ONCE(1);
		*end_pfn = max_sparsemem_pfn;
	}
}

/*
 * There are a number of times that we loop over NR_MEM_SECTIONS,
 * looking for section_present() on each. But, when we have very
 * large physical address spaces, NR_MEM_SECTIONS can also be
 * very large which makes the loops quite long.
 *
 * Keeping track of this gives us an easy way to break out of
 * those loops early.
 */
unsigned long __highest_present_section_nr;
static void section_mark_present(struct mem_section *ms)
{
	unsigned long section_nr = __section_nr(ms);

	if (section_nr > __highest_present_section_nr)
		__highest_present_section_nr = section_nr;

	ms->section_mem_map |= SECTION_MARKED_PRESENT;
}

#define for_each_present_section_nr(start, section_nr)		\
	for (section_nr = next_present_section_nr(start-1);	\
	     ((section_nr != -1) &&				\
	      (section_nr <= __highest_present_section_nr));	\
	     section_nr = next_present_section_nr(section_nr))

static inline unsigned long first_present_section_nr(void)
{
	return next_present_section_nr(-1);
}

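/*
 * Illustrative note (editorial): a typical walk over the present
 * sections with the helper above looks like:
 *
 *	unsigned long pnum;
 *
 *	for_each_present_section_nr(first_present_section_nr(), pnum) {
 *		struct mem_section *ms = __nr_to_section(pnum);
 *		// ... pnum is guaranteed to be a present section ...
 *	}
 *
 * The __highest_present_section_nr bound keeps the walk from scanning
 * the full NR_MEM_SECTIONS space.
 */
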
#ifdef CONFIG_SPARSEMEM_VMEMMAP
static void subsection_mask_set(unsigned long *map, unsigned long pfn,
		unsigned long nr_pages)
{
	int idx = subsection_map_index(pfn);
	int end = subsection_map_index(pfn + nr_pages - 1);

	bitmap_set(map, idx, end - idx + 1);
}

void __init subsection_map_init(unsigned long pfn, unsigned long nr_pages)
{
	int end_sec = pfn_to_section_nr(pfn + nr_pages - 1);
	unsigned long nr, start_sec = pfn_to_section_nr(pfn);

	if (!nr_pages)
		return;

	for (nr = start_sec; nr <= end_sec; nr++) {
		struct mem_section *ms;
		unsigned long pfns;

		pfns = min(nr_pages, PAGES_PER_SECTION
				- (pfn & ~PAGE_SECTION_MASK));
		ms = __nr_to_section(nr);
		subsection_mask_set(ms->usage->subsection_map, pfn, pfns);

		pr_debug("%s: sec: %lu pfns: %lu set(%d, %d)\n", __func__, nr,
				pfns, subsection_map_index(pfn),
				subsection_map_index(pfn + pfns - 1));

		pfn += pfns;
		nr_pages -= pfns;
	}
}
#else
void __init subsection_map_init(unsigned long pfn, unsigned long nr_pages)
{
}
#endif

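/*
 * Illustrative note (editorial): with typical x86_64 values a
 * sub-section spans 2MB (SUBSECTION_SHIFT = 21), giving 64 sub-sections
 * per 128MB section. For example, hot-adding 4MB at a 2MB-aligned pfn
 * sets two consecutive bits in subsection_map:
 *
 *	idx = subsection_map_index(pfn);		// first 2MB chunk
 *	end = subsection_map_index(pfn + pfns - 1);	// last 2MB chunk
 *	bitmap_set(map, idx, end - idx + 1);		// two bits here
 *
 * The constants are architecture-specific; the sizes above are only an
 * example.
 */
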
/* Record a memory area against a node. */
void __init memory_present(int nid, unsigned long start, unsigned long end)
{
	unsigned long pfn;

#ifdef CONFIG_SPARSEMEM_EXTREME
	if (unlikely(!mem_section)) {
		unsigned long size, align;

		size = sizeof(struct mem_section*) * NR_SECTION_ROOTS;
		align = 1 << (INTERNODE_CACHE_SHIFT);
		mem_section = memblock_alloc(size, align);
		if (!mem_section)
			panic("%s: Failed to allocate %lu bytes align=0x%lx\n",
			      __func__, size, align);
	}
#endif

	start &= PAGE_SECTION_MASK;
	mminit_validate_memmodel_limits(&start, &end);
	for (pfn = start; pfn < end; pfn += PAGES_PER_SECTION) {
		unsigned long section = pfn_to_section_nr(pfn);
		struct mem_section *ms;

		sparse_index_init(section, nid);
		set_section_nid(section, nid);

		ms = __nr_to_section(section);
		if (!ms->section_mem_map) {
			ms->section_mem_map = sparse_encode_early_nid(nid) |
							SECTION_IS_ONLINE;
			section_mark_present(ms);
		}
	}
}

/*
 * Mark all memblocks as present using memory_present(). This is a
 * convenience function that is useful to a number of arches
 * to mark all of the system's memory as present during initialization.
 */
void __init memblocks_present(void)
{
	struct memblock_region *reg;

	for_each_memblock(memory, reg) {
		memory_present(memblock_get_region_node(reg),
			       memblock_region_memory_base_pfn(reg),
			       memblock_region_memory_end_pfn(reg));
	}
}

/*
 * Subtle, we encode the real pfn into the mem_map such that
 * the identity pfn - section_mem_map will return the actual
 * physical page frame number.
 */
static unsigned long sparse_encode_mem_map(struct page *mem_map, unsigned long pnum)
{
	unsigned long coded_mem_map =
		(unsigned long)(mem_map - (section_nr_to_pfn(pnum)));
	BUILD_BUG_ON(SECTION_MAP_LAST_BIT > (1UL<<PFN_SECTION_SHIFT));
	BUG_ON(coded_mem_map & ~SECTION_MAP_MASK);
	return coded_mem_map;
}

/*
 * Decode mem_map from the coded memmap
 */
struct page *sparse_decode_mem_map(unsigned long coded_mem_map, unsigned long pnum)
{
	/* mask off the extra low bits of information */
	coded_mem_map &= SECTION_MAP_MASK;
	return ((struct page *)coded_mem_map) + section_nr_to_pfn(pnum);
}

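/*
 * Illustrative note (editorial): encode and decode are exact inverses,
 * roughly:
 *
 *	coded = sparse_encode_mem_map(map, pnum);
 *	BUG_ON(sparse_decode_mem_map(coded, pnum) != map);
 *
 * Storing mem_map minus the section's first pfn means that, given a
 * pfn, "__section_mem_map_addr(ms) + pfn" lands directly on the right
 * struct page without another subtraction.
 */
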
static void __meminit sparse_init_one_section(struct mem_section *ms,
		unsigned long pnum, struct page *mem_map,
		struct mem_section_usage *usage, unsigned long flags)
{
	ms->section_mem_map &= ~SECTION_MAP_MASK;
	ms->section_mem_map |= sparse_encode_mem_map(mem_map, pnum)
		| SECTION_HAS_MEM_MAP | flags;
	ms->usage = usage;
}

static unsigned long usemap_size(void)
{
	return BITS_TO_LONGS(SECTION_BLOCKFLAGS_BITS) * sizeof(unsigned long);
}

size_t mem_section_usage_size(void)
{
	return sizeof(struct mem_section_usage) + usemap_size();
}

#ifdef CONFIG_MEMORY_HOTREMOVE
static struct mem_section_usage * __init
sparse_early_usemaps_alloc_pgdat_section(struct pglist_data *pgdat,
					 unsigned long size)
{
	struct mem_section_usage *usage;
	unsigned long goal, limit;
	int nid;
	/*
	 * A page may contain usemaps for other sections preventing the
	 * page being freed and making a section unremovable while
	 * other sections referencing the usemap remain active. Similarly,
	 * a pgdat can prevent a section being removed. If section A
	 * contains a pgdat and section B contains the usemap, both
	 * sections become inter-dependent. This allocates usemaps
	 * from the same section as the pgdat where possible to avoid
	 * this problem.
	 */
	goal = __pa(pgdat) & (PAGE_SECTION_MASK << PAGE_SHIFT);
	limit = goal + (1UL << PA_SECTION_SHIFT);
	nid = early_pfn_to_nid(goal >> PAGE_SHIFT);
again:
	usage = memblock_alloc_try_nid(size, SMP_CACHE_BYTES, goal, limit, nid);
	if (!usage && limit) {
		limit = 0;
		goto again;
	}
	return usage;
}

static void __init check_usemap_section_nr(int nid,
		struct mem_section_usage *usage)
{
	unsigned long usemap_snr, pgdat_snr;
	static unsigned long old_usemap_snr;
	static unsigned long old_pgdat_snr;
	struct pglist_data *pgdat = NODE_DATA(nid);
	int usemap_nid;

	/* First call */
	if (!old_usemap_snr) {
		old_usemap_snr = NR_MEM_SECTIONS;
		old_pgdat_snr = NR_MEM_SECTIONS;
	}

	usemap_snr = pfn_to_section_nr(__pa(usage) >> PAGE_SHIFT);
	pgdat_snr = pfn_to_section_nr(__pa(pgdat) >> PAGE_SHIFT);
	if (usemap_snr == pgdat_snr)
		return;

	if (old_usemap_snr == usemap_snr && old_pgdat_snr == pgdat_snr)
		/* skip redundant message */
		return;

	old_usemap_snr = usemap_snr;
	old_pgdat_snr = pgdat_snr;

	usemap_nid = sparse_early_nid(__nr_to_section(usemap_snr));
	if (usemap_nid != nid) {
		pr_info("node %d must be removed before remove section %ld\n",
			nid, usemap_snr);
		return;
	}
	/*
	 * There is a circular dependency. Some platforms allow
	 * un-removable sections because they will just gather other
	 * removable sections for dynamic partitioning. Just notify the
	 * un-removable section's number here.
	 */
	pr_info("Section %ld and %ld (node %d) have a circular dependency on usemap and pgdat allocations\n",
		usemap_snr, pgdat_snr, nid);
}
#else
static struct mem_section_usage * __init
sparse_early_usemaps_alloc_pgdat_section(struct pglist_data *pgdat,
					 unsigned long size)
{
	return memblock_alloc_node(size, SMP_CACHE_BYTES, pgdat->node_id);
}

static void __init check_usemap_section_nr(int nid,
		struct mem_section_usage *usage)
{
}
#endif /* CONFIG_MEMORY_HOTREMOVE */

#ifdef CONFIG_SPARSEMEM_VMEMMAP
static unsigned long __init section_map_size(void)
{
	return ALIGN(sizeof(struct page) * PAGES_PER_SECTION, PMD_SIZE);
}

#else
static unsigned long __init section_map_size(void)
{
	return PAGE_ALIGN(sizeof(struct page) * PAGES_PER_SECTION);
}

struct page __init *__populate_section_memmap(unsigned long pfn,
		unsigned long nr_pages, int nid, struct vmem_altmap *altmap)
{
	unsigned long size = section_map_size();
	struct page *map = sparse_buffer_alloc(size);
	phys_addr_t addr = __pa(MAX_DMA_ADDRESS);

	if (map)
		return map;

	map = memblock_alloc_try_nid_raw(size, size, addr,
					 MEMBLOCK_ALLOC_ACCESSIBLE, nid);
	if (!map)
		panic("%s: Failed to allocate %lu bytes align=0x%lx nid=%d from=%pa\n",
		      __func__, size, PAGE_SIZE, nid, &addr);

	return map;
}
#endif /* !CONFIG_SPARSEMEM_VMEMMAP */

static void *sparsemap_buf __meminitdata;
static void *sparsemap_buf_end __meminitdata;

static inline void __meminit sparse_buffer_free(unsigned long size)
{
	WARN_ON(!sparsemap_buf || size == 0);
	memblock_free_early(__pa(sparsemap_buf), size);
}

static void __init sparse_buffer_init(unsigned long size, int nid)
{
	phys_addr_t addr = __pa(MAX_DMA_ADDRESS);
	WARN_ON(sparsemap_buf);	/* forgot to call sparse_buffer_fini()? */
	/*
	 * Pre-allocated buffer is mainly used by __populate_section_memmap
	 * and we want it to be properly aligned to the section size - this is
	 * especially the case for VMEMMAP which maps memmap to PMDs
	 */
	sparsemap_buf = memblock_alloc_exact_nid_raw(size, section_map_size(),
					addr, MEMBLOCK_ALLOC_ACCESSIBLE, nid);
	sparsemap_buf_end = sparsemap_buf + size;
}

static void __init sparse_buffer_fini(void)
{
	unsigned long size = sparsemap_buf_end - sparsemap_buf;

	if (sparsemap_buf && size > 0)
		sparse_buffer_free(size);
	sparsemap_buf = NULL;
}

void * __meminit sparse_buffer_alloc(unsigned long size)
{
	void *ptr = NULL;

	if (sparsemap_buf) {
		ptr = (void *) roundup((unsigned long)sparsemap_buf, size);
		if (ptr + size > sparsemap_buf_end)
			ptr = NULL;
		else {
			/* Free redundant aligned space */
			if ((unsigned long)(ptr - sparsemap_buf) > 0)
				sparse_buffer_free((unsigned long)(ptr - sparsemap_buf));
			sparsemap_buf = ptr + size;
		}
	}
	return ptr;
}

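/*
 * Illustrative note (editorial): sparse_buffer_alloc() carves
 * size-aligned chunks out of the one big per-node buffer set up by
 * sparse_buffer_init(). E.g. for two sections on the same node:
 *
 *	sparse_buffer_init(2 * section_map_size(), nid);
 *	map0 = sparse_buffer_alloc(section_map_size());
 *	map1 = sparse_buffer_alloc(section_map_size());
 *	sparse_buffer_fini();	// returns any unused tail to memblock
 *
 * Any gap created by rounding ptr up to the requested alignment is
 * handed back to memblock immediately via sparse_buffer_free().
 */
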
void __weak __meminit vmemmap_populate_print_last(void)
{
}

/*
 * Initialize sparse on a specific node. The node spans [pnum_begin, pnum_end)
 * and the number of present sections in this node is map_count.
 */
static void __init sparse_init_nid(int nid, unsigned long pnum_begin,
				   unsigned long pnum_end,
				   unsigned long map_count)
{
	struct mem_section_usage *usage;
	unsigned long pnum;
	struct page *map;

	usage = sparse_early_usemaps_alloc_pgdat_section(NODE_DATA(nid),
			mem_section_usage_size() * map_count);
	if (!usage) {
		pr_err("%s: node[%d] usemap allocation failed", __func__, nid);
		goto failed;
	}
	sparse_buffer_init(map_count * section_map_size(), nid);
	for_each_present_section_nr(pnum_begin, pnum) {
		unsigned long pfn = section_nr_to_pfn(pnum);

		if (pnum >= pnum_end)
			break;

		map = __populate_section_memmap(pfn, PAGES_PER_SECTION,
				nid, NULL);
		if (!map) {
			pr_err("%s: node[%d] memory map backing failed. Some memory will not be available.",
			       __func__, nid);
			pnum_begin = pnum;
			goto failed;
		}
		check_usemap_section_nr(nid, usage);
		sparse_init_one_section(__nr_to_section(pnum), pnum, map, usage,
				SECTION_IS_EARLY);
		usage = (void *) usage + mem_section_usage_size();
	}
	sparse_buffer_fini();
	return;
failed:
	/* We failed to allocate, mark all the following pnums as not present */
	for_each_present_section_nr(pnum_begin, pnum) {
		struct mem_section *ms;

		if (pnum >= pnum_end)
			break;
		ms = __nr_to_section(pnum);
		ms->section_mem_map = 0;
	}
}

/*
 * Allocate the accumulated non-linear sections, allocate a mem_map
 * for each and record the physical to section mapping.
 */
void __init sparse_init(void)
{
	unsigned long pnum_begin = first_present_section_nr();
	int nid_begin = sparse_early_nid(__nr_to_section(pnum_begin));
	unsigned long pnum_end, map_count = 1;

	/* Setup pageblock_order for HUGETLB_PAGE_SIZE_VARIABLE */
	set_pageblock_order();

	for_each_present_section_nr(pnum_begin + 1, pnum_end) {
		int nid = sparse_early_nid(__nr_to_section(pnum_end));

		if (nid == nid_begin) {
			map_count++;
			continue;
		}
		/* Init node with sections in range [pnum_begin, pnum_end) */
		sparse_init_nid(nid_begin, pnum_begin, pnum_end, map_count);
		nid_begin = nid;
		pnum_begin = pnum_end;
		map_count = 1;
	}
	/* cover the last node */
	sparse_init_nid(nid_begin, pnum_begin, pnum_end, map_count);
	vmemmap_populate_print_last();
}

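/*
 * Illustrative note (editorial): sparse_init() batches contiguous runs
 * of present sections that share a node id. For a layout like
 *
 *	pnum:  0  1  2  3  4
 *	nid:   0  0  0  1  1
 *
 * it calls sparse_init_nid(0, 0, 3, 3) for node 0, and the trailing
 * "cover the last node" call then handles node 1's sections 3 and 4
 * (map_count == 2), so each node's usemaps and memmaps are allocated
 * in one batch on that node.
 */
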
#ifdef CONFIG_MEMORY_HOTPLUG

/* Mark all memory sections within the pfn range as online */
void online_mem_sections(unsigned long start_pfn, unsigned long end_pfn)
{
	unsigned long pfn;

	for (pfn = start_pfn; pfn < end_pfn; pfn += PAGES_PER_SECTION) {
		unsigned long section_nr = pfn_to_section_nr(pfn);
		struct mem_section *ms;

		/* onlining code should never touch invalid ranges */
		if (WARN_ON(!valid_section_nr(section_nr)))
			continue;

		ms = __nr_to_section(section_nr);
		ms->section_mem_map |= SECTION_IS_ONLINE;
	}
}

#ifdef CONFIG_MEMORY_HOTREMOVE
/* Mark all memory sections within the pfn range as offline */
void offline_mem_sections(unsigned long start_pfn, unsigned long end_pfn)
{
	unsigned long pfn;

	for (pfn = start_pfn; pfn < end_pfn; pfn += PAGES_PER_SECTION) {
		unsigned long section_nr = pfn_to_section_nr(pfn);
		struct mem_section *ms;

		/*
		 * TODO this needs some double checking. Offlining code makes
		 * sure to check pfn_valid but those checks might be just bogus
		 */
		if (WARN_ON(!valid_section_nr(section_nr)))
			continue;

		ms = __nr_to_section(section_nr);
		ms->section_mem_map &= ~SECTION_IS_ONLINE;
	}
}
#endif

#ifdef CONFIG_SPARSEMEM_VMEMMAP
static struct page * __meminit populate_section_memmap(unsigned long pfn,
		unsigned long nr_pages, int nid, struct vmem_altmap *altmap)
{
	return __populate_section_memmap(pfn, nr_pages, nid, altmap);
}

static void depopulate_section_memmap(unsigned long pfn, unsigned long nr_pages,
		struct vmem_altmap *altmap)
{
	unsigned long start = (unsigned long) pfn_to_page(pfn);
	unsigned long end = start + nr_pages * sizeof(struct page);

	vmemmap_free(start, end, altmap);
}
static void free_map_bootmem(struct page *memmap)
{
	unsigned long start = (unsigned long)memmap;
	unsigned long end = (unsigned long)(memmap + PAGES_PER_SECTION);

	vmemmap_free(start, end, NULL);
}

static int clear_subsection_map(unsigned long pfn, unsigned long nr_pages)
{
	DECLARE_BITMAP(map, SUBSECTIONS_PER_SECTION) = { 0 };
	DECLARE_BITMAP(tmp, SUBSECTIONS_PER_SECTION) = { 0 };
	struct mem_section *ms = __pfn_to_section(pfn);
	unsigned long *subsection_map = ms->usage
		? &ms->usage->subsection_map[0] : NULL;

	subsection_mask_set(map, pfn, nr_pages);
	if (subsection_map)
		bitmap_and(tmp, map, subsection_map, SUBSECTIONS_PER_SECTION);

	if (WARN(!subsection_map || !bitmap_equal(tmp, map, SUBSECTIONS_PER_SECTION),
				"section already deactivated (%#lx + %ld)\n",
				pfn, nr_pages))
		return -EINVAL;

	bitmap_xor(subsection_map, map, subsection_map, SUBSECTIONS_PER_SECTION);
	return 0;
}

static bool is_subsection_map_empty(struct mem_section *ms)
{
	return bitmap_empty(&ms->usage->subsection_map[0],
			    SUBSECTIONS_PER_SECTION);
}

static int fill_subsection_map(unsigned long pfn, unsigned long nr_pages)
{
	struct mem_section *ms = __pfn_to_section(pfn);
	DECLARE_BITMAP(map, SUBSECTIONS_PER_SECTION) = { 0 };
	unsigned long *subsection_map;
	int rc = 0;

	subsection_mask_set(map, pfn, nr_pages);

	subsection_map = &ms->usage->subsection_map[0];

	if (bitmap_empty(map, SUBSECTIONS_PER_SECTION))
		rc = -EINVAL;
	else if (bitmap_intersects(map, subsection_map, SUBSECTIONS_PER_SECTION))
		rc = -EEXIST;
	else
		bitmap_or(subsection_map, map, subsection_map,
				SUBSECTIONS_PER_SECTION);

	return rc;
}
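
/*
 * Illustrative note (editorial): fill and clear are symmetric bitmap
 * operations on the section's subsection_map. Hot-adding the same 2MB
 * range twice would go, roughly:
 *
 *	fill_subsection_map(pfn, nr);	// 0 -> bits set
 *	fill_subsection_map(pfn, nr);	// -EEXIST, bits already set
 *	clear_subsection_map(pfn, nr);	// 0 -> bits cleared again
 *	clear_subsection_map(pfn, nr);	// -EINVAL, warns "already deactivated"
 *
 * The 2MB figure assumes typical x86_64 sub-section sizing.
 */
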
#else
struct page * __meminit populate_section_memmap(unsigned long pfn,
		unsigned long nr_pages, int nid, struct vmem_altmap *altmap)
{
	return kvmalloc_node(array_size(sizeof(struct page),
					PAGES_PER_SECTION), GFP_KERNEL, nid);
}

static void depopulate_section_memmap(unsigned long pfn, unsigned long nr_pages,
		struct vmem_altmap *altmap)
{
	kvfree(pfn_to_page(pfn));
}

static void free_map_bootmem(struct page *memmap)
{
	unsigned long maps_section_nr, removing_section_nr, i;
	unsigned long magic, nr_pages;
	struct page *page = virt_to_page(memmap);

	nr_pages = PAGE_ALIGN(PAGES_PER_SECTION * sizeof(struct page))
		>> PAGE_SHIFT;

	for (i = 0; i < nr_pages; i++, page++) {
		magic = (unsigned long) page->freelist;

		BUG_ON(magic == NODE_INFO);

		maps_section_nr = pfn_to_section_nr(page_to_pfn(page));
		removing_section_nr = page_private(page);

		/*
		 * When this function is called, the removing section is
		 * in a logically offlined state. This means all pages are
		 * isolated from the page allocator. If the removing
		 * section's memmap is placed on the same section, it must
		 * not be freed. If it were freed, the page allocator could
		 * hand it out even though it will be removed physically soon.
		 */
		if (maps_section_nr != removing_section_nr)
			put_page_bootmem(page);
	}
}

static int clear_subsection_map(unsigned long pfn, unsigned long nr_pages)
{
	return 0;
}

static bool is_subsection_map_empty(struct mem_section *ms)
{
	return true;
}

static int fill_subsection_map(unsigned long pfn, unsigned long nr_pages)
{
	return 0;
}
#endif /* CONFIG_SPARSEMEM_VMEMMAP */

/*
 * To deactivate a memory region, there are 3 cases to handle across
 * two configurations (SPARSEMEM_VMEMMAP={y,n}):
 *
 * 1. deactivation of a partial hot-added section (only possible in
 *    the SPARSEMEM_VMEMMAP=y case).
 *    a) section was present at memory init.
 *    b) section was hot-added post memory init.
 * 2. deactivation of a complete hot-added section.
 * 3. deactivation of a complete section from memory init.
 *
 * For 1, when the subsection_map is not empty we will not free the
 * usage map, but we still need to free the vmemmap range.
 *
 * For 2 and 3, the SPARSEMEM_VMEMMAP={y,n} cases are unified.
 */
static void section_deactivate(unsigned long pfn, unsigned long nr_pages,
		struct vmem_altmap *altmap)
{
	struct mem_section *ms = __pfn_to_section(pfn);
	bool section_is_early = early_section(ms);
	struct page *memmap = NULL;
	bool empty;

	if (clear_subsection_map(pfn, nr_pages))
		return;

	empty = is_subsection_map_empty(ms);
	if (empty) {
		unsigned long section_nr = pfn_to_section_nr(pfn);

		/*
		 * When removing an early section, the usage map is kept (as the
		 * usage maps of other sections fall into the same page). It
		 * will be re-used when re-adding the section - which is then no
		 * longer an early section. If the usage map is PageReserved, it
		 * was allocated during boot.
		 */
		if (!PageReserved(virt_to_page(ms->usage))) {
			kfree(ms->usage);
			ms->usage = NULL;
		}
		memmap = sparse_decode_mem_map(ms->section_mem_map, section_nr);
		/*
		 * Mark the section invalid so that valid_section()
		 * returns false. This prevents code from dereferencing
		 * the ms->usage array.
		 */
		ms->section_mem_map &= ~SECTION_HAS_MEM_MAP;
	}

	/*
	 * The memmap of early sections is always fully populated. See
	 * section_activate() and pfn_valid().
	 */
	if (!section_is_early)
		depopulate_section_memmap(pfn, nr_pages, altmap);
	else if (memmap)
		free_map_bootmem(memmap);

	if (empty)
		ms->section_mem_map = (unsigned long)NULL;
}

static struct page * __meminit section_activate(int nid, unsigned long pfn,
		unsigned long nr_pages, struct vmem_altmap *altmap)
{
	struct mem_section *ms = __pfn_to_section(pfn);
	struct mem_section_usage *usage = NULL;
	struct page *memmap;
	int rc = 0;

	if (!ms->usage) {
		usage = kzalloc(mem_section_usage_size(), GFP_KERNEL);
		if (!usage)
			return ERR_PTR(-ENOMEM);
		ms->usage = usage;
	}

	rc = fill_subsection_map(pfn, nr_pages);
	if (rc) {
		if (usage)
			ms->usage = NULL;
		kfree(usage);
		return ERR_PTR(rc);
	}

	/*
	 * The early init code does not consider partially populated
	 * initial sections, it simply assumes that memory will never be
	 * referenced. If we hot-add memory into such a section then we
	 * do not need to populate the memmap and can simply reuse what
	 * is already there.
	 */
	if (nr_pages < PAGES_PER_SECTION && early_section(ms))
		return pfn_to_page(pfn);

	memmap = populate_section_memmap(pfn, nr_pages, nid, altmap);
	if (!memmap) {
		section_deactivate(pfn, nr_pages, altmap);
		return ERR_PTR(-ENOMEM);
	}

	return memmap;
}

/**
 * sparse_add_section - add a memory section, or populate an existing one
 * @nid: The node to add section on
 * @start_pfn: start pfn of the memory range
 * @nr_pages: number of pfns to add in the section
 * @altmap: device page map
 *
 * This is only intended for hotplug.
 *
 * Note that only VMEMMAP supports sub-section aligned hotplug;
 * the proper alignment and size are gated by check_pfn_span().
 *
 * Return:
 * * 0		- On success.
 * * -EEXIST	- Section was already present.
 * * -ENOMEM	- Out of memory.
 */
int __meminit sparse_add_section(int nid, unsigned long start_pfn,
		unsigned long nr_pages, struct vmem_altmap *altmap)
{
	unsigned long section_nr = pfn_to_section_nr(start_pfn);
	struct mem_section *ms;
	struct page *memmap;
	int ret;

	ret = sparse_index_init(section_nr, nid);
	if (ret < 0)
		return ret;

	memmap = section_activate(nid, start_pfn, nr_pages, altmap);
	if (IS_ERR(memmap))
		return PTR_ERR(memmap);

	/*
	 * Poison uninitialized struct pages in order to catch invalid flags
	 * combinations.
	 */
	page_init_poison(memmap, sizeof(struct page) * nr_pages);

	ms = __nr_to_section(section_nr);
	set_section_nid(section_nr, nid);
	section_mark_present(ms);

	/* Align memmap to section boundary in the subsection case */
	if (section_nr_to_pfn(section_nr) != start_pfn)
		memmap = pfn_to_page(section_nr_to_pfn(section_nr));
	sparse_init_one_section(ms, section_nr, memmap, ms->usage, 0);

	return 0;
}

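/*
 * Illustrative usage sketch (editorial, assuming a hotplug caller such
 * as mm/memory_hotplug.c): a driver hot-adding one sub-section of
 * device memory would end up here roughly as:
 *
 *	ret = sparse_add_section(nid, pfn, PAGES_PER_SUBSECTION, altmap);
 *	if (ret == -EEXIST)
 *		;	// range already populated
 *
 * Only pfn/nr_pages values that pass check_pfn_span() in the caller
 * reach this function.
 */
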
#ifdef CONFIG_MEMORY_FAILURE
static void clear_hwpoisoned_pages(struct page *memmap, int nr_pages)
{
	int i;

	/*
	 * A further optimization is to have per section refcounted
	 * num_poisoned_pages.  But that would need more space per memmap, so
	 * for now just do a quick global check to speed up this routine in the
	 * absence of bad pages.
	 */
	if (atomic_long_read(&num_poisoned_pages) == 0)
		return;

	for (i = 0; i < nr_pages; i++) {
		if (PageHWPoison(&memmap[i])) {
			num_poisoned_pages_dec();
			ClearPageHWPoison(&memmap[i]);
		}
	}
}
#else
static inline void clear_hwpoisoned_pages(struct page *memmap, int nr_pages)
{
}
#endif

void sparse_remove_section(struct mem_section *ms, unsigned long pfn,
			   unsigned long nr_pages, unsigned long map_offset,
			   struct vmem_altmap *altmap)
{
	clear_hwpoisoned_pages(pfn_to_page(pfn) + map_offset,
			       nr_pages - map_offset);
	section_deactivate(pfn, nr_pages, altmap);
}
#endif /* CONFIG_MEMORY_HOTPLUG */