// SPDX-License-Identifier: GPL-2.0
/*
 * sparse memory mappings.
 */
#include <linux/mm.h>
#include <linux/slab.h>
#include <linux/mmzone.h>
#include <linux/memblock.h>
#include <linux/compiler.h>
#include <linux/highmem.h>
#include <linux/export.h>
#include <linux/spinlock.h>
#include <linux/vmalloc.h>
#include <linux/swap.h>
#include <linux/swapops.h>
#include <linux/bootmem_info.h>
#include <linux/vmstat.h>
#include "internal.h"
#include <asm/dma.h>

/*
 * Permanent SPARSEMEM data:
 *
 * 1) mem_section	- memory sections, mem_map's for valid memory
 */
#ifdef CONFIG_SPARSEMEM_EXTREME
struct mem_section **mem_section;
#else
struct mem_section mem_section[NR_SECTION_ROOTS][SECTIONS_PER_ROOT]
	____cacheline_internodealigned_in_smp;
#endif
EXPORT_SYMBOL(mem_section);

#ifdef NODE_NOT_IN_PAGE_FLAGS
/*
 * If we did not store the node number in the page flags then we have to
 * do a lookup in the section_to_node_table in order to find which
 * node the page belongs to.
 */
#if MAX_NUMNODES <= 256
static u8 section_to_node_table[NR_MEM_SECTIONS] __cacheline_aligned;
#else
static u16 section_to_node_table[NR_MEM_SECTIONS] __cacheline_aligned;
#endif

int page_to_nid(const struct page *page)
{
	return section_to_node_table[page_to_section(page)];
}
EXPORT_SYMBOL(page_to_nid);

static void set_section_nid(unsigned long section_nr, int nid)
{
	section_to_node_table[section_nr] = nid;
}
#else /* !NODE_NOT_IN_PAGE_FLAGS */
static inline void set_section_nid(unsigned long section_nr, int nid)
{
}
#endif

3e347261 62#ifdef CONFIG_SPARSEMEM_EXTREME
static noinline struct mem_section __ref *sparse_index_alloc(int nid)
{
	struct mem_section *section = NULL;
	unsigned long array_size = SECTIONS_PER_ROOT *
				   sizeof(struct mem_section);

	if (slab_is_available()) {
		section = kzalloc_node(array_size, GFP_KERNEL, nid);
	} else {
		section = memblock_alloc_node(array_size, SMP_CACHE_BYTES,
					      nid);
		if (!section)
			panic("%s: Failed to allocate %lu bytes nid=%d\n",
			      __func__, array_size, nid);
	}

	return section;
}

static int __meminit sparse_index_init(unsigned long section_nr, int nid)
{
	unsigned long root = SECTION_NR_TO_ROOT(section_nr);
	struct mem_section *section;

	/*
	 * An existing section is possible in the sub-section hotplug
	 * case. First hot-add instantiates, follow-on hot-add reuses
	 * the existing section.
	 *
	 * The mem_hotplug_lock resolves the apparent race below.
	 */
	if (mem_section[root])
		return 0;

	section = sparse_index_alloc(nid);
	if (!section)
		return -ENOMEM;

	mem_section[root] = section;

	return 0;
}
#else /* !SPARSEMEM_EXTREME */
static inline int sparse_index_init(unsigned long section_nr, int nid)
{
	return 0;
}
#endif

/*
 * During early boot, before section_mem_map is used for an actual
 * mem_map, we use section_mem_map to store the section's NUMA
 * node. This keeps us from having to use another data structure. The
 * node information is cleared just before we store the real mem_map.
 */
static inline unsigned long sparse_encode_early_nid(int nid)
{
	return ((unsigned long)nid << SECTION_NID_SHIFT);
}

static inline int sparse_early_nid(struct mem_section *section)
{
	return (section->section_mem_map >> SECTION_NID_SHIFT);
}

/* Validate the physical addressing limitations of the model */
static void __meminit mminit_validate_memmodel_limits(unsigned long *start_pfn,
						unsigned long *end_pfn)
{
	unsigned long max_sparsemem_pfn = (PHYSMEM_END + 1) >> PAGE_SHIFT;

	/*
	 * Sanity checks - do not allow an architecture to pass
	 * in larger pfns than the maximum scope of sparsemem:
	 */
	if (*start_pfn > max_sparsemem_pfn) {
		mminit_dprintk(MMINIT_WARNING, "pfnvalidation",
			"Start of range %lu -> %lu exceeds SPARSEMEM max %lu\n",
			*start_pfn, *end_pfn, max_sparsemem_pfn);
		WARN_ON_ONCE(1);
		*start_pfn = max_sparsemem_pfn;
		*end_pfn = max_sparsemem_pfn;
	} else if (*end_pfn > max_sparsemem_pfn) {
		mminit_dprintk(MMINIT_WARNING, "pfnvalidation",
			"End of range %lu -> %lu exceeds SPARSEMEM max %lu\n",
			*start_pfn, *end_pfn, max_sparsemem_pfn);
		WARN_ON_ONCE(1);
		*end_pfn = max_sparsemem_pfn;
	}
}

/*
 * There are a number of times that we loop over NR_MEM_SECTIONS,
 * looking for section_present() on each. But, when we have very
 * large physical address spaces, NR_MEM_SECTIONS can also be
 * very large which makes the loops quite long.
 *
 * Keeping track of this gives us an easy way to break out of
 * those loops early.
 */
unsigned long __highest_present_section_nr;
static void __section_mark_present(struct mem_section *ms,
		unsigned long section_nr)
{
	if (section_nr > __highest_present_section_nr)
		__highest_present_section_nr = section_nr;

	ms->section_mem_map |= SECTION_MARKED_PRESENT;
}

#define for_each_present_section_nr(start, section_nr)		\
	for (section_nr = next_present_section_nr(start-1);	\
	     section_nr != -1;					\
	     section_nr = next_present_section_nr(section_nr))

static inline unsigned long first_present_section_nr(void)
{
	return next_present_section_nr(-1);
}

#ifdef CONFIG_SPARSEMEM_VMEMMAP
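/*
 * Each mem_section_usage carries a subsection_map bitmap with one bit per
 * sub-section. subsection_map_init() walks [pfn, pfn + nr_pages) one
 * section at a time and sets the bits covering the populated sub-sections.
 */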
static void subsection_mask_set(unsigned long *map, unsigned long pfn,
		unsigned long nr_pages)
{
	int idx = subsection_map_index(pfn);
	int end = subsection_map_index(pfn + nr_pages - 1);

	bitmap_set(map, idx, end - idx + 1);
}

void __init subsection_map_init(unsigned long pfn, unsigned long nr_pages)
{
	int end_sec_nr = pfn_to_section_nr(pfn + nr_pages - 1);
	unsigned long nr, start_sec_nr = pfn_to_section_nr(pfn);

	for (nr = start_sec_nr; nr <= end_sec_nr; nr++) {
		struct mem_section *ms;
		unsigned long pfns;

		pfns = min(nr_pages, PAGES_PER_SECTION
				- (pfn & ~PAGE_SECTION_MASK));
		ms = __nr_to_section(nr);
		subsection_mask_set(ms->usage->subsection_map, pfn, pfns);

		pr_debug("%s: sec: %lu pfns: %lu set(%d, %d)\n", __func__, nr,
				pfns, subsection_map_index(pfn),
				subsection_map_index(pfn + pfns - 1));

		pfn += pfns;
		nr_pages -= pfns;
	}
}
#else
void __init subsection_map_init(unsigned long pfn, unsigned long nr_pages)
{
}
#endif

/* Record a memory area against a node. */
static void __init memory_present(int nid, unsigned long start, unsigned long end)
{
	unsigned long pfn;

	start &= PAGE_SECTION_MASK;
	mminit_validate_memmodel_limits(&start, &end);
	for (pfn = start; pfn < end; pfn += PAGES_PER_SECTION) {
		unsigned long section_nr = pfn_to_section_nr(pfn);
		struct mem_section *ms;

		sparse_index_init(section_nr, nid);
		set_section_nid(section_nr, nid);

		ms = __nr_to_section(section_nr);
		if (!ms->section_mem_map) {
			ms->section_mem_map = sparse_encode_early_nid(nid) |
							SECTION_IS_ONLINE;
			__section_mark_present(ms, section_nr);
		}
	}
}

/*
 * Mark all memblocks as present using memory_present().
 * This is a convenience function that is useful to mark all of the system's
 * memory as present during initialization.
 */
static void __init memblocks_present(void)
{
	unsigned long start, end;
	int i, nid;

#ifdef CONFIG_SPARSEMEM_EXTREME
	if (unlikely(!mem_section)) {
		unsigned long size, align;

		size = sizeof(struct mem_section *) * NR_SECTION_ROOTS;
		align = 1 << (INTERNODE_CACHE_SHIFT);
		mem_section = memblock_alloc(size, align);
		if (!mem_section)
			panic("%s: Failed to allocate %lu bytes align=0x%lx\n",
			      __func__, size, align);
	}
#endif

	for_each_mem_pfn_range(i, MAX_NUMNODES, &start, &end, &nid)
		memory_present(nid, start, end);
}

/*
 * Subtle, we encode the real pfn into the mem_map such that
 * the identity pfn - section_mem_map will return the actual
 * physical page frame number.
 */
static unsigned long sparse_encode_mem_map(struct page *mem_map, unsigned long pnum)
{
	unsigned long coded_mem_map =
		(unsigned long)(mem_map - (section_nr_to_pfn(pnum)));
	BUILD_BUG_ON(SECTION_MAP_LAST_BIT > PFN_SECTION_SHIFT);
	BUG_ON(coded_mem_map & ~SECTION_MAP_MASK);
	return coded_mem_map;
}

#ifdef CONFIG_MEMORY_HOTPLUG
/*
 * Decode mem_map from the coded memmap
 */
struct page *sparse_decode_mem_map(unsigned long coded_mem_map, unsigned long pnum)
{
	/* mask off the extra low bits of information */
	coded_mem_map &= SECTION_MAP_MASK;
	return ((struct page *)coded_mem_map) + section_nr_to_pfn(pnum);
}
#endif /* CONFIG_MEMORY_HOTPLUG */

static void __meminit sparse_init_one_section(struct mem_section *ms,
		unsigned long pnum, struct page *mem_map,
		struct mem_section_usage *usage, unsigned long flags)
{
	ms->section_mem_map &= ~SECTION_MAP_MASK;
	ms->section_mem_map |= sparse_encode_mem_map(mem_map, pnum)
		| SECTION_HAS_MEM_MAP | flags;
	ms->usage = usage;
}

static unsigned long usemap_size(void)
{
	return BITS_TO_LONGS(SECTION_BLOCKFLAGS_BITS) * sizeof(unsigned long);
}

size_t mem_section_usage_size(void)
{
	return sizeof(struct mem_section_usage) + usemap_size();
}

#ifdef CONFIG_MEMORY_HOTREMOVE
static inline phys_addr_t pgdat_to_phys(struct pglist_data *pgdat)
{
#ifndef CONFIG_NUMA
	VM_BUG_ON(pgdat != &contig_page_data);
	return __pa_symbol(&contig_page_data);
#else
	return __pa(pgdat);
#endif
}

static struct mem_section_usage * __init
sparse_early_usemaps_alloc_pgdat_section(struct pglist_data *pgdat,
					 unsigned long size)
{
	struct mem_section_usage *usage;
	unsigned long goal, limit;
	int nid;
	/*
	 * A page may contain usemaps for other sections preventing the
	 * page being freed and making a section unremovable while
	 * other sections referencing the usemap remain active. Similarly,
	 * a pgdat can prevent a section being removed. If section A
	 * contains a pgdat and section B contains the usemap, both
	 * sections become inter-dependent. This allocates usemaps
	 * from the same section as the pgdat where possible to avoid
	 * this problem.
	 */
	goal = pgdat_to_phys(pgdat) & (PAGE_SECTION_MASK << PAGE_SHIFT);
	limit = goal + (1UL << PA_SECTION_SHIFT);
	nid = early_pfn_to_nid(goal >> PAGE_SHIFT);
again:
	usage = memblock_alloc_try_nid(size, SMP_CACHE_BYTES, goal, limit, nid);
	if (!usage && limit) {
		limit = MEMBLOCK_ALLOC_ACCESSIBLE;
		goto again;
	}
	return usage;
}

static void __init check_usemap_section_nr(int nid,
		struct mem_section_usage *usage)
{
	unsigned long usemap_snr, pgdat_snr;
	static unsigned long old_usemap_snr;
	static unsigned long old_pgdat_snr;
	struct pglist_data *pgdat = NODE_DATA(nid);
	int usemap_nid;

	/* First call */
	if (!old_usemap_snr) {
		old_usemap_snr = NR_MEM_SECTIONS;
		old_pgdat_snr = NR_MEM_SECTIONS;
	}

	usemap_snr = pfn_to_section_nr(__pa(usage) >> PAGE_SHIFT);
	pgdat_snr = pfn_to_section_nr(pgdat_to_phys(pgdat) >> PAGE_SHIFT);
	if (usemap_snr == pgdat_snr)
		return;

	if (old_usemap_snr == usemap_snr && old_pgdat_snr == pgdat_snr)
		/* skip redundant message */
		return;

	old_usemap_snr = usemap_snr;
	old_pgdat_snr = pgdat_snr;

	usemap_nid = sparse_early_nid(__nr_to_section(usemap_snr));
	if (usemap_nid != nid) {
		pr_info("node %d must be removed before removing section %ld\n",
			nid, usemap_snr);
		return;
	}
	/*
	 * There is a circular dependency.
	 * Some platforms allow un-removable sections because they will just
	 * gather other removable sections for dynamic partitioning.
	 * Just notify the un-removable section's number here.
	 */
	pr_info("Section %ld and %ld (node %d) have a circular dependency on usemap and pgdat allocations\n",
		usemap_snr, pgdat_snr, nid);
}
#else
static struct mem_section_usage * __init
sparse_early_usemaps_alloc_pgdat_section(struct pglist_data *pgdat,
					 unsigned long size)
{
	return memblock_alloc_node(size, SMP_CACHE_BYTES, pgdat->node_id);
}

static void __init check_usemap_section_nr(int nid,
		struct mem_section_usage *usage)
{
}
#endif /* CONFIG_MEMORY_HOTREMOVE */

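/*
 * Note: section_map_size() below is the per-section memmap allocation
 * size. With vmemmap it is PMD-aligned so the memmap can be mapped with
 * PMDs; otherwise page alignment suffices.
 */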
#ifdef CONFIG_SPARSEMEM_VMEMMAP
static unsigned long __init section_map_size(void)
{
	return ALIGN(sizeof(struct page) * PAGES_PER_SECTION, PMD_SIZE);
}

#else
static unsigned long __init section_map_size(void)
{
	return PAGE_ALIGN(sizeof(struct page) * PAGES_PER_SECTION);
}

struct page __init *__populate_section_memmap(unsigned long pfn,
		unsigned long nr_pages, int nid, struct vmem_altmap *altmap,
		struct dev_pagemap *pgmap)
{
	unsigned long size = section_map_size();
	struct page *map = sparse_buffer_alloc(size);
	phys_addr_t addr = __pa(MAX_DMA_ADDRESS);

	if (map)
		return map;

	map = memmap_alloc(size, size, addr, nid, false);
	if (!map)
		panic("%s: Failed to allocate %lu bytes align=0x%lx nid=%d from=%pa\n",
		      __func__, size, PAGE_SIZE, nid, &addr);

	return map;
}
#endif /* !CONFIG_SPARSEMEM_VMEMMAP */

static void *sparsemap_buf __meminitdata;
static void *sparsemap_buf_end __meminitdata;

static inline void __meminit sparse_buffer_free(unsigned long size)
{
	WARN_ON(!sparsemap_buf || size == 0);
	memblock_free(sparsemap_buf, size);
}

static void __init sparse_buffer_init(unsigned long size, int nid)
{
	phys_addr_t addr = __pa(MAX_DMA_ADDRESS);
	WARN_ON(sparsemap_buf);	/* forgot to call sparse_buffer_fini()? */
	/*
	 * Pre-allocated buffer is mainly used by __populate_section_memmap
	 * and we want it to be properly aligned to the section size - this is
	 * especially the case for VMEMMAP which maps memmap to PMDs
	 */
	sparsemap_buf = memmap_alloc(size, section_map_size(), addr, nid, true);
	sparsemap_buf_end = sparsemap_buf + size;
#ifndef CONFIG_SPARSEMEM_VMEMMAP
	memmap_boot_pages_add(DIV_ROUND_UP(size, PAGE_SIZE));
#endif
}

static void __init sparse_buffer_fini(void)
{
	unsigned long size = sparsemap_buf_end - sparsemap_buf;

	if (sparsemap_buf && size > 0)
		sparse_buffer_free(size);
	sparsemap_buf = NULL;
}

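/*
 * Carve @size bytes out of the pre-allocated sparsemap buffer, rounded up
 * to a @size-aligned boundary. Any alignment gap that gets skipped is
 * handed back to memblock via sparse_buffer_free(). Returns NULL once the
 * buffer is exhausted, in which case callers fall back to a fresh
 * memblock allocation.
 */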
void * __meminit sparse_buffer_alloc(unsigned long size)
{
	void *ptr = NULL;

	if (sparsemap_buf) {
		ptr = (void *) roundup((unsigned long)sparsemap_buf, size);
		if (ptr + size > sparsemap_buf_end)
			ptr = NULL;
		else {
			/* Free redundant aligned space */
			if ((unsigned long)(ptr - sparsemap_buf) > 0)
				sparse_buffer_free((unsigned long)(ptr - sparsemap_buf));
			sparsemap_buf = ptr + size;
		}
	}
	return ptr;
}

void __weak __meminit vmemmap_populate_print_last(void)
{
}

/*
 * Initialize sparse on a specific node. The node spans [pnum_begin, pnum_end)
 * and the number of present sections in this node is map_count.
 */
static void __init sparse_init_nid(int nid, unsigned long pnum_begin,
				   unsigned long pnum_end,
				   unsigned long map_count)
{
	struct mem_section_usage *usage;
	unsigned long pnum;
	struct page *map;

	usage = sparse_early_usemaps_alloc_pgdat_section(NODE_DATA(nid),
			mem_section_usage_size() * map_count);
	if (!usage) {
		pr_err("%s: node[%d] usemap allocation failed", __func__, nid);
		goto failed;
	}
	sparse_buffer_init(map_count * section_map_size(), nid);
	for_each_present_section_nr(pnum_begin, pnum) {
		unsigned long pfn = section_nr_to_pfn(pnum);

		if (pnum >= pnum_end)
			break;

		map = __populate_section_memmap(pfn, PAGES_PER_SECTION,
				nid, NULL, NULL);
		if (!map) {
			pr_err("%s: node[%d] memory map backing failed. Some memory will not be available.",
			       __func__, nid);
			pnum_begin = pnum;
			sparse_buffer_fini();
			goto failed;
		}
		check_usemap_section_nr(nid, usage);
		sparse_init_one_section(__nr_to_section(pnum), pnum, map, usage,
				SECTION_IS_EARLY);
		usage = (void *) usage + mem_section_usage_size();
	}
	sparse_buffer_fini();
	return;
failed:
	/* We failed to allocate, mark all the following pnums as not present */
	for_each_present_section_nr(pnum_begin, pnum) {
		struct mem_section *ms;

		if (pnum >= pnum_end)
			break;
		ms = __nr_to_section(pnum);
		ms->section_mem_map = 0;
	}
}

/*
 * Allocate the accumulated non-linear sections, allocate a mem_map
 * for each and record the physical to section mapping.
 */
void __init sparse_init(void)
{
	unsigned long pnum_end, pnum_begin, map_count = 1;
	int nid_begin;

	/* see include/linux/mmzone.h 'struct mem_section' definition */
	BUILD_BUG_ON(!is_power_of_2(sizeof(struct mem_section)));
	memblocks_present();

	pnum_begin = first_present_section_nr();
	nid_begin = sparse_early_nid(__nr_to_section(pnum_begin));

	/* Setup pageblock_order for HUGETLB_PAGE_SIZE_VARIABLE */
	set_pageblock_order();

	for_each_present_section_nr(pnum_begin + 1, pnum_end) {
		int nid = sparse_early_nid(__nr_to_section(pnum_end));

		if (nid == nid_begin) {
			map_count++;
			continue;
		}
		/* Init node with sections in range [pnum_begin, pnum_end) */
		sparse_init_nid(nid_begin, pnum_begin, pnum_end, map_count);
		nid_begin = nid;
		pnum_begin = pnum_end;
		map_count = 1;
	}
	/* cover the last node */
	sparse_init_nid(nid_begin, pnum_begin, pnum_end, map_count);
	vmemmap_populate_print_last();
}

#ifdef CONFIG_MEMORY_HOTPLUG

/* Mark all memory sections within the pfn range as online */
void online_mem_sections(unsigned long start_pfn, unsigned long end_pfn)
{
	unsigned long pfn;

	for (pfn = start_pfn; pfn < end_pfn; pfn += PAGES_PER_SECTION) {
		unsigned long section_nr = pfn_to_section_nr(pfn);
		struct mem_section *ms;

		/* onlining code should never touch invalid ranges */
		if (WARN_ON(!valid_section_nr(section_nr)))
			continue;

		ms = __nr_to_section(section_nr);
		ms->section_mem_map |= SECTION_IS_ONLINE;
	}
}

/* Mark all memory sections within the pfn range as offline */
void offline_mem_sections(unsigned long start_pfn, unsigned long end_pfn)
{
	unsigned long pfn;

	for (pfn = start_pfn; pfn < end_pfn; pfn += PAGES_PER_SECTION) {
		unsigned long section_nr = pfn_to_section_nr(pfn);
		struct mem_section *ms;

		/*
		 * TODO this needs some double checking. Offlining code makes
		 * sure to check pfn_valid but those checks might be just bogus
		 */
		if (WARN_ON(!valid_section_nr(section_nr)))
			continue;

		ms = __nr_to_section(section_nr);
		ms->section_mem_map &= ~SECTION_IS_ONLINE;
	}
}

#ifdef CONFIG_SPARSEMEM_VMEMMAP
static struct page * __meminit populate_section_memmap(unsigned long pfn,
		unsigned long nr_pages, int nid, struct vmem_altmap *altmap,
		struct dev_pagemap *pgmap)
{
	return __populate_section_memmap(pfn, nr_pages, nid, altmap, pgmap);
}

static void depopulate_section_memmap(unsigned long pfn, unsigned long nr_pages,
		struct vmem_altmap *altmap)
{
	unsigned long start = (unsigned long) pfn_to_page(pfn);
	unsigned long end = start + nr_pages * sizeof(struct page);

	memmap_pages_add(-1L * (DIV_ROUND_UP(end - start, PAGE_SIZE)));
	vmemmap_free(start, end, altmap);
}
static void free_map_bootmem(struct page *memmap)
{
	unsigned long start = (unsigned long)memmap;
	unsigned long end = (unsigned long)(memmap + PAGES_PER_SECTION);

	vmemmap_free(start, end, NULL);
}

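/*
 * clear_subsection_map() / fill_subsection_map() maintain the per-section
 * bitmap of populated sub-sections. Clearing fails (-EINVAL) if the range
 * was not fully populated; filling fails with -EINVAL for an empty request
 * or -EEXIST if any sub-section in the range is already populated.
 */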
static int clear_subsection_map(unsigned long pfn, unsigned long nr_pages)
{
	DECLARE_BITMAP(map, SUBSECTIONS_PER_SECTION) = { 0 };
	DECLARE_BITMAP(tmp, SUBSECTIONS_PER_SECTION) = { 0 };
	struct mem_section *ms = __pfn_to_section(pfn);
	unsigned long *subsection_map = ms->usage
		? &ms->usage->subsection_map[0] : NULL;

	subsection_mask_set(map, pfn, nr_pages);
	if (subsection_map)
		bitmap_and(tmp, map, subsection_map, SUBSECTIONS_PER_SECTION);

	if (WARN(!subsection_map || !bitmap_equal(tmp, map, SUBSECTIONS_PER_SECTION),
				"section already deactivated (%#lx + %ld)\n",
				pfn, nr_pages))
		return -EINVAL;

	bitmap_xor(subsection_map, map, subsection_map, SUBSECTIONS_PER_SECTION);
	return 0;
}

static bool is_subsection_map_empty(struct mem_section *ms)
{
	return bitmap_empty(&ms->usage->subsection_map[0],
			    SUBSECTIONS_PER_SECTION);
}

static int fill_subsection_map(unsigned long pfn, unsigned long nr_pages)
{
	struct mem_section *ms = __pfn_to_section(pfn);
	DECLARE_BITMAP(map, SUBSECTIONS_PER_SECTION) = { 0 };
	unsigned long *subsection_map;
	int rc = 0;

	subsection_mask_set(map, pfn, nr_pages);

	subsection_map = &ms->usage->subsection_map[0];

	if (bitmap_empty(map, SUBSECTIONS_PER_SECTION))
		rc = -EINVAL;
	else if (bitmap_intersects(map, subsection_map, SUBSECTIONS_PER_SECTION))
		rc = -EEXIST;
	else
		bitmap_or(subsection_map, map, subsection_map,
				SUBSECTIONS_PER_SECTION);

	return rc;
}
#else
static struct page * __meminit populate_section_memmap(unsigned long pfn,
		unsigned long nr_pages, int nid, struct vmem_altmap *altmap,
		struct dev_pagemap *pgmap)
{
	return kvmalloc_node(array_size(sizeof(struct page),
					PAGES_PER_SECTION), GFP_KERNEL, nid);
}

static void depopulate_section_memmap(unsigned long pfn, unsigned long nr_pages,
		struct vmem_altmap *altmap)
{
	kvfree(pfn_to_page(pfn));
}

static void free_map_bootmem(struct page *memmap)
{
	unsigned long maps_section_nr, removing_section_nr, i;
	unsigned long magic, nr_pages;
	struct page *page = virt_to_page(memmap);

	nr_pages = PAGE_ALIGN(PAGES_PER_SECTION * sizeof(struct page))
		>> PAGE_SHIFT;

	for (i = 0; i < nr_pages; i++, page++) {
		magic = page->index;

		BUG_ON(magic == NODE_INFO);

		maps_section_nr = pfn_to_section_nr(page_to_pfn(page));
		removing_section_nr = page_private(page);

		/*
		 * When this function is called, the removing section is in a
		 * logically offlined state. This means all pages are isolated
		 * from the page allocator. If the removing section's memmap
		 * is placed on the same section, it must not be freed.
		 * If it is freed, the page allocator may allocate it, and it
		 * will be removed physically soon.
		 */
		if (maps_section_nr != removing_section_nr)
			put_page_bootmem(page);
	}
}

static int clear_subsection_map(unsigned long pfn, unsigned long nr_pages)
{
	return 0;
}

static bool is_subsection_map_empty(struct mem_section *ms)
{
	return true;
}

static int fill_subsection_map(unsigned long pfn, unsigned long nr_pages)
{
	return 0;
}
#endif /* CONFIG_SPARSEMEM_VMEMMAP */

/*
 * To deactivate a memory region, there are 3 cases to handle across
 * two configurations (SPARSEMEM_VMEMMAP={y,n}):
 *
 * 1. deactivation of a partial hot-added section (only possible in
 *    the SPARSEMEM_VMEMMAP=y case).
 *      a) section was present at memory init.
 *      b) section was hot-added post memory init.
 * 2. deactivation of a complete hot-added section.
 * 3. deactivation of a complete section from memory init.
 *
 * For case 1, when the subsection_map is not empty we will not be freeing
 * the usage map, but still need to free the vmemmap range.
 *
 * For cases 2 and 3, the SPARSEMEM_VMEMMAP={y,n} cases are unified.
 */
static void section_deactivate(unsigned long pfn, unsigned long nr_pages,
		struct vmem_altmap *altmap)
{
	struct mem_section *ms = __pfn_to_section(pfn);
	bool section_is_early = early_section(ms);
	struct page *memmap = NULL;
	bool empty;

	if (clear_subsection_map(pfn, nr_pages))
		return;

	empty = is_subsection_map_empty(ms);
	if (empty) {
		unsigned long section_nr = pfn_to_section_nr(pfn);

		/*
		 * Mark the section invalid so that valid_section()
		 * returns false. This prevents code from dereferencing
		 * the ms->usage array.
		 */
		ms->section_mem_map &= ~SECTION_HAS_MEM_MAP;

		/*
		 * When removing an early section, the usage map is kept (as the
		 * usage maps of other sections fall into the same page). It
		 * will be re-used when re-adding the section - which is then no
		 * longer an early section. If the usage map is PageReserved, it
		 * was allocated during boot.
		 */
		if (!PageReserved(virt_to_page(ms->usage))) {
			kfree_rcu(ms->usage, rcu);
			WRITE_ONCE(ms->usage, NULL);
		}
		memmap = sparse_decode_mem_map(ms->section_mem_map, section_nr);
	}

	/*
	 * The memmap of early sections is always fully populated. See
	 * section_activate() and pfn_valid().
	 */
	if (!section_is_early)
		depopulate_section_memmap(pfn, nr_pages, altmap);
	else if (memmap)
		free_map_bootmem(memmap);

	if (empty)
		ms->section_mem_map = (unsigned long)NULL;
}

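/*
 * Activate [pfn, pfn + nr_pages) in the section: allocate the usage map on
 * first use, mark the sub-sections populated, and populate the memmap
 * unless this is a partial hot-add into an early section whose memmap is
 * already fully present.
 */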
static struct page * __meminit section_activate(int nid, unsigned long pfn,
		unsigned long nr_pages, struct vmem_altmap *altmap,
		struct dev_pagemap *pgmap)
{
	struct mem_section *ms = __pfn_to_section(pfn);
	struct mem_section_usage *usage = NULL;
	struct page *memmap;
	int rc;

	if (!ms->usage) {
		usage = kzalloc(mem_section_usage_size(), GFP_KERNEL);
		if (!usage)
			return ERR_PTR(-ENOMEM);
		ms->usage = usage;
	}

	rc = fill_subsection_map(pfn, nr_pages);
	if (rc) {
		if (usage)
			ms->usage = NULL;
		kfree(usage);
		return ERR_PTR(rc);
	}

	/*
	 * The early init code does not consider partially populated
	 * initial sections, it simply assumes that memory will never be
	 * referenced. If we hot-add memory into such a section then we
	 * do not need to populate the memmap and can simply reuse what
	 * is already there.
	 */
	if (nr_pages < PAGES_PER_SECTION && early_section(ms))
		return pfn_to_page(pfn);

	memmap = populate_section_memmap(pfn, nr_pages, nid, altmap, pgmap);
	if (!memmap) {
		section_deactivate(pfn, nr_pages, altmap);
		return ERR_PTR(-ENOMEM);
	}

	return memmap;
}

/**
 * sparse_add_section - add a memory section, or populate an existing one
 * @nid: The node to add section on
 * @start_pfn: start pfn of the memory range
 * @nr_pages: number of pfns to add in the section
 * @altmap: alternate pfns to allocate the memmap backing store
 * @pgmap: alternate compound page geometry for devmap mappings
 *
 * This is only intended for hotplug.
 *
 * Note that only VMEMMAP supports sub-section aligned hotplug,
 * the proper alignment and size are gated by check_pfn_span().
 *
 * Return:
 * * 0		- On success.
 * * -EEXIST	- Section is already present.
 * * -ENOMEM	- Out of memory.
 */
int __meminit sparse_add_section(int nid, unsigned long start_pfn,
		unsigned long nr_pages, struct vmem_altmap *altmap,
		struct dev_pagemap *pgmap)
{
	unsigned long section_nr = pfn_to_section_nr(start_pfn);
	struct mem_section *ms;
	struct page *memmap;
	int ret;

	ret = sparse_index_init(section_nr, nid);
	if (ret < 0)
		return ret;

	memmap = section_activate(nid, start_pfn, nr_pages, altmap, pgmap);
	if (IS_ERR(memmap))
		return PTR_ERR(memmap);

	/*
	 * Poison uninitialized struct pages in order to catch invalid flags
	 * combinations.
	 */
	if (!altmap || !altmap->inaccessible)
		page_init_poison(memmap, sizeof(struct page) * nr_pages);

	ms = __nr_to_section(section_nr);
	set_section_nid(section_nr, nid);
	__section_mark_present(ms, section_nr);

	/* Align memmap to section boundary in the subsection case */
	if (section_nr_to_pfn(section_nr) != start_pfn)
		memmap = pfn_to_page(section_nr_to_pfn(section_nr));
	sparse_init_one_section(ms, section_nr, memmap, ms->usage, 0);

	return 0;
}

void sparse_remove_section(unsigned long pfn, unsigned long nr_pages,
			   struct vmem_altmap *altmap)
{
	struct mem_section *ms = __pfn_to_section(pfn);

	if (WARN_ON_ONCE(!valid_section(ms)))
		return;

	section_deactivate(pfn, nr_pages, altmap);
}
#endif /* CONFIG_MEMORY_HOTPLUG */