/*
 * Based on arch/arm/mm/init.c
 *
 * Copyright (C) 1995-2005 Russell King
 * Copyright (C) 2012 ARM Ltd.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program.  If not, see <http://www.gnu.org/licenses/>.
 */
#include <linux/kernel.h>
#include <linux/export.h>
#include <linux/errno.h>
#include <linux/swap.h>
#include <linux/init.h>
#include <linux/cache.h>
#include <linux/mman.h>
#include <linux/nodemask.h>
#include <linux/initrd.h>
#include <linux/gfp.h>
#include <linux/memblock.h>
#include <linux/sort.h>
#include <linux/of.h>
#include <linux/of_fdt.h>
#include <linux/dma-mapping.h>
#include <linux/dma-contiguous.h>
#include <linux/efi.h>
#include <linux/swiotlb.h>
#include <linux/vmalloc.h>
#include <linux/mm.h>
#include <linux/kexec.h>
#include <linux/crash_dump.h>

#include <asm/boot.h>
#include <asm/fixmap.h>
#include <asm/kasan.h>
#include <asm/kernel-pgtable.h>
#include <asm/memory.h>
#include <asm/numa.h>
#include <asm/sections.h>
#include <asm/setup.h>
#include <asm/sizes.h>
#include <asm/tlb.h>
#include <asm/alternative.h>

/*
 * We need to be able to catch inadvertent references to memstart_addr
 * that occur (potentially in generic code) before arm64_memblock_init()
 * executes, which assigns it its actual value. So use a default value
 * that cannot be mistaken for a real physical address.
 */
s64 memstart_addr __ro_after_init = -1;
EXPORT_SYMBOL(memstart_addr);
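
/* Highest physical address (exclusive) usable by 32-bit DMA devices; set in arm64_memblock_init() */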
phys_addr_t arm64_dma_phys_limit __ro_after_init;

#ifdef CONFIG_KEXEC_CORE
/*
 * reserve_crashkernel() - reserves memory for crash kernel
 *
 * This function reserves the memory region given by the "crashkernel=" kernel
 * command line parameter. The reserved memory is used by a dump capture kernel
 * when the primary kernel is crashing.
 */
static void __init reserve_crashkernel(void)
{
	unsigned long long crash_base, crash_size;
	int ret;

	ret = parse_crashkernel(boot_command_line, memblock_phys_mem_size(),
				&crash_size, &crash_base);
	/* no crashkernel= or invalid value specified */
	if (ret || !crash_size)
		return;

	crash_size = PAGE_ALIGN(crash_size);

	if (crash_base == 0) {
		/* Current arm64 boot protocol requires 2MB alignment */
		crash_base = memblock_find_in_range(0, ARCH_LOW_ADDRESS_LIMIT,
						    crash_size, SZ_2M);
		if (crash_base == 0) {
			pr_warn("cannot allocate crashkernel (size:0x%llx)\n",
				crash_size);
			return;
		}
	} else {
		/* User specifies base address explicitly. */
		if (!memblock_is_region_memory(crash_base, crash_size)) {
			pr_warn("cannot reserve crashkernel: region is not memory\n");
			return;
		}

		if (memblock_is_region_reserved(crash_base, crash_size)) {
			pr_warn("cannot reserve crashkernel: region overlaps reserved memory\n");
			return;
		}

		if (!IS_ALIGNED(crash_base, SZ_2M)) {
			pr_warn("cannot reserve crashkernel: base address is not 2MB aligned\n");
			return;
		}
	}
	memblock_reserve(crash_base, crash_size);

	pr_info("crashkernel reserved: 0x%016llx - 0x%016llx (%lld MB)\n",
		crash_base, crash_base + crash_size, crash_size >> 20);

	crashk_res.start = crash_base;
	crashk_res.end = crash_base + crash_size - 1;
}
#else
static void __init reserve_crashkernel(void)
{
}
#endif /* CONFIG_KEXEC_CORE */

#ifdef CONFIG_CRASH_DUMP
static int __init early_init_dt_scan_elfcorehdr(unsigned long node,
		const char *uname, int depth, void *data)
{
	const __be32 *reg;
	int len;

	if (depth != 1 || strcmp(uname, "chosen") != 0)
		return 0;

	reg = of_get_flat_dt_prop(node, "linux,elfcorehdr", &len);
	if (!reg || (len < (dt_root_addr_cells + dt_root_size_cells)))
		return 1;

	elfcorehdr_addr = dt_mem_next_cell(dt_root_addr_cells, &reg);
	elfcorehdr_size = dt_mem_next_cell(dt_root_size_cells, &reg);

	return 1;
}

/*
 * reserve_elfcorehdr() - reserves memory for the ELF core header
 *
 * This function reserves the memory occupied by an ELF core header described
 * in the device tree. This region contains all the information about the
 * primary kernel's core image and is used by a dump capture kernel to access
 * the system memory of the primary kernel.
 */
static void __init reserve_elfcorehdr(void)
{
	of_scan_flat_dt(early_init_dt_scan_elfcorehdr, NULL);

	if (!elfcorehdr_size)
		return;

	if (memblock_is_region_reserved(elfcorehdr_addr, elfcorehdr_size)) {
		pr_warn("elfcorehdr is overlapped\n");
		return;
	}

	memblock_reserve(elfcorehdr_addr, elfcorehdr_size);

	pr_info("Reserving %lldKB of memory at 0x%llx for elfcorehdr\n",
		elfcorehdr_size >> 10, elfcorehdr_addr);
}
#else
static void __init reserve_elfcorehdr(void)
{
}
#endif /* CONFIG_CRASH_DUMP */

/*
 * Return the maximum physical address for ZONE_DMA32 (DMA_BIT_MASK(32)). It
 * currently assumes that for memory starting above 4G, 32-bit devices will
 * use a DMA offset.
 */
static phys_addr_t __init max_zone_dma_phys(void)
{
	phys_addr_t offset = memblock_start_of_DRAM() & GENMASK_ULL(63, 32);

	return min(offset + (1ULL << 32), memblock_end_of_DRAM());
}
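
/*
 * Hand the zone PFN limits to the core mm. With NUMA, the generic
 * free_area_init_nodes() computes per-node spans and holes from memblock
 * itself; without NUMA, the zone sizes and holes are computed here by hand
 * and passed to free_area_init_node() for the single node 0.
 */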
#ifdef CONFIG_NUMA

static void __init zone_sizes_init(unsigned long min, unsigned long max)
{
	unsigned long max_zone_pfns[MAX_NR_ZONES] = {0};

	if (IS_ENABLED(CONFIG_ZONE_DMA32))
		max_zone_pfns[ZONE_DMA32] = PFN_DOWN(max_zone_dma_phys());
	max_zone_pfns[ZONE_NORMAL] = max;

	free_area_init_nodes(max_zone_pfns);
}

#else

static void __init zone_sizes_init(unsigned long min, unsigned long max)
{
	struct memblock_region *reg;
	unsigned long zone_size[MAX_NR_ZONES], zhole_size[MAX_NR_ZONES];
	unsigned long max_dma = min;

	memset(zone_size, 0, sizeof(zone_size));

	/* 4GB maximum for 32-bit only capable devices */
#ifdef CONFIG_ZONE_DMA32
	max_dma = PFN_DOWN(arm64_dma_phys_limit);
	zone_size[ZONE_DMA32] = max_dma - min;
#endif
	zone_size[ZONE_NORMAL] = max - max_dma;

	memcpy(zhole_size, zone_size, sizeof(zhole_size));

	for_each_memblock(memory, reg) {
		unsigned long start = memblock_region_memory_base_pfn(reg);
		unsigned long end = memblock_region_memory_end_pfn(reg);

		if (start >= max)
			continue;

#ifdef CONFIG_ZONE_DMA32
		if (start < max_dma) {
			unsigned long dma_end = min(end, max_dma);

			zhole_size[ZONE_DMA32] -= dma_end - start;
		}
#endif
		if (end > max_dma) {
			unsigned long normal_end = min(end, max);
			unsigned long normal_start = max(start, max_dma);

			zhole_size[ZONE_NORMAL] -= normal_end - normal_start;
		}
	}

	free_area_init_node(0, zone_size, min, zhole_size);
}

#endif /* CONFIG_NUMA */
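
/*
 * A PFN is valid only if it has a usable memmap entry: the address must not
 * overflow phys_addr_t, its SPARSEMEM section (if any) must be present, and
 * the backing memory must be mapped (i.e., not marked NOMAP) in memblock.
 */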
int pfn_valid(unsigned long pfn)
{
	phys_addr_t addr = pfn << PAGE_SHIFT;

	if ((addr >> PAGE_SHIFT) != pfn)
		return 0;

#ifdef CONFIG_SPARSEMEM
	if (pfn_to_section_nr(pfn) >= NR_MEM_SECTIONS)
		return 0;

	if (!valid_section(__nr_to_section(pfn_to_section_nr(pfn))))
		return 0;
#endif
	return memblock_is_map_memory(addr);
}
EXPORT_SYMBOL(pfn_valid);
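
/*
 * With SPARSEMEM, mark every memblock region (and its node) present so that
 * sparse_init() can allocate section maps for it; a no-op otherwise.
 */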
#ifndef CONFIG_SPARSEMEM
static void __init arm64_memory_present(void)
{
}
#else
static void __init arm64_memory_present(void)
{
	struct memblock_region *reg;

	for_each_memblock(memory, reg) {
		int nid = memblock_get_region_node(reg);

		memory_present(nid, memblock_region_memory_base_pfn(reg),
			       memblock_region_memory_end_pfn(reg));
	}
}
#endif

static phys_addr_t memory_limit = PHYS_ADDR_MAX;

/*
 * Limit the memory size that was specified via FDT.
 */
static int __init early_mem(char *p)
{
	if (!p)
		return 1;

	memory_limit = memparse(p, &p) & PAGE_MASK;
	pr_notice("Memory limited to %lldMB\n", memory_limit >> 20);

	return 0;
}
early_param("mem", early_mem);
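
/*
 * Look up the "linux,usable-memory-range" property of /chosen (an
 * <address, size> pair), which a kdump capture kernel uses to restrict
 * itself to the memory window reserved for it by the primary kernel.
 */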
static int __init early_init_dt_scan_usablemem(unsigned long node,
		const char *uname, int depth, void *data)
{
	struct memblock_region *usablemem = data;
	const __be32 *reg;
	int len;

	if (depth != 1 || strcmp(uname, "chosen") != 0)
		return 0;

	reg = of_get_flat_dt_prop(node, "linux,usable-memory-range", &len);
	if (!reg || (len < (dt_root_addr_cells + dt_root_size_cells)))
		return 1;

	usablemem->base = dt_mem_next_cell(dt_root_addr_cells, &reg);
	usablemem->size = dt_mem_next_cell(dt_root_size_cells, &reg);

	return 1;
}

static void __init fdt_enforce_memory_region(void)
{
	struct memblock_region reg = {
		.size = 0,
	};

	of_scan_flat_dt(early_init_dt_scan_usablemem, &reg);

	if (reg.size)
		memblock_cap_memory_range(reg.base, reg.size);
}
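
/*
 * Establish the physical memory layout: pick memstart_addr, clip any memory
 * the linear mapping cannot cover, apply the "mem=" limit and initrd
 * constraints, optionally randomize the linear region, and register the
 * kernel's own reservations with memblock.
 */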
void __init arm64_memblock_init(void)
{
	const s64 linear_region_size = -(s64)PAGE_OFFSET;

	/* Handle linux,usable-memory-range property */
	fdt_enforce_memory_region();

	/* Remove memory above our supported physical address size */
	memblock_remove(1ULL << PHYS_MASK_SHIFT, ULLONG_MAX);

	/*
	 * Ensure that the linear region takes up exactly half of the kernel
	 * virtual address space. This way, we can distinguish a linear address
	 * from a kernel/module/vmalloc address by testing a single bit.
	 */
	BUILD_BUG_ON(linear_region_size != BIT(VA_BITS - 1));

	/*
	 * Select a suitable value for the base of physical memory.
	 */
	memstart_addr = round_down(memblock_start_of_DRAM(),
				   ARM64_MEMSTART_ALIGN);

	/*
	 * Remove the memory that we will not be able to cover with the
	 * linear mapping. Take care not to clip the kernel which may be
	 * high in memory.
	 */
	memblock_remove(max_t(u64, memstart_addr + linear_region_size,
			__pa_symbol(_end)), ULLONG_MAX);
	if (memstart_addr + linear_region_size < memblock_end_of_DRAM()) {
		/* ensure that memstart_addr remains sufficiently aligned */
		memstart_addr = round_up(memblock_end_of_DRAM() - linear_region_size,
					 ARM64_MEMSTART_ALIGN);
		memblock_remove(0, memstart_addr);
	}

	/*
	 * Apply the memory limit if it was set. Since the kernel may be loaded
	 * high up in memory, add back the kernel region that must be accessible
	 * via the linear mapping.
	 */
	if (memory_limit != PHYS_ADDR_MAX) {
		memblock_mem_limit_remove_map(memory_limit);
		memblock_add(__pa_symbol(_text), (u64)(_end - _text));
	}

	if (IS_ENABLED(CONFIG_BLK_DEV_INITRD) && phys_initrd_size) {
		/*
		 * Add back the memory we just removed if it would otherwise
		 * make the initrd inaccessible via the linear mapping.
		 * Otherwise this is a no-op.
		 */
		u64 base = phys_initrd_start & PAGE_MASK;
		u64 size = PAGE_ALIGN(phys_initrd_size);

		/*
		 * We can only add back the initrd memory if we don't end up
		 * with more memory than we can address via the linear mapping.
		 * It is up to the bootloader to position the kernel and the
		 * initrd reasonably close to each other (i.e., within 32 GB of
		 * each other) so that all granule/#levels combinations can
		 * always access both.
		 */
		if (WARN(base < memblock_start_of_DRAM() ||
			 base + size > memblock_start_of_DRAM() +
				       linear_region_size,
			"initrd not fully accessible via the linear mapping -- please check your bootloader ...\n")) {
			initrd_start = 0;
		} else {
			memblock_remove(base, size); /* clear MEMBLOCK_ flags */
			memblock_add(base, size);
			memblock_reserve(base, size);
		}
	}

	if (IS_ENABLED(CONFIG_RANDOMIZE_BASE)) {
		extern u16 memstart_offset_seed;
		u64 range = linear_region_size -
			    (memblock_end_of_DRAM() - memblock_start_of_DRAM());

		/*
		 * If the size of the linear region exceeds, by a sufficient
		 * margin, the size of the region that the available physical
		 * memory spans, randomize the linear region as well.
		 */
		if (memstart_offset_seed > 0 && range >= ARM64_MEMSTART_ALIGN) {
			range /= ARM64_MEMSTART_ALIGN;
			memstart_addr -= ARM64_MEMSTART_ALIGN *
					 ((range * memstart_offset_seed) >> 16);
		}
	}

	/*
	 * Register the kernel text, kernel data, initrd, and initial
	 * pagetables with memblock.
	 */
	memblock_reserve(__pa_symbol(_text), _end - _text);
	if (IS_ENABLED(CONFIG_BLK_DEV_INITRD) && phys_initrd_size) {
		/* the generic initrd code expects virtual addresses */
		initrd_start = __phys_to_virt(phys_initrd_start);
		initrd_end = initrd_start + phys_initrd_size;
	}

	early_init_fdt_scan_reserved_mem();

	/* 4GB maximum for 32-bit only capable devices */
	if (IS_ENABLED(CONFIG_ZONE_DMA32))
		arm64_dma_phys_limit = max_zone_dma_phys();
	else
		arm64_dma_phys_limit = PHYS_MASK + 1;

	reserve_crashkernel();

	reserve_elfcorehdr();

	high_memory = __va(memblock_end_of_DRAM() - 1) + 1;

	dma_contiguous_reserve(arm64_dma_phys_limit);
}
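
/*
 * Finish early memory setup once all memblock reservations are in place:
 * probe NUMA, register the regions with sparsemem, and size the zones.
 */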
void __init bootmem_init(void)
{
	unsigned long min, max;

	min = PFN_UP(memblock_start_of_DRAM());
	max = PFN_DOWN(memblock_end_of_DRAM());

	early_memtest(min << PAGE_SHIFT, max << PAGE_SHIFT);

	max_pfn = max_low_pfn = max;

	arm64_numa_init();
	/*
	 * Sparsemem tries to allocate bootmem in memory_present(), so must be
	 * done after the fixed reservations.
	 */
	arm64_memory_present();

	sparse_init();
	zone_sizes_init(min, max);

	memblock_dump_all();
}

#ifndef CONFIG_SPARSEMEM_VMEMMAP
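/*
 * Free the statically allocated memmap entries for [start_pfn, end_pfn), so
 * that struct pages covering holes between memory banks are not wasted.
 */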
static inline void free_memmap(unsigned long start_pfn, unsigned long end_pfn)
{
	struct page *start_pg, *end_pg;
	unsigned long pg, pgend;

	/*
	 * Convert start_pfn/end_pfn to a struct page pointer.
	 */
	start_pg = pfn_to_page(start_pfn - 1) + 1;
	end_pg = pfn_to_page(end_pfn - 1) + 1;

	/*
	 * Convert to physical addresses, and round start upwards and end
	 * downwards.
	 */
	pg = (unsigned long)PAGE_ALIGN(__pa(start_pg));
	pgend = (unsigned long)__pa(end_pg) & PAGE_MASK;

	/*
	 * If there are free pages between these, free the section of the
	 * memmap array.
	 */
	if (pg < pgend)
		memblock_free(pg, pgend - pg);
}

/*
 * The mem_map array can get very big. Free the unused area of the memory map.
 */
static void __init free_unused_memmap(void)
{
	unsigned long start, prev_end = 0;
	struct memblock_region *reg;

	for_each_memblock(memory, reg) {
		start = __phys_to_pfn(reg->base);

#ifdef CONFIG_SPARSEMEM
		/*
		 * Take care not to free memmap entries that don't exist due
		 * to SPARSEMEM sections which aren't present.
		 */
		start = min(start, ALIGN(prev_end, PAGES_PER_SECTION));
#endif
		/*
		 * If we had a previous bank, and there is a space between the
		 * current bank and the previous, free it.
		 */
		if (prev_end && prev_end < start)
			free_memmap(prev_end, start);

		/*
		 * Align up here since the VM subsystem insists that the
		 * memmap entries are valid from the bank end aligned to
		 * MAX_ORDER_NR_PAGES.
		 */
		prev_end = ALIGN(__phys_to_pfn(reg->base + reg->size),
				 MAX_ORDER_NR_PAGES);
	}

#ifdef CONFIG_SPARSEMEM
	if (!IS_ALIGNED(prev_end, PAGES_PER_SECTION))
		free_memmap(prev_end, ALIGN(prev_end, PAGES_PER_SECTION));
#endif
}
#endif	/* !CONFIG_SPARSEMEM_VMEMMAP */

/*
 * mem_init() marks the free areas in the mem_map and tells us how much memory
 * is free. This is done after various parts of the system have claimed their
 * memory after the kernel image.
 */
void __init mem_init(void)
{
	if (swiotlb_force == SWIOTLB_FORCE ||
	    max_pfn > (arm64_dma_phys_limit >> PAGE_SHIFT))
		swiotlb_init(1);
	else
		swiotlb_force = SWIOTLB_NO_FORCE;

	set_max_mapnr(pfn_to_page(max_pfn) - mem_map);

#ifndef CONFIG_SPARSEMEM_VMEMMAP
	free_unused_memmap();
#endif
	/* this will put all unused low memory onto the freelists */
	memblock_free_all();

	mem_init_print_info(NULL);

	/*
	 * Check boundaries twice: Some fundamental inconsistencies can be
	 * detected at build time already.
	 */
#ifdef CONFIG_COMPAT
	BUILD_BUG_ON(TASK_SIZE_32 > DEFAULT_MAP_WINDOW_64);
#endif

	if (PAGE_SIZE >= 16384 && get_num_physpages() <= 128) {
		extern int sysctl_overcommit_memory;
		/*
		 * On a machine this small we won't get anywhere without
		 * overcommit, so turn it on by default.
		 */
		sysctl_overcommit_memory = OVERCOMMIT_ALWAYS;
	}
}
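
/*
 * Release the .init.* sections via their linear-map alias, then unmap their
 * kernel-image alias so the freed region cannot be re-executed.
 */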
void free_initmem(void)
{
	free_reserved_area(lm_alias(__init_begin),
			   lm_alias(__init_end),
			   0, "unused kernel");
	/*
	 * Unmap the __init region but leave the VM area in place. This
	 * prevents the region from being reused for kernel modules, which
	 * is not supported by kallsyms.
	 */
	unmap_kernel_range((u64)__init_begin, (u64)(__init_end - __init_begin));
}
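
/*
 * The initrd's memory is returned to the page allocator once the rootfs has
 * been unpacked, unless "keepinitrd" was passed on the command line.
 */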
#ifdef CONFIG_BLK_DEV_INITRD

static int keep_initrd __initdata;

void __init free_initrd_mem(unsigned long start, unsigned long end)
{
	if (!keep_initrd) {
		free_reserved_area((void *)start, (void *)end, 0, "initrd");
		memblock_free(__virt_to_phys(start), end - start);
	}
}

static int __init keepinitrd_setup(char *__unused)
{
	keep_initrd = 1;
	return 1;
}

__setup("keepinitrd", keepinitrd_setup);
#endif

/*
 * Dump out memory limit information on panic.
 */
static int dump_mem_limit(struct notifier_block *self, unsigned long v, void *p)
{
	if (memory_limit != PHYS_ADDR_MAX) {
		pr_emerg("Memory Limit: %llu MB\n", memory_limit >> 20);
	} else {
		pr_emerg("Memory Limit: none\n");
	}
	return 0;
}

static struct notifier_block mem_limit_notifier = {
	.notifier_call = dump_mem_limit,
};

static int __init register_mem_limit_dumper(void)
{
	atomic_notifier_chain_register(&panic_notifier_list,
				       &mem_limit_notifier);
	return 0;
}
__initcall(register_mem_limit_dumper);