// SPDX-License-Identifier: GPL-2.0-only
/*
 * Based on arch/arm/mm/init.c
 *
 * Copyright (C) 1995-2005 Russell King
 * Copyright (C) 2012 ARM Ltd.
 */

#include <linux/kernel.h>
#include <linux/export.h>
#include <linux/errno.h>
#include <linux/swap.h>
#include <linux/init.h>
#include <linux/cache.h>
#include <linux/mman.h>
#include <linux/nodemask.h>
#include <linux/initrd.h>
#include <linux/gfp.h>
#include <linux/memblock.h>
#include <linux/sort.h>
#include <linux/of.h>
#include <linux/of_fdt.h>
#include <linux/dma-direct.h>
#include <linux/dma-mapping.h>
#include <linux/dma-contiguous.h>
#include <linux/efi.h>
#include <linux/swiotlb.h>
#include <linux/vmalloc.h>
#include <linux/mm.h>
#include <linux/kexec.h>
#include <linux/crash_dump.h>

#include <asm/boot.h>
#include <asm/fixmap.h>
#include <asm/kasan.h>
#include <asm/kernel-pgtable.h>
#include <asm/memory.h>
#include <asm/numa.h>
#include <asm/sections.h>
#include <asm/setup.h>
#include <linux/sizes.h>
#include <asm/tlb.h>
#include <asm/alternative.h>

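/* ZONE_DMA spans at most 2^30 bytes (1 GiB) of DRAM; see the comment above arm64_dma_phys_limit below. */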
#define ARM64_ZONE_DMA_BITS	30

/*
 * We need to be able to catch inadvertent references to memstart_addr
 * that occur (potentially in generic code) before arm64_memblock_init()
 * executes, which assigns it its actual value. So use a default value
 * that cannot be mistaken for a real physical address.
 */
s64 memstart_addr __ro_after_init = -1;
EXPORT_SYMBOL(memstart_addr);

s64 physvirt_offset __ro_after_init;
EXPORT_SYMBOL(physvirt_offset);

struct page *vmemmap __ro_after_init;
EXPORT_SYMBOL(vmemmap);

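/*
 * With the two variables above, the generic conversions collapse to
 * __pa(x) = x + physvirt_offset for linear-map addresses and
 * pfn_to_page(pfn) = vmemmap + pfn, since vmemmap is pre-biased by the
 * pfn of the base of DRAM (set up in arm64_memblock_init() below).
 */
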
/*
 * We create both ZONE_DMA and ZONE_DMA32. ZONE_DMA covers the first 1G of
 * memory as some devices, namely the Raspberry Pi 4, have peripherals with
 * this limited view of the memory. ZONE_DMA32 will cover the rest of the
 * 32-bit addressable memory area.
 */
phys_addr_t arm64_dma_phys_limit __ro_after_init;
static phys_addr_t arm64_dma32_phys_limit __ro_after_init;

#ifdef CONFIG_KEXEC_CORE
/*
 * reserve_crashkernel() - reserves memory for crash kernel
 *
 * This function reserves memory area given in "crashkernel=" kernel command
 * line parameter. The memory reserved is used by dump capture kernel when
 * primary kernel is crashing.
 */
static void __init reserve_crashkernel(void)
{
	unsigned long long crash_base, crash_size;
	int ret;

	ret = parse_crashkernel(boot_command_line, memblock_phys_mem_size(),
				&crash_size, &crash_base);
	/* no crashkernel= or invalid value specified */
	if (ret || !crash_size)
		return;

	crash_size = PAGE_ALIGN(crash_size);

	if (crash_base == 0) {
		/* Current arm64 boot protocol requires 2MB alignment */
		crash_base = memblock_find_in_range(0, arm64_dma32_phys_limit,
				crash_size, SZ_2M);
		if (crash_base == 0) {
			pr_warn("cannot allocate crashkernel (size:0x%llx)\n",
				crash_size);
			return;
		}
	} else {
		/* User specifies base address explicitly. */
		if (!memblock_is_region_memory(crash_base, crash_size)) {
			pr_warn("cannot reserve crashkernel: region is not memory\n");
			return;
		}

		if (memblock_is_region_reserved(crash_base, crash_size)) {
			pr_warn("cannot reserve crashkernel: region overlaps reserved memory\n");
			return;
		}

		if (!IS_ALIGNED(crash_base, SZ_2M)) {
			pr_warn("cannot reserve crashkernel: base address is not 2MB aligned\n");
			return;
		}
	}
	memblock_reserve(crash_base, crash_size);

	pr_info("crashkernel reserved: 0x%016llx - 0x%016llx (%lld MB)\n",
		crash_base, crash_base + crash_size, crash_size >> 20);

	crashk_res.start = crash_base;
	crashk_res.end = crash_base + crash_size - 1;
}
#else
static void __init reserve_crashkernel(void)
{
}
#endif /* CONFIG_KEXEC_CORE */

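/*
 * Example of the documented "crashkernel=size[@offset]" syntax:
 * "crashkernel=512M" reserves 512 MiB at a kernel-chosen, 2 MiB aligned
 * base below arm64_dma32_phys_limit, while "crashkernel=512M@1G" asks
 * for the reservation at the fixed physical address 0x40000000.
 */
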
#ifdef CONFIG_CRASH_DUMP
static int __init early_init_dt_scan_elfcorehdr(unsigned long node,
		const char *uname, int depth, void *data)
{
	const __be32 *reg;
	int len;

	if (depth != 1 || strcmp(uname, "chosen") != 0)
		return 0;

	reg = of_get_flat_dt_prop(node, "linux,elfcorehdr", &len);
	if (!reg || (len < (dt_root_addr_cells + dt_root_size_cells)))
		return 1;

	elfcorehdr_addr = dt_mem_next_cell(dt_root_addr_cells, &reg);
	elfcorehdr_size = dt_mem_next_cell(dt_root_size_cells, &reg);

	return 1;
}

/*
 * reserve_elfcorehdr() - reserves memory for elf core header
 *
 * This function reserves the memory occupied by an elf core header
 * described in the device tree. This region contains all the
 * information about primary kernel's core image and is used by a dump
 * capture kernel to access the system memory on primary kernel.
 */
static void __init reserve_elfcorehdr(void)
{
	of_scan_flat_dt(early_init_dt_scan_elfcorehdr, NULL);

	if (!elfcorehdr_size)
		return;

	if (memblock_is_region_reserved(elfcorehdr_addr, elfcorehdr_size)) {
		pr_warn("elfcorehdr is overlapped\n");
		return;
	}

	memblock_reserve(elfcorehdr_addr, elfcorehdr_size);

	pr_info("Reserving %lldKB of memory at 0x%llx for elfcorehdr\n",
		elfcorehdr_size >> 10, elfcorehdr_addr);
}
#else
static void __init reserve_elfcorehdr(void)
{
}
#endif /* CONFIG_CRASH_DUMP */
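
/*
 * Illustrative /chosen node consumed above (addresses are made up; the
 * cell counts follow the root #address-cells/#size-cells, here 2 and 2):
 *
 *	chosen {
 *		linux,elfcorehdr = <0x0 0x9fe00000 0x0 0x10000>;
 *	};
 */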

/*
 * Return the maximum physical address for a zone with a given address size
 * limit. It currently assumes that for memory starting above 4G, 32-bit
 * devices will use a DMA offset.
 */
static phys_addr_t __init max_zone_phys(unsigned int zone_bits)
{
	phys_addr_t offset = memblock_start_of_DRAM() & GENMASK_ULL(63, zone_bits);
	return min(offset + (1ULL << zone_bits), memblock_end_of_DRAM());
}

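/*
 * Worked example on a hypothetical platform whose DRAM starts at
 * 0x80_0000_0000 (512 GiB): with zone_bits == 32, GENMASK_ULL(63, 32)
 * keeps the high bits, so offset = 0x80_0000_0000 and the returned
 * limit is min(0x81_0000_0000, end of DRAM) -- a 4 GiB window at the
 * base of DRAM, not an absolute 4 GiB cap.
 */
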
#ifdef CONFIG_NUMA

static void __init zone_sizes_init(unsigned long min, unsigned long max)
{
	unsigned long max_zone_pfns[MAX_NR_ZONES] = {0};

#ifdef CONFIG_ZONE_DMA
	max_zone_pfns[ZONE_DMA] = PFN_DOWN(arm64_dma_phys_limit);
#endif
#ifdef CONFIG_ZONE_DMA32
	max_zone_pfns[ZONE_DMA32] = PFN_DOWN(arm64_dma32_phys_limit);
#endif
	max_zone_pfns[ZONE_NORMAL] = max;

	free_area_init_nodes(max_zone_pfns);
}

#else

static void __init zone_sizes_init(unsigned long min, unsigned long max)
{
	struct memblock_region *reg;
	unsigned long zone_size[MAX_NR_ZONES], zhole_size[MAX_NR_ZONES];
	unsigned long __maybe_unused max_dma, max_dma32;

	memset(zone_size, 0, sizeof(zone_size));

	max_dma = max_dma32 = min;
#ifdef CONFIG_ZONE_DMA
	max_dma = max_dma32 = PFN_DOWN(arm64_dma_phys_limit);
	zone_size[ZONE_DMA] = max_dma - min;
#endif
#ifdef CONFIG_ZONE_DMA32
	max_dma32 = PFN_DOWN(arm64_dma32_phys_limit);
	zone_size[ZONE_DMA32] = max_dma32 - max_dma;
#endif
	zone_size[ZONE_NORMAL] = max - max_dma32;

	memcpy(zhole_size, zone_size, sizeof(zhole_size));

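	/*
	 * Walk the regions memblock knows about and subtract every present
	 * page from zhole_size, so each entry ends up holding only the
	 * holes in its zone's span.
	 */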
	for_each_memblock(memory, reg) {
		unsigned long start = memblock_region_memory_base_pfn(reg);
		unsigned long end = memblock_region_memory_end_pfn(reg);

#ifdef CONFIG_ZONE_DMA
		if (start >= min && start < max_dma) {
			unsigned long dma_end = min(end, max_dma);
			zhole_size[ZONE_DMA] -= dma_end - start;
			start = dma_end;
		}
#endif
#ifdef CONFIG_ZONE_DMA32
		if (start >= max_dma && start < max_dma32) {
			unsigned long dma32_end = min(end, max_dma32);
			zhole_size[ZONE_DMA32] -= dma32_end - start;
			start = dma32_end;
		}
#endif
		if (start >= max_dma32 && start < max) {
			unsigned long normal_end = min(end, max);
			zhole_size[ZONE_NORMAL] -= normal_end - start;
		}
	}

	free_area_init_node(0, zone_size, min, zhole_size);
}

#endif /* CONFIG_NUMA */

int pfn_valid(unsigned long pfn)
{
	phys_addr_t addr = pfn << PAGE_SHIFT;

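	/* Reject pfns whose physical address would overflow phys_addr_t. */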
	if ((addr >> PAGE_SHIFT) != pfn)
		return 0;

#ifdef CONFIG_SPARSEMEM
	if (pfn_to_section_nr(pfn) >= NR_MEM_SECTIONS)
		return 0;

	if (!valid_section(__nr_to_section(pfn_to_section_nr(pfn))))
		return 0;
#endif
	return memblock_is_map_memory(addr);
}
EXPORT_SYMBOL(pfn_valid);

static phys_addr_t memory_limit = PHYS_ADDR_MAX;

/*
 * Limit the memory size that was specified via FDT.
 */
static int __init early_mem(char *p)
{
	if (!p)
		return 1;

	memory_limit = memparse(p, &p) & PAGE_MASK;
	pr_notice("Memory limited to %lldMB\n", memory_limit >> 20);

	return 0;
}
early_param("mem", early_mem);

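/* Example: booting with "mem=512M" caps usable memory at 512 MiB. */
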
static int __init early_init_dt_scan_usablemem(unsigned long node,
		const char *uname, int depth, void *data)
{
	struct memblock_region *usablemem = data;
	const __be32 *reg;
	int len;

	if (depth != 1 || strcmp(uname, "chosen") != 0)
		return 0;

	reg = of_get_flat_dt_prop(node, "linux,usable-memory-range", &len);
	if (!reg || (len < (dt_root_addr_cells + dt_root_size_cells)))
		return 1;

	usablemem->base = dt_mem_next_cell(dt_root_addr_cells, &reg);
	usablemem->size = dt_mem_next_cell(dt_root_size_cells, &reg);

	return 1;
}

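/*
 * "linux,usable-memory-range" uses the same <addr size> cell layout as
 * "linux,elfcorehdr" above; it is typically provided to a kdump capture
 * kernel so that it confines itself to the crashkernel reservation.
 */
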
static void __init fdt_enforce_memory_region(void)
{
	struct memblock_region reg = {
		.size = 0,
	};

	of_scan_flat_dt(early_init_dt_scan_usablemem, &reg);

	if (reg.size)
		memblock_cap_memory_range(reg.base, reg.size);
}

void __init arm64_memblock_init(void)
{
	const s64 linear_region_size = BIT(vabits_actual - 1);

	/* Handle linux,usable-memory-range property */
	fdt_enforce_memory_region();

	/* Remove memory above our supported physical address size */
	memblock_remove(1ULL << PHYS_MASK_SHIFT, ULLONG_MAX);

	/*
	 * Select a suitable value for the base of physical memory.
	 */
	memstart_addr = round_down(memblock_start_of_DRAM(),
				   ARM64_MEMSTART_ALIGN);

	physvirt_offset = PHYS_OFFSET - PAGE_OFFSET;

	vmemmap = ((struct page *)VMEMMAP_START - (memstart_addr >> PAGE_SHIFT));

	/*
	 * If we are running with a 52-bit kernel VA config on a system that
	 * does not support it, we have to offset our vmemmap and
	 * physvirt_offset so that we avoid the 52-bit portion of the direct
	 * linear map.
	 */
	if (IS_ENABLED(CONFIG_ARM64_VA_BITS_52) && (vabits_actual != 52)) {
		vmemmap += (_PAGE_OFFSET(48) - _PAGE_OFFSET(52)) >> PAGE_SHIFT;
		physvirt_offset = PHYS_OFFSET - _PAGE_OFFSET(48);
	}

	/*
	 * Remove the memory that we will not be able to cover with the
	 * linear mapping. Take care not to clip the kernel which may be
	 * high in memory.
	 */
	memblock_remove(max_t(u64, memstart_addr + linear_region_size,
			__pa_symbol(_end)), ULLONG_MAX);
	if (memstart_addr + linear_region_size < memblock_end_of_DRAM()) {
		/* ensure that memstart_addr remains sufficiently aligned */
		memstart_addr = round_up(memblock_end_of_DRAM() - linear_region_size,
					 ARM64_MEMSTART_ALIGN);
		memblock_remove(0, memstart_addr);
	}

	/*
	 * Apply the memory limit if it was set. Since the kernel may be loaded
	 * high up in memory, add back the kernel region that must be accessible
	 * via the linear mapping.
	 */
	if (memory_limit != PHYS_ADDR_MAX) {
		memblock_mem_limit_remove_map(memory_limit);
		memblock_add(__pa_symbol(_text), (u64)(_end - _text));
	}

	if (IS_ENABLED(CONFIG_BLK_DEV_INITRD) && phys_initrd_size) {
		/*
		 * Add back the memory we just removed if its removal left
		 * the initrd inaccessible via the linear mapping.
		 * Otherwise, this is a no-op.
		 */
		u64 base = phys_initrd_start & PAGE_MASK;
		u64 size = PAGE_ALIGN(phys_initrd_start + phys_initrd_size) - base;

		/*
		 * We can only add back the initrd memory if we don't end up
		 * with more memory than we can address via the linear mapping.
		 * It is up to the bootloader to position the kernel and the
		 * initrd reasonably close to each other (i.e., within 32 GB of
		 * each other) so that all granule/#levels combinations can
		 * always access both.
		 */
		if (WARN(base < memblock_start_of_DRAM() ||
			 base + size > memblock_start_of_DRAM() +
				       linear_region_size,
			"initrd not fully accessible via the linear mapping -- please check your bootloader ...\n")) {
			phys_initrd_size = 0;
		} else {
			memblock_remove(base, size); /* clear MEMBLOCK_ flags */
			memblock_add(base, size);
			memblock_reserve(base, size);
		}
	}

	if (IS_ENABLED(CONFIG_RANDOMIZE_BASE)) {
		extern u16 memstart_offset_seed;
		u64 range = linear_region_size -
			    (memblock_end_of_DRAM() - memblock_start_of_DRAM());

		/*
		 * If the size of the linear region exceeds, by a sufficient
		 * margin, the size of the region that the available physical
		 * memory spans, randomize the linear region as well.
		 */
		if (memstart_offset_seed > 0 && range >= ARM64_MEMSTART_ALIGN) {
			range /= ARM64_MEMSTART_ALIGN;
			memstart_addr -= ARM64_MEMSTART_ALIGN *
					 ((range * memstart_offset_seed) >> 16);
		}
	}

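	/*
	 * Net effect: memstart_addr moves down by the fraction
	 * memstart_offset_seed / 2^16 of the spare linear-map space,
	 * quantised to whole ARM64_MEMSTART_ALIGN granules.
	 */
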
	/*
	 * Register the kernel text, kernel data, initrd, and initial
	 * pagetables with memblock.
	 */
	memblock_reserve(__pa_symbol(_text), _end - _text);
	if (IS_ENABLED(CONFIG_BLK_DEV_INITRD) && phys_initrd_size) {
		/* the generic initrd code expects virtual addresses */
		initrd_start = __phys_to_virt(phys_initrd_start);
		initrd_end = initrd_start + phys_initrd_size;
	}

	early_init_fdt_scan_reserved_mem();

	if (IS_ENABLED(CONFIG_ZONE_DMA)) {
		zone_dma_bits = ARM64_ZONE_DMA_BITS;
		arm64_dma_phys_limit = max_zone_phys(ARM64_ZONE_DMA_BITS);
	}

	if (IS_ENABLED(CONFIG_ZONE_DMA32))
		arm64_dma32_phys_limit = max_zone_phys(32);
	else
		arm64_dma32_phys_limit = PHYS_MASK + 1;
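
	/*
	 * zone_dma_bits is the generic limit that the dma-direct code
	 * consults when deciding whether an allocation must fall below
	 * the ZONE_DMA boundary.
	 */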

	reserve_crashkernel();

	reserve_elfcorehdr();

	high_memory = __va(memblock_end_of_DRAM() - 1) + 1;

	dma_contiguous_reserve(arm64_dma32_phys_limit);
}

void __init bootmem_init(void)
{
	unsigned long min, max;

	min = PFN_UP(memblock_start_of_DRAM());
	max = PFN_DOWN(memblock_end_of_DRAM());

	early_memtest(min << PAGE_SHIFT, max << PAGE_SHIFT);

	max_pfn = max_low_pfn = max;
	min_low_pfn = min;

	arm64_numa_init();
	/*
	 * Sparsemem tries to allocate bootmem in memory_present(), so must be
	 * done after the fixed reservations.
	 */
	memblocks_present();

	sparse_init();
	zone_sizes_init(min, max);

	memblock_dump_all();
}

#ifndef CONFIG_SPARSEMEM_VMEMMAP
static inline void free_memmap(unsigned long start_pfn, unsigned long end_pfn)
{
	struct page *start_pg, *end_pg;
	unsigned long pg, pgend;

	/*
	 * Convert start_pfn/end_pfn to a struct page pointer.
	 */
	start_pg = pfn_to_page(start_pfn - 1) + 1;
	end_pg = pfn_to_page(end_pfn - 1) + 1;

	/*
	 * Convert to physical addresses, and round start upwards and end
	 * downwards.
	 */
	pg = (unsigned long)PAGE_ALIGN(__pa(start_pg));
	pgend = (unsigned long)__pa(end_pg) & PAGE_MASK;

	/*
	 * If there are free pages between these, free the section of the
	 * memmap array.
	 */
	if (pg < pgend)
		memblock_free(pg, pgend - pg);
}

/*
 * The mem_map array can get very big. Free the unused area of the memory map.
 */
static void __init free_unused_memmap(void)
{
	unsigned long start, prev_end = 0;
	struct memblock_region *reg;

	for_each_memblock(memory, reg) {
		start = __phys_to_pfn(reg->base);

#ifdef CONFIG_SPARSEMEM
		/*
		 * Take care not to free memmap entries that don't exist due
		 * to SPARSEMEM sections which aren't present.
		 */
		start = min(start, ALIGN(prev_end, PAGES_PER_SECTION));
#endif
		/*
		 * If we had a previous bank, and there is a space between the
		 * current bank and the previous, free it.
		 */
		if (prev_end && prev_end < start)
			free_memmap(prev_end, start);

		/*
		 * Align up here since the VM subsystem insists that the
		 * memmap entries are valid from the bank end aligned to
		 * MAX_ORDER_NR_PAGES.
		 */
		prev_end = ALIGN(__phys_to_pfn(reg->base + reg->size),
				 MAX_ORDER_NR_PAGES);
	}

#ifdef CONFIG_SPARSEMEM
	if (!IS_ALIGNED(prev_end, PAGES_PER_SECTION))
		free_memmap(prev_end, ALIGN(prev_end, PAGES_PER_SECTION));
#endif
}
#endif /* !CONFIG_SPARSEMEM_VMEMMAP */

/*
 * mem_init() marks the free areas in the mem_map and tells us how much memory
 * is free. This is done after various parts of the system have claimed their
 * memory after the kernel image.
 */
void __init mem_init(void)
{
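	/*
	 * arm64_dma_phys_limit ? : arm64_dma32_phys_limit (GNU conditional
	 * with omitted middle operand) selects the ZONE_DMA limit when one
	 * was computed, falling back to the 32-bit limit otherwise.
	 */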
	if (swiotlb_force == SWIOTLB_FORCE ||
	    max_pfn > PFN_DOWN(arm64_dma_phys_limit ? : arm64_dma32_phys_limit))
		swiotlb_init(1);
	else
		swiotlb_force = SWIOTLB_NO_FORCE;

	set_max_mapnr(max_pfn - PHYS_PFN_OFFSET);

#ifndef CONFIG_SPARSEMEM_VMEMMAP
	free_unused_memmap();
#endif
	/* this will put all unused low memory onto the freelists */
	memblock_free_all();

	mem_init_print_info(NULL);

	/*
	 * Check boundaries twice: Some fundamental inconsistencies can be
	 * detected at build time already.
	 */
#ifdef CONFIG_COMPAT
	BUILD_BUG_ON(TASK_SIZE_32 > DEFAULT_MAP_WINDOW_64);
#endif

	if (PAGE_SIZE >= 16384 && get_num_physpages() <= 128) {
		extern int sysctl_overcommit_memory;
		/*
		 * On a machine this small we won't get anywhere without
		 * overcommit, so turn it on by default.
		 */
		sysctl_overcommit_memory = OVERCOMMIT_ALWAYS;
	}
}

void free_initmem(void)
{
	free_reserved_area(lm_alias(__init_begin),
			   lm_alias(__init_end),
			   POISON_FREE_INITMEM, "unused kernel");
	/*
	 * Unmap the __init region but leave the VM area in place. This
	 * prevents the region from being reused for kernel modules, which
	 * is not supported by kallsyms.
	 */
	unmap_kernel_range((u64)__init_begin, (u64)(__init_end - __init_begin));
}

/*
 * Dump out memory limit information on panic.
 */
static int dump_mem_limit(struct notifier_block *self, unsigned long v, void *p)
{
	if (memory_limit != PHYS_ADDR_MAX) {
		pr_emerg("Memory Limit: %llu MB\n", memory_limit >> 20);
	} else {
		pr_emerg("Memory Limit: none\n");
	}
	return 0;
}

static struct notifier_block mem_limit_notifier = {
	.notifier_call = dump_mem_limit,
};

static int __init register_mem_limit_dumper(void)
{
	atomic_notifier_chain_register(&panic_notifier_list,
				       &mem_limit_notifier);
	return 0;
}
__initcall(register_mem_limit_dumper);