/*
 * Based on arch/arm/mm/init.c
 *
 * Copyright (C) 1995-2005 Russell King
 * Copyright (C) 2012 ARM Ltd.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program. If not, see <http://www.gnu.org/licenses/>.
 */

#include <linux/kernel.h>
#include <linux/export.h>
#include <linux/errno.h>
#include <linux/swap.h>
#include <linux/init.h>
#include <linux/cache.h>
#include <linux/mman.h>
#include <linux/nodemask.h>
#include <linux/initrd.h>
#include <linux/gfp.h>
#include <linux/memblock.h>
#include <linux/sort.h>
#include <linux/of.h>
#include <linux/of_fdt.h>
#include <linux/dma-mapping.h>
#include <linux/dma-contiguous.h>
#include <linux/efi.h>
#include <linux/swiotlb.h>
#include <linux/vmalloc.h>
#include <linux/mm.h>
#include <linux/kexec.h>
#include <linux/crash_dump.h>
#include <linux/sizes.h>

#include <asm/boot.h>
#include <asm/fixmap.h>
#include <asm/kasan.h>
#include <asm/kernel-pgtable.h>
#include <asm/memory.h>
#include <asm/numa.h>
#include <asm/sections.h>
#include <asm/setup.h>
#include <asm/tlb.h>
#include <asm/alternative.h>

/*
 * We need to be able to catch inadvertent references to memstart_addr
 * that occur (potentially in generic code) before arm64_memblock_init()
 * executes, which assigns it its actual value. So use a default value
 * that cannot be mistaken for a real physical address.
 */
s64 memstart_addr __ro_after_init = -1;
EXPORT_SYMBOL(memstart_addr);

phys_addr_t arm64_dma_phys_limit __ro_after_init;

#ifdef CONFIG_KEXEC_CORE
/*
 * reserve_crashkernel() - reserves memory for crash kernel
 *
 * This function reserves the memory area given by the "crashkernel="
 * kernel command line parameter. The reserved memory is used by the
 * dump-capture kernel when the primary kernel crashes.
 */
static void __init reserve_crashkernel(void)
{
	unsigned long long crash_base, crash_size;
	int ret;

	ret = parse_crashkernel(boot_command_line, memblock_phys_mem_size(),
				&crash_size, &crash_base);
	/* no crashkernel= or invalid value specified */
	if (ret || !crash_size)
		return;

	crash_size = PAGE_ALIGN(crash_size);

	if (crash_base == 0) {
		/* Current arm64 boot protocol requires 2MB alignment */
		crash_base = memblock_find_in_range(0, ARCH_LOW_ADDRESS_LIMIT,
				crash_size, SZ_2M);
		if (crash_base == 0) {
			pr_warn("cannot allocate crashkernel (size:0x%llx)\n",
				crash_size);
			return;
		}
	} else {
		/* User specified the base address explicitly. */
		if (!memblock_is_region_memory(crash_base, crash_size)) {
			pr_warn("cannot reserve crashkernel: region is not memory\n");
			return;
		}

		if (memblock_is_region_reserved(crash_base, crash_size)) {
			pr_warn("cannot reserve crashkernel: region overlaps reserved memory\n");
			return;
		}

		if (!IS_ALIGNED(crash_base, SZ_2M)) {
			pr_warn("cannot reserve crashkernel: base address is not 2MB aligned\n");
			return;
		}
	}
	memblock_reserve(crash_base, crash_size);

	pr_info("crashkernel reserved: 0x%016llx - 0x%016llx (%lld MB)\n",
		crash_base, crash_base + crash_size, crash_size >> 20);

	crashk_res.start = crash_base;
	crashk_res.end = crash_base + crash_size - 1;
}
#else
static void __init reserve_crashkernel(void)
{
}
#endif /* CONFIG_KEXEC_CORE */
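
/*
 * Example (illustrative, not part of the original source): the
 * reservation above is driven by the "crashkernel=" parameter, e.g.
 *
 *	crashkernel=512M		let the kernel pick a 2MB-aligned
 *					base below ARCH_LOW_ADDRESS_LIMIT
 *	crashkernel=512M@0x60000000	explicit base, which must be 2MB
 *					aligned and lie in unreserved
 *					memory, or the reservation is
 *					refused as shown above
 */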

#ifdef CONFIG_CRASH_DUMP
static int __init early_init_dt_scan_elfcorehdr(unsigned long node,
		const char *uname, int depth, void *data)
{
	const __be32 *reg;
	int len;

	if (depth != 1 || strcmp(uname, "chosen") != 0)
		return 0;

	reg = of_get_flat_dt_prop(node, "linux,elfcorehdr", &len);
	if (!reg || (len < (dt_root_addr_cells + dt_root_size_cells)))
		return 1;

	elfcorehdr_addr = dt_mem_next_cell(dt_root_addr_cells, &reg);
	elfcorehdr_size = dt_mem_next_cell(dt_root_size_cells, &reg);

	return 1;
}

/*
 * reserve_elfcorehdr() - reserves memory for the ELF core header
 *
 * This function reserves the memory occupied by an ELF core header
 * described in the device tree. This region contains all the
 * information about the primary kernel's core image, and is used by a
 * dump-capture kernel to access the primary kernel's system memory.
 */
static void __init reserve_elfcorehdr(void)
{
	of_scan_flat_dt(early_init_dt_scan_elfcorehdr, NULL);

	if (!elfcorehdr_size)
		return;

	if (memblock_is_region_reserved(elfcorehdr_addr, elfcorehdr_size)) {
		pr_warn("elfcorehdr region overlaps reserved memory\n");
		return;
	}

	memblock_reserve(elfcorehdr_addr, elfcorehdr_size);

	pr_info("Reserving %lldKB of memory at 0x%llx for elfcorehdr\n",
		elfcorehdr_size >> 10, elfcorehdr_addr);
}
#else
static void __init reserve_elfcorehdr(void)
{
}
#endif /* CONFIG_CRASH_DUMP */
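
/*
 * Example (illustrative): the primary kernel describes where it left
 * the ELF core header via a /chosen property, with <address size>
 * encoded using the root #address-cells/#size-cells, e.g.
 *
 *	chosen {
 *		linux,elfcorehdr = <0x0 0x9fff0000 0x0 0x10000>;
 *	};
 */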

/*
 * Return the maximum physical address for ZONE_DMA32 (DMA_BIT_MASK(32)). It
 * currently assumes that for memory starting above 4G, 32-bit devices will
 * use a DMA offset.
 */
static phys_addr_t __init max_zone_dma_phys(void)
{
	phys_addr_t offset = memblock_start_of_DRAM() & GENMASK_ULL(63, 32);
	return min(offset + (1ULL << 32), memblock_end_of_DRAM());
}
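
/*
 * Worked example (illustrative): with DRAM starting at 0x80_0000_0000,
 * offset is 0x80_0000_0000 and the limit becomes the lower of
 * offset + 4GB and the end of DRAM, i.e. the first 4GB of DRAM --
 * assuming 32-bit devices see that window through a matching DMA
 * offset. With DRAM starting below 4GB, offset is 0 and the limit is
 * simply the 4GB boundary (or the end of DRAM, whichever is lower).
 */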

#ifdef CONFIG_NUMA

static void __init zone_sizes_init(unsigned long min, unsigned long max)
{
	unsigned long max_zone_pfns[MAX_NR_ZONES] = {0};

	if (IS_ENABLED(CONFIG_ZONE_DMA32))
		max_zone_pfns[ZONE_DMA32] = PFN_DOWN(max_zone_dma_phys());
	max_zone_pfns[ZONE_NORMAL] = max;

	free_area_init_nodes(max_zone_pfns);
}

#else

static void __init zone_sizes_init(unsigned long min, unsigned long max)
{
	struct memblock_region *reg;
	unsigned long zone_size[MAX_NR_ZONES], zhole_size[MAX_NR_ZONES];
	unsigned long max_dma = min;

	memset(zone_size, 0, sizeof(zone_size));

	/* 4GB maximum for 32-bit only capable devices */
#ifdef CONFIG_ZONE_DMA32
	max_dma = PFN_DOWN(arm64_dma_phys_limit);
	zone_size[ZONE_DMA32] = max_dma - min;
#endif
	zone_size[ZONE_NORMAL] = max - max_dma;

	memcpy(zhole_size, zone_size, sizeof(zhole_size));

	for_each_memblock(memory, reg) {
		unsigned long start = memblock_region_memory_base_pfn(reg);
		unsigned long end = memblock_region_memory_end_pfn(reg);

		if (start >= max)
			continue;

#ifdef CONFIG_ZONE_DMA32
		if (start < max_dma) {
			unsigned long dma_end = min(end, max_dma);
			zhole_size[ZONE_DMA32] -= dma_end - start;
		}
#endif
		if (end > max_dma) {
			unsigned long normal_end = min(end, max);
			unsigned long normal_start = max(start, max_dma);
			zhole_size[ZONE_NORMAL] -= normal_end - normal_start;
		}
	}

	free_area_init_node(0, zone_size, min, zhole_size);
}

#endif /* CONFIG_NUMA */
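
/*
 * Worked example (illustrative) for the !CONFIG_NUMA path above: with
 * two banks [0GB, 1GB) and [2GB, 3GB), max_dma ends up at 3GB, so
 * zone_size[ZONE_DMA32] covers the whole 3GB span including the
 * [1GB, 2GB) hole. The loop then subtracts each bank from zhole_size,
 * leaving zhole_size[ZONE_DMA32] = 1GB -- exactly the hole that
 * free_area_init_node() must not count as present memory.
 */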

int pfn_valid(unsigned long pfn)
{
	phys_addr_t addr = pfn << PAGE_SHIFT;

	if ((addr >> PAGE_SHIFT) != pfn)
		return 0;

#ifdef CONFIG_SPARSEMEM
	if (pfn_to_section_nr(pfn) >= NR_MEM_SECTIONS)
		return 0;

	if (!valid_section(__nr_to_section(pfn_to_section_nr(pfn))))
		return 0;
#endif
	return memblock_is_map_memory(addr);
}
EXPORT_SYMBOL(pfn_valid);
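
/*
 * Note (illustrative): the (addr >> PAGE_SHIFT) != pfn comparison
 * above rejects any pfn large enough that pfn << PAGE_SHIFT wraps the
 * 64-bit phys_addr_t; such a pfn cannot name a real page and must not
 * be allowed to alias a lower physical address.
 */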

static phys_addr_t memory_limit = PHYS_ADDR_MAX;

/*
 * Limit the memory size that was specified via FDT.
 */
static int __init early_mem(char *p)
{
	if (!p)
		return 1;

	memory_limit = memparse(p, &p) & PAGE_MASK;
	pr_notice("Memory limited to %lldMB\n", memory_limit >> 20);

	return 0;
}
early_param("mem", early_mem);
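
/*
 * Example (illustrative): booting with "mem=512M" caps usable RAM at
 * 512MB; memparse() also accepts K/G/T suffixes, and the & PAGE_MASK
 * above rounds any odd value down to a page boundary.
 */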

static int __init early_init_dt_scan_usablemem(unsigned long node,
		const char *uname, int depth, void *data)
{
	struct memblock_region *usablemem = data;
	const __be32 *reg;
	int len;

	if (depth != 1 || strcmp(uname, "chosen") != 0)
		return 0;

	reg = of_get_flat_dt_prop(node, "linux,usable-memory-range", &len);
	if (!reg || (len < (dt_root_addr_cells + dt_root_size_cells)))
		return 1;

	usablemem->base = dt_mem_next_cell(dt_root_addr_cells, &reg);
	usablemem->size = dt_mem_next_cell(dt_root_size_cells, &reg);

	return 1;
}

static void __init fdt_enforce_memory_region(void)
{
	struct memblock_region reg = {
		.size = 0,
	};

	of_scan_flat_dt(early_init_dt_scan_usablemem, &reg);

	if (reg.size)
		memblock_cap_memory_range(reg.base, reg.size);
}
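
/*
 * Example (illustrative): a dump-capture kernel is typically confined
 * to the crashkernel region with a /chosen property such as
 *
 *	chosen {
 *		linux,usable-memory-range = <0x0 0x60000000 0x0 0x20000000>;
 *	};
 *
 * which caps memblock to 512MB at 0x60000000 before any early
 * allocations are made.
 */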

void __init arm64_memblock_init(void)
{
	const s64 linear_region_size = -(s64)PAGE_OFFSET;

	/* Handle linux,usable-memory-range property */
	fdt_enforce_memory_region();

	/* Remove memory above our supported physical address size */
	memblock_remove(1ULL << PHYS_MASK_SHIFT, ULLONG_MAX);

	/*
	 * Ensure that the linear region takes up exactly half of the kernel
	 * virtual address space. This way, we can distinguish a linear address
	 * from a kernel/module/vmalloc address by testing a single bit.
	 */
	BUILD_BUG_ON(linear_region_size != BIT(VA_BITS - 1));

	/*
	 * Select a suitable value for the base of physical memory.
	 */
	memstart_addr = round_down(memblock_start_of_DRAM(),
				   ARM64_MEMSTART_ALIGN);

	/*
	 * Remove the memory that we will not be able to cover with the
	 * linear mapping. Take care not to clip the kernel, which may be
	 * high in memory.
	 */
	memblock_remove(max_t(u64, memstart_addr + linear_region_size,
			__pa_symbol(_end)), ULLONG_MAX);
	if (memstart_addr + linear_region_size < memblock_end_of_DRAM()) {
		/* ensure that memstart_addr remains sufficiently aligned */
		memstart_addr = round_up(memblock_end_of_DRAM() - linear_region_size,
					 ARM64_MEMSTART_ALIGN);
		memblock_remove(0, memstart_addr);
	}

	/*
	 * Apply the memory limit if it was set. Since the kernel may be loaded
	 * high up in memory, add back the kernel region that must be accessible
	 * via the linear mapping.
	 */
	if (memory_limit != PHYS_ADDR_MAX) {
		memblock_mem_limit_remove_map(memory_limit);
		memblock_add(__pa_symbol(_text), (u64)(_end - _text));
	}

	if (IS_ENABLED(CONFIG_BLK_DEV_INITRD) && phys_initrd_size) {
		/*
		 * Add back the memory we just removed if it would otherwise
		 * make the initrd inaccessible via the linear mapping.
		 * Otherwise, this is a no-op.
		 */
		u64 base = phys_initrd_start & PAGE_MASK;
		u64 size = PAGE_ALIGN(phys_initrd_start + phys_initrd_size) - base;

		/*
		 * We can only add back the initrd memory if we don't end up
		 * with more memory than we can address via the linear mapping.
		 * It is up to the bootloader to position the kernel and the
		 * initrd reasonably close to each other (i.e., within 32 GB of
		 * each other) so that all granule/#levels combinations can
		 * always access both.
		 */
		if (WARN(base < memblock_start_of_DRAM() ||
			 base + size > memblock_start_of_DRAM() +
				       linear_region_size,
			"initrd not fully accessible via the linear mapping -- please check your bootloader ...\n")) {
			phys_initrd_size = 0;
		} else {
			memblock_remove(base, size); /* clear MEMBLOCK_ flags */
			memblock_add(base, size);
			memblock_reserve(base, size);
		}
	}

	if (IS_ENABLED(CONFIG_RANDOMIZE_BASE)) {
		extern u16 memstart_offset_seed;
		u64 range = linear_region_size -
			    (memblock_end_of_DRAM() - memblock_start_of_DRAM());

		/*
		 * If the size of the linear region exceeds, by a sufficient
		 * margin, the size of the region that the available physical
		 * memory spans, randomize the linear region as well.
		 */
		if (memstart_offset_seed > 0 && range >= ARM64_MEMSTART_ALIGN) {
			range /= ARM64_MEMSTART_ALIGN;
			memstart_addr -= ARM64_MEMSTART_ALIGN *
					 ((range * memstart_offset_seed) >> 16);
		}
	}
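
	/*
	 * Note (illustrative): after the division, "range" counts how many
	 * ARM64_MEMSTART_ALIGN-sized slots the physical span can slide
	 * within the linear region; (range * memstart_offset_seed) >> 16
	 * scales the 16-bit seed onto [0, range), so memstart_addr is
	 * lowered by a uniformly chosen whole number of slots.
	 */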

	/*
	 * Register the kernel text, kernel data, initrd, and initial
	 * pagetables with memblock.
	 */
	memblock_reserve(__pa_symbol(_text), _end - _text);
	if (IS_ENABLED(CONFIG_BLK_DEV_INITRD) && phys_initrd_size) {
		/* the generic initrd code expects virtual addresses */
		initrd_start = __phys_to_virt(phys_initrd_start);
		initrd_end = initrd_start + phys_initrd_size;
	}

	early_init_fdt_scan_reserved_mem();

	/* 4GB maximum for 32-bit only capable devices */
	if (IS_ENABLED(CONFIG_ZONE_DMA32))
		arm64_dma_phys_limit = max_zone_dma_phys();
	else
		arm64_dma_phys_limit = PHYS_MASK + 1;

	reserve_crashkernel();

	reserve_elfcorehdr();

	high_memory = __va(memblock_end_of_DRAM() - 1) + 1;

	dma_contiguous_reserve(arm64_dma_phys_limit);
}

void __init bootmem_init(void)
{
	unsigned long min, max;

	min = PFN_UP(memblock_start_of_DRAM());
	max = PFN_DOWN(memblock_end_of_DRAM());

	early_memtest(min << PAGE_SHIFT, max << PAGE_SHIFT);

	max_pfn = max_low_pfn = max;
	min_low_pfn = min;

	arm64_numa_init();
	/*
	 * Sparsemem tries to allocate bootmem in memory_present(), so must be
	 * done after the fixed reservations.
	 */
	memblocks_present();

	sparse_init();
	zone_sizes_init(min, max);

	memblock_dump_all();
}

#ifndef CONFIG_SPARSEMEM_VMEMMAP
static inline void free_memmap(unsigned long start_pfn, unsigned long end_pfn)
{
	struct page *start_pg, *end_pg;
	unsigned long pg, pgend;

	/*
	 * Convert start_pfn/end_pfn to a struct page pointer.
	 */
	start_pg = pfn_to_page(start_pfn - 1) + 1;
	end_pg = pfn_to_page(end_pfn - 1) + 1;

	/*
	 * Convert to physical addresses, and round start upwards and end
	 * downwards.
	 */
	pg = (unsigned long)PAGE_ALIGN(__pa(start_pg));
	pgend = (unsigned long)__pa(end_pg) & PAGE_MASK;

	/*
	 * If there are free pages between these, free the section of the
	 * memmap array.
	 */
	if (pg < pgend)
		memblock_free(pg, pgend - pg);
}

/*
 * The mem_map array can get very big. Free the unused area of the memory map.
 */
static void __init free_unused_memmap(void)
{
	unsigned long start, prev_end = 0;
	struct memblock_region *reg;

	for_each_memblock(memory, reg) {
		start = __phys_to_pfn(reg->base);

#ifdef CONFIG_SPARSEMEM
		/*
		 * Take care not to free memmap entries that don't exist due
		 * to SPARSEMEM sections which aren't present.
		 */
		start = min(start, ALIGN(prev_end, PAGES_PER_SECTION));
#endif
		/*
		 * If we had a previous bank, and there is space between the
		 * current bank and the previous one, free it.
		 */
		if (prev_end && prev_end < start)
			free_memmap(prev_end, start);

		/*
		 * Align up here since the VM subsystem insists that the
		 * memmap entries are valid from the bank end aligned to
		 * MAX_ORDER_NR_PAGES.
		 */
		prev_end = ALIGN(__phys_to_pfn(reg->base + reg->size),
				 MAX_ORDER_NR_PAGES);
	}

#ifdef CONFIG_SPARSEMEM
	if (!IS_ALIGNED(prev_end, PAGES_PER_SECTION))
		free_memmap(prev_end, ALIGN(prev_end, PAGES_PER_SECTION));
#endif
}
#endif /* !CONFIG_SPARSEMEM_VMEMMAP */
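
/*
 * Worked example (illustrative): with banks covering pfns [0x10000,
 * 0x20000) and [0x80000, 0x90000), the struct pages backing the
 * [0x20000, 0x80000) hole serve no purpose, so free_memmap() returns
 * that slice of the memmap array to memblock -- keeping the entries
 * required at each bank edge by the MAX_ORDER_NR_PAGES (and, under
 * SPARSEMEM, section) alignment rules above.
 */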

/*
 * mem_init() marks the free areas in the mem_map and tells us how much memory
 * is free. This is done after various parts of the system have claimed their
 * memory after the kernel image.
 */
void __init mem_init(void)
{
	/* bounce buffering is needed if any RAM lies above the DMA limit */
	if (swiotlb_force == SWIOTLB_FORCE ||
	    max_pfn > (arm64_dma_phys_limit >> PAGE_SHIFT))
		swiotlb_init(1);
	else
		swiotlb_force = SWIOTLB_NO_FORCE;

	set_max_mapnr(max_pfn - PHYS_PFN_OFFSET);

#ifndef CONFIG_SPARSEMEM_VMEMMAP
	free_unused_memmap();
#endif
	/* this will put all unused low memory onto the freelists */
	memblock_free_all();

	mem_init_print_info(NULL);

	/*
	 * Check boundaries twice: Some fundamental inconsistencies can be
	 * detected at build time already.
	 */
#ifdef CONFIG_COMPAT
	BUILD_BUG_ON(TASK_SIZE_32 > DEFAULT_MAP_WINDOW_64);
#endif

	if (PAGE_SIZE >= 16384 && get_num_physpages() <= 128) {
		extern int sysctl_overcommit_memory;
		/*
		 * On a machine this small we won't get anywhere without
		 * overcommit, so turn it on by default.
		 */
		sysctl_overcommit_memory = OVERCOMMIT_ALWAYS;
	}
}

void free_initmem(void)
{
	free_reserved_area(lm_alias(__init_begin),
			   lm_alias(__init_end),
			   0, "unused kernel");
	/*
	 * Unmap the __init region but leave the VM area in place. This
	 * prevents the region from being reused for kernel modules, which
	 * is not supported by kallsyms.
	 */
	unmap_kernel_range((u64)__init_begin, (u64)(__init_end - __init_begin));
}

#ifdef CONFIG_BLK_DEV_INITRD
void __init free_initrd_mem(unsigned long start, unsigned long end)
{
	free_reserved_area((void *)start, (void *)end, 0, "initrd");
	memblock_free(__virt_to_phys(start), end - start);
}
#endif

/*
 * Dump out memory limit information on panic.
 */
static int dump_mem_limit(struct notifier_block *self, unsigned long v, void *p)
{
	if (memory_limit != PHYS_ADDR_MAX) {
		pr_emerg("Memory Limit: %llu MB\n", memory_limit >> 20);
	} else {
		pr_emerg("Memory Limit: none\n");
	}
	return 0;
}

static struct notifier_block mem_limit_notifier = {
	.notifier_call = dump_mem_limit,
};

static int __init register_mem_limit_dumper(void)
{
	atomic_notifier_chain_register(&panic_notifier_list,
				       &mem_limit_notifier);
	return 0;
}
__initcall(register_mem_limit_dumper);