mm: enhance free_reserved_area() to support poisoning memory with zero
[linux-2.6-block.git] / arch / arm64 / mm / init.c
/*
 * Based on arch/arm/mm/init.c
 *
 * Copyright (C) 1995-2005 Russell King
 * Copyright (C) 2012 ARM Ltd.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program.  If not, see <http://www.gnu.org/licenses/>.
 */

#include <linux/kernel.h>
#include <linux/export.h>
#include <linux/errno.h>
#include <linux/swap.h>
#include <linux/init.h>
#include <linux/bootmem.h>
#include <linux/mman.h>
#include <linux/nodemask.h>
#include <linux/initrd.h>
#include <linux/gfp.h>
#include <linux/memblock.h>
#include <linux/sort.h>
#include <linux/of_fdt.h>

#include <asm/prom.h>
#include <asm/sections.h>
#include <asm/setup.h>
#include <asm/sizes.h>
#include <asm/tlb.h>

#include "mm.h"

static unsigned long phys_initrd_start __initdata = 0;
static unsigned long phys_initrd_size __initdata = 0;

phys_addr_t memstart_addr __read_mostly = 0;

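/* FDT hook: record the physical initrd range passed in from the /chosen node. */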
void __init early_init_dt_setup_initrd_arch(unsigned long start,
					    unsigned long end)
{
	phys_initrd_start = start;
	phys_initrd_size = end - start;
}

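/* Parse the "initrd=<start>,<size>" kernel command-line parameter. */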
static int __init early_initrd(char *p)
{
	unsigned long start, size;
	char *endp;

	start = memparse(p, &endp);
	if (*endp == ',') {
		size = memparse(endp + 1, NULL);

		phys_initrd_start = start;
		phys_initrd_size = size;
	}
	return 0;
}
early_param("initrd", early_initrd);

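/* First PFN at or above the 4GB boundary, i.e. the upper limit of ZONE_DMA32. */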
#define MAX_DMA32_PFN ((4UL * 1024 * 1024 * 1024) >> PAGE_SHIFT)

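/*
 * Compute the spanned size of each zone (zone_size) and the part of that span
 * not backed by memblock memory (zhole_size), then hand both arrays to
 * free_area_init_node().
 */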
static void __init zone_sizes_init(unsigned long min, unsigned long max)
{
	struct memblock_region *reg;
	unsigned long zone_size[MAX_NR_ZONES], zhole_size[MAX_NR_ZONES];
	unsigned long max_dma32 = min;

	memset(zone_size, 0, sizeof(zone_size));

#ifdef CONFIG_ZONE_DMA32
	/* 4GB maximum for 32-bit only capable devices */
	max_dma32 = max(min, min(max, MAX_DMA32_PFN));
	zone_size[ZONE_DMA32] = max_dma32 - min;
#endif
	zone_size[ZONE_NORMAL] = max - max_dma32;

	memcpy(zhole_size, zone_size, sizeof(zhole_size));

	for_each_memblock(memory, reg) {
		unsigned long start = memblock_region_memory_base_pfn(reg);
		unsigned long end = memblock_region_memory_end_pfn(reg);

		if (start >= max)
			continue;
#ifdef CONFIG_ZONE_DMA32
		if (start < max_dma32) {
			unsigned long dma_end = min(end, max_dma32);
			zhole_size[ZONE_DMA32] -= dma_end - start;
		}
#endif
		if (end > max_dma32) {
			unsigned long normal_end = min(end, max);
			unsigned long normal_start = max(start, max_dma32);
			zhole_size[ZONE_NORMAL] -= normal_end - normal_start;
		}
	}

	free_area_init_node(0, zone_size, min, zhole_size);
}

#ifdef CONFIG_HAVE_ARCH_PFN_VALID
int pfn_valid(unsigned long pfn)
{
	return memblock_is_memory(pfn << PAGE_SHIFT);
}
EXPORT_SYMBOL(pfn_valid);
#endif

#ifndef CONFIG_SPARSEMEM
static void arm64_memory_present(void)
{
}
#else
static void arm64_memory_present(void)
{
	struct memblock_region *reg;

	for_each_memblock(memory, reg)
		memory_present(0, memblock_region_memory_base_pfn(reg),
			       memblock_region_memory_end_pfn(reg));
}
#endif

void __init arm64_memblock_init(void)
{
	u64 *reserve_map, base, size;

	/* Register the kernel text, kernel data and initrd with memblock */
	memblock_reserve(__pa(_text), _end - _text);
#ifdef CONFIG_BLK_DEV_INITRD
	if (phys_initrd_size) {
		memblock_reserve(phys_initrd_start, phys_initrd_size);

		/* Now convert initrd to virtual addresses */
		initrd_start = __phys_to_virt(phys_initrd_start);
		initrd_end = initrd_start + phys_initrd_size;
	}
#endif

	/*
	 * Reserve the page tables.  These are already in use,
	 * and can only be in node 0.
	 */
	memblock_reserve(__pa(swapper_pg_dir), SWAPPER_DIR_SIZE);
	memblock_reserve(__pa(idmap_pg_dir), IDMAP_DIR_SIZE);

	/* Reserve the dtb region */
	memblock_reserve(virt_to_phys(initial_boot_params),
			 be32_to_cpu(initial_boot_params->totalsize));

	/*
	 * Process the reserve map.  This will probably overlap the initrd
	 * and dtb locations which are already reserved, but overlapping
	 * doesn't hurt anything
	 */
	reserve_map = ((void *)initial_boot_params) +
			be32_to_cpu(initial_boot_params->off_mem_rsvmap);
	while (1) {
		base = be64_to_cpup(reserve_map++);
		size = be64_to_cpup(reserve_map++);
		if (!size)
			break;
		memblock_reserve(base, size);
	}

	memblock_allow_resize();
	memblock_dump_all();
}

void __init bootmem_init(void)
{
	unsigned long min, max;

	min = PFN_UP(memblock_start_of_DRAM());
	max = PFN_DOWN(memblock_end_of_DRAM());

	/*
	 * Sparsemem tries to allocate bootmem in memory_present(), so must be
	 * done after the fixed reservations.
	 */
	arm64_memory_present();

	sparse_init();
	zone_sizes_init(min, max);

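	/* high_memory: first linear-map address past the end of physical memory. */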
	high_memory = __va((max << PAGE_SHIFT) - 1) + 1;
	max_pfn = max_low_pfn = max;
}

/*
 * Poison init memory with an undefined instruction (0x0).
 */
static inline void poison_init_mem(void *s, size_t count)
{
	memset(s, 0, count);
}

#ifndef CONFIG_SPARSEMEM_VMEMMAP
static inline void free_memmap(unsigned long start_pfn, unsigned long end_pfn)
{
	struct page *start_pg, *end_pg;
	unsigned long pg, pgend;

	/*
	 * Convert start_pfn/end_pfn to a struct page pointer.
	 */
	start_pg = pfn_to_page(start_pfn - 1) + 1;
	end_pg = pfn_to_page(end_pfn - 1) + 1;

	/*
	 * Convert to physical addresses, and round start upwards and end
	 * downwards.
	 */
	pg = (unsigned long)PAGE_ALIGN(__pa(start_pg));
	pgend = (unsigned long)__pa(end_pg) & PAGE_MASK;

	/*
	 * If there are free pages between these, free the section of the
	 * memmap array.
	 */
	if (pg < pgend)
		free_bootmem(pg, pgend - pg);
}

/*
 * The mem_map array can get very big.  Free the unused area of the memory map.
 */
static void __init free_unused_memmap(void)
{
	unsigned long start, prev_end = 0;
	struct memblock_region *reg;

	for_each_memblock(memory, reg) {
		start = __phys_to_pfn(reg->base);

#ifdef CONFIG_SPARSEMEM
		/*
		 * Take care not to free memmap entries that don't exist due
		 * to SPARSEMEM sections which aren't present.
		 */
		start = min(start, ALIGN(prev_end, PAGES_PER_SECTION));
#endif
		/*
		 * If we had a previous bank, and there is a space between the
		 * current bank and the previous, free it.
		 */
		if (prev_end && prev_end < start)
			free_memmap(prev_end, start);

		/*
		 * Align up here since the VM subsystem insists that the
		 * memmap entries are valid from the bank end aligned to
		 * MAX_ORDER_NR_PAGES.
		 */
		prev_end = ALIGN(start + __phys_to_pfn(reg->size),
				 MAX_ORDER_NR_PAGES);
	}

#ifdef CONFIG_SPARSEMEM
	if (!IS_ALIGNED(prev_end, PAGES_PER_SECTION))
		free_memmap(prev_end, ALIGN(prev_end, PAGES_PER_SECTION));
#endif
}
#endif	/* !CONFIG_SPARSEMEM_VMEMMAP */

/*
 * mem_init() marks the free areas in the mem_map and tells us how much memory
 * is free.  This is done after various parts of the system have claimed their
 * memory after the kernel image.
 */
void __init mem_init(void)
{
	unsigned long reserved_pages, free_pages;
	struct memblock_region *reg;

	arm64_swiotlb_init();

	max_mapnr = pfn_to_page(max_pfn + PHYS_PFN_OFFSET) - mem_map;

#ifndef CONFIG_SPARSEMEM_VMEMMAP
	/* this will put all unused low memory onto the freelists */
	free_unused_memmap();
#endif

	totalram_pages += free_all_bootmem();

	reserved_pages = free_pages = 0;

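	/* Count reserved pages and still-free (zero refcount) pages in every memory region. */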
	for_each_memblock(memory, reg) {
		unsigned int pfn1, pfn2;
		struct page *page, *end;

		pfn1 = __phys_to_pfn(reg->base);
		pfn2 = pfn1 + __phys_to_pfn(reg->size);

		page = pfn_to_page(pfn1);
		end = pfn_to_page(pfn2 - 1) + 1;

		do {
			if (PageReserved(page))
				reserved_pages++;
			else if (!page_count(page))
				free_pages++;
			page++;
		} while (page < end);
	}

	/*
	 * Since our memory may not be contiguous, calculate the real number
	 * of pages we have in this system.
	 */
	pr_info("Memory:");
	num_physpages = 0;
	for_each_memblock(memory, reg) {
		unsigned long pages = memblock_region_memory_end_pfn(reg) -
			memblock_region_memory_base_pfn(reg);
		num_physpages += pages;
		printk(" %ldMB", pages >> (20 - PAGE_SHIFT));
	}
	printk(" = %luMB total\n", num_physpages >> (20 - PAGE_SHIFT));

	pr_notice("Memory: %luk/%luk available, %luk reserved\n",
		  nr_free_pages() << (PAGE_SHIFT-10),
		  free_pages << (PAGE_SHIFT-10),
		  reserved_pages << (PAGE_SHIFT-10));

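/* Layout-dump helpers: expand to base, top and size in KB (MLK), MB (MLM) or KB rounded up. */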
#define MLK(b, t) b, t, ((t) - (b)) >> 10
#define MLM(b, t) b, t, ((t) - (b)) >> 20
#define MLK_ROUNDUP(b, t) b, t, DIV_ROUND_UP(((t) - (b)), SZ_1K)

	pr_notice("Virtual kernel memory layout:\n"
		  " vmalloc : 0x%16lx - 0x%16lx (%6ld MB)\n"
#ifdef CONFIG_SPARSEMEM_VMEMMAP
		  " vmemmap : 0x%16lx - 0x%16lx (%6ld MB)\n"
#endif
		  " modules : 0x%16lx - 0x%16lx (%6ld MB)\n"
		  " memory : 0x%16lx - 0x%16lx (%6ld MB)\n"
		  " .init : 0x%p" " - 0x%p" " (%6ld kB)\n"
		  " .text : 0x%p" " - 0x%p" " (%6ld kB)\n"
		  " .data : 0x%p" " - 0x%p" " (%6ld kB)\n",
		  MLM(VMALLOC_START, VMALLOC_END),
#ifdef CONFIG_SPARSEMEM_VMEMMAP
		  MLM((unsigned long)virt_to_page(PAGE_OFFSET),
		      (unsigned long)virt_to_page(high_memory)),
#endif
		  MLM(MODULES_VADDR, MODULES_END),
		  MLM(PAGE_OFFSET, (unsigned long)high_memory),

		  MLK_ROUNDUP(__init_begin, __init_end),
		  MLK_ROUNDUP(_text, _etext),
		  MLK_ROUNDUP(_sdata, _edata));

#undef MLK
#undef MLM
#undef MLK_ROUNDUP

	/*
	 * Check boundaries twice: Some fundamental inconsistencies can be
	 * detected at build time already.
	 */
#ifdef CONFIG_COMPAT
	BUILD_BUG_ON(TASK_SIZE_32 > TASK_SIZE_64);
#endif
	BUILD_BUG_ON(TASK_SIZE_64 > MODULES_VADDR);
	BUG_ON(TASK_SIZE_64 > MODULES_VADDR);

	if (PAGE_SIZE >= 16384 && num_physpages <= 128) {
		extern int sysctl_overcommit_memory;
		/*
		 * On a machine this small we won't get anywhere without
		 * overcommit, so turn it on by default.
		 */
		sysctl_overcommit_memory = OVERCOMMIT_ALWAYS;
	}
}

void free_initmem(void)
{
	poison_init_mem(__init_begin, __init_end - __init_begin);
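	/*
	 * Pass -1 so free_initmem_default() does not poison the range again;
	 * poison_init_mem() has already zero-filled it.
	 */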
	free_initmem_default(-1);
}

#ifdef CONFIG_BLK_DEV_INITRD

static int keep_initrd;

void free_initrd_mem(unsigned long start, unsigned long end)
{
	if (!keep_initrd) {
		poison_init_mem((void *)start, PAGE_ALIGN(end) - start);
		free_reserved_area((void *)start, (void *)end, -1, "initrd");
	}
}

static int __init keepinitrd_setup(char *__unused)
{
	keep_initrd = 1;
	return 1;
}

__setup("keepinitrd", keepinitrd_setup);
#endif