Commit | Line | Data |
---|---|---|
01066625 PM |
1 | /* |
2 | * linux/arch/sh/mm/init.c | |
1da177e4 LT |
3 | * |
4 | * Copyright (C) 1999 Niibe Yutaka | |
2f599861 | 5 | * Copyright (C) 2002 - 2011 Paul Mundt |
1da177e4 LT |
6 | * |
7 | * Based on linux/arch/i386/mm/init.c: | |
8 | * Copyright (C) 1995 Linus Torvalds | |
9 | */ | |
1da177e4 LT |
10 | #include <linux/mm.h> |
11 | #include <linux/swap.h> | |
1da177e4 | 12 | #include <linux/init.h> |
5a0e3ad6 | 13 | #include <linux/gfp.h> |
57c8a661 | 14 | #include <linux/memblock.h> |
2cb7ce3b | 15 | #include <linux/proc_fs.h> |
27641dee | 16 | #include <linux/pagemap.h> |
01066625 PM |
17 | #include <linux/percpu.h> |
18 | #include <linux/io.h> | |
94c28510 | 19 | #include <linux/dma-mapping.h> |
f7be3455 | 20 | #include <linux/export.h> |
1da177e4 | 21 | #include <asm/mmu_context.h> |
4bc277ac | 22 | #include <asm/mmzone.h> |
c77b29db | 23 | #include <asm/kexec.h> |
1da177e4 LT |
24 | #include <asm/tlb.h> |
25 | #include <asm/cacheflush.h> | |
07cbb41b | 26 | #include <asm/sections.h> |
4bc277ac | 27 | #include <asm/setup.h> |
1da177e4 | 28 | #include <asm/cache.h> |
b0f3ae03 | 29 | #include <asm/sizes.h> |
1da177e4 | 30 | |
/*
 * Kernel page directory; cleared and installed as the initial MMU TTB
 * in paging_init() below.
 */
pgd_t swapper_pg_dir[PTRS_PER_PGD];
c6feb614 | 32 | |
19d8f84f PM |
/*
 * Default machine-vector memory init: register the platform's base
 * memory window (__MEMORY_START / __MEMORY_SIZE) with memblock.
 * Boards with more exotic layouts override sh_mv.mv_mem_init.
 */
void __init generic_mem_init(void)
{
	memblock_add(__MEMORY_START, __MEMORY_SIZE);
}
37 | ||
4bc277ac PM |
/*
 * Weak default hook called from do_init_bootmem(); platforms that need
 * board-specific memory setup provide their own definition.
 */
void __init __weak plat_mem_setup(void)
{
	/* Nothing to see here, move along. */
}
42 | ||
11cbb70e | 43 | #ifdef CONFIG_MMU |
07cad4dc | 44 | static pte_t *__get_pte_phys(unsigned long addr) |
1da177e4 LT |
45 | { |
46 | pgd_t *pgd; | |
26ff6c11 | 47 | pud_t *pud; |
1da177e4 | 48 | pmd_t *pmd; |
1da177e4 | 49 | |
99a596f9 | 50 | pgd = pgd_offset_k(addr); |
1da177e4 LT |
51 | if (pgd_none(*pgd)) { |
52 | pgd_ERROR(*pgd); | |
07cad4dc | 53 | return NULL; |
1da177e4 LT |
54 | } |
55 | ||
99a596f9 SM |
56 | pud = pud_alloc(NULL, pgd, addr); |
57 | if (unlikely(!pud)) { | |
58 | pud_ERROR(*pud); | |
07cad4dc | 59 | return NULL; |
26ff6c11 PM |
60 | } |
61 | ||
99a596f9 SM |
62 | pmd = pmd_alloc(NULL, pud, addr); |
63 | if (unlikely(!pmd)) { | |
64 | pmd_ERROR(*pmd); | |
07cad4dc | 65 | return NULL; |
1da177e4 LT |
66 | } |
67 | ||
598ee698 | 68 | return pte_offset_kernel(pmd, addr); |
07cad4dc MF |
69 | } |
70 | ||
71 | static void set_pte_phys(unsigned long addr, unsigned long phys, pgprot_t prot) | |
72 | { | |
73 | pte_t *pte; | |
74 | ||
75 | pte = __get_pte_phys(addr); | |
1da177e4 LT |
76 | if (!pte_none(*pte)) { |
77 | pte_ERROR(*pte); | |
78 | return; | |
79 | } | |
80 | ||
81 | set_pte(pte, pfn_pte(phys >> PAGE_SHIFT, prot)); | |
997d0030 | 82 | local_flush_tlb_one(get_asid(), addr); |
07cad4dc MF |
83 | |
84 | if (pgprot_val(prot) & _PAGE_WIRED) | |
85 | tlb_wire_entry(NULL, addr, *pte); | |
86 | } | |
87 | ||
88 | static void clear_pte_phys(unsigned long addr, pgprot_t prot) | |
89 | { | |
90 | pte_t *pte; | |
91 | ||
92 | pte = __get_pte_phys(addr); | |
93 | ||
94 | if (pgprot_val(prot) & _PAGE_WIRED) | |
95 | tlb_unwire_entry(); | |
96 | ||
97 | set_pte(pte, pfn_pte(0, __pgprot(0))); | |
98 | local_flush_tlb_one(get_asid(), addr); | |
1da177e4 LT |
99 | } |
100 | ||
1da177e4 LT |
101 | void __set_fixmap(enum fixed_addresses idx, unsigned long phys, pgprot_t prot) |
102 | { | |
103 | unsigned long address = __fix_to_virt(idx); | |
104 | ||
105 | if (idx >= __end_of_fixed_addresses) { | |
106 | BUG(); | |
107 | return; | |
108 | } | |
109 | ||
110 | set_pte_phys(address, phys, prot); | |
111 | } | |
2adb4e10 | 112 | |
07cad4dc MF |
113 | void __clear_fixmap(enum fixed_addresses idx, pgprot_t prot) |
114 | { | |
115 | unsigned long address = __fix_to_virt(idx); | |
116 | ||
117 | if (idx >= __end_of_fixed_addresses) { | |
118 | BUG(); | |
119 | return; | |
120 | } | |
121 | ||
122 | clear_pte_phys(address, prot); | |
123 | } | |
124 | ||
598ee698 PM |
125 | static pmd_t * __init one_md_table_init(pud_t *pud) |
126 | { | |
127 | if (pud_none(*pud)) { | |
128 | pmd_t *pmd; | |
129 | ||
15c3c114 | 130 | pmd = memblock_alloc(PAGE_SIZE, PAGE_SIZE); |
598ee698 PM |
131 | pud_populate(&init_mm, pud, pmd); |
132 | BUG_ON(pmd != pmd_offset(pud, 0)); | |
133 | } | |
134 | ||
135 | return pmd_offset(pud, 0); | |
136 | } | |
137 | ||
138 | static pte_t * __init one_page_table_init(pmd_t *pmd) | |
139 | { | |
140 | if (pmd_none(*pmd)) { | |
141 | pte_t *pte; | |
142 | ||
15c3c114 | 143 | pte = memblock_alloc(PAGE_SIZE, PAGE_SIZE); |
598ee698 PM |
144 | pmd_populate_kernel(&init_mm, pmd, pte); |
145 | BUG_ON(pte != pte_offset_kernel(pmd, 0)); | |
146 | } | |
147 | ||
148 | return pte_offset_kernel(pmd, 0); | |
149 | } | |
150 | ||
/*
 * Hook matching the x86 page_table_range_init() helper interface; no
 * kmap fixup is needed here, so the pte is passed through unchanged.
 */
static pte_t * __init page_table_kmap_check(pte_t *pte, pmd_t *pmd,
					    unsigned long vaddr, pte_t *lastpte)
{
	return pte;
}
156 | ||
2adb4e10 SM |
/*
 * Pre-populate the pgd/pud/pmd levels (but not the ptes) covering the
 * kernel virtual range [start, end) under @pgd_base, so that fixmap
 * entries can later be installed via __set_fixmap() without needing
 * to allocate.  i/j/k track the pgd/pud/pmd indices of vaddr.
 */
void __init page_table_range_init(unsigned long start, unsigned long end,
					 pgd_t *pgd_base)
{
	pgd_t *pgd;
	pud_t *pud;
	pmd_t *pmd;
	pte_t *pte = NULL;
	int i, j, k;
	unsigned long vaddr;

	vaddr = start;
	i = __pgd_offset(vaddr);
	j = __pud_offset(vaddr);
	k = __pmd_offset(vaddr);
	pgd = pgd_base + i;

	for ( ; (i < PTRS_PER_PGD) && (vaddr != end); pgd++, i++) {
		/* pud level is folded into the pgd on this arch */
		pud = (pud_t *)pgd;
		for ( ; (j < PTRS_PER_PUD) && (vaddr != end); pud++, j++) {
			pmd = one_md_table_init(pud);
#ifndef __PAGETABLE_PMD_FOLDED
			/* start at the pmd index of vaddr, not index 0 */
			pmd += k;
#endif
			for (; (k < PTRS_PER_PMD) && (vaddr != end); pmd++, k++) {
				pte = page_table_kmap_check(one_page_table_init(pmd),
							    pmd, vaddr, pte);
				vaddr += PMD_SIZE;
			}
			/* subsequent puds start from pmd index 0 */
			k = 0;
		}
		/* subsequent pgds start from pud index 0 */
		j = 0;
	}
}
11cbb70e | 190 | #endif /* CONFIG_MMU */ |
1da177e4 | 191 | |
4bc277ac PM |
/*
 * Set up the pglist_data for node @nid.  With CONFIG_NEED_MULTIPLE_NODES
 * the node data is carved out of memblock, preferring memory within the
 * node itself; otherwise NODE_DATA(nid) is statically allocated and only
 * the pfn span is filled in here.
 */
void __init allocate_pgdat(unsigned int nid)
{
	unsigned long start_pfn, end_pfn;
#ifdef CONFIG_NEED_MULTIPLE_NODES
	unsigned long phys;
#endif

	get_pfn_range_for_nid(nid, &start_pfn, &end_pfn);

#ifdef CONFIG_NEED_MULTIPLE_NODES
	/* First try to place the pgdat below the node's own end */
	phys = __memblock_alloc_base(sizeof(struct pglist_data),
				SMP_CACHE_BYTES, end_pfn << PAGE_SHIFT);
	/* Retry with all of system memory */
	if (!phys)
		phys = __memblock_alloc_base(sizeof(struct pglist_data),
					SMP_CACHE_BYTES, memblock_end_of_DRAM());
	if (!phys)
		panic("Can't allocate pgdat for node %d\n", nid);

	NODE_DATA(nid) = __va(phys);
	memset(NODE_DATA(nid), 0, sizeof(struct pglist_data));
#endif

	NODE_DATA(nid)->node_start_pfn = start_pfn;
	NODE_DATA(nid)->node_spanned_pages = end_pfn - start_pfn;
}
218 | ||
4bc277ac PM |
/*
 * Early boot memory setup: register all memblock memory as active
 * ranges, bring node 0 online, let the platform hook run, and feed
 * the present regions into the sparse memory model.
 */
static void __init do_init_bootmem(void)
{
	struct memblock_region *reg;

	/* Add active regions with valid PFNs. */
	for_each_memblock(memory, reg) {
		unsigned long start_pfn, end_pfn;
		start_pfn = memblock_region_memory_base_pfn(reg);
		end_pfn = memblock_region_memory_end_pfn(reg);
		__add_active_range(0, start_pfn, end_pfn);
	}

	/* All of system RAM sits in node 0 for the non-NUMA case */
	allocate_pgdat(0);
	node_set_online(0);

	/* Board-specific hook; may adjust memory before sparse init */
	plat_mem_setup();

	for_each_memblock(memory, reg) {
		int nid = memblock_get_region_node(reg);

		memory_present(nid, memblock_region_memory_base_pfn(reg),
			memblock_region_memory_end_pfn(reg));
	}
	sparse_init();
}
245 | ||
/*
 * Reserve the memory occupied by the kernel image (from the zero-page
 * offset up to _end) plus everything below CONFIG_ZERO_PAGE_OFFSET,
 * then process initrd and crashkernel reservations.
 */
static void __init early_reserve_mem(void)
{
	unsigned long start_pfn;
	u32 zero_base = (u32)__MEMORY_START + (u32)PHYSICAL_OFFSET;
	u32 start = zero_base + (u32)CONFIG_ZERO_PAGE_OFFSET;

	/*
	 * Partially used pages are not usable - thus
	 * we are rounding upwards:
	 */
	start_pfn = PFN_UP(__pa(_end));

	/*
	 * Reserve the kernel text and Reserve the bootmem bitmap. We do
	 * this in two steps (first step was init_bootmem()), because
	 * this catches the (definitely buggy) case of us accidentally
	 * initializing the bootmem allocator with an invalid RAM area.
	 */
	memblock_reserve(start, (PFN_PHYS(start_pfn) + PAGE_SIZE - 1) - start);

	/*
	 * Reserve physical pages below CONFIG_ZERO_PAGE_OFFSET.
	 */
	if (CONFIG_ZERO_PAGE_OFFSET != 0)
		memblock_reserve(zero_base, CONFIG_ZERO_PAGE_OFFSET);

	/*
	 * Handle additional early reservations
	 */
	check_for_initrd();
	reserve_crashkernel();
}
278 | ||
1da177e4 LT |
/*
 * Main paging setup: finalize memblock, establish the pfn limits,
 * initialize the uncached mapping / PMB / sparse memory model, install
 * swapper_pg_dir as the TTB, pre-populate the fixmap page tables and
 * hand the zone layout to the core allocator.
 */
void __init paging_init(void)
{
	unsigned long max_zone_pfns[MAX_NR_ZONES];
	unsigned long vaddr, end;

	sh_mv.mv_mem_init();

	early_reserve_mem();

	/*
	 * Once the early reservations are out of the way, give the
	 * platforms a chance to kick out some memory.
	 */
	if (sh_mv.mv_mem_reserve)
		sh_mv.mv_mem_reserve();

	memblock_enforce_memory_limit(memory_limit);
	memblock_allow_resize();

	memblock_dump_all();

	/*
	 * Determine low and high memory ranges:
	 */
	max_low_pfn = max_pfn = memblock_end_of_DRAM() >> PAGE_SHIFT;
	min_low_pfn = __MEMORY_START >> PAGE_SHIFT;

	nodes_clear(node_online_map);

	memory_start = (unsigned long)__va(__MEMORY_START);
	memory_end = memory_start + (memory_limit ?: memblock_phys_mem_size());

	uncached_init();
	pmb_init();
	do_init_bootmem();
	ioremap_fixed_init();

	/* We don't need to map the kernel through the TLB, as
	 * it is permanently mapped using P1. So clear the
	 * entire pgd. */
	memset(swapper_pg_dir, 0, sizeof(swapper_pg_dir));

	/* Set an initial value for the MMU.TTB so we don't have to
	 * check for a null value. */
	set_TTB(swapper_pg_dir);

	/*
	 * Populate the relevant portions of swapper_pg_dir so that
	 * we can use the fixmap entries without calling kmalloc.
	 * pte's will be filled in by __set_fixmap().
	 */
	vaddr = __fix_to_virt(__end_of_fixed_addresses - 1) & PMD_MASK;
	end = (FIXADDR_TOP + PMD_SIZE - 1) & PMD_MASK;
	page_table_range_init(vaddr, end, swapper_pg_dir);

	kmap_coherent_init();

	/* Everything lives in ZONE_NORMAL on this arch */
	memset(max_zone_pfns, 0, sizeof(max_zone_pfns));
	max_zone_pfns[ZONE_NORMAL] = max_low_pfn;
	free_area_init_nodes(max_zone_pfns);
}
340 | ||
d9b9487a PM |
/* Non-zero once mem_init() has completed. */
unsigned int mem_init_done = 0;
342 | ||
1da177e4 LT |
/*
 * Late memory initialization: compute high_memory over all online
 * nodes, release memblock memory to the page allocator, set up the
 * caches and the zero page, and print the virtual memory layout.
 */
void __init mem_init(void)
{
	pg_data_t *pgdat;

	/* high_memory = top of the highest directly-mapped node */
	high_memory = NULL;
	for_each_online_pgdat(pgdat)
		high_memory = max_t(void *, high_memory,
				    __va(pgdat_end_pfn(pgdat) << PAGE_SHIFT));

	memblock_free_all();

	/* Set this up early, so we can take care of the zero page */
	cpu_cache_init();

	/* clear the zero-page */
	memset(empty_zero_page, 0, PAGE_SIZE);
	__flush_wback_region(empty_zero_page, PAGE_SIZE);

	vsyscall_init();

	mem_init_print_info(NULL);
	pr_info("virtual kernel memory layout:\n"
		"    fixmap  : 0x%08lx - 0x%08lx   (%4ld kB)\n"
#ifdef CONFIG_HIGHMEM
		"    pkmap   : 0x%08lx - 0x%08lx   (%4ld kB)\n"
#endif
		"    vmalloc : 0x%08lx - 0x%08lx   (%4ld MB)\n"
		"    lowmem  : 0x%08lx - 0x%08lx   (%4ld MB) (cached)\n"
#ifdef CONFIG_UNCACHED_MAPPING
		"            : 0x%08lx - 0x%08lx   (%4ld MB) (uncached)\n"
#endif
		"      .init : 0x%08lx - 0x%08lx   (%4ld kB)\n"
		"      .data : 0x%08lx - 0x%08lx   (%4ld kB)\n"
		"      .text : 0x%08lx - 0x%08lx   (%4ld kB)\n",
		FIXADDR_START, FIXADDR_TOP,
		(FIXADDR_TOP - FIXADDR_START) >> 10,

#ifdef CONFIG_HIGHMEM
		PKMAP_BASE, PKMAP_BASE+LAST_PKMAP*PAGE_SIZE,
		(LAST_PKMAP*PAGE_SIZE) >> 10,
#endif

		(unsigned long)VMALLOC_START, VMALLOC_END,
		(VMALLOC_END - VMALLOC_START) >> 20,

		(unsigned long)memory_start, (unsigned long)high_memory,
		((unsigned long)high_memory - (unsigned long)memory_start) >> 20,

#ifdef CONFIG_UNCACHED_MAPPING
		uncached_start, uncached_end, uncached_size >> 20,
#endif

		(unsigned long)&__init_begin, (unsigned long)&__init_end,
		((unsigned long)&__init_end -
		 (unsigned long)&__init_begin) >> 10,

		(unsigned long)&_etext, (unsigned long)&_edata,
		((unsigned long)&_edata - (unsigned long)&_etext) >> 10,

		(unsigned long)&_text, (unsigned long)&_etext,
		((unsigned long)&_etext - (unsigned long)&_text) >> 10);

	mem_init_done = 1;
}
407 | ||
/*
 * Release the __init text/data sections back to the page allocator
 * (POISON_FREE_INITMEM is not used; -1 means "don't poison").
 */
void free_initmem(void)
{
	free_initmem_default(-1);
}
412 | ||
413 | #ifdef CONFIG_BLK_DEV_INITRD | |
/* Release the initrd image pages [start, end) back to the allocator. */
void free_initrd_mem(unsigned long start, unsigned long end)
{
	free_reserved_area((void *)start, (void *)end, -1, "initrd");
}
418 | #endif | |
33d63bd8 PM |
419 | |
420 | #ifdef CONFIG_MEMORY_HOTPLUG | |
24e6d5a5 CH |
421 | int arch_add_memory(int nid, u64 start, u64 size, struct vmem_altmap *altmap, |
422 | bool want_memblock) | |
33d63bd8 | 423 | { |
81cf09ed | 424 | unsigned long start_pfn = PFN_DOWN(start); |
33d63bd8 PM |
425 | unsigned long nr_pages = size >> PAGE_SHIFT; |
426 | int ret; | |
427 | ||
33d63bd8 | 428 | /* We only have ZONE_NORMAL, so this is easy.. */ |
24e6d5a5 | 429 | ret = __add_pages(nid, start_pfn, nr_pages, altmap, want_memblock); |
33d63bd8 | 430 | if (unlikely(ret)) |
866e6b9e | 431 | printk("%s: Failed, __add_pages() == %d\n", __func__, ret); |
33d63bd8 PM |
432 | |
433 | return ret; | |
434 | } | |
33d63bd8 | 435 | |
357d5946 | 436 | #ifdef CONFIG_NUMA |
33d63bd8 PM |
/*
 * Map a hot-added physical address to a NUMA node.  Only node 0 is
 * supported for now.
 */
int memory_add_physaddr_to_nid(u64 addr)
{
	/* Node 0 for now.. */
	return 0;
}
EXPORT_SYMBOL_GPL(memory_add_physaddr_to_nid);
443 | #endif | |
1f69b6af | 444 | |
24d335ca | 445 | #ifdef CONFIG_MEMORY_HOTREMOVE |
da024512 | 446 | int arch_remove_memory(u64 start, u64 size, struct vmem_altmap *altmap) |
24d335ca | 447 | { |
81cf09ed | 448 | unsigned long start_pfn = PFN_DOWN(start); |
24d335ca WC |
449 | unsigned long nr_pages = size >> PAGE_SHIFT; |
450 | struct zone *zone; | |
451 | int ret; | |
452 | ||
453 | zone = page_zone(pfn_to_page(start_pfn)); | |
da024512 | 454 | ret = __remove_pages(zone, start_pfn, nr_pages, altmap); |
24d335ca WC |
455 | if (unlikely(ret)) |
456 | pr_warn("%s: Failed, __remove_pages() == %d\n", __func__, | |
457 | ret); | |
458 | ||
459 | return ret; | |
460 | } | |
461 | #endif | |
3159e7d6 | 462 | #endif /* CONFIG_MEMORY_HOTPLUG */ |