// SPDX-License-Identifier: GPL-2.0
/*
 * linux/arch/m68k/mm/motorola.c
 *
 * Routines specific to the Motorola MMU, originally from:
 * linux/arch/m68k/init.c
 * which are Copyright (C) 1995 Hamish Macdonald
 *
 * Moved 8/20/1999 Sam Creasey
 */

#include <linux/module.h>
#include <linux/signal.h>
#include <linux/sched.h>
#include <linux/mm.h>
#include <linux/swap.h>
#include <linux/kernel.h>
#include <linux/string.h>
#include <linux/types.h>
#include <linux/init.h>
#include <linux/memblock.h>
#include <linux/gfp.h>

#include <asm/setup.h>
#include <linux/uaccess.h>
#include <asm/page.h>
#include <asm/pgalloc.h>
#include <asm/machdep.h>
#include <asm/io.h>
#ifdef CONFIG_ATARI
#include <asm/atari_stram.h>
#endif
#include <asm/sections.h>

#undef DEBUG

#ifndef mm_cachebits
/*
 * Bits to add to page descriptors for "normal" caching mode.
 * For 68020/030 this is 0.
 * For 68040, this is _PAGE_CACHE040 (cacheable, copyback).
 */
unsigned long mm_cachebits;
EXPORT_SYMBOL(mm_cachebits);
#endif

/* Prior to calling these routines, the page should have been flushed
 * from both the cache and ATC, or the CPU might not notice that the
 * cache setting for the page has been changed. -jskov
 */
static inline void nocache_page(void *vaddr)
{
        unsigned long addr = (unsigned long)vaddr;

        if (CPU_IS_040_OR_060) {
                pte_t *ptep = virt_to_kpte(addr);

                *ptep = pte_mknocache(*ptep);
        }
}

static inline void cache_page(void *vaddr)
{
        unsigned long addr = (unsigned long)vaddr;

        if (CPU_IS_040_OR_060) {
                pte_t *ptep = virt_to_kpte(addr);

                *ptep = pte_mkcache(*ptep);
        }
}

/*
 * The Motorola 680x0 user's manual recommends using uncached memory for
 * address translation tables.
 *
 * Seeing how the MMU can be external on (some of) these chips, that seems
 * like a very important recommendation to follow. Provide some helpers to
 * combat 'variation' amongst the users of this.
 */

void mmu_page_ctor(void *page)
{
        __flush_pages_to_ram(page, 1);
        flush_tlb_kernel_page(page);
        nocache_page(page);
}

void mmu_page_dtor(void *page)
{
        cache_page(page);
}
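
/*
 * Usage sketch (illustrative, not part of this file): a page about to back
 * translation tables goes through mmu_page_ctor() before use and through
 * mmu_page_dtor() before it is returned to the page allocator, exactly as
 * get_pointer_table() and free_pointer_table() below do:
 *
 *      void *page = (void *)get_zeroed_page(GFP_KERNEL);
 *
 *      mmu_page_ctor(page);    // flush caches/ATC, map noncacheable on '040/'060
 *      // ... use the page as pointer/page tables ...
 *      mmu_page_dtor(page);    // make it cacheable again
 *      free_page((unsigned long)page);
 */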

/* ++andreas: {get,free}_pointer_table rewritten to use unused fields from
   struct page instead of separately kmalloced struct.  Stolen from
   arch/sparc/mm/srmmu.c ... */

typedef struct list_head ptable_desc;

static struct list_head ptable_list[2] = {
        LIST_HEAD_INIT(ptable_list[0]),
        LIST_HEAD_INIT(ptable_list[1]),
};

#define PD_PTABLE(page) ((ptable_desc *)&(virt_to_page((void *)(page))->lru))
#define PD_PAGE(ptable) (list_entry(ptable, struct page, lru))
#define PD_MARKBITS(dp) (*(unsigned int *)&PD_PAGE(dp)->index)

static const int ptable_shift[2] = {
        7+2, /* PGD, PMD */
        6+2, /* PTE */
};

#define ptable_size(type) (1U << ptable_shift[type])
#define ptable_mask(type) ((1U << (PAGE_SIZE / ptable_size(type))) - 1)
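
/*
 * Worked example, assuming 4 KiB pages: ptable_shift[0] = 7+2 makes a
 * pointer (PGD/PMD) table 512 bytes, i.e. 128 4-byte descriptors, so a
 * page holds PAGE_SIZE/512 = 8 of them and ptable_mask(0) = 0xff.  A PTE
 * table is 256 bytes (64 descriptors): 16 per page, ptable_mask(1) = 0xffff.
 * PD_MARKBITS() keeps one "this slot is free" bit per table in the page.
 */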

void __init init_pointer_table(void *table, int type)
{
        ptable_desc *dp;
        unsigned long ptable = (unsigned long)table;
        unsigned long page = ptable & PAGE_MASK;
        unsigned int mask = 1U << ((ptable - page)/ptable_size(type));

        dp = PD_PTABLE(page);
        if (!(PD_MARKBITS(dp) & mask)) {
                PD_MARKBITS(dp) = ptable_mask(type);
                list_add(dp, &ptable_list[type]);
        }

        PD_MARKBITS(dp) &= ~mask;
        pr_debug("init_pointer_table: %lx, %x\n", ptable, PD_MARKBITS(dp));

        /* unreserve the page so it's possible to free that page */
        __ClearPageReserved(PD_PAGE(dp));
        init_page_count(PD_PAGE(dp));
}
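
/*
 * Boot-time usage (illustrative; the real calls live in the m68k init
 * code, and TABLE_PGD is assumed to be the matching type index): the
 * tables set up statically in head.S are handed over one at a time, e.g.
 *
 *      init_pointer_table(kernel_pg_dir, TABLE_PGD);
 *
 * which enters the containing page into ptable_list with this slot marked
 * in use, so get_pointer_table() can hand out the page's remaining slots.
 */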

void *get_pointer_table(int type)
{
        ptable_desc *dp = ptable_list[type].next;
        unsigned int mask = list_empty(&ptable_list[type]) ? 0 : PD_MARKBITS(dp);
        unsigned int tmp, off;

        /*
         * For a pointer table for a user process address space, a table
         * is taken from a page allocated for the purpose.  Each page
         * holds PAGE_SIZE/ptable_size(type) tables: 8 pointer tables or
         * 16 page tables with 4 KiB pages.  The page is remapped in
         * virtual address space to be noncacheable.
         */
        if (mask == 0) {
                void *page;
                ptable_desc *new;

                page = (void *)get_zeroed_page(GFP_KERNEL);
                if (!page)
                        return NULL;

                if (type == TABLE_PTE) {
                        /*
                         * m68k doesn't have SPLIT_PTE_PTLOCKS because it
                         * doesn't have SMP.
                         */
                        pagetable_pte_ctor(virt_to_ptdesc(page));
                }

                mmu_page_ctor(page);

                new = PD_PTABLE(page);
                PD_MARKBITS(new) = ptable_mask(type) - 1;
                list_add_tail(new, dp);

                return page;
        }

        for (tmp = 1, off = 0; (mask & tmp) == 0; tmp <<= 1, off += ptable_size(type))
                ;
        PD_MARKBITS(dp) = mask & ~tmp;
        if (!PD_MARKBITS(dp)) {
                /* move to end of list */
                list_move_tail(dp, &ptable_list[type]);
        }
        return page_address(PD_PAGE(dp)) + off;
}
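
/*
 * Example of the scan above: if PD_MARKBITS(dp) == 0b0100 for a pointer
 * table page, the loop exits with tmp == 0b0100 and off == 2 * 512, the
 * mark bits drop to 0, the descriptor moves to the tail of the list, and
 * the table at byte offset 1024 into the page is handed out.
 */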

int free_pointer_table(void *table, int type)
{
        ptable_desc *dp;
        unsigned long ptable = (unsigned long)table;
        unsigned long page = ptable & PAGE_MASK;
        unsigned int mask = 1U << ((ptable - page)/ptable_size(type));

        dp = PD_PTABLE(page);
        if (PD_MARKBITS(dp) & mask)
                panic("table already free!");

        PD_MARKBITS(dp) |= mask;

        if (PD_MARKBITS(dp) == ptable_mask(type)) {
                /* all tables in page are free, free page */
                list_del(dp);
                mmu_page_dtor((void *)page);
                if (type == TABLE_PTE)
                        pagetable_pte_dtor(virt_to_ptdesc((void *)page));
                free_page(page);
                return 1;
        } else if (ptable_list[type].next != dp) {
                /*
                 * move this descriptor to the front of the list, since
                 * it has one or more free tables.
                 */
                list_move(dp, &ptable_list[type]);
        }
        return 0;
}
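
/*
 * Usage sketch (illustrative; the real callers are the pgalloc wrappers,
 * and TABLE_PMD/TABLE_PTE are assumed to be the type indices they use):
 *
 *      pmd_t *pmd = get_pointer_table(TABLE_PMD);
 *      pte_t *pte = get_pointer_table(TABLE_PTE);
 *      ...
 *      free_pointer_table(pte, TABLE_PTE);     // 1 if the backing page was freed
 *      free_pointer_table(pmd, TABLE_PMD);
 */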

/* size of memory already mapped in head.S */
extern __initdata unsigned long m68k_init_mapped_size;

extern unsigned long availmem;

static pte_t *last_pte_table __initdata = NULL;

static pte_t * __init kernel_page_table(void)
{
        pte_t *pte_table = last_pte_table;

        if (PAGE_ALIGNED(last_pte_table)) {
                pte_table = memblock_alloc_low(PAGE_SIZE, PAGE_SIZE);
                if (!pte_table) {
                        panic("%s: Failed to allocate %lu bytes align=%lx\n",
                              __func__, PAGE_SIZE, PAGE_SIZE);
                }

                clear_page(pte_table);
                mmu_page_ctor(pte_table);

                last_pte_table = pte_table;
        }

        last_pte_table += PTRS_PER_PTE;

        return pte_table;
}
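
/*
 * Worked example (4 KiB pages, PTRS_PER_PTE == 64): each memblock page
 * yields 4096/256 == 16 PTE tables.  last_pte_table starts out NULL,
 * which is page aligned, so the first call allocates a page; after 16
 * calls the pointer has advanced to the next page boundary and
 * PAGE_ALIGNED() triggers a fresh allocation.
 */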

static pmd_t *last_pmd_table __initdata = NULL;

static pmd_t * __init kernel_ptr_table(void)
{
        if (!last_pmd_table) {
                unsigned long pmd, last;
                int i;

                /* Find the last ptr table that was used in head.S and
                 * reuse the remaining space in that page for further
                 * ptr tables.
                 */
                last = (unsigned long)kernel_pg_dir;
                for (i = 0; i < PTRS_PER_PGD; i++) {
                        pud_t *pud = (pud_t *)(&kernel_pg_dir[i]);

                        if (!pud_present(*pud))
                                continue;
                        pmd = pgd_page_vaddr(kernel_pg_dir[i]);
                        if (pmd > last)
                                last = pmd;
                }

                last_pmd_table = (pmd_t *)last;
#ifdef DEBUG
                printk("kernel_ptr_init: %p\n", last_pmd_table);
#endif
        }

        last_pmd_table += PTRS_PER_PMD;
        if (PAGE_ALIGNED(last_pmd_table)) {
                last_pmd_table = memblock_alloc_low(PAGE_SIZE, PAGE_SIZE);
                if (!last_pmd_table)
                        panic("%s: Failed to allocate %lu bytes align=%lx\n",
                              __func__, PAGE_SIZE, PAGE_SIZE);

                clear_page(last_pmd_table);
                mmu_page_ctor(last_pmd_table);
        }

        return last_pmd_table;
}
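
/*
 * Worked example (4 KiB pages, PTRS_PER_PMD == 128): a pointer table is
 * 512 bytes, so each page holds 8.  Note the order above: last_pmd_table
 * is advanced first and a new page is only allocated once it steps onto a
 * page boundary, because the initial page (holding the tables set up in
 * head.S) is already partially used and is consumed from its last
 * occupied slot onward.
 */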

static void __init map_node(int node)
{
        unsigned long physaddr, virtaddr, size;
        pgd_t *pgd_dir;
        p4d_t *p4d_dir;
        pud_t *pud_dir;
        pmd_t *pmd_dir;
        pte_t *pte_dir;

        size = m68k_memory[node].size;
        physaddr = m68k_memory[node].addr;
        virtaddr = (unsigned long)phys_to_virt(physaddr);
        physaddr |= m68k_supervisor_cachemode |
                    _PAGE_PRESENT | _PAGE_ACCESSED | _PAGE_DIRTY;
        if (CPU_IS_040_OR_060)
                physaddr |= _PAGE_GLOBAL040;

        while (size > 0) {
#ifdef DEBUG
                if (!(virtaddr & (PMD_SIZE-1)))
                        printk("\npa=%#lx va=%#lx ", physaddr & PAGE_MASK,
                               virtaddr);
#endif
                pgd_dir = pgd_offset_k(virtaddr);
                if (virtaddr && CPU_IS_020_OR_030) {
                        if (!(virtaddr & (PGDIR_SIZE-1)) &&
                            size >= PGDIR_SIZE) {
#ifdef DEBUG
                                printk("[very early term]");
#endif
                                pgd_val(*pgd_dir) = physaddr;
                                size -= PGDIR_SIZE;
                                virtaddr += PGDIR_SIZE;
                                physaddr += PGDIR_SIZE;
                                continue;
                        }
                }
                p4d_dir = p4d_offset(pgd_dir, virtaddr);
                pud_dir = pud_offset(p4d_dir, virtaddr);
                if (!pud_present(*pud_dir)) {
                        pmd_dir = kernel_ptr_table();
#ifdef DEBUG
                        printk("[new pointer %p]", pmd_dir);
#endif
                        pud_set(pud_dir, pmd_dir);
                } else
                        pmd_dir = pmd_offset(pud_dir, virtaddr);

                if (CPU_IS_020_OR_030) {
                        if (virtaddr) {
#ifdef DEBUG
                                printk("[early term]");
#endif
                                pmd_val(*pmd_dir) = physaddr;
                                physaddr += PMD_SIZE;
                        } else {
                                int i;
#ifdef DEBUG
                                printk("[zero map]");
#endif
                                pte_dir = kernel_page_table();
                                pmd_set(pmd_dir, pte_dir);

                                pte_val(*pte_dir++) = 0;
                                physaddr += PAGE_SIZE;
                                for (i = 1; i < PTRS_PER_PTE; physaddr += PAGE_SIZE, i++)
                                        pte_val(*pte_dir++) = physaddr;
                        }
                        size -= PMD_SIZE;
                        virtaddr += PMD_SIZE;
                } else {
                        if (!pmd_present(*pmd_dir)) {
#ifdef DEBUG
                                printk("[new table]");
#endif
                                pte_dir = kernel_page_table();
                                pmd_set(pmd_dir, pte_dir);
                        }
                        pte_dir = pte_offset_kernel(pmd_dir, virtaddr);

                        if (virtaddr) {
                                if (!pte_present(*pte_dir))
                                        pte_val(*pte_dir) = physaddr;
                        } else
                                pte_val(*pte_dir) = 0;
                        size -= PAGE_SIZE;
                        virtaddr += PAGE_SIZE;
                        physaddr += PAGE_SIZE;
                }

        }
#ifdef DEBUG
        printk("\n");
#endif
}
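
/*
 * Granularity, worked through for 4 KiB pages: on 68020/030 a chunk that
 * is PGDIR_SIZE (32 MiB) aligned and sized gets a single root-level
 * early-termination descriptor ("very early term"); otherwise each
 * PMD_SIZE (256 KiB) piece gets a pointer-level descriptor ("early term").
 * Only the chunk containing virtual address 0 is mapped with individual
 * PTEs, so page 0 can stay invalid.  The 040/060 MMUs do not support
 * early termination and always map PAGE_SIZE PTEs.
 */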

/*
 * Alternate definitions that are compile time constants, for
 * initializing protection_map.  The cachebits are fixed later.
 */
#define PAGE_NONE_C     __pgprot(_PAGE_PROTNONE | _PAGE_ACCESSED)
#define PAGE_SHARED_C   __pgprot(_PAGE_PRESENT | _PAGE_ACCESSED)
#define PAGE_COPY_C     __pgprot(_PAGE_PRESENT | _PAGE_RONLY | _PAGE_ACCESSED)
#define PAGE_READONLY_C __pgprot(_PAGE_PRESENT | _PAGE_RONLY | _PAGE_ACCESSED)

static pgprot_t protection_map[16] __ro_after_init = {
        [VM_NONE]                                       = PAGE_NONE_C,
        [VM_READ]                                       = PAGE_READONLY_C,
        [VM_WRITE]                                      = PAGE_COPY_C,
        [VM_WRITE | VM_READ]                            = PAGE_COPY_C,
        [VM_EXEC]                                       = PAGE_READONLY_C,
        [VM_EXEC | VM_READ]                             = PAGE_READONLY_C,
        [VM_EXEC | VM_WRITE]                            = PAGE_COPY_C,
        [VM_EXEC | VM_WRITE | VM_READ]                  = PAGE_COPY_C,
        [VM_SHARED]                                     = PAGE_NONE_C,
        [VM_SHARED | VM_READ]                           = PAGE_READONLY_C,
        [VM_SHARED | VM_WRITE]                          = PAGE_SHARED_C,
        [VM_SHARED | VM_WRITE | VM_READ]                = PAGE_SHARED_C,
        [VM_SHARED | VM_EXEC]                           = PAGE_READONLY_C,
        [VM_SHARED | VM_EXEC | VM_READ]                 = PAGE_READONLY_C,
        [VM_SHARED | VM_EXEC | VM_WRITE]                = PAGE_SHARED_C,
        [VM_SHARED | VM_EXEC | VM_WRITE | VM_READ]      = PAGE_SHARED_C
};
DECLARE_VM_GET_PAGE_PROT
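
/*
 * Example: a private writable mapping (VM_READ | VM_WRITE, no VM_SHARED)
 * indexes protection_map[3] == PAGE_COPY_C, i.e. it is entered read-only
 * so the first write faults and triggers copy-on-write; only the
 * VM_SHARED | VM_WRITE combinations get the writable PAGE_SHARED_C.
 */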

/*
 * paging_init() continues the virtual memory environment setup which
 * was begun by the code in arch/m68k/kernel/head.S.
 */
void __init paging_init(void)
{
        unsigned long max_zone_pfn[MAX_NR_ZONES] = { 0, };
        unsigned long min_addr, max_addr;
        unsigned long addr;
        int i;

#ifdef DEBUG
        printk("start of paging_init (%p, %lx)\n", kernel_pg_dir, availmem);
#endif

        /* Fix the cache mode in the page descriptors for the 680[46]0. */
        if (CPU_IS_040_OR_060) {
                int i;
#ifndef mm_cachebits
                mm_cachebits = _PAGE_CACHE040;
#endif
                for (i = 0; i < 16; i++)
                        pgprot_val(protection_map[i]) |= _PAGE_CACHE040;
        }

        min_addr = m68k_memory[0].addr;
        max_addr = min_addr + m68k_memory[0].size - 1;
        memblock_add_node(m68k_memory[0].addr, m68k_memory[0].size, 0,
                          MEMBLOCK_NONE);
        for (i = 1; i < m68k_num_memory;) {
                if (m68k_memory[i].addr < min_addr) {
                        printk("Ignoring memory chunk at 0x%lx:0x%lx before the first chunk\n",
                               m68k_memory[i].addr, m68k_memory[i].size);
                        printk("Fix your bootloader or use a memfile to make use of this area!\n");
                        m68k_num_memory--;
                        memmove(m68k_memory + i, m68k_memory + i + 1,
                                (m68k_num_memory - i) * sizeof(struct m68k_mem_info));
                        continue;
                }
                memblock_add_node(m68k_memory[i].addr, m68k_memory[i].size, i,
                                  MEMBLOCK_NONE);
                addr = m68k_memory[i].addr + m68k_memory[i].size - 1;
                if (addr > max_addr)
                        max_addr = addr;
                i++;
        }
        m68k_memoffset = min_addr - PAGE_OFFSET;
        m68k_virt_to_node_shift = fls(max_addr - min_addr) - 6;
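
        /*
         * Example of the line above: the "- 6" sizes the per-node lookup
         * granularity so the whole span fits in at most 2^6 == 64 table
         * entries (used by m68k_setup_node() below).  For a 256 MiB span,
         * fls(0x0fffffff) == 28 gives a shift of 22, i.e. one entry per
         * 4 MiB.
         */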

        module_fixup(NULL, __start_fixup, __stop_fixup);
        flush_icache();

        high_memory = phys_to_virt(max_addr) + 1;

        min_low_pfn = availmem >> PAGE_SHIFT;
        max_pfn = max_low_pfn = (max_addr >> PAGE_SHIFT) + 1;

        /* Reserve kernel text/data/bss and the memory allocated in head.S */
        memblock_reserve(m68k_memory[0].addr, availmem - m68k_memory[0].addr);

        /*
         * Map the physical memory available into the kernel virtual
         * address space.  Make sure memblock will not try to allocate
         * pages beyond the memory we already mapped in head.S.
         */
        memblock_set_bottom_up(true);

        for (i = 0; i < m68k_num_memory; i++) {
                m68k_setup_node(i);
                map_node(i);
        }

        flush_tlb_all();

        early_memtest(min_addr, max_addr);

        /* Allocate the zero page, which backs ZERO_PAGE(). */
        empty_zero_page = memblock_alloc(PAGE_SIZE, PAGE_SIZE);
        if (!empty_zero_page)
                panic("%s: Failed to allocate %lu bytes align=0x%lx\n",
                      __func__, PAGE_SIZE, PAGE_SIZE);

        /*
         * Set up SFC/DFC registers.
         */
        set_fc(USER_DATA);

#ifdef DEBUG
        printk("before free_area_init\n");
#endif
        for (i = 0; i < m68k_num_memory; i++)
                if (node_present_pages(i))
                        node_set_state(i, N_NORMAL_MEMORY);

        max_zone_pfn[ZONE_DMA] = memblock_end_of_DRAM();
        free_area_init(max_zone_pfn);
}