// SPDX-License-Identifier: GPL-2.0
/*
 * linux/arch/m68k/mm/motorola.c
 *
 * Routines specific to the Motorola MMU, originally from:
 * linux/arch/m68k/init.c
 * which are Copyright (C) 1995 Hamish Macdonald
 *
 * Moved 8/20/1999 Sam Creasey
 */

#include <linux/module.h>
#include <linux/signal.h>
#include <linux/sched.h>
#include <linux/mm.h>
#include <linux/swap.h>
#include <linux/kernel.h>
#include <linux/string.h>
#include <linux/types.h>
#include <linux/init.h>
#include <linux/memblock.h>
#include <linux/gfp.h>

#include <asm/setup.h>
#include <linux/uaccess.h>
#include <asm/page.h>
#include <asm/pgalloc.h>
#include <asm/machdep.h>
#include <asm/io.h>
#include <asm/dma.h>
#ifdef CONFIG_ATARI
#include <asm/atari_stram.h>
#endif
#include <asm/sections.h>

#undef DEBUG

#ifndef mm_cachebits
/*
 * Bits to add to page descriptors for "normal" caching mode.
 * For 68020/030 this is 0.
 * For 68040, this is _PAGE_CACHE040 (cacheable, copyback)
 */
unsigned long mm_cachebits;
EXPORT_SYMBOL(mm_cachebits);
#endif

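/*
 * Note (editorial, based on the surrounding arch code): mm_cachebits is
 * OR-ed into the PAGE_* protection definitions in
 * <asm/motorola_pgtable.h>, and paging_init() below likewise adds
 * _PAGE_CACHE040 to protection_map[] on '040/'060, so that ordinary
 * mappings default to copyback caching on those CPUs.
 */
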
/* Prior to calling these routines, the page should have been flushed
 * from both the cache and ATC, or the CPU might not notice that the
 * cache setting for the page has been changed. -jskov
 */
static inline void nocache_page(void *vaddr)
{
	unsigned long addr = (unsigned long)vaddr;

	if (CPU_IS_040_OR_060) {
		pgd_t *dir;
		p4d_t *p4dp;
		pud_t *pudp;
		pmd_t *pmdp;
		pte_t *ptep;

		dir = pgd_offset_k(addr);
		p4dp = p4d_offset(dir, addr);
		pudp = pud_offset(p4dp, addr);
		pmdp = pmd_offset(pudp, addr);
		ptep = pte_offset_kernel(pmdp, addr);
		*ptep = pte_mknocache(*ptep);
	}
}

static inline void cache_page(void *vaddr)
{
	unsigned long addr = (unsigned long)vaddr;

	if (CPU_IS_040_OR_060) {
		pgd_t *dir;
		p4d_t *p4dp;
		pud_t *pudp;
		pmd_t *pmdp;
		pte_t *ptep;

		dir = pgd_offset_k(addr);
		p4dp = p4d_offset(dir, addr);
		pudp = pud_offset(p4dp, addr);
		pmdp = pmd_offset(pudp, addr);
		ptep = pte_offset_kernel(pmdp, addr);
		*ptep = pte_mkcache(*ptep);
	}
}

/*
 * Motorola 680x0 user's manual recommends using uncached memory for address
 * translation tables.
 *
 * Seeing how the MMU can be external on (some of) these chips, that seems like
 * a very important recommendation to follow. Provide some helpers to combat
 * 'variation' amongst the users of this.
 */

void mmu_page_ctor(void *page)
{
	__flush_page_to_ram(page);
	flush_tlb_kernel_page(page);
	nocache_page(page);
}

void mmu_page_dtor(void *page)
{
	cache_page(page);
}

/* ++andreas: {get,free}_pointer_table rewritten to use unused fields from
   struct page instead of separately kmalloced struct.  Stolen from
   arch/sparc/mm/srmmu.c ... */

typedef struct list_head ptable_desc;

static struct list_head ptable_list[2] = {
	LIST_HEAD_INIT(ptable_list[0]),
	LIST_HEAD_INIT(ptable_list[1]),
};

#define PD_PTABLE(page) ((ptable_desc *)&(virt_to_page(page)->lru))
#define PD_PAGE(ptable) (list_entry(ptable, struct page, lru))
#define PD_MARKBITS(dp) (*(unsigned int *)&PD_PAGE(dp)->index)

static const int ptable_shift[2] = {
	7+2, /* PGD, PMD */
	6+2, /* PTE */
};

#define ptable_size(type) (1U << ptable_shift[type])
#define ptable_mask(type) ((1U << (PAGE_SIZE / ptable_size(type))) - 1)
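
/*
 * Worked example of the arithmetic above, assuming 4 KiB pages (the
 * exact figures depend on PAGE_SIZE): a PGD/PMD pointer table is
 * 1 << (7+2) = 512 bytes, so one page holds 8 of them and
 * ptable_mask() is 0xff; a PTE table is 1 << (6+2) = 256 bytes,
 * giving 16 tables per page and a mask of 0xffff. Each set bit in
 * PD_MARKBITS() marks a still-free table slot within the page.
 */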

void __init init_pointer_table(void *table, int type)
{
	ptable_desc *dp;
	unsigned long ptable = (unsigned long)table;
	unsigned long page = ptable & PAGE_MASK;
	unsigned int mask = 1U << ((ptable - page)/ptable_size(type));

	dp = PD_PTABLE(page);
	if (!(PD_MARKBITS(dp) & mask)) {
		PD_MARKBITS(dp) = ptable_mask(type);
		list_add(dp, &ptable_list[type]);
	}

	PD_MARKBITS(dp) &= ~mask;
	pr_debug("init_pointer_table: %lx, %x\n", ptable, PD_MARKBITS(dp));

	/* unreserve the page so it's possible to free that page */
	__ClearPageReserved(PD_PAGE(dp));
	init_page_count(PD_PAGE(dp));
}

void *get_pointer_table(int type)
{
	ptable_desc *dp = ptable_list[type].next;
	unsigned int mask = list_empty(&ptable_list[type]) ? 0 : PD_MARKBITS(dp);
	unsigned int tmp, off;

	/*
	 * For a pointer table for a user process address space, a
	 * table is taken from a page allocated for the purpose.  Each
	 * page can hold 8 pointer tables.  The page is remapped in
	 * virtual address space to be noncacheable.
	 */
	if (mask == 0) {
		void *page;
		ptable_desc *new;

		if (!(page = (void *)get_zeroed_page(GFP_KERNEL)))
			return NULL;

		if (type == TABLE_PTE) {
			/*
			 * m68k doesn't have SPLIT_PTE_PTLOCKS, since it
			 * does not support SMP.
			 */
			pgtable_pte_page_ctor(virt_to_page(page));
		}

		mmu_page_ctor(page);

		new = PD_PTABLE(page);
		PD_MARKBITS(new) = ptable_mask(type) - 1;
		list_add_tail(new, dp);

		return (pmd_t *)page;
	}

	/* Find the first free table slot in this page's bitmap. */
	for (tmp = 1, off = 0; (mask & tmp) == 0; tmp <<= 1, off += ptable_size(type))
		;
	PD_MARKBITS(dp) = mask & ~tmp;
	if (!PD_MARKBITS(dp)) {
		/* move to end of list */
		list_move_tail(dp, &ptable_list[type]);
	}
	return page_address(PD_PAGE(dp)) + off;
}
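
/*
 * Usage sketch (illustrative only, not part of this file): the pgalloc
 * helpers in <asm/motorola_pgalloc.h> are thin wrappers around this
 * allocator, roughly along the lines of:
 *
 *	static inline pmd_t *pmd_alloc_one(struct mm_struct *mm,
 *					   unsigned long address)
 *	{
 *		return get_pointer_table(TABLE_PMD);
 *	}
 *
 * so a table handed back here is already zeroed, noncacheable, and
 * packed into a shared page together with its sibling tables.
 */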

int free_pointer_table(void *table, int type)
{
	ptable_desc *dp;
	unsigned long ptable = (unsigned long)table;
	unsigned long page = ptable & PAGE_MASK;
	unsigned int mask = 1U << ((ptable - page)/ptable_size(type));

	dp = PD_PTABLE(page);
	if (PD_MARKBITS (dp) & mask)
		panic ("table already free!");

	PD_MARKBITS (dp) |= mask;

	if (PD_MARKBITS(dp) == ptable_mask(type)) {
		/* all tables in page are free, free page */
		list_del(dp);
		mmu_page_dtor((void *)page);
		if (type == TABLE_PTE)
			pgtable_pte_page_dtor(virt_to_page(page));
		free_page (page);
		return 1;
	} else if (ptable_list[type].next != dp) {
		/*
		 * move this descriptor to the front of the list, since
		 * it has one or more free tables; get_pointer_table()
		 * only ever looks at the head of the list.
		 */
		list_move(dp, &ptable_list[type]);
	}
	return 0;
}

/* size of memory already mapped in head.S */
extern __initdata unsigned long m68k_init_mapped_size;

extern unsigned long availmem;
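
/*
 * The two boot-time allocators below carve PTE and pointer tables out
 * of whole pages from memblock: each call returns the next table-sized
 * slice of the current page, and a fresh (cleared, noncacheable) page
 * is allocated only once the current one is used up. kernel_ptr_table()
 * additionally begins by reusing the free tail of the last
 * pointer-table page that head.S set up.
 */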

static pte_t *last_pte_table __initdata = NULL;

static pte_t * __init kernel_page_table(void)
{
	pte_t *pte_table = last_pte_table;

	if (((unsigned long)last_pte_table & ~PAGE_MASK) == 0) {
		pte_table = (pte_t *)memblock_alloc_low(PAGE_SIZE, PAGE_SIZE);
		if (!pte_table) {
			panic("%s: Failed to allocate %lu bytes align=%lx\n",
					__func__, PAGE_SIZE, PAGE_SIZE);
		}

		clear_page(pte_table);
		mmu_page_ctor(pte_table);

		last_pte_table = pte_table;
	}

	last_pte_table += PTRS_PER_PTE;

	return pte_table;
}

static pmd_t *last_pmd_table __initdata = NULL;

static pmd_t * __init kernel_ptr_table(void)
{
	if (!last_pmd_table) {
		unsigned long pmd, last;
		int i;

		/* Find the last ptr table that was used in head.S and
		 * reuse the remaining space in that page for further
		 * ptr tables.
		 */
		last = (unsigned long)kernel_pg_dir;
		for (i = 0; i < PTRS_PER_PGD; i++) {
			pud_t *pud = (pud_t *)(&kernel_pg_dir[i]);

			if (!pud_present(*pud))
				continue;
			pmd = pgd_page_vaddr(kernel_pg_dir[i]);
			if (pmd > last)
				last = pmd;
		}

		last_pmd_table = (pmd_t *)last;
#ifdef DEBUG
		printk("kernel_ptr_init: %p\n", last_pmd_table);
#endif
	}

	last_pmd_table += PTRS_PER_PMD;
	if (((unsigned long)last_pmd_table & ~PAGE_MASK) == 0) {
		last_pmd_table = (pmd_t *)memblock_alloc_low(PAGE_SIZE,
							     PAGE_SIZE);
		if (!last_pmd_table)
			panic("%s: Failed to allocate %lu bytes align=%lx\n",
			      __func__, PAGE_SIZE, PAGE_SIZE);

		clear_page(last_pmd_table);
		mmu_page_ctor(last_pmd_table);
	}

	return last_pmd_table;
}
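
/*
 * map_node() below builds the kernel mapping for one memory chunk. On
 * 020/030 it uses "early termination" descriptors where it can: a
 * single root- or pointer-table entry points straight at a contiguous
 * block of physical memory, so no lower-level table is needed. That
 * shortcut is not used on 040/060, where every page gets a real PTE.
 * Virtual address 0 is deliberately left invalid so that NULL pointer
 * dereferences fault.
 */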

static void __init map_node(int node)
{
	unsigned long physaddr, virtaddr, size;
	pgd_t *pgd_dir;
	p4d_t *p4d_dir;
	pud_t *pud_dir;
	pmd_t *pmd_dir;
	pte_t *pte_dir;

	size = m68k_memory[node].size;
	physaddr = m68k_memory[node].addr;
	virtaddr = (unsigned long)phys_to_virt(physaddr);
	physaddr |= m68k_supervisor_cachemode |
		    _PAGE_PRESENT | _PAGE_ACCESSED | _PAGE_DIRTY;
	if (CPU_IS_040_OR_060)
		physaddr |= _PAGE_GLOBAL040;

	while (size > 0) {
#ifdef DEBUG
		if (!(virtaddr & (PMD_SIZE-1)))
			printk ("\npa=%#lx va=%#lx ", physaddr & PAGE_MASK,
				virtaddr);
#endif
		pgd_dir = pgd_offset_k(virtaddr);
		if (virtaddr && CPU_IS_020_OR_030) {
			if (!(virtaddr & (PGDIR_SIZE-1)) &&
			    size >= PGDIR_SIZE) {
#ifdef DEBUG
				printk ("[very early term]");
#endif
				pgd_val(*pgd_dir) = physaddr;
				size -= PGDIR_SIZE;
				virtaddr += PGDIR_SIZE;
				physaddr += PGDIR_SIZE;
				continue;
			}
		}
		p4d_dir = p4d_offset(pgd_dir, virtaddr);
		pud_dir = pud_offset(p4d_dir, virtaddr);
		if (!pud_present(*pud_dir)) {
			pmd_dir = kernel_ptr_table();
#ifdef DEBUG
			printk ("[new pointer %p]", pmd_dir);
#endif
			pud_set(pud_dir, pmd_dir);
		} else
			pmd_dir = pmd_offset(pud_dir, virtaddr);

		if (CPU_IS_020_OR_030) {
			if (virtaddr) {
#ifdef DEBUG
				printk ("[early term]");
#endif
				pmd_val(*pmd_dir) = physaddr;
				physaddr += PMD_SIZE;
			} else {
				int i;
#ifdef DEBUG
				printk ("[zero map]");
#endif
				pte_dir = kernel_page_table();
				pmd_set(pmd_dir, pte_dir);

				pte_val(*pte_dir++) = 0;
				physaddr += PAGE_SIZE;
				for (i = 1; i < PTRS_PER_PTE; physaddr += PAGE_SIZE, i++)
					pte_val(*pte_dir++) = physaddr;
			}
			size -= PMD_SIZE;
			virtaddr += PMD_SIZE;
		} else {
			if (!pmd_present(*pmd_dir)) {
#ifdef DEBUG
				printk ("[new table]");
#endif
				pte_dir = kernel_page_table();
				pmd_set(pmd_dir, pte_dir);
			}
			pte_dir = pte_offset_kernel(pmd_dir, virtaddr);

			if (virtaddr) {
				if (!pte_present(*pte_dir))
					pte_val(*pte_dir) = physaddr;
			} else
				pte_val(*pte_dir) = 0;
			size -= PAGE_SIZE;
			virtaddr += PAGE_SIZE;
			physaddr += PAGE_SIZE;
		}
	}
#ifdef DEBUG
	printk("\n");
#endif
}

/*
 * paging_init() continues the virtual memory environment setup which
 * was begun by the code in arch/head.S.
 */
void __init paging_init(void)
{
	unsigned long max_zone_pfn[MAX_NR_ZONES] = { 0, };
	unsigned long min_addr, max_addr;
	unsigned long addr;
	int i;

#ifdef DEBUG
	printk ("start of paging_init (%p, %lx)\n", kernel_pg_dir, availmem);
#endif

	/* Fix the cache mode in the page descriptors for the 680[46]0. */
	if (CPU_IS_040_OR_060) {
		int i;
#ifndef mm_cachebits
		mm_cachebits = _PAGE_CACHE040;
#endif
		for (i = 0; i < 16; i++)
			pgprot_val(protection_map[i]) |= _PAGE_CACHE040;
	}

	min_addr = m68k_memory[0].addr;
	max_addr = min_addr + m68k_memory[0].size;
	memblock_add_node(m68k_memory[0].addr, m68k_memory[0].size, 0);
	for (i = 1; i < m68k_num_memory;) {
		if (m68k_memory[i].addr < min_addr) {
			printk("Ignoring memory chunk at 0x%lx:0x%lx before the first chunk\n",
				m68k_memory[i].addr, m68k_memory[i].size);
			printk("Fix your bootloader or use a memfile to make use of this area!\n");
			m68k_num_memory--;
			memmove(m68k_memory + i, m68k_memory + i + 1,
				(m68k_num_memory - i) * sizeof(struct m68k_mem_info));
			continue;
		}
		memblock_add_node(m68k_memory[i].addr, m68k_memory[i].size, i);
		addr = m68k_memory[i].addr + m68k_memory[i].size;
		if (addr > max_addr)
			max_addr = addr;
		i++;
	}
	m68k_memoffset = min_addr - PAGE_OFFSET;
	m68k_virt_to_node_shift = fls(max_addr - min_addr - 1) - 6;
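	/*
	 * The shift above divides the populated address range into at
	 * most 64 equal-sized chunks: fls() rounds the range up to a
	 * power of two, and subtracting 6 leaves 2^6 slots. Presumably
	 * this matches the size of the address-to-node lookup table
	 * used elsewhere in arch/m68k.
	 */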

	module_fixup(NULL, __start_fixup, __stop_fixup);
	flush_icache();

	high_memory = phys_to_virt(max_addr);

	min_low_pfn = availmem >> PAGE_SHIFT;
	max_pfn = max_low_pfn = max_addr >> PAGE_SHIFT;

	/* Reserve kernel text/data/bss and the memory allocated in head.S */
	memblock_reserve(m68k_memory[0].addr, availmem - m68k_memory[0].addr);

	/*
	 * Map the physical memory available into the kernel virtual
	 * address space. Make sure memblock will not try to allocate
	 * pages beyond the memory we already mapped in head.S
	 */
	memblock_set_bottom_up(true);

	for (i = 0; i < m68k_num_memory; i++) {
		m68k_setup_node(i);
		map_node(i);
	}

	flush_tlb_all();

	/*
	 * initialize the bad page table and bad page to point
	 * to a couple of allocated pages
	 */
	empty_zero_page = memblock_alloc(PAGE_SIZE, PAGE_SIZE);
	if (!empty_zero_page)
		panic("%s: Failed to allocate %lu bytes align=0x%lx\n",
		      __func__, PAGE_SIZE, PAGE_SIZE);

	/*
	 * Set up SFC/DFC registers
	 */
	set_fs(KERNEL_DS);

#ifdef DEBUG
	printk ("before free_area_init\n");
#endif
	for (i = 0; i < m68k_num_memory; i++)
		if (node_present_pages(i))
			node_set_state(i, N_NORMAL_MEMORY);

	max_zone_pfn[ZONE_DMA] = memblock_end_of_DRAM();
	free_area_init(max_zone_pfn);
}