// SPDX-License-Identifier: GPL-2.0
/*
 * linux/arch/m68k/mm/motorola.c
 *
 * Routines specific to the Motorola MMU, originally from:
 * linux/arch/m68k/init.c
 * which are Copyright (C) 1995 Hamish Macdonald
 *
 * Moved 8/20/1999 Sam Creasey
 */

#include <linux/module.h>
#include <linux/signal.h>
#include <linux/sched.h>
#include <linux/mm.h>
#include <linux/swap.h>
#include <linux/kernel.h>
#include <linux/string.h>
#include <linux/types.h>
#include <linux/init.h>
#include <linux/memblock.h>
#include <linux/gfp.h>

#include <asm/setup.h>
#include <linux/uaccess.h>
#include <asm/page.h>
#include <asm/pgalloc.h>
#include <asm/machdep.h>
#include <asm/io.h>
#ifdef CONFIG_ATARI
#include <asm/atari_stram.h>
#endif
#include <asm/sections.h>

#undef DEBUG

#ifndef mm_cachebits
/*
 * Bits to add to page descriptors for "normal" caching mode.
 * For 68020/030 this is 0.
 * For 68040, this is _PAGE_CACHE040 (cacheable, copyback).
 */
unsigned long mm_cachebits;
EXPORT_SYMBOL(mm_cachebits);
#endif

/*
 * Prior to calling these routines, the page should have been flushed
 * from both the cache and the ATC (address translation cache), or the
 * CPU might not notice that the cache setting for the page has been
 * changed. -jskov
 */
static inline void nocache_page(void *vaddr)
{
        unsigned long addr = (unsigned long)vaddr;

        if (CPU_IS_040_OR_060) {
                pte_t *ptep = virt_to_kpte(addr);

                *ptep = pte_mknocache(*ptep);
        }
}

static inline void cache_page(void *vaddr)
{
        unsigned long addr = (unsigned long)vaddr;

        if (CPU_IS_040_OR_060) {
                pte_t *ptep = virt_to_kpte(addr);

                *ptep = pte_mkcache(*ptep);
        }
}

/*
 * Motorola 680x0 user's manual recommends using uncached memory for address
 * translation tables.
 *
 * Seeing how the MMU can be external on (some of) these chips, that seems like
 * a very important recommendation to follow. Provide some helpers to combat
 * 'variation' amongst the users of this.
 */

void mmu_page_ctor(void *page)
{
        __flush_page_to_ram(page);
        flush_tlb_kernel_page(page);
        nocache_page(page);
}

void mmu_page_dtor(void *page)
{
        cache_page(page);
}

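/*
 * A typical pairing, as a sketch (this mirrors what get_pointer_table()
 * and free_pointer_table() below do):
 *
 *	void *page = (void *)get_zeroed_page(GFP_KERNEL);
 *	mmu_page_ctor(page);		// flush, then mark uncached
 *	...use the page for translation tables...
 *	mmu_page_dtor(page);		// make cacheable again
 *	free_page((unsigned long)page);
 */
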
/* ++andreas: {get,free}_pointer_table rewritten to use unused fields from
   struct page instead of separately kmalloced struct.  Stolen from
   arch/sparc/mm/srmmu.c ... */

typedef struct list_head ptable_desc;

static struct list_head ptable_list[2] = {
        LIST_HEAD_INIT(ptable_list[0]),
        LIST_HEAD_INIT(ptable_list[1]),
};

#define PD_PTABLE(page) ((ptable_desc *)&(virt_to_page(page)->lru))
#define PD_PAGE(ptable) (list_entry(ptable, struct page, lru))
#define PD_MARKBITS(dp) (*(unsigned int *)&PD_PAGE(dp)->index)

static const int ptable_shift[2] = {
        7+2, /* PGD, PMD */
        6+2, /* PTE */
};

#define ptable_size(type) (1U << ptable_shift[type])
#define ptable_mask(type) ((1U << (PAGE_SIZE / ptable_size(type))) - 1)
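
/*
 * Each page is carved into several tables: ptable_size() is one table's
 * size in bytes (1 << (7+2) = 512 for PGD/PMD pointer tables,
 * 1 << (6+2) = 256 for PTE tables) and ptable_mask() is the "all slots
 * free" bitmap for a page, one bit per table.  With 4k pages that is
 * 8 pointer tables (mask 0xff) or 16 page tables (mask 0xffff) per
 * page; PD_MARKBITS() keeps the bitmap in the otherwise unused
 * page->index field.
 */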

void __init init_pointer_table(void *table, int type)
{
        ptable_desc *dp;
        unsigned long ptable = (unsigned long)table;
        unsigned long page = ptable & PAGE_MASK;
        unsigned int mask = 1U << ((ptable - page)/ptable_size(type));

        dp = PD_PTABLE(page);
        if (!(PD_MARKBITS(dp) & mask)) {
                PD_MARKBITS(dp) = ptable_mask(type);
                list_add(dp, &ptable_list[type]);
        }

        PD_MARKBITS(dp) &= ~mask;
        pr_debug("init_pointer_table: %lx, %x\n", ptable, PD_MARKBITS(dp));

        /* unreserve the page so it's possible to free that page */
        __ClearPageReserved(PD_PAGE(dp));
        init_page_count(PD_PAGE(dp));
}

void *get_pointer_table(int type)
{
        ptable_desc *dp = ptable_list[type].next;
        unsigned int mask = list_empty(&ptable_list[type]) ? 0 : PD_MARKBITS(dp);
        unsigned int tmp, off;

        /*
         * For a pointer table for a user process address space, a
         * table is taken from a page allocated for the purpose.  Each
         * page can hold 8 pointer tables (or 16 PTE tables).  The
         * page's kernel mapping is made noncacheable.
         */
        if (mask == 0) {
                void *page;
                ptable_desc *new;

                page = (void *)get_zeroed_page(GFP_KERNEL);
                if (!page)
                        return NULL;

                if (type == TABLE_PTE) {
                        /*
                         * m68k doesn't have SPLIT_PTE_PTLOCKS because it
                         * does not support SMP.
                         */
                        pgtable_pte_page_ctor(virt_to_page(page));
                }

                mmu_page_ctor(page);

                new = PD_PTABLE(page);
                PD_MARKBITS(new) = ptable_mask(type) - 1;
                list_add_tail(new, dp);

                return (pmd_t *)page;
        }

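        /* Find the first free slot: the lowest set bit in the bitmap. */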
        for (tmp = 1, off = 0; (mask & tmp) == 0; tmp <<= 1, off += ptable_size(type))
                ;
        PD_MARKBITS(dp) = mask & ~tmp;
        if (!PD_MARKBITS(dp)) {
                /* move to end of list */
                list_move_tail(dp, &ptable_list[type]);
        }
        return page_address(PD_PAGE(dp)) + off;
}

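/*
 * Mark a table slot free again; returns 1 when the last table in the
 * page has been freed and the backing page itself released, 0
 * otherwise.
 */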
int free_pointer_table(void *table, int type)
{
        ptable_desc *dp;
        unsigned long ptable = (unsigned long)table;
        unsigned long page = ptable & PAGE_MASK;
        unsigned int mask = 1U << ((ptable - page)/ptable_size(type));

        dp = PD_PTABLE(page);
        if (PD_MARKBITS(dp) & mask)
                panic("table already free!");

        PD_MARKBITS(dp) |= mask;

        if (PD_MARKBITS(dp) == ptable_mask(type)) {
                /* all tables in page are free, free page */
                list_del(dp);
                mmu_page_dtor((void *)page);
                if (type == TABLE_PTE)
                        pgtable_pte_page_dtor(virt_to_page(page));
                free_page(page);
                return 1;
        } else if (ptable_list[type].next != dp) {
                /*
                 * move this descriptor to the front of the list, since
                 * it has one or more free tables.
                 */
                list_move(dp, &ptable_list[type]);
        }
        return 0;
}

/* size of memory already mapped in head.S */
extern __initdata unsigned long m68k_init_mapped_size;

extern unsigned long availmem;

static pte_t *last_pte_table __initdata = NULL;

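/*
 * PTE tables for the kernel mapping are carved out of whole pages:
 * whenever last_pte_table has advanced onto a page boundary the
 * current page is used up and a fresh one is allocated; consecutive
 * calls hand out consecutive PTRS_PER_PTE-entry slices of that page.
 */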
static pte_t * __init kernel_page_table(void)
{
        pte_t *pte_table = last_pte_table;

        if (PAGE_ALIGNED(last_pte_table)) {
                pte_table = memblock_alloc_low(PAGE_SIZE, PAGE_SIZE);
                if (!pte_table) {
                        panic("%s: Failed to allocate %lu bytes align=%lx\n",
                                        __func__, PAGE_SIZE, PAGE_SIZE);
                }

                clear_page(pte_table);
                mmu_page_ctor(pte_table);

                last_pte_table = pte_table;
        }

        last_pte_table += PTRS_PER_PTE;

        return pte_table;
}

static pmd_t *last_pmd_table __initdata = NULL;

static pmd_t * __init kernel_ptr_table(void)
{
        if (!last_pmd_table) {
                unsigned long pmd, last;
                int i;

                /*
                 * Find the last ptr table that was used in head.S and
                 * reuse the remaining space in that page for further
                 * ptr tables.
                 */
                last = (unsigned long)kernel_pg_dir;
                for (i = 0; i < PTRS_PER_PGD; i++) {
                        pud_t *pud = (pud_t *)(&kernel_pg_dir[i]);

                        if (!pud_present(*pud))
                                continue;
                        pmd = pgd_page_vaddr(kernel_pg_dir[i]);
                        if (pmd > last)
                                last = pmd;
                }

                last_pmd_table = (pmd_t *)last;
#ifdef DEBUG
                printk("kernel_ptr_table: %p\n", last_pmd_table);
#endif
        }

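        /*
         * Hand out the next PTRS_PER_PMD-entry slot; once the bumped
         * pointer lands on a page boundary the current page is full,
         * so a fresh uncached page is allocated instead.
         */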
        last_pmd_table += PTRS_PER_PMD;
        if (PAGE_ALIGNED(last_pmd_table)) {
                last_pmd_table = memblock_alloc_low(PAGE_SIZE, PAGE_SIZE);
                if (!last_pmd_table)
                        panic("%s: Failed to allocate %lu bytes align=%lx\n",
                              __func__, PAGE_SIZE, PAGE_SIZE);

                clear_page(last_pmd_table);
                mmu_page_ctor(last_pmd_table);
        }

        return last_pmd_table;
}

static void __init map_node(int node)
{
        unsigned long physaddr, virtaddr, size;
        pgd_t *pgd_dir;
        p4d_t *p4d_dir;
        pud_t *pud_dir;
        pmd_t *pmd_dir;
        pte_t *pte_dir;

        size = m68k_memory[node].size;
        physaddr = m68k_memory[node].addr;
        virtaddr = (unsigned long)phys_to_virt(physaddr);
        physaddr |= m68k_supervisor_cachemode |
                    _PAGE_PRESENT | _PAGE_ACCESSED | _PAGE_DIRTY;
        if (CPU_IS_040_OR_060)
                physaddr |= _PAGE_GLOBAL040;

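        /*
         * Map the chunk in the largest steps the CPU supports.  Only
         * the 68020/030 trees use "early termination" descriptors
         * here: a root- or pointer-level entry that maps a whole
         * PGDIR_SIZE or PMD_SIZE region without any lower-level
         * tables.  On the 040/060 every page gets its own pte.
         */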
        while (size > 0) {
#ifdef DEBUG
                if (!(virtaddr & (PMD_SIZE-1)))
                        printk("\npa=%#lx va=%#lx ", physaddr & PAGE_MASK,
                                virtaddr);
#endif
                pgd_dir = pgd_offset_k(virtaddr);
                if (virtaddr && CPU_IS_020_OR_030) {
                        if (!(virtaddr & (PGDIR_SIZE-1)) &&
                            size >= PGDIR_SIZE) {
#ifdef DEBUG
                                printk("[very early term]");
#endif
                                pgd_val(*pgd_dir) = physaddr;
                                size -= PGDIR_SIZE;
                                virtaddr += PGDIR_SIZE;
                                physaddr += PGDIR_SIZE;
                                continue;
                        }
                }
                p4d_dir = p4d_offset(pgd_dir, virtaddr);
                pud_dir = pud_offset(p4d_dir, virtaddr);
                if (!pud_present(*pud_dir)) {
                        pmd_dir = kernel_ptr_table();
#ifdef DEBUG
                        printk("[new pointer %p]", pmd_dir);
#endif
                        pud_set(pud_dir, pmd_dir);
                } else
                        pmd_dir = pmd_offset(pud_dir, virtaddr);

                if (CPU_IS_020_OR_030) {
                        if (virtaddr) {
#ifdef DEBUG
                                printk("[early term]");
#endif
                                pmd_val(*pmd_dir) = physaddr;
                                physaddr += PMD_SIZE;
                        } else {
                                int i;
#ifdef DEBUG
                                printk("[zero map]");
#endif
                                pte_dir = kernel_page_table();
                                pmd_set(pmd_dir, pte_dir);

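                                /*
                                 * Leave virtual page 0 unmapped so that
                                 * NULL pointer dereferences fault.
                                 */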
                                pte_val(*pte_dir++) = 0;
                                physaddr += PAGE_SIZE;
                                for (i = 1; i < PTRS_PER_PTE; physaddr += PAGE_SIZE, i++)
                                        pte_val(*pte_dir++) = physaddr;
                        }
                        size -= PMD_SIZE;
                        virtaddr += PMD_SIZE;
                } else {
                        if (!pmd_present(*pmd_dir)) {
#ifdef DEBUG
                                printk("[new table]");
#endif
                                pte_dir = kernel_page_table();
                                pmd_set(pmd_dir, pte_dir);
                        }
                        pte_dir = pte_offset_kernel(pmd_dir, virtaddr);

                        if (virtaddr) {
                                if (!pte_present(*pte_dir))
                                        pte_val(*pte_dir) = physaddr;
                        } else
                                pte_val(*pte_dir) = 0;
                        size -= PAGE_SIZE;
                        virtaddr += PAGE_SIZE;
                        physaddr += PAGE_SIZE;
                }
        }
#ifdef DEBUG
        printk("\n");
#endif
}

/*
 * Alternate definitions that are compile time constants, for
 * initializing protection_map.  The cachebits are fixed later.
 */
#define PAGE_NONE_C     __pgprot(_PAGE_PROTNONE | _PAGE_ACCESSED)
#define PAGE_SHARED_C   __pgprot(_PAGE_PRESENT | _PAGE_ACCESSED)
#define PAGE_COPY_C     __pgprot(_PAGE_PRESENT | _PAGE_RONLY | _PAGE_ACCESSED)
#define PAGE_READONLY_C __pgprot(_PAGE_PRESENT | _PAGE_RONLY | _PAGE_ACCESSED)

static pgprot_t protection_map[16] __ro_after_init = {
        [VM_NONE]                                       = PAGE_NONE_C,
        [VM_READ]                                       = PAGE_READONLY_C,
        [VM_WRITE]                                      = PAGE_COPY_C,
        [VM_WRITE | VM_READ]                            = PAGE_COPY_C,
        [VM_EXEC]                                       = PAGE_READONLY_C,
        [VM_EXEC | VM_READ]                             = PAGE_READONLY_C,
        [VM_EXEC | VM_WRITE]                            = PAGE_COPY_C,
        [VM_EXEC | VM_WRITE | VM_READ]                  = PAGE_COPY_C,
        [VM_SHARED]                                     = PAGE_NONE_C,
        [VM_SHARED | VM_READ]                           = PAGE_READONLY_C,
        [VM_SHARED | VM_WRITE]                          = PAGE_SHARED_C,
        [VM_SHARED | VM_WRITE | VM_READ]                = PAGE_SHARED_C,
        [VM_SHARED | VM_EXEC]                           = PAGE_READONLY_C,
        [VM_SHARED | VM_EXEC | VM_READ]                 = PAGE_READONLY_C,
        [VM_SHARED | VM_EXEC | VM_WRITE]                = PAGE_SHARED_C,
        [VM_SHARED | VM_EXEC | VM_WRITE | VM_READ]      = PAGE_SHARED_C
};
DECLARE_VM_GET_PAGE_PROT
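
/*
 * DECLARE_VM_GET_PAGE_PROT provides the generic vm_get_page_prot(),
 * which indexes protection_map[] by the VM_READ/VM_WRITE/VM_EXEC/
 * VM_SHARED bits of a VMA's flags.
 */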

/*
 * paging_init() continues the virtual memory environment setup which
 * was begun by the code in arch/head.S.
 */
void __init paging_init(void)
{
        unsigned long max_zone_pfn[MAX_NR_ZONES] = { 0, };
        unsigned long min_addr, max_addr;
        unsigned long addr;
        int i;

#ifdef DEBUG
        printk("start of paging_init (%p, %lx)\n", kernel_pg_dir, availmem);
#endif

        /* Fix the cache mode in the page descriptors for the 680[46]0.  */
        if (CPU_IS_040_OR_060) {
                int i;
#ifndef mm_cachebits
                mm_cachebits = _PAGE_CACHE040;
#endif
                for (i = 0; i < 16; i++)
                        pgprot_val(protection_map[i]) |= _PAGE_CACHE040;
        }

        min_addr = m68k_memory[0].addr;
        max_addr = min_addr + m68k_memory[0].size - 1;
        memblock_add_node(m68k_memory[0].addr, m68k_memory[0].size, 0,
                          MEMBLOCK_NONE);
        for (i = 1; i < m68k_num_memory;) {
                if (m68k_memory[i].addr < min_addr) {
                        printk("Ignoring memory chunk at 0x%lx:0x%lx before the first chunk\n",
                                m68k_memory[i].addr, m68k_memory[i].size);
                        printk("Fix your bootloader or use a memfile to make use of this area!\n");
                        m68k_num_memory--;
                        memmove(m68k_memory + i, m68k_memory + i + 1,
                                (m68k_num_memory - i) * sizeof(struct m68k_mem_info));
                        continue;
                }
                memblock_add_node(m68k_memory[i].addr, m68k_memory[i].size, i,
                                  MEMBLOCK_NONE);
                addr = m68k_memory[i].addr + m68k_memory[i].size - 1;
                if (addr > max_addr)
                        max_addr = addr;
                i++;
        }
        m68k_memoffset = min_addr - PAGE_OFFSET;
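        /*
         * fls() - 6 picks the shift so that the span from min_addr to
         * max_addr maps onto at most 64 slots of the virtual-to-node
         * lookup table.
         */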
        m68k_virt_to_node_shift = fls(max_addr - min_addr) - 6;

        module_fixup(NULL, __start_fixup, __stop_fixup);
        flush_icache();

        high_memory = phys_to_virt(max_addr) + 1;

        min_low_pfn = availmem >> PAGE_SHIFT;
        max_pfn = max_low_pfn = (max_addr >> PAGE_SHIFT) + 1;

        /* Reserve kernel text/data/bss and the memory allocated in head.S */
        memblock_reserve(m68k_memory[0].addr, availmem - m68k_memory[0].addr);

        /*
         * Map the physical memory available into the kernel virtual
         * address space. Make sure memblock will not try to allocate
         * pages beyond the memory we already mapped in head.S.
         */
        memblock_set_bottom_up(true);

        for (i = 0; i < m68k_num_memory; i++) {
                m68k_setup_node(i);
                map_node(i);
        }

        flush_tlb_all();

        early_memtest(min_addr, max_addr);

        /*
         * Allocate the shared zero page, which is mapped wherever a
         * page of zero-initialized memory is needed.
         */
        empty_zero_page = memblock_alloc(PAGE_SIZE, PAGE_SIZE);
        if (!empty_zero_page)
                panic("%s: Failed to allocate %lu bytes align=0x%lx\n",
                      __func__, PAGE_SIZE, PAGE_SIZE);

        /*
         * Set up the SFC/DFC registers (user data space).
         */
        set_fc(USER_DATA);

#ifdef DEBUG
        printk("before free_area_init\n");
#endif
        for (i = 0; i < m68k_num_memory; i++)
                if (node_present_pages(i))
                        node_set_state(i, N_NORMAL_MEMORY);

        max_zone_pfn[ZONE_DMA] = memblock_end_of_DRAM();
        free_area_init(max_zone_pfn);
}