// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * OpenRISC init.c
 *
 * Linux architectural port borrowing liberally from similar works of
 * others.  All original copyrights apply as per the original source
 * declaration.
 *
 * Modifications for the OpenRISC architecture:
 * Copyright (C) 2003 Matjaz Breskvar <phoenix@bsemi.com>
 * Copyright (C) 2010-2011 Jonas Bonn <jonas@southpole.se>
 */

#include <linux/signal.h>
#include <linux/sched.h>
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/string.h>
#include <linux/types.h>
#include <linux/ptrace.h>
#include <linux/mman.h>
#include <linux/mm.h>
#include <linux/swap.h>
#include <linux/smp.h>
#include <linux/memblock.h>
#include <linux/init.h>
#include <linux/delay.h>
#include <linux/blkdev.h>	/* for initrd_* */
#include <linux/pagemap.h>

#include <asm/pgalloc.h>
#include <asm/dma.h>
#include <asm/io.h>
#include <asm/tlb.h>
#include <asm/mmu_context.h>
#include <asm/fixmap.h>
#include <asm/tlbflush.h>
#include <asm/sections.h>

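/*
 * mem_init_done flags the end of early boot memory setup; other parts
 * of the port (e.g. ioremap()) test it to choose between early fixmap
 * mappings and the regular vmalloc-based path.
 */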
int mem_init_done;

DEFINE_PER_CPU(struct mmu_gather, mmu_gathers);

static void __init zone_sizes_init(void)
{
	unsigned long max_zone_pfn[MAX_NR_ZONES] = { 0 };

	/*
	 * We use only ZONE_NORMAL; this port has no highmem and no
	 * separate DMA zone.
	 */
	max_zone_pfn[ZONE_NORMAL] = max_low_pfn;

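	/* free_area_init() takes the highest PFN of each zone and
	 * builds the zone lists and mem_map from that.
	 */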
	free_area_init(max_zone_pfn);
}

/* These mark extents of read-only kernel pages...
 * ...from vmlinux.lds.S
 */
extern const char _s_kernel_ro[], _e_kernel_ro[];

/*
 * Map all physical memory into the kernel's address space.
 *
 * This is explicitly coded for two-level page tables, so if you need
 * something else then this needs to change.
 */
static void __init map_ram(void)
{
	phys_addr_t start, end;
	unsigned long v, p, e;
	pgprot_t prot;
	pgd_t *pge;
	p4d_t *p4e;
	pud_t *pue;
	pmd_t *pme;
	pte_t *pte;
	u64 i;

	v = PAGE_OFFSET;

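	/*
	 * Walk every usable physical memory range: 'p' tracks the
	 * physical address, 'v' the corresponding kernel-virtual
	 * address, and 'e' the end of the current range.
	 */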
	for_each_mem_range(i, &start, &end) {
		p = (u32) start & PAGE_MASK;
		e = (u32) end;

		v = (u32) __va(p);
		pge = pgd_offset_k(v);

		while (p < e) {
			int j;

			p4e = p4d_offset(pge, v);
			pue = pud_offset(p4e, v);
			pme = pmd_offset(pue, v);

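			/*
			 * With two-level tables the p4d/pud/pmd levels
			 * are folded; the check below panics if they
			 * ever stop collapsing back onto the pgd entry
			 * itself.
			 */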
			if ((u32) pue != (u32) pge || (u32) pme != (u32) pge) {
				panic("%s: OR1K kernel hardcoded for two-level page tables",
				      __func__);
			}

			/* Alloc one page for holding PTEs... */
			pte = memblock_alloc_raw(PAGE_SIZE, PAGE_SIZE);
			if (!pte)
				panic("%s: Failed to allocate page for PTEs\n",
				      __func__);
			set_pmd(pme, __pmd(_KERNPG_TABLE + __pa(pte)));

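			/*
			 * Pages inside [_s_kernel_ro, _e_kernel_ro) hold
			 * kernel text/rodata and are mapped read-only;
			 * everything else is mapped read-write.
			 */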
			/* Fill the newly allocated page with PTEs */
			for (j = 0; p < e && j < PTRS_PER_PTE;
			     v += PAGE_SIZE, p += PAGE_SIZE, j++, pte++) {
				if (v >= (u32) _e_kernel_ro ||
				    v < (u32) _s_kernel_ro)
					prot = PAGE_KERNEL;
				else
					prot = PAGE_KERNEL_RO;

				set_pte(pte, mk_pte_phys(p, prot));
			}

			pge++;
		}

		/* 'region' was never initialized after the conversion to
		 * for_each_mem_range(); print the range bounds directly.
		 */
		printk(KERN_INFO "%s: Memory: 0x%x-0x%x\n", __func__,
		       (u32) start, (u32) end);
	}
}

void __init paging_init(void)
{
	extern void tlb_init(void);

	int i;

	printk(KERN_INFO "Setting up paging and PTEs.\n");

	/* clear out the init_mm.pgd that will contain the kernel's mappings */

	for (i = 0; i < PTRS_PER_PGD; i++)
		swapper_pg_dir[i] = __pgd(0);

	/* make sure the current pgd table points to something sane
	 * (even if it is most probably not used until the next
	 * switch_mm)
	 */
	current_pgd[smp_processor_id()] = init_mm.pgd;

	map_ram();

	zone_sizes_init();

	/* self modifying code ;) */
	/* Since the old TLB miss handler has been running up until now,
	 * the kernel pages are still all RW, so we can still modify the
	 * text directly... after this change and a TLB flush, the kernel
	 * pages will become RO.
	 */
	{
		extern unsigned long dtlb_miss_handler;
		extern unsigned long itlb_miss_handler;

		unsigned long *dtlb_vector = __va(0x900);
		unsigned long *itlb_vector = __va(0xa00);

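		/* Each vector gets patched with a single l.j to the
		 * out-of-line handler.  The l.j opcode is the all-zero
		 * bit pattern, so storing the pc-relative offset in
		 * instructions (hence the >> 2) yields a complete jump,
		 * provided the handler lies within forward branch range.
		 */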
		printk(KERN_INFO "itlb_miss_handler %p\n", &itlb_miss_handler);
		*itlb_vector = ((unsigned long)&itlb_miss_handler -
				(unsigned long)itlb_vector) >> 2;

		/* Soft ordering constraint to ensure that dtlb_vector is
		 * the last thing updated
		 */
		barrier();

		printk(KERN_INFO "dtlb_miss_handler %p\n", &dtlb_miss_handler);
		*dtlb_vector = ((unsigned long)&dtlb_miss_handler -
				(unsigned long)dtlb_vector) >> 2;
	}

	/* Soft ordering constraint to ensure that cache invalidation and
	 * TLB flush really happen _after_ code has been modified.
	 */
	barrier();

	/* Invalidate instruction caches after code modification */
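	/* (SPR_ICBIR is the instruction cache block invalidate register:
	 * writing an address evicts the icache block holding it, so the
	 * CPU refetches the freshly patched vectors from memory.)
	 */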
	mtspr(SPR_ICBIR, 0x900);
	mtspr(SPR_ICBIR, 0xa00);

	/* The new TLB miss handlers and kernel page tables are now in
	 * place.  Make sure that page flags get updated for all pages
	 * in the TLB by flushing the TLB and forcing all TLB entries to
	 * be recreated from their page table flags.
	 */
	flush_tlb_all();
}

void __init mem_init(void)
{
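	/* mem_map is allocated by free_area_init(), reached from
	 * paging_init() -> zone_sizes_init(), so it must exist by now.
	 */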
	BUG_ON(!mem_map);

	max_mapnr = max_low_pfn;
	high_memory = (void *)__va(max_low_pfn * PAGE_SIZE);

	/* clear the zero-page */
	memset((void *)empty_zero_page, 0, PAGE_SIZE);

	/* this will put all low memory onto the freelists */
	memblock_free_all();

	mem_init_print_info(NULL);

	printk("mem_init_done ...........................................\n");
	mem_init_done = 1;
}