Commit | Line | Data |
---|---|---|
09cfefb7 HC |
1 | /* SPDX-License-Identifier: GPL-2.0 */ |
2 | /* | |
3 | * Copyright (C) 2020-2022 Loongson Technology Corporation Limited | |
4 | */ | |
5 | #include <asm/asm.h> | |
09cfefb7 HC |
6 | #include <asm/loongarch.h> |
7 | #include <asm/page.h> | |
8 | #include <asm/pgtable.h> | |
9 | #include <asm/regdef.h> | |
10 | #include <asm/stackframe.h> | |
11 | ||
b681604e HC |
/* invtlb op 5: invalidate TLB entries matching {ASID, vaddr} whose G bit is 0 */
#define INVTLB_ADDR_GFALSE_AND_ASID	5

/*
 * log2(number of entries per table level): each level is one page of
 * 8-byte (2^3) entries, so the index field is PAGE_SHIFT - 3 bits wide.
 */
#define PTRS_PER_PGD_BITS	(PAGE_SHIFT - 3)
#define PTRS_PER_PUD_BITS	(PAGE_SHIFT - 3)
#define PTRS_PER_PMD_BITS	(PAGE_SHIFT - 3)
#define PTRS_PER_PTE_BITS	(PAGE_SHIFT - 3)
18 | ||
/*
 * tlb_do_page_fault <write>
 *
 * Expands to tlb_do_page_fault_0 (read fault) and tlb_do_page_fault_1
 * (write fault): save the full register frame, record the faulting
 * virtual address (CSR.BADV) in pt_regs, then call the C handler as
 * do_page_fault(regs, write, address) and return through the saved frame.
 */
	.macro tlb_do_page_fault, write
	SYM_CODE_START(tlb_do_page_fault_\write)
	SAVE_ALL
	csrrd		a2, LOONGARCH_CSR_BADV	/* a2 = faulting vaddr */
	move		a0, sp			/* a0 = struct pt_regs * */
	REG_S		a2, sp, PT_BVADDR	/* regs->csr_badvaddr = a2 */
	li.w		a1, \write		/* a1 = write flag (0 or 1) */
	bl		do_page_fault
	RESTORE_ALL_AND_RET
	SYM_CODE_END(tlb_do_page_fault_\write)
	.endm

	tlb_do_page_fault 0
	tlb_do_page_fault 1
33 | ||
/*
 * handle_tlb_protect - page-protection (TLB permission) exception entry.
 *
 * No fast path here: save state and call do_page_fault(regs, 0, badvaddr)
 * directly.  The indirect jirl via la_abs allows the handler to live
 * anywhere in the address space.
 */
SYM_CODE_START(handle_tlb_protect)
	BACKUP_T0T1
	SAVE_ALL
	move		a0, sp			/* a0 = struct pt_regs * */
	move		a1, zero		/* a1 = write flag = 0 */
	csrrd		a2, LOONGARCH_CSR_BADV	/* a2 = faulting vaddr */
	REG_S		a2, sp, PT_BVADDR
	la_abs		t0, do_page_fault
	jirl		ra, t0, 0
	RESTORE_ALL_AND_RET
SYM_CODE_END(handle_tlb_protect)
09cfefb7 | 45 | |
/*
 * handle_tlb_load - fast-path handler for TLB load (read) faults.
 *
 * Walks the software page table for CSR.BADV, sets _PAGE_VALID on a
 * present PTE and rewrites the matching TLB entry (tlbsrch + tlbwr),
 * or builds a huge-page entry pair and tlbfill's it.  If the PTE is
 * not present, falls back to tlb_do_page_fault_0 (C do_page_fault).
 *
 * Register use: t0/t1/ra are scratch, preserved across the handler in
 * the EXCEPTION_KS0/1/2 scratch CSRs and restored before ertn.
 */
SYM_CODE_START(handle_tlb_load)
	csrwr		t0, EXCEPTION_KS0	/* stash t0/t1/ra in scratch CSRs */
	csrwr		t1, EXCEPTION_KS1
	csrwr		ra, EXCEPTION_KS2

	/*
	 * The vmalloc handling is not in the hotpath.
	 */
	csrrd		t0, LOONGARCH_CSR_BADV
	bltz		t0, vmalloc_load	/* sign bit set: kernel-half vaddr */
	csrrd		t1, LOONGARCH_CSR_PGDL	/* user page directory base */

vmalloc_done_load:
	/* Get PGD offset in bytes: t1 = &pgd[pgd_index(badv)] */
	bstrpick.d	ra, t0, PTRS_PER_PGD_BITS + PGDIR_SHIFT - 1, PGDIR_SHIFT
	alsl.d		t1, ra, t1, 3		/* t1 += index * 8 */
#if CONFIG_PGTABLE_LEVELS > 3
	ld.d		t1, t1, 0		/* descend to PUD */
	bstrpick.d	ra, t0, PTRS_PER_PUD_BITS + PUD_SHIFT - 1, PUD_SHIFT
	alsl.d		t1, ra, t1, 3
#endif
#if CONFIG_PGTABLE_LEVELS > 2
	ld.d		t1, t1, 0		/* descend to PMD */
	bstrpick.d	ra, t0, PTRS_PER_PMD_BITS + PMD_SHIFT - 1, PMD_SHIFT
	alsl.d		t1, ra, t1, 3
#endif
	ld.d		ra, t1, 0		/* ra = pmd entry */

	/*
	 * For huge tlb entries, pmde doesn't contain an address but
	 * instead contains the tlb pte. Check the PAGE_HUGE bit and
	 * see if we need to jump to huge tlb processing.
	 */
	rotri.d		ra, ra, _PAGE_HUGE_SHIFT + 1	/* _PAGE_HUGE -> sign bit */
	bltz		ra, tlb_huge_update_load

	rotri.d		ra, ra, 64 - (_PAGE_HUGE_SHIFT + 1)	/* undo rotate */
	bstrpick.d	t0, t0, PTRS_PER_PTE_BITS + PAGE_SHIFT - 1, PAGE_SHIFT
	alsl.d		t1, t0, ra, _PTE_T_LOG2	/* t1 = &pte[pte_index(badv)] */

#ifdef CONFIG_SMP
smp_pgtable_change_load:
	ll.d		t0, t1, 0		/* atomic PTE update via ll/sc */
#else
	ld.d		t0, t1, 0
#endif
	andi		ra, t0, _PAGE_PRESENT
	beqz		ra, nopage_tlb_load	/* not present: C slow path */

	ori		t0, t0, _PAGE_VALID
#ifdef CONFIG_SMP
	sc.d		t0, t1, 0
	beqz		t0, smp_pgtable_change_load	/* sc failed: retry */
#else
	st.d		t0, t1, 0
#endif
	tlbsrch					/* find the matching TLB index */
	bstrins.d	t1, zero, 3, 3		/* clear bit 3: even entry of the PTE pair */
	ld.d		t0, t1, 0		/* even PTE -> ELO0 */
	ld.d		t1, t1, 8		/* odd PTE  -> ELO1 */
	csrwr		t0, LOONGARCH_CSR_TLBELO0
	csrwr		t1, LOONGARCH_CSR_TLBELO1
	tlbwr					/* rewrite entry found by tlbsrch */

	csrrd		t0, EXCEPTION_KS0	/* restore scratch registers */
	csrrd		t1, EXCEPTION_KS1
	csrrd		ra, EXCEPTION_KS2
	ertn

#ifdef CONFIG_64BIT
vmalloc_load:
	/* Kernel address: walk swapper_pg_dir instead of CSR.PGDL */
	la_abs		t1, swapper_pg_dir
	b		vmalloc_done_load
#endif

	/* This is the entry point of a huge page. */
tlb_huge_update_load:
#ifdef CONFIG_SMP
	ll.d		ra, t1, 0		/* re-load pmd under ll/sc */
#endif
	andi		t0, ra, _PAGE_PRESENT
	beqz		t0, nopage_tlb_load

#ifdef CONFIG_SMP
	ori		t0, ra, _PAGE_VALID
	sc.d		t0, t1, 0
	beqz		t0, tlb_huge_update_load	/* sc failed: retry */
	ori		t0, ra, _PAGE_VALID	/* sc clobbered t0; recompute PTE value */
#else
	rotri.d		ra, ra, 64 - (_PAGE_HUGE_SHIFT + 1)	/* undo entry rotate */
	ori		t0, ra, _PAGE_VALID
	st.d		t0, t1, 0
#endif
	/* Drop any stale non-global TLB entry for this {ASID, vaddr} */
	csrrd		ra, LOONGARCH_CSR_ASID
	csrrd		t1, LOONGARCH_CSR_BADV
	andi		ra, ra, CSR_ASID_ASID
	invtlb		INVTLB_ADDR_GFALSE_AND_ASID, ra, t1

	/*
	 * A huge PTE describes an area the size of the
	 * configured huge page size. This is twice the
	 * of the large TLB entry size we intend to use.
	 * A TLB entry half the size of the configured
	 * huge page size is configured into entrylo0
	 * and entrylo1 to cover the contiguous huge PTE
	 * address space.
	 */
	/* Huge page: Move Global bit */
	xori		t0, t0, _PAGE_HUGE	/* clear the HUGE marker bit */
	lu12i.w		t1, _PAGE_HGLOBAL >> 12
	and		t1, t0, t1
	srli.d		t1, t1, (_PAGE_HGLOBAL_SHIFT - _PAGE_GLOBAL_SHIFT)
	or		t0, t0, t1		/* HGLOBAL -> GLOBAL position */

	move		ra, t0			/* csrwr clobbers rd; keep t0 */
	csrwr		ra, LOONGARCH_CSR_TLBELO0

	/* Convert to entrylo1: advance PFN by half the huge page */
	addi.d		t1, zero, 1
	slli.d		t1, t1, (HPAGE_SHIFT - 1)
	add.d		t0, t0, t1
	csrwr		t0, LOONGARCH_CSR_TLBELO1

	/* Set huge page tlb entry size */
	addu16i.d	t0, zero, (CSR_TLBIDX_PS >> 16)
	addu16i.d	t1, zero, (PS_HUGE_SIZE << (CSR_TLBIDX_PS_SHIFT - 16))
	csrxchg		t1, t0, LOONGARCH_CSR_TLBIDX

	tlbfill

	/* Reset default page size */
	addu16i.d	t0, zero, (CSR_TLBIDX_PS >> 16)
	addu16i.d	t1, zero, (PS_DEFAULT_SIZE << (CSR_TLBIDX_PS_SHIFT - 16))
	csrxchg		t1, t0, LOONGARCH_CSR_TLBIDX

	csrrd		t0, EXCEPTION_KS0	/* restore scratch registers */
	csrrd		t1, EXCEPTION_KS1
	csrrd		ra, EXCEPTION_KS2
	ertn

nopage_tlb_load:
	dbar		0x700			/* barrier (hint 0x700) before slow path */
	csrrd		ra, EXCEPTION_KS2	/* restore ra for the trampoline */
	la_abs		t0, tlb_do_page_fault_0
	jr		t0
SYM_CODE_END(handle_tlb_load)
09cfefb7 | 191 | |
/*
 * handle_tlb_load_ptw - load-fault entry used in the PTW configuration.
 *
 * No software fast path: stash t0/t1 in the KS0/KS1 scratch CSRs and
 * jump straight to the common read-fault trampoline.
 * NOTE(review): "ptw" presumed to mean the hardware page table walker
 * handles refills in this mode — confirm against CPU feature setup.
 */
SYM_CODE_START(handle_tlb_load_ptw)
	csrwr		t0, LOONGARCH_CSR_KS0
	csrwr		t1, LOONGARCH_CSR_KS1
	la_abs		t0, tlb_do_page_fault_0
	jr		t0
SYM_CODE_END(handle_tlb_load_ptw)
01158487 | 198 | |
/*
 * handle_tlb_store - fast-path handler for TLB store (write) faults.
 *
 * Same structure as handle_tlb_load, but the PTE must be both PRESENT
 * and WRITE-enabled, and the fast path sets VALID|DIRTY|MODIFIED.
 * Falls back to tlb_do_page_fault_1 (write fault) otherwise.
 *
 * t0/t1/ra are preserved via the EXCEPTION_KS0/1/2 scratch CSRs.
 */
SYM_CODE_START(handle_tlb_store)
	csrwr		t0, EXCEPTION_KS0	/* stash t0/t1/ra in scratch CSRs */
	csrwr		t1, EXCEPTION_KS1
	csrwr		ra, EXCEPTION_KS2

	/*
	 * The vmalloc handling is not in the hotpath.
	 */
	csrrd		t0, LOONGARCH_CSR_BADV
	bltz		t0, vmalloc_store	/* sign bit set: kernel-half vaddr */
	csrrd		t1, LOONGARCH_CSR_PGDL	/* user page directory base */

vmalloc_done_store:
	/* Get PGD offset in bytes: t1 = &pgd[pgd_index(badv)] */
	bstrpick.d	ra, t0, PTRS_PER_PGD_BITS + PGDIR_SHIFT - 1, PGDIR_SHIFT
	alsl.d		t1, ra, t1, 3		/* t1 += index * 8 */
#if CONFIG_PGTABLE_LEVELS > 3
	ld.d		t1, t1, 0		/* descend to PUD */
	bstrpick.d	ra, t0, PTRS_PER_PUD_BITS + PUD_SHIFT - 1, PUD_SHIFT
	alsl.d		t1, ra, t1, 3
#endif
#if CONFIG_PGTABLE_LEVELS > 2
	ld.d		t1, t1, 0		/* descend to PMD */
	bstrpick.d	ra, t0, PTRS_PER_PMD_BITS + PMD_SHIFT - 1, PMD_SHIFT
	alsl.d		t1, ra, t1, 3
#endif
	ld.d		ra, t1, 0		/* ra = pmd entry */

	/*
	 * For huge tlb entries, pmde doesn't contain an address but
	 * instead contains the tlb pte. Check the PAGE_HUGE bit and
	 * see if we need to jump to huge tlb processing.
	 */
	rotri.d		ra, ra, _PAGE_HUGE_SHIFT + 1	/* _PAGE_HUGE -> sign bit */
	bltz		ra, tlb_huge_update_store

	rotri.d		ra, ra, 64 - (_PAGE_HUGE_SHIFT + 1)	/* undo rotate */
	bstrpick.d	t0, t0, PTRS_PER_PTE_BITS + PAGE_SHIFT - 1, PAGE_SHIFT
	alsl.d		t1, t0, ra, _PTE_T_LOG2	/* t1 = &pte[pte_index(badv)] */

#ifdef CONFIG_SMP
smp_pgtable_change_store:
	ll.d		t0, t1, 0		/* atomic PTE update via ll/sc */
#else
	ld.d		t0, t1, 0
#endif
	/* Require PRESENT and WRITE both set (xori leaves 0 iff both set) */
	andi		ra, t0, _PAGE_PRESENT | _PAGE_WRITE
	xori		ra, ra, _PAGE_PRESENT | _PAGE_WRITE
	bnez		ra, nopage_tlb_store

	ori		t0, t0, (_PAGE_VALID | _PAGE_DIRTY | _PAGE_MODIFIED)
#ifdef CONFIG_SMP
	sc.d		t0, t1, 0
	beqz		t0, smp_pgtable_change_store	/* sc failed: retry */
#else
	st.d		t0, t1, 0
#endif
	tlbsrch					/* find the matching TLB index */
	bstrins.d	t1, zero, 3, 3		/* clear bit 3: even entry of the PTE pair */
	ld.d		t0, t1, 0		/* even PTE -> ELO0 */
	ld.d		t1, t1, 8		/* odd PTE  -> ELO1 */
	csrwr		t0, LOONGARCH_CSR_TLBELO0
	csrwr		t1, LOONGARCH_CSR_TLBELO1
	tlbwr					/* rewrite entry found by tlbsrch */

	csrrd		t0, EXCEPTION_KS0	/* restore scratch registers */
	csrrd		t1, EXCEPTION_KS1
	csrrd		ra, EXCEPTION_KS2
	ertn

#ifdef CONFIG_64BIT
vmalloc_store:
	/* Kernel address: walk swapper_pg_dir instead of CSR.PGDL */
	la_abs		t1, swapper_pg_dir
	b		vmalloc_done_store
#endif

	/* This is the entry point of a huge page. */
tlb_huge_update_store:
#ifdef CONFIG_SMP
	ll.d		ra, t1, 0		/* re-load pmd under ll/sc */
#endif
	andi		t0, ra, _PAGE_PRESENT | _PAGE_WRITE
	xori		t0, t0, _PAGE_PRESENT | _PAGE_WRITE
	bnez		t0, nopage_tlb_store

#ifdef CONFIG_SMP
	ori		t0, ra, (_PAGE_VALID | _PAGE_DIRTY | _PAGE_MODIFIED)
	sc.d		t0, t1, 0
	beqz		t0, tlb_huge_update_store	/* sc failed: retry */
	ori		t0, ra, (_PAGE_VALID | _PAGE_DIRTY | _PAGE_MODIFIED)	/* sc clobbered t0 */
#else
	rotri.d		ra, ra, 64 - (_PAGE_HUGE_SHIFT + 1)	/* undo entry rotate */
	ori		t0, ra, (_PAGE_VALID | _PAGE_DIRTY | _PAGE_MODIFIED)
	st.d		t0, t1, 0
#endif
	/* Drop any stale non-global TLB entry for this {ASID, vaddr} */
	csrrd		ra, LOONGARCH_CSR_ASID
	csrrd		t1, LOONGARCH_CSR_BADV
	andi		ra, ra, CSR_ASID_ASID
	invtlb		INVTLB_ADDR_GFALSE_AND_ASID, ra, t1

	/*
	 * A huge PTE describes an area the size of the
	 * configured huge page size. This is twice the
	 * of the large TLB entry size we intend to use.
	 * A TLB entry half the size of the configured
	 * huge page size is configured into entrylo0
	 * and entrylo1 to cover the contiguous huge PTE
	 * address space.
	 */
	/* Huge page: Move Global bit */
	xori		t0, t0, _PAGE_HUGE	/* clear the HUGE marker bit */
	lu12i.w		t1, _PAGE_HGLOBAL >> 12
	and		t1, t0, t1
	srli.d		t1, t1, (_PAGE_HGLOBAL_SHIFT - _PAGE_GLOBAL_SHIFT)
	or		t0, t0, t1		/* HGLOBAL -> GLOBAL position */

	move		ra, t0			/* csrwr clobbers rd; keep t0 */
	csrwr		ra, LOONGARCH_CSR_TLBELO0

	/* Convert to entrylo1: advance PFN by half the huge page */
	addi.d		t1, zero, 1
	slli.d		t1, t1, (HPAGE_SHIFT - 1)
	add.d		t0, t0, t1
	csrwr		t0, LOONGARCH_CSR_TLBELO1

	/* Set huge page tlb entry size */
	addu16i.d	t0, zero, (CSR_TLBIDX_PS >> 16)
	addu16i.d	t1, zero, (PS_HUGE_SIZE << (CSR_TLBIDX_PS_SHIFT - 16))
	csrxchg		t1, t0, LOONGARCH_CSR_TLBIDX

	tlbfill

	/* Reset default page size */
	addu16i.d	t0, zero, (CSR_TLBIDX_PS >> 16)
	addu16i.d	t1, zero, (PS_DEFAULT_SIZE << (CSR_TLBIDX_PS_SHIFT - 16))
	csrxchg		t1, t0, LOONGARCH_CSR_TLBIDX

	csrrd		t0, EXCEPTION_KS0	/* restore scratch registers */
	csrrd		t1, EXCEPTION_KS1
	csrrd		ra, EXCEPTION_KS2
	ertn

nopage_tlb_store:
	dbar		0x700			/* barrier (hint 0x700) before slow path */
	csrrd		ra, EXCEPTION_KS2	/* restore ra for the trampoline */
	la_abs		t0, tlb_do_page_fault_1
	jr		t0
SYM_CODE_END(handle_tlb_store)
09cfefb7 | 347 | |
/*
 * handle_tlb_store_ptw - store-fault entry used in the PTW configuration.
 *
 * No software fast path: stash t0/t1 in the KS0/KS1 scratch CSRs and
 * jump straight to the common write-fault trampoline.
 * NOTE(review): "ptw" presumed to mean the hardware page table walker
 * handles refills in this mode — confirm against CPU feature setup.
 */
SYM_CODE_START(handle_tlb_store_ptw)
	csrwr		t0, LOONGARCH_CSR_KS0
	csrwr		t1, LOONGARCH_CSR_KS1
	la_abs		t0, tlb_do_page_fault_1
	jr		t0
SYM_CODE_END(handle_tlb_store_ptw)
01158487 | 354 | |
/*
 * handle_tlb_modify - fast-path handler for TLB modify (dirty) faults.
 *
 * Same structure as handle_tlb_store, except the fault fires on a
 * present-but-clean entry, so only _PAGE_WRITE is checked before the
 * fast path sets VALID|DIRTY|MODIFIED.  Read-only pages fall back to
 * tlb_do_page_fault_1 (write fault).
 *
 * t0/t1/ra are preserved via the EXCEPTION_KS0/1/2 scratch CSRs.
 */
SYM_CODE_START(handle_tlb_modify)
	csrwr		t0, EXCEPTION_KS0	/* stash t0/t1/ra in scratch CSRs */
	csrwr		t1, EXCEPTION_KS1
	csrwr		ra, EXCEPTION_KS2

	/*
	 * The vmalloc handling is not in the hotpath.
	 */
	csrrd		t0, LOONGARCH_CSR_BADV
	bltz		t0, vmalloc_modify	/* sign bit set: kernel-half vaddr */
	csrrd		t1, LOONGARCH_CSR_PGDL	/* user page directory base */

vmalloc_done_modify:
	/* Get PGD offset in bytes: t1 = &pgd[pgd_index(badv)] */
	bstrpick.d	ra, t0, PTRS_PER_PGD_BITS + PGDIR_SHIFT - 1, PGDIR_SHIFT
	alsl.d		t1, ra, t1, 3		/* t1 += index * 8 */
#if CONFIG_PGTABLE_LEVELS > 3
	ld.d		t1, t1, 0		/* descend to PUD */
	bstrpick.d	ra, t0, PTRS_PER_PUD_BITS + PUD_SHIFT - 1, PUD_SHIFT
	alsl.d		t1, ra, t1, 3
#endif
#if CONFIG_PGTABLE_LEVELS > 2
	ld.d		t1, t1, 0		/* descend to PMD */
	bstrpick.d	ra, t0, PTRS_PER_PMD_BITS + PMD_SHIFT - 1, PMD_SHIFT
	alsl.d		t1, ra, t1, 3
#endif
	ld.d		ra, t1, 0		/* ra = pmd entry */

	/*
	 * For huge tlb entries, pmde doesn't contain an address but
	 * instead contains the tlb pte. Check the PAGE_HUGE bit and
	 * see if we need to jump to huge tlb processing.
	 */
	rotri.d		ra, ra, _PAGE_HUGE_SHIFT + 1	/* _PAGE_HUGE -> sign bit */
	bltz		ra, tlb_huge_update_modify

	rotri.d		ra, ra, 64 - (_PAGE_HUGE_SHIFT + 1)	/* undo rotate */
	bstrpick.d	t0, t0, PTRS_PER_PTE_BITS + PAGE_SHIFT - 1, PAGE_SHIFT
	alsl.d		t1, t0, ra, _PTE_T_LOG2	/* t1 = &pte[pte_index(badv)] */

#ifdef CONFIG_SMP
smp_pgtable_change_modify:
	ll.d		t0, t1, 0		/* atomic PTE update via ll/sc */
#else
	ld.d		t0, t1, 0
#endif
	andi		ra, t0, _PAGE_WRITE
	beqz		ra, nopage_tlb_modify	/* read-only: C slow path */

	ori		t0, t0, (_PAGE_VALID | _PAGE_DIRTY | _PAGE_MODIFIED)
#ifdef CONFIG_SMP
	sc.d		t0, t1, 0
	beqz		t0, smp_pgtable_change_modify	/* sc failed: retry */
#else
	st.d		t0, t1, 0
#endif
	tlbsrch					/* find the matching TLB index */
	bstrins.d	t1, zero, 3, 3		/* clear bit 3: even entry of the PTE pair */
	ld.d		t0, t1, 0		/* even PTE -> ELO0 */
	ld.d		t1, t1, 8		/* odd PTE  -> ELO1 */
	csrwr		t0, LOONGARCH_CSR_TLBELO0
	csrwr		t1, LOONGARCH_CSR_TLBELO1
	tlbwr					/* rewrite entry found by tlbsrch */

	csrrd		t0, EXCEPTION_KS0	/* restore scratch registers */
	csrrd		t1, EXCEPTION_KS1
	csrrd		ra, EXCEPTION_KS2
	ertn

#ifdef CONFIG_64BIT
vmalloc_modify:
	/* Kernel address: walk swapper_pg_dir instead of CSR.PGDL */
	la_abs		t1, swapper_pg_dir
	b		vmalloc_done_modify
#endif

	/* This is the entry point of a huge page. */
tlb_huge_update_modify:
#ifdef CONFIG_SMP
	ll.d		ra, t1, 0		/* re-load pmd under ll/sc */
#endif
	andi		t0, ra, _PAGE_WRITE
	beqz		t0, nopage_tlb_modify

#ifdef CONFIG_SMP
	ori		t0, ra, (_PAGE_VALID | _PAGE_DIRTY | _PAGE_MODIFIED)
	sc.d		t0, t1, 0
	beqz		t0, tlb_huge_update_modify	/* sc failed: retry */
	ori		t0, ra, (_PAGE_VALID | _PAGE_DIRTY | _PAGE_MODIFIED)	/* sc clobbered t0 */
#else
	rotri.d		ra, ra, 64 - (_PAGE_HUGE_SHIFT + 1)	/* undo entry rotate */
	ori		t0, ra, (_PAGE_VALID | _PAGE_DIRTY | _PAGE_MODIFIED)
	st.d		t0, t1, 0
#endif
	/* Drop any stale non-global TLB entry for this {ASID, vaddr} */
	csrrd		ra, LOONGARCH_CSR_ASID
	csrrd		t1, LOONGARCH_CSR_BADV
	andi		ra, ra, CSR_ASID_ASID
	invtlb		INVTLB_ADDR_GFALSE_AND_ASID, ra, t1

	/*
	 * A huge PTE describes an area the size of the
	 * configured huge page size. This is twice the
	 * of the large TLB entry size we intend to use.
	 * A TLB entry half the size of the configured
	 * huge page size is configured into entrylo0
	 * and entrylo1 to cover the contiguous huge PTE
	 * address space.
	 */
	/* Huge page: Move Global bit */
	xori		t0, t0, _PAGE_HUGE	/* clear the HUGE marker bit */
	lu12i.w		t1, _PAGE_HGLOBAL >> 12
	and		t1, t0, t1
	srli.d		t1, t1, (_PAGE_HGLOBAL_SHIFT - _PAGE_GLOBAL_SHIFT)
	or		t0, t0, t1		/* HGLOBAL -> GLOBAL position */

	move		ra, t0			/* csrwr clobbers rd; keep t0 */
	csrwr		ra, LOONGARCH_CSR_TLBELO0

	/* Convert to entrylo1: advance PFN by half the huge page */
	addi.d		t1, zero, 1
	slli.d		t1, t1, (HPAGE_SHIFT - 1)
	add.d		t0, t0, t1
	csrwr		t0, LOONGARCH_CSR_TLBELO1

	/* Set huge page tlb entry size */
	addu16i.d	t0, zero, (CSR_TLBIDX_PS >> 16)
	addu16i.d	t1, zero, (PS_HUGE_SIZE << (CSR_TLBIDX_PS_SHIFT - 16))
	csrxchg		t1, t0, LOONGARCH_CSR_TLBIDX

	tlbfill

	/* Reset default page size */
	addu16i.d	t0, zero, (CSR_TLBIDX_PS >> 16)
	addu16i.d	t1, zero, (PS_DEFAULT_SIZE << (CSR_TLBIDX_PS_SHIFT - 16))
	csrxchg		t1, t0, LOONGARCH_CSR_TLBIDX

	csrrd		t0, EXCEPTION_KS0	/* restore scratch registers */
	csrrd		t1, EXCEPTION_KS1
	csrrd		ra, EXCEPTION_KS2
	ertn

nopage_tlb_modify:
	dbar		0x700			/* barrier (hint 0x700) before slow path */
	csrrd		ra, EXCEPTION_KS2	/* restore ra for the trampoline */
	la_abs		t0, tlb_do_page_fault_1
	jr		t0
SYM_CODE_END(handle_tlb_modify)
09cfefb7 | 501 | |
/*
 * handle_tlb_modify_ptw - modify-fault entry used in the PTW configuration.
 *
 * No software fast path: stash t0/t1 in the KS0/KS1 scratch CSRs and
 * jump straight to the common write-fault trampoline.
 * NOTE(review): "ptw" presumed to mean the hardware page table walker
 * handles refills in this mode — confirm against CPU feature setup.
 */
SYM_CODE_START(handle_tlb_modify_ptw)
	csrwr		t0, LOONGARCH_CSR_KS0
	csrwr		t1, LOONGARCH_CSR_KS1
	la_abs		t0, tlb_do_page_fault_1
	jr		t0
SYM_CODE_END(handle_tlb_modify_ptw)
01158487 | 508 | |
/*
 * handle_tlb_refill - TLB refill exception handler.
 *
 * Runs in the special refill context where only CSR.TLBRSAVE is
 * available as scratch.  Walks the page table with lddir from the
 * level-3 directory down, then ldpte loads the even/odd PTE pair
 * (which feeds the TLB entrylo state), tlbfill writes the entry,
 * and t0 is restored before ertn.
 */
SYM_CODE_START(handle_tlb_refill)
	csrwr		t0, LOONGARCH_CSR_TLBRSAVE	/* stash t0 in refill-save CSR */
	csrrd		t0, LOONGARCH_CSR_PGD		/* current page directory base */
	lddir		t0, t0, 3			/* walk top level */
#if CONFIG_PGTABLE_LEVELS > 3
	lddir		t0, t0, 2
#endif
#if CONFIG_PGTABLE_LEVELS > 2
	lddir		t0, t0, 1
#endif
	ldpte		t0, 0				/* load even PTE of the pair */
	ldpte		t0, 1				/* load odd PTE of the pair */
	tlbfill
	csrrd		t0, LOONGARCH_CSR_TLBRSAVE	/* restore t0 */
	ertn
SYM_CODE_END(handle_tlb_refill)