/* arch/loongarch/mm/tlbex.S — LoongArch TLB exception handlers */
/* SPDX-License-Identifier: GPL-2.0 */
/*
 * Copyright (C) 2020-2022 Loongson Technology Corporation Limited
 */
#include <asm/asm.h>
#include <asm/loongarch.h>
#include <asm/page.h>
#include <asm/pgtable.h>
#include <asm/regdef.h>
#include <asm/stackframe.h>

/*
 * invtlb operation 5: invalidate non-global (G=0) entries matching
 * both the given ASID and virtual address.
 */
#define INVTLB_ADDR_GFALSE_AND_ASID	5

/* Each table level holds PAGE_SIZE / sizeof(u64) entries (8-byte PTEs). */
#define PTRS_PER_PGD_BITS	(PAGE_SHIFT - 3)
#define PTRS_PER_PUD_BITS	(PAGE_SHIFT - 3)
#define PTRS_PER_PMD_BITS	(PAGE_SHIFT - 3)
#define PTRS_PER_PTE_BITS	(PAGE_SHIFT - 3)
18
/*
 * tlb_do_page_fault_\write: trampoline into the C page-fault handler.
 * Saves the full register frame, passes pt_regs (a0), the write flag
 * (a1) and the faulting address from CSR.BADV (a2) to do_page_fault().
 * Instantiated twice below: _0 for reads, _1 for writes.
 */
	.macro tlb_do_page_fault, write
	SYM_CODE_START(tlb_do_page_fault_\write)
	UNWIND_HINT_UNDEFINED
	SAVE_ALL
	csrrd		a2, LOONGARCH_CSR_BADV
	move		a0, sp			/* a0 = pt_regs (saved frame) */
	REG_S		a2, sp, PT_BVADDR	/* record bad vaddr in the frame */
	li.w		a1, \write
	bl		do_page_fault
	RESTORE_ALL_AND_RET
	SYM_CODE_END(tlb_do_page_fault_\write)
	.endm

	tlb_do_page_fault	0
	tlb_do_page_fault	1
34
/*
 * TLB protection-violation exception: no fast path; go straight to
 * the C page-fault handler with write=0 in a1.
 */
SYM_CODE_START(handle_tlb_protect)
	UNWIND_HINT_UNDEFINED
	BACKUP_T0T1
	SAVE_ALL
	move		a0, sp			/* a0 = pt_regs */
	move		a1, zero		/* a1 = 0 (not a write fault) */
	csrrd		a2, LOONGARCH_CSR_BADV
	REG_S		a2, sp, PT_BVADDR
	la_abs		t0, do_page_fault
	jirl		ra, t0, 0
	RESTORE_ALL_AND_RET
SYM_CODE_END(handle_tlb_protect)
09cfefb7 47
/*
 * TLB load-invalid exception fast path.
 * Saves t0/t1/ra in scratch CSRs KS0-KS2, walks the page table rooted
 * at CSR.PGDL (or swapper_pg_dir for kernel/vmalloc addresses), sets
 * _PAGE_VALID in the PTE and writes the entry pair into TLBELO0/1.
 * Falls back to tlb_do_page_fault_0 when the PTE is not present.
 */
SYM_CODE_START(handle_tlb_load)
	UNWIND_HINT_UNDEFINED
	csrwr		t0, EXCEPTION_KS0
	csrwr		t1, EXCEPTION_KS1
	csrwr		ra, EXCEPTION_KS2

	/*
	 * The vmalloc handling is not in the hotpath.
	 */
	csrrd		t0, LOONGARCH_CSR_BADV
	bltz		t0, vmalloc_load	/* kernel address: high bit set */
	csrrd		t1, LOONGARCH_CSR_PGDL

vmalloc_done_load:
	/* Get PGD offset in bytes */
	bstrpick.d	ra, t0, PTRS_PER_PGD_BITS + PGDIR_SHIFT - 1, PGDIR_SHIFT
	alsl.d		t1, ra, t1, 3
#if CONFIG_PGTABLE_LEVELS > 3
	ld.d		t1, t1, 0
	bstrpick.d	ra, t0, PTRS_PER_PUD_BITS + PUD_SHIFT - 1, PUD_SHIFT
	alsl.d		t1, ra, t1, 3
#endif
#if CONFIG_PGTABLE_LEVELS > 2
	ld.d		t1, t1, 0
	bstrpick.d	ra, t0, PTRS_PER_PMD_BITS + PMD_SHIFT - 1, PMD_SHIFT
	alsl.d		t1, ra, t1, 3
#endif
	ld.d		ra, t1, 0

	/*
	 * For huge tlb entries, pmde doesn't contain an address but
	 * instead contains the tlb pte. Check the PAGE_HUGE bit and
	 * see if we need to jump to huge tlb processing.
	 */
	rotri.d		ra, ra, _PAGE_HUGE_SHIFT + 1	/* PAGE_HUGE -> sign bit */
	bltz		ra, tlb_huge_update_load

	rotri.d		ra, ra, 64 - (_PAGE_HUGE_SHIFT + 1)	/* undo rotate */
	bstrpick.d	t0, t0, PTRS_PER_PTE_BITS + PAGE_SHIFT - 1, PAGE_SHIFT
	alsl.d		t1, t0, ra, _PTE_T_LOG2		/* t1 = &pte */

#ifdef CONFIG_SMP
smp_pgtable_change_load:
	ll.d		t0, t1, 0
#else
	ld.d		t0, t1, 0
#endif
	andi		ra, t0, _PAGE_PRESENT
	beqz		ra, nopage_tlb_load

	ori		t0, t0, _PAGE_VALID
#ifdef CONFIG_SMP
	sc.d		t0, t1, 0
	beqz		t0, smp_pgtable_change_load	/* lost the race: retry */
#else
	st.d		t0, t1, 0
#endif
	tlbsrch
	bstrins.d	t1, zero, 3, 3		/* align to the even PTE of the pair */
	ld.d		t0, t1, 0
	ld.d		t1, t1, 8
	csrwr		t0, LOONGARCH_CSR_TLBELO0
	csrwr		t1, LOONGARCH_CSR_TLBELO1
	tlbwr

	csrrd		t0, EXCEPTION_KS0
	csrrd		t1, EXCEPTION_KS1
	csrrd		ra, EXCEPTION_KS2
	ertn

#ifdef CONFIG_64BIT
vmalloc_load:
	la_abs		t1, swapper_pg_dir
	b		vmalloc_done_load
#endif

	/* This is the entry point of a huge page. */
tlb_huge_update_load:
#ifdef CONFIG_SMP
	ll.d		ra, t1, 0
#else
	rotri.d		ra, ra, 64 - (_PAGE_HUGE_SHIFT + 1)
#endif
	andi		t0, ra, _PAGE_PRESENT
	beqz		t0, nopage_tlb_load

#ifdef CONFIG_SMP
	ori		t0, ra, _PAGE_VALID
	sc.d		t0, t1, 0
	beqz		t0, tlb_huge_update_load
	ori		t0, ra, _PAGE_VALID
#else
	ori		t0, ra, _PAGE_VALID
	st.d		t0, t1, 0
#endif
	/* Drop any stale entry for this (ASID, address) before refilling */
	csrrd		ra, LOONGARCH_CSR_ASID
	csrrd		t1, LOONGARCH_CSR_BADV
	andi		ra, ra, CSR_ASID_ASID
	invtlb		INVTLB_ADDR_GFALSE_AND_ASID, ra, t1

	/*
	 * A huge PTE describes an area the size of the
	 * configured huge page size. This is twice the
	 * of the large TLB entry size we intend to use.
	 * A TLB entry half the size of the configured
	 * huge page size is configured into entrylo0
	 * and entrylo1 to cover the contiguous huge PTE
	 * address space.
	 */
	/* Huge page: Move Global bit */
	xori		t0, t0, _PAGE_HUGE
	lu12i.w		t1, _PAGE_HGLOBAL >> 12
	and		t1, t0, t1
	srli.d		t1, t1, (_PAGE_HGLOBAL_SHIFT - _PAGE_GLOBAL_SHIFT)
	or		t0, t0, t1

	move		ra, t0
	csrwr		ra, LOONGARCH_CSR_TLBELO0

	/* Convert to entrylo1 */
	addi.d		t1, zero, 1
	slli.d		t1, t1, (HPAGE_SHIFT - 1)	/* half the huge page size */
	add.d		t0, t0, t1
	csrwr		t0, LOONGARCH_CSR_TLBELO1

	/* Set huge page tlb entry size */
	addu16i.d	t0, zero, (CSR_TLBIDX_PS >> 16)
	addu16i.d	t1, zero, (PS_HUGE_SIZE << (CSR_TLBIDX_PS_SHIFT - 16))
	csrxchg		t1, t0, LOONGARCH_CSR_TLBIDX

	tlbfill

	/* Reset default page size */
	addu16i.d	t0, zero, (CSR_TLBIDX_PS >> 16)
	addu16i.d	t1, zero, (PS_DEFAULT_SIZE << (CSR_TLBIDX_PS_SHIFT - 16))
	csrxchg		t1, t0, LOONGARCH_CSR_TLBIDX

	csrrd		t0, EXCEPTION_KS0
	csrrd		t1, EXCEPTION_KS1
	csrrd		ra, EXCEPTION_KS2
	ertn

nopage_tlb_load:
	dbar		0x700			/* barrier before the slow path */
	csrrd		ra, EXCEPTION_KS2
	la_abs		t0, tlb_do_page_fault_0
	jr		t0
SYM_CODE_END(handle_tlb_load)
09cfefb7 195
/*
 * Load-invalid entry used when the hardware page-table walker (PTW)
 * is enabled: the walker has already failed, so skip the software
 * walk and jump straight to the C page-fault trampoline.
 */
SYM_CODE_START(handle_tlb_load_ptw)
	UNWIND_HINT_UNDEFINED
	csrwr		t0, LOONGARCH_CSR_KS0
	csrwr		t1, LOONGARCH_CSR_KS1
	la_abs		t0, tlb_do_page_fault_0
	jr		t0
SYM_CODE_END(handle_tlb_load_ptw)
01158487 203
/*
 * TLB store-invalid exception fast path.
 * Same structure as handle_tlb_load, but the PTE must be both PRESENT
 * and WRITE, and the fast path additionally sets _PAGE_DIRTY and
 * _PAGE_MODIFIED. Falls back to tlb_do_page_fault_1 (write fault).
 */
SYM_CODE_START(handle_tlb_store)
	UNWIND_HINT_UNDEFINED
	csrwr		t0, EXCEPTION_KS0
	csrwr		t1, EXCEPTION_KS1
	csrwr		ra, EXCEPTION_KS2

	/*
	 * The vmalloc handling is not in the hotpath.
	 */
	csrrd		t0, LOONGARCH_CSR_BADV
	bltz		t0, vmalloc_store	/* kernel address: high bit set */
	csrrd		t1, LOONGARCH_CSR_PGDL

vmalloc_done_store:
	/* Get PGD offset in bytes */
	bstrpick.d	ra, t0, PTRS_PER_PGD_BITS + PGDIR_SHIFT - 1, PGDIR_SHIFT
	alsl.d		t1, ra, t1, 3
#if CONFIG_PGTABLE_LEVELS > 3
	ld.d		t1, t1, 0
	bstrpick.d	ra, t0, PTRS_PER_PUD_BITS + PUD_SHIFT - 1, PUD_SHIFT
	alsl.d		t1, ra, t1, 3
#endif
#if CONFIG_PGTABLE_LEVELS > 2
	ld.d		t1, t1, 0
	bstrpick.d	ra, t0, PTRS_PER_PMD_BITS + PMD_SHIFT - 1, PMD_SHIFT
	alsl.d		t1, ra, t1, 3
#endif
	ld.d		ra, t1, 0

	/*
	 * For huge tlb entries, pmde doesn't contain an address but
	 * instead contains the tlb pte. Check the PAGE_HUGE bit and
	 * see if we need to jump to huge tlb processing.
	 */
	rotri.d		ra, ra, _PAGE_HUGE_SHIFT + 1	/* PAGE_HUGE -> sign bit */
	bltz		ra, tlb_huge_update_store

	rotri.d		ra, ra, 64 - (_PAGE_HUGE_SHIFT + 1)	/* undo rotate */
	bstrpick.d	t0, t0, PTRS_PER_PTE_BITS + PAGE_SHIFT - 1, PAGE_SHIFT
	alsl.d		t1, t0, ra, _PTE_T_LOG2		/* t1 = &pte */

#ifdef CONFIG_SMP
smp_pgtable_change_store:
	ll.d		t0, t1, 0
#else
	ld.d		t0, t1, 0
#endif
	/* Require PRESENT and WRITE simultaneously */
	andi		ra, t0, _PAGE_PRESENT | _PAGE_WRITE
	xori		ra, ra, _PAGE_PRESENT | _PAGE_WRITE
	bnez		ra, nopage_tlb_store

	ori		t0, t0, (_PAGE_VALID | _PAGE_DIRTY | _PAGE_MODIFIED)
#ifdef CONFIG_SMP
	sc.d		t0, t1, 0
	beqz		t0, smp_pgtable_change_store	/* lost the race: retry */
#else
	st.d		t0, t1, 0
#endif
	tlbsrch
	bstrins.d	t1, zero, 3, 3		/* align to the even PTE of the pair */
	ld.d		t0, t1, 0
	ld.d		t1, t1, 8
	csrwr		t0, LOONGARCH_CSR_TLBELO0
	csrwr		t1, LOONGARCH_CSR_TLBELO1
	tlbwr

	csrrd		t0, EXCEPTION_KS0
	csrrd		t1, EXCEPTION_KS1
	csrrd		ra, EXCEPTION_KS2
	ertn

#ifdef CONFIG_64BIT
vmalloc_store:
	la_abs		t1, swapper_pg_dir
	b		vmalloc_done_store
#endif

	/* This is the entry point of a huge page. */
tlb_huge_update_store:
#ifdef CONFIG_SMP
	ll.d		ra, t1, 0
#else
	rotri.d		ra, ra, 64 - (_PAGE_HUGE_SHIFT + 1)
#endif
	andi		t0, ra, _PAGE_PRESENT | _PAGE_WRITE
	xori		t0, t0, _PAGE_PRESENT | _PAGE_WRITE
	bnez		t0, nopage_tlb_store

#ifdef CONFIG_SMP
	ori		t0, ra, (_PAGE_VALID | _PAGE_DIRTY | _PAGE_MODIFIED)
	sc.d		t0, t1, 0
	beqz		t0, tlb_huge_update_store
	ori		t0, ra, (_PAGE_VALID | _PAGE_DIRTY | _PAGE_MODIFIED)
#else
	ori		t0, ra, (_PAGE_VALID | _PAGE_DIRTY | _PAGE_MODIFIED)
	st.d		t0, t1, 0
#endif
	/* Drop any stale entry for this (ASID, address) before refilling */
	csrrd		ra, LOONGARCH_CSR_ASID
	csrrd		t1, LOONGARCH_CSR_BADV
	andi		ra, ra, CSR_ASID_ASID
	invtlb		INVTLB_ADDR_GFALSE_AND_ASID, ra, t1

	/*
	 * A huge PTE describes an area the size of the
	 * configured huge page size. This is twice the
	 * of the large TLB entry size we intend to use.
	 * A TLB entry half the size of the configured
	 * huge page size is configured into entrylo0
	 * and entrylo1 to cover the contiguous huge PTE
	 * address space.
	 */
	/* Huge page: Move Global bit */
	xori		t0, t0, _PAGE_HUGE
	lu12i.w		t1, _PAGE_HGLOBAL >> 12
	and		t1, t0, t1
	srli.d		t1, t1, (_PAGE_HGLOBAL_SHIFT - _PAGE_GLOBAL_SHIFT)
	or		t0, t0, t1

	move		ra, t0
	csrwr		ra, LOONGARCH_CSR_TLBELO0

	/* Convert to entrylo1 */
	addi.d		t1, zero, 1
	slli.d		t1, t1, (HPAGE_SHIFT - 1)	/* half the huge page size */
	add.d		t0, t0, t1
	csrwr		t0, LOONGARCH_CSR_TLBELO1

	/* Set huge page tlb entry size */
	addu16i.d	t0, zero, (CSR_TLBIDX_PS >> 16)
	addu16i.d	t1, zero, (PS_HUGE_SIZE << (CSR_TLBIDX_PS_SHIFT - 16))
	csrxchg		t1, t0, LOONGARCH_CSR_TLBIDX

	tlbfill

	/* Reset default page size */
	addu16i.d	t0, zero, (CSR_TLBIDX_PS >> 16)
	addu16i.d	t1, zero, (PS_DEFAULT_SIZE << (CSR_TLBIDX_PS_SHIFT - 16))
	csrxchg		t1, t0, LOONGARCH_CSR_TLBIDX

	csrrd		t0, EXCEPTION_KS0
	csrrd		t1, EXCEPTION_KS1
	csrrd		ra, EXCEPTION_KS2
	ertn

nopage_tlb_store:
	dbar		0x700			/* barrier before the slow path */
	csrrd		ra, EXCEPTION_KS2
	la_abs		t0, tlb_do_page_fault_1
	jr		t0
SYM_CODE_END(handle_tlb_store)
09cfefb7 354
/*
 * Store-invalid entry used when the hardware page-table walker (PTW)
 * is enabled: skip the software walk, go straight to the write-fault
 * trampoline.
 */
SYM_CODE_START(handle_tlb_store_ptw)
	UNWIND_HINT_UNDEFINED
	csrwr		t0, LOONGARCH_CSR_KS0
	csrwr		t1, LOONGARCH_CSR_KS1
	la_abs		t0, tlb_do_page_fault_1
	jr		t0
SYM_CODE_END(handle_tlb_store_ptw)
01158487 362
/*
 * TLB modify exception fast path (write to a clean, writable page).
 * Same structure as handle_tlb_store, but only _PAGE_WRITE is checked:
 * the entry exists, we just need to set VALID/DIRTY/MODIFIED and
 * rewrite it. Falls back to tlb_do_page_fault_1 (write fault).
 */
SYM_CODE_START(handle_tlb_modify)
	UNWIND_HINT_UNDEFINED
	csrwr		t0, EXCEPTION_KS0
	csrwr		t1, EXCEPTION_KS1
	csrwr		ra, EXCEPTION_KS2

	/*
	 * The vmalloc handling is not in the hotpath.
	 */
	csrrd		t0, LOONGARCH_CSR_BADV
	bltz		t0, vmalloc_modify	/* kernel address: high bit set */
	csrrd		t1, LOONGARCH_CSR_PGDL

vmalloc_done_modify:
	/* Get PGD offset in bytes */
	bstrpick.d	ra, t0, PTRS_PER_PGD_BITS + PGDIR_SHIFT - 1, PGDIR_SHIFT
	alsl.d		t1, ra, t1, 3
#if CONFIG_PGTABLE_LEVELS > 3
	ld.d		t1, t1, 0
	bstrpick.d	ra, t0, PTRS_PER_PUD_BITS + PUD_SHIFT - 1, PUD_SHIFT
	alsl.d		t1, ra, t1, 3
#endif
#if CONFIG_PGTABLE_LEVELS > 2
	ld.d		t1, t1, 0
	bstrpick.d	ra, t0, PTRS_PER_PMD_BITS + PMD_SHIFT - 1, PMD_SHIFT
	alsl.d		t1, ra, t1, 3
#endif
	ld.d		ra, t1, 0

	/*
	 * For huge tlb entries, pmde doesn't contain an address but
	 * instead contains the tlb pte. Check the PAGE_HUGE bit and
	 * see if we need to jump to huge tlb processing.
	 */
	rotri.d		ra, ra, _PAGE_HUGE_SHIFT + 1	/* PAGE_HUGE -> sign bit */
	bltz		ra, tlb_huge_update_modify

	rotri.d		ra, ra, 64 - (_PAGE_HUGE_SHIFT + 1)	/* undo rotate */
	bstrpick.d	t0, t0, PTRS_PER_PTE_BITS + PAGE_SHIFT - 1, PAGE_SHIFT
	alsl.d		t1, t0, ra, _PTE_T_LOG2		/* t1 = &pte */

#ifdef CONFIG_SMP
smp_pgtable_change_modify:
	ll.d		t0, t1, 0
#else
	ld.d		t0, t1, 0
#endif
	andi		ra, t0, _PAGE_WRITE
	beqz		ra, nopage_tlb_modify

	ori		t0, t0, (_PAGE_VALID | _PAGE_DIRTY | _PAGE_MODIFIED)
#ifdef CONFIG_SMP
	sc.d		t0, t1, 0
	beqz		t0, smp_pgtable_change_modify	/* lost the race: retry */
#else
	st.d		t0, t1, 0
#endif
	tlbsrch
	bstrins.d	t1, zero, 3, 3		/* align to the even PTE of the pair */
	ld.d		t0, t1, 0
	ld.d		t1, t1, 8
	csrwr		t0, LOONGARCH_CSR_TLBELO0
	csrwr		t1, LOONGARCH_CSR_TLBELO1
	tlbwr

	csrrd		t0, EXCEPTION_KS0
	csrrd		t1, EXCEPTION_KS1
	csrrd		ra, EXCEPTION_KS2
	ertn

#ifdef CONFIG_64BIT
vmalloc_modify:
	la_abs		t1, swapper_pg_dir
	b		vmalloc_done_modify
#endif

	/* This is the entry point of a huge page. */
tlb_huge_update_modify:
#ifdef CONFIG_SMP
	ll.d		ra, t1, 0
#else
	rotri.d		ra, ra, 64 - (_PAGE_HUGE_SHIFT + 1)
#endif
	andi		t0, ra, _PAGE_WRITE
	beqz		t0, nopage_tlb_modify

#ifdef CONFIG_SMP
	ori		t0, ra, (_PAGE_VALID | _PAGE_DIRTY | _PAGE_MODIFIED)
	sc.d		t0, t1, 0
	beqz		t0, tlb_huge_update_modify
	ori		t0, ra, (_PAGE_VALID | _PAGE_DIRTY | _PAGE_MODIFIED)
#else
	ori		t0, ra, (_PAGE_VALID | _PAGE_DIRTY | _PAGE_MODIFIED)
	st.d		t0, t1, 0
#endif
	/* Drop any stale entry for this (ASID, address) before refilling */
	csrrd		ra, LOONGARCH_CSR_ASID
	csrrd		t1, LOONGARCH_CSR_BADV
	andi		ra, ra, CSR_ASID_ASID
	invtlb		INVTLB_ADDR_GFALSE_AND_ASID, ra, t1

	/*
	 * A huge PTE describes an area the size of the
	 * configured huge page size. This is twice the
	 * of the large TLB entry size we intend to use.
	 * A TLB entry half the size of the configured
	 * huge page size is configured into entrylo0
	 * and entrylo1 to cover the contiguous huge PTE
	 * address space.
	 */
	/* Huge page: Move Global bit */
	xori		t0, t0, _PAGE_HUGE
	lu12i.w		t1, _PAGE_HGLOBAL >> 12
	and		t1, t0, t1
	srli.d		t1, t1, (_PAGE_HGLOBAL_SHIFT - _PAGE_GLOBAL_SHIFT)
	or		t0, t0, t1

	move		ra, t0
	csrwr		ra, LOONGARCH_CSR_TLBELO0

	/* Convert to entrylo1 */
	addi.d		t1, zero, 1
	slli.d		t1, t1, (HPAGE_SHIFT - 1)	/* half the huge page size */
	add.d		t0, t0, t1
	csrwr		t0, LOONGARCH_CSR_TLBELO1

	/* Set huge page tlb entry size */
	addu16i.d	t0, zero, (CSR_TLBIDX_PS >> 16)
	addu16i.d	t1, zero, (PS_HUGE_SIZE << (CSR_TLBIDX_PS_SHIFT - 16))
	csrxchg		t1, t0, LOONGARCH_CSR_TLBIDX

	tlbfill

	/* Reset default page size */
	addu16i.d	t0, zero, (CSR_TLBIDX_PS >> 16)
	addu16i.d	t1, zero, (PS_DEFAULT_SIZE << (CSR_TLBIDX_PS_SHIFT - 16))
	csrxchg		t1, t0, LOONGARCH_CSR_TLBIDX

	csrrd		t0, EXCEPTION_KS0
	csrrd		t1, EXCEPTION_KS1
	csrrd		ra, EXCEPTION_KS2
	ertn

nopage_tlb_modify:
	dbar		0x700			/* barrier before the slow path */
	csrrd		ra, EXCEPTION_KS2
	la_abs		t0, tlb_do_page_fault_1
	jr		t0
SYM_CODE_END(handle_tlb_modify)
09cfefb7 511
/*
 * Modify entry used when the hardware page-table walker (PTW) is
 * enabled: skip the software walk, go straight to the write-fault
 * trampoline.
 */
SYM_CODE_START(handle_tlb_modify_ptw)
	UNWIND_HINT_UNDEFINED
	csrwr		t0, LOONGARCH_CSR_KS0
	csrwr		t1, LOONGARCH_CSR_KS1
	la_abs		t0, tlb_do_page_fault_1
	jr		t0
SYM_CODE_END(handle_tlb_modify_ptw)
01158487 519
/*
 * TLB refill exception handler: hardware-assisted page-table walk.
 * Only t0 is used (saved/restored via CSR.TLBRSAVE). lddir descends
 * one directory level per invocation; ldpte loads the even/odd PTE
 * pair into the TLB entry registers before tlbfill.
 */
SYM_CODE_START(handle_tlb_refill)
	UNWIND_HINT_UNDEFINED
	csrwr		t0, LOONGARCH_CSR_TLBRSAVE
	csrrd		t0, LOONGARCH_CSR_PGD
	lddir		t0, t0, 3		/* PGD level */
#if CONFIG_PGTABLE_LEVELS > 3
	lddir		t0, t0, 2		/* PUD level */
#endif
#if CONFIG_PGTABLE_LEVELS > 2
	lddir		t0, t0, 1		/* PMD level */
#endif
	ldpte		t0, 0			/* even PTE of the pair */
	ldpte		t0, 1			/* odd PTE of the pair */
	tlbfill
	csrrd		t0, LOONGARCH_CSR_TLBRSAVE
	ertn
SYM_CODE_END(handle_tlb_refill)