/* SPDX-License-Identifier: GPL-2.0-only */
/*
 * Low-level exception handling code
 *
 * Copyright (C) 2012 ARM Ltd.
 * Authors:	Catalin Marinas <catalin.marinas@arm.com>
 *		Will Deacon <will.deacon@arm.com>
 */

#include <linux/arm-smccc.h>
#include <linux/init.h>
#include <linux/linkage.h>

#include <asm/alternative.h>
#include <asm/assembler.h>
#include <asm/asm-offsets.h>
#include <asm/cpufeature.h>
#include <asm/errno.h>
#include <asm/esr.h>
#include <asm/irq.h>
#include <asm/memory.h>
#include <asm/mmu.h>
#include <asm/processor.h>
#include <asm/ptrace.h>
#include <asm/thread_info.h>
#include <asm/asm-uaccess.h>
#include <asm/unistd.h>

/*
 * Context tracking subsystem. Used to instrument transitions
 * between user and kernel mode.
 */
	.macro ct_user_exit
#ifdef CONFIG_CONTEXT_TRACKING
	bl	context_tracking_user_exit
#endif
	.endm

	.macro ct_user_enter
#ifdef CONFIG_CONTEXT_TRACKING
	bl	context_tracking_user_enter
#endif
	.endm

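/*
 * Zero the general-purpose registers x0-x29 (x30/lr is dealt with by the
 * caller). Used on entry from EL0, presumably so that stale user-controlled
 * register values are not left live inside the kernel.
 */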
	.macro clear_gp_regs
	.irp	n,0,1,2,3,4,5,6,7,8,9,10,11,12,13,14,15,16,17,18,19,20,21,22,23,24,25,26,27,28,29
	mov	x\n, xzr
	.endr
	.endm

/*
 * Bad Abort numbers
 *-----------------
 */
#define BAD_SYNC	0
#define BAD_IRQ		1
#define BAD_FIQ		2
#define BAD_ERROR	3

	.macro kernel_ventry, el, label, regsize = 64
	.align 7
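	/*
	 * When KPTI is in use, the trampoline vectors stash the original x30
	 * in tpidrro_el0 (see tramp_ventry below). For 64-bit tasks, recover
	 * it here and wipe the stash so it cannot leak to userspace; for
	 * 32-bit tasks x30 is simply zeroed.
	 */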
#ifdef CONFIG_UNMAP_KERNEL_AT_EL0
alternative_if ARM64_UNMAP_KERNEL_AT_EL0
	.if	\el == 0
	.if	\regsize == 64
	mrs	x30, tpidrro_el0
	msr	tpidrro_el0, xzr
	.else
	mov	x30, xzr
	.endif
	.endif
alternative_else_nop_endif
#endif

	sub	sp, sp, #S_FRAME_SIZE
#ifdef CONFIG_VMAP_STACK
	/*
	 * Test whether the SP has overflowed, without corrupting a GPR.
	 * Task and IRQ stacks are aligned to (1 << THREAD_SHIFT).
	 */
	add	sp, sp, x0			// sp' = sp + x0
	sub	x0, sp, x0			// x0' = sp' - x0 = (sp + x0) - x0 = sp
	tbnz	x0, #THREAD_SHIFT, 0f
	sub	x0, sp, x0			// x0'' = sp' - x0' = (sp + x0) - sp = x0
	sub	sp, sp, x0			// sp'' = sp' - x0 = (sp + x0) - x0 = sp
	b	el\()\el\()_\label

0:
	/*
	 * Either we've just detected an overflow, or we've taken an exception
	 * while on the overflow stack. Either way, we won't return to
	 * userspace, and can clobber EL0 registers to free up GPRs.
	 */

	/* Stash the original SP (minus S_FRAME_SIZE) in tpidr_el0. */
	msr	tpidr_el0, x0

	/* Recover the original x0 value and stash it in tpidrro_el0 */
	sub	x0, sp, x0
	msr	tpidrro_el0, x0

	/* Switch to the overflow stack */
	adr_this_cpu sp, overflow_stack + OVERFLOW_STACK_SIZE, x0

	/*
	 * Check whether we were already on the overflow stack. This may happen
	 * after panic() re-enables interrupts.
	 */
	mrs	x0, tpidr_el0			// sp of interrupted context
	sub	x0, sp, x0			// delta with top of overflow stack
	tst	x0, #~(OVERFLOW_STACK_SIZE - 1)	// within range?
	b.ne	__bad_stack			// no? -> bad stack pointer

	/* We were already on the overflow stack. Restore sp/x0 and carry on. */
	sub	sp, sp, x0
	mrs	x0, tpidrro_el0
#endif
	b	el\()\el\()_\label
	.endm

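/*
 * Compute the address of \sym inside the fixed trampoline mapping: take the
 * symbol's offset from the start of .entry.tramp.text and add it to
 * TRAMP_VALIAS, where that section is also mapped.
 */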
	.macro tramp_alias, dst, sym
	mov_q	\dst, TRAMP_VALIAS
	add	\dst, \dst, #(\sym - .entry.tramp.text)
	.endm

	// This macro corrupts x0-x3. It is the caller's duty
	// to save/restore them if required.
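	// \state is forwarded to ARM_SMCCC_ARCH_WORKAROUND_2: 1 requests the
	// SSBD mitigation on kernel entry, 0 releases it on return to user.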
	.macro	apply_ssbd, state, tmp1, tmp2
#ifdef CONFIG_ARM64_SSBD
alternative_cb	arm64_enable_wa2_handling
	b	.L__asm_ssbd_skip\@
alternative_cb_end
	ldr_this_cpu	\tmp2, arm64_ssbd_callback_required, \tmp1
	cbz	\tmp2, .L__asm_ssbd_skip\@
	ldr	\tmp2, [tsk, #TSK_TI_FLAGS]
	tbnz	\tmp2, #TIF_SSBD, .L__asm_ssbd_skip\@
	mov	w0, #ARM_SMCCC_ARCH_WORKAROUND_2
	mov	w1, #\state
alternative_cb	arm64_update_smccc_conduit
	nop					// Patched to SMC/HVC #0
alternative_cb_end
.L__asm_ssbd_skip\@:
#endif
	.endm

	.macro	kernel_entry, el, regsize = 64
	.if	\regsize == 32
	mov	w0, w0				// zero upper 32 bits of x0
	.endif
	stp	x0, x1, [sp, #16 * 0]
	stp	x2, x3, [sp, #16 * 1]
	stp	x4, x5, [sp, #16 * 2]
	stp	x6, x7, [sp, #16 * 3]
	stp	x8, x9, [sp, #16 * 4]
	stp	x10, x11, [sp, #16 * 5]
	stp	x12, x13, [sp, #16 * 6]
	stp	x14, x15, [sp, #16 * 7]
	stp	x16, x17, [sp, #16 * 8]
	stp	x18, x19, [sp, #16 * 9]
	stp	x20, x21, [sp, #16 * 10]
	stp	x22, x23, [sp, #16 * 11]
	stp	x24, x25, [sp, #16 * 12]
	stp	x26, x27, [sp, #16 * 13]
	stp	x28, x29, [sp, #16 * 14]

	.if	\el == 0
	clear_gp_regs
	mrs	x21, sp_el0
	ldr_this_cpu	tsk, __entry_task, x20	// Ensure MDSCR_EL1.SS is clear,
	ldr	x19, [tsk, #TSK_TI_FLAGS]	// since we can unmask debug
	disable_step_tsk x19, x20		// exceptions when scheduling.

	apply_ssbd 1, x22, x23

	.else
	add	x21, sp, #S_FRAME_SIZE
	get_current_task tsk
	/* Save the task's original addr_limit and set USER_DS */
	ldr	x20, [tsk, #TSK_TI_ADDR_LIMIT]
	str	x20, [sp, #S_ORIG_ADDR_LIMIT]
	mov	x20, #USER_DS
	str	x20, [tsk, #TSK_TI_ADDR_LIMIT]
	/* No need to reset PSTATE.UAO, hardware's already set it to 0 for us */
	.endif /* \el == 0 */
	mrs	x22, elr_el1
	mrs	x23, spsr_el1
	stp	lr, x21, [sp, #S_LR]

	/*
	 * In order to be able to dump the contents of struct pt_regs at the
	 * time the exception was taken (in case we attempt to walk the call
	 * stack later), chain it together with the stack frames.
	 */
	.if \el == 0
	stp	xzr, xzr, [sp, #S_STACKFRAME]
	.else
	stp	x29, x22, [sp, #S_STACKFRAME]
	.endif
	add	x29, sp, #S_STACKFRAME

#ifdef CONFIG_ARM64_SW_TTBR0_PAN
	/*
	 * Set the TTBR0 PAN bit in SPSR. When the exception is taken from
	 * EL0, there is no need to check the state of TTBR0_EL1 since
	 * accesses are always enabled.
	 * Note that the meaning of this bit differs from the ARMv8.1 PAN
	 * feature as all TTBR0_EL1 accesses are disabled, not just those to
	 * user mappings.
	 */
alternative_if ARM64_HAS_PAN
	b	1f				// skip TTBR0 PAN
alternative_else_nop_endif

	.if	\el != 0
	mrs	x21, ttbr0_el1
	tst	x21, #TTBR_ASID_MASK		// Check for the reserved ASID
	orr	x23, x23, #PSR_PAN_BIT		// Set the emulated PAN in the saved SPSR
	b.eq	1f				// TTBR0 access already disabled
	and	x23, x23, #~PSR_PAN_BIT		// Clear the emulated PAN in the saved SPSR
	.endif

	__uaccess_ttbr0_disable x21
1:
#endif

	stp	x22, x23, [sp, #S_PC]

	/* Not in a syscall by default (el0_svc overwrites for real syscall) */
	.if	\el == 0
	mov	w21, #NO_SYSCALL
	str	w21, [sp, #S_SYSCALLNO]
	.endif

	/*
	 * Set sp_el0 to current thread_info.
	 */
	.if	\el == 0
	msr	sp_el0, tsk
	.endif

	/* Save pmr */
alternative_if ARM64_HAS_IRQ_PRIO_MASKING
	mrs_s	x20, SYS_ICC_PMR_EL1
	str	x20, [sp, #S_PMR_SAVE]
alternative_else_nop_endif

	/*
	 * Registers that may be useful after this macro is invoked:
	 *
	 * x20 - ICC_PMR_EL1
	 * x21 - aborted SP
	 * x22 - aborted PC
	 * x23 - aborted PSTATE
	 */
	.endm

	.macro	kernel_exit, el
	.if	\el != 0
	disable_daif

	/* Restore the task's original addr_limit. */
	ldr	x20, [sp, #S_ORIG_ADDR_LIMIT]
	str	x20, [tsk, #TSK_TI_ADDR_LIMIT]

	/* No need to restore UAO, it will be restored from SPSR_EL1 */
	.endif

	/* Restore pmr */
alternative_if ARM64_HAS_IRQ_PRIO_MASKING
	ldr	x20, [sp, #S_PMR_SAVE]
	msr_s	SYS_ICC_PMR_EL1, x20
	/* Ensure priority change is seen by redistributor */
	dsb	sy
alternative_else_nop_endif

	ldp	x21, x22, [sp, #S_PC]		// load ELR, SPSR
	.if	\el == 0
	ct_user_enter
	.endif

#ifdef CONFIG_ARM64_SW_TTBR0_PAN
	/*
	 * Restore access to TTBR0_EL1. If returning to EL0, no need for SPSR
	 * PAN bit checking.
	 */
alternative_if ARM64_HAS_PAN
	b	2f				// skip TTBR0 PAN
alternative_else_nop_endif

	.if	\el != 0
	tbnz	x22, #22, 1f			// Skip re-enabling TTBR0 access if the PSR_PAN_BIT is set
	.endif

	__uaccess_ttbr0_enable x0, x1

	.if	\el == 0
	/*
	 * Enable errata workarounds only if returning to user. The only
	 * workaround currently required for TTBR0_EL1 changes is for
	 * Cavium erratum 27456 (broadcast TLBI instructions may cause I-cache
	 * corruption).
	 */
	bl	post_ttbr_update_workaround
	.endif
1:
	.if	\el != 0
	and	x22, x22, #~PSR_PAN_BIT		// ARMv8.0 CPUs do not understand this bit
	.endif
2:
#endif

	.if	\el == 0
	ldr	x23, [sp, #S_SP]		// load return stack pointer
	msr	sp_el0, x23
	tst	x22, #PSR_MODE32_BIT		// native task?
	b.eq	3f

#ifdef CONFIG_ARM64_ERRATUM_845719
alternative_if ARM64_WORKAROUND_845719
#ifdef CONFIG_PID_IN_CONTEXTIDR
	mrs	x29, contextidr_el1
	msr	contextidr_el1, x29
#else
	msr	contextidr_el1, xzr
#endif
alternative_else_nop_endif
#endif
3:
#ifdef CONFIG_ARM64_ERRATUM_1418040
alternative_if_not ARM64_WORKAROUND_1418040
	b	4f
alternative_else_nop_endif
	/*
	 * if (x22.mode32 == cntkctl_el1.el0vcten)
	 *     cntkctl_el1.el0vcten = ~cntkctl_el1.el0vcten
	 */
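	/*
	 * PSR_MODE32_BIT is bit 4 of the saved SPSR in x22 and EL0VCTEN is
	 * bit 1 of CNTKCTL_EL1, so the "lsr #3" lines the two bits up before
	 * eon (XOR-NOT) compares them.
	 */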
	mrs	x1, cntkctl_el1
	eon	x0, x1, x22, lsr #3
	tbz	x0, #1, 4f
	eor	x1, x1, #2			// ARCH_TIMER_USR_VCT_ACCESS_EN
	msr	cntkctl_el1, x1
4:
#endif
	apply_ssbd 0, x0, x1
	.endif

	msr	elr_el1, x21			// set up the return data
	msr	spsr_el1, x22
	ldp	x0, x1, [sp, #16 * 0]
	ldp	x2, x3, [sp, #16 * 1]
	ldp	x4, x5, [sp, #16 * 2]
	ldp	x6, x7, [sp, #16 * 3]
	ldp	x8, x9, [sp, #16 * 4]
	ldp	x10, x11, [sp, #16 * 5]
	ldp	x12, x13, [sp, #16 * 6]
	ldp	x14, x15, [sp, #16 * 7]
	ldp	x16, x17, [sp, #16 * 8]
	ldp	x18, x19, [sp, #16 * 9]
	ldp	x20, x21, [sp, #16 * 10]
	ldp	x22, x23, [sp, #16 * 11]
	ldp	x24, x25, [sp, #16 * 12]
	ldp	x26, x27, [sp, #16 * 13]
	ldp	x28, x29, [sp, #16 * 14]
	ldr	lr, [sp, #S_LR]
	add	sp, sp, #S_FRAME_SIZE		// restore sp

	.if	\el == 0
alternative_insn eret, nop, ARM64_UNMAP_KERNEL_AT_EL0
#ifdef CONFIG_UNMAP_KERNEL_AT_EL0
	bne	5f
	msr	far_el1, x30
	tramp_alias	x30, tramp_exit_native
	br	x30
5:
	tramp_alias	x30, tramp_exit_compat
	br	x30
#endif
	.else
	eret
	.endif
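	/* Speculation barrier: prevent straight-line speculation past the eret. */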
	sb
	.endm

	.macro	irq_stack_entry
	mov	x19, sp			// preserve the original sp

	/*
	 * Compare sp with the base of the task stack.
	 * If the top ~(THREAD_SIZE - 1) bits match, we are on a task stack,
	 * and should switch to the irq stack.
	 */
	ldr	x25, [tsk, TSK_STACK]
	eor	x25, x25, x19
	and	x25, x25, #~(THREAD_SIZE - 1)
	cbnz	x25, 9998f

	ldr_this_cpu x25, irq_stack_ptr, x26
	mov	x26, #IRQ_STACK_SIZE
	add	x26, x25, x26

	/* switch to the irq stack */
	mov	sp, x26
9998:
	.endm

	/*
	 * x19 should be preserved between irq_stack_entry and
	 * irq_stack_exit.
	 */
	.macro	irq_stack_exit
	mov	sp, x19
	.endm

/* GPRs used by entry code */
tsk	.req	x28		// current thread_info

/*
 * Interrupt handling.
 */
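/*
 * Load the registered handle_arch_irq hook, switch to the per-CPU IRQ stack
 * (when not already off the task stack) and call the handler with x0
 * pointing at the saved pt_regs.
 */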
	.macro	irq_handler
	ldr_l	x1, handle_arch_irq
	mov	x0, sp
	irq_stack_entry
	blr	x1
	irq_stack_exit
	.endm

#ifdef CONFIG_ARM64_PSEUDO_NMI
	/*
	 * Set res to 0 if irqs were unmasked in interrupted context.
	 * Otherwise set res to non-0 value.
	 */
	.macro	test_irqs_unmasked res:req, pmr:req
alternative_if ARM64_HAS_IRQ_PRIO_MASKING
	sub	\res, \pmr, #GIC_PRIO_IRQON
alternative_else
	mov	\res, xzr
alternative_endif
	.endm
#endif

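/*
 * With pseudo-NMIs, ICC_PMR_EL1 carries the normal-IRQ mask state: these
 * macros program the PMR with the GIC_PRIO_PSR_I_SET flag so that, roughly,
 * regular IRQs are kept masked while NMIs stay deliverable during kernel
 * entry and IRQ handling respectively.
 */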
	.macro	gic_prio_kentry_setup, tmp:req
#ifdef CONFIG_ARM64_PSEUDO_NMI
	alternative_if ARM64_HAS_IRQ_PRIO_MASKING
	mov	\tmp, #(GIC_PRIO_PSR_I_SET | GIC_PRIO_IRQON)
	msr_s	SYS_ICC_PMR_EL1, \tmp
	alternative_else_nop_endif
#endif
	.endm

	.macro	gic_prio_irq_setup, pmr:req, tmp:req
#ifdef CONFIG_ARM64_PSEUDO_NMI
	alternative_if ARM64_HAS_IRQ_PRIO_MASKING
	orr	\tmp, \pmr, #GIC_PRIO_PSR_I_SET
	msr_s	SYS_ICC_PMR_EL1, \tmp
	alternative_else_nop_endif
#endif
	.endm

	.text

/*
 * Exception vectors.
 */
	.pushsection ".entry.text", "ax"

	.align	11
ENTRY(vectors)
	kernel_ventry	1, sync_invalid			// Synchronous EL1t
	kernel_ventry	1, irq_invalid			// IRQ EL1t
	kernel_ventry	1, fiq_invalid			// FIQ EL1t
	kernel_ventry	1, error_invalid		// Error EL1t

	kernel_ventry	1, sync				// Synchronous EL1h
	kernel_ventry	1, irq				// IRQ EL1h
	kernel_ventry	1, fiq_invalid			// FIQ EL1h
	kernel_ventry	1, error			// Error EL1h

	kernel_ventry	0, sync				// Synchronous 64-bit EL0
	kernel_ventry	0, irq				// IRQ 64-bit EL0
	kernel_ventry	0, fiq_invalid			// FIQ 64-bit EL0
	kernel_ventry	0, error			// Error 64-bit EL0

#ifdef CONFIG_COMPAT
	kernel_ventry	0, sync_compat, 32		// Synchronous 32-bit EL0
	kernel_ventry	0, irq_compat, 32		// IRQ 32-bit EL0
	kernel_ventry	0, fiq_invalid_compat, 32	// FIQ 32-bit EL0
	kernel_ventry	0, error_compat, 32		// Error 32-bit EL0
#else
	kernel_ventry	0, sync_invalid, 32		// Synchronous 32-bit EL0
	kernel_ventry	0, irq_invalid, 32		// IRQ 32-bit EL0
	kernel_ventry	0, fiq_invalid, 32		// FIQ 32-bit EL0
	kernel_ventry	0, error_invalid, 32		// Error 32-bit EL0
#endif
END(vectors)

#ifdef CONFIG_VMAP_STACK
	/*
	 * We detected an overflow in kernel_ventry, which switched to the
	 * overflow stack. Stash the exception regs, and head to our overflow
	 * handler.
	 */
__bad_stack:
	/* Restore the original x0 value */
	mrs	x0, tpidrro_el0

	/*
	 * Store the original GPRs to the new stack. The original SP (minus
	 * S_FRAME_SIZE) was stashed in tpidr_el0 by kernel_ventry.
	 */
	sub	sp, sp, #S_FRAME_SIZE
	kernel_entry 1
	mrs	x0, tpidr_el0
	add	x0, x0, #S_FRAME_SIZE
	str	x0, [sp, #S_SP]

	/* Stash the regs for handle_bad_stack */
	mov	x0, sp

	/* Time to die */
	bl	handle_bad_stack
	ASM_BUG()
#endif /* CONFIG_VMAP_STACK */

/*
 * Invalid mode handlers
 */
	.macro	inv_entry, el, reason, regsize = 64
	kernel_entry \el, \regsize
	mov	x0, sp
	mov	x1, #\reason
	mrs	x2, esr_el1
	bl	bad_mode
	ASM_BUG()
	.endm

el0_sync_invalid:
	inv_entry 0, BAD_SYNC
ENDPROC(el0_sync_invalid)

el0_irq_invalid:
	inv_entry 0, BAD_IRQ
ENDPROC(el0_irq_invalid)

el0_fiq_invalid:
	inv_entry 0, BAD_FIQ
ENDPROC(el0_fiq_invalid)

el0_error_invalid:
	inv_entry 0, BAD_ERROR
ENDPROC(el0_error_invalid)

#ifdef CONFIG_COMPAT
el0_fiq_invalid_compat:
	inv_entry 0, BAD_FIQ, 32
ENDPROC(el0_fiq_invalid_compat)
#endif

el1_sync_invalid:
	inv_entry 1, BAD_SYNC
ENDPROC(el1_sync_invalid)

el1_irq_invalid:
	inv_entry 1, BAD_IRQ
ENDPROC(el1_irq_invalid)

el1_fiq_invalid:
	inv_entry 1, BAD_FIQ
ENDPROC(el1_fiq_invalid)

el1_error_invalid:
	inv_entry 1, BAD_ERROR
ENDPROC(el1_error_invalid)

/*
 * EL1 mode handlers.
 */
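/*
 * el1_sync demultiplexes on the exception class (ESR_EL1.EC): each cmp/b.eq
 * pair below routes one class to its handler, and anything unrecognised
 * falls through to el1_inv.
 */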
	.align	6
el1_sync:
	kernel_entry 1
	mrs	x1, esr_el1			// read the syndrome register
	lsr	x24, x1, #ESR_ELx_EC_SHIFT	// exception class
	cmp	x24, #ESR_ELx_EC_DABT_CUR	// data abort in EL1
	b.eq	el1_da
	cmp	x24, #ESR_ELx_EC_IABT_CUR	// instruction abort in EL1
	b.eq	el1_ia
	cmp	x24, #ESR_ELx_EC_SYS64		// configurable trap
	b.eq	el1_undef
	cmp	x24, #ESR_ELx_EC_PC_ALIGN	// pc alignment exception
	b.eq	el1_pc
	cmp	x24, #ESR_ELx_EC_UNKNOWN	// unknown exception in EL1
	b.eq	el1_undef
	cmp	x24, #ESR_ELx_EC_BREAKPT_CUR	// debug exception in EL1
	b.ge	el1_dbg
	b	el1_inv

el1_ia:
	/*
	 * Fall through to the Data abort case
	 */
el1_da:
	/*
	 * Data abort handling
	 */
	mrs	x3, far_el1
	inherit_daif	pstate=x23, tmp=x2
	clear_address_tag x0, x3
	mov	x2, sp				// struct pt_regs
	bl	do_mem_abort

	kernel_exit 1
el1_pc:
	/*
	 * PC alignment exception handling. We don't handle SP alignment faults,
	 * since we will have hit a recursive exception when trying to push the
	 * initial pt_regs.
	 */
	mrs	x0, far_el1
	inherit_daif	pstate=x23, tmp=x2
	mov	x2, sp
	bl	do_sp_pc_abort
	ASM_BUG()
el1_undef:
	/*
	 * Undefined instruction
	 */
	inherit_daif	pstate=x23, tmp=x2
	mov	x0, sp
	bl	do_undefinstr
	kernel_exit 1
el1_dbg:
	/*
	 * Debug exception handling
	 */
	cmp	x24, #ESR_ELx_EC_BRK64		// if BRK64
	cinc	x24, x24, eq			// set bit '0'
	tbz	x24, #0, el1_inv		// EL1 only
	gic_prio_kentry_setup tmp=x3
	mrs	x0, far_el1
	mov	x2, sp				// struct pt_regs
	bl	do_debug_exception
	kernel_exit 1
el1_inv:
	// TODO: add support for undefined instructions in kernel mode
	inherit_daif	pstate=x23, tmp=x2
	mov	x0, sp
	mov	x2, x1
	mov	x1, #BAD_SYNC
	bl	bad_mode
	ASM_BUG()
ENDPROC(el1_sync)

	.align	6
el1_irq:
	kernel_entry 1
	gic_prio_irq_setup pmr=x20, tmp=x1
	enable_da_f

#ifdef CONFIG_ARM64_PSEUDO_NMI
	test_irqs_unmasked	res=x0, pmr=x20
	cbz	x0, 1f
	bl	asm_nmi_enter
1:
#endif

#ifdef CONFIG_TRACE_IRQFLAGS
	bl	trace_hardirqs_off
#endif

	irq_handler

#ifdef CONFIG_PREEMPT
	ldr	x24, [tsk, #TSK_TI_PREEMPT]	// get preempt count
alternative_if ARM64_HAS_IRQ_PRIO_MASKING
	/*
	 * DA_F were cleared at start of handling. If anything is set in DAIF,
	 * we came back from an NMI, so skip preemption.
	 */
	mrs	x0, daif
	orr	x24, x24, x0
alternative_else_nop_endif
	cbnz	x24, 1f				// preempt count != 0 || NMI return path
	bl	preempt_schedule_irq		// irq en/disable is done inside
1:
#endif

#ifdef CONFIG_ARM64_PSEUDO_NMI
	/*
	 * When using IRQ priority masking, we can get spurious interrupts while
	 * PMR is set to GIC_PRIO_IRQOFF. An NMI might also have occurred in a
	 * section with interrupts disabled. Skip tracing in those cases.
	 */
	test_irqs_unmasked	res=x0, pmr=x20
	cbz	x0, 1f
	bl	asm_nmi_exit
1:
#endif

#ifdef CONFIG_TRACE_IRQFLAGS
#ifdef CONFIG_ARM64_PSEUDO_NMI
	test_irqs_unmasked	res=x0, pmr=x20
	cbnz	x0, 1f
#endif
	bl	trace_hardirqs_on
1:
#endif

	kernel_exit 1
ENDPROC(el1_irq)

/*
 * EL0 mode handlers.
 */
	.align	6
el0_sync:
	kernel_entry 0
	mrs	x25, esr_el1			// read the syndrome register
	lsr	x24, x25, #ESR_ELx_EC_SHIFT	// exception class
	cmp	x24, #ESR_ELx_EC_SVC64		// SVC in 64-bit state
	b.eq	el0_svc
	cmp	x24, #ESR_ELx_EC_DABT_LOW	// data abort in EL0
	b.eq	el0_da
	cmp	x24, #ESR_ELx_EC_IABT_LOW	// instruction abort in EL0
	b.eq	el0_ia
	cmp	x24, #ESR_ELx_EC_FP_ASIMD	// FP/ASIMD access
	b.eq	el0_fpsimd_acc
	cmp	x24, #ESR_ELx_EC_SVE		// SVE access
	b.eq	el0_sve_acc
	cmp	x24, #ESR_ELx_EC_FP_EXC64	// FP/ASIMD exception
	b.eq	el0_fpsimd_exc
	cmp	x24, #ESR_ELx_EC_SYS64		// configurable trap
	ccmp	x24, #ESR_ELx_EC_WFx, #4, ne
	b.eq	el0_sys
	cmp	x24, #ESR_ELx_EC_SP_ALIGN	// stack alignment exception
	b.eq	el0_sp
	cmp	x24, #ESR_ELx_EC_PC_ALIGN	// pc alignment exception
	b.eq	el0_pc
	cmp	x24, #ESR_ELx_EC_UNKNOWN	// unknown exception in EL0
	b.eq	el0_undef
	cmp	x24, #ESR_ELx_EC_BREAKPT_LOW	// debug exception in EL0
	b.ge	el0_dbg
	b	el0_inv

#ifdef CONFIG_COMPAT
	.align	6
el0_sync_compat:
	kernel_entry 0, 32
	mrs	x25, esr_el1			// read the syndrome register
	lsr	x24, x25, #ESR_ELx_EC_SHIFT	// exception class
	cmp	x24, #ESR_ELx_EC_SVC32		// SVC in 32-bit state
	b.eq	el0_svc_compat
	cmp	x24, #ESR_ELx_EC_DABT_LOW	// data abort in EL0
	b.eq	el0_da
	cmp	x24, #ESR_ELx_EC_IABT_LOW	// instruction abort in EL0
	b.eq	el0_ia
	cmp	x24, #ESR_ELx_EC_FP_ASIMD	// FP/ASIMD access
	b.eq	el0_fpsimd_acc
	cmp	x24, #ESR_ELx_EC_FP_EXC32	// FP/ASIMD exception
	b.eq	el0_fpsimd_exc
	cmp	x24, #ESR_ELx_EC_PC_ALIGN	// pc alignment exception
	b.eq	el0_pc
	cmp	x24, #ESR_ELx_EC_UNKNOWN	// unknown exception in EL0
	b.eq	el0_undef
	cmp	x24, #ESR_ELx_EC_CP15_32	// CP15 MRC/MCR trap
	b.eq	el0_cp15
	cmp	x24, #ESR_ELx_EC_CP15_64	// CP15 MRRC/MCRR trap
	b.eq	el0_cp15
	cmp	x24, #ESR_ELx_EC_CP14_MR	// CP14 MRC/MCR trap
	b.eq	el0_undef
	cmp	x24, #ESR_ELx_EC_CP14_LS	// CP14 LDC/STC trap
	b.eq	el0_undef
	cmp	x24, #ESR_ELx_EC_CP14_64	// CP14 MRRC/MCRR trap
	b.eq	el0_undef
	cmp	x24, #ESR_ELx_EC_BREAKPT_LOW	// debug exception in EL0
	b.ge	el0_dbg
	b	el0_inv
el0_svc_compat:
	mov	x0, sp
	bl	el0_svc_compat_handler
	b	ret_to_user

	.align	6
el0_irq_compat:
	kernel_entry 0, 32
	b	el0_irq_naked

el0_error_compat:
	kernel_entry 0, 32
	b	el0_error_naked

el0_cp15:
	/*
	 * Trapped CP15 (MRC, MCR, MRRC, MCRR) instructions
	 */
	enable_daif
	ct_user_exit
	mov	x0, x25
	mov	x1, sp
	bl	do_cp15instr
	b	ret_to_user
#endif

el0_da:
	/*
	 * Data abort handling
	 */
	mrs	x26, far_el1
	enable_daif
	ct_user_exit
	clear_address_tag x0, x26
	mov	x1, x25
	mov	x2, sp
	bl	do_mem_abort
	b	ret_to_user
el0_ia:
	/*
	 * Instruction abort handling
	 */
	mrs	x26, far_el1
	gic_prio_kentry_setup tmp=x0
	enable_da_f
#ifdef CONFIG_TRACE_IRQFLAGS
	bl	trace_hardirqs_off
#endif
	ct_user_exit
	mov	x0, x26
	mov	x1, x25
	mov	x2, sp
	bl	do_el0_ia_bp_hardening
	b	ret_to_user
el0_fpsimd_acc:
	/*
	 * Floating Point or Advanced SIMD access
	 */
	enable_daif
	ct_user_exit
	mov	x0, x25
	mov	x1, sp
	bl	do_fpsimd_acc
	b	ret_to_user
el0_sve_acc:
	/*
	 * Scalable Vector Extension access
	 */
	enable_daif
	ct_user_exit
	mov	x0, x25
	mov	x1, sp
	bl	do_sve_acc
	b	ret_to_user
el0_fpsimd_exc:
	/*
	 * Floating Point, Advanced SIMD or SVE exception
	 */
	enable_daif
	ct_user_exit
	mov	x0, x25
	mov	x1, sp
	bl	do_fpsimd_exc
	b	ret_to_user
el0_sp:
	ldr	x26, [sp, #S_SP]
	b	el0_sp_pc
el0_pc:
	mrs	x26, far_el1
el0_sp_pc:
	/*
	 * Stack or PC alignment exception handling
	 */
	gic_prio_kentry_setup tmp=x0
	enable_da_f
#ifdef CONFIG_TRACE_IRQFLAGS
	bl	trace_hardirqs_off
#endif
	ct_user_exit
	mov	x0, x26
	mov	x1, x25
	mov	x2, sp
	bl	do_sp_pc_abort
	b	ret_to_user
el0_undef:
	/*
	 * Undefined instruction
	 */
	enable_daif
	ct_user_exit
	mov	x0, sp
	bl	do_undefinstr
	b	ret_to_user
el0_sys:
	/*
	 * System instructions, for trapped cache maintenance instructions
	 */
	enable_daif
	ct_user_exit
	mov	x0, x25
	mov	x1, sp
	bl	do_sysinstr
	b	ret_to_user
el0_dbg:
	/*
	 * Debug exception handling
	 */
	tbnz	x24, #0, el0_inv		// EL0 only
	gic_prio_kentry_setup tmp=x3
	mrs	x0, far_el1
	mov	x1, x25
	mov	x2, sp
	bl	do_debug_exception
	enable_da_f
	ct_user_exit
	b	ret_to_user
el0_inv:
	enable_daif
	ct_user_exit
	mov	x0, sp
	mov	x1, #BAD_SYNC
	mov	x2, x25
	bl	bad_el0_sync
	b	ret_to_user
ENDPROC(el0_sync)

	.align	6
el0_irq:
	kernel_entry 0
el0_irq_naked:
	gic_prio_irq_setup pmr=x20, tmp=x0
	enable_da_f

#ifdef CONFIG_TRACE_IRQFLAGS
	bl	trace_hardirqs_off
#endif

	ct_user_exit
#ifdef CONFIG_HARDEN_BRANCH_PREDICTOR
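	/*
	 * An EL0 task should not be executing from a TTBR1 (kernel-half)
	 * address, so an interrupted PC with bit 55 set is treated as
	 * suspicious and branch-predictor hardening is applied first.
	 */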
	tbz	x22, #55, 1f
	bl	do_el0_irq_bp_hardening
1:
#endif
	irq_handler

#ifdef CONFIG_TRACE_IRQFLAGS
	bl	trace_hardirqs_on
#endif
	b	ret_to_user
ENDPROC(el0_irq)

el1_error:
	kernel_entry 1
	mrs	x1, esr_el1
	gic_prio_kentry_setup tmp=x2
	enable_dbg
	mov	x0, sp
	bl	do_serror
	kernel_exit 1
ENDPROC(el1_error)

el0_error:
	kernel_entry 0
el0_error_naked:
	mrs	x1, esr_el1
	gic_prio_kentry_setup tmp=x2
	enable_dbg
	mov	x0, sp
	bl	do_serror
	enable_da_f
	ct_user_exit
	b	ret_to_user
ENDPROC(el0_error)

/*
 * Ok, we need to do extra processing, enter the slow path.
 */
work_pending:
	mov	x0, sp				// 'regs'
	bl	do_notify_resume
#ifdef CONFIG_TRACE_IRQFLAGS
	bl	trace_hardirqs_on		// enabled while in userspace
#endif
	ldr	x1, [tsk, #TSK_TI_FLAGS]	// re-check for single-step
	b	finish_ret_to_user
/*
 * "slow" syscall return path.
 */
ret_to_user:
	disable_daif
	gic_prio_kentry_setup tmp=x3
	ldr	x1, [tsk, #TSK_TI_FLAGS]
	and	x2, x1, #_TIF_WORK_MASK
	cbnz	x2, work_pending
finish_ret_to_user:
	enable_step_tsk x1, x2
#ifdef CONFIG_GCC_PLUGIN_STACKLEAK
	bl	stackleak_erase
#endif
	kernel_exit 0
ENDPROC(ret_to_user)

/*
 * SVC handler.
 */
	.align	6
el0_svc:
	gic_prio_kentry_setup tmp=x1
	mov	x0, sp
	bl	el0_svc_handler
	b	ret_to_user
ENDPROC(el0_svc)

	.popsection				// .entry.text

#ifdef CONFIG_UNMAP_KERNEL_AT_EL0
/*
 * Exception vectors trampoline.
 */
	.pushsection ".entry.tramp.text", "ax"

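/*
 * tramp_map_kernel switches ttbr1_el1 from the trampoline page tables to the
 * full kernel (swapper) page tables, which sit at a fixed offset of
 * PAGE_SIZE + RESERVED_TTBR0_SIZE, and clears USER_ASID_FLAG;
 * tramp_unmap_kernel below is the exact inverse, used on exception return.
 */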
	.macro tramp_map_kernel, tmp
	mrs	\tmp, ttbr1_el1
	add	\tmp, \tmp, #(PAGE_SIZE + RESERVED_TTBR0_SIZE)
	bic	\tmp, \tmp, #USER_ASID_FLAG
	msr	ttbr1_el1, \tmp
#ifdef CONFIG_QCOM_FALKOR_ERRATUM_1003
alternative_if ARM64_WORKAROUND_QCOM_FALKOR_E1003
	/* ASID already in \tmp[63:48] */
	movk	\tmp, #:abs_g2_nc:(TRAMP_VALIAS >> 12)
	movk	\tmp, #:abs_g1_nc:(TRAMP_VALIAS >> 12)
	/* 2MB boundary containing the vectors, so we nobble the walk cache */
	movk	\tmp, #:abs_g0_nc:((TRAMP_VALIAS & ~(SZ_2M - 1)) >> 12)
	isb
	tlbi	vae1, \tmp
	dsb	nsh
alternative_else_nop_endif
#endif /* CONFIG_QCOM_FALKOR_ERRATUM_1003 */
	.endm

	.macro tramp_unmap_kernel, tmp
	mrs	\tmp, ttbr1_el1
	sub	\tmp, \tmp, #(PAGE_SIZE + RESERVED_TTBR0_SIZE)
	orr	\tmp, \tmp, #USER_ASID_FLAG
	msr	ttbr1_el1, \tmp
	/*
	 * We avoid running the post_ttbr_update_workaround here because
	 * it's only needed by Cavium ThunderX, which requires KPTI to be
	 * disabled.
	 */
	.endm

	.macro tramp_ventry, regsize = 64
	.align	7
1:
	.if	\regsize == 64
	msr	tpidrro_el0, x30	// Restored in kernel_ventry
	.endif
	/*
	 * Defend against branch aliasing attacks by pushing a dummy
	 * entry onto the return stack and using a RET instruction to
	 * enter the full-fat kernel vectors.
	 */
	bl	2f
	b	.
2:
	tramp_map_kernel	x30
#ifdef CONFIG_RANDOMIZE_BASE
	adr	x30, tramp_vectors + PAGE_SIZE
alternative_insn isb, nop, ARM64_WORKAROUND_QCOM_FALKOR_E1003
	ldr	x30, [x30]
#else
	ldr	x30, =vectors
#endif
	prfm	plil1strm, [x30, #(1b - tramp_vectors)]
	msr	vbar_el1, x30
	add	x30, x30, #(1b - tramp_vectors)
	isb
	ret
	.endm

	.macro tramp_exit, regsize = 64
	adr	x30, tramp_vectors
	msr	vbar_el1, x30
	tramp_unmap_kernel	x30
	.if	\regsize == 64
	mrs	x30, far_el1
	.endif
	eret
	sb
	.endm

	.align	11
ENTRY(tramp_vectors)
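	/*
	 * The first eight vector slots (0x400 bytes) cover exceptions from
	 * the current EL, which are not expected while the trampoline vector
	 * base is installed, so they are left empty.
	 */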
	.space	0x400

	tramp_ventry
	tramp_ventry
	tramp_ventry
	tramp_ventry

	tramp_ventry	32
	tramp_ventry	32
	tramp_ventry	32
	tramp_ventry	32
END(tramp_vectors)

ENTRY(tramp_exit_native)
	tramp_exit
END(tramp_exit_native)

ENTRY(tramp_exit_compat)
	tramp_exit	32
END(tramp_exit_compat)

	.ltorg
	.popsection				// .entry.tramp.text
#ifdef CONFIG_RANDOMIZE_BASE
	.pushsection ".rodata", "a"
	.align PAGE_SHIFT
	.globl	__entry_tramp_data_start
__entry_tramp_data_start:
	.quad	vectors
	.popsection				// .rodata
#endif /* CONFIG_RANDOMIZE_BASE */
#endif /* CONFIG_UNMAP_KERNEL_AT_EL0 */

/*
 * Register switch for AArch64. The callee-saved registers need to be saved
 * and restored. On entry:
 *   x0 = previous task_struct (must be preserved across the switch)
 *   x1 = next task_struct
 * Previous and next are guaranteed not to be the same.
 */
ENTRY(cpu_switch_to)
	mov	x10, #THREAD_CPU_CONTEXT
	add	x8, x0, x10
	mov	x9, sp
	stp	x19, x20, [x8], #16		// store callee-saved registers
	stp	x21, x22, [x8], #16
	stp	x23, x24, [x8], #16
	stp	x25, x26, [x8], #16
	stp	x27, x28, [x8], #16
	stp	x29, x9, [x8], #16
	str	lr, [x8]
	add	x8, x1, x10
	ldp	x19, x20, [x8], #16		// restore callee-saved registers
	ldp	x21, x22, [x8], #16
	ldp	x23, x24, [x8], #16
	ldp	x25, x26, [x8], #16
	ldp	x27, x28, [x8], #16
	ldp	x29, x9, [x8], #16
	ldr	lr, [x8]
	mov	sp, x9
	msr	sp_el0, x1
	ret
ENDPROC(cpu_switch_to)
NOKPROBE(cpu_switch_to)

/*
 * This is how we return from a fork.
 */
ENTRY(ret_from_fork)
	bl	schedule_tail
	cbz	x19, 1f				// not a kernel thread
	mov	x0, x20
	blr	x19
1:	get_current_task tsk
	b	ret_to_user
ENDPROC(ret_from_fork)
NOKPROBE(ret_from_fork)

#ifdef CONFIG_ARM_SDE_INTERFACE

#include <asm/sdei.h>
#include <uapi/linux/arm_sdei.h>

	.macro sdei_handler_exit exit_mode
	/* On success, this call never returns... */
	cmp	\exit_mode, #SDEI_EXIT_SMC
	b.ne	99f
	smc	#0
	b	.
99:	hvc	#0
	b	.
	.endm

#ifdef CONFIG_UNMAP_KERNEL_AT_EL0
/*
 * The regular SDEI entry point may have been unmapped along with the rest of
 * the kernel. This trampoline restores the kernel mapping to make the x1 memory
 * argument accessible.
 *
 * This clobbers x4; __sdei_handler() will restore it from the firmware's
 * copy.
 */
.ltorg
.pushsection ".entry.tramp.text", "ax"
ENTRY(__sdei_asm_entry_trampoline)
	mrs	x4, ttbr1_el1
	tbz	x4, #USER_ASID_BIT, 1f

	tramp_map_kernel tmp=x4
	isb
	mov	x4, xzr

	/*
	 * Use reg->interrupted_regs.addr_limit to remember whether to unmap
	 * the kernel on exit.
	 */
1:	str	x4, [x1, #(SDEI_EVENT_INTREGS + S_ORIG_ADDR_LIMIT)]

#ifdef CONFIG_RANDOMIZE_BASE
	adr	x4, tramp_vectors + PAGE_SIZE
	add	x4, x4, #:lo12:__sdei_asm_trampoline_next_handler
	ldr	x4, [x4]
#else
	ldr	x4, =__sdei_asm_handler
#endif
	br	x4
ENDPROC(__sdei_asm_entry_trampoline)
NOKPROBE(__sdei_asm_entry_trampoline)

/*
 * Make the exit call and restore the original ttbr1_el1
 *
 * x0 & x1: setup for the exit API call
 * x2: exit_mode
 * x4: struct sdei_registered_event argument from registration time.
 */
ENTRY(__sdei_asm_exit_trampoline)
	ldr	x4, [x4, #(SDEI_EVENT_INTREGS + S_ORIG_ADDR_LIMIT)]
	cbnz	x4, 1f

	tramp_unmap_kernel tmp=x4

1:	sdei_handler_exit exit_mode=x2
ENDPROC(__sdei_asm_exit_trampoline)
NOKPROBE(__sdei_asm_exit_trampoline)
.ltorg
.popsection				// .entry.tramp.text
#ifdef CONFIG_RANDOMIZE_BASE
.pushsection ".rodata", "a"
__sdei_asm_trampoline_next_handler:
	.quad	__sdei_asm_handler
.popsection				// .rodata
#endif /* CONFIG_RANDOMIZE_BASE */
#endif /* CONFIG_UNMAP_KERNEL_AT_EL0 */

/*
 * Software Delegated Exception entry point.
 *
 * x0: Event number
 * x1: struct sdei_registered_event argument from registration time.
 * x2: interrupted PC
 * x3: interrupted PSTATE
 * x4: maybe clobbered by the trampoline
 *
 * Firmware has preserved x0->x17 for us; we must save/restore the rest to
 * follow SMC-CC. We save (or retrieve) all the registers as the handler may
 * want them.
 */
ENTRY(__sdei_asm_handler)
	stp	x2, x3, [x1, #SDEI_EVENT_INTREGS + S_PC]
	stp	x4, x5, [x1, #SDEI_EVENT_INTREGS + 16 * 2]
	stp	x6, x7, [x1, #SDEI_EVENT_INTREGS + 16 * 3]
	stp	x8, x9, [x1, #SDEI_EVENT_INTREGS + 16 * 4]
	stp	x10, x11, [x1, #SDEI_EVENT_INTREGS + 16 * 5]
	stp	x12, x13, [x1, #SDEI_EVENT_INTREGS + 16 * 6]
	stp	x14, x15, [x1, #SDEI_EVENT_INTREGS + 16 * 7]
	stp	x16, x17, [x1, #SDEI_EVENT_INTREGS + 16 * 8]
	stp	x18, x19, [x1, #SDEI_EVENT_INTREGS + 16 * 9]
	stp	x20, x21, [x1, #SDEI_EVENT_INTREGS + 16 * 10]
	stp	x22, x23, [x1, #SDEI_EVENT_INTREGS + 16 * 11]
	stp	x24, x25, [x1, #SDEI_EVENT_INTREGS + 16 * 12]
	stp	x26, x27, [x1, #SDEI_EVENT_INTREGS + 16 * 13]
	stp	x28, x29, [x1, #SDEI_EVENT_INTREGS + 16 * 14]
	mov	x4, sp
	stp	lr, x4, [x1, #SDEI_EVENT_INTREGS + S_LR]

	mov	x19, x1

#ifdef CONFIG_VMAP_STACK
	/*
	 * entry.S may have been using sp as a scratch register; find whether
	 * this is a normal or critical event and switch to the appropriate
	 * stack for this CPU.
	 */
	ldrb	w4, [x19, #SDEI_EVENT_PRIORITY]
	cbnz	w4, 1f
	ldr_this_cpu dst=x5, sym=sdei_stack_normal_ptr, tmp=x6
	b	2f
1:	ldr_this_cpu dst=x5, sym=sdei_stack_critical_ptr, tmp=x6
2:	mov	x6, #SDEI_STACK_SIZE
	add	x5, x5, x6
	mov	sp, x5
#endif

	/*
	 * We may have interrupted userspace, or a guest, or exit-from or
	 * return-to either of these. We can't trust sp_el0, restore it.
	 */
	mrs	x28, sp_el0
	ldr_this_cpu	dst=x0, sym=__entry_task, tmp=x1
	msr	sp_el0, x0

	/* If we interrupted the kernel, point to the previous stack/frame. */
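	/*
	 * PSTATE.M[3:2] (bits 2-3 of the interrupted PSTATE in x3) encodes
	 * the interrupted EL; CurrentEL reports our own EL in the same bit
	 * positions, so the two values can be compared directly.
	 */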
	and	x0, x3, #0xc
	mrs	x1, CurrentEL
	cmp	x0, x1
	csel	x29, x29, xzr, eq	// fp, or zero
	csel	x4, x2, xzr, eq		// elr, or zero

	stp	x29, x4, [sp, #-16]!
	mov	x29, sp

	add	x0, x19, #SDEI_EVENT_INTREGS
	mov	x1, x19
	bl	__sdei_handler

	msr	sp_el0, x28
	/* restore regs >x17 that we clobbered */
	mov	x4, x19			// keep x4 for __sdei_asm_exit_trampoline
	ldp	x28, x29, [x4, #SDEI_EVENT_INTREGS + 16 * 14]
	ldp	x18, x19, [x4, #SDEI_EVENT_INTREGS + 16 * 9]
	ldp	lr, x1, [x4, #SDEI_EVENT_INTREGS + S_LR]
	mov	sp, x1

	mov	x1, x0			// address to complete_and_resume
	/* x0 = (x0 <= 1) ? EVENT_COMPLETE:EVENT_COMPLETE_AND_RESUME */
	cmp	x0, #1
	mov_q	x2, SDEI_1_0_FN_SDEI_EVENT_COMPLETE
	mov_q	x3, SDEI_1_0_FN_SDEI_EVENT_COMPLETE_AND_RESUME
	csel	x0, x2, x3, ls

	ldr_l	x2, sdei_exit_mode

alternative_if_not ARM64_UNMAP_KERNEL_AT_EL0
	sdei_handler_exit exit_mode=x2
alternative_else_nop_endif

#ifdef CONFIG_UNMAP_KERNEL_AT_EL0
	tramp_alias	dst=x5, sym=__sdei_asm_exit_trampoline
	br	x5
#endif
ENDPROC(__sdei_asm_handler)
NOKPROBE(__sdei_asm_handler)
#endif /* CONFIG_ARM_SDE_INTERFACE */