/* SPDX-License-Identifier: GPL-2.0-only */
/*
 * Low-level exception handling code
 *
 * Copyright (C) 2012 ARM Ltd.
 * Authors:	Catalin Marinas <catalin.marinas@arm.com>
 *		Will Deacon <will.deacon@arm.com>
 */

#include <linux/arm-smccc.h>
#include <linux/init.h>
#include <linux/linkage.h>

#include <asm/alternative.h>
#include <asm/assembler.h>
#include <asm/asm-offsets.h>
#include <asm/asm_pointer_auth.h>
#include <asm/bug.h>
#include <asm/cpufeature.h>
#include <asm/errno.h>
#include <asm/esr.h>
#include <asm/irq.h>
#include <asm/memory.h>
#include <asm/mmu.h>
#include <asm/processor.h>
#include <asm/ptrace.h>
#include <asm/scs.h>
#include <asm/thread_info.h>
#include <asm/asm-uaccess.h>
#include <asm/unistd.h>
	.macro	clear_gp_regs
	.irp	n,0,1,2,3,4,5,6,7,8,9,10,11,12,13,14,15,16,17,18,19,20,21,22,23,24,25,26,27,28,29
	mov	x\n, xzr
	.endr
	.endm
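	/*
	 * For reference: the .irp above expands to thirty "mov xN, xzr"
	 * instructions covering x0-x29. x30 and sp are deliberately left
	 * alone, as the entry paths below handle them separately.
	 */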

	.macro kernel_ventry, el:req, ht:req, regsize:req, label:req
	.align 7
.Lventry_start\@:
	.if	\el == 0
	/*
	 * This must be the first instruction of the EL0 vector entries. It is
	 * skipped by the trampoline vectors, to trigger the cleanup.
	 */
	b	.Lskip_tramp_vectors_cleanup\@
	.if	\regsize == 64
	mrs	x30, tpidrro_el0
	msr	tpidrro_el0, xzr
	.else
	mov	x30, xzr
	.endif
.Lskip_tramp_vectors_cleanup\@:
	.endif

	sub	sp, sp, #PT_REGS_SIZE
#ifdef CONFIG_VMAP_STACK
	/*
	 * Test whether the SP has overflowed, without corrupting a GPR.
	 * Task and IRQ stacks are aligned so that SP & (1 << THREAD_SHIFT)
	 * should always be zero.
	 */
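	/*
	 * A worked example, assuming 16KiB stacks (THREAD_SHIFT == 14) and
	 * stack bases aligned to 2 * THREAD_SIZE: any valid SP within a stack
	 * has bit 14 clear, while an SP that has underflowed its stack base
	 * lands in the 16KiB region below, where bit 14 is set. The add/sub
	 * sequence below lets us test that bit in x0 without permanently
	 * losing either the original sp or the original x0.
	 */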
	add	sp, sp, x0			// sp' = sp + x0
	sub	x0, sp, x0			// x0' = sp' - x0 = (sp + x0) - x0 = sp
	tbnz	x0, #THREAD_SHIFT, 0f
	sub	x0, sp, x0			// x0'' = sp' - x0' = (sp + x0) - sp = x0
	sub	sp, sp, x0			// sp'' = sp' - x0 = (sp + x0) - x0 = sp
	b	el\el\ht\()_\regsize\()_\label

0:
	/*
	 * Either we've just detected an overflow, or we've taken an exception
	 * while on the overflow stack. Either way, we won't return to
	 * userspace, and can clobber EL0 registers to free up GPRs.
	 */

	/* Stash the original SP (minus PT_REGS_SIZE) in tpidr_el0. */
	msr	tpidr_el0, x0

	/* Recover the original x0 value and stash it in tpidrro_el0 */
	sub	x0, sp, x0
	msr	tpidrro_el0, x0

	/* Switch to the overflow stack */
	adr_this_cpu sp, overflow_stack + OVERFLOW_STACK_SIZE, x0

	/*
	 * Check whether we were already on the overflow stack. This may happen
	 * after panic() re-enables interrupts.
	 */
	mrs	x0, tpidr_el0			// sp of interrupted context
	sub	x0, sp, x0			// delta with top of overflow stack
	tst	x0, #~(OVERFLOW_STACK_SIZE - 1)	// within range?
	b.ne	__bad_stack			// no? -> bad stack pointer

	/* We were already on the overflow stack. Restore sp/x0 and carry on. */
	sub	sp, sp, x0
	mrs	x0, tpidrro_el0
#endif
	b	el\el\ht\()_\regsize\()_\label
.org .Lventry_start\@ + 128	// Did we overflow the ventry slot?
	.endm

	.macro	tramp_alias, dst, sym
	.set	.Lalias\@, TRAMP_VALIAS + \sym - .entry.tramp.text
	movz	\dst, :abs_g2_s:.Lalias\@
	movk	\dst, :abs_g1_nc:.Lalias\@
	movk	\dst, :abs_g0_nc:.Lalias\@
	.endm
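	/*
	 * The movz/movk triple above materializes the trampoline alias of \sym
	 * as an absolute 48-bit address; :abs_g2_s sign-extends the top 16-bit
	 * group, so no fourth instruction is needed for bits 63:48 (the same
	 * trick is spelled out at tramp_data_read_var below).
	 */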

	/*
	 * This macro corrupts x0-x3. It is the caller's duty to save/restore
	 * them if required.
	 */
	.macro	apply_ssbd, state, tmp1, tmp2
alternative_cb	ARM64_ALWAYS_SYSTEM, spectre_v4_patch_fw_mitigation_enable
	b	.L__asm_ssbd_skip\@		// Patched to NOP
alternative_cb_end
	ldr_this_cpu	\tmp2, arm64_ssbd_callback_required, \tmp1
	cbz	\tmp2, .L__asm_ssbd_skip\@
	ldr	\tmp2, [tsk, #TSK_TI_FLAGS]
	tbnz	\tmp2, #TIF_SSBD, .L__asm_ssbd_skip\@
	mov	w0, #ARM_SMCCC_ARCH_WORKAROUND_2
	mov	w1, #\state
alternative_cb	ARM64_ALWAYS_SYSTEM, smccc_patch_fw_mitigation_conduit
	nop					// Patched to SMC/HVC #0
alternative_cb_end
.L__asm_ssbd_skip\@:
	.endm
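
	/*
	 * Usage note: kernel_entry below invokes "apply_ssbd 1, ..." to enable
	 * the SSBD firmware mitigation on entry, and kernel_exit invokes
	 * "apply_ssbd 0, ..." to disable it again before returning to EL0. The
	 * skip branch and the conduit nop are both rewritten at boot by the
	 * alternative callbacks named above.
	 */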

	/* Check for MTE asynchronous tag check faults */
	.macro check_mte_async_tcf, tmp, ti_flags, thread_sctlr
#ifdef CONFIG_ARM64_MTE
	.arch_extension lse
alternative_if_not ARM64_MTE
	b	1f
alternative_else_nop_endif
	/*
	 * Asynchronous tag check faults are only possible in ASYNC (2) or
	 * ASYM (3) modes. In each of these modes bit 1 of SCTLR_EL1.TCF0 is
	 * set, so skip the check if it is unset.
	 */
	tbz	\thread_sctlr, #(SCTLR_EL1_TCF0_SHIFT + 1), 1f
	mrs_s	\tmp, SYS_TFSRE0_EL1
	tbz	\tmp, #SYS_TFSR_EL1_TF0_SHIFT, 1f
	/* Asynchronous TCF occurred for TTBR0 access, set the TI flag */
	mov	\tmp, #_TIF_MTE_ASYNC_FAULT
	add	\ti_flags, tsk, #TSK_TI_FLAGS
	stset	\tmp, [\ti_flags]
1:
#endif
	.endm

	/* Clear the MTE asynchronous tag check faults */
	.macro clear_mte_async_tcf thread_sctlr
#ifdef CONFIG_ARM64_MTE
alternative_if ARM64_MTE
	/* See comment in check_mte_async_tcf above. */
	tbz	\thread_sctlr, #(SCTLR_EL1_TCF0_SHIFT + 1), 1f
	dsb	ish
	msr_s	SYS_TFSRE0_EL1, xzr
1:
alternative_else_nop_endif
#endif
	.endm

	.macro mte_set_gcr, mte_ctrl, tmp
#ifdef CONFIG_ARM64_MTE
	ubfx	\tmp, \mte_ctrl, #MTE_CTRL_GCR_USER_EXCL_SHIFT, #16
	orr	\tmp, \tmp, #SYS_GCR_EL1_RRND
	msr_s	SYS_GCR_EL1, \tmp
#endif
	.endm

	.macro mte_set_kernel_gcr, tmp, tmp2
#ifdef CONFIG_KASAN_HW_TAGS
alternative_cb	ARM64_ALWAYS_SYSTEM, kasan_hw_tags_enable
	b	1f
alternative_cb_end
	mov	\tmp, KERNEL_GCR_EL1
	msr_s	SYS_GCR_EL1, \tmp
1:
#endif
	.endm

	.macro mte_set_user_gcr, tsk, tmp, tmp2
#ifdef CONFIG_KASAN_HW_TAGS
alternative_cb	ARM64_ALWAYS_SYSTEM, kasan_hw_tags_enable
	b	1f
alternative_cb_end
	ldr	\tmp, [\tsk, #THREAD_MTE_CTRL]

	mte_set_gcr \tmp, \tmp2
1:
#endif
	.endm

	.macro	kernel_entry, el, regsize = 64
	.if	\el == 0
	alternative_insn nop, SET_PSTATE_DIT(1), ARM64_HAS_DIT
	.endif
	.if	\regsize == 32
	mov	w0, w0				// zero upper 32 bits of x0
	.endif
	stp	x0, x1, [sp, #16 * 0]
	stp	x2, x3, [sp, #16 * 1]
	stp	x4, x5, [sp, #16 * 2]
	stp	x6, x7, [sp, #16 * 3]
	stp	x8, x9, [sp, #16 * 4]
	stp	x10, x11, [sp, #16 * 5]
	stp	x12, x13, [sp, #16 * 6]
	stp	x14, x15, [sp, #16 * 7]
	stp	x16, x17, [sp, #16 * 8]
	stp	x18, x19, [sp, #16 * 9]
	stp	x20, x21, [sp, #16 * 10]
	stp	x22, x23, [sp, #16 * 11]
	stp	x24, x25, [sp, #16 * 12]
	stp	x26, x27, [sp, #16 * 13]
	stp	x28, x29, [sp, #16 * 14]
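	/*
	 * The stores above fill in pt_regs::regs[0..29] at the base of the
	 * frame that kernel_ventry carved out with "sub sp, sp, #PT_REGS_SIZE";
	 * lr, the aborted sp, elr and spsr are saved into the remaining pt_regs
	 * fields further down.
	 */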

	.if	\el == 0
	clear_gp_regs
	mrs	x21, sp_el0
	ldr_this_cpu	tsk, __entry_task, x20
	msr	sp_el0, tsk

	/*
	 * Ensure MDSCR_EL1.SS is clear, since we can unmask debug exceptions
	 * when scheduling.
	 */
	ldr	x19, [tsk, #TSK_TI_FLAGS]
	disable_step_tsk x19, x20

	/* Check for asynchronous tag check faults in user space */
	ldr	x0, [tsk, THREAD_SCTLR_USER]
	check_mte_async_tcf x22, x23, x0

#ifdef CONFIG_ARM64_PTR_AUTH
alternative_if ARM64_HAS_ADDRESS_AUTH
	/*
	 * Enable IA for in-kernel PAC if the task had it disabled. Although
	 * this could be implemented with an unconditional MRS which would avoid
	 * a load, this was measured to be slower on Cortex-A75 and Cortex-A76.
	 *
	 * Install the kernel IA key only if IA was enabled in the task. If IA
	 * was disabled on kernel exit then we would have left the kernel IA
	 * installed so there is no need to install it again.
	 */
	tbz	x0, SCTLR_ELx_ENIA_SHIFT, 1f
	__ptrauth_keys_install_kernel_nosync tsk, x20, x22, x23
	b	2f
1:
	mrs	x0, sctlr_el1
	orr	x0, x0, SCTLR_ELx_ENIA
	msr	sctlr_el1, x0
2:
alternative_else_nop_endif
#endif

	apply_ssbd 1, x22, x23

	mte_set_kernel_gcr x22, x23

	/*
	 * Any non-self-synchronizing system register updates required for
	 * kernel entry should be placed before this point.
	 */
alternative_if ARM64_MTE
	isb
	b	1f
alternative_else_nop_endif
alternative_if ARM64_HAS_ADDRESS_AUTH
	isb
alternative_else_nop_endif
1:

	scs_load_current
	.else
	add	x21, sp, #PT_REGS_SIZE
	get_current_task tsk
	.endif /* \el == 0 */
	mrs	x22, elr_el1
	mrs	x23, spsr_el1
	stp	lr, x21, [sp, #S_LR]

	/*
	 * For exceptions from EL0, create a final frame record.
	 * For exceptions from EL1, create a synthetic frame record so the
	 * interrupted code shows up in the backtrace.
	 */
	.if	\el == 0
	stp	xzr, xzr, [sp, #S_STACKFRAME]
	.else
	stp	x29, x22, [sp, #S_STACKFRAME]
	.endif
	add	x29, sp, #S_STACKFRAME

#ifdef CONFIG_ARM64_SW_TTBR0_PAN
alternative_if_not ARM64_HAS_PAN
	bl	__swpan_entry_el\el
alternative_else_nop_endif
#endif

	stp	x22, x23, [sp, #S_PC]

	/* Not in a syscall by default (el0_svc overwrites for real syscall) */
	.if	\el == 0
	mov	w21, #NO_SYSCALL
	str	w21, [sp, #S_SYSCALLNO]
	.endif

#ifdef CONFIG_ARM64_PSEUDO_NMI
alternative_if_not ARM64_HAS_GIC_PRIO_MASKING
	b	.Lskip_pmr_save\@
alternative_else_nop_endif

	mrs_s	x20, SYS_ICC_PMR_EL1
	str	x20, [sp, #S_PMR_SAVE]
	mov	x20, #GIC_PRIO_IRQON | GIC_PRIO_PSR_I_SET
	msr_s	SYS_ICC_PMR_EL1, x20

.Lskip_pmr_save\@:
#endif

	/*
	 * Registers that may be useful after this macro is invoked:
	 *
	 * x20 - ICC_PMR_EL1
	 * x21 - aborted SP
	 * x22 - aborted PC
	 * x23 - aborted PSTATE
	 */
	.endm

	.macro	kernel_exit, el
	.if	\el != 0
	disable_daif
	.endif

#ifdef CONFIG_ARM64_PSEUDO_NMI
alternative_if_not ARM64_HAS_GIC_PRIO_MASKING
	b	.Lskip_pmr_restore\@
alternative_else_nop_endif

	ldr	x20, [sp, #S_PMR_SAVE]
	msr_s	SYS_ICC_PMR_EL1, x20

	/* Ensure priority change is seen by redistributor */
alternative_if_not ARM64_HAS_GIC_PRIO_RELAXED_SYNC
	dsb	sy
alternative_else_nop_endif

.Lskip_pmr_restore\@:
#endif

	ldp	x21, x22, [sp, #S_PC]		// load ELR, SPSR

#ifdef CONFIG_ARM64_SW_TTBR0_PAN
alternative_if_not ARM64_HAS_PAN
	bl	__swpan_exit_el\el
alternative_else_nop_endif
#endif

	.if	\el == 0
	ldr	x23, [sp, #S_SP]		// load return stack pointer
	msr	sp_el0, x23
	tst	x22, #PSR_MODE32_BIT		// native task?
	b.eq	3f

#ifdef CONFIG_ARM64_ERRATUM_845719
alternative_if ARM64_WORKAROUND_845719
#ifdef CONFIG_PID_IN_CONTEXTIDR
	mrs	x29, contextidr_el1
	msr	contextidr_el1, x29
#else
	msr	contextidr_el1, xzr
#endif
alternative_else_nop_endif
#endif
3:
	scs_save tsk

	/* Ignore asynchronous tag check faults in the uaccess routines */
	ldr	x0, [tsk, THREAD_SCTLR_USER]
	clear_mte_async_tcf x0

#ifdef CONFIG_ARM64_PTR_AUTH
alternative_if ARM64_HAS_ADDRESS_AUTH
	/*
	 * IA was enabled for in-kernel PAC. Disable it now if needed, or
	 * alternatively install the user's IA. All other per-task keys and
	 * SCTLR bits were updated on task switch.
	 *
	 * No kernel C function calls after this.
	 */
	tbz	x0, SCTLR_ELx_ENIA_SHIFT, 1f
	__ptrauth_keys_install_user tsk, x0, x1, x2
	b	2f
1:
	mrs	x0, sctlr_el1
	bic	x0, x0, SCTLR_ELx_ENIA
	msr	sctlr_el1, x0
2:
alternative_else_nop_endif
#endif

	mte_set_user_gcr tsk, x0, x1

	apply_ssbd 0, x0, x1
	.endif

	msr	elr_el1, x21			// set up the return data
	msr	spsr_el1, x22
	ldp	x0, x1, [sp, #16 * 0]
	ldp	x2, x3, [sp, #16 * 1]
	ldp	x4, x5, [sp, #16 * 2]
	ldp	x6, x7, [sp, #16 * 3]
	ldp	x8, x9, [sp, #16 * 4]
	ldp	x10, x11, [sp, #16 * 5]
	ldp	x12, x13, [sp, #16 * 6]
	ldp	x14, x15, [sp, #16 * 7]
	ldp	x16, x17, [sp, #16 * 8]
	ldp	x18, x19, [sp, #16 * 9]
	ldp	x20, x21, [sp, #16 * 10]
	ldp	x22, x23, [sp, #16 * 11]
	ldp	x24, x25, [sp, #16 * 12]
	ldp	x26, x27, [sp, #16 * 13]
	ldp	x28, x29, [sp, #16 * 14]

	.if	\el == 0
alternative_if ARM64_WORKAROUND_2966298
	tlbi	vale1, xzr
	dsb	nsh
alternative_else_nop_endif
alternative_if_not ARM64_UNMAP_KERNEL_AT_EL0
	ldr	lr, [sp, #S_LR]
	add	sp, sp, #PT_REGS_SIZE		// restore sp
	eret
alternative_else_nop_endif
#ifdef CONFIG_UNMAP_KERNEL_AT_EL0
	msr	far_el1, x29

	ldr_this_cpu	x30, this_cpu_vector, x29
	tramp_alias	x29, tramp_exit
	msr	vbar_el1, x30			// install vector table
	ldr	lr, [sp, #S_LR]			// restore x30
	add	sp, sp, #PT_REGS_SIZE		// restore sp
	br	x29
#endif
	.else
	ldr	lr, [sp, #S_LR]
	add	sp, sp, #PT_REGS_SIZE		// restore sp

	/* Ensure any device/NC reads complete */
	alternative_insn nop, "dmb sy", ARM64_WORKAROUND_1508412

	eret
	.endif
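	/*
	 * The SB below is never architecturally reached: both paths above end
	 * in ERET or BR. It is a speculation barrier, keeping the CPU from
	 * speculatively executing past the exception return.
	 */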
	sb
	.endm

#ifdef CONFIG_ARM64_SW_TTBR0_PAN
	/*
	 * Set the TTBR0 PAN bit in SPSR. When the exception is taken from
	 * EL0, there is no need to check the state of TTBR0_EL1 since
	 * accesses are always enabled.
	 * Note that the meaning of this bit differs from the ARMv8.1 PAN
	 * feature as all TTBR0_EL1 accesses are disabled, not just those to
	 * user mappings.
	 */
SYM_CODE_START_LOCAL(__swpan_entry_el1)
	mrs	x21, ttbr0_el1
	tst	x21, #TTBR_ASID_MASK		// Check for the reserved ASID
	orr	x23, x23, #PSR_PAN_BIT		// Set the emulated PAN in the saved SPSR
	b.eq	1f				// TTBR0 access already disabled
	and	x23, x23, #~PSR_PAN_BIT		// Clear the emulated PAN in the saved SPSR
SYM_INNER_LABEL(__swpan_entry_el0, SYM_L_LOCAL)
	__uaccess_ttbr0_disable x21
1:	ret
SYM_CODE_END(__swpan_entry_el1)

	/*
	 * Restore access to TTBR0_EL1. If returning to EL0, no need for SPSR
	 * PAN bit checking.
	 */
SYM_CODE_START_LOCAL(__swpan_exit_el1)
	tbnz	x22, #22, 1f			// Skip re-enabling TTBR0 access if the PSR_PAN_BIT is set
	__uaccess_ttbr0_enable x0, x1
1:	and	x22, x22, #~PSR_PAN_BIT		// ARMv8.0 CPUs do not understand this bit
	ret
SYM_CODE_END(__swpan_exit_el1)

SYM_CODE_START_LOCAL(__swpan_exit_el0)
	__uaccess_ttbr0_enable x0, x1
	/*
	 * Enable errata workarounds only if returning to user. The only
	 * workaround currently required for TTBR0_EL1 changes is for the
	 * Cavium erratum 27456 (broadcast TLBI instructions may cause I-cache
	 * corruption).
	 */
	b	post_ttbr_update_workaround
SYM_CODE_END(__swpan_exit_el0)
#endif

/* GPRs used by entry code */
tsk	.req	x28		// current thread_info

	.text

/*
 * Exception vectors.
 */
	.pushsection ".entry.text", "ax"

	.align	11
SYM_CODE_START(vectors)
	kernel_ventry	1, t, 64, sync		// Synchronous EL1t
	kernel_ventry	1, t, 64, irq		// IRQ EL1t
	kernel_ventry	1, t, 64, fiq		// FIQ EL1t
	kernel_ventry	1, t, 64, error		// Error EL1t

	kernel_ventry	1, h, 64, sync		// Synchronous EL1h
	kernel_ventry	1, h, 64, irq		// IRQ EL1h
	kernel_ventry	1, h, 64, fiq		// FIQ EL1h
	kernel_ventry	1, h, 64, error		// Error EL1h

	kernel_ventry	0, t, 64, sync		// Synchronous 64-bit EL0
	kernel_ventry	0, t, 64, irq		// IRQ 64-bit EL0
	kernel_ventry	0, t, 64, fiq		// FIQ 64-bit EL0
	kernel_ventry	0, t, 64, error		// Error 64-bit EL0

	kernel_ventry	0, t, 32, sync		// Synchronous 32-bit EL0
	kernel_ventry	0, t, 32, irq		// IRQ 32-bit EL0
	kernel_ventry	0, t, 32, fiq		// FIQ 32-bit EL0
	kernel_ventry	0, t, 32, error		// Error 32-bit EL0
SYM_CODE_END(vectors)
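
	/*
	 * Layout note: VBAR_EL1 requires 2KiB alignment (hence .align 11), and
	 * the architecture fixes each of the 16 vector entries at a 128-byte
	 * offset; this is why kernel_ventry is .align 7 and guards its size
	 * with ".org .Lventry_start\@ + 128".
	 */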

#ifdef CONFIG_VMAP_STACK
SYM_CODE_START_LOCAL(__bad_stack)
	/*
	 * We detected an overflow in kernel_ventry, which switched to the
	 * overflow stack. Stash the exception regs, and head to our overflow
	 * handler.
	 */

	/* Restore the original x0 value */
	mrs	x0, tpidrro_el0

	/*
	 * Store the original GPRs to the new stack. The original SP (minus
	 * PT_REGS_SIZE) was stashed in tpidr_el0 by kernel_ventry.
	 */
	sub	sp, sp, #PT_REGS_SIZE
	kernel_entry 1
	mrs	x0, tpidr_el0
	add	x0, x0, #PT_REGS_SIZE
	str	x0, [sp, #S_SP]

	/* Stash the regs for handle_bad_stack */
	mov	x0, sp

	/* Time to die */
	bl	handle_bad_stack
	ASM_BUG()
SYM_CODE_END(__bad_stack)
#endif /* CONFIG_VMAP_STACK */

	.macro entry_handler el:req, ht:req, regsize:req, label:req
SYM_CODE_START_LOCAL(el\el\ht\()_\regsize\()_\label)
	kernel_entry \el, \regsize
	mov	x0, sp
	bl	el\el\ht\()_\regsize\()_\label\()_handler
	.if \el == 0
	b	ret_to_user
	.else
	b	ret_to_kernel
	.endif
SYM_CODE_END(el\el\ht\()_\regsize\()_\label)
	.endm
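
	/*
	 * For example, "entry_handler 0, t, 64, sync" expands to the vector
	 * target el0t_64_sync, which saves state with kernel_entry and then
	 * calls the C routine el0t_64_sync_handler(regs) in entry-common.c.
	 */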

/*
 * Early exception handlers
 */
	entry_handler	1, t, 64, sync
	entry_handler	1, t, 64, irq
	entry_handler	1, t, 64, fiq
	entry_handler	1, t, 64, error

	entry_handler	1, h, 64, sync
	entry_handler	1, h, 64, irq
	entry_handler	1, h, 64, fiq
	entry_handler	1, h, 64, error

	entry_handler	0, t, 64, sync
	entry_handler	0, t, 64, irq
	entry_handler	0, t, 64, fiq
	entry_handler	0, t, 64, error

	entry_handler	0, t, 32, sync
	entry_handler	0, t, 32, irq
	entry_handler	0, t, 32, fiq
	entry_handler	0, t, 32, error

SYM_CODE_START_LOCAL(ret_to_kernel)
	kernel_exit 1
SYM_CODE_END(ret_to_kernel)

SYM_CODE_START_LOCAL(ret_to_user)
	ldr	x19, [tsk, #TSK_TI_FLAGS]	// re-check for single-step
	enable_step_tsk x19, x2
#ifdef CONFIG_GCC_PLUGIN_STACKLEAK
	bl	stackleak_erase_on_task_stack
#endif
	kernel_exit 0
SYM_CODE_END(ret_to_user)

	.popsection				// .entry.text

	// Move from tramp_pg_dir to swapper_pg_dir
	.macro tramp_map_kernel, tmp
	mrs	\tmp, ttbr1_el1
	add	\tmp, \tmp, #TRAMP_SWAPPER_OFFSET
	bic	\tmp, \tmp, #USER_ASID_FLAG
	msr	ttbr1_el1, \tmp
#ifdef CONFIG_QCOM_FALKOR_ERRATUM_1003
alternative_if ARM64_WORKAROUND_QCOM_FALKOR_E1003
	/* ASID already in \tmp[63:48] */
	movk	\tmp, #:abs_g2_nc:(TRAMP_VALIAS >> 12)
	movk	\tmp, #:abs_g1_nc:(TRAMP_VALIAS >> 12)
	/* 2MB boundary containing the vectors, so we nobble the walk cache */
	movk	\tmp, #:abs_g0_nc:((TRAMP_VALIAS & ~(SZ_2M - 1)) >> 12)
	isb
	tlbi	vae1, \tmp
	dsb	nsh
alternative_else_nop_endif
#endif /* CONFIG_QCOM_FALKOR_ERRATUM_1003 */
	.endm

	// Move from swapper_pg_dir to tramp_pg_dir
	.macro tramp_unmap_kernel, tmp
	mrs	\tmp, ttbr1_el1
	sub	\tmp, \tmp, #TRAMP_SWAPPER_OFFSET
	orr	\tmp, \tmp, #USER_ASID_FLAG
	msr	ttbr1_el1, \tmp
	/*
	 * We avoid running the post_ttbr_update_workaround here because
	 * it's only needed by Cavium ThunderX, which requires KPTI to be
	 * disabled.
	 */
	.endm

	.macro		tramp_data_read_var	dst, var
#ifdef CONFIG_RELOCATABLE
	ldr		\dst, .L__tramp_data_\var
	.ifndef		.L__tramp_data_\var
	.pushsection	".entry.tramp.rodata", "a", %progbits
	.align		3
.L__tramp_data_\var:
	.quad		\var
	.popsection
	.endif
#else
	/*
	 * As !RELOCATABLE implies !RANDOMIZE_BASE the address is always a
	 * compile time constant (and hence not secret and not worth hiding).
	 *
	 * As statically allocated kernel code and data always live in the top
	 * 47 bits of the address space we can sign-extend bit 47 and avoid an
	 * instruction to load the upper 16 bits (which must be 0xFFFF).
	 */
	movz	\dst, :abs_g2_s:\var
	movk	\dst, :abs_g1_nc:\var
	movk	\dst, :abs_g0_nc:\var
#endif
	.endm

#define BHB_MITIGATION_NONE	0
#define BHB_MITIGATION_LOOP	1
#define BHB_MITIGATION_FW	2
#define BHB_MITIGATION_INSN	3
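
	/*
	 * These constants pick which Spectre-BHB sequence tramp_ventry emits:
	 * LOOP runs the branch-history clearing loop, FW calls into firmware,
	 * INSN uses the dedicated CLRBHB instruction, and NONE emits no
	 * mitigation at all.
	 */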

	.macro tramp_ventry, vector_start, regsize, kpti, bhb
	.align	7
1:
	.if	\regsize == 64
	msr	tpidrro_el0, x30	// Restored in kernel_ventry
	.endif

	.if	\bhb == BHB_MITIGATION_LOOP
	/*
	 * This sequence must appear before the first indirect branch. i.e. the
	 * ret out of tramp_ventry. It appears here because x30 is free.
	 */
	__mitigate_spectre_bhb_loop	x30
	.endif // \bhb == BHB_MITIGATION_LOOP

	.if	\bhb == BHB_MITIGATION_INSN
	clearbhb
	isb
	.endif // \bhb == BHB_MITIGATION_INSN

	.if	\kpti == 1
	/*
	 * Defend against branch aliasing attacks by pushing a dummy
	 * entry onto the return stack and using a RET instruction to
	 * enter the full-fat kernel vectors.
	 */
	bl	2f
	b	.
2:
	tramp_map_kernel	x30
alternative_insn isb, nop, ARM64_WORKAROUND_QCOM_FALKOR_E1003
	tramp_data_read_var	x30, vectors
alternative_if_not ARM64_WORKAROUND_CAVIUM_TX2_219_PRFM
	prfm	plil1strm, [x30, #(1b - \vector_start)]
alternative_else_nop_endif

	msr	vbar_el1, x30
	isb
	.else
	adr_l	x30, vectors
	.endif // \kpti == 1

	.if	\bhb == BHB_MITIGATION_FW
	/*
	 * The firmware sequence must appear before the first indirect branch.
	 * i.e. the ret out of tramp_ventry. But it also needs the stack to be
	 * mapped to save/restore the registers the SMC clobbers.
	 */
	__mitigate_spectre_bhb_fw
	.endif // \bhb == BHB_MITIGATION_FW

	add	x30, x30, #(1b - \vector_start + 4)
	ret
.org 1b + 128	// Did we overflow the ventry slot?
	.endm

	.macro	generate_tramp_vector,	kpti, bhb
.Lvector_start\@:
	.space	0x400

	.rept	4
	tramp_ventry	.Lvector_start\@, 64, \kpti, \bhb
	.endr
	.rept	4
	tramp_ventry	.Lvector_start\@, 32, \kpti, \bhb
	.endr
	.endm

#ifdef CONFIG_UNMAP_KERNEL_AT_EL0
/*
 * Exception vectors trampoline.
 * The order must match __bp_harden_el1_vectors and the
 * arm64_bp_harden_el1_vectors enum.
 */
	.pushsection ".entry.tramp.text", "ax"
	.align	11
SYM_CODE_START_LOCAL_NOALIGN(tramp_vectors)
#ifdef CONFIG_MITIGATE_SPECTRE_BRANCH_HISTORY
	generate_tramp_vector	kpti=1, bhb=BHB_MITIGATION_LOOP
	generate_tramp_vector	kpti=1, bhb=BHB_MITIGATION_FW
	generate_tramp_vector	kpti=1, bhb=BHB_MITIGATION_INSN
#endif /* CONFIG_MITIGATE_SPECTRE_BRANCH_HISTORY */
	generate_tramp_vector	kpti=1, bhb=BHB_MITIGATION_NONE
SYM_CODE_END(tramp_vectors)

SYM_CODE_START_LOCAL(tramp_exit)
	tramp_unmap_kernel	x29
	mrs	x29, far_el1			// restore x29
	eret
	sb
SYM_CODE_END(tramp_exit)
	.popsection				// .entry.tramp.text
#endif /* CONFIG_UNMAP_KERNEL_AT_EL0 */

/*
 * Exception vectors for spectre mitigations on entry from EL1 when
 * kpti is not in use.
 */
	.macro generate_el1_vector, bhb
.Lvector_start\@:
	kernel_ventry	1, t, 64, sync		// Synchronous EL1t
	kernel_ventry	1, t, 64, irq		// IRQ EL1t
	kernel_ventry	1, t, 64, fiq		// FIQ EL1t
	kernel_ventry	1, t, 64, error		// Error EL1t

	kernel_ventry	1, h, 64, sync		// Synchronous EL1h
	kernel_ventry	1, h, 64, irq		// IRQ EL1h
	kernel_ventry	1, h, 64, fiq		// FIQ EL1h
	kernel_ventry	1, h, 64, error		// Error EL1h

	.rept	4
	tramp_ventry	.Lvector_start\@, 64, 0, \bhb
	.endr
	.rept	4
	tramp_ventry	.Lvector_start\@, 32, 0, \bhb
	.endr
	.endm

/* The order must match tramp_vecs and the arm64_bp_harden_el1_vectors enum. */
	.pushsection ".entry.text", "ax"
	.align	11
SYM_CODE_START(__bp_harden_el1_vectors)
#ifdef CONFIG_MITIGATE_SPECTRE_BRANCH_HISTORY
	generate_el1_vector	bhb=BHB_MITIGATION_LOOP
	generate_el1_vector	bhb=BHB_MITIGATION_FW
	generate_el1_vector	bhb=BHB_MITIGATION_INSN
#endif /* CONFIG_MITIGATE_SPECTRE_BRANCH_HISTORY */
SYM_CODE_END(__bp_harden_el1_vectors)
	.popsection


/*
 * Register switch for AArch64. The callee-saved registers need to be saved
 * and restored. On entry:
 *   x0 = previous task_struct (must be preserved across the switch)
 *   x1 = next task_struct
 * Previous and next are guaranteed not to be the same.
 *
 */
SYM_FUNC_START(cpu_switch_to)
	mov	x10, #THREAD_CPU_CONTEXT
	add	x8, x0, x10
	mov	x9, sp
	stp	x19, x20, [x8], #16		// store callee-saved registers
	stp	x21, x22, [x8], #16
	stp	x23, x24, [x8], #16
	stp	x25, x26, [x8], #16
	stp	x27, x28, [x8], #16
	stp	x29, x9, [x8], #16
	str	lr, [x8]
	add	x8, x1, x10
	ldp	x19, x20, [x8], #16		// restore callee-saved registers
	ldp	x21, x22, [x8], #16
	ldp	x23, x24, [x8], #16
	ldp	x25, x26, [x8], #16
	ldp	x27, x28, [x8], #16
	ldp	x29, x9, [x8], #16
	ldr	lr, [x8]
	mov	sp, x9
	msr	sp_el0, x1
	ptrauth_keys_install_kernel x1, x8, x9, x10
	scs_save x0
	scs_load_current
	ret
SYM_FUNC_END(cpu_switch_to)
NOKPROBE(cpu_switch_to)
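
/*
 * Note: THREAD_CPU_CONTEXT is the asm-offsets constant for task_struct's
 * thread.cpu_context, so the stp/ldp runs above save and restore x19-x28,
 * fp, sp and lr in that structure; everything else is handled by the
 * exception entry/exit paths.
 */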

/*
 * This is how we return from a fork.
 */
SYM_CODE_START(ret_from_fork)
	bl	schedule_tail
	cbz	x19, 1f				// not a kernel thread
	mov	x0, x20
	blr	x19
1:	get_current_task tsk
	mov	x0, sp
	bl	asm_exit_to_user_mode
	b	ret_to_user
SYM_CODE_END(ret_from_fork)
NOKPROBE(ret_from_fork)
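
/*
 * x19 and x20 are part of the cpu_context set up by copy_thread(): for a
 * kernel thread, x19 holds the thread function and x20 its argument; for a
 * user fork, x19 is zero and we fall straight through to ret_to_user.
 */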

/*
 * void call_on_irq_stack(struct pt_regs *regs,
 * 		          void (*func)(struct pt_regs *));
 *
 * Calls func(regs) using this CPU's irq stack and shadow irq stack.
 */
SYM_FUNC_START(call_on_irq_stack)
#ifdef CONFIG_SHADOW_CALL_STACK
	get_current_task x16
	scs_save x16
	ldr_this_cpu scs_sp, irq_shadow_call_stack_ptr, x17
#endif

	/* Create a frame record to save our LR and SP (implicit in FP) */
	stp	x29, x30, [sp, #-16]!
	mov	x29, sp

	ldr_this_cpu x16, irq_stack_ptr, x17

	/* Move to the new stack and call the function there */
	add	sp, x16, #IRQ_STACK_SIZE
	blr	x1

	/*
	 * Restore the SP from the FP, and restore the FP and LR from the frame
	 * record.
	 */
	mov	sp, x29
	ldp	x29, x30, [sp], #16
	scs_load_current
	ret
SYM_FUNC_END(call_on_irq_stack)
NOKPROBE(call_on_irq_stack)

#ifdef CONFIG_ARM_SDE_INTERFACE

#include <asm/sdei.h>
#include <uapi/linux/arm_sdei.h>

.macro sdei_handler_exit exit_mode
	/* On success, this call never returns... */
	cmp	\exit_mode, #SDEI_EXIT_SMC
	b.ne	99f
	smc	#0
	b	.
99:	hvc	#0
	b	.
.endm
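
/*
 * exit_mode selects the SDEI conduit negotiated at probe time: the event is
 * completed with either an SMC or an HVC to firmware, and on success that
 * call resumes the interrupted context rather than returning here (hence
 * the "b ." traps after each).
 */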

#ifdef CONFIG_UNMAP_KERNEL_AT_EL0
/*
 * The regular SDEI entry point may have been unmapped along with the rest of
 * the kernel. This trampoline restores the kernel mapping to make the x1 memory
 * argument accessible.
 *
 * This clobbers x4, __sdei_handler() will restore this from firmware's
 * copy.
 */
.pushsection ".entry.tramp.text", "ax"
SYM_CODE_START(__sdei_asm_entry_trampoline)
	mrs	x4, ttbr1_el1
	tbz	x4, #USER_ASID_BIT, 1f

	tramp_map_kernel tmp=x4
	isb
	mov	x4, xzr

	/*
	 * Remember whether to unmap the kernel on exit.
	 */
1:	str	x4, [x1, #(SDEI_EVENT_INTREGS + S_SDEI_TTBR1)]
	tramp_data_read_var	x4, __sdei_asm_handler
	br	x4
SYM_CODE_END(__sdei_asm_entry_trampoline)
NOKPROBE(__sdei_asm_entry_trampoline)

/*
 * Make the exit call and restore the original ttbr1_el1
 *
 * x0 & x1: setup for the exit API call
 * x2: exit_mode
 * x4: struct sdei_registered_event argument from registration time.
 */
SYM_CODE_START(__sdei_asm_exit_trampoline)
	ldr	x4, [x4, #(SDEI_EVENT_INTREGS + S_SDEI_TTBR1)]
	cbnz	x4, 1f

	tramp_unmap_kernel	tmp=x4

1:	sdei_handler_exit exit_mode=x2
SYM_CODE_END(__sdei_asm_exit_trampoline)
NOKPROBE(__sdei_asm_exit_trampoline)
.popsection		// .entry.tramp.text
#endif /* CONFIG_UNMAP_KERNEL_AT_EL0 */

/*
 * Software Delegated Exception entry point.
 *
 * x0: Event number
 * x1: struct sdei_registered_event argument from registration time.
 * x2: interrupted PC
 * x3: interrupted PSTATE
 * x4: maybe clobbered by the trampoline
 *
 * Firmware has preserved x0->x17 for us, we must save/restore the rest to
 * follow SMC-CC. We save (or retrieve) all the registers as the handler may
 * want them.
 */
SYM_CODE_START(__sdei_asm_handler)
	stp	x2, x3, [x1, #SDEI_EVENT_INTREGS + S_PC]
	stp	x4, x5, [x1, #SDEI_EVENT_INTREGS + 16 * 2]
	stp	x6, x7, [x1, #SDEI_EVENT_INTREGS + 16 * 3]
	stp	x8, x9, [x1, #SDEI_EVENT_INTREGS + 16 * 4]
	stp	x10, x11, [x1, #SDEI_EVENT_INTREGS + 16 * 5]
	stp	x12, x13, [x1, #SDEI_EVENT_INTREGS + 16 * 6]
	stp	x14, x15, [x1, #SDEI_EVENT_INTREGS + 16 * 7]
	stp	x16, x17, [x1, #SDEI_EVENT_INTREGS + 16 * 8]
	stp	x18, x19, [x1, #SDEI_EVENT_INTREGS + 16 * 9]
	stp	x20, x21, [x1, #SDEI_EVENT_INTREGS + 16 * 10]
	stp	x22, x23, [x1, #SDEI_EVENT_INTREGS + 16 * 11]
	stp	x24, x25, [x1, #SDEI_EVENT_INTREGS + 16 * 12]
	stp	x26, x27, [x1, #SDEI_EVENT_INTREGS + 16 * 13]
	stp	x28, x29, [x1, #SDEI_EVENT_INTREGS + 16 * 14]
	mov	x4, sp
	stp	lr, x4, [x1, #SDEI_EVENT_INTREGS + S_LR]

	mov	x19, x1

	/* Store the registered-event for crash_smp_send_stop() */
	ldrb	w4, [x19, #SDEI_EVENT_PRIORITY]
	cbnz	w4, 1f
	adr_this_cpu dst=x5, sym=sdei_active_normal_event, tmp=x6
	b	2f
1:	adr_this_cpu dst=x5, sym=sdei_active_critical_event, tmp=x6
2:	str	x19, [x5]

#ifdef CONFIG_VMAP_STACK
	/*
	 * entry.S may have been using sp as a scratch register, find whether
	 * this is a normal or critical event and switch to the appropriate
	 * stack for this CPU.
	 */
	cbnz	w4, 1f
	ldr_this_cpu dst=x5, sym=sdei_stack_normal_ptr, tmp=x6
	b	2f
1:	ldr_this_cpu dst=x5, sym=sdei_stack_critical_ptr, tmp=x6
2:	mov	x6, #SDEI_STACK_SIZE
	add	x5, x5, x6
	mov	sp, x5
#endif

#ifdef CONFIG_SHADOW_CALL_STACK
	/* Use a separate shadow call stack for normal and critical events */
	cbnz	w4, 3f
	ldr_this_cpu dst=scs_sp, sym=sdei_shadow_call_stack_normal_ptr, tmp=x6
	b	4f
3:	ldr_this_cpu dst=scs_sp, sym=sdei_shadow_call_stack_critical_ptr, tmp=x6
4:
#endif

	/*
	 * We may have interrupted userspace, or a guest, or exit-from or
	 * return-to either of these. We can't trust sp_el0, restore it.
	 */
	mrs	x28, sp_el0
	ldr_this_cpu	dst=x0, sym=__entry_task, tmp=x1
	msr	sp_el0, x0

	/* If we interrupted the kernel, point to the previous stack/frame. */
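	/*
	 * PSTATE.M[3:2] (the 0xc mask applied to x3) holds the interrupted
	 * exception level; comparing it against CurrentEL tells us whether
	 * firmware took the event from the kernel (keep fp/elr for the
	 * backtrace) or from elsewhere (zero them).
	 */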
	and	x0, x3, #0xc
	mrs	x1, CurrentEL
	cmp	x0, x1
	csel	x29, x29, xzr, eq	// fp, or zero
	csel	x4, x2, xzr, eq		// elr, or zero

	stp	x29, x4, [sp, #-16]!
	mov	x29, sp

	add	x0, x19, #SDEI_EVENT_INTREGS
	mov	x1, x19
	bl	__sdei_handler

	msr	sp_el0, x28
	/* restore regs >x17 that we clobbered */
	mov	x4, x19			// keep x4 for __sdei_asm_exit_trampoline
	ldp	x28, x29, [x4, #SDEI_EVENT_INTREGS + 16 * 14]
	ldp	x18, x19, [x4, #SDEI_EVENT_INTREGS + 16 * 9]
	ldp	lr, x1, [x4, #SDEI_EVENT_INTREGS + S_LR]
	mov	sp, x1

	mov	x1, x0			// address to complete_and_resume
	/*
	 * x0 = (x0 <= SDEI_EV_FAILED) ?
	 *	EVENT_COMPLETE : EVENT_COMPLETE_AND_RESUME
	 */
	cmp	x0, #SDEI_EV_FAILED
	mov_q	x2, SDEI_1_0_FN_SDEI_EVENT_COMPLETE
	mov_q	x3, SDEI_1_0_FN_SDEI_EVENT_COMPLETE_AND_RESUME
	csel	x0, x2, x3, ls

	ldr_l	x2, sdei_exit_mode

	/* Clear the registered-event seen by crash_smp_send_stop() */
	ldrb	w3, [x4, #SDEI_EVENT_PRIORITY]
	cbnz	w3, 1f
	adr_this_cpu dst=x5, sym=sdei_active_normal_event, tmp=x6
	b	2f
1:	adr_this_cpu dst=x5, sym=sdei_active_critical_event, tmp=x6
2:	str	xzr, [x5]

alternative_if_not ARM64_UNMAP_KERNEL_AT_EL0
	sdei_handler_exit exit_mode=x2
alternative_else_nop_endif

#ifdef CONFIG_UNMAP_KERNEL_AT_EL0
	tramp_alias	dst=x5, sym=__sdei_asm_exit_trampoline
	br	x5
#endif
SYM_CODE_END(__sdei_asm_handler)
NOKPROBE(__sdei_asm_handler)

SYM_CODE_START(__sdei_handler_abort)
	mov_q	x0, SDEI_1_0_FN_SDEI_EVENT_COMPLETE_AND_RESUME
	adr	x1, 1f
	ldr_l	x2, sdei_exit_mode
	sdei_handler_exit exit_mode=x2
	// exit the handler and jump to the next instruction.
	// Exit will stomp x0-x17, PSTATE, ELR_ELx, and SPSR_ELx.
1:	ret
SYM_CODE_END(__sdei_handler_abort)
NOKPROBE(__sdei_handler_abort)
#endif /* CONFIG_ARM_SDE_INTERFACE */