/* SPDX-License-Identifier: GPL-2.0-only */
/*
 * Copyright (C) 2020 - Google Inc
 * Author: Andrew Scull <ascull@google.com>
 */

#include <linux/linkage.h>

#include <asm/assembler.h>
#include <asm/kvm_arm.h>
#include <asm/kvm_asm.h>
#include <asm/kvm_mmu.h>
#include <asm/kvm_ptrauth.h>

	.text

SYM_FUNC_START(__host_exit)
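	/* Load this CPU's host kvm_cpu_context pointer into x0 (x1 is scratch for the macro) */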
	get_host_ctxt	x0, x1

	/* Store the host regs x2 and x3 */
	stp	x2, x3,   [x0, #CPU_XREG_OFFSET(2)]

	/* Retrieve the host regs x0-x1 from the stack */
	ldp	x2, x3, [sp], #16	// x0, x1

	/* Store the host regs x0-x1 and x4-x17 */
	stp	x2, x3,   [x0, #CPU_XREG_OFFSET(0)]
	stp	x4, x5,   [x0, #CPU_XREG_OFFSET(4)]
	stp	x6, x7,   [x0, #CPU_XREG_OFFSET(6)]
	stp	x8, x9,   [x0, #CPU_XREG_OFFSET(8)]
	stp	x10, x11, [x0, #CPU_XREG_OFFSET(10)]
	stp	x12, x13, [x0, #CPU_XREG_OFFSET(12)]
	stp	x14, x15, [x0, #CPU_XREG_OFFSET(14)]
	stp	x16, x17, [x0, #CPU_XREG_OFFSET(16)]

	/* Store the host regs x18-x29, lr */
	save_callee_saved_regs x0

	/* Save the host context pointer in x29 across the function call */
	mov	x29, x0

#ifdef CONFIG_ARM64_PTR_AUTH_KERNEL
alternative_if_not ARM64_HAS_ADDRESS_AUTH
b	__skip_pauth_save
alternative_else_nop_endif

alternative_if ARM64_KVM_PROTECTED_MODE
	/* Save kernel ptrauth keys. */
	add	x18, x29, #CPU_APIAKEYLO_EL1
	ptrauth_save_state	x18, x19, x20

	/* Use hyp keys. */
	adr_this_cpu	x18, kvm_hyp_ctxt, x19
	add	x18, x18, #CPU_APIAKEYLO_EL1
	ptrauth_restore_state	x18, x19, x20
	isb
alternative_else_nop_endif
__skip_pauth_save:
#endif /* CONFIG_ARM64_PTR_AUTH_KERNEL */
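
	/* handle_trap() (nvhe/hyp-main.c) handles the host's exit; the host context is still in x0 */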
	bl	handle_trap

__host_enter_restore_full:
	/* Restore kernel keys. */
#ifdef CONFIG_ARM64_PTR_AUTH_KERNEL
alternative_if_not ARM64_HAS_ADDRESS_AUTH
b	__skip_pauth_restore
alternative_else_nop_endif

alternative_if ARM64_KVM_PROTECTED_MODE
	add	x18, x29, #CPU_APIAKEYLO_EL1
	ptrauth_restore_state	x18, x19, x20
alternative_else_nop_endif
__skip_pauth_restore:
#endif /* CONFIG_ARM64_PTR_AUTH_KERNEL */

	/* Restore host regs x0-x17 */
	ldp	x0, x1,   [x29, #CPU_XREG_OFFSET(0)]
	ldp	x2, x3,   [x29, #CPU_XREG_OFFSET(2)]
	ldp	x4, x5,   [x29, #CPU_XREG_OFFSET(4)]
	ldp	x6, x7,   [x29, #CPU_XREG_OFFSET(6)]

	/* x0-7 are used for panic arguments */
__host_enter_for_panic:
	ldp	x8, x9,   [x29, #CPU_XREG_OFFSET(8)]
	ldp	x10, x11, [x29, #CPU_XREG_OFFSET(10)]
	ldp	x12, x13, [x29, #CPU_XREG_OFFSET(12)]
	ldp	x14, x15, [x29, #CPU_XREG_OFFSET(14)]
	ldp	x16, x17, [x29, #CPU_XREG_OFFSET(16)]

	/* Restore host regs x18-x29, lr */
	restore_callee_saved_regs x29

	/* Do not touch any register after this! */
__host_enter_without_restoring:
	eret
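	/* Speculation barrier: prevent straight-line speculation past the eret */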
	sb
SYM_FUNC_END(__host_exit)

/*
 * void __noreturn __host_enter(struct kvm_cpu_context *host_ctxt);
 */
SYM_FUNC_START(__host_enter)
	mov	x29, x0
	b	__host_enter_restore_full
SYM_FUNC_END(__host_enter)

/*
 * void __noreturn __hyp_do_panic(struct kvm_cpu_context *host_ctxt, u64 spsr,
 *				   u64 elr, u64 par);
 */
SYM_FUNC_START(__hyp_do_panic)
	/* Prepare and exit to the host's panic function. */
	mov	lr, #(PSR_F_BIT | PSR_I_BIT | PSR_A_BIT | PSR_D_BIT |\
		      PSR_MODE_EL1h)
	msr	spsr_el2, lr
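	/* ERET will land in nvhe_hyp_panic_handler(); convert its hyp VA to the kernel image VA the host runs at */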
	adr_l	lr, nvhe_hyp_panic_handler
	hyp_kimg_va lr, x6
	msr	elr_el2, lr

	mov	x29, x0

#ifdef CONFIG_NVHE_EL2_DEBUG
	/* Ensure host stage-2 is disabled */
	mrs	x0, hcr_el2
	bic	x0, x0, #HCR_VM
	msr	hcr_el2, x0
	isb
	tlbi	vmalls12e1
	dsb	nsh
#endif

	/* Load the panic arguments into x0-7 */
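	/*
	 * x0 = ESR_EL2, x1 = SPSR (caller), x2 = ELR (caller), x3 = hyp PA of ELR,
	 * x4 = PAR (caller), x5 = loaded vCPU pointer, x6 = FAR_EL2, x7 = HPFAR_EL2,
	 * matching the arguments passed to nvhe_hyp_panic_handler().
	 */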
	mrs	x0, esr_el2
	mov	x4, x3
	mov	x3, x2
	hyp_pa	x3, x6
	get_vcpu_ptr x5, x6
	mrs	x6, far_el2
	mrs	x7, hpfar_el2

	/* Enter the host, conditionally restoring the host context. */
	cbz	x29, __host_enter_without_restoring
	b	__host_enter_for_panic
SYM_FUNC_END(__hyp_do_panic)

SYM_FUNC_START(__host_hvc)
	ldp	x0, x1, [sp]		// Don't fixup the stack yet

	/* No stub for you, sonny Jim */
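	/* In protected mode the host may not use the stub HVCs, so treat every HVC as a regular host exit */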
alternative_if ARM64_KVM_PROTECTED_MODE
	b	__host_exit
alternative_else_nop_endif

	/* Check for a stub HVC call */
	cmp	x0, #HVC_STUB_HCALL_NR
	b.hs	__host_exit

	add	sp, sp, #16
	/*
	 * Compute the idmap address of __kvm_handle_stub_hvc and
	 * jump there.
	 *
	 * Preserve x0-x4, which may contain stub parameters.
	 */
	adr_l	x5, __kvm_handle_stub_hvc
	hyp_pa	x5, x6
	br	x5
SYM_FUNC_END(__host_hvc)

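/*
 * EL1 synchronous exception vector: route HVC64 from the host to __host_hvc,
 * everything else to __host_exit. Must fit in a single 0x80-byte vector slot.
 */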
.macro host_el1_sync_vect
	.align 7
.L__vect_start\@:
	stp	x0, x1, [sp, #-16]!
	mrs	x0, esr_el2
	ubfx	x0, x0, #ESR_ELx_EC_SHIFT, #ESR_ELx_EC_WIDTH
	cmp	x0, #ESR_ELx_EC_HVC64
	b.eq	__host_hvc
	b	__host_exit
.L__vect_end\@:
.if ((.L__vect_end\@ - .L__vect_start\@) > 0x80)
	.error "host_el1_sync_vect larger than vector entry"
.endif
.endm

.macro invalid_host_el2_vect
	.align 7

	/*
	 * Test whether the SP has overflowed, without corrupting a GPR.
	 * nVHE hypervisor stacks are aligned so that the PAGE_SHIFT bit
	 * of SP should always be 1.
	 */
	add	sp, sp, x0			// sp' = sp + x0
	sub	x0, sp, x0			// x0' = sp' - x0 = (sp + x0) - x0 = sp
	tbz	x0, #PAGE_SHIFT, .L__hyp_sp_overflow\@
	sub	x0, sp, x0			// x0'' = sp' - x0' = (sp + x0) - sp = x0
	sub	sp, sp, x0			// sp'' = sp' - x0 = (sp + x0) - x0 = sp

	/* If a guest is loaded, panic out of it. */
	stp	x0, x1, [sp, #-16]!
	get_loaded_vcpu x0, x1
	cbnz	x0, __guest_exit_panic
	add	sp, sp, #16

	/*
	 * The panic may not be clean if the exception is taken before the host
	 * context has been saved by __host_exit or after the hyp context has
	 * been partially clobbered by __host_enter.
	 */
	b	hyp_panic

.L__hyp_sp_overflow\@:
	/* Switch to the overflow stack */
	adr_this_cpu sp, overflow_stack + OVERFLOW_STACK_SIZE, x0

	b	hyp_panic_bad_stack
	ASM_BUG()
.endm

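/*
 * Invalid EL1 vector: set up the arguments for __hyp_do_panic() (x0 = 0, i.e.
 * don't restore the host context; x1-x3 = SPSR_EL2, ELR_EL2, PAR_EL1) and panic.
 */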
.macro invalid_host_el1_vect
	.align 7
	mov	x0, xzr		/* restore_host = false */
	mrs	x1, spsr_el2
	mrs	x2, elr_el2
	mrs	x3, par_el1
	b	__hyp_do_panic
.endm

/*
 * The host vector does not use an ESB instruction in order to avoid consuming
 * SErrors that should only be consumed by the host. Guest entry is deferred by
 * __guest_enter if there are any pending asynchronous exceptions so hyp will
 * always return to the host without having consumed host SErrors.
 *
 * CONFIG_KVM_INDIRECT_VECTORS is not applied to the host vectors because the
 * host knows about the EL2 vectors already, and there is no point in hiding
 * them.
 */
	.align 11
SYM_CODE_START(__kvm_hyp_host_vector)
	invalid_host_el2_vect			// Synchronous EL2t
	invalid_host_el2_vect			// IRQ EL2t
	invalid_host_el2_vect			// FIQ EL2t
	invalid_host_el2_vect			// Error EL2t

	invalid_host_el2_vect			// Synchronous EL2h
	invalid_host_el2_vect			// IRQ EL2h
	invalid_host_el2_vect			// FIQ EL2h
	invalid_host_el2_vect			// Error EL2h

	host_el1_sync_vect			// Synchronous 64-bit EL1/EL0
	invalid_host_el1_vect			// IRQ 64-bit EL1/EL0
	invalid_host_el1_vect			// FIQ 64-bit EL1/EL0
	invalid_host_el1_vect			// Error 64-bit EL1/EL0

	host_el1_sync_vect			// Synchronous 32-bit EL1/EL0
	invalid_host_el1_vect			// IRQ 32-bit EL1/EL0
	invalid_host_el1_vect			// FIQ 32-bit EL1/EL0
	invalid_host_el1_vect			// Error 32-bit EL1/EL0
SYM_CODE_END(__kvm_hyp_host_vector)

/*
 * Forward SMC with arguments in struct kvm_cpu_context, and
 * store the result into the same struct. Assumes SMCCC 1.2 or older.
 *
 * x0: struct kvm_cpu_context*
 */
SYM_CODE_START(__kvm_hyp_host_forward_smc)
	/*
	 * Use x18 to keep the pointer to the host context because
	 * x18 is callee-saved in SMCCC but not in AAPCS64.
	 */
	mov	x18, x0
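
	/* SMCCC 1.2 allows arguments and results in x0-x17, so shuttle the whole range */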
	ldp	x0, x1,   [x18, #CPU_XREG_OFFSET(0)]
	ldp	x2, x3,   [x18, #CPU_XREG_OFFSET(2)]
	ldp	x4, x5,   [x18, #CPU_XREG_OFFSET(4)]
	ldp	x6, x7,   [x18, #CPU_XREG_OFFSET(6)]
	ldp	x8, x9,   [x18, #CPU_XREG_OFFSET(8)]
	ldp	x10, x11, [x18, #CPU_XREG_OFFSET(10)]
	ldp	x12, x13, [x18, #CPU_XREG_OFFSET(12)]
	ldp	x14, x15, [x18, #CPU_XREG_OFFSET(14)]
	ldp	x16, x17, [x18, #CPU_XREG_OFFSET(16)]

	smc	#0

	stp	x0, x1,   [x18, #CPU_XREG_OFFSET(0)]
	stp	x2, x3,   [x18, #CPU_XREG_OFFSET(2)]
	stp	x4, x5,   [x18, #CPU_XREG_OFFSET(4)]
	stp	x6, x7,   [x18, #CPU_XREG_OFFSET(6)]
	stp	x8, x9,   [x18, #CPU_XREG_OFFSET(8)]
	stp	x10, x11, [x18, #CPU_XREG_OFFSET(10)]
	stp	x12, x13, [x18, #CPU_XREG_OFFSET(12)]
	stp	x14, x15, [x18, #CPU_XREG_OFFSET(14)]
	stp	x16, x17, [x18, #CPU_XREG_OFFSET(16)]

	ret
SYM_CODE_END(__kvm_hyp_host_forward_smc)

/*
 * kvm_host_psci_cpu_entry is called through a br instruction, which requires
 * a bti j instruction, as compilers (gcc and llvm) don't insert bti j for
 * external functions, but bti c instead.
 */
SYM_CODE_START(kvm_host_psci_cpu_entry)
	bti j
	b	__kvm_host_psci_cpu_entry
SYM_CODE_END(kvm_host_psci_cpu_entry)