/*
 * Copyright (C) 2015 - ARM Ltd
 * Author: Marc Zyngier <marc.zyngier@arm.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program. If not, see <http://www.gnu.org/licenses/>.
 */

#include <linux/types.h>
#include <linux/jump_label.h>
#include <uapi/linux/psci.h>

#include <kvm/arm_psci.h>

#include <asm/kvm_asm.h>
#include <asm/kvm_emulate.h>
#include <asm/kvm_hyp.h>
#include <asm/kvm_mmu.h>
#include <asm/fpsimd.h>
#include <asm/debug-monitors.h>

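/*
 * World-switch code for arm64 KVM: trap configuration, stage-2 MMU
 * activation, vGIC/timer/debug context handling and the main
 * __kvm_vcpu_run() loop. Everything marked __hyp_text runs under the
 * EL2 (hyp) mapping, so kernel pointers must be translated with
 * kern_hyp_va() before they are dereferenced.
 */
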
static bool __hyp_text __fpsimd_enabled_nvhe(void)
{
	return !(read_sysreg(cptr_el2) & CPTR_EL2_TFP);
}

static bool __hyp_text __fpsimd_enabled_vhe(void)
{
	return !!(read_sysreg(cpacr_el1) & CPACR_EL1_FPEN);
}

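/*
 * hyp_alternate_select() (asm/kvm_hyp.h) builds a small
 * alternative-patched selector: it returns the first function on CPUs
 * without the given capability and the second one on CPUs that have
 * it (here ARM64_HAS_VIRT_HOST_EXTN, i.e. VHE). Hence the double call
 * syntax below: __fpsimd_is_enabled()() first fetches the selected
 * function pointer, then invokes it.
 */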
static hyp_alternate_select(__fpsimd_is_enabled,
			    __fpsimd_enabled_nvhe, __fpsimd_enabled_vhe,
			    ARM64_HAS_VIRT_HOST_EXTN);

bool __hyp_text __fpsimd_enabled(void)
{
	return __fpsimd_is_enabled()();
}

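/*
 * Trap configuration differs between the two modes: with VHE the host
 * itself runs at EL2 and uses CPACR_EL1 (TTA traps trace; clearing
 * FPEN/ZEN traps FP/SIMD and SVE), whereas without VHE the equivalent
 * controls live in CPTR_EL2 (TTA, TFP, TZ).
 */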
static void __hyp_text __activate_traps_vhe(void)
{
	u64 val;

	val = read_sysreg(cpacr_el1);
	val |= CPACR_EL1_TTA;
	val &= ~(CPACR_EL1_FPEN | CPACR_EL1_ZEN);
	write_sysreg(val, cpacr_el1);

	write_sysreg(kvm_get_hyp_vector(), vbar_el1);
}

static void __hyp_text __activate_traps_nvhe(void)
{
	u64 val;

	val = CPTR_EL2_DEFAULT;
	val |= CPTR_EL2_TTA | CPTR_EL2_TFP | CPTR_EL2_TZ;
	write_sysreg(val, cptr_el2);
}

static hyp_alternate_select(__activate_traps_arch,
			    __activate_traps_nvhe, __activate_traps_vhe,
			    ARM64_HAS_VIRT_HOST_EXTN);

static void __hyp_text __activate_traps(struct kvm_vcpu *vcpu)
{
	u64 val;

	/*
	 * We are about to set CPTR_EL2.TFP to trap all floating point
	 * register accesses to EL2, however, the ARM ARM clearly states that
	 * traps are only taken to EL2 if the operation would not otherwise
	 * trap to EL1. Therefore, always make sure that for 32-bit guests,
	 * we set FPEXC.EN to prevent traps to EL1, when setting the TFP bit.
	 * If FP/ASIMD is not implemented, FPEXC is UNDEFINED and any access
	 * to it will cause an exception.
	 */
	val = vcpu->arch.hcr_el2;

	if (!(val & HCR_RW) && system_supports_fpsimd()) {
		write_sysreg(1 << 30, fpexc32_el2);
		isb();
	}

	if (val & HCR_RW) /* for AArch64 only: */
		val |= HCR_TID3; /* TID3: trap feature register accesses */

	write_sysreg(val, hcr_el2);

	if (cpus_have_const_cap(ARM64_HAS_RAS_EXTN) && (val & HCR_VSE))
		write_sysreg_s(vcpu->arch.vsesr_el2, SYS_VSESR_EL2);

	/* Trap on AArch32 cp15 c15 accesses (EL1 or EL0) */
	write_sysreg(1 << 15, hstr_el2);
	/*
	 * Make sure we trap PMU access from EL0 to EL2. Also sanitize
	 * PMSELR_EL0 to make sure it never contains the cycle
	 * counter, which could make a PMXEVCNTR_EL0 access UNDEF at
	 * EL1 instead of being trapped to EL2.
	 */
	write_sysreg(0, pmselr_el0);
	write_sysreg(ARMV8_PMU_USERENR_MASK, pmuserenr_el0);
	write_sysreg(vcpu->arch.mdcr_el2, mdcr_el2);
	__activate_traps_arch()();
}

static void __hyp_text __deactivate_traps_vhe(void)
{
	extern char vectors[];	/* kernel exception vectors */
	u64 mdcr_el2 = read_sysreg(mdcr_el2);

	mdcr_el2 &= MDCR_EL2_HPMN_MASK |
		    MDCR_EL2_E2PB_MASK << MDCR_EL2_E2PB_SHIFT |
		    MDCR_EL2_TPMS;

	write_sysreg(mdcr_el2, mdcr_el2);
	write_sysreg(HCR_HOST_VHE_FLAGS, hcr_el2);
	write_sysreg(CPACR_EL1_DEFAULT, cpacr_el1);
	write_sysreg(vectors, vbar_el1);
}

static void __hyp_text __deactivate_traps_nvhe(void)
{
	u64 mdcr_el2 = read_sysreg(mdcr_el2);

	mdcr_el2 &= MDCR_EL2_HPMN_MASK;
	mdcr_el2 |= MDCR_EL2_E2PB_MASK << MDCR_EL2_E2PB_SHIFT;

	write_sysreg(mdcr_el2, mdcr_el2);
	write_sysreg(HCR_RW, hcr_el2);
	write_sysreg(CPTR_EL2_DEFAULT, cptr_el2);
}

static hyp_alternate_select(__deactivate_traps_arch,
			    __deactivate_traps_nvhe, __deactivate_traps_vhe,
			    ARM64_HAS_VIRT_HOST_EXTN);

static void __hyp_text __deactivate_traps(struct kvm_vcpu *vcpu)
{
	/*
	 * If we pended a virtual abort, preserve it until it gets
	 * cleared. See D1.14.3 (Virtual Interrupts) for details, but
	 * the crucial bit is "On taking a vSError interrupt,
	 * HCR_EL2.VSE is cleared to 0."
	 */
	if (vcpu->arch.hcr_el2 & HCR_VSE)
		vcpu->arch.hcr_el2 = read_sysreg(hcr_el2);

	__deactivate_traps_arch()();
	write_sysreg(0, hstr_el2);
	write_sysreg(0, pmuserenr_el0);
}

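/*
 * Stage-2 translation is switched on by loading the VM's VTTBR (VMID
 * plus stage-2 page-table base) into VTTBR_EL2, and switched off
 * again by zeroing it.
 */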
static void __hyp_text __activate_vm(struct kvm_vcpu *vcpu)
{
	struct kvm *kvm = kern_hyp_va(vcpu->kvm);
	write_sysreg(kvm->arch.vttbr, vttbr_el2);
}

static void __hyp_text __deactivate_vm(struct kvm_vcpu *vcpu)
{
	write_sysreg(0, vttbr_el2);
}

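/*
 * While the guest runs, physical IRQs and FIQs are routed to EL2
 * (HCR_INT_OVERRIDE is HCR_IMO | HCR_FMO) and any software-pended
 * virtual interrupt lines (HCR_EL2.VI/VF, cached in
 * vcpu->arch.irq_lines) are asserted; both are withdrawn again on
 * the save path.
 */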
static void __hyp_text __vgic_save_state(struct kvm_vcpu *vcpu)
{
	if (static_branch_unlikely(&kvm_vgic_global_state.gicv3_cpuif))
		__vgic_v3_save_state(vcpu);
	else
		__vgic_v2_save_state(vcpu);

	write_sysreg(read_sysreg(hcr_el2) & ~HCR_INT_OVERRIDE, hcr_el2);
}

static void __hyp_text __vgic_restore_state(struct kvm_vcpu *vcpu)
{
	u64 val;

	val = read_sysreg(hcr_el2);
	val |= HCR_INT_OVERRIDE;
	val |= vcpu->arch.irq_lines;
	write_sysreg(val, hcr_el2);

	if (static_branch_unlikely(&kvm_vgic_global_state.gicv3_cpuif))
		__vgic_v3_restore_state(vcpu);
	else
		__vgic_v2_restore_state(vcpu);
}

static bool __hyp_text __true_value(void)
{
	return true;
}

static bool __hyp_text __false_value(void)
{
	return false;
}

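/*
 * ARM64_WORKAROUND_834220 covers a Cortex-A57 erratum that can leave
 * HPFAR_EL2 unreliable for some stage-2 faults; affected CPUs
 * therefore always recompute the IPA with an AT instruction (see
 * __populate_fault_info() below).
 */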
static hyp_alternate_select(__check_arm_834220,
			    __false_value, __true_value,
			    ARM64_WORKAROUND_834220);

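/*
 * HPFAR_EL2 reports the faulting IPA bits [47:12] in bits [39:4],
 * while PAR_EL1 reports PA[47:12] in bits [47:12]; the conversion at
 * the end of this function simply re-bases those 36 bits at bit 4.
 */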
static bool __hyp_text __translate_far_to_hpfar(u64 far, u64 *hpfar)
{
	u64 par, tmp;

	/*
	 * Resolve the IPA the hard way using the guest VA.
	 *
	 * Stage-1 translation already validated the memory access
	 * rights. As such, we can use the EL1 translation regime, and
	 * don't have to distinguish between EL0 and EL1 access.
	 *
	 * We do need to save/restore PAR_EL1 though, as we haven't
	 * saved the guest context yet, and we may return early...
	 */
	par = read_sysreg(par_el1);
	asm volatile("at s1e1r, %0" : : "r" (far));
	isb();

	tmp = read_sysreg(par_el1);
	write_sysreg(par, par_el1);

	if (unlikely(tmp & 1))
		return false; /* Translation failed, back to guest */

	/* Convert PAR to HPFAR format */
	*hpfar = ((tmp >> 12) & ((1UL << 36) - 1)) << 4;
	return true;
}

static bool __hyp_text __populate_fault_info(struct kvm_vcpu *vcpu)
{
	u8 ec;
	u64 esr;
	u64 hpfar, far;

	esr = vcpu->arch.fault.esr_el2;
	ec = ESR_ELx_EC(esr);

	if (ec != ESR_ELx_EC_DABT_LOW && ec != ESR_ELx_EC_IABT_LOW)
		return true;

	far = read_sysreg_el2(far);

	/*
	 * The HPFAR can be invalid if the stage 2 fault did not
	 * happen during a stage 1 page table walk (the ESR_EL2.S1PTW
	 * bit is clear) and one of the two following cases is true:
	 * 1. The fault was due to a permission fault
	 * 2. The processor carries erratum 834220
	 *
	 * Therefore, for all non-S1PTW faults where we either have a
	 * permission fault or the erratum workaround is enabled, we
	 * resolve the IPA using the AT instruction.
	 */
	if (!(esr & ESR_ELx_S1PTW) &&
	    (__check_arm_834220()() || (esr & ESR_ELx_FSC_TYPE) == FSC_PERM)) {
		if (!__translate_far_to_hpfar(far, &hpfar))
			return false;
	} else {
		hpfar = read_sysreg(hpfar_el2);
	}

	vcpu->arch.fault.far_el2 = far;
	vcpu->arch.fault.hpfar_el2 = hpfar;
	return true;
}

/*
 * Skip an instruction which has been emulated. Returns true if
 * execution can continue or false if we need to exit hyp mode because
 * single-step was in effect.
 */
static bool __hyp_text __skip_instr(struct kvm_vcpu *vcpu)
{
	*vcpu_pc(vcpu) = read_sysreg_el2(elr);

	if (vcpu_mode_is_32bit(vcpu)) {
		vcpu->arch.ctxt.gp_regs.regs.pstate = read_sysreg_el2(spsr);
		kvm_skip_instr32(vcpu, kvm_vcpu_trap_il_is32bit(vcpu));
		write_sysreg_el2(vcpu->arch.ctxt.gp_regs.regs.pstate, spsr);
	} else {
		*vcpu_pc(vcpu) += 4;
	}

	write_sysreg_el2(*vcpu_pc(vcpu), elr);

	if (vcpu->guest_debug & KVM_GUESTDBG_SINGLESTEP) {
		vcpu->arch.fault.esr_el2 =
			(ESR_ELx_EC_SOFTSTP_LOW << ESR_ELx_EC_SHIFT) | 0x22;
		return false;
	} else {
		return true;
	}
}

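/*
 * The world switch proper: save host state, install the guest's
 * traps and stage-2 translation, run the guest, then tear it all
 * back down. Returns an ARM_EXCEPTION_* code describing why the
 * guest stopped running.
 */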
int __hyp_text __kvm_vcpu_run(struct kvm_vcpu *vcpu)
{
	struct kvm_cpu_context *host_ctxt;
	struct kvm_cpu_context *guest_ctxt;
	bool fp_enabled;
	u64 exit_code;

	vcpu = kern_hyp_va(vcpu);

	host_ctxt = kern_hyp_va(vcpu->arch.host_cpu_context);
	host_ctxt->__hyp_running_vcpu = vcpu;
	guest_ctxt = &vcpu->arch.ctxt;

	__sysreg_save_host_state(host_ctxt);
	__debug_cond_save_host_state(vcpu);

	__activate_traps(vcpu);
	__activate_vm(vcpu);

	__vgic_restore_state(vcpu);
	__timer_enable_traps(vcpu);

	/*
	 * We must restore the 32-bit state before the sysregs, thanks
	 * to erratum #852523 (Cortex-A57) or #853709 (Cortex-A72).
	 */
	__sysreg32_restore_state(vcpu);
	__sysreg_restore_guest_state(guest_ctxt);
	__debug_restore_state(vcpu, kern_hyp_va(vcpu->arch.debug_ptr), guest_ctxt);

	/* Jump in the fire! */
again:
	exit_code = __guest_enter(vcpu, host_ctxt);
	/* And we're baaack! */

	if (ARM_EXCEPTION_CODE(exit_code) != ARM_EXCEPTION_IRQ)
		vcpu->arch.fault.esr_el2 = read_sysreg_el2(esr);
	/*
	 * We're using the raw exception code in order to only process
	 * the trap if no SError is pending. We will come back to the
	 * same PC once the SError has been injected, and replay the
	 * trapping instruction.
	 */
	if (exit_code == ARM_EXCEPTION_TRAP && !__populate_fault_info(vcpu))
		goto again;

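	/*
	 * When the GICv2 CPU interface is trapped (vgic_v2_cpuif_trap),
	 * guest accesses to it arrive here as data aborts and are
	 * emulated directly at EL2: a return of 1 means "handled,
	 * re-enter the guest", -1 means the access was illegal.
	 */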
	if (static_branch_unlikely(&vgic_v2_cpuif_trap) &&
	    exit_code == ARM_EXCEPTION_TRAP) {
		bool valid;

		valid = kvm_vcpu_trap_get_class(vcpu) == ESR_ELx_EC_DABT_LOW &&
			kvm_vcpu_trap_get_fault_type(vcpu) == FSC_FAULT &&
			kvm_vcpu_dabt_isvalid(vcpu) &&
			!kvm_vcpu_dabt_isextabt(vcpu) &&
			!kvm_vcpu_dabt_iss1tw(vcpu);

		if (valid) {
			int ret = __vgic_v2_perform_cpuif_access(vcpu);

			if (ret == 1) {
				if (__skip_instr(vcpu))
					goto again;
				else
					exit_code = ARM_EXCEPTION_TRAP;
			}

			if (ret == -1) {
				/*
				 * Promote an illegal access to an
				 * SError. If we would be returning
				 * due to single-step clear the SS
				 * bit so handle_exit knows what to
				 * do after dealing with the error.
				 */
				if (!__skip_instr(vcpu))
					*vcpu_cpsr(vcpu) &= ~DBG_SPSR_SS;
				exit_code = ARM_EXCEPTION_EL1_SERROR;
			}

			/* 0 falls through to be handled out of EL2 */
		}
	}

	if (static_branch_unlikely(&vgic_v3_cpuif_trap) &&
	    exit_code == ARM_EXCEPTION_TRAP &&
	    (kvm_vcpu_trap_get_class(vcpu) == ESR_ELx_EC_SYS64 ||
	     kvm_vcpu_trap_get_class(vcpu) == ESR_ELx_EC_CP15_32)) {
		int ret = __vgic_v3_perform_cpuif_access(vcpu);

		if (ret == 1) {
			if (__skip_instr(vcpu))
				goto again;
			else
				exit_code = ARM_EXCEPTION_TRAP;
		}

		/* 0 falls through to be handled out of EL2 */
	}

	if (cpus_have_const_cap(ARM64_HARDEN_BP_POST_GUEST_EXIT)) {
		u32 midr = read_cpuid_id();

		/* Apply BTAC predictors mitigation to all Falkor chips */
		if (((midr & MIDR_CPU_MODEL_MASK) == MIDR_QCOM_FALKOR) ||
		    ((midr & MIDR_CPU_MODEL_MASK) == MIDR_QCOM_FALKOR_V1)) {
			__qcom_hyp_sanitize_btac_predictors();
		}
	}

	fp_enabled = __fpsimd_enabled();

	__sysreg_save_guest_state(guest_ctxt);
	__sysreg32_save_state(vcpu);
	__timer_disable_traps(vcpu);
	__vgic_save_state(vcpu);

	__deactivate_traps(vcpu);
	__deactivate_vm(vcpu);

	__sysreg_restore_host_state(host_ctxt);

	if (fp_enabled) {
		__fpsimd_save_state(&guest_ctxt->gp_regs.fp_regs);
		__fpsimd_restore_state(&host_ctxt->gp_regs.fp_regs);
	}

	__debug_save_state(vcpu, kern_hyp_va(vcpu->arch.debug_ptr), guest_ctxt);
	/*
	 * This must come after restoring the host sysregs, since a non-VHE
	 * system may enable SPE here and make use of the TTBRs.
	 */
	__debug_cond_restore_host_state(vcpu);

	return exit_code;
}

static const char __hyp_panic_string[] = "HYP panic:\nPS:%08llx PC:%016llx ESR:%08llx\nFAR:%016llx HPFAR:%016llx PAR:%016llx\nVCPU:%p\n";

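/*
 * Without VHE the panic has to be funnelled back to the host via
 * __hyp_do_panic(), and the format string must be referenced by its
 * kernel address rather than a PC-relative (hyp) address. With VHE we
 * already run on the kernel's EL2 mappings and can call panic()
 * directly.
 */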
static void __hyp_text __hyp_call_panic_nvhe(u64 spsr, u64 elr, u64 par,
					     struct kvm_vcpu *vcpu)
{
	unsigned long str_va;

	/*
	 * Force the panic string to be loaded from the literal pool,
	 * making sure it is a kernel address and not a PC-relative
	 * reference.
	 */
	asm volatile("ldr %0, =__hyp_panic_string" : "=r" (str_va));

	__hyp_do_panic(str_va,
		       spsr, elr,
		       read_sysreg(esr_el2), read_sysreg_el2(far),
		       read_sysreg(hpfar_el2), par, vcpu);
}

static void __hyp_text __hyp_call_panic_vhe(u64 spsr, u64 elr, u64 par,
					    struct kvm_vcpu *vcpu)
{
	panic(__hyp_panic_string,
	      spsr, elr,
	      read_sysreg_el2(esr), read_sysreg_el2(far),
	      read_sysreg(hpfar_el2), par, vcpu);
}

static hyp_alternate_select(__hyp_call_panic,
			    __hyp_call_panic_nvhe, __hyp_call_panic_vhe,
			    ARM64_HAS_VIRT_HOST_EXTN);

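/*
 * A non-zero VTTBR_EL2 means we crashed with a guest context loaded,
 * so restore enough host state for the panic path to work before
 * reporting.
 */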
void __hyp_text __noreturn hyp_panic(struct kvm_cpu_context *__host_ctxt)
{
	struct kvm_vcpu *vcpu = NULL;

	u64 spsr = read_sysreg_el2(spsr);
	u64 elr = read_sysreg_el2(elr);
	u64 par = read_sysreg(par_el1);

	if (read_sysreg(vttbr_el2)) {
		struct kvm_cpu_context *host_ctxt;

		host_ctxt = kern_hyp_va(__host_ctxt);
		vcpu = host_ctxt->__hyp_running_vcpu;
		__timer_disable_traps(vcpu);
		__deactivate_traps(vcpu);
		__deactivate_vm(vcpu);
		__sysreg_restore_host_state(host_ctxt);
	}

	/* Call panic for real */
	__hyp_call_panic()(spsr, elr, par, vcpu);

	unreachable();
}