/*
 * Copyright (C) 2015 - ARM Ltd
 * Author: Marc Zyngier <marc.zyngier@arm.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program.  If not, see <http://www.gnu.org/licenses/>.
 */

#include <linux/types.h>
#include <linux/jump_label.h>

#include <asm/kvm_asm.h>
#include <asm/kvm_emulate.h>
#include <asm/kvm_hyp.h>
#include <asm/fpsimd.h>

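/*
 * FP/SIMD trapping is controlled by CPTR_EL2.TFP on non-VHE systems and
 * by CPACR_EL1.FPEN under VHE. The trap is armed on guest entry and only
 * cleared by the lazy FP switch on the guest's first FP/SIMD access, so
 * "enabled" here means the guest currently owns the FP/SIMD registers.
 */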
static bool __hyp_text __fpsimd_enabled_nvhe(void)
{
	return !(read_sysreg(cptr_el2) & CPTR_EL2_TFP);
}

static bool __hyp_text __fpsimd_enabled_vhe(void)
{
	return !!(read_sysreg(cpacr_el1) & CPACR_EL1_FPEN);
}

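/*
 * hyp_alternate_select() defines a helper that is patched at boot,
 * based on whether the named cpufeature capability is present, to
 * return a pointer to one of the two alternatives; the doubled () at
 * each call site then invokes the selected variant without taking a
 * runtime branch.
 */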
static hyp_alternate_select(__fpsimd_is_enabled,
			    __fpsimd_enabled_nvhe, __fpsimd_enabled_vhe,
			    ARM64_HAS_VIRT_HOST_EXTN);

bool __hyp_text __fpsimd_enabled(void)
{
	return __fpsimd_is_enabled()();
}

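/*
 * Trap activation routes debug/trace, FP/SIMD and SVE accesses to EL2
 * while the guest runs. On VHE this is expressed through CPACR_EL1, on
 * non-VHE through CPTR_EL2; both leave FP/SIMD trapping so the FP state
 * can be switched lazily on first use.
 */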
static void __hyp_text __activate_traps_vhe(void)
{
	u64 val;

	val = read_sysreg(cpacr_el1);
	val |= CPACR_EL1_TTA;
	val &= ~(CPACR_EL1_FPEN | CPACR_EL1_ZEN);
	write_sysreg(val, cpacr_el1);

	write_sysreg(__kvm_hyp_vector, vbar_el1);
}

static void __hyp_text __activate_traps_nvhe(void)
{
	u64 val;

	val = CPTR_EL2_DEFAULT;
	val |= CPTR_EL2_TTA | CPTR_EL2_TFP | CPTR_EL2_TZ;
	write_sysreg(val, cptr_el2);
}

static hyp_alternate_select(__activate_traps_arch,
			    __activate_traps_nvhe, __activate_traps_vhe,
			    ARM64_HAS_VIRT_HOST_EXTN);

static void __hyp_text __activate_traps(struct kvm_vcpu *vcpu)
{
	u64 val;

	/*
	 * We are about to set CPTR_EL2.TFP to trap all floating point
	 * register accesses to EL2. However, the ARM ARM clearly states
	 * that traps are only taken to EL2 if the operation would not
	 * otherwise trap to EL1. Therefore, when setting the TFP bit,
	 * always make sure that for 32-bit guests we also set FPEXC.EN
	 * to prevent traps to EL1.
	 * If FP/ASIMD is not implemented, FPEXC is UNDEFINED and any
	 * access to it will cause an exception.
	 */
	val = vcpu->arch.hcr_el2;

	if (!(val & HCR_RW) && system_supports_fpsimd()) {
		write_sysreg(1 << 30, fpexc32_el2);
		isb();
	}

	if (val & HCR_RW) /* for AArch64 only: */
		val |= HCR_TID3; /* TID3: trap feature register accesses */

	write_sysreg(val, hcr_el2);

	/* Trap on AArch32 cp15 c15 accesses (EL1 or EL0) */
	write_sysreg(1 << 15, hstr_el2);
	/*
	 * Make sure we trap PMU access from EL0 to EL2. Also sanitize
	 * PMSELR_EL0 to make sure it never contains the cycle
	 * counter, which could make a PMXEVCNTR_EL0 access UNDEF at
	 * EL1 instead of being trapped to EL2.
	 */
	write_sysreg(0, pmselr_el0);
	write_sysreg(ARMV8_PMU_USERENR_MASK, pmuserenr_el0);
	write_sysreg(vcpu->arch.mdcr_el2, mdcr_el2);
	__activate_traps_arch()();
}

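/*
 * Trap deactivation restores the host's trap configuration. MDCR_EL2 is
 * rebuilt rather than zeroed so that HPMN (and, where SPE is present,
 * the E2PB/TPMS profiling controls) keep sane host values.
 */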
static void __hyp_text __deactivate_traps_vhe(void)
{
	extern char vectors[];	/* kernel exception vectors */
	u64 mdcr_el2 = read_sysreg(mdcr_el2);

	mdcr_el2 &= MDCR_EL2_HPMN_MASK |
		    MDCR_EL2_E2PB_MASK << MDCR_EL2_E2PB_SHIFT |
		    MDCR_EL2_TPMS;

	write_sysreg(mdcr_el2, mdcr_el2);
	write_sysreg(HCR_HOST_VHE_FLAGS, hcr_el2);
	write_sysreg(CPACR_EL1_DEFAULT, cpacr_el1);
	write_sysreg(vectors, vbar_el1);
}

static void __hyp_text __deactivate_traps_nvhe(void)
{
	u64 mdcr_el2 = read_sysreg(mdcr_el2);

	mdcr_el2 &= MDCR_EL2_HPMN_MASK;
	mdcr_el2 |= MDCR_EL2_E2PB_MASK << MDCR_EL2_E2PB_SHIFT;

	write_sysreg(mdcr_el2, mdcr_el2);
	write_sysreg(HCR_RW, hcr_el2);
	write_sysreg(CPTR_EL2_DEFAULT, cptr_el2);
}

static hyp_alternate_select(__deactivate_traps_arch,
			    __deactivate_traps_nvhe, __deactivate_traps_vhe,
			    ARM64_HAS_VIRT_HOST_EXTN);

static void __hyp_text __deactivate_traps(struct kvm_vcpu *vcpu)
{
	/*
	 * If we pended a virtual abort, preserve it until it gets
	 * cleared. See D1.14.3 (Virtual Interrupts) for details, but
	 * the crucial bit is "On taking a vSError interrupt,
	 * HCR_EL2.VSE is cleared to 0."
	 */
	if (vcpu->arch.hcr_el2 & HCR_VSE)
		vcpu->arch.hcr_el2 = read_sysreg(hcr_el2);

	__deactivate_traps_arch()();
	write_sysreg(0, hstr_el2);
	write_sysreg(0, pmuserenr_el0);
}

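/*
 * VTTBR_EL2 holds both the stage-2 page table base and the VMID, so a
 * single register write is enough to expose the guest's stage-2
 * translation; it is zeroed again on the way back to the host.
 */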
static void __hyp_text __activate_vm(struct kvm_vcpu *vcpu)
{
	struct kvm *kvm = kern_hyp_va(vcpu->kvm);
	write_sysreg(kvm->arch.vttbr, vttbr_el2);
}

static void __hyp_text __deactivate_vm(struct kvm_vcpu *vcpu)
{
	write_sysreg(0, vttbr_el2);
}

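/*
 * The vgic state lives behind either the GICv3 system register
 * interface or the GICv2 MMIO interface; the static key picks the
 * backend once at boot. Restore also sets HCR_INT_OVERRIDE (IMO/FMO)
 * so physical interrupts are taken to EL2 while the guest runs, and
 * merges in any pending virtual IRQ/FIQ lines from
 * vcpu->arch.irq_lines.
 */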
static void __hyp_text __vgic_save_state(struct kvm_vcpu *vcpu)
{
	if (static_branch_unlikely(&kvm_vgic_global_state.gicv3_cpuif))
		__vgic_v3_save_state(vcpu);
	else
		__vgic_v2_save_state(vcpu);

	write_sysreg(read_sysreg(hcr_el2) & ~HCR_INT_OVERRIDE, hcr_el2);
}

static void __hyp_text __vgic_restore_state(struct kvm_vcpu *vcpu)
{
	u64 val;

	val = read_sysreg(hcr_el2);
	val |= HCR_INT_OVERRIDE;
	val |= vcpu->arch.irq_lines;
	write_sysreg(val, hcr_el2);

	if (static_branch_unlikely(&kvm_vgic_global_state.gicv3_cpuif))
		__vgic_v3_restore_state(vcpu);
	else
		__vgic_v2_restore_state(vcpu);
}

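/*
 * __check_arm_834220()() is patched into a constant predicate at boot:
 * it reads as true only on cores carrying Cortex-A57 erratum #834220,
 * where HPFAR_EL2 can be unreliable for some stage 2 faults (see
 * __populate_fault_info() below).
 */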
static bool __hyp_text __true_value(void)
{
	return true;
}

static bool __hyp_text __false_value(void)
{
	return false;
}

static hyp_alternate_select(__check_arm_834220,
			    __false_value, __true_value,
			    ARM64_WORKAROUND_834220);

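/*
 * PAR_EL1 reports the PA in bits [47:12]; HPFAR_EL2 wants FIPA[47:12]
 * placed in bits [39:4]. The mask-and-shift at the end of this function
 * performs exactly that repacking.
 */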
static bool __hyp_text __translate_far_to_hpfar(u64 far, u64 *hpfar)
{
	u64 par, tmp;

	/*
	 * Resolve the IPA the hard way using the guest VA.
	 *
	 * Stage-1 translation already validated the memory access
	 * rights. As such, we can use the EL1 translation regime, and
	 * don't have to distinguish between EL0 and EL1 access.
	 *
	 * We do need to save/restore PAR_EL1 though, as we haven't
	 * saved the guest context yet, and we may return early...
	 */
	par = read_sysreg(par_el1);
	asm volatile("at s1e1r, %0" : : "r" (far));
	isb();

	tmp = read_sysreg(par_el1);
	write_sysreg(par, par_el1);

	if (unlikely(tmp & 1))
		return false; /* Translation failed, back to guest */

	/* Convert PAR to HPFAR format */
	*hpfar = ((tmp >> 12) & ((1UL << 36) - 1)) << 4;
	return true;
}

static bool __hyp_text __populate_fault_info(struct kvm_vcpu *vcpu)
{
	u64 esr = read_sysreg_el2(esr);
	u8 ec = ESR_ELx_EC(esr);
	u64 hpfar, far;

	vcpu->arch.fault.esr_el2 = esr;

	if (ec != ESR_ELx_EC_DABT_LOW && ec != ESR_ELx_EC_IABT_LOW)
		return true;

	far = read_sysreg_el2(far);

	/*
	 * The HPFAR can be invalid if the stage 2 fault did not
	 * happen during a stage 1 page table walk (the ESR_EL2.S1PTW
	 * bit is clear) and one of the two following cases is true:
	 *   1. The fault was due to a permission fault
	 *   2. The processor carries erratum #834220
	 *
	 * Therefore, for all non-S1PTW faults where we either have a
	 * permission fault or the erratum workaround is enabled, we
	 * resolve the IPA using the AT instruction.
	 */
	if (!(esr & ESR_ELx_S1PTW) &&
	    (__check_arm_834220()() || (esr & ESR_ELx_FSC_TYPE) == FSC_PERM)) {
		if (!__translate_far_to_hpfar(far, &hpfar))
			return false;
	} else {
		hpfar = read_sysreg(hpfar_el2);
	}

	vcpu->arch.fault.far_el2 = far;
	vcpu->arch.fault.hpfar_el2 = hpfar;
	return true;
}

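/*
 * Skip the instruction the guest just trapped on. For an AArch32 guest
 * the IT state in SPSR must be advanced along with the PC, which is why
 * PSTATE takes a round trip through kvm_skip_instr32(); AArch64 guests
 * only need the PC bumped by one instruction.
 */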
static void __hyp_text __skip_instr(struct kvm_vcpu *vcpu)
{
	*vcpu_pc(vcpu) = read_sysreg_el2(elr);

	if (vcpu_mode_is_32bit(vcpu)) {
		vcpu->arch.ctxt.gp_regs.regs.pstate = read_sysreg_el2(spsr);
		kvm_skip_instr32(vcpu, kvm_vcpu_trap_il_is32bit(vcpu));
		write_sysreg_el2(vcpu->arch.ctxt.gp_regs.regs.pstate, spsr);
	} else {
		*vcpu_pc(vcpu) += 4;
	}

	write_sysreg_el2(*vcpu_pc(vcpu), elr);
}

int __hyp_text __kvm_vcpu_run(struct kvm_vcpu *vcpu)
{
	struct kvm_cpu_context *host_ctxt;
	struct kvm_cpu_context *guest_ctxt;
	bool fp_enabled;
	u64 exit_code;

	vcpu = kern_hyp_va(vcpu);
	write_sysreg(vcpu, tpidr_el2);

	host_ctxt = kern_hyp_va(vcpu->arch.host_cpu_context);
	guest_ctxt = &vcpu->arch.ctxt;

	__sysreg_save_host_state(host_ctxt);
	__debug_cond_save_host_state(vcpu);

	__activate_traps(vcpu);
	__activate_vm(vcpu);

	__vgic_restore_state(vcpu);
	__timer_enable_traps(vcpu);

	/*
	 * We must restore the 32-bit state before the sysregs, thanks
	 * to erratum #852523 (Cortex-A57) or #853709 (Cortex-A72).
	 */
	__sysreg32_restore_state(vcpu);
	__sysreg_restore_guest_state(guest_ctxt);
	__debug_restore_state(vcpu, kern_hyp_va(vcpu->arch.debug_ptr), guest_ctxt);

	/* Jump in the fire! */
again:
	exit_code = __guest_enter(vcpu, host_ctxt);
	/* And we're baaack! */

	/*
	 * We're using the raw exception code in order to only process
	 * the trap if no SError is pending. We will come back to the
	 * same PC once the SError has been injected, and replay the
	 * trapping instruction.
	 */
	if (exit_code == ARM_EXCEPTION_TRAP && !__populate_fault_info(vcpu))
		goto again;

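	/*
	 * GICv2 CPU interface emulation: when the GICV region could not
	 * be mapped into the guest, its MMIO accesses trap as data
	 * aborts and are emulated right here at EL2, avoiding a full
	 * round trip to the host for every interrupt acknowledgement.
	 */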
	if (static_branch_unlikely(&vgic_v2_cpuif_trap) &&
	    exit_code == ARM_EXCEPTION_TRAP) {
		bool valid;

		valid = kvm_vcpu_trap_get_class(vcpu) == ESR_ELx_EC_DABT_LOW &&
			kvm_vcpu_trap_get_fault_type(vcpu) == FSC_FAULT &&
			kvm_vcpu_dabt_isvalid(vcpu) &&
			!kvm_vcpu_dabt_isextabt(vcpu) &&
			!kvm_vcpu_dabt_iss1tw(vcpu);

		if (valid) {
			int ret = __vgic_v2_perform_cpuif_access(vcpu);

			if (ret == 1) {
				__skip_instr(vcpu);
				goto again;
			}

			if (ret == -1) {
				/* Promote an illegal access to an SError */
				__skip_instr(vcpu);
				exit_code = ARM_EXCEPTION_EL1_SERROR;
			}

			/* 0 falls through to be handled out of EL2 */
		}
	}

	if (static_branch_unlikely(&vgic_v3_cpuif_trap) &&
	    exit_code == ARM_EXCEPTION_TRAP &&
	    (kvm_vcpu_trap_get_class(vcpu) == ESR_ELx_EC_SYS64 ||
	     kvm_vcpu_trap_get_class(vcpu) == ESR_ELx_EC_CP15_32)) {
		int ret = __vgic_v3_perform_cpuif_access(vcpu);

		if (ret == 1) {
			__skip_instr(vcpu);
			goto again;
		}

		/* 0 falls through to be handled out of EL2 */
	}

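	/*
	 * Sample the FP/SIMD trap state before __deactivate_traps()
	 * rewrites CPTR_EL2/CPACR_EL1; a cleared trap bit means the
	 * guest used FP/SIMD, so its state must be saved and the host's
	 * restored below.
	 */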
	fp_enabled = __fpsimd_enabled();

	__sysreg_save_guest_state(guest_ctxt);
	__sysreg32_save_state(vcpu);
	__timer_disable_traps(vcpu);
	__vgic_save_state(vcpu);

	__deactivate_traps(vcpu);
	__deactivate_vm(vcpu);

	__sysreg_restore_host_state(host_ctxt);

	if (fp_enabled) {
		__fpsimd_save_state(&guest_ctxt->gp_regs.fp_regs);
		__fpsimd_restore_state(&host_ctxt->gp_regs.fp_regs);
	}

	__debug_save_state(vcpu, kern_hyp_va(vcpu->arch.debug_ptr), guest_ctxt);
	/*
	 * This must come after restoring the host sysregs, since a non-VHE
	 * system may enable SPE here and make use of the TTBRs.
	 */
	__debug_cond_restore_host_state(vcpu);

	return exit_code;
}

static const char __hyp_panic_string[] = "HYP panic:\nPS:%08llx PC:%016llx ESR:%08llx\nFAR:%016llx HPFAR:%016llx PAR:%016llx\nVCPU:%p\n";

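/*
 * On a non-VHE system EL2 runs at its own VA and cannot call panic()
 * directly, so the report is handed off to __hyp_do_panic along with a
 * kernel-VA pointer to the format string; with VHE the kernel itself
 * runs at EL2 and panic() can be invoked in place.
 */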
static void __hyp_text __hyp_call_panic_nvhe(u64 spsr, u64 elr, u64 par)
{
	unsigned long str_va;

	/*
	 * Force the panic string to be loaded from the literal pool,
	 * making sure it is a kernel address and not a PC-relative
	 * reference.
	 */
	asm volatile("ldr %0, =__hyp_panic_string" : "=r" (str_va));

	__hyp_do_panic(str_va,
		       spsr, elr,
		       read_sysreg(esr_el2), read_sysreg_el2(far),
		       read_sysreg(hpfar_el2), par,
		       (void *)read_sysreg(tpidr_el2));
}

static void __hyp_text __hyp_call_panic_vhe(u64 spsr, u64 elr, u64 par)
{
	panic(__hyp_panic_string,
	      spsr, elr,
	      read_sysreg_el2(esr), read_sysreg_el2(far),
	      read_sysreg(hpfar_el2), par,
	      (void *)read_sysreg(tpidr_el2));
}

static hyp_alternate_select(__hyp_call_panic,
			    __hyp_call_panic_nvhe, __hyp_call_panic_vhe,
			    ARM64_HAS_VIRT_HOST_EXTN);

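/*
 * A non-zero VTTBR_EL2 means the panic hit in the middle of a world
 * switch with guest context loaded, so the host context is put back
 * before reporting; TPIDR_EL2 still points at the current vcpu in that
 * case.
 */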
void __hyp_text __noreturn __hyp_panic(void)
{
	u64 spsr = read_sysreg_el2(spsr);
	u64 elr = read_sysreg_el2(elr);
	u64 par = read_sysreg(par_el1);

	if (read_sysreg(vttbr_el2)) {
		struct kvm_vcpu *vcpu;
		struct kvm_cpu_context *host_ctxt;

		vcpu = (struct kvm_vcpu *)read_sysreg(tpidr_el2);
		host_ctxt = kern_hyp_va(vcpu->arch.host_cpu_context);
		__timer_disable_traps(vcpu);
		__deactivate_traps(vcpu);
		__deactivate_vm(vcpu);
		__sysreg_restore_host_state(host_ctxt);
	}

	/* Call panic for real */
	__hyp_call_panic()(spsr, elr, par);

	unreachable();
}