/*
 * Copyright (C) 2015 - ARM Ltd
 * Author: Marc Zyngier <marc.zyngier@arm.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program. If not, see <http://www.gnu.org/licenses/>.
 */

#include <linux/types.h>
#include <linux/jump_label.h>

#include <asm/kvm_asm.h>
#include <asm/kvm_emulate.h>
#include <asm/kvm_hyp.h>
#include <asm/fpsimd.h>

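/*
 * CPU-dependent behaviour in this file is selected with
 * hyp_alternate_select(), which patches in one of two helpers at boot
 * based on a CPU capability. FP/SIMD trapping is the first user:
 * non-VHE controls it through CPTR_EL2.TFP, while VHE uses
 * CPACR_EL1.FPEN, so "is FP enabled?" must read a different register
 * depending on ARM64_HAS_VIRT_HOST_EXTN.
 */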
static bool __hyp_text __fpsimd_enabled_nvhe(void)
{
        return !(read_sysreg(cptr_el2) & CPTR_EL2_TFP);
}

static bool __hyp_text __fpsimd_enabled_vhe(void)
{
        return !!(read_sysreg(cpacr_el1) & CPACR_EL1_FPEN);
}

static hyp_alternate_select(__fpsimd_is_enabled,
                            __fpsimd_enabled_nvhe, __fpsimd_enabled_vhe,
                            ARM64_HAS_VIRT_HOST_EXTN);

bool __hyp_text __fpsimd_enabled(void)
{
        return __fpsimd_is_enabled()();
}

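/*
 * With VHE the host kernel runs at EL2, and HCR_EL2.E2H redirects EL1
 * system register names used at EL2 to their EL2 counterparts: the
 * cpacr_el1 accesses below really program CPTR_EL2 (in its VHE
 * layout) and the vbar_el1 write really programs VBAR_EL2. Clearing
 * FPEN here is what makes the guest's first FP/SIMD access trap,
 * enabling the lazy FP switch at the end of __kvm_vcpu_run().
 */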
static void __hyp_text __activate_traps_vhe(void)
{
        u64 val;

        val = read_sysreg(cpacr_el1);
        val |= CPACR_EL1_TTA;
        val &= ~CPACR_EL1_FPEN;
        write_sysreg(val, cpacr_el1);

        write_sysreg(__kvm_hyp_vector, vbar_el1);
}

static void __hyp_text __activate_traps_nvhe(void)
{
        u64 val;

        val = CPTR_EL2_DEFAULT;
        val |= CPTR_EL2_TTA | CPTR_EL2_TFP;
        write_sysreg(val, cptr_el2);
}

static hyp_alternate_select(__activate_traps_arch,
                            __activate_traps_nvhe, __activate_traps_vhe,
                            ARM64_HAS_VIRT_HOST_EXTN);

static void __hyp_text __activate_traps(struct kvm_vcpu *vcpu)
{
        u64 val;

        /*
         * We are about to set CPTR_EL2.TFP to trap all floating point
         * register accesses to EL2, however, the ARM ARM clearly states that
         * traps are only taken to EL2 if the operation would not otherwise
         * trap to EL1. Therefore, always make sure that for 32-bit guests
         * we set FPEXC.EN to prevent traps to EL1 when setting the TFP bit.
         * If FP/ASIMD is not implemented, FPEXC is UNDEFINED and any access
         * to it will cause an exception.
         */
        val = vcpu->arch.hcr_el2;
        if (!(val & HCR_RW) && system_supports_fpsimd()) {
                write_sysreg(1 << 30, fpexc32_el2);
                isb();
        }
        write_sysreg(val, hcr_el2);
        /* Trap on AArch32 cp15 c15 accesses (EL1 or EL0) */
        write_sysreg(1 << 15, hstr_el2);
        /*
         * Make sure we trap PMU access from EL0 to EL2. Also sanitize
         * PMSELR_EL0 to make sure it never contains the cycle
         * counter, which could make a PMXEVCNTR_EL0 access UNDEF at
         * EL1 instead of being trapped to EL2.
         */
        write_sysreg(0, pmselr_el0);
        write_sysreg(ARMV8_PMU_USERENR_MASK, pmuserenr_el0);
        write_sysreg(vcpu->arch.mdcr_el2, mdcr_el2);
        __activate_traps_arch()();
}

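/*
 * Deactivation mirrors activation, with one subtlety: MDCR_EL2 is
 * rebuilt rather than zeroed, preserving a sane HPMN and handing
 * ownership of the SPE profiling buffer controls (E2PB, plus TPMS on
 * VHE) back to the host, which may re-enable SPE as soon as we return
 * (see the comment near the end of __kvm_vcpu_run()).
 */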
static void __hyp_text __deactivate_traps_vhe(void)
{
        extern char vectors[];  /* kernel exception vectors */
        u64 mdcr_el2 = read_sysreg(mdcr_el2);

        mdcr_el2 &= MDCR_EL2_HPMN_MASK |
                    MDCR_EL2_E2PB_MASK << MDCR_EL2_E2PB_SHIFT |
                    MDCR_EL2_TPMS;

        write_sysreg(mdcr_el2, mdcr_el2);
        write_sysreg(HCR_HOST_VHE_FLAGS, hcr_el2);
        write_sysreg(CPACR_EL1_FPEN, cpacr_el1);
        write_sysreg(vectors, vbar_el1);
}

static void __hyp_text __deactivate_traps_nvhe(void)
{
        u64 mdcr_el2 = read_sysreg(mdcr_el2);

        mdcr_el2 &= MDCR_EL2_HPMN_MASK;
        mdcr_el2 |= MDCR_EL2_E2PB_MASK << MDCR_EL2_E2PB_SHIFT;

        write_sysreg(mdcr_el2, mdcr_el2);
        write_sysreg(HCR_RW, hcr_el2);
        write_sysreg(CPTR_EL2_DEFAULT, cptr_el2);
}

static hyp_alternate_select(__deactivate_traps_arch,
                            __deactivate_traps_nvhe, __deactivate_traps_vhe,
                            ARM64_HAS_VIRT_HOST_EXTN);

static void __hyp_text __deactivate_traps(struct kvm_vcpu *vcpu)
{
        /*
         * If we pended a virtual abort, preserve it until it gets
         * cleared. See D1.14.3 (Virtual Interrupts) for details, but
         * the crucial bit is "On taking a vSError interrupt,
         * HCR_EL2.VSE is cleared to 0."
         */
        if (vcpu->arch.hcr_el2 & HCR_VSE)
                vcpu->arch.hcr_el2 = read_sysreg(hcr_el2);

        __deactivate_traps_arch()();
        write_sysreg(0, hstr_el2);
        write_sysreg(0, pmuserenr_el0);
}

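/*
 * VTTBR_EL2 holds both the guest's stage 2 page table base and its
 * VMID (in the upper bits), so attaching a VM to the CPU is a single
 * register write, and detaching it is a write of zero.
 */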
static void __hyp_text __activate_vm(struct kvm_vcpu *vcpu)
{
        struct kvm *kvm = kern_hyp_va(vcpu->kvm);
        write_sysreg(kvm->arch.vttbr, vttbr_el2);
}

static void __hyp_text __deactivate_vm(struct kvm_vcpu *vcpu)
{
        write_sysreg(0, vttbr_el2);
}

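/*
 * The vgic world switch comes in two flavours, chosen by a static
 * key: GICv3 keeps its CPU interface state in system registers,
 * whereas GICv2 state has to be saved and restored through the
 * memory-mapped GICH interface.
 */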
static void __hyp_text __vgic_save_state(struct kvm_vcpu *vcpu)
{
        if (static_branch_unlikely(&kvm_vgic_global_state.gicv3_cpuif))
                __vgic_v3_save_state(vcpu);
        else
                __vgic_v2_save_state(vcpu);

        write_sysreg(read_sysreg(hcr_el2) & ~HCR_INT_OVERRIDE, hcr_el2);
}

static void __hyp_text __vgic_restore_state(struct kvm_vcpu *vcpu)
{
        u64 val;

        val = read_sysreg(hcr_el2);
        val |= HCR_INT_OVERRIDE;
        val |= vcpu->arch.irq_lines;
        write_sysreg(val, hcr_el2);

        if (static_branch_unlikely(&kvm_vgic_global_state.gicv3_cpuif))
                __vgic_v3_restore_state(vcpu);
        else
                __vgic_v2_restore_state(vcpu);
}

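/*
 * Cortex-A57 erratum 834220 can leave HPFAR_EL2 invalid after a
 * stage 2 fault (see __populate_fault_info() below). The two trivial
 * helpers here let hyp_alternate_select() turn that CPU capability
 * into a boot-time-patched predicate, so affected cores always take
 * the AT-based fallback.
 */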
static bool __hyp_text __true_value(void)
{
        return true;
}

static bool __hyp_text __false_value(void)
{
        return false;
}

static hyp_alternate_select(__check_arm_834220,
                            __false_value, __true_value,
                            ARM64_WORKAROUND_834220);

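/*
 * After a successful AT S1E1R walk, PAR_EL1 holds PA[47:12] in bits
 * [47:12]; HPFAR_EL2 expects IPA[47:12] in its FIPA field at bits
 * [39:4]. That is the extract-36-bits-and-shift conversion at the end
 * of __translate_far_to_hpfar() below.
 */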
static bool __hyp_text __translate_far_to_hpfar(u64 far, u64 *hpfar)
{
        u64 par, tmp;

        /*
         * Resolve the IPA the hard way using the guest VA.
         *
         * Stage-1 translation already validated the memory access
         * rights. As such, we can use the EL1 translation regime, and
         * don't have to distinguish between EL0 and EL1 access.
         *
         * We do need to save/restore PAR_EL1 though, as we haven't
         * saved the guest context yet, and we may return early...
         */
        par = read_sysreg(par_el1);
        asm volatile("at s1e1r, %0" : : "r" (far));
        isb();

        tmp = read_sysreg(par_el1);
        write_sysreg(par, par_el1);

        if (unlikely(tmp & 1))
                return false; /* Translation failed, back to guest */

        /* Convert PAR to HPFAR format */
        *hpfar = ((tmp >> 12) & ((1UL << 36) - 1)) << 4;
        return true;
}

static bool __hyp_text __populate_fault_info(struct kvm_vcpu *vcpu)
{
        u64 esr = read_sysreg_el2(esr);
        u8 ec = ESR_ELx_EC(esr);
        u64 hpfar, far;

        vcpu->arch.fault.esr_el2 = esr;

        if (ec != ESR_ELx_EC_DABT_LOW && ec != ESR_ELx_EC_IABT_LOW)
                return true;

        far = read_sysreg_el2(far);

        /*
         * The HPFAR can be invalid if the stage 2 fault did not
         * happen during a stage 1 page table walk (the ESR_EL2.S1PTW
         * bit is clear) and one of the two following cases is true:
         * 1. The fault was due to a permission fault
         * 2. The processor carries erratum 834220
         *
         * Therefore, for all non-S1PTW faults where we either have a
         * permission fault or the erratum workaround is enabled, we
         * resolve the IPA using the AT instruction.
         */
        if (!(esr & ESR_ELx_S1PTW) &&
            (__check_arm_834220()() || (esr & ESR_ELx_FSC_TYPE) == FSC_PERM)) {
                if (!__translate_far_to_hpfar(far, &hpfar))
                        return false;
        } else {
                hpfar = read_sysreg(hpfar_el2);
        }

        vcpu->arch.fault.far_el2 = far;
        vcpu->arch.fault.hpfar_el2 = hpfar;
        return true;
}

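/*
 * Advance the guest PC past a trapped instruction that was emulated.
 * For 32-bit guests, kvm_skip_instr32() also needs to advance the IT
 * state in PSTATE, which is why SPSR is shuffled through the vcpu
 * context around the call.
 */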
static void __hyp_text __skip_instr(struct kvm_vcpu *vcpu)
{
        *vcpu_pc(vcpu) = read_sysreg_el2(elr);

        if (vcpu_mode_is_32bit(vcpu)) {
                vcpu->arch.ctxt.gp_regs.regs.pstate = read_sysreg_el2(spsr);
                kvm_skip_instr32(vcpu, kvm_vcpu_trap_il_is32bit(vcpu));
                write_sysreg_el2(vcpu->arch.ctxt.gp_regs.regs.pstate, spsr);
        } else {
                *vcpu_pc(vcpu) += 4;
        }

        write_sysreg_el2(*vcpu_pc(vcpu), elr);
}

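/*
 * The world switch proper: save host state, activate traps and the
 * guest's stage 2 translation, load guest state, run the guest until
 * it exits, then unwind all of it in the opposite order.
 */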
int __hyp_text __kvm_vcpu_run(struct kvm_vcpu *vcpu)
{
        struct kvm_cpu_context *host_ctxt;
        struct kvm_cpu_context *guest_ctxt;
        bool fp_enabled;
        u64 exit_code;

        vcpu = kern_hyp_va(vcpu);
        write_sysreg(vcpu, tpidr_el2);

        host_ctxt = kern_hyp_va(vcpu->arch.host_cpu_context);
        guest_ctxt = &vcpu->arch.ctxt;

        __sysreg_save_host_state(host_ctxt);
        __debug_cond_save_host_state(vcpu);

        __activate_traps(vcpu);
        __activate_vm(vcpu);

        __vgic_restore_state(vcpu);
        __timer_restore_state(vcpu);

        /*
         * We must restore the 32-bit state before the sysregs, thanks
         * to erratum #852523 (Cortex-A57) or #853709 (Cortex-A72).
         */
        __sysreg32_restore_state(vcpu);
        __sysreg_restore_guest_state(guest_ctxt);
        __debug_restore_state(vcpu, kern_hyp_va(vcpu->arch.debug_ptr), guest_ctxt);

        /* Jump in the fire! */
again:
        exit_code = __guest_enter(vcpu, host_ctxt);
        /* And we're baaack! */

        /*
         * We're using the raw exception code in order to only process
         * the trap if no SError is pending. We will come back to the
         * same PC once the SError has been injected, and replay the
         * trapping instruction.
         */
        if (exit_code == ARM_EXCEPTION_TRAP && !__populate_fault_info(vcpu))
                goto again;

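        /*
         * If the vgic_v2_cpuif_trap key is set, the GICv2 CPU
         * interface could not be mapped into the guest, and valid
         * data aborts on it are emulated right here at EL2 rather
         * than being forwarded to the host.
         */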
        if (static_branch_unlikely(&vgic_v2_cpuif_trap) &&
            exit_code == ARM_EXCEPTION_TRAP) {
                bool valid;

                valid = kvm_vcpu_trap_get_class(vcpu) == ESR_ELx_EC_DABT_LOW &&
                        kvm_vcpu_trap_get_fault_type(vcpu) == FSC_FAULT &&
                        kvm_vcpu_dabt_isvalid(vcpu) &&
                        !kvm_vcpu_dabt_isextabt(vcpu) &&
                        !kvm_vcpu_dabt_iss1tw(vcpu);

                if (valid) {
                        int ret = __vgic_v2_perform_cpuif_access(vcpu);

                        if (ret == 1) {
                                __skip_instr(vcpu);
                                goto again;
                        }

                        if (ret == -1) {
                                /* Promote an illegal access to an SError */
                                __skip_instr(vcpu);
                                exit_code = ARM_EXCEPTION_EL1_SERROR;
                        }

                        /* 0 falls through to be handled out of EL2 */
                }
        }

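        /*
         * Sample the FP/SIMD trap state before __deactivate_traps()
         * rewrites it: FP still being enabled at this point means the
         * guest actually used FP/SIMD (the trap was disabled on its
         * first access), so the guest's register state is live and
         * must be saved, and the host's restored.
         */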
        fp_enabled = __fpsimd_enabled();

        __sysreg_save_guest_state(guest_ctxt);
        __sysreg32_save_state(vcpu);
        __timer_save_state(vcpu);
        __vgic_save_state(vcpu);

        __deactivate_traps(vcpu);
        __deactivate_vm(vcpu);

        __sysreg_restore_host_state(host_ctxt);

        if (fp_enabled) {
                __fpsimd_save_state(&guest_ctxt->gp_regs.fp_regs);
                __fpsimd_restore_state(&host_ctxt->gp_regs.fp_regs);
        }

        __debug_save_state(vcpu, kern_hyp_va(vcpu->arch.debug_ptr), guest_ctxt);
        /*
         * This must come after restoring the host sysregs, since a non-VHE
         * system may enable SPE here and make use of the TTBRs.
         */
        __debug_cond_restore_host_state(vcpu);

        return exit_code;
}

static const char __hyp_panic_string[] = "HYP panic:\nPS:%08llx PC:%016llx ESR:%08llx\nFAR:%016llx HPFAR:%016llx PAR:%016llx\nVCPU:%p\n";

static void __hyp_text __hyp_call_panic_nvhe(u64 spsr, u64 elr, u64 par)
{
        unsigned long str_va;

        /*
         * Force the panic string to be loaded from the literal pool,
         * making sure it is a kernel address and not a PC-relative
         * reference.
         */
        asm volatile("ldr %0, =__hyp_panic_string" : "=r" (str_va));

        __hyp_do_panic(str_va,
                       spsr, elr,
                       read_sysreg(esr_el2), read_sysreg_el2(far),
                       read_sysreg(hpfar_el2), par,
                       (void *)read_sysreg(tpidr_el2));
}

static void __hyp_text __hyp_call_panic_vhe(u64 spsr, u64 elr, u64 par)
{
        panic(__hyp_panic_string,
              spsr, elr,
              read_sysreg_el2(esr), read_sysreg_el2(far),
              read_sysreg(hpfar_el2), par,
              (void *)read_sysreg(tpidr_el2));
}

static hyp_alternate_select(__hyp_call_panic,
                            __hyp_call_panic_nvhe, __hyp_call_panic_vhe,
                            ARM64_HAS_VIRT_HOST_EXTN);

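/*
 * A non-zero VTTBR_EL2 means we panicked with a guest context loaded:
 * tear the VM down and restore the host state first, so that the
 * panic path below reports against a sane host context.
 */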
void __hyp_text __noreturn __hyp_panic(void)
{
        u64 spsr = read_sysreg_el2(spsr);
        u64 elr = read_sysreg_el2(elr);
        u64 par = read_sysreg(par_el1);

        if (read_sysreg(vttbr_el2)) {
                struct kvm_vcpu *vcpu;
                struct kvm_cpu_context *host_ctxt;

                vcpu = (struct kvm_vcpu *)read_sysreg(tpidr_el2);
                host_ctxt = kern_hyp_va(vcpu->arch.host_cpu_context);
                __timer_save_state(vcpu);
                __deactivate_traps(vcpu);
                __deactivate_vm(vcpu);
                __sysreg_restore_host_state(host_ctxt);
        }

        /* Call panic for real */
        __hyp_call_panic()(spsr, elr, par);

        unreachable();
}