// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (C) 2015 - ARM Ltd
 * Author: Marc Zyngier <marc.zyngier@arm.com>
 */

#ifndef __ARM64_KVM_HYP_SWITCH_H__
#define __ARM64_KVM_HYP_SWITCH_H__

#include <hyp/adjust_pc.h>
#include <hyp/fault.h>

#include <linux/arm-smccc.h>
#include <linux/kvm_host.h>
#include <linux/types.h>
#include <linux/jump_label.h>
#include <uapi/linux/psci.h>

#include <kvm/arm_psci.h>

#include <asm/barrier.h>
#include <asm/cpufeature.h>
#include <asm/extable.h>
#include <asm/kprobes.h>
#include <asm/kvm_asm.h>
#include <asm/kvm_emulate.h>
#include <asm/kvm_hyp.h>
#include <asm/kvm_mmu.h>
#include <asm/kvm_nested.h>
#include <asm/fpsimd.h>
#include <asm/debug-monitors.h>
#include <asm/processor.h>

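/*
 * Hyp-private exception table entry: PC-relative offsets to a
 * potentially-faulting instruction and its fixup handler, mirroring the
 * kernel's struct exception_table_entry. Consumed by
 * __kvm_unexpected_el2_exception() below.
 */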
struct kvm_exception_table_entry {
	int insn, fixup;
};

extern struct kvm_exception_table_entry __start___kvm_ex_table;
extern struct kvm_exception_table_entry __stop___kvm_ex_table;

/* Check whether the FP regs are owned by the guest */
static inline bool guest_owns_fp_regs(struct kvm_vcpu *vcpu)
{
	return vcpu->arch.fp_state == FP_STATE_GUEST_OWNED;
}

/* Save the 32-bit only FPSIMD system register state */
static inline void __fpsimd_save_fpexc32(struct kvm_vcpu *vcpu)
{
	if (!vcpu_el1_is_32bit(vcpu))
		return;

	__vcpu_sys_reg(vcpu, FPEXC32_EL2) = read_sysreg(fpexc32_el2);
}

static inline void __activate_traps_fpsimd32(struct kvm_vcpu *vcpu)
{
	/*
	 * We are about to set CPTR_EL2.TFP to trap all floating point
	 * register accesses to EL2, however, the ARM ARM clearly states that
	 * traps are only taken to EL2 if the operation would not otherwise
	 * trap to EL1. Therefore, always make sure that for 32-bit guests,
	 * we set FPEXC.EN to prevent traps to EL1, when setting the TFP bit.
	 * If FP/ASIMD is not implemented, FPEXC is UNDEFINED and any access to
	 * it will cause an exception.
	 */
	if (vcpu_el1_is_32bit(vcpu) && system_supports_fpsimd()) {
		write_sysreg(1 << 30, fpexc32_el2);
		isb();
	}
}

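/*
 * True if the fine-grained trap registers (HFGRTR_EL2/HFGWTR_EL2) need
 * reprogramming around guest entry/exit: with SME, to trap the guest's
 * SMPRI_EL1/TPIDR2_EL0 accesses; with the AmpereOne AC03_CPU_38 erratum,
 * to intercept guest writes to TCR_EL1.
 */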
static inline bool __hfgxtr_traps_required(void)
{
	if (cpus_have_final_cap(ARM64_SME))
		return true;

	if (cpus_have_final_cap(ARM64_WORKAROUND_AMPERE_AC03_CPU_38))
		return true;

	return false;
}

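/*
 * Note the polarity: the nSMPRI_EL1/nTPIDR2_EL0 bits are negative trap
 * enables, so *clearing* them here turns the traps on; the matching
 * deactivation below sets them again to turn the traps back off.
 */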
static inline void __activate_traps_hfgxtr(void)
{
	u64 r_clr = 0, w_clr = 0, r_set = 0, w_set = 0, tmp;

	if (cpus_have_final_cap(ARM64_SME)) {
		tmp = HFGxTR_EL2_nSMPRI_EL1_MASK | HFGxTR_EL2_nTPIDR2_EL0_MASK;

		r_clr |= tmp;
		w_clr |= tmp;
	}

	/*
	 * Trap guest writes to TCR_EL1 to prevent it from enabling HA or HD.
	 */
	if (cpus_have_final_cap(ARM64_WORKAROUND_AMPERE_AC03_CPU_38))
		w_set |= HFGxTR_EL2_TCR_EL1_MASK;

	sysreg_clear_set_s(SYS_HFGRTR_EL2, r_clr, r_set);
	sysreg_clear_set_s(SYS_HFGWTR_EL2, w_clr, w_set);
}

static inline void __deactivate_traps_hfgxtr(void)
{
	u64 r_clr = 0, w_clr = 0, r_set = 0, w_set = 0, tmp;

	if (cpus_have_final_cap(ARM64_SME)) {
		tmp = HFGxTR_EL2_nSMPRI_EL1_MASK | HFGxTR_EL2_nTPIDR2_EL0_MASK;

		r_set |= tmp;
		w_set |= tmp;
	}

	if (cpus_have_final_cap(ARM64_WORKAROUND_AMPERE_AC03_CPU_38))
		w_clr |= HFGxTR_EL2_TCR_EL1_MASK;

	sysreg_clear_set_s(SYS_HFGRTR_EL2, r_clr, r_set);
	sysreg_clear_set_s(SYS_HFGWTR_EL2, w_clr, w_set);
}

static inline void __activate_traps_common(struct kvm_vcpu *vcpu)
{
	/* Trap on AArch32 cp15 c15 (impdef sysregs) accesses (EL1 or EL0) */
	write_sysreg(1 << 15, hstr_el2);

	/*
	 * Make sure we trap PMU access from EL0 to EL2. Also sanitize
	 * PMSELR_EL0 to make sure it never contains the cycle
	 * counter, which could make a PMXEVCNTR_EL0 access UNDEF at
	 * EL1 instead of being trapped to EL2.
	 */
	if (kvm_arm_support_pmu_v3()) {
		struct kvm_cpu_context *hctxt;

		write_sysreg(0, pmselr_el0);

		hctxt = &this_cpu_ptr(&kvm_host_data)->host_ctxt;
		ctxt_sys_reg(hctxt, PMUSERENR_EL0) = read_sysreg(pmuserenr_el0);
		write_sysreg(ARMV8_PMU_USERENR_MASK, pmuserenr_el0);
		vcpu_set_flag(vcpu, PMUSERENR_ON_CPU);
	}

	vcpu->arch.mdcr_el2_host = read_sysreg(mdcr_el2);
	write_sysreg(vcpu->arch.mdcr_el2, mdcr_el2);

	if (__hfgxtr_traps_required())
		__activate_traps_hfgxtr();
}

static inline void __deactivate_traps_common(struct kvm_vcpu *vcpu)
{
	write_sysreg(vcpu->arch.mdcr_el2_host, mdcr_el2);

	write_sysreg(0, hstr_el2);
	if (kvm_arm_support_pmu_v3()) {
		struct kvm_cpu_context *hctxt;

		hctxt = &this_cpu_ptr(&kvm_host_data)->host_ctxt;
		write_sysreg(ctxt_sys_reg(hctxt, PMUSERENR_EL0), pmuserenr_el0);
		vcpu_clear_flag(vcpu, PMUSERENR_ON_CPU);
	}

	if (__hfgxtr_traps_required())
		__deactivate_traps_hfgxtr();
}

static inline void ___activate_traps(struct kvm_vcpu *vcpu)
{
	u64 hcr = vcpu->arch.hcr_el2;

	if (cpus_have_final_cap(ARM64_WORKAROUND_CAVIUM_TX2_219_TVM))
		hcr |= HCR_TVM;

	write_sysreg(hcr, hcr_el2);

	if (cpus_have_final_cap(ARM64_HAS_RAS_EXTN) && (hcr & HCR_VSE))
		write_sysreg_s(vcpu->arch.vsesr_el2, SYS_VSESR_EL2);

	if (cpus_have_final_cap(ARM64_HAS_HCX))
		write_sysreg_s(HCRX_GUEST_FLAGS, SYS_HCRX_EL2);
}

static inline void ___deactivate_traps(struct kvm_vcpu *vcpu)
{
	/*
	 * If we pended a virtual abort, preserve it until it gets
	 * cleared. See D1.14.3 (Virtual Interrupts) for details, but
	 * the crucial bit is "On taking a vSError interrupt,
	 * HCR_EL2.VSE is cleared to 0."
	 */
	if (vcpu->arch.hcr_el2 & HCR_VSE) {
		vcpu->arch.hcr_el2 &= ~HCR_VSE;
		vcpu->arch.hcr_el2 |= read_sysreg(hcr_el2) & HCR_VSE;
	}

	if (cpus_have_final_cap(ARM64_HAS_HCX))
		write_sysreg_s(HCRX_HOST_FLAGS, SYS_HCRX_EL2);
}

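/*
 * Capture the faulting guest state (ESR, FAR and, where needed, HPFAR)
 * for the current exit. Returns false if the fault information could not
 * be resolved, in which case the caller re-enters the guest so that the
 * access is replayed.
 */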
static inline bool __populate_fault_info(struct kvm_vcpu *vcpu)
{
	return __get_fault_info(vcpu->arch.fault.esr_el2, &vcpu->arch.fault);
}

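/*
 * Restore the guest's SVE state: size ZCR_EL2 for the guest's maximum
 * vector length, reload the Z/P/FFR registers and FPSR, then restore the
 * guest's ZCR_EL1.
 */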
static inline void __hyp_sve_restore_guest(struct kvm_vcpu *vcpu)
{
	sve_cond_update_zcr_vq(vcpu_sve_max_vq(vcpu) - 1, SYS_ZCR_EL2);
	__sve_restore_state(vcpu_sve_pffr(vcpu),
			    &vcpu->arch.ctxt.fp_regs.fpsr);
	write_sysreg_el1(__vcpu_sys_reg(vcpu, ZCR_EL1), SYS_ZCR);
}

/*
 * We trap the first access to the FP/SIMD to save the host context and
 * restore the guest context lazily.
 * If FP/SIMD is not implemented, handle the trap and inject an undefined
 * instruction exception to the guest. Similarly for trapped SVE accesses.
 */
static bool kvm_hyp_handle_fpsimd(struct kvm_vcpu *vcpu, u64 *exit_code)
{
	bool sve_guest;
	u8 esr_ec;
	u64 reg;

	if (!system_supports_fpsimd())
		return false;

	sve_guest = vcpu_has_sve(vcpu);
	esr_ec = kvm_vcpu_trap_get_class(vcpu);

	/* Only handle traps the vCPU can support here: */
	switch (esr_ec) {
	case ESR_ELx_EC_FP_ASIMD:
		break;
	case ESR_ELx_EC_SVE:
		if (!sve_guest)
			return false;
		break;
	default:
		return false;
	}

	/* Valid trap. Switch the context: */

	/* First disable enough traps to allow us to update the registers */
	if (has_vhe() || has_hvhe()) {
		reg = CPACR_EL1_FPEN_EL0EN | CPACR_EL1_FPEN_EL1EN;
		if (sve_guest)
			reg |= CPACR_EL1_ZEN_EL0EN | CPACR_EL1_ZEN_EL1EN;

		sysreg_clear_set(cpacr_el1, 0, reg);
	} else {
		reg = CPTR_EL2_TFP;
		if (sve_guest)
			reg |= CPTR_EL2_TZ;

		sysreg_clear_set(cptr_el2, reg, 0);
	}
	isb();

	/* Write out the host state if it's in the registers */
	if (vcpu->arch.fp_state == FP_STATE_HOST_OWNED)
		__fpsimd_save_state(vcpu->arch.host_fpsimd_state);

	/* Restore the guest state */
	if (sve_guest)
		__hyp_sve_restore_guest(vcpu);
	else
		__fpsimd_restore_state(&vcpu->arch.ctxt.fp_regs);

	/* Skip restoring fpexc32 for AArch64 guests */
	if (!(read_sysreg(hcr_el2) & HCR_RW))
		write_sysreg(__vcpu_sys_reg(vcpu, FPEXC32_EL2), fpexc32_el2);

	vcpu->arch.fp_state = FP_STATE_GUEST_OWNED;

	return true;
}

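/*
 * Cavium ThunderX2 erratum 219 workaround: with HCR_EL2.TVM set, emulate
 * guest writes to the stage-1 VM control registers and skip the trapped
 * instruction.
 */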
static inline bool handle_tx2_tvm(struct kvm_vcpu *vcpu)
{
	u32 sysreg = esr_sys64_to_sysreg(kvm_vcpu_get_esr(vcpu));
	int rt = kvm_vcpu_sys_get_rt(vcpu);
	u64 val = vcpu_get_reg(vcpu, rt);

	/*
	 * The normal sysreg handling code expects to see the traps,
	 * let's not do anything here.
	 */
	if (vcpu->arch.hcr_el2 & HCR_TVM)
		return false;

	switch (sysreg) {
	case SYS_SCTLR_EL1:
		write_sysreg_el1(val, SYS_SCTLR);
		break;
	case SYS_TTBR0_EL1:
		write_sysreg_el1(val, SYS_TTBR0);
		break;
	case SYS_TTBR1_EL1:
		write_sysreg_el1(val, SYS_TTBR1);
		break;
	case SYS_TCR_EL1:
		write_sysreg_el1(val, SYS_TCR);
		break;
	case SYS_ESR_EL1:
		write_sysreg_el1(val, SYS_ESR);
		break;
	case SYS_FAR_EL1:
		write_sysreg_el1(val, SYS_FAR);
		break;
	case SYS_AFSR0_EL1:
		write_sysreg_el1(val, SYS_AFSR0);
		break;
	case SYS_AFSR1_EL1:
		write_sysreg_el1(val, SYS_AFSR1);
		break;
	case SYS_MAIR_EL1:
		write_sysreg_el1(val, SYS_MAIR);
		break;
	case SYS_AMAIR_EL1:
		write_sysreg_el1(val, SYS_AMAIR);
		break;
	case SYS_CONTEXTIDR_EL1:
		write_sysreg_el1(val, SYS_CONTEXTIDR);
		break;
	default:
		return false;
	}

	__kvm_skip_instr(vcpu);
	return true;
}

static inline bool esr_is_ptrauth_trap(u64 esr)
{
	switch (esr_sys64_to_sysreg(esr)) {
	case SYS_APIAKEYLO_EL1:
	case SYS_APIAKEYHI_EL1:
	case SYS_APIBKEYLO_EL1:
	case SYS_APIBKEYHI_EL1:
	case SYS_APDAKEYLO_EL1:
	case SYS_APDAKEYHI_EL1:
	case SYS_APDBKEYLO_EL1:
	case SYS_APDBKEYHI_EL1:
	case SYS_APGAKEYLO_EL1:
	case SYS_APGAKEYHI_EL1:
		return true;
	}

	return false;
}

#define __ptrauth_save_key(ctxt, key)					\
	do {								\
		u64 __val;						\
		__val = read_sysreg_s(SYS_ ## key ## KEYLO_EL1);	\
		ctxt_sys_reg(ctxt, key ## KEYLO_EL1) = __val;		\
		__val = read_sysreg_s(SYS_ ## key ## KEYHI_EL1);	\
		ctxt_sys_reg(ctxt, key ## KEYHI_EL1) = __val;		\
	} while(0)

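/*
 * For illustration, __ptrauth_save_key(ctxt, APIA) pastes the key name
 * into both the sysreg accessor and the context index, i.e. it expands
 * (roughly) to:
 *
 *	ctxt_sys_reg(ctxt, APIAKEYLO_EL1) = read_sysreg_s(SYS_APIAKEYLO_EL1);
 *	ctxt_sys_reg(ctxt, APIAKEYHI_EL1) = read_sysreg_s(SYS_APIAKEYHI_EL1);
 */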
DECLARE_PER_CPU(struct kvm_cpu_context, kvm_hyp_ctxt);

static bool kvm_hyp_handle_ptrauth(struct kvm_vcpu *vcpu, u64 *exit_code)
{
	struct kvm_cpu_context *ctxt;
	u64 val;

	if (!vcpu_has_ptrauth(vcpu))
		return false;

	ctxt = this_cpu_ptr(&kvm_hyp_ctxt);
	__ptrauth_save_key(ctxt, APIA);
	__ptrauth_save_key(ctxt, APIB);
	__ptrauth_save_key(ctxt, APDA);
	__ptrauth_save_key(ctxt, APDB);
	__ptrauth_save_key(ctxt, APGA);

	vcpu_ptrauth_enable(vcpu);

	val = read_sysreg(hcr_el2);
	val |= (HCR_API | HCR_APK);
	write_sysreg(val, hcr_el2);

	return true;
}

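/*
 * Handle a trapped guest read of the physical counter without leaving
 * hyp: read CNTPCT_EL0 directly and apply the VM and (for nested virt)
 * per-vCPU counter offsets by hand.
 */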
static bool kvm_hyp_handle_cntpct(struct kvm_vcpu *vcpu)
{
	struct arch_timer_context *ctxt;
	u32 sysreg;
	u64 val;

	/*
	 * We only get here for 64bit guests, 32bit guests will hit
	 * the long and winding road all the way to the standard
	 * handling. Yes, it sucks to be irrelevant.
	 */
	sysreg = esr_sys64_to_sysreg(kvm_vcpu_get_esr(vcpu));

	switch (sysreg) {
	case SYS_CNTPCT_EL0:
	case SYS_CNTPCTSS_EL0:
		if (vcpu_has_nv(vcpu)) {
			if (is_hyp_ctxt(vcpu)) {
				ctxt = vcpu_hptimer(vcpu);
				break;
			}

			/* Check for guest hypervisor trapping */
			val = __vcpu_sys_reg(vcpu, CNTHCTL_EL2);
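			/*
			 * With E2H == 0, EL1PCTEN lives at bit 0 of
			 * CNTHCTL_EL2; shift it up to the E2H == 1
			 * position (bit 10) so that the single test
			 * below covers both register layouts.
			 */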
			if (!vcpu_el2_e2h_is_set(vcpu))
				val = (val & CNTHCTL_EL1PCTEN) << 10;

			if (!(val & (CNTHCTL_EL1PCTEN << 10)))
				return false;
		}

		ctxt = vcpu_ptimer(vcpu);
		break;
	default:
		return false;
	}

	val = arch_timer_read_cntpct_el0();

	if (ctxt->offset.vm_offset)
		val -= *kern_hyp_va(ctxt->offset.vm_offset);
	if (ctxt->offset.vcpu_offset)
		val -= *kern_hyp_va(ctxt->offset.vcpu_offset);

	vcpu_set_reg(vcpu, kvm_vcpu_sys_get_rt(vcpu), val);
	__kvm_skip_instr(vcpu);
	return true;
}

static bool handle_ampere1_tcr(struct kvm_vcpu *vcpu)
{
	u32 sysreg = esr_sys64_to_sysreg(kvm_vcpu_get_esr(vcpu));
	int rt = kvm_vcpu_sys_get_rt(vcpu);
	u64 val = vcpu_get_reg(vcpu, rt);

	if (sysreg != SYS_TCR_EL1)
		return false;

	/*
	 * Affected parts do not advertise support for hardware Access Flag /
	 * Dirty state management in ID_AA64MMFR1_EL1.HAFDBS, but the underlying
	 * control bits are still functional. The architecture requires these be
	 * RES0 on systems that do not implement FEAT_HAFDBS.
	 *
	 * Uphold the requirements of the architecture by masking guest writes
	 * to TCR_EL1.{HA,HD} here.
	 */
	val &= ~(TCR_HD | TCR_HA);
	write_sysreg_el1(val, SYS_TCR);
	__kvm_skip_instr(vcpu);
	return true;
}

static bool kvm_hyp_handle_sysreg(struct kvm_vcpu *vcpu, u64 *exit_code)
{
	if (cpus_have_final_cap(ARM64_WORKAROUND_CAVIUM_TX2_219_TVM) &&
	    handle_tx2_tvm(vcpu))
		return true;

	if (cpus_have_final_cap(ARM64_WORKAROUND_AMPERE_AC03_CPU_38) &&
	    handle_ampere1_tcr(vcpu))
		return true;

	if (static_branch_unlikely(&vgic_v3_cpuif_trap) &&
	    __vgic_v3_perform_cpuif_access(vcpu) == 1)
		return true;

	if (esr_is_ptrauth_trap(kvm_vcpu_get_esr(vcpu)))
		return kvm_hyp_handle_ptrauth(vcpu, exit_code);

	if (kvm_hyp_handle_cntpct(vcpu))
		return true;

	return false;
}

static bool kvm_hyp_handle_cp15_32(struct kvm_vcpu *vcpu, u64 *exit_code)
{
	if (static_branch_unlikely(&vgic_v3_cpuif_trap) &&
	    __vgic_v3_perform_cpuif_access(vcpu) == 1)
		return true;

	return false;
}

static bool kvm_hyp_handle_memory_fault(struct kvm_vcpu *vcpu, u64 *exit_code)
{
	if (!__populate_fault_info(vcpu))
		return true;

	return false;
}
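/*
 * Guest instruction aborts and watchpoints only need the fault
 * information captured before the host handles them, so they share the
 * implementation above.
 */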
static bool kvm_hyp_handle_iabt_low(struct kvm_vcpu *vcpu, u64 *exit_code)
	__alias(kvm_hyp_handle_memory_fault);
static bool kvm_hyp_handle_watchpt_low(struct kvm_vcpu *vcpu, u64 *exit_code)
	__alias(kvm_hyp_handle_memory_fault);

static bool kvm_hyp_handle_dabt_low(struct kvm_vcpu *vcpu, u64 *exit_code)
{
	if (kvm_hyp_handle_memory_fault(vcpu, exit_code))
		return true;

	if (static_branch_unlikely(&vgic_v2_cpuif_trap)) {
		bool valid;

		valid = kvm_vcpu_trap_get_fault_type(vcpu) == ESR_ELx_FSC_FAULT &&
			kvm_vcpu_dabt_isvalid(vcpu) &&
			!kvm_vcpu_abt_issea(vcpu) &&
			!kvm_vcpu_abt_iss1tw(vcpu);

		if (valid) {
			int ret = __vgic_v2_perform_cpuif_access(vcpu);

			if (ret == 1)
				return true;

			/* Promote an illegal access to an SError. */
			if (ret == -1)
				*exit_code = ARM_EXCEPTION_EL1_SERROR;
		}
	}

	return false;
}

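/*
 * Exit handlers are indexed by ESR_ELx.EC; a NULL entry leaves the exit
 * to be handled by the host.
 */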
typedef bool (*exit_handler_fn)(struct kvm_vcpu *, u64 *);

static const exit_handler_fn *kvm_get_exit_handler_array(struct kvm_vcpu *vcpu);

static void early_exit_filter(struct kvm_vcpu *vcpu, u64 *exit_code);

/*
 * Allow the hypervisor to handle the exit with an exit handler if it has one.
 *
 * Returns true if the hypervisor handled the exit, and control should go back
 * to the guest, or false if it hasn't.
 */
static inline bool kvm_hyp_handle_exit(struct kvm_vcpu *vcpu, u64 *exit_code)
{
	const exit_handler_fn *handlers = kvm_get_exit_handler_array(vcpu);
	exit_handler_fn fn;

	fn = handlers[kvm_vcpu_trap_get_class(vcpu)];

	if (fn)
		return fn(vcpu, exit_code);

	return false;
}

static inline void synchronize_vcpu_pstate(struct kvm_vcpu *vcpu, u64 *exit_code)
{
	/*
	 * Check for the conditions of Cortex-A510's #2077057. When these occur
	 * SPSR_EL2 can't be trusted, but isn't needed either as it is
	 * unchanged from the value in vcpu_gp_regs(vcpu)->pstate.
	 * Are we single-stepping the guest, and took a PAC exception from the
	 * active-not-pending state?
	 */
	if (cpus_have_final_cap(ARM64_WORKAROUND_2077057) &&
	    vcpu->guest_debug & KVM_GUESTDBG_SINGLESTEP &&
	    *vcpu_cpsr(vcpu) & DBG_SPSR_SS &&
	    ESR_ELx_EC(read_sysreg_el2(SYS_ESR)) == ESR_ELx_EC_PAC)
		write_sysreg_el2(*vcpu_cpsr(vcpu), SYS_SPSR);

	vcpu->arch.ctxt.regs.pstate = read_sysreg_el2(SYS_SPSR);
}

/*
 * Return true when we were able to fixup the guest exit and should return to
 * the guest, false when we should restore the host state and return to the
 * main run loop.
 */
static inline bool fixup_guest_exit(struct kvm_vcpu *vcpu, u64 *exit_code)
{
	/*
	 * Save PSTATE early so that we can evaluate the vcpu mode
	 * early on.
	 */
	synchronize_vcpu_pstate(vcpu, exit_code);

	/*
	 * Check whether we want to repaint the state one way or
	 * another.
	 */
	early_exit_filter(vcpu, exit_code);

	if (ARM_EXCEPTION_CODE(*exit_code) != ARM_EXCEPTION_IRQ)
		vcpu->arch.fault.esr_el2 = read_sysreg_el2(SYS_ESR);

	if (ARM_SERROR_PENDING(*exit_code) &&
	    ARM_EXCEPTION_CODE(*exit_code) != ARM_EXCEPTION_IRQ) {
		u8 esr_ec = kvm_vcpu_trap_get_class(vcpu);

		/*
		 * An HVC already has an adjusted PC, which we need to
		 * correct in order to return to after having injected
		 * the SError.
		 *
		 * SMC, on the other hand, is *trapped*, meaning its
		 * preferred return address is the SMC itself.
		 */
		if (esr_ec == ESR_ELx_EC_HVC32 || esr_ec == ESR_ELx_EC_HVC64)
			write_sysreg_el2(read_sysreg_el2(SYS_ELR) - 4, SYS_ELR);
	}

	/*
	 * We're using the raw exception code in order to only process
	 * the trap if no SError is pending. We will come back to the
	 * same PC once the SError has been injected, and replay the
	 * trapping instruction.
	 */
	if (*exit_code != ARM_EXCEPTION_TRAP)
		goto exit;

	/* Check if there's an exit handler and allow it to handle the exit. */
	if (kvm_hyp_handle_exit(vcpu, exit_code))
		goto guest;
exit:
	/* Return to the host kernel and handle the exit */
	return false;

guest:
	/* Re-enter the guest */
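	/* Cortex-A77 erratum 1508412: issue a DMB before re-entering */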
	asm(ALTERNATIVE("nop", "dmb sy", ARM64_WORKAROUND_1508412));
	return true;
}

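/*
 * An unexpected exception was taken while running hyp code: look up
 * ELR_EL2 in __kvm_ex_table and branch to the fixup if one exists;
 * otherwise arrange to panic once the host context has been restored.
 */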
static inline void __kvm_unexpected_el2_exception(void)
{
	extern char __guest_exit_panic[];
	unsigned long addr, fixup;
	struct kvm_exception_table_entry *entry, *end;
	unsigned long elr_el2 = read_sysreg(elr_el2);

	entry = &__start___kvm_ex_table;
	end = &__stop___kvm_ex_table;

	while (entry < end) {
		addr = (unsigned long)&entry->insn + entry->insn;
		fixup = (unsigned long)&entry->fixup + entry->fixup;

		if (addr != elr_el2) {
			entry++;
			continue;
		}

		write_sysreg(fixup, elr_el2);
		return;
	}

	/* Trigger a panic after restoring the hyp context. */
	write_sysreg(__guest_exit_panic, elr_el2);
}

#endif /* __ARM64_KVM_HYP_SWITCH_H__ */