// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (C) 2015 - ARM Ltd
 * Author: Marc Zyngier <marc.zyngier@arm.com>
 */

#include <hyp/switch.h>
#include <hyp/sysreg-sr.h>

#include <linux/arm-smccc.h>
#include <linux/kvm_host.h>
#include <linux/types.h>
#include <linux/jump_label.h>
#include <uapi/linux/psci.h>

#include <kvm/arm_psci.h>

#include <asm/barrier.h>
#include <asm/cpufeature.h>
#include <asm/kprobes.h>
#include <asm/kvm_asm.h>
#include <asm/kvm_emulate.h>
#include <asm/kvm_hyp.h>
#include <asm/kvm_mmu.h>
#include <asm/fpsimd.h>
#include <asm/debug-monitors.h>
#include <asm/processor.h>

#include <nvhe/fixed_config.h>
#include <nvhe/mem_protect.h>

/* Non-VHE specific context */
DEFINE_PER_CPU(struct kvm_host_data, kvm_host_data);
DEFINE_PER_CPU(struct kvm_cpu_context, kvm_hyp_ctxt);
DEFINE_PER_CPU(unsigned long, kvm_hyp_vector);

extern void kvm_nvhe_prepare_backtrace(unsigned long fp, unsigned long pc);
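
/*
 * Set up the EL2 trap configuration for entering the guest: programme
 * CPTR_EL2 from the vcpu (additionally trapping FP/SIMD/SVE if the guest
 * does not currently own the FP regs), install the hyp vectors, enable the
 * fine-grained traps for the SME registers (TPIDR2_EL0, SMPRI_EL1), and
 * restore the guest's stage-1 SCTLR/TCR when working around speculative AT
 * errata.
 */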
static void __activate_traps(struct kvm_vcpu *vcpu)
{
	u64 val;

	___activate_traps(vcpu);
	__activate_traps_common(vcpu);

	val = vcpu->arch.cptr_el2;
	val |= CPTR_EL2_TTA | CPTR_EL2_TAM;
	if (!guest_owns_fp_regs(vcpu)) {
		val |= CPTR_EL2_TFP | CPTR_EL2_TZ;
		__activate_traps_fpsimd32(vcpu);
	}
	if (cpus_have_final_cap(ARM64_SME))
		val |= CPTR_EL2_TSM;

	write_sysreg(val, cptr_el2);
	write_sysreg(__this_cpu_read(kvm_hyp_vector), vbar_el2);

	if (cpus_have_final_cap(ARM64_SME)) {
		val = read_sysreg_s(SYS_HFGRTR_EL2);
		val &= ~(HFGxTR_EL2_nTPIDR2_EL0_MASK |
			 HFGxTR_EL2_nSMPRI_EL1_MASK);
		write_sysreg_s(val, SYS_HFGRTR_EL2);

		val = read_sysreg_s(SYS_HFGWTR_EL2);
		val &= ~(HFGxTR_EL2_nTPIDR2_EL0_MASK |
			 HFGxTR_EL2_nSMPRI_EL1_MASK);
		write_sysreg_s(val, SYS_HFGWTR_EL2);
	}

	if (cpus_have_final_cap(ARM64_WORKAROUND_SPECULATIVE_AT)) {
		struct kvm_cpu_context *ctxt = &vcpu->arch.ctxt;

		isb();
		/*
		 * At this stage, and thanks to the above isb(), S2 is
		 * configured and enabled. We can now restore the guest's S1
		 * configuration: SCTLR, and only then TCR.
		 */
		write_sysreg_el1(ctxt_sys_reg(ctxt, SCTLR_EL1), SYS_SCTLR);
		isb();
		write_sysreg_el1(ctxt_sys_reg(ctxt, TCR_EL1), SYS_TCR);
	}
}
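
/*
 * Undo the guest trap configuration before returning to the host: park the
 * EL1 MMU in a safe state for the speculative AT workaround, restore the
 * host's HCR_EL2 and a default CPTR_EL2, disable the SME fine-grained traps
 * again, and switch back to the host vectors.
 */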
static void __deactivate_traps(struct kvm_vcpu *vcpu)
{
	extern char __kvm_hyp_host_vector[];
	u64 cptr;

	___deactivate_traps(vcpu);

	if (cpus_have_final_cap(ARM64_WORKAROUND_SPECULATIVE_AT)) {
		u64 val;

		/*
		 * Set the TCR and SCTLR registers in the exact opposite
		 * sequence as __activate_traps (first prevent walks,
		 * then force the MMU on). A generous sprinkling of isb()
		 * ensures that things happen in this exact order.
		 */
		val = read_sysreg_el1(SYS_TCR);
		write_sysreg_el1(val | TCR_EPD1_MASK | TCR_EPD0_MASK, SYS_TCR);
		isb();
		val = read_sysreg_el1(SYS_SCTLR);
		write_sysreg_el1(val | SCTLR_ELx_M, SYS_SCTLR);
		isb();
	}

	__deactivate_traps_common(vcpu);

	write_sysreg(this_cpu_ptr(&kvm_init_params)->hcr_el2, hcr_el2);

	if (cpus_have_final_cap(ARM64_SME)) {
		u64 val;

		val = read_sysreg_s(SYS_HFGRTR_EL2);
		val |= HFGxTR_EL2_nTPIDR2_EL0_MASK |
		       HFGxTR_EL2_nSMPRI_EL1_MASK;
		write_sysreg_s(val, SYS_HFGRTR_EL2);

		val = read_sysreg_s(SYS_HFGWTR_EL2);
		val |= HFGxTR_EL2_nTPIDR2_EL0_MASK |
		       HFGxTR_EL2_nSMPRI_EL1_MASK;
		write_sysreg_s(val, SYS_HFGWTR_EL2);
	}

	cptr = CPTR_EL2_DEFAULT;
	if (vcpu_has_sve(vcpu) && (vcpu->arch.fp_state == FP_STATE_GUEST_OWNED))
		cptr |= CPTR_EL2_TZ;
	if (cpus_have_final_cap(ARM64_SME))
		cptr &= ~CPTR_EL2_TSM;

	write_sysreg(cptr, cptr_el2);
	write_sysreg(__kvm_hyp_host_vector, vbar_el2);
}

/* Save VGICv3 state on non-VHE systems */
static void __hyp_vgic_save_state(struct kvm_vcpu *vcpu)
{
	if (static_branch_unlikely(&kvm_vgic_global_state.gicv3_cpuif)) {
		__vgic_v3_save_state(&vcpu->arch.vgic_cpu.vgic_v3);
		__vgic_v3_deactivate_traps(&vcpu->arch.vgic_cpu.vgic_v3);
	}
}

/* Restore VGICv3 state on non-VHE systems */
static void __hyp_vgic_restore_state(struct kvm_vcpu *vcpu)
{
	if (static_branch_unlikely(&kvm_vgic_global_state.gicv3_cpuif)) {
		__vgic_v3_activate_traps(&vcpu->arch.vgic_cpu.vgic_v3);
		__vgic_v3_restore_state(&vcpu->arch.vgic_cpu.vgic_v3);
	}
}

/*
 * Disable host events, enable guest events
 */
#ifdef CONFIG_HW_PERF_EVENTS
static bool __pmu_switch_to_guest(struct kvm_vcpu *vcpu)
{
	struct kvm_pmu_events *pmu = &vcpu->arch.pmu.events;

	if (pmu->events_host)
		write_sysreg(pmu->events_host, pmcntenclr_el0);

	if (pmu->events_guest)
		write_sysreg(pmu->events_guest, pmcntenset_el0);

	return (pmu->events_host || pmu->events_guest);
}

/*
 * Disable guest events, enable host events
 */
static void __pmu_switch_to_host(struct kvm_vcpu *vcpu)
{
	struct kvm_pmu_events *pmu = &vcpu->arch.pmu.events;

	if (pmu->events_guest)
		write_sysreg(pmu->events_guest, pmcntenclr_el0);

	if (pmu->events_host)
		write_sysreg(pmu->events_host, pmcntenset_el0);
}
#else
#define __pmu_switch_to_guest(v)	({ false; })
#define __pmu_switch_to_host(v)		do {} while (0)
#endif

/*
 * Handler for protected VM MSR, MRS or System instruction execution in AArch64.
 *
 * Returns true if the hypervisor has handled the exit, and control should go
 * back to the guest, or false if it hasn't.
 */
static bool kvm_handle_pvm_sys64(struct kvm_vcpu *vcpu, u64 *exit_code)
{
	/*
	 * Make sure we handle the exit for workarounds and ptrauth
	 * before the pKVM handling, as the latter could decide to
	 * UNDEF.
	 */
	return (kvm_hyp_handle_sysreg(vcpu, exit_code) ||
		kvm_handle_pvm_sysreg(vcpu, exit_code));
}
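
/* Exit handlers for regular (non-protected) guests, indexed by ESR_ELx EC. */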
static const exit_handler_fn hyp_exit_handlers[] = {
	[0 ... ESR_ELx_EC_MAX]		= NULL,
	[ESR_ELx_EC_CP15_32]		= kvm_hyp_handle_cp15_32,
	[ESR_ELx_EC_SYS64]		= kvm_hyp_handle_sysreg,
	[ESR_ELx_EC_SVE]		= kvm_hyp_handle_fpsimd,
	[ESR_ELx_EC_FP_ASIMD]		= kvm_hyp_handle_fpsimd,
	[ESR_ELx_EC_IABT_LOW]		= kvm_hyp_handle_iabt_low,
	[ESR_ELx_EC_DABT_LOW]		= kvm_hyp_handle_dabt_low,
	[ESR_ELx_EC_PAC]		= kvm_hyp_handle_ptrauth,
};
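
/* Restricted set of exit handlers used for protected guests. */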
static const exit_handler_fn pvm_exit_handlers[] = {
	[0 ... ESR_ELx_EC_MAX]		= NULL,
	[ESR_ELx_EC_SYS64]		= kvm_handle_pvm_sys64,
	[ESR_ELx_EC_SVE]		= kvm_handle_pvm_restricted,
	[ESR_ELx_EC_FP_ASIMD]		= kvm_hyp_handle_fpsimd,
	[ESR_ELx_EC_IABT_LOW]		= kvm_hyp_handle_iabt_low,
	[ESR_ELx_EC_DABT_LOW]		= kvm_hyp_handle_dabt_low,
	[ESR_ELx_EC_PAC]		= kvm_hyp_handle_ptrauth,
};
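
/*
 * Pick the exit handler table for this vcpu: protected VMs get the
 * restricted pvm handlers, everything else the default hyp handlers.
 */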
static const exit_handler_fn *kvm_get_exit_handler_array(struct kvm_vcpu *vcpu)
{
	if (unlikely(kvm_vm_is_protected(kern_hyp_va(vcpu->kvm))))
		return pvm_exit_handlers;

	return hyp_exit_handlers;
}

/*
 * Some guests (e.g., protected VMs) are not allowed to run in AArch32.
 * The ARMv8 architecture does not give the hypervisor a mechanism to prevent a
 * guest from dropping to AArch32 EL0 if implemented by the CPU. If the
 * hypervisor spots a guest in such a state, ensure it is handled, and don't
 * trust the host to spot or fix it. The check below is based on the one in
 * kvm_arch_vcpu_ioctl_run().
 *
 * If the guest ran in AArch32 when it shouldn't have, the vcpu is invalidated
 * and the exit code is rewritten to ARM_EXCEPTION_IL so that the run loop
 * bails out to the host.
 */
static void early_exit_filter(struct kvm_vcpu *vcpu, u64 *exit_code)
{
	struct kvm *kvm = kern_hyp_va(vcpu->kvm);

	if (kvm_vm_is_protected(kvm) && vcpu_mode_is_32bit(vcpu)) {
		/*
		 * As we have caught the guest red-handed, decide that it isn't
		 * fit for purpose anymore by making the vcpu invalid. The VMM
		 * can try and fix it by re-initializing the vcpu with
		 * KVM_ARM_VCPU_INIT, however, this is likely not possible for
		 * protected VMs.
		 */
		vcpu->arch.target = -1;
		*exit_code &= BIT(ARM_EXIT_WITH_SERROR_BIT);
		*exit_code |= ARM_EXCEPTION_IL;
	}
}

/* Switch to the guest for legacy non-VHE systems */
int __kvm_vcpu_run(struct kvm_vcpu *vcpu)
{
	struct kvm_cpu_context *host_ctxt;
	struct kvm_cpu_context *guest_ctxt;
	struct kvm_s2_mmu *mmu;
	bool pmu_switch_needed;
	u64 exit_code;

	/*
	 * Having IRQs masked via PMR when entering the guest means the GIC
	 * will not signal the CPU of interrupts of lower priority, and the
	 * only way to get out will be via guest exceptions.
	 * Naturally, we want to avoid this.
	 */
	if (system_uses_irq_prio_masking()) {
		gic_write_pmr(GIC_PRIO_IRQON | GIC_PRIO_PSR_I_SET);
		pmr_sync();
	}

	host_ctxt = &this_cpu_ptr(&kvm_host_data)->host_ctxt;
	host_ctxt->__hyp_running_vcpu = vcpu;
	guest_ctxt = &vcpu->arch.ctxt;

	pmu_switch_needed = __pmu_switch_to_guest(vcpu);

	__sysreg_save_state_nvhe(host_ctxt);
	/*
	 * We must flush and disable the SPE buffer for nVHE, as
	 * the translation regime (EL1&0) is going to be loaded with
	 * that of the guest. And we must do this before we change the
	 * translation regime to EL2 (via MDCR_EL2_E2PB == 0) and
	 * before we load guest Stage1.
	 */
	__debug_save_host_buffers_nvhe(vcpu);

	__kvm_adjust_pc(vcpu);

	/*
	 * We must restore the 32-bit state before the sysregs, thanks
	 * to erratum #852523 (Cortex-A57) or #853709 (Cortex-A72).
	 *
	 * Also, and in order to be able to deal with erratum #1319537 (A57)
	 * and #1319367 (A72), we must ensure that all VM-related sysregs are
	 * restored before we enable S2 translation.
	 */
	__sysreg32_restore_state(vcpu);
	__sysreg_restore_state_nvhe(guest_ctxt);

	mmu = kern_hyp_va(vcpu->arch.hw_mmu);
	__load_stage2(mmu, kern_hyp_va(mmu->arch));
	__activate_traps(vcpu);

	__hyp_vgic_restore_state(vcpu);
	__timer_enable_traps(vcpu);

	__debug_switch_to_guest(vcpu);

	do {
		/* Jump in the fire! */
		exit_code = __guest_enter(vcpu);

		/* And we're baaack! */
	} while (fixup_guest_exit(vcpu, &exit_code));

	__sysreg_save_state_nvhe(guest_ctxt);
	__sysreg32_save_state(vcpu);
	__timer_disable_traps(vcpu);
	__hyp_vgic_save_state(vcpu);

	__deactivate_traps(vcpu);
	__load_host_stage2();

	__sysreg_restore_state_nvhe(host_ctxt);

	if (vcpu->arch.fp_state == FP_STATE_GUEST_OWNED)
		__fpsimd_save_fpexc32(vcpu);

	__debug_switch_to_host(vcpu);
	/*
	 * This must come after restoring the host sysregs, since a non-VHE
	 * system may enable SPE here and make use of the TTBRs.
	 */
	__debug_restore_host_buffers_nvhe(vcpu);

	if (pmu_switch_needed)
		__pmu_switch_to_host(vcpu);

	/* Returning to host will clear PSR.I, remask PMR if needed */
	if (system_uses_irq_prio_masking())
		gic_write_pmr(GIC_PRIO_IRQOFF);

	host_ctxt->__hyp_running_vcpu = NULL;

	return exit_code;
}
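
/*
 * nVHE hyp panic path: if a vcpu was running, restore enough host context
 * for a meaningful report, record where the hyp stacktrace should start,
 * and hand the panic over to the host.
 */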
asmlinkage void __noreturn hyp_panic(void)
{
	u64 spsr = read_sysreg_el2(SYS_SPSR);
	u64 elr = read_sysreg_el2(SYS_ELR);
	u64 par = read_sysreg_par();
	struct kvm_cpu_context *host_ctxt;
	struct kvm_vcpu *vcpu;

	host_ctxt = &this_cpu_ptr(&kvm_host_data)->host_ctxt;
	vcpu = host_ctxt->__hyp_running_vcpu;

	if (vcpu) {
		__timer_disable_traps(vcpu);
		__deactivate_traps(vcpu);
		__load_host_stage2();
		__sysreg_restore_state_nvhe(host_ctxt);
	}

	/* Prepare to dump kvm nvhe hyp stacktrace */
	kvm_nvhe_prepare_backtrace((unsigned long)__builtin_frame_address(0),
				   _THIS_IP_);

	__hyp_do_panic(host_ctxt, spsr, elr, par);
	unreachable();
}
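
/*
 * Entry point for a hyp stack overflow (hit via the stack guard); nothing
 * left to do but panic.
 */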
asmlinkage void __noreturn hyp_panic_bad_stack(void)
{
	hyp_panic();
}
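
/* Handle an exception unexpectedly taken to EL2, via the common helper. */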
asmlinkage void kvm_unexpected_el2_exception(void)
{
	__kvm_unexpected_el2_exception();
}