// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (C) 2012,2013 - ARM Ltd
 * Author: Marc Zyngier <marc.zyngier@arm.com>
 *
 * Derived from arch/arm/kvm/reset.c
 * Copyright (C) 2012 - Virtual Open Systems and Columbia University
 * Author: Christoffer Dall <c.dall@virtualopensystems.com>
 */

#include <linux/errno.h>
#include <linux/kernel.h>
#include <linux/kvm_host.h>
#include <linux/kvm.h>
#include <linux/hw_breakpoint.h>
#include <linux/slab.h>
#include <linux/string.h>
#include <linux/types.h>

#include <kvm/arm_arch_timer.h>

#include <asm/cpufeature.h>
#include <asm/cputype.h>
#include <asm/fpsimd.h>
#include <asm/ptrace.h>
#include <asm/kvm_arm.h>
#include <asm/kvm_asm.h>
#include <asm/kvm_emulate.h>
#include <asm/kvm_mmu.h>
#include <asm/kvm_nested.h>
#include <asm/virt.h>

/* Maximum phys_shift supported for any VM on this host */
static u32 __ro_after_init kvm_ipa_limit;

/*
 * ARMv8 Reset Values
 */
#define VCPU_RESET_PSTATE_EL1	(PSR_MODE_EL1h | PSR_A_BIT | PSR_I_BIT | \
				 PSR_F_BIT | PSR_D_BIT)

#define VCPU_RESET_PSTATE_EL2	(PSR_MODE_EL2h | PSR_A_BIT | PSR_I_BIT | \
				 PSR_F_BIT | PSR_D_BIT)

#define VCPU_RESET_PSTATE_SVC	(PSR_AA32_MODE_SVC | PSR_AA32_A_BIT | \
				 PSR_AA32_I_BIT | PSR_AA32_F_BIT)

unsigned int __ro_after_init kvm_sve_max_vl;

int __init kvm_arm_init_sve(void)
{
	if (system_supports_sve()) {
		kvm_sve_max_vl = sve_max_virtualisable_vl();

		/*
		 * The get_sve_reg()/set_sve_reg() ioctl interface will need
		 * to be extended with multiple register slice support in
		 * order to support vector lengths greater than
		 * VL_ARCH_MAX:
		 */
		if (WARN_ON(kvm_sve_max_vl > VL_ARCH_MAX))
			kvm_sve_max_vl = VL_ARCH_MAX;

		/*
		 * Don't even try to make use of vector lengths that
		 * aren't available on all CPUs, for now:
		 */
		if (kvm_sve_max_vl < sve_max_vl())
			pr_warn("KVM: SVE vector length for guests limited to %u bytes\n",
				kvm_sve_max_vl);
	}

	return 0;
}

static void kvm_vcpu_enable_sve(struct kvm_vcpu *vcpu)
{
	vcpu->arch.sve_max_vl = kvm_sve_max_vl;

	/*
	 * Userspace can still customize the vector lengths by writing
	 * KVM_REG_ARM64_SVE_VLS. Allocation is deferred until
	 * kvm_arm_vcpu_finalize(), which freezes the configuration.
	 */
	vcpu_set_flag(vcpu, GUEST_HAS_SVE);
}

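/*
 * Illustrative userspace ordering for the SVE enable/finalize dance (a
 * hedged sketch, not part of this file: the ioctls and
 * KVM_REG_ARM64_SVE_VLS are from the documented KVM API, the fd and
 * variable names are made up):
 *
 *	struct kvm_vcpu_init init = { ... };
 *	init.features[0] |= 1U << KVM_ARM_VCPU_SVE;
 *	ioctl(vcpu_fd, KVM_ARM_VCPU_INIT, &init);	// -> kvm_vcpu_enable_sve()
 *
 *	// Optionally narrow the vector length set with KVM_SET_ONE_REG on
 *	// KVM_REG_ARM64_SVE_VLS before finalizing.
 *
 *	int what = KVM_ARM_VCPU_SVE;
 *	ioctl(vcpu_fd, KVM_ARM_VCPU_FINALIZE, &what);	// -> kvm_vcpu_finalize_sve()
 */
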
/*
 * Finalize vcpu's maximum SVE vector length, allocating
 * vcpu->arch.sve_state as necessary.
 */
static int kvm_vcpu_finalize_sve(struct kvm_vcpu *vcpu)
{
	void *buf;
	unsigned int vl;
	size_t reg_sz;
	int ret;

	vl = vcpu->arch.sve_max_vl;

	/*
	 * Responsibility for these properties is shared between
	 * kvm_arm_init_sve(), kvm_vcpu_enable_sve() and
	 * set_sve_vls(). Double-check here just to be sure:
	 */
	if (WARN_ON(!sve_vl_valid(vl) || vl > sve_max_virtualisable_vl() ||
		    vl > VL_ARCH_MAX))
		return -EIO;

	reg_sz = vcpu_sve_state_size(vcpu);
	buf = kzalloc(reg_sz, GFP_KERNEL_ACCOUNT);
	if (!buf)
		return -ENOMEM;

	ret = kvm_share_hyp(buf, buf + reg_sz);
	if (ret) {
		kfree(buf);
		return ret;
	}

	vcpu->arch.sve_state = buf;
	vcpu_set_flag(vcpu, VCPU_SVE_FINALIZED);
	return 0;
}

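/*
 * Lifetime note (summarizing this file's flow, not new behaviour): the
 * buffer allocated above is shared with the hypervisor here and is only
 * unshared and freed in kvm_arm_vcpu_destroy(), so a finalized vcpu keeps
 * a hyp-visible sve_state until teardown.
 */
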
int kvm_arm_vcpu_finalize(struct kvm_vcpu *vcpu, int feature)
{
	switch (feature) {
	case KVM_ARM_VCPU_SVE:
		if (!vcpu_has_sve(vcpu))
			return -EINVAL;

		if (kvm_arm_vcpu_sve_finalized(vcpu))
			return -EPERM;

		return kvm_vcpu_finalize_sve(vcpu);
	}

	return -EINVAL;
}

bool kvm_arm_vcpu_is_finalized(struct kvm_vcpu *vcpu)
{
	if (vcpu_has_sve(vcpu) && !kvm_arm_vcpu_sve_finalized(vcpu))
		return false;

	return true;
}

void kvm_arm_vcpu_destroy(struct kvm_vcpu *vcpu)
{
	void *sve_state = vcpu->arch.sve_state;

	kvm_unshare_hyp(vcpu, vcpu + 1);
	if (sve_state)
		kvm_unshare_hyp(sve_state, sve_state + vcpu_sve_state_size(vcpu));
	kfree(sve_state);
	kfree(vcpu->arch.ccsidr);
}

static void kvm_vcpu_reset_sve(struct kvm_vcpu *vcpu)
{
	if (vcpu_has_sve(vcpu))
		memset(vcpu->arch.sve_state, 0, vcpu_sve_state_size(vcpu));
}

static void kvm_vcpu_enable_ptrauth(struct kvm_vcpu *vcpu)
{
	vcpu_set_flag(vcpu, GUEST_HAS_PTRAUTH);
}

/**
 * kvm_reset_vcpu - sets core registers and sys_regs to reset value
 * @vcpu: The VCPU pointer
 *
 * This function sets the registers on the virtual CPU struct to their
 * architecturally defined reset values, except for registers whose reset is
 * deferred until kvm_arm_vcpu_finalize().
 *
 * Note: This function can be called from two paths: The KVM_ARM_VCPU_INIT
 * ioctl or as part of handling a request issued by another VCPU in the PSCI
 * handling code. In the first case, the VCPU will not be loaded, and in the
 * second case the VCPU will be loaded. Because this function operates purely
 * on the memory-backed values of system registers, we want to do a full put if
 * we were loaded (handling a request) and load the values back at the end of
 * the function. Otherwise we leave the state alone. In both cases, we
 * disable preemption around the vcpu reset as we would otherwise race with
 * preempt notifiers which also call put/load on the vcpu.
 */
void kvm_reset_vcpu(struct kvm_vcpu *vcpu)
{
	struct vcpu_reset_state reset_state;
	bool loaded;
	u32 pstate;

	spin_lock(&vcpu->arch.mp_state_lock);
	reset_state = vcpu->arch.reset_state;
	vcpu->arch.reset_state.reset = false;
	spin_unlock(&vcpu->arch.mp_state_lock);

	/* Reset PMU outside of the non-preemptible section */
	kvm_pmu_vcpu_reset(vcpu);

	preempt_disable();
	loaded = (vcpu->cpu != -1);
	if (loaded)
		kvm_arch_vcpu_put(vcpu);

	if (!kvm_arm_vcpu_sve_finalized(vcpu)) {
		if (vcpu_has_feature(vcpu, KVM_ARM_VCPU_SVE))
			kvm_vcpu_enable_sve(vcpu);
	} else {
		kvm_vcpu_reset_sve(vcpu);
	}

	if (vcpu_has_feature(vcpu, KVM_ARM_VCPU_PTRAUTH_ADDRESS) ||
	    vcpu_has_feature(vcpu, KVM_ARM_VCPU_PTRAUTH_GENERIC))
		kvm_vcpu_enable_ptrauth(vcpu);

	if (vcpu_el1_is_32bit(vcpu))
		pstate = VCPU_RESET_PSTATE_SVC;
	else if (vcpu_has_nv(vcpu))
		pstate = VCPU_RESET_PSTATE_EL2;
	else
		pstate = VCPU_RESET_PSTATE_EL1;

	/* Reset core registers */
	memset(vcpu_gp_regs(vcpu), 0, sizeof(*vcpu_gp_regs(vcpu)));
	memset(&vcpu->arch.ctxt.fp_regs, 0, sizeof(vcpu->arch.ctxt.fp_regs));
	vcpu->arch.ctxt.spsr_abt = 0;
	vcpu->arch.ctxt.spsr_und = 0;
	vcpu->arch.ctxt.spsr_irq = 0;
	vcpu->arch.ctxt.spsr_fiq = 0;
	vcpu_gp_regs(vcpu)->pstate = pstate;

	/* Reset system registers */
	kvm_reset_sys_regs(vcpu);

	/*
	 * Additional reset state handling that PSCI may have imposed on us.
	 * Must be done after all the sys_reg reset.
	 */
	if (reset_state.reset) {
		unsigned long target_pc = reset_state.pc;

		/* Gracefully handle Thumb2 entry point */
		if (vcpu_mode_is_32bit(vcpu) && (target_pc & 1)) {
			target_pc &= ~(unsigned long)1;
			vcpu_set_thumb(vcpu);
		}

		/* Propagate caller endianness */
		if (reset_state.be)
			kvm_vcpu_set_be(vcpu);

		*vcpu_pc(vcpu) = target_pc;
		vcpu_set_reg(vcpu, 0, reset_state.r0);
	}

	/* Reset timer */
	kvm_timer_vcpu_reset(vcpu);

	if (loaded)
		kvm_arch_vcpu_load(vcpu, smp_processor_id());
	preempt_enable();
}

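/*
 * Illustrative call paths for the kerneldoc note above (a sketch; exact
 * call-site names vary between kernel versions):
 *
 *	KVM_ARM_VCPU_INIT ioctl
 *	  -> kvm_reset_vcpu()				// vcpu not loaded
 *
 *	PSCI CPU_ON issued by another vcpu
 *	  -> kvm_psci_vcpu_on(): fills reset_state, raises KVM_REQ_VCPU_RESET
 *	  -> check_vcpu_requests() -> kvm_reset_vcpu()	// vcpu loaded
 */
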
u32 get_kvm_ipa_limit(void)
{
	return kvm_ipa_limit;
}

int __init kvm_set_ipa_limit(void)
{
	unsigned int parange;
	u64 mmfr0;

	mmfr0 = read_sanitised_ftr_reg(SYS_ID_AA64MMFR0_EL1);
	parange = cpuid_feature_extract_unsigned_field(mmfr0,
				ID_AA64MMFR0_EL1_PARANGE_SHIFT);
	/*
	 * IPA size beyond 48 bits for 4K and 16K page size is only supported
	 * when LPA2 is available. So if we have LPA2, enable it, else cap to 48
	 * bits, in case it's reported as larger on the system.
	 */
	if (!kvm_lpa2_is_enabled() && PAGE_SIZE != SZ_64K)
		parange = min(parange, (unsigned int)ID_AA64MMFR0_EL1_PARANGE_48);

	/*
	 * Check with ARMv8.5-GTG that our PAGE_SIZE is supported at
	 * Stage-2. If not, things will stop very quickly.
	 */
	switch (cpuid_feature_extract_unsigned_field(mmfr0, ID_AA64MMFR0_EL1_TGRAN_2_SHIFT)) {
	case ID_AA64MMFR0_EL1_TGRAN_2_SUPPORTED_NONE:
		kvm_err("PAGE_SIZE not supported at Stage-2, giving up\n");
		return -EINVAL;
	case ID_AA64MMFR0_EL1_TGRAN_2_SUPPORTED_DEFAULT:
		kvm_debug("PAGE_SIZE supported at Stage-2 (default)\n");
		break;
	case ID_AA64MMFR0_EL1_TGRAN_2_SUPPORTED_MIN ... ID_AA64MMFR0_EL1_TGRAN_2_SUPPORTED_MAX:
		kvm_debug("PAGE_SIZE supported at Stage-2 (advertised)\n");
		break;
	default:
		kvm_err("Unsupported value for TGRAN_2, giving up\n");
		return -EINVAL;
	}

	kvm_ipa_limit = id_aa64mmfr0_parange_to_phys_shift(parange);
	kvm_info("IPA Size Limit: %d bits%s\n", kvm_ipa_limit,
		 ((kvm_ipa_limit < KVM_PHYS_SHIFT) ?
		  " (Reduced IPA size, limited VM/VMM compatibility)" : ""));

	return 0;
}