/*
 * Copyright (C) 2012,2013 - ARM Ltd
 * Author: Marc Zyngier <marc.zyngier@arm.com>
 *
 * Derived from arch/arm/include/kvm_emulate.h
 * Copyright (C) 2012 - Virtual Open Systems and Columbia University
 * Author: Christoffer Dall <c.dall@virtualopensystems.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program. If not, see <http://www.gnu.org/licenses/>.
 */

#ifndef __ARM64_KVM_EMULATE_H__
#define __ARM64_KVM_EMULATE_H__

#include <linux/kvm_host.h>

#include <asm/debug-monitors.h>
#include <asm/esr.h>
#include <asm/kvm_arm.h>
#include <asm/kvm_hyp.h>
#include <asm/kvm_mmio.h>
#include <asm/ptrace.h>
#include <asm/cputype.h>
#include <asm/virt.h>

unsigned long *vcpu_reg32(const struct kvm_vcpu *vcpu, u8 reg_num);
unsigned long vcpu_read_spsr32(const struct kvm_vcpu *vcpu);
void vcpu_write_spsr32(struct kvm_vcpu *vcpu, unsigned long v);

bool kvm_condition_valid32(const struct kvm_vcpu *vcpu);
void kvm_skip_instr32(struct kvm_vcpu *vcpu, bool is_wide_instr);

void kvm_inject_undefined(struct kvm_vcpu *vcpu);
void kvm_inject_vabt(struct kvm_vcpu *vcpu);
void kvm_inject_dabt(struct kvm_vcpu *vcpu, unsigned long addr);
void kvm_inject_pabt(struct kvm_vcpu *vcpu, unsigned long addr);
void kvm_inject_undef32(struct kvm_vcpu *vcpu);
void kvm_inject_dabt32(struct kvm_vcpu *vcpu, unsigned long addr);
void kvm_inject_pabt32(struct kvm_vcpu *vcpu, unsigned long addr);

static inline bool vcpu_el1_is_32bit(struct kvm_vcpu *vcpu)
{
        return !(vcpu->arch.hcr_el2 & HCR_RW);
}

static inline void vcpu_reset_hcr(struct kvm_vcpu *vcpu)
{
        vcpu->arch.hcr_el2 = HCR_GUEST_FLAGS;
        if (is_kernel_in_hyp_mode())
                vcpu->arch.hcr_el2 |= HCR_E2H;
        if (cpus_have_const_cap(ARM64_HAS_RAS_EXTN)) {
                /* route synchronous external abort exceptions to EL2 */
                vcpu->arch.hcr_el2 |= HCR_TEA;
                /* trap error record accesses */
                vcpu->arch.hcr_el2 |= HCR_TERR;
        }
        if (cpus_have_const_cap(ARM64_HAS_STAGE2_FWB))
                vcpu->arch.hcr_el2 |= HCR_FWB;

        if (test_bit(KVM_ARM_VCPU_EL1_32BIT, vcpu->arch.features))
                vcpu->arch.hcr_el2 &= ~HCR_RW;

        /*
         * TID3: trap feature register accesses that we virtualise.
         * For now this is conditional, since no AArch32 feature regs
         * are currently virtualised.
         */
        if (!vcpu_el1_is_32bit(vcpu))
                vcpu->arch.hcr_el2 |= HCR_TID3;

        if (cpus_have_const_cap(ARM64_MISMATCHED_CACHE_TYPE) ||
            vcpu_el1_is_32bit(vcpu))
                vcpu->arch.hcr_el2 |= HCR_TID2;
}

static inline unsigned long *vcpu_hcr(struct kvm_vcpu *vcpu)
{
        return (unsigned long *)&vcpu->arch.hcr_el2;
}

static inline void vcpu_clear_wfe_traps(struct kvm_vcpu *vcpu)
{
        vcpu->arch.hcr_el2 &= ~HCR_TWE;
}

static inline void vcpu_set_wfe_traps(struct kvm_vcpu *vcpu)
{
        vcpu->arch.hcr_el2 |= HCR_TWE;
}

static inline void vcpu_ptrauth_enable(struct kvm_vcpu *vcpu)
{
        vcpu->arch.hcr_el2 |= (HCR_API | HCR_APK);
}

static inline void vcpu_ptrauth_disable(struct kvm_vcpu *vcpu)
{
        vcpu->arch.hcr_el2 &= ~(HCR_API | HCR_APK);
}

static inline void vcpu_ptrauth_setup_lazy(struct kvm_vcpu *vcpu)
{
        if (vcpu_has_ptrauth(vcpu))
                vcpu_ptrauth_disable(vcpu);
}

static inline unsigned long vcpu_get_vsesr(struct kvm_vcpu *vcpu)
{
        return vcpu->arch.vsesr_el2;
}

static inline void vcpu_set_vsesr(struct kvm_vcpu *vcpu, u64 vsesr)
{
        vcpu->arch.vsesr_el2 = vsesr;
}

static inline unsigned long *vcpu_pc(const struct kvm_vcpu *vcpu)
{
        return (unsigned long *)&vcpu_gp_regs(vcpu)->regs.pc;
}

static inline unsigned long *__vcpu_elr_el1(const struct kvm_vcpu *vcpu)
{
        return (unsigned long *)&vcpu_gp_regs(vcpu)->elr_el1;
}

static inline unsigned long vcpu_read_elr_el1(const struct kvm_vcpu *vcpu)
{
        if (vcpu->arch.sysregs_loaded_on_cpu)
                return read_sysreg_el1(elr);

        return *__vcpu_elr_el1(vcpu);
}

static inline void vcpu_write_elr_el1(const struct kvm_vcpu *vcpu, unsigned long v)
{
        if (vcpu->arch.sysregs_loaded_on_cpu)
                write_sysreg_el1(v, elr);
        else
                *__vcpu_elr_el1(vcpu) = v;
}

static inline unsigned long *vcpu_cpsr(const struct kvm_vcpu *vcpu)
{
        return (unsigned long *)&vcpu_gp_regs(vcpu)->regs.pstate;
}

static inline bool vcpu_mode_is_32bit(const struct kvm_vcpu *vcpu)
{
        return !!(*vcpu_cpsr(vcpu) & PSR_MODE32_BIT);
}

static inline bool kvm_condition_valid(const struct kvm_vcpu *vcpu)
{
        if (vcpu_mode_is_32bit(vcpu))
                return kvm_condition_valid32(vcpu);

        return true;
}

static inline void vcpu_set_thumb(struct kvm_vcpu *vcpu)
{
        *vcpu_cpsr(vcpu) |= PSR_AA32_T_BIT;
}

/*
 * vcpu_get_reg and vcpu_set_reg should always be passed a register number
 * coming from a read of ESR_EL2. Otherwise, it may give the wrong result on
 * AArch32 with banked registers.
 */
static inline unsigned long vcpu_get_reg(const struct kvm_vcpu *vcpu,
                                         u8 reg_num)
{
        return (reg_num == 31) ? 0 : vcpu_gp_regs(vcpu)->regs.regs[reg_num];
}

static inline void vcpu_set_reg(struct kvm_vcpu *vcpu, u8 reg_num,
                                unsigned long val)
{
        if (reg_num != 31)
                vcpu_gp_regs(vcpu)->regs.regs[reg_num] = val;
}
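
/*
 * Illustrative example (not part of the original header): a typical caller
 * decodes the transfer register straight from ESR_EL2 and then uses
 * vcpu_get_reg()/vcpu_set_reg() with that number, e.g. when emulating a
 * trapped MMIO access:
 *
 *	int rt = kvm_vcpu_dabt_get_rd(vcpu);
 *
 *	if (kvm_vcpu_dabt_iswrite(vcpu))
 *		data = vcpu_get_reg(vcpu, rt);	// guest register -> emulated device
 *	else
 *		vcpu_set_reg(vcpu, rt, data);	// emulated device -> guest register
 *
 * Passing a register number obtained any other way would break on AArch32
 * guests, where the banked registers are remapped.
 */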

static inline unsigned long vcpu_read_spsr(const struct kvm_vcpu *vcpu)
{
        if (vcpu_mode_is_32bit(vcpu))
                return vcpu_read_spsr32(vcpu);

        if (vcpu->arch.sysregs_loaded_on_cpu)
                return read_sysreg_el1(spsr);

        return vcpu_gp_regs(vcpu)->spsr[KVM_SPSR_EL1];
}

static inline void vcpu_write_spsr(struct kvm_vcpu *vcpu, unsigned long v)
{
        if (vcpu_mode_is_32bit(vcpu)) {
                vcpu_write_spsr32(vcpu, v);
                return;
        }

        if (vcpu->arch.sysregs_loaded_on_cpu)
                write_sysreg_el1(v, spsr);
        else
                vcpu_gp_regs(vcpu)->spsr[KVM_SPSR_EL1] = v;
}

static inline bool vcpu_mode_priv(const struct kvm_vcpu *vcpu)
{
        u32 mode;

        if (vcpu_mode_is_32bit(vcpu)) {
                mode = *vcpu_cpsr(vcpu) & PSR_AA32_MODE_MASK;
                return mode > PSR_AA32_MODE_USR;
        }

        mode = *vcpu_cpsr(vcpu) & PSR_MODE_MASK;

        return mode != PSR_MODE_EL0t;
}

static inline u32 kvm_vcpu_get_hsr(const struct kvm_vcpu *vcpu)
{
        return vcpu->arch.fault.esr_el2;
}

static inline int kvm_vcpu_get_condition(const struct kvm_vcpu *vcpu)
{
        u32 esr = kvm_vcpu_get_hsr(vcpu);

        if (esr & ESR_ELx_CV)
                return (esr & ESR_ELx_COND_MASK) >> ESR_ELx_COND_SHIFT;

        return -1;
}

static inline unsigned long kvm_vcpu_get_hfar(const struct kvm_vcpu *vcpu)
{
        return vcpu->arch.fault.far_el2;
}

static inline phys_addr_t kvm_vcpu_get_fault_ipa(const struct kvm_vcpu *vcpu)
{
        return ((phys_addr_t)vcpu->arch.fault.hpfar_el2 & HPFAR_MASK) << 8;
}
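
/*
 * Explanatory note (added): HPFAR_EL2.FIPA holds bits [47:12] of the faulting
 * IPA in register bits [43:4], so masking with HPFAR_MASK and shifting left
 * by 8 reconstructs the page-aligned intermediate physical address. The page
 * offset, when needed, has to come from FAR_EL2 (kvm_vcpu_get_hfar()).
 */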

static inline u64 kvm_vcpu_get_disr(const struct kvm_vcpu *vcpu)
{
        return vcpu->arch.fault.disr_el1;
}

static inline u32 kvm_vcpu_hvc_get_imm(const struct kvm_vcpu *vcpu)
{
        return kvm_vcpu_get_hsr(vcpu) & ESR_ELx_xVC_IMM_MASK;
}

static inline bool kvm_vcpu_dabt_isvalid(const struct kvm_vcpu *vcpu)
{
        return !!(kvm_vcpu_get_hsr(vcpu) & ESR_ELx_ISV);
}

static inline bool kvm_vcpu_dabt_issext(const struct kvm_vcpu *vcpu)
{
        return !!(kvm_vcpu_get_hsr(vcpu) & ESR_ELx_SSE);
}

static inline int kvm_vcpu_dabt_get_rd(const struct kvm_vcpu *vcpu)
{
        return (kvm_vcpu_get_hsr(vcpu) & ESR_ELx_SRT_MASK) >> ESR_ELx_SRT_SHIFT;
}

static inline bool kvm_vcpu_dabt_iss1tw(const struct kvm_vcpu *vcpu)
{
        return !!(kvm_vcpu_get_hsr(vcpu) & ESR_ELx_S1PTW);
}

static inline bool kvm_vcpu_dabt_iswrite(const struct kvm_vcpu *vcpu)
{
        return !!(kvm_vcpu_get_hsr(vcpu) & ESR_ELx_WNR) ||
                kvm_vcpu_dabt_iss1tw(vcpu); /* AF/DBM update */
}

static inline bool kvm_vcpu_dabt_is_cm(const struct kvm_vcpu *vcpu)
{
        return !!(kvm_vcpu_get_hsr(vcpu) & ESR_ELx_CM);
}

static inline int kvm_vcpu_dabt_get_as(const struct kvm_vcpu *vcpu)
{
        return 1 << ((kvm_vcpu_get_hsr(vcpu) & ESR_ELx_SAS) >> ESR_ELx_SAS_SHIFT);
}
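
/*
 * Worked example (added for clarity, not in the original header): the SAS
 * field of the ISS encodes the access size as a power of two, so the
 * "1 <<" above turns it into a byte count:
 *
 *	SAS = 0 -> 1 byte, SAS = 1 -> 2 bytes, SAS = 2 -> 4 bytes, SAS = 3 -> 8 bytes
 *
 * A data abort handler would typically combine this with the other helpers,
 * e.g. kvm_vcpu_dabt_isvalid() to check that the syndrome is decodable and
 * kvm_vcpu_dabt_get_rd() to find the transfer register.
 */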

/* This one is not specific to Data Abort */
static inline bool kvm_vcpu_trap_il_is32bit(const struct kvm_vcpu *vcpu)
{
        return !!(kvm_vcpu_get_hsr(vcpu) & ESR_ELx_IL);
}

static inline u8 kvm_vcpu_trap_get_class(const struct kvm_vcpu *vcpu)
{
        return ESR_ELx_EC(kvm_vcpu_get_hsr(vcpu));
}

static inline bool kvm_vcpu_trap_is_iabt(const struct kvm_vcpu *vcpu)
{
        return kvm_vcpu_trap_get_class(vcpu) == ESR_ELx_EC_IABT_LOW;
}

static inline u8 kvm_vcpu_trap_get_fault(const struct kvm_vcpu *vcpu)
{
        return kvm_vcpu_get_hsr(vcpu) & ESR_ELx_FSC;
}

static inline u8 kvm_vcpu_trap_get_fault_type(const struct kvm_vcpu *vcpu)
{
        return kvm_vcpu_get_hsr(vcpu) & ESR_ELx_FSC_TYPE;
}

static inline bool kvm_vcpu_dabt_isextabt(const struct kvm_vcpu *vcpu)
{
        /* external abort and parity/ECC error fault status codes */
        switch (kvm_vcpu_trap_get_fault(vcpu)) {
        case FSC_SEA:
        case FSC_SEA_TTW0:
        case FSC_SEA_TTW1:
        case FSC_SEA_TTW2:
        case FSC_SEA_TTW3:
        case FSC_SECC:
        case FSC_SECC_TTW0:
        case FSC_SECC_TTW1:
        case FSC_SECC_TTW2:
        case FSC_SECC_TTW3:
                return true;
        default:
                return false;
        }
}

static inline int kvm_vcpu_sys_get_rt(struct kvm_vcpu *vcpu)
{
        u32 esr = kvm_vcpu_get_hsr(vcpu);

        return ESR_ELx_SYS64_ISS_RT(esr);
}

static inline bool kvm_is_write_fault(struct kvm_vcpu *vcpu)
{
        if (kvm_vcpu_trap_is_iabt(vcpu))
                return false;

        return kvm_vcpu_dabt_iswrite(vcpu);
}

static inline unsigned long kvm_vcpu_get_mpidr_aff(struct kvm_vcpu *vcpu)
{
        return vcpu_read_sys_reg(vcpu, MPIDR_EL1) & MPIDR_HWID_BITMASK;
}

static inline void kvm_vcpu_set_be(struct kvm_vcpu *vcpu)
{
        if (vcpu_mode_is_32bit(vcpu)) {
                *vcpu_cpsr(vcpu) |= PSR_AA32_E_BIT;
        } else {
                u64 sctlr = vcpu_read_sys_reg(vcpu, SCTLR_EL1);

                sctlr |= (1 << 25);	/* set SCTLR_EL1.EE */
                vcpu_write_sys_reg(vcpu, sctlr, SCTLR_EL1);
        }
}

static inline bool kvm_vcpu_is_be(struct kvm_vcpu *vcpu)
{
        if (vcpu_mode_is_32bit(vcpu))
                return !!(*vcpu_cpsr(vcpu) & PSR_AA32_E_BIT);

        return !!(vcpu_read_sys_reg(vcpu, SCTLR_EL1) & (1 << 25));
}

static inline unsigned long vcpu_data_guest_to_host(struct kvm_vcpu *vcpu,
                                                    unsigned long data,
                                                    unsigned int len)
{
        if (kvm_vcpu_is_be(vcpu)) {
                switch (len) {
                case 1: return data & 0xff;
                case 2: return be16_to_cpu(data & 0xffff);
                case 4: return be32_to_cpu(data & 0xffffffff);
                default: return be64_to_cpu(data);
                }
        } else {
                switch (len) {
                case 1: return data & 0xff;
                case 2: return le16_to_cpu(data & 0xffff);
                case 4: return le32_to_cpu(data & 0xffffffff);
                default: return le64_to_cpu(data);
                }
        }

        return data;	/* Leave LE untouched */
}

static inline unsigned long vcpu_data_host_to_guest(struct kvm_vcpu *vcpu,
                                                    unsigned long data,
                                                    unsigned int len)
{
        if (kvm_vcpu_is_be(vcpu)) {
                switch (len) {
                case 1: return data & 0xff;
                case 2: return cpu_to_be16(data & 0xffff);
                case 4: return cpu_to_be32(data & 0xffffffff);
                default: return cpu_to_be64(data);
                }
        } else {
                switch (len) {
                case 1: return data & 0xff;
                case 2: return cpu_to_le16(data & 0xffff);
                case 4: return cpu_to_le32(data & 0xffffffff);
                default: return cpu_to_le64(data);
                }
        }

        return data;	/* Leave LE untouched */
}
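
/*
 * Illustrative example (added, not from the original header): completing an
 * emulated MMIO read. The value produced by the emulated device is in host
 * byte order and is converted to the guest's current data endianness before
 * being written back to the transfer register:
 *
 *	data = vcpu_data_host_to_guest(vcpu, dev_val, len);
 *	vcpu_set_reg(vcpu, rt, data);
 *
 * The guest_to_host variant is used on the write path, before handing the
 * value to the emulated device.
 */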

static inline void kvm_skip_instr(struct kvm_vcpu *vcpu, bool is_wide_instr)
{
        if (vcpu_mode_is_32bit(vcpu))
                kvm_skip_instr32(vcpu, is_wide_instr);
        else
                *vcpu_pc(vcpu) += 4;

        /* advance the singlestep state machine */
        *vcpu_cpsr(vcpu) &= ~DBG_SPSR_SS;
}
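
/*
 * Typical use (illustrative, not part of the original header): an exit
 * handler that has finished emulating the trapped instruction skips it
 * before re-entering the guest, using the IL bit to pick the right width
 * for 32-bit guests:
 *
 *	kvm_skip_instr(vcpu, kvm_vcpu_trap_il_is32bit(vcpu));
 */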

/*
 * Skip an instruction which has been emulated at hyp while most guest sysregs
 * are live.
 */
static inline void __hyp_text __kvm_skip_instr(struct kvm_vcpu *vcpu)
{
        *vcpu_pc(vcpu) = read_sysreg_el2(elr);
        vcpu->arch.ctxt.gp_regs.regs.pstate = read_sysreg_el2(spsr);

        kvm_skip_instr(vcpu, kvm_vcpu_trap_il_is32bit(vcpu));

        write_sysreg_el2(vcpu->arch.ctxt.gp_regs.regs.pstate, spsr);
        write_sysreg_el2(*vcpu_pc(vcpu), elr);
}

#endif /* __ARM64_KVM_EMULATE_H__ */