KVM: arm64: Move kvm_vcpu_trap_il_is32bit into kvm_skip_instr32()
arch/arm64/include/asm/kvm_emulate.h
/* SPDX-License-Identifier: GPL-2.0-only */
/*
 * Copyright (C) 2012,2013 - ARM Ltd
 * Author: Marc Zyngier <marc.zyngier@arm.com>
 *
 * Derived from arch/arm/include/kvm_emulate.h
 * Copyright (C) 2012 - Virtual Open Systems and Columbia University
 * Author: Christoffer Dall <c.dall@virtualopensystems.com>
 */

#ifndef __ARM64_KVM_EMULATE_H__
#define __ARM64_KVM_EMULATE_H__

#include <linux/kvm_host.h>

#include <asm/debug-monitors.h>
#include <asm/esr.h>
#include <asm/kvm_arm.h>
#include <asm/kvm_hyp.h>
#include <asm/ptrace.h>
#include <asm/cputype.h>
#include <asm/virt.h>

unsigned long *vcpu_reg32(const struct kvm_vcpu *vcpu, u8 reg_num);
unsigned long vcpu_read_spsr32(const struct kvm_vcpu *vcpu);
void vcpu_write_spsr32(struct kvm_vcpu *vcpu, unsigned long v);

bool kvm_condition_valid32(const struct kvm_vcpu *vcpu);
void kvm_skip_instr32(struct kvm_vcpu *vcpu);

void kvm_inject_undefined(struct kvm_vcpu *vcpu);
void kvm_inject_vabt(struct kvm_vcpu *vcpu);
void kvm_inject_dabt(struct kvm_vcpu *vcpu, unsigned long addr);
void kvm_inject_pabt(struct kvm_vcpu *vcpu, unsigned long addr);
void kvm_inject_undef32(struct kvm_vcpu *vcpu);
void kvm_inject_dabt32(struct kvm_vcpu *vcpu, unsigned long addr);
void kvm_inject_pabt32(struct kvm_vcpu *vcpu, unsigned long addr);

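/*
 * EL1 runs AArch32 when HCR_EL2.RW is clear: RW selects the register
 * width of the guest's lower exception levels.
 */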
static __always_inline bool vcpu_el1_is_32bit(struct kvm_vcpu *vcpu)
{
	return !(vcpu->arch.hcr_el2 & HCR_RW);
}

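/*
 * Establish the vcpu's baseline HCR_EL2 configuration: the default
 * guest trap bits, adjusted for CPU features (VHE, RAS, FWB) and for
 * whether the guest's EL1 is AArch32.
 */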
static inline void vcpu_reset_hcr(struct kvm_vcpu *vcpu)
{
	vcpu->arch.hcr_el2 = HCR_GUEST_FLAGS;
	if (is_kernel_in_hyp_mode())
		vcpu->arch.hcr_el2 |= HCR_E2H;
	if (cpus_have_const_cap(ARM64_HAS_RAS_EXTN)) {
		/* route synchronous external abort exceptions to EL2 */
		vcpu->arch.hcr_el2 |= HCR_TEA;
		/* trap error record accesses */
		vcpu->arch.hcr_el2 |= HCR_TERR;
	}

	if (cpus_have_const_cap(ARM64_HAS_STAGE2_FWB)) {
		vcpu->arch.hcr_el2 |= HCR_FWB;
	} else {
		/*
		 * For non-FWB CPUs, we trap VM ops (HCR_EL2.TVM) until M+C
		 * get set in SCTLR_EL1 such that we can detect when the guest
		 * MMU gets turned on and do the necessary cache maintenance
		 * then.
		 */
		vcpu->arch.hcr_el2 |= HCR_TVM;
	}

	if (test_bit(KVM_ARM_VCPU_EL1_32BIT, vcpu->arch.features))
		vcpu->arch.hcr_el2 &= ~HCR_RW;

	/*
	 * TID3: trap feature register accesses that we virtualise.
	 * For now this is conditional, since no AArch32 feature regs
	 * are currently virtualised.
	 */
	if (!vcpu_el1_is_32bit(vcpu))
		vcpu->arch.hcr_el2 |= HCR_TID3;

	if (cpus_have_const_cap(ARM64_MISMATCHED_CACHE_TYPE) ||
	    vcpu_el1_is_32bit(vcpu))
		vcpu->arch.hcr_el2 |= HCR_TID2;
}

static inline unsigned long *vcpu_hcr(struct kvm_vcpu *vcpu)
{
	return (unsigned long *)&vcpu->arch.hcr_el2;
}

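/*
 * WFx trap configuration. Relaxing the traps still leaves WFI trapped
 * (HCR_EL2.TWI) unless the vcpu has directly injected vLPIs mapped or
 * active vSGIs, in which case WFI must reach the hardware so that the
 * GICv4 doorbell can wake the vcpu.
 */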
static inline void vcpu_clear_wfx_traps(struct kvm_vcpu *vcpu)
{
	vcpu->arch.hcr_el2 &= ~HCR_TWE;
	if (atomic_read(&vcpu->arch.vgic_cpu.vgic_v3.its_vpe.vlpi_count) ||
	    vcpu->kvm->arch.vgic.nassgireq)
		vcpu->arch.hcr_el2 &= ~HCR_TWI;
	else
		vcpu->arch.hcr_el2 |= HCR_TWI;
}

static inline void vcpu_set_wfx_traps(struct kvm_vcpu *vcpu)
{
	vcpu->arch.hcr_el2 |= HCR_TWE;
	vcpu->arch.hcr_el2 |= HCR_TWI;
}

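/*
 * HCR_EL2.API/APK are "don't trap" bits: setting them lets the guest
 * use pointer authentication instructions and keys without trapping.
 */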
static inline void vcpu_ptrauth_enable(struct kvm_vcpu *vcpu)
{
	vcpu->arch.hcr_el2 |= (HCR_API | HCR_APK);
}

static inline void vcpu_ptrauth_disable(struct kvm_vcpu *vcpu)
{
	vcpu->arch.hcr_el2 &= ~(HCR_API | HCR_APK);
}

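/*
 * VSESR_EL2 provides the syndrome reported for a RAS SError injected
 * via HCR_EL2.VSE.
 */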
static inline unsigned long vcpu_get_vsesr(struct kvm_vcpu *vcpu)
{
	return vcpu->arch.vsesr_el2;
}

static inline void vcpu_set_vsesr(struct kvm_vcpu *vcpu, u64 vsesr)
{
	vcpu->arch.vsesr_el2 = vsesr;
}

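/* Accessors into the vcpu's saved general-purpose register context. */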
static __always_inline unsigned long *vcpu_pc(const struct kvm_vcpu *vcpu)
{
	return (unsigned long *)&vcpu_gp_regs(vcpu)->pc;
}

static __always_inline unsigned long *vcpu_cpsr(const struct kvm_vcpu *vcpu)
{
	return (unsigned long *)&vcpu_gp_regs(vcpu)->pstate;
}

static __always_inline bool vcpu_mode_is_32bit(const struct kvm_vcpu *vcpu)
{
	return !!(*vcpu_cpsr(vcpu) & PSR_MODE32_BIT);
}

static __always_inline bool kvm_condition_valid(const struct kvm_vcpu *vcpu)
{
	if (vcpu_mode_is_32bit(vcpu))
		return kvm_condition_valid32(vcpu);

	return true;
}

static inline void vcpu_set_thumb(struct kvm_vcpu *vcpu)
{
	*vcpu_cpsr(vcpu) |= PSR_AA32_T_BIT;
}

/*
 * vcpu_get_reg and vcpu_set_reg should always be passed a register number
 * coming from a read of ESR_EL2. Otherwise, it may give the wrong result on
 * AArch32 with banked registers.
 */
static __always_inline unsigned long vcpu_get_reg(const struct kvm_vcpu *vcpu,
						  u8 reg_num)
{
	return (reg_num == 31) ? 0 : vcpu_gp_regs(vcpu)->regs[reg_num];
}

static __always_inline void vcpu_set_reg(struct kvm_vcpu *vcpu, u8 reg_num,
					 unsigned long val)
{
	if (reg_num != 31)
		vcpu_gp_regs(vcpu)->regs[reg_num] = val;
}

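/*
 * While the vcpu's sysregs are loaded on the CPU, SPSR_EL1 lives in the
 * hardware register and must be accessed via sysreg instructions;
 * otherwise the in-memory copy is authoritative.
 */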
static inline unsigned long vcpu_read_spsr(const struct kvm_vcpu *vcpu)
{
	if (vcpu_mode_is_32bit(vcpu))
		return vcpu_read_spsr32(vcpu);

	if (vcpu->arch.sysregs_loaded_on_cpu)
		return read_sysreg_el1(SYS_SPSR);
	else
		return __vcpu_sys_reg(vcpu, SPSR_EL1);
}

static inline void vcpu_write_spsr(struct kvm_vcpu *vcpu, unsigned long v)
{
	if (vcpu_mode_is_32bit(vcpu)) {
		vcpu_write_spsr32(vcpu, v);
		return;
	}

	if (vcpu->arch.sysregs_loaded_on_cpu)
		write_sysreg_el1(v, SYS_SPSR);
	else
		__vcpu_sys_reg(vcpu, SPSR_EL1) = v;
}

/*
 * The layout of SPSR for an AArch32 state is different when observed from an
 * AArch64 SPSR_ELx or an AArch32 SPSR_*. This function generates the AArch32
 * view given an AArch64 view.
 *
 * In ARM DDI 0487E.a see:
 *
 * - The AArch64 view (SPSR_EL2) in section C5.2.18, page C5-426
 * - The AArch32 view (SPSR_abt) in section G8.2.126, page G8-6256
 * - The AArch32 view (SPSR_und) in section G8.2.132, page G8-6280
 *
 * Which show the following differences:
 *
 * | Bit | AA64 | AA32 | Notes                       |
 * +-----+------+------+-----------------------------+
 * | 24  | DIT  | J    | J is RES0 in ARMv8          |
 * | 21  | SS   | DIT  | SS doesn't exist in AArch32 |
 *
 * ... and all other bits are (currently) common.
 */
static inline unsigned long host_spsr_to_spsr32(unsigned long spsr)
{
	const unsigned long overlap = BIT(24) | BIT(21);
	unsigned long dit = !!(spsr & PSR_AA32_DIT_BIT);

	spsr &= ~overlap;

	spsr |= dit << 21;

	return spsr;
}

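/* Return true when the vcpu was in a privileged mode (above EL0t/USR). */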
static inline bool vcpu_mode_priv(const struct kvm_vcpu *vcpu)
{
	u32 mode;

	if (vcpu_mode_is_32bit(vcpu)) {
		mode = *vcpu_cpsr(vcpu) & PSR_AA32_MODE_MASK;
		return mode > PSR_AA32_MODE_USR;
	}

	mode = *vcpu_cpsr(vcpu) & PSR_MODE_MASK;

	return mode != PSR_MODE_EL0t;
}

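/*
 * Exception syndrome accessors. These operate on the ESR_EL2 (and
 * related fault register) snapshot taken on guest exit and cached in
 * vcpu->arch.fault.
 */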
static __always_inline u32 kvm_vcpu_get_esr(const struct kvm_vcpu *vcpu)
{
	return vcpu->arch.fault.esr_el2;
}

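/*
 * For a trapped AArch32 conditional instruction, ESR_ELx.CV indicates
 * whether the COND field is valid; return the condition code, or -1 if
 * none was provided.
 */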
static __always_inline int kvm_vcpu_get_condition(const struct kvm_vcpu *vcpu)
{
	u32 esr = kvm_vcpu_get_esr(vcpu);

	if (esr & ESR_ELx_CV)
		return (esr & ESR_ELx_COND_MASK) >> ESR_ELx_COND_SHIFT;

	return -1;
}

static __always_inline unsigned long kvm_vcpu_get_hfar(const struct kvm_vcpu *vcpu)
{
	return vcpu->arch.fault.far_el2;
}

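/* HPFAR_EL2 holds bits [47:12] of the faulting IPA; shift FIPA back up. */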
static __always_inline phys_addr_t kvm_vcpu_get_fault_ipa(const struct kvm_vcpu *vcpu)
{
	return ((phys_addr_t)vcpu->arch.fault.hpfar_el2 & HPFAR_MASK) << 8;
}

static inline u64 kvm_vcpu_get_disr(const struct kvm_vcpu *vcpu)
{
	return vcpu->arch.fault.disr_el1;
}

static inline u32 kvm_vcpu_hvc_get_imm(const struct kvm_vcpu *vcpu)
{
	return kvm_vcpu_get_esr(vcpu) & ESR_ELx_xVC_IMM_MASK;
}

static __always_inline bool kvm_vcpu_dabt_isvalid(const struct kvm_vcpu *vcpu)
{
	return !!(kvm_vcpu_get_esr(vcpu) & ESR_ELx_ISV);
}

static inline unsigned long kvm_vcpu_dabt_iss_nisv_sanitized(const struct kvm_vcpu *vcpu)
{
	return kvm_vcpu_get_esr(vcpu) & (ESR_ELx_CM | ESR_ELx_WNR | ESR_ELx_FSC);
}

static inline bool kvm_vcpu_dabt_issext(const struct kvm_vcpu *vcpu)
{
	return !!(kvm_vcpu_get_esr(vcpu) & ESR_ELx_SSE);
}

static inline bool kvm_vcpu_dabt_issf(const struct kvm_vcpu *vcpu)
{
	return !!(kvm_vcpu_get_esr(vcpu) & ESR_ELx_SF);
}

static __always_inline int kvm_vcpu_dabt_get_rd(const struct kvm_vcpu *vcpu)
{
	return (kvm_vcpu_get_esr(vcpu) & ESR_ELx_SRT_MASK) >> ESR_ELx_SRT_SHIFT;
}

static __always_inline bool kvm_vcpu_abt_iss1tw(const struct kvm_vcpu *vcpu)
{
	return !!(kvm_vcpu_get_esr(vcpu) & ESR_ELx_S1PTW);
}

/* Always check for S1PTW *before* using this. */
static __always_inline bool kvm_vcpu_dabt_iswrite(const struct kvm_vcpu *vcpu)
{
	return kvm_vcpu_get_esr(vcpu) & ESR_ELx_WNR;
}

static inline bool kvm_vcpu_dabt_is_cm(const struct kvm_vcpu *vcpu)
{
	return !!(kvm_vcpu_get_esr(vcpu) & ESR_ELx_CM);
}

static __always_inline unsigned int kvm_vcpu_dabt_get_as(const struct kvm_vcpu *vcpu)
{
	return 1 << ((kvm_vcpu_get_esr(vcpu) & ESR_ELx_SAS) >> ESR_ELx_SAS_SHIFT);
}

/* This one is not specific to Data Abort */
static __always_inline bool kvm_vcpu_trap_il_is32bit(const struct kvm_vcpu *vcpu)
{
	return !!(kvm_vcpu_get_esr(vcpu) & ESR_ELx_IL);
}

static __always_inline u8 kvm_vcpu_trap_get_class(const struct kvm_vcpu *vcpu)
{
	return ESR_ELx_EC(kvm_vcpu_get_esr(vcpu));
}

static inline bool kvm_vcpu_trap_is_iabt(const struct kvm_vcpu *vcpu)
{
	return kvm_vcpu_trap_get_class(vcpu) == ESR_ELx_EC_IABT_LOW;
}

static inline bool kvm_vcpu_trap_is_exec_fault(const struct kvm_vcpu *vcpu)
{
	return kvm_vcpu_trap_is_iabt(vcpu) && !kvm_vcpu_abt_iss1tw(vcpu);
}

static __always_inline u8 kvm_vcpu_trap_get_fault(const struct kvm_vcpu *vcpu)
{
	return kvm_vcpu_get_esr(vcpu) & ESR_ELx_FSC;
}

static __always_inline u8 kvm_vcpu_trap_get_fault_type(const struct kvm_vcpu *vcpu)
{
	return kvm_vcpu_get_esr(vcpu) & ESR_ELx_FSC_TYPE;
}

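/*
 * True when the abort was caused by a synchronous external abort or a
 * parity/ECC error, either on the access itself or on a stage of its
 * translation table walk.
 */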
static __always_inline bool kvm_vcpu_abt_issea(const struct kvm_vcpu *vcpu)
{
	switch (kvm_vcpu_trap_get_fault(vcpu)) {
	case FSC_SEA:
	case FSC_SEA_TTW0:
	case FSC_SEA_TTW1:
	case FSC_SEA_TTW2:
	case FSC_SEA_TTW3:
	case FSC_SECC:
	case FSC_SECC_TTW0:
	case FSC_SECC_TTW1:
	case FSC_SECC_TTW2:
	case FSC_SECC_TTW3:
		return true;
	default:
		return false;
	}
}

static __always_inline int kvm_vcpu_sys_get_rt(struct kvm_vcpu *vcpu)
{
	u32 esr = kvm_vcpu_get_esr(vcpu);

	return ESR_ELx_SYS64_ISS_RT(esr);
}

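/*
 * An abort during a stage 1 page table walk counts as a write (the walk
 * may need to update access/dirty flags); instruction aborts never do.
 */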
static inline bool kvm_is_write_fault(struct kvm_vcpu *vcpu)
{
	if (kvm_vcpu_abt_iss1tw(vcpu))
		return true;

	if (kvm_vcpu_trap_is_iabt(vcpu))
		return false;

	return kvm_vcpu_dabt_iswrite(vcpu);
}

static inline unsigned long kvm_vcpu_get_mpidr_aff(struct kvm_vcpu *vcpu)
{
	return vcpu_read_sys_reg(vcpu, MPIDR_EL1) & MPIDR_HWID_BITMASK;
}

static inline void kvm_vcpu_set_be(struct kvm_vcpu *vcpu)
{
	if (vcpu_mode_is_32bit(vcpu)) {
		*vcpu_cpsr(vcpu) |= PSR_AA32_E_BIT;
	} else {
		u64 sctlr = vcpu_read_sys_reg(vcpu, SCTLR_EL1);

		sctlr |= (1 << 25);	/* SCTLR_EL1.EE */
		vcpu_write_sys_reg(vcpu, sctlr, SCTLR_EL1);
	}
}

static inline bool kvm_vcpu_is_be(struct kvm_vcpu *vcpu)
{
	if (vcpu_mode_is_32bit(vcpu))
		return !!(*vcpu_cpsr(vcpu) & PSR_AA32_E_BIT);

	return !!(vcpu_read_sys_reg(vcpu, SCTLR_EL1) & (1 << 25)); /* SCTLR_EL1.EE */
}

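/*
 * Convert MMIO data between the guest's current data endianness
 * (CPSR.E for AArch32, SCTLR_EL1.EE for AArch64) and the host's native
 * byte order, truncated to the access width.
 */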
static inline unsigned long vcpu_data_guest_to_host(struct kvm_vcpu *vcpu,
						    unsigned long data,
						    unsigned int len)
{
	if (kvm_vcpu_is_be(vcpu)) {
		switch (len) {
		case 1:
			return data & 0xff;
		case 2:
			return be16_to_cpu(data & 0xffff);
		case 4:
			return be32_to_cpu(data & 0xffffffff);
		default:
			return be64_to_cpu(data);
		}
	} else {
		switch (len) {
		case 1:
			return data & 0xff;
		case 2:
			return le16_to_cpu(data & 0xffff);
		case 4:
			return le32_to_cpu(data & 0xffffffff);
		default:
			return le64_to_cpu(data);
		}
	}

	return data;	/* Leave LE untouched */
}

static inline unsigned long vcpu_data_host_to_guest(struct kvm_vcpu *vcpu,
						    unsigned long data,
						    unsigned int len)
{
	if (kvm_vcpu_is_be(vcpu)) {
		switch (len) {
		case 1:
			return data & 0xff;
		case 2:
			return cpu_to_be16(data & 0xffff);
		case 4:
			return cpu_to_be32(data & 0xffffffff);
		default:
			return cpu_to_be64(data);
		}
	} else {
		switch (len) {
		case 1:
			return data & 0xff;
		case 2:
			return cpu_to_le16(data & 0xffff);
		case 4:
			return cpu_to_le32(data & 0xffffffff);
		default:
			return cpu_to_le64(data);
		}
	}

	return data;	/* Leave LE untouched */
}

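/*
 * Skip the instruction that trapped: delegate to the AArch32 helper for
 * 32-bit guests; otherwise bump PC by 4 and clear PSTATE.BTYPE, since
 * the instruction following the skipped one is not a branch target.
 * Either way, advance the single-step state machine.
 */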
static __always_inline void kvm_skip_instr(struct kvm_vcpu *vcpu)
{
	if (vcpu_mode_is_32bit(vcpu)) {
		kvm_skip_instr32(vcpu);
	} else {
		*vcpu_pc(vcpu) += 4;
		*vcpu_cpsr(vcpu) &= ~PSR_BTYPE_MASK;
	}

	/* advance the singlestep state machine */
	*vcpu_cpsr(vcpu) &= ~DBG_SPSR_SS;
}

/*
 * Skip an instruction which has been emulated at hyp while most guest sysregs
 * are live.
 */
static __always_inline void __kvm_skip_instr(struct kvm_vcpu *vcpu)
{
	*vcpu_pc(vcpu) = read_sysreg_el2(SYS_ELR);
	vcpu_gp_regs(vcpu)->pstate = read_sysreg_el2(SYS_SPSR);

	kvm_skip_instr(vcpu);

	write_sysreg_el2(vcpu_gp_regs(vcpu)->pstate, SYS_SPSR);
	write_sysreg_el2(*vcpu_pc(vcpu), SYS_ELR);
}

#endif /* __ARM64_KVM_EMULATE_H__ */