/* SPDX-License-Identifier: GPL-2.0-only */
/*
 * Copyright (C) 2012,2013 - ARM Ltd
 * Author: Marc Zyngier <marc.zyngier@arm.com>
 *
 * Derived from arch/arm/include/kvm_emulate.h
 * Copyright (C) 2012 - Virtual Open Systems and Columbia University
 * Author: Christoffer Dall <c.dall@virtualopensystems.com>
 */

#ifndef __ARM64_KVM_EMULATE_H__
#define __ARM64_KVM_EMULATE_H__

#include <linux/kvm_host.h>

#include <asm/debug-monitors.h>
#include <asm/esr.h>
#include <asm/kvm_arm.h>
#include <asm/kvm_hyp.h>
#include <asm/ptrace.h>
#include <asm/cputype.h>
#include <asm/virt.h>

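/*
 * Exception vector offsets, mirroring the AArch64 vector table layout:
 * each source (current EL with SP_EL0, current EL with SP_ELx, lower EL
 * AArch64, lower EL AArch32) owns a 0x200-byte bank, and within a bank
 * the synchronous/IRQ/FIQ/SError entries sit 0x80 bytes apart. The
 * target vector is thus VBAR_ELx + bank offset + exception_type.
 */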
#define CURRENT_EL_SP_EL0_VECTOR	0x0
#define CURRENT_EL_SP_ELx_VECTOR	0x200
#define LOWER_EL_AArch64_VECTOR		0x400
#define LOWER_EL_AArch32_VECTOR		0x600

enum exception_type {
	except_type_sync	= 0,
	except_type_irq		= 0x80,
	except_type_fiq		= 0x100,
	except_type_serror	= 0x180,
};

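/*
 * Symbolic names for the exception types above, in the { value, "name" }
 * pair form expected by __print_symbolic() in trace events.
 */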
#define kvm_exception_type_names		\
	{ except_type_sync,	"SYNC"   },	\
	{ except_type_irq,	"IRQ"    },	\
	{ except_type_fiq,	"FIQ"    },	\
	{ except_type_serror,	"SERROR" }

bool kvm_condition_valid32(const struct kvm_vcpu *vcpu);
void kvm_skip_instr32(struct kvm_vcpu *vcpu);

void kvm_inject_undefined(struct kvm_vcpu *vcpu);
void kvm_inject_vabt(struct kvm_vcpu *vcpu);
void kvm_inject_dabt(struct kvm_vcpu *vcpu, unsigned long addr);
void kvm_inject_pabt(struct kvm_vcpu *vcpu, unsigned long addr);
void kvm_inject_size_fault(struct kvm_vcpu *vcpu);

void kvm_vcpu_wfi(struct kvm_vcpu *vcpu);

void kvm_emulate_nested_eret(struct kvm_vcpu *vcpu);
int kvm_inject_nested_sync(struct kvm_vcpu *vcpu, u64 esr_el2);
int kvm_inject_nested_irq(struct kvm_vcpu *vcpu);

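/*
 * Inside the (n)VHE hypervisor object, HCR_EL2.RW (as configured via
 * vcpu_reset_hcr() below) is authoritative for the guest's EL1 register
 * width; elsewhere, it is derived from the vCPU feature bitmap.
 */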
#if defined(__KVM_VHE_HYPERVISOR__) || defined(__KVM_NVHE_HYPERVISOR__)
static __always_inline bool vcpu_el1_is_32bit(struct kvm_vcpu *vcpu)
{
	return !(vcpu->arch.hcr_el2 & HCR_RW);
}
#else
static __always_inline bool vcpu_el1_is_32bit(struct kvm_vcpu *vcpu)
{
	return test_bit(KVM_ARM_VCPU_EL1_32BIT, vcpu->arch.features);
}
#endif

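/*
 * Set up the initial guest HCR_EL2 trap configuration. Of note below:
 * HCR_TID2 traps the whole cache ID register block, CTR_EL0 included,
 * while HCR_TID4 (FEAT_EVT) traps only the cache topology registers,
 * so CTR_EL0 can stay untrapped when cache types match across CPUs.
 */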
static inline void vcpu_reset_hcr(struct kvm_vcpu *vcpu)
{
	vcpu->arch.hcr_el2 = HCR_GUEST_FLAGS;
	if (has_vhe() || has_hvhe())
		vcpu->arch.hcr_el2 |= HCR_E2H;
	if (cpus_have_const_cap(ARM64_HAS_RAS_EXTN)) {
		/* route synchronous external abort exceptions to EL2 */
		vcpu->arch.hcr_el2 |= HCR_TEA;
		/* trap error record accesses */
		vcpu->arch.hcr_el2 |= HCR_TERR;
	}

	if (cpus_have_const_cap(ARM64_HAS_STAGE2_FWB)) {
		vcpu->arch.hcr_el2 |= HCR_FWB;
	} else {
		/*
		 * For non-FWB CPUs, we trap VM ops (HCR_EL2.TVM) until the
		 * M and C bits get set in SCTLR_EL1, such that we can detect
		 * when the guest MMU gets turned on and do the necessary
		 * cache maintenance then.
		 */
		vcpu->arch.hcr_el2 |= HCR_TVM;
	}

	if (cpus_have_final_cap(ARM64_HAS_EVT) &&
	    !cpus_have_final_cap(ARM64_MISMATCHED_CACHE_TYPE))
		vcpu->arch.hcr_el2 |= HCR_TID4;
	else
		vcpu->arch.hcr_el2 |= HCR_TID2;

	if (vcpu_el1_is_32bit(vcpu))
		vcpu->arch.hcr_el2 &= ~HCR_RW;

	if (kvm_has_mte(vcpu->kvm))
		vcpu->arch.hcr_el2 |= HCR_ATA;
}

static inline unsigned long *vcpu_hcr(struct kvm_vcpu *vcpu)
{
	return (unsigned long *)&vcpu->arch.hcr_el2;
}

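/*
 * With GICv4 direct injection in use (mapped vLPIs or active vSGIs), let
 * WFI run natively so the vPE stays resident and doorbells can wake it;
 * otherwise trap WFI so the vCPU yields the physical CPU.
 */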
static inline void vcpu_clear_wfx_traps(struct kvm_vcpu *vcpu)
{
	vcpu->arch.hcr_el2 &= ~HCR_TWE;
	if (atomic_read(&vcpu->arch.vgic_cpu.vgic_v3.its_vpe.vlpi_count) ||
	    vcpu->kvm->arch.vgic.nassgireq)
		vcpu->arch.hcr_el2 &= ~HCR_TWI;
	else
		vcpu->arch.hcr_el2 |= HCR_TWI;
}

static inline void vcpu_set_wfx_traps(struct kvm_vcpu *vcpu)
{
	vcpu->arch.hcr_el2 |= HCR_TWE;
	vcpu->arch.hcr_el2 |= HCR_TWI;
}

static inline void vcpu_ptrauth_enable(struct kvm_vcpu *vcpu)
{
	vcpu->arch.hcr_el2 |= (HCR_API | HCR_APK);
}

static inline void vcpu_ptrauth_disable(struct kvm_vcpu *vcpu)
{
	vcpu->arch.hcr_el2 &= ~(HCR_API | HCR_APK);
}

static inline unsigned long vcpu_get_vsesr(struct kvm_vcpu *vcpu)
{
	return vcpu->arch.vsesr_el2;
}

static inline void vcpu_set_vsesr(struct kvm_vcpu *vcpu, u64 vsesr)
{
	vcpu->arch.vsesr_el2 = vsesr;
}

static __always_inline unsigned long *vcpu_pc(const struct kvm_vcpu *vcpu)
{
	return (unsigned long *)&vcpu_gp_regs(vcpu)->pc;
}

static __always_inline unsigned long *vcpu_cpsr(const struct kvm_vcpu *vcpu)
{
	return (unsigned long *)&vcpu_gp_regs(vcpu)->pstate;
}

static __always_inline bool vcpu_mode_is_32bit(const struct kvm_vcpu *vcpu)
{
	return !!(*vcpu_cpsr(vcpu) & PSR_MODE32_BIT);
}

static __always_inline bool kvm_condition_valid(const struct kvm_vcpu *vcpu)
{
	if (vcpu_mode_is_32bit(vcpu))
		return kvm_condition_valid32(vcpu);

	return true;
}

static inline void vcpu_set_thumb(struct kvm_vcpu *vcpu)
{
	*vcpu_cpsr(vcpu) |= PSR_AA32_T_BIT;
}

/*
 * vcpu_get_reg and vcpu_set_reg should always be passed a register number
 * coming from a read of ESR_EL2. Otherwise, it may give the wrong result on
 * AArch32 with banked registers.
 */
static __always_inline unsigned long vcpu_get_reg(const struct kvm_vcpu *vcpu,
						  u8 reg_num)
{
	return (reg_num == 31) ? 0 : vcpu_gp_regs(vcpu)->regs[reg_num];
}

static __always_inline void vcpu_set_reg(struct kvm_vcpu *vcpu, u8 reg_num,
					 unsigned long val)
{
	if (reg_num != 31)
		vcpu_gp_regs(vcpu)->regs[reg_num] = val;
}

static inline bool vcpu_is_el2_ctxt(const struct kvm_cpu_context *ctxt)
{
	switch (ctxt->regs.pstate & (PSR_MODE32_BIT | PSR_MODE_MASK)) {
	case PSR_MODE_EL2h:
	case PSR_MODE_EL2t:
		return true;
	default:
		return false;
	}
}

static inline bool vcpu_is_el2(const struct kvm_vcpu *vcpu)
{
	return vcpu_is_el2_ctxt(&vcpu->arch.ctxt);
}

static inline bool __vcpu_el2_e2h_is_set(const struct kvm_cpu_context *ctxt)
{
	return ctxt_sys_reg(ctxt, HCR_EL2) & HCR_E2H;
}

static inline bool vcpu_el2_e2h_is_set(const struct kvm_vcpu *vcpu)
{
	return __vcpu_el2_e2h_is_set(&vcpu->arch.ctxt);
}

static inline bool __vcpu_el2_tge_is_set(const struct kvm_cpu_context *ctxt)
{
	return ctxt_sys_reg(ctxt, HCR_EL2) & HCR_TGE;
}

static inline bool vcpu_el2_tge_is_set(const struct kvm_vcpu *vcpu)
{
	return __vcpu_el2_tge_is_set(&vcpu->arch.ctxt);
}

static inline bool __is_hyp_ctxt(const struct kvm_cpu_context *ctxt)
{
	/*
	 * We are in a hypervisor context if the vcpu mode is EL2, or if
	 * the E2H and TGE bits are both set. The latter means we are in
	 * the user space of the VHE kernel; the ARMv8.1 ARM describes
	 * this as 'InHost'.
	 *
	 * Note that the HCR_EL2.{E2H,TGE}={0,1} configuration isn't really
	 * handled in the rest of the KVM code, and will result in a
	 * misbehaving guest.
	 */
	return vcpu_is_el2_ctxt(ctxt) ||
	       (__vcpu_el2_e2h_is_set(ctxt) && __vcpu_el2_tge_is_set(ctxt)) ||
	       __vcpu_el2_tge_is_set(ctxt);
}

static inline bool is_hyp_ctxt(const struct kvm_vcpu *vcpu)
{
	return __is_hyp_ctxt(&vcpu->arch.ctxt);
}

/*
 * The layout of SPSR for an AArch32 state is different when observed from an
 * AArch64 SPSR_ELx or an AArch32 SPSR_*. This function generates the AArch32
 * view given an AArch64 view.
 *
 * In ARM DDI 0487E.a see:
 *
 * - The AArch64 view (SPSR_EL2) in section C5.2.18, page C5-426
 * - The AArch32 view (SPSR_abt) in section G8.2.126, page G8-6256
 * - The AArch32 view (SPSR_und) in section G8.2.132, page G8-6280
 *
 * Which show the following differences:
 *
 * | Bit | AA64 | AA32 | Notes                       |
 * +-----+------+------+-----------------------------+
 * | 24  | DIT  | J    | J is RES0 in ARMv8          |
 * | 21  | SS   | DIT  | SS doesn't exist in AArch32 |
 *
 * ... and all other bits are (currently) common.
 */
static inline unsigned long host_spsr_to_spsr32(unsigned long spsr)
{
	const unsigned long overlap = BIT(24) | BIT(21);
	unsigned long dit = !!(spsr & PSR_AA32_DIT_BIT);

	spsr &= ~overlap;

	spsr |= dit << 21;

	return spsr;
}

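/*
 * Worked example, per the table above: both overlap bits are first
 * cleared, then DIT is placed at its AArch32 position (bit 21). The
 * AArch64 SS bit is dropped, and J (bit 24) reads as zero, as required
 * for a RES0 bit.
 */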
static inline bool vcpu_mode_priv(const struct kvm_vcpu *vcpu)
{
	u32 mode;

	if (vcpu_mode_is_32bit(vcpu)) {
		mode = *vcpu_cpsr(vcpu) & PSR_AA32_MODE_MASK;
		return mode > PSR_AA32_MODE_USR;
	}

	mode = *vcpu_cpsr(vcpu) & PSR_MODE_MASK;

	return mode != PSR_MODE_EL0t;
}

static __always_inline u64 kvm_vcpu_get_esr(const struct kvm_vcpu *vcpu)
{
	return vcpu->arch.fault.esr_el2;
}

static __always_inline int kvm_vcpu_get_condition(const struct kvm_vcpu *vcpu)
{
	u64 esr = kvm_vcpu_get_esr(vcpu);

	if (esr & ESR_ELx_CV)
		return (esr & ESR_ELx_COND_MASK) >> ESR_ELx_COND_SHIFT;

	return -1;
}

static __always_inline unsigned long kvm_vcpu_get_hfar(const struct kvm_vcpu *vcpu)
{
	return vcpu->arch.fault.far_el2;
}

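/*
 * HPFAR_EL2.FIPA holds bits [51:12] of the faulting IPA at bits [43:4];
 * masking off the low nibble and shifting left by 8 therefore yields the
 * page-aligned IPA. The page offset must come from the FAR instead.
 */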
static __always_inline phys_addr_t kvm_vcpu_get_fault_ipa(const struct kvm_vcpu *vcpu)
{
	return ((phys_addr_t)vcpu->arch.fault.hpfar_el2 & HPFAR_MASK) << 8;
}

static inline u64 kvm_vcpu_get_disr(const struct kvm_vcpu *vcpu)
{
	return vcpu->arch.fault.disr_el1;
}

static inline u32 kvm_vcpu_hvc_get_imm(const struct kvm_vcpu *vcpu)
{
	return kvm_vcpu_get_esr(vcpu) & ESR_ELx_xVC_IMM_MASK;
}

static __always_inline bool kvm_vcpu_dabt_isvalid(const struct kvm_vcpu *vcpu)
{
	return !!(kvm_vcpu_get_esr(vcpu) & ESR_ELx_ISV);
}

static inline unsigned long kvm_vcpu_dabt_iss_nisv_sanitized(const struct kvm_vcpu *vcpu)
{
	return kvm_vcpu_get_esr(vcpu) & (ESR_ELx_CM | ESR_ELx_WNR | ESR_ELx_FSC);
}

static inline bool kvm_vcpu_dabt_issext(const struct kvm_vcpu *vcpu)
{
	return !!(kvm_vcpu_get_esr(vcpu) & ESR_ELx_SSE);
}

static inline bool kvm_vcpu_dabt_issf(const struct kvm_vcpu *vcpu)
{
	return !!(kvm_vcpu_get_esr(vcpu) & ESR_ELx_SF);
}

static __always_inline int kvm_vcpu_dabt_get_rd(const struct kvm_vcpu *vcpu)
{
	return (kvm_vcpu_get_esr(vcpu) & ESR_ELx_SRT_MASK) >> ESR_ELx_SRT_SHIFT;
}

static __always_inline bool kvm_vcpu_abt_iss1tw(const struct kvm_vcpu *vcpu)
{
	return !!(kvm_vcpu_get_esr(vcpu) & ESR_ELx_S1PTW);
}

/* Always check for S1PTW *before* using this. */
static __always_inline bool kvm_vcpu_dabt_iswrite(const struct kvm_vcpu *vcpu)
{
	return kvm_vcpu_get_esr(vcpu) & ESR_ELx_WNR;
}

static inline bool kvm_vcpu_dabt_is_cm(const struct kvm_vcpu *vcpu)
{
	return !!(kvm_vcpu_get_esr(vcpu) & ESR_ELx_CM);
}

static __always_inline unsigned int kvm_vcpu_dabt_get_as(const struct kvm_vcpu *vcpu)
{
	return 1 << ((kvm_vcpu_get_esr(vcpu) & ESR_ELx_SAS) >> ESR_ELx_SAS_SHIFT);
}

/* This one is not specific to Data Abort */
static __always_inline bool kvm_vcpu_trap_il_is32bit(const struct kvm_vcpu *vcpu)
{
	return !!(kvm_vcpu_get_esr(vcpu) & ESR_ELx_IL);
}

static __always_inline u8 kvm_vcpu_trap_get_class(const struct kvm_vcpu *vcpu)
{
	return ESR_ELx_EC(kvm_vcpu_get_esr(vcpu));
}

static inline bool kvm_vcpu_trap_is_iabt(const struct kvm_vcpu *vcpu)
{
	return kvm_vcpu_trap_get_class(vcpu) == ESR_ELx_EC_IABT_LOW;
}

static inline bool kvm_vcpu_trap_is_exec_fault(const struct kvm_vcpu *vcpu)
{
	return kvm_vcpu_trap_is_iabt(vcpu) && !kvm_vcpu_abt_iss1tw(vcpu);
}

static __always_inline u8 kvm_vcpu_trap_get_fault(const struct kvm_vcpu *vcpu)
{
	return kvm_vcpu_get_esr(vcpu) & ESR_ELx_FSC;
}

static __always_inline u8 kvm_vcpu_trap_get_fault_type(const struct kvm_vcpu *vcpu)
{
	return kvm_vcpu_get_esr(vcpu) & ESR_ELx_FSC_TYPE;
}

static __always_inline u8 kvm_vcpu_trap_get_fault_level(const struct kvm_vcpu *vcpu)
{
	return kvm_vcpu_get_esr(vcpu) & ESR_ELx_FSC_LEVEL;
}

static __always_inline bool kvm_vcpu_abt_issea(const struct kvm_vcpu *vcpu)
{
	switch (kvm_vcpu_trap_get_fault(vcpu)) {
	case ESR_ELx_FSC_EXTABT:
	case ESR_ELx_FSC_SEA_TTW0:
	case ESR_ELx_FSC_SEA_TTW1:
	case ESR_ELx_FSC_SEA_TTW2:
	case ESR_ELx_FSC_SEA_TTW3:
	case ESR_ELx_FSC_SECC:
	case ESR_ELx_FSC_SECC_TTW0:
	case ESR_ELx_FSC_SECC_TTW1:
	case ESR_ELx_FSC_SECC_TTW2:
	case ESR_ELx_FSC_SECC_TTW3:
		return true;
	default:
		return false;
	}
}

static __always_inline int kvm_vcpu_sys_get_rt(struct kvm_vcpu *vcpu)
{
	u64 esr = kvm_vcpu_get_esr(vcpu);

	return ESR_ELx_SYS64_ISS_RT(esr);
}

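/*
 * Illustrative sketch (the helper name is hypothetical, not kernel API):
 * completing a trapped MRS by writing the emulated value to the transfer
 * register named in ESR_EL2. Because the register number comes straight
 * from the ISS, a transfer register of 31 (XZR) discards the value.
 */
static inline void example_emulate_mrs(struct kvm_vcpu *vcpu, u64 val)
{
	vcpu_set_reg(vcpu, kvm_vcpu_sys_get_rt(vcpu), val);
}
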
static inline bool kvm_is_write_fault(struct kvm_vcpu *vcpu)
{
	if (kvm_vcpu_abt_iss1tw(vcpu)) {
		/*
		 * Only a permission fault on a S1PTW should be
		 * considered as a write. Otherwise, page tables baked
		 * in a read-only memslot will result in an exception
		 * being delivered in the guest.
		 *
		 * The drawback is that we end up faulting twice if the
		 * guest is using any of HW AF/DB: a translation fault
		 * to map the page containing the PT (read only at
		 * first), then a permission fault to allow the flags
		 * to be set.
		 */
		switch (kvm_vcpu_trap_get_fault_type(vcpu)) {
		case ESR_ELx_FSC_PERM:
			return true;
		default:
			return false;
		}
	}

	if (kvm_vcpu_trap_is_iabt(vcpu))
		return false;

	return kvm_vcpu_dabt_iswrite(vcpu);
}

static inline unsigned long kvm_vcpu_get_mpidr_aff(struct kvm_vcpu *vcpu)
{
	return vcpu_read_sys_reg(vcpu, MPIDR_EL1) & MPIDR_HWID_BITMASK;
}

static inline void kvm_vcpu_set_be(struct kvm_vcpu *vcpu)
{
	if (vcpu_mode_is_32bit(vcpu)) {
		*vcpu_cpsr(vcpu) |= PSR_AA32_E_BIT;
	} else {
		u64 sctlr = vcpu_read_sys_reg(vcpu, SCTLR_EL1);

		sctlr |= SCTLR_ELx_EE;
		vcpu_write_sys_reg(vcpu, sctlr, SCTLR_EL1);
	}
}

static inline bool kvm_vcpu_is_be(struct kvm_vcpu *vcpu)
{
	if (vcpu_mode_is_32bit(vcpu))
		return !!(*vcpu_cpsr(vcpu) & PSR_AA32_E_BIT);

	if (vcpu_mode_priv(vcpu))
		return !!(vcpu_read_sys_reg(vcpu, SCTLR_EL1) & SCTLR_ELx_EE);
	else
		return !!(vcpu_read_sys_reg(vcpu, SCTLR_EL1) & SCTLR_EL1_E0E);
}

static inline unsigned long vcpu_data_guest_to_host(struct kvm_vcpu *vcpu,
						    unsigned long data,
						    unsigned int len)
{
	if (kvm_vcpu_is_be(vcpu)) {
		switch (len) {
		case 1:
			return data & 0xff;
		case 2:
			return be16_to_cpu(data & 0xffff);
		case 4:
			return be32_to_cpu(data & 0xffffffff);
		default:
			return be64_to_cpu(data);
		}
	} else {
		switch (len) {
		case 1:
			return data & 0xff;
		case 2:
			return le16_to_cpu(data & 0xffff);
		case 4:
			return le32_to_cpu(data & 0xffffffff);
		default:
			return le64_to_cpu(data);
		}
	}

	return data;	/* Leave LE untouched */
}

static inline unsigned long vcpu_data_host_to_guest(struct kvm_vcpu *vcpu,
						    unsigned long data,
						    unsigned int len)
{
	if (kvm_vcpu_is_be(vcpu)) {
		switch (len) {
		case 1:
			return data & 0xff;
		case 2:
			return cpu_to_be16(data & 0xffff);
		case 4:
			return cpu_to_be32(data & 0xffffffff);
		default:
			return cpu_to_be64(data);
		}
	} else {
		switch (len) {
		case 1:
			return data & 0xff;
		case 2:
			return cpu_to_le16(data & 0xffff);
		case 4:
			return cpu_to_le32(data & 0xffffffff);
		default:
			return cpu_to_le64(data);
		}
	}

	return data;	/* Leave LE untouched */
}

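/*
 * Illustrative sketch of how the helpers above compose for an emulated
 * MMIO store; the function name is hypothetical and not part of the
 * kernel API. The transfer register and access size both come from the
 * ESR_EL2 ISS, and the value is converted from the guest's current
 * endianness to the host's little-endian layout.
 */
static inline unsigned long example_mmio_store_data(struct kvm_vcpu *vcpu)
{
	int rt = kvm_vcpu_dabt_get_rd(vcpu);		/* Rt from ESR_EL2 */
	unsigned int len = kvm_vcpu_dabt_get_as(vcpu);	/* access size */

	return vcpu_data_guest_to_host(vcpu, vcpu_get_reg(vcpu, rt), len);
}
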
static __always_inline void kvm_incr_pc(struct kvm_vcpu *vcpu)
{
	WARN_ON(vcpu_get_flag(vcpu, PENDING_EXCEPTION));
	vcpu_set_flag(vcpu, INCREMENT_PC);
}

#define kvm_pend_exception(v, e)					\
	do {								\
		WARN_ON(vcpu_get_flag((v), INCREMENT_PC));		\
		vcpu_set_flag((v), PENDING_EXCEPTION);			\
		vcpu_set_flag((v), e);					\
	} while (0)

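/*
 * kvm_incr_pc() and kvm_pend_exception() above are mutually exclusive for
 * a given exit: both effects are applied lazily on the next guest entry,
 * so only one may be pending at a time. The WARN_ONs catch callers that
 * try to mix the two.
 */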
static inline bool vcpu_has_feature(struct kvm_vcpu *vcpu, int feature)
{
	return test_bit(feature, vcpu->arch.features);
}

static __always_inline u64 kvm_get_reset_cptr_el2(struct kvm_vcpu *vcpu)
{
	u64 val;

	if (has_vhe()) {
		val = (CPACR_EL1_FPEN_EL0EN | CPACR_EL1_FPEN_EL1EN |
		       CPACR_EL1_ZEN_EL1EN);
	} else if (has_hvhe()) {
		val = (CPACR_EL1_FPEN_EL0EN | CPACR_EL1_FPEN_EL1EN);
	} else {
		val = CPTR_NVHE_EL2_RES1;

		if (vcpu_has_sve(vcpu) &&
		    (vcpu->arch.fp_state == FP_STATE_GUEST_OWNED))
			val |= CPTR_EL2_TZ;
		if (cpus_have_final_cap(ARM64_SME))
			val &= ~CPTR_EL2_TSM;
	}

	return val;
}

static __always_inline void kvm_reset_cptr_el2(struct kvm_vcpu *vcpu)
{
	u64 val = kvm_get_reset_cptr_el2(vcpu);

	if (has_vhe() || has_hvhe())
		write_sysreg(val, cpacr_el1);
	else
		write_sysreg(val, cptr_el2);
}
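
/*
 * With HCR_EL2.E2H set (VHE and hVHE), EL2 accesses to CPACR_EL1 are
 * redirected to CPTR_EL2, which then uses the CPACR_EL1 layout; hence
 * the write to cpacr_el1 above. Without E2H, CPTR_EL2 keeps its native
 * format and is written directly.
 */
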
#endif /* __ARM64_KVM_EMULATE_H__ */