/* SPDX-License-Identifier: GPL-2.0-only */
/*
 * Copyright (C) 2012,2013 - ARM Ltd
 * Author: Marc Zyngier <marc.zyngier@arm.com>
 *
 * Derived from arch/arm/include/kvm_emulate.h
 * Copyright (C) 2012 - Virtual Open Systems and Columbia University
 * Author: Christoffer Dall <c.dall@virtualopensystems.com>
 */

#ifndef __ARM64_KVM_EMULATE_H__
#define __ARM64_KVM_EMULATE_H__

#include <linux/bitfield.h>
#include <linux/kvm_host.h>

#include <asm/debug-monitors.h>
#include <asm/esr.h>
#include <asm/kvm_arm.h>
#include <asm/kvm_hyp.h>
#include <asm/kvm_nested.h>
#include <asm/ptrace.h>
#include <asm/cputype.h>
#include <asm/virt.h>

#define CURRENT_EL_SP_EL0_VECTOR	0x0
#define CURRENT_EL_SP_ELx_VECTOR	0x200
#define LOWER_EL_AArch64_VECTOR		0x400
#define LOWER_EL_AArch32_VECTOR		0x600

enum exception_type {
	except_type_sync	= 0,
	except_type_irq		= 0x80,
	except_type_fiq		= 0x100,
	except_type_serror	= 0x180,
};
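
/*
 * Illustrative note (not part of the kernel sources): an AArch64 exception
 * vector lives at VBAR_ELx + base + type, where "base" is one of the
 * *_VECTOR offsets above (selected by the source EL, stack pointer and
 * register width) and "type" is an enum exception_type value. The
 * hypothetical helper below only demonstrates the arithmetic.
 */
static inline unsigned long example_vector_offset(unsigned long base,
						  enum exception_type type)
{
	/* e.g. an IRQ from a lower EL in AArch64: 0x400 + 0x80 == 0x480 */
	return base + type;
}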

#define kvm_exception_type_names		\
	{ except_type_sync,	"SYNC"   },	\
	{ except_type_irq,	"IRQ"    },	\
	{ except_type_fiq,	"FIQ"    },	\
	{ except_type_serror,	"SERROR" }

bool kvm_condition_valid32(const struct kvm_vcpu *vcpu);
void kvm_skip_instr32(struct kvm_vcpu *vcpu);

void kvm_inject_undefined(struct kvm_vcpu *vcpu);
void kvm_inject_vabt(struct kvm_vcpu *vcpu);
void kvm_inject_dabt(struct kvm_vcpu *vcpu, unsigned long addr);
void kvm_inject_pabt(struct kvm_vcpu *vcpu, unsigned long addr);
void kvm_inject_size_fault(struct kvm_vcpu *vcpu);

void kvm_vcpu_wfi(struct kvm_vcpu *vcpu);

void kvm_emulate_nested_eret(struct kvm_vcpu *vcpu);
int kvm_inject_nested_sync(struct kvm_vcpu *vcpu, u64 esr_el2);
int kvm_inject_nested_irq(struct kvm_vcpu *vcpu);

static inline void kvm_inject_nested_sve_trap(struct kvm_vcpu *vcpu)
{
	u64 esr = FIELD_PREP(ESR_ELx_EC_MASK, ESR_ELx_EC_SVE) |
		  ESR_ELx_IL;

	kvm_inject_nested_sync(vcpu, esr);
}

#if defined(__KVM_VHE_HYPERVISOR__) || defined(__KVM_NVHE_HYPERVISOR__)
static __always_inline bool vcpu_el1_is_32bit(struct kvm_vcpu *vcpu)
{
	return !(vcpu->arch.hcr_el2 & HCR_RW);
}
#else
static __always_inline bool vcpu_el1_is_32bit(struct kvm_vcpu *vcpu)
{
	return vcpu_has_feature(vcpu, KVM_ARM_VCPU_EL1_32BIT);
}
#endif

static inline void vcpu_reset_hcr(struct kvm_vcpu *vcpu)
{
	if (!vcpu_has_run_once(vcpu))
		vcpu->arch.hcr_el2 = HCR_GUEST_FLAGS;

	/*
	 * For non-FWB CPUs, we trap VM ops (HCR_EL2.TVM) until the M and C
	 * bits get set in SCTLR_EL1, so that we can detect when the guest
	 * MMU is turned on and perform the necessary cache maintenance at
	 * that point.
	 */
	if (!cpus_have_final_cap(ARM64_HAS_STAGE2_FWB))
		vcpu->arch.hcr_el2 |= HCR_TVM;
}

static inline unsigned long *vcpu_hcr(struct kvm_vcpu *vcpu)
{
	return (unsigned long *)&vcpu->arch.hcr_el2;
}

static inline void vcpu_clear_wfx_traps(struct kvm_vcpu *vcpu)
{
	vcpu->arch.hcr_el2 &= ~HCR_TWE;
	if (atomic_read(&vcpu->arch.vgic_cpu.vgic_v3.its_vpe.vlpi_count) ||
	    vcpu->kvm->arch.vgic.nassgireq)
		vcpu->arch.hcr_el2 &= ~HCR_TWI;
	else
		vcpu->arch.hcr_el2 |= HCR_TWI;
}

static inline void vcpu_set_wfx_traps(struct kvm_vcpu *vcpu)
{
	vcpu->arch.hcr_el2 |= HCR_TWE;
	vcpu->arch.hcr_el2 |= HCR_TWI;
}

static inline unsigned long vcpu_get_vsesr(struct kvm_vcpu *vcpu)
{
	return vcpu->arch.vsesr_el2;
}

static inline void vcpu_set_vsesr(struct kvm_vcpu *vcpu, u64 vsesr)
{
	vcpu->arch.vsesr_el2 = vsesr;
}

static __always_inline unsigned long *vcpu_pc(const struct kvm_vcpu *vcpu)
{
	return (unsigned long *)&vcpu_gp_regs(vcpu)->pc;
}

static __always_inline unsigned long *vcpu_cpsr(const struct kvm_vcpu *vcpu)
{
	return (unsigned long *)&vcpu_gp_regs(vcpu)->pstate;
}

static __always_inline bool vcpu_mode_is_32bit(const struct kvm_vcpu *vcpu)
{
	return !!(*vcpu_cpsr(vcpu) & PSR_MODE32_BIT);
}

static __always_inline bool kvm_condition_valid(const struct kvm_vcpu *vcpu)
{
	if (vcpu_mode_is_32bit(vcpu))
		return kvm_condition_valid32(vcpu);

	return true;
}

static inline void vcpu_set_thumb(struct kvm_vcpu *vcpu)
{
	*vcpu_cpsr(vcpu) |= PSR_AA32_T_BIT;
}

/*
 * vcpu_get_reg and vcpu_set_reg should always be passed a register number
 * coming from a read of ESR_EL2. Otherwise, it may give the wrong result on
 * AArch32 with banked registers.
 */
static __always_inline unsigned long vcpu_get_reg(const struct kvm_vcpu *vcpu,
						  u8 reg_num)
{
	return (reg_num == 31) ? 0 : vcpu_gp_regs(vcpu)->regs[reg_num];
}

static __always_inline void vcpu_set_reg(struct kvm_vcpu *vcpu, u8 reg_num,
					 unsigned long val)
{
	if (reg_num != 31)
		vcpu_gp_regs(vcpu)->regs[reg_num] = val;
}
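
/*
 * Illustrative sketch (hypothetical helper, not a kernel function): the
 * Rt value must come from an ESR_EL2 decode, e.g. kvm_vcpu_dabt_get_rd()
 * further down. Register 31 encodes XZR/WZR, so vcpu_get_reg() reads it
 * as zero and vcpu_set_reg() silently discards the write; callers don't
 * need to special-case it.
 */
static inline void example_write_xfer_reg(struct kvm_vcpu *vcpu, u8 rt,
					  unsigned long data)
{
	vcpu_set_reg(vcpu, rt, data);	/* no-op when rt == 31 (XZR) */
}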

static inline bool vcpu_is_el2_ctxt(const struct kvm_cpu_context *ctxt)
{
	switch (ctxt->regs.pstate & (PSR_MODE32_BIT | PSR_MODE_MASK)) {
	case PSR_MODE_EL2h:
	case PSR_MODE_EL2t:
		return true;
	default:
		return false;
	}
}

static inline bool vcpu_is_el2(const struct kvm_vcpu *vcpu)
{
	return vcpu_is_el2_ctxt(&vcpu->arch.ctxt);
}

static inline bool __vcpu_el2_e2h_is_set(const struct kvm_cpu_context *ctxt)
{
	return (!cpus_have_final_cap(ARM64_HAS_HCR_NV1) ||
		(ctxt_sys_reg(ctxt, HCR_EL2) & HCR_E2H));
}

static inline bool vcpu_el2_e2h_is_set(const struct kvm_vcpu *vcpu)
{
	return __vcpu_el2_e2h_is_set(&vcpu->arch.ctxt);
}

static inline bool __vcpu_el2_tge_is_set(const struct kvm_cpu_context *ctxt)
{
	return ctxt_sys_reg(ctxt, HCR_EL2) & HCR_TGE;
}

static inline bool vcpu_el2_tge_is_set(const struct kvm_vcpu *vcpu)
{
	return __vcpu_el2_tge_is_set(&vcpu->arch.ctxt);
}

static inline bool __is_hyp_ctxt(const struct kvm_cpu_context *ctxt)
{
	/*
	 * We are in a hypervisor context if the vcpu mode is EL2 or the
	 * E2H and TGE bits are set. The latter means we are in the user
	 * space of the VHE kernel. The ARMv8.1 ARM describes this as
	 * 'InHost'.
	 *
	 * Note that the HCR_EL2.{E2H,TGE}={0,1} case isn't really handled
	 * in the rest of the KVM code, and will result in a misbehaving
	 * guest.
	 */
	return vcpu_is_el2_ctxt(ctxt) ||
		(__vcpu_el2_e2h_is_set(ctxt) && __vcpu_el2_tge_is_set(ctxt)) ||
		__vcpu_el2_tge_is_set(ctxt);
}
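
/*
 * Spelled out (derived from the checks above), for a vcpu with NV:
 *
 *	PSTATE.M	HCR_EL2.TGE	hyp context?
 *	EL2h/EL2t	any		yes
 *	EL1/EL0		1		yes (the E2H=0 case misbehaves)
 *	EL1/EL0		0		no
 */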

static inline bool is_hyp_ctxt(const struct kvm_vcpu *vcpu)
{
	return vcpu_has_nv(vcpu) && __is_hyp_ctxt(&vcpu->arch.ctxt);
}

/*
 * The layout of SPSR for an AArch32 state is different when observed from an
 * AArch64 SPSR_ELx or an AArch32 SPSR_*. This function generates the AArch32
 * view given an AArch64 view.
 *
 * In ARM DDI 0487E.a see:
 *
 * - The AArch64 view (SPSR_EL2) in section C5.2.18, page C5-426
 * - The AArch32 view (SPSR_abt) in section G8.2.126, page G8-6256
 * - The AArch32 view (SPSR_und) in section G8.2.132, page G8-6280
 *
 * Which show the following differences:
 *
 * | Bit | AA64 | AA32 | Notes                       |
 * +-----+------+------+-----------------------------+
 * | 24  | DIT  | J    | J is RES0 in ARMv8          |
 * | 21  | SS   | DIT  | SS doesn't exist in AArch32 |
 *
 * ... and all other bits are (currently) common.
 */
static inline unsigned long host_spsr_to_spsr32(unsigned long spsr)
{
	const unsigned long overlap = BIT(24) | BIT(21);
	unsigned long dit = !!(spsr & PSR_AA32_DIT_BIT);

	spsr &= ~overlap;

	spsr |= dit << 21;

	return spsr;
}
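
/*
 * Worked example (assuming PSR_AA32_DIT_BIT names the AArch64-view DIT
 * position, bit 24, per the table above): with DIT and SS both set on
 * input, both overlapping bits are cleared and DIT is replanted at its
 * AArch32 position, so host_spsr_to_spsr32(0x01200000) == 0x00200000.
 */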

static inline bool vcpu_mode_priv(const struct kvm_vcpu *vcpu)
{
	u32 mode;

	if (vcpu_mode_is_32bit(vcpu)) {
		mode = *vcpu_cpsr(vcpu) & PSR_AA32_MODE_MASK;
		return mode > PSR_AA32_MODE_USR;
	}

	mode = *vcpu_cpsr(vcpu) & PSR_MODE_MASK;

	return mode != PSR_MODE_EL0t;
}

static __always_inline u64 kvm_vcpu_get_esr(const struct kvm_vcpu *vcpu)
{
	return vcpu->arch.fault.esr_el2;
}

static __always_inline int kvm_vcpu_get_condition(const struct kvm_vcpu *vcpu)
{
	u64 esr = kvm_vcpu_get_esr(vcpu);

	if (esr & ESR_ELx_CV)
		return (esr & ESR_ELx_COND_MASK) >> ESR_ELx_COND_SHIFT;

	return -1;
}

static __always_inline unsigned long kvm_vcpu_get_hfar(const struct kvm_vcpu *vcpu)
{
	return vcpu->arch.fault.far_el2;
}

static __always_inline phys_addr_t kvm_vcpu_get_fault_ipa(const struct kvm_vcpu *vcpu)
{
	return ((phys_addr_t)vcpu->arch.fault.hpfar_el2 & HPFAR_MASK) << 8;
}
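
/*
 * HPFAR_EL2's FIPA field (bits [43:4]) holds bits [51:12] of the faulting
 * IPA, so masking and shifting left by 8 rebuilds the page-aligned
 * address: e.g. HPFAR_EL2 = 0x800 decodes to IPA 0x80000. The offset
 * within the page has to come from FAR_EL2 when that register is valid.
 */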

static inline u64 kvm_vcpu_get_disr(const struct kvm_vcpu *vcpu)
{
	return vcpu->arch.fault.disr_el1;
}

static inline u32 kvm_vcpu_hvc_get_imm(const struct kvm_vcpu *vcpu)
{
	return kvm_vcpu_get_esr(vcpu) & ESR_ELx_xVC_IMM_MASK;
}

static __always_inline bool kvm_vcpu_dabt_isvalid(const struct kvm_vcpu *vcpu)
{
	return !!(kvm_vcpu_get_esr(vcpu) & ESR_ELx_ISV);
}

static inline unsigned long kvm_vcpu_dabt_iss_nisv_sanitized(const struct kvm_vcpu *vcpu)
{
	return kvm_vcpu_get_esr(vcpu) & (ESR_ELx_CM | ESR_ELx_WNR | ESR_ELx_FSC);
}

static inline bool kvm_vcpu_dabt_issext(const struct kvm_vcpu *vcpu)
{
	return !!(kvm_vcpu_get_esr(vcpu) & ESR_ELx_SSE);
}

static inline bool kvm_vcpu_dabt_issf(const struct kvm_vcpu *vcpu)
{
	return !!(kvm_vcpu_get_esr(vcpu) & ESR_ELx_SF);
}

static __always_inline int kvm_vcpu_dabt_get_rd(const struct kvm_vcpu *vcpu)
{
	return (kvm_vcpu_get_esr(vcpu) & ESR_ELx_SRT_MASK) >> ESR_ELx_SRT_SHIFT;
}

static __always_inline bool kvm_vcpu_abt_iss1tw(const struct kvm_vcpu *vcpu)
{
	return !!(kvm_vcpu_get_esr(vcpu) & ESR_ELx_S1PTW);
}

/* Always check for S1PTW *before* using this. */
static __always_inline bool kvm_vcpu_dabt_iswrite(const struct kvm_vcpu *vcpu)
{
	return kvm_vcpu_get_esr(vcpu) & ESR_ELx_WNR;
}

static inline bool kvm_vcpu_dabt_is_cm(const struct kvm_vcpu *vcpu)
{
	return !!(kvm_vcpu_get_esr(vcpu) & ESR_ELx_CM);
}

static __always_inline unsigned int kvm_vcpu_dabt_get_as(const struct kvm_vcpu *vcpu)
{
	return 1 << ((kvm_vcpu_get_esr(vcpu) & ESR_ELx_SAS) >> ESR_ELx_SAS_SHIFT);
}

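/*
 * Illustrative sketch (hypothetical, not a kernel function): pulling the
 * pieces an MMIO emulator needs out of a data abort. The syndrome fields
 * are only meaningful when ISV is set, and a stage-1 page-table-walk
 * abort is never a candidate for MMIO emulation, hence both checks come
 * first (see the S1PTW warning above kvm_vcpu_dabt_iswrite()).
 */
static inline bool example_decode_mmio(struct kvm_vcpu *vcpu, bool *is_write,
				       int *rt, unsigned int *len)
{
	if (!kvm_vcpu_dabt_isvalid(vcpu) || kvm_vcpu_abt_iss1tw(vcpu))
		return false;

	*is_write = kvm_vcpu_dabt_iswrite(vcpu);
	*rt = kvm_vcpu_dabt_get_rd(vcpu);
	*len = kvm_vcpu_dabt_get_as(vcpu);	/* access size in bytes */

	return true;
}
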
/* This one is not specific to Data Abort */
static __always_inline bool kvm_vcpu_trap_il_is32bit(const struct kvm_vcpu *vcpu)
{
	return !!(kvm_vcpu_get_esr(vcpu) & ESR_ELx_IL);
}

static __always_inline u8 kvm_vcpu_trap_get_class(const struct kvm_vcpu *vcpu)
{
	return ESR_ELx_EC(kvm_vcpu_get_esr(vcpu));
}

static inline bool kvm_vcpu_trap_is_iabt(const struct kvm_vcpu *vcpu)
{
	return kvm_vcpu_trap_get_class(vcpu) == ESR_ELx_EC_IABT_LOW;
}

static inline bool kvm_vcpu_trap_is_exec_fault(const struct kvm_vcpu *vcpu)
{
	return kvm_vcpu_trap_is_iabt(vcpu) && !kvm_vcpu_abt_iss1tw(vcpu);
}

static __always_inline u8 kvm_vcpu_trap_get_fault(const struct kvm_vcpu *vcpu)
{
	return kvm_vcpu_get_esr(vcpu) & ESR_ELx_FSC;
}

static inline
bool kvm_vcpu_trap_is_permission_fault(const struct kvm_vcpu *vcpu)
{
	return esr_fsc_is_permission_fault(kvm_vcpu_get_esr(vcpu));
}

static inline
bool kvm_vcpu_trap_is_translation_fault(const struct kvm_vcpu *vcpu)
{
	return esr_fsc_is_translation_fault(kvm_vcpu_get_esr(vcpu));
}

static inline
u64 kvm_vcpu_trap_get_perm_fault_granule(const struct kvm_vcpu *vcpu)
{
	unsigned long esr = kvm_vcpu_get_esr(vcpu);

	BUG_ON(!esr_fsc_is_permission_fault(esr));
	return BIT(ARM64_HW_PGTABLE_LEVEL_SHIFT(esr & ESR_ELx_FSC_LEVEL));
}

static __always_inline bool kvm_vcpu_abt_issea(const struct kvm_vcpu *vcpu)
{
	switch (kvm_vcpu_trap_get_fault(vcpu)) {
	case ESR_ELx_FSC_EXTABT:
	case ESR_ELx_FSC_SEA_TTW(-1) ... ESR_ELx_FSC_SEA_TTW(3):
	case ESR_ELx_FSC_SECC:
	case ESR_ELx_FSC_SECC_TTW(-1) ... ESR_ELx_FSC_SECC_TTW(3):
		return true;
	default:
		return false;
	}
}

static __always_inline int kvm_vcpu_sys_get_rt(struct kvm_vcpu *vcpu)
{
	u64 esr = kvm_vcpu_get_esr(vcpu);
	return ESR_ELx_SYS64_ISS_RT(esr);
}

static inline bool kvm_is_write_fault(struct kvm_vcpu *vcpu)
{
	if (kvm_vcpu_abt_iss1tw(vcpu)) {
		/*
		 * Only a permission fault on a S1PTW should be
		 * considered as a write. Otherwise, page tables baked
		 * in a read-only memslot will result in an exception
		 * being delivered in the guest.
		 *
		 * The drawback is that we end up faulting twice if the
		 * guest is using any of HW AF/DB: a translation fault
		 * to map the page containing the PT (read only at
		 * first), then a permission fault to allow the flags
		 * to be set.
		 */
		return kvm_vcpu_trap_is_permission_fault(vcpu);
	}

	if (kvm_vcpu_trap_is_iabt(vcpu))
		return false;

	return kvm_vcpu_dabt_iswrite(vcpu);
}

static inline unsigned long kvm_vcpu_get_mpidr_aff(struct kvm_vcpu *vcpu)
{
	return __vcpu_sys_reg(vcpu, MPIDR_EL1) & MPIDR_HWID_BITMASK;
}

static inline void kvm_vcpu_set_be(struct kvm_vcpu *vcpu)
{
	if (vcpu_mode_is_32bit(vcpu)) {
		*vcpu_cpsr(vcpu) |= PSR_AA32_E_BIT;
	} else {
		u64 sctlr = vcpu_read_sys_reg(vcpu, SCTLR_EL1);
		sctlr |= SCTLR_ELx_EE;
		vcpu_write_sys_reg(vcpu, sctlr, SCTLR_EL1);
	}
}

static inline bool kvm_vcpu_is_be(struct kvm_vcpu *vcpu)
{
	if (vcpu_mode_is_32bit(vcpu))
		return !!(*vcpu_cpsr(vcpu) & PSR_AA32_E_BIT);

	if (vcpu_mode_priv(vcpu))
		return !!(vcpu_read_sys_reg(vcpu, SCTLR_EL1) & SCTLR_ELx_EE);
	else
		return !!(vcpu_read_sys_reg(vcpu, SCTLR_EL1) & SCTLR_EL1_E0E);
}

static inline unsigned long vcpu_data_guest_to_host(struct kvm_vcpu *vcpu,
						    unsigned long data,
						    unsigned int len)
{
	if (kvm_vcpu_is_be(vcpu)) {
		switch (len) {
		case 1:
			return data & 0xff;
		case 2:
			return be16_to_cpu(data & 0xffff);
		case 4:
			return be32_to_cpu(data & 0xffffffff);
		default:
			return be64_to_cpu(data);
		}
	} else {
		switch (len) {
		case 1:
			return data & 0xff;
		case 2:
			return le16_to_cpu(data & 0xffff);
		case 4:
			return le32_to_cpu(data & 0xffffffff);
		default:
			return le64_to_cpu(data);
		}
	}

	return data;	/* Leave LE untouched */
}

static inline unsigned long vcpu_data_host_to_guest(struct kvm_vcpu *vcpu,
						    unsigned long data,
						    unsigned int len)
{
	if (kvm_vcpu_is_be(vcpu)) {
		switch (len) {
		case 1:
			return data & 0xff;
		case 2:
			return cpu_to_be16(data & 0xffff);
		case 4:
			return cpu_to_be32(data & 0xffffffff);
		default:
			return cpu_to_be64(data);
		}
	} else {
		switch (len) {
		case 1:
			return data & 0xff;
		case 2:
			return cpu_to_le16(data & 0xffff);
		case 4:
			return cpu_to_le32(data & 0xffffffff);
		default:
			return cpu_to_le64(data);
		}
	}

	return data;	/* Leave LE untouched */
}
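
/*
 * Illustrative sketch (hypothetical, not a kernel function): preparing
 * the payload of an emulated MMIO store. The register contents are fixed
 * up for the vcpu's current data endianness before being handed to the
 * device model; completing a load goes through vcpu_data_host_to_guest()
 * in the opposite direction.
 */
static inline unsigned long example_mmio_store_data(struct kvm_vcpu *vcpu)
{
	int rt = kvm_vcpu_dabt_get_rd(vcpu);
	unsigned int len = kvm_vcpu_dabt_get_as(vcpu);

	return vcpu_data_guest_to_host(vcpu, vcpu_get_reg(vcpu, rt), len);
}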

static __always_inline void kvm_incr_pc(struct kvm_vcpu *vcpu)
{
	WARN_ON(vcpu_get_flag(vcpu, PENDING_EXCEPTION));
	vcpu_set_flag(vcpu, INCREMENT_PC);
}

#define kvm_pend_exception(v, e)					\
	do {								\
		WARN_ON(vcpu_get_flag((v), INCREMENT_PC));		\
		vcpu_set_flag((v), PENDING_EXCEPTION);			\
		vcpu_set_flag((v), e);					\
	} while (0)

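/*
 * Usage note: an exit that emulates an instruction finishes with
 * kvm_incr_pc(), while one that injects an exception uses
 * kvm_pend_exception() (e.g. with a flag such as EXCEPT_AA64_EL1_SYNC).
 * The WARN_ONs above enforce that a single exit never requests both,
 * since the deferred PC increment and the exception entry would clobber
 * each other's view of the guest PC.
 */
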
#define __build_check_all_or_none(r, bits)				\
	BUILD_BUG_ON(((r) & (bits)) && ((r) & (bits)) != (bits))

#define __cpacr_to_cptr_clr(clr, set)					\
	({								\
		u64 cptr = 0;						\
									\
		if ((set) & CPACR_ELx_FPEN)				\
			cptr |= CPTR_EL2_TFP;				\
		if ((set) & CPACR_ELx_ZEN)				\
			cptr |= CPTR_EL2_TZ;				\
		if ((set) & CPACR_ELx_SMEN)				\
			cptr |= CPTR_EL2_TSM;				\
		if ((clr) & CPACR_ELx_TTA)				\
			cptr |= CPTR_EL2_TTA;				\
		if ((clr) & CPTR_EL2_TAM)				\
			cptr |= CPTR_EL2_TAM;				\
		if ((clr) & CPTR_EL2_TCPAC)				\
			cptr |= CPTR_EL2_TCPAC;				\
									\
		cptr;							\
	})

#define __cpacr_to_cptr_set(clr, set)					\
	({								\
		u64 cptr = 0;						\
									\
		if ((clr) & CPACR_ELx_FPEN)				\
			cptr |= CPTR_EL2_TFP;				\
		if ((clr) & CPACR_ELx_ZEN)				\
			cptr |= CPTR_EL2_TZ;				\
		if ((clr) & CPACR_ELx_SMEN)				\
			cptr |= CPTR_EL2_TSM;				\
		if ((set) & CPACR_ELx_TTA)				\
			cptr |= CPTR_EL2_TTA;				\
		if ((set) & CPTR_EL2_TAM)				\
			cptr |= CPTR_EL2_TAM;				\
		if ((set) & CPTR_EL2_TCPAC)				\
			cptr |= CPTR_EL2_TCPAC;				\
									\
		cptr;							\
	})

#define cpacr_clear_set(clr, set)					\
	do {								\
		BUILD_BUG_ON((set) & CPTR_VHE_EL2_RES0);		\
		BUILD_BUG_ON((clr) & CPACR_ELx_E0POE);			\
		__build_check_all_or_none((clr), CPACR_ELx_FPEN);	\
		__build_check_all_or_none((set), CPACR_ELx_FPEN);	\
		__build_check_all_or_none((clr), CPACR_ELx_ZEN);	\
		__build_check_all_or_none((set), CPACR_ELx_ZEN);	\
		__build_check_all_or_none((clr), CPACR_ELx_SMEN);	\
		__build_check_all_or_none((set), CPACR_ELx_SMEN);	\
									\
		if (has_vhe() || has_hvhe())				\
			sysreg_clear_set(cpacr_el1, clr, set);		\
		else							\
			sysreg_clear_set(cptr_el2,			\
					 __cpacr_to_cptr_clr(clr, set),	\
					 __cpacr_to_cptr_set(clr, set));\
	} while (0)
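
/*
 * Worked example: cpacr_clear_set(0, CPACR_ELx_FPEN) enables FP/SIMD for
 * the guest. On (h)VHE this simply sets CPACR_EL1.FPEN; on nVHE the "set"
 * half is translated by __cpacr_to_cptr_clr() into clearing CPTR_EL2.TFP,
 * since the CPTR_EL2 bit has the inverted, trap-when-set, polarity.
 */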

static __always_inline void kvm_write_cptr_el2(u64 val)
{
	if (has_vhe() || has_hvhe())
		write_sysreg(val, cpacr_el1);
	else
		write_sysreg(val, cptr_el2);
}

static __always_inline u64 kvm_get_reset_cptr_el2(struct kvm_vcpu *vcpu)
{
	u64 val;

	if (has_vhe()) {
		val = (CPACR_ELx_FPEN | CPACR_EL1_ZEN_EL1EN);
		if (cpus_have_final_cap(ARM64_SME))
			val |= CPACR_EL1_SMEN_EL1EN;
	} else if (has_hvhe()) {
		val = CPACR_ELx_FPEN;

		if (!vcpu_has_sve(vcpu) || !guest_owns_fp_regs())
			val |= CPACR_ELx_ZEN;
		if (cpus_have_final_cap(ARM64_SME))
			val |= CPACR_ELx_SMEN;
	} else {
		val = CPTR_NVHE_EL2_RES1;

		if (vcpu_has_sve(vcpu) && guest_owns_fp_regs())
			val |= CPTR_EL2_TZ;
		if (cpus_have_final_cap(ARM64_SME))
			val &= ~CPTR_EL2_TSM;
	}

	return val;
}

static __always_inline void kvm_reset_cptr_el2(struct kvm_vcpu *vcpu)
{
	u64 val = kvm_get_reset_cptr_el2(vcpu);

	kvm_write_cptr_el2(val);
}

/*
 * Returns a 'sanitised' view of CPTR_EL2, translating from nVHE to the VHE
 * format if E2H isn't set.
 */
static inline u64 vcpu_sanitised_cptr_el2(const struct kvm_vcpu *vcpu)
{
	u64 cptr = __vcpu_sys_reg(vcpu, CPTR_EL2);

	if (!vcpu_el2_e2h_is_set(vcpu))
		cptr = translate_cptr_el2_to_cpacr_el1(cptr);

	return cptr;
}

static inline bool ____cptr_xen_trap_enabled(const struct kvm_vcpu *vcpu,
					     unsigned int xen)
{
	switch (xen) {
	case 0b00:
	case 0b10:
		return true;
	case 0b01:
		return vcpu_el2_tge_is_set(vcpu) && !vcpu_is_el2(vcpu);
	case 0b11:
	default:
		return false;
	}
}
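
/*
 * The switch above mirrors the architected CPACR_ELx.{FPEN,ZEN} encodings:
 * 0b00 and 0b10 trap at both EL0 and EL1, 0b01 traps EL0 accesses only
 * (hence the check for the guest hypervisor's "InHost" EL0 case), and
 * 0b11 traps nothing.
 */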

#define __guest_hyp_cptr_xen_trap_enabled(vcpu, xen)			\
	(!vcpu_has_nv(vcpu) ? false :					\
	 ____cptr_xen_trap_enabled(vcpu,				\
				   SYS_FIELD_GET(CPACR_ELx, xen,	\
						 vcpu_sanitised_cptr_el2(vcpu))))

static inline bool guest_hyp_fpsimd_traps_enabled(const struct kvm_vcpu *vcpu)
{
	return __guest_hyp_cptr_xen_trap_enabled(vcpu, FPEN);
}

static inline bool guest_hyp_sve_traps_enabled(const struct kvm_vcpu *vcpu)
{
	return __guest_hyp_cptr_xen_trap_enabled(vcpu, ZEN);
}

#endif /* __ARM64_KVM_EMULATE_H__ */