| 1 | /* SPDX-License-Identifier: GPL-2.0-only */ |
| 2 | /* |
| 3 | * Copyright (C) 2012,2013 - ARM Ltd |
| 4 | * Author: Marc Zyngier <marc.zyngier@arm.com> |
| 5 | * |
| 6 | * Derived from arch/arm/include/kvm_emulate.h |
| 7 | * Copyright (C) 2012 - Virtual Open Systems and Columbia University |
| 8 | * Author: Christoffer Dall <c.dall@virtualopensystems.com> |
| 9 | */ |
| 10 | |
| 11 | #ifndef __ARM64_KVM_EMULATE_H__ |
| 12 | #define __ARM64_KVM_EMULATE_H__ |
| 13 | |
| 14 | #include <linux/bitfield.h> |
| 15 | #include <linux/kvm_host.h> |
| 16 | |
| 17 | #include <asm/debug-monitors.h> |
| 18 | #include <asm/esr.h> |
| 19 | #include <asm/kvm_arm.h> |
| 20 | #include <asm/kvm_hyp.h> |
| 21 | #include <asm/kvm_nested.h> |
| 22 | #include <asm/ptrace.h> |
| 23 | #include <asm/cputype.h> |
| 24 | #include <asm/virt.h> |
| 25 | |
| 26 | #define CURRENT_EL_SP_EL0_VECTOR 0x0 |
| 27 | #define CURRENT_EL_SP_ELx_VECTOR 0x200 |
| 28 | #define LOWER_EL_AArch64_VECTOR 0x400 |
| 29 | #define LOWER_EL_AArch32_VECTOR 0x600 |
| 30 | |
| 31 | enum exception_type { |
| 32 | except_type_sync = 0, |
| 33 | except_type_irq = 0x80, |
| 34 | except_type_fiq = 0x100, |
| 35 | except_type_serror = 0x180, |
| 36 | }; |
| 37 | |
| 38 | #define kvm_exception_type_names \ |
| 39 | { except_type_sync, "SYNC" }, \ |
| 40 | { except_type_irq, "IRQ" }, \ |
| 41 | { except_type_fiq, "FIQ" }, \ |
| 42 | { except_type_serror, "SERROR" } |
| 43 | |
| 44 | bool kvm_condition_valid32(const struct kvm_vcpu *vcpu); |
| 45 | void kvm_skip_instr32(struct kvm_vcpu *vcpu); |
| 46 | |
| 47 | void kvm_inject_undefined(struct kvm_vcpu *vcpu); |
| 48 | void kvm_inject_vabt(struct kvm_vcpu *vcpu); |
| 49 | void kvm_inject_dabt(struct kvm_vcpu *vcpu, unsigned long addr); |
| 50 | void kvm_inject_pabt(struct kvm_vcpu *vcpu, unsigned long addr); |
| 51 | void kvm_inject_size_fault(struct kvm_vcpu *vcpu); |
| 52 | |
| 53 | void kvm_vcpu_wfi(struct kvm_vcpu *vcpu); |
| 54 | |
| 55 | void kvm_emulate_nested_eret(struct kvm_vcpu *vcpu); |
| 56 | int kvm_inject_nested_sync(struct kvm_vcpu *vcpu, u64 esr_el2); |
| 57 | int kvm_inject_nested_irq(struct kvm_vcpu *vcpu); |
| 58 | |
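/*
 * Forward an SVE access trap to the guest hypervisor as a synchronous
 * exception: EC is set to ESR_ELx_EC_SVE, IL marks a 32-bit instruction,
 * and the ISS is left zero.
 */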
| 59 | static inline void kvm_inject_nested_sve_trap(struct kvm_vcpu *vcpu) |
| 60 | { |
| 61 | u64 esr = FIELD_PREP(ESR_ELx_EC_MASK, ESR_ELx_EC_SVE) | |
| 62 | ESR_ELx_IL; |
| 63 | |
| 64 | kvm_inject_nested_sync(vcpu, esr); |
| 65 | } |
| 66 | |
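/*
 * At hyp, HCR_EL2.RW (64-bit EL1 when set) is the authoritative source
 * of the guest's EL1 register width; outside of hyp code the vCPU
 * feature flag is used instead, the two being consistent by construction.
 */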
| 67 | #if defined(__KVM_VHE_HYPERVISOR__) || defined(__KVM_NVHE_HYPERVISOR__) |
| 68 | static __always_inline bool vcpu_el1_is_32bit(struct kvm_vcpu *vcpu) |
| 69 | { |
| 70 | return !(vcpu->arch.hcr_el2 & HCR_RW); |
| 71 | } |
| 72 | #else |
| 73 | static __always_inline bool vcpu_el1_is_32bit(struct kvm_vcpu *vcpu) |
| 74 | { |
| 75 | return vcpu_has_feature(vcpu, KVM_ARM_VCPU_EL1_32BIT); |
| 76 | } |
| 77 | #endif |
| 78 | |
| 79 | static inline void vcpu_reset_hcr(struct kvm_vcpu *vcpu) |
| 80 | { |
| 81 | if (!vcpu_has_run_once(vcpu)) |
| 82 | vcpu->arch.hcr_el2 = HCR_GUEST_FLAGS; |
| 83 | |
| 84 | /* |
| 85 | * For non-FWB CPUs, we trap VM ops (HCR_EL2.TVM) until M+C |
| 86 | * get set in SCTLR_EL1 such that we can detect when the guest |
| 87 | * MMU gets turned on and do the necessary cache maintenance |
| 88 | * then. |
| 89 | */ |
| 90 | if (!cpus_have_final_cap(ARM64_HAS_STAGE2_FWB)) |
| 91 | vcpu->arch.hcr_el2 |= HCR_TVM; |
| 92 | } |
| 93 | |
| 94 | static inline unsigned long *vcpu_hcr(struct kvm_vcpu *vcpu) |
| 95 | { |
| 96 | return (unsigned long *)&vcpu->arch.hcr_el2; |
| 97 | } |
| 98 | |
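/*
 * Relax WFx trapping: TWE is always cleared, while TWI is only cleared
 * when the vGIC may deliver vLPIs/vSGIs directly to this vcpu (KVM
 * cannot observe those as pending, making a trapping WFI unprofitable);
 * otherwise WFI keeps trapping.
 */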
| 99 | static inline void vcpu_clear_wfx_traps(struct kvm_vcpu *vcpu) |
| 100 | { |
| 101 | vcpu->arch.hcr_el2 &= ~HCR_TWE; |
| 102 | if (atomic_read(&vcpu->arch.vgic_cpu.vgic_v3.its_vpe.vlpi_count) || |
| 103 | vcpu->kvm->arch.vgic.nassgireq) |
| 104 | vcpu->arch.hcr_el2 &= ~HCR_TWI; |
| 105 | else |
| 106 | vcpu->arch.hcr_el2 |= HCR_TWI; |
| 107 | } |
| 108 | |
| 109 | static inline void vcpu_set_wfx_traps(struct kvm_vcpu *vcpu) |
| 110 | { |
| 111 | vcpu->arch.hcr_el2 |= HCR_TWE; |
| 112 | vcpu->arch.hcr_el2 |= HCR_TWI; |
| 113 | } |
| 114 | |
| 115 | static inline unsigned long vcpu_get_vsesr(struct kvm_vcpu *vcpu) |
| 116 | { |
| 117 | return vcpu->arch.vsesr_el2; |
| 118 | } |
| 119 | |
| 120 | static inline void vcpu_set_vsesr(struct kvm_vcpu *vcpu, u64 vsesr) |
| 121 | { |
| 122 | vcpu->arch.vsesr_el2 = vsesr; |
| 123 | } |
| 124 | |
| 125 | static __always_inline unsigned long *vcpu_pc(const struct kvm_vcpu *vcpu) |
| 126 | { |
| 127 | return (unsigned long *)&vcpu_gp_regs(vcpu)->pc; |
| 128 | } |
| 129 | |
| 130 | static __always_inline unsigned long *vcpu_cpsr(const struct kvm_vcpu *vcpu) |
| 131 | { |
| 132 | return (unsigned long *)&vcpu_gp_regs(vcpu)->pstate; |
| 133 | } |
| 134 | |
| 135 | static __always_inline bool vcpu_mode_is_32bit(const struct kvm_vcpu *vcpu) |
| 136 | { |
| 137 | return !!(*vcpu_cpsr(vcpu) & PSR_MODE32_BIT); |
| 138 | } |
| 139 | |
| 140 | static __always_inline bool kvm_condition_valid(const struct kvm_vcpu *vcpu) |
| 141 | { |
| 142 | if (vcpu_mode_is_32bit(vcpu)) |
| 143 | return kvm_condition_valid32(vcpu); |
| 144 | |
| 145 | return true; |
| 146 | } |
| 147 | |
| 148 | static inline void vcpu_set_thumb(struct kvm_vcpu *vcpu) |
| 149 | { |
| 150 | *vcpu_cpsr(vcpu) |= PSR_AA32_T_BIT; |
| 151 | } |
| 152 | |
| 153 | /* |
| 154 | * vcpu_get_reg and vcpu_set_reg should always be passed a register number |
| 155 | * coming from a read of ESR_EL2. Otherwise, it may give the wrong result on |
| 156 | * AArch32 with banked registers. |
| 157 | */ |
| 158 | static __always_inline unsigned long vcpu_get_reg(const struct kvm_vcpu *vcpu, |
| 159 | u8 reg_num) |
| 160 | { |
| 161 | return (reg_num == 31) ? 0 : vcpu_gp_regs(vcpu)->regs[reg_num]; |
| 162 | } |
| 163 | |
| 164 | static __always_inline void vcpu_set_reg(struct kvm_vcpu *vcpu, u8 reg_num, |
| 165 | unsigned long val) |
| 166 | { |
| 167 | if (reg_num != 31) |
| 168 | vcpu_gp_regs(vcpu)->regs[reg_num] = val; |
| 169 | } |
| 170 | |
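/*
 * Check PSTATE.M for EL2h/EL2t. PSR_MODE32_BIT is included in the mask
 * so that an AArch32 mode encoding can never be mistaken for EL2.
 */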
| 171 | static inline bool vcpu_is_el2_ctxt(const struct kvm_cpu_context *ctxt) |
| 172 | { |
| 173 | switch (ctxt->regs.pstate & (PSR_MODE32_BIT | PSR_MODE_MASK)) { |
| 174 | case PSR_MODE_EL2h: |
| 175 | case PSR_MODE_EL2t: |
| 176 | return true; |
| 177 | default: |
| 178 | return false; |
| 179 | } |
| 180 | } |
| 181 | |
| 182 | static inline bool vcpu_is_el2(const struct kvm_vcpu *vcpu) |
| 183 | { |
| 184 | return vcpu_is_el2_ctxt(&vcpu->arch.ctxt); |
| 185 | } |
| 186 | |
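/*
 * Without support for HCR_EL2.NV1, a guest hypervisor can only run with
 * E2H=1 (the bit is in effect RES1), so E2H is reported as set regardless
 * of the value held in the guest's virtual HCR_EL2.
 */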
| 187 | static inline bool __vcpu_el2_e2h_is_set(const struct kvm_cpu_context *ctxt) |
| 188 | { |
| 189 | return (!cpus_have_final_cap(ARM64_HAS_HCR_NV1) || |
| 190 | (ctxt_sys_reg(ctxt, HCR_EL2) & HCR_E2H)); |
| 191 | } |
| 192 | |
| 193 | static inline bool vcpu_el2_e2h_is_set(const struct kvm_vcpu *vcpu) |
| 194 | { |
| 195 | return __vcpu_el2_e2h_is_set(&vcpu->arch.ctxt); |
| 196 | } |
| 197 | |
| 198 | static inline bool __vcpu_el2_tge_is_set(const struct kvm_cpu_context *ctxt) |
| 199 | { |
| 200 | return ctxt_sys_reg(ctxt, HCR_EL2) & HCR_TGE; |
| 201 | } |
| 202 | |
| 203 | static inline bool vcpu_el2_tge_is_set(const struct kvm_vcpu *vcpu) |
| 204 | { |
| 205 | return __vcpu_el2_tge_is_set(&vcpu->arch.ctxt); |
| 206 | } |
| 207 | |
| 208 | static inline bool __is_hyp_ctxt(const struct kvm_cpu_context *ctxt) |
| 209 | { |
| 210 | /* |
| 211 | * We are in a hypervisor context if the vcpu mode is EL2 or |
| 212 | * E2H and TGE bits are set. The latter means we are in the user space |
| 213 | * of the VHE kernel. ARMv8.1 ARM describes this as 'InHost' |
| 214 | * |
| 215 | * Note that the HCR_EL2.{E2H,TGE}={0,1} isn't really handled in the |
| 216 | * rest of the KVM code, and will result in a misbehaving guest. |
| 217 | */ |
| 218 | return vcpu_is_el2_ctxt(ctxt) || |
| 219 | (__vcpu_el2_e2h_is_set(ctxt) && __vcpu_el2_tge_is_set(ctxt)) || |
| 220 | __vcpu_el2_tge_is_set(ctxt); |
| 221 | } |
| 222 | |
| 223 | static inline bool is_hyp_ctxt(const struct kvm_vcpu *vcpu) |
| 224 | { |
| 225 | return vcpu_has_nv(vcpu) && __is_hyp_ctxt(&vcpu->arch.ctxt); |
| 226 | } |
| 227 | |
| 228 | /* |
| 229 | * The layout of SPSR for an AArch32 state is different when observed from an |
| 230 | * AArch64 SPSR_ELx or an AArch32 SPSR_*. This function generates the AArch32 |
| 231 | * view given an AArch64 view. |
| 232 | * |
| 233 | * In ARM DDI 0487E.a see: |
| 234 | * |
| 235 | * - The AArch64 view (SPSR_EL2) in section C5.2.18, page C5-426 |
| 236 | * - The AArch32 view (SPSR_abt) in section G8.2.126, page G8-6256 |
| 237 | * - The AArch32 view (SPSR_und) in section G8.2.132, page G8-6280 |
| 238 | * |
| 239 | * Which show the following differences: |
| 240 | * |
 * | Bit | AA64 | AA32 | Notes                       |
 * +-----+------+------+-----------------------------+
 * | 24  | DIT  | J    | J is RES0 in ARMv8          |
 * | 21  | SS   | DIT  | SS doesn't exist in AArch32 |
| 245 | * |
| 246 | * ... and all other bits are (currently) common. |
| 247 | */ |
| 248 | static inline unsigned long host_spsr_to_spsr32(unsigned long spsr) |
| 249 | { |
| 250 | const unsigned long overlap = BIT(24) | BIT(21); |
| 251 | unsigned long dit = !!(spsr & PSR_AA32_DIT_BIT); |
| 252 | |
| 253 | spsr &= ~overlap; |
| 254 | |
| 255 | spsr |= dit << 21; |
| 256 | |
| 257 | return spsr; |
| 258 | } |
| 259 | |
| 260 | static inline bool vcpu_mode_priv(const struct kvm_vcpu *vcpu) |
| 261 | { |
| 262 | u32 mode; |
| 263 | |
| 264 | if (vcpu_mode_is_32bit(vcpu)) { |
| 265 | mode = *vcpu_cpsr(vcpu) & PSR_AA32_MODE_MASK; |
| 266 | return mode > PSR_AA32_MODE_USR; |
| 267 | } |
| 268 | |
| 269 | mode = *vcpu_cpsr(vcpu) & PSR_MODE_MASK; |
| 270 | |
| 271 | return mode != PSR_MODE_EL0t; |
| 272 | } |
| 273 | |
| 274 | static __always_inline u64 kvm_vcpu_get_esr(const struct kvm_vcpu *vcpu) |
| 275 | { |
| 276 | return vcpu->arch.fault.esr_el2; |
| 277 | } |
| 278 | |
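/*
 * For a trapped conditional AArch32 instruction, return the condition
 * code held in ESR_ELx.COND when ESR_ELx.CV flags it as valid, or -1
 * when no condition is available from the ESR.
 */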
| 279 | static __always_inline int kvm_vcpu_get_condition(const struct kvm_vcpu *vcpu) |
| 280 | { |
| 281 | u64 esr = kvm_vcpu_get_esr(vcpu); |
| 282 | |
| 283 | if (esr & ESR_ELx_CV) |
| 284 | return (esr & ESR_ELx_COND_MASK) >> ESR_ELx_COND_SHIFT; |
| 285 | |
| 286 | return -1; |
| 287 | } |
| 288 | |
| 289 | static __always_inline unsigned long kvm_vcpu_get_hfar(const struct kvm_vcpu *vcpu) |
| 290 | { |
| 291 | return vcpu->arch.fault.far_el2; |
| 292 | } |
| 293 | |
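/*
 * HPFAR_EL2.FIPA starts at register bit 4 and describes the faulting
 * IPA from bit 12 upwards; masking and shifting left by 8 therefore
 * rebuilds the page-aligned IPA.
 */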
| 294 | static __always_inline phys_addr_t kvm_vcpu_get_fault_ipa(const struct kvm_vcpu *vcpu) |
| 295 | { |
| 296 | return ((phys_addr_t)vcpu->arch.fault.hpfar_el2 & HPFAR_MASK) << 8; |
| 297 | } |
| 298 | |
| 299 | static inline u64 kvm_vcpu_get_disr(const struct kvm_vcpu *vcpu) |
| 300 | { |
| 301 | return vcpu->arch.fault.disr_el1; |
| 302 | } |
| 303 | |
| 304 | static inline u32 kvm_vcpu_hvc_get_imm(const struct kvm_vcpu *vcpu) |
| 305 | { |
| 306 | return kvm_vcpu_get_esr(vcpu) & ESR_ELx_xVC_IMM_MASK; |
| 307 | } |
| 308 | |
| 309 | static __always_inline bool kvm_vcpu_dabt_isvalid(const struct kvm_vcpu *vcpu) |
| 310 | { |
| 311 | return !!(kvm_vcpu_get_esr(vcpu) & ESR_ELx_ISV); |
| 312 | } |
| 313 | |
| 314 | static inline unsigned long kvm_vcpu_dabt_iss_nisv_sanitized(const struct kvm_vcpu *vcpu) |
| 315 | { |
| 316 | return kvm_vcpu_get_esr(vcpu) & (ESR_ELx_CM | ESR_ELx_WNR | ESR_ELx_FSC); |
| 317 | } |
| 318 | |
| 319 | static inline bool kvm_vcpu_dabt_issext(const struct kvm_vcpu *vcpu) |
| 320 | { |
| 321 | return !!(kvm_vcpu_get_esr(vcpu) & ESR_ELx_SSE); |
| 322 | } |
| 323 | |
| 324 | static inline bool kvm_vcpu_dabt_issf(const struct kvm_vcpu *vcpu) |
| 325 | { |
| 326 | return !!(kvm_vcpu_get_esr(vcpu) & ESR_ELx_SF); |
| 327 | } |
| 328 | |
| 329 | static __always_inline int kvm_vcpu_dabt_get_rd(const struct kvm_vcpu *vcpu) |
| 330 | { |
| 331 | return (kvm_vcpu_get_esr(vcpu) & ESR_ELx_SRT_MASK) >> ESR_ELx_SRT_SHIFT; |
| 332 | } |
| 333 | |
| 334 | static __always_inline bool kvm_vcpu_abt_iss1tw(const struct kvm_vcpu *vcpu) |
| 335 | { |
| 336 | return !!(kvm_vcpu_get_esr(vcpu) & ESR_ELx_S1PTW); |
| 337 | } |
| 338 | |
| 339 | /* Always check for S1PTW *before* using this. */ |
| 340 | static __always_inline bool kvm_vcpu_dabt_iswrite(const struct kvm_vcpu *vcpu) |
| 341 | { |
| 342 | return kvm_vcpu_get_esr(vcpu) & ESR_ELx_WNR; |
| 343 | } |
| 344 | |
| 345 | static inline bool kvm_vcpu_dabt_is_cm(const struct kvm_vcpu *vcpu) |
| 346 | { |
| 347 | return !!(kvm_vcpu_get_esr(vcpu) & ESR_ELx_CM); |
| 348 | } |
| 349 | |
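/*
 * ESR_ELx.SAS holds log2 of the access size, so this evaluates to the
 * access width in bytes: 1, 2, 4 or 8.
 */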
| 350 | static __always_inline unsigned int kvm_vcpu_dabt_get_as(const struct kvm_vcpu *vcpu) |
| 351 | { |
| 352 | return 1 << ((kvm_vcpu_get_esr(vcpu) & ESR_ELx_SAS) >> ESR_ELx_SAS_SHIFT); |
| 353 | } |
| 354 | |
| 355 | /* This one is not specific to Data Abort */ |
| 356 | static __always_inline bool kvm_vcpu_trap_il_is32bit(const struct kvm_vcpu *vcpu) |
| 357 | { |
| 358 | return !!(kvm_vcpu_get_esr(vcpu) & ESR_ELx_IL); |
| 359 | } |
| 360 | |
| 361 | static __always_inline u8 kvm_vcpu_trap_get_class(const struct kvm_vcpu *vcpu) |
| 362 | { |
| 363 | return ESR_ELx_EC(kvm_vcpu_get_esr(vcpu)); |
| 364 | } |
| 365 | |
| 366 | static inline bool kvm_vcpu_trap_is_iabt(const struct kvm_vcpu *vcpu) |
| 367 | { |
| 368 | return kvm_vcpu_trap_get_class(vcpu) == ESR_ELx_EC_IABT_LOW; |
| 369 | } |
| 370 | |
| 371 | static inline bool kvm_vcpu_trap_is_exec_fault(const struct kvm_vcpu *vcpu) |
| 372 | { |
| 373 | return kvm_vcpu_trap_is_iabt(vcpu) && !kvm_vcpu_abt_iss1tw(vcpu); |
| 374 | } |
| 375 | |
| 376 | static __always_inline u8 kvm_vcpu_trap_get_fault(const struct kvm_vcpu *vcpu) |
| 377 | { |
| 378 | return kvm_vcpu_get_esr(vcpu) & ESR_ELx_FSC; |
| 379 | } |
| 380 | |
| 381 | static inline |
| 382 | bool kvm_vcpu_trap_is_permission_fault(const struct kvm_vcpu *vcpu) |
| 383 | { |
| 384 | return esr_fsc_is_permission_fault(kvm_vcpu_get_esr(vcpu)); |
| 385 | } |
| 386 | |
| 387 | static inline |
| 388 | bool kvm_vcpu_trap_is_translation_fault(const struct kvm_vcpu *vcpu) |
| 389 | { |
| 390 | return esr_fsc_is_translation_fault(kvm_vcpu_get_esr(vcpu)); |
| 391 | } |
| 392 | |
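/*
 * The low bits of the FSC encode the level the permission fault was
 * taken at, and the granule mapped at that level bounds the faulting
 * region: with 4KiB pages, a level-3 fault covers 4KiB and a level-2
 * fault 2MiB.
 */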
| 393 | static inline |
| 394 | u64 kvm_vcpu_trap_get_perm_fault_granule(const struct kvm_vcpu *vcpu) |
| 395 | { |
| 396 | unsigned long esr = kvm_vcpu_get_esr(vcpu); |
| 397 | |
| 398 | BUG_ON(!esr_fsc_is_permission_fault(esr)); |
| 399 | return BIT(ARM64_HW_PGTABLE_LEVEL_SHIFT(esr & ESR_ELx_FSC_LEVEL)); |
| 400 | } |
| 401 | |
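/*
 * True for every synchronous external abort FSC encoding: plain
 * external aborts, parity/ECC errors, and their stage-1 translation
 * table walk variants at levels -1 to 3.
 */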
| 402 | static __always_inline bool kvm_vcpu_abt_issea(const struct kvm_vcpu *vcpu) |
| 403 | { |
| 404 | switch (kvm_vcpu_trap_get_fault(vcpu)) { |
| 405 | case ESR_ELx_FSC_EXTABT: |
| 406 | case ESR_ELx_FSC_SEA_TTW(-1) ... ESR_ELx_FSC_SEA_TTW(3): |
| 407 | case ESR_ELx_FSC_SECC: |
| 408 | case ESR_ELx_FSC_SECC_TTW(-1) ... ESR_ELx_FSC_SECC_TTW(3): |
| 409 | return true; |
| 410 | default: |
| 411 | return false; |
| 412 | } |
| 413 | } |
| 414 | |
| 415 | static __always_inline int kvm_vcpu_sys_get_rt(struct kvm_vcpu *vcpu) |
| 416 | { |
| 417 | u64 esr = kvm_vcpu_get_esr(vcpu); |
| 418 | return ESR_ELx_SYS64_ISS_RT(esr); |
| 419 | } |
| 420 | |
| 421 | static inline bool kvm_is_write_fault(struct kvm_vcpu *vcpu) |
| 422 | { |
| 423 | if (kvm_vcpu_abt_iss1tw(vcpu)) { |
| 424 | /* |
| 425 | * Only a permission fault on a S1PTW should be |
| 426 | * considered as a write. Otherwise, page tables baked |
| 427 | * in a read-only memslot will result in an exception |
| 428 | * being delivered in the guest. |
| 429 | * |
| 430 | * The drawback is that we end-up faulting twice if the |
| 431 | * guest is using any of HW AF/DB: a translation fault |
| 432 | * to map the page containing the PT (read only at |
| 433 | * first), then a permission fault to allow the flags |
| 434 | * to be set. |
| 435 | */ |
| 436 | return kvm_vcpu_trap_is_permission_fault(vcpu); |
| 437 | } |
| 438 | |
| 439 | if (kvm_vcpu_trap_is_iabt(vcpu)) |
| 440 | return false; |
| 441 | |
| 442 | return kvm_vcpu_dabt_iswrite(vcpu); |
| 443 | } |
| 444 | |
| 445 | static inline unsigned long kvm_vcpu_get_mpidr_aff(struct kvm_vcpu *vcpu) |
| 446 | { |
| 447 | return __vcpu_sys_reg(vcpu, MPIDR_EL1) & MPIDR_HWID_BITMASK; |
| 448 | } |
| 449 | |
| 450 | static inline void kvm_vcpu_set_be(struct kvm_vcpu *vcpu) |
| 451 | { |
| 452 | if (vcpu_mode_is_32bit(vcpu)) { |
| 453 | *vcpu_cpsr(vcpu) |= PSR_AA32_E_BIT; |
| 454 | } else { |
| 455 | u64 sctlr = vcpu_read_sys_reg(vcpu, SCTLR_EL1); |
| 456 | sctlr |= SCTLR_ELx_EE; |
| 457 | vcpu_write_sys_reg(vcpu, sctlr, SCTLR_EL1); |
| 458 | } |
| 459 | } |
| 460 | |
| 461 | static inline bool kvm_vcpu_is_be(struct kvm_vcpu *vcpu) |
| 462 | { |
| 463 | if (vcpu_mode_is_32bit(vcpu)) |
| 464 | return !!(*vcpu_cpsr(vcpu) & PSR_AA32_E_BIT); |
| 465 | |
| 466 | if (vcpu_mode_priv(vcpu)) |
| 467 | return !!(vcpu_read_sys_reg(vcpu, SCTLR_EL1) & SCTLR_ELx_EE); |
| 468 | else |
| 469 | return !!(vcpu_read_sys_reg(vcpu, SCTLR_EL1) & SCTLR_EL1_E0E); |
| 470 | } |
| 471 | |
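/*
 * For an MMIO store, turn the value taken from a guest register into
 * the byte order the host works with, based on the guest's current
 * endianness; vcpu_data_host_to_guest() below performs the inverse
 * conversion on the load path.
 */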
| 472 | static inline unsigned long vcpu_data_guest_to_host(struct kvm_vcpu *vcpu, |
| 473 | unsigned long data, |
| 474 | unsigned int len) |
| 475 | { |
| 476 | if (kvm_vcpu_is_be(vcpu)) { |
| 477 | switch (len) { |
| 478 | case 1: |
| 479 | return data & 0xff; |
| 480 | case 2: |
| 481 | return be16_to_cpu(data & 0xffff); |
| 482 | case 4: |
| 483 | return be32_to_cpu(data & 0xffffffff); |
| 484 | default: |
| 485 | return be64_to_cpu(data); |
| 486 | } |
| 487 | } else { |
| 488 | switch (len) { |
| 489 | case 1: |
| 490 | return data & 0xff; |
| 491 | case 2: |
| 492 | return le16_to_cpu(data & 0xffff); |
| 493 | case 4: |
| 494 | return le32_to_cpu(data & 0xffffffff); |
| 495 | default: |
| 496 | return le64_to_cpu(data); |
| 497 | } |
| 498 | } |
| 499 | |
	return data;	/* not reachable: all lengths are handled above */
| 501 | } |
| 502 | |
| 503 | static inline unsigned long vcpu_data_host_to_guest(struct kvm_vcpu *vcpu, |
| 504 | unsigned long data, |
| 505 | unsigned int len) |
| 506 | { |
| 507 | if (kvm_vcpu_is_be(vcpu)) { |
| 508 | switch (len) { |
| 509 | case 1: |
| 510 | return data & 0xff; |
| 511 | case 2: |
| 512 | return cpu_to_be16(data & 0xffff); |
| 513 | case 4: |
| 514 | return cpu_to_be32(data & 0xffffffff); |
| 515 | default: |
| 516 | return cpu_to_be64(data); |
| 517 | } |
| 518 | } else { |
| 519 | switch (len) { |
| 520 | case 1: |
| 521 | return data & 0xff; |
| 522 | case 2: |
| 523 | return cpu_to_le16(data & 0xffff); |
| 524 | case 4: |
| 525 | return cpu_to_le32(data & 0xffffffff); |
| 526 | default: |
| 527 | return cpu_to_le64(data); |
| 528 | } |
| 529 | } |
| 530 | |
	return data;	/* not reachable: all lengths are handled above */
| 532 | } |
| 533 | |
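/*
 * Incrementing the guest PC and pending an exception are mutually
 * exclusive: both are resolved on the next guest entry, and only one
 * of the two can apply. The WARN_ONs catch callers mixing them.
 */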
| 534 | static __always_inline void kvm_incr_pc(struct kvm_vcpu *vcpu) |
| 535 | { |
| 536 | WARN_ON(vcpu_get_flag(vcpu, PENDING_EXCEPTION)); |
| 537 | vcpu_set_flag(vcpu, INCREMENT_PC); |
| 538 | } |
| 539 | |
| 540 | #define kvm_pend_exception(v, e) \ |
| 541 | do { \ |
| 542 | WARN_ON(vcpu_get_flag((v), INCREMENT_PC)); \ |
| 543 | vcpu_set_flag((v), PENDING_EXCEPTION); \ |
| 544 | vcpu_set_flag((v), e); \ |
| 545 | } while (0) |
| 546 | |
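/*
 * Compile-time check that a constant mask covers either none or all of
 * a multi-bit field (such as the two bits of CPACR_ELx_FPEN), so that
 * no field can be updated by halves.
 */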
| 547 | #define __build_check_all_or_none(r, bits) \ |
| 548 | BUILD_BUG_ON(((r) & (bits)) && ((r) & (bits)) != (bits)) |
| 549 | |
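/*
 * CPACR_EL1 bits are "set to enable" while nVHE CPTR_EL2 bits are "set
 * to trap", so enabling a facility in CPACR terms means clearing its
 * trap bit in CPTR_EL2 and vice versa; hence the deliberately crossed
 * use of (set) and (clr) in the two helpers below.
 */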
| 550 | #define __cpacr_to_cptr_clr(clr, set) \ |
| 551 | ({ \ |
| 552 | u64 cptr = 0; \ |
| 553 | \ |
| 554 | if ((set) & CPACR_ELx_FPEN) \ |
| 555 | cptr |= CPTR_EL2_TFP; \ |
| 556 | if ((set) & CPACR_ELx_ZEN) \ |
| 557 | cptr |= CPTR_EL2_TZ; \ |
| 558 | if ((set) & CPACR_ELx_SMEN) \ |
| 559 | cptr |= CPTR_EL2_TSM; \ |
| 560 | if ((clr) & CPACR_ELx_TTA) \ |
| 561 | cptr |= CPTR_EL2_TTA; \ |
| 562 | if ((clr) & CPTR_EL2_TAM) \ |
| 563 | cptr |= CPTR_EL2_TAM; \ |
| 564 | if ((clr) & CPTR_EL2_TCPAC) \ |
| 565 | cptr |= CPTR_EL2_TCPAC; \ |
| 566 | \ |
| 567 | cptr; \ |
| 568 | }) |
| 569 | |
| 570 | #define __cpacr_to_cptr_set(clr, set) \ |
| 571 | ({ \ |
| 572 | u64 cptr = 0; \ |
| 573 | \ |
| 574 | if ((clr) & CPACR_ELx_FPEN) \ |
| 575 | cptr |= CPTR_EL2_TFP; \ |
| 576 | if ((clr) & CPACR_ELx_ZEN) \ |
| 577 | cptr |= CPTR_EL2_TZ; \ |
| 578 | if ((clr) & CPACR_ELx_SMEN) \ |
| 579 | cptr |= CPTR_EL2_TSM; \ |
| 580 | if ((set) & CPACR_ELx_TTA) \ |
| 581 | cptr |= CPTR_EL2_TTA; \ |
| 582 | if ((set) & CPTR_EL2_TAM) \ |
| 583 | cptr |= CPTR_EL2_TAM; \ |
| 584 | if ((set) & CPTR_EL2_TCPAC) \ |
| 585 | cptr |= CPTR_EL2_TCPAC; \ |
| 586 | \ |
| 587 | cptr; \ |
| 588 | }) |
| 589 | |
| 590 | #define cpacr_clear_set(clr, set) \ |
| 591 | do { \ |
| 592 | BUILD_BUG_ON((set) & CPTR_VHE_EL2_RES0); \ |
| 593 | BUILD_BUG_ON((clr) & CPACR_ELx_E0POE); \ |
| 594 | __build_check_all_or_none((clr), CPACR_ELx_FPEN); \ |
| 595 | __build_check_all_or_none((set), CPACR_ELx_FPEN); \ |
| 596 | __build_check_all_or_none((clr), CPACR_ELx_ZEN); \ |
| 597 | __build_check_all_or_none((set), CPACR_ELx_ZEN); \ |
| 598 | __build_check_all_or_none((clr), CPACR_ELx_SMEN); \ |
| 599 | __build_check_all_or_none((set), CPACR_ELx_SMEN); \ |
| 600 | \ |
| 601 | if (has_vhe() || has_hvhe()) \ |
| 602 | sysreg_clear_set(cpacr_el1, clr, set); \ |
| 603 | else \ |
| 604 | sysreg_clear_set(cptr_el2, \ |
| 605 | __cpacr_to_cptr_clr(clr, set), \ |
| 606 | __cpacr_to_cptr_set(clr, set));\ |
| 607 | } while (0) |
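/*
 * Illustrative use (not a new interface): cpacr_clear_set(0, CPACR_ELx_FPEN)
 * grants FP/SIMD access whichever of VHE, hVHE or nVHE is in use, being
 * translated into a clear of CPTR_EL2.TFP on nVHE.
 */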
| 608 | |
| 609 | static __always_inline void kvm_write_cptr_el2(u64 val) |
| 610 | { |
| 611 | if (has_vhe() || has_hvhe()) |
| 612 | write_sysreg(val, cpacr_el1); |
| 613 | else |
| 614 | write_sysreg(val, cptr_el2); |
| 615 | } |
| 616 | |
| 617 | static __always_inline u64 kvm_get_reset_cptr_el2(struct kvm_vcpu *vcpu) |
| 618 | { |
| 619 | u64 val; |
| 620 | |
| 621 | if (has_vhe()) { |
| 622 | val = (CPACR_ELx_FPEN | CPACR_EL1_ZEN_EL1EN); |
| 623 | if (cpus_have_final_cap(ARM64_SME)) |
| 624 | val |= CPACR_EL1_SMEN_EL1EN; |
| 625 | } else if (has_hvhe()) { |
| 626 | val = CPACR_ELx_FPEN; |
| 627 | |
| 628 | if (!vcpu_has_sve(vcpu) || !guest_owns_fp_regs()) |
| 629 | val |= CPACR_ELx_ZEN; |
| 630 | if (cpus_have_final_cap(ARM64_SME)) |
| 631 | val |= CPACR_ELx_SMEN; |
| 632 | } else { |
| 633 | val = CPTR_NVHE_EL2_RES1; |
| 634 | |
| 635 | if (vcpu_has_sve(vcpu) && guest_owns_fp_regs()) |
| 636 | val |= CPTR_EL2_TZ; |
| 637 | if (cpus_have_final_cap(ARM64_SME)) |
| 638 | val &= ~CPTR_EL2_TSM; |
| 639 | } |
| 640 | |
| 641 | return val; |
| 642 | } |
| 643 | |
| 644 | static __always_inline void kvm_reset_cptr_el2(struct kvm_vcpu *vcpu) |
| 645 | { |
| 646 | u64 val = kvm_get_reset_cptr_el2(vcpu); |
| 647 | |
| 648 | kvm_write_cptr_el2(val); |
| 649 | } |
| 650 | |
| 651 | /* |
| 652 | * Returns a 'sanitised' view of CPTR_EL2, translating from nVHE to the VHE |
| 653 | * format if E2H isn't set. |
| 654 | */ |
| 655 | static inline u64 vcpu_sanitised_cptr_el2(const struct kvm_vcpu *vcpu) |
| 656 | { |
| 657 | u64 cptr = __vcpu_sys_reg(vcpu, CPTR_EL2); |
| 658 | |
| 659 | if (!vcpu_el2_e2h_is_set(vcpu)) |
| 660 | cptr = translate_cptr_el2_to_cpacr_el1(cptr); |
| 661 | |
| 662 | return cptr; |
| 663 | } |
| 664 | |
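/*
 * CPACR_ELx.xEN, as seen by a guest hypervisor: 0b00 and 0b10 trap at
 * every EL, 0b11 traps nowhere, and 0b01 only traps EL0 accesses, which
 * for the guest's own CPTR matters when it runs 'InHost' (E2H+TGE) with
 * the vcpu currently at EL0.
 */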
| 665 | static inline bool ____cptr_xen_trap_enabled(const struct kvm_vcpu *vcpu, |
| 666 | unsigned int xen) |
| 667 | { |
| 668 | switch (xen) { |
| 669 | case 0b00: |
| 670 | case 0b10: |
| 671 | return true; |
| 672 | case 0b01: |
| 673 | return vcpu_el2_tge_is_set(vcpu) && !vcpu_is_el2(vcpu); |
| 674 | case 0b11: |
| 675 | default: |
| 676 | return false; |
| 677 | } |
| 678 | } |
| 679 | |
| 680 | #define __guest_hyp_cptr_xen_trap_enabled(vcpu, xen) \ |
| 681 | (!vcpu_has_nv(vcpu) ? false : \ |
| 682 | ____cptr_xen_trap_enabled(vcpu, \ |
| 683 | SYS_FIELD_GET(CPACR_ELx, xen, \ |
| 684 | vcpu_sanitised_cptr_el2(vcpu)))) |
| 685 | |
| 686 | static inline bool guest_hyp_fpsimd_traps_enabled(const struct kvm_vcpu *vcpu) |
| 687 | { |
| 688 | return __guest_hyp_cptr_xen_trap_enabled(vcpu, FPEN); |
| 689 | } |
| 690 | |
| 691 | static inline bool guest_hyp_sve_traps_enabled(const struct kvm_vcpu *vcpu) |
| 692 | { |
| 693 | return __guest_hyp_cptr_xen_trap_enabled(vcpu, ZEN); |
| 694 | } |
| 695 | |
| 696 | #endif /* __ARM64_KVM_EMULATE_H__ */ |