// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (C) 2012,2013 - ARM Ltd
 * Author: Marc Zyngier <marc.zyngier@arm.com>
 *
 * Derived from arch/arm/kvm/coproc.c:
 * Copyright (C) 2012 - Virtual Open Systems and Columbia University
 * Authors: Rusty Russell <rusty@rustcorp.com.au>
 *          Christoffer Dall <c.dall@virtualopensystems.com>
 */
#include <linux/bitfield.h>
#include <linux/bsearch.h>
#include <linux/cacheinfo.h>
#include <linux/debugfs.h>
#include <linux/kvm_host.h>
#include <linux/mm.h>
#include <linux/printk.h>
#include <linux/uaccess.h>
#include <linux/irqchip/arm-gic-v3.h>

#include <asm/arm_pmuv3.h>
#include <asm/cacheflush.h>
#include <asm/cputype.h>
#include <asm/debug-monitors.h>
#include <asm/esr.h>
#include <asm/kvm_arm.h>
#include <asm/kvm_emulate.h>
#include <asm/kvm_hyp.h>
#include <asm/kvm_mmu.h>
#include <asm/kvm_nested.h>
#include <asm/perf_event.h>
#include <asm/sysreg.h>

#include <trace/events/kvm.h>

#include "sys_regs.h"
#include "vgic/vgic.h"
/*
 * For AArch32, we only take care of what is being trapped. Anything
 * that has to do with init and userspace access has to go via the
 * 64bit interface.
 */

static u64 sys_reg_to_index(const struct sys_reg_desc *reg);
static int set_id_reg(struct kvm_vcpu *vcpu, const struct sys_reg_desc *rd,
		      u64 val);
static bool undef_access(struct kvm_vcpu *vcpu, struct sys_reg_params *p,
			 const struct sys_reg_desc *r)
{
	kvm_inject_undefined(vcpu);
	return false;
}
static bool bad_trap(struct kvm_vcpu *vcpu,
		     struct sys_reg_params *params,
		     const struct sys_reg_desc *r,
		     const char *msg)
{
	WARN_ONCE(1, "Unexpected %s\n", msg);
	print_sys_reg_instr(params);
	return undef_access(vcpu, params, r);
}
static bool read_from_write_only(struct kvm_vcpu *vcpu,
				 struct sys_reg_params *params,
				 const struct sys_reg_desc *r)
{
	return bad_trap(vcpu, params, r,
			"sys_reg read to write-only register");
}

static bool write_to_read_only(struct kvm_vcpu *vcpu,
			       struct sys_reg_params *params,
			       const struct sys_reg_desc *r)
{
	return bad_trap(vcpu, params, r,
			"sys_reg write to read-only register");
}
#define PURE_EL2_SYSREG(el2)						\
	case el2: {							\
		*el1r = el2;						\
		return true;						\
	}

#define MAPPED_EL2_SYSREG(el2, el1, fn)					\
	case el2: {							\
		*xlate = fn;						\
		*el1r = el1;						\
		return true;						\
	}
static bool get_el2_to_el1_mapping(unsigned int reg,
				   unsigned int *el1r, u64 (**xlate)(u64))
{
	switch (reg) {
		PURE_EL2_SYSREG(  VPIDR_EL2	);
		PURE_EL2_SYSREG(  VMPIDR_EL2	);
		PURE_EL2_SYSREG(  ACTLR_EL2	);
		PURE_EL2_SYSREG(  HCR_EL2	);
		PURE_EL2_SYSREG(  MDCR_EL2	);
		PURE_EL2_SYSREG(  HSTR_EL2	);
		PURE_EL2_SYSREG(  HACR_EL2	);
		PURE_EL2_SYSREG(  VTTBR_EL2	);
		PURE_EL2_SYSREG(  VTCR_EL2	);
		PURE_EL2_SYSREG(  RVBAR_EL2	);
		PURE_EL2_SYSREG(  TPIDR_EL2	);
		PURE_EL2_SYSREG(  HPFAR_EL2	);
		PURE_EL2_SYSREG(  HCRX_EL2	);
		PURE_EL2_SYSREG(  HFGRTR_EL2	);
		PURE_EL2_SYSREG(  HFGWTR_EL2	);
		PURE_EL2_SYSREG(  HFGITR_EL2	);
		PURE_EL2_SYSREG(  HDFGRTR_EL2	);
		PURE_EL2_SYSREG(  HDFGWTR_EL2	);
		PURE_EL2_SYSREG(  HAFGRTR_EL2	);
		PURE_EL2_SYSREG(  CNTVOFF_EL2	);
		PURE_EL2_SYSREG(  CNTHCTL_EL2	);
		MAPPED_EL2_SYSREG(SCTLR_EL2,   SCTLR_EL1,
				  translate_sctlr_el2_to_sctlr_el1	     );
		MAPPED_EL2_SYSREG(CPTR_EL2,    CPACR_EL1,
				  translate_cptr_el2_to_cpacr_el1	     );
		MAPPED_EL2_SYSREG(TTBR0_EL2,   TTBR0_EL1,
				  translate_ttbr0_el2_to_ttbr0_el1	     );
		MAPPED_EL2_SYSREG(TTBR1_EL2,   TTBR1_EL1,   NULL	     );
		MAPPED_EL2_SYSREG(TCR_EL2,     TCR_EL1,
				  translate_tcr_el2_to_tcr_el1		     );
		MAPPED_EL2_SYSREG(VBAR_EL2,    VBAR_EL1,    NULL	     );
		MAPPED_EL2_SYSREG(AFSR0_EL2,   AFSR0_EL1,   NULL	     );
		MAPPED_EL2_SYSREG(AFSR1_EL2,   AFSR1_EL1,   NULL	     );
		MAPPED_EL2_SYSREG(ESR_EL2,     ESR_EL1,     NULL	     );
		MAPPED_EL2_SYSREG(FAR_EL2,     FAR_EL1,     NULL	     );
		MAPPED_EL2_SYSREG(MAIR_EL2,    MAIR_EL1,    NULL	     );
		MAPPED_EL2_SYSREG(TCR2_EL2,    TCR2_EL1,    NULL	     );
		MAPPED_EL2_SYSREG(PIR_EL2,     PIR_EL1,     NULL	     );
		MAPPED_EL2_SYSREG(PIRE0_EL2,   PIRE0_EL1,   NULL	     );
		MAPPED_EL2_SYSREG(POR_EL2,     POR_EL1,     NULL	     );
		MAPPED_EL2_SYSREG(AMAIR_EL2,   AMAIR_EL1,   NULL	     );
		MAPPED_EL2_SYSREG(ELR_EL2,     ELR_EL1,     NULL	     );
		MAPPED_EL2_SYSREG(SPSR_EL2,    SPSR_EL1,    NULL	     );
		MAPPED_EL2_SYSREG(ZCR_EL2,     ZCR_EL1,     NULL	     );
		MAPPED_EL2_SYSREG(CONTEXTIDR_EL2, CONTEXTIDR_EL1, NULL	     );
	default:
		return false;
	}
}
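/*
 * Worked example of the mapping above: a guest-hypervisor access to
 * SCTLR_EL2 resolves to the hardware SCTLR_EL1 copy, with
 * translate_sctlr_el2_to_sctlr_el1() massaging the value for non-VHE
 * (E2H=0) guests, while a register such as VTTBR_EL2 has no EL1
 * counterpart and is always serviced from the in-memory EL2 state.
 */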
u64 vcpu_read_sys_reg(const struct kvm_vcpu *vcpu, int reg)
{
	u64 val = 0x8badf00d8badf00d;
	u64 (*xlate)(u64) = NULL;
	unsigned int el1r;

	if (!vcpu_get_flag(vcpu, SYSREGS_ON_CPU))
		goto memory_read;

	if (unlikely(get_el2_to_el1_mapping(reg, &el1r, &xlate))) {
		if (!is_hyp_ctxt(vcpu))
			goto memory_read;

		/*
		 * CNTHCTL_EL2 requires some special treatment to
		 * account for the bits that can be set via CNTKCTL_EL1.
		 */
		switch (reg) {
		case CNTHCTL_EL2:
			if (vcpu_el2_e2h_is_set(vcpu)) {
				val = read_sysreg_el1(SYS_CNTKCTL);
				val &= CNTKCTL_VALID_BITS;
				val |= __vcpu_sys_reg(vcpu, reg) & ~CNTKCTL_VALID_BITS;
				return val;
			}
			break;
		}

		/*
		 * If this register does not have an EL1 counterpart,
		 * then read the stored EL2 version.
		 */
		if (reg == el1r)
			goto memory_read;

		/*
		 * If we have a non-VHE guest and that the sysreg
		 * requires translation to be used at EL1, use the
		 * in-memory copy instead.
		 */
		if (!vcpu_el2_e2h_is_set(vcpu) && xlate)
			goto memory_read;

		/* Get the current version of the EL1 counterpart. */
		WARN_ON(!__vcpu_read_sys_reg_from_cpu(el1r, &val));
		if (reg >= __SANITISED_REG_START__)
			val = kvm_vcpu_apply_reg_masks(vcpu, reg, val);

		return val;
	}

	/* EL1 register can't be on the CPU if the guest is in vEL2. */
	if (unlikely(is_hyp_ctxt(vcpu)))
		goto memory_read;

	if (__vcpu_read_sys_reg_from_cpu(reg, &val))
		return val;

memory_read:
	return __vcpu_sys_reg(vcpu, reg);
}
void vcpu_write_sys_reg(struct kvm_vcpu *vcpu, u64 val, int reg)
{
	u64 (*xlate)(u64) = NULL;
	unsigned int el1r;

	if (!vcpu_get_flag(vcpu, SYSREGS_ON_CPU))
		goto memory_write;

	if (unlikely(get_el2_to_el1_mapping(reg, &el1r, &xlate))) {
		if (!is_hyp_ctxt(vcpu))
			goto memory_write;

		/*
		 * Always store a copy of the write to memory to avoid having
		 * to reverse-translate virtual EL2 system registers for a
		 * non-VHE guest hypervisor.
		 */
		__vcpu_assign_sys_reg(vcpu, reg, val);

		switch (reg) {
		case CNTHCTL_EL2:
			/*
			 * If E2H=0, CNTHCTL_EL2 is a pure shadow register.
			 * Otherwise, some of the bits are backed by
			 * CNTKCTL_EL1, while the rest is kept in memory.
			 * Yes, this is fun stuff.
			 */
			if (vcpu_el2_e2h_is_set(vcpu))
				write_sysreg_el1(val, SYS_CNTKCTL);
			return;
		}

		/* No EL1 counterpart? We're done here. */
		if (reg == el1r)
			return;

		if (!vcpu_el2_e2h_is_set(vcpu) && xlate)
			val = xlate(val);

		/* Redirect this to the EL1 version of the register. */
		WARN_ON(!__vcpu_write_sys_reg_to_cpu(val, el1r));
		return;
	}

	/* EL1 register can't be on the CPU if the guest is in vEL2. */
	if (unlikely(is_hyp_ctxt(vcpu)))
		goto memory_write;

	if (__vcpu_write_sys_reg_to_cpu(val, reg))
		return;

memory_write:
	__vcpu_assign_sys_reg(vcpu, reg, val);
}
/* CSSELR values; used to index KVM_REG_ARM_DEMUX_ID_CCSIDR */
#define CSSELR_MAX 14
/*
 * Returns the minimum line size for the selected cache, expressed as
 * Log2(bytes).
 */
static u8 get_min_cache_line_size(bool icache)
{
	u64 ctr = read_sanitised_ftr_reg(SYS_CTR_EL0);
	u8 field;

	if (icache)
		field = SYS_FIELD_GET(CTR_EL0, IminLine, ctr);
	else
		field = SYS_FIELD_GET(CTR_EL0, DminLine, ctr);

	/*
	 * Cache line size is represented as Log2(words) in CTR_EL0.
	 * Log2(bytes) can be derived with the following:
	 *
	 * Log2(words) + 2 = Log2(bytes / 4) + 2
	 *		   = Log2(bytes) - 2 + 2
	 *		   = Log2(bytes)
	 */
	return field + 2;
}
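/*
 * For instance, a typical CTR_EL0.DminLine of 4 (Log2(words) = 4, i.e.
 * 16 words) yields 4 + 2 = 6, i.e. a 64-byte minimum D-cache line.
 */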
/* Which cache CCSIDR represents depends on CSSELR value. */
static u32 get_ccsidr(struct kvm_vcpu *vcpu, u32 csselr)
{
	u8 line_size;

	if (vcpu->arch.ccsidr)
		return vcpu->arch.ccsidr[csselr];

	line_size = get_min_cache_line_size(csselr & CSSELR_EL1_InD);

	/*
	 * Fabricate a CCSIDR value as the overriding value does not exist.
	 * The real CCSIDR value will not be used as it can vary by the
	 * physical CPU which the vcpu currently resides in.
	 *
	 * The line size is determined with get_min_cache_line_size(), which
	 * should be valid for all CPUs even if they have different cache
	 * configuration.
	 *
	 * The associativity bits are cleared, meaning the geometry of all data
	 * and unified caches (which are guaranteed to be PIPT and thus
	 * non-aliasing) are 1 set and 1 way.
	 * Guests should not be doing cache operations by set/way at all, and
	 * for this reason, we trap them and attempt to infer the intent, so
	 * that we can flush the entire guest's address space at the appropriate
	 * time. The exposed geometry minimizes the number of the traps.
	 * [If guests should attempt to infer aliasing properties from the
	 * geometry (which is not permitted by the architecture), they would
	 * only do so for virtually indexed caches.]
	 *
	 * We don't check if the cache level exists as it is allowed to return
	 * an UNKNOWN value if not.
	 */
	return SYS_FIELD_PREP(CCSIDR_EL1, LineSize, line_size - 4);
}
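/*
 * Continuing the example above: a 64-byte line (line_size = 6) encodes
 * as LineSize = 6 - 4 = 2, since CCSIDR_EL1.LineSize holds
 * Log2(bytes) - 4. All other fields being zero gives the 1-set, 1-way
 * geometry described in the comment.
 */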
static int set_ccsidr(struct kvm_vcpu *vcpu, u32 csselr, u32 val)
{
	u8 line_size = FIELD_GET(CCSIDR_EL1_LineSize, val) + 4;
	u32 *ccsidr = vcpu->arch.ccsidr;
	u32 i;

	if ((val & CCSIDR_EL1_RES0) ||
	    line_size < get_min_cache_line_size(csselr & CSSELR_EL1_InD))
		return -EINVAL;

	if (!ccsidr) {
		if (val == get_ccsidr(vcpu, csselr))
			return 0;

		ccsidr = kmalloc_array(CSSELR_MAX, sizeof(u32), GFP_KERNEL_ACCOUNT);
		if (!ccsidr)
			return -ENOMEM;

		for (i = 0; i < CSSELR_MAX; i++)
			ccsidr[i] = get_ccsidr(vcpu, i);

		vcpu->arch.ccsidr = ccsidr;
	}

	ccsidr[csselr] = val;

	return 0;
}
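/*
 * In other words, userspace may enlarge the advertised line size for a
 * level, but any value below the host minimum (as reported via
 * CTR_EL0) is rejected with -EINVAL, keeping the overridden geometry
 * consistent with the unwritable CTR_EL0 limits the guest also sees.
 */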
static bool access_rw(struct kvm_vcpu *vcpu,
		      struct sys_reg_params *p,
		      const struct sys_reg_desc *r)
{
	if (p->is_write)
		vcpu_write_sys_reg(vcpu, p->regval, r->reg);
	else
		p->regval = vcpu_read_sys_reg(vcpu, r->reg);

	return true;
}
/*
 * See note at ARMv7 ARM B1.14.4 (TL;DR: S/W ops are not easily virtualized).
 */
static bool access_dcsw(struct kvm_vcpu *vcpu,
			struct sys_reg_params *p,
			const struct sys_reg_desc *r)
{
	if (!p->is_write)
		return read_from_write_only(vcpu, p, r);

	/*
	 * Only track S/W ops if we don't have FWB. It still indicates
	 * that the guest is a bit broken (S/W operations should only
	 * be done by firmware, knowing that there is only a single
	 * CPU left in the system, and certainly not from non-secure
	 * software).
	 */
	if (!cpus_have_final_cap(ARM64_HAS_STAGE2_FWB))
		kvm_set_way_flush(vcpu);

	return true;
}
static bool access_dcgsw(struct kvm_vcpu *vcpu,
			 struct sys_reg_params *p,
			 const struct sys_reg_desc *r)
{
	if (!kvm_has_mte(vcpu->kvm))
		return undef_access(vcpu, p, r);

	/* Treat MTE S/W ops as we treat the classic ones: with contempt */
	return access_dcsw(vcpu, p, r);
}
static void get_access_mask(const struct sys_reg_desc *r, u64 *mask, u64 *shift)
{
	switch (r->aarch32_map) {
	case AA32_LO:
		*mask = GENMASK_ULL(31, 0);
		*shift = 0;
		break;
	case AA32_HI:
		*mask = GENMASK_ULL(63, 32);
		*shift = 32;
		break;
	default:
		*mask = GENMASK_ULL(63, 0);
		*shift = 0;
		break;
	}
}
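/*
 * E.g. an AArch32 access mapped to the low half of a 64-bit register
 * (aarch32_map == AA32_LO) yields mask = GENMASK_ULL(31, 0) and
 * shift = 0, so only bits [31:0] of the 64-bit shadow are touched;
 * AA32_HI selects bits [63:32] with shift = 32 instead.
 */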
/*
 * Generic accessor for VM registers. Only called as long as HCR_TVM
 * is set. If the guest enables the MMU, we stop trapping the VM
 * sys_regs and leave it in complete control of the caches.
 */
static bool access_vm_reg(struct kvm_vcpu *vcpu,
			  struct sys_reg_params *p,
			  const struct sys_reg_desc *r)
{
	bool was_enabled = vcpu_has_cache_enabled(vcpu);
	u64 val, mask, shift;

	BUG_ON(!p->is_write);

	get_access_mask(r, &mask, &shift);

	if (~mask) {
		val = vcpu_read_sys_reg(vcpu, r->reg);
		val &= ~mask;
	} else {
		val = 0;
	}

	val |= (p->regval & (mask >> shift)) << shift;
	vcpu_write_sys_reg(vcpu, val, r->reg);

	kvm_toggle_cache(vcpu, was_enabled);
	return true;
}
static bool access_actlr(struct kvm_vcpu *vcpu,
			 struct sys_reg_params *p,
			 const struct sys_reg_desc *r)
{
	u64 mask, shift;

	if (p->is_write)
		return ignore_write(vcpu, p);

	get_access_mask(r, &mask, &shift);
	p->regval = (vcpu_read_sys_reg(vcpu, r->reg) & mask) >> shift;

	return true;
}
/*
 * Trap handler for the GICv3 SGI generation system register.
 * Forward the request to the VGIC emulation.
 * The cp15_64 code makes sure this automatically works
 * for both AArch64 and AArch32 accesses.
 */
static bool access_gic_sgi(struct kvm_vcpu *vcpu,
			   struct sys_reg_params *p,
			   const struct sys_reg_desc *r)
{
	bool g1;

	if (!kvm_has_gicv3(vcpu->kvm))
		return undef_access(vcpu, p, r);

	if (!p->is_write)
		return read_from_write_only(vcpu, p, r);

	/*
	 * In a system where GICD_CTLR.DS=1, a ICC_SGI0R_EL1 access generates
	 * Group0 SGIs only, while ICC_SGI1R_EL1 can generate either group,
	 * depending on the SGI configuration. ICC_ASGI1R_EL1 is effectively
	 * equivalent to ICC_SGI0R_EL1, as there is no "alternative" secure
	 * group.
	 */
	if (p->Op0 == 0) {		/* AArch32 */
		switch (p->Op1) {
		default:		/* Keep GCC quiet */
		case 0:			/* ICC_SGI1R */
			g1 = true;
			break;
		case 1:			/* ICC_ASGI1R */
		case 2:			/* ICC_SGI0R */
			g1 = false;
			break;
		}
	} else {			/* AArch64 */
		switch (p->Op2) {
		default:		/* Keep GCC quiet */
		case 5:			/* ICC_SGI1R_EL1 */
			g1 = true;
			break;
		case 6:			/* ICC_ASGI1R_EL1 */
		case 7:			/* ICC_SGI0R_EL1 */
			g1 = false;
			break;
		}
	}

	vgic_v3_dispatch_sgi(vcpu, p->regval, g1);

	return true;
}
static bool access_gic_sre(struct kvm_vcpu *vcpu,
			   struct sys_reg_params *p,
			   const struct sys_reg_desc *r)
{
	if (!kvm_has_gicv3(vcpu->kvm))
		return undef_access(vcpu, p, r);

	if (p->is_write)
		return ignore_write(vcpu, p);

	if (p->Op1 == 4) {	/* ICC_SRE_EL2 */
		p->regval = (ICC_SRE_EL2_ENABLE | ICC_SRE_EL2_SRE |
			     ICC_SRE_EL1_DIB | ICC_SRE_EL1_DFB);
	} else {		/* ICC_SRE_EL1 */
		p->regval = vcpu->arch.vgic_cpu.vgic_v3.vgic_sre;
	}

	return true;
}
static bool trap_raz_wi(struct kvm_vcpu *vcpu,
			struct sys_reg_params *p,
			const struct sys_reg_desc *r)
{
	if (p->is_write)
		return ignore_write(vcpu, p);
	else
		return read_zero(vcpu, p);
}
/*
 * ARMv8.1 mandates at least a trivial LORegion implementation, where all the
 * RW registers are RES0 (which we can implement as RAZ/WI). On an ARMv8.0
 * system, these registers should UNDEF. LORID_EL1 being a RO register, we
 * treat it separately.
 */
static bool trap_loregion(struct kvm_vcpu *vcpu,
			  struct sys_reg_params *p,
			  const struct sys_reg_desc *r)
{
	u32 sr = reg_to_encoding(r);

	if (!kvm_has_feat(vcpu->kvm, ID_AA64MMFR1_EL1, LO, IMP))
		return undef_access(vcpu, p, r);

	if (p->is_write && sr == SYS_LORID_EL1)
		return write_to_read_only(vcpu, p, r);

	return trap_raz_wi(vcpu, p, r);
}
static bool trap_oslar_el1(struct kvm_vcpu *vcpu,
			   struct sys_reg_params *p,
			   const struct sys_reg_desc *r)
{
	if (!p->is_write)
		return read_from_write_only(vcpu, p, r);

	kvm_debug_handle_oslar(vcpu, p->regval);
	return true;
}
static bool trap_oslsr_el1(struct kvm_vcpu *vcpu,
			   struct sys_reg_params *p,
			   const struct sys_reg_desc *r)
{
	if (p->is_write)
		return write_to_read_only(vcpu, p, r);

	p->regval = __vcpu_sys_reg(vcpu, r->reg);
	return true;
}
static int set_oslsr_el1(struct kvm_vcpu *vcpu, const struct sys_reg_desc *rd,
			 u64 val)
{
	/*
	 * The only modifiable bit is the OSLK bit. Refuse the write if
	 * userspace attempts to change any other bit in the register.
	 */
	if ((val ^ rd->val) & ~OSLSR_EL1_OSLK)
		return -EINVAL;

	__vcpu_assign_sys_reg(vcpu, rd->reg, val);
	return 0;
}
static bool trap_dbgauthstatus_el1(struct kvm_vcpu *vcpu,
				   struct sys_reg_params *p,
				   const struct sys_reg_desc *r)
{
	if (p->is_write) {
		return ignore_write(vcpu, p);
	} else {
		p->regval = read_sysreg(dbgauthstatus_el1);
		return true;
	}
}
static bool trap_debug_regs(struct kvm_vcpu *vcpu,
			    struct sys_reg_params *p,
			    const struct sys_reg_desc *r)
{
	access_rw(vcpu, p, r);

	kvm_debug_set_guest_ownership(vcpu);
	return true;
}
/*
 * reg_to_dbg/dbg_to_reg
 *
 * A 32 bit write to a debug register leaves the top bits alone.
 * A 32 bit read from a debug register only returns the bottom bits.
 */
static void reg_to_dbg(struct kvm_vcpu *vcpu,
		       struct sys_reg_params *p,
		       const struct sys_reg_desc *rd,
		       u64 *dbg_reg)
{
	u64 mask, shift, val;

	get_access_mask(rd, &mask, &shift);

	val = *dbg_reg;
	val &= ~mask;
	val |= (p->regval & (mask >> shift)) << shift;
	*dbg_reg = val;
}

static void dbg_to_reg(struct kvm_vcpu *vcpu,
		       struct sys_reg_params *p,
		       const struct sys_reg_desc *rd,
		       u64 *dbg_reg)
{
	u64 mask, shift;

	get_access_mask(rd, &mask, &shift);
	p->regval = (*dbg_reg & mask) >> shift;
}
static u64 *demux_wb_reg(struct kvm_vcpu *vcpu, const struct sys_reg_desc *rd)
{
	struct kvm_guest_debug_arch *dbg = &vcpu->arch.vcpu_debug_state;

	switch (rd->Op2) {
	case 0b100:
		return &dbg->dbg_bvr[rd->CRm];
	case 0b101:
		return &dbg->dbg_bcr[rd->CRm];
	case 0b110:
		return &dbg->dbg_wvr[rd->CRm];
	case 0b111:
		return &dbg->dbg_wcr[rd->CRm];
	default:
		KVM_BUG_ON(1, vcpu->kvm);
		return NULL;
	}
}
static bool trap_dbg_wb_reg(struct kvm_vcpu *vcpu, struct sys_reg_params *p,
			    const struct sys_reg_desc *rd)
{
	u64 *reg = demux_wb_reg(vcpu, rd);

	if (!reg)
		return false;

	if (p->is_write)
		reg_to_dbg(vcpu, p, rd, reg);
	else
		dbg_to_reg(vcpu, p, rd, reg);

	kvm_debug_set_guest_ownership(vcpu);
	return true;
}
static int set_dbg_wb_reg(struct kvm_vcpu *vcpu, const struct sys_reg_desc *rd,
			  u64 val)
{
	u64 *reg = demux_wb_reg(vcpu, rd);

	if (!reg)
		return -EINVAL;

	*reg = val;
	return 0;
}

static int get_dbg_wb_reg(struct kvm_vcpu *vcpu, const struct sys_reg_desc *rd,
			  u64 *val)
{
	u64 *reg = demux_wb_reg(vcpu, rd);

	if (!reg)
		return -EINVAL;

	*val = *reg;
	return 0;
}

static u64 reset_dbg_wb_reg(struct kvm_vcpu *vcpu, const struct sys_reg_desc *rd)
{
	u64 *reg = demux_wb_reg(vcpu, rd);

	/*
	 * Bail early if we couldn't find storage for the register, the
	 * KVM_BUG_ON() in demux_wb_reg() will prevent this VM from ever
	 * being run.
	 */
	if (!reg)
		return 0;

	*reg = rd->val;
	return rd->val;
}
static u64 reset_amair_el1(struct kvm_vcpu *vcpu, const struct sys_reg_desc *r)
{
	u64 amair = read_sysreg(amair_el1);

	vcpu_write_sys_reg(vcpu, amair, AMAIR_EL1);
	return amair;
}

static u64 reset_actlr(struct kvm_vcpu *vcpu, const struct sys_reg_desc *r)
{
	u64 actlr = read_sysreg(actlr_el1);

	vcpu_write_sys_reg(vcpu, actlr, ACTLR_EL1);
	return actlr;
}
static u64 reset_mpidr(struct kvm_vcpu *vcpu, const struct sys_reg_desc *r)
{
	u64 mpidr;

	/*
	 * Map the vcpu_id into the first three affinity level fields of
	 * the MPIDR. We limit the number of VCPUs in level 0 due to a
	 * limitation to 16 CPUs in that level in the ICC_SGIxR registers
	 * of the GICv3 to be able to address each CPU directly when
	 * sending IPIs.
	 */
	mpidr = (vcpu->vcpu_id & 0x0f) << MPIDR_LEVEL_SHIFT(0);
	mpidr |= ((vcpu->vcpu_id >> 4) & 0xff) << MPIDR_LEVEL_SHIFT(1);
	mpidr |= ((vcpu->vcpu_id >> 12) & 0xff) << MPIDR_LEVEL_SHIFT(2);
	mpidr |= (1ULL << 31);
	vcpu_write_sys_reg(vcpu, mpidr, MPIDR_EL1);

	return mpidr;
}
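/*
 * Worked example: vcpu_id 300 (0x12c) becomes Aff0 = 0xc,
 * Aff1 = 0x12, Aff2 = 0, so the guest reads MPIDR_EL1 = 0x8000120c
 * once the RES1 bit (31) is set.
 */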
static unsigned int pmu_visibility(const struct kvm_vcpu *vcpu,
				   const struct sys_reg_desc *r)
{
	if (kvm_vcpu_has_pmu(vcpu))
		return 0;

	return REG_HIDDEN;
}
static u64 reset_pmu_reg(struct kvm_vcpu *vcpu, const struct sys_reg_desc *r)
{
	u64 mask = BIT(ARMV8_PMU_CYCLE_IDX);
	u8 n = vcpu->kvm->arch.nr_pmu_counters;

	if (n)
		mask |= GENMASK(n - 1, 0);

	reset_unknown(vcpu, r);
	__vcpu_rmw_sys_reg(vcpu, r->reg, &=, mask);

	return __vcpu_sys_reg(vcpu, r->reg);
}
static u64 reset_pmevcntr(struct kvm_vcpu *vcpu, const struct sys_reg_desc *r)
{
	reset_unknown(vcpu, r);
	__vcpu_rmw_sys_reg(vcpu, r->reg, &=, GENMASK(31, 0));

	return __vcpu_sys_reg(vcpu, r->reg);
}
static u64 reset_pmevtyper(struct kvm_vcpu *vcpu, const struct sys_reg_desc *r)
{
	/* This thing will UNDEF, who cares about the reset value? */
	if (!kvm_vcpu_has_pmu(vcpu))
		return 0;

	reset_unknown(vcpu, r);
	__vcpu_rmw_sys_reg(vcpu, r->reg, &=, kvm_pmu_evtyper_mask(vcpu->kvm));

	return __vcpu_sys_reg(vcpu, r->reg);
}
static u64 reset_pmselr(struct kvm_vcpu *vcpu, const struct sys_reg_desc *r)
{
	reset_unknown(vcpu, r);
	__vcpu_rmw_sys_reg(vcpu, r->reg, &=, PMSELR_EL0_SEL_MASK);

	return __vcpu_sys_reg(vcpu, r->reg);
}
static u64 reset_pmcr(struct kvm_vcpu *vcpu, const struct sys_reg_desc *r)
{
	u64 pmcr = 0;

	if (!kvm_supports_32bit_el0())
		pmcr |= ARMV8_PMU_PMCR_LC;

	/*
	 * The value of PMCR.N field is included when the
	 * vCPU register is read via kvm_vcpu_read_pmcr().
	 */
	__vcpu_assign_sys_reg(vcpu, r->reg, pmcr);

	return __vcpu_sys_reg(vcpu, r->reg);
}
static bool check_pmu_access_disabled(struct kvm_vcpu *vcpu, u64 flags)
{
	u64 reg = __vcpu_sys_reg(vcpu, PMUSERENR_EL0);
	bool enabled = (reg & flags) || vcpu_mode_priv(vcpu);

	if (!enabled)
		kvm_inject_undefined(vcpu);

	return !enabled;
}
static bool pmu_access_el0_disabled(struct kvm_vcpu *vcpu)
{
	return check_pmu_access_disabled(vcpu, ARMV8_PMU_USERENR_EN);
}

static bool pmu_write_swinc_el0_disabled(struct kvm_vcpu *vcpu)
{
	return check_pmu_access_disabled(vcpu, ARMV8_PMU_USERENR_SW | ARMV8_PMU_USERENR_EN);
}

static bool pmu_access_cycle_counter_el0_disabled(struct kvm_vcpu *vcpu)
{
	return check_pmu_access_disabled(vcpu, ARMV8_PMU_USERENR_CR | ARMV8_PMU_USERENR_EN);
}

static bool pmu_access_event_counter_el0_disabled(struct kvm_vcpu *vcpu)
{
	return check_pmu_access_disabled(vcpu, ARMV8_PMU_USERENR_ER | ARMV8_PMU_USERENR_EN);
}
static bool access_pmcr(struct kvm_vcpu *vcpu, struct sys_reg_params *p,
			const struct sys_reg_desc *r)
{
	u64 val;

	if (pmu_access_el0_disabled(vcpu))
		return false;

	if (p->is_write) {
		/*
		 * Only update writeable bits of PMCR (continuing into
		 * kvm_pmu_handle_pmcr() as well)
		 */
		val = kvm_vcpu_read_pmcr(vcpu);
		val &= ~ARMV8_PMU_PMCR_MASK;
		val |= p->regval & ARMV8_PMU_PMCR_MASK;
		if (!kvm_supports_32bit_el0())
			val |= ARMV8_PMU_PMCR_LC;
		kvm_pmu_handle_pmcr(vcpu, val);
	} else {
		/* PMCR.P & PMCR.C are RAZ */
		val = kvm_vcpu_read_pmcr(vcpu)
		      & ~(ARMV8_PMU_PMCR_P | ARMV8_PMU_PMCR_C);
		p->regval = val;
	}

	return true;
}
static bool access_pmselr(struct kvm_vcpu *vcpu, struct sys_reg_params *p,
			  const struct sys_reg_desc *r)
{
	if (pmu_access_event_counter_el0_disabled(vcpu))
		return false;

	if (p->is_write)
		__vcpu_assign_sys_reg(vcpu, PMSELR_EL0, p->regval);
	else
		/* return PMSELR.SEL field */
		p->regval = __vcpu_sys_reg(vcpu, PMSELR_EL0)
			    & PMSELR_EL0_SEL_MASK;

	return true;
}
static bool access_pmceid(struct kvm_vcpu *vcpu, struct sys_reg_params *p,
			  const struct sys_reg_desc *r)
{
	u64 pmceid, mask, shift;

	BUG_ON(p->is_write);

	if (pmu_access_el0_disabled(vcpu))
		return false;

	get_access_mask(r, &mask, &shift);

	pmceid = kvm_pmu_get_pmceid(vcpu, (p->Op2 & 1));
	pmceid &= mask;
	pmceid >>= shift;

	p->regval = pmceid;

	return true;
}
static bool pmu_counter_idx_valid(struct kvm_vcpu *vcpu, u64 idx)
{
	u64 pmcr, val;

	pmcr = kvm_vcpu_read_pmcr(vcpu);
	val = FIELD_GET(ARMV8_PMU_PMCR_N, pmcr);
	if (idx >= val && idx != ARMV8_PMU_CYCLE_IDX) {
		kvm_inject_undefined(vcpu);
		return false;
	}

	return true;
}
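/*
 * E.g. with PMCR_EL0.N = 6, event counter indices 0..5 are valid, as
 * is the cycle counter's fixed index 31 (ARMV8_PMU_CYCLE_IDX); an
 * access to counter 6 UNDEFs.
 */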
static int get_pmu_evcntr(struct kvm_vcpu *vcpu, const struct sys_reg_desc *r,
			  u64 *val)
{
	u64 idx;

	if (r->CRn == 9 && r->CRm == 13 && r->Op2 == 0)
		/* PMCCNTR_EL0 */
		idx = ARMV8_PMU_CYCLE_IDX;
	else
		/* PMEVCNTRn_EL0 */
		idx = ((r->CRm & 3) << 3) | (r->Op2 & 7);

	*val = kvm_pmu_get_counter_value(vcpu, idx);
	return 0;
}

static int set_pmu_evcntr(struct kvm_vcpu *vcpu, const struct sys_reg_desc *r,
			  u64 val)
{
	u64 idx;

	if (r->CRn == 9 && r->CRm == 13 && r->Op2 == 0)
		/* PMCCNTR_EL0 */
		idx = ARMV8_PMU_CYCLE_IDX;
	else
		/* PMEVCNTRn_EL0 */
		idx = ((r->CRm & 3) << 3) | (r->Op2 & 7);

	kvm_pmu_set_counter_value_user(vcpu, idx, val);
	return 0;
}
static bool access_pmu_evcntr(struct kvm_vcpu *vcpu,
			      struct sys_reg_params *p,
			      const struct sys_reg_desc *r)
{
	u64 idx = ~0UL;

	if (r->CRn == 9 && r->CRm == 13) {
		if (r->Op2 == 2) {
			/* PMXEVCNTR_EL0 */
			if (pmu_access_event_counter_el0_disabled(vcpu))
				return false;

			idx = SYS_FIELD_GET(PMSELR_EL0, SEL,
					    __vcpu_sys_reg(vcpu, PMSELR_EL0));
		} else if (r->Op2 == 0) {
			/* PMCCNTR_EL0 */
			if (pmu_access_cycle_counter_el0_disabled(vcpu))
				return false;

			idx = ARMV8_PMU_CYCLE_IDX;
		}
	} else if (r->CRn == 0 && r->CRm == 9) {
		/* PMCCNTR */
		if (pmu_access_event_counter_el0_disabled(vcpu))
			return false;

		idx = ARMV8_PMU_CYCLE_IDX;
	} else if (r->CRn == 14 && (r->CRm & 12) == 8) {
		/* PMEVCNTRn_EL0 */
		if (pmu_access_event_counter_el0_disabled(vcpu))
			return false;

		idx = ((r->CRm & 3) << 3) | (r->Op2 & 7);
	}

	/* Catch any decoding mistake */
	WARN_ON(idx == ~0UL);

	if (!pmu_counter_idx_valid(vcpu, idx))
		return false;

	if (p->is_write) {
		if (pmu_access_el0_disabled(vcpu))
			return false;

		kvm_pmu_set_counter_value(vcpu, idx, p->regval);
	} else {
		p->regval = kvm_pmu_get_counter_value(vcpu, idx);
	}

	return true;
}
static bool access_pmu_evtyper(struct kvm_vcpu *vcpu, struct sys_reg_params *p,
			       const struct sys_reg_desc *r)
{
	u64 idx, reg;

	if (pmu_access_el0_disabled(vcpu))
		return false;

	if (r->CRn == 9 && r->CRm == 13 && r->Op2 == 1) {
		/* PMXEVTYPER_EL0 */
		idx = SYS_FIELD_GET(PMSELR_EL0, SEL, __vcpu_sys_reg(vcpu, PMSELR_EL0));
		reg = PMEVTYPER0_EL0 + idx;
	} else if (r->CRn == 14 && (r->CRm & 12) == 12) {
		idx = ((r->CRm & 3) << 3) | (r->Op2 & 7);
		if (idx == ARMV8_PMU_CYCLE_IDX)
			reg = PMCCFILTR_EL0;
		else
			/* PMEVTYPERn_EL0 */
			reg = PMEVTYPER0_EL0 + idx;
	} else {
		BUG();
	}

	if (!pmu_counter_idx_valid(vcpu, idx))
		return false;

	if (p->is_write) {
		kvm_pmu_set_counter_event_type(vcpu, p->regval, idx);
		kvm_vcpu_pmu_restore_guest(vcpu);
	} else {
		p->regval = __vcpu_sys_reg(vcpu, reg);
	}

	return true;
}
static int set_pmreg(struct kvm_vcpu *vcpu, const struct sys_reg_desc *r, u64 val)
{
	u64 mask = kvm_pmu_accessible_counter_mask(vcpu);

	__vcpu_assign_sys_reg(vcpu, r->reg, val & mask);
	kvm_make_request(KVM_REQ_RELOAD_PMU, vcpu);

	return 0;
}

static int get_pmreg(struct kvm_vcpu *vcpu, const struct sys_reg_desc *r, u64 *val)
{
	u64 mask = kvm_pmu_accessible_counter_mask(vcpu);

	*val = __vcpu_sys_reg(vcpu, r->reg) & mask;
	return 0;
}
static bool access_pmcnten(struct kvm_vcpu *vcpu, struct sys_reg_params *p,
			   const struct sys_reg_desc *r)
{
	u64 val, mask;

	if (pmu_access_el0_disabled(vcpu))
		return false;

	mask = kvm_pmu_accessible_counter_mask(vcpu);
	if (p->is_write) {
		val = p->regval & mask;
		if (r->Op2 & 0x1)
			/* accessing PMCNTENSET_EL0 */
			__vcpu_rmw_sys_reg(vcpu, PMCNTENSET_EL0, |=, val);
		else
			/* accessing PMCNTENCLR_EL0 */
			__vcpu_rmw_sys_reg(vcpu, PMCNTENSET_EL0, &=, ~val);

		kvm_pmu_reprogram_counter_mask(vcpu, val);
	} else {
		p->regval = __vcpu_sys_reg(vcpu, PMCNTENSET_EL0);
	}

	return true;
}
static bool access_pminten(struct kvm_vcpu *vcpu, struct sys_reg_params *p,
			   const struct sys_reg_desc *r)
{
	u64 mask = kvm_pmu_accessible_counter_mask(vcpu);

	if (check_pmu_access_disabled(vcpu, 0))
		return false;

	if (p->is_write) {
		u64 val = p->regval & mask;

		if (r->Op2 & 0x1)
			/* accessing PMINTENSET_EL1 */
			__vcpu_rmw_sys_reg(vcpu, PMINTENSET_EL1, |=, val);
		else
			/* accessing PMINTENCLR_EL1 */
			__vcpu_rmw_sys_reg(vcpu, PMINTENSET_EL1, &=, ~val);
	} else {
		p->regval = __vcpu_sys_reg(vcpu, PMINTENSET_EL1);
	}

	return true;
}
static bool access_pmovs(struct kvm_vcpu *vcpu, struct sys_reg_params *p,
			 const struct sys_reg_desc *r)
{
	u64 mask = kvm_pmu_accessible_counter_mask(vcpu);

	if (pmu_access_el0_disabled(vcpu))
		return false;

	if (p->is_write) {
		if (r->CRm & 0x2)
			/* accessing PMOVSSET_EL0 */
			__vcpu_rmw_sys_reg(vcpu, PMOVSSET_EL0, |=, (p->regval & mask));
		else
			/* accessing PMOVSCLR_EL0 */
			__vcpu_rmw_sys_reg(vcpu, PMOVSSET_EL0, &=, ~(p->regval & mask));
	} else {
		p->regval = __vcpu_sys_reg(vcpu, PMOVSSET_EL0);
	}

	return true;
}
static bool access_pmswinc(struct kvm_vcpu *vcpu, struct sys_reg_params *p,
			   const struct sys_reg_desc *r)
{
	u64 mask;

	if (!p->is_write)
		return read_from_write_only(vcpu, p, r);

	if (pmu_write_swinc_el0_disabled(vcpu))
		return false;

	mask = kvm_pmu_accessible_counter_mask(vcpu);
	kvm_pmu_software_increment(vcpu, p->regval & mask);

	return true;
}
static bool access_pmuserenr(struct kvm_vcpu *vcpu, struct sys_reg_params *p,
			     const struct sys_reg_desc *r)
{
	if (p->is_write) {
		if (!vcpu_mode_priv(vcpu))
			return undef_access(vcpu, p, r);

		__vcpu_assign_sys_reg(vcpu, PMUSERENR_EL0,
				      (p->regval & ARMV8_PMU_USERENR_MASK));
	} else {
		p->regval = __vcpu_sys_reg(vcpu, PMUSERENR_EL0)
			    & ARMV8_PMU_USERENR_MASK;
	}

	return true;
}
static int get_pmcr(struct kvm_vcpu *vcpu, const struct sys_reg_desc *r,
		    u64 *val)
{
	*val = kvm_vcpu_read_pmcr(vcpu);
	return 0;
}
static int set_pmcr(struct kvm_vcpu *vcpu, const struct sys_reg_desc *r,
		    u64 val)
{
	u8 new_n = FIELD_GET(ARMV8_PMU_PMCR_N, val);
	struct kvm *kvm = vcpu->kvm;

	mutex_lock(&kvm->arch.config_lock);

	/*
	 * The vCPU can't have more counters than the PMU hardware
	 * implements. Ignore this error to maintain compatibility
	 * with the existing KVM behavior.
	 */
	if (!kvm_vm_has_ran_once(kvm) &&
	    !vcpu_has_nv(vcpu) &&
	    new_n <= kvm_arm_pmu_get_max_counters(kvm))
		kvm->arch.nr_pmu_counters = new_n;

	mutex_unlock(&kvm->arch.config_lock);

	/*
	 * Ignore writes to RES0 bits, read only bits that are cleared on
	 * vCPU reset, and writable bits that KVM doesn't support yet.
	 * (i.e. only PMCR.N and bits [7:0] are mutable from userspace)
	 * The LP bit is RES0 when FEAT_PMUv3p5 is not supported on the vCPU.
	 * But, we leave the bit as it is here, as the vCPU's PMUver might
	 * be changed later (NOTE: the bit will be cleared on first vCPU run
	 * if necessary).
	 */
	val &= ARMV8_PMU_PMCR_MASK;

	/* The LC bit is RES1 when AArch32 is not supported */
	if (!kvm_supports_32bit_el0())
		val |= ARMV8_PMU_PMCR_LC;

	__vcpu_assign_sys_reg(vcpu, r->reg, val);
	kvm_make_request(KVM_REQ_RELOAD_PMU, vcpu);

	return 0;
}
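/*
 * Note the split handling of PMCR.N above: the field is not kept in the
 * shadow register but in kvm->arch.nr_pmu_counters, and
 * kvm_vcpu_read_pmcr() folds it back in on reads. For example, a
 * userspace write with PMCR.N = 2 on a VM that has not yet run shrinks
 * the number of advertised event counters to 2.
 */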
/* Silly macro to expand the DBG{BCR,BVR,WVR,WCR}n_EL1 registers in one go */
#define DBG_BCR_BVR_WCR_WVR_EL1(n)					\
	{ SYS_DESC(SYS_DBGBVRn_EL1(n)),					\
	  trap_dbg_wb_reg, reset_dbg_wb_reg, 0, 0,			\
	  get_dbg_wb_reg, set_dbg_wb_reg },				\
	{ SYS_DESC(SYS_DBGBCRn_EL1(n)),					\
	  trap_dbg_wb_reg, reset_dbg_wb_reg, 0, 0,			\
	  get_dbg_wb_reg, set_dbg_wb_reg },				\
	{ SYS_DESC(SYS_DBGWVRn_EL1(n)),					\
	  trap_dbg_wb_reg, reset_dbg_wb_reg, 0, 0,			\
	  get_dbg_wb_reg, set_dbg_wb_reg },				\
	{ SYS_DESC(SYS_DBGWCRn_EL1(n)),					\
	  trap_dbg_wb_reg, reset_dbg_wb_reg, 0, 0,			\
	  get_dbg_wb_reg, set_dbg_wb_reg }
#define PMU_SYS_REG(name)						\
	SYS_DESC(SYS_##name), .reset = reset_pmu_reg,			\
	.visibility = pmu_visibility

/* Macro to expand the PMEVCNTRn_EL0 register */
#define PMU_PMEVCNTR_EL0(n)						\
	{ PMU_SYS_REG(PMEVCNTRn_EL0(n)),				\
	  .reset = reset_pmevcntr, .get_user = get_pmu_evcntr,		\
	  .set_user = set_pmu_evcntr,					\
	  .access = access_pmu_evcntr, .reg = (PMEVCNTR0_EL0 + n), }

/* Macro to expand the PMEVTYPERn_EL0 register */
#define PMU_PMEVTYPER_EL0(n)						\
	{ PMU_SYS_REG(PMEVTYPERn_EL0(n)),				\
	  .reset = reset_pmevtyper,					\
	  .access = access_pmu_evtyper, .reg = (PMEVTYPER0_EL0 + n), }

/* Macro to expand the AMU counter and type registers */
#define AMU_AMEVCNTR0_EL0(n) { SYS_DESC(SYS_AMEVCNTR0_EL0(n)), undef_access }
#define AMU_AMEVTYPER0_EL0(n) { SYS_DESC(SYS_AMEVTYPER0_EL0(n)), undef_access }
#define AMU_AMEVCNTR1_EL0(n) { SYS_DESC(SYS_AMEVCNTR1_EL0(n)), undef_access }
#define AMU_AMEVTYPER1_EL0(n) { SYS_DESC(SYS_AMEVTYPER1_EL0(n)), undef_access }
static unsigned int ptrauth_visibility(const struct kvm_vcpu *vcpu,
				       const struct sys_reg_desc *rd)
{
	return vcpu_has_ptrauth(vcpu) ? 0 : REG_HIDDEN;
}

/*
 * If we land here on a PtrAuth access, that is because we didn't
 * fixup the access on exit by allowing the PtrAuth sysregs. The only
 * way this happens is when the guest does not have PtrAuth support
 * enabled.
 */
#define __PTRAUTH_KEY(k)						\
	{ SYS_DESC(SYS_## k), undef_access, reset_unknown, k,		\
	.visibility = ptrauth_visibility}

#define PTRAUTH_KEY(k)							\
	__PTRAUTH_KEY(k ## KEYLO_EL1),					\
	__PTRAUTH_KEY(k ## KEYHI_EL1)
static bool access_arch_timer(struct kvm_vcpu *vcpu,
			      struct sys_reg_params *p,
			      const struct sys_reg_desc *r)
{
	enum kvm_arch_timers tmr;
	enum kvm_arch_timer_regs treg;
	u64 reg = reg_to_encoding(r);

	switch (reg) {
	case SYS_CNTP_TVAL_EL0:
		if (is_hyp_ctxt(vcpu) && vcpu_el2_e2h_is_set(vcpu))
			tmr = TIMER_HPTIMER;
		else
			tmr = TIMER_PTIMER;
		treg = TIMER_REG_TVAL;
		break;

	case SYS_CNTV_TVAL_EL0:
		if (is_hyp_ctxt(vcpu) && vcpu_el2_e2h_is_set(vcpu))
			tmr = TIMER_HVTIMER;
		else
			tmr = TIMER_VTIMER;
		treg = TIMER_REG_TVAL;
		break;

	case SYS_AARCH32_CNTP_TVAL:
	case SYS_CNTP_TVAL_EL02:
		tmr = TIMER_PTIMER;
		treg = TIMER_REG_TVAL;
		break;

	case SYS_CNTV_TVAL_EL02:
		tmr = TIMER_VTIMER;
		treg = TIMER_REG_TVAL;
		break;

	case SYS_CNTHP_TVAL_EL2:
		tmr = TIMER_HPTIMER;
		treg = TIMER_REG_TVAL;
		break;

	case SYS_CNTHV_TVAL_EL2:
		tmr = TIMER_HVTIMER;
		treg = TIMER_REG_TVAL;
		break;

	case SYS_CNTP_CTL_EL0:
		if (is_hyp_ctxt(vcpu) && vcpu_el2_e2h_is_set(vcpu))
			tmr = TIMER_HPTIMER;
		else
			tmr = TIMER_PTIMER;
		treg = TIMER_REG_CTL;
		break;

	case SYS_CNTV_CTL_EL0:
		if (is_hyp_ctxt(vcpu) && vcpu_el2_e2h_is_set(vcpu))
			tmr = TIMER_HVTIMER;
		else
			tmr = TIMER_VTIMER;
		treg = TIMER_REG_CTL;
		break;

	case SYS_AARCH32_CNTP_CTL:
	case SYS_CNTP_CTL_EL02:
		tmr = TIMER_PTIMER;
		treg = TIMER_REG_CTL;
		break;

	case SYS_CNTV_CTL_EL02:
		tmr = TIMER_VTIMER;
		treg = TIMER_REG_CTL;
		break;

	case SYS_CNTHP_CTL_EL2:
		tmr = TIMER_HPTIMER;
		treg = TIMER_REG_CTL;
		break;

	case SYS_CNTHV_CTL_EL2:
		tmr = TIMER_HVTIMER;
		treg = TIMER_REG_CTL;
		break;

	case SYS_CNTP_CVAL_EL0:
		if (is_hyp_ctxt(vcpu) && vcpu_el2_e2h_is_set(vcpu))
			tmr = TIMER_HPTIMER;
		else
			tmr = TIMER_PTIMER;
		treg = TIMER_REG_CVAL;
		break;

	case SYS_CNTV_CVAL_EL0:
		if (is_hyp_ctxt(vcpu) && vcpu_el2_e2h_is_set(vcpu))
			tmr = TIMER_HVTIMER;
		else
			tmr = TIMER_VTIMER;
		treg = TIMER_REG_CVAL;
		break;

	case SYS_AARCH32_CNTP_CVAL:
	case SYS_CNTP_CVAL_EL02:
		tmr = TIMER_PTIMER;
		treg = TIMER_REG_CVAL;
		break;

	case SYS_CNTV_CVAL_EL02:
		tmr = TIMER_VTIMER;
		treg = TIMER_REG_CVAL;
		break;

	case SYS_CNTHP_CVAL_EL2:
		tmr = TIMER_HPTIMER;
		treg = TIMER_REG_CVAL;
		break;

	case SYS_CNTHV_CVAL_EL2:
		tmr = TIMER_HVTIMER;
		treg = TIMER_REG_CVAL;
		break;

	case SYS_CNTPCT_EL0:
	case SYS_CNTPCTSS_EL0:
		if (is_hyp_ctxt(vcpu))
			tmr = TIMER_HPTIMER;
		else
			tmr = TIMER_PTIMER;
		treg = TIMER_REG_CNT;
		break;

	case SYS_AARCH32_CNTPCT:
	case SYS_AARCH32_CNTPCTSS:
		tmr = TIMER_PTIMER;
		treg = TIMER_REG_CNT;
		break;

	case SYS_CNTVCT_EL0:
	case SYS_CNTVCTSS_EL0:
		if (is_hyp_ctxt(vcpu))
			tmr = TIMER_HVTIMER;
		else
			tmr = TIMER_VTIMER;
		treg = TIMER_REG_CNT;
		break;

	case SYS_AARCH32_CNTVCT:
	case SYS_AARCH32_CNTVCTSS:
		tmr = TIMER_VTIMER;
		treg = TIMER_REG_CNT;
		break;

	default:
		print_sys_reg_msg(p, "%s", "Unhandled trapped timer register");
		return undef_access(vcpu, p, r);
	}

	if (p->is_write)
		kvm_arm_timer_write_sysreg(vcpu, tmr, treg, p->regval);
	else
		p->regval = kvm_arm_timer_read_sysreg(vcpu, tmr, treg);

	return true;
}
static bool access_hv_timer(struct kvm_vcpu *vcpu,
			    struct sys_reg_params *p,
			    const struct sys_reg_desc *r)
{
	if (!vcpu_el2_e2h_is_set(vcpu))
		return undef_access(vcpu, p, r);

	return access_arch_timer(vcpu, p, r);
}
static s64 kvm_arm64_ftr_safe_value(u32 id, const struct arm64_ftr_bits *ftrp,
				    s64 new, s64 cur)
{
	struct arm64_ftr_bits kvm_ftr = *ftrp;

	/* Some features have different safe value type in KVM than host features */
	switch (id) {
	case SYS_ID_AA64DFR0_EL1:
		switch (kvm_ftr.shift) {
		case ID_AA64DFR0_EL1_PMUVer_SHIFT:
			kvm_ftr.type = FTR_LOWER_SAFE;
			break;
		case ID_AA64DFR0_EL1_DebugVer_SHIFT:
			kvm_ftr.type = FTR_LOWER_SAFE;
			break;
		}
		break;
	case SYS_ID_DFR0_EL1:
		if (kvm_ftr.shift == ID_DFR0_EL1_PerfMon_SHIFT)
			kvm_ftr.type = FTR_LOWER_SAFE;
		break;
	}

	return arm64_ftr_safe_value(&kvm_ftr, new, cur);
}
/*
 * arm64_check_features() - Check if a feature register value constitutes
 * a subset of features indicated by the idreg's KVM sanitised limit.
 *
 * This function will check if each feature field of @val is the "safe" value
 * against the idreg's KVM sanitised limit returned from the reset() callback.
 * If a field value in @val is the same as the one in limit, it is always
 * considered the safe value regardless. For register fields that are not
 * writable, only the value in limit is considered the safe value.
 *
 * Return: 0 if all the fields are safe. Otherwise, return negative errno.
 */
static int arm64_check_features(struct kvm_vcpu *vcpu,
				const struct sys_reg_desc *rd,
				u64 val)
{
	const struct arm64_ftr_reg *ftr_reg;
	const struct arm64_ftr_bits *ftrp = NULL;
	u32 id = reg_to_encoding(rd);
	u64 writable_mask = rd->val;
	u64 limit = rd->reset(vcpu, rd);
	u64 mask = 0;

	/*
	 * Hidden and unallocated ID registers may not have a corresponding
	 * struct arm64_ftr_reg. Of course, if the register is RAZ we know the
	 * only safe value is 0.
	 */
	if (sysreg_visible_as_raz(vcpu, rd))
		return val ? -E2BIG : 0;

	ftr_reg = get_arm64_ftr_reg(id);
	if (!ftr_reg)
		return -EINVAL;

	ftrp = ftr_reg->ftr_bits;

	for (; ftrp && ftrp->width; ftrp++) {
		s64 f_val, f_lim, safe_val;
		u64 ftr_mask;

		ftr_mask = arm64_ftr_mask(ftrp);
		if ((ftr_mask & writable_mask) != ftr_mask)
			continue;

		f_val = arm64_ftr_value(ftrp, val);
		f_lim = arm64_ftr_value(ftrp, limit);
		mask |= ftr_mask;

		if (f_val == f_lim)
			safe_val = f_val;
		else
			safe_val = kvm_arm64_ftr_safe_value(id, ftrp, f_val, f_lim);

		if (safe_val != f_val)
			return -E2BIG;
	}

	/* For fields that are not writable, values in limit are the safe values. */
	if ((val & ~mask) != (limit & ~mask))
		return -E2BIG;

	return 0;
}
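/*
 * Example outcome: if the sanitised limit reports PMUVer = PMUv3p4 and
 * userspace writes a value claiming PMUv3p5, the PMUVer field fails the
 * FTR_LOWER_SAFE check above and the write is refused with -E2BIG
 * (surfaced to userspace as -EINVAL by set_id_reg()).
 */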
static u8 pmuver_to_perfmon(u8 pmuver)
{
	switch (pmuver) {
	case ID_AA64DFR0_EL1_PMUVer_IMP:
		return ID_DFR0_EL1_PerfMon_PMUv3;
	case ID_AA64DFR0_EL1_PMUVer_IMP_DEF:
		return ID_DFR0_EL1_PerfMon_IMPDEF;
	default:
		/* Anything ARMv8.1+ and NI have the same value. For now. */
		return pmuver;
	}
}
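/*
 * The explicit cases cover the encodings that actually differ:
 * PMUVer_IMP (0b0001) must become PerfMon_PMUv3 (0b0011). From
 * ARMv8.1 PMU onwards the two fields use identical encodings (e.g.
 * 0b0100 for PMUv3p1), as does NI (0b0000), so the default case can
 * pass the value through unchanged.
 */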
static u64 sanitise_id_aa64pfr0_el1(const struct kvm_vcpu *vcpu, u64 val);
static u64 sanitise_id_aa64dfr0_el1(const struct kvm_vcpu *vcpu, u64 val);

/* Read a sanitised cpufeature ID register by sys_reg_desc */
static u64 __kvm_read_sanitised_id_reg(const struct kvm_vcpu *vcpu,
				       const struct sys_reg_desc *r)
{
	u32 id = reg_to_encoding(r);
	u64 val;

	if (sysreg_visible_as_raz(vcpu, r))
		return 0;

	val = read_sanitised_ftr_reg(id);

	switch (id) {
	case SYS_ID_AA64DFR0_EL1:
		val = sanitise_id_aa64dfr0_el1(vcpu, val);
		break;
	case SYS_ID_AA64PFR0_EL1:
		val = sanitise_id_aa64pfr0_el1(vcpu, val);
		break;
	case SYS_ID_AA64PFR1_EL1:
		if (!kvm_has_mte(vcpu->kvm)) {
			val &= ~ARM64_FEATURE_MASK(ID_AA64PFR1_EL1_MTE);
			val &= ~ARM64_FEATURE_MASK(ID_AA64PFR1_EL1_MTE_frac);
		}

		val &= ~ARM64_FEATURE_MASK(ID_AA64PFR1_EL1_SME);
		val &= ~ARM64_FEATURE_MASK(ID_AA64PFR1_EL1_RNDR_trap);
		val &= ~ARM64_FEATURE_MASK(ID_AA64PFR1_EL1_NMI);
		val &= ~ARM64_FEATURE_MASK(ID_AA64PFR1_EL1_GCS);
		val &= ~ARM64_FEATURE_MASK(ID_AA64PFR1_EL1_THE);
		val &= ~ARM64_FEATURE_MASK(ID_AA64PFR1_EL1_MTEX);
		val &= ~ARM64_FEATURE_MASK(ID_AA64PFR1_EL1_DF2);
		val &= ~ARM64_FEATURE_MASK(ID_AA64PFR1_EL1_PFAR);
		val &= ~ARM64_FEATURE_MASK(ID_AA64PFR1_EL1_MPAM_frac);
		break;
	case SYS_ID_AA64PFR2_EL1:
		/* We only expose FPMR */
		val &= ID_AA64PFR2_EL1_FPMR;
		break;
	case SYS_ID_AA64ISAR1_EL1:
		if (!vcpu_has_ptrauth(vcpu))
			val &= ~(ARM64_FEATURE_MASK(ID_AA64ISAR1_EL1_APA) |
				 ARM64_FEATURE_MASK(ID_AA64ISAR1_EL1_API) |
				 ARM64_FEATURE_MASK(ID_AA64ISAR1_EL1_GPA) |
				 ARM64_FEATURE_MASK(ID_AA64ISAR1_EL1_GPI));
		break;
	case SYS_ID_AA64ISAR2_EL1:
		if (!vcpu_has_ptrauth(vcpu))
			val &= ~(ARM64_FEATURE_MASK(ID_AA64ISAR2_EL1_APA3) |
				 ARM64_FEATURE_MASK(ID_AA64ISAR2_EL1_GPA3));
		if (!cpus_have_final_cap(ARM64_HAS_WFXT) ||
		    has_broken_cntvoff())
			val &= ~ARM64_FEATURE_MASK(ID_AA64ISAR2_EL1_WFxT);
		break;
	case SYS_ID_AA64ISAR3_EL1:
		val &= ID_AA64ISAR3_EL1_FPRCVT | ID_AA64ISAR3_EL1_FAMINMAX;
		break;
	case SYS_ID_AA64MMFR2_EL1:
		val &= ~ID_AA64MMFR2_EL1_CCIDX_MASK;
		val &= ~ID_AA64MMFR2_EL1_NV;
		break;
	case SYS_ID_AA64MMFR3_EL1:
		val &= ID_AA64MMFR3_EL1_TCRX | ID_AA64MMFR3_EL1_S1POE |
			ID_AA64MMFR3_EL1_S1PIE;
		break;
	case SYS_ID_MMFR4_EL1:
		val &= ~ARM64_FEATURE_MASK(ID_MMFR4_EL1_CCIDX);
		break;
	}

	if (vcpu_has_nv(vcpu))
		val = limit_nv_id_reg(vcpu->kvm, id, val);

	return val;
}
static u64 kvm_read_sanitised_id_reg(struct kvm_vcpu *vcpu,
				     const struct sys_reg_desc *r)
{
	return __kvm_read_sanitised_id_reg(vcpu, r);
}

static u64 read_id_reg(const struct kvm_vcpu *vcpu, const struct sys_reg_desc *r)
{
	return kvm_read_vm_id_reg(vcpu->kvm, reg_to_encoding(r));
}
static bool is_feature_id_reg(u32 encoding)
{
	return (sys_reg_Op0(encoding) == 3 &&
		(sys_reg_Op1(encoding) < 2 || sys_reg_Op1(encoding) == 3) &&
		sys_reg_CRn(encoding) == 0 &&
		sys_reg_CRm(encoding) <= 7);
}

/*
 * Return true if the register's (Op0, Op1, CRn, CRm, Op2) is
 * (3, 0, 0, crm, op2), where 1<=crm<8, 0<=op2<8, which is the range of ID
 * registers KVM maintains on a per-VM basis.
 *
 * Additionally, the implementation ID registers and CTR_EL0 are handled as
 * per-VM registers.
 */
static inline bool is_vm_ftr_id_reg(u32 id)
{
	switch (id) {
	case SYS_CTR_EL0:
	case SYS_MIDR_EL1:
	case SYS_REVIDR_EL1:
	case SYS_AIDR_EL1:
		return true;
	default:
		return (sys_reg_Op0(id) == 3 && sys_reg_Op1(id) == 0 &&
			sys_reg_CRn(id) == 0 && sys_reg_CRm(id) >= 1 &&
			sys_reg_CRm(id) < 8);
	}
}

static inline bool is_vcpu_ftr_id_reg(u32 id)
{
	return is_feature_id_reg(id) && !is_vm_ftr_id_reg(id);
}

static inline bool is_aa32_id_reg(u32 id)
{
	return (sys_reg_Op0(id) == 3 && sys_reg_Op1(id) == 0 &&
		sys_reg_CRn(id) == 0 && sys_reg_CRm(id) >= 1 &&
		sys_reg_CRm(id) <= 3);
}
static unsigned int id_visibility(const struct kvm_vcpu *vcpu,
				  const struct sys_reg_desc *r)
{
	u32 id = reg_to_encoding(r);

	switch (id) {
	case SYS_ID_AA64ZFR0_EL1:
		if (!vcpu_has_sve(vcpu))
			return REG_RAZ;
		break;
	}

	return 0;
}

static unsigned int aa32_id_visibility(const struct kvm_vcpu *vcpu,
				       const struct sys_reg_desc *r)
{
	/*
	 * AArch32 ID registers are UNKNOWN if AArch32 isn't implemented at any
	 * EL. Promote to RAZ/WI in order to guarantee consistency between
	 * systems.
	 */
	if (!kvm_supports_32bit_el0())
		return REG_RAZ | REG_USER_WI;

	return id_visibility(vcpu, r);
}
static unsigned int raz_visibility(const struct kvm_vcpu *vcpu,
				   const struct sys_reg_desc *r)
{
	return REG_RAZ;
}

/* cpufeature ID register access trap handlers */

static bool access_id_reg(struct kvm_vcpu *vcpu,
			  struct sys_reg_params *p,
			  const struct sys_reg_desc *r)
{
	if (p->is_write)
		return write_to_read_only(vcpu, p, r);

	p->regval = read_id_reg(vcpu, r);
	return true;
}
/* Visibility overrides for SVE-specific control registers */
static unsigned int sve_visibility(const struct kvm_vcpu *vcpu,
				   const struct sys_reg_desc *rd)
{
	if (vcpu_has_sve(vcpu))
		return 0;

	return REG_HIDDEN;
}

static unsigned int sme_visibility(const struct kvm_vcpu *vcpu,
				   const struct sys_reg_desc *rd)
{
	if (kvm_has_feat(vcpu->kvm, ID_AA64PFR1_EL1, SME, IMP))
		return 0;

	return REG_HIDDEN;
}

static unsigned int fp8_visibility(const struct kvm_vcpu *vcpu,
				   const struct sys_reg_desc *rd)
{
	if (kvm_has_fpmr(vcpu->kvm))
		return 0;

	return REG_HIDDEN;
}
static u64 sanitise_id_aa64pfr0_el1(const struct kvm_vcpu *vcpu, u64 val)
{
	if (!vcpu_has_sve(vcpu))
		val &= ~ID_AA64PFR0_EL1_SVE_MASK;

	/*
	 * The default is to expose CSV2 == 1 if the HW isn't affected.
	 * Although this is a per-CPU feature, we make it global because
	 * asymmetric systems are just a nuisance.
	 *
	 * Userspace can override this as long as it doesn't promise
	 * the impossible.
	 */
	if (arm64_get_spectre_v2_state() == SPECTRE_UNAFFECTED) {
		val &= ~ID_AA64PFR0_EL1_CSV2_MASK;
		val |= SYS_FIELD_PREP_ENUM(ID_AA64PFR0_EL1, CSV2, IMP);
	}
	if (arm64_get_meltdown_state() == SPECTRE_UNAFFECTED) {
		val &= ~ID_AA64PFR0_EL1_CSV3_MASK;
		val |= SYS_FIELD_PREP_ENUM(ID_AA64PFR0_EL1, CSV3, IMP);
	}

	if (kvm_vgic_global_state.type == VGIC_V3) {
		val &= ~ID_AA64PFR0_EL1_GIC_MASK;
		val |= SYS_FIELD_PREP_ENUM(ID_AA64PFR0_EL1, GIC, IMP);
	}

	val &= ~ID_AA64PFR0_EL1_AMU_MASK;

	/*
	 * MPAM is disabled by default as KVM also needs a set of PARTID to
	 * program the MPAMVPMx_EL2 PARTID remapping registers with. But some
	 * older kernels let the guest see the ID bit.
	 */
	val &= ~ID_AA64PFR0_EL1_MPAM_MASK;

	return val;
}
static u64 sanitise_id_aa64dfr0_el1(const struct kvm_vcpu *vcpu, u64 val)
{
	val = ID_REG_LIMIT_FIELD_ENUM(val, ID_AA64DFR0_EL1, DebugVer, V8P8);

	/*
	 * Only initialize the PMU version if the vCPU was configured with one.
	 */
	val &= ~ID_AA64DFR0_EL1_PMUVer_MASK;
	if (kvm_vcpu_has_pmu(vcpu))
		val |= SYS_FIELD_PREP(ID_AA64DFR0_EL1, PMUVer,
				      kvm_arm_pmu_get_pmuver_limit());

	/* Hide SPE from guests */
	val &= ~ID_AA64DFR0_EL1_PMSVer_MASK;

	/* Hide BRBE from guests */
	val &= ~ID_AA64DFR0_EL1_BRBE_MASK;

	return val;
}
static int set_id_aa64dfr0_el1(struct kvm_vcpu *vcpu,
			       const struct sys_reg_desc *rd,
			       u64 val)
{
	u8 debugver = SYS_FIELD_GET(ID_AA64DFR0_EL1, DebugVer, val);
	u8 pmuver = SYS_FIELD_GET(ID_AA64DFR0_EL1, PMUVer, val);

	/*
	 * Prior to commit 3d0dba5764b9 ("KVM: arm64: PMU: Move the
	 * ID_AA64DFR0_EL1.PMUver limit to VM creation"), KVM erroneously
	 * exposed an IMP_DEF PMU to userspace and the guest on systems w/
	 * non-architectural PMUs. Of course, PMUv3 is the only game in town for
	 * PMU virtualization, so the IMP_DEF value was rather user-hostile.
	 *
	 * At minimum, we're on the hook to allow values that were given to
	 * userspace by KVM. Cover our tracks here and replace the IMP_DEF value
	 * with a more sensible NI. The value of an ID register changing under
	 * the nose of the guest is unfortunate, but is certainly no more
	 * surprising than an ill-guided PMU driver poking at impdef system
	 * registers that end in an UNDEF...
	 */
	if (pmuver == ID_AA64DFR0_EL1_PMUVer_IMP_DEF)
		val &= ~ID_AA64DFR0_EL1_PMUVer_MASK;

	/*
	 * ID_AA64DFR0_EL1.DebugVer is one of those awkward fields with a
	 * nonzero minimum safe value.
	 */
	if (debugver < ID_AA64DFR0_EL1_DebugVer_IMP)
		return -EINVAL;

	return set_id_reg(vcpu, rd, val);
}
static u64 read_sanitised_id_dfr0_el1(struct kvm_vcpu *vcpu,
				      const struct sys_reg_desc *rd)
{
	u8 perfmon;
	u64 val = read_sanitised_ftr_reg(SYS_ID_DFR0_EL1);

	val &= ~ID_DFR0_EL1_PerfMon_MASK;
	if (kvm_vcpu_has_pmu(vcpu)) {
		perfmon = pmuver_to_perfmon(kvm_arm_pmu_get_pmuver_limit());
		val |= SYS_FIELD_PREP(ID_DFR0_EL1, PerfMon, perfmon);
	}

	val = ID_REG_LIMIT_FIELD_ENUM(val, ID_DFR0_EL1, CopDbg, Debugv8p8);

	return val;
}
static int set_id_dfr0_el1(struct kvm_vcpu *vcpu,
			   const struct sys_reg_desc *rd,
			   u64 val)
{
	u8 perfmon = SYS_FIELD_GET(ID_DFR0_EL1, PerfMon, val);
	u8 copdbg = SYS_FIELD_GET(ID_DFR0_EL1, CopDbg, val);

	if (perfmon == ID_DFR0_EL1_PerfMon_IMPDEF) {
		val &= ~ID_DFR0_EL1_PerfMon_MASK;
		perfmon = 0;
	}

	/*
	 * Allow DFR0_EL1.PerfMon to be set from userspace as long as
	 * it doesn't promise more than what the HW gives us on the
	 * AArch64 side (as everything is emulated with that), and
	 * that this is a PMUv3.
	 */
	if (perfmon != 0 && perfmon < ID_DFR0_EL1_PerfMon_PMUv3)
		return -EINVAL;

	if (copdbg < ID_DFR0_EL1_CopDbg_Armv8)
		return -EINVAL;

	return set_id_reg(vcpu, rd, val);
}
static int set_id_aa64pfr0_el1(struct kvm_vcpu *vcpu,
			       const struct sys_reg_desc *rd, u64 user_val)
{
	u64 hw_val = read_sanitised_ftr_reg(SYS_ID_AA64PFR0_EL1);
	u64 mpam_mask = ID_AA64PFR0_EL1_MPAM_MASK;

	/*
	 * Commit 011e5f5bf529f ("arm64/cpufeature: Add remaining feature bits
	 * in ID_AA64PFR0 register") exposed the MPAM field of AA64PFR0_EL1 to
	 * guests, but didn't add trap handling. KVM doesn't support MPAM and
	 * always returns an UNDEF for these registers. The guest must see 0
	 * for this field.
	 *
	 * But KVM must also accept values from user-space that were provided
	 * by KVM. On CPUs that support MPAM, permit user-space to write
	 * the sanitised value to ID_AA64PFR0_EL1.MPAM, but ignore this field.
	 */
	if ((hw_val & mpam_mask) == (user_val & mpam_mask))
		user_val &= ~ID_AA64PFR0_EL1_MPAM_MASK;

	/* Fail the guest's request to disable the AA64 ISA at EL{0,1,2} */
	if (!FIELD_GET(ID_AA64PFR0_EL1_EL0, user_val) ||
	    !FIELD_GET(ID_AA64PFR0_EL1_EL1, user_val) ||
	    (vcpu_has_nv(vcpu) && !FIELD_GET(ID_AA64PFR0_EL1_EL2, user_val)))
		return -EINVAL;

	return set_id_reg(vcpu, rd, user_val);
}
static int set_id_aa64pfr1_el1(struct kvm_vcpu *vcpu,
			       const struct sys_reg_desc *rd, u64 user_val)
{
	u64 hw_val = read_sanitised_ftr_reg(SYS_ID_AA64PFR1_EL1);
	u64 mpam_mask = ID_AA64PFR1_EL1_MPAM_frac_MASK;
	u8 mte = SYS_FIELD_GET(ID_AA64PFR1_EL1, MTE, hw_val);
	u8 user_mte_frac = SYS_FIELD_GET(ID_AA64PFR1_EL1, MTE_frac, user_val);
	u8 hw_mte_frac = SYS_FIELD_GET(ID_AA64PFR1_EL1, MTE_frac, hw_val);

	/* See set_id_aa64pfr0_el1 for comment about MPAM */
	if ((hw_val & mpam_mask) == (user_val & mpam_mask))
		user_val &= ~ID_AA64PFR1_EL1_MPAM_frac_MASK;

	/*
	 * Previously MTE_frac was hidden from guest. However, if the
	 * hardware supports MTE2 but not MTE_ASYM_FAULT then a value
	 * of 0 for this field indicates that the hardware supports
	 * MTE_ASYNC. Whereas, 0xf indicates MTE_ASYNC is not supported.
	 *
	 * As KVM must accept values from user-space that were provided
	 * by KVM, when ID_AA64PFR1_EL1.MTE is 2 allow user-space to set
	 * ID_AA64PFR1_EL1.MTE_frac to 0. However, ignore it to avoid
	 * incorrectly claiming hardware support for MTE_ASYNC in the
	 * guest.
	 */
	if (mte == ID_AA64PFR1_EL1_MTE_MTE2 &&
	    hw_mte_frac == ID_AA64PFR1_EL1_MTE_frac_NI &&
	    user_mte_frac == ID_AA64PFR1_EL1_MTE_frac_ASYNC) {
		user_val &= ~ID_AA64PFR1_EL1_MTE_frac_MASK;
		user_val |= hw_val & ID_AA64PFR1_EL1_MTE_frac_MASK;
	}

	return set_id_reg(vcpu, rd, user_val);
}
static int set_id_aa64mmfr0_el1(struct kvm_vcpu *vcpu,
				const struct sys_reg_desc *rd, u64 user_val)
{
	u64 sanitized_val = kvm_read_sanitised_id_reg(vcpu, rd);
	u64 tgran2_mask = ID_AA64MMFR0_EL1_TGRAN4_2_MASK |
			  ID_AA64MMFR0_EL1_TGRAN16_2_MASK |
			  ID_AA64MMFR0_EL1_TGRAN64_2_MASK;

	if (vcpu_has_nv(vcpu) &&
	    ((sanitized_val & tgran2_mask) != (user_val & tgran2_mask)))
		return -EINVAL;

	return set_id_reg(vcpu, rd, user_val);
}
static int set_id_aa64mmfr2_el1(struct kvm_vcpu *vcpu,
				const struct sys_reg_desc *rd, u64 user_val)
{
	u64 hw_val = read_sanitised_ftr_reg(SYS_ID_AA64MMFR2_EL1);
	u64 nv_mask = ID_AA64MMFR2_EL1_NV_MASK;

	/*
	 * We made the mistake to expose the now deprecated NV field,
	 * so allow userspace to write it, but silently ignore it.
	 */
	if ((hw_val & nv_mask) == (user_val & nv_mask))
		user_val &= ~nv_mask;

	return set_id_reg(vcpu, rd, user_val);
}
static int set_ctr_el0(struct kvm_vcpu *vcpu,
		       const struct sys_reg_desc *rd, u64 user_val)
{
	u8 user_L1Ip = SYS_FIELD_GET(CTR_EL0, L1Ip, user_val);

	/*
	 * Both AIVIVT (0b01) and VPIPT (0b00) are documented as reserved.
	 * Hence only allow to set VIPT(0b10) or PIPT(0b11) for L1Ip based
	 * on what hardware reports.
	 *
	 * Using a VIPT software model on PIPT will lead to over invalidation,
	 * but still correct. Hence, we can allow downgrading PIPT to VIPT,
	 * but not the other way around. This is handled via arm64_ftr_safe_value()
	 * as CTR_EL0 ftr_bits has L1Ip field with type FTR_EXACT and safe value
	 * set as VIPT.
	 */
	switch (user_L1Ip) {
	case CTR_EL0_L1Ip_RESERVED_VPIPT:
	case CTR_EL0_L1Ip_RESERVED_AIVIVT:
		return -EINVAL;
	case CTR_EL0_L1Ip_VIPT:
	case CTR_EL0_L1Ip_PIPT:
		return set_id_reg(vcpu, rd, user_val);
	default:
		return -ENOENT;
	}
}
/*
 * cpufeature ID register user accessors
 *
 * For now, these registers are immutable for userspace, so no values
 * are stored, and for set_id_reg() we don't allow the effective value
 * to be changed.
 */
static int get_id_reg(struct kvm_vcpu *vcpu, const struct sys_reg_desc *rd,
		      u64 *val)
{
	/*
	 * Avoid locking if the VM has already started, as the ID registers are
	 * guaranteed to be invariant at that point.
	 */
	if (kvm_vm_has_ran_once(vcpu->kvm)) {
		*val = read_id_reg(vcpu, rd);
		return 0;
	}

	mutex_lock(&vcpu->kvm->arch.config_lock);
	*val = read_id_reg(vcpu, rd);
	mutex_unlock(&vcpu->kvm->arch.config_lock);

	return 0;
}
static int set_id_reg(struct kvm_vcpu *vcpu, const struct sys_reg_desc *rd,
		      u64 val)
{
	u32 id = reg_to_encoding(rd);
	int ret;

	mutex_lock(&vcpu->kvm->arch.config_lock);

	/*
	 * Once the VM has started the ID registers are immutable. Reject any
	 * write that does not match the final register value.
	 */
	if (kvm_vm_has_ran_once(vcpu->kvm)) {
		if (val != read_id_reg(vcpu, rd))
			ret = -EBUSY;
		else
			ret = 0;

		mutex_unlock(&vcpu->kvm->arch.config_lock);
		return ret;
	}

	ret = arm64_check_features(vcpu, rd, val);
	if (!ret)
		kvm_set_vm_id_reg(vcpu->kvm, id, val);

	mutex_unlock(&vcpu->kvm->arch.config_lock);

	/*
	 * arm64_check_features() returns -E2BIG to indicate the register's
	 * feature set is a superset of the maximally-allowed register value.
	 * While it would be nice to precisely describe this to userspace, the
	 * existing UAPI for KVM_SET_ONE_REG has it that invalid register
	 * writes return -EINVAL.
	 */
	if (ret == -E2BIG)
		ret = -EINVAL;
	return ret;
}
void kvm_set_vm_id_reg(struct kvm *kvm, u32 reg, u64 val)
{
	u64 *p = __vm_id_reg(&kvm->arch, reg);

	lockdep_assert_held(&kvm->arch.config_lock);

	if (KVM_BUG_ON(kvm_vm_has_ran_once(kvm) || !p, kvm))
		return;

	*p = val;
}
static int get_raz_reg(struct kvm_vcpu *vcpu, const struct sys_reg_desc *rd,
		       u64 *val)
{
	*val = 0;
	return 0;
}

static int set_wi_reg(struct kvm_vcpu *vcpu, const struct sys_reg_desc *rd,
		      u64 val)
{
	return 0;
}
static bool access_ctr(struct kvm_vcpu *vcpu, struct sys_reg_params *p,
		       const struct sys_reg_desc *r)
{
	if (p->is_write)
		return write_to_read_only(vcpu, p, r);

	p->regval = kvm_read_vm_id_reg(vcpu->kvm, SYS_CTR_EL0);
	return true;
}

static bool access_clidr(struct kvm_vcpu *vcpu, struct sys_reg_params *p,
			 const struct sys_reg_desc *r)
{
	if (p->is_write)
		return write_to_read_only(vcpu, p, r);

	p->regval = __vcpu_sys_reg(vcpu, r->reg);
	return true;
}
/*
 * Fabricate a CLIDR_EL1 value instead of using the real value, which can vary
 * by the physical CPU which the vcpu currently resides in.
 */
static u64 reset_clidr(struct kvm_vcpu *vcpu, const struct sys_reg_desc *r)
{
	u64 ctr_el0 = read_sanitised_ftr_reg(SYS_CTR_EL0);
	u64 clidr;
	u8 loc;

	if ((ctr_el0 & CTR_EL0_IDC)) {
		/*
		 * Data cache clean to the PoU is not required so LoUU and LoUIS
		 * will not be set and a unified cache, which will be marked as
		 * LoC, will be added.
		 *
		 * If not DIC, let the unified cache be L2 so that an instruction
		 * cache can be added as L1 later.
		 */
		loc = (ctr_el0 & CTR_EL0_DIC) ? 1 : 2;
		clidr = CACHE_TYPE_UNIFIED << CLIDR_CTYPE_SHIFT(loc);
	} else {
		/*
		 * Data cache clean to the PoU is required so let L1 have a data
		 * cache and mark it as LoUU and LoUIS. As L1 has a data cache,
		 * it can be marked as LoC too.
		 */
		loc = 1;
		clidr = 1 << CLIDR_LOUU_SHIFT;
		clidr |= 1 << CLIDR_LOUIS_SHIFT;
		clidr |= CACHE_TYPE_DATA << CLIDR_CTYPE_SHIFT(1);
	}

	/*
	 * Instruction cache invalidation to the PoU is required so let L1 have
	 * an instruction cache. If L1 already has a data cache, it will be
	 * CACHE_TYPE_SEPARATE.
	 */
	if (!(ctr_el0 & CTR_EL0_DIC))
		clidr |= CACHE_TYPE_INST << CLIDR_CTYPE_SHIFT(1);

	clidr |= loc << CLIDR_LOC_SHIFT;

	/*
	 * Add tag cache unified to data cache. Allocation tags and data are
	 * unified in a cache line so that it looks valid even if there is only
	 * one cache line.
	 */
	if (kvm_has_mte(vcpu->kvm))
		clidr |= 2ULL << CLIDR_TTYPE_SHIFT(loc);

	__vcpu_assign_sys_reg(vcpu, r->reg, clidr);

	return __vcpu_sys_reg(vcpu, r->reg);
}
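/*
 * For example, on an IDC+DIC host the fabricated hierarchy collapses to
 * a single unified L1 marked as the LoC (loc = 1, LoUU = LoUIS = 0);
 * with IDC but not DIC it becomes a separate L1 instruction cache plus
 * a unified L2 at the LoC.
 */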
static int set_clidr(struct kvm_vcpu *vcpu, const struct sys_reg_desc *rd,
		     u64 val)
{
	u64 ctr_el0 = read_sanitised_ftr_reg(SYS_CTR_EL0);
	u64 idc = !CLIDR_LOC(val) || (!CLIDR_LOUIS(val) && !CLIDR_LOUU(val));

	if ((val & CLIDR_EL1_RES0) || (!(ctr_el0 & CTR_EL0_IDC) && idc))
		return -EINVAL;

	__vcpu_assign_sys_reg(vcpu, rd->reg, val);

	return 0;
}
static bool access_csselr(struct kvm_vcpu *vcpu, struct sys_reg_params *p,
			  const struct sys_reg_desc *r)
{
	int reg = r->reg;

	if (p->is_write)
		vcpu_write_sys_reg(vcpu, p->regval, reg);
	else
		p->regval = vcpu_read_sys_reg(vcpu, reg);
	return true;
}
static bool access_ccsidr(struct kvm_vcpu *vcpu, struct sys_reg_params *p,
			  const struct sys_reg_desc *r)
{
	u32 csselr;

	if (p->is_write)
		return write_to_read_only(vcpu, p, r);

	csselr = vcpu_read_sys_reg(vcpu, CSSELR_EL1);
	csselr &= CSSELR_EL1_Level | CSSELR_EL1_InD;
	if (csselr < CSSELR_MAX)
		p->regval = get_ccsidr(vcpu, csselr);

	return true;
}
static unsigned int mte_visibility(const struct kvm_vcpu *vcpu,
				   const struct sys_reg_desc *rd)
{
	if (kvm_has_mte(vcpu->kvm))
		return 0;

	return REG_HIDDEN;
}

#define MTE_REG(name) {				\
	SYS_DESC(SYS_##name),			\
	.access = undef_access,			\
	.reset = reset_unknown,			\
	.reg = name,				\
	.visibility = mte_visibility,		\
}
static unsigned int el2_visibility(const struct kvm_vcpu *vcpu,
				   const struct sys_reg_desc *rd)
{
	if (vcpu_has_nv(vcpu))
		return 0;

	return REG_HIDDEN;
}
static bool bad_vncr_trap(struct kvm_vcpu *vcpu,
			  struct sys_reg_params *p,
			  const struct sys_reg_desc *r)
{
	/*
	 * We really shouldn't be here, and this is likely the result
	 * of a misconfigured trap, as this register should target the
	 * VNCR page, and nothing else.
	 */
	return bad_trap(vcpu, p, r,
			"trap of VNCR-backed register");
}

static bool bad_redir_trap(struct kvm_vcpu *vcpu,
			   struct sys_reg_params *p,
			   const struct sys_reg_desc *r)
{
	/*
	 * We really shouldn't be here, and this is likely the result
	 * of a misconfigured trap, as this register should target the
	 * corresponding EL1, and nothing else.
	 */
	return bad_trap(vcpu, p, r,
			"trap of EL2 register redirected to EL1");
}
#define EL2_REG_FILTERED(name, acc, rst, v, filter) {	\
	SYS_DESC(SYS_##name),				\
	.access = acc,					\
	.reset = rst,					\
	.reg = name,					\
	.visibility = filter,				\
	.val = v,					\
}

#define EL2_REG(name, acc, rst, v)			\
	EL2_REG_FILTERED(name, acc, rst, v, el2_visibility)

#define EL2_REG_VNCR(name, rst, v)	EL2_REG(name, bad_vncr_trap, rst, v)
#define EL2_REG_REDIR(name, rst, v)	EL2_REG(name, bad_redir_trap, rst, v)
/*
 * Since the reset() callback and the val field are not otherwise used for
 * idregs, they are repurposed for these registers:
 * reset() returns the KVM sanitised register value, which is the same as
 * the host kernel sanitised value if there is no KVM sanitisation.
 * val is used as a mask indicating the writable fields of the idreg; only
 * bits set to 1 are writable from userspace. This mask might become
 * unnecessary in the future, once all ID registers are writable from
 * userspace.
 */

#define ID_DESC_DEFAULT_CALLBACKS		\
	.access	= access_id_reg,		\
	.get_user = get_id_reg,			\
	.set_user = set_id_reg,			\
	.visibility = id_visibility,		\
	.reset = kvm_read_sanitised_id_reg

#define ID_DESC(name)				\
	SYS_DESC(SYS_##name),			\
	ID_DESC_DEFAULT_CALLBACKS
/* sys_reg_desc initialiser for known cpufeature ID registers */
#define ID_SANITISED(name) {			\
	ID_DESC(name),				\
	.val = 0,				\
}

/* sys_reg_desc initialiser for known cpufeature ID registers */
#define AA32_ID_SANITISED(name) {		\
	ID_DESC(name),				\
	.visibility = aa32_id_visibility,	\
	.val = 0,				\
}

/* sys_reg_desc initialiser for writable ID registers */
#define ID_WRITABLE(name, mask) {		\
	ID_DESC(name),				\
	.val = mask,				\
}

/* sys_reg_desc initialiser for cpufeature ID registers that need filtering */
#define ID_FILTERED(sysreg, name, mask) {	\
	ID_DESC(sysreg),			\
	.set_user = set_##name,			\
	.val = (mask),				\
}

/*
 * sys_reg_desc initialiser for architecturally unallocated cpufeature ID
 * register with encoding Op0=3, Op1=0, CRn=0, CRm=crm, Op2=op2
 * (1 <= crm < 8, 0 <= Op2 < 8).
 */
#define ID_UNALLOCATED(crm, op2) {			\
	.name = "S3_0_0_" #crm "_" #op2,		\
	Op0(3), Op1(0), CRn(0), CRm(crm), Op2(op2),	\
	ID_DESC_DEFAULT_CALLBACKS,			\
	.visibility = raz_visibility,			\
	.val = 0,					\
}

/*
 * sys_reg_desc initialiser for known ID registers that we hide from guests.
 * For now, these are exposed just like unallocated ID regs: they appear
 * RAZ for the guest.
 */
#define ID_HIDDEN(name) {			\
	ID_DESC(name),				\
	.visibility = raz_visibility,		\
	.val = 0,				\
}
2402 static bool access_sp_el1(struct kvm_vcpu *vcpu,
2403 struct sys_reg_params *p,
2404 const struct sys_reg_desc *r)
2405 {
2406 if (p->is_write)
2407 __vcpu_assign_sys_reg(vcpu, SP_EL1, p->regval);
2408 else
2409 p->regval = __vcpu_sys_reg(vcpu, SP_EL1);
2411 return true;
2412 }
2414 static bool access_elr(struct kvm_vcpu *vcpu,
2415 struct sys_reg_params *p,
2416 const struct sys_reg_desc *r)
2417 {
2418 if (p->is_write)
2419 vcpu_write_sys_reg(vcpu, p->regval, ELR_EL1);
2420 else
2421 p->regval = vcpu_read_sys_reg(vcpu, ELR_EL1);
2423 return true;
2424 }
2426 static bool access_spsr(struct kvm_vcpu *vcpu,
2427 struct sys_reg_params *p,
2428 const struct sys_reg_desc *r)
2429 {
2430 if (p->is_write)
2431 __vcpu_assign_sys_reg(vcpu, SPSR_EL1, p->regval);
2432 else
2433 p->regval = __vcpu_sys_reg(vcpu, SPSR_EL1);
2435 return true;
2436 }
2438 static bool access_cntkctl_el12(struct kvm_vcpu *vcpu,
2439 struct sys_reg_params *p,
2440 const struct sys_reg_desc *r)
2441 {
2442 if (p->is_write)
2443 __vcpu_assign_sys_reg(vcpu, CNTKCTL_EL1, p->regval);
2444 else
2445 p->regval = __vcpu_sys_reg(vcpu, CNTKCTL_EL1);
2447 return true;
2448 }
2450 static u64 reset_hcr(struct kvm_vcpu *vcpu, const struct sys_reg_desc *r)
2451 {
2452 u64 val = r->val;
2454 if (!cpus_have_final_cap(ARM64_HAS_HCR_NV1))
2455 val |= HCR_E2H;
2457 __vcpu_assign_sys_reg(vcpu, r->reg, val);
2459 return __vcpu_sys_reg(vcpu, r->reg);
2460 }
2462 static unsigned int __el2_visibility(const struct kvm_vcpu *vcpu,
2463 const struct sys_reg_desc *rd,
2464 unsigned int (*fn)(const struct kvm_vcpu *,
2465 const struct sys_reg_desc *))
2466 {
2467 return el2_visibility(vcpu, rd) ?: fn(vcpu, rd);
2468 }
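/*
 * Note: the GNU "?:" extension above returns el2_visibility()'s result
 * when it is non-zero (i.e. the register is already hidden), and only
 * otherwise consults the extra filter @fn.
 */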
2470 static unsigned int sve_el2_visibility(const struct kvm_vcpu *vcpu,
2471 const struct sys_reg_desc *rd)
2472 {
2473 return __el2_visibility(vcpu, rd, sve_visibility);
2474 }
2476 static unsigned int vncr_el2_visibility(const struct kvm_vcpu *vcpu,
2477 const struct sys_reg_desc *rd)
2478 {
2479 if (el2_visibility(vcpu, rd) == 0 &&
2480 kvm_has_feat(vcpu->kvm, ID_AA64MMFR4_EL1, NV_frac, NV2_ONLY))
2481 return 0;
2483 return REG_HIDDEN;
2484 }
2486 static bool access_zcr_el2(struct kvm_vcpu *vcpu,
2487 struct sys_reg_params *p,
2488 const struct sys_reg_desc *r)
2489 {
2490 unsigned int vq;
2492 if (guest_hyp_sve_traps_enabled(vcpu)) {
2493 kvm_inject_nested_sve_trap(vcpu);
2494 return true;
2495 }
2497 if (!p->is_write) {
2498 p->regval = vcpu_read_sys_reg(vcpu, ZCR_EL2);
2499 return true;
2500 }
2502 vq = SYS_FIELD_GET(ZCR_ELx, LEN, p->regval) + 1;
2503 vq = min(vq, vcpu_sve_max_vq(vcpu));
2504 vcpu_write_sys_reg(vcpu, vq - 1, ZCR_EL2);
2506 return true;
2507 }
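/*
 * Worked example (hypothetical values): a nested hypervisor writing
 * ZCR_EL2.LEN = 15 asks for vq = 16, i.e. a 2048-bit vector. If the
 * vcpu was created with vcpu_sve_max_vq() == 4 (512 bits), the write
 * above is clamped and the guest reads back LEN = 3.
 */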
2509 static bool access_gic_vtr(struct kvm_vcpu *vcpu,
2510 struct sys_reg_params *p,
2511 const struct sys_reg_desc *r)
2512 {
2513 if (p->is_write)
2514 return write_to_read_only(vcpu, p, r);
2516 p->regval = kvm_vgic_global_state.ich_vtr_el2;
2517 p->regval &= ~(ICH_VTR_EL2_DVIM |
2518 ICH_VTR_EL2_A3V |
2519 ICH_VTR_EL2_IDbits);
2520 p->regval |= ICH_VTR_EL2_nV4;
2522 return true;
2523 }
2525 static bool access_gic_misr(struct kvm_vcpu *vcpu,
2526 struct sys_reg_params *p,
2527 const struct sys_reg_desc *r)
2528 {
2529 if (p->is_write)
2530 return write_to_read_only(vcpu, p, r);
2532 p->regval = vgic_v3_get_misr(vcpu);
2534 return true;
2535 }
2537 static bool access_gic_eisr(struct kvm_vcpu *vcpu,
2538 struct sys_reg_params *p,
2539 const struct sys_reg_desc *r)
2540 {
2541 if (p->is_write)
2542 return write_to_read_only(vcpu, p, r);
2544 p->regval = vgic_v3_get_eisr(vcpu);
2546 return true;
2547 }
2549 static bool access_gic_elrsr(struct kvm_vcpu *vcpu,
2550 struct sys_reg_params *p,
2551 const struct sys_reg_desc *r)
2552 {
2553 if (p->is_write)
2554 return write_to_read_only(vcpu, p, r);
2556 p->regval = vgic_v3_get_elrsr(vcpu);
2558 return true;
2559 }
2561 static unsigned int s1poe_visibility(const struct kvm_vcpu *vcpu,
2562 const struct sys_reg_desc *rd)
2563 {
2564 if (kvm_has_s1poe(vcpu->kvm))
2565 return 0;
2567 return REG_HIDDEN;
2568 }
2570 static unsigned int s1poe_el2_visibility(const struct kvm_vcpu *vcpu,
2571 const struct sys_reg_desc *rd)
2572 {
2573 return __el2_visibility(vcpu, rd, s1poe_visibility);
2574 }
2576 static unsigned int tcr2_visibility(const struct kvm_vcpu *vcpu,
2577 const struct sys_reg_desc *rd)
2578 {
2579 if (kvm_has_tcr2(vcpu->kvm))
2580 return 0;
2582 return REG_HIDDEN;
2583 }
2585 static unsigned int tcr2_el2_visibility(const struct kvm_vcpu *vcpu,
2586 const struct sys_reg_desc *rd)
2587 {
2588 return __el2_visibility(vcpu, rd, tcr2_visibility);
2589 }
2591 static unsigned int s1pie_visibility(const struct kvm_vcpu *vcpu,
2592 const struct sys_reg_desc *rd)
2593 {
2594 if (kvm_has_s1pie(vcpu->kvm))
2595 return 0;
2597 return REG_HIDDEN;
2598 }
2600 static unsigned int s1pie_el2_visibility(const struct kvm_vcpu *vcpu,
2601 const struct sys_reg_desc *rd)
2602 {
2603 return __el2_visibility(vcpu, rd, s1pie_visibility);
2604 }
2606 static bool access_mdcr(struct kvm_vcpu *vcpu,
2607 struct sys_reg_params *p,
2608 const struct sys_reg_desc *r)
2609 {
2610 u64 hpmn, val, old = __vcpu_sys_reg(vcpu, MDCR_EL2);
2612 if (!p->is_write) {
2613 p->regval = old;
2614 return true;
2615 }
2617 val = p->regval;
2618 hpmn = FIELD_GET(MDCR_EL2_HPMN, val);
2620 /*
2621 * If HPMN is out of bounds, limit it to what we actually
2622 * support. This matches the UNKNOWN definition of the field
2623 * in that case, and keeps the emulation simple. Sort of.
2624 */
2625 if (hpmn > vcpu->kvm->arch.nr_pmu_counters) {
2626 hpmn = vcpu->kvm->arch.nr_pmu_counters;
2627 u64_replace_bits(val, hpmn, MDCR_EL2_HPMN);
2628 }
2630 __vcpu_assign_sys_reg(vcpu, MDCR_EL2, val);
2632 /*
2633 * Request a reload of the PMU to enable/disable the counters
2634 * affected by HPME.
2635 */
2636 if ((old ^ val) & MDCR_EL2_HPME)
2637 kvm_make_request(KVM_REQ_RELOAD_PMU, vcpu);
2639 return true;
2640 }
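/*
 * Worked example (hypothetical values): with MDCR_EL2_HPMN occupying
 * bits [4:0], a guest write of 0x13 requests HPMN = 19; on a VM with
 * only 8 PMU counters the field is rewritten so the guest reads back
 * HPMN = 8, consistent with the field being UNKNOWN in that case.
 */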
2642 /*
2643 * For historical (ahem ABI) reasons, KVM treated MIDR_EL1, REVIDR_EL1, and
2644 * AIDR_EL1 as "invariant" registers, meaning userspace cannot change them.
2645 * The values made visible to userspace were the register values of the boot
2646 * CPU.
2647 *
2648 * At the same time, reads from these registers at EL1 previously were not
2649 * trapped, allowing the guest to read the actual hardware value. On big-little
2650 * machines, this means the VM can see different values depending on where a
2651 * given vCPU got scheduled.
2652 *
2653 * These registers are now trapped as collateral damage from SME, and what
2654 * follows attempts to give a user / guest view consistent with the existing
2655 * ABI.
2656 */
2657 static bool access_imp_id_reg(struct kvm_vcpu *vcpu,
2658 struct sys_reg_params *p,
2659 const struct sys_reg_desc *r)
2660 {
2661 if (p->is_write)
2662 return write_to_read_only(vcpu, p, r);
2664 /*
2665 * Return the VM-scoped implementation ID register values if userspace
2666 * has made them writable.
2667 */
2668 if (test_bit(KVM_ARCH_FLAG_WRITABLE_IMP_ID_REGS, &vcpu->kvm->arch.flags))
2669 return access_id_reg(vcpu, p, r);
2671 /*
2672 * Otherwise, fall back to the old behavior of returning the value of
2673 * the current CPU.
2674 */
2675 switch (reg_to_encoding(r)) {
2676 case SYS_REVIDR_EL1:
2677 p->regval = read_sysreg(revidr_el1);
2678 break;
2679 case SYS_AIDR_EL1:
2680 p->regval = read_sysreg(aidr_el1);
2681 break;
2682 default:
2683 WARN_ON_ONCE(1);
2684 }
2686 return true;
2687 }
2689 static u64 __ro_after_init boot_cpu_midr_val;
2690 static u64 __ro_after_init boot_cpu_revidr_val;
2691 static u64 __ro_after_init boot_cpu_aidr_val;
2693 static void init_imp_id_regs(void)
2694 {
2695 boot_cpu_midr_val = read_sysreg(midr_el1);
2696 boot_cpu_revidr_val = read_sysreg(revidr_el1);
2697 boot_cpu_aidr_val = read_sysreg(aidr_el1);
2698 }
2700 static u64 reset_imp_id_reg(struct kvm_vcpu *vcpu, const struct sys_reg_desc *r)
2701 {
2702 switch (reg_to_encoding(r)) {
2703 case SYS_MIDR_EL1:
2704 return boot_cpu_midr_val;
2705 case SYS_REVIDR_EL1:
2706 return boot_cpu_revidr_val;
2707 case SYS_AIDR_EL1:
2708 return boot_cpu_aidr_val;
2709 default:
2710 KVM_BUG_ON(1, vcpu->kvm);
2711 return 0;
2712 }
2713 }
2715 static int set_imp_id_reg(struct kvm_vcpu *vcpu, const struct sys_reg_desc *r,
2716 u64 val)
2717 {
2718 struct kvm *kvm = vcpu->kvm;
2719 u64 expected;
2721 guard(mutex)(&kvm->arch.config_lock);
2723 expected = read_id_reg(vcpu, r);
2724 if (expected == val)
2725 return 0;
2727 if (!test_bit(KVM_ARCH_FLAG_WRITABLE_IMP_ID_REGS, &kvm->arch.flags))
2728 return -EINVAL;
2730 /*
2731 * Once the VM has started the ID registers are immutable. Reject the
2732 * write if userspace tries to change it.
2733 */
2734 if (kvm_vm_has_ran_once(kvm))
2735 return -EBUSY;
2737 /*
2738 * Any value is allowed for the implementation ID registers so long as
2739 * it is within the writable mask.
2740 */
2741 if ((val & r->val) != val)
2742 return -EINVAL;
2744 kvm_set_vm_id_reg(kvm, reg_to_encoding(r), val);
2745 return 0;
2746 }
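/*
 * Example of the mask check above: MIDR_EL1 is registered with
 * .val = GENMASK_ULL(31, 0), so any userspace value with bits [63:32]
 * set fails "(val & r->val) != val" and is rejected with -EINVAL.
 */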
2748 #define IMPLEMENTATION_ID(reg, mask) { \
2749 SYS_DESC(SYS_##reg), \
2750 .access = access_imp_id_reg, \
2751 .get_user = get_id_reg, \
2752 .set_user = set_imp_id_reg, \
2753 .reset = reset_imp_id_reg, \
2754 .val = mask, \
2755 }
2757 static u64 reset_mdcr(struct kvm_vcpu *vcpu, const struct sys_reg_desc *r)
2758 {
2759 __vcpu_assign_sys_reg(vcpu, r->reg, vcpu->kvm->arch.nr_pmu_counters);
2760 return vcpu->kvm->arch.nr_pmu_counters;
2761 }
2763 /*
2764 * Architected system registers.
2765 * Important: Must be sorted ascending by Op0, Op1, CRn, CRm, Op2
2766 *
2767 * Debug handling: We do trap most, if not all debug related system
2768 * registers. The implementation is good enough to ensure that a guest
2769 * can use these with minimal performance degradation. The drawback is
2770 * that we don't implement any of the external debug architecture.
2771 * This should be revisited if we ever encounter a more demanding
2772 * guest...
2773 */
2774 static const struct sys_reg_desc sys_reg_descs[] = {
2775 DBG_BCR_BVR_WCR_WVR_EL1(0),
2776 DBG_BCR_BVR_WCR_WVR_EL1(1),
2777 { SYS_DESC(SYS_MDCCINT_EL1), trap_debug_regs, reset_val, MDCCINT_EL1, 0 },
2778 { SYS_DESC(SYS_MDSCR_EL1), trap_debug_regs, reset_val, MDSCR_EL1, 0 },
2779 DBG_BCR_BVR_WCR_WVR_EL1(2),
2780 DBG_BCR_BVR_WCR_WVR_EL1(3),
2781 DBG_BCR_BVR_WCR_WVR_EL1(4),
2782 DBG_BCR_BVR_WCR_WVR_EL1(5),
2783 DBG_BCR_BVR_WCR_WVR_EL1(6),
2784 DBG_BCR_BVR_WCR_WVR_EL1(7),
2785 DBG_BCR_BVR_WCR_WVR_EL1(8),
2786 DBG_BCR_BVR_WCR_WVR_EL1(9),
2787 DBG_BCR_BVR_WCR_WVR_EL1(10),
2788 DBG_BCR_BVR_WCR_WVR_EL1(11),
2789 DBG_BCR_BVR_WCR_WVR_EL1(12),
2790 DBG_BCR_BVR_WCR_WVR_EL1(13),
2791 DBG_BCR_BVR_WCR_WVR_EL1(14),
2792 DBG_BCR_BVR_WCR_WVR_EL1(15),
2794 { SYS_DESC(SYS_MDRAR_EL1), trap_raz_wi },
2795 { SYS_DESC(SYS_OSLAR_EL1), trap_oslar_el1 },
2796 { SYS_DESC(SYS_OSLSR_EL1), trap_oslsr_el1, reset_val, OSLSR_EL1,
2797 OSLSR_EL1_OSLM_IMPLEMENTED, .set_user = set_oslsr_el1, },
2798 { SYS_DESC(SYS_OSDLR_EL1), trap_raz_wi },
2799 { SYS_DESC(SYS_DBGPRCR_EL1), trap_raz_wi },
2800 { SYS_DESC(SYS_DBGCLAIMSET_EL1), trap_raz_wi },
2801 { SYS_DESC(SYS_DBGCLAIMCLR_EL1), trap_raz_wi },
2802 { SYS_DESC(SYS_DBGAUTHSTATUS_EL1), trap_dbgauthstatus_el1 },
2804 { SYS_DESC(SYS_MDCCSR_EL0), trap_raz_wi },
2805 { SYS_DESC(SYS_DBGDTR_EL0), trap_raz_wi },
2806 // DBGDTR[TR]X_EL0 share the same encoding
2807 { SYS_DESC(SYS_DBGDTRTX_EL0), trap_raz_wi },
2809 { SYS_DESC(SYS_DBGVCR32_EL2), undef_access, reset_val, DBGVCR32_EL2, 0 },
2811 IMPLEMENTATION_ID(MIDR_EL1, GENMASK_ULL(31, 0)),
2812 { SYS_DESC(SYS_MPIDR_EL1), NULL, reset_mpidr, MPIDR_EL1 },
2813 IMPLEMENTATION_ID(REVIDR_EL1, GENMASK_ULL(63, 0)),
2815 /*
2816 * ID regs: all ID_SANITISED() entries here must have corresponding
2817 * entries in arm64_ftr_regs[].
2818 */
2820 /* AArch64 mappings of the AArch32 ID registers */
2822 AA32_ID_SANITISED(ID_PFR0_EL1),
2823 AA32_ID_SANITISED(ID_PFR1_EL1),
2824 { SYS_DESC(SYS_ID_DFR0_EL1),
2825 .access = access_id_reg,
2826 .get_user = get_id_reg,
2827 .set_user = set_id_dfr0_el1,
2828 .visibility = aa32_id_visibility,
2829 .reset = read_sanitised_id_dfr0_el1,
2830 .val = ID_DFR0_EL1_PerfMon_MASK |
2831 ID_DFR0_EL1_CopDbg_MASK, },
2832 ID_HIDDEN(ID_AFR0_EL1),
2833 AA32_ID_SANITISED(ID_MMFR0_EL1),
2834 AA32_ID_SANITISED(ID_MMFR1_EL1),
2835 AA32_ID_SANITISED(ID_MMFR2_EL1),
2836 AA32_ID_SANITISED(ID_MMFR3_EL1),
2838 /* CRm=2 */
2839 AA32_ID_SANITISED(ID_ISAR0_EL1),
2840 AA32_ID_SANITISED(ID_ISAR1_EL1),
2841 AA32_ID_SANITISED(ID_ISAR2_EL1),
2842 AA32_ID_SANITISED(ID_ISAR3_EL1),
2843 AA32_ID_SANITISED(ID_ISAR4_EL1),
2844 AA32_ID_SANITISED(ID_ISAR5_EL1),
2845 AA32_ID_SANITISED(ID_MMFR4_EL1),
2846 AA32_ID_SANITISED(ID_ISAR6_EL1),
2848 /* CRm=3 */
2849 AA32_ID_SANITISED(MVFR0_EL1),
2850 AA32_ID_SANITISED(MVFR1_EL1),
2851 AA32_ID_SANITISED(MVFR2_EL1),
2852 ID_UNALLOCATED(3,3),
2853 AA32_ID_SANITISED(ID_PFR2_EL1),
2854 ID_HIDDEN(ID_DFR1_EL1),
2855 AA32_ID_SANITISED(ID_MMFR5_EL1),
2856 ID_UNALLOCATED(3,7),
2858 /* AArch64 ID registers */
2859 /* CRm=4 */
2860 ID_FILTERED(ID_AA64PFR0_EL1, id_aa64pfr0_el1,
2861 ~(ID_AA64PFR0_EL1_AMU |
2862 ID_AA64PFR0_EL1_MPAM |
2863 ID_AA64PFR0_EL1_SVE |
2864 ID_AA64PFR0_EL1_RAS |
2865 ID_AA64PFR0_EL1_AdvSIMD |
2866 ID_AA64PFR0_EL1_FP)),
2867 ID_FILTERED(ID_AA64PFR1_EL1, id_aa64pfr1_el1,
2868 ~(ID_AA64PFR1_EL1_PFAR |
2869 ID_AA64PFR1_EL1_DF2 |
2870 ID_AA64PFR1_EL1_MTEX |
2871 ID_AA64PFR1_EL1_THE |
2872 ID_AA64PFR1_EL1_GCS |
2873 ID_AA64PFR1_EL1_MTE_frac |
2874 ID_AA64PFR1_EL1_NMI |
2875 ID_AA64PFR1_EL1_RNDR_trap |
2876 ID_AA64PFR1_EL1_SME |
2877 ID_AA64PFR1_EL1_RES0 |
2878 ID_AA64PFR1_EL1_MPAM_frac |
2879 ID_AA64PFR1_EL1_RAS_frac |
2880 ID_AA64PFR1_EL1_MTE)),
2881 ID_WRITABLE(ID_AA64PFR2_EL1, ID_AA64PFR2_EL1_FPMR),
2882 ID_UNALLOCATED(4,3),
2883 ID_WRITABLE(ID_AA64ZFR0_EL1, ~ID_AA64ZFR0_EL1_RES0),
2884 ID_HIDDEN(ID_AA64SMFR0_EL1),
2885 ID_UNALLOCATED(4,6),
2886 ID_WRITABLE(ID_AA64FPFR0_EL1, ~ID_AA64FPFR0_EL1_RES0),
2889 /*
2890 * Prior to FEAT_Debugv8.9, the architecture defines context-aware
2891 * breakpoints (CTX_CMPs) as the highest numbered breakpoints (BRPs).
2892 * KVM does not trap + emulate the breakpoint registers, and as such
2893 * cannot support a layout that misaligns with the underlying hardware.
2894 * While it may be possible to describe a subset that aligns with
2895 * hardware, just prevent changes to BRPs and CTX_CMPs altogether for
2896 * simplicity.
2897 *
2898 * See DDI0487K.a, section D2.8.3 Breakpoint types and linking
2899 * of breakpoints for more details.
2900 */
2901 ID_FILTERED(ID_AA64DFR0_EL1, id_aa64dfr0_el1,
2902 ID_AA64DFR0_EL1_DoubleLock_MASK |
2903 ID_AA64DFR0_EL1_WRPs_MASK |
2904 ID_AA64DFR0_EL1_PMUVer_MASK |
2905 ID_AA64DFR0_EL1_DebugVer_MASK),
2906 ID_SANITISED(ID_AA64DFR1_EL1),
2907 ID_UNALLOCATED(5,2),
2908 ID_UNALLOCATED(5,3),
2909 ID_HIDDEN(ID_AA64AFR0_EL1),
2910 ID_HIDDEN(ID_AA64AFR1_EL1),
2911 ID_UNALLOCATED(5,6),
2912 ID_UNALLOCATED(5,7),
2914 /* CRm=6 */
2915 ID_WRITABLE(ID_AA64ISAR0_EL1, ~ID_AA64ISAR0_EL1_RES0),
2916 ID_WRITABLE(ID_AA64ISAR1_EL1, ~(ID_AA64ISAR1_EL1_GPI |
2917 ID_AA64ISAR1_EL1_GPA |
2918 ID_AA64ISAR1_EL1_API |
2919 ID_AA64ISAR1_EL1_APA)),
2920 ID_WRITABLE(ID_AA64ISAR2_EL1, ~(ID_AA64ISAR2_EL1_RES0 |
2921 ID_AA64ISAR2_EL1_APA3 |
2922 ID_AA64ISAR2_EL1_GPA3)),
2923 ID_WRITABLE(ID_AA64ISAR3_EL1, (ID_AA64ISAR3_EL1_FPRCVT |
2924 ID_AA64ISAR3_EL1_FAMINMAX)),
2925 ID_UNALLOCATED(6,4),
2926 ID_UNALLOCATED(6,5),
2927 ID_UNALLOCATED(6,6),
2928 ID_UNALLOCATED(6,7),
2930 /* CRm=7 */
2931 ID_FILTERED(ID_AA64MMFR0_EL1, id_aa64mmfr0_el1,
2932 ~(ID_AA64MMFR0_EL1_RES0 |
2933 ID_AA64MMFR0_EL1_ASIDBITS)),
2934 ID_WRITABLE(ID_AA64MMFR1_EL1, ~(ID_AA64MMFR1_EL1_RES0 |
2935 ID_AA64MMFR1_EL1_HCX |
2936 ID_AA64MMFR1_EL1_TWED |
2937 ID_AA64MMFR1_EL1_XNX |
2938 ID_AA64MMFR1_EL1_VH |
2939 ID_AA64MMFR1_EL1_VMIDBits)),
2940 ID_FILTERED(ID_AA64MMFR2_EL1,
2941 id_aa64mmfr2_el1, ~(ID_AA64MMFR2_EL1_RES0 |
2942 ID_AA64MMFR2_EL1_EVT |
2943 ID_AA64MMFR2_EL1_FWB |
2944 ID_AA64MMFR2_EL1_IDS |
2945 ID_AA64MMFR2_EL1_NV |
2946 ID_AA64MMFR2_EL1_CCIDX)),
2947 ID_WRITABLE(ID_AA64MMFR3_EL1, (ID_AA64MMFR3_EL1_TCRX |
2948 ID_AA64MMFR3_EL1_S1PIE |
2949 ID_AA64MMFR3_EL1_S1POE)),
2950 ID_WRITABLE(ID_AA64MMFR4_EL1, ID_AA64MMFR4_EL1_NV_frac),
2951 ID_UNALLOCATED(7,5),
2952 ID_UNALLOCATED(7,6),
2953 ID_UNALLOCATED(7,7),
2955 { SYS_DESC(SYS_SCTLR_EL1), access_vm_reg, reset_val, SCTLR_EL1, 0x00C50078 },
2956 { SYS_DESC(SYS_ACTLR_EL1), access_actlr, reset_actlr, ACTLR_EL1 },
2957 { SYS_DESC(SYS_CPACR_EL1), NULL, reset_val, CPACR_EL1, 0 },
2959 MTE_REG(RGSR_EL1),
2960 MTE_REG(GCR_EL1),
2962 { SYS_DESC(SYS_ZCR_EL1), NULL, reset_val, ZCR_EL1, 0, .visibility = sve_visibility },
2963 { SYS_DESC(SYS_TRFCR_EL1), undef_access },
2964 { SYS_DESC(SYS_SMPRI_EL1), undef_access },
2965 { SYS_DESC(SYS_SMCR_EL1), undef_access },
2966 { SYS_DESC(SYS_TTBR0_EL1), access_vm_reg, reset_unknown, TTBR0_EL1 },
2967 { SYS_DESC(SYS_TTBR1_EL1), access_vm_reg, reset_unknown, TTBR1_EL1 },
2968 { SYS_DESC(SYS_TCR_EL1), access_vm_reg, reset_val, TCR_EL1, 0 },
2969 { SYS_DESC(SYS_TCR2_EL1), access_vm_reg, reset_val, TCR2_EL1, 0,
2970 .visibility = tcr2_visibility },
2972 PTRAUTH_KEY(APIA),
2973 PTRAUTH_KEY(APIB),
2974 PTRAUTH_KEY(APDA),
2975 PTRAUTH_KEY(APDB),
2976 PTRAUTH_KEY(APGA),
2978 { SYS_DESC(SYS_SPSR_EL1), access_spsr},
2979 { SYS_DESC(SYS_ELR_EL1), access_elr},
2981 { SYS_DESC(SYS_ICC_PMR_EL1), undef_access },
2983 { SYS_DESC(SYS_AFSR0_EL1), access_vm_reg, reset_unknown, AFSR0_EL1 },
2984 { SYS_DESC(SYS_AFSR1_EL1), access_vm_reg, reset_unknown, AFSR1_EL1 },
2985 { SYS_DESC(SYS_ESR_EL1), access_vm_reg, reset_unknown, ESR_EL1 },
2987 { SYS_DESC(SYS_ERRIDR_EL1), trap_raz_wi },
2988 { SYS_DESC(SYS_ERRSELR_EL1), trap_raz_wi },
2989 { SYS_DESC(SYS_ERXFR_EL1), trap_raz_wi },
2990 { SYS_DESC(SYS_ERXCTLR_EL1), trap_raz_wi },
2991 { SYS_DESC(SYS_ERXSTATUS_EL1), trap_raz_wi },
2992 { SYS_DESC(SYS_ERXADDR_EL1), trap_raz_wi },
2993 { SYS_DESC(SYS_ERXMISC0_EL1), trap_raz_wi },
2994 { SYS_DESC(SYS_ERXMISC1_EL1), trap_raz_wi },
2996 MTE_REG(TFSR_EL1),
2997 MTE_REG(TFSRE0_EL1),
2999 { SYS_DESC(SYS_FAR_EL1), access_vm_reg, reset_unknown, FAR_EL1 },
3000 { SYS_DESC(SYS_PAR_EL1), NULL, reset_unknown, PAR_EL1 },
3002 { SYS_DESC(SYS_PMSCR_EL1), undef_access },
3003 { SYS_DESC(SYS_PMSNEVFR_EL1), undef_access },
3004 { SYS_DESC(SYS_PMSICR_EL1), undef_access },
3005 { SYS_DESC(SYS_PMSIRR_EL1), undef_access },
3006 { SYS_DESC(SYS_PMSFCR_EL1), undef_access },
3007 { SYS_DESC(SYS_PMSEVFR_EL1), undef_access },
3008 { SYS_DESC(SYS_PMSLATFR_EL1), undef_access },
3009 { SYS_DESC(SYS_PMSIDR_EL1), undef_access },
3010 { SYS_DESC(SYS_PMBLIMITR_EL1), undef_access },
3011 { SYS_DESC(SYS_PMBPTR_EL1), undef_access },
3012 { SYS_DESC(SYS_PMBSR_EL1), undef_access },
3013 /* PMBIDR_EL1 is not trapped */
3015 { PMU_SYS_REG(PMINTENSET_EL1),
3016 .access = access_pminten, .reg = PMINTENSET_EL1,
3017 .get_user = get_pmreg, .set_user = set_pmreg },
3018 { PMU_SYS_REG(PMINTENCLR_EL1),
3019 .access = access_pminten, .reg = PMINTENSET_EL1,
3020 .get_user = get_pmreg, .set_user = set_pmreg },
3021 { SYS_DESC(SYS_PMMIR_EL1), trap_raz_wi },
3023 { SYS_DESC(SYS_MAIR_EL1), access_vm_reg, reset_unknown, MAIR_EL1 },
3024 { SYS_DESC(SYS_PIRE0_EL1), NULL, reset_unknown, PIRE0_EL1,
3025 .visibility = s1pie_visibility },
3026 { SYS_DESC(SYS_PIR_EL1), NULL, reset_unknown, PIR_EL1,
3027 .visibility = s1pie_visibility },
3028 { SYS_DESC(SYS_POR_EL1), NULL, reset_unknown, POR_EL1,
3029 .visibility = s1poe_visibility },
3030 { SYS_DESC(SYS_AMAIR_EL1), access_vm_reg, reset_amair_el1, AMAIR_EL1 },
3032 { SYS_DESC(SYS_LORSA_EL1), trap_loregion },
3033 { SYS_DESC(SYS_LOREA_EL1), trap_loregion },
3034 { SYS_DESC(SYS_LORN_EL1), trap_loregion },
3035 { SYS_DESC(SYS_LORC_EL1), trap_loregion },
3036 { SYS_DESC(SYS_MPAMIDR_EL1), undef_access },
3037 { SYS_DESC(SYS_LORID_EL1), trap_loregion },
3039 { SYS_DESC(SYS_MPAM1_EL1), undef_access },
3040 { SYS_DESC(SYS_MPAM0_EL1), undef_access },
3041 { SYS_DESC(SYS_VBAR_EL1), access_rw, reset_val, VBAR_EL1, 0 },
3042 { SYS_DESC(SYS_DISR_EL1), NULL, reset_val, DISR_EL1, 0 },
3044 { SYS_DESC(SYS_ICC_IAR0_EL1), undef_access },
3045 { SYS_DESC(SYS_ICC_EOIR0_EL1), undef_access },
3046 { SYS_DESC(SYS_ICC_HPPIR0_EL1), undef_access },
3047 { SYS_DESC(SYS_ICC_BPR0_EL1), undef_access },
3048 { SYS_DESC(SYS_ICC_AP0R0_EL1), undef_access },
3049 { SYS_DESC(SYS_ICC_AP0R1_EL1), undef_access },
3050 { SYS_DESC(SYS_ICC_AP0R2_EL1), undef_access },
3051 { SYS_DESC(SYS_ICC_AP0R3_EL1), undef_access },
3052 { SYS_DESC(SYS_ICC_AP1R0_EL1), undef_access },
3053 { SYS_DESC(SYS_ICC_AP1R1_EL1), undef_access },
3054 { SYS_DESC(SYS_ICC_AP1R2_EL1), undef_access },
3055 { SYS_DESC(SYS_ICC_AP1R3_EL1), undef_access },
3056 { SYS_DESC(SYS_ICC_DIR_EL1), undef_access },
3057 { SYS_DESC(SYS_ICC_RPR_EL1), undef_access },
3058 { SYS_DESC(SYS_ICC_SGI1R_EL1), access_gic_sgi },
3059 { SYS_DESC(SYS_ICC_ASGI1R_EL1), access_gic_sgi },
3060 { SYS_DESC(SYS_ICC_SGI0R_EL1), access_gic_sgi },
3061 { SYS_DESC(SYS_ICC_IAR1_EL1), undef_access },
3062 { SYS_DESC(SYS_ICC_EOIR1_EL1), undef_access },
3063 { SYS_DESC(SYS_ICC_HPPIR1_EL1), undef_access },
3064 { SYS_DESC(SYS_ICC_BPR1_EL1), undef_access },
3065 { SYS_DESC(SYS_ICC_CTLR_EL1), undef_access },
3066 { SYS_DESC(SYS_ICC_SRE_EL1), access_gic_sre },
3067 { SYS_DESC(SYS_ICC_IGRPEN0_EL1), undef_access },
3068 { SYS_DESC(SYS_ICC_IGRPEN1_EL1), undef_access },
3070 { SYS_DESC(SYS_CONTEXTIDR_EL1), access_vm_reg, reset_val, CONTEXTIDR_EL1, 0 },
3071 { SYS_DESC(SYS_TPIDR_EL1), NULL, reset_unknown, TPIDR_EL1 },
3073 { SYS_DESC(SYS_ACCDATA_EL1), undef_access },
3075 { SYS_DESC(SYS_SCXTNUM_EL1), undef_access },
3077 { SYS_DESC(SYS_CNTKCTL_EL1), NULL, reset_val, CNTKCTL_EL1, 0},
3079 { SYS_DESC(SYS_CCSIDR_EL1), access_ccsidr },
3080 { SYS_DESC(SYS_CLIDR_EL1), access_clidr, reset_clidr, CLIDR_EL1,
3081 .set_user = set_clidr, .val = ~CLIDR_EL1_RES0 },
3082 { SYS_DESC(SYS_CCSIDR2_EL1), undef_access },
3083 { SYS_DESC(SYS_SMIDR_EL1), undef_access },
3084 IMPLEMENTATION_ID(AIDR_EL1, GENMASK_ULL(63, 0)),
3085 { SYS_DESC(SYS_CSSELR_EL1), access_csselr, reset_unknown, CSSELR_EL1 },
3086 ID_FILTERED(CTR_EL0, ctr_el0,
3087 CTR_EL0_DIC_MASK |
3088 CTR_EL0_IDC_MASK |
3089 CTR_EL0_DminLine_MASK |
3090 CTR_EL0_L1Ip_MASK |
3091 CTR_EL0_IminLine_MASK),
3092 { SYS_DESC(SYS_SVCR), undef_access, reset_val, SVCR, 0, .visibility = sme_visibility },
3093 { SYS_DESC(SYS_FPMR), undef_access, reset_val, FPMR, 0, .visibility = fp8_visibility },
3095 { PMU_SYS_REG(PMCR_EL0), .access = access_pmcr, .reset = reset_pmcr,
3096 .reg = PMCR_EL0, .get_user = get_pmcr, .set_user = set_pmcr },
3097 { PMU_SYS_REG(PMCNTENSET_EL0),
3098 .access = access_pmcnten, .reg = PMCNTENSET_EL0,
3099 .get_user = get_pmreg, .set_user = set_pmreg },
3100 { PMU_SYS_REG(PMCNTENCLR_EL0),
3101 .access = access_pmcnten, .reg = PMCNTENSET_EL0,
3102 .get_user = get_pmreg, .set_user = set_pmreg },
3103 { PMU_SYS_REG(PMOVSCLR_EL0),
3104 .access = access_pmovs, .reg = PMOVSSET_EL0,
3105 .get_user = get_pmreg, .set_user = set_pmreg },
3106 /*
3107 * PM_SWINC_EL0 is exposed to userspace as RAZ/WI, as it was
3108 * previously (and pointlessly) advertised in the past...
3109 */
3110 { PMU_SYS_REG(PMSWINC_EL0),
3111 .get_user = get_raz_reg, .set_user = set_wi_reg,
3112 .access = access_pmswinc, .reset = NULL },
3113 { PMU_SYS_REG(PMSELR_EL0),
3114 .access = access_pmselr, .reset = reset_pmselr, .reg = PMSELR_EL0 },
3115 { PMU_SYS_REG(PMCEID0_EL0),
3116 .access = access_pmceid, .reset = NULL },
3117 { PMU_SYS_REG(PMCEID1_EL0),
3118 .access = access_pmceid, .reset = NULL },
3119 { PMU_SYS_REG(PMCCNTR_EL0),
3120 .access = access_pmu_evcntr, .reset = reset_unknown,
3121 .reg = PMCCNTR_EL0, .get_user = get_pmu_evcntr,
3122 .set_user = set_pmu_evcntr },
3123 { PMU_SYS_REG(PMXEVTYPER_EL0),
3124 .access = access_pmu_evtyper, .reset = NULL },
3125 { PMU_SYS_REG(PMXEVCNTR_EL0),
3126 .access = access_pmu_evcntr, .reset = NULL },
3127 /*
3128 * PMUSERENR_EL0 resets as unknown in 64bit mode while it resets as zero
3129 * in 32bit mode. Here we choose to reset it as zero for consistency.
3130 */
3131 { PMU_SYS_REG(PMUSERENR_EL0), .access = access_pmuserenr,
3132 .reset = reset_val, .reg = PMUSERENR_EL0, .val = 0 },
3133 { PMU_SYS_REG(PMOVSSET_EL0),
3134 .access = access_pmovs, .reg = PMOVSSET_EL0,
3135 .get_user = get_pmreg, .set_user = set_pmreg },
3137 { SYS_DESC(SYS_POR_EL0), NULL, reset_unknown, POR_EL0,
3138 .visibility = s1poe_visibility },
3139 { SYS_DESC(SYS_TPIDR_EL0), NULL, reset_unknown, TPIDR_EL0 },
3140 { SYS_DESC(SYS_TPIDRRO_EL0), NULL, reset_unknown, TPIDRRO_EL0 },
3141 { SYS_DESC(SYS_TPIDR2_EL0), undef_access },
3143 { SYS_DESC(SYS_SCXTNUM_EL0), undef_access },
3145 { SYS_DESC(SYS_AMCR_EL0), undef_access },
3146 { SYS_DESC(SYS_AMCFGR_EL0), undef_access },
3147 { SYS_DESC(SYS_AMCGCR_EL0), undef_access },
3148 { SYS_DESC(SYS_AMUSERENR_EL0), undef_access },
3149 { SYS_DESC(SYS_AMCNTENCLR0_EL0), undef_access },
3150 { SYS_DESC(SYS_AMCNTENSET0_EL0), undef_access },
3151 { SYS_DESC(SYS_AMCNTENCLR1_EL0), undef_access },
3152 { SYS_DESC(SYS_AMCNTENSET1_EL0), undef_access },
3153 AMU_AMEVCNTR0_EL0(0),
3154 AMU_AMEVCNTR0_EL0(1),
3155 AMU_AMEVCNTR0_EL0(2),
3156 AMU_AMEVCNTR0_EL0(3),
3157 AMU_AMEVCNTR0_EL0(4),
3158 AMU_AMEVCNTR0_EL0(5),
3159 AMU_AMEVCNTR0_EL0(6),
3160 AMU_AMEVCNTR0_EL0(7),
3161 AMU_AMEVCNTR0_EL0(8),
3162 AMU_AMEVCNTR0_EL0(9),
3163 AMU_AMEVCNTR0_EL0(10),
3164 AMU_AMEVCNTR0_EL0(11),
3165 AMU_AMEVCNTR0_EL0(12),
3166 AMU_AMEVCNTR0_EL0(13),
3167 AMU_AMEVCNTR0_EL0(14),
3168 AMU_AMEVCNTR0_EL0(15),
3169 AMU_AMEVTYPER0_EL0(0),
3170 AMU_AMEVTYPER0_EL0(1),
3171 AMU_AMEVTYPER0_EL0(2),
3172 AMU_AMEVTYPER0_EL0(3),
3173 AMU_AMEVTYPER0_EL0(4),
3174 AMU_AMEVTYPER0_EL0(5),
3175 AMU_AMEVTYPER0_EL0(6),
3176 AMU_AMEVTYPER0_EL0(7),
3177 AMU_AMEVTYPER0_EL0(8),
3178 AMU_AMEVTYPER0_EL0(9),
3179 AMU_AMEVTYPER0_EL0(10),
3180 AMU_AMEVTYPER0_EL0(11),
3181 AMU_AMEVTYPER0_EL0(12),
3182 AMU_AMEVTYPER0_EL0(13),
3183 AMU_AMEVTYPER0_EL0(14),
3184 AMU_AMEVTYPER0_EL0(15),
3185 AMU_AMEVCNTR1_EL0(0),
3186 AMU_AMEVCNTR1_EL0(1),
3187 AMU_AMEVCNTR1_EL0(2),
3188 AMU_AMEVCNTR1_EL0(3),
3189 AMU_AMEVCNTR1_EL0(4),
3190 AMU_AMEVCNTR1_EL0(5),
3191 AMU_AMEVCNTR1_EL0(6),
3192 AMU_AMEVCNTR1_EL0(7),
3193 AMU_AMEVCNTR1_EL0(8),
3194 AMU_AMEVCNTR1_EL0(9),
3195 AMU_AMEVCNTR1_EL0(10),
3196 AMU_AMEVCNTR1_EL0(11),
3197 AMU_AMEVCNTR1_EL0(12),
3198 AMU_AMEVCNTR1_EL0(13),
3199 AMU_AMEVCNTR1_EL0(14),
3200 AMU_AMEVCNTR1_EL0(15),
3201 AMU_AMEVTYPER1_EL0(0),
3202 AMU_AMEVTYPER1_EL0(1),
3203 AMU_AMEVTYPER1_EL0(2),
3204 AMU_AMEVTYPER1_EL0(3),
3205 AMU_AMEVTYPER1_EL0(4),
3206 AMU_AMEVTYPER1_EL0(5),
3207 AMU_AMEVTYPER1_EL0(6),
3208 AMU_AMEVTYPER1_EL0(7),
3209 AMU_AMEVTYPER1_EL0(8),
3210 AMU_AMEVTYPER1_EL0(9),
3211 AMU_AMEVTYPER1_EL0(10),
3212 AMU_AMEVTYPER1_EL0(11),
3213 AMU_AMEVTYPER1_EL0(12),
3214 AMU_AMEVTYPER1_EL0(13),
3215 AMU_AMEVTYPER1_EL0(14),
3216 AMU_AMEVTYPER1_EL0(15),
3218 { SYS_DESC(SYS_CNTPCT_EL0), access_arch_timer },
3219 { SYS_DESC(SYS_CNTVCT_EL0), access_arch_timer },
3220 { SYS_DESC(SYS_CNTPCTSS_EL0), access_arch_timer },
3221 { SYS_DESC(SYS_CNTVCTSS_EL0), access_arch_timer },
3222 { SYS_DESC(SYS_CNTP_TVAL_EL0), access_arch_timer },
3223 { SYS_DESC(SYS_CNTP_CTL_EL0), access_arch_timer },
3224 { SYS_DESC(SYS_CNTP_CVAL_EL0), access_arch_timer },
3226 { SYS_DESC(SYS_CNTV_TVAL_EL0), access_arch_timer },
3227 { SYS_DESC(SYS_CNTV_CTL_EL0), access_arch_timer },
3228 { SYS_DESC(SYS_CNTV_CVAL_EL0), access_arch_timer },
3230 /* PMEVCNTRn_EL0 */
3231 PMU_PMEVCNTR_EL0(0),
3232 PMU_PMEVCNTR_EL0(1),
3233 PMU_PMEVCNTR_EL0(2),
3234 PMU_PMEVCNTR_EL0(3),
3235 PMU_PMEVCNTR_EL0(4),
3236 PMU_PMEVCNTR_EL0(5),
3237 PMU_PMEVCNTR_EL0(6),
3238 PMU_PMEVCNTR_EL0(7),
3239 PMU_PMEVCNTR_EL0(8),
3240 PMU_PMEVCNTR_EL0(9),
3241 PMU_PMEVCNTR_EL0(10),
3242 PMU_PMEVCNTR_EL0(11),
3243 PMU_PMEVCNTR_EL0(12),
3244 PMU_PMEVCNTR_EL0(13),
3245 PMU_PMEVCNTR_EL0(14),
3246 PMU_PMEVCNTR_EL0(15),
3247 PMU_PMEVCNTR_EL0(16),
3248 PMU_PMEVCNTR_EL0(17),
3249 PMU_PMEVCNTR_EL0(18),
3250 PMU_PMEVCNTR_EL0(19),
3251 PMU_PMEVCNTR_EL0(20),
3252 PMU_PMEVCNTR_EL0(21),
3253 PMU_PMEVCNTR_EL0(22),
3254 PMU_PMEVCNTR_EL0(23),
3255 PMU_PMEVCNTR_EL0(24),
3256 PMU_PMEVCNTR_EL0(25),
3257 PMU_PMEVCNTR_EL0(26),
3258 PMU_PMEVCNTR_EL0(27),
3259 PMU_PMEVCNTR_EL0(28),
3260 PMU_PMEVCNTR_EL0(29),
3261 PMU_PMEVCNTR_EL0(30),
3262 /* PMEVTYPERn_EL0 */
3263 PMU_PMEVTYPER_EL0(0),
3264 PMU_PMEVTYPER_EL0(1),
3265 PMU_PMEVTYPER_EL0(2),
3266 PMU_PMEVTYPER_EL0(3),
3267 PMU_PMEVTYPER_EL0(4),
3268 PMU_PMEVTYPER_EL0(5),
3269 PMU_PMEVTYPER_EL0(6),
3270 PMU_PMEVTYPER_EL0(7),
3271 PMU_PMEVTYPER_EL0(8),
3272 PMU_PMEVTYPER_EL0(9),
3273 PMU_PMEVTYPER_EL0(10),
3274 PMU_PMEVTYPER_EL0(11),
3275 PMU_PMEVTYPER_EL0(12),
3276 PMU_PMEVTYPER_EL0(13),
3277 PMU_PMEVTYPER_EL0(14),
3278 PMU_PMEVTYPER_EL0(15),
3279 PMU_PMEVTYPER_EL0(16),
3280 PMU_PMEVTYPER_EL0(17),
3281 PMU_PMEVTYPER_EL0(18),
3282 PMU_PMEVTYPER_EL0(19),
3283 PMU_PMEVTYPER_EL0(20),
3284 PMU_PMEVTYPER_EL0(21),
3285 PMU_PMEVTYPER_EL0(22),
3286 PMU_PMEVTYPER_EL0(23),
3287 PMU_PMEVTYPER_EL0(24),
3288 PMU_PMEVTYPER_EL0(25),
3289 PMU_PMEVTYPER_EL0(26),
3290 PMU_PMEVTYPER_EL0(27),
3291 PMU_PMEVTYPER_EL0(28),
3292 PMU_PMEVTYPER_EL0(29),
3293 PMU_PMEVTYPER_EL0(30),
3294 /*
3295 * PMCCFILTR_EL0 resets as unknown in 64bit mode while it resets as zero
3296 * in 32bit mode. Here we choose to reset it as zero for consistency.
3297 */
3298 { PMU_SYS_REG(PMCCFILTR_EL0), .access = access_pmu_evtyper,
3299 .reset = reset_val, .reg = PMCCFILTR_EL0, .val = 0 },
3301 EL2_REG_VNCR(VPIDR_EL2, reset_unknown, 0),
3302 EL2_REG_VNCR(VMPIDR_EL2, reset_unknown, 0),
3303 EL2_REG(SCTLR_EL2, access_rw, reset_val, SCTLR_EL2_RES1),
3304 EL2_REG(ACTLR_EL2, access_rw, reset_val, 0),
3305 EL2_REG_VNCR(HCR_EL2, reset_hcr, 0),
3306 EL2_REG(MDCR_EL2, access_mdcr, reset_mdcr, 0),
3307 EL2_REG(CPTR_EL2, access_rw, reset_val, CPTR_NVHE_EL2_RES1),
3308 EL2_REG_VNCR(HSTR_EL2, reset_val, 0),
3309 EL2_REG_VNCR(HFGRTR_EL2, reset_val, 0),
3310 EL2_REG_VNCR(HFGWTR_EL2, reset_val, 0),
3311 EL2_REG_VNCR(HFGITR_EL2, reset_val, 0),
3312 EL2_REG_VNCR(HACR_EL2, reset_val, 0),
3314 EL2_REG_FILTERED(ZCR_EL2, access_zcr_el2, reset_val, 0,
3315 sve_el2_visibility),
3317 EL2_REG_VNCR(HCRX_EL2, reset_val, 0),
3319 EL2_REG(TTBR0_EL2, access_rw, reset_val, 0),
3320 EL2_REG(TTBR1_EL2, access_rw, reset_val, 0),
3321 EL2_REG(TCR_EL2, access_rw, reset_val, TCR_EL2_RES1),
3322 EL2_REG_FILTERED(TCR2_EL2, access_rw, reset_val, TCR2_EL2_RES1,
3323 tcr2_el2_visibility),
3324 EL2_REG_VNCR(VTTBR_EL2, reset_val, 0),
3325 EL2_REG_VNCR(VTCR_EL2, reset_val, 0),
3326 EL2_REG_FILTERED(VNCR_EL2, bad_vncr_trap, reset_val, 0,
3327 vncr_el2_visibility),
3329 { SYS_DESC(SYS_DACR32_EL2), undef_access, reset_unknown, DACR32_EL2 },
3330 EL2_REG_VNCR(HDFGRTR_EL2, reset_val, 0),
3331 EL2_REG_VNCR(HDFGWTR_EL2, reset_val, 0),
3332 EL2_REG_VNCR(HAFGRTR_EL2, reset_val, 0),
3333 EL2_REG_REDIR(SPSR_EL2, reset_val, 0),
3334 EL2_REG_REDIR(ELR_EL2, reset_val, 0),
3335 { SYS_DESC(SYS_SP_EL1), access_sp_el1},
3337 /* AArch32 SPSR_* are RES0 if trapped from a NV guest */
3338 { SYS_DESC(SYS_SPSR_irq), .access = trap_raz_wi },
3339 { SYS_DESC(SYS_SPSR_abt), .access = trap_raz_wi },
3340 { SYS_DESC(SYS_SPSR_und), .access = trap_raz_wi },
3341 { SYS_DESC(SYS_SPSR_fiq), .access = trap_raz_wi },
3343 { SYS_DESC(SYS_IFSR32_EL2), undef_access, reset_unknown, IFSR32_EL2 },
3344 EL2_REG(AFSR0_EL2, access_rw, reset_val, 0),
3345 EL2_REG(AFSR1_EL2, access_rw, reset_val, 0),
3346 EL2_REG_REDIR(ESR_EL2, reset_val, 0),
3347 { SYS_DESC(SYS_FPEXC32_EL2), undef_access, reset_val, FPEXC32_EL2, 0x700 },
3349 EL2_REG_REDIR(FAR_EL2, reset_val, 0),
3350 EL2_REG(HPFAR_EL2, access_rw, reset_val, 0),
3352 EL2_REG(MAIR_EL2, access_rw, reset_val, 0),
3353 EL2_REG_FILTERED(PIRE0_EL2, access_rw, reset_val, 0,
3354 s1pie_el2_visibility),
3355 EL2_REG_FILTERED(PIR_EL2, access_rw, reset_val, 0,
3356 s1pie_el2_visibility),
3357 EL2_REG_FILTERED(POR_EL2, access_rw, reset_val, 0,
3358 s1poe_el2_visibility),
3359 EL2_REG(AMAIR_EL2, access_rw, reset_val, 0),
3360 { SYS_DESC(SYS_MPAMHCR_EL2), undef_access },
3361 { SYS_DESC(SYS_MPAMVPMV_EL2), undef_access },
3362 { SYS_DESC(SYS_MPAM2_EL2), undef_access },
3363 { SYS_DESC(SYS_MPAMVPM0_EL2), undef_access },
3364 { SYS_DESC(SYS_MPAMVPM1_EL2), undef_access },
3365 { SYS_DESC(SYS_MPAMVPM2_EL2), undef_access },
3366 { SYS_DESC(SYS_MPAMVPM3_EL2), undef_access },
3367 { SYS_DESC(SYS_MPAMVPM4_EL2), undef_access },
3368 { SYS_DESC(SYS_MPAMVPM5_EL2), undef_access },
3369 { SYS_DESC(SYS_MPAMVPM6_EL2), undef_access },
3370 { SYS_DESC(SYS_MPAMVPM7_EL2), undef_access },
3372 EL2_REG(VBAR_EL2, access_rw, reset_val, 0),
3373 EL2_REG(RVBAR_EL2, access_rw, reset_val, 0),
3374 { SYS_DESC(SYS_RMR_EL2), undef_access },
3376 EL2_REG_VNCR(ICH_AP0R0_EL2, reset_val, 0),
3377 EL2_REG_VNCR(ICH_AP0R1_EL2, reset_val, 0),
3378 EL2_REG_VNCR(ICH_AP0R2_EL2, reset_val, 0),
3379 EL2_REG_VNCR(ICH_AP0R3_EL2, reset_val, 0),
3380 EL2_REG_VNCR(ICH_AP1R0_EL2, reset_val, 0),
3381 EL2_REG_VNCR(ICH_AP1R1_EL2, reset_val, 0),
3382 EL2_REG_VNCR(ICH_AP1R2_EL2, reset_val, 0),
3383 EL2_REG_VNCR(ICH_AP1R3_EL2, reset_val, 0),
3385 { SYS_DESC(SYS_ICC_SRE_EL2), access_gic_sre },
3387 EL2_REG_VNCR(ICH_HCR_EL2, reset_val, 0),
3388 { SYS_DESC(SYS_ICH_VTR_EL2), access_gic_vtr },
3389 { SYS_DESC(SYS_ICH_MISR_EL2), access_gic_misr },
3390 { SYS_DESC(SYS_ICH_EISR_EL2), access_gic_eisr },
3391 { SYS_DESC(SYS_ICH_ELRSR_EL2), access_gic_elrsr },
3392 EL2_REG_VNCR(ICH_VMCR_EL2, reset_val, 0),
3394 EL2_REG_VNCR(ICH_LR0_EL2, reset_val, 0),
3395 EL2_REG_VNCR(ICH_LR1_EL2, reset_val, 0),
3396 EL2_REG_VNCR(ICH_LR2_EL2, reset_val, 0),
3397 EL2_REG_VNCR(ICH_LR3_EL2, reset_val, 0),
3398 EL2_REG_VNCR(ICH_LR4_EL2, reset_val, 0),
3399 EL2_REG_VNCR(ICH_LR5_EL2, reset_val, 0),
3400 EL2_REG_VNCR(ICH_LR6_EL2, reset_val, 0),
3401 EL2_REG_VNCR(ICH_LR7_EL2, reset_val, 0),
3402 EL2_REG_VNCR(ICH_LR8_EL2, reset_val, 0),
3403 EL2_REG_VNCR(ICH_LR9_EL2, reset_val, 0),
3404 EL2_REG_VNCR(ICH_LR10_EL2, reset_val, 0),
3405 EL2_REG_VNCR(ICH_LR11_EL2, reset_val, 0),
3406 EL2_REG_VNCR(ICH_LR12_EL2, reset_val, 0),
3407 EL2_REG_VNCR(ICH_LR13_EL2, reset_val, 0),
3408 EL2_REG_VNCR(ICH_LR14_EL2, reset_val, 0),
3409 EL2_REG_VNCR(ICH_LR15_EL2, reset_val, 0),
3411 EL2_REG(CONTEXTIDR_EL2, access_rw, reset_val, 0),
3412 EL2_REG(TPIDR_EL2, access_rw, reset_val, 0),
3414 EL2_REG_VNCR(CNTVOFF_EL2, reset_val, 0),
3415 EL2_REG(CNTHCTL_EL2, access_rw, reset_val, 0),
3416 { SYS_DESC(SYS_CNTHP_TVAL_EL2), access_arch_timer },
3417 EL2_REG(CNTHP_CTL_EL2, access_arch_timer, reset_val, 0),
3418 EL2_REG(CNTHP_CVAL_EL2, access_arch_timer, reset_val, 0),
3420 { SYS_DESC(SYS_CNTHV_TVAL_EL2), access_hv_timer },
3421 EL2_REG(CNTHV_CTL_EL2, access_hv_timer, reset_val, 0),
3422 EL2_REG(CNTHV_CVAL_EL2, access_hv_timer, reset_val, 0),
3424 { SYS_DESC(SYS_CNTKCTL_EL12), access_cntkctl_el12 },
3426 { SYS_DESC(SYS_CNTP_TVAL_EL02), access_arch_timer },
3427 { SYS_DESC(SYS_CNTP_CTL_EL02), access_arch_timer },
3428 { SYS_DESC(SYS_CNTP_CVAL_EL02), access_arch_timer },
3430 { SYS_DESC(SYS_CNTV_TVAL_EL02), access_arch_timer },
3431 { SYS_DESC(SYS_CNTV_CTL_EL02), access_arch_timer },
3432 { SYS_DESC(SYS_CNTV_CVAL_EL02), access_arch_timer },
3434 EL2_REG(SP_EL2, NULL, reset_unknown, 0),
3435 };
3437 static bool handle_at_s1e01(struct kvm_vcpu *vcpu, struct sys_reg_params *p,
3438 const struct sys_reg_desc *r)
3439 {
3440 u32 op = sys_insn(p->Op0, p->Op1, p->CRn, p->CRm, p->Op2);
3442 __kvm_at_s1e01(vcpu, op, p->regval);
3444 return true;
3445 }
3447 static bool handle_at_s1e2(struct kvm_vcpu *vcpu, struct sys_reg_params *p,
3448 const struct sys_reg_desc *r)
3449 {
3450 u32 op = sys_insn(p->Op0, p->Op1, p->CRn, p->CRm, p->Op2);
3452 /* There is no FGT associated with AT S1E2A :-( */
3453 if (op == OP_AT_S1E2A &&
3454 !kvm_has_feat(vcpu->kvm, ID_AA64ISAR2_EL1, ATS1A, IMP)) {
3455 kvm_inject_undefined(vcpu);
3456 return false;
3457 }
3459 __kvm_at_s1e2(vcpu, op, p->regval);
3461 return true;
3462 }
3464 static bool handle_at_s12(struct kvm_vcpu *vcpu, struct sys_reg_params *p,
3465 const struct sys_reg_desc *r)
3466 {
3467 u32 op = sys_insn(p->Op0, p->Op1, p->CRn, p->CRm, p->Op2);
3469 __kvm_at_s12(vcpu, op, p->regval);
3471 return true;
3472 }
3474 static bool kvm_supported_tlbi_s12_op(struct kvm_vcpu *vcpu, u32 instr)
3475 {
3476 struct kvm *kvm = vcpu->kvm;
3477 u8 CRm = sys_reg_CRm(instr);
3479 if (sys_reg_CRn(instr) == TLBI_CRn_nXS &&
3480 !kvm_has_feat(kvm, ID_AA64ISAR1_EL1, XS, IMP))
3481 return false;
3483 if (CRm == TLBI_CRm_nROS &&
3484 !kvm_has_feat(kvm, ID_AA64ISAR0_EL1, TLB, OS))
3485 return false;
3487 return true;
3488 }
3490 static bool handle_alle1is(struct kvm_vcpu *vcpu, struct sys_reg_params *p,
3491 const struct sys_reg_desc *r)
3492 {
3493 u32 sys_encoding = sys_insn(p->Op0, p->Op1, p->CRn, p->CRm, p->Op2);
3495 if (!kvm_supported_tlbi_s12_op(vcpu, sys_encoding))
3496 return undef_access(vcpu, p, r);
3498 write_lock(&vcpu->kvm->mmu_lock);
3500 /*
3501 * Drop all shadow S2s, resulting in S1/S2 TLBIs for each of the
3502 * corresponding VMIDs.
3503 */
3504 kvm_nested_s2_unmap(vcpu->kvm, true);
3506 write_unlock(&vcpu->kvm->mmu_lock);
3508 return true;
3509 }
3511 static bool kvm_supported_tlbi_ipas2_op(struct kvm_vcpu *vcpu, u32 instr)
3512 {
3513 struct kvm *kvm = vcpu->kvm;
3514 u8 CRm = sys_reg_CRm(instr);
3515 u8 Op2 = sys_reg_Op2(instr);
3517 if (sys_reg_CRn(instr) == TLBI_CRn_nXS &&
3518 !kvm_has_feat(kvm, ID_AA64ISAR1_EL1, XS, IMP))
3519 return false;
3521 if (CRm == TLBI_CRm_IPAIS && (Op2 == 2 || Op2 == 6) &&
3522 !kvm_has_feat(kvm, ID_AA64ISAR0_EL1, TLB, RANGE))
3523 return false;
3525 if (CRm == TLBI_CRm_IPAONS && (Op2 == 0 || Op2 == 4) &&
3526 !kvm_has_feat(kvm, ID_AA64ISAR0_EL1, TLB, OS))
3527 return false;
3529 if (CRm == TLBI_CRm_IPAONS && (Op2 == 3 || Op2 == 7) &&
3530 !kvm_has_feat(kvm, ID_AA64ISAR0_EL1, TLB, RANGE))
3531 return false;
3533 return true;
3534 }
3536 /* Only defined here as this is an internal "abstraction" */
3537 union tlbi_info {
3538 struct {
3539 u64 start;
3540 u64 size;
3541 } range;
3543 struct {
3544 u64 addr;
3545 } ipa;
3547 struct {
3548 u64 addr;
3549 u32 encoding;
3550 } va;
3551 };
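/*
 * The TLBI handlers below pass a transient instance of this union as a
 * compound literal, for example:
 *
 *   kvm_s2_mmu_iterate_by_vmid(kvm, vmid,
 *                              &(union tlbi_info) {
 *                                      .range = {
 *                                              .start = 0,
 *                                              .size = limit,
 *                                      },
 *                              },
 *                              s2_mmu_unmap_range);
 *
 * and each callback reads the member matching the TLBI class it
 * services.
 */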
3553 static void s2_mmu_unmap_range(struct kvm_s2_mmu *mmu,
3554 const union tlbi_info *info)
3555 {
3556 /*
3557 * The unmap operation is allowed to drop the MMU lock and block, which
3558 * means that @mmu could be used for a different context than the one
3559 * currently being invalidated.
3560 *
3561 * This behavior is still safe, as:
3562 *
3563 * 1) The vCPU(s) that recycled the MMU are responsible for invalidating
3564 * the entire MMU before reusing it, which still honors the intent
3565 * of a TLBI.
3566 *
3567 * 2) Until the guest TLBI instruction is 'retired' (i.e. increment PC
3568 * and ERET to the guest), other vCPUs are allowed to use stale
3569 * translations.
3570 *
3571 * 3) Accidentally unmapping an unrelated MMU context is nonfatal, and
3572 * at worst may cause more aborts for shadow stage-2 fills.
3573 *
3574 * Dropping the MMU lock also implies that shadow stage-2 fills could
3575 * happen behind the back of the TLBI. This is still safe, though, as
3576 * the L1 needs to put its stage-2 in a consistent state before doing
3577 * the TLBI.
3578 */
3579 kvm_stage2_unmap_range(mmu, info->range.start, info->range.size, true);
3580 }
3582 static bool handle_vmalls12e1is(struct kvm_vcpu *vcpu, struct sys_reg_params *p,
3583 const struct sys_reg_desc *r)
3584 {
3585 u32 sys_encoding = sys_insn(p->Op0, p->Op1, p->CRn, p->CRm, p->Op2);
3586 u64 limit, vttbr;
3588 if (!kvm_supported_tlbi_s12_op(vcpu, sys_encoding))
3589 return undef_access(vcpu, p, r);
3591 vttbr = vcpu_read_sys_reg(vcpu, VTTBR_EL2);
3592 limit = BIT_ULL(kvm_get_pa_bits(vcpu->kvm));
3594 kvm_s2_mmu_iterate_by_vmid(vcpu->kvm, get_vmid(vttbr),
3595 &(union tlbi_info) {
3596 .range = {
3597 .start = 0,
3598 .size = limit,
3599 },
3600 },
3601 s2_mmu_unmap_range);
3603 return true;
3604 }
3606 static bool handle_ripas2e1is(struct kvm_vcpu *vcpu, struct sys_reg_params *p,
3607 const struct sys_reg_desc *r)
3608 {
3609 u32 sys_encoding = sys_insn(p->Op0, p->Op1, p->CRn, p->CRm, p->Op2);
3610 u64 vttbr = vcpu_read_sys_reg(vcpu, VTTBR_EL2);
3611 u64 base, range;
3613 if (!kvm_supported_tlbi_ipas2_op(vcpu, sys_encoding))
3614 return undef_access(vcpu, p, r);
3616 /*
3617 * Because the shadow S2 structure doesn't necessarily reflect that
3618 * of the guest's S2 (different base granule size, for example), we
3619 * decide to ignore TTL and only use the described range.
3620 */
3621 base = decode_range_tlbi(p->regval, &range, NULL);
3623 kvm_s2_mmu_iterate_by_vmid(vcpu->kvm, get_vmid(vttbr),
3624 &(union tlbi_info) {
3625 .range = {
3626 .start = base,
3627 .size = range,
3628 },
3629 },
3630 s2_mmu_unmap_range);
3632 return true;
3633 }
3635 static void s2_mmu_unmap_ipa(struct kvm_s2_mmu *mmu,
3636 const union tlbi_info *info)
3637 {
3638 unsigned long max_size;
3639 u64 base_addr;
3641 /*
3642 * We drop a number of things from the supplied value:
3643 *
3644 * - NS bit: we're non-secure only.
3645 *
3646 * - IPA[51:48]: We don't support 52bit IPA just yet...
3647 *
3648 * And of course, adjust the IPA to be on an actual address.
3649 */
3650 base_addr = (info->ipa.addr & GENMASK_ULL(35, 0)) << 12;
3651 max_size = compute_tlb_inval_range(mmu, info->ipa.addr);
3652 base_addr &= ~(max_size - 1);
3654 /*
3655 * See comment in s2_mmu_unmap_range() for why this is allowed to
3656 * reschedule.
3657 */
3658 kvm_stage2_unmap_range(mmu, base_addr, max_size, true);
3659 }
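/*
 * Worked example (hypothetical value): TLBI IPAS2E1IS carries
 * IPA[47:12] in Xt[35:0], so info->ipa.addr = 0x80000 names IPA
 * 0x80000 << 12 = 0x80000000. If compute_tlb_inval_range() returns
 * 2MiB, base_addr is then rounded down to a 2MiB boundary before the
 * unmap.
 */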
3661 static bool handle_ipas2e1is(struct kvm_vcpu *vcpu, struct sys_reg_params *p,
3662 const struct sys_reg_desc *r)
3663 {
3664 u32 sys_encoding = sys_insn(p->Op0, p->Op1, p->CRn, p->CRm, p->Op2);
3665 u64 vttbr = vcpu_read_sys_reg(vcpu, VTTBR_EL2);
3667 if (!kvm_supported_tlbi_ipas2_op(vcpu, sys_encoding))
3668 return undef_access(vcpu, p, r);
3670 kvm_s2_mmu_iterate_by_vmid(vcpu->kvm, get_vmid(vttbr),
3671 &(union tlbi_info) {
3672 .ipa = {
3673 .addr = p->regval,
3674 },
3675 },
3676 s2_mmu_unmap_ipa);
3678 return true;
3679 }
3681 static void s2_mmu_tlbi_s1e1(struct kvm_s2_mmu *mmu,
3682 const union tlbi_info *info)
3683 {
3684 WARN_ON(__kvm_tlbi_s1e2(mmu, info->va.addr, info->va.encoding));
3685 }
3687 static bool handle_tlbi_el2(struct kvm_vcpu *vcpu, struct sys_reg_params *p,
3688 const struct sys_reg_desc *r)
3689 {
3690 u32 sys_encoding = sys_insn(p->Op0, p->Op1, p->CRn, p->CRm, p->Op2);
3692 if (!kvm_supported_tlbi_s1e2_op(vcpu, sys_encoding))
3693 return undef_access(vcpu, p, r);
3695 kvm_handle_s1e2_tlbi(vcpu, sys_encoding, p->regval);
3697 return true;
3698 }
3699 static bool handle_tlbi_el1(struct kvm_vcpu *vcpu, struct sys_reg_params *p,
3700 const struct sys_reg_desc *r)
3701 {
3702 u32 sys_encoding = sys_insn(p->Op0, p->Op1, p->CRn, p->CRm, p->Op2);
3704 /*
3705 * If we're here, this is because we've trapped on an EL1 TLBI
3706 * instruction that affects the EL1 translation regime while
3707 * we're running in a context that doesn't allow us to let the
3708 * HW do its thing (aka vEL2):
3709 *
3710 * - HCR_EL2.E2H == 0 : a non-VHE guest
3711 * - HCR_EL2.{E2H,TGE} == { 1, 0 } : a VHE guest in guest mode
3712 *
3713 * Another possibility is that we are invalidating the EL2 context
3714 * using EL1 instructions, but that we landed here because we need
3715 * additional invalidation for structures that are not held in the
3716 * CPU TLBs (such as the VNCR pseudo-TLB and its EL2 mapping). In
3717 * that case, we are guaranteed that HCR_EL2.{E2H,TGE} == { 1, 1 }
3718 * as we don't allow an NV-capable L1 in a nVHE configuration.
3719 *
3720 * We don't expect these helpers to ever be called when running
3721 * in a vEL1 context.
3722 */
3724 WARN_ON(!vcpu_is_el2(vcpu));
3726 if (!kvm_supported_tlbi_s1e1_op(vcpu, sys_encoding))
3727 return undef_access(vcpu, p, r);
3729 if (vcpu_el2_e2h_is_set(vcpu) && vcpu_el2_tge_is_set(vcpu)) {
3730 kvm_handle_s1e2_tlbi(vcpu, sys_encoding, p->regval);
3731 return true;
3732 }
3734 kvm_s2_mmu_iterate_by_vmid(vcpu->kvm,
3735 get_vmid(__vcpu_sys_reg(vcpu, VTTBR_EL2)),
3736 &(union tlbi_info) {
3737 .va = {
3738 .addr = p->regval,
3739 .encoding = sys_encoding,
3740 },
3741 },
3742 s2_mmu_tlbi_s1e1);
3744 return true;
3745 }
3747 #define SYS_INSN(insn, access_fn) \
3748 { \
3749 SYS_DESC(OP_##insn), \
3750 .access = (access_fn), \
3751 }
3753 static struct sys_reg_desc sys_insn_descs[] = {
3754 { SYS_DESC(SYS_DC_ISW), access_dcsw },
3755 { SYS_DESC(SYS_DC_IGSW), access_dcgsw },
3756 { SYS_DESC(SYS_DC_IGDSW), access_dcgsw },
3758 SYS_INSN(AT_S1E1R, handle_at_s1e01),
3759 SYS_INSN(AT_S1E1W, handle_at_s1e01),
3760 SYS_INSN(AT_S1E0R, handle_at_s1e01),
3761 SYS_INSN(AT_S1E0W, handle_at_s1e01),
3762 SYS_INSN(AT_S1E1RP, handle_at_s1e01),
3763 SYS_INSN(AT_S1E1WP, handle_at_s1e01),
3765 { SYS_DESC(SYS_DC_CSW), access_dcsw },
3766 { SYS_DESC(SYS_DC_CGSW), access_dcgsw },
3767 { SYS_DESC(SYS_DC_CGDSW), access_dcgsw },
3768 { SYS_DESC(SYS_DC_CISW), access_dcsw },
3769 { SYS_DESC(SYS_DC_CIGSW), access_dcgsw },
3770 { SYS_DESC(SYS_DC_CIGDSW), access_dcgsw },
3772 SYS_INSN(TLBI_VMALLE1OS, handle_tlbi_el1),
3773 SYS_INSN(TLBI_VAE1OS, handle_tlbi_el1),
3774 SYS_INSN(TLBI_ASIDE1OS, handle_tlbi_el1),
3775 SYS_INSN(TLBI_VAAE1OS, handle_tlbi_el1),
3776 SYS_INSN(TLBI_VALE1OS, handle_tlbi_el1),
3777 SYS_INSN(TLBI_VAALE1OS, handle_tlbi_el1),
3779 SYS_INSN(TLBI_RVAE1IS, handle_tlbi_el1),
3780 SYS_INSN(TLBI_RVAAE1IS, handle_tlbi_el1),
3781 SYS_INSN(TLBI_RVALE1IS, handle_tlbi_el1),
3782 SYS_INSN(TLBI_RVAALE1IS, handle_tlbi_el1),
3784 SYS_INSN(TLBI_VMALLE1IS, handle_tlbi_el1),
3785 SYS_INSN(TLBI_VAE1IS, handle_tlbi_el1),
3786 SYS_INSN(TLBI_ASIDE1IS, handle_tlbi_el1),
3787 SYS_INSN(TLBI_VAAE1IS, handle_tlbi_el1),
3788 SYS_INSN(TLBI_VALE1IS, handle_tlbi_el1),
3789 SYS_INSN(TLBI_VAALE1IS, handle_tlbi_el1),
3791 SYS_INSN(TLBI_RVAE1OS, handle_tlbi_el1),
3792 SYS_INSN(TLBI_RVAAE1OS, handle_tlbi_el1),
3793 SYS_INSN(TLBI_RVALE1OS, handle_tlbi_el1),
3794 SYS_INSN(TLBI_RVAALE1OS, handle_tlbi_el1),
3796 SYS_INSN(TLBI_RVAE1, handle_tlbi_el1),
3797 SYS_INSN(TLBI_RVAAE1, handle_tlbi_el1),
3798 SYS_INSN(TLBI_RVALE1, handle_tlbi_el1),
3799 SYS_INSN(TLBI_RVAALE1, handle_tlbi_el1),
3801 SYS_INSN(TLBI_VMALLE1, handle_tlbi_el1),
3802 SYS_INSN(TLBI_VAE1, handle_tlbi_el1),
3803 SYS_INSN(TLBI_ASIDE1, handle_tlbi_el1),
3804 SYS_INSN(TLBI_VAAE1, handle_tlbi_el1),
3805 SYS_INSN(TLBI_VALE1, handle_tlbi_el1),
3806 SYS_INSN(TLBI_VAALE1, handle_tlbi_el1),
3808 SYS_INSN(TLBI_VMALLE1OSNXS, handle_tlbi_el1),
3809 SYS_INSN(TLBI_VAE1OSNXS, handle_tlbi_el1),
3810 SYS_INSN(TLBI_ASIDE1OSNXS, handle_tlbi_el1),
3811 SYS_INSN(TLBI_VAAE1OSNXS, handle_tlbi_el1),
3812 SYS_INSN(TLBI_VALE1OSNXS, handle_tlbi_el1),
3813 SYS_INSN(TLBI_VAALE1OSNXS, handle_tlbi_el1),
3815 SYS_INSN(TLBI_RVAE1ISNXS, handle_tlbi_el1),
3816 SYS_INSN(TLBI_RVAAE1ISNXS, handle_tlbi_el1),
3817 SYS_INSN(TLBI_RVALE1ISNXS, handle_tlbi_el1),
3818 SYS_INSN(TLBI_RVAALE1ISNXS, handle_tlbi_el1),
3820 SYS_INSN(TLBI_VMALLE1ISNXS, handle_tlbi_el1),
3821 SYS_INSN(TLBI_VAE1ISNXS, handle_tlbi_el1),
3822 SYS_INSN(TLBI_ASIDE1ISNXS, handle_tlbi_el1),
3823 SYS_INSN(TLBI_VAAE1ISNXS, handle_tlbi_el1),
3824 SYS_INSN(TLBI_VALE1ISNXS, handle_tlbi_el1),
3825 SYS_INSN(TLBI_VAALE1ISNXS, handle_tlbi_el1),
3827 SYS_INSN(TLBI_RVAE1OSNXS, handle_tlbi_el1),
3828 SYS_INSN(TLBI_RVAAE1OSNXS, handle_tlbi_el1),
3829 SYS_INSN(TLBI_RVALE1OSNXS, handle_tlbi_el1),
3830 SYS_INSN(TLBI_RVAALE1OSNXS, handle_tlbi_el1),
3832 SYS_INSN(TLBI_RVAE1NXS, handle_tlbi_el1),
3833 SYS_INSN(TLBI_RVAAE1NXS, handle_tlbi_el1),
3834 SYS_INSN(TLBI_RVALE1NXS, handle_tlbi_el1),
3835 SYS_INSN(TLBI_RVAALE1NXS, handle_tlbi_el1),
3837 SYS_INSN(TLBI_VMALLE1NXS, handle_tlbi_el1),
3838 SYS_INSN(TLBI_VAE1NXS, handle_tlbi_el1),
3839 SYS_INSN(TLBI_ASIDE1NXS, handle_tlbi_el1),
3840 SYS_INSN(TLBI_VAAE1NXS, handle_tlbi_el1),
3841 SYS_INSN(TLBI_VALE1NXS, handle_tlbi_el1),
3842 SYS_INSN(TLBI_VAALE1NXS, handle_tlbi_el1),
3844 SYS_INSN(AT_S1E2R, handle_at_s1e2),
3845 SYS_INSN(AT_S1E2W, handle_at_s1e2),
3846 SYS_INSN(AT_S12E1R, handle_at_s12),
3847 SYS_INSN(AT_S12E1W, handle_at_s12),
3848 SYS_INSN(AT_S12E0R, handle_at_s12),
3849 SYS_INSN(AT_S12E0W, handle_at_s12),
3850 SYS_INSN(AT_S1E2A, handle_at_s1e2),
3852 SYS_INSN(TLBI_IPAS2E1IS, handle_ipas2e1is),
3853 SYS_INSN(TLBI_RIPAS2E1IS, handle_ripas2e1is),
3854 SYS_INSN(TLBI_IPAS2LE1IS, handle_ipas2e1is),
3855 SYS_INSN(TLBI_RIPAS2LE1IS, handle_ripas2e1is),
3857 SYS_INSN(TLBI_ALLE2OS, handle_tlbi_el2),
3858 SYS_INSN(TLBI_VAE2OS, handle_tlbi_el2),
3859 SYS_INSN(TLBI_ALLE1OS, handle_alle1is),
3860 SYS_INSN(TLBI_VALE2OS, handle_tlbi_el2),
3861 SYS_INSN(TLBI_VMALLS12E1OS, handle_vmalls12e1is),
3863 SYS_INSN(TLBI_RVAE2IS, handle_tlbi_el2),
3864 SYS_INSN(TLBI_RVALE2IS, handle_tlbi_el2),
3865 SYS_INSN(TLBI_ALLE2IS, handle_tlbi_el2),
3866 SYS_INSN(TLBI_VAE2IS, handle_tlbi_el2),
3868 SYS_INSN(TLBI_ALLE1IS, handle_alle1is),
3870 SYS_INSN(TLBI_VALE2IS, handle_tlbi_el2),
3872 SYS_INSN(TLBI_VMALLS12E1IS, handle_vmalls12e1is),
3873 SYS_INSN(TLBI_IPAS2E1OS, handle_ipas2e1is),
3874 SYS_INSN(TLBI_IPAS2E1, handle_ipas2e1is),
3875 SYS_INSN(TLBI_RIPAS2E1, handle_ripas2e1is),
3876 SYS_INSN(TLBI_RIPAS2E1OS, handle_ripas2e1is),
3877 SYS_INSN(TLBI_IPAS2LE1OS, handle_ipas2e1is),
3878 SYS_INSN(TLBI_IPAS2LE1, handle_ipas2e1is),
3879 SYS_INSN(TLBI_RIPAS2LE1, handle_ripas2e1is),
3880 SYS_INSN(TLBI_RIPAS2LE1OS, handle_ripas2e1is),
3881 SYS_INSN(TLBI_RVAE2OS, handle_tlbi_el2),
3882 SYS_INSN(TLBI_RVALE2OS, handle_tlbi_el2),
3883 SYS_INSN(TLBI_RVAE2, handle_tlbi_el2),
3884 SYS_INSN(TLBI_RVALE2, handle_tlbi_el2),
3885 SYS_INSN(TLBI_ALLE2, handle_tlbi_el2),
3886 SYS_INSN(TLBI_VAE2, handle_tlbi_el2),
3888 SYS_INSN(TLBI_ALLE1, handle_alle1is),
3890 SYS_INSN(TLBI_VALE2, handle_tlbi_el2),
3892 SYS_INSN(TLBI_VMALLS12E1, handle_vmalls12e1is),
3894 SYS_INSN(TLBI_IPAS2E1ISNXS, handle_ipas2e1is),
3895 SYS_INSN(TLBI_RIPAS2E1ISNXS, handle_ripas2e1is),
3896 SYS_INSN(TLBI_IPAS2LE1ISNXS, handle_ipas2e1is),
3897 SYS_INSN(TLBI_RIPAS2LE1ISNXS, handle_ripas2e1is),
3899 SYS_INSN(TLBI_ALLE2OSNXS, handle_tlbi_el2),
3900 SYS_INSN(TLBI_VAE2OSNXS, handle_tlbi_el2),
3901 SYS_INSN(TLBI_ALLE1OSNXS, handle_alle1is),
3902 SYS_INSN(TLBI_VALE2OSNXS, handle_tlbi_el2),
3903 SYS_INSN(TLBI_VMALLS12E1OSNXS, handle_vmalls12e1is),
3905 SYS_INSN(TLBI_RVAE2ISNXS, handle_tlbi_el2),
3906 SYS_INSN(TLBI_RVALE2ISNXS, handle_tlbi_el2),
3907 SYS_INSN(TLBI_ALLE2ISNXS, handle_tlbi_el2),
3908 SYS_INSN(TLBI_VAE2ISNXS, handle_tlbi_el2),
3910 SYS_INSN(TLBI_ALLE1ISNXS, handle_alle1is),
3911 SYS_INSN(TLBI_VALE2ISNXS, handle_tlbi_el2),
3912 SYS_INSN(TLBI_VMALLS12E1ISNXS, handle_vmalls12e1is),
3913 SYS_INSN(TLBI_IPAS2E1OSNXS, handle_ipas2e1is),
3914 SYS_INSN(TLBI_IPAS2E1NXS, handle_ipas2e1is),
3915 SYS_INSN(TLBI_RIPAS2E1NXS, handle_ripas2e1is),
3916 SYS_INSN(TLBI_RIPAS2E1OSNXS, handle_ripas2e1is),
3917 SYS_INSN(TLBI_IPAS2LE1OSNXS, handle_ipas2e1is),
3918 SYS_INSN(TLBI_IPAS2LE1NXS, handle_ipas2e1is),
3919 SYS_INSN(TLBI_RIPAS2LE1NXS, handle_ripas2e1is),
3920 SYS_INSN(TLBI_RIPAS2LE1OSNXS, handle_ripas2e1is),
3921 SYS_INSN(TLBI_RVAE2OSNXS, handle_tlbi_el2),
3922 SYS_INSN(TLBI_RVALE2OSNXS, handle_tlbi_el2),
3923 SYS_INSN(TLBI_RVAE2NXS, handle_tlbi_el2),
3924 SYS_INSN(TLBI_RVALE2NXS, handle_tlbi_el2),
3925 SYS_INSN(TLBI_ALLE2NXS, handle_tlbi_el2),
3926 SYS_INSN(TLBI_VAE2NXS, handle_tlbi_el2),
3927 SYS_INSN(TLBI_ALLE1NXS, handle_alle1is),
3928 SYS_INSN(TLBI_VALE2NXS, handle_tlbi_el2),
3929 SYS_INSN(TLBI_VMALLS12E1NXS, handle_vmalls12e1is),
3930 };
3932 static bool trap_dbgdidr(struct kvm_vcpu *vcpu,
3933 struct sys_reg_params *p,
3934 const struct sys_reg_desc *r)
3935 {
3936 if (p->is_write) {
3937 return ignore_write(vcpu, p);
3938 } else {
3939 u64 dfr = kvm_read_vm_id_reg(vcpu->kvm, SYS_ID_AA64DFR0_EL1);
3940 u32 el3 = kvm_has_feat(vcpu->kvm, ID_AA64PFR0_EL1, EL3, IMP);
3942 p->regval = ((SYS_FIELD_GET(ID_AA64DFR0_EL1, WRPs, dfr) << 28) |
3943 (SYS_FIELD_GET(ID_AA64DFR0_EL1, BRPs, dfr) << 24) |
3944 (SYS_FIELD_GET(ID_AA64DFR0_EL1, CTX_CMPs, dfr) << 20) |
3945 (SYS_FIELD_GET(ID_AA64DFR0_EL1, DebugVer, dfr) << 16) |
3946 (1 << 15) | (el3 << 14) | (el3 << 12));
3947 return true;
3948 }
3949 }
3951 /*
3952 * AArch32 debug register mappings
3953 *
3954 * AArch32 DBGBVRn is mapped to DBGBVRn_EL1[31:0]
3955 * AArch32 DBGBXVRn is mapped to DBGBVRn_EL1[63:32]
3956 *
3957 * None of the other registers share their location, so treat them as
3958 * if they were 64bit.
3959 */
3960 #define DBG_BCR_BVR_WCR_WVR(n) \
3961 /* DBGBVRn */ \
3962 { AA32(LO), Op1( 0), CRn( 0), CRm((n)), Op2( 4), \
3963 trap_dbg_wb_reg, NULL, n }, \
3964 /* DBGBCRn */ \
3965 { Op1( 0), CRn( 0), CRm((n)), Op2( 5), trap_dbg_wb_reg, NULL, n }, \
3966 /* DBGWVRn */ \
3967 { Op1( 0), CRn( 0), CRm((n)), Op2( 6), trap_dbg_wb_reg, NULL, n }, \
3968 /* DBGWCRn */ \
3969 { Op1( 0), CRn( 0), CRm((n)), Op2( 7), trap_dbg_wb_reg, NULL, n }
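/*
 * For illustration, DBG_BCR_BVR_WCR_WVR(3) emits four cp14 entries at
 * CRn=0, CRm=3, Op2=4..7 (DBGBVR3, DBGBCR3, DBGWVR3, DBGWCR3), all
 * routed to trap_dbg_wb_reg with debug register index 3.
 */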
3971 #define DBGBXVR(n) \
3972 { AA32(HI), Op1( 0), CRn( 1), CRm((n)), Op2( 1), \
3973 trap_dbg_wb_reg, NULL, n }
3975 /*
3976 * Trapped cp14 registers. We generally ignore most of the external
3977 * debug, on the principle that they don't really make sense to a
3978 * guest. Revisit this one day, should this principle change.
3979 */
3980 static const struct sys_reg_desc cp14_regs[] = {
3981 /* DBGDIDR */
3982 { Op1( 0), CRn( 0), CRm( 0), Op2( 0), trap_dbgdidr },
3983 /* DBGDTRRXext */
3984 { Op1( 0), CRn( 0), CRm( 0), Op2( 2), trap_raz_wi },
3986 DBG_BCR_BVR_WCR_WVR(0),
3987 /* DBGDSCRint */
3988 { Op1( 0), CRn( 0), CRm( 1), Op2( 0), trap_raz_wi },
3989 DBG_BCR_BVR_WCR_WVR(1),
3990 /* DBGDCCINT */
3991 { Op1( 0), CRn( 0), CRm( 2), Op2( 0), trap_debug_regs, NULL, MDCCINT_EL1 },
3992 /* DBGDSCRext */
3993 { Op1( 0), CRn( 0), CRm( 2), Op2( 2), trap_debug_regs, NULL, MDSCR_EL1 },
3994 DBG_BCR_BVR_WCR_WVR(2),
3995 /* DBGDTR[RT]Xint */
3996 { Op1( 0), CRn( 0), CRm( 3), Op2( 0), trap_raz_wi },
3997 /* DBGDTR[RT]Xext */
3998 { Op1( 0), CRn( 0), CRm( 3), Op2( 2), trap_raz_wi },
3999 DBG_BCR_BVR_WCR_WVR(3),
4000 DBG_BCR_BVR_WCR_WVR(4),
4001 DBG_BCR_BVR_WCR_WVR(5),
4002 /* DBGWFAR */
4003 { Op1( 0), CRn( 0), CRm( 6), Op2( 0), trap_raz_wi },
4004 /* DBGOSECCR */
4005 { Op1( 0), CRn( 0), CRm( 6), Op2( 2), trap_raz_wi },
4006 DBG_BCR_BVR_WCR_WVR(6),
4007 /* DBGVCR */
4008 { Op1( 0), CRn( 0), CRm( 7), Op2( 0), trap_debug_regs, NULL, DBGVCR32_EL2 },
4009 DBG_BCR_BVR_WCR_WVR(7),
4010 DBG_BCR_BVR_WCR_WVR(8),
4011 DBG_BCR_BVR_WCR_WVR(9),
4012 DBG_BCR_BVR_WCR_WVR(10),
4013 DBG_BCR_BVR_WCR_WVR(11),
4014 DBG_BCR_BVR_WCR_WVR(12),
4015 DBG_BCR_BVR_WCR_WVR(13),
4016 DBG_BCR_BVR_WCR_WVR(14),
4017 DBG_BCR_BVR_WCR_WVR(15),
4019 /* DBGDRAR (32bit) */
4020 { Op1( 0), CRn( 1), CRm( 0), Op2( 0), trap_raz_wi },
4022 DBGBXVR(0),
4023 /* DBGOSLAR */
4024 { Op1( 0), CRn( 1), CRm( 0), Op2( 4), trap_oslar_el1 },
4025 DBGBXVR(1),
4026 /* DBGOSLSR */
4027 { Op1( 0), CRn( 1), CRm( 1), Op2( 4), trap_oslsr_el1, NULL, OSLSR_EL1 },
4028 DBGBXVR(2),
4029 DBGBXVR(3),
4030 /* DBGOSDLR */
4031 { Op1( 0), CRn( 1), CRm( 3), Op2( 4), trap_raz_wi },
4032 DBGBXVR(4),
4033 /* DBGPRCR */
4034 { Op1( 0), CRn( 1), CRm( 4), Op2( 4), trap_raz_wi },
4035 DBGBXVR(5),
4036 DBGBXVR(6),
4037 DBGBXVR(7),
4038 DBGBXVR(8),
4039 DBGBXVR(9),
4040 DBGBXVR(10),
4041 DBGBXVR(11),
4042 DBGBXVR(12),
4043 DBGBXVR(13),
4044 DBGBXVR(14),
4045 DBGBXVR(15),
4047 /* DBGDSAR (32bit) */
4048 { Op1( 0), CRn( 2), CRm( 0), Op2( 0), trap_raz_wi },
4050 /* DBGDEVID2 */
4051 { Op1( 0), CRn( 7), CRm( 0), Op2( 7), trap_raz_wi },
4052 /* DBGDEVID1 */
4053 { Op1( 0), CRn( 7), CRm( 1), Op2( 7), trap_raz_wi },
4054 /* DBGDEVID */
4055 { Op1( 0), CRn( 7), CRm( 2), Op2( 7), trap_raz_wi },
4056 /* DBGCLAIMSET */
4057 { Op1( 0), CRn( 7), CRm( 8), Op2( 6), trap_raz_wi },
4058 /* DBGCLAIMCLR */
4059 { Op1( 0), CRn( 7), CRm( 9), Op2( 6), trap_raz_wi },
4060 /* DBGAUTHSTATUS */
4061 { Op1( 0), CRn( 7), CRm(14), Op2( 6), trap_dbgauthstatus_el1 },
4062 };
4064 /* Trapped cp14 64bit registers */
4065 static const struct sys_reg_desc cp14_64_regs[] = {
4066 /* DBGDRAR (64bit) */
4067 { Op1( 0), CRm( 1), .access = trap_raz_wi },
4069 /* DBGDSAR (64bit) */
4070 { Op1( 0), CRm( 2), .access = trap_raz_wi },
4071 };
4073 #define CP15_PMU_SYS_REG(_map, _Op1, _CRn, _CRm, _Op2) \
4074 AA32(_map), \
4075 Op1(_Op1), CRn(_CRn), CRm(_CRm), Op2(_Op2), \
4076 .visibility = pmu_visibility
4078 /* Macro to expand the PMEVCNTRn register */
4079 #define PMU_PMEVCNTR(n) \
4080 { CP15_PMU_SYS_REG(DIRECT, 0, 0b1110, \
4081 (0b1000 | (((n) >> 3) & 0x3)), ((n) & 0x7)), \
4082 .access = access_pmu_evcntr }
4084 /* Macro to expand the PMEVTYPERn register */
4085 #define PMU_PMEVTYPER(n) \
4086 { CP15_PMU_SYS_REG(DIRECT, 0, 0b1110, \
4087 (0b1100 | (((n) >> 3) & 0x3)), ((n) & 0x7)), \
4088 .access = access_pmu_evtyper }
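/*
 * Worked example: PMU_PMEVCNTR(17) yields CRm = 0b1000 | ((17 >> 3) & 0x3)
 * = 0b1010 and Op2 = 17 & 0x7 = 1, i.e. the AArch32 PMEVCNTR17 encoding
 * (Op1=0, CRn=c14, CRm=c10, Op2=1); PMU_PMEVTYPER uses the 0b1100 CRm
 * base instead.
 */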
4089 /*
4090 * Trapped cp15 registers. TTBR0/TTBR1 get a double encoding,
4091 * depending on the way they are accessed (as a 32bit or a 64bit
4092 * register).
4093 */
4094 static const struct sys_reg_desc cp15_regs[] = {
4095 { Op1( 0), CRn( 0), CRm( 0), Op2( 1), access_ctr },
4096 { Op1( 0), CRn( 1), CRm( 0), Op2( 0), access_vm_reg, NULL, SCTLR_EL1 },
4097 /* ACTLR */
4098 { AA32(LO), Op1( 0), CRn( 1), CRm( 0), Op2( 1), access_actlr, NULL, ACTLR_EL1 },
4099 /* ACTLR2 */
4100 { AA32(HI), Op1( 0), CRn( 1), CRm( 0), Op2( 3), access_actlr, NULL, ACTLR_EL1 },
4101 { Op1( 0), CRn( 2), CRm( 0), Op2( 0), access_vm_reg, NULL, TTBR0_EL1 },
4102 { Op1( 0), CRn( 2), CRm( 0), Op2( 1), access_vm_reg, NULL, TTBR1_EL1 },
4103 /* TTBCR */
4104 { AA32(LO), Op1( 0), CRn( 2), CRm( 0), Op2( 2), access_vm_reg, NULL, TCR_EL1 },
4105 /* TTBCR2 */
4106 { AA32(HI), Op1( 0), CRn( 2), CRm( 0), Op2( 3), access_vm_reg, NULL, TCR_EL1 },
4107 { Op1( 0), CRn( 3), CRm( 0), Op2( 0), access_vm_reg, NULL, DACR32_EL2 },
4108 { CP15_SYS_DESC(SYS_ICC_PMR_EL1), undef_access },
4109 /* DFSR */
4110 { Op1( 0), CRn( 5), CRm( 0), Op2( 0), access_vm_reg, NULL, ESR_EL1 },
4111 { Op1( 0), CRn( 5), CRm( 0), Op2( 1), access_vm_reg, NULL, IFSR32_EL2 },
4112 /* ADFSR */
4113 { Op1( 0), CRn( 5), CRm( 1), Op2( 0), access_vm_reg, NULL, AFSR0_EL1 },
4114 /* AIFSR */
4115 { Op1( 0), CRn( 5), CRm( 1), Op2( 1), access_vm_reg, NULL, AFSR1_EL1 },
4116 /* DFAR */
4117 { AA32(LO), Op1( 0), CRn( 6), CRm( 0), Op2( 0), access_vm_reg, NULL, FAR_EL1 },
4118 /* IFAR */
4119 { AA32(HI), Op1( 0), CRn( 6), CRm( 0), Op2( 2), access_vm_reg, NULL, FAR_EL1 },
4121 /*
4122 * DC{C,I,CI}SW operations:
4123 */
4124 { Op1( 0), CRn( 7), CRm( 6), Op2( 2), access_dcsw },
4125 { Op1( 0), CRn( 7), CRm(10), Op2( 2), access_dcsw },
4126 { Op1( 0), CRn( 7), CRm(14), Op2( 2), access_dcsw },
4128 /* PMU */
4129 { CP15_PMU_SYS_REG(DIRECT, 0, 9, 12, 0), .access = access_pmcr },
4130 { CP15_PMU_SYS_REG(DIRECT, 0, 9, 12, 1), .access = access_pmcnten },
4131 { CP15_PMU_SYS_REG(DIRECT, 0, 9, 12, 2), .access = access_pmcnten },
4132 { CP15_PMU_SYS_REG(DIRECT, 0, 9, 12, 3), .access = access_pmovs },
4133 { CP15_PMU_SYS_REG(DIRECT, 0, 9, 12, 4), .access = access_pmswinc },
4134 { CP15_PMU_SYS_REG(DIRECT, 0, 9, 12, 5), .access = access_pmselr },
4135 { CP15_PMU_SYS_REG(LO, 0, 9, 12, 6), .access = access_pmceid },
4136 { CP15_PMU_SYS_REG(LO, 0, 9, 12, 7), .access = access_pmceid },
4137 { CP15_PMU_SYS_REG(DIRECT, 0, 9, 13, 0), .access = access_pmu_evcntr },
4138 { CP15_PMU_SYS_REG(DIRECT, 0, 9, 13, 1), .access = access_pmu_evtyper },
4139 { CP15_PMU_SYS_REG(DIRECT, 0, 9, 13, 2), .access = access_pmu_evcntr },
4140 { CP15_PMU_SYS_REG(DIRECT, 0, 9, 14, 0), .access = access_pmuserenr },
4141 { CP15_PMU_SYS_REG(DIRECT, 0, 9, 14, 1), .access = access_pminten },
4142 { CP15_PMU_SYS_REG(DIRECT, 0, 9, 14, 2), .access = access_pminten },
4143 { CP15_PMU_SYS_REG(DIRECT, 0, 9, 14, 3), .access = access_pmovs },
4144 { CP15_PMU_SYS_REG(HI, 0, 9, 14, 4), .access = access_pmceid },
4145 { CP15_PMU_SYS_REG(HI, 0, 9, 14, 5), .access = access_pmceid },
4146 /* PMMIR */
4147 { CP15_PMU_SYS_REG(DIRECT, 0, 9, 14, 6), .access = trap_raz_wi },
4149 /* PRRR/MAIR0 */
4150 { AA32(LO), Op1( 0), CRn(10), CRm( 2), Op2( 0), access_vm_reg, NULL, MAIR_EL1 },
4151 /* NMRR/MAIR1 */
4152 { AA32(HI), Op1( 0), CRn(10), CRm( 2), Op2( 1), access_vm_reg, NULL, MAIR_EL1 },
4153 /* AMAIR0 */
4154 { AA32(LO), Op1( 0), CRn(10), CRm( 3), Op2( 0), access_vm_reg, NULL, AMAIR_EL1 },
4155 /* AMAIR1 */
4156 { AA32(HI), Op1( 0), CRn(10), CRm( 3), Op2( 1), access_vm_reg, NULL, AMAIR_EL1 },
4158 { CP15_SYS_DESC(SYS_ICC_IAR0_EL1), undef_access },
4159 { CP15_SYS_DESC(SYS_ICC_EOIR0_EL1), undef_access },
4160 { CP15_SYS_DESC(SYS_ICC_HPPIR0_EL1), undef_access },
4161 { CP15_SYS_DESC(SYS_ICC_BPR0_EL1), undef_access },
4162 { CP15_SYS_DESC(SYS_ICC_AP0R0_EL1), undef_access },
4163 { CP15_SYS_DESC(SYS_ICC_AP0R1_EL1), undef_access },
4164 { CP15_SYS_DESC(SYS_ICC_AP0R2_EL1), undef_access },
4165 { CP15_SYS_DESC(SYS_ICC_AP0R3_EL1), undef_access },
4166 { CP15_SYS_DESC(SYS_ICC_AP1R0_EL1), undef_access },
4167 { CP15_SYS_DESC(SYS_ICC_AP1R1_EL1), undef_access },
4168 { CP15_SYS_DESC(SYS_ICC_AP1R2_EL1), undef_access },
4169 { CP15_SYS_DESC(SYS_ICC_AP1R3_EL1), undef_access },
4170 { CP15_SYS_DESC(SYS_ICC_DIR_EL1), undef_access },
4171 { CP15_SYS_DESC(SYS_ICC_RPR_EL1), undef_access },
4172 { CP15_SYS_DESC(SYS_ICC_IAR1_EL1), undef_access },
4173 { CP15_SYS_DESC(SYS_ICC_EOIR1_EL1), undef_access },
4174 { CP15_SYS_DESC(SYS_ICC_HPPIR1_EL1), undef_access },
4175 { CP15_SYS_DESC(SYS_ICC_BPR1_EL1), undef_access },
4176 { CP15_SYS_DESC(SYS_ICC_CTLR_EL1), undef_access },
4177 { CP15_SYS_DESC(SYS_ICC_SRE_EL1), access_gic_sre },
4178 { CP15_SYS_DESC(SYS_ICC_IGRPEN0_EL1), undef_access },
4179 { CP15_SYS_DESC(SYS_ICC_IGRPEN1_EL1), undef_access },
4181 { Op1( 0), CRn(13), CRm( 0), Op2( 1), access_vm_reg, NULL, CONTEXTIDR_EL1 },
4183 /* Arch Timers */
4184 { SYS_DESC(SYS_AARCH32_CNTP_TVAL), access_arch_timer },
4185 { SYS_DESC(SYS_AARCH32_CNTP_CTL), access_arch_timer },
4187 /* PMEVCNTRn */
4188 PMU_PMEVCNTR(0),
4189 PMU_PMEVCNTR(1),
4190 PMU_PMEVCNTR(2),
4191 PMU_PMEVCNTR(3),
4192 PMU_PMEVCNTR(4),
4193 PMU_PMEVCNTR(5),
4194 PMU_PMEVCNTR(6),
4195 PMU_PMEVCNTR(7),
4196 PMU_PMEVCNTR(8),
4197 PMU_PMEVCNTR(9),
4198 PMU_PMEVCNTR(10),
4199 PMU_PMEVCNTR(11),
4200 PMU_PMEVCNTR(12),
4201 PMU_PMEVCNTR(13),
4202 PMU_PMEVCNTR(14),
4203 PMU_PMEVCNTR(15),
4204 PMU_PMEVCNTR(16),
4205 PMU_PMEVCNTR(17),
4206 PMU_PMEVCNTR(18),
4207 PMU_PMEVCNTR(19),
4208 PMU_PMEVCNTR(20),
4209 PMU_PMEVCNTR(21),
4210 PMU_PMEVCNTR(22),
4211 PMU_PMEVCNTR(23),
4212 PMU_PMEVCNTR(24),
4213 PMU_PMEVCNTR(25),
4214 PMU_PMEVCNTR(26),
4215 PMU_PMEVCNTR(27),
4216 PMU_PMEVCNTR(28),
4217 PMU_PMEVCNTR(29),
4218 PMU_PMEVCNTR(30),
4219 /* PMEVTYPERn */
4220 PMU_PMEVTYPER(0),
4221 PMU_PMEVTYPER(1),
4222 PMU_PMEVTYPER(2),
4223 PMU_PMEVTYPER(3),
4224 PMU_PMEVTYPER(4),
4225 PMU_PMEVTYPER(5),
4226 PMU_PMEVTYPER(6),
4227 PMU_PMEVTYPER(7),
4228 PMU_PMEVTYPER(8),
4229 PMU_PMEVTYPER(9),
4230 PMU_PMEVTYPER(10),
4231 PMU_PMEVTYPER(11),
4232 PMU_PMEVTYPER(12),
4233 PMU_PMEVTYPER(13),
4234 PMU_PMEVTYPER(14),
4235 PMU_PMEVTYPER(15),
4236 PMU_PMEVTYPER(16),
4237 PMU_PMEVTYPER(17),
4238 PMU_PMEVTYPER(18),
4239 PMU_PMEVTYPER(19),
4240 PMU_PMEVTYPER(20),
4241 PMU_PMEVTYPER(21),
4242 PMU_PMEVTYPER(22),
4243 PMU_PMEVTYPER(23),
4244 PMU_PMEVTYPER(24),
4245 PMU_PMEVTYPER(25),
4246 PMU_PMEVTYPER(26),
4247 PMU_PMEVTYPER(27),
4248 PMU_PMEVTYPER(28),
4249 PMU_PMEVTYPER(29),
4250 PMU_PMEVTYPER(30),
4251 /* PMCCFILTR */
4252 { CP15_PMU_SYS_REG(DIRECT, 0, 14, 15, 7), .access = access_pmu_evtyper },
4254 { Op1(1), CRn( 0), CRm( 0), Op2(0), access_ccsidr },
4255 { Op1(1), CRn( 0), CRm( 0), Op2(1), access_clidr },
4258 { Op1(1), CRn( 0), CRm( 0), Op2(2), undef_access },
4260 { Op1(2), CRn( 0), CRm( 0), Op2(0), access_csselr, NULL, CSSELR_EL1 },
static const struct sys_reg_desc cp15_64_regs[] = {
	{ Op1( 0), CRn( 0), CRm( 2), Op2( 0), access_vm_reg, NULL, TTBR0_EL1 },
	{ CP15_PMU_SYS_REG(DIRECT, 0, 0, 9, 0), .access = access_pmu_evcntr },
	{ Op1( 0), CRn( 0), CRm(12), Op2( 0), access_gic_sgi }, /* ICC_SGI1R */
	{ SYS_DESC(SYS_AARCH32_CNTPCT), access_arch_timer },
	{ Op1( 1), CRn( 0), CRm( 2), Op2( 0), access_vm_reg, NULL, TTBR1_EL1 },
	{ Op1( 1), CRn( 0), CRm(12), Op2( 0), access_gic_sgi }, /* ICC_ASGI1R */
	{ SYS_DESC(SYS_AARCH32_CNTVCT), access_arch_timer },
	{ Op1( 2), CRn( 0), CRm(12), Op2( 0), access_gic_sgi }, /* ICC_SGI0R */
	{ SYS_DESC(SYS_AARCH32_CNTP_CVAL), access_arch_timer },
	{ SYS_DESC(SYS_AARCH32_CNTPCTSS), access_arch_timer },
	{ SYS_DESC(SYS_AARCH32_CNTVCTSS), access_arch_timer },
};
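/*
 * Note on the table above: like all the descriptor tables in this file,
 * cp15_64_regs must stay sorted by Op0/Op1/CRn/CRm/Op2 (see cmp_sys_reg()),
 * which is why the TTBRs, GIC SGI registers and generic timer registers
 * appear interleaved: 64-bit coprocessor accesses are keyed on Op1 and CRm
 * only. The ordering is enforced at init time by check_sysreg_table() below.
 */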
static bool check_sysreg_table(const struct sys_reg_desc *table, unsigned int n,
			       bool is_32)
{
	unsigned int i;

	for (i = 0; i < n; i++) {
		if (!is_32 && table[i].reg && !table[i].reset) {
			kvm_err("sys_reg table %pS entry %d (%s) lacks reset\n",
				&table[i], i, table[i].name);
			return false;
		}

		if (i && cmp_sys_reg(&table[i-1], &table[i]) >= 0) {
			kvm_err("sys_reg table %pS entry %d (%s -> %s) out of order\n",
				&table[i], i, table[i - 1].name, table[i].name);
			return false;
		}
	}

	return true;
}
int kvm_handle_cp14_load_store(struct kvm_vcpu *vcpu)
{
	kvm_inject_undefined(vcpu);
	return 1;
}
static void perform_access(struct kvm_vcpu *vcpu,
			   struct sys_reg_params *params,
			   const struct sys_reg_desc *r)
{
	trace_kvm_sys_access(*vcpu_pc(vcpu), params, r);

	/* Check for regs disabled by runtime config */
	if (sysreg_hidden(vcpu, r)) {
		kvm_inject_undefined(vcpu);
		return;
	}

	/*
	 * Not having an accessor means that we have configured a trap
	 * that we don't know how to handle. This certainly qualifies
	 * as a gross bug that should be fixed right away.
	 */
	BUG_ON(!r->access);

	/* Skip instruction if instructed so */
	if (likely(r->access(vcpu, params, r)))
		kvm_incr_pc(vcpu);
}
/*
 * emulate_cp -- tries to match a sys_reg access in a handling table, and
 *               call the corresponding trap handler.
 *
 * @vcpu: the vCPU pointer
 * @params: pointer to the descriptor of the access
 * @table: array of trap descriptors
 * @num: size of the trap descriptor array
 *
 * Return true if the access has been handled, false if not.
 */
static bool emulate_cp(struct kvm_vcpu *vcpu,
		       struct sys_reg_params *params,
		       const struct sys_reg_desc *table,
		       size_t num)
{
	const struct sys_reg_desc *r;

	if (!table)
		return false;	/* Not handled */

	r = find_reg(params, table, num);

	if (r) {
		perform_access(vcpu, params, r);
		return true;
	}

	/* Not handled */
	return false;
}
static void unhandled_cp_access(struct kvm_vcpu *vcpu,
				struct sys_reg_params *params)
{
	u8 esr_ec = kvm_vcpu_trap_get_class(vcpu);
	int cp = -1;

	switch (esr_ec) {
	case ESR_ELx_EC_CP15_32:
	case ESR_ELx_EC_CP15_64:
		cp = 15;
		break;
	case ESR_ELx_EC_CP14_MR:
	case ESR_ELx_EC_CP14_64:
		cp = 14;
		break;
	default:
		WARN_ON(1);
	}

	print_sys_reg_msg(params,
			  "Unsupported guest CP%d access at: %08lx [%08lx]\n",
			  cp, *vcpu_pc(vcpu), *vcpu_cpsr(vcpu));
	kvm_inject_undefined(vcpu);
}
/**
 * kvm_handle_cp_64 -- handles a mrrc/mcrr trap on a guest CP14/CP15 access
 * @vcpu: The VCPU pointer
 * @global: &struct sys_reg_desc
 * @nr_global: size of the @global array
 */
static int kvm_handle_cp_64(struct kvm_vcpu *vcpu,
			    const struct sys_reg_desc *global,
			    size_t nr_global)
{
	struct sys_reg_params params;
	u64 esr = kvm_vcpu_get_esr(vcpu);
	int Rt = kvm_vcpu_sys_get_rt(vcpu);
	int Rt2 = (esr >> 10) & 0x1f;

	params.CRm = (esr >> 1) & 0xf;
	params.is_write = ((esr & 1) == 0);

	params.Op0 = 0;
	params.Op1 = (esr >> 16) & 0xf;
	params.Op2 = 0;
	params.CRn = 0;

	/*
	 * Make a 64-bit value out of Rt and Rt2. As we use the same trap
	 * backends between AArch32 and AArch64, we get away with it.
	 */
	if (params.is_write) {
		params.regval = vcpu_get_reg(vcpu, Rt) & 0xffffffff;
		params.regval |= vcpu_get_reg(vcpu, Rt2) << 32;
	}

	/*
	 * If the table contains a handler, handle the
	 * potential register operation in the case of a read and return
	 * with success.
	 */
	if (emulate_cp(vcpu, &params, global, nr_global)) {
		/* Split up the value between registers for the read side */
		if (!params.is_write) {
			vcpu_set_reg(vcpu, Rt, lower_32_bits(params.regval));
			vcpu_set_reg(vcpu, Rt2, upper_32_bits(params.regval));
		}

		return 1;
	}

	unhandled_cp_access(vcpu, &params);
	return 1;
}
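/*
 * Illustrative example of the Rt/Rt2 packing above: a 32-bit guest reading
 * CNTP_CVAL with "mrrc p15, 2, r0, r1, c14" is matched in cp15_64_regs and
 * handled by access_arch_timer(), which fills params.regval with the 64-bit
 * compare value; the split then places bits [31:0] in r0 (Rt) and bits
 * [63:32] in r1 (Rt2).
 */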
static bool emulate_sys_reg(struct kvm_vcpu *vcpu, struct sys_reg_params *params);

/*
 * The CP10 ID registers are architecturally mapped to AArch64 feature
 * registers. Abuse that fact so we can rely on the AArch64 handler for accesses
 * to them.
 */
static bool kvm_esr_cp10_id_to_sys64(u64 esr, struct sys_reg_params *params)
{
	u8 reg_id = (esr >> 10) & 0xf;
	bool valid;

	params->is_write = ((esr & 1) == 0);
	params->Op0 = 3;
	params->Op1 = 0;
	params->CRn = 0;
	params->CRm = 3;

	/* CP10 ID registers are read-only */
	valid = !params->is_write;

	switch (reg_id) {
	case 0b0111:	params->Op2 = 0; break;	/* MVFR0 */
	case 0b0110:	params->Op2 = 1; break;	/* MVFR1 */
	case 0b0101:	params->Op2 = 2; break;	/* MVFR2 */
	default:	valid = false;
	}

	if (valid)
		return true;

	kvm_pr_unimpl("Unhandled cp10 register %s: %u\n",
		      params->is_write ? "write" : "read", reg_id);
	return false;
}
/**
 * kvm_handle_cp10_id() - Handles a VMRS trap on guest access to a 'Media and
 *			  VFP Register' from AArch32.
 * @vcpu: The vCPU pointer
 *
 * MVFR{0-2} are architecturally mapped to the AArch64 MVFR{0-2}_EL1 registers.
 * Work out the correct AArch64 system register encoding and reroute to the
 * AArch64 system register emulation.
 */
int kvm_handle_cp10_id(struct kvm_vcpu *vcpu)
{
	int Rt = kvm_vcpu_sys_get_rt(vcpu);
	u64 esr = kvm_vcpu_get_esr(vcpu);
	struct sys_reg_params params;

	/* UNDEF on any unhandled register access */
	if (!kvm_esr_cp10_id_to_sys64(esr, &params)) {
		kvm_inject_undefined(vcpu);
		return 1;
	}

	if (emulate_sys_reg(vcpu, &params))
		vcpu_set_reg(vcpu, Rt, params.regval);

	return 1;
}
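/*
 * Illustrative mapping: a VMRS read of MVFR1 (reg_id 0b0110) is rewritten by
 * kvm_esr_cp10_id_to_sys64() as an AArch64 access to MVFR1_EL1 (Op0=3,
 * Op1=0, CRn=0, CRm=3, Op2=1) and then emulated through the common
 * sys_reg_descs table like any other feature register read.
 */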
/**
 * kvm_emulate_cp15_id_reg() - Handles an MRC trap on a guest CP15 access where
 *			       CRn=0, which corresponds to the AArch32 feature
 *			       registers.
 * @vcpu: the vCPU pointer
 * @params: the system register access parameters.
 *
 * Our cp15 system register tables do not enumerate the AArch32 feature
 * registers. Conveniently, our AArch64 table does, and the AArch32 system
 * register encoding can be trivially remapped into the AArch64 for the feature
 * registers: Append op0=3, leaving op1, CRn, CRm, and op2 the same.
 *
 * According to DDI0487G.b G7.3.1, paragraph "Behavior of VMSAv8-32 32-bit
 * System registers with (coproc=0b1111, CRn==c0)", read accesses from this
 * range are either UNKNOWN or RES0. Rerouting remains architectural as we
 * treat undefined registers in this range as RAZ.
 */
static int kvm_emulate_cp15_id_reg(struct kvm_vcpu *vcpu,
				   struct sys_reg_params *params)
{
	int Rt = kvm_vcpu_sys_get_rt(vcpu);

	/* Treat impossible writes to RO registers as UNDEFINED */
	if (params->is_write) {
		unhandled_cp_access(vcpu, params);
		return 1;
	}

	params->Op0 = 3;

	/*
	 * All registers where CRm > 3 are known to be UNKNOWN/RAZ from AArch32.
	 * Avoid conflicting with future expansion of AArch64 feature registers
	 * and simply treat them as RAZ here.
	 */
	if (params->CRm > 3)
		params->regval = 0;
	else if (!emulate_sys_reg(vcpu, params))
		return 1;

	vcpu_set_reg(vcpu, Rt, params->regval);
	return 1;
}
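/*
 * Illustrative remap: "mrc p15, 0, r0, c0, c1, 0" (ID_PFR0) becomes the
 * AArch64 encoding Op0=3, Op1=0, CRn=0, CRm=1, Op2=0, i.e. ID_PFR0_EL1, so
 * the AArch64 descriptor table provides the value the 32-bit guest sees.
 */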
/**
 * kvm_handle_cp_32 -- handles a mrc/mcr trap on a guest CP14/CP15 access
 * @vcpu: The VCPU pointer
 * @params: &struct sys_reg_params
 * @global: &struct sys_reg_desc
 * @nr_global: size of the @global array
 */
static int kvm_handle_cp_32(struct kvm_vcpu *vcpu,
			    struct sys_reg_params *params,
			    const struct sys_reg_desc *global,
			    size_t nr_global)
{
	int Rt = kvm_vcpu_sys_get_rt(vcpu);

	params->regval = vcpu_get_reg(vcpu, Rt);

	if (emulate_cp(vcpu, params, global, nr_global)) {
		if (!params->is_write)
			vcpu_set_reg(vcpu, Rt, params->regval);
		return 1;
	}

	unhandled_cp_access(vcpu, params);
	return 1;
}
int kvm_handle_cp15_64(struct kvm_vcpu *vcpu)
{
	return kvm_handle_cp_64(vcpu, cp15_64_regs, ARRAY_SIZE(cp15_64_regs));
}

int kvm_handle_cp15_32(struct kvm_vcpu *vcpu)
{
	struct sys_reg_params params;

	params = esr_cp1x_32_to_params(kvm_vcpu_get_esr(vcpu));

	/*
	 * Certain AArch32 ID registers are handled by rerouting to the AArch64
	 * system register table. Registers in the ID range where CRm=0 are
	 * excluded from this scheme as they do not trivially map into AArch64
	 * system register encodings, except for AIDR/REVIDR.
	 */
	if (params.Op1 == 0 && params.CRn == 0 &&
	    (params.CRm || params.Op2 == 6 /* REVIDR */))
		return kvm_emulate_cp15_id_reg(vcpu, &params);
	if (params.Op1 == 1 && params.CRn == 0 &&
	    params.CRm == 0 && params.Op2 == 7 /* AIDR */)
		return kvm_emulate_cp15_id_reg(vcpu, &params);

	return kvm_handle_cp_32(vcpu, &params, cp15_regs, ARRAY_SIZE(cp15_regs));
}

int kvm_handle_cp14_64(struct kvm_vcpu *vcpu)
{
	return kvm_handle_cp_64(vcpu, cp14_64_regs, ARRAY_SIZE(cp14_64_regs));
}

int kvm_handle_cp14_32(struct kvm_vcpu *vcpu)
{
	struct sys_reg_params params;

	params = esr_cp1x_32_to_params(kvm_vcpu_get_esr(vcpu));

	return kvm_handle_cp_32(vcpu, &params, cp14_regs, ARRAY_SIZE(cp14_regs));
}
/**
 * emulate_sys_reg - Emulate a guest access to an AArch64 system register
 * @vcpu: The VCPU pointer
 * @params: Decoded system register parameters
 *
 * Return: true if the system register access was successful, false otherwise.
 */
static bool emulate_sys_reg(struct kvm_vcpu *vcpu,
			    struct sys_reg_params *params)
{
	const struct sys_reg_desc *r;

	r = find_reg(params, sys_reg_descs, ARRAY_SIZE(sys_reg_descs));
	if (likely(r)) {
		perform_access(vcpu, params, r);
		return true;
	}

	print_sys_reg_msg(params,
			  "Unsupported guest sys_reg access at: %lx [%08lx]\n",
			  *vcpu_pc(vcpu), *vcpu_cpsr(vcpu));
	kvm_inject_undefined(vcpu);

	return false;
}
static const struct sys_reg_desc *idregs_debug_find(struct kvm *kvm, u8 pos)
{
	unsigned long i, idreg_idx = 0;

	for (i = 0; i < ARRAY_SIZE(sys_reg_descs); i++) {
		const struct sys_reg_desc *r = &sys_reg_descs[i];

		if (!is_vm_ftr_id_reg(reg_to_encoding(r)))
			continue;

		if (idreg_idx == pos)
			return r;

		idreg_idx++;
	}

	return NULL;
}
static void *idregs_debug_start(struct seq_file *s, loff_t *pos)
{
	struct kvm *kvm = s->private;
	u8 *iter;

	mutex_lock(&kvm->arch.config_lock);

	iter = &kvm->arch.idreg_debugfs_iter;
	if (test_bit(KVM_ARCH_FLAG_ID_REGS_INITIALIZED, &kvm->arch.flags) &&
	    *iter == (u8)~0) {
		*iter = *pos;
		if (!idregs_debug_find(kvm, *iter))
			iter = NULL;
	} else {
		iter = ERR_PTR(-EBUSY);
	}

	mutex_unlock(&kvm->arch.config_lock);

	return iter;
}
static void *idregs_debug_next(struct seq_file *s, void *v, loff_t *pos)
{
	struct kvm *kvm = s->private;

	(*pos)++;

	if (idregs_debug_find(kvm, kvm->arch.idreg_debugfs_iter + 1)) {
		kvm->arch.idreg_debugfs_iter++;

		return &kvm->arch.idreg_debugfs_iter;
	}

	return NULL;
}
static void idregs_debug_stop(struct seq_file *s, void *v)
{
	struct kvm *kvm = s->private;

	if (IS_ERR(v))
		return;

	mutex_lock(&kvm->arch.config_lock);

	kvm->arch.idreg_debugfs_iter = ~0;

	mutex_unlock(&kvm->arch.config_lock);
}
static int idregs_debug_show(struct seq_file *s, void *v)
{
	const struct sys_reg_desc *desc;
	struct kvm *kvm = s->private;

	desc = idregs_debug_find(kvm, kvm->arch.idreg_debugfs_iter);

	if (!desc->name)
		return 0;

	seq_printf(s, "%20s:\t%016llx\n",
		   desc->name, kvm_read_vm_id_reg(kvm, reg_to_encoding(desc)));

	return 0;
}

static const struct seq_operations idregs_debug_sops = {
	.start	= idregs_debug_start,
	.next	= idregs_debug_next,
	.stop	= idregs_debug_stop,
	.show	= idregs_debug_show,
};

DEFINE_SEQ_ATTRIBUTE(idregs_debug);
void kvm_sys_regs_create_debugfs(struct kvm *kvm)
{
	kvm->arch.idreg_debugfs_iter = ~0;

	debugfs_create_file("idregs", 0444, kvm->debugfs_dentry, kvm,
			    &idregs_debug_fops);
}
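/*
 * The resulting read-only file lives in the per-VM debugfs directory
 * (typically /sys/kernel/debug/kvm/<pid>-<vm fd>/idregs) and dumps one line
 * per VM-wide feature ID register, read under the config lock by the
 * seq_file iterator above.
 */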
static void reset_vm_ftr_id_reg(struct kvm_vcpu *vcpu, const struct sys_reg_desc *reg)
{
	u32 id = reg_to_encoding(reg);
	struct kvm *kvm = vcpu->kvm;

	if (test_bit(KVM_ARCH_FLAG_ID_REGS_INITIALIZED, &kvm->arch.flags))
		return;

	kvm_set_vm_id_reg(kvm, id, reg->reset(vcpu, reg));
}

static void reset_vcpu_ftr_id_reg(struct kvm_vcpu *vcpu,
				  const struct sys_reg_desc *reg)
{
	if (kvm_vcpu_initialized(vcpu))
		return;

	reg->reset(vcpu, reg);
}
/**
 * kvm_reset_sys_regs - sets system registers to reset value
 * @vcpu: The VCPU pointer
 *
 * This function finds the right table above and sets the registers on the
 * virtual CPU struct to their architecturally defined reset values.
 */
void kvm_reset_sys_regs(struct kvm_vcpu *vcpu)
{
	struct kvm *kvm = vcpu->kvm;
	unsigned long i;

	for (i = 0; i < ARRAY_SIZE(sys_reg_descs); i++) {
		const struct sys_reg_desc *r = &sys_reg_descs[i];

		if (!r->reset)
			continue;

		if (is_vm_ftr_id_reg(reg_to_encoding(r)))
			reset_vm_ftr_id_reg(vcpu, r);
		else if (is_vcpu_ftr_id_reg(reg_to_encoding(r)))
			reset_vcpu_ftr_id_reg(vcpu, r);
		else
			r->reset(vcpu, r);

		if (r->reg >= __SANITISED_REG_START__ && r->reg < NR_SYS_REGS)
			__vcpu_rmw_sys_reg(vcpu, r->reg, |=, 0);
	}

	set_bit(KVM_ARCH_FLAG_ID_REGS_INITIALIZED, &kvm->arch.flags);

	if (kvm_vcpu_has_pmu(vcpu))
		kvm_make_request(KVM_REQ_RELOAD_PMU, vcpu);
}
/**
 * kvm_handle_sys_reg -- handles a system instruction or mrs/msr instruction
 *			 trap on a guest execution
 * @vcpu: The VCPU pointer
 */
int kvm_handle_sys_reg(struct kvm_vcpu *vcpu)
{
	const struct sys_reg_desc *desc = NULL;
	struct sys_reg_params params;
	unsigned long esr = kvm_vcpu_get_esr(vcpu);
	int Rt = kvm_vcpu_sys_get_rt(vcpu);
	int sr_idx;

	trace_kvm_handle_sys_reg(esr);

	if (triage_sysreg_trap(vcpu, &sr_idx))
		return 1;

	params = esr_sys64_to_params(esr);
	params.regval = vcpu_get_reg(vcpu, Rt);

	/* System registers have Op0=={2,3}, as per DDI0487 J.a C5.1.2 */
	if (params.Op0 == 2 || params.Op0 == 3)
		desc = &sys_reg_descs[sr_idx];
	else
		desc = &sys_insn_descs[sr_idx];

	perform_access(vcpu, &params, desc);

	/* Read from system register? */
	if (!params.is_write &&
	    (params.Op0 == 2 || params.Op0 == 3))
		vcpu_set_reg(vcpu, Rt, params.regval);

	return 1;
}
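/*
 * Note on the Op0 split above: MRS/MSR system registers encode Op0 as 2 or 3,
 * while SYS/SYSL instructions (e.g. the AT and TLBI families) use other Op0
 * values and are therefore emulated from sys_insn_descs instead of
 * sys_reg_descs.
 */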
/******************************************************************************
 * Userspace API
 *****************************************************************************/
static bool index_to_params(u64 id, struct sys_reg_params *params)
{
	switch (id & KVM_REG_SIZE_MASK) {
	case KVM_REG_SIZE_U64:
		/* Any unused index bits means it's not valid. */
		if (id & ~(KVM_REG_ARCH_MASK | KVM_REG_SIZE_MASK
			      | KVM_REG_ARM_COPROC_MASK
			      | KVM_REG_ARM64_SYSREG_OP0_MASK
			      | KVM_REG_ARM64_SYSREG_OP1_MASK
			      | KVM_REG_ARM64_SYSREG_CRN_MASK
			      | KVM_REG_ARM64_SYSREG_CRM_MASK
			      | KVM_REG_ARM64_SYSREG_OP2_MASK))
			return false;
		params->Op0 = ((id & KVM_REG_ARM64_SYSREG_OP0_MASK)
			       >> KVM_REG_ARM64_SYSREG_OP0_SHIFT);
		params->Op1 = ((id & KVM_REG_ARM64_SYSREG_OP1_MASK)
			       >> KVM_REG_ARM64_SYSREG_OP1_SHIFT);
		params->CRn = ((id & KVM_REG_ARM64_SYSREG_CRN_MASK)
			       >> KVM_REG_ARM64_SYSREG_CRN_SHIFT);
		params->CRm = ((id & KVM_REG_ARM64_SYSREG_CRM_MASK)
			       >> KVM_REG_ARM64_SYSREG_CRM_SHIFT);
		params->Op2 = ((id & KVM_REG_ARM64_SYSREG_OP2_MASK)
			       >> KVM_REG_ARM64_SYSREG_OP2_SHIFT);
		return true;
	default:
		return false;
	}
}
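/*
 * Illustrative encoding: MPIDR_EL1 is Op0=3, Op1=0, CRn=0, CRm=0, Op2=5, so
 * its KVM_GET_ONE_REG index is KVM_REG_ARM64 | KVM_REG_SIZE_U64 |
 * KVM_REG_ARM64_SYSREG with those five fields shifted into their respective
 * masks; sys_reg_to_index() below performs exactly this inverse construction.
 */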
const struct sys_reg_desc *get_reg_by_id(u64 id,
					 const struct sys_reg_desc table[],
					 unsigned int num)
{
	struct sys_reg_params params;

	if (!index_to_params(id, &params))
		return NULL;

	return find_reg(&params, table, num);
}
/* Decode an index value, and find the sys_reg_desc entry. */
static const struct sys_reg_desc *
id_to_sys_reg_desc(struct kvm_vcpu *vcpu, u64 id,
		   const struct sys_reg_desc table[], unsigned int num)
{
	const struct sys_reg_desc *r;

	/* We only do sys_reg for now. */
	if ((id & KVM_REG_ARM_COPROC_MASK) != KVM_REG_ARM64_SYSREG)
		return NULL;

	r = get_reg_by_id(id, table, num);

	/* Not saved in the sys_reg array and not otherwise accessible? */
	if (r && (!(r->reg || r->get_user) || sysreg_hidden(vcpu, r)))
		r = NULL;

	return r;
}
static int demux_c15_get(struct kvm_vcpu *vcpu, u64 id, void __user *uaddr)
{
	u32 val;
	u32 __user *uval = uaddr;

	/* Fail if we have unknown bits set. */
	if (id & ~(KVM_REG_ARCH_MASK|KVM_REG_SIZE_MASK|KVM_REG_ARM_COPROC_MASK
		   | ((1 << KVM_REG_ARM_COPROC_SHIFT)-1)))
		return -ENOENT;

	switch (id & KVM_REG_ARM_DEMUX_ID_MASK) {
	case KVM_REG_ARM_DEMUX_ID_CCSIDR:
		if (KVM_REG_SIZE(id) != 4)
			return -ENOENT;
		val = (id & KVM_REG_ARM_DEMUX_VAL_MASK)
			>> KVM_REG_ARM_DEMUX_VAL_SHIFT;
		if (val >= CSSELR_MAX)
			return -ENOENT;

		return put_user(get_ccsidr(vcpu, val), uval);
	default:
		return -ENOENT;
	}
}
static int demux_c15_set(struct kvm_vcpu *vcpu, u64 id, void __user *uaddr)
{
	u32 val, newval;
	u32 __user *uval = uaddr;

	/* Fail if we have unknown bits set. */
	if (id & ~(KVM_REG_ARCH_MASK|KVM_REG_SIZE_MASK|KVM_REG_ARM_COPROC_MASK
		   | ((1 << KVM_REG_ARM_COPROC_SHIFT)-1)))
		return -ENOENT;

	switch (id & KVM_REG_ARM_DEMUX_ID_MASK) {
	case KVM_REG_ARM_DEMUX_ID_CCSIDR:
		if (KVM_REG_SIZE(id) != 4)
			return -ENOENT;
		val = (id & KVM_REG_ARM_DEMUX_VAL_MASK)
			>> KVM_REG_ARM_DEMUX_VAL_SHIFT;
		if (val >= CSSELR_MAX)
			return -ENOENT;

		if (get_user(newval, uval))
			return -EFAULT;

		return set_ccsidr(vcpu, val, newval);
	default:
		return -ENOENT;
	}
}
int kvm_sys_reg_get_user(struct kvm_vcpu *vcpu, const struct kvm_one_reg *reg,
			 const struct sys_reg_desc table[], unsigned int num)
{
	u64 __user *uaddr = (u64 __user *)(unsigned long)reg->addr;
	const struct sys_reg_desc *r;
	u64 val;
	int ret;

	r = id_to_sys_reg_desc(vcpu, reg->id, table, num);
	if (!r || sysreg_hidden(vcpu, r))
		return -ENOENT;

	if (r->get_user) {
		ret = (r->get_user)(vcpu, r, &val);
	} else {
		val = __vcpu_sys_reg(vcpu, r->reg);
		ret = 0;
	}

	if (!ret)
		ret = put_user(val, uaddr);

	return ret;
}
int kvm_arm_sys_reg_get_reg(struct kvm_vcpu *vcpu, const struct kvm_one_reg *reg)
{
	void __user *uaddr = (void __user *)(unsigned long)reg->addr;

	if ((reg->id & KVM_REG_ARM_COPROC_MASK) == KVM_REG_ARM_DEMUX)
		return demux_c15_get(vcpu, reg->id, uaddr);

	return kvm_sys_reg_get_user(vcpu, reg,
				    sys_reg_descs, ARRAY_SIZE(sys_reg_descs));
}
int kvm_sys_reg_set_user(struct kvm_vcpu *vcpu, const struct kvm_one_reg *reg,
			 const struct sys_reg_desc table[], unsigned int num)
{
	u64 __user *uaddr = (u64 __user *)(unsigned long)reg->addr;
	const struct sys_reg_desc *r;
	u64 val;
	int ret;

	if (get_user(val, uaddr))
		return -EFAULT;

	r = id_to_sys_reg_desc(vcpu, reg->id, table, num);
	if (!r || sysreg_hidden(vcpu, r))
		return -ENOENT;

	if (sysreg_user_write_ignore(vcpu, r))
		return 0;

	if (r->set_user) {
		ret = (r->set_user)(vcpu, r, val);
	} else {
		__vcpu_assign_sys_reg(vcpu, r->reg, val);
		ret = 0;
	}

	return ret;
}
int kvm_arm_sys_reg_set_reg(struct kvm_vcpu *vcpu, const struct kvm_one_reg *reg)
{
	void __user *uaddr = (void __user *)(unsigned long)reg->addr;

	if ((reg->id & KVM_REG_ARM_COPROC_MASK) == KVM_REG_ARM_DEMUX)
		return demux_c15_set(vcpu, reg->id, uaddr);

	return kvm_sys_reg_set_user(vcpu, reg,
				    sys_reg_descs, ARRAY_SIZE(sys_reg_descs));
}
static unsigned int num_demux_regs(void)
{
	return CSSELR_MAX;
}

static int write_demux_regids(u64 __user *uindices)
{
	u64 val = KVM_REG_ARM64 | KVM_REG_SIZE_U32 | KVM_REG_ARM_DEMUX;
	unsigned int i;

	val |= KVM_REG_ARM_DEMUX_ID_CCSIDR;
	for (i = 0; i < CSSELR_MAX; i++) {
		if (put_user(val | i, uindices))
			return -EFAULT;
		uindices++;
	}

	return 0;
}
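/*
 * Demux indices encode the register class in KVM_REG_ARM_DEMUX_ID_MASK (only
 * CCSIDR here) and the CSSELR selector value in the low bits, so userspace
 * sees one 32-bit pseudo-register per cache level/type, accessed through
 * demux_c15_get()/demux_c15_set() above.
 */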
static u64 sys_reg_to_index(const struct sys_reg_desc *reg)
{
	return (KVM_REG_ARM64 | KVM_REG_SIZE_U64 |
		KVM_REG_ARM64_SYSREG |
		(reg->Op0 << KVM_REG_ARM64_SYSREG_OP0_SHIFT) |
		(reg->Op1 << KVM_REG_ARM64_SYSREG_OP1_SHIFT) |
		(reg->CRn << KVM_REG_ARM64_SYSREG_CRN_SHIFT) |
		(reg->CRm << KVM_REG_ARM64_SYSREG_CRM_SHIFT) |
		(reg->Op2 << KVM_REG_ARM64_SYSREG_OP2_SHIFT));
}

static bool copy_reg_to_user(const struct sys_reg_desc *reg, u64 __user **uind)
{
	if (!*uind)
		return true;

	if (put_user(sys_reg_to_index(reg), *uind))
		return false;

	(*uind)++;

	return true;
}
static int walk_one_sys_reg(const struct kvm_vcpu *vcpu,
			    const struct sys_reg_desc *rd,
			    u64 __user **uind,
			    unsigned int *total)
{
	/*
	 * Ignore registers we trap but don't save,
	 * and for which no custom user accessor is provided.
	 */
	if (!(rd->reg || rd->get_user))
		return 0;

	if (sysreg_hidden(vcpu, rd))
		return 0;

	if (!copy_reg_to_user(rd, uind))
		return -EFAULT;

	(*total)++;
	return 0;
}
/* Assumed ordered tables, see kvm_sys_reg_table_init. */
static int walk_sys_regs(struct kvm_vcpu *vcpu, u64 __user *uind)
{
	const struct sys_reg_desc *i2, *end2;
	unsigned int total = 0;
	int err;

	i2 = sys_reg_descs;
	end2 = sys_reg_descs + ARRAY_SIZE(sys_reg_descs);

	while (i2 != end2) {
		err = walk_one_sys_reg(vcpu, i2++, &uind, &total);
		if (err)
			return err;
	}

	return total;
}

unsigned long kvm_arm_num_sys_reg_descs(struct kvm_vcpu *vcpu)
{
	return num_demux_regs()
		+ walk_sys_regs(vcpu, (u64 __user *)NULL);
}
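/*
 * Passing a NULL user pointer makes copy_reg_to_user() a no-op, so the walk
 * above doubles as a counting pass: KVM_GET_REG_LIST callers first size
 * their buffer via kvm_arm_num_sys_reg_descs(), then have it filled by
 * kvm_arm_copy_sys_reg_indices() below.
 */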
int kvm_arm_copy_sys_reg_indices(struct kvm_vcpu *vcpu, u64 __user *uindices)
{
	int err;

	err = walk_sys_regs(vcpu, uindices);
	if (err < 0)
		return err;
	uindices += err;

	return write_demux_regids(uindices);
}
#define KVM_ARM_FEATURE_ID_RANGE_INDEX(r)			\
	KVM_ARM_FEATURE_ID_RANGE_IDX(sys_reg_Op0(r),		\
		sys_reg_Op1(r),					\
		sys_reg_CRn(r),					\
		sys_reg_CRm(r),					\
		sys_reg_Op2(r))
int kvm_vm_ioctl_get_reg_writable_masks(struct kvm *kvm, struct reg_mask_range *range)
{
	const void *zero_page = page_to_virt(ZERO_PAGE(0));
	u64 __user *masks = (u64 __user *)range->addr;

	/* Only feature id range is supported, reserved[13] must be zero. */
	if (range->range ||
	    memcmp(range->reserved, zero_page, sizeof(range->reserved)))
		return -EINVAL;

	/* Wipe the whole thing first */
	if (clear_user(masks, KVM_ARM_FEATURE_ID_RANGE_SIZE * sizeof(__u64)))
		return -EFAULT;

	for (int i = 0; i < ARRAY_SIZE(sys_reg_descs); i++) {
		const struct sys_reg_desc *reg = &sys_reg_descs[i];
		u32 encoding = reg_to_encoding(reg);
		u64 val;

		if (!is_feature_id_reg(encoding) || !reg->set_user)
			continue;

		if (!reg->val ||
		    (is_aa32_id_reg(encoding) && !kvm_supports_32bit_el0())) {
			continue;
		}
		val = reg->val;

		if (put_user(val, (masks + KVM_ARM_FEATURE_ID_RANGE_INDEX(encoding))))
			return -EFAULT;
	}

	return 0;
}
static void vcpu_set_hcr(struct kvm_vcpu *vcpu)
{
	struct kvm *kvm = vcpu->kvm;

	vcpu->arch.hcr_el2 = HCR_GUEST_FLAGS;

	if (has_vhe() || has_hvhe())
		vcpu->arch.hcr_el2 |= HCR_E2H;
	if (cpus_have_final_cap(ARM64_HAS_RAS_EXTN)) {
		/* route synchronous external abort exceptions to EL2 */
		vcpu->arch.hcr_el2 |= HCR_TEA;
		/* trap error record accesses */
		vcpu->arch.hcr_el2 |= HCR_TERR;
	}

	if (cpus_have_final_cap(ARM64_HAS_STAGE2_FWB))
		vcpu->arch.hcr_el2 |= HCR_FWB;

	/*
	 * TID4 (FEAT_EVT) traps the cache ID registers while leaving CTR_EL0
	 * alone, so prefer it when the guest's CTR_EL0 matches the host's;
	 * otherwise fall back to TID2, which also traps CTR_EL0.
	 */
	if (cpus_have_final_cap(ARM64_HAS_EVT) &&
	    !cpus_have_final_cap(ARM64_MISMATCHED_CACHE_TYPE) &&
	    kvm_read_vm_id_reg(kvm, SYS_CTR_EL0) == read_sanitised_ftr_reg(SYS_CTR_EL0))
		vcpu->arch.hcr_el2 |= HCR_TID4;
	else
		vcpu->arch.hcr_el2 |= HCR_TID2;

	if (vcpu_el1_is_32bit(vcpu))
		vcpu->arch.hcr_el2 &= ~HCR_RW;

	if (kvm_has_mte(vcpu->kvm))
		vcpu->arch.hcr_el2 |= HCR_ATA;

	/*
	 * In the absence of FGT, we cannot independently trap TLBI
	 * Range instructions. This isn't great, but trapping all
	 * TLBIs would be far worse. Live with it...
	 */
	if (!kvm_has_feat(kvm, ID_AA64ISAR0_EL1, TLB, OS))
		vcpu->arch.hcr_el2 |= HCR_TTLBOS;
}
void kvm_calculate_traps(struct kvm_vcpu *vcpu)
{
	struct kvm *kvm = vcpu->kvm;

	mutex_lock(&kvm->arch.config_lock);
	vcpu_set_hcr(vcpu);
	vcpu_set_ich_hcr(vcpu);
	vcpu_set_hcrx(vcpu);

	if (test_bit(KVM_ARCH_FLAG_FGU_INITIALIZED, &kvm->arch.flags))
		goto out;

	compute_fgu(kvm, HFGRTR_GROUP);
	compute_fgu(kvm, HFGITR_GROUP);
	compute_fgu(kvm, HDFGRTR_GROUP);
	compute_fgu(kvm, HAFGRTR_GROUP);
	compute_fgu(kvm, HFGRTR2_GROUP);
	compute_fgu(kvm, HFGITR2_GROUP);
	compute_fgu(kvm, HDFGRTR2_GROUP);

	set_bit(KVM_ARCH_FLAG_FGU_INITIALIZED, &kvm->arch.flags);
out:
	mutex_unlock(&kvm->arch.config_lock);
}
/*
 * Perform last adjustments to the ID registers that are implied by the
 * configuration outside of the ID regs themselves, as well as any
 * initialisation that directly depends on these ID registers (such as
 * RES0/RES1 behaviours). This is not the place to configure traps though.
 *
 * Because this can be called once per CPU, changes must be idempotent.
 */
int kvm_finalize_sys_regs(struct kvm_vcpu *vcpu)
{
	struct kvm *kvm = vcpu->kvm;

	guard(mutex)(&kvm->arch.config_lock);

	if (!(static_branch_unlikely(&kvm_vgic_global_state.gicv3_cpuif) &&
	      irqchip_in_kernel(kvm) &&
	      kvm->arch.vgic.vgic_model == KVM_DEV_TYPE_ARM_VGIC_V3)) {
		kvm->arch.id_regs[IDREG_IDX(SYS_ID_AA64PFR0_EL1)] &= ~ID_AA64PFR0_EL1_GIC_MASK;
		kvm->arch.id_regs[IDREG_IDX(SYS_ID_PFR1_EL1)] &= ~ID_PFR1_EL1_GIC_MASK;
	}

	if (vcpu_has_nv(vcpu)) {
		int ret = kvm_init_nv_sysregs(vcpu);

		if (ret)
			return ret;
	}

	return 0;
}
int __init kvm_sys_reg_table_init(void)
{
	bool valid = true;
	unsigned int i;
	int ret = 0;

	/* Make sure tables are unique and in order. */
	valid &= check_sysreg_table(sys_reg_descs, ARRAY_SIZE(sys_reg_descs), false);
	valid &= check_sysreg_table(cp14_regs, ARRAY_SIZE(cp14_regs), true);
	valid &= check_sysreg_table(cp14_64_regs, ARRAY_SIZE(cp14_64_regs), true);
	valid &= check_sysreg_table(cp15_regs, ARRAY_SIZE(cp15_regs), true);
	valid &= check_sysreg_table(cp15_64_regs, ARRAY_SIZE(cp15_64_regs), true);
	valid &= check_sysreg_table(sys_insn_descs, ARRAY_SIZE(sys_insn_descs), false);

	if (!valid)
		return -EINVAL;

	ret = populate_nv_trap_config();

	check_feature_map();

	for (i = 0; !ret && i < ARRAY_SIZE(sys_reg_descs); i++)
		ret = populate_sysreg_config(sys_reg_descs + i, i);

	for (i = 0; !ret && i < ARRAY_SIZE(sys_insn_descs); i++)
		ret = populate_sysreg_config(sys_insn_descs + i, i);

	return ret;
}