// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (C) 2012,2013 - ARM Ltd
 * Author: Marc Zyngier <marc.zyngier@arm.com>
 *
 * Derived from arch/arm/kvm/coproc.c:
 * Copyright (C) 2012 - Virtual Open Systems and Columbia University
 * Authors: Rusty Russell <rusty@rustcorp.com.au>
 *          Christoffer Dall <c.dall@virtualopensystems.com>
 */

#include <linux/bitfield.h>
#include <linux/bsearch.h>
#include <linux/kvm_host.h>
#include <linux/mm.h>
#include <linux/printk.h>
#include <linux/uaccess.h>

#include <asm/cacheflush.h>
#include <asm/cputype.h>
#include <asm/debug-monitors.h>
#include <asm/esr.h>
#include <asm/kvm_arm.h>
#include <asm/kvm_emulate.h>
#include <asm/kvm_hyp.h>
#include <asm/kvm_mmu.h>
#include <asm/perf_event.h>
#include <asm/sysreg.h>

#include <trace/events/kvm.h>

#include "sys_regs.h"

#include "trace.h"
/*
 * For AArch32, we only take care of what is being trapped. Anything
 * that has to do with init and userspace access has to go via the
 * 64bit interface.
 */

static u64 sys_reg_to_index(const struct sys_reg_desc *reg);
static bool read_from_write_only(struct kvm_vcpu *vcpu,
                                 struct sys_reg_params *params,
                                 const struct sys_reg_desc *r)
{
        WARN_ONCE(1, "Unexpected sys_reg read to write-only register\n");
        print_sys_reg_instr(params);
        kvm_inject_undefined(vcpu);
        return false;
}
static bool write_to_read_only(struct kvm_vcpu *vcpu,
                               struct sys_reg_params *params,
                               const struct sys_reg_desc *r)
{
        WARN_ONCE(1, "Unexpected sys_reg write to read-only register\n");
        print_sys_reg_instr(params);
        kvm_inject_undefined(vcpu);
        return false;
}
u64 vcpu_read_sys_reg(const struct kvm_vcpu *vcpu, int reg)
{
        u64 val = 0x8badf00d8badf00d;

        if (vcpu_get_flag(vcpu, SYSREGS_ON_CPU) &&
            __vcpu_read_sys_reg_from_cpu(reg, &val))
                return val;

        return __vcpu_sys_reg(vcpu, reg);
}
void vcpu_write_sys_reg(struct kvm_vcpu *vcpu, u64 val, int reg)
{
        if (vcpu_get_flag(vcpu, SYSREGS_ON_CPU) &&
            __vcpu_write_sys_reg_to_cpu(val, reg))
                return;

        __vcpu_sys_reg(vcpu, reg) = val;
}
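
/*
 * For example: while the vcpu is loaded on a physical CPU,
 * SYSREGS_ON_CPU is set and vcpu_read_sys_reg(vcpu, SCTLR_EL1) is
 * satisfied straight from the hardware register; once the vcpu has
 * been put, the same call falls back to the memory-backed copy in
 * __vcpu_sys_reg(vcpu, SCTLR_EL1).
 */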
/* 3 bits per cache level, as per CLIDR, but non-existent caches always 0 */
static u32 cache_levels;

/* CSSELR values; used to index KVM_REG_ARM_DEMUX_ID_CCSIDR */
#define CSSELR_MAX 14

/* Which cache CCSIDR represents depends on CSSELR value. */
static u32 get_ccsidr(u32 csselr)
{
        u32 ccsidr;

        /* Make sure no one else changes CSSELR during this! */
        local_irq_disable();
        write_sysreg(csselr, csselr_el1);
        isb();
        ccsidr = read_sysreg(ccsidr_el1);
        local_irq_enable();

        return ccsidr;
}
/*
 * See note at ARMv7 ARM B1.14.4 (TL;DR: S/W ops are not easily virtualized).
 */
static bool access_dcsw(struct kvm_vcpu *vcpu,
                        struct sys_reg_params *p,
                        const struct sys_reg_desc *r)
{
        if (!p->is_write)
                return read_from_write_only(vcpu, p, r);

        /*
         * Only track S/W ops if we don't have FWB. It still indicates
         * that the guest is a bit broken (S/W operations should only
         * be done by firmware, knowing that there is only a single
         * CPU left in the system, and certainly not from non-secure
         * software).
         */
        if (!cpus_have_const_cap(ARM64_HAS_STAGE2_FWB))
                kvm_set_way_flush(vcpu);

        return true;
}
static void get_access_mask(const struct sys_reg_desc *r, u64 *mask, u64 *shift)
{
        switch (r->aarch32_map) {
        case AA32_LO:
                *mask = GENMASK_ULL(31, 0);
                *shift = 0;
                break;
        case AA32_HI:
                *mask = GENMASK_ULL(63, 32);
                *shift = 32;
                break;
        default:
                *mask = GENMASK_ULL(63, 0);
                *shift = 0;
                break;
        }
}
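
/*
 * For example, a register tagged AA32(HI) (such as the AArch32 TTBCR2
 * view of TCR_EL1) yields mask = GENMASK_ULL(63, 32) and shift = 32,
 * so a 32-bit guest access only touches the top half of the 64-bit
 * sysreg; AA32(LO) covers the bottom half, and the default case is a
 * full-width, direct access.
 */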
/*
 * Generic accessor for VM registers. Only called as long as HCR_TVM
 * is set. If the guest enables the MMU, we stop trapping the VM
 * sys_regs and leave it in complete control of the caches.
 */
static bool access_vm_reg(struct kvm_vcpu *vcpu,
                          struct sys_reg_params *p,
                          const struct sys_reg_desc *r)
{
        bool was_enabled = vcpu_has_cache_enabled(vcpu);
        u64 val, mask, shift;

        BUG_ON(!p->is_write);

        get_access_mask(r, &mask, &shift);

        if (~mask) {
                val = vcpu_read_sys_reg(vcpu, r->reg);
                val &= ~mask;
        } else {
                val = 0;
        }

        val |= (p->regval & (mask >> shift)) << shift;
        vcpu_write_sys_reg(vcpu, val, r->reg);

        kvm_toggle_cache(vcpu, was_enabled);
        return true;
}
static bool access_actlr(struct kvm_vcpu *vcpu,
                         struct sys_reg_params *p,
                         const struct sys_reg_desc *r)
{
        u64 mask, shift;

        if (p->is_write)
                return ignore_write(vcpu, p);

        get_access_mask(r, &mask, &shift);
        p->regval = (vcpu_read_sys_reg(vcpu, r->reg) & mask) >> shift;

        return true;
}
/*
 * Trap handler for the GICv3 SGI generation system register.
 * Forward the request to the VGIC emulation.
 * The cp15_64 code makes sure this automatically works
 * for both AArch64 and AArch32 accesses.
 */
static bool access_gic_sgi(struct kvm_vcpu *vcpu,
                           struct sys_reg_params *p,
                           const struct sys_reg_desc *r)
{
        bool g1;

        if (!p->is_write)
                return read_from_write_only(vcpu, p, r);

        /*
         * In a system where GICD_CTLR.DS=1, an ICC_SGI0R_EL1 access generates
         * Group0 SGIs only, while ICC_SGI1R_EL1 can generate either group,
         * depending on the SGI configuration. ICC_ASGI1R_EL1 is effectively
         * equivalent to ICC_SGI0R_EL1, as there is no "alternative" secure
         * group.
         */
        if (p->Op0 == 0) {              /* AArch32 */
                switch (p->Op1) {
                default:                /* Keep GCC quiet */
                case 0:                 /* ICC_SGI1R */
                        g1 = true;
                        break;
                case 1:                 /* ICC_ASGI1R */
                case 2:                 /* ICC_SGI0R */
                        g1 = false;
                        break;
                }
        } else {                        /* AArch64 */
                switch (p->Op2) {
                default:                /* Keep GCC quiet */
                case 5:                 /* ICC_SGI1R_EL1 */
                        g1 = true;
                        break;
                case 6:                 /* ICC_ASGI1R_EL1 */
                case 7:                 /* ICC_SGI0R_EL1 */
                        g1 = false;
                        break;
                }
        }

        vgic_v3_dispatch_sgi(vcpu, p->regval, g1);

        return true;
}
static bool access_gic_sre(struct kvm_vcpu *vcpu,
                           struct sys_reg_params *p,
                           const struct sys_reg_desc *r)
{
        if (p->is_write)
                return ignore_write(vcpu, p);

        p->regval = vcpu->arch.vgic_cpu.vgic_v3.vgic_sre;
        return true;
}

static bool trap_raz_wi(struct kvm_vcpu *vcpu,
                        struct sys_reg_params *p,
                        const struct sys_reg_desc *r)
{
        if (p->is_write)
                return ignore_write(vcpu, p);
        else
                return read_zero(vcpu, p);
}
/*
 * ARMv8.1 mandates at least a trivial LORegion implementation, where all the
 * RW registers are RES0 (which we can implement as RAZ/WI). On an ARMv8.0
 * system, these registers should UNDEF. LORID_EL1 being a RO register, we
 * treat it separately.
 */
static bool trap_loregion(struct kvm_vcpu *vcpu,
                          struct sys_reg_params *p,
                          const struct sys_reg_desc *r)
{
        u64 val = read_sanitised_ftr_reg(SYS_ID_AA64MMFR1_EL1);
        u32 sr = reg_to_encoding(r);

        if (!(val & (0xfUL << ID_AA64MMFR1_EL1_LO_SHIFT))) {
                kvm_inject_undefined(vcpu);
                return false;
        }

        if (p->is_write && sr == SYS_LORID_EL1)
                return write_to_read_only(vcpu, p, r);

        return trap_raz_wi(vcpu, p, r);
}
static bool trap_oslar_el1(struct kvm_vcpu *vcpu,
                           struct sys_reg_params *p,
                           const struct sys_reg_desc *r)
{
        u64 oslsr;

        if (!p->is_write)
                return read_from_write_only(vcpu, p, r);

        /* Forward the OSLK bit to OSLSR */
        oslsr = __vcpu_sys_reg(vcpu, OSLSR_EL1) & ~SYS_OSLSR_OSLK;
        if (p->regval & SYS_OSLAR_OSLK)
                oslsr |= SYS_OSLSR_OSLK;

        __vcpu_sys_reg(vcpu, OSLSR_EL1) = oslsr;
        return true;
}
static bool trap_oslsr_el1(struct kvm_vcpu *vcpu,
                           struct sys_reg_params *p,
                           const struct sys_reg_desc *r)
{
        if (p->is_write)
                return write_to_read_only(vcpu, p, r);

        p->regval = __vcpu_sys_reg(vcpu, r->reg);
        return true;
}

static int set_oslsr_el1(struct kvm_vcpu *vcpu, const struct sys_reg_desc *rd,
                         u64 val)
{
        /*
         * The only modifiable bit is the OSLK bit. Refuse the write if
         * userspace attempts to change any other bit in the register.
         */
        if ((val ^ rd->val) & ~SYS_OSLSR_OSLK)
                return -EINVAL;

        __vcpu_sys_reg(vcpu, rd->reg) = val;
        return 0;
}
static bool trap_dbgauthstatus_el1(struct kvm_vcpu *vcpu,
                                   struct sys_reg_params *p,
                                   const struct sys_reg_desc *r)
{
        if (p->is_write) {
                return ignore_write(vcpu, p);
        } else {
                p->regval = read_sysreg(dbgauthstatus_el1);
                return true;
        }
}
/*
 * We want to avoid world-switching all the DBG registers all the
 * time:
 *
 * - If we've touched any debug register, it is likely that we're
 *   going to touch more of them. It then makes sense to disable the
 *   traps and start doing the save/restore dance
 * - If debug is active (DBG_MDSCR_KDE or DBG_MDSCR_MDE set), it is
 *   then mandatory to save/restore the registers, as the guest
 *   depends on them.
 *
 * For this, we use a DIRTY bit, indicating the guest has modified the
 * debug registers, used as follows:
 *
 * On guest entry:
 * - If the dirty bit is set (because we're coming back from trapping),
 *   disable the traps, save host registers, restore guest registers.
 * - If debug is actively in use (DBG_MDSCR_KDE or DBG_MDSCR_MDE set),
 *   set the dirty bit, disable the traps, save host registers,
 *   restore guest registers.
 * - Otherwise, enable the traps
 *
 * On guest exit:
 * - If the dirty bit is set, save guest registers, restore host
 *   registers and clear the dirty bit. This ensures that the host can
 *   now use the debug registers.
 */
static bool trap_debug_regs(struct kvm_vcpu *vcpu,
                            struct sys_reg_params *p,
                            const struct sys_reg_desc *r)
{
        if (p->is_write) {
                vcpu_write_sys_reg(vcpu, p->regval, r->reg);
                vcpu_set_flag(vcpu, DEBUG_DIRTY);
        } else {
                p->regval = vcpu_read_sys_reg(vcpu, r->reg);
        }

        trace_trap_reg(__func__, r->reg, p->is_write, p->regval);

        return true;
}
/*
 * reg_to_dbg/dbg_to_reg
 *
 * A 32 bit write to a debug register leaves the top bits alone
 * A 32 bit read from a debug register only returns the bottom bits
 *
 * All writes will set the DEBUG_DIRTY flag to ensure the hyp code
 * switches between host and guest values in future.
 */
static void reg_to_dbg(struct kvm_vcpu *vcpu,
                       struct sys_reg_params *p,
                       const struct sys_reg_desc *rd,
                       u64 *dbg_reg)
{
        u64 mask, shift, val;

        get_access_mask(rd, &mask, &shift);

        val = *dbg_reg;
        val &= ~mask;
        val |= (p->regval & (mask >> shift)) << shift;
        *dbg_reg = val;

        vcpu_set_flag(vcpu, DEBUG_DIRTY);
}

static void dbg_to_reg(struct kvm_vcpu *vcpu,
                       struct sys_reg_params *p,
                       const struct sys_reg_desc *rd,
                       u64 *dbg_reg)
{
        u64 mask, shift;

        get_access_mask(rd, &mask, &shift);
        p->regval = (*dbg_reg & mask) >> shift;
}
static bool trap_bvr(struct kvm_vcpu *vcpu,
                     struct sys_reg_params *p,
                     const struct sys_reg_desc *rd)
{
        u64 *dbg_reg = &vcpu->arch.vcpu_debug_state.dbg_bvr[rd->CRm];

        if (p->is_write)
                reg_to_dbg(vcpu, p, rd, dbg_reg);
        else
                dbg_to_reg(vcpu, p, rd, dbg_reg);

        trace_trap_reg(__func__, rd->CRm, p->is_write, *dbg_reg);

        return true;
}

static int set_bvr(struct kvm_vcpu *vcpu, const struct sys_reg_desc *rd,
                   u64 val)
{
        vcpu->arch.vcpu_debug_state.dbg_bvr[rd->CRm] = val;
        return 0;
}

static int get_bvr(struct kvm_vcpu *vcpu, const struct sys_reg_desc *rd,
                   u64 *val)
{
        *val = vcpu->arch.vcpu_debug_state.dbg_bvr[rd->CRm];
        return 0;
}

static void reset_bvr(struct kvm_vcpu *vcpu,
                      const struct sys_reg_desc *rd)
{
        vcpu->arch.vcpu_debug_state.dbg_bvr[rd->CRm] = rd->val;
}
static bool trap_bcr(struct kvm_vcpu *vcpu,
                     struct sys_reg_params *p,
                     const struct sys_reg_desc *rd)
{
        u64 *dbg_reg = &vcpu->arch.vcpu_debug_state.dbg_bcr[rd->CRm];

        if (p->is_write)
                reg_to_dbg(vcpu, p, rd, dbg_reg);
        else
                dbg_to_reg(vcpu, p, rd, dbg_reg);

        trace_trap_reg(__func__, rd->CRm, p->is_write, *dbg_reg);

        return true;
}

static int set_bcr(struct kvm_vcpu *vcpu, const struct sys_reg_desc *rd,
                   u64 val)
{
        vcpu->arch.vcpu_debug_state.dbg_bcr[rd->CRm] = val;
        return 0;
}

static int get_bcr(struct kvm_vcpu *vcpu, const struct sys_reg_desc *rd,
                   u64 *val)
{
        *val = vcpu->arch.vcpu_debug_state.dbg_bcr[rd->CRm];
        return 0;
}

static void reset_bcr(struct kvm_vcpu *vcpu,
                      const struct sys_reg_desc *rd)
{
        vcpu->arch.vcpu_debug_state.dbg_bcr[rd->CRm] = rd->val;
}
static bool trap_wvr(struct kvm_vcpu *vcpu,
                     struct sys_reg_params *p,
                     const struct sys_reg_desc *rd)
{
        u64 *dbg_reg = &vcpu->arch.vcpu_debug_state.dbg_wvr[rd->CRm];

        if (p->is_write)
                reg_to_dbg(vcpu, p, rd, dbg_reg);
        else
                dbg_to_reg(vcpu, p, rd, dbg_reg);

        trace_trap_reg(__func__, rd->CRm, p->is_write,
                       vcpu->arch.vcpu_debug_state.dbg_wvr[rd->CRm]);

        return true;
}

static int set_wvr(struct kvm_vcpu *vcpu, const struct sys_reg_desc *rd,
                   u64 val)
{
        vcpu->arch.vcpu_debug_state.dbg_wvr[rd->CRm] = val;
        return 0;
}

static int get_wvr(struct kvm_vcpu *vcpu, const struct sys_reg_desc *rd,
                   u64 *val)
{
        *val = vcpu->arch.vcpu_debug_state.dbg_wvr[rd->CRm];
        return 0;
}

static void reset_wvr(struct kvm_vcpu *vcpu,
                      const struct sys_reg_desc *rd)
{
        vcpu->arch.vcpu_debug_state.dbg_wvr[rd->CRm] = rd->val;
}
static bool trap_wcr(struct kvm_vcpu *vcpu,
                     struct sys_reg_params *p,
                     const struct sys_reg_desc *rd)
{
        u64 *dbg_reg = &vcpu->arch.vcpu_debug_state.dbg_wcr[rd->CRm];

        if (p->is_write)
                reg_to_dbg(vcpu, p, rd, dbg_reg);
        else
                dbg_to_reg(vcpu, p, rd, dbg_reg);

        trace_trap_reg(__func__, rd->CRm, p->is_write, *dbg_reg);

        return true;
}

static int set_wcr(struct kvm_vcpu *vcpu, const struct sys_reg_desc *rd,
                   u64 val)
{
        vcpu->arch.vcpu_debug_state.dbg_wcr[rd->CRm] = val;
        return 0;
}

static int get_wcr(struct kvm_vcpu *vcpu, const struct sys_reg_desc *rd,
                   u64 *val)
{
        *val = vcpu->arch.vcpu_debug_state.dbg_wcr[rd->CRm];
        return 0;
}

static void reset_wcr(struct kvm_vcpu *vcpu,
                      const struct sys_reg_desc *rd)
{
        vcpu->arch.vcpu_debug_state.dbg_wcr[rd->CRm] = rd->val;
}
static void reset_amair_el1(struct kvm_vcpu *vcpu, const struct sys_reg_desc *r)
{
        u64 amair = read_sysreg(amair_el1);

        vcpu_write_sys_reg(vcpu, amair, AMAIR_EL1);
}

static void reset_actlr(struct kvm_vcpu *vcpu, const struct sys_reg_desc *r)
{
        u64 actlr = read_sysreg(actlr_el1);

        vcpu_write_sys_reg(vcpu, actlr, ACTLR_EL1);
}
static void reset_mpidr(struct kvm_vcpu *vcpu, const struct sys_reg_desc *r)
{
        u64 mpidr;

        /*
         * Map the vcpu_id into the first three affinity level fields of
         * the MPIDR. We limit the number of VCPUs in level 0 due to a
         * limitation to 16 CPUs in that level in the ICC_SGIxR registers
         * of the GICv3 to be able to address each CPU directly when
         * sending IPIs.
         */
        mpidr = (vcpu->vcpu_id & 0x0f) << MPIDR_LEVEL_SHIFT(0);
        mpidr |= ((vcpu->vcpu_id >> 4) & 0xff) << MPIDR_LEVEL_SHIFT(1);
        mpidr |= ((vcpu->vcpu_id >> 12) & 0xff) << MPIDR_LEVEL_SHIFT(2);
        vcpu_write_sys_reg(vcpu, (1ULL << 31) | mpidr, MPIDR_EL1);
}
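
/*
 * Worked example: vcpu_id 0x123 gives Aff0 = 0x3, Aff1 = 0x12 and
 * Aff2 = 0x0, i.e. MPIDR_EL1 = (1 << 31) | (0x12 << 8) | 0x3, keeping
 * Aff0 within the 16 CPUs that a single ICC_SGIxR_EL1 TargetList can
 * address.
 */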
static unsigned int pmu_visibility(const struct kvm_vcpu *vcpu,
                                   const struct sys_reg_desc *r)
{
        if (kvm_vcpu_has_pmu(vcpu))
                return 0;

        return REG_HIDDEN;
}
static void reset_pmu_reg(struct kvm_vcpu *vcpu, const struct sys_reg_desc *r)
{
        u64 n, mask = BIT(ARMV8_PMU_CYCLE_IDX);

        /* No PMU available, any PMU reg may UNDEF... */
        if (!kvm_arm_support_pmu_v3())
                return;

        n = read_sysreg(pmcr_el0) >> ARMV8_PMU_PMCR_N_SHIFT;
        n &= ARMV8_PMU_PMCR_N_MASK;
        if (n)
                mask |= GENMASK(n - 1, 0);

        reset_unknown(vcpu, r);
        __vcpu_sys_reg(vcpu, r->reg) &= mask;
}
static void reset_pmevcntr(struct kvm_vcpu *vcpu, const struct sys_reg_desc *r)
{
        reset_unknown(vcpu, r);
        __vcpu_sys_reg(vcpu, r->reg) &= GENMASK(31, 0);
}

static void reset_pmevtyper(struct kvm_vcpu *vcpu, const struct sys_reg_desc *r)
{
        reset_unknown(vcpu, r);
        __vcpu_sys_reg(vcpu, r->reg) &= ARMV8_PMU_EVTYPE_MASK;
}

static void reset_pmselr(struct kvm_vcpu *vcpu, const struct sys_reg_desc *r)
{
        reset_unknown(vcpu, r);
        __vcpu_sys_reg(vcpu, r->reg) &= ARMV8_PMU_COUNTER_MASK;
}
static void reset_pmcr(struct kvm_vcpu *vcpu, const struct sys_reg_desc *r)
{
        u64 pmcr;

        /* No PMU available, PMCR_EL0 may UNDEF... */
        if (!kvm_arm_support_pmu_v3())
                return;

        /* Only preserve PMCR_EL0.N, and reset the rest to 0 */
        pmcr = read_sysreg(pmcr_el0) & (ARMV8_PMU_PMCR_N_MASK << ARMV8_PMU_PMCR_N_SHIFT);
        if (!kvm_supports_32bit_el0())
                pmcr |= ARMV8_PMU_PMCR_LC;

        __vcpu_sys_reg(vcpu, r->reg) = pmcr;
}
static bool check_pmu_access_disabled(struct kvm_vcpu *vcpu, u64 flags)
{
        u64 reg = __vcpu_sys_reg(vcpu, PMUSERENR_EL0);
        bool enabled = (reg & flags) || vcpu_mode_priv(vcpu);

        if (!enabled)
                kvm_inject_undefined(vcpu);

        return !enabled;
}

static bool pmu_access_el0_disabled(struct kvm_vcpu *vcpu)
{
        return check_pmu_access_disabled(vcpu, ARMV8_PMU_USERENR_EN);
}

static bool pmu_write_swinc_el0_disabled(struct kvm_vcpu *vcpu)
{
        return check_pmu_access_disabled(vcpu, ARMV8_PMU_USERENR_SW | ARMV8_PMU_USERENR_EN);
}

static bool pmu_access_cycle_counter_el0_disabled(struct kvm_vcpu *vcpu)
{
        return check_pmu_access_disabled(vcpu, ARMV8_PMU_USERENR_CR | ARMV8_PMU_USERENR_EN);
}

static bool pmu_access_event_counter_el0_disabled(struct kvm_vcpu *vcpu)
{
        return check_pmu_access_disabled(vcpu, ARMV8_PMU_USERENR_ER | ARMV8_PMU_USERENR_EN);
}
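
/*
 * Taken together, these helpers mirror the PMUSERENR_EL0 layout: a
 * guest EL0 read of the cycle counter is allowed if either EN or CR
 * is set, a guest EL0 write to PMSWINC_EL0 needs EN or SW, and EL1
 * accesses always pass, courtesy of vcpu_mode_priv().
 */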
static bool access_pmcr(struct kvm_vcpu *vcpu, struct sys_reg_params *p,
                        const struct sys_reg_desc *r)
{
        u64 val;

        if (pmu_access_el0_disabled(vcpu))
                return false;

        if (p->is_write) {
                /*
                 * Only update writeable bits of PMCR (continuing into
                 * kvm_pmu_handle_pmcr() as well)
                 */
                val = __vcpu_sys_reg(vcpu, PMCR_EL0);
                val &= ~ARMV8_PMU_PMCR_MASK;
                val |= p->regval & ARMV8_PMU_PMCR_MASK;
                if (!kvm_supports_32bit_el0())
                        val |= ARMV8_PMU_PMCR_LC;
                kvm_pmu_handle_pmcr(vcpu, val);
                kvm_vcpu_pmu_restore_guest(vcpu);
        } else {
                /* PMCR.P & PMCR.C are RAZ */
                val = __vcpu_sys_reg(vcpu, PMCR_EL0)
                      & ~(ARMV8_PMU_PMCR_P | ARMV8_PMU_PMCR_C);
                p->regval = val;
        }

        return true;
}
static bool access_pmselr(struct kvm_vcpu *vcpu, struct sys_reg_params *p,
                          const struct sys_reg_desc *r)
{
        if (pmu_access_event_counter_el0_disabled(vcpu))
                return false;

        if (p->is_write)
                __vcpu_sys_reg(vcpu, PMSELR_EL0) = p->regval;
        else
                /* return PMSELR.SEL field */
                p->regval = __vcpu_sys_reg(vcpu, PMSELR_EL0)
                            & ARMV8_PMU_COUNTER_MASK;

        return true;
}
static bool access_pmceid(struct kvm_vcpu *vcpu, struct sys_reg_params *p,
                          const struct sys_reg_desc *r)
{
        u64 pmceid, mask, shift;

        BUG_ON(p->is_write);

        if (pmu_access_el0_disabled(vcpu))
                return false;

        get_access_mask(r, &mask, &shift);

        pmceid = kvm_pmu_get_pmceid(vcpu, (p->Op2 & 1));
        pmceid &= mask;
        pmceid >>= shift;

        p->regval = pmceid;

        return true;
}
static bool pmu_counter_idx_valid(struct kvm_vcpu *vcpu, u64 idx)
{
        u64 pmcr, val;

        pmcr = __vcpu_sys_reg(vcpu, PMCR_EL0);
        val = (pmcr >> ARMV8_PMU_PMCR_N_SHIFT) & ARMV8_PMU_PMCR_N_MASK;
        if (idx >= val && idx != ARMV8_PMU_CYCLE_IDX) {
                kvm_inject_undefined(vcpu);
                return false;
        }

        return true;
}
static bool access_pmu_evcntr(struct kvm_vcpu *vcpu,
                              struct sys_reg_params *p,
                              const struct sys_reg_desc *r)
{
        u64 idx = ~0UL;

        if (r->CRn == 9 && r->CRm == 13) {
                if (r->Op2 == 2) {
                        /* PMXEVCNTR_EL0 */
                        if (pmu_access_event_counter_el0_disabled(vcpu))
                                return false;

                        idx = __vcpu_sys_reg(vcpu, PMSELR_EL0)
                              & ARMV8_PMU_COUNTER_MASK;
                } else if (r->Op2 == 0) {
                        /* PMCCNTR_EL0 */
                        if (pmu_access_cycle_counter_el0_disabled(vcpu))
                                return false;

                        idx = ARMV8_PMU_CYCLE_IDX;
                }
        } else if (r->CRn == 0 && r->CRm == 9) {
                /* PMCCNTR */
                if (pmu_access_event_counter_el0_disabled(vcpu))
                        return false;

                idx = ARMV8_PMU_CYCLE_IDX;
        } else if (r->CRn == 14 && (r->CRm & 12) == 8) {
                /* PMEVCNTRn_EL0 */
                if (pmu_access_event_counter_el0_disabled(vcpu))
                        return false;

                idx = ((r->CRm & 3) << 3) | (r->Op2 & 7);
        }

        /* Catch any decoding mistake */
        WARN_ON(idx == ~0UL);

        if (!pmu_counter_idx_valid(vcpu, idx))
                return false;

        if (p->is_write) {
                if (pmu_access_el0_disabled(vcpu))
                        return false;

                kvm_pmu_set_counter_value(vcpu, idx, p->regval);
        } else {
                p->regval = kvm_pmu_get_counter_value(vcpu, idx);
        }

        return true;
}
static bool access_pmu_evtyper(struct kvm_vcpu *vcpu, struct sys_reg_params *p,
                               const struct sys_reg_desc *r)
{
        u64 idx, reg;

        if (pmu_access_el0_disabled(vcpu))
                return false;

        if (r->CRn == 9 && r->CRm == 13 && r->Op2 == 1) {
                /* PMXEVTYPER_EL0 */
                idx = __vcpu_sys_reg(vcpu, PMSELR_EL0) & ARMV8_PMU_COUNTER_MASK;
                reg = PMEVTYPER0_EL0 + idx;
        } else if (r->CRn == 14 && (r->CRm & 12) == 12) {
                idx = ((r->CRm & 3) << 3) | (r->Op2 & 7);
                if (idx == ARMV8_PMU_CYCLE_IDX)
                        reg = PMCCFILTR_EL0;
                else
                        /* PMEVTYPERn_EL0 */
                        reg = PMEVTYPER0_EL0 + idx;
        } else {
                BUG();
        }

        if (!pmu_counter_idx_valid(vcpu, idx))
                return false;

        if (p->is_write) {
                kvm_pmu_set_counter_event_type(vcpu, p->regval, idx);
                __vcpu_sys_reg(vcpu, reg) = p->regval & ARMV8_PMU_EVTYPE_MASK;
                kvm_vcpu_pmu_restore_guest(vcpu);
        } else {
                p->regval = __vcpu_sys_reg(vcpu, reg) & ARMV8_PMU_EVTYPE_MASK;
        }

        return true;
}
static bool access_pmcnten(struct kvm_vcpu *vcpu, struct sys_reg_params *p,
                           const struct sys_reg_desc *r)
{
        u64 val, mask;

        if (pmu_access_el0_disabled(vcpu))
                return false;

        mask = kvm_pmu_valid_counter_mask(vcpu);
        if (p->is_write) {
                val = p->regval & mask;
                if (r->Op2 & 0x1) {
                        /* accessing PMCNTENSET_EL0 */
                        __vcpu_sys_reg(vcpu, PMCNTENSET_EL0) |= val;
                        kvm_pmu_enable_counter_mask(vcpu, val);
                        kvm_vcpu_pmu_restore_guest(vcpu);
                } else {
                        /* accessing PMCNTENCLR_EL0 */
                        __vcpu_sys_reg(vcpu, PMCNTENSET_EL0) &= ~val;
                        kvm_pmu_disable_counter_mask(vcpu, val);
                }
        } else {
                p->regval = __vcpu_sys_reg(vcpu, PMCNTENSET_EL0);
        }

        return true;
}
static bool access_pminten(struct kvm_vcpu *vcpu, struct sys_reg_params *p,
                           const struct sys_reg_desc *r)
{
        u64 mask = kvm_pmu_valid_counter_mask(vcpu);

        if (check_pmu_access_disabled(vcpu, 0))
                return false;

        if (p->is_write) {
                u64 val = p->regval & mask;

                if (r->Op2 & 0x1)
                        /* accessing PMINTENSET_EL1 */
                        __vcpu_sys_reg(vcpu, PMINTENSET_EL1) |= val;
                else
                        /* accessing PMINTENCLR_EL1 */
                        __vcpu_sys_reg(vcpu, PMINTENSET_EL1) &= ~val;
        } else {
                p->regval = __vcpu_sys_reg(vcpu, PMINTENSET_EL1);
        }

        return true;
}
static bool access_pmovs(struct kvm_vcpu *vcpu, struct sys_reg_params *p,
                         const struct sys_reg_desc *r)
{
        u64 mask = kvm_pmu_valid_counter_mask(vcpu);

        if (pmu_access_el0_disabled(vcpu))
                return false;

        if (p->is_write) {
                if (r->CRm & 0x2)
                        /* accessing PMOVSSET_EL0 */
                        __vcpu_sys_reg(vcpu, PMOVSSET_EL0) |= (p->regval & mask);
                else
                        /* accessing PMOVSCLR_EL0 */
                        __vcpu_sys_reg(vcpu, PMOVSSET_EL0) &= ~(p->regval & mask);
        } else {
                p->regval = __vcpu_sys_reg(vcpu, PMOVSSET_EL0);
        }

        return true;
}
static bool access_pmswinc(struct kvm_vcpu *vcpu, struct sys_reg_params *p,
                           const struct sys_reg_desc *r)
{
        u64 mask;

        if (!p->is_write)
                return read_from_write_only(vcpu, p, r);

        if (pmu_write_swinc_el0_disabled(vcpu))
                return false;

        mask = kvm_pmu_valid_counter_mask(vcpu);
        kvm_pmu_software_increment(vcpu, p->regval & mask);
        return true;
}
static bool access_pmuserenr(struct kvm_vcpu *vcpu, struct sys_reg_params *p,
                             const struct sys_reg_desc *r)
{
        if (p->is_write) {
                if (!vcpu_mode_priv(vcpu)) {
                        kvm_inject_undefined(vcpu);
                        return false;
                }

                __vcpu_sys_reg(vcpu, PMUSERENR_EL0) =
                               p->regval & ARMV8_PMU_USERENR_MASK;
        } else {
                p->regval = __vcpu_sys_reg(vcpu, PMUSERENR_EL0)
                            & ARMV8_PMU_USERENR_MASK;
        }

        return true;
}
/* Silly macro to expand the DBG{BCR,BVR,WVR,WCR}n_EL1 registers in one go */
#define DBG_BCR_BVR_WCR_WVR_EL1(n)                                      \
        { SYS_DESC(SYS_DBGBVRn_EL1(n)),                                 \
          trap_bvr, reset_bvr, 0, 0, get_bvr, set_bvr },                \
        { SYS_DESC(SYS_DBGBCRn_EL1(n)),                                 \
          trap_bcr, reset_bcr, 0, 0, get_bcr, set_bcr },                \
        { SYS_DESC(SYS_DBGWVRn_EL1(n)),                                 \
          trap_wvr, reset_wvr, 0, 0, get_wvr, set_wvr },                \
        { SYS_DESC(SYS_DBGWCRn_EL1(n)),                                 \
          trap_wcr, reset_wcr, 0, 0, get_wcr, set_wcr }
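
/*
 * For instance, DBG_BCR_BVR_WCR_WVR_EL1(1) expands to the four table
 * entries for DBGBVR1_EL1, DBGBCR1_EL1, DBGWVR1_EL1 and DBGWCR1_EL1,
 * each wired to its trap, reset, get and set helpers; the handlers
 * recover the breakpoint/watchpoint number from the encoding's CRm.
 */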

#define PMU_SYS_REG(r)                                                  \
        SYS_DESC(r), .reset = reset_pmu_reg, .visibility = pmu_visibility

/* Macro to expand the PMEVCNTRn_EL0 register */
#define PMU_PMEVCNTR_EL0(n)                                             \
        { PMU_SYS_REG(SYS_PMEVCNTRn_EL0(n)),                            \
          .reset = reset_pmevcntr,                                      \
          .access = access_pmu_evcntr, .reg = (PMEVCNTR0_EL0 + n), }

/* Macro to expand the PMEVTYPERn_EL0 register */
#define PMU_PMEVTYPER_EL0(n)                                            \
        { PMU_SYS_REG(SYS_PMEVTYPERn_EL0(n)),                           \
          .reset = reset_pmevtyper,                                     \
          .access = access_pmu_evtyper, .reg = (PMEVTYPER0_EL0 + n), }
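
/*
 * Note on the initialisers: PMU_SYS_REG() supplies default .reset and
 * .visibility fields, and the designated initialisers that follow
 * (e.g. .reset = reset_pmevcntr above) deliberately override that
 * default; well-defined C, if a little subtle.
 */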
static bool undef_access(struct kvm_vcpu *vcpu, struct sys_reg_params *p,
                         const struct sys_reg_desc *r)
{
        kvm_inject_undefined(vcpu);

        return false;
}
/* Macro to expand the AMU counter and type registers */
#define AMU_AMEVCNTR0_EL0(n) { SYS_DESC(SYS_AMEVCNTR0_EL0(n)), undef_access }
#define AMU_AMEVTYPER0_EL0(n) { SYS_DESC(SYS_AMEVTYPER0_EL0(n)), undef_access }
#define AMU_AMEVCNTR1_EL0(n) { SYS_DESC(SYS_AMEVCNTR1_EL0(n)), undef_access }
#define AMU_AMEVTYPER1_EL0(n) { SYS_DESC(SYS_AMEVTYPER1_EL0(n)), undef_access }
static unsigned int ptrauth_visibility(const struct kvm_vcpu *vcpu,
                                       const struct sys_reg_desc *rd)
{
        return vcpu_has_ptrauth(vcpu) ? 0 : REG_HIDDEN;
}
/*
 * If we land here on a PtrAuth access, that is because we didn't
 * fixup the access on exit by allowing the PtrAuth sysregs. The only
 * way this happens is when the guest does not have PtrAuth support
 * enabled.
 */
#define __PTRAUTH_KEY(k)                                                \
        { SYS_DESC(SYS_## k), undef_access, reset_unknown, k,           \
        .visibility = ptrauth_visibility}

#define PTRAUTH_KEY(k)                                                  \
        __PTRAUTH_KEY(k ## KEYLO_EL1),                                  \
        __PTRAUTH_KEY(k ## KEYHI_EL1)
static bool access_arch_timer(struct kvm_vcpu *vcpu,
                              struct sys_reg_params *p,
                              const struct sys_reg_desc *r)
{
        enum kvm_arch_timers tmr;
        enum kvm_arch_timer_regs treg;
        u64 reg = reg_to_encoding(r);

        switch (reg) {
        case SYS_CNTP_TVAL_EL0:
        case SYS_AARCH32_CNTP_TVAL:
                tmr = TIMER_PTIMER;
                treg = TIMER_REG_TVAL;
                break;
        case SYS_CNTP_CTL_EL0:
        case SYS_AARCH32_CNTP_CTL:
                tmr = TIMER_PTIMER;
                treg = TIMER_REG_CTL;
                break;
        case SYS_CNTP_CVAL_EL0:
        case SYS_AARCH32_CNTP_CVAL:
                tmr = TIMER_PTIMER;
                treg = TIMER_REG_CVAL;
                break;
        default:
                print_sys_reg_msg(p, "%s", "Unhandled trapped timer register");
                kvm_inject_undefined(vcpu);
                return false;
        }

        if (p->is_write)
                kvm_arm_timer_write_sysreg(vcpu, tmr, treg, p->regval);
        else
                p->regval = kvm_arm_timer_read_sysreg(vcpu, tmr, treg);

        return true;
}
static u8 vcpu_pmuver(const struct kvm_vcpu *vcpu)
{
        if (kvm_vcpu_has_pmu(vcpu))
                return vcpu->kvm->arch.dfr0_pmuver.imp;

        return vcpu->kvm->arch.dfr0_pmuver.unimp;
}
static u8 perfmon_to_pmuver(u8 perfmon)
{
        switch (perfmon) {
        case ID_DFR0_EL1_PerfMon_PMUv3:
                return ID_AA64DFR0_EL1_PMUVer_IMP;
        case ID_DFR0_EL1_PerfMon_IMPDEF:
                return ID_AA64DFR0_EL1_PMUVer_IMP_DEF;
        default:
                /* Anything ARMv8.1+ and NI have the same value. For now. */
                return perfmon;
        }
}

static u8 pmuver_to_perfmon(u8 pmuver)
{
        switch (pmuver) {
        case ID_AA64DFR0_EL1_PMUVer_IMP:
                return ID_DFR0_EL1_PerfMon_PMUv3;
        case ID_AA64DFR0_EL1_PMUVer_IMP_DEF:
                return ID_DFR0_EL1_PerfMon_IMPDEF;
        default:
                /* Anything ARMv8.1+ and NI have the same value. For now. */
                return pmuver;
        }
}
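
/*
 * Example mappings: PerfMon = PMUv3 (0b0011) pairs with PMUVer = IMP
 * (0b0001), and the two IMPDEF encodings (0xf) pair with each other;
 * from ARMv8.1 on, as well as for NI (0), both fields use identical
 * values, hence the pass-through in the default case.
 */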
/* Read a sanitised cpufeature ID register by sys_reg_desc */
static u64 read_id_reg(const struct kvm_vcpu *vcpu, struct sys_reg_desc const *r)
{
        u32 id = reg_to_encoding(r);
        u64 val;

        if (sysreg_visible_as_raz(vcpu, r))
                return 0;

        val = read_sanitised_ftr_reg(id);

        switch (id) {
        case SYS_ID_AA64PFR0_EL1:
                if (!vcpu_has_sve(vcpu))
                        val &= ~ARM64_FEATURE_MASK(ID_AA64PFR0_EL1_SVE);
                val &= ~ARM64_FEATURE_MASK(ID_AA64PFR0_EL1_AMU);
                val &= ~ARM64_FEATURE_MASK(ID_AA64PFR0_EL1_CSV2);
                val |= FIELD_PREP(ARM64_FEATURE_MASK(ID_AA64PFR0_EL1_CSV2), (u64)vcpu->kvm->arch.pfr0_csv2);
                val &= ~ARM64_FEATURE_MASK(ID_AA64PFR0_EL1_CSV3);
                val |= FIELD_PREP(ARM64_FEATURE_MASK(ID_AA64PFR0_EL1_CSV3), (u64)vcpu->kvm->arch.pfr0_csv3);
                if (kvm_vgic_global_state.type == VGIC_V3) {
                        val &= ~ARM64_FEATURE_MASK(ID_AA64PFR0_EL1_GIC);
                        val |= FIELD_PREP(ARM64_FEATURE_MASK(ID_AA64PFR0_EL1_GIC), 1);
                }
                break;
        case SYS_ID_AA64PFR1_EL1:
                if (!kvm_has_mte(vcpu->kvm))
                        val &= ~ARM64_FEATURE_MASK(ID_AA64PFR1_EL1_MTE);

                val &= ~ARM64_FEATURE_MASK(ID_AA64PFR1_EL1_SME);
                break;
        case SYS_ID_AA64ISAR1_EL1:
                if (!vcpu_has_ptrauth(vcpu))
                        val &= ~(ARM64_FEATURE_MASK(ID_AA64ISAR1_EL1_APA) |
                                 ARM64_FEATURE_MASK(ID_AA64ISAR1_EL1_API) |
                                 ARM64_FEATURE_MASK(ID_AA64ISAR1_EL1_GPA) |
                                 ARM64_FEATURE_MASK(ID_AA64ISAR1_EL1_GPI));
                break;
        case SYS_ID_AA64ISAR2_EL1:
                if (!vcpu_has_ptrauth(vcpu))
                        val &= ~(ARM64_FEATURE_MASK(ID_AA64ISAR2_EL1_APA3) |
                                 ARM64_FEATURE_MASK(ID_AA64ISAR2_EL1_GPA3));
                if (!cpus_have_final_cap(ARM64_HAS_WFXT))
                        val &= ~ARM64_FEATURE_MASK(ID_AA64ISAR2_EL1_WFxT);
                break;
        case SYS_ID_AA64DFR0_EL1:
                /* Limit debug to ARMv8.0 */
                val &= ~ARM64_FEATURE_MASK(ID_AA64DFR0_EL1_DebugVer);
                val |= FIELD_PREP(ARM64_FEATURE_MASK(ID_AA64DFR0_EL1_DebugVer), 6);
                /* Set PMUver to the required version */
                val &= ~ARM64_FEATURE_MASK(ID_AA64DFR0_EL1_PMUVer);
                val |= FIELD_PREP(ARM64_FEATURE_MASK(ID_AA64DFR0_EL1_PMUVer),
                                  vcpu_pmuver(vcpu));
                /* Hide SPE from guests */
                val &= ~ARM64_FEATURE_MASK(ID_AA64DFR0_EL1_PMSVer);
                break;
        case SYS_ID_DFR0_EL1:
                val &= ~ARM64_FEATURE_MASK(ID_DFR0_EL1_PerfMon);
                val |= FIELD_PREP(ARM64_FEATURE_MASK(ID_DFR0_EL1_PerfMon),
                                  pmuver_to_perfmon(vcpu_pmuver(vcpu)));
                break;
        }

        return val;
}
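
/*
 * Net effect, as an example: a guest reading ID_AA64DFR0_EL1 on a
 * host with ARMv8.4 debug and SPE still sees DebugVer = 0x6 (ARMv8.0)
 * and PMSVer = 0, while PMUVer reflects what was configured for this
 * VM rather than the raw host value.
 */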
static unsigned int id_visibility(const struct kvm_vcpu *vcpu,
                                  const struct sys_reg_desc *r)
{
        u32 id = reg_to_encoding(r);

        switch (id) {
        case SYS_ID_AA64ZFR0_EL1:
                if (!vcpu_has_sve(vcpu))
                        return REG_RAZ;
                break;
        }

        return 0;
}

static unsigned int aa32_id_visibility(const struct kvm_vcpu *vcpu,
                                       const struct sys_reg_desc *r)
{
        /*
         * AArch32 ID registers are UNKNOWN if AArch32 isn't implemented at any
         * EL. Promote to RAZ/WI in order to guarantee consistency between
         * systems.
         */
        if (!kvm_supports_32bit_el0())
                return REG_RAZ | REG_USER_WI;

        return id_visibility(vcpu, r);
}
static unsigned int raz_visibility(const struct kvm_vcpu *vcpu,
                                   const struct sys_reg_desc *r)
{
        return REG_RAZ;
}

/* cpufeature ID register access trap handlers */

static bool access_id_reg(struct kvm_vcpu *vcpu,
                          struct sys_reg_params *p,
                          const struct sys_reg_desc *r)
{
        if (p->is_write)
                return write_to_read_only(vcpu, p, r);

        p->regval = read_id_reg(vcpu, r);
        return true;
}
/* Visibility overrides for SVE-specific control registers */
static unsigned int sve_visibility(const struct kvm_vcpu *vcpu,
                                   const struct sys_reg_desc *rd)
{
        if (vcpu_has_sve(vcpu))
                return 0;

        return REG_HIDDEN;
}
static int set_id_aa64pfr0_el1(struct kvm_vcpu *vcpu,
                               const struct sys_reg_desc *rd,
                               u64 val)
{
        u8 csv2, csv3;

        /*
         * Allow AA64PFR0_EL1.CSV2 to be set from userspace as long as
         * it doesn't promise more than what is actually provided (the
         * guest could otherwise be covered in ectoplasmic residue).
         */
        csv2 = cpuid_feature_extract_unsigned_field(val, ID_AA64PFR0_EL1_CSV2_SHIFT);
        if (csv2 > 1 ||
            (csv2 && arm64_get_spectre_v2_state() != SPECTRE_UNAFFECTED))
                return -EINVAL;

        /* Same thing for CSV3 */
        csv3 = cpuid_feature_extract_unsigned_field(val, ID_AA64PFR0_EL1_CSV3_SHIFT);
        if (csv3 > 1 ||
            (csv3 && arm64_get_meltdown_state() != SPECTRE_UNAFFECTED))
                return -EINVAL;

        /* We can only differ with CSV[23], and anything else is an error */
        val ^= read_id_reg(vcpu, rd);
        val &= ~(ARM64_FEATURE_MASK(ID_AA64PFR0_EL1_CSV2) |
                 ARM64_FEATURE_MASK(ID_AA64PFR0_EL1_CSV3));
        if (val)
                return -EINVAL;

        vcpu->kvm->arch.pfr0_csv2 = csv2;
        vcpu->kvm->arch.pfr0_csv3 = csv3;

        return 0;
}
static int set_id_aa64dfr0_el1(struct kvm_vcpu *vcpu,
                               const struct sys_reg_desc *rd,
                               u64 val)
{
        u8 pmuver, host_pmuver;
        bool valid_pmu;

        host_pmuver = kvm_arm_pmu_get_pmuver_limit();

        /*
         * Allow AA64DFR0_EL1.PMUver to be set from userspace as long
         * as it doesn't promise more than what the HW gives us. We
         * allow an IMPDEF PMU though, only if no PMU is supported
         * (KVM backward compatibility handling).
         */
        pmuver = FIELD_GET(ARM64_FEATURE_MASK(ID_AA64DFR0_EL1_PMUVer), val);
        if ((pmuver != ID_AA64DFR0_EL1_PMUVer_IMP_DEF && pmuver > host_pmuver))
                return -EINVAL;

        valid_pmu = (pmuver != 0 && pmuver != ID_AA64DFR0_EL1_PMUVer_IMP_DEF);

        /* Make sure view register and PMU support do match */
        if (kvm_vcpu_has_pmu(vcpu) != valid_pmu)
                return -EINVAL;

        /* We can only differ with PMUver, and anything else is an error */
        val ^= read_id_reg(vcpu, rd);
        val &= ~ARM64_FEATURE_MASK(ID_AA64DFR0_EL1_PMUVer);
        if (val)
                return -EINVAL;

        if (valid_pmu)
                vcpu->kvm->arch.dfr0_pmuver.imp = pmuver;
        else
                vcpu->kvm->arch.dfr0_pmuver.unimp = pmuver;

        return 0;
}
static int set_id_dfr0_el1(struct kvm_vcpu *vcpu,
                           const struct sys_reg_desc *rd,
                           u64 val)
{
        u8 perfmon, host_perfmon;
        bool valid_pmu;

        host_perfmon = pmuver_to_perfmon(kvm_arm_pmu_get_pmuver_limit());

        /*
         * Allow DFR0_EL1.PerfMon to be set from userspace as long as
         * it doesn't promise more than what the HW gives us on the
         * AArch64 side (as everything is emulated with that), and
         * that this is a PMUv3.
         */
        perfmon = FIELD_GET(ARM64_FEATURE_MASK(ID_DFR0_EL1_PerfMon), val);
        if ((perfmon != ID_DFR0_EL1_PerfMon_IMPDEF && perfmon > host_perfmon) ||
            (perfmon != 0 && perfmon < ID_DFR0_EL1_PerfMon_PMUv3))
                return -EINVAL;

        valid_pmu = (perfmon != 0 && perfmon != ID_DFR0_EL1_PerfMon_IMPDEF);

        /* Make sure view register and PMU support do match */
        if (kvm_vcpu_has_pmu(vcpu) != valid_pmu)
                return -EINVAL;

        /* We can only differ with PerfMon, and anything else is an error */
        val ^= read_id_reg(vcpu, rd);
        val &= ~ARM64_FEATURE_MASK(ID_DFR0_EL1_PerfMon);
        if (val)
                return -EINVAL;

        if (valid_pmu)
                vcpu->kvm->arch.dfr0_pmuver.imp = perfmon_to_pmuver(perfmon);
        else
                vcpu->kvm->arch.dfr0_pmuver.unimp = perfmon_to_pmuver(perfmon);

        return 0;
}
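
/*
 * The resulting userspace contract, by example: writing
 * PerfMon = IMPDEF (0xf) is only accepted when the vcpu has no PMU,
 * writing a PMUv3 value requires the vcpu to have one, and changing
 * any bit other than PerfMon is rejected with -EINVAL.
 */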
/*
 * cpufeature ID register user accessors
 *
 * For now, these registers are immutable for userspace, so no values
 * are stored, and for set_id_reg() we don't allow the effective value
 * to be changed.
 */
static int get_id_reg(struct kvm_vcpu *vcpu, const struct sys_reg_desc *rd,
                      u64 *val)
{
        *val = read_id_reg(vcpu, rd);
        return 0;
}

static int set_id_reg(struct kvm_vcpu *vcpu, const struct sys_reg_desc *rd,
                      u64 val)
{
        /* This is what we mean by invariant: you can't change it. */
        if (val != read_id_reg(vcpu, rd))
                return -EINVAL;

        return 0;
}

static int get_raz_reg(struct kvm_vcpu *vcpu, const struct sys_reg_desc *rd,
                       u64 *val)
{
        *val = 0;
        return 0;
}

static int set_wi_reg(struct kvm_vcpu *vcpu, const struct sys_reg_desc *rd,
                      u64 val)
{
        return 0;
}
static bool access_ctr(struct kvm_vcpu *vcpu, struct sys_reg_params *p,
                       const struct sys_reg_desc *r)
{
        if (p->is_write)
                return write_to_read_only(vcpu, p, r);

        p->regval = read_sanitised_ftr_reg(SYS_CTR_EL0);
        return true;
}

static bool access_clidr(struct kvm_vcpu *vcpu, struct sys_reg_params *p,
                         const struct sys_reg_desc *r)
{
        if (p->is_write)
                return write_to_read_only(vcpu, p, r);

        p->regval = read_sysreg(clidr_el1);
        return true;
}
static bool access_csselr(struct kvm_vcpu *vcpu, struct sys_reg_params *p,
                          const struct sys_reg_desc *r)
{
        int reg = r->reg;

        if (p->is_write)
                vcpu_write_sys_reg(vcpu, p->regval, reg);
        else
                p->regval = vcpu_read_sys_reg(vcpu, reg);
        return true;
}
static bool access_ccsidr(struct kvm_vcpu *vcpu, struct sys_reg_params *p,
                          const struct sys_reg_desc *r)
{
        u32 csselr;

        if (p->is_write)
                return write_to_read_only(vcpu, p, r);

        csselr = vcpu_read_sys_reg(vcpu, CSSELR_EL1);
        p->regval = get_ccsidr(csselr);

        /*
         * Guests should not be doing cache operations by set/way at all, and
         * for this reason, we trap them and attempt to infer the intent, so
         * that we can flush the entire guest's address space at the appropriate
         * time.
         * To prevent this trapping from causing performance problems, let's
         * expose the geometry of all data and unified caches (which are
         * guaranteed to be PIPT and thus non-aliasing) as 1 set and 1 way.
         * [If guests should attempt to infer aliasing properties from the
         * geometry (which is not permitted by the architecture), they would
         * only do so for virtually indexed caches.]
         */
        if (!(csselr & 1)) // data or unified cache
                p->regval &= ~GENMASK(27, 3); // NumSets, Associativity
        return true;
}
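
/*
 * Concretely: for a data or unified cache (CSSELR.InD == 0), clearing
 * bits [27:3] zeroes the NumSets and Associativity fields, so the
 * guest reads back a 1-set, 1-way cache whose only meaningful content
 * is the line size in bits [2:0].
 */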
static unsigned int mte_visibility(const struct kvm_vcpu *vcpu,
                                   const struct sys_reg_desc *rd)
{
        if (kvm_has_mte(vcpu->kvm))
                return 0;

        return REG_HIDDEN;
}

#define MTE_REG(name) {                         \
        SYS_DESC(SYS_##name),                   \
        .access = undef_access,                 \
        .reset = reset_unknown,                 \
        .reg = name,                            \
        .visibility = mte_visibility,           \
}
/* sys_reg_desc initialiser for known cpufeature ID registers */
#define ID_SANITISED(name) {                    \
        SYS_DESC(SYS_##name),                   \
        .access = access_id_reg,                \
        .get_user = get_id_reg,                 \
        .set_user = set_id_reg,                 \
        .visibility = id_visibility,            \
}

/* sys_reg_desc initialiser for known AArch32 cpufeature ID registers */
#define AA32_ID_SANITISED(name) {               \
        SYS_DESC(SYS_##name),                   \
        .access = access_id_reg,                \
        .get_user = get_id_reg,                 \
        .set_user = set_id_reg,                 \
        .visibility = aa32_id_visibility,       \
}

/*
 * sys_reg_desc initialiser for architecturally unallocated cpufeature ID
 * register with encoding Op0=3, Op1=0, CRn=0, CRm=crm, Op2=op2
 * (1 <= crm < 8, 0 <= Op2 < 8).
 */
#define ID_UNALLOCATED(crm, op2) {              \
        Op0(3), Op1(0), CRn(0), CRm(crm), Op2(op2),     \
        .access = access_id_reg,                \
        .get_user = get_id_reg,                 \
        .set_user = set_id_reg,                 \
        .visibility = raz_visibility            \
}

/*
 * sys_reg_desc initialiser for known ID registers that we hide from guests.
 * For now, these are exposed just like unallocated ID regs: they appear
 * RAZ for the guest.
 */
#define ID_HIDDEN(name) {                       \
        SYS_DESC(SYS_##name),                   \
        .access = access_id_reg,                \
        .get_user = get_id_reg,                 \
        .set_user = set_id_reg,                 \
        .visibility = raz_visibility,           \
}
/*
 * Architected system registers.
 * Important: Must be sorted ascending by Op0, Op1, CRn, CRm, Op2
 *
 * Debug handling: We do trap most, if not all debug related system
 * registers. The implementation is good enough to ensure that a guest
 * can use these with minimal performance degradation. The drawback is
 * that we don't implement any of the external debug architecture.
 * This should be revisited if we ever encounter a more demanding
 * debug client.
 */
static const struct sys_reg_desc sys_reg_descs[] = {
        { SYS_DESC(SYS_DC_ISW), access_dcsw },
        { SYS_DESC(SYS_DC_CSW), access_dcsw },
        { SYS_DESC(SYS_DC_CISW), access_dcsw },

        DBG_BCR_BVR_WCR_WVR_EL1(0),
        DBG_BCR_BVR_WCR_WVR_EL1(1),
        { SYS_DESC(SYS_MDCCINT_EL1), trap_debug_regs, reset_val, MDCCINT_EL1, 0 },
        { SYS_DESC(SYS_MDSCR_EL1), trap_debug_regs, reset_val, MDSCR_EL1, 0 },
        DBG_BCR_BVR_WCR_WVR_EL1(2),
        DBG_BCR_BVR_WCR_WVR_EL1(3),
        DBG_BCR_BVR_WCR_WVR_EL1(4),
        DBG_BCR_BVR_WCR_WVR_EL1(5),
        DBG_BCR_BVR_WCR_WVR_EL1(6),
        DBG_BCR_BVR_WCR_WVR_EL1(7),
        DBG_BCR_BVR_WCR_WVR_EL1(8),
        DBG_BCR_BVR_WCR_WVR_EL1(9),
        DBG_BCR_BVR_WCR_WVR_EL1(10),
        DBG_BCR_BVR_WCR_WVR_EL1(11),
        DBG_BCR_BVR_WCR_WVR_EL1(12),
        DBG_BCR_BVR_WCR_WVR_EL1(13),
        DBG_BCR_BVR_WCR_WVR_EL1(14),
        DBG_BCR_BVR_WCR_WVR_EL1(15),

        { SYS_DESC(SYS_MDRAR_EL1), trap_raz_wi },
        { SYS_DESC(SYS_OSLAR_EL1), trap_oslar_el1 },
        { SYS_DESC(SYS_OSLSR_EL1), trap_oslsr_el1, reset_val, OSLSR_EL1,
          SYS_OSLSR_OSLM_IMPLEMENTED, .set_user = set_oslsr_el1, },
        { SYS_DESC(SYS_OSDLR_EL1), trap_raz_wi },
        { SYS_DESC(SYS_DBGPRCR_EL1), trap_raz_wi },
        { SYS_DESC(SYS_DBGCLAIMSET_EL1), trap_raz_wi },
        { SYS_DESC(SYS_DBGCLAIMCLR_EL1), trap_raz_wi },
        { SYS_DESC(SYS_DBGAUTHSTATUS_EL1), trap_dbgauthstatus_el1 },

        { SYS_DESC(SYS_MDCCSR_EL0), trap_raz_wi },
        { SYS_DESC(SYS_DBGDTR_EL0), trap_raz_wi },
        // DBGDTR[TR]X_EL0 share the same encoding
        { SYS_DESC(SYS_DBGDTRTX_EL0), trap_raz_wi },

        { SYS_DESC(SYS_DBGVCR32_EL2), NULL, reset_val, DBGVCR32_EL2, 0 },

        { SYS_DESC(SYS_MPIDR_EL1), NULL, reset_mpidr, MPIDR_EL1 },

        /*
         * ID regs: all ID_SANITISED() entries here must have corresponding
         * entries in arm64_ftr_regs[].
         */

        /* AArch64 mappings of the AArch32 ID registers */
        /* CRm=1 */
        AA32_ID_SANITISED(ID_PFR0_EL1),
        AA32_ID_SANITISED(ID_PFR1_EL1),
        { SYS_DESC(SYS_ID_DFR0_EL1), .access = access_id_reg,
          .get_user = get_id_reg, .set_user = set_id_dfr0_el1,
          .visibility = aa32_id_visibility, },
        ID_HIDDEN(ID_AFR0_EL1),
        AA32_ID_SANITISED(ID_MMFR0_EL1),
        AA32_ID_SANITISED(ID_MMFR1_EL1),
        AA32_ID_SANITISED(ID_MMFR2_EL1),
        AA32_ID_SANITISED(ID_MMFR3_EL1),

        /* CRm=2 */
        AA32_ID_SANITISED(ID_ISAR0_EL1),
        AA32_ID_SANITISED(ID_ISAR1_EL1),
        AA32_ID_SANITISED(ID_ISAR2_EL1),
        AA32_ID_SANITISED(ID_ISAR3_EL1),
        AA32_ID_SANITISED(ID_ISAR4_EL1),
        AA32_ID_SANITISED(ID_ISAR5_EL1),
        AA32_ID_SANITISED(ID_MMFR4_EL1),
        AA32_ID_SANITISED(ID_ISAR6_EL1),

        /* CRm=3 */
        AA32_ID_SANITISED(MVFR0_EL1),
        AA32_ID_SANITISED(MVFR1_EL1),
        AA32_ID_SANITISED(MVFR2_EL1),
        ID_UNALLOCATED(3,3),
        AA32_ID_SANITISED(ID_PFR2_EL1),
        ID_HIDDEN(ID_DFR1_EL1),
        AA32_ID_SANITISED(ID_MMFR5_EL1),
        ID_UNALLOCATED(3,7),

        /* AArch64 ID registers */
        /* CRm=4 */
        { SYS_DESC(SYS_ID_AA64PFR0_EL1), .access = access_id_reg,
          .get_user = get_id_reg, .set_user = set_id_aa64pfr0_el1, },
        ID_SANITISED(ID_AA64PFR1_EL1),
        ID_UNALLOCATED(4,2),
        ID_UNALLOCATED(4,3),
        ID_SANITISED(ID_AA64ZFR0_EL1),
        ID_HIDDEN(ID_AA64SMFR0_EL1),
        ID_UNALLOCATED(4,6),
        ID_UNALLOCATED(4,7),

        /* CRm=5 */
        { SYS_DESC(SYS_ID_AA64DFR0_EL1), .access = access_id_reg,
          .get_user = get_id_reg, .set_user = set_id_aa64dfr0_el1, },
        ID_SANITISED(ID_AA64DFR1_EL1),
        ID_UNALLOCATED(5,2),
        ID_UNALLOCATED(5,3),
        ID_HIDDEN(ID_AA64AFR0_EL1),
        ID_HIDDEN(ID_AA64AFR1_EL1),
        ID_UNALLOCATED(5,6),
        ID_UNALLOCATED(5,7),

        /* CRm=6 */
        ID_SANITISED(ID_AA64ISAR0_EL1),
        ID_SANITISED(ID_AA64ISAR1_EL1),
        ID_SANITISED(ID_AA64ISAR2_EL1),
        ID_UNALLOCATED(6,3),
        ID_UNALLOCATED(6,4),
        ID_UNALLOCATED(6,5),
        ID_UNALLOCATED(6,6),
        ID_UNALLOCATED(6,7),

        /* CRm=7 */
        ID_SANITISED(ID_AA64MMFR0_EL1),
        ID_SANITISED(ID_AA64MMFR1_EL1),
        ID_SANITISED(ID_AA64MMFR2_EL1),
        ID_UNALLOCATED(7,3),
        ID_UNALLOCATED(7,4),
        ID_UNALLOCATED(7,5),
        ID_UNALLOCATED(7,6),
        ID_UNALLOCATED(7,7),

        { SYS_DESC(SYS_SCTLR_EL1), access_vm_reg, reset_val, SCTLR_EL1, 0x00C50078 },
        { SYS_DESC(SYS_ACTLR_EL1), access_actlr, reset_actlr, ACTLR_EL1 },
        { SYS_DESC(SYS_CPACR_EL1), NULL, reset_val, CPACR_EL1, 0 },

        MTE_REG(RGSR_EL1),
        MTE_REG(GCR_EL1),

        { SYS_DESC(SYS_ZCR_EL1), NULL, reset_val, ZCR_EL1, 0, .visibility = sve_visibility },
        { SYS_DESC(SYS_TRFCR_EL1), undef_access },
        { SYS_DESC(SYS_SMPRI_EL1), undef_access },
        { SYS_DESC(SYS_SMCR_EL1), undef_access },
        { SYS_DESC(SYS_TTBR0_EL1), access_vm_reg, reset_unknown, TTBR0_EL1 },
        { SYS_DESC(SYS_TTBR1_EL1), access_vm_reg, reset_unknown, TTBR1_EL1 },
        { SYS_DESC(SYS_TCR_EL1), access_vm_reg, reset_val, TCR_EL1, 0 },

        PTRAUTH_KEY(APIA),
        PTRAUTH_KEY(APIB),
        PTRAUTH_KEY(APDA),
        PTRAUTH_KEY(APDB),
        PTRAUTH_KEY(APGA),

        { SYS_DESC(SYS_AFSR0_EL1), access_vm_reg, reset_unknown, AFSR0_EL1 },
        { SYS_DESC(SYS_AFSR1_EL1), access_vm_reg, reset_unknown, AFSR1_EL1 },
        { SYS_DESC(SYS_ESR_EL1), access_vm_reg, reset_unknown, ESR_EL1 },

        { SYS_DESC(SYS_ERRIDR_EL1), trap_raz_wi },
        { SYS_DESC(SYS_ERRSELR_EL1), trap_raz_wi },
        { SYS_DESC(SYS_ERXFR_EL1), trap_raz_wi },
        { SYS_DESC(SYS_ERXCTLR_EL1), trap_raz_wi },
        { SYS_DESC(SYS_ERXSTATUS_EL1), trap_raz_wi },
        { SYS_DESC(SYS_ERXADDR_EL1), trap_raz_wi },
        { SYS_DESC(SYS_ERXMISC0_EL1), trap_raz_wi },
        { SYS_DESC(SYS_ERXMISC1_EL1), trap_raz_wi },

        MTE_REG(TFSR_EL1),
        MTE_REG(TFSRE0_EL1),

        { SYS_DESC(SYS_FAR_EL1), access_vm_reg, reset_unknown, FAR_EL1 },
        { SYS_DESC(SYS_PAR_EL1), NULL, reset_unknown, PAR_EL1 },

        { SYS_DESC(SYS_PMSCR_EL1), undef_access },
        { SYS_DESC(SYS_PMSNEVFR_EL1), undef_access },
        { SYS_DESC(SYS_PMSICR_EL1), undef_access },
        { SYS_DESC(SYS_PMSIRR_EL1), undef_access },
        { SYS_DESC(SYS_PMSFCR_EL1), undef_access },
        { SYS_DESC(SYS_PMSEVFR_EL1), undef_access },
        { SYS_DESC(SYS_PMSLATFR_EL1), undef_access },
        { SYS_DESC(SYS_PMSIDR_EL1), undef_access },
        { SYS_DESC(SYS_PMBLIMITR_EL1), undef_access },
        { SYS_DESC(SYS_PMBPTR_EL1), undef_access },
        { SYS_DESC(SYS_PMBSR_EL1), undef_access },
        /* PMBIDR_EL1 is not trapped */

        { PMU_SYS_REG(SYS_PMINTENSET_EL1),
          .access = access_pminten, .reg = PMINTENSET_EL1 },
        { PMU_SYS_REG(SYS_PMINTENCLR_EL1),
          .access = access_pminten, .reg = PMINTENSET_EL1 },
        { SYS_DESC(SYS_PMMIR_EL1), trap_raz_wi },

        { SYS_DESC(SYS_MAIR_EL1), access_vm_reg, reset_unknown, MAIR_EL1 },
        { SYS_DESC(SYS_AMAIR_EL1), access_vm_reg, reset_amair_el1, AMAIR_EL1 },

        { SYS_DESC(SYS_LORSA_EL1), trap_loregion },
        { SYS_DESC(SYS_LOREA_EL1), trap_loregion },
        { SYS_DESC(SYS_LORN_EL1), trap_loregion },
        { SYS_DESC(SYS_LORC_EL1), trap_loregion },
        { SYS_DESC(SYS_LORID_EL1), trap_loregion },

        { SYS_DESC(SYS_VBAR_EL1), NULL, reset_val, VBAR_EL1, 0 },
        { SYS_DESC(SYS_DISR_EL1), NULL, reset_val, DISR_EL1, 0 },

        { SYS_DESC(SYS_ICC_IAR0_EL1), write_to_read_only },
        { SYS_DESC(SYS_ICC_EOIR0_EL1), read_from_write_only },
        { SYS_DESC(SYS_ICC_HPPIR0_EL1), write_to_read_only },
        { SYS_DESC(SYS_ICC_DIR_EL1), read_from_write_only },
        { SYS_DESC(SYS_ICC_RPR_EL1), write_to_read_only },
        { SYS_DESC(SYS_ICC_SGI1R_EL1), access_gic_sgi },
        { SYS_DESC(SYS_ICC_ASGI1R_EL1), access_gic_sgi },
        { SYS_DESC(SYS_ICC_SGI0R_EL1), access_gic_sgi },
        { SYS_DESC(SYS_ICC_IAR1_EL1), write_to_read_only },
        { SYS_DESC(SYS_ICC_EOIR1_EL1), read_from_write_only },
        { SYS_DESC(SYS_ICC_HPPIR1_EL1), write_to_read_only },
        { SYS_DESC(SYS_ICC_SRE_EL1), access_gic_sre },

        { SYS_DESC(SYS_CONTEXTIDR_EL1), access_vm_reg, reset_val, CONTEXTIDR_EL1, 0 },
        { SYS_DESC(SYS_TPIDR_EL1), NULL, reset_unknown, TPIDR_EL1 },

        { SYS_DESC(SYS_SCXTNUM_EL1), undef_access },

        { SYS_DESC(SYS_CNTKCTL_EL1), NULL, reset_val, CNTKCTL_EL1, 0},

        { SYS_DESC(SYS_CCSIDR_EL1), access_ccsidr },
        { SYS_DESC(SYS_CLIDR_EL1), access_clidr },
        { SYS_DESC(SYS_SMIDR_EL1), undef_access },
        { SYS_DESC(SYS_CSSELR_EL1), access_csselr, reset_unknown, CSSELR_EL1 },
        { SYS_DESC(SYS_CTR_EL0), access_ctr },
        { SYS_DESC(SYS_SVCR), undef_access },

        { PMU_SYS_REG(SYS_PMCR_EL0), .access = access_pmcr,
          .reset = reset_pmcr, .reg = PMCR_EL0 },
        { PMU_SYS_REG(SYS_PMCNTENSET_EL0),
          .access = access_pmcnten, .reg = PMCNTENSET_EL0 },
        { PMU_SYS_REG(SYS_PMCNTENCLR_EL0),
          .access = access_pmcnten, .reg = PMCNTENSET_EL0 },
        { PMU_SYS_REG(SYS_PMOVSCLR_EL0),
          .access = access_pmovs, .reg = PMOVSSET_EL0 },
        /*
         * PMSWINC_EL0 is exposed to userspace as RAZ/WI, as it was
         * previously (and pointlessly) advertised in the past...
         */
        { PMU_SYS_REG(SYS_PMSWINC_EL0),
          .get_user = get_raz_reg, .set_user = set_wi_reg,
          .access = access_pmswinc, .reset = NULL },
        { PMU_SYS_REG(SYS_PMSELR_EL0),
          .access = access_pmselr, .reset = reset_pmselr, .reg = PMSELR_EL0 },
        { PMU_SYS_REG(SYS_PMCEID0_EL0),
          .access = access_pmceid, .reset = NULL },
        { PMU_SYS_REG(SYS_PMCEID1_EL0),
          .access = access_pmceid, .reset = NULL },
        { PMU_SYS_REG(SYS_PMCCNTR_EL0),
          .access = access_pmu_evcntr, .reset = reset_unknown, .reg = PMCCNTR_EL0 },
        { PMU_SYS_REG(SYS_PMXEVTYPER_EL0),
          .access = access_pmu_evtyper, .reset = NULL },
        { PMU_SYS_REG(SYS_PMXEVCNTR_EL0),
          .access = access_pmu_evcntr, .reset = NULL },
        /*
         * PMUSERENR_EL0 resets as unknown in 64bit mode while it resets as zero
         * in 32bit mode. Here we choose to reset it as zero for consistency.
         */
        { PMU_SYS_REG(SYS_PMUSERENR_EL0), .access = access_pmuserenr,
          .reset = reset_val, .reg = PMUSERENR_EL0, .val = 0 },
        { PMU_SYS_REG(SYS_PMOVSSET_EL0),
          .access = access_pmovs, .reg = PMOVSSET_EL0 },

        { SYS_DESC(SYS_TPIDR_EL0), NULL, reset_unknown, TPIDR_EL0 },
        { SYS_DESC(SYS_TPIDRRO_EL0), NULL, reset_unknown, TPIDRRO_EL0 },
        { SYS_DESC(SYS_TPIDR2_EL0), undef_access },

        { SYS_DESC(SYS_SCXTNUM_EL0), undef_access },

        { SYS_DESC(SYS_AMCR_EL0), undef_access },
        { SYS_DESC(SYS_AMCFGR_EL0), undef_access },
        { SYS_DESC(SYS_AMCGCR_EL0), undef_access },
        { SYS_DESC(SYS_AMUSERENR_EL0), undef_access },
        { SYS_DESC(SYS_AMCNTENCLR0_EL0), undef_access },
        { SYS_DESC(SYS_AMCNTENSET0_EL0), undef_access },
        { SYS_DESC(SYS_AMCNTENCLR1_EL0), undef_access },
        { SYS_DESC(SYS_AMCNTENSET1_EL0), undef_access },
        AMU_AMEVCNTR0_EL0(0),
        AMU_AMEVCNTR0_EL0(1),
        AMU_AMEVCNTR0_EL0(2),
        AMU_AMEVCNTR0_EL0(3),
        AMU_AMEVCNTR0_EL0(4),
        AMU_AMEVCNTR0_EL0(5),
        AMU_AMEVCNTR0_EL0(6),
        AMU_AMEVCNTR0_EL0(7),
        AMU_AMEVCNTR0_EL0(8),
        AMU_AMEVCNTR0_EL0(9),
        AMU_AMEVCNTR0_EL0(10),
        AMU_AMEVCNTR0_EL0(11),
        AMU_AMEVCNTR0_EL0(12),
        AMU_AMEVCNTR0_EL0(13),
        AMU_AMEVCNTR0_EL0(14),
        AMU_AMEVCNTR0_EL0(15),
        AMU_AMEVTYPER0_EL0(0),
        AMU_AMEVTYPER0_EL0(1),
        AMU_AMEVTYPER0_EL0(2),
        AMU_AMEVTYPER0_EL0(3),
        AMU_AMEVTYPER0_EL0(4),
        AMU_AMEVTYPER0_EL0(5),
        AMU_AMEVTYPER0_EL0(6),
        AMU_AMEVTYPER0_EL0(7),
        AMU_AMEVTYPER0_EL0(8),
        AMU_AMEVTYPER0_EL0(9),
        AMU_AMEVTYPER0_EL0(10),
        AMU_AMEVTYPER0_EL0(11),
        AMU_AMEVTYPER0_EL0(12),
        AMU_AMEVTYPER0_EL0(13),
        AMU_AMEVTYPER0_EL0(14),
        AMU_AMEVTYPER0_EL0(15),
        AMU_AMEVCNTR1_EL0(0),
        AMU_AMEVCNTR1_EL0(1),
        AMU_AMEVCNTR1_EL0(2),
        AMU_AMEVCNTR1_EL0(3),
        AMU_AMEVCNTR1_EL0(4),
        AMU_AMEVCNTR1_EL0(5),
        AMU_AMEVCNTR1_EL0(6),
        AMU_AMEVCNTR1_EL0(7),
        AMU_AMEVCNTR1_EL0(8),
        AMU_AMEVCNTR1_EL0(9),
        AMU_AMEVCNTR1_EL0(10),
        AMU_AMEVCNTR1_EL0(11),
        AMU_AMEVCNTR1_EL0(12),
        AMU_AMEVCNTR1_EL0(13),
        AMU_AMEVCNTR1_EL0(14),
        AMU_AMEVCNTR1_EL0(15),
        AMU_AMEVTYPER1_EL0(0),
        AMU_AMEVTYPER1_EL0(1),
        AMU_AMEVTYPER1_EL0(2),
        AMU_AMEVTYPER1_EL0(3),
        AMU_AMEVTYPER1_EL0(4),
        AMU_AMEVTYPER1_EL0(5),
        AMU_AMEVTYPER1_EL0(6),
        AMU_AMEVTYPER1_EL0(7),
        AMU_AMEVTYPER1_EL0(8),
        AMU_AMEVTYPER1_EL0(9),
        AMU_AMEVTYPER1_EL0(10),
        AMU_AMEVTYPER1_EL0(11),
        AMU_AMEVTYPER1_EL0(12),
        AMU_AMEVTYPER1_EL0(13),
        AMU_AMEVTYPER1_EL0(14),
        AMU_AMEVTYPER1_EL0(15),

        { SYS_DESC(SYS_CNTP_TVAL_EL0), access_arch_timer },
        { SYS_DESC(SYS_CNTP_CTL_EL0), access_arch_timer },
        { SYS_DESC(SYS_CNTP_CVAL_EL0), access_arch_timer },

        /* PMEVCNTRn_EL0 */
        PMU_PMEVCNTR_EL0(0),
        PMU_PMEVCNTR_EL0(1),
        PMU_PMEVCNTR_EL0(2),
        PMU_PMEVCNTR_EL0(3),
        PMU_PMEVCNTR_EL0(4),
        PMU_PMEVCNTR_EL0(5),
        PMU_PMEVCNTR_EL0(6),
        PMU_PMEVCNTR_EL0(7),
        PMU_PMEVCNTR_EL0(8),
        PMU_PMEVCNTR_EL0(9),
        PMU_PMEVCNTR_EL0(10),
        PMU_PMEVCNTR_EL0(11),
        PMU_PMEVCNTR_EL0(12),
        PMU_PMEVCNTR_EL0(13),
        PMU_PMEVCNTR_EL0(14),
        PMU_PMEVCNTR_EL0(15),
        PMU_PMEVCNTR_EL0(16),
        PMU_PMEVCNTR_EL0(17),
        PMU_PMEVCNTR_EL0(18),
        PMU_PMEVCNTR_EL0(19),
        PMU_PMEVCNTR_EL0(20),
        PMU_PMEVCNTR_EL0(21),
        PMU_PMEVCNTR_EL0(22),
        PMU_PMEVCNTR_EL0(23),
        PMU_PMEVCNTR_EL0(24),
        PMU_PMEVCNTR_EL0(25),
        PMU_PMEVCNTR_EL0(26),
        PMU_PMEVCNTR_EL0(27),
        PMU_PMEVCNTR_EL0(28),
        PMU_PMEVCNTR_EL0(29),
        PMU_PMEVCNTR_EL0(30),
        /* PMEVTYPERn_EL0 */
        PMU_PMEVTYPER_EL0(0),
        PMU_PMEVTYPER_EL0(1),
        PMU_PMEVTYPER_EL0(2),
        PMU_PMEVTYPER_EL0(3),
        PMU_PMEVTYPER_EL0(4),
        PMU_PMEVTYPER_EL0(5),
        PMU_PMEVTYPER_EL0(6),
        PMU_PMEVTYPER_EL0(7),
        PMU_PMEVTYPER_EL0(8),
        PMU_PMEVTYPER_EL0(9),
        PMU_PMEVTYPER_EL0(10),
        PMU_PMEVTYPER_EL0(11),
        PMU_PMEVTYPER_EL0(12),
        PMU_PMEVTYPER_EL0(13),
        PMU_PMEVTYPER_EL0(14),
        PMU_PMEVTYPER_EL0(15),
        PMU_PMEVTYPER_EL0(16),
        PMU_PMEVTYPER_EL0(17),
        PMU_PMEVTYPER_EL0(18),
        PMU_PMEVTYPER_EL0(19),
        PMU_PMEVTYPER_EL0(20),
        PMU_PMEVTYPER_EL0(21),
        PMU_PMEVTYPER_EL0(22),
        PMU_PMEVTYPER_EL0(23),
        PMU_PMEVTYPER_EL0(24),
        PMU_PMEVTYPER_EL0(25),
        PMU_PMEVTYPER_EL0(26),
        PMU_PMEVTYPER_EL0(27),
        PMU_PMEVTYPER_EL0(28),
        PMU_PMEVTYPER_EL0(29),
        PMU_PMEVTYPER_EL0(30),
        /*
         * PMCCFILTR_EL0 resets as unknown in 64bit mode while it resets as zero
         * in 32bit mode. Here we choose to reset it as zero for consistency.
         */
        { PMU_SYS_REG(SYS_PMCCFILTR_EL0), .access = access_pmu_evtyper,
          .reset = reset_val, .reg = PMCCFILTR_EL0, .val = 0 },

        { SYS_DESC(SYS_DACR32_EL2), NULL, reset_unknown, DACR32_EL2 },
        { SYS_DESC(SYS_IFSR32_EL2), NULL, reset_unknown, IFSR32_EL2 },
        { SYS_DESC(SYS_FPEXC32_EL2), NULL, reset_val, FPEXC32_EL2, 0x700 },
};
static bool trap_dbgdidr(struct kvm_vcpu *vcpu,
                         struct sys_reg_params *p,
                         const struct sys_reg_desc *r)
{
        if (p->is_write) {
                return ignore_write(vcpu, p);
        } else {
                u64 dfr = read_sanitised_ftr_reg(SYS_ID_AA64DFR0_EL1);
                u64 pfr = read_sanitised_ftr_reg(SYS_ID_AA64PFR0_EL1);
                u32 el3 = !!cpuid_feature_extract_unsigned_field(pfr, ID_AA64PFR0_EL1_EL3_SHIFT);

                p->regval = ((((dfr >> ID_AA64DFR0_EL1_WRPs_SHIFT) & 0xf) << 28) |
                             (((dfr >> ID_AA64DFR0_EL1_BRPs_SHIFT) & 0xf) << 24) |
                             (((dfr >> ID_AA64DFR0_EL1_CTX_CMPs_SHIFT) & 0xf) << 20)
                             | (6 << 16) | (1 << 15) | (el3 << 14) | (el3 << 12));
                return true;
        }
}
/*
 * AArch32 debug register mappings
 *
 * AArch32 DBGBVRn is mapped to DBGBVRn_EL1[31:0]
 * AArch32 DBGBXVRn is mapped to DBGBVRn_EL1[63:32]
 *
 * None of the other registers share their location, so treat them as
 * if they were 64bit.
 */
#define DBG_BCR_BVR_WCR_WVR(n)                                                \
        /* DBGBVRn */                                                         \
        { AA32(LO), Op1( 0), CRn( 0), CRm((n)), Op2( 4), trap_bvr, NULL, n }, \
        /* DBGBCRn */                                                         \
        { Op1( 0), CRn( 0), CRm((n)), Op2( 5), trap_bcr, NULL, n },           \
        /* DBGWVRn */                                                         \
        { Op1( 0), CRn( 0), CRm((n)), Op2( 6), trap_wvr, NULL, n },           \
        /* DBGWCRn */                                                         \
        { Op1( 0), CRn( 0), CRm((n)), Op2( 7), trap_wcr, NULL, n }

#define DBGBXVR(n)                                                            \
        { AA32(HI), Op1( 0), CRn( 1), CRm((n)), Op2( 1), trap_bvr, NULL, n }
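
/*
 * Example: the AArch32 pair DBGBVR1 (AA32(LO), CRn 0) and DBGBXVR1
 * (AA32(HI), CRn 1) are two 32-bit views of the single 64-bit
 * DBGBVR1_EL1 state, which is why both map to trap_bvr with the same
 * index n.
 */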
/*
 * Trapped cp14 registers. We generally ignore most of the external
 * debug, on the principle that they don't really make sense to a
 * guest. Revisit this one day, should this principle change.
 */
static const struct sys_reg_desc cp14_regs[] = {
        /* DBGDIDR */
        { Op1( 0), CRn( 0), CRm( 0), Op2( 0), trap_dbgdidr },
        /* DBGDTRRXext */
        { Op1( 0), CRn( 0), CRm( 0), Op2( 2), trap_raz_wi },

        DBG_BCR_BVR_WCR_WVR(0),
        /* DBGDSCRint */
        { Op1( 0), CRn( 0), CRm( 1), Op2( 0), trap_raz_wi },
        DBG_BCR_BVR_WCR_WVR(1),
        /* DBGDCCINT */
        { Op1( 0), CRn( 0), CRm( 2), Op2( 0), trap_debug_regs, NULL, MDCCINT_EL1 },
        /* DBGDSCRext */
        { Op1( 0), CRn( 0), CRm( 2), Op2( 2), trap_debug_regs, NULL, MDSCR_EL1 },
        DBG_BCR_BVR_WCR_WVR(2),
        /* DBGDTR[RT]Xint */
        { Op1( 0), CRn( 0), CRm( 3), Op2( 0), trap_raz_wi },
        /* DBGDTR[RT]Xext */
        { Op1( 0), CRn( 0), CRm( 3), Op2( 2), trap_raz_wi },
        DBG_BCR_BVR_WCR_WVR(3),
        DBG_BCR_BVR_WCR_WVR(4),
        DBG_BCR_BVR_WCR_WVR(5),
        /* DBGWFAR */
        { Op1( 0), CRn( 0), CRm( 6), Op2( 0), trap_raz_wi },
        /* DBGOSECCR */
        { Op1( 0), CRn( 0), CRm( 6), Op2( 2), trap_raz_wi },
        DBG_BCR_BVR_WCR_WVR(6),
        /* DBGVCR */
        { Op1( 0), CRn( 0), CRm( 7), Op2( 0), trap_debug_regs, NULL, DBGVCR32_EL2 },
        DBG_BCR_BVR_WCR_WVR(7),
        DBG_BCR_BVR_WCR_WVR(8),
        DBG_BCR_BVR_WCR_WVR(9),
        DBG_BCR_BVR_WCR_WVR(10),
        DBG_BCR_BVR_WCR_WVR(11),
        DBG_BCR_BVR_WCR_WVR(12),
        DBG_BCR_BVR_WCR_WVR(13),
        DBG_BCR_BVR_WCR_WVR(14),
        DBG_BCR_BVR_WCR_WVR(15),

        /* DBGDRAR (32bit) */
        { Op1( 0), CRn( 1), CRm( 0), Op2( 0), trap_raz_wi },

        DBGBXVR(0),
        /* DBGOSLAR */
        { Op1( 0), CRn( 1), CRm( 0), Op2( 4), trap_oslar_el1 },
        DBGBXVR(1),
        /* DBGOSLSR */
        { Op1( 0), CRn( 1), CRm( 1), Op2( 4), trap_oslsr_el1, NULL, OSLSR_EL1 },
        DBGBXVR(2),
        DBGBXVR(3),
        /* DBGOSDLR */
        { Op1( 0), CRn( 1), CRm( 3), Op2( 4), trap_raz_wi },
        DBGBXVR(4),
        /* DBGPRCR */
        { Op1( 0), CRn( 1), CRm( 4), Op2( 4), trap_raz_wi },
        DBGBXVR(5),
        DBGBXVR(6),
        DBGBXVR(7),
        DBGBXVR(8),
        DBGBXVR(9),
        DBGBXVR(10),
        DBGBXVR(11),
        DBGBXVR(12),
        DBGBXVR(13),
        DBGBXVR(14),
        DBGBXVR(15),

        /* DBGDSAR (32bit) */
        { Op1( 0), CRn( 2), CRm( 0), Op2( 0), trap_raz_wi },

        /* DBGDEVID2 */
        { Op1( 0), CRn( 7), CRm( 0), Op2( 7), trap_raz_wi },
        /* DBGDEVID1 */
        { Op1( 0), CRn( 7), CRm( 1), Op2( 7), trap_raz_wi },
        /* DBGDEVID */
        { Op1( 0), CRn( 7), CRm( 2), Op2( 7), trap_raz_wi },
        /* DBGCLAIMSET */
        { Op1( 0), CRn( 7), CRm( 8), Op2( 6), trap_raz_wi },
        /* DBGCLAIMCLR */
        { Op1( 0), CRn( 7), CRm( 9), Op2( 6), trap_raz_wi },
        /* DBGAUTHSTATUS */
        { Op1( 0), CRn( 7), CRm(14), Op2( 6), trap_dbgauthstatus_el1 },
};
/* Trapped cp14 64bit registers */
static const struct sys_reg_desc cp14_64_regs[] = {
        /* DBGDRAR (64bit) */
        { Op1( 0), CRm( 1), .access = trap_raz_wi },

        /* DBGDSAR (64bit) */
        { Op1( 0), CRm( 2), .access = trap_raz_wi },
};
#define CP15_PMU_SYS_REG(_map, _Op1, _CRn, _CRm, _Op2)                  \
        AA32(_map),                                                     \
        Op1(_Op1), CRn(_CRn), CRm(_CRm), Op2(_Op2),                     \
        .visibility = pmu_visibility

/* Macro to expand the PMEVCNTRn register */
#define PMU_PMEVCNTR(n)                                                 \
        { CP15_PMU_SYS_REG(DIRECT, 0, 0b1110,                           \
          (0b1000 | (((n) >> 3) & 0x3)), ((n) & 0x7)),                  \
          .access = access_pmu_evcntr }

/* Macro to expand the PMEVTYPERn register */
#define PMU_PMEVTYPER(n)                                                \
        { CP15_PMU_SYS_REG(DIRECT, 0, 0b1110,                           \
          (0b1100 | (((n) >> 3) & 0x3)), ((n) & 0x7)),                  \
          .access = access_pmu_evtyper }
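
/*
 * Worked expansion: PMU_PMEVCNTR(10) yields CRn = 0b1110 (14),
 * CRm = 0b1000 | (10 >> 3) = 0b1001 and Op2 = 10 & 0x7 = 2, matching
 * the architected cp15 encoding of PMEVCNTR10, routed to
 * access_pmu_evcntr with the PMU visibility check applied.
 */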
/*
 * Trapped cp15 registers. TTBR0/TTBR1 get a double encoding,
 * depending on the way they are accessed (as a 32bit or a 64bit
 * register).
 */
static const struct sys_reg_desc cp15_regs[] = {
        { Op1( 0), CRn( 0), CRm( 0), Op2( 1), access_ctr },
        { Op1( 0), CRn( 1), CRm( 0), Op2( 0), access_vm_reg, NULL, SCTLR_EL1 },
        /* ACTLR */
        { AA32(LO), Op1( 0), CRn( 1), CRm( 0), Op2( 1), access_actlr, NULL, ACTLR_EL1 },
        /* ACTLR2 */
        { AA32(HI), Op1( 0), CRn( 1), CRm( 0), Op2( 3), access_actlr, NULL, ACTLR_EL1 },
        { Op1( 0), CRn( 2), CRm( 0), Op2( 0), access_vm_reg, NULL, TTBR0_EL1 },
        { Op1( 0), CRn( 2), CRm( 0), Op2( 1), access_vm_reg, NULL, TTBR1_EL1 },
        /* TTBCR */
        { AA32(LO), Op1( 0), CRn( 2), CRm( 0), Op2( 2), access_vm_reg, NULL, TCR_EL1 },
        /* TTBCR2 */
        { AA32(HI), Op1( 0), CRn( 2), CRm( 0), Op2( 3), access_vm_reg, NULL, TCR_EL1 },
        { Op1( 0), CRn( 3), CRm( 0), Op2( 0), access_vm_reg, NULL, DACR32_EL2 },
        /* DFSR */
        { Op1( 0), CRn( 5), CRm( 0), Op2( 0), access_vm_reg, NULL, ESR_EL1 },
        { Op1( 0), CRn( 5), CRm( 0), Op2( 1), access_vm_reg, NULL, IFSR32_EL2 },
        /* ADFSR */
        { Op1( 0), CRn( 5), CRm( 1), Op2( 0), access_vm_reg, NULL, AFSR0_EL1 },
        /* AIFSR */
        { Op1( 0), CRn( 5), CRm( 1), Op2( 1), access_vm_reg, NULL, AFSR1_EL1 },
        /* DFAR */
        { AA32(LO), Op1( 0), CRn( 6), CRm( 0), Op2( 0), access_vm_reg, NULL, FAR_EL1 },
        /* IFAR */
        { AA32(HI), Op1( 0), CRn( 6), CRm( 0), Op2( 2), access_vm_reg, NULL, FAR_EL1 },

        /*
         * DC{C,I,CI}SW operations:
         */
        { Op1( 0), CRn( 7), CRm( 6), Op2( 2), access_dcsw },
        { Op1( 0), CRn( 7), CRm(10), Op2( 2), access_dcsw },
        { Op1( 0), CRn( 7), CRm(14), Op2( 2), access_dcsw },

        /* PMU */
        { CP15_PMU_SYS_REG(DIRECT, 0, 9, 12, 0), .access = access_pmcr },
        { CP15_PMU_SYS_REG(DIRECT, 0, 9, 12, 1), .access = access_pmcnten },
        { CP15_PMU_SYS_REG(DIRECT, 0, 9, 12, 2), .access = access_pmcnten },
        { CP15_PMU_SYS_REG(DIRECT, 0, 9, 12, 3), .access = access_pmovs },
        { CP15_PMU_SYS_REG(DIRECT, 0, 9, 12, 4), .access = access_pmswinc },
        { CP15_PMU_SYS_REG(DIRECT, 0, 9, 12, 5), .access = access_pmselr },
        { CP15_PMU_SYS_REG(LO, 0, 9, 12, 6), .access = access_pmceid },
        { CP15_PMU_SYS_REG(LO, 0, 9, 12, 7), .access = access_pmceid },
        { CP15_PMU_SYS_REG(DIRECT, 0, 9, 13, 0), .access = access_pmu_evcntr },
        { CP15_PMU_SYS_REG(DIRECT, 0, 9, 13, 1), .access = access_pmu_evtyper },
        { CP15_PMU_SYS_REG(DIRECT, 0, 9, 13, 2), .access = access_pmu_evcntr },
        { CP15_PMU_SYS_REG(DIRECT, 0, 9, 14, 0), .access = access_pmuserenr },
        { CP15_PMU_SYS_REG(DIRECT, 0, 9, 14, 1), .access = access_pminten },
        { CP15_PMU_SYS_REG(DIRECT, 0, 9, 14, 2), .access = access_pminten },
        { CP15_PMU_SYS_REG(DIRECT, 0, 9, 14, 3), .access = access_pmovs },
        { CP15_PMU_SYS_REG(HI, 0, 9, 14, 4), .access = access_pmceid },
        { CP15_PMU_SYS_REG(HI, 0, 9, 14, 5), .access = access_pmceid },
        /* PMMIR */
        { CP15_PMU_SYS_REG(DIRECT, 0, 9, 14, 6), .access = trap_raz_wi },

	/* PRRR/MAIR0 */
	{ AA32(LO), Op1( 0), CRn(10), CRm( 2), Op2( 0), access_vm_reg, NULL, MAIR_EL1 },
	/* NMRR/MAIR1 */
	{ AA32(HI), Op1( 0), CRn(10), CRm( 2), Op2( 1), access_vm_reg, NULL, MAIR_EL1 },
	/* AMAIR0 */
	{ AA32(LO), Op1( 0), CRn(10), CRm( 3), Op2( 0), access_vm_reg, NULL, AMAIR_EL1 },
	/* AMAIR1 */
	{ AA32(HI), Op1( 0), CRn(10), CRm( 3), Op2( 1), access_vm_reg, NULL, AMAIR_EL1 },

	/* ICC_SRE */
	{ Op1( 0), CRn(12), CRm(12), Op2( 5), access_gic_sre },

	{ Op1( 0), CRn(13), CRm( 0), Op2( 1), access_vm_reg, NULL, CONTEXTIDR_EL1 },

	/* Arch timers */
	{ SYS_DESC(SYS_AARCH32_CNTP_TVAL), access_arch_timer },
	{ SYS_DESC(SYS_AARCH32_CNTP_CTL), access_arch_timer },

	/* PMEVCNTRn */
	PMU_PMEVCNTR(0),
	PMU_PMEVCNTR(1),
	PMU_PMEVCNTR(2),
	PMU_PMEVCNTR(3),
	PMU_PMEVCNTR(4),
	PMU_PMEVCNTR(5),
	PMU_PMEVCNTR(6),
	PMU_PMEVCNTR(7),
	PMU_PMEVCNTR(8),
	PMU_PMEVCNTR(9),
	PMU_PMEVCNTR(10),
	PMU_PMEVCNTR(11),
	PMU_PMEVCNTR(12),
	PMU_PMEVCNTR(13),
	PMU_PMEVCNTR(14),
	PMU_PMEVCNTR(15),
	PMU_PMEVCNTR(16),
	PMU_PMEVCNTR(17),
	PMU_PMEVCNTR(18),
	PMU_PMEVCNTR(19),
	PMU_PMEVCNTR(20),
	PMU_PMEVCNTR(21),
	PMU_PMEVCNTR(22),
	PMU_PMEVCNTR(23),
	PMU_PMEVCNTR(24),
	PMU_PMEVCNTR(25),
	PMU_PMEVCNTR(26),
	PMU_PMEVCNTR(27),
	PMU_PMEVCNTR(28),
	PMU_PMEVCNTR(29),
	PMU_PMEVCNTR(30),
	/* PMEVTYPERn */
	PMU_PMEVTYPER(0),
	PMU_PMEVTYPER(1),
	PMU_PMEVTYPER(2),
	PMU_PMEVTYPER(3),
	PMU_PMEVTYPER(4),
	PMU_PMEVTYPER(5),
	PMU_PMEVTYPER(6),
	PMU_PMEVTYPER(7),
	PMU_PMEVTYPER(8),
	PMU_PMEVTYPER(9),
	PMU_PMEVTYPER(10),
	PMU_PMEVTYPER(11),
	PMU_PMEVTYPER(12),
	PMU_PMEVTYPER(13),
	PMU_PMEVTYPER(14),
	PMU_PMEVTYPER(15),
	PMU_PMEVTYPER(16),
	PMU_PMEVTYPER(17),
	PMU_PMEVTYPER(18),
	PMU_PMEVTYPER(19),
	PMU_PMEVTYPER(20),
	PMU_PMEVTYPER(21),
	PMU_PMEVTYPER(22),
	PMU_PMEVTYPER(23),
	PMU_PMEVTYPER(24),
	PMU_PMEVTYPER(25),
	PMU_PMEVTYPER(26),
	PMU_PMEVTYPER(27),
	PMU_PMEVTYPER(28),
	PMU_PMEVTYPER(29),
	PMU_PMEVTYPER(30),
	/* PMCCFILTR */
	{ CP15_PMU_SYS_REG(DIRECT, 0, 14, 15, 7), .access = access_pmu_evtyper },

	{ Op1(1), CRn( 0), CRm( 0), Op2(0), access_ccsidr },
	{ Op1(1), CRn( 0), CRm( 0), Op2(1), access_clidr },
	{ Op1(2), CRn( 0), CRm( 0), Op2(0), access_csselr, NULL, CSSELR_EL1 },
};
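
/*
 * TTBR0/TTBR1 show up again below: these are the 64-bit (MCRR/MRRC)
 * halves of the double encoding called out above cp15_regs.
 */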
static const struct sys_reg_desc cp15_64_regs[] = {
	{ Op1( 0), CRn( 0), CRm( 2), Op2( 0), access_vm_reg, NULL, TTBR0_EL1 },
	{ CP15_PMU_SYS_REG(DIRECT, 0, 0, 9, 0), .access = access_pmu_evcntr },
	{ Op1( 0), CRn( 0), CRm(12), Op2( 0), access_gic_sgi }, /* ICC_SGI1R */
	{ Op1( 1), CRn( 0), CRm( 2), Op2( 0), access_vm_reg, NULL, TTBR1_EL1 },
	{ Op1( 1), CRn( 0), CRm(12), Op2( 0), access_gic_sgi }, /* ICC_ASGI1R */
	{ Op1( 2), CRn( 0), CRm(12), Op2( 0), access_gic_sgi }, /* ICC_SGI0R */
	{ SYS_DESC(SYS_AARCH32_CNTP_CVAL), access_arch_timer },
};

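/*
 * The tables above (and the AArch64 one) must stay sorted: find_reg() looks
 * descriptors up by binary search, so an out-of-order entry would silently
 * become unreachable. check_sysreg_table() below enforces this at init time.
 */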
static bool check_sysreg_table(const struct sys_reg_desc *table, unsigned int n,
			       bool is_32)
{
	unsigned int i;

	for (i = 0; i < n; i++) {
		if (!is_32 && table[i].reg && !table[i].reset) {
			kvm_err("sys_reg table %pS entry %d lacks reset\n", &table[i], i);
			return false;
		}

		if (i && cmp_sys_reg(&table[i-1], &table[i]) >= 0) {
			kvm_err("sys_reg table %pS entry %d out of order\n", &table[i - 1], i - 1);
			return false;
		}
	}

	return true;
}

int kvm_handle_cp14_load_store(struct kvm_vcpu *vcpu)
{
	kvm_inject_undefined(vcpu);
	return 1;
}

static void perform_access(struct kvm_vcpu *vcpu,
			   struct sys_reg_params *params,
			   const struct sys_reg_desc *r)
{
	trace_kvm_sys_access(*vcpu_pc(vcpu), params, r);

	/* Check for regs disabled by runtime config */
	if (sysreg_hidden(vcpu, r)) {
		kvm_inject_undefined(vcpu);
		return;
	}

	/*
	 * Not having an accessor means that we have configured a trap
	 * that we don't know how to handle. This certainly qualifies
	 * as a gross bug that should be fixed right away.
	 */
	BUG_ON(!r->access);

	/* Skip instruction if instructed so */
	if (likely(r->access(vcpu, params, r)))
		kvm_incr_pc(vcpu);
}

/*
 * emulate_cp -- tries to match a sys_reg access in a handling table, and
 *               call the corresponding trap handler.
 *
 * @params: pointer to the descriptor of the access
 * @table: array of trap descriptors
 * @num: size of the trap descriptor array
 *
 * Return true if the access has been handled, false if not.
 */
static bool emulate_cp(struct kvm_vcpu *vcpu,
		       struct sys_reg_params *params,
		       const struct sys_reg_desc *table,
		       size_t num)
{
	const struct sys_reg_desc *r;

	if (!table)
		return false;	/* Not handled */

	r = find_reg(params, table, num);

	if (r) {
		perform_access(vcpu, params, r);
		return true;
	}

	/* Not handled */
	return false;
}

static void unhandled_cp_access(struct kvm_vcpu *vcpu,
				struct sys_reg_params *params)
{
	u8 esr_ec = kvm_vcpu_trap_get_class(vcpu);
	int cp = -1;

	switch (esr_ec) {
	case ESR_ELx_EC_CP15_32:
	case ESR_ELx_EC_CP15_64:
		cp = 15;
		break;
	case ESR_ELx_EC_CP14_MR:
	case ESR_ELx_EC_CP14_64:
		cp = 14;
		break;
	default:
		WARN_ON(1);
	}

	print_sys_reg_msg(params,
			  "Unsupported guest CP%d access at: %08lx [%08lx]\n",
			  cp, *vcpu_pc(vcpu), *vcpu_cpsr(vcpu));
	kvm_inject_undefined(vcpu);
}

/**
 * kvm_handle_cp_64 -- handles a mrrc/mcrr trap on a guest CP14/CP15 access
 * @vcpu: The VCPU pointer
 * @global: The table of trap descriptors to match against
 * @nr_global: The number of entries in @global
 */
static int kvm_handle_cp_64(struct kvm_vcpu *vcpu,
			    const struct sys_reg_desc *global,
			    size_t nr_global)
{
	struct sys_reg_params params;
	u64 esr = kvm_vcpu_get_esr(vcpu);
	int Rt = kvm_vcpu_sys_get_rt(vcpu);
	int Rt2 = (esr >> 10) & 0x1f;
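
	/*
	 * Field layout refresher (see the ARM ARM ISS encoding for
	 * MCRR/MRRC): bit 0 is the Direction bit (0 for a write), CRm
	 * lives in bits [4:1], Rt in [9:5], Rt2 in [14:10] and Opc1 in
	 * [19:16] - exactly the extractions below.
	 */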
	params.CRm = (esr >> 1) & 0xf;
	params.is_write = ((esr & 1) == 0);

	params.Op0 = 0;
	params.Op1 = (esr >> 16) & 0xf;
	params.Op2 = 0;
	params.CRn = 0;

	/*
	 * Make a 64-bit value out of Rt and Rt2. As we use the same trap
	 * backends between AArch32 and AArch64, we get away with it.
	 */
	if (params.is_write) {
		params.regval = vcpu_get_reg(vcpu, Rt) & 0xffffffff;
		params.regval |= vcpu_get_reg(vcpu, Rt2) << 32;
	}

	/*
	 * If the table contains a handler, handle the
	 * potential register operation in the case of a read and return
	 * with success.
	 */
	if (emulate_cp(vcpu, &params, global, nr_global)) {
		/* Split up the value between registers for the read side */
		if (!params.is_write) {
			vcpu_set_reg(vcpu, Rt, lower_32_bits(params.regval));
			vcpu_set_reg(vcpu, Rt2, upper_32_bits(params.regval));
		}

		return 1;
	}

	unhandled_cp_access(vcpu, &params);
	return 1;
}

static bool emulate_sys_reg(struct kvm_vcpu *vcpu, struct sys_reg_params *params);

/*
 * The CP10 ID registers are architecturally mapped to AArch64 feature
 * registers. Abuse that fact so we can rely on the AArch64 handler for
 * accesses to these registers.
 */
static bool kvm_esr_cp10_id_to_sys64(u64 esr, struct sys_reg_params *params)
{
	u8 reg_id = (esr >> 10) & 0xf;
	bool valid;

	params->is_write = ((esr & 1) == 0);
	params->Op0 = 3;
	params->Op1 = 0;
	params->CRn = 0;
	params->CRm = 3;

	/* CP10 ID registers are read-only */
	valid = !params->is_write;

	switch (reg_id) {
	/* MVFR0 */
	case 0b0111:
		params->Op2 = 0;
		break;
	/* MVFR1 */
	case 0b0110:
		params->Op2 = 1;
		break;
	/* MVFR2 */
	case 0b0101:
		params->Op2 = 2;
		break;
	default:
		valid = false;
	}

	if (valid)
		return true;

	kvm_pr_unimpl("Unhandled cp10 register %s: %u\n",
		      params->is_write ? "write" : "read", reg_id);
	return false;
}

/**
 * kvm_handle_cp10_id() - Handles a VMRS trap on guest access to a 'Media and
 *			  VFP Register' from AArch32.
 * @vcpu: The vCPU pointer
 *
 * MVFR{0-2} are architecturally mapped to the AArch64 MVFR{0-2}_EL1 registers.
 * Work out the correct AArch64 system register encoding and reroute to the
 * AArch64 system register emulation.
 */
int kvm_handle_cp10_id(struct kvm_vcpu *vcpu)
{
	int Rt = kvm_vcpu_sys_get_rt(vcpu);
	u64 esr = kvm_vcpu_get_esr(vcpu);
	struct sys_reg_params params;

	/* UNDEF on any unhandled register access */
	if (!kvm_esr_cp10_id_to_sys64(esr, &params)) {
		kvm_inject_undefined(vcpu);
		return 1;
	}

	if (emulate_sys_reg(vcpu, &params))
		vcpu_set_reg(vcpu, Rt, params.regval);

	return 1;
}

/**
 * kvm_emulate_cp15_id_reg() - Handles an MRC trap on a guest CP15 access where
 *			       CRn=0, which corresponds to the AArch32 feature
 *			       registers.
 * @vcpu: the vCPU pointer
 * @params: the system register access parameters.
 *
 * Our cp15 system register tables do not enumerate the AArch32 feature
 * registers. Conveniently, our AArch64 table does, and the AArch32 system
 * register encoding can be trivially remapped into the AArch64 for the feature
 * registers: Append op0=3, leaving op1, CRn, CRm, and op2 the same.
 *
 * According to DDI0487G.b G7.3.1, paragraph "Behavior of VMSAv8-32 32-bit
 * System registers with (coproc=0b1111, CRn==c0)", read accesses from this
 * range are either UNKNOWN or RES0. Rerouting remains architectural as we
 * treat undefined registers in this range as RAZ.
 */
static int kvm_emulate_cp15_id_reg(struct kvm_vcpu *vcpu,
				   struct sys_reg_params *params)
{
	int Rt = kvm_vcpu_sys_get_rt(vcpu);

	/* Treat impossible writes to RO registers as UNDEFINED */
	if (params->is_write) {
		unhandled_cp_access(vcpu, params);
		return 1;
	}

	params->Op0 = 3;

	/*
	 * All registers where CRm > 3 are known to be UNKNOWN/RAZ from AArch32.
	 * Avoid conflicting with future expansion of AArch64 feature registers
	 * and simply treat them as RAZ here.
	 */
	if (params->CRm > 3)
		params->regval = 0;
	else if (!emulate_sys_reg(vcpu, params))
		return 1;

	vcpu_set_reg(vcpu, Rt, params->regval);
	return 1;
}
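
/*
 * Worked example (added for illustration): an AArch32
 * "MRC p15, 0, <Rt>, c0, c1, 0" (ID_PFR0) becomes Op0=3, Op1=0, CRn=0,
 * CRm=1, Op2=0 after the Op0 fixup above, which is the AArch64 encoding
 * of ID_PFR0_EL1.
 */
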
/**
 * kvm_handle_cp_32 -- handles a mrc/mcr trap on a guest CP14/CP15 access
 * @vcpu: The VCPU pointer
 * @params: The system register access parameters
 * @global: The table of trap descriptors to match against
 * @nr_global: The number of entries in @global
 */
static int kvm_handle_cp_32(struct kvm_vcpu *vcpu,
			    struct sys_reg_params *params,
			    const struct sys_reg_desc *global,
			    size_t nr_global)
{
	int Rt = kvm_vcpu_sys_get_rt(vcpu);

	params->regval = vcpu_get_reg(vcpu, Rt);

	if (emulate_cp(vcpu, params, global, nr_global)) {
		if (!params->is_write)
			vcpu_set_reg(vcpu, Rt, params->regval);
		return 1;
	}

	unhandled_cp_access(vcpu, params);
	return 1;
}

int kvm_handle_cp15_64(struct kvm_vcpu *vcpu)
{
	return kvm_handle_cp_64(vcpu, cp15_64_regs, ARRAY_SIZE(cp15_64_regs));
}

int kvm_handle_cp15_32(struct kvm_vcpu *vcpu)
{
	struct sys_reg_params params;

	params = esr_cp1x_32_to_params(kvm_vcpu_get_esr(vcpu));

	/*
	 * Certain AArch32 ID registers are handled by rerouting to the AArch64
	 * system register table. Registers in the ID range where CRm=0 are
	 * excluded from this scheme as they do not trivially map into AArch64
	 * system register encodings.
	 */
	if (params.Op1 == 0 && params.CRn == 0 && params.CRm)
		return kvm_emulate_cp15_id_reg(vcpu, &params);

	return kvm_handle_cp_32(vcpu, &params, cp15_regs, ARRAY_SIZE(cp15_regs));
}

int kvm_handle_cp14_64(struct kvm_vcpu *vcpu)
{
	return kvm_handle_cp_64(vcpu, cp14_64_regs, ARRAY_SIZE(cp14_64_regs));
}

int kvm_handle_cp14_32(struct kvm_vcpu *vcpu)
{
	struct sys_reg_params params;

	params = esr_cp1x_32_to_params(kvm_vcpu_get_esr(vcpu));

	return kvm_handle_cp_32(vcpu, &params, cp14_regs, ARRAY_SIZE(cp14_regs));
}

static bool is_imp_def_sys_reg(struct sys_reg_params *params)
{
	// See ARM DDI 0487E.a, section D12.3.2
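	// (CRn & 0b1011) == 0b1011 accepts CRn == 0b1011 or 0b1111, i.e. the
	// c11/c15 IMPLEMENTATION DEFINED encoding space under Op0 == 3.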
	return params->Op0 == 3 && (params->CRn & 0b1011) == 0b1011;
}

/**
 * emulate_sys_reg - Emulate a guest access to an AArch64 system register
 * @vcpu: The VCPU pointer
 * @params: Decoded system register parameters
 *
 * Return: true if the system register access was successful, false otherwise.
 */
static bool emulate_sys_reg(struct kvm_vcpu *vcpu,
			    struct sys_reg_params *params)
{
	const struct sys_reg_desc *r;

	r = find_reg(params, sys_reg_descs, ARRAY_SIZE(sys_reg_descs));

	if (likely(r)) {
		perform_access(vcpu, params, r);
		return true;
	}

	if (is_imp_def_sys_reg(params)) {
		kvm_inject_undefined(vcpu);
	} else {
		print_sys_reg_msg(params,
				  "Unsupported guest sys_reg access at: %lx [%08lx]\n",
				  *vcpu_pc(vcpu), *vcpu_cpsr(vcpu));
		kvm_inject_undefined(vcpu);
	}

	return false;
}

/**
 * kvm_reset_sys_regs - sets system registers to reset value
 * @vcpu: The VCPU pointer
 *
 * This function finds the right table above and sets the registers on the
 * virtual CPU struct to their architecturally defined reset values.
 */
void kvm_reset_sys_regs(struct kvm_vcpu *vcpu)
{
	unsigned long i;

	for (i = 0; i < ARRAY_SIZE(sys_reg_descs); i++)
		if (sys_reg_descs[i].reset)
			sys_reg_descs[i].reset(vcpu, &sys_reg_descs[i]);
}

/**
 * kvm_handle_sys_reg -- handles a mrs/msr trap on a guest sys_reg access
 * @vcpu: The VCPU pointer
 */
int kvm_handle_sys_reg(struct kvm_vcpu *vcpu)
{
	struct sys_reg_params params;
	unsigned long esr = kvm_vcpu_get_esr(vcpu);
	int Rt = kvm_vcpu_sys_get_rt(vcpu);

	trace_kvm_handle_sys_reg(esr);

	params = esr_sys64_to_params(esr);
	params.regval = vcpu_get_reg(vcpu, Rt);

	if (!emulate_sys_reg(vcpu, &params))
		return 1;

	if (!params.is_write)
		vcpu_set_reg(vcpu, Rt, params.regval);

	return 1;
}

/******************************************************************************
 * Userspace API
 *****************************************************************************/

static bool index_to_params(u64 id, struct sys_reg_params *params)
{
	switch (id & KVM_REG_SIZE_MASK) {
	case KVM_REG_SIZE_U64:
		/* Any unused index bits means it's not valid. */
		if (id & ~(KVM_REG_ARCH_MASK | KVM_REG_SIZE_MASK
			   | KVM_REG_ARM_COPROC_MASK
			   | KVM_REG_ARM64_SYSREG_OP0_MASK
			   | KVM_REG_ARM64_SYSREG_OP1_MASK
			   | KVM_REG_ARM64_SYSREG_CRN_MASK
			   | KVM_REG_ARM64_SYSREG_CRM_MASK
			   | KVM_REG_ARM64_SYSREG_OP2_MASK))
			return false;
		params->Op0 = ((id & KVM_REG_ARM64_SYSREG_OP0_MASK)
			       >> KVM_REG_ARM64_SYSREG_OP0_SHIFT);
		params->Op1 = ((id & KVM_REG_ARM64_SYSREG_OP1_MASK)
			       >> KVM_REG_ARM64_SYSREG_OP1_SHIFT);
		params->CRn = ((id & KVM_REG_ARM64_SYSREG_CRN_MASK)
			       >> KVM_REG_ARM64_SYSREG_CRN_SHIFT);
		params->CRm = ((id & KVM_REG_ARM64_SYSREG_CRM_MASK)
			       >> KVM_REG_ARM64_SYSREG_CRM_SHIFT);
		params->Op2 = ((id & KVM_REG_ARM64_SYSREG_OP2_MASK)
			       >> KVM_REG_ARM64_SYSREG_OP2_SHIFT);
		return true;
	default:
		return false;
	}
}
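
/*
 * Worked example (added; assumes the uapi shift definitions, Op0 at bit 14
 * down to Op2 at bit 0): SCTLR_EL1 is Op0=3, Op1=0, CRn=1, CRm=0, Op2=0,
 * so its userspace index is
 * KVM_REG_ARM64 | KVM_REG_SIZE_U64 | KVM_REG_ARM64_SYSREG | (3 << 14) | (1 << 7).
 */
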
const struct sys_reg_desc *get_reg_by_id(u64 id,
					 const struct sys_reg_desc table[],
					 unsigned int num)
{
	struct sys_reg_params params;

	if (!index_to_params(id, &params))
		return NULL;

	return find_reg(&params, table, num);
}

/* Decode an index value, and find the sys_reg_desc entry. */
static const struct sys_reg_desc *
id_to_sys_reg_desc(struct kvm_vcpu *vcpu, u64 id,
		   const struct sys_reg_desc table[], unsigned int num)
{
	const struct sys_reg_desc *r;

	/* We only do sys_reg for now. */
	if ((id & KVM_REG_ARM_COPROC_MASK) != KVM_REG_ARM64_SYSREG)
		return NULL;

	r = get_reg_by_id(id, table, num);

	/* Not saved in the sys_reg array and not otherwise accessible? */
	if (r && (!(r->reg || r->get_user) || sysreg_hidden(vcpu, r)))
		r = NULL;

	return r;
}

/*
 * These are the invariant sys_reg registers: we let the guest see the
 * host versions of these, so they're part of the guest state.
 *
 * A future CPU may provide a mechanism to present different values to
 * the guest, or a future kvm may trap them.
 */
#define FUNCTION_INVARIANT(reg)						\
	static void get_##reg(struct kvm_vcpu *v,			\
			      const struct sys_reg_desc *r)		\
	{								\
		((struct sys_reg_desc *)r)->val = read_sysreg(reg);	\
	}

FUNCTION_INVARIANT(midr_el1)
FUNCTION_INVARIANT(revidr_el1)
FUNCTION_INVARIANT(clidr_el1)
FUNCTION_INVARIANT(aidr_el1)
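
/*
 * Each invocation above expands to a get_<reg>() helper that snapshots the
 * host's value; get_midr_el1(), for instance, stores read_sysreg(midr_el1)
 * into the descriptor's ->val field.
 */
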
static void get_ctr_el0(struct kvm_vcpu *v, const struct sys_reg_desc *r)
{
	((struct sys_reg_desc *)r)->val = read_sanitised_ftr_reg(SYS_CTR_EL0);
}

/* ->val is filled in by kvm_sys_reg_table_init() */
static struct sys_reg_desc invariant_sys_regs[] = {
	{ SYS_DESC(SYS_MIDR_EL1), NULL, get_midr_el1 },
	{ SYS_DESC(SYS_REVIDR_EL1), NULL, get_revidr_el1 },
	{ SYS_DESC(SYS_CLIDR_EL1), NULL, get_clidr_el1 },
	{ SYS_DESC(SYS_AIDR_EL1), NULL, get_aidr_el1 },
	{ SYS_DESC(SYS_CTR_EL0), NULL, get_ctr_el0 },
};

static int get_invariant_sys_reg(u64 id, u64 __user *uaddr)
{
	const struct sys_reg_desc *r;

	r = get_reg_by_id(id, invariant_sys_regs,
			  ARRAY_SIZE(invariant_sys_regs));
	if (!r)
		return -ENOENT;

	return put_user(r->val, uaddr);
}

static int set_invariant_sys_reg(u64 id, u64 __user *uaddr)
{
	const struct sys_reg_desc *r;
	u64 val;

	r = get_reg_by_id(id, invariant_sys_regs,
			  ARRAY_SIZE(invariant_sys_regs));
	if (!r)
		return -ENOENT;

	if (get_user(val, uaddr))
		return -EFAULT;

	/* This is what we mean by invariant: you can't change it. */
	if (r->val != val)
		return -EINVAL;

	return 0;
}

static bool is_valid_cache(u32 val)
{
	u32 level, ctype;

	if (val >= CSSELR_MAX)
		return false;

	/* Bottom bit is Instruction or Data bit. Next 3 bits are level. */
	level = (val >> 1);
	ctype = (cache_levels >> (level * 3)) & 7;

	switch (ctype) {
	case 0: /* No cache */
		return false;
	case 1: /* Instruction cache only */
		return (val & 1);
	case 2: /* Data cache only */
	case 4: /* Unified cache */
		return !(val & 1);
	case 3: /* Separate instruction and data caches */
		return true;
	default: /* Reserved: we can't know instruction or data. */
		return false;
	}
}
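
/*
 * Worked example (added for illustration): with split L1 caches
 * (Ctype1 == 3), both CSSELR values 0 (L1 data) and 1 (L1 instruction,
 * bottom bit set) are valid; for a unified L2 (Ctype2 == 4), only the
 * even value 2 is.
 */
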
static int demux_c15_get(u64 id, void __user *uaddr)
{
	u32 val;
	u32 __user *uval = uaddr;

	/* Fail if we have unknown bits set. */
	if (id & ~(KVM_REG_ARCH_MASK|KVM_REG_SIZE_MASK|KVM_REG_ARM_COPROC_MASK
		   | ((1 << KVM_REG_ARM_COPROC_SHIFT)-1)))
		return -ENOENT;

	switch (id & KVM_REG_ARM_DEMUX_ID_MASK) {
	case KVM_REG_ARM_DEMUX_ID_CCSIDR:
		if (KVM_REG_SIZE(id) != 4)
			return -ENOENT;
		val = (id & KVM_REG_ARM_DEMUX_VAL_MASK)
			>> KVM_REG_ARM_DEMUX_VAL_SHIFT;
		if (!is_valid_cache(val))
			return -ENOENT;

		return put_user(get_ccsidr(val), uval);
	default:
		return -ENOENT;
	}
}
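
/*
 * Sketch (added; assumes the uapi demux layout): a CCSIDR demux index is
 * KVM_REG_ARM64 | KVM_REG_SIZE_U32 | KVM_REG_ARM_DEMUX |
 * KVM_REG_ARM_DEMUX_ID_CCSIDR | <CSSELR value>, matching what
 * write_demux_regids() constructs below.
 */
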
static int demux_c15_set(u64 id, void __user *uaddr)
{
	u32 val, newval;
	u32 __user *uval = uaddr;

	/* Fail if we have unknown bits set. */
	if (id & ~(KVM_REG_ARCH_MASK|KVM_REG_SIZE_MASK|KVM_REG_ARM_COPROC_MASK
		   | ((1 << KVM_REG_ARM_COPROC_SHIFT)-1)))
		return -ENOENT;

	switch (id & KVM_REG_ARM_DEMUX_ID_MASK) {
	case KVM_REG_ARM_DEMUX_ID_CCSIDR:
		if (KVM_REG_SIZE(id) != 4)
			return -ENOENT;
		val = (id & KVM_REG_ARM_DEMUX_VAL_MASK)
			>> KVM_REG_ARM_DEMUX_VAL_SHIFT;
		if (!is_valid_cache(val))
			return -ENOENT;

		if (get_user(newval, uval))
			return -EFAULT;

		/* This is also invariant: you can't change it. */
		if (newval != get_ccsidr(val))
			return -EINVAL;
		return 0;
	default:
		return -ENOENT;
	}
}

int kvm_sys_reg_get_user(struct kvm_vcpu *vcpu, const struct kvm_one_reg *reg,
			 const struct sys_reg_desc table[], unsigned int num)
{
	u64 __user *uaddr = (u64 __user *)(unsigned long)reg->addr;
	const struct sys_reg_desc *r;
	u64 val;
	int ret;

	r = id_to_sys_reg_desc(vcpu, reg->id, table, num);
	if (!r)
		return -ENOENT;

	if (r->get_user) {
		ret = (r->get_user)(vcpu, r, &val);
	} else {
		val = __vcpu_sys_reg(vcpu, r->reg);
		ret = 0;
	}

	if (!ret)
		ret = put_user(val, uaddr);

	return ret;
}

int kvm_arm_sys_reg_get_reg(struct kvm_vcpu *vcpu, const struct kvm_one_reg *reg)
{
	void __user *uaddr = (void __user *)(unsigned long)reg->addr;
	int err;

	if ((reg->id & KVM_REG_ARM_COPROC_MASK) == KVM_REG_ARM_DEMUX)
		return demux_c15_get(reg->id, uaddr);

	err = get_invariant_sys_reg(reg->id, uaddr);
	if (err != -ENOENT)
		return err;

	return kvm_sys_reg_get_user(vcpu, reg,
				    sys_reg_descs, ARRAY_SIZE(sys_reg_descs));
}

int kvm_sys_reg_set_user(struct kvm_vcpu *vcpu, const struct kvm_one_reg *reg,
			 const struct sys_reg_desc table[], unsigned int num)
{
	u64 __user *uaddr = (u64 __user *)(unsigned long)reg->addr;
	const struct sys_reg_desc *r;
	u64 val;
	int ret;

	if (get_user(val, uaddr))
		return -EFAULT;

	r = id_to_sys_reg_desc(vcpu, reg->id, table, num);
	if (!r)
		return -ENOENT;

	if (sysreg_user_write_ignore(vcpu, r))
		return 0;

	if (r->set_user) {
		ret = (r->set_user)(vcpu, r, val);
	} else {
		__vcpu_sys_reg(vcpu, r->reg) = val;
		ret = 0;
	}

	return ret;
}

int kvm_arm_sys_reg_set_reg(struct kvm_vcpu *vcpu, const struct kvm_one_reg *reg)
{
	void __user *uaddr = (void __user *)(unsigned long)reg->addr;
	int err;

	if ((reg->id & KVM_REG_ARM_COPROC_MASK) == KVM_REG_ARM_DEMUX)
		return demux_c15_set(reg->id, uaddr);

	err = set_invariant_sys_reg(reg->id, uaddr);
	if (err != -ENOENT)
		return err;

	return kvm_sys_reg_set_user(vcpu, reg,
				    sys_reg_descs, ARRAY_SIZE(sys_reg_descs));
}

static unsigned int num_demux_regs(void)
{
	unsigned int i, count = 0;

	for (i = 0; i < CSSELR_MAX; i++)
		if (is_valid_cache(i))
			count++;

	return count;
}

static int write_demux_regids(u64 __user *uindices)
{
	u64 val = KVM_REG_ARM64 | KVM_REG_SIZE_U32 | KVM_REG_ARM_DEMUX;
	unsigned int i;

	val |= KVM_REG_ARM_DEMUX_ID_CCSIDR;
	for (i = 0; i < CSSELR_MAX; i++) {
		if (!is_valid_cache(i))
			continue;
		if (put_user(val | i, uindices))
			return -EFAULT;
		uindices++;
	}
	return 0;
}

static u64 sys_reg_to_index(const struct sys_reg_desc *reg)
{
	return (KVM_REG_ARM64 | KVM_REG_SIZE_U64 |
		KVM_REG_ARM64_SYSREG |
		(reg->Op0 << KVM_REG_ARM64_SYSREG_OP0_SHIFT) |
		(reg->Op1 << KVM_REG_ARM64_SYSREG_OP1_SHIFT) |
		(reg->CRn << KVM_REG_ARM64_SYSREG_CRN_SHIFT) |
		(reg->CRm << KVM_REG_ARM64_SYSREG_CRM_SHIFT) |
		(reg->Op2 << KVM_REG_ARM64_SYSREG_OP2_SHIFT));
}

static bool copy_reg_to_user(const struct sys_reg_desc *reg, u64 __user **uind)
{
	if (!*uind)
		return true;

	if (put_user(sys_reg_to_index(reg), *uind))
		return false;

	(*uind)++;
	return true;
}

static int walk_one_sys_reg(const struct kvm_vcpu *vcpu,
			    const struct sys_reg_desc *rd,
			    u64 __user **uind,
			    unsigned int *total)
{
	/*
	 * Ignore registers we trap but don't save,
	 * and for which no custom user accessor is provided.
	 */
	if (!(rd->reg || rd->get_user))
		return 0;

	if (sysreg_hidden(vcpu, rd))
		return 0;

	if (!copy_reg_to_user(rd, uind))
		return -EFAULT;

	(*total)++;
	return 0;
}

/* Assumed ordered tables, see kvm_sys_reg_table_init. */
static int walk_sys_regs(struct kvm_vcpu *vcpu, u64 __user *uind)
{
	const struct sys_reg_desc *i2, *end2;
	unsigned int total = 0;
	int err;

	i2 = sys_reg_descs;
	end2 = sys_reg_descs + ARRAY_SIZE(sys_reg_descs);

	while (i2 != end2) {
		err = walk_one_sys_reg(vcpu, i2++, &uind, &total);
		if (err)
			return err;
	}
	return total;
}

unsigned long kvm_arm_num_sys_reg_descs(struct kvm_vcpu *vcpu)
{
	return ARRAY_SIZE(invariant_sys_regs)
		+ num_demux_regs()
		+ walk_sys_regs(vcpu, (u64 __user *)NULL);
}
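
/*
 * Passing a NULL pointer above puts walk_sys_regs() in counting mode:
 * copy_reg_to_user() returns true without writing anything when *uind is
 * NULL, so only the running total is accumulated.
 */
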
int kvm_arm_copy_sys_reg_indices(struct kvm_vcpu *vcpu, u64 __user *uindices)
{
	unsigned int i;
	int err;

	/* Then give them all the invariant registers' indices. */
	for (i = 0; i < ARRAY_SIZE(invariant_sys_regs); i++) {
		if (put_user(sys_reg_to_index(&invariant_sys_regs[i]), uindices))
			return -EFAULT;
		uindices++;
	}

	err = walk_sys_regs(vcpu, uindices);
	if (err < 0)
		return err;
	uindices += err;

	return write_demux_regids(uindices);
}

int kvm_sys_reg_table_init(void)
{
	bool valid = true;
	unsigned int i;
	struct sys_reg_desc clidr;

	/* Make sure tables are unique and in order. */
	valid &= check_sysreg_table(sys_reg_descs, ARRAY_SIZE(sys_reg_descs), false);
	valid &= check_sysreg_table(cp14_regs, ARRAY_SIZE(cp14_regs), true);
	valid &= check_sysreg_table(cp14_64_regs, ARRAY_SIZE(cp14_64_regs), true);
	valid &= check_sysreg_table(cp15_regs, ARRAY_SIZE(cp15_regs), true);
	valid &= check_sysreg_table(cp15_64_regs, ARRAY_SIZE(cp15_64_regs), true);
	valid &= check_sysreg_table(invariant_sys_regs, ARRAY_SIZE(invariant_sys_regs), false);

	if (!valid)
		return -EINVAL;

	/* We abuse the reset function to overwrite the table itself. */
	for (i = 0; i < ARRAY_SIZE(invariant_sys_regs); i++)
		invariant_sys_regs[i].reset(NULL, &invariant_sys_regs[i]);

	/*
	 * CLIDR format is awkward, so clean it up. See ARM B4.1.20:
	 *
	 *   If software reads the Cache Type fields from Ctype1
	 *   upwards, once it has seen a value of 0b000, no caches
	 *   exist at further-out levels of the hierarchy. So, for
	 *   example, if Ctype3 is the first Cache Type field with a
	 *   value of 0b000, the values of Ctype4 to Ctype7 must be
	 *   ignored.
	 */
	get_clidr_el1(NULL, &clidr); /* Ugly... */
	cache_levels = clidr.val;
	for (i = 0; i < 7; i++)
		if (((cache_levels >> (i*3)) & 7) == 0)
			break;
	/* Clear all higher bits. */
	cache_levels &= (1 << (i*3))-1;

	return 0;
}
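
/*
 * Worked example (added for illustration): if CLIDR reports Ctype1 = 0b011
 * and Ctype2 = 0b100 with Ctype3 = 0b000, the loop stops at i = 2 and the
 * mask keeps bits [5:0], i.e. exactly the two architected levels.
 */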