Merge tag 'arm64-upstream' of git://git.kernel.org/pub/scm/linux/kernel/git/arm64...
author Linus Torvalds <torvalds@linux-foundation.org>
Tue, 28 Jan 2025 17:01:36 +0000 (09:01 -0800)
committer Linus Torvalds <torvalds@linux-foundation.org>
Tue, 28 Jan 2025 17:01:36 +0000 (09:01 -0800)
Pull KVM/arm64 updates from Will Deacon:
 "New features:

   - Support for non-protected guests in protected mode, achieving near
     feature parity with the non-protected mode

   - Support for the EL2 timers as part of the ongoing NV support

   - Allow control of hardware tracing for nVHE/hVHE

  Improvements, fixes and cleanups:

   - Massive cleanup of the debug infrastructure, making it a bit less
     awkward and definitely easier to maintain. This should pave the way
     for further optimisations

   - Complete rewrite of pKVM's fixed-feature infrastructure, aligning
     it with the rest of KVM and making the code easier to follow

   - Large simplification of pKVM's memory protection infrastructure

   - Better handling of RES0/RES1 fields for memory-backed system
     registers

   - Add a workaround for Qualcomm's Snapdragon X CPUs, which suffer
     from a pretty nasty timer bug

   - Small collection of cleanups and low-impact fixes"

* tag 'arm64-upstream' of git://git.kernel.org/pub/scm/linux/kernel/git/arm64/linux: (87 commits)
  arm64/sysreg: Get rid of TRFCR_ELx SysregFields
  KVM: arm64: nv: Fix doc header layout for timers
  KVM: arm64: nv: Apply RESx settings to sysreg reset values
  KVM: arm64: nv: Always evaluate HCR_EL2 using sanitising accessors
  KVM: arm64: Fix selftests after sysreg field name update
  coresight: Pass guest TRFCR value to KVM
  KVM: arm64: Support trace filtering for guests
  KVM: arm64: coresight: Give TRBE enabled state to KVM
  coresight: trbe: Remove redundant disable call
  arm64/sysreg/tools: Move TRFCR definitions to sysreg
  tools: arm64: Update sysreg.h header files
  KVM: arm64: Drop pkvm_mem_transition for host/hyp donations
  KVM: arm64: Drop pkvm_mem_transition for host/hyp sharing
  KVM: arm64: Drop pkvm_mem_transition for FF-A
  KVM: arm64: Explicitly handle BRBE traps as UNDEFINED
  KVM: arm64: vgic: Use str_enabled_disabled() in vgic_v3_probe()
  arm64: kvm: Introduce nvhe stack size constants
  KVM: arm64: Fix nVHE stacktrace VA bits mask
  KVM: arm64: Fix FEAT_MTE in pKVM
  Documentation: Update the behaviour of "kvm-arm.mode"
  ...

Documentation/admin-guide/kernel-parameters.txt
arch/arm64/kvm/arm.c
arch/arm64/kvm/hyp/pgtable.c
arch/arm64/kvm/sys_regs.c
arch/arm64/tools/sysreg
tools/testing/selftests/kvm/arm64/aarch32_id_regs.c
tools/testing/selftests/kvm/arm64/set_id_regs.c

Simple merge
Simple merge
index e4749ecbcd79c8e748cb623ab923d96ef04b30ae,526d66f24e34aea2eb62415f14e9cdb3858e6484..f6cd1ea7fb55e509f4faae067eb57ee7e24e8d29
@@@ -1599,12 -1608,10 +1608,13 @@@ static u64 __kvm_read_sanitised_id_reg(
                if (!vcpu_has_ptrauth(vcpu))
                        val &= ~(ARM64_FEATURE_MASK(ID_AA64ISAR2_EL1_APA3) |
                                 ARM64_FEATURE_MASK(ID_AA64ISAR2_EL1_GPA3));
-               if (!cpus_have_final_cap(ARM64_HAS_WFXT))
+               if (!cpus_have_final_cap(ARM64_HAS_WFXT) ||
+                   has_broken_cntvoff())
                        val &= ~ARM64_FEATURE_MASK(ID_AA64ISAR2_EL1_WFxT);
                break;
 +      case SYS_ID_AA64ISAR3_EL1:
 +              val &= ID_AA64ISAR3_EL1_FPRCVT | ID_AA64ISAR3_EL1_FAMINMAX;
 +              break;
        case SYS_ID_AA64MMFR2_EL1:
                val &= ~ID_AA64MMFR2_EL1_CCIDX_MASK;
                break;
Simple merge
index 447d61cae4db119029dd4278ab23ad7afe6c51cb,0000000000000000000000000000000000000000..cef8f7323ceb8842145d6678f5106fe839c7fd8e
mode 100644,000000..100644
--- /dev/null
@@@ -1,167 -1,0 +1,167 @@@
-       return el0 == ID_AA64PFR0_EL1_ELx_64BIT_ONLY;
 +// SPDX-License-Identifier: GPL-2.0-only
 +/*
 + * aarch32_id_regs - Test for ID register behavior on AArch64-only systems
 + *
 + * Copyright (c) 2022 Google LLC.
 + *
 + * Test that KVM handles the AArch64 views of the AArch32 ID registers as RAZ
 + * and WI from userspace.
 + */
 +
 +#include <stdint.h>
 +
 +#include "kvm_util.h"
 +#include "processor.h"
 +#include "test_util.h"
 +#include <linux/bitfield.h>
 +
 +#define BAD_ID_REG_VAL        0x1badc0deul
 +
 +#define GUEST_ASSERT_REG_RAZ(reg)     GUEST_ASSERT_EQ(read_sysreg_s(reg), 0)
 +
 +static void guest_main(void)
 +{
 +      GUEST_ASSERT_REG_RAZ(SYS_ID_PFR0_EL1);
 +      GUEST_ASSERT_REG_RAZ(SYS_ID_PFR1_EL1);
 +      GUEST_ASSERT_REG_RAZ(SYS_ID_DFR0_EL1);
 +      GUEST_ASSERT_REG_RAZ(SYS_ID_AFR0_EL1);
 +      GUEST_ASSERT_REG_RAZ(SYS_ID_MMFR0_EL1);
 +      GUEST_ASSERT_REG_RAZ(SYS_ID_MMFR1_EL1);
 +      GUEST_ASSERT_REG_RAZ(SYS_ID_MMFR2_EL1);
 +      GUEST_ASSERT_REG_RAZ(SYS_ID_MMFR3_EL1);
 +      GUEST_ASSERT_REG_RAZ(SYS_ID_ISAR0_EL1);
 +      GUEST_ASSERT_REG_RAZ(SYS_ID_ISAR1_EL1);
 +      GUEST_ASSERT_REG_RAZ(SYS_ID_ISAR2_EL1);
 +      GUEST_ASSERT_REG_RAZ(SYS_ID_ISAR3_EL1);
 +      GUEST_ASSERT_REG_RAZ(SYS_ID_ISAR4_EL1);
 +      GUEST_ASSERT_REG_RAZ(SYS_ID_ISAR5_EL1);
 +      GUEST_ASSERT_REG_RAZ(SYS_ID_MMFR4_EL1);
 +      GUEST_ASSERT_REG_RAZ(SYS_ID_ISAR6_EL1);
 +      GUEST_ASSERT_REG_RAZ(SYS_MVFR0_EL1);
 +      GUEST_ASSERT_REG_RAZ(SYS_MVFR1_EL1);
 +      GUEST_ASSERT_REG_RAZ(SYS_MVFR2_EL1);
 +      GUEST_ASSERT_REG_RAZ(sys_reg(3, 0, 0, 3, 3));
 +      GUEST_ASSERT_REG_RAZ(SYS_ID_PFR2_EL1);
 +      GUEST_ASSERT_REG_RAZ(SYS_ID_DFR1_EL1);
 +      GUEST_ASSERT_REG_RAZ(SYS_ID_MMFR5_EL1);
 +      GUEST_ASSERT_REG_RAZ(sys_reg(3, 0, 0, 3, 7));
 +
 +      GUEST_DONE();
 +}
 +
 +static void test_guest_raz(struct kvm_vcpu *vcpu)
 +{
 +      struct ucall uc;
 +
 +      vcpu_run(vcpu);
 +
 +      switch (get_ucall(vcpu, &uc)) {
 +      case UCALL_ABORT:
 +              REPORT_GUEST_ASSERT(uc);
 +              break;
 +      case UCALL_DONE:
 +              break;
 +      default:
 +              TEST_FAIL("Unexpected ucall: %lu", uc.cmd);
 +      }
 +}
 +
 +static uint64_t raz_wi_reg_ids[] = {
 +      KVM_ARM64_SYS_REG(SYS_ID_PFR0_EL1),
 +      KVM_ARM64_SYS_REG(SYS_ID_PFR1_EL1),
 +      KVM_ARM64_SYS_REG(SYS_ID_DFR0_EL1),
 +      KVM_ARM64_SYS_REG(SYS_ID_MMFR0_EL1),
 +      KVM_ARM64_SYS_REG(SYS_ID_MMFR1_EL1),
 +      KVM_ARM64_SYS_REG(SYS_ID_MMFR2_EL1),
 +      KVM_ARM64_SYS_REG(SYS_ID_MMFR3_EL1),
 +      KVM_ARM64_SYS_REG(SYS_ID_ISAR0_EL1),
 +      KVM_ARM64_SYS_REG(SYS_ID_ISAR1_EL1),
 +      KVM_ARM64_SYS_REG(SYS_ID_ISAR2_EL1),
 +      KVM_ARM64_SYS_REG(SYS_ID_ISAR3_EL1),
 +      KVM_ARM64_SYS_REG(SYS_ID_ISAR4_EL1),
 +      KVM_ARM64_SYS_REG(SYS_ID_ISAR5_EL1),
 +      KVM_ARM64_SYS_REG(SYS_ID_MMFR4_EL1),
 +      KVM_ARM64_SYS_REG(SYS_ID_ISAR6_EL1),
 +      KVM_ARM64_SYS_REG(SYS_MVFR0_EL1),
 +      KVM_ARM64_SYS_REG(SYS_MVFR1_EL1),
 +      KVM_ARM64_SYS_REG(SYS_MVFR2_EL1),
 +      KVM_ARM64_SYS_REG(SYS_ID_PFR2_EL1),
 +      KVM_ARM64_SYS_REG(SYS_ID_MMFR5_EL1),
 +};
 +
 +static void test_user_raz_wi(struct kvm_vcpu *vcpu)
 +{
 +      int i;
 +
 +      for (i = 0; i < ARRAY_SIZE(raz_wi_reg_ids); i++) {
 +              uint64_t reg_id = raz_wi_reg_ids[i];
 +              uint64_t val;
 +
 +              val = vcpu_get_reg(vcpu, reg_id);
 +              TEST_ASSERT_EQ(val, 0);
 +
 +              /*
 +               * Expect the ioctl to succeed with no effect on the register
 +               * value.
 +               */
 +              vcpu_set_reg(vcpu, reg_id, BAD_ID_REG_VAL);
 +
 +              val = vcpu_get_reg(vcpu, reg_id);
 +              TEST_ASSERT_EQ(val, 0);
 +      }
 +}
 +
 +static uint64_t raz_invariant_reg_ids[] = {
 +      KVM_ARM64_SYS_REG(SYS_ID_AFR0_EL1),
 +      KVM_ARM64_SYS_REG(sys_reg(3, 0, 0, 3, 3)),
 +      KVM_ARM64_SYS_REG(SYS_ID_DFR1_EL1),
 +      KVM_ARM64_SYS_REG(sys_reg(3, 0, 0, 3, 7)),
 +};
 +
 +static void test_user_raz_invariant(struct kvm_vcpu *vcpu)
 +{
 +      int i, r;
 +
 +      for (i = 0; i < ARRAY_SIZE(raz_invariant_reg_ids); i++) {
 +              uint64_t reg_id = raz_invariant_reg_ids[i];
 +              uint64_t val;
 +
 +              val = vcpu_get_reg(vcpu, reg_id);
 +              TEST_ASSERT_EQ(val, 0);
 +
 +              r = __vcpu_set_reg(vcpu, reg_id, BAD_ID_REG_VAL);
 +              TEST_ASSERT(r < 0 && errno == EINVAL,
 +                          "unexpected KVM_SET_ONE_REG error: r=%d, errno=%d", r, errno);
 +
 +              val = vcpu_get_reg(vcpu, reg_id);
 +              TEST_ASSERT_EQ(val, 0);
 +      }
 +}
 +
 +
 +
 +static bool vcpu_aarch64_only(struct kvm_vcpu *vcpu)
 +{
 +      uint64_t val, el0;
 +
 +      val = vcpu_get_reg(vcpu, KVM_ARM64_SYS_REG(SYS_ID_AA64PFR0_EL1));
 +
 +      el0 = FIELD_GET(ARM64_FEATURE_MASK(ID_AA64PFR0_EL1_EL0), val);
++      return el0 == ID_AA64PFR0_EL1_EL0_IMP;
 +}
 +
 +int main(void)
 +{
 +      struct kvm_vcpu *vcpu;
 +      struct kvm_vm *vm;
 +
 +      vm = vm_create_with_one_vcpu(&vcpu, guest_main);
 +
 +      TEST_REQUIRE(vcpu_aarch64_only(vcpu));
 +
 +      test_user_raz_wi(vcpu);
 +      test_user_raz_invariant(vcpu);
 +      test_guest_raz(vcpu);
 +
 +      kvm_vm_free(vm);
 +}
index 3dd85ce8551c6a1811757f4e950d60ef3b5fc3cc,0000000000000000000000000000000000000000..217541fe653601a82697134c8d773e901a205cfe
mode 100644,000000..100644
--- /dev/null
@@@ -1,694 -1,0 +1,694 @@@
-       aarch64_only = (el0 == ID_AA64PFR0_EL1_ELx_64BIT_ONLY);
 +// SPDX-License-Identifier: GPL-2.0-only
 +/*
 + * set_id_regs - Test for setting ID registers from userspace.
 + *
 + * Copyright (c) 2023 Google LLC.
 + *
 + *
 + * Test that KVM supports setting ID registers from userspace and handles the
 + * feature set correctly.
 + */
 +
 +#include <stdint.h>
 +#include "kvm_util.h"
 +#include "processor.h"
 +#include "test_util.h"
 +#include <linux/bitfield.h>
 +
 +enum ftr_type {
 +      FTR_EXACT,                      /* Use a predefined safe value */
 +      FTR_LOWER_SAFE,                 /* Smaller value is safe */
 +      FTR_HIGHER_SAFE,                /* Bigger value is safe */
 +      FTR_HIGHER_OR_ZERO_SAFE,        /* Bigger value is safe, but 0 is biggest */
 +      FTR_END,                        /* Mark the last ftr bits */
 +};
 +
 +#define FTR_SIGNED    true    /* Value should be treated as signed */
 +#define FTR_UNSIGNED  false   /* Value should be treated as unsigned */
 +
 +struct reg_ftr_bits {
 +      char *name;
 +      bool sign;
 +      enum ftr_type type;
 +      uint8_t shift;
 +      uint64_t mask;
 +      /*
 +       * For FTR_EXACT, safe_val is used as the exact safe value.
 +       * For FTR_LOWER_SAFE, safe_val is used as the minimal safe value.
 +       */
 +      int64_t safe_val;
 +};
 +
 +struct test_feature_reg {
 +      uint32_t reg;
 +      const struct reg_ftr_bits *ftr_bits;
 +};
 +
 +#define __REG_FTR_BITS(NAME, SIGNED, TYPE, SHIFT, MASK, SAFE_VAL)     \
 +      {                                                               \
 +              .name = #NAME,                                          \
 +              .sign = SIGNED,                                         \
 +              .type = TYPE,                                           \
 +              .shift = SHIFT,                                         \
 +              .mask = MASK,                                           \
 +              .safe_val = SAFE_VAL,                                   \
 +      }
 +
 +#define REG_FTR_BITS(type, reg, field, safe_val) \
 +      __REG_FTR_BITS(reg##_##field, FTR_UNSIGNED, type, reg##_##field##_SHIFT, \
 +                     reg##_##field##_MASK, safe_val)
 +
 +#define S_REG_FTR_BITS(type, reg, field, safe_val) \
 +      __REG_FTR_BITS(reg##_##field, FTR_SIGNED, type, reg##_##field##_SHIFT, \
 +                     reg##_##field##_MASK, safe_val)
 +
 +#define REG_FTR_END                                   \
 +      {                                               \
 +              .type = FTR_END,                        \
 +      }
 +
 +static const struct reg_ftr_bits ftr_id_aa64dfr0_el1[] = {
 +      S_REG_FTR_BITS(FTR_LOWER_SAFE, ID_AA64DFR0_EL1, DoubleLock, 0),
 +      REG_FTR_BITS(FTR_LOWER_SAFE, ID_AA64DFR0_EL1, WRPs, 0),
 +      S_REG_FTR_BITS(FTR_LOWER_SAFE, ID_AA64DFR0_EL1, PMUVer, 0),
 +      REG_FTR_BITS(FTR_LOWER_SAFE, ID_AA64DFR0_EL1, DebugVer, ID_AA64DFR0_EL1_DebugVer_IMP),
 +      REG_FTR_END,
 +};
 +
 +static const struct reg_ftr_bits ftr_id_dfr0_el1[] = {
 +      S_REG_FTR_BITS(FTR_LOWER_SAFE, ID_DFR0_EL1, PerfMon, ID_DFR0_EL1_PerfMon_PMUv3),
 +      REG_FTR_BITS(FTR_LOWER_SAFE, ID_DFR0_EL1, CopDbg, ID_DFR0_EL1_CopDbg_Armv8),
 +      REG_FTR_END,
 +};
 +
 +static const struct reg_ftr_bits ftr_id_aa64isar0_el1[] = {
 +      REG_FTR_BITS(FTR_LOWER_SAFE, ID_AA64ISAR0_EL1, RNDR, 0),
 +      REG_FTR_BITS(FTR_LOWER_SAFE, ID_AA64ISAR0_EL1, TLB, 0),
 +      REG_FTR_BITS(FTR_LOWER_SAFE, ID_AA64ISAR0_EL1, TS, 0),
 +      REG_FTR_BITS(FTR_LOWER_SAFE, ID_AA64ISAR0_EL1, FHM, 0),
 +      REG_FTR_BITS(FTR_LOWER_SAFE, ID_AA64ISAR0_EL1, DP, 0),
 +      REG_FTR_BITS(FTR_LOWER_SAFE, ID_AA64ISAR0_EL1, SM4, 0),
 +      REG_FTR_BITS(FTR_LOWER_SAFE, ID_AA64ISAR0_EL1, SM3, 0),
 +      REG_FTR_BITS(FTR_LOWER_SAFE, ID_AA64ISAR0_EL1, SHA3, 0),
 +      REG_FTR_BITS(FTR_LOWER_SAFE, ID_AA64ISAR0_EL1, RDM, 0),
 +      REG_FTR_BITS(FTR_LOWER_SAFE, ID_AA64ISAR0_EL1, TME, 0),
 +      REG_FTR_BITS(FTR_LOWER_SAFE, ID_AA64ISAR0_EL1, ATOMIC, 0),
 +      REG_FTR_BITS(FTR_LOWER_SAFE, ID_AA64ISAR0_EL1, CRC32, 0),
 +      REG_FTR_BITS(FTR_LOWER_SAFE, ID_AA64ISAR0_EL1, SHA2, 0),
 +      REG_FTR_BITS(FTR_LOWER_SAFE, ID_AA64ISAR0_EL1, SHA1, 0),
 +      REG_FTR_BITS(FTR_LOWER_SAFE, ID_AA64ISAR0_EL1, AES, 0),
 +      REG_FTR_END,
 +};
 +
 +static const struct reg_ftr_bits ftr_id_aa64isar1_el1[] = {
 +      REG_FTR_BITS(FTR_LOWER_SAFE, ID_AA64ISAR1_EL1, LS64, 0),
 +      REG_FTR_BITS(FTR_LOWER_SAFE, ID_AA64ISAR1_EL1, XS, 0),
 +      REG_FTR_BITS(FTR_LOWER_SAFE, ID_AA64ISAR1_EL1, I8MM, 0),
 +      REG_FTR_BITS(FTR_LOWER_SAFE, ID_AA64ISAR1_EL1, DGH, 0),
 +      REG_FTR_BITS(FTR_LOWER_SAFE, ID_AA64ISAR1_EL1, BF16, 0),
 +      REG_FTR_BITS(FTR_LOWER_SAFE, ID_AA64ISAR1_EL1, SPECRES, 0),
 +      REG_FTR_BITS(FTR_LOWER_SAFE, ID_AA64ISAR1_EL1, SB, 0),
 +      REG_FTR_BITS(FTR_LOWER_SAFE, ID_AA64ISAR1_EL1, FRINTTS, 0),
 +      REG_FTR_BITS(FTR_LOWER_SAFE, ID_AA64ISAR1_EL1, LRCPC, 0),
 +      REG_FTR_BITS(FTR_LOWER_SAFE, ID_AA64ISAR1_EL1, FCMA, 0),
 +      REG_FTR_BITS(FTR_LOWER_SAFE, ID_AA64ISAR1_EL1, JSCVT, 0),
 +      REG_FTR_BITS(FTR_LOWER_SAFE, ID_AA64ISAR1_EL1, DPB, 0),
 +      REG_FTR_END,
 +};
 +
 +static const struct reg_ftr_bits ftr_id_aa64isar2_el1[] = {
 +      REG_FTR_BITS(FTR_LOWER_SAFE, ID_AA64ISAR2_EL1, BC, 0),
 +      REG_FTR_BITS(FTR_LOWER_SAFE, ID_AA64ISAR2_EL1, RPRES, 0),
 +      REG_FTR_BITS(FTR_LOWER_SAFE, ID_AA64ISAR2_EL1, WFxT, 0),
 +      REG_FTR_END,
 +};
 +
 +static const struct reg_ftr_bits ftr_id_aa64pfr0_el1[] = {
 +      REG_FTR_BITS(FTR_LOWER_SAFE, ID_AA64PFR0_EL1, CSV3, 0),
 +      REG_FTR_BITS(FTR_LOWER_SAFE, ID_AA64PFR0_EL1, CSV2, 0),
 +      REG_FTR_BITS(FTR_LOWER_SAFE, ID_AA64PFR0_EL1, DIT, 0),
 +      REG_FTR_BITS(FTR_LOWER_SAFE, ID_AA64PFR0_EL1, SEL2, 0),
 +      REG_FTR_BITS(FTR_EXACT, ID_AA64PFR0_EL1, GIC, 0),
 +      REG_FTR_BITS(FTR_LOWER_SAFE, ID_AA64PFR0_EL1, EL3, 0),
 +      REG_FTR_BITS(FTR_LOWER_SAFE, ID_AA64PFR0_EL1, EL2, 0),
 +      REG_FTR_BITS(FTR_LOWER_SAFE, ID_AA64PFR0_EL1, EL1, 0),
 +      REG_FTR_BITS(FTR_LOWER_SAFE, ID_AA64PFR0_EL1, EL0, 0),
 +      REG_FTR_END,
 +};
 +
 +static const struct reg_ftr_bits ftr_id_aa64pfr1_el1[] = {
 +      REG_FTR_BITS(FTR_LOWER_SAFE, ID_AA64PFR1_EL1, CSV2_frac, 0),
 +      REG_FTR_BITS(FTR_LOWER_SAFE, ID_AA64PFR1_EL1, SSBS, ID_AA64PFR1_EL1_SSBS_NI),
 +      REG_FTR_BITS(FTR_LOWER_SAFE, ID_AA64PFR1_EL1, BT, 0),
 +      REG_FTR_END,
 +};
 +
 +static const struct reg_ftr_bits ftr_id_aa64mmfr0_el1[] = {
 +      REG_FTR_BITS(FTR_LOWER_SAFE, ID_AA64MMFR0_EL1, ECV, 0),
 +      REG_FTR_BITS(FTR_LOWER_SAFE, ID_AA64MMFR0_EL1, EXS, 0),
 +      S_REG_FTR_BITS(FTR_LOWER_SAFE, ID_AA64MMFR0_EL1, TGRAN4, 0),
 +      S_REG_FTR_BITS(FTR_LOWER_SAFE, ID_AA64MMFR0_EL1, TGRAN64, 0),
 +      REG_FTR_BITS(FTR_LOWER_SAFE, ID_AA64MMFR0_EL1, TGRAN16, 0),
 +      REG_FTR_BITS(FTR_LOWER_SAFE, ID_AA64MMFR0_EL1, BIGENDEL0, 0),
 +      REG_FTR_BITS(FTR_LOWER_SAFE, ID_AA64MMFR0_EL1, SNSMEM, 0),
 +      REG_FTR_BITS(FTR_LOWER_SAFE, ID_AA64MMFR0_EL1, BIGEND, 0),
 +      REG_FTR_BITS(FTR_LOWER_SAFE, ID_AA64MMFR0_EL1, PARANGE, 0),
 +      REG_FTR_END,
 +};
 +
 +static const struct reg_ftr_bits ftr_id_aa64mmfr1_el1[] = {
 +      REG_FTR_BITS(FTR_LOWER_SAFE, ID_AA64MMFR1_EL1, TIDCP1, 0),
 +      REG_FTR_BITS(FTR_LOWER_SAFE, ID_AA64MMFR1_EL1, AFP, 0),
 +      REG_FTR_BITS(FTR_LOWER_SAFE, ID_AA64MMFR1_EL1, ETS, 0),
 +      REG_FTR_BITS(FTR_HIGHER_SAFE, ID_AA64MMFR1_EL1, SpecSEI, 0),
 +      REG_FTR_BITS(FTR_LOWER_SAFE, ID_AA64MMFR1_EL1, PAN, 0),
 +      REG_FTR_BITS(FTR_LOWER_SAFE, ID_AA64MMFR1_EL1, LO, 0),
 +      REG_FTR_BITS(FTR_LOWER_SAFE, ID_AA64MMFR1_EL1, HPDS, 0),
 +      REG_FTR_BITS(FTR_LOWER_SAFE, ID_AA64MMFR1_EL1, HAFDBS, 0),
 +      REG_FTR_END,
 +};
 +
 +static const struct reg_ftr_bits ftr_id_aa64mmfr2_el1[] = {
 +      REG_FTR_BITS(FTR_LOWER_SAFE, ID_AA64MMFR2_EL1, E0PD, 0),
 +      REG_FTR_BITS(FTR_LOWER_SAFE, ID_AA64MMFR2_EL1, BBM, 0),
 +      REG_FTR_BITS(FTR_LOWER_SAFE, ID_AA64MMFR2_EL1, TTL, 0),
 +      REG_FTR_BITS(FTR_LOWER_SAFE, ID_AA64MMFR2_EL1, AT, 0),
 +      REG_FTR_BITS(FTR_LOWER_SAFE, ID_AA64MMFR2_EL1, ST, 0),
 +      REG_FTR_BITS(FTR_LOWER_SAFE, ID_AA64MMFR2_EL1, VARange, 0),
 +      REG_FTR_BITS(FTR_LOWER_SAFE, ID_AA64MMFR2_EL1, IESB, 0),
 +      REG_FTR_BITS(FTR_LOWER_SAFE, ID_AA64MMFR2_EL1, LSM, 0),
 +      REG_FTR_BITS(FTR_LOWER_SAFE, ID_AA64MMFR2_EL1, UAO, 0),
 +      REG_FTR_BITS(FTR_LOWER_SAFE, ID_AA64MMFR2_EL1, CnP, 0),
 +      REG_FTR_END,
 +};
 +
 +static const struct reg_ftr_bits ftr_id_aa64zfr0_el1[] = {
 +      REG_FTR_BITS(FTR_LOWER_SAFE, ID_AA64ZFR0_EL1, F64MM, 0),
 +      REG_FTR_BITS(FTR_LOWER_SAFE, ID_AA64ZFR0_EL1, F32MM, 0),
 +      REG_FTR_BITS(FTR_LOWER_SAFE, ID_AA64ZFR0_EL1, I8MM, 0),
 +      REG_FTR_BITS(FTR_LOWER_SAFE, ID_AA64ZFR0_EL1, SM4, 0),
 +      REG_FTR_BITS(FTR_LOWER_SAFE, ID_AA64ZFR0_EL1, SHA3, 0),
 +      REG_FTR_BITS(FTR_LOWER_SAFE, ID_AA64ZFR0_EL1, BF16, 0),
 +      REG_FTR_BITS(FTR_LOWER_SAFE, ID_AA64ZFR0_EL1, BitPerm, 0),
 +      REG_FTR_BITS(FTR_LOWER_SAFE, ID_AA64ZFR0_EL1, AES, 0),
 +      REG_FTR_BITS(FTR_LOWER_SAFE, ID_AA64ZFR0_EL1, SVEver, 0),
 +      REG_FTR_END,
 +};
 +
 +#define TEST_REG(id, table)                   \
 +      {                                       \
 +              .reg = id,                      \
 +              .ftr_bits = &((table)[0]),      \
 +      }
 +
 +static struct test_feature_reg test_regs[] = {
 +      TEST_REG(SYS_ID_AA64DFR0_EL1, ftr_id_aa64dfr0_el1),
 +      TEST_REG(SYS_ID_DFR0_EL1, ftr_id_dfr0_el1),
 +      TEST_REG(SYS_ID_AA64ISAR0_EL1, ftr_id_aa64isar0_el1),
 +      TEST_REG(SYS_ID_AA64ISAR1_EL1, ftr_id_aa64isar1_el1),
 +      TEST_REG(SYS_ID_AA64ISAR2_EL1, ftr_id_aa64isar2_el1),
 +      TEST_REG(SYS_ID_AA64PFR0_EL1, ftr_id_aa64pfr0_el1),
 +      TEST_REG(SYS_ID_AA64PFR1_EL1, ftr_id_aa64pfr1_el1),
 +      TEST_REG(SYS_ID_AA64MMFR0_EL1, ftr_id_aa64mmfr0_el1),
 +      TEST_REG(SYS_ID_AA64MMFR1_EL1, ftr_id_aa64mmfr1_el1),
 +      TEST_REG(SYS_ID_AA64MMFR2_EL1, ftr_id_aa64mmfr2_el1),
 +      TEST_REG(SYS_ID_AA64ZFR0_EL1, ftr_id_aa64zfr0_el1),
 +};
 +
 +#define GUEST_REG_SYNC(id) GUEST_SYNC_ARGS(0, id, read_sysreg_s(id), 0, 0);
 +
 +static void guest_code(void)
 +{
 +      GUEST_REG_SYNC(SYS_ID_AA64DFR0_EL1);
 +      GUEST_REG_SYNC(SYS_ID_DFR0_EL1);
 +      GUEST_REG_SYNC(SYS_ID_AA64ISAR0_EL1);
 +      GUEST_REG_SYNC(SYS_ID_AA64ISAR1_EL1);
 +      GUEST_REG_SYNC(SYS_ID_AA64ISAR2_EL1);
 +      GUEST_REG_SYNC(SYS_ID_AA64PFR0_EL1);
 +      GUEST_REG_SYNC(SYS_ID_AA64MMFR0_EL1);
 +      GUEST_REG_SYNC(SYS_ID_AA64MMFR1_EL1);
 +      GUEST_REG_SYNC(SYS_ID_AA64MMFR2_EL1);
 +      GUEST_REG_SYNC(SYS_ID_AA64ZFR0_EL1);
 +      GUEST_REG_SYNC(SYS_CTR_EL0);
 +
 +      GUEST_DONE();
 +}
 +
 +/* Return a safe value for a given ftr_bits and ftr value */
 +uint64_t get_safe_value(const struct reg_ftr_bits *ftr_bits, uint64_t ftr)
 +{
 +      uint64_t ftr_max = GENMASK_ULL(ARM64_FEATURE_FIELD_BITS - 1, 0);
 +
 +      if (ftr_bits->sign == FTR_UNSIGNED) {
 +              switch (ftr_bits->type) {
 +              case FTR_EXACT:
 +                      ftr = ftr_bits->safe_val;
 +                      break;
 +              case FTR_LOWER_SAFE:
 +                      if (ftr > ftr_bits->safe_val)
 +                              ftr--;
 +                      break;
 +              case FTR_HIGHER_SAFE:
 +                      if (ftr < ftr_max)
 +                              ftr++;
 +                      break;
 +              case FTR_HIGHER_OR_ZERO_SAFE:
 +                      if (ftr == ftr_max)
 +                              ftr = 0;
 +                      else if (ftr != 0)
 +                              ftr++;
 +                      break;
 +              default:
 +                      break;
 +              }
 +      } else if (ftr != ftr_max) {
 +              switch (ftr_bits->type) {
 +              case FTR_EXACT:
 +                      ftr = ftr_bits->safe_val;
 +                      break;
 +              case FTR_LOWER_SAFE:
 +                      if (ftr > ftr_bits->safe_val)
 +                              ftr--;
 +                      break;
 +              case FTR_HIGHER_SAFE:
 +                      if (ftr < ftr_max - 1)
 +                              ftr++;
 +                      break;
 +              case FTR_HIGHER_OR_ZERO_SAFE:
 +                      if (ftr != 0 && ftr != ftr_max - 1)
 +                              ftr++;
 +                      break;
 +              default:
 +                      break;
 +              }
 +      }
 +
 +      return ftr;
 +}
 +
 +/* Return an invalid value for a given ftr_bits and ftr value */
 +uint64_t get_invalid_value(const struct reg_ftr_bits *ftr_bits, uint64_t ftr)
 +{
 +      uint64_t ftr_max = GENMASK_ULL(ARM64_FEATURE_FIELD_BITS - 1, 0);
 +
 +      if (ftr_bits->sign == FTR_UNSIGNED) {
 +              switch (ftr_bits->type) {
 +              case FTR_EXACT:
 +                      ftr = max((uint64_t)ftr_bits->safe_val + 1, ftr + 1);
 +                      break;
 +              case FTR_LOWER_SAFE:
 +                      ftr++;
 +                      break;
 +              case FTR_HIGHER_SAFE:
 +                      ftr--;
 +                      break;
 +              case FTR_HIGHER_OR_ZERO_SAFE:
 +                      if (ftr == 0)
 +                              ftr = ftr_max;
 +                      else
 +                              ftr--;
 +                      break;
 +              default:
 +                      break;
 +              }
 +      } else if (ftr != ftr_max) {
 +              switch (ftr_bits->type) {
 +              case FTR_EXACT:
 +                      ftr = max((uint64_t)ftr_bits->safe_val + 1, ftr + 1);
 +                      break;
 +              case FTR_LOWER_SAFE:
 +                      ftr++;
 +                      break;
 +              case FTR_HIGHER_SAFE:
 +                      ftr--;
 +                      break;
 +              case FTR_HIGHER_OR_ZERO_SAFE:
 +                      if (ftr == 0)
 +                              ftr = ftr_max - 1;
 +                      else
 +                              ftr--;
 +                      break;
 +              default:
 +                      break;
 +              }
 +      } else {
 +              ftr = 0;
 +      }
 +
 +      return ftr;
 +}
 +
 +static uint64_t test_reg_set_success(struct kvm_vcpu *vcpu, uint64_t reg,
 +                                   const struct reg_ftr_bits *ftr_bits)
 +{
 +      uint8_t shift = ftr_bits->shift;
 +      uint64_t mask = ftr_bits->mask;
 +      uint64_t val, new_val, ftr;
 +
 +      val = vcpu_get_reg(vcpu, reg);
 +      ftr = (val & mask) >> shift;
 +
 +      ftr = get_safe_value(ftr_bits, ftr);
 +
 +      ftr <<= shift;
 +      val &= ~mask;
 +      val |= ftr;
 +
 +      vcpu_set_reg(vcpu, reg, val);
 +      new_val = vcpu_get_reg(vcpu, reg);
 +      TEST_ASSERT_EQ(new_val, val);
 +
 +      return new_val;
 +}
 +
 +static void test_reg_set_fail(struct kvm_vcpu *vcpu, uint64_t reg,
 +                            const struct reg_ftr_bits *ftr_bits)
 +{
 +      uint8_t shift = ftr_bits->shift;
 +      uint64_t mask = ftr_bits->mask;
 +      uint64_t val, old_val, ftr;
 +      int r;
 +
 +      val = vcpu_get_reg(vcpu, reg);
 +      ftr = (val & mask) >> shift;
 +
 +      ftr = get_invalid_value(ftr_bits, ftr);
 +
 +      old_val = val;
 +      ftr <<= shift;
 +      val &= ~mask;
 +      val |= ftr;
 +
 +      r = __vcpu_set_reg(vcpu, reg, val);
 +      TEST_ASSERT(r < 0 && errno == EINVAL,
 +                  "Unexpected KVM_SET_ONE_REG error: r=%d, errno=%d", r, errno);
 +
 +      val = vcpu_get_reg(vcpu, reg);
 +      TEST_ASSERT_EQ(val, old_val);
 +}
 +
 +static uint64_t test_reg_vals[KVM_ARM_FEATURE_ID_RANGE_SIZE];
 +
 +#define encoding_to_range_idx(encoding)                                                       \
 +      KVM_ARM_FEATURE_ID_RANGE_IDX(sys_reg_Op0(encoding), sys_reg_Op1(encoding),      \
 +                                   sys_reg_CRn(encoding), sys_reg_CRm(encoding),      \
 +                                   sys_reg_Op2(encoding))
 +
 +
 +static void test_vm_ftr_id_regs(struct kvm_vcpu *vcpu, bool aarch64_only)
 +{
 +      uint64_t masks[KVM_ARM_FEATURE_ID_RANGE_SIZE];
 +      struct reg_mask_range range = {
 +              .addr = (__u64)masks,
 +      };
 +      int ret;
 +
 +      /* KVM should return error when reserved field is not zero */
 +      range.reserved[0] = 1;
 +      ret = __vm_ioctl(vcpu->vm, KVM_ARM_GET_REG_WRITABLE_MASKS, &range);
 +      TEST_ASSERT(ret, "KVM doesn't check invalid parameters.");
 +
 +      /* Get writable masks for feature ID registers */
 +      memset(range.reserved, 0, sizeof(range.reserved));
 +      vm_ioctl(vcpu->vm, KVM_ARM_GET_REG_WRITABLE_MASKS, &range);
 +
 +      for (int i = 0; i < ARRAY_SIZE(test_regs); i++) {
 +              const struct reg_ftr_bits *ftr_bits = test_regs[i].ftr_bits;
 +              uint32_t reg_id = test_regs[i].reg;
 +              uint64_t reg = KVM_ARM64_SYS_REG(reg_id);
 +              int idx;
 +
 +              /* Get the index to masks array for the idreg */
 +              idx = encoding_to_range_idx(reg_id);
 +
 +              for (int j = 0;  ftr_bits[j].type != FTR_END; j++) {
 +                      /* Skip aarch32 reg on aarch64 only system, since they are RAZ/WI. */
 +                      if (aarch64_only && sys_reg_CRm(reg_id) < 4) {
 +                              ksft_test_result_skip("%s on AARCH64 only system\n",
 +                                                    ftr_bits[j].name);
 +                              continue;
 +                      }
 +
 +                      /* Make sure the feature field is writable */
 +                      TEST_ASSERT_EQ(masks[idx] & ftr_bits[j].mask, ftr_bits[j].mask);
 +
 +                      test_reg_set_fail(vcpu, reg, &ftr_bits[j]);
 +
 +                      test_reg_vals[idx] = test_reg_set_success(vcpu, reg,
 +                                                                &ftr_bits[j]);
 +
 +                      ksft_test_result_pass("%s\n", ftr_bits[j].name);
 +              }
 +      }
 +}
 +
 +#define MPAM_IDREG_TEST       6
 +static void test_user_set_mpam_reg(struct kvm_vcpu *vcpu)
 +{
 +      uint64_t masks[KVM_ARM_FEATURE_ID_RANGE_SIZE];
 +      struct reg_mask_range range = {
 +              .addr = (__u64)masks,
 +      };
 +      uint64_t val;
 +      int idx, err;
 +
 +      /*
 +       * If ID_AA64PFR0.MPAM is _not_ officially modifiable and is zero,
 +       * check that if it can be set to 1, (i.e. it is supported by the
 +       * hardware), that it can't be set to other values.
 +       */
 +
 +      /* Get writable masks for feature ID registers */
 +      memset(range.reserved, 0, sizeof(range.reserved));
 +      vm_ioctl(vcpu->vm, KVM_ARM_GET_REG_WRITABLE_MASKS, &range);
 +
 +      /* Writeable? Nothing to test! */
 +      idx = encoding_to_range_idx(SYS_ID_AA64PFR0_EL1);
 +      if ((masks[idx] & ID_AA64PFR0_EL1_MPAM_MASK) == ID_AA64PFR0_EL1_MPAM_MASK) {
 +              ksft_test_result_skip("ID_AA64PFR0_EL1.MPAM is officially writable, nothing to test\n");
 +              return;
 +      }
 +
 +      /* Get the id register value */
 +      val = vcpu_get_reg(vcpu, KVM_ARM64_SYS_REG(SYS_ID_AA64PFR0_EL1));
 +
 +      /* Try to set MPAM=0. This should always be possible. */
 +      val &= ~ID_AA64PFR0_EL1_MPAM_MASK;
 +      val |= FIELD_PREP(ID_AA64PFR0_EL1_MPAM_MASK, 0);
 +      err = __vcpu_set_reg(vcpu, KVM_ARM64_SYS_REG(SYS_ID_AA64PFR0_EL1), val);
 +      if (err)
 +              ksft_test_result_fail("ID_AA64PFR0_EL1.MPAM=0 was not accepted\n");
 +      else
 +              ksft_test_result_pass("ID_AA64PFR0_EL1.MPAM=0 worked\n");
 +
 +      /* Try to set MPAM=1 */
 +      val &= ~ID_AA64PFR0_EL1_MPAM_MASK;
 +      val |= FIELD_PREP(ID_AA64PFR0_EL1_MPAM_MASK, 1);
 +      err = __vcpu_set_reg(vcpu, KVM_ARM64_SYS_REG(SYS_ID_AA64PFR0_EL1), val);
 +      if (err)
 +              ksft_test_result_skip("ID_AA64PFR0_EL1.MPAM is not writable, nothing to test\n");
 +      else
 +              ksft_test_result_pass("ID_AA64PFR0_EL1.MPAM=1 was writable\n");
 +
 +      /* Try to set MPAM=2 */
 +      val &= ~ID_AA64PFR0_EL1_MPAM_MASK;
 +      val |= FIELD_PREP(ID_AA64PFR0_EL1_MPAM_MASK, 2);
 +      err = __vcpu_set_reg(vcpu, KVM_ARM64_SYS_REG(SYS_ID_AA64PFR0_EL1), val);
 +      if (err)
 +              ksft_test_result_pass("ID_AA64PFR0_EL1.MPAM not arbitrarily modifiable\n");
 +      else
 +              ksft_test_result_fail("ID_AA64PFR0_EL1.MPAM value should not be ignored\n");
 +
 +      /* And again for ID_AA64PFR1_EL1.MPAM_frac */
 +      idx = encoding_to_range_idx(SYS_ID_AA64PFR1_EL1);
 +      if ((masks[idx] & ID_AA64PFR1_EL1_MPAM_frac_MASK) == ID_AA64PFR1_EL1_MPAM_frac_MASK) {
 +              ksft_test_result_skip("ID_AA64PFR1_EL1.MPAM_frac is officially writable, nothing to test\n");
 +              return;
 +      }
 +
 +      /* Get the id register value */
 +      val = vcpu_get_reg(vcpu, KVM_ARM64_SYS_REG(SYS_ID_AA64PFR1_EL1));
 +
 +      /* Try to set MPAM_frac=0. This should always be possible. */
 +      val &= ~ID_AA64PFR1_EL1_MPAM_frac_MASK;
 +      val |= FIELD_PREP(ID_AA64PFR1_EL1_MPAM_frac_MASK, 0);
 +      err = __vcpu_set_reg(vcpu, KVM_ARM64_SYS_REG(SYS_ID_AA64PFR1_EL1), val);
 +      if (err)
 +              ksft_test_result_fail("ID_AA64PFR1_EL1.MPAM_frac=0 was not accepted\n");
 +      else
 +              ksft_test_result_pass("ID_AA64PFR1_EL1.MPAM_frac=0 worked\n");
 +
 +      /* Try to set MPAM_frac=1 */
 +      val &= ~ID_AA64PFR1_EL1_MPAM_frac_MASK;
 +      val |= FIELD_PREP(ID_AA64PFR1_EL1_MPAM_frac_MASK, 1);
 +      err = __vcpu_set_reg(vcpu, KVM_ARM64_SYS_REG(SYS_ID_AA64PFR1_EL1), val);
 +      if (err)
 +              ksft_test_result_skip("ID_AA64PFR1_EL1.MPAM_frac is not writable, nothing to test\n");
 +      else
 +              ksft_test_result_pass("ID_AA64PFR1_EL1.MPAM_frac=1 was writable\n");
 +
 +      /* Try to set MPAM_frac=2 */
 +      val &= ~ID_AA64PFR1_EL1_MPAM_frac_MASK;
 +      val |= FIELD_PREP(ID_AA64PFR1_EL1_MPAM_frac_MASK, 2);
 +      err = __vcpu_set_reg(vcpu, KVM_ARM64_SYS_REG(SYS_ID_AA64PFR1_EL1), val);
 +      if (err)
 +              ksft_test_result_pass("ID_AA64PFR1_EL1.MPAM_frac not arbitrarily modifiable\n");
 +      else
 +              ksft_test_result_fail("ID_AA64PFR1_EL1.MPAM_frac value should not be ignored\n");
 +}
 +
 +static void test_guest_reg_read(struct kvm_vcpu *vcpu)
 +{
 +      bool done = false;
 +      struct ucall uc;
 +
 +      while (!done) {
 +              vcpu_run(vcpu);
 +
 +              switch (get_ucall(vcpu, &uc)) {
 +              case UCALL_ABORT:
 +                      REPORT_GUEST_ASSERT(uc);
 +                      break;
 +              case UCALL_SYNC:
 +                      /* Make sure the written values are seen by guest */
 +                      TEST_ASSERT_EQ(test_reg_vals[encoding_to_range_idx(uc.args[2])],
 +                                     uc.args[3]);
 +                      break;
 +              case UCALL_DONE:
 +                      done = true;
 +                      break;
 +              default:
 +                      TEST_FAIL("Unexpected ucall: %lu", uc.cmd);
 +              }
 +      }
 +}
 +
 +/* Politely lifted from arch/arm64/include/asm/cache.h */
 +/* Ctypen, bits[3(n - 1) + 2 : 3(n - 1)], for n = 1 to 7 */
 +#define CLIDR_CTYPE_SHIFT(level)      (3 * (level - 1))
 +#define CLIDR_CTYPE_MASK(level)               (7 << CLIDR_CTYPE_SHIFT(level))
 +#define CLIDR_CTYPE(clidr, level)     \
 +      (((clidr) & CLIDR_CTYPE_MASK(level)) >> CLIDR_CTYPE_SHIFT(level))
 +
 +static void test_clidr(struct kvm_vcpu *vcpu)
 +{
 +      uint64_t clidr;
 +      int level;
 +
 +      clidr = vcpu_get_reg(vcpu, KVM_ARM64_SYS_REG(SYS_CLIDR_EL1));
 +
 +      /* find the first empty level in the cache hierarchy */
 +      for (level = 1; level < 7; level++) {
 +              if (!CLIDR_CTYPE(clidr, level))
 +                      break;
 +      }
 +
 +      /*
 +       * If you have a mind-boggling 7 levels of cache, congratulations, you
 +       * get to fix this.
 +       */
 +      TEST_ASSERT(level <= 7, "can't find an empty level in cache hierarchy");
 +
 +      /* stick in a unified cache level */
 +      clidr |= BIT(2) << CLIDR_CTYPE_SHIFT(level);
 +
 +      vcpu_set_reg(vcpu, KVM_ARM64_SYS_REG(SYS_CLIDR_EL1), clidr);
 +      test_reg_vals[encoding_to_range_idx(SYS_CLIDR_EL1)] = clidr;
 +}
 +
 +static void test_ctr(struct kvm_vcpu *vcpu)
 +{
 +      u64 ctr;
 +
 +      ctr = vcpu_get_reg(vcpu, KVM_ARM64_SYS_REG(SYS_CTR_EL0));
 +      ctr &= ~CTR_EL0_DIC_MASK;
 +      if (ctr & CTR_EL0_IminLine_MASK)
 +              ctr--;
 +
 +      vcpu_set_reg(vcpu, KVM_ARM64_SYS_REG(SYS_CTR_EL0), ctr);
 +      test_reg_vals[encoding_to_range_idx(SYS_CTR_EL0)] = ctr;
 +}
 +
 +static void test_vcpu_ftr_id_regs(struct kvm_vcpu *vcpu)
 +{
 +      u64 val;
 +
 +      test_clidr(vcpu);
 +      test_ctr(vcpu);
 +
 +      val = vcpu_get_reg(vcpu, KVM_ARM64_SYS_REG(SYS_MPIDR_EL1));
 +      val++;
 +      vcpu_set_reg(vcpu, KVM_ARM64_SYS_REG(SYS_MPIDR_EL1), val);
 +
 +      test_reg_vals[encoding_to_range_idx(SYS_MPIDR_EL1)] = val;
 +      ksft_test_result_pass("%s\n", __func__);
 +}
 +
 +static void test_assert_id_reg_unchanged(struct kvm_vcpu *vcpu, uint32_t encoding)
 +{
 +      size_t idx = encoding_to_range_idx(encoding);
 +      uint64_t observed;
 +
 +      observed = vcpu_get_reg(vcpu, KVM_ARM64_SYS_REG(encoding));
 +      TEST_ASSERT_EQ(test_reg_vals[idx], observed);
 +}
 +
 +static void test_reset_preserves_id_regs(struct kvm_vcpu *vcpu)
 +{
 +      /*
 +       * Calls KVM_ARM_VCPU_INIT behind the scenes, which will do an
 +       * architectural reset of the vCPU.
 +       */
 +      aarch64_vcpu_setup(vcpu, NULL);
 +
 +      for (int i = 0; i < ARRAY_SIZE(test_regs); i++)
 +              test_assert_id_reg_unchanged(vcpu, test_regs[i].reg);
 +
 +      test_assert_id_reg_unchanged(vcpu, SYS_MPIDR_EL1);
 +      test_assert_id_reg_unchanged(vcpu, SYS_CLIDR_EL1);
 +      test_assert_id_reg_unchanged(vcpu, SYS_CTR_EL0);
 +
 +      ksft_test_result_pass("%s\n", __func__);
 +}
 +
 +int main(void)
 +{
 +      struct kvm_vcpu *vcpu;
 +      struct kvm_vm *vm;
 +      bool aarch64_only;
 +      uint64_t val, el0;
 +      int test_cnt;
 +
 +      TEST_REQUIRE(kvm_has_cap(KVM_CAP_ARM_SUPPORTED_REG_MASK_RANGES));
 +
 +      vm = vm_create_with_one_vcpu(&vcpu, guest_code);
 +
 +      /* Check for AARCH64 only system */
 +      val = vcpu_get_reg(vcpu, KVM_ARM64_SYS_REG(SYS_ID_AA64PFR0_EL1));
 +      el0 = FIELD_GET(ARM64_FEATURE_MASK(ID_AA64PFR0_EL1_EL0), val);
++      aarch64_only = (el0 == ID_AA64PFR0_EL1_EL0_IMP);
 +
 +      ksft_print_header();
 +
 +      test_cnt = ARRAY_SIZE(ftr_id_aa64dfr0_el1) + ARRAY_SIZE(ftr_id_dfr0_el1) +
 +                 ARRAY_SIZE(ftr_id_aa64isar0_el1) + ARRAY_SIZE(ftr_id_aa64isar1_el1) +
 +                 ARRAY_SIZE(ftr_id_aa64isar2_el1) + ARRAY_SIZE(ftr_id_aa64pfr0_el1) +
 +                 ARRAY_SIZE(ftr_id_aa64pfr1_el1) + ARRAY_SIZE(ftr_id_aa64mmfr0_el1) +
 +                 ARRAY_SIZE(ftr_id_aa64mmfr1_el1) + ARRAY_SIZE(ftr_id_aa64mmfr2_el1) +
 +                 ARRAY_SIZE(ftr_id_aa64zfr0_el1) - ARRAY_SIZE(test_regs) + 2 +
 +                 MPAM_IDREG_TEST;
 +
 +      ksft_set_plan(test_cnt);
 +
 +      test_vm_ftr_id_regs(vcpu, aarch64_only);
 +      test_vcpu_ftr_id_regs(vcpu);
 +      test_user_set_mpam_reg(vcpu);
 +
 +      test_guest_reg_read(vcpu);
 +
 +      test_reset_preserves_id_regs(vcpu);
 +
 +      kvm_vm_free(vm);
 +
 +      ksft_finished();
 +}