KVM: arm64: Use HCRX_EL2 feature map to drive fixed-value bits
author     Marc Zyngier <maz@kernel.org>
           Sun, 9 Feb 2025 14:51:23 +0000 (14:51 +0000)
committer  Marc Zyngier <maz@kernel.org>
           Mon, 19 May 2025 10:35:30 +0000 (11:35 +0100)
Similarly to other registers, describe which HCRX_EL2 bit depends
on which feature, and use this to compute the RES0 status of these
bits.
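
As an illustration, here is a minimal sketch of the computation the
map enables (the loop below and the 'bits'/'match' field names are
assumptions for illustration; the real walk is compute_res0_bits()
in arch/arm64/kvm/config.c):

	u64 res0 = 0;

	for (int i = 0; i < ARRAY_SIZE(hcrx_feat_map); i++) {
		const struct reg_bits_to_feat_map *m = &hcrx_feat_map[i];

		/*
		 * An entry matches when the guest has the feature,
		 * either via a predicate such as feat_pauth_lr() or
		 * via an ID register field comparison.
		 */
		if (!(m->match ? m->match(kvm) : idreg_feat_match(kvm, m)))
			res0 |= m->bits;	/* no feature => bits are RES0 */
	}

	res0 |= __HCRX_EL2_RES0;	/* architecturally RES0 bits */

The result replaces the open-coded if-ladder that kvm_init_nv_sysregs()
previously used, in favour of a single get_reg_fixed_bits() call.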

Signed-off-by: Marc Zyngier <maz@kernel.org>
arch/arm64/kvm/config.c
arch/arm64/kvm/nested.c

index 22aea2d9dcaea8aa538fcd07ff6d2b4d106c588a..b205cbb3cdcd0ca4b8344625a8486444b9409fc7 100644 (file)
@@ -77,6 +77,8 @@ struct reg_bits_to_feat_map {
 #define FEAT_THE               ID_AA64PFR1_EL1, THE, IMP
 #define FEAT_SME               ID_AA64PFR1_EL1, SME, IMP
 #define FEAT_GCS               ID_AA64PFR1_EL1, GCS, IMP
+#define FEAT_LS64              ID_AA64ISAR1_EL1, LS64, LS64
+#define FEAT_LS64_V            ID_AA64ISAR1_EL1, LS64, LS64_V
 #define FEAT_LS64_ACCDATA      ID_AA64ISAR1_EL1, LS64, LS64_ACCDATA
 #define FEAT_RAS               ID_AA64PFR0_EL1, RAS, IMP
 #define FEAT_GICv3             ID_AA64PFR0_EL1, GIC, IMP
@@ -90,6 +92,16 @@ struct reg_bits_to_feat_map {
 #define FEAT_PAN2              ID_AA64MMFR1_EL1, PAN, PAN2
 #define FEAT_DPB2              ID_AA64ISAR1_EL1, DPB, DPB2
 #define FEAT_AMUv1             ID_AA64PFR0_EL1, AMU, IMP
+#define FEAT_CMOW              ID_AA64MMFR1_EL1, CMOW, IMP
+#define FEAT_D128              ID_AA64MMFR3_EL1, D128, IMP
+#define FEAT_DoubleFault2      ID_AA64PFR1_EL1, DF2, IMP
+#define FEAT_FPMR              ID_AA64PFR2_EL1, FPMR, IMP
+#define FEAT_MOPS              ID_AA64ISAR2_EL1, MOPS, IMP
+#define FEAT_NMI               ID_AA64PFR1_EL1, NMI, IMP
+#define FEAT_SCTLR2            ID_AA64MMFR3_EL1, SCTLRX, IMP
+#define FEAT_SYSREG128         ID_AA64ISAR2_EL1, SYSREG_128, IMP
+#define FEAT_TCR2              ID_AA64MMFR3_EL1, TCRX, IMP
+#define FEAT_XS                ID_AA64ISAR1_EL1, XS, IMP
 
 static bool feat_rasv1p1(struct kvm *kvm)
 {
@@ -110,6 +122,35 @@ static bool feat_pauth(struct kvm *kvm)
        return kvm_has_pauth(kvm, PAuth);
 }
 
+static bool feat_pauth_lr(struct kvm *kvm)
+{
+       return kvm_has_pauth(kvm, PAuth_LR);
+}
+
+static bool feat_aderr(struct kvm *kvm)
+{
+       return (kvm_has_feat(kvm, ID_AA64MMFR3_EL1, ADERR, FEAT_ADERR) &&
+               kvm_has_feat(kvm, ID_AA64MMFR3_EL1, SDERR, FEAT_ADERR));
+}
+
+static bool feat_anerr(struct kvm *kvm)
+{
+       return (kvm_has_feat(kvm, ID_AA64MMFR3_EL1, ANERR, FEAT_ANERR) &&
+               kvm_has_feat(kvm, ID_AA64MMFR3_EL1, SNERR, FEAT_ANERR));
+}
+
+static bool feat_sme_smps(struct kvm *kvm)
+{
+       /*
+        * Revisit this if KVM ever supports SME -- this really should
+        * look at the guest's view of SMIDR_EL1. Funnily enough, this
+        * is not captured in the JSON file, but only as a note in the
+        * ARM ARM.
+        */
+       return (kvm_has_feat(kvm, FEAT_SME) &&
+               (read_sysreg_s(SYS_SMIDR_EL1) & SMIDR_EL1_SMPS));
+}
+
 static const struct reg_bits_to_feat_map hfgrtr_feat_map[] = {
        NEEDS_FEAT(HFGRTR_EL2_nAMAIR2_EL1       |
                   HFGRTR_EL2_nMAIR2_EL1,
@@ -494,6 +535,35 @@ static const struct reg_bits_to_feat_map hafgrtr_feat_map[] = {
                   FEAT_AMUv1),
 };
 
+static const struct reg_bits_to_feat_map hcrx_feat_map[] = {
+       NEEDS_FEAT(HCRX_EL2_PACMEn, feat_pauth_lr),
+       NEEDS_FEAT(HCRX_EL2_EnFPM, FEAT_FPMR),
+       NEEDS_FEAT(HCRX_EL2_GCSEn, FEAT_GCS),
+       NEEDS_FEAT(HCRX_EL2_EnIDCP128, FEAT_SYSREG128),
+       NEEDS_FEAT(HCRX_EL2_EnSDERR, feat_aderr),
+       NEEDS_FEAT(HCRX_EL2_TMEA, FEAT_DoubleFault2),
+       NEEDS_FEAT(HCRX_EL2_EnSNERR, feat_anerr),
+       NEEDS_FEAT(HCRX_EL2_D128En, FEAT_D128),
+       NEEDS_FEAT(HCRX_EL2_PTTWI, FEAT_THE),
+       NEEDS_FEAT(HCRX_EL2_SCTLR2En, FEAT_SCTLR2),
+       NEEDS_FEAT(HCRX_EL2_TCR2En, FEAT_TCR2),
+       NEEDS_FEAT(HCRX_EL2_MSCEn               |
+                  HCRX_EL2_MCE2,
+                  FEAT_MOPS),
+       NEEDS_FEAT(HCRX_EL2_CMOW, FEAT_CMOW),
+       NEEDS_FEAT(HCRX_EL2_VFNMI               |
+                  HCRX_EL2_VINMI               |
+                  HCRX_EL2_TALLINT,
+                  FEAT_NMI),
+       NEEDS_FEAT(HCRX_EL2_SMPME, feat_sme_smps),
+       NEEDS_FEAT(HCRX_EL2_FGTnXS              |
+                  HCRX_EL2_FnXS,
+                  FEAT_XS),
+       NEEDS_FEAT(HCRX_EL2_EnASR, FEAT_LS64_V),
+       NEEDS_FEAT(HCRX_EL2_EnALS, FEAT_LS64),
+       NEEDS_FEAT(HCRX_EL2_EnAS0, FEAT_LS64_ACCDATA),
+};
+
 static void __init check_feat_map(const struct reg_bits_to_feat_map *map,
                                  int map_size, u64 res0, const char *str)
 {
@@ -521,6 +591,8 @@ void __init check_feature_map(void)
                       hdfgwtr_masks.res0, hdfgwtr_masks.str);
        check_feat_map(hafgrtr_feat_map, ARRAY_SIZE(hafgrtr_feat_map),
                       hafgrtr_masks.res0, hafgrtr_masks.str);
+       check_feat_map(hcrx_feat_map, ARRAY_SIZE(hcrx_feat_map),
+                      __HCRX_EL2_RES0, "HCRX_EL2");
 }
 
 static bool idreg_feat_match(struct kvm *kvm, const struct reg_bits_to_feat_map *map)
@@ -656,6 +728,12 @@ void get_reg_fixed_bits(struct kvm *kvm, enum vcpu_sysreg reg, u64 *res0, u64 *r
                *res0 |= hafgrtr_masks.res0;
                *res1 = HAFGRTR_EL2_RES1;
                break;
+       case HCRX_EL2:
+               *res0 = compute_res0_bits(kvm, hcrx_feat_map,
+                                         ARRAY_SIZE(hcrx_feat_map), 0, 0);
+               *res0 |= __HCRX_EL2_RES0;
+               *res1 = __HCRX_EL2_RES1;
+               break;
        default:
                WARN_ON_ONCE(1);
                *res0 = *res1 = 0;
index 3d91a0233652bab97c3bff91f0ce1d9ac8899f40..20c79f1eaebab3f8bd59b7293c08d08e41f17453 100644 (file)
@@ -1058,45 +1058,7 @@ int kvm_init_nv_sysregs(struct kvm_vcpu *vcpu)
        set_sysreg_masks(kvm, HCR_EL2, res0, res1);
 
        /* HCRX_EL2 */
-       res0 = __HCRX_EL2_RES0;
-       res1 = __HCRX_EL2_RES1;
-       if (!kvm_has_feat(kvm, ID_AA64ISAR3_EL1, PACM, TRIVIAL_IMP))
-               res0 |= HCRX_EL2_PACMEn;
-       if (!kvm_has_feat(kvm, ID_AA64PFR2_EL1, FPMR, IMP))
-               res0 |= HCRX_EL2_EnFPM;
-       if (!kvm_has_feat(kvm, ID_AA64PFR1_EL1, GCS, IMP))
-               res0 |= HCRX_EL2_GCSEn;
-       if (!kvm_has_feat(kvm, ID_AA64ISAR2_EL1, SYSREG_128, IMP))
-               res0 |= HCRX_EL2_EnIDCP128;
-       if (!kvm_has_feat(kvm, ID_AA64MMFR3_EL1, ADERR, DEV_ASYNC))
-               res0 |= (HCRX_EL2_EnSDERR | HCRX_EL2_EnSNERR);
-       if (!kvm_has_feat(kvm, ID_AA64PFR1_EL1, DF2, IMP))
-               res0 |= HCRX_EL2_TMEA;
-       if (!kvm_has_feat(kvm, ID_AA64MMFR3_EL1, D128, IMP))
-               res0 |= HCRX_EL2_D128En;
-       if (!kvm_has_feat(kvm, ID_AA64PFR1_EL1, THE, IMP))
-               res0 |= HCRX_EL2_PTTWI;
-       if (!kvm_has_feat(kvm, ID_AA64MMFR3_EL1, SCTLRX, IMP))
-               res0 |= HCRX_EL2_SCTLR2En;
-       if (!kvm_has_tcr2(kvm))
-               res0 |= HCRX_EL2_TCR2En;
-       if (!kvm_has_feat(kvm, ID_AA64ISAR2_EL1, MOPS, IMP))
-               res0 |= (HCRX_EL2_MSCEn | HCRX_EL2_MCE2);
-       if (!kvm_has_feat(kvm, ID_AA64MMFR1_EL1, CMOW, IMP))
-               res0 |= HCRX_EL2_CMOW;
-       if (!kvm_has_feat(kvm, ID_AA64PFR1_EL1, NMI, IMP))
-               res0 |= (HCRX_EL2_VFNMI | HCRX_EL2_VINMI | HCRX_EL2_TALLINT);
-       if (!kvm_has_feat(kvm, ID_AA64PFR1_EL1, SME, IMP) ||
-           !(read_sysreg_s(SYS_SMIDR_EL1) & SMIDR_EL1_SMPS))
-               res0 |= HCRX_EL2_SMPME;
-       if (!kvm_has_feat(kvm, ID_AA64ISAR1_EL1, XS, IMP))
-               res0 |= (HCRX_EL2_FGTnXS | HCRX_EL2_FnXS);
-       if (!kvm_has_feat(kvm, ID_AA64ISAR1_EL1, LS64, LS64_V))
-               res0 |= HCRX_EL2_EnASR;
-       if (!kvm_has_feat(kvm, ID_AA64ISAR1_EL1, LS64, LS64))
-               res0 |= HCRX_EL2_EnALS;
-       if (!kvm_has_feat(kvm, ID_AA64ISAR1_EL1, LS64, LS64_ACCDATA))
-               res0 |= HCRX_EL2_EnAS0;
+       get_reg_fixed_bits(kvm, HCRX_EL2, &res0, &res1);
        set_sysreg_masks(kvm, HCRX_EL2, res0, res1);
 
        /* HFG[RW]TR_EL2 */