KVM: arm64: Use KVM-specific HCRX_EL2 RES0 mask
author     Marc Zyngier <maz@kernel.org>
           Fri, 24 Jan 2025 19:04:26 +0000 (19:04 +0000)
committer  Marc Zyngier <maz@kernel.org>
           Sat, 10 May 2025 10:04:35 +0000 (11:04 +0100)
We do not have a computed table for HCRX_EL2, so statically define
the bits we know about. A message is printed at boot if the
architecture grows bits that are not handled yet.

Reviewed-by: Joey Gouly <joey.gouly@arm.com>
Signed-off-by: Marc Zyngier <maz@kernel.org>
arch/arm64/include/asm/kvm_arm.h
arch/arm64/kvm/emulate-nested.c
arch/arm64/kvm/nested.c

index e7c73d16cd451af10e4044e869c90800f7fdfa6e..52b3aeb19efc6aa9237b2ce9d421032650571d1a 100644 (file)
                                 GENMASK(19, 18) |      \
                                 GENMASK(15, 0))
 
-/* Polarity masks for HCRX_EL2 */
-#define __HCRX_EL2_RES0         HCRX_EL2_RES0
-#define __HCRX_EL2_MASK                (BIT(6))
-#define __HCRX_EL2_nMASK       ~(__HCRX_EL2_RES0 | __HCRX_EL2_MASK)
+/*
+ * Polarity masks for HCRX_EL2, limited to the bits that we know about
+ * at this point in time. It doesn't mean that we actually *handle*
+ * them, but that at least those that are not advertised to a guest
+ * will be RES0 for that guest.
+ */
+#define __HCRX_EL2_MASK                (BIT_ULL(6))
+#define __HCRX_EL2_nMASK       (GENMASK_ULL(24, 14) | \
+                                GENMASK_ULL(11, 7)  | \
+                                GENMASK_ULL(5, 0))
+#define __HCRX_EL2_RES0                ~(__HCRX_EL2_nMASK | __HCRX_EL2_MASK)
+#define __HCRX_EL2_RES1                ~(__HCRX_EL2_nMASK | \
+                                 __HCRX_EL2_MASK  | \
+                                 __HCRX_EL2_RES0)
 
 /* Hyp Prefetch Fault Address Register (HPFAR/HDFAR) */
 #define HPFAR_MASK     (~UL(0xf))
index c30d970bf81cb5e16a9c6255e8eecc86adfe1ed5..c581cf29bc59e0ef2bd508b101d8535ee556b13e 100644 (file)
@@ -2157,6 +2157,7 @@ int __init populate_nv_trap_config(void)
        BUILD_BUG_ON(__NR_CGT_GROUP_IDS__ > BIT(TC_CGT_BITS));
        BUILD_BUG_ON(__NR_FGT_GROUP_IDS__ > BIT(TC_FGT_BITS));
        BUILD_BUG_ON(__NR_FG_FILTER_IDS__ > BIT(TC_FGF_BITS));
+       BUILD_BUG_ON(__HCRX_EL2_MASK & __HCRX_EL2_nMASK);
 
        for (int i = 0; i < ARRAY_SIZE(encoding_to_cgt); i++) {
                const struct encoding_to_trap_config *cgt = &encoding_to_cgt[i];
@@ -2182,6 +2183,10 @@ int __init populate_nv_trap_config(void)
                }
        }
 
+       if (__HCRX_EL2_RES0 != HCRX_EL2_RES0)
+               kvm_info("Sanitised HCR_EL2_RES0 = %016llx, expecting %016llx\n",
+                        __HCRX_EL2_RES0, HCRX_EL2_RES0);
+
        kvm_info("nv: %ld coarse grained trap handlers\n",
                 ARRAY_SIZE(encoding_to_cgt));
 
index 479ffd25eea63e8f4df2d326c16916ef6554a795..666df85230c9bf12d7ab153037c8d6a7f4b0f90b 100644 (file)
@@ -1058,8 +1058,8 @@ int kvm_init_nv_sysregs(struct kvm_vcpu *vcpu)
        set_sysreg_masks(kvm, HCR_EL2, res0, res1);
 
        /* HCRX_EL2 */
-       res0 = HCRX_EL2_RES0;
-       res1 = HCRX_EL2_RES1;
+       res0 = __HCRX_EL2_RES0;
+       res1 = __HCRX_EL2_RES1;
        if (!kvm_has_feat(kvm, ID_AA64ISAR3_EL1, PACM, TRIVIAL_IMP))
                res0 |= HCRX_EL2_PACMEn;
        if (!kvm_has_feat(kvm, ID_AA64PFR2_EL1, FPMR, IMP))
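
For context on how the res0/res1 values installed via set_sysreg_masks()
are meant to be used: bits in res0 are forced to zero and bits in res1
are forced to one in the guest's view of the register. A minimal
stand-alone sketch of that clamping step follows; the helper name is
made up for illustration, and the actual sanitisation plumbing is not
part of this diff.

	#include <stdint.h>
	#include <stdio.h>

	/*
	 * Hypothetical helper, for illustration only: force RES0 bits
	 * to 0 and RES1 bits to 1 in a guest-provided register value.
	 */
	static uint64_t clamp_to_res_masks(uint64_t val, uint64_t res0,
					   uint64_t res1)
	{
		return (val & ~res0) | res1;
	}

	int main(void)
	{
		/* RES0 per the static masks: bits 12, 13 and 25-63 */
		uint64_t res0 = ~((1ULL << 25) - 1) | (3ULL << 12);

		/* A guest writing all-ones keeps only bits 0-11 and 14-24 */
		printf("%016llx\n",
		       (unsigned long long)clamp_to_res_masks(~0ULL, res0, 0));
		return 0;
	}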