KVM: arm64: Make MTE_frac masking conditional on MTE capability
author	Ben Horgan <ben.horgan@arm.com>
Mon, 12 May 2025 11:41:11 +0000 (12:41 +0100)
committer	Marc Zyngier <maz@kernel.org>
Fri, 16 May 2025 12:01:18 +0000 (13:01 +0100)
If MTE_frac is masked out unconditionally then the guest will always
see ID_AA64PFR1_EL1_MTE_frac as 0. However, a value of 0 when
ID_AA64PFR1_EL1_MTE is 2 indicates that MTE_ASYNC is supported. Hence, for
a host with ID_AA64PFR1_EL1_MTE==2 and ID_AA64PFR1_EL1_MTE_frac==0xf
(MTE_ASYNC unsupported) the guest would see MTE_ASYNC advertised as
supported whilst the host does not support it. Instead, expose the sanitised
value of MTE_frac to the guest and user-space.
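
For illustration, a minimal sketch of the decode in question, assuming the
ID_AA64PFR1_EL1 field helpers and constants from <asm/sysreg.h> that also
appear in the diff below; the helper name is made up and this is not code
from the patch:

  static bool guest_sees_mte_async(u64 pfr1)
  {
          u8 mte      = SYS_FIELD_GET(ID_AA64PFR1_EL1, MTE, pfr1);
          u8 mte_frac = SYS_FIELD_GET(ID_AA64PFR1_EL1, MTE_frac, pfr1);

          /* MTE_frac == 0 advertises MTE_ASYNC once MTE2 is implemented */
          return mte >= ID_AA64PFR1_EL1_MTE_MTE2 &&
                 mte_frac == ID_AA64PFR1_EL1_MTE_frac_ASYNC;
  }

With the old unconditional masking, a host with MTE==2 and MTE_frac==0xf
still presents the guest with MTE_frac==0, so a check like this wrongly
reports MTE_ASYNC as available.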

As MTE_frac was previously hidden, always reading as 0, and KVM must accept
values it previously provided to user-space, allow user-space to set
ID_AA64PFR1_EL1.MTE_frac to 0 when ID_AA64PFR1_EL1.MTE is 2. However, ignore
the value to avoid incorrectly claiming hardware support for MTE_ASYNC in
the guest.
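
From the VMM side, the compatibility case is roughly the sketch below: a
saved ID_AA64PFR1_EL1 value is restored in which MTE_frac still reads as 0
because it was captured under a kernel that hid the field. The function name
and the explicit bit-clear are illustrative only; MTE_frac occupies
ID_AA64PFR1_EL1[43:40] and ARM64_SYS_REG(3, 0, 0, 4, 1) encodes
ID_AA64PFR1_EL1:

  #include <stdint.h>
  #include <sys/ioctl.h>
  #include <linux/kvm.h>

  /*
   * Illustrative only: write back an ID_AA64PFR1_EL1 value whose MTE_frac
   * field is 0, as an older KVM would have exposed it. With this patch,
   * KVM accepts the write when MTE is MTE2 but ignores the stale MTE_frac
   * instead of advertising MTE_ASYNC to the guest.
   */
  static int restore_id_aa64pfr1(int vcpu_fd)
  {
          uint64_t val;
          struct kvm_one_reg reg = {
                  .id   = ARM64_SYS_REG(3, 0, 0, 4, 1),  /* ID_AA64PFR1_EL1 */
                  .addr = (uint64_t)&val,
          };

          if (ioctl(vcpu_fd, KVM_GET_ONE_REG, &reg))
                  return -1;

          val &= ~(UINT64_C(0xf) << 40);  /* clear MTE_frac, bits [43:40] */

          return ioctl(vcpu_fd, KVM_SET_ONE_REG, &reg);
  }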

Note that Linux does not check the value of ID_AA64PFR1_EL1_MTE_frac and
wrongly assumes that MTE async faults can be generated even on hardware that
does not support them. This issue is not addressed here.

Signed-off-by: Ben Horgan <ben.horgan@arm.com>
Link: https://lore.kernel.org/r/20250512114112.359087-3-ben.horgan@arm.com
Signed-off-by: Marc Zyngier <maz@kernel.org>
arch/arm64/kvm/sys_regs.c

index 005ad28f7306810201df9d093f7bbb6d936d6c2b..ee30c94c0aceb93056336149a44ccfee9ba3c005 100644 (file)
@@ -1600,13 +1600,14 @@ static u64 __kvm_read_sanitised_id_reg(const struct kvm_vcpu *vcpu,
                val = sanitise_id_aa64pfr0_el1(vcpu, val);
                break;
        case SYS_ID_AA64PFR1_EL1:
-               if (!kvm_has_mte(vcpu->kvm))
+               if (!kvm_has_mte(vcpu->kvm)) {
                        val &= ~ARM64_FEATURE_MASK(ID_AA64PFR1_EL1_MTE);
+                       val &= ~ARM64_FEATURE_MASK(ID_AA64PFR1_EL1_MTE_frac);
+               }
 
                val &= ~ARM64_FEATURE_MASK(ID_AA64PFR1_EL1_SME);
                val &= ~ARM64_FEATURE_MASK(ID_AA64PFR1_EL1_RNDR_trap);
                val &= ~ARM64_FEATURE_MASK(ID_AA64PFR1_EL1_NMI);
-               val &= ~ARM64_FEATURE_MASK(ID_AA64PFR1_EL1_MTE_frac);
                val &= ~ARM64_FEATURE_MASK(ID_AA64PFR1_EL1_GCS);
                val &= ~ARM64_FEATURE_MASK(ID_AA64PFR1_EL1_THE);
                val &= ~ARM64_FEATURE_MASK(ID_AA64PFR1_EL1_MTEX);
@@ -1953,11 +1954,34 @@ static int set_id_aa64pfr1_el1(struct kvm_vcpu *vcpu,
 {
        u64 hw_val = read_sanitised_ftr_reg(SYS_ID_AA64PFR1_EL1);
        u64 mpam_mask = ID_AA64PFR1_EL1_MPAM_frac_MASK;
+       u8 mte = SYS_FIELD_GET(ID_AA64PFR1_EL1, MTE, hw_val);
+       u8 user_mte_frac = SYS_FIELD_GET(ID_AA64PFR1_EL1, MTE_frac, user_val);
+       u8 hw_mte_frac = SYS_FIELD_GET(ID_AA64PFR1_EL1, MTE_frac, hw_val);
 
        /* See set_id_aa64pfr0_el1 for comment about MPAM */
        if ((hw_val & mpam_mask) == (user_val & mpam_mask))
                user_val &= ~ID_AA64PFR1_EL1_MPAM_frac_MASK;
 
+       /*
+        * Previously MTE_frac was hidden from guest. However, if the
+        * hardware supports MTE2 but not MTE_ASYM_FAULT then a value
+        * of 0 for this field indicates that the hardware supports
+        * MTE_ASYNC. Whereas, 0xf indicates MTE_ASYNC is not supported.
+        *
+        * As KVM must accept values it previously provided to user-space,
+        * when ID_AA64PFR1_EL1.MTE is 2 allow user-space to set
+        * ID_AA64PFR1_EL1.MTE_frac to 0. However, ignore it to avoid
+        * incorrectly claiming hardware support for MTE_ASYNC in the
+        * guest.
+        */
+
+       if (mte == ID_AA64PFR1_EL1_MTE_MTE2 &&
+           hw_mte_frac == ID_AA64PFR1_EL1_MTE_frac_NI &&
+           user_mte_frac == ID_AA64PFR1_EL1_MTE_frac_ASYNC) {
+               user_val &= ~ID_AA64PFR1_EL1_MTE_frac_MASK;
+               user_val |= hw_val & ID_AA64PFR1_EL1_MTE_frac_MASK;
+       }
+
        return set_id_reg(vcpu, rd, user_val);
 }