KVM: arm64: Rip out the vestiges of the 'old' ID register scheme
author Oliver Upton <oliver.upton@linux.dev>
Fri, 9 Jun 2023 19:00:54 +0000 (19:00 +0000)
committer Oliver Upton <oliver.upton@linux.dev>
Thu, 15 Jun 2023 12:55:35 +0000 (12:55 +0000)
There's no longer a need for the baggage of the old scheme for handling
configurable ID register fields. Rip it all out in favor of the
generalized infrastructure.

Link: https://lore.kernel.org/r/20230609190054.1542113-12-oliver.upton@linux.dev
Signed-off-by: Oliver Upton <oliver.upton@linux.dev>
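
[Annotation: in rough terms, this is the shift in consumer pattern. The
snippet below is an illustrative sketch, not code from this commit; it
uses the IDREG()/SYS_FIELD_GET() helpers that appear in the
include/kvm/arm_pmu.h hunk at the end of the diff.]

	/* old scheme: a shadow copy on struct kvm, kept in sync by hand */
	u8 pmuver = vcpu->kvm->arch.dfr0_pmuver;

	/* new scheme: read the guest's view of the field straight out of
	 * the per-VM ID register state */
	u8 pmuver = SYS_FIELD_GET(ID_AA64DFR0_EL1, PMUVer,
				  IDREG(vcpu->kvm, SYS_ID_AA64DFR0_EL1));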
arch/arm64/include/asm/kvm_host.h
arch/arm64/kvm/arm.c
arch/arm64/kvm/sys_regs.c
include/kvm/arm_pmu.h

index 39270bc29a3fa73a0e346614b798db55746f448e..e2e9552f2808b5e80844636b98cc9512a656f994 100644 (file)
@@ -241,10 +241,6 @@ struct kvm_arch {
 
        cpumask_var_t supported_cpus;
 
-       u8 pfr0_csv2;
-       u8 pfr0_csv3;
-       u8 dfr0_pmuver;
-
        /* Hypercall features firmware registers' descriptor */
        struct kvm_smccc_features smccc_feat;
        struct maple_tree smccc_filter;
index 695239e016189595600fa4193ad2cea6d5638837..80ca7ec269dc13bdb594195a998f804a52cbafaf 100644 (file)
@@ -102,22 +102,6 @@ static int kvm_arm_default_max_vcpus(void)
        return vgic_present ? kvm_vgic_get_max_vcpus() : KVM_MAX_VCPUS;
 }
 
-static void set_default_spectre(struct kvm *kvm)
-{
-       /*
-        * The default is to expose CSV2 == 1 if the HW isn't affected.
-        * Although this is a per-CPU feature, we make it global because
-        * asymmetric systems are just a nuisance.
-        *
-        * Userspace can override this as long as it doesn't promise
-        * the impossible.
-        */
-       if (arm64_get_spectre_v2_state() == SPECTRE_UNAFFECTED)
-               kvm->arch.pfr0_csv2 = 1;
-       if (arm64_get_meltdown_state() == SPECTRE_UNAFFECTED)
-               kvm->arch.pfr0_csv3 = 1;
-}
-
 /**
  * kvm_arch_init_vm - initializes a VM data structure
  * @kvm:       pointer to the KVM struct
@@ -161,15 +145,8 @@ int kvm_arch_init_vm(struct kvm *kvm, unsigned long type)
        /* The maximum number of VCPUs is limited by the host's GIC model */
        kvm->max_vcpus = kvm_arm_default_max_vcpus();
 
-       set_default_spectre(kvm);
        kvm_arm_init_hypercalls(kvm);
 
-       /*
-        * Initialise the default PMUver before there is a chance to
-        * create an actual PMU.
-        */
-       kvm->arch.dfr0_pmuver = kvm_arm_pmu_get_pmuver_limit();
-
        bitmap_zero(kvm->arch.vcpu_features, KVM_VCPU_MAX_FEATURES);
 
        return 0;
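
[Annotation: the defaults these deleted lines used to establish (CSV2/CSV3
and the initial PMUVer) now come from the ID register reset hooks instead,
i.e. the read_sanitised_id_aa64pfr0_el1() and read_sanitised_id_aa64dfr0_el1()
.reset callbacks visible in the sys_regs.c hunks below.]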
index 7e8772508315e5f681b1b2fdc376c7a831472979..5aa0c977aaa3133a2032c93eae90a2314cd14d6d 100644 (file)
@@ -1195,14 +1195,6 @@ static bool access_arch_timer(struct kvm_vcpu *vcpu,
        return true;
 }
 
-static u8 vcpu_pmuver(const struct kvm_vcpu *vcpu)
-{
-       if (kvm_vcpu_has_pmu(vcpu))
-               return vcpu->kvm->arch.dfr0_pmuver;
-
-       return 0;
-}
-
 static s64 kvm_arm64_ftr_safe_value(u32 id, const struct arm64_ftr_bits *ftrp,
                                    s64 new, s64 cur)
 {
@@ -1288,19 +1280,6 @@ static int arm64_check_features(struct kvm_vcpu *vcpu,
        return 0;
 }
 
-static u8 perfmon_to_pmuver(u8 perfmon)
-{
-       switch (perfmon) {
-       case ID_DFR0_EL1_PerfMon_PMUv3:
-               return ID_AA64DFR0_EL1_PMUVer_IMP;
-       case ID_DFR0_EL1_PerfMon_IMPDEF:
-               return ID_AA64DFR0_EL1_PMUVer_IMP_DEF;
-       default:
-               /* Anything ARMv8.1+ and NI have the same value. For now. */
-               return perfmon;
-       }
-}
-
 static u8 pmuver_to_perfmon(u8 pmuver)
 {
        switch (pmuver) {
@@ -1327,19 +1306,6 @@ static u64 __kvm_read_sanitised_id_reg(const struct kvm_vcpu *vcpu,
        val = read_sanitised_ftr_reg(id);
 
        switch (id) {
-       case SYS_ID_AA64PFR0_EL1:
-               if (!vcpu_has_sve(vcpu))
-                       val &= ~ARM64_FEATURE_MASK(ID_AA64PFR0_EL1_SVE);
-               val &= ~ARM64_FEATURE_MASK(ID_AA64PFR0_EL1_AMU);
-               val &= ~ARM64_FEATURE_MASK(ID_AA64PFR0_EL1_CSV2);
-               val |= FIELD_PREP(ARM64_FEATURE_MASK(ID_AA64PFR0_EL1_CSV2), (u64)vcpu->kvm->arch.pfr0_csv2);
-               val &= ~ARM64_FEATURE_MASK(ID_AA64PFR0_EL1_CSV3);
-               val |= FIELD_PREP(ARM64_FEATURE_MASK(ID_AA64PFR0_EL1_CSV3), (u64)vcpu->kvm->arch.pfr0_csv3);
-               if (kvm_vgic_global_state.type == VGIC_V3) {
-                       val &= ~ARM64_FEATURE_MASK(ID_AA64PFR0_EL1_GIC);
-                       val |= FIELD_PREP(ARM64_FEATURE_MASK(ID_AA64PFR0_EL1_GIC), 1);
-               }
-               break;
        case SYS_ID_AA64PFR1_EL1:
                if (!kvm_has_mte(vcpu->kvm))
                        val &= ~ARM64_FEATURE_MASK(ID_AA64PFR1_EL1_MTE);
@@ -1360,22 +1326,6 @@ static u64 __kvm_read_sanitised_id_reg(const struct kvm_vcpu *vcpu,
                if (!cpus_have_final_cap(ARM64_HAS_WFXT))
                        val &= ~ARM64_FEATURE_MASK(ID_AA64ISAR2_EL1_WFxT);
                break;
-       case SYS_ID_AA64DFR0_EL1:
-               /* Limit debug to ARMv8.0 */
-               val &= ~ARM64_FEATURE_MASK(ID_AA64DFR0_EL1_DebugVer);
-               val |= FIELD_PREP(ARM64_FEATURE_MASK(ID_AA64DFR0_EL1_DebugVer), 6);
-               /* Set PMUver to the required version */
-               val &= ~ARM64_FEATURE_MASK(ID_AA64DFR0_EL1_PMUVer);
-               val |= FIELD_PREP(ARM64_FEATURE_MASK(ID_AA64DFR0_EL1_PMUVer),
-                                 vcpu_pmuver(vcpu));
-               /* Hide SPE from guests */
-               val &= ~ARM64_FEATURE_MASK(ID_AA64DFR0_EL1_PMSVer);
-               break;
-       case SYS_ID_DFR0_EL1:
-               val &= ~ARM64_FEATURE_MASK(ID_DFR0_EL1_PerfMon);
-               val |= FIELD_PREP(ARM64_FEATURE_MASK(ID_DFR0_EL1_PerfMon),
-                                 pmuver_to_perfmon(vcpu_pmuver(vcpu)));
-               break;
        case SYS_ID_AA64MMFR2_EL1:
                val &= ~ID_AA64MMFR2_EL1_CCIDX_MASK;
                break;
@@ -1505,26 +1455,6 @@ static u64 read_sanitised_id_aa64pfr0_el1(struct kvm_vcpu *vcpu,
        return val;
 }
 
-static int set_id_aa64pfr0_el1(struct kvm_vcpu *vcpu,
-                              const struct sys_reg_desc *rd,
-                              u64 val)
-{
-       u8 csv2, csv3;
-       int r;
-
-       csv2 = cpuid_feature_extract_unsigned_field(val, ID_AA64PFR0_EL1_CSV2_SHIFT);
-       csv3 = cpuid_feature_extract_unsigned_field(val, ID_AA64PFR0_EL1_CSV3_SHIFT);
-
-       r = set_id_reg(vcpu, rd, val);
-       if (r)
-               return r;
-
-       vcpu->kvm->arch.pfr0_csv2 = csv2;
-       vcpu->kvm->arch.pfr0_csv3 = csv3;
-
-       return 0;
-}
-
 static u64 read_sanitised_id_aa64dfr0_el1(struct kvm_vcpu *vcpu,
                                          const struct sys_reg_desc *rd)
 {
@@ -1553,7 +1483,6 @@ static int set_id_aa64dfr0_el1(struct kvm_vcpu *vcpu,
                               u64 val)
 {
        u8 pmuver = SYS_FIELD_GET(ID_AA64DFR0_EL1, PMUVer, val);
-       int r;
 
        /*
         * Prior to commit 3d0dba5764b9 ("KVM: arm64: PMU: Move the
@@ -1569,17 +1498,10 @@ static int set_id_aa64dfr0_el1(struct kvm_vcpu *vcpu,
         * surprising than an ill-guided PMU driver poking at impdef system
         * registers that end in an UNDEF...
         */
-       if (pmuver == ID_AA64DFR0_EL1_PMUVer_IMP_DEF) {
+       if (pmuver == ID_AA64DFR0_EL1_PMUVer_IMP_DEF)
                val &= ~ID_AA64DFR0_EL1_PMUVer_MASK;
-               pmuver = 0;
-       }
 
-       r = set_id_reg(vcpu, rd, val);
-       if (r)
-               return r;
-
-       vcpu->kvm->arch.dfr0_pmuver = pmuver;
-       return 0;
+       return set_id_reg(vcpu, rd, val);
 }
 
 static u64 read_sanitised_id_dfr0_el1(struct kvm_vcpu *vcpu,
@@ -1600,7 +1522,6 @@ static int set_id_dfr0_el1(struct kvm_vcpu *vcpu,
                           u64 val)
 {
        u8 perfmon = SYS_FIELD_GET(ID_DFR0_EL1, PerfMon, val);
-       int r;
 
        if (perfmon == ID_DFR0_EL1_PerfMon_IMPDEF) {
                val &= ~ID_DFR0_EL1_PerfMon_MASK;
@@ -1616,12 +1537,7 @@ static int set_id_dfr0_el1(struct kvm_vcpu *vcpu,
        if (perfmon != 0 && perfmon < ID_DFR0_EL1_PerfMon_PMUv3)
                return -EINVAL;
 
-       r = set_id_reg(vcpu, rd, val);
-       if (r)
-               return r;
-
-       vcpu->kvm->arch.dfr0_pmuver = perfmon_to_pmuver(perfmon);
-       return 0;
+       return set_id_reg(vcpu, rd, val);
 }
 
 /*
@@ -2076,7 +1992,7 @@ static const struct sys_reg_desc sys_reg_descs[] = {
        { SYS_DESC(SYS_ID_AA64PFR0_EL1),
          .access = access_id_reg,
          .get_user = get_id_reg,
-         .set_user = set_id_aa64pfr0_el1,
+         .set_user = set_id_reg,
          .reset = read_sanitised_id_aa64pfr0_el1,
          .val = ID_AA64PFR0_EL1_CSV2_MASK | ID_AA64PFR0_EL1_CSV3_MASK, },
        ID_SANITISED(ID_AA64PFR1_EL1),
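
[Annotation: with the shadow fields gone, set_id_aa64dfr0_el1() and
set_id_dfr0_el1() just sanitise the user-provided value and defer to
set_id_reg(), which, assuming the behaviour introduced earlier in this
series, validates the write via arm64_check_features() and stores the
accepted value in the per-VM ID register state.]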
index 0be60d5eebd757fa69db4cbef3a54f735d3ce166..847da6fc271394c6c2516ae060a43b5d0f84cef6 100644 (file)
@@ -92,8 +92,12 @@ void kvm_vcpu_pmu_restore_host(struct kvm_vcpu *vcpu);
 /*
  * Evaluates as true when emulating PMUv3p5, and false otherwise.
  */
-#define kvm_pmu_is_3p5(vcpu)                                           \
-       (vcpu->kvm->arch.dfr0_pmuver >= ID_AA64DFR0_EL1_PMUVer_V3P5)
+#define kvm_pmu_is_3p5(vcpu) ({                                                \
+       u64 val = IDREG(vcpu->kvm, SYS_ID_AA64DFR0_EL1);                \
+       u8 pmuver = SYS_FIELD_GET(ID_AA64DFR0_EL1, PMUVer, val);        \
+                                                                       \
+       pmuver >= ID_AA64DFR0_EL1_PMUVer_V3P5;                          \
+})
 
 u8 kvm_arm_pmu_get_pmuver_limit(void);
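
[Annotation, design note: kvm_pmu_is_3p5() becomes a GNU statement
expression so the PMUv3p5 check derives from the same per-VM ID register
state that userspace configures through the set_user handlers above,
eliminating the last reader of the old dfr0_pmuver shadow field.]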