KVM: arm64: Fix PMU probe ordering
Author:     Marc Zyngier <maz@kernel.org>
AuthorDate: Sun, 19 Sep 2021 13:09:49 +0000 (14:09 +0100)
Commit:     Marc Zyngier <maz@kernel.org>
CommitDate: Mon, 20 Sep 2021 11:43:34 +0000 (12:43 +0100)
Russell reported that since 5.13, KVM's probing of the PMU has
started to fail on his HW. As it turns out, there is an implicit
ordering dependency between the architectural PMU probing code and
KVM's own probing. If, due to probe ordering reasons, KVM probes
before the PMU driver, it will fail to detect the PMU, preventing it
from being advertised to guests as well as to the VMM.

Obviously, this is one probing too many, and we should be able to
deal with any ordering.

Add a callback from the PMU code into KVM to advertise the registration
of a host CPU PMU, allowing for any probing order.
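
As an illustration only (a stand-alone user-space sketch, not part of
the patch, with made-up names), the pattern reduces to the producer
notifying the consumer at registration time, so the result no longer
depends on which side initialises first:

  #include <stdbool.h>
  #include <stdio.h>

  struct pmu_stub { int pmuver; };   /* stand-in for struct arm_pmu */

  static bool pmu_available;         /* stand-in for the static key */

  /* Stand-in for kvm_host_pmu_init(): record that a PMU exists. */
  static void host_pmu_init(struct pmu_stub *pmu)
  {
          if (pmu->pmuver != 0)
                  pmu_available = true;
  }

  /* Stand-in for armpmu_register(): advertise the new PMU to KVM. */
  static void pmu_register(struct pmu_stub *pmu)
  {
          host_pmu_init(pmu);
  }

  int main(void)
  {
          struct pmu_stub pmu = { .pmuver = 1 };

          /* KVM may initialise first and find nothing... */
          printf("before registration: %d\n", pmu_available);
          /* ...but the callback closes the window once the driver probes. */
          pmu_register(&pmu);
          printf("after registration:  %d\n", pmu_available);
          return 0;
  }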

Fixes: 5421db1be3b1 ("KVM: arm64: Divorce the perf code from oprofile helpers")
Reported-by: "Russell King (Oracle)" <linux@armlinux.org.uk>
Tested-by: Russell King (Oracle) <rmk+kernel@armlinux.org.uk>
Signed-off-by: Marc Zyngier <maz@kernel.org>
Link: https://lore.kernel.org/r/YUYRKVflRtUytzy5@shell.armlinux.org.uk
Cc: stable@vger.kernel.org
arch/arm64/kvm/perf.c
arch/arm64/kvm/pmu-emul.c
drivers/perf/arm_pmu.c
include/kvm/arm_pmu.h
include/linux/perf/arm_pmu.h

diff --git a/arch/arm64/kvm/perf.c b/arch/arm64/kvm/perf.c
index f9bb3b14130eef9da9e6fe0214c65bc9d23f6861..c84fe24b2ea1e8f1f3a2ab6e1f6e6c9bb46801ed 100644
--- a/arch/arm64/kvm/perf.c
+++ b/arch/arm64/kvm/perf.c
@@ -50,9 +50,6 @@ static struct perf_guest_info_callbacks kvm_guest_cbs = {
 
 int kvm_perf_init(void)
 {
-       if (kvm_pmu_probe_pmuver() != ID_AA64DFR0_PMUVER_IMP_DEF && !is_protected_kvm_enabled())
-               static_branch_enable(&kvm_arm_pmu_available);
-
        return perf_register_guest_info_callbacks(&kvm_guest_cbs);
 }
 
diff --git a/arch/arm64/kvm/pmu-emul.c b/arch/arm64/kvm/pmu-emul.c
index f5065f23b413faf606137b3ceabc1bc81b87a27b..2af3c37445e00899217ac82a6c9e7ddc474b7093 100644
--- a/arch/arm64/kvm/pmu-emul.c
+++ b/arch/arm64/kvm/pmu-emul.c
@@ -740,7 +740,14 @@ void kvm_pmu_set_counter_event_type(struct kvm_vcpu *vcpu, u64 data,
        kvm_pmu_create_perf_event(vcpu, select_idx);
 }
 
-int kvm_pmu_probe_pmuver(void)
+void kvm_host_pmu_init(struct arm_pmu *pmu)
+{
+       if (pmu->pmuver != 0 && pmu->pmuver != ID_AA64DFR0_PMUVER_IMP_DEF &&
+           !kvm_arm_support_pmu_v3() && !is_protected_kvm_enabled())
+               static_branch_enable(&kvm_arm_pmu_available);
+}
+
+static int kvm_pmu_probe_pmuver(void)
 {
        struct perf_event_attr attr = { };
        struct perf_event *event;
diff --git a/drivers/perf/arm_pmu.c b/drivers/perf/arm_pmu.c
index 3cbc3baf087f3938d8bef85f79fb930d1a7f47f9..295cc7952d0edf9d622727357d1c3d607ac4aa0b 100644
--- a/drivers/perf/arm_pmu.c
+++ b/drivers/perf/arm_pmu.c
@@ -952,6 +952,8 @@ int armpmu_register(struct arm_pmu *pmu)
                pmu->name, pmu->num_events,
                has_nmi ? ", using NMIs" : "");
 
+       kvm_host_pmu_init(pmu);
+
        return 0;
 
 out_destroy:
diff --git a/include/kvm/arm_pmu.h b/include/kvm/arm_pmu.h
index 864b9997efb20d55c5df3fd2b544575134e070e9..90f21898aad838660e993c3e9fce553ea4b7af56 100644
--- a/include/kvm/arm_pmu.h
+++ b/include/kvm/arm_pmu.h
@@ -61,7 +61,6 @@ int kvm_arm_pmu_v3_get_attr(struct kvm_vcpu *vcpu,
 int kvm_arm_pmu_v3_has_attr(struct kvm_vcpu *vcpu,
                            struct kvm_device_attr *attr);
 int kvm_arm_pmu_v3_enable(struct kvm_vcpu *vcpu);
-int kvm_pmu_probe_pmuver(void);
 #else
 struct kvm_pmu {
 };
@@ -118,8 +117,6 @@ static inline u64 kvm_pmu_get_pmceid(struct kvm_vcpu *vcpu, bool pmceid1)
        return 0;
 }
 
-static inline int kvm_pmu_probe_pmuver(void) { return 0xf; }
-
 #endif
 
 #endif
diff --git a/include/linux/perf/arm_pmu.h b/include/linux/perf/arm_pmu.h
index 505480217cf1a9b4f779b454d42bc78c464e189b..2512e2f9cd4e7964fe08c42052d4313f5cfc6679 100644
--- a/include/linux/perf/arm_pmu.h
+++ b/include/linux/perf/arm_pmu.h
@@ -163,6 +163,12 @@ int arm_pmu_acpi_probe(armpmu_init_fn init_fn);
 static inline int arm_pmu_acpi_probe(armpmu_init_fn init_fn) { return 0; }
 #endif
 
+#ifdef CONFIG_KVM
+void kvm_host_pmu_init(struct arm_pmu *pmu);
+#else
+#define kvm_host_pmu_init(x)   do { } while(0)
+#endif
+
 /* Internal functions only for core arm_pmu code */
 struct arm_pmu *armpmu_alloc(void);
 struct arm_pmu *armpmu_alloc_atomic(void);
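
For context, the static key flipped by kvm_host_pmu_init() is the one
consulted by kvm_arm_support_pmu_v3(). That helper is pre-existing and
untouched by this diff; quoted from include/kvm/arm_pmu.h roughly as
follows (treat as a sketch rather than the exact upstream text):

  static inline bool kvm_arm_support_pmu_v3(void)
  {
          return static_branch_likely(&kvm_arm_pmu_available);
  }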