perf/x86/amd/uncore: Avoid PMU registration if counters are unavailable
author: Sandipan Das <sandipan.das@amd.com>
Wed, 26 Jun 2024 07:44:04 +0000 (13:14 +0530)
committer: Peter Zijlstra <peterz@infradead.org>
Thu, 4 Jul 2024 14:00:41 +0000 (16:00 +0200)
X86_FEATURE_PERFCTR_NB and X86_FEATURE_PERFCTR_LLC are derived from
CPUID leaf 0x80000001 ECX bits 24 and 28 respectively and denote the
availability of DF and L3 counters. When these bits are not set, the
corresponding PMUs have no counters and hence, should not be registered.

Fixes: 07888daa056e ("perf/x86/amd/uncore: Move discovery and registration")
Signed-off-by: Sandipan Das <sandipan.das@amd.com>
Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Link: https://lkml.kernel.org/r/20240626074404.1044230-1-sandipan.das@amd.com
arch/x86/events/amd/uncore.c

index 0fafe233bba47d72620bd4d71df0f6994c8a9efb..a0b4405476b70fe086983f16173806ee803c0f42 100644 (file)
@@ -658,17 +658,20 @@ int amd_uncore_df_ctx_init(struct amd_uncore *uncore, unsigned int cpu)
 {
        struct attribute **df_attr = amd_uncore_df_format_attr;
        struct amd_uncore_pmu *pmu;
+       int num_counters;
 
        /* Run just once */
        if (uncore->init_done)
                return amd_uncore_ctx_init(uncore, cpu);
 
+       num_counters = amd_uncore_ctx_num_pmcs(uncore, cpu);
+       if (!num_counters)
+               goto done;
+
        /* No grouping, single instance for a system */
        uncore->pmus = kzalloc(sizeof(*uncore->pmus), GFP_KERNEL);
-       if (!uncore->pmus) {
-               uncore->num_pmus = 0;
+       if (!uncore->pmus)
                goto done;
-       }
 
        /*
         * For Family 17h and above, the Northbridge counters are repurposed
@@ -678,7 +681,7 @@ int amd_uncore_df_ctx_init(struct amd_uncore *uncore, unsigned int cpu)
        pmu = &uncore->pmus[0];
        strscpy(pmu->name, boot_cpu_data.x86 >= 0x17 ? "amd_df" : "amd_nb",
                sizeof(pmu->name));
-       pmu->num_counters = amd_uncore_ctx_num_pmcs(uncore, cpu);
+       pmu->num_counters = num_counters;
        pmu->msr_base = MSR_F15H_NB_PERF_CTL;
        pmu->rdpmc_base = RDPMC_BASE_NB;
        pmu->group = amd_uncore_ctx_gid(uncore, cpu);
@@ -789,17 +792,20 @@ int amd_uncore_l3_ctx_init(struct amd_uncore *uncore, unsigned int cpu)
 {
        struct attribute **l3_attr = amd_uncore_l3_format_attr;
        struct amd_uncore_pmu *pmu;
+       int num_counters;
 
        /* Run just once */
        if (uncore->init_done)
                return amd_uncore_ctx_init(uncore, cpu);
 
+       num_counters = amd_uncore_ctx_num_pmcs(uncore, cpu);
+       if (!num_counters)
+               goto done;
+
        /* No grouping, single instance for a system */
        uncore->pmus = kzalloc(sizeof(*uncore->pmus), GFP_KERNEL);
-       if (!uncore->pmus) {
-               uncore->num_pmus = 0;
+       if (!uncore->pmus)
                goto done;
-       }
 
        /*
         * For Family 17h and above, L3 cache counters are available instead
@@ -809,7 +815,7 @@ int amd_uncore_l3_ctx_init(struct amd_uncore *uncore, unsigned int cpu)
        pmu = &uncore->pmus[0];
        strscpy(pmu->name, boot_cpu_data.x86 >= 0x17 ? "amd_l3" : "amd_l2",
                sizeof(pmu->name));
-       pmu->num_counters = amd_uncore_ctx_num_pmcs(uncore, cpu);
+       pmu->num_counters = num_counters;
        pmu->msr_base = MSR_F16H_L2I_PERF_CTL;
        pmu->rdpmc_base = RDPMC_BASE_LLC;
        pmu->group = amd_uncore_ctx_gid(uncore, cpu);