perf mem: Avoid hybrid PMU list
author Ian Rogers <irogers@google.com>
Sat, 27 May 2023 07:21:59 +0000 (00:21 -0700)
committer Arnaldo Carvalho de Melo <acme@redhat.com>
Sat, 27 May 2023 12:41:09 +0000 (09:41 -0300)
Add perf_pmu__num_mem_pmus, which scans and counts the number of PMUs
usable for mem events. Switch users of perf_pmu__for_each_hybrid_pmu to
iterating over all PMUs and handling only the is_core ones.

Reviewed-by: Kan Liang <kan.liang@linux.intel.com>
Signed-off-by: Ian Rogers <irogers@google.com>
Cc: Adrian Hunter <adrian.hunter@intel.com>
Cc: Alexander Shishkin <alexander.shishkin@linux.intel.com>
Cc: Ali Saidi <alisaidi@amazon.com>
Cc: Athira Rajeev <atrajeev@linux.vnet.ibm.com>
Cc: Dmitrii Dolgov <9erthalion6@gmail.com>
Cc: Huacai Chen <chenhuacai@kernel.org>
Cc: Ingo Molnar <mingo@redhat.com>
Cc: James Clark <james.clark@arm.com>
Cc: Jing Zhang <renyu.zj@linux.alibaba.com>
Cc: Jiri Olsa <jolsa@kernel.org>
Cc: John Garry <john.g.garry@oracle.com>
Cc: Kajol Jain <kjain@linux.ibm.com>
Cc: Kang Minchul <tegongkang@gmail.com>
Cc: Leo Yan <leo.yan@linaro.org>
Cc: Madhavan Srinivasan <maddy@linux.ibm.com>
Cc: Mark Rutland <mark.rutland@arm.com>
Cc: Mike Leach <mike.leach@linaro.org>
Cc: Ming Wang <wangming01@loongson.cn>
Cc: Namhyung Kim <namhyung@kernel.org>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Ravi Bangoria <ravi.bangoria@amd.com>
Cc: Rob Herring <robh@kernel.org>
Cc: Sandipan Das <sandipan.das@amd.com>
Cc: Sean Christopherson <seanjc@google.com>
Cc: Suzuki Poulouse <suzuki.poulose@arm.com>
Cc: Thomas Richter <tmricht@linux.ibm.com>
Cc: Will Deacon <will@kernel.org>
Cc: Xing Zhengjun <zhengjun.xing@linux.intel.com>
Cc: coresight@lists.linaro.org
Cc: linux-arm-kernel@lists.infradead.org
Link: https://lore.kernel.org/r/20230527072210.2900565-24-irogers@google.com
Signed-off-by: Arnaldo Carvalho de Melo <acme@redhat.com>
tools/perf/builtin-c2c.c
tools/perf/builtin-mem.c
tools/perf/util/mem-events.c
tools/perf/util/pmu.c
tools/perf/util/pmu.h

index 08455e26b606f643183e29bbce729985e438b33f..2757ccc19c5e3b5b64246ff8f1646cf201680b96 100644 (file)
@@ -42,7 +42,6 @@
 #include "ui/ui.h"
 #include "ui/progress.h"
 #include "pmu.h"
-#include "pmu-hybrid.h"
 #include "string2.h"
 #include "util/util.h"
 
@@ -3259,10 +3258,8 @@ static int perf_c2c__record(int argc, const char **argv)
        argc = parse_options(argc, argv, options, record_mem_usage,
                             PARSE_OPT_KEEP_UNKNOWN);
 
-       if (!perf_pmu__has_hybrid())
-               rec_argc = argc + 11; /* max number of arguments */
-       else
-               rec_argc = argc + 11 * perf_pmu__hybrid_pmu_num();
+       /* Max number of arguments multiplied by number of PMUs that can support them. */
+       rec_argc = argc + 11 * perf_pmu__num_mem_pmus();
 
        rec_argv = calloc(rec_argc + 1, sizeof(char *));
        if (!rec_argv)
index 65465930ef8e4fe5cf7409d516b23c8c3f723794..f4f1ff76d49de81db5747f5f9f8720a756e6652c 100644 (file)
@@ -18,7 +18,6 @@
 #include "util/map.h"
 #include "util/symbol.h"
 #include "util/pmu.h"
-#include "util/pmu-hybrid.h"
 #include "util/sample.h"
 #include "util/string2.h"
 #include "util/util.h"
@@ -93,10 +92,8 @@ static int __cmd_record(int argc, const char **argv, struct perf_mem *mem)
        argc = parse_options(argc, argv, options, record_mem_usage,
                             PARSE_OPT_KEEP_UNKNOWN);
 
-       if (!perf_pmu__has_hybrid())
-               rec_argc = argc + 9; /* max number of arguments */
-       else
-               rec_argc = argc + 9 * perf_pmu__hybrid_pmu_num();
+       /* Max number of arguments multiplied by number of PMUs that can support them. */
+       rec_argc = argc + 9 * perf_pmu__num_mem_pmus();
 
        if (mem->cpu_list)
                rec_argc += 2;
index ed1ee4b05356e828f554ab38e68dd42b4217b8b6..c9e422a382582f9ac6208a5d60740e51090750a7 100644 (file)
@@ -13,7 +13,6 @@
 #include "debug.h"
 #include "symbol.h"
 #include "pmu.h"
-#include "pmu-hybrid.h"
 
 unsigned int perf_mem_events__loads_ldlat = 30;
 
@@ -120,7 +119,6 @@ int perf_mem_events__init(void)
 
        for (j = 0; j < PERF_MEM_EVENTS__MAX; j++) {
                struct perf_mem_event *e = perf_mem_events__ptr(j);
-               struct perf_pmu *pmu;
                char sysfs_name[100];
 
                /*
@@ -135,7 +133,12 @@ int perf_mem_events__init(void)
                                  e->sysfs_name, "cpu");
                        e->supported = perf_mem_event__supported(mnt, sysfs_name);
                } else {
-                       perf_pmu__for_each_hybrid_pmu(pmu) {
+                       struct perf_pmu *pmu = NULL;
+
+                       while ((pmu = perf_pmu__scan(pmu)) != NULL) {
+                               if (!pmu->is_core)
+                                       continue;
+
                                scnprintf(sysfs_name, sizeof(sysfs_name),
                                          e->sysfs_name, pmu->name);
                                e->supported |= perf_mem_event__supported(mnt, sysfs_name);
@@ -170,9 +173,12 @@ static void perf_mem_events__print_unsupport_hybrid(struct perf_mem_event *e,
 {
        const char *mnt = sysfs__mount();
        char sysfs_name[100];
-       struct perf_pmu *pmu;
+       struct perf_pmu *pmu = NULL;
+
+       while ((pmu = perf_pmu__scan(pmu)) != NULL) {
+               if (!pmu->is_core)
+                       continue;
 
-       perf_pmu__for_each_hybrid_pmu(pmu) {
                scnprintf(sysfs_name, sizeof(sysfs_name), e->sysfs_name,
                          pmu->name);
                if (!perf_mem_event__supported(mnt, sysfs_name)) {
@@ -210,7 +216,9 @@ int perf_mem_events__record_args(const char **rec_argv, int *argv_nr,
                                return -1;
                        }
 
-                       perf_pmu__for_each_hybrid_pmu(pmu) {
+                       while ((pmu = perf_pmu__scan(pmu)) != NULL) {
+                               if (!pmu->is_core)
+                                       continue;
                                rec_argv[i++] = "-e";
                                s = perf_mem_events__name(j, pmu->name);
                                if (s) {
index 5a7bfbf621d0823532b372d5a972d11d1bc1f053..65daa0cc71d61dbbfd004b6b5d83c1144601ad11 100644 (file)
@@ -1660,6 +1660,23 @@ bool perf_pmu__auto_merge_stats(const struct perf_pmu *pmu)
        return !is_pmu_hybrid(pmu->name);
 }
 
+static bool perf_pmu__is_mem_pmu(const struct perf_pmu *pmu)
+{
+       return pmu->is_core;
+}
+
+int perf_pmu__num_mem_pmus(void)
+{
+       struct perf_pmu *pmu = NULL;
+       int count = 0;
+
+       while ((pmu = perf_pmu__scan(pmu)) != NULL) {
+               if (perf_pmu__is_mem_pmu(pmu))
+                       count++;
+       }
+       return count;
+}
+
 static bool pmu_alias_is_duplicate(struct sevent *alias_a,
                                   struct sevent *alias_b)
 {
index af10d137e2b5ccdc13acf0405f39be50a83bda26..5f5de7c20ab6ae85203657d9d0a6e593010df81f 100644 (file)
@@ -231,6 +231,7 @@ bool is_pmu_hybrid(const char *name);
 bool perf_pmu__supports_legacy_cache(const struct perf_pmu *pmu);
 bool perf_pmu__supports_wildcard_numeric(const struct perf_pmu *pmu);
 bool perf_pmu__auto_merge_stats(const struct perf_pmu *pmu);
+int perf_pmu__num_mem_pmus(void);
 void print_pmu_events(const struct print_callbacks *print_cb, void *print_state);
 bool pmu_have_event(const char *pname, const char *name);