perf cpumap: Increment reference count for online cpumap
author     Ian Rogers <irogers@google.com>
Tue, 18 Mar 2025 17:19:14 +0000 (10:19 -0700)
committer  Namhyung Kim <namhyung@kernel.org>
Wed, 19 Mar 2025 23:56:33 +0000 (16:56 -0700)
Thomas Richter <tmricht@linux.ibm.com> reported a double put on the
cpumap for the placeholder core PMU:
https://lore.kernel.org/lkml/20250318095132.1502654-3-tmricht@linux.ibm.com/
Requiring the caller to get the cpumap is not how these things are
usually done. Switch cpu_map__online() to do the get itself and fix
up the use cases where a put is now needed.
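
As an illustration only (not part of this patch), the get/put pairing
that callers now follow looks roughly like the hypothetical helper
below; cpu_map__online(), perf_cpu_map__nr() and perf_cpu_map__put()
are the existing helpers touched by this change, everything else is a
sketch:

    /* Hypothetical example, mirroring the tool_pmu.c hunk below. */
    #include "util/cpumap.h"   /* cpu_map__online() */
    #include <perf/cpumap.h>   /* perf_cpu_map__nr(), perf_cpu_map__put() */

    static int nr_online_cpus(void)
    {
            /* cpu_map__online() now returns a new reference ... */
            struct perf_cpu_map *online = cpu_map__online();
            int nr = 0;

            if (online) {
                    nr = perf_cpu_map__nr(online);
                    /* ... so the caller must drop it when done. */
                    perf_cpu_map__put(online);
            }
            return nr;
    }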

Signed-off-by: Ian Rogers <irogers@google.com>
Tested-by: Thomas Richter <tmricht@linux.ibm.com>
Link: https://lore.kernel.org/r/20250318171914.145616-1-irogers@google.com
Signed-off-by: Namhyung Kim <namhyung@kernel.org>
tools/perf/arch/arm/util/pmu.c
tools/perf/util/cpumap.c
tools/perf/util/evlist.c
tools/perf/util/mem-events.c
tools/perf/util/mmap.c
tools/perf/util/pmu.c
tools/perf/util/tool_pmu.c

diff --git a/tools/perf/arch/arm/util/pmu.c b/tools/perf/arch/arm/util/pmu.c
index 57dc94a6e38ce21a13c21491a72d1d8c167d0011..f70075c89aa07a3169d0082a81160b195b850a71 100644
@@ -18,7 +18,7 @@
 
 void perf_pmu__arch_init(struct perf_pmu *pmu)
 {
-       struct perf_cpu_map *intersect;
+       struct perf_cpu_map *intersect, *online = cpu_map__online();
 
 #ifdef HAVE_AUXTRACE_SUPPORT
        if (!strcmp(pmu->name, CORESIGHT_ETM_PMU_NAME)) {
@@ -41,7 +41,8 @@ void perf_pmu__arch_init(struct perf_pmu *pmu)
        }
 #endif
        /* Workaround some ARM PMU's failing to correctly set CPU maps for online processors. */
-       intersect = perf_cpu_map__intersect(cpu_map__online(), pmu->cpus);
+       intersect = perf_cpu_map__intersect(online, pmu->cpus);
+       perf_cpu_map__put(online);
        perf_cpu_map__put(pmu->cpus);
        pmu->cpus = intersect;
 }
diff --git a/tools/perf/util/cpumap.c b/tools/perf/util/cpumap.c
index 9bc5e037023462e68b67d9b59bc52faef752e5cc..89570397a4b350d738a31d62e4bea8fa13c9a13c 100644
@@ -722,7 +722,7 @@ struct perf_cpu_map *cpu_map__online(void) /* thread unsafe */
        if (!online)
                online = perf_cpu_map__new_online_cpus(); /* from /sys/devices/system/cpu/online */
 
-       return online;
+       return perf_cpu_map__get(online);
 }
 
 bool aggr_cpu_id__equal(const struct aggr_cpu_id *a, const struct aggr_cpu_id *b)
diff --git a/tools/perf/util/evlist.c b/tools/perf/util/evlist.c
index 49e10d6981add4e3af6332a41f85cb643510e449..c1a04141aed0bc6a69cf14e526cc02c939c7c769 100644
@@ -2534,10 +2534,10 @@ void evlist__warn_user_requested_cpus(struct evlist *evlist, const char *cpu_lis
                return;
 
        evlist__for_each_entry(evlist, pos) {
-               struct perf_cpu_map *intersect, *to_test;
+               struct perf_cpu_map *intersect, *to_test, *online = cpu_map__online();
                const struct perf_pmu *pmu = evsel__find_pmu(pos);
 
-               to_test = pmu && pmu->is_core ? pmu->cpus : cpu_map__online();
+               to_test = pmu && pmu->is_core ? pmu->cpus : online;
                intersect = perf_cpu_map__intersect(to_test, user_requested_cpus);
                if (!perf_cpu_map__equal(intersect, user_requested_cpus)) {
                        char buf[128];
@@ -2547,6 +2547,7 @@ void evlist__warn_user_requested_cpus(struct evlist *evlist, const char *cpu_lis
                                cpu_list, pmu ? pmu->name : "cpu", buf, evsel__name(pos));
                }
                perf_cpu_map__put(intersect);
+               perf_cpu_map__put(online);
        }
        perf_cpu_map__put(user_requested_cpus);
 }
diff --git a/tools/perf/util/mem-events.c b/tools/perf/util/mem-events.c
index 9011784b950d76be2f778bb2c64fce59e13d2a77..884d9aebce9199c0f55c3211d163ffc139c54be4 100644
@@ -303,12 +303,15 @@ int perf_mem_events__record_args(const char **rec_argv, int *argv_nr, char **eve
        }
 
        if (cpu_map) {
-               if (!perf_cpu_map__equal(cpu_map, cpu_map__online())) {
+               struct perf_cpu_map *online = cpu_map__online();
+
+               if (!perf_cpu_map__equal(cpu_map, online)) {
                        char buf[200];
 
                        cpu_map__snprint(cpu_map, buf, sizeof(buf));
                        pr_warning("Memory events are enabled on a subset of CPUs: %s\n", buf);
                }
+               perf_cpu_map__put(online);
                perf_cpu_map__put(cpu_map);
        }
 
diff --git a/tools/perf/util/mmap.c b/tools/perf/util/mmap.c
index a7ef4d8d57d8bc9efc097e2fdc173ea76bc39487..a34726219af3da0b76465a5d071eaf180c65ca58 100644
@@ -244,9 +244,8 @@ static void build_node_mask(int node, struct mmap_cpu_mask *mask)
 {
        int idx, nr_cpus;
        struct perf_cpu cpu;
-       const struct perf_cpu_map *cpu_map = NULL;
+       struct perf_cpu_map *cpu_map = cpu_map__online();
 
-       cpu_map = cpu_map__online();
        if (!cpu_map)
                return;
 
@@ -256,6 +255,7 @@ static void build_node_mask(int node, struct mmap_cpu_mask *mask)
                if (cpu__get_node(cpu) == node)
                        __set_bit(cpu.cpu, mask->bits);
        }
+       perf_cpu_map__put(cpu_map);
 }
 
 static int perf_mmap__setup_affinity_mask(struct mmap *map, struct mmap_params *mp)
diff --git a/tools/perf/util/pmu.c b/tools/perf/util/pmu.c
index 57450c73fb63303bca6a35e3e34ddaef20f42556..b7ebac5ab1d11267b12233ce110cdfd0c9764a09 100644
@@ -779,7 +779,7 @@ static struct perf_cpu_map *pmu_cpumask(int dirfd, const char *pmu_name, bool is
        }
 
        /* Nothing found, for core PMUs assume this means all CPUs. */
-       return is_core ? perf_cpu_map__get(cpu_map__online()) : NULL;
+       return is_core ? cpu_map__online() : NULL;
 }
 
 static bool pmu_is_uncore(int dirfd, const char *name)
diff --git a/tools/perf/util/tool_pmu.c b/tools/perf/util/tool_pmu.c
index 9156745ea180d33fe62c939b7a4f9912fe943760..b60ac390d52d27459881bb60031e767423cb906f 100644
@@ -355,6 +355,7 @@ bool tool_pmu__read_event(enum tool_pmu_event ev, u64 *result)
 
                if (online) {
                        *result = perf_cpu_map__nr(online);
+                       perf_cpu_map__put(online);
                        return true;
                }
                return false;