perf cpumap: Switch to using perf_cpu_map API
author Ian Rogers <irogers@google.com>
Tue, 3 May 2022 04:17:52 +0000 (21:17 -0700)
committer Arnaldo Carvalho de Melo <acme@redhat.com>
Thu, 5 May 2022 17:07:27 +0000 (14:07 -0300)
Switch some raw accesses to the cpu map over to the library API. This
can help with reference count checking. Some BPF cases switch from
index to CPU for consistency; this shouldn't matter as the CPU map is
full.
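
For reference, a minimal standalone sketch (not part of this patch) of
the libperf cpumap accessor pattern the conversion moves to; the "0-3"
CPU list and the printf() calls are illustrative only:

  #include <perf/cpumap.h>
  #include <stdio.h>

  int main(void)
  {
          struct perf_cpu_map *cpus = perf_cpu_map__new("0-3");
          struct perf_cpu cpu;
          int idx;

          if (!cpus)
                  return -1;

          /* was: for (i = 0; i < cpus->nr; i++) use(cpus->map[i].cpu); */
          perf_cpu_map__for_each_cpu(cpu, idx, cpus)
                  printf("idx %d -> cpu %d\n", idx, cpu.cpu);

          /* random access goes through perf_cpu_map__cpu(), not ->map[] */
          printf("cpu at index 0: %d\n", perf_cpu_map__cpu(cpus, 0).cpu);

          perf_cpu_map__put(cpus);
          return 0;
  }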

Signed-off-by: Ian Rogers <irogers@google.com>
Acked-by: Namhyung Kim <namhyung@kernel.org>
Cc: Adrian Hunter <adrian.hunter@intel.com>
Cc: Alexander Antonov <alexander.antonov@linux.intel.com>
Cc: Alexander Shishkin <alexander.shishkin@linux.intel.com>
Cc: Alexei Starovoitov <ast@kernel.org>
Cc: Alexey Bayduraev <alexey.v.bayduraev@linux.intel.com>
Cc: Andi Kleen <ak@linux.intel.com>
Cc: Andrii Nakryiko <andrii@kernel.org>
Cc: Daniel Borkmann <daniel@iogearbox.net>
Cc: German Gomez <german.gomez@arm.com>
Cc: James Clark <james.clark@arm.com>
Cc: Jiri Olsa <jolsa@kernel.org>
Cc: John Fastabend <john.fastabend@gmail.com>
Cc: John Garry <john.garry@huawei.com>
Cc: KP Singh <kpsingh@kernel.org>
Cc: Kajol Jain <kjain@linux.ibm.com>
Cc: Leo Yan <leo.yan@linaro.org>
Cc: Mark Rutland <mark.rutland@arm.com>
Cc: Martin KaFai Lau <kafai@fb.com>
Cc: Mathieu Poirier <mathieu.poirier@linaro.org>
Cc: Mike Leach <mike.leach@linaro.org>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Riccardo Mancini <rickyman7@gmail.com>
Cc: Song Liu <songliubraving@fb.com>
Cc: Stephane Eranian <eranian@google.com>
Cc: Suzuki Poulouse <suzuki.poulose@arm.com>
Cc: Will Deacon <will@kernel.org>
Cc: Yonghong Song <yhs@fb.com>
Link: http://lore.kernel.org/lkml/20220503041757.2365696-2-irogers@google.com
Signed-off-by: Arnaldo Carvalho de Melo <acme@redhat.com>
tools/perf/builtin-record.c
tools/perf/util/bpf_counter_cgroup.c

index 069825c48d404bc4dbe4901f0f45b8d97f683ff9..a5cf6a99d67f78c42885d58101e2994839a6b30b 100644
@@ -1011,7 +1011,7 @@ static int record__thread_data_init_maps(struct record_thread *thread_data, stru
 
        for (m = 0, tm = 0; m < nr_mmaps && tm < thread_data->nr_mmaps; m++) {
                if (cpu_map__is_dummy(cpus) ||
-                   test_bit(cpus->map[m].cpu, thread_data->mask->maps.bits)) {
+                   test_bit(perf_cpu_map__cpu(cpus, m).cpu, thread_data->mask->maps.bits)) {
                        if (thread_data->maps) {
                                thread_data->maps[tm] = &mmap[m];
                                pr_debug2("thread_data[%p]: cpu%d: maps[%d] -> mmap[%d]\n",
@@ -3331,13 +3331,14 @@ struct option *record_options = __record_options;
 
 static void record__mmap_cpu_mask_init(struct mmap_cpu_mask *mask, struct perf_cpu_map *cpus)
 {
-       int c;
+       struct perf_cpu cpu;
+       int idx;
 
        if (cpu_map__is_dummy(cpus))
                return;
 
-       for (c = 0; c < cpus->nr; c++)
-               set_bit(cpus->map[c].cpu, mask->bits);
+       perf_cpu_map__for_each_cpu(cpu, idx, cpus)
+               set_bit(cpu.cpu, mask->bits);
 }
 
 static int record__mmap_cpu_mask_init_spec(struct mmap_cpu_mask *mask, const char *mask_spec)
@@ -3404,8 +3405,8 @@ static int record__init_thread_cpu_masks(struct record *rec, struct perf_cpu_map
        pr_debug("nr_threads: %d\n", rec->nr_threads);
 
        for (t = 0; t < rec->nr_threads; t++) {
-               set_bit(cpus->map[t].cpu, rec->thread_masks[t].maps.bits);
-               set_bit(cpus->map[t].cpu, rec->thread_masks[t].affinity.bits);
+               set_bit(perf_cpu_map__cpu(cpus, t).cpu, rec->thread_masks[t].maps.bits);
+               set_bit(perf_cpu_map__cpu(cpus, t).cpu, rec->thread_masks[t].affinity.bits);
                if (verbose) {
                        pr_debug("thread_masks[%d]: ", t);
                        mmap_cpu_mask__scnprintf(&rec->thread_masks[t].maps, "maps");
index ac60c08e8e2a5ba4653c554f8b6b23200fb8d39d..63b9db657442510a6e8a4cc69266ad4aa4b49bde 100644
@@ -46,8 +46,8 @@ static int bperf_load_program(struct evlist *evlist)
        struct bpf_link *link;
        struct evsel *evsel;
        struct cgroup *cgrp, *leader_cgrp;
-       __u32 i, cpu;
-       __u32 nr_cpus = evlist->core.all_cpus->nr;
+       int i, j;
+       struct perf_cpu cpu;
        int total_cpus = cpu__max_cpu().cpu;
        int map_size, map_fd;
        int prog_fd, err;
@@ -93,9 +93,9 @@ static int bperf_load_program(struct evlist *evlist)
                goto out;
        }
 
-       for (i = 0; i < nr_cpus; i++) {
+       perf_cpu_map__for_each_cpu(cpu, i, evlist->core.all_cpus) {
                link = bpf_program__attach_perf_event(skel->progs.on_cgrp_switch,
-                                                     FD(cgrp_switch, i));
+                                                     FD(cgrp_switch, cpu.cpu));
                if (IS_ERR(link)) {
                        pr_err("Failed to attach cgroup program\n");
                        err = PTR_ERR(link);
@@ -122,10 +122,9 @@ static int bperf_load_program(struct evlist *evlist)
                        }
 
                        map_fd = bpf_map__fd(skel->maps.events);
-                       for (cpu = 0; cpu < nr_cpus; cpu++) {
-                               int fd = FD(evsel, cpu);
-                               __u32 idx = evsel->core.idx * total_cpus +
-                                       evlist->core.all_cpus->map[cpu].cpu;
+                       perf_cpu_map__for_each_cpu(cpu, j, evlist->core.all_cpus) {
+                               int fd = FD(evsel, cpu.cpu);
+                               __u32 idx = evsel->core.idx * total_cpus + cpu.cpu;
 
                                err = bpf_map_update_elem(map_fd, &idx, &fd,
                                                          BPF_ANY);
@@ -207,14 +206,12 @@ static int bperf_cgrp__install_pe(struct evsel *evsel __maybe_unused,
  */
 static int bperf_cgrp__sync_counters(struct evlist *evlist)
 {
-       int i, cpu;
-       int nr_cpus = evlist->core.all_cpus->nr;
+       struct perf_cpu cpu;
+       int idx;
        int prog_fd = bpf_program__fd(skel->progs.trigger_read);
 
-       for (i = 0; i < nr_cpus; i++) {
-               cpu = evlist->core.all_cpus->map[i].cpu;
-               bperf_trigger_reading(prog_fd, cpu);
-       }
+       perf_cpu_map__for_each_cpu(cpu, idx, evlist->core.all_cpus)
+               bperf_trigger_reading(prog_fd, cpu.cpu);
 
        return 0;
 }
@@ -244,12 +241,10 @@ static int bperf_cgrp__disable(struct evsel *evsel)
 static int bperf_cgrp__read(struct evsel *evsel)
 {
        struct evlist *evlist = evsel->evlist;
-       int i, cpu, nr_cpus = evlist->core.all_cpus->nr;
        int total_cpus = cpu__max_cpu().cpu;
        struct perf_counts_values *counts;
        struct bpf_perf_event_value *values;
        int reading_map_fd, err = 0;
-       __u32 idx;
 
        if (evsel->core.idx)
                return 0;
@@ -263,7 +258,10 @@ static int bperf_cgrp__read(struct evsel *evsel)
        reading_map_fd = bpf_map__fd(skel->maps.cgrp_readings);
 
        evlist__for_each_entry(evlist, evsel) {
-               idx = evsel->core.idx;
+               __u32 idx = evsel->core.idx;
+               int i;
+               struct perf_cpu cpu;
+
                err = bpf_map_lookup_elem(reading_map_fd, &idx, values);
                if (err) {
                        pr_err("bpf map lookup failed: idx=%u, event=%s, cgrp=%s\n",
@@ -271,13 +269,11 @@ static int bperf_cgrp__read(struct evsel *evsel)
                        goto out;
                }
 
-               for (i = 0; i < nr_cpus; i++) {
-                       cpu = evlist->core.all_cpus->map[i].cpu;
-
+               perf_cpu_map__for_each_cpu(cpu, i, evlist->core.all_cpus) {
                        counts = perf_counts(evsel->counts, i, 0);
-                       counts->val = values[cpu].counter;
-                       counts->ena = values[cpu].enabled;
-                       counts->run = values[cpu].running;
+                       counts->val = values[cpu.cpu].counter;
+                       counts->ena = values[cpu.cpu].enabled;
+                       counts->run = values[cpu.cpu].running;
                }
        }