perf env: Add perf_env__numa_node()
author    Jiri Olsa <jolsa@kernel.org>
Thu, 29 Aug 2019 11:31:48 +0000 (13:31 +0200)
committer Arnaldo Carvalho de Melo <acme@redhat.com>
Wed, 6 Nov 2019 18:49:39 +0000 (15:49 -0300)
To speed up cpu to node lookups, add perf_env__numa_node(), which on
the first lookup builds an array, indexed by cpu, holding the numa
node for each stored cpu.
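
As a usage sketch (hypothetical caller names, not part of this patch),
a consumer can resolve a sample's cpu to its node without walking the
per-node cpu maps on every lookup:

  /* Illustrative only: resolve the node for a sample's cpu. */
  static int sample_numa_node(struct perf_env *env, struct perf_sample *sample)
  {
          /* Returns the node id, or -1 for cpus missing from every node map. */
          return perf_env__numa_node(env, sample->cpu);
  }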

Signed-off-by: Jiri Olsa <jolsa@kernel.org>
Cc: Alexander Shishkin <alexander.shishkin@linux.intel.com>
Cc: Alexey Budankov <alexey.budankov@linux.intel.com>
Cc: Andi Kleen <ak@linux.intel.com>
Cc: Joe Mario <jmario@redhat.com>
Cc: Kan Liang <kan.liang@linux.intel.com>
Cc: Michael Petlan <mpetlan@redhat.com>
Cc: Namhyung Kim <namhyung@kernel.org>
Cc: Peter Zijlstra <a.p.zijlstra@chello.nl>
Link: http://lkml.kernel.org/r/20190904073415.723-3-jolsa@kernel.org
Signed-off-by: Arnaldo Carvalho de Melo <acme@redhat.com>
tools/perf/util/env.c
tools/perf/util/env.h

index 2a91a10ccfcc081e0765f5c7e2985fd5d6d10563..6242a9215df7ee325265646dd9ce71acbc0c73b8 100644
@@ -180,6 +180,7 @@ void perf_env__exit(struct perf_env *env)
        zfree(&env->sibling_threads);
        zfree(&env->pmu_mappings);
        zfree(&env->cpu);
+       zfree(&env->numa_map);
 
        for (i = 0; i < env->nr_numa_nodes; i++)
                perf_cpu_map__put(env->numa_nodes[i].map);
@@ -354,3 +355,42 @@ const char *perf_env__arch(struct perf_env *env)
 
        return normalize_arch(arch_name);
 }
+
+
+int perf_env__numa_node(struct perf_env *env, int cpu)
+{
+       if (!env->nr_numa_map) {
+               struct numa_node *nn;
+               int i, nr = 0;
+
+               for (i = 0; i < env->nr_numa_nodes; i++) {
+                       nn = &env->numa_nodes[i];
+                       nr = max(nr, perf_cpu_map__max(nn->map));
+               }
+
+               nr++;
+
+               /*
+                * We initialize the numa_map array to prepare
+                * it for missing cpus, which return node -1
+                */
+               env->numa_map = malloc(nr * sizeof(int));
+               if (!env->numa_map)
+                       return -1;
+
+               for (i = 0; i < nr; i++)
+                       env->numa_map[i] = -1;
+
+               env->nr_numa_map = nr;
+
+               for (i = 0; i < env->nr_numa_nodes; i++) {
+                       int tmp, j;
+
+                       nn = &env->numa_nodes[i];
+                       perf_cpu_map__for_each_cpu(j, tmp, nn->map)
+                               env->numa_map[j] = i;
+               }
+       }
+
+       return cpu >= 0 && cpu < env->nr_numa_map ? env->numa_map[cpu] : -1;
+}
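
To make the sizing and -1 initialization above concrete, here is a
worked example under an assumed topology (illustrative, not from the
patch):

  /*
   * Assumed topology: node 0 holds cpus 0-3, node 1 holds cpus 8-11.
   * perf_cpu_map__max() over both maps yields 11, so nr = 12 and:
   *
   *   numa_map = { 0, 0, 0, 0, -1, -1, -1, -1, 1, 1, 1, 1 }
   *
   * perf_env__numa_node(env, 2)  ->  0
   * perf_env__numa_node(env, 6)  -> -1   (cpu in no node's map)
   * perf_env__numa_node(env, 64) -> -1   (out of range)
   */
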
index a3059dc1abe5a6c24e7e95d1c825568ac394ddb2..11d05ae3606a012df2070b0a0b1307669efabea2 100644
@@ -87,6 +87,10 @@ struct perf_env {
                struct rb_root          btfs;
                u32                     btfs_cnt;
        } bpf_progs;
+
+       /* For fast cpu to numa node lookup via perf_env__numa_node */
+       int                     *numa_map;
+       int                      nr_numa_map;
 };
 
 enum perf_compress_type {
@@ -120,4 +124,6 @@ struct bpf_prog_info_node *perf_env__find_bpf_prog_info(struct perf_env *env,
                                                        __u32 prog_id);
 void perf_env__insert_btf(struct perf_env *env, struct btf_node *btf_node);
 struct btf_node *perf_env__find_btf(struct perf_env *env, __u32 btf_id);
+
+int perf_env__numa_node(struct perf_env *env, int cpu);
 #endif /* __PERF_ENV_H */
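
A minimal caller sketch against the new declaration (hypothetical
debug helper; assumes <stdio.h> and that env->nr_cpus_avail has been
populated from the perf.data header):

  /* Illustrative only: dump the cpu -> node mapping from a parsed env. */
  static void dump_numa_map(struct perf_env *env)
  {
          int cpu;

          for (cpu = 0; cpu < env->nr_cpus_avail; cpu++)
                  fprintf(stderr, "cpu %d -> node %d\n",
                          cpu, perf_env__numa_node(env, cpu));
  }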