perf lock contention: Support pre-5.14 kernels
author Ian Rogers <irogers@google.com>
Sat, 8 Apr 2023 05:52:07 +0000 (22:52 -0700)
committer Arnaldo Carvalho de Melo <acme@redhat.com>
Mon, 10 Apr 2023 21:55:12 +0000 (18:55 -0300)
The 'struct rq' member 'lock' was renamed to '__lock' in Linux 5.14, so
use CO-RE field existence checks to support both the old and new layouts.

Signed-off-by: Ian Rogers <irogers@google.com>
Acked-by: Namhyung Kim <namhyung@kernel.org>
Cc: Adrian Hunter <adrian.hunter@intel.com>
Cc: Alexander Shishkin <alexander.shishkin@linux.intel.com>
Cc: Ingo Molnar <mingo@redhat.com>
Cc: Jiri Olsa <jolsa@kernel.org>
Cc: Mark Rutland <mark.rutland@arm.com>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: bpf@vger.kernel.org
Link: https://lore.kernel.org/r/20230408055208.1283832-1-irogers@google.com
Signed-off-by: Arnaldo Carvalho de Melo <acme@redhat.com>
tools/perf/util/bpf_skel/lock_contention.bpf.c

index 23f6e63544ed40d2e229bbbefc8b5aaf16137691..8911e2a077d8cb98b159e62906f5f48e5df9d12c 100644 (file)
@@ -418,6 +418,14 @@ int contention_end(u64 *ctx)
 
 extern struct rq runqueues __ksym;
 
+struct rq__old {
+       raw_spinlock_t lock;
+} __attribute__((preserve_access_index));
+
+struct rq__new {
+       raw_spinlock_t __lock;
+} __attribute__((preserve_access_index));
+
 SEC("raw_tp/bpf_test_finish")
 int BPF_PROG(collect_lock_syms)
 {
@@ -426,11 +434,16 @@ int BPF_PROG(collect_lock_syms)
 
        for (int i = 0; i < MAX_CPUS; i++) {
                struct rq *rq = bpf_per_cpu_ptr(&runqueues, i);
+               struct rq__new *rq_new = (void *)rq;
+               struct rq__old *rq_old = (void *)rq;
 
                if (rq == NULL)
                        break;
 
-               lock_addr = (__u64)&rq->__lock;
+               if (bpf_core_field_exists(rq_new->__lock))
+                       lock_addr = (__u64)&rq_new->__lock;
+               else
+                       lock_addr = (__u64)&rq_old->lock;
                lock_flag = LOCK_CLASS_RQLOCK;
                bpf_map_update_elem(&lock_syms, &lock_addr, &lock_flag, BPF_ANY);
        }