perf lock: Use BPF for lock contention analysis
tools/perf/util/bpf_lock_contention.c
// SPDX-License-Identifier: GPL-2.0
#include "util/debug.h"
#include "util/machine.h"
#include "util/map.h"
#include "util/symbol.h"
#include "util/lock-contention.h"
#include <linux/zalloc.h>
#include <bpf/bpf.h>

#include "bpf_skel/lock_contention.skel.h"

static struct lock_contention_bpf *skel;

/* must match the definitions in bpf_skel/lock_contention.bpf.c */
struct lock_contention_key {
	u32 stack_id;
};

struct lock_contention_data {
	u64 total_time;
	u64 min_time;
	u64 max_time;
	u32 count;
	u32 flags;
};

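/*
 * For reference, a rough sketch of what the matching definitions on the
 * BPF side (bpf_skel/lock_contention.bpf.c) could look like.  The map
 * names 'lock_stat' and 'stacks' are taken from their use via skel->maps
 * below; the map sizes and attach points are assumptions, not the actual
 * implementation:
 *
 *	struct {
 *		__uint(type, BPF_MAP_TYPE_STACK_TRACE);
 *		__uint(key_size, sizeof(__u32));
 *		__uint(value_size, CONTENTION_STACK_DEPTH * sizeof(__u64));
 *		__uint(max_entries, 16384);
 *	} stacks SEC(".maps");
 *
 *	struct {
 *		__uint(type, BPF_MAP_TYPE_HASH);
 *		__uint(key_size, sizeof(struct lock_contention_key));
 *		__uint(value_size, sizeof(struct lock_contention_data));
 *		__uint(max_entries, 16384);
 *	} lock_stat SEC(".maps");
 *
 *	int enabled;
 */
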
int lock_contention_prepare(void)
{
	skel = lock_contention_bpf__open();
	if (!skel) {
		pr_err("Failed to open lock-contention BPF skeleton\n");
		return -1;
	}

	if (lock_contention_bpf__load(skel) < 0) {
		pr_err("Failed to load lock-contention BPF skeleton\n");
		return -1;
	}

	lock_contention_bpf__attach(skel);
	return 0;
}

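/*
 * The BPF programs are attached once in lock_contention_prepare(); the
 * 'enabled' flag in the skeleton's BSS is expected to gate data collection,
 * so start/stop below only flip the flag without detaching anything.
 */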
int lock_contention_start(void)
{
	skel->bss->enabled = 1;
	return 0;
}

int lock_contention_stop(void)
{
	skel->bss->enabled = 0;
	return 0;
}

int lock_contention_read(struct machine *machine, struct hlist_head *head)
{
	int fd, stack;
	u32 prev_key, key;
	struct lock_contention_data data;
	struct lock_stat *st;
	u64 stack_trace[CONTENTION_STACK_DEPTH];

	fd = bpf_map__fd(skel->maps.lock_stat);
	stack = bpf_map__fd(skel->maps.stacks);

	prev_key = 0;
	while (!bpf_map_get_next_key(fd, &prev_key, &key)) {
		struct map *kmap;
		struct symbol *sym;
		int idx;

		bpf_map_lookup_elem(fd, &key, &data);
		st = zalloc(sizeof(*st));
		if (st == NULL)
			return -1;

		st->nr_contended = data.count;
		st->wait_time_total = data.total_time;
		st->wait_time_max = data.max_time;
		st->wait_time_min = data.min_time;

		if (data.count)
			st->avg_wait_time = data.total_time / data.count;

		st->flags = data.flags;

		bpf_map_lookup_elem(stack, &key, stack_trace);

		/* skip BPF + lock internal functions */
		idx = CONTENTION_STACK_SKIP;
		while (is_lock_function(machine, stack_trace[idx]) &&
		       idx < CONTENTION_STACK_DEPTH - 1)
			idx++;

		st->addr = stack_trace[idx];
		sym = machine__find_kernel_symbol(machine, st->addr, &kmap);

		if (sym) {
			unsigned long offset;
			int ret = 0;

			offset = kmap->map_ip(kmap, st->addr) - sym->start;

			if (offset)
				ret = asprintf(&st->name, "%s+%#lx", sym->name, offset);
			else
				st->name = strdup(sym->name);

			if (ret < 0 || st->name == NULL) {
				free(st);
				return -1;
			}
		} else if (asprintf(&st->name, "%#lx", (unsigned long)st->addr) < 0) {
			free(st);
			return -1;
		}

		hlist_add_head(&st->hash_entry, head);
		prev_key = key;
	}

	return 0;
}

int lock_contention_finish(void)
{
	if (skel) {
		skel->bss->enabled = 0;
		lock_contention_bpf__destroy(skel);
	}

	return 0;
}
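
/*
 * Typical call sequence from the perf tool side (a sketch only; 'machine'
 * and 'head' are hypothetical names for a struct machine and an hlist_head
 * that the caller is assumed to have set up):
 *
 *	lock_contention_prepare();
 *	lock_contention_start();
 *	... run or monitor the target workload ...
 *	lock_contention_stop();
 *	lock_contention_read(machine, &head);
 *	lock_contention_finish();
 */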