// SPDX-License-Identifier: GPL-2.0
#include "util/cgroup.h"
#include "util/debug.h"
#include "util/evlist.h"
#include "util/machine.h"
#include "util/map.h"
#include "util/symbol.h"
#include "util/target.h"
#include "util/thread.h"
#include "util/thread_map.h"
#include "util/lock-contention.h"
#include <linux/zalloc.h>
#include <linux/string.h>
#include <bpf/bpf.h>
#include <inttypes.h>

#include "bpf_skel/lock_contention.skel.h"
#include "bpf_skel/lock_data.h"

static struct lock_contention_bpf *skel;

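/*
 * Prepare the lock_contention BPF skeleton: open it, size its maps for
 * the requested aggregation mode and filters, load it, populate the
 * filter maps and control variables, then attach the programs.
 */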
int lock_contention_prepare(struct lock_contention *con)
{
	int i, fd;
	int ncpus = 1, ntasks = 1, ntypes = 1, naddrs = 1, ncgrps = 1;
	struct evlist *evlist = con->evlist;
	struct target *target = con->target;

	skel = lock_contention_bpf__open();
	if (!skel) {
		pr_err("Failed to open lock-contention BPF skeleton\n");
		return -1;
	}

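	/* map geometry must be fixed before lock_contention_bpf__load() */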
	bpf_map__set_value_size(skel->maps.stacks, con->max_stack * sizeof(u64));
	bpf_map__set_max_entries(skel->maps.lock_stat, con->map_nr_entries);
	bpf_map__set_max_entries(skel->maps.tstamp, con->map_nr_entries);

	if (con->aggr_mode == LOCK_AGGR_TASK)
		bpf_map__set_max_entries(skel->maps.task_data, con->map_nr_entries);
	else
		bpf_map__set_max_entries(skel->maps.task_data, 1);

	if (con->save_callstack)
		bpf_map__set_max_entries(skel->maps.stacks, con->map_nr_entries);
	else
		bpf_map__set_max_entries(skel->maps.stacks, 1);

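	/* derive the filter map sizes from the target and requested filters */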
	if (target__has_cpu(target))
		ncpus = perf_cpu_map__nr(evlist->core.user_requested_cpus);
	if (target__has_task(target))
		ntasks = perf_thread_map__nr(evlist->core.threads);
	if (con->filters->nr_types)
		ntypes = con->filters->nr_types;
	if (con->filters->nr_cgrps)
		ncgrps = con->filters->nr_cgrps;

	/* resolve lock name filters to addr */
	if (con->filters->nr_syms) {
		struct symbol *sym;
		struct map *kmap;
		unsigned long *addrs;

		for (i = 0; i < con->filters->nr_syms; i++) {
			sym = machine__find_kernel_symbol_by_name(con->machine,
								  con->filters->syms[i],
								  &kmap);
			if (sym == NULL) {
				pr_warning("ignore unknown symbol: %s\n",
					   con->filters->syms[i]);
				continue;
			}

			addrs = realloc(con->filters->addrs,
					(con->filters->nr_addrs + 1) * sizeof(*addrs));
			if (addrs == NULL) {
				pr_warning("memory allocation failure\n");
				continue;
			}

			addrs[con->filters->nr_addrs++] = map__unmap_ip(kmap, sym->start);
			con->filters->addrs = addrs;
		}
		naddrs = con->filters->nr_addrs;
	}

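	/* unused filters keep the default size of a single dummy entry */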
	bpf_map__set_max_entries(skel->maps.cpu_filter, ncpus);
	bpf_map__set_max_entries(skel->maps.task_filter, ntasks);
	bpf_map__set_max_entries(skel->maps.type_filter, ntypes);
	bpf_map__set_max_entries(skel->maps.addr_filter, naddrs);
	bpf_map__set_max_entries(skel->maps.cgroup_filter, ncgrps);

	if (lock_contention_bpf__load(skel) < 0) {
		pr_err("Failed to load lock-contention BPF skeleton\n");
		return -1;
	}

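	/* the maps exist after load: populate the active filters */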
	if (target__has_cpu(target)) {
		u32 cpu;
		u8 val = 1;

		skel->bss->has_cpu = 1;
		fd = bpf_map__fd(skel->maps.cpu_filter);

		for (i = 0; i < ncpus; i++) {
			cpu = perf_cpu_map__cpu(evlist->core.user_requested_cpus, i).cpu;
			bpf_map_update_elem(fd, &cpu, &val, BPF_ANY);
		}
	}

	if (target__has_task(target)) {
		u32 pid;
		u8 val = 1;

		skel->bss->has_task = 1;
		fd = bpf_map__fd(skel->maps.task_filter);

		for (i = 0; i < ntasks; i++) {
			pid = perf_thread_map__pid(evlist->core.threads, i);
			bpf_map_update_elem(fd, &pid, &val, BPF_ANY);
		}
	}

	if (target__none(target) && evlist->workload.pid > 0) {
		u32 pid = evlist->workload.pid;
		u8 val = 1;

		skel->bss->has_task = 1;
		fd = bpf_map__fd(skel->maps.task_filter);
		bpf_map_update_elem(fd, &pid, &val, BPF_ANY);
	}

	if (con->filters->nr_types) {
		u8 val = 1;

		skel->bss->has_type = 1;
		fd = bpf_map__fd(skel->maps.type_filter);

		for (i = 0; i < con->filters->nr_types; i++)
			bpf_map_update_elem(fd, &con->filters->types[i], &val, BPF_ANY);
	}

	if (con->filters->nr_addrs) {
		u8 val = 1;

		skel->bss->has_addr = 1;
		fd = bpf_map__fd(skel->maps.addr_filter);

		for (i = 0; i < con->filters->nr_addrs; i++)
			bpf_map_update_elem(fd, &con->filters->addrs[i], &val, BPF_ANY);
	}

	if (con->filters->nr_cgrps) {
		u8 val = 1;

		skel->bss->has_cgroup = 1;
		fd = bpf_map__fd(skel->maps.cgroup_filter);

		for (i = 0; i < con->filters->nr_cgrps; i++)
			bpf_map_update_elem(fd, &con->filters->cgrps[i], &val, BPF_ANY);
	}

	/* these don't work well if in the rodata section */
	skel->bss->stack_skip = con->stack_skip;
	skel->bss->aggr_mode = con->aggr_mode;
	skel->bss->needs_callstack = con->save_callstack;
	skel->bss->lock_owner = con->owner;

	if (con->aggr_mode == LOCK_AGGR_CGROUP) {
		if (cgroup_is_v2("perf_event"))
			skel->bss->use_cgroup_v2 = 1;

		read_all_cgroups(&con->cgroups);
	}

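	/* collect_lock_syms only runs on demand from lock_contention_read() */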
	bpf_program__set_autoload(skel->progs.collect_lock_syms, false);

	lock_contention_bpf__attach(skel);
	return 0;
}

int lock_contention_start(void)
{
	skel->bss->enabled = 1;
	return 0;
}

int lock_contention_stop(void)
{
	skel->bss->enabled = 0;
	return 0;
}

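/*
 * Resolve a human-readable name for a contention entry depending on the
 * aggregation mode: task comm, lock symbol or known lock class, cgroup
 * name, or the first caller outside of the locking code itself.
 */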
static const char *lock_contention_get_name(struct lock_contention *con,
					    struct contention_key *key,
					    u64 *stack_trace, u32 flags)
{
	int idx = 0;
	u64 addr;
	const char *name = "";
	static char name_buf[KSYM_NAME_LEN];
	struct symbol *sym;
	struct map *kmap;
	struct machine *machine = con->machine;

	if (con->aggr_mode == LOCK_AGGR_TASK) {
		struct contention_task_data task;
		int pid = key->pid;
		int task_fd = bpf_map__fd(skel->maps.task_data);

		/* do not update idle comm which contains CPU number */
		if (pid) {
			struct thread *t = __machine__findnew_thread(machine, /*pid=*/-1, pid);

			if (t == NULL)
				return name;
			if (!bpf_map_lookup_elem(task_fd, &pid, &task) &&
			    thread__set_comm(t, task.comm, /*timestamp=*/0))
				name = task.comm;
		}
		return name;
	}

	if (con->aggr_mode == LOCK_AGGR_ADDR) {
		int lock_fd = bpf_map__fd(skel->maps.lock_syms);

		/* per-process locks set upper bits of the flags */
		if (flags & LCD_F_MMAP_LOCK)
			return "mmap_lock";
		if (flags & LCD_F_SIGHAND_LOCK)
			return "siglock";

		/* global locks with symbols */
		sym = machine__find_kernel_symbol(machine, key->lock_addr_or_cgroup, &kmap);
		if (sym)
			return sym->name;

		/* try semi-global locks collected separately */
		if (!bpf_map_lookup_elem(lock_fd, &key->lock_addr_or_cgroup, &flags)) {
			if (flags == LOCK_CLASS_RQLOCK)
				return "rq_lock";
		}

		return "";
	}

	if (con->aggr_mode == LOCK_AGGR_CGROUP) {
		u64 cgrp_id = key->lock_addr_or_cgroup;
		struct cgroup *cgrp = __cgroup__find(&con->cgroups, cgrp_id);

		if (cgrp)
			return cgrp->name;

		snprintf(name_buf, sizeof(name_buf), "cgroup:%" PRIu64 "", cgrp_id);
		return name_buf;
	}

	/* LOCK_AGGR_CALLER: skip lock internal functions */
	while (machine__is_lock_function(machine, stack_trace[idx]) &&
	       idx < con->max_stack - 1)
		idx++;

	addr = stack_trace[idx];
	sym = machine__find_kernel_symbol(machine, addr, &kmap);

	if (sym) {
		unsigned long offset;

		offset = map__map_ip(kmap, addr) - sym->start;

		if (offset == 0)
			return sym->name;

		snprintf(name_buf, sizeof(name_buf), "%s+%#lx", sym->name, offset);
	} else {
		snprintf(name_buf, sizeof(name_buf), "%#lx", (unsigned long)addr);
	}

	return name_buf;
}

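/*
 * Drain the BPF lock_stat map into the perf lock_stat list: read the
 * failure counters, walk every contention entry, resolve its name and
 * merge it into the existing stats.  Returns 0 on success, -1 on error.
 */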
int lock_contention_read(struct lock_contention *con)
{
	int fd, stack, err = 0;
	struct contention_key *prev_key, key = {};
	struct contention_data data = {};
	struct lock_stat *st = NULL;
	struct machine *machine = con->machine;
	u64 *stack_trace;
	size_t stack_size = con->max_stack * sizeof(*stack_trace);

	fd = bpf_map__fd(skel->maps.lock_stat);
	stack = bpf_map__fd(skel->maps.stacks);

	con->fails.task = skel->bss->task_fail;
	con->fails.stack = skel->bss->stack_fail;
	con->fails.time = skel->bss->time_fail;
	con->fails.data = skel->bss->data_fail;

	stack_trace = zalloc(stack_size);
	if (stack_trace == NULL)
		return -1;

	if (con->aggr_mode == LOCK_AGGR_TASK) {
		struct thread *idle = __machine__findnew_thread(machine,
								/*pid=*/0,
								/*tid=*/0);
		thread__set_comm(idle, "swapper", /*timestamp=*/0);
	}

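	/* trigger collect_lock_syms to fill lock_syms with per-cpu lock addresses */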
	if (con->aggr_mode == LOCK_AGGR_ADDR) {
		DECLARE_LIBBPF_OPTS(bpf_test_run_opts, opts,
			.flags = BPF_F_TEST_RUN_ON_CPU,
		);
		int prog_fd = bpf_program__fd(skel->progs.collect_lock_syms);

		bpf_prog_test_run_opts(prog_fd, &opts);
	}

	/* make sure it loads the kernel map */
	map__load(maps__first(machine->kmaps)->map);

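	/* walk every entry in the lock_stat map via key iteration */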
	prev_key = NULL;
	while (!bpf_map_get_next_key(fd, prev_key, &key)) {
		s64 ls_key;
		const char *name;

		/* to handle errors in the loop body */
		err = -1;

		bpf_map_lookup_elem(fd, &key, &data);
		if (con->save_callstack) {
			bpf_map_lookup_elem(stack, &key.stack_id, stack_trace);

			if (!match_callstack_filter(machine, stack_trace)) {
				con->nr_filtered += data.count;
				goto next;
			}
		}

		switch (con->aggr_mode) {
		case LOCK_AGGR_CALLER:
			ls_key = key.stack_id;
			break;
		case LOCK_AGGR_TASK:
			ls_key = key.pid;
			break;
		case LOCK_AGGR_ADDR:
		case LOCK_AGGR_CGROUP:
			ls_key = key.lock_addr_or_cgroup;
			break;
		default:
			goto next;
		}

		st = lock_stat_find(ls_key);
		if (st != NULL) {
			st->wait_time_total += data.total_time;
			if (st->wait_time_max < data.max_time)
				st->wait_time_max = data.max_time;
			if (st->wait_time_min > data.min_time)
				st->wait_time_min = data.min_time;

			st->nr_contended += data.count;
			if (st->nr_contended)
				st->avg_wait_time = st->wait_time_total / st->nr_contended;
			goto next;
		}

		name = lock_contention_get_name(con, &key, stack_trace, data.flags);
		st = lock_stat_findnew(ls_key, name, data.flags);
		if (st == NULL)
			break;

		st->nr_contended = data.count;
		st->wait_time_total = data.total_time;
		st->wait_time_max = data.max_time;
		st->wait_time_min = data.min_time;

		if (data.count)
			st->avg_wait_time = data.total_time / data.count;

		if (con->aggr_mode == LOCK_AGGR_CALLER && verbose > 0) {
			st->callstack = memdup(stack_trace, stack_size);
			if (st->callstack == NULL)
				break;
		}

next:
		prev_key = &key;

		/* we're fine now, reset the error */
		err = 0;
	}

	free(stack_trace);

	return err;
}

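/*
 * Tear down the BPF state and release the cgroup cache built by
 * read_all_cgroups() in lock_contention_prepare().
 */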
int lock_contention_finish(struct lock_contention *con)
{
	if (skel) {
		skel->bss->enabled = 0;
		lock_contention_bpf__destroy(skel);
	}

	while (!RB_EMPTY_ROOT(&con->cgroups)) {
		struct rb_node *node = rb_first(&con->cgroups);
		struct cgroup *cgrp = rb_entry(node, struct cgroup, node);

		rb_erase(node, &con->cgroups);
		cgroup__put(cgrp);
	}

	return 0;
}