Commit | Line | Data |
---|---|---|
b2441318 | 1 | // SPDX-License-Identifier: GPL-2.0 |
69d262a9 WN |
2 | /* |
3 | * bpf-loader.c | |
4 | * | |
5 | * Copyright (C) 2015 Wang Nan <wangnan0@huawei.com> | |
6 | * Copyright (C) 2015 Huawei Inc. | |
7 | */ | |
8 | ||
a08357d8 | 9 | #include <linux/bpf.h> |
69d262a9 | 10 | #include <bpf/libbpf.h> |
8690a2a7 | 11 | #include <bpf/bpf.h> |
69d262a9 | 12 | #include <linux/err.h> |
877a7a11 | 13 | #include <linux/kernel.h> |
03e01f56 | 14 | #include <linux/string.h> |
7f7c536f | 15 | #include <linux/zalloc.h> |
a43783ae | 16 | #include <errno.h> |
69d262a9 WN |
17 | #include "perf.h" |
18 | #include "debug.h" | |
ebc52aee | 19 | #include "evlist.h" |
69d262a9 | 20 | #include "bpf-loader.h" |
a08357d8 | 21 | #include "bpf-prologue.h" |
aa3abf30 WN |
22 | #include "probe-event.h" |
23 | #include "probe-finder.h" // for MAX_PROBES | |
2d055bf2 | 24 | #include "parse-events.h" |
8ec20b17 | 25 | #include "strfilter.h" |
d509db04 | 26 | #include "llvm-utils.h" |
edd695b0 | 27 | #include "c++/clang-c.h" |
69d262a9 | 28 | |
6f1ae8b6 | 29 | static int libbpf_perf_print(enum libbpf_print_level level __attribute__((unused)), |
a8a1f7d0 | 30 | const char *fmt, va_list args) |
6f1ae8b6 | 31 | { |
a8a1f7d0 | 32 | return veprintf(1, verbose, pr_fmt(fmt), args); |
6f1ae8b6 | 33 | } |
69d262a9 | 34 | |
aa3abf30 | 35 | struct bpf_prog_priv { |
b4ee6d41 WN |
36 | bool is_tp; |
37 | char *sys_name; | |
38 | char *evt_name; | |
aa3abf30 | 39 | struct perf_probe_event pev; |
a08357d8 WN |
40 | bool need_prologue; |
41 | struct bpf_insn *insns_buf; | |
d35b3289 WN |
42 | int nr_types; |
43 | int *type_mapping; | |
aa3abf30 WN |
44 | }; |
45 | ||
ba1fae43 WN |
46 | static bool libbpf_initialized; |
47 | ||
48 | struct bpf_object * | |
49 | bpf__prepare_load_buffer(void *obj_buf, size_t obj_buf_sz, const char *name) | |
50 | { | |
51 | struct bpf_object *obj; | |
52 | ||
53 | if (!libbpf_initialized) { | |
6f1ae8b6 | 54 | libbpf_set_print(libbpf_perf_print); |
ba1fae43 WN |
55 | libbpf_initialized = true; |
56 | } | |
57 | ||
58 | obj = bpf_object__open_buffer(obj_buf, obj_buf_sz, name); | |
ab4e32ff | 59 | if (IS_ERR_OR_NULL(obj)) { |
ba1fae43 WN |
60 | pr_debug("bpf: failed to load buffer\n"); |
61 | return ERR_PTR(-EINVAL); | |
62 | } | |
63 | ||
64 | return obj; | |
65 | } | |
66 | ||
d509db04 | 67 | struct bpf_object *bpf__prepare_load(const char *filename, bool source) |
69d262a9 WN |
68 | { |
69 | struct bpf_object *obj; | |
69d262a9 WN |
70 | |
71 | if (!libbpf_initialized) { | |
6f1ae8b6 | 72 | libbpf_set_print(libbpf_perf_print); |
69d262a9 WN |
73 | libbpf_initialized = true; |
74 | } | |
75 | ||
d509db04 WN |
76 | if (source) { |
77 | int err; | |
78 | void *obj_buf; | |
79 | size_t obj_buf_sz; | |
80 | ||
edd695b0 WN |
81 | perf_clang__init(); |
82 | err = perf_clang__compile_bpf(filename, &obj_buf, &obj_buf_sz); | |
83 | perf_clang__cleanup(); | |
84 | if (err) { | |
87252323 | 85 | pr_debug("bpf: builtin compilation failed: %d, try external compiler\n", err); |
edd695b0 WN |
86 | err = llvm__compile_bpf(filename, &obj_buf, &obj_buf_sz); |
87 | if (err) | |
88 | return ERR_PTR(-BPF_LOADER_ERRNO__COMPILE); | |
89 | } else | |
e4a8b0af | 90 | pr_debug("bpf: successful builtin compilation\n"); |
d509db04 | 91 | obj = bpf_object__open_buffer(obj_buf, obj_buf_sz, filename); |
2bd42de0 | 92 | |
ab4e32ff | 93 | if (!IS_ERR_OR_NULL(obj) && llvm_param.dump_obj) |
2bd42de0 WN |
94 | llvm__dump_obj(filename, obj_buf, obj_buf_sz); |
95 | ||
d509db04 WN |
96 | free(obj_buf); |
97 | } else | |
98 | obj = bpf_object__open(filename); | |
99 | ||
ab4e32ff | 100 | if (IS_ERR_OR_NULL(obj)) { |
69d262a9 | 101 | pr_debug("bpf: failed to load %s\n", filename); |
6371ca3b | 102 | return obj; |
69d262a9 WN |
103 | } |
104 | ||
105 | return obj; | |
106 | } | |
107 | ||
108 | void bpf__clear(void) | |
109 | { | |
110 | struct bpf_object *obj, *tmp; | |
111 | ||
aa3abf30 WN |
112 | bpf_object__for_each_safe(obj, tmp) { |
113 | bpf__unprobe(obj); | |
69d262a9 | 114 | bpf_object__close(obj); |
aa3abf30 WN |
115 | } |
116 | } | |
117 | ||
118 | static void | |
80cdce76 WN |
119 | clear_prog_priv(struct bpf_program *prog __maybe_unused, |
120 | void *_priv) | |
aa3abf30 WN |
121 | { |
122 | struct bpf_prog_priv *priv = _priv; | |
123 | ||
124 | cleanup_perf_probe_events(&priv->pev, 1); | |
a08357d8 | 125 | zfree(&priv->insns_buf); |
d35b3289 | 126 | zfree(&priv->type_mapping); |
b4ee6d41 WN |
127 | zfree(&priv->sys_name); |
128 | zfree(&priv->evt_name); | |
aa3abf30 WN |
129 | free(priv); |
130 | } | |
131 | ||
361f2b1d | 132 | static int |
0bb93490 | 133 | prog_config__exec(const char *value, struct perf_probe_event *pev) |
361f2b1d WN |
134 | { |
135 | pev->uprobes = true; | |
136 | pev->target = strdup(value); | |
137 | if (!pev->target) | |
138 | return -ENOMEM; | |
139 | return 0; | |
140 | } | |
141 | ||
5dbd16c0 | 142 | static int |
0bb93490 | 143 | prog_config__module(const char *value, struct perf_probe_event *pev) |
5dbd16c0 WN |
144 | { |
145 | pev->uprobes = false; | |
146 | pev->target = strdup(value); | |
147 | if (!pev->target) | |
148 | return -ENOMEM; | |
149 | return 0; | |
150 | } | |
151 | ||
03e01f56 | 152 | static int |
0bb93490 | 153 | prog_config__bool(const char *value, bool *pbool, bool invert) |
03e01f56 WN |
154 | { |
155 | int err; | |
156 | bool bool_value; | |
157 | ||
158 | if (!pbool) | |
159 | return -EINVAL; | |
160 | ||
161 | err = strtobool(value, &bool_value); | |
162 | if (err) | |
163 | return err; | |
164 | ||
165 | *pbool = invert ? !bool_value : bool_value; | |
166 | return 0; | |
167 | } | |
168 | ||
169 | static int | |
0bb93490 WN |
170 | prog_config__inlines(const char *value, |
171 | struct perf_probe_event *pev __maybe_unused) | |
03e01f56 | 172 | { |
0bb93490 | 173 | return prog_config__bool(value, &probe_conf.no_inlines, true); |
03e01f56 WN |
174 | } |
175 | ||
176 | static int | |
0bb93490 WN |
177 | prog_config__force(const char *value, |
178 | struct perf_probe_event *pev __maybe_unused) | |
03e01f56 | 179 | { |
0bb93490 | 180 | return prog_config__bool(value, &probe_conf.force_add, false); |
03e01f56 WN |
181 | } |
182 | ||
361f2b1d WN |
183 | static struct { |
184 | const char *key; | |
185 | const char *usage; | |
186 | const char *desc; | |
187 | int (*func)(const char *, struct perf_probe_event *); | |
0bb93490 | 188 | } bpf_prog_config_terms[] = { |
361f2b1d WN |
189 | { |
190 | .key = "exec", | |
191 | .usage = "exec=<full path of file>", | |
192 | .desc = "Set uprobe target", | |
0bb93490 | 193 | .func = prog_config__exec, |
361f2b1d | 194 | }, |
5dbd16c0 WN |
195 | { |
196 | .key = "module", | |
197 | .usage = "module=<module name> ", | |
198 | .desc = "Set kprobe module", | |
0bb93490 | 199 | .func = prog_config__module, |
03e01f56 WN |
200 | }, |
201 | { | |
202 | .key = "inlines", | |
203 | .usage = "inlines=[yes|no] ", | |
204 | .desc = "Probe at inline symbol", | |
0bb93490 | 205 | .func = prog_config__inlines, |
03e01f56 WN |
206 | }, |
207 | { | |
208 | .key = "force", | |
209 | .usage = "force=[yes|no] ", | |
210 | .desc = "Forcibly add events with existing name", | |
0bb93490 | 211 | .func = prog_config__force, |
03e01f56 | 212 | }, |
361f2b1d WN |
213 | }; |
214 | ||
215 | static int | |
0bb93490 WN |
216 | do_prog_config(const char *key, const char *value, |
217 | struct perf_probe_event *pev) | |
361f2b1d WN |
218 | { |
219 | unsigned int i; | |
220 | ||
221 | pr_debug("config bpf program: %s=%s\n", key, value); | |
0bb93490 WN |
222 | for (i = 0; i < ARRAY_SIZE(bpf_prog_config_terms); i++) |
223 | if (strcmp(key, bpf_prog_config_terms[i].key) == 0) | |
224 | return bpf_prog_config_terms[i].func(value, pev); | |
361f2b1d | 225 | |
0bb93490 | 226 | pr_debug("BPF: ERROR: invalid program config option: %s=%s\n", |
361f2b1d WN |
227 | key, value); |
228 | ||
0bb93490 WN |
229 | pr_debug("\nHint: Valid options are:\n"); |
230 | for (i = 0; i < ARRAY_SIZE(bpf_prog_config_terms); i++) | |
231 | pr_debug("\t%s:\t%s\n", bpf_prog_config_terms[i].usage, | |
232 | bpf_prog_config_terms[i].desc); | |
361f2b1d WN |
233 | pr_debug("\n"); |
234 | ||
0bb93490 | 235 | return -BPF_LOADER_ERRNO__PROGCONF_TERM; |
361f2b1d WN |
236 | } |
237 | ||
238 | static const char * | |
0bb93490 | 239 | parse_prog_config_kvpair(const char *config_str, struct perf_probe_event *pev) |
361f2b1d WN |
240 | { |
241 | char *text = strdup(config_str); | |
242 | char *sep, *line; | |
243 | const char *main_str = NULL; | |
244 | int err = 0; | |
245 | ||
246 | if (!text) { | |
042cfb5f | 247 | pr_debug("Not enough memory: dup config_str failed\n"); |
361f2b1d WN |
248 | return ERR_PTR(-ENOMEM); |
249 | } | |
250 | ||
251 | line = text; | |
252 | while ((sep = strchr(line, ';'))) { | |
253 | char *equ; | |
254 | ||
255 | *sep = '\0'; | |
256 | equ = strchr(line, '='); | |
257 | if (!equ) { | |
258 | pr_warning("WARNING: invalid config in BPF object: %s\n", | |
259 | line); | |
260 | pr_warning("\tShould be 'key=value'.\n"); | |
261 | goto nextline; | |
262 | } | |
263 | *equ = '\0'; | |
264 | ||
0bb93490 | 265 | err = do_prog_config(line, equ + 1, pev); |
361f2b1d WN |
266 | if (err) |
267 | break; | |
268 | nextline: | |
269 | line = sep + 1; | |
270 | } | |
271 | ||
272 | if (!err) | |
273 | main_str = config_str + (line - text); | |
274 | free(text); | |
275 | ||
276 | return err ? ERR_PTR(err) : main_str; | |
277 | } | |
278 | ||
279 | static int | |
b4ee6d41 WN |
280 | parse_prog_config(const char *config_str, const char **p_main_str, |
281 | bool *is_tp, struct perf_probe_event *pev) | |
361f2b1d WN |
282 | { |
283 | int err; | |
0bb93490 | 284 | const char *main_str = parse_prog_config_kvpair(config_str, pev); |
361f2b1d WN |
285 | |
286 | if (IS_ERR(main_str)) | |
287 | return PTR_ERR(main_str); | |
288 | ||
b4ee6d41 WN |
289 | *p_main_str = main_str; |
290 | if (!strchr(main_str, '=')) { | |
291 | /* Is a tracepoint event? */ | |
292 | const char *s = strchr(main_str, ':'); | |
293 | ||
294 | if (!s) { | |
295 | pr_debug("bpf: '%s' is not a valid tracepoint\n", | |
296 | config_str); | |
297 | return -BPF_LOADER_ERRNO__CONFIG; | |
298 | } | |
299 | ||
300 | *is_tp = true; | |
301 | return 0; | |
302 | } | |
303 | ||
304 | *is_tp = false; | |
361f2b1d WN |
305 | err = parse_perf_probe_command(main_str, pev); |
306 | if (err < 0) { | |
307 | pr_debug("bpf: '%s' is not a valid config string\n", | |
308 | config_str); | |
309 | /* parse failed, no need to clear pev. */ | |
310 | return -BPF_LOADER_ERRNO__CONFIG; | |
311 | } | |
312 | return 0; | |
313 | } | |
314 | ||
aa3abf30 WN |
315 | static int |
316 | config_bpf_program(struct bpf_program *prog) | |
317 | { | |
318 | struct perf_probe_event *pev = NULL; | |
319 | struct bpf_prog_priv *priv = NULL; | |
b4ee6d41 WN |
320 | const char *config_str, *main_str; |
321 | bool is_tp = false; | |
aa3abf30 WN |
322 | int err; |
323 | ||
03e01f56 WN |
324 | /* Initialize per-program probing setting */ |
325 | probe_conf.no_inlines = false; | |
326 | probe_conf.force_add = false; | |
327 | ||
aa3abf30 | 328 | config_str = bpf_program__title(prog, false); |
6371ca3b | 329 | if (IS_ERR(config_str)) { |
aa3abf30 | 330 | pr_debug("bpf: unable to get title for program\n"); |
6371ca3b | 331 | return PTR_ERR(config_str); |
aa3abf30 WN |
332 | } |
333 | ||
334 | priv = calloc(sizeof(*priv), 1); | |
335 | if (!priv) { | |
336 | pr_debug("bpf: failed to alloc priv\n"); | |
337 | return -ENOMEM; | |
338 | } | |
339 | pev = &priv->pev; | |
340 | ||
341 | pr_debug("bpf: config program '%s'\n", config_str); | |
b4ee6d41 | 342 | err = parse_prog_config(config_str, &main_str, &is_tp, pev); |
361f2b1d | 343 | if (err) |
aa3abf30 | 344 | goto errout; |
aa3abf30 | 345 | |
b4ee6d41 WN |
346 | if (is_tp) { |
347 | char *s = strchr(main_str, ':'); | |
348 | ||
349 | priv->is_tp = true; | |
350 | priv->sys_name = strndup(main_str, s - main_str); | |
351 | priv->evt_name = strdup(s + 1); | |
352 | goto set_priv; | |
353 | } | |
354 | ||
aa3abf30 WN |
355 | if (pev->group && strcmp(pev->group, PERF_BPF_PROBE_GROUP)) { |
356 | pr_debug("bpf: '%s': group for event is set and not '%s'.\n", | |
357 | config_str, PERF_BPF_PROBE_GROUP); | |
d3e0ce39 | 358 | err = -BPF_LOADER_ERRNO__GROUP; |
aa3abf30 WN |
359 | goto errout; |
360 | } else if (!pev->group) | |
361 | pev->group = strdup(PERF_BPF_PROBE_GROUP); | |
362 | ||
363 | if (!pev->group) { | |
364 | pr_debug("bpf: strdup failed\n"); | |
365 | err = -ENOMEM; | |
366 | goto errout; | |
367 | } | |
368 | ||
369 | if (!pev->event) { | |
d3e0ce39 | 370 | pr_debug("bpf: '%s': event name is missing. Section name should be 'key=value'\n", |
aa3abf30 | 371 | config_str); |
d3e0ce39 | 372 | err = -BPF_LOADER_ERRNO__EVENTNAME; |
aa3abf30 WN |
373 | goto errout; |
374 | } | |
375 | pr_debug("bpf: config '%s' is ok\n", config_str); | |
376 | ||
b4ee6d41 | 377 | set_priv: |
edb13ed4 | 378 | err = bpf_program__set_priv(prog, priv, clear_prog_priv); |
aa3abf30 WN |
379 | if (err) { |
380 | pr_debug("Failed to set priv for program '%s'\n", config_str); | |
381 | goto errout; | |
382 | } | |
383 | ||
384 | return 0; | |
385 | ||
386 | errout: | |
387 | if (pev) | |
388 | clear_perf_probe_event(pev); | |
389 | free(priv); | |
390 | return err; | |
391 | } | |
392 | ||
393 | static int bpf__prepare_probe(void) | |
394 | { | |
395 | static int err = 0; | |
396 | static bool initialized = false; | |
397 | ||
398 | /* | |
399 | * Make err static, so if init failed the first time, bpf__prepare_probe() | |
400 | * fails each time without calling init_probe_symbol_maps multiple | |
401 | * times. | |
402 | */ | |
403 | if (initialized) | |
404 | return err; | |
405 | ||
406 | initialized = true; | |
407 | err = init_probe_symbol_maps(false); | |
408 | if (err < 0) | |
409 | pr_debug("Failed to init_probe_symbol_maps\n"); | |
410 | probe_conf.max_probes = MAX_PROBES; | |
411 | return err; | |
412 | } | |
413 | ||
a08357d8 WN |
414 | static int |
415 | preproc_gen_prologue(struct bpf_program *prog, int n, | |
416 | struct bpf_insn *orig_insns, int orig_insns_cnt, | |
417 | struct bpf_prog_prep_result *res) | |
418 | { | |
be834ffb | 419 | struct bpf_prog_priv *priv = bpf_program__priv(prog); |
a08357d8 WN |
420 | struct probe_trace_event *tev; |
421 | struct perf_probe_event *pev; | |
a08357d8 WN |
422 | struct bpf_insn *buf; |
423 | size_t prologue_cnt = 0; | |
d35b3289 | 424 | int i, err; |
a08357d8 | 425 | |
b4ee6d41 | 426 | if (IS_ERR(priv) || !priv || priv->is_tp) |
a08357d8 WN |
427 | goto errout; |
428 | ||
429 | pev = &priv->pev; | |
430 | ||
d35b3289 | 431 | if (n < 0 || n >= priv->nr_types) |
a08357d8 WN |
432 | goto errout; |
433 | ||
d35b3289 WN |
434 | /* Find a tev that belongs to that type */
435 | for (i = 0; i < pev->ntevs; i++) { | |
436 | if (priv->type_mapping[i] == n) | |
437 | break; | |
438 | } | |
439 | ||
440 | if (i >= pev->ntevs) { | |
441 | pr_debug("Internal error: prologue type %d not found\n", n); | |
442 | return -BPF_LOADER_ERRNO__PROLOGUE; | |
443 | } | |
444 | ||
445 | tev = &pev->tevs[i]; | |
a08357d8 WN |
446 | |
447 | buf = priv->insns_buf; | |
448 | err = bpf__gen_prologue(tev->args, tev->nargs, | |
449 | buf, &prologue_cnt, | |
450 | BPF_MAXINSNS - orig_insns_cnt); | |
451 | if (err) { | |
452 | const char *title; | |
453 | ||
454 | title = bpf_program__title(prog, false); | |
455 | if (!title) | |
456 | title = "[unknown]"; | |
457 | ||
458 | pr_debug("Failed to generate prologue for program %s\n", | |
459 | title); | |
460 | return err; | |
461 | } | |
462 | ||
463 | memcpy(&buf[prologue_cnt], orig_insns, | |
464 | sizeof(struct bpf_insn) * orig_insns_cnt); | |
465 | ||
466 | res->new_insn_ptr = buf; | |
467 | res->new_insn_cnt = prologue_cnt + orig_insns_cnt; | |
468 | res->pfd = NULL; | |
469 | return 0; | |
470 | ||
471 | errout: | |
472 | pr_debug("Internal error in preproc_gen_prologue\n"); | |
473 | return -BPF_LOADER_ERRNO__PROLOGUE; | |
474 | } | |
475 | ||
d35b3289 WN |
476 | /* |
477 | * compare_tev_args is reflexive, transitive and antisymmetric. | |
478 | * I can prove it but this margin is too narrow to contain. | |
479 | */ | |
480 | static int compare_tev_args(const void *ptev1, const void *ptev2) | |
481 | { | |
482 | int i, ret; | |
483 | const struct probe_trace_event *tev1 = | |
484 | *(const struct probe_trace_event **)ptev1; | |
485 | const struct probe_trace_event *tev2 = | |
486 | *(const struct probe_trace_event **)ptev2; | |
487 | ||
488 | ret = tev2->nargs - tev1->nargs; | |
489 | if (ret) | |
490 | return ret; | |
491 | ||
492 | for (i = 0; i < tev1->nargs; i++) { | |
493 | struct probe_trace_arg *arg1, *arg2; | |
494 | struct probe_trace_arg_ref *ref1, *ref2; | |
495 | ||
496 | arg1 = &tev1->args[i]; | |
497 | arg2 = &tev2->args[i]; | |
498 | ||
499 | ret = strcmp(arg1->value, arg2->value); | |
500 | if (ret) | |
501 | return ret; | |
502 | ||
503 | ref1 = arg1->ref; | |
504 | ref2 = arg2->ref; | |
505 | ||
506 | while (ref1 && ref2) { | |
507 | ret = ref2->offset - ref1->offset; | |
508 | if (ret) | |
509 | return ret; | |
510 | ||
511 | ref1 = ref1->next; | |
512 | ref2 = ref2->next; | |
513 | } | |
514 | ||
515 | if (ref1 || ref2) | |
516 | return ref2 ? 1 : -1; | |
517 | } | |
518 | ||
519 | return 0; | |
520 | } | |
521 | ||
522 | /* | |
523 | * Assign a type number to each tevs in a pev. | |
524 | * mapping is an array with same slots as tevs in that pev. | |
525 | * nr_types will be set to number of types. | |
526 | */ | |
527 | static int map_prologue(struct perf_probe_event *pev, int *mapping, | |
528 | int *nr_types) | |
529 | { | |
530 | int i, type = 0; | |
531 | struct probe_trace_event **ptevs; | |
532 | ||
533 | size_t array_sz = sizeof(*ptevs) * pev->ntevs; | |
534 | ||
535 | ptevs = malloc(array_sz); | |
536 | if (!ptevs) { | |
042cfb5f | 537 | pr_debug("Not enough memory: alloc ptevs failed\n"); |
d35b3289 WN |
538 | return -ENOMEM; |
539 | } | |
540 | ||
541 | pr_debug("In map_prologue, ntevs=%d\n", pev->ntevs); | |
542 | for (i = 0; i < pev->ntevs; i++) | |
543 | ptevs[i] = &pev->tevs[i]; | |
544 | ||
545 | qsort(ptevs, pev->ntevs, sizeof(*ptevs), | |
546 | compare_tev_args); | |
547 | ||
548 | for (i = 0; i < pev->ntevs; i++) { | |
549 | int n; | |
550 | ||
551 | n = ptevs[i] - pev->tevs; | |
552 | if (i == 0) { | |
553 | mapping[n] = type; | |
554 | pr_debug("mapping[%d]=%d\n", n, type); | |
555 | continue; | |
556 | } | |
557 | ||
558 | if (compare_tev_args(ptevs + i, ptevs + i - 1) == 0) | |
559 | mapping[n] = type; | |
560 | else | |
561 | mapping[n] = ++type; | |
562 | ||
563 | pr_debug("mapping[%d]=%d\n", n, mapping[n]); | |
564 | } | |
565 | free(ptevs); | |
566 | *nr_types = type + 1; | |
567 | ||
568 | return 0; | |
569 | } | |
570 | ||
a08357d8 WN |
571 | static int hook_load_preprocessor(struct bpf_program *prog) |
572 | { | |
be834ffb | 573 | struct bpf_prog_priv *priv = bpf_program__priv(prog); |
a08357d8 | 574 | struct perf_probe_event *pev; |
a08357d8 WN |
575 | bool need_prologue = false; |
576 | int err, i; | |
577 | ||
be834ffb | 578 | if (IS_ERR(priv) || !priv) { |
a08357d8 WN |
579 | pr_debug("Internal error when hook preprocessor\n"); |
580 | return -BPF_LOADER_ERRNO__INTERNAL; | |
581 | } | |
582 | ||
b4ee6d41 WN |
583 | if (priv->is_tp) { |
584 | priv->need_prologue = false; | |
585 | return 0; | |
586 | } | |
587 | ||
a08357d8 WN |
588 | pev = &priv->pev; |
589 | for (i = 0; i < pev->ntevs; i++) { | |
590 | struct probe_trace_event *tev = &pev->tevs[i]; | |
591 | ||
592 | if (tev->nargs > 0) { | |
593 | need_prologue = true; | |
594 | break; | |
595 | } | |
596 | } | |
597 | ||
598 | /* | |
599 | * Since none of the tevs has arguments, we don't need to | |
600 | * generate a prologue. | |
601 | */ | |
602 | if (!need_prologue) { | |
603 | priv->need_prologue = false; | |
604 | return 0; | |
605 | } | |
606 | ||
607 | priv->need_prologue = true; | |
608 | priv->insns_buf = malloc(sizeof(struct bpf_insn) * BPF_MAXINSNS); | |
609 | if (!priv->insns_buf) { | |
042cfb5f | 610 | pr_debug("Not enough memory: alloc insns_buf failed\n"); |
a08357d8 WN |
611 | return -ENOMEM; |
612 | } | |
613 | ||
d35b3289 WN |
614 | priv->type_mapping = malloc(sizeof(int) * pev->ntevs); |
615 | if (!priv->type_mapping) { | |
042cfb5f | 616 | pr_debug("Not enough memory: alloc type_mapping failed\n"); |
d35b3289 WN |
617 | return -ENOMEM; |
618 | } | |
619 | memset(priv->type_mapping, -1, | |
620 | sizeof(int) * pev->ntevs); | |
621 | ||
622 | err = map_prologue(pev, priv->type_mapping, &priv->nr_types); | |
623 | if (err) | |
624 | return err; | |
625 | ||
626 | err = bpf_program__set_prep(prog, priv->nr_types, | |
a08357d8 WN |
627 | preproc_gen_prologue); |
628 | return err; | |
629 | } | |
630 | ||
aa3abf30 WN |
631 | int bpf__probe(struct bpf_object *obj) |
632 | { | |
633 | int err = 0; | |
634 | struct bpf_program *prog; | |
635 | struct bpf_prog_priv *priv; | |
636 | struct perf_probe_event *pev; | |
637 | ||
638 | err = bpf__prepare_probe(); | |
639 | if (err) { | |
640 | pr_debug("bpf__prepare_probe failed\n"); | |
641 | return err; | |
642 | } | |
643 | ||
644 | bpf_object__for_each_program(prog, obj) { | |
645 | err = config_bpf_program(prog); | |
646 | if (err) | |
647 | goto out; | |
648 | ||
be834ffb ACM |
649 | priv = bpf_program__priv(prog); |
650 | if (IS_ERR(priv) || !priv) { | |
651 | err = PTR_ERR(priv); | |
aa3abf30 | 652 | goto out; |
be834ffb | 653 | } |
b4ee6d41 WN |
654 | |
655 | if (priv->is_tp) { | |
656 | bpf_program__set_tracepoint(prog); | |
657 | continue; | |
658 | } | |
659 | ||
660 | bpf_program__set_kprobe(prog); | |
aa3abf30 WN |
661 | pev = &priv->pev; |
662 | ||
663 | err = convert_perf_probe_events(pev, 1); | |
664 | if (err < 0) { | |
4d416436 | 665 | pr_debug("bpf_probe: failed to convert perf probe events\n"); |
aa3abf30 WN |
666 | goto out; |
667 | } | |
668 | ||
669 | err = apply_perf_probe_events(pev, 1); | |
670 | if (err < 0) { | |
4d416436 | 671 | pr_debug("bpf_probe: failed to apply perf probe events\n"); |
aa3abf30 WN |
672 | goto out; |
673 | } | |
a08357d8 WN |
674 | |
675 | /* | |
676 | * After probing, let's consider prologue, which | |
677 | * adds program fetcher to BPF programs. | |
678 | * | |
679 | * hook_load_preprocessor() hooks pre-processor | |
680 | * to bpf_program, let it generate prologue | |
681 | * dynamically during loading. | |
682 | */ | |
683 | err = hook_load_preprocessor(prog); | |
684 | if (err) | |
685 | goto out; | |
aa3abf30 WN |
686 | } |
687 | out: | |
688 | return err < 0 ? err : 0; | |
689 | } | |
690 | ||
691 | #define EVENTS_WRITE_BUFSIZE 4096 | |
692 | int bpf__unprobe(struct bpf_object *obj) | |
693 | { | |
694 | int err, ret = 0; | |
695 | struct bpf_program *prog; | |
aa3abf30 WN |
696 | |
697 | bpf_object__for_each_program(prog, obj) { | |
be834ffb | 698 | struct bpf_prog_priv *priv = bpf_program__priv(prog); |
aa3abf30 WN |
699 | int i; |
700 | ||
b4ee6d41 | 701 | if (IS_ERR(priv) || !priv || priv->is_tp) |
aa3abf30 WN |
702 | continue; |
703 | ||
704 | for (i = 0; i < priv->pev.ntevs; i++) { | |
705 | struct probe_trace_event *tev = &priv->pev.tevs[i]; | |
706 | char name_buf[EVENTS_WRITE_BUFSIZE]; | |
707 | struct strfilter *delfilter; | |
708 | ||
709 | snprintf(name_buf, EVENTS_WRITE_BUFSIZE, | |
710 | "%s:%s", tev->group, tev->event); | |
711 | name_buf[EVENTS_WRITE_BUFSIZE - 1] = '\0'; | |
712 | ||
713 | delfilter = strfilter__new(name_buf, NULL); | |
714 | if (!delfilter) { | |
715 | pr_debug("Failed to create filter for unprobing\n"); | |
716 | ret = -ENOMEM; | |
717 | continue; | |
718 | } | |
719 | ||
720 | err = del_perf_probe_events(delfilter); | |
721 | strfilter__delete(delfilter); | |
722 | if (err) { | |
723 | pr_debug("Failed to delete %s\n", name_buf); | |
724 | ret = err; | |
725 | continue; | |
726 | } | |
727 | } | |
728 | } | |
729 | return ret; | |
730 | } | |
731 | ||
1e5e3ee8 WN |
732 | int bpf__load(struct bpf_object *obj) |
733 | { | |
734 | int err; | |
735 | ||
736 | err = bpf_object__load(obj); | |
737 | if (err) { | |
739e2edc ACM |
738 | char bf[128]; |
739 | libbpf_strerror(err, bf, sizeof(bf)); | |
740 | pr_debug("bpf: load objects failed: err=%d: (%s)\n", err, bf); | |
1e5e3ee8 WN |
741 | return err; |
742 | } | |
743 | return 0; | |
744 | } | |
745 | ||
cd102d70 WN |
746 | int bpf__foreach_event(struct bpf_object *obj, |
747 | bpf_prog_iter_callback_t func, | |
748 | void *arg) | |
4edf30e3 WN |
749 | { |
750 | struct bpf_program *prog; | |
751 | int err; | |
752 | ||
753 | bpf_object__for_each_program(prog, obj) { | |
be834ffb | 754 | struct bpf_prog_priv *priv = bpf_program__priv(prog); |
4edf30e3 WN |
755 | struct probe_trace_event *tev; |
756 | struct perf_probe_event *pev; | |
4edf30e3 WN |
757 | int i, fd; |
758 | ||
be834ffb | 759 | if (IS_ERR(priv) || !priv) { |
4edf30e3 | 760 | pr_debug("bpf: failed to get private field\n"); |
d3e0ce39 | 761 | return -BPF_LOADER_ERRNO__INTERNAL; |
4edf30e3 WN |
762 | } |
763 | ||
b4ee6d41 WN |
764 | if (priv->is_tp) { |
765 | fd = bpf_program__fd(prog); | |
af4a0991 | 766 | err = (*func)(priv->sys_name, priv->evt_name, fd, obj, arg); |
b4ee6d41 WN |
767 | if (err) { |
768 | pr_debug("bpf: tracepoint call back failed, stop iterate\n"); | |
769 | return err; | |
770 | } | |
771 | continue; | |
772 | } | |
773 | ||
4edf30e3 WN |
774 | pev = &priv->pev; |
775 | for (i = 0; i < pev->ntevs; i++) { | |
776 | tev = &pev->tevs[i]; | |
777 | ||
d35b3289 WN |
778 | if (priv->need_prologue) { |
779 | int type = priv->type_mapping[i]; | |
780 | ||
781 | fd = bpf_program__nth_fd(prog, type); | |
782 | } else { | |
a08357d8 | 783 | fd = bpf_program__fd(prog); |
d35b3289 | 784 | } |
a08357d8 | 785 | |
4edf30e3 WN |
786 | if (fd < 0) { |
787 | pr_debug("bpf: failed to get file descriptor\n"); | |
788 | return fd; | |
789 | } | |
790 | ||
af4a0991 | 791 | err = (*func)(tev->group, tev->event, fd, obj, arg); |
4edf30e3 WN |
792 | if (err) { |
793 | pr_debug("bpf: call back failed, stop iterate\n"); | |
794 | return err; | |
795 | } | |
796 | } | |
797 | } | |
798 | return 0; | |
799 | } | |
800 | ||
066dacbf WN |
801 | enum bpf_map_op_type { |
802 | BPF_MAP_OP_SET_VALUE, | |
7630b3e2 | 803 | BPF_MAP_OP_SET_EVSEL, |
066dacbf WN |
804 | }; |
805 | ||
806 | enum bpf_map_key_type { | |
807 | BPF_MAP_KEY_ALL, | |
2d055bf2 | 808 | BPF_MAP_KEY_RANGES, |
066dacbf WN |
809 | }; |
810 | ||
811 | struct bpf_map_op { | |
812 | struct list_head list; | |
813 | enum bpf_map_op_type op_type; | |
814 | enum bpf_map_key_type key_type; | |
2d055bf2 WN |
815 | union { |
816 | struct parse_events_array array; | |
817 | } k; | |
066dacbf WN |
818 | union { |
819 | u64 value; | |
32dcd021 | 820 | struct evsel *evsel; |
066dacbf WN |
821 | } v; |
822 | }; | |
823 | ||
824 | struct bpf_map_priv { | |
825 | struct list_head ops_list; | |
826 | }; | |
827 | ||
828 | static void | |
829 | bpf_map_op__delete(struct bpf_map_op *op) | |
830 | { | |
831 | if (!list_empty(&op->list)) | |
e56fbc9d | 832 | list_del_init(&op->list); |
2d055bf2 WN |
833 | if (op->key_type == BPF_MAP_KEY_RANGES) |
834 | parse_events__clear_array(&op->k.array); | |
066dacbf WN |
835 | free(op); |
836 | } | |
837 | ||
838 | static void | |
839 | bpf_map_priv__purge(struct bpf_map_priv *priv) | |
840 | { | |
841 | struct bpf_map_op *pos, *n; | |
842 | ||
843 | list_for_each_entry_safe(pos, n, &priv->ops_list, list) { | |
844 | list_del_init(&pos->list); | |
845 | bpf_map_op__delete(pos); | |
846 | } | |
847 | } | |
848 | ||
849 | static void | |
850 | bpf_map_priv__clear(struct bpf_map *map __maybe_unused, | |
851 | void *_priv) | |
852 | { | |
853 | struct bpf_map_priv *priv = _priv; | |
854 | ||
855 | bpf_map_priv__purge(priv); | |
856 | free(priv); | |
857 | } | |
858 | ||
2d055bf2 WN |
859 | static int |
860 | bpf_map_op_setkey(struct bpf_map_op *op, struct parse_events_term *term) | |
861 | { | |
862 | op->key_type = BPF_MAP_KEY_ALL; | |
863 | if (!term) | |
864 | return 0; | |
865 | ||
866 | if (term->array.nr_ranges) { | |
867 | size_t memsz = term->array.nr_ranges * | |
868 | sizeof(op->k.array.ranges[0]); | |
869 | ||
870 | op->k.array.ranges = memdup(term->array.ranges, memsz); | |
871 | if (!op->k.array.ranges) { | |
042cfb5f | 872 | pr_debug("Not enough memory to alloc indices for map\n"); |
2d055bf2 WN |
873 | return -ENOMEM; |
874 | } | |
875 | op->key_type = BPF_MAP_KEY_RANGES; | |
876 | op->k.array.nr_ranges = term->array.nr_ranges; | |
877 | } | |
878 | return 0; | |
879 | } | |
880 | ||
066dacbf | 881 | static struct bpf_map_op * |
2d055bf2 | 882 | bpf_map_op__new(struct parse_events_term *term) |
066dacbf WN |
883 | { |
884 | struct bpf_map_op *op; | |
2d055bf2 | 885 | int err; |
066dacbf WN |
886 | |
887 | op = zalloc(sizeof(*op)); | |
888 | if (!op) { | |
889 | pr_debug("Failed to alloc bpf_map_op\n"); | |
890 | return ERR_PTR(-ENOMEM); | |
891 | } | |
892 | INIT_LIST_HEAD(&op->list); | |
893 | ||
2d055bf2 WN |
894 | err = bpf_map_op_setkey(op, term); |
895 | if (err) { | |
896 | free(op); | |
897 | return ERR_PTR(err); | |
898 | } | |
066dacbf WN |
899 | return op; |
900 | } | |
901 | ||
d7888573 WN |
902 | static struct bpf_map_op * |
903 | bpf_map_op__clone(struct bpf_map_op *op) | |
904 | { | |
905 | struct bpf_map_op *newop; | |
906 | ||
907 | newop = memdup(op, sizeof(*op)); | |
908 | if (!newop) { | |
909 | pr_debug("Failed to alloc bpf_map_op\n"); | |
910 | return NULL; | |
911 | } | |
912 | ||
913 | INIT_LIST_HEAD(&newop->list); | |
914 | if (op->key_type == BPF_MAP_KEY_RANGES) { | |
915 | size_t memsz = op->k.array.nr_ranges * | |
916 | sizeof(op->k.array.ranges[0]); | |
917 | ||
918 | newop->k.array.ranges = memdup(op->k.array.ranges, memsz); | |
919 | if (!newop->k.array.ranges) { | |
920 | pr_debug("Failed to alloc indices for map\n"); | |
921 | free(newop); | |
922 | return NULL; | |
923 | } | |
924 | } | |
925 | ||
926 | return newop; | |
927 | } | |
928 | ||
929 | static struct bpf_map_priv * | |
930 | bpf_map_priv__clone(struct bpf_map_priv *priv) | |
931 | { | |
932 | struct bpf_map_priv *newpriv; | |
933 | struct bpf_map_op *pos, *newop; | |
934 | ||
935 | newpriv = zalloc(sizeof(*newpriv)); | |
936 | if (!newpriv) { | |
042cfb5f | 937 | pr_debug("Not enough memory to alloc map private\n"); |
d7888573 WN |
938 | return NULL; |
939 | } | |
940 | INIT_LIST_HEAD(&newpriv->ops_list); | |
941 | ||
942 | list_for_each_entry(pos, &priv->ops_list, list) { | |
943 | newop = bpf_map_op__clone(pos); | |
944 | if (!newop) { | |
945 | bpf_map_priv__purge(newpriv); | |
946 | return NULL; | |
947 | } | |
948 | list_add_tail(&newop->list, &newpriv->ops_list); | |
949 | } | |
950 | ||
951 | return newpriv; | |
952 | } | |
953 | ||
066dacbf WN |
954 | static int |
955 | bpf_map__add_op(struct bpf_map *map, struct bpf_map_op *op) | |
956 | { | |
009ad5d5 | 957 | const char *map_name = bpf_map__name(map); |
b4cbfa56 | 958 | struct bpf_map_priv *priv = bpf_map__priv(map); |
066dacbf | 959 | |
b4cbfa56 | 960 | if (IS_ERR(priv)) { |
066dacbf | 961 | pr_debug("Failed to get private from map %s\n", map_name); |
b4cbfa56 | 962 | return PTR_ERR(priv); |
066dacbf WN |
963 | } |
964 | ||
965 | if (!priv) { | |
966 | priv = zalloc(sizeof(*priv)); | |
967 | if (!priv) { | |
042cfb5f | 968 | pr_debug("Not enough memory to alloc map private\n"); |
066dacbf WN |
969 | return -ENOMEM; |
970 | } | |
971 | INIT_LIST_HEAD(&priv->ops_list); | |
972 | ||
edb13ed4 | 973 | if (bpf_map__set_priv(map, priv, bpf_map_priv__clear)) { |
066dacbf WN |
974 | free(priv); |
975 | return -BPF_LOADER_ERRNO__INTERNAL; | |
976 | } | |
977 | } | |
978 | ||
979 | list_add_tail(&op->list, &priv->ops_list); | |
980 | return 0; | |
981 | } | |
982 | ||
/*
 * Allocate a new map op from @term and attach it to @map.
 * Returns the new op, or an ERR_PTR() on failure.
 */
static struct bpf_map_op *
bpf_map__add_newop(struct bpf_map *map, struct parse_events_term *term)
{
	struct bpf_map_op *newop = bpf_map_op__new(term);
	int ret;

	if (IS_ERR(newop))
		return newop;

	ret = bpf_map__add_op(map, newop);
	if (!ret)
		return newop;

	/* op was never queued; release it before reporting the error */
	bpf_map_op__delete(newop);
	return ERR_PTR(ret);
}
1000 | ||
/*
 * Validate that @map can accept a numeric 'value' setting and queue a
 * BPF_MAP_OP_SET_VALUE op for it.  Only a BPF_MAP_TYPE_ARRAY map whose
 * key is at least int-sized and whose value is 1/2/4/8 bytes qualifies.
 */
static int
__bpf_map__config_value(struct bpf_map *map,
			struct parse_events_term *term)
{
	struct bpf_map_op *op;
	const char *map_name = bpf_map__name(map);
	const struct bpf_map_def *def = bpf_map__def(map);

	if (IS_ERR(def)) {
		pr_debug("Unable to get map definition from '%s'\n",
			 map_name);
		return -BPF_LOADER_ERRNO__INTERNAL;
	}

	if (def->type != BPF_MAP_TYPE_ARRAY) {
		pr_debug("Map %s type is not BPF_MAP_TYPE_ARRAY\n",
			 map_name);
		return -BPF_LOADER_ERRNO__OBJCONF_MAP_TYPE;
	}
	if (def->key_size < sizeof(unsigned int)) {
		pr_debug("Map %s has incorrect key size\n", map_name);
		return -BPF_LOADER_ERRNO__OBJCONF_MAP_KEYSIZE;
	}
	/* value must be a power-of-two scalar size we can widen a u64 into */
	switch (def->value_size) {
	case 1:
	case 2:
	case 4:
	case 8:
		break;
	default:
		pr_debug("Map %s has incorrect value size\n", map_name);
		return -BPF_LOADER_ERRNO__OBJCONF_MAP_VALUESIZE;
	}

	op = bpf_map__add_newop(map, term);
	if (IS_ERR(op))
		return PTR_ERR(op);
	op->op_type = BPF_MAP_OP_SET_VALUE;
	op->v.value = term->val.num;
	return 0;
}
1042 | ||
1043 | static int | |
1044 | bpf_map__config_value(struct bpf_map *map, | |
1045 | struct parse_events_term *term, | |
1046 | struct perf_evlist *evlist __maybe_unused) | |
1047 | { | |
1048 | if (!term->err_val) { | |
1049 | pr_debug("Config value not set\n"); | |
1050 | return -BPF_LOADER_ERRNO__OBJCONF_CONF; | |
1051 | } | |
1052 | ||
1053 | if (term->type_val != PARSE_EVENTS__TERM_TYPE_NUM) { | |
7630b3e2 | 1054 | pr_debug("ERROR: wrong value type for 'value'\n"); |
066dacbf WN |
1055 | return -BPF_LOADER_ERRNO__OBJCONF_MAP_VALUE; |
1056 | } | |
1057 | ||
1058 | return __bpf_map__config_value(map, term); | |
1059 | } | |
1060 | ||
/*
 * Resolve the event named in @term within @evlist and queue a
 * BPF_MAP_OP_SET_EVSEL op on @map.  The map must be a
 * BPF_MAP_TYPE_PERF_EVENT_ARRAY; key/value sizes are left to the
 * kernel's own verification.
 */
static int
__bpf_map__config_event(struct bpf_map *map,
			struct parse_events_term *term,
			struct perf_evlist *evlist)
{
	struct evsel *evsel;
	const struct bpf_map_def *def;
	struct bpf_map_op *op;
	const char *map_name = bpf_map__name(map);

	evsel = perf_evlist__find_evsel_by_str(evlist, term->val.str);
	if (!evsel) {
		pr_debug("Event (for '%s') '%s' doesn't exist\n",
			 map_name, term->val.str);
		return -BPF_LOADER_ERRNO__OBJCONF_MAP_NOEVT;
	}

	def = bpf_map__def(map);
	if (IS_ERR(def)) {
		pr_debug("Unable to get map definition from '%s'\n",
			 map_name);
		return PTR_ERR(def);
	}

	/*
	 * No need to check key_size and value_size:
	 * kernel has already checked them.
	 */
	if (def->type != BPF_MAP_TYPE_PERF_EVENT_ARRAY) {
		pr_debug("Map %s type is not BPF_MAP_TYPE_PERF_EVENT_ARRAY\n",
			 map_name);
		return -BPF_LOADER_ERRNO__OBJCONF_MAP_TYPE;
	}

	op = bpf_map__add_newop(map, term);
	if (IS_ERR(op))
		return PTR_ERR(op);
	op->op_type = BPF_MAP_OP_SET_EVSEL;
	op->v.evsel = evsel;
	return 0;
}
1102 | ||
1103 | static int | |
1104 | bpf_map__config_event(struct bpf_map *map, | |
1105 | struct parse_events_term *term, | |
1106 | struct perf_evlist *evlist) | |
1107 | { | |
1108 | if (!term->err_val) { | |
1109 | pr_debug("Config value not set\n"); | |
1110 | return -BPF_LOADER_ERRNO__OBJCONF_CONF; | |
1111 | } | |
1112 | ||
1113 | if (term->type_val != PARSE_EVENTS__TERM_TYPE_STR) { | |
1114 | pr_debug("ERROR: wrong value type for 'event'\n"); | |
1115 | return -BPF_LOADER_ERRNO__OBJCONF_MAP_VALUE; | |
1116 | } | |
1117 | ||
1118 | return __bpf_map__config_event(map, term, evlist); | |
1119 | } | |
1120 | ||
/*
 * Dispatch table mapping the "<config opt>" part of a
 * "map:<mapname>.<config opt>" term to its handler.
 */
struct bpf_obj_config__map_func {
	const char *config_opt;
	int (*config_func)(struct bpf_map *, struct parse_events_term *,
			   struct perf_evlist *);
};

struct bpf_obj_config__map_func bpf_obj_config__map_funcs[] = {
	{"value", bpf_map__config_value},
	{"event", bpf_map__config_event},
};
1131 | ||
/*
 * Verify that every index range attached to @term fits inside @map's
 * max_entries.  A term without ranges (whole-array config) passes
 * trivially; ranges with a NULL array are an internal inconsistency.
 */
static int
config_map_indices_range_check(struct parse_events_term *term,
			       struct bpf_map *map,
			       const char *map_name)
{
	struct parse_events_array *array = &term->array;
	const struct bpf_map_def *def;
	unsigned int i;

	if (!array->nr_ranges)
		return 0;
	if (!array->ranges) {
		pr_debug("ERROR: map %s: array->nr_ranges is %d but range array is NULL\n",
			 map_name, (int)array->nr_ranges);
		return -BPF_LOADER_ERRNO__INTERNAL;
	}

	def = bpf_map__def(map);
	if (IS_ERR(def)) {
		pr_debug("ERROR: Unable to get map definition from '%s'\n",
			 map_name);
		return -BPF_LOADER_ERRNO__INTERNAL;
	}

	for (i = 0; i < array->nr_ranges; i++) {
		unsigned int start = array->ranges[i].start;
		size_t length = array->ranges[i].length;
		/* highest index the range touches */
		unsigned int idx = start + length - 1;

		if (idx >= def->max_entries) {
			pr_debug("ERROR: index %d too large\n", idx);
			return -BPF_LOADER_ERRNO__OBJCONF_MAP_IDX2BIG;
		}
	}
	return 0;
}
1168 | ||
066dacbf WN |
1169 | static int |
1170 | bpf__obj_config_map(struct bpf_object *obj, | |
1171 | struct parse_events_term *term, | |
1172 | struct perf_evlist *evlist, | |
1173 | int *key_scan_pos) | |
1174 | { | |
1175 | /* key is "map:<mapname>.<config opt>" */ | |
1176 | char *map_name = strdup(term->config + sizeof("map:") - 1); | |
1177 | struct bpf_map *map; | |
1178 | int err = -BPF_LOADER_ERRNO__OBJCONF_OPT; | |
1179 | char *map_opt; | |
1180 | size_t i; | |
1181 | ||
1182 | if (!map_name) | |
1183 | return -ENOMEM; | |
1184 | ||
1185 | map_opt = strchr(map_name, '.'); | |
1186 | if (!map_opt) { | |
1187 | pr_debug("ERROR: Invalid map config: %s\n", map_name); | |
1188 | goto out; | |
1189 | } | |
1190 | ||
1191 | *map_opt++ = '\0'; | |
1192 | if (*map_opt == '\0') { | |
1193 | pr_debug("ERROR: Invalid map option: %s\n", term->config); | |
1194 | goto out; | |
1195 | } | |
1196 | ||
a7fe0450 | 1197 | map = bpf_object__find_map_by_name(obj, map_name); |
066dacbf WN |
1198 | if (!map) { |
1199 | pr_debug("ERROR: Map %s doesn't exist\n", map_name); | |
1200 | err = -BPF_LOADER_ERRNO__OBJCONF_MAP_NOTEXIST; | |
1201 | goto out; | |
1202 | } | |
1203 | ||
2d055bf2 WN |
1204 | *key_scan_pos += strlen(map_opt); |
1205 | err = config_map_indices_range_check(term, map, map_name); | |
1206 | if (err) | |
1207 | goto out; | |
1208 | *key_scan_pos -= strlen(map_opt); | |
1209 | ||
066dacbf WN |
1210 | for (i = 0; i < ARRAY_SIZE(bpf_obj_config__map_funcs); i++) { |
1211 | struct bpf_obj_config__map_func *func = | |
1212 | &bpf_obj_config__map_funcs[i]; | |
1213 | ||
1214 | if (strcmp(map_opt, func->config_opt) == 0) { | |
1215 | err = func->config_func(map, term, evlist); | |
1216 | goto out; | |
1217 | } | |
1218 | } | |
1219 | ||
1220 | pr_debug("ERROR: Invalid map config option '%s'\n", map_opt); | |
1221 | err = -BPF_LOADER_ERRNO__OBJCONF_MAP_OPT; | |
1222 | out: | |
1223 | free(map_name); | |
1224 | if (!err) | |
1225 | key_scan_pos += strlen(map_opt); | |
1226 | return err; | |
1227 | } | |
1228 | ||
1229 | int bpf__config_obj(struct bpf_object *obj, | |
1230 | struct parse_events_term *term, | |
1231 | struct perf_evlist *evlist, | |
1232 | int *error_pos) | |
1233 | { | |
1234 | int key_scan_pos = 0; | |
1235 | int err; | |
1236 | ||
1237 | if (!obj || !term || !term->config) | |
1238 | return -EINVAL; | |
1239 | ||
8e99b6d4 | 1240 | if (strstarts(term->config, "map:")) { |
066dacbf WN |
1241 | key_scan_pos = sizeof("map:") - 1; |
1242 | err = bpf__obj_config_map(obj, term, evlist, &key_scan_pos); | |
1243 | goto out; | |
1244 | } | |
1245 | err = -BPF_LOADER_ERRNO__OBJCONF_OPT; | |
1246 | out: | |
1247 | if (error_pos) | |
1248 | *error_pos = key_scan_pos; | |
1249 | return err; | |
1250 | ||
1251 | } | |
1252 | ||
/*
 * Callback invoked by bpf_map_config_foreach_key() once per resolved
 * key (@pkey) of a map, with the queued @op and opaque caller @arg.
 */
typedef int (*map_config_func_t)(const char *name, int map_fd,
				 const struct bpf_map_def *pdef,
				 struct bpf_map_op *op,
				 void *pkey, void *arg);
1257 | ||
1258 | static int | |
1259 | foreach_key_array_all(map_config_func_t func, | |
1260 | void *arg, const char *name, | |
53897a78 | 1261 | int map_fd, const struct bpf_map_def *pdef, |
8690a2a7 WN |
1262 | struct bpf_map_op *op) |
1263 | { | |
1264 | unsigned int i; | |
1265 | int err; | |
1266 | ||
1267 | for (i = 0; i < pdef->max_entries; i++) { | |
1268 | err = func(name, map_fd, pdef, op, &i, arg); | |
1269 | if (err) { | |
1270 | pr_debug("ERROR: failed to insert value to %s[%u]\n", | |
1271 | name, i); | |
1272 | return err; | |
1273 | } | |
1274 | } | |
1275 | return 0; | |
1276 | } | |
1277 | ||
/*
 * Run @func for every index covered by the op's index ranges
 * (BPF_MAP_KEY_RANGES), stopping at the first failure.  Ranges were
 * already bounds-checked by config_map_indices_range_check().
 */
static int
foreach_key_array_ranges(map_config_func_t func, void *arg,
			 const char *name, int map_fd,
			 const struct bpf_map_def *pdef,
			 struct bpf_map_op *op)
{
	unsigned int i, j;
	int err;

	for (i = 0; i < op->k.array.nr_ranges; i++) {
		unsigned int start = op->k.array.ranges[i].start;
		size_t length = op->k.array.ranges[i].length;

		for (j = 0; j < length; j++) {
			unsigned int idx = start + j;

			err = func(name, map_fd, pdef, op, &idx, arg);
			if (err) {
				pr_debug("ERROR: failed to insert value to %s[%u]\n",
					 name, idx);
				return err;
			}
		}
	}
	return 0;
}
1304 | ||
/*
 * Walk every queued op of @map and invoke @func for each key the op
 * covers, dispatching on map type and op key type.  A map with no
 * private area or no queued ops is not an error.
 *
 * Returns 0 on success or the first error encountered.
 */
static int
bpf_map_config_foreach_key(struct bpf_map *map,
			   map_config_func_t func,
			   void *arg)
{
	int err, map_fd;
	struct bpf_map_op *op;
	const struct bpf_map_def *def;
	const char *name = bpf_map__name(map);
	struct bpf_map_priv *priv = bpf_map__priv(map);

	if (IS_ERR(priv)) {
		pr_debug("ERROR: failed to get private from map %s\n", name);
		return -BPF_LOADER_ERRNO__INTERNAL;
	}
	if (!priv || list_empty(&priv->ops_list)) {
		pr_debug("INFO: nothing to config for map %s\n", name);
		return 0;
	}

	def = bpf_map__def(map);
	if (IS_ERR(def)) {
		pr_debug("ERROR: failed to get definition from map %s\n", name);
		return -BPF_LOADER_ERRNO__INTERNAL;
	}
	map_fd = bpf_map__fd(map);
	if (map_fd < 0) {
		pr_debug("ERROR: failed to get fd from map %s\n", name);
		return map_fd;
	}

	list_for_each_entry(op, &priv->ops_list, list) {
		switch (def->type) {
		case BPF_MAP_TYPE_ARRAY:
		case BPF_MAP_TYPE_PERF_EVENT_ARRAY:
			/* array-like maps: iterate keys per the op's key type */
			switch (op->key_type) {
			case BPF_MAP_KEY_ALL:
				err = foreach_key_array_all(func, arg, name,
							    map_fd, def, op);
				break;
			case BPF_MAP_KEY_RANGES:
				err = foreach_key_array_ranges(func, arg, name,
							       map_fd, def,
							       op);
				break;
			default:
				pr_debug("ERROR: keytype for map '%s' invalid\n",
					 name);
				return -BPF_LOADER_ERRNO__INTERNAL;
			}
			if (err)
				return err;
			break;
		default:
			pr_debug("ERROR: type of '%s' incorrect\n", name);
			return -BPF_LOADER_ERRNO__OBJCONF_MAP_TYPE;
		}
	}

	return 0;
}
1366 | ||
/*
 * Store @val into map element @pkey, narrowing the u64 to the map's
 * declared value size (1/2/4/8 bytes) so the kernel sees exactly
 * val_size bytes.  Converts a failing bpf_map_update_elem() into a
 * negative errno when one is available.
 */
static int
apply_config_value_for_key(int map_fd, void *pkey,
			   size_t val_size, u64 val)
{
	int err = 0;

	switch (val_size) {
	case 1: {
		u8 _val = (u8)(val);
		err = bpf_map_update_elem(map_fd, pkey, &_val, BPF_ANY);
		break;
	}
	case 2: {
		u16 _val = (u16)(val);
		err = bpf_map_update_elem(map_fd, pkey, &_val, BPF_ANY);
		break;
	}
	case 4: {
		u32 _val = (u32)(val);
		err = bpf_map_update_elem(map_fd, pkey, &_val, BPF_ANY);
		break;
	}
	case 8: {
		err = bpf_map_update_elem(map_fd, pkey, &val, BPF_ANY);
		break;
	}
	default:
		pr_debug("ERROR: invalid value size\n");
		return -BPF_LOADER_ERRNO__OBJCONF_MAP_VALUESIZE;
	}
	if (err && errno)
		err = -errno;
	return err;
}
1401 | ||
/*
 * Store the perf event fd of @evsel into map element @pkey.  The evsel
 * must already be opened (fd xyarray populated), be single-dimension
 * (one cpu x thread cell per entry), not use inherit, and be a raw,
 * hardware or bpf-output event.  @pkey is interpreted as an index into
 * the evsel's fd array.
 */
static int
apply_config_evsel_for_key(const char *name, int map_fd, void *pkey,
			   struct evsel *evsel)
{
	struct xyarray *xy = evsel->fd;
	struct perf_event_attr *attr;
	unsigned int key, events;
	bool check_pass = false;
	int *evt_fd;
	int err;

	if (!xy) {
		pr_debug("ERROR: evsel not ready for map %s\n", name);
		return -BPF_LOADER_ERRNO__INTERNAL;
	}

	/* multi-thread/cpu dimensions can't map onto a single array slot */
	if (xy->row_size / xy->entry_size != 1) {
		pr_debug("ERROR: Dimension of target event is incorrect for map %s\n",
			 name);
		return -BPF_LOADER_ERRNO__OBJCONF_MAP_EVTDIM;
	}

	attr = &evsel->attr;
	if (attr->inherit) {
		pr_debug("ERROR: Can't put inherit event into map %s\n", name);
		return -BPF_LOADER_ERRNO__OBJCONF_MAP_EVTINH;
	}

	/* only bpf-output, raw and hardware events are accepted */
	if (perf_evsel__is_bpf_output(evsel))
		check_pass = true;
	if (attr->type == PERF_TYPE_RAW)
		check_pass = true;
	if (attr->type == PERF_TYPE_HARDWARE)
		check_pass = true;
	if (!check_pass) {
		pr_debug("ERROR: Event type is wrong for map %s\n", name);
		return -BPF_LOADER_ERRNO__OBJCONF_MAP_EVTTYPE;
	}

	events = xy->entries / (xy->row_size / xy->entry_size);
	key = *((unsigned int *)pkey);
	if (key >= events) {
		pr_debug("ERROR: there is no event %d for map %s\n",
			 key, name);
		return -BPF_LOADER_ERRNO__OBJCONF_MAP_MAPSIZE;
	}
	evt_fd = xyarray__entry(xy, key, 0);
	err = bpf_map_update_elem(map_fd, pkey, evt_fd, BPF_ANY);
	if (err && errno)
		err = -errno;
	return err;
}
1454 | ||
8690a2a7 WN |
1455 | static int |
1456 | apply_obj_config_map_for_key(const char *name, int map_fd, | |
53897a78 | 1457 | const struct bpf_map_def *pdef, |
8690a2a7 WN |
1458 | struct bpf_map_op *op, |
1459 | void *pkey, void *arg __maybe_unused) | |
1460 | { | |
1461 | int err; | |
1462 | ||
1463 | switch (op->op_type) { | |
1464 | case BPF_MAP_OP_SET_VALUE: | |
1465 | err = apply_config_value_for_key(map_fd, pkey, | |
1466 | pdef->value_size, | |
1467 | op->v.value); | |
1468 | break; | |
7630b3e2 WN |
1469 | case BPF_MAP_OP_SET_EVSEL: |
1470 | err = apply_config_evsel_for_key(name, map_fd, pkey, | |
1471 | op->v.evsel); | |
1472 | break; | |
8690a2a7 WN |
1473 | default: |
1474 | pr_debug("ERROR: unknown value type for '%s'\n", name); | |
1475 | err = -BPF_LOADER_ERRNO__INTERNAL; | |
1476 | } | |
1477 | return err; | |
1478 | } | |
1479 | ||
1480 | static int | |
1481 | apply_obj_config_map(struct bpf_map *map) | |
1482 | { | |
1483 | return bpf_map_config_foreach_key(map, | |
1484 | apply_obj_config_map_for_key, | |
1485 | NULL); | |
1486 | } | |
1487 | ||
1488 | static int | |
1489 | apply_obj_config_object(struct bpf_object *obj) | |
1490 | { | |
1491 | struct bpf_map *map; | |
1492 | int err; | |
1493 | ||
f74a53d9 | 1494 | bpf_object__for_each_map(map, obj) { |
8690a2a7 WN |
1495 | err = apply_obj_config_map(map); |
1496 | if (err) | |
1497 | return err; | |
1498 | } | |
1499 | return 0; | |
1500 | } | |
1501 | ||
1502 | int bpf__apply_obj_config(void) | |
1503 | { | |
1504 | struct bpf_object *obj, *tmp; | |
1505 | int err; | |
1506 | ||
1507 | bpf_object__for_each_safe(obj, tmp) { | |
1508 | err = apply_obj_config_object(obj); | |
1509 | if (err) | |
1510 | return err; | |
1511 | } | |
1512 | ||
1513 | return 0; | |
1514 | } | |
1515 | ||
/* Iterate over every map of every loaded BPF object (removal-safe). */
#define bpf__for_each_map(pos, obj, objtmp)	\
	bpf_object__for_each_safe(obj, objtmp)	\
		bpf_object__for_each_map(pos, obj)

/* As above, restricted to maps whose name compares equal to @name. */
#define bpf__for_each_map_named(pos, obj, objtmp, name)	\
	bpf__for_each_map(pos, obj, objtmp) 		\
		if (bpf_map__name(pos) && 		\
			(strcmp(name, 			\
				bpf_map__name(pos)) == 0))
d7888573 | 1525 | |
/*
 * Make sure every map called @name (across all loaded BPF objects) has
 * an op routing it to a bpf-output evsel.
 *
 * Pass 1 scans the maps: if one already carries a private config it
 * becomes the template (tmpl_priv); any map without one marks
 * need_init.  If nothing needs init, return NULL.  Otherwise, when no
 * template exists, a "bpf-output/no-inherit=1,name=<name>/" event is
 * parsed into @evlist and its evsel used.  Pass 2 fills each
 * unconfigured map, either by cloning the template priv or by queueing
 * a SET_EVSEL op for the new evsel.
 *
 * Returns the evsel created here (NULL if none was needed or a
 * template was cloned instead), or an ERR_PTR() on failure.
 */
struct evsel *bpf__setup_output_event(struct perf_evlist *evlist, const char *name)
{
	struct bpf_map_priv *tmpl_priv = NULL;
	struct bpf_object *obj, *tmp;
	struct evsel *evsel = NULL;
	struct bpf_map *map;
	int err;
	bool need_init = false;

	bpf__for_each_map_named(map, obj, tmp, name) {
		struct bpf_map_priv *priv = bpf_map__priv(map);

		if (IS_ERR(priv))
			return ERR_PTR(-BPF_LOADER_ERRNO__INTERNAL);

		/*
		 * No need to check map type: type should have been
		 * verified by kernel.
		 */
		if (!need_init && !priv)
			need_init = !priv;
		if (!tmpl_priv && priv)
			tmpl_priv = priv;
	}

	if (!need_init)
		return NULL;

	/* no template available: create a fresh bpf-output event */
	if (!tmpl_priv) {
		char *event_definition = NULL;

		if (asprintf(&event_definition, "bpf-output/no-inherit=1,name=%s/", name) < 0)
			return ERR_PTR(-ENOMEM);

		err = parse_events(evlist, event_definition, NULL);
		free(event_definition);

		if (err) {
			pr_debug("ERROR: failed to create the \"%s\" bpf-output event\n", name);
			return ERR_PTR(-err);
		}

		/* parse_events() appends, so the new evsel is the last one */
		evsel = perf_evlist__last(evlist);
	}

	bpf__for_each_map_named(map, obj, tmp, name) {
		struct bpf_map_priv *priv = bpf_map__priv(map);

		if (IS_ERR(priv))
			return ERR_PTR(-BPF_LOADER_ERRNO__INTERNAL);
		if (priv)
			continue;	/* already configured */

		if (tmpl_priv) {
			priv = bpf_map_priv__clone(tmpl_priv);
			if (!priv)
				return ERR_PTR(-ENOMEM);

			err = bpf_map__set_priv(map, priv, bpf_map_priv__clear);
			if (err) {
				bpf_map_priv__clear(map, priv);
				return ERR_PTR(err);
			}
		} else if (evsel) {
			struct bpf_map_op *op;

			op = bpf_map__add_newop(map, NULL);
			if (IS_ERR(op))
				return ERR_CAST(op);
			op->op_type = BPF_MAP_OP_SET_EVSEL;
			op->v.evsel = evsel;
		}
	}

	return evsel;
}
1602 | ||
/* Wire BPF program "stdout" through the conventional __bpf_stdout__ map. */
int bpf__setup_stdout(struct perf_evlist *evlist)
{
	return PTR_ERR_OR_ZERO(bpf__setup_output_event(evlist, "__bpf_stdout__"));
}
1608 | ||
/* Map a BPF_LOADER_ERRNO__* code to its slot in the message table. */
#define ERRNO_OFFSET(e)		((e) - __BPF_LOADER_ERRNO__START)
#define ERRCODE_OFFSET(c)	ERRNO_OFFSET(BPF_LOADER_ERRNO__##c)
#define NR_ERRNO	(__BPF_LOADER_ERRNO__END - __BPF_LOADER_ERRNO__START)

/* Human-readable messages for the loader's private error space. */
static const char *bpf_loader_strerror_table[NR_ERRNO] = {
	[ERRCODE_OFFSET(CONFIG)]	= "Invalid config string",
	[ERRCODE_OFFSET(GROUP)]		= "Invalid group name",
	[ERRCODE_OFFSET(EVENTNAME)]	= "No event name found in config string",
	[ERRCODE_OFFSET(INTERNAL)]	= "BPF loader internal error",
	[ERRCODE_OFFSET(COMPILE)]	= "Error when compiling BPF scriptlet",
	[ERRCODE_OFFSET(PROGCONF_TERM)]	= "Invalid program config term in config string",
	[ERRCODE_OFFSET(PROLOGUE)]	= "Failed to generate prologue",
	[ERRCODE_OFFSET(PROLOGUE2BIG)]	= "Prologue too big for program",
	[ERRCODE_OFFSET(PROLOGUEOOB)]	= "Offset out of bound for prologue",
	[ERRCODE_OFFSET(OBJCONF_OPT)]	= "Invalid object config option",
	[ERRCODE_OFFSET(OBJCONF_CONF)]	= "Config value not set (missing '=')",
	[ERRCODE_OFFSET(OBJCONF_MAP_OPT)]	= "Invalid object map config option",
	[ERRCODE_OFFSET(OBJCONF_MAP_NOTEXIST)]	= "Target map doesn't exist",
	[ERRCODE_OFFSET(OBJCONF_MAP_VALUE)]	= "Incorrect value type for map",
	[ERRCODE_OFFSET(OBJCONF_MAP_TYPE)]	= "Incorrect map type",
	[ERRCODE_OFFSET(OBJCONF_MAP_KEYSIZE)]	= "Incorrect map key size",
	[ERRCODE_OFFSET(OBJCONF_MAP_VALUESIZE)]	= "Incorrect map value size",
	[ERRCODE_OFFSET(OBJCONF_MAP_NOEVT)]	= "Event not found for map setting",
	[ERRCODE_OFFSET(OBJCONF_MAP_MAPSIZE)]	= "Invalid map size for event setting",
	[ERRCODE_OFFSET(OBJCONF_MAP_EVTDIM)]	= "Event dimension too large",
	[ERRCODE_OFFSET(OBJCONF_MAP_EVTINH)]	= "Doesn't support inherit event",
	[ERRCODE_OFFSET(OBJCONF_MAP_EVTTYPE)]	= "Wrong event type for map",
	[ERRCODE_OFFSET(OBJCONF_MAP_IDX2BIG)]	= "Index too large",
};
1638 | ||
/*
 * Render error code @err into @buf.  Libbpf's private errors are
 * delegated to libbpf_strerror(); this loader's own range uses the
 * table above; anything else falls back to str_error_r().
 *
 * Returns 0 when a specific message was found, -1 for the fallback
 * cases.  The sign of @err is ignored.
 */
static int
bpf_loader_strerror(int err, char *buf, size_t size)
{
	char sbuf[STRERR_BUFSIZE];
	const char *msg;

	if (!buf || !size)
		return -1;

	err = err > 0 ? err : -err;

	if (err >= __LIBBPF_ERRNO__START)
		return libbpf_strerror(err, buf, size);

	if (err >= __BPF_LOADER_ERRNO__START && err < __BPF_LOADER_ERRNO__END) {
		msg = bpf_loader_strerror_table[ERRNO_OFFSET(err)];
		snprintf(buf, size, "%s", msg);
		buf[size - 1] = '\0';
		return 0;
	}

	if (err >= __BPF_LOADER_ERRNO__END)
		snprintf(buf, size, "Unknown bpf loader error %d", err);
	else
		snprintf(buf, size, "%s",
			 str_error_r(err, sbuf, sizeof(sbuf)));

	buf[size - 1] = '\0';
	return -1;
}
1669 | ||
/*
 * Helper macros for the bpf__strerror_*() functions below.  _head opens
 * a switch on the (absolute) error code with a default case that prints
 * the generic message; _entry adds one case; _end closes the switch and
 * NUL-terminates the buffer.  They must be used together, in order.
 */
#define bpf__strerror_head(err, buf, size) \
	char sbuf[STRERR_BUFSIZE], *emsg;\
	if (!size)\
		return 0;\
	if (err < 0)\
		err = -err;\
	bpf_loader_strerror(err, sbuf, sizeof(sbuf));\
	emsg = sbuf;\
	switch (err) {\
	default:\
		scnprintf(buf, size, "%s", emsg);\
		break;

#define bpf__strerror_entry(val, fmt...)\
	case val: {\
		scnprintf(buf, size, fmt);\
		break;\
	}

#define bpf__strerror_end(buf, size)\
	}\
	buf[size - 1] = '\0';
1692 | ||
/*
 * Build a "Failed to load <file>[ from source]: <reason>" message for a
 * failed bpf__prepare_load*() call.  If the prefix alone fills @buf the
 * reason is omitted.  Returns bpf_loader_strerror()'s result.
 */
int bpf__strerror_prepare_load(const char *filename, bool source,
			       int err, char *buf, size_t size)
{
	size_t n;
	int ret;

	n = snprintf(buf, size, "Failed to load %s%s: ",
		     filename, source ? " from source" : "");
	if (n >= size) {
		buf[size - 1] = '\0';
		return 0;
	}
	/* append the error description after the prefix */
	buf += n;
	size -= n;

	ret = bpf_loader_strerror(err, buf, size);
	buf[size - 1] = '\0';
	return ret;
}
1712 | ||
/*
 * Describe an error from bpf__probe(), mapping common errno values to
 * actionable hints.  Always returns 0 with @buf filled (or untouched
 * when @size is 0).
 */
int bpf__strerror_probe(struct bpf_object *obj __maybe_unused,
			int err, char *buf, size_t size)
{
	bpf__strerror_head(err, buf, size);
	case BPF_LOADER_ERRNO__PROGCONF_TERM: {
		scnprintf(buf, size, "%s (add -v to see detail)", emsg);
		break;
	}
	bpf__strerror_entry(EEXIST, "Probe point exist. Try 'perf probe -d \"*\"' and set 'force=yes'");
	bpf__strerror_entry(EACCES, "You need to be root");
	bpf__strerror_entry(EPERM, "You need to be root, and /proc/sys/kernel/kptr_restrict should be 0");
	bpf__strerror_entry(ENOENT, "You need to check probing points in BPF file");
	bpf__strerror_end(buf, size);
	return 0;
}
1e5e3ee8 | 1728 | |
/*
 * Describe an error from bpf__load().  For a kernel-version mismatch
 * (LIBBPF_ERRNO__KVER) the message compares the object's 'version'
 * section against the running kernel.
 */
int bpf__strerror_load(struct bpf_object *obj,
		       int err, char *buf, size_t size)
{
	bpf__strerror_head(err, buf, size);
	case LIBBPF_ERRNO__KVER: {
		unsigned int obj_kver = bpf_object__kversion(obj);
		unsigned int real_kver;

		if (fetch_kernel_version(&real_kver, NULL, 0)) {
			scnprintf(buf, size, "Unable to fetch kernel version");
			break;
		}

		if (obj_kver != real_kver) {
			scnprintf(buf, size,
				  "'version' ("KVER_FMT") doesn't match running kernel ("KVER_FMT")",
				  KVER_PARAM(obj_kver),
				  KVER_PARAM(real_kver));
			break;
		}

		/* versions agree yet the kernel rejected it: no better hint */
		scnprintf(buf, size, "Failed to load program for unknown reason");
		break;
	}
	bpf__strerror_end(buf, size);
	return 0;
}
/* Describe an error from bpf__config_obj().  Always returns 0. */
int bpf__strerror_config_obj(struct bpf_object *obj __maybe_unused,
			     struct parse_events_term *term __maybe_unused,
			     struct perf_evlist *evlist __maybe_unused,
			     int *error_pos __maybe_unused, int err,
			     char *buf, size_t size)
{
	bpf__strerror_head(err, buf, size);
	bpf__strerror_entry(BPF_LOADER_ERRNO__OBJCONF_MAP_TYPE,
			    "Can't use this config term with this map type");
	bpf__strerror_end(buf, size);
	return 0;
}
/* Describe an error from bpf__apply_obj_config().  Always returns 0. */
int bpf__strerror_apply_obj_config(int err, char *buf, size_t size)
{
	bpf__strerror_head(err, buf, size);
	bpf__strerror_entry(BPF_LOADER_ERRNO__OBJCONF_MAP_EVTDIM,
			    "Cannot set event to BPF map in multi-thread tracing");
	bpf__strerror_entry(BPF_LOADER_ERRNO__OBJCONF_MAP_EVTINH,
			    "%s (Hint: use -i to turn off inherit)", emsg);
	bpf__strerror_entry(BPF_LOADER_ERRNO__OBJCONF_MAP_EVTTYPE,
			    "Can only put raw, hardware and BPF output event into a BPF map");
	bpf__strerror_end(buf, size);
	return 0;
}
d7888573 | 1782 | |
/*
 * Describe an error from bpf__setup_output_event(); only the generic
 * message applies.  Always returns 0.
 */
int bpf__strerror_setup_output_event(struct perf_evlist *evlist __maybe_unused,
				     int err, char *buf, size_t size)
{
	bpf__strerror_head(err, buf, size);
	bpf__strerror_end(buf, size);
	return 0;
}