Commit | Line | Data |
---|---|---|
1bc38b8f | 1 | // SPDX-License-Identifier: (LGPL-2.1 OR BSD-2-Clause) |
6061a3d6 | 2 | |
1b76c13e WN |
3 | /* |
4 | * Common eBPF ELF object loading operations. | |
5 | * | |
6 | * Copyright (C) 2013-2015 Alexei Starovoitov <ast@kernel.org> | |
7 | * Copyright (C) 2015 Wang Nan <wangnan0@huawei.com> | |
8 | * Copyright (C) 2015 Huawei Inc. | |
f367540c | 9 | * Copyright (C) 2017 Nicira, Inc. |
d859900c | 10 | * Copyright (C) 2019 Isovalent, Inc. |
1b76c13e WN |
11 | */ |
12 | ||
b4269954 | 13 | #ifndef _GNU_SOURCE |
531b014e | 14 | #define _GNU_SOURCE |
b4269954 | 15 | #endif |
1b76c13e | 16 | #include <stdlib.h> |
b3f59d66 WN |
17 | #include <stdio.h> |
18 | #include <stdarg.h> | |
f367540c | 19 | #include <libgen.h> |
34090915 | 20 | #include <inttypes.h> |
8ab9da57 | 21 | #include <limits.h> |
b3f59d66 | 22 | #include <string.h> |
1b76c13e | 23 | #include <unistd.h> |
cdb2f920 | 24 | #include <endian.h> |
1a5e3fb1 WN |
25 | #include <fcntl.h> |
26 | #include <errno.h> | |
113e6b7e | 27 | #include <ctype.h> |
1b76c13e | 28 | #include <asm/unistd.h> |
e28ff1a8 | 29 | #include <linux/err.h> |
cb1e5e96 | 30 | #include <linux/kernel.h> |
1b76c13e | 31 | #include <linux/bpf.h> |
38d5d3b3 | 32 | #include <linux/btf.h> |
47eff617 | 33 | #include <linux/filter.h> |
9a208eff | 34 | #include <linux/list.h> |
f367540c | 35 | #include <linux/limits.h> |
438363c0 | 36 | #include <linux/perf_event.h> |
a64af0ef | 37 | #include <linux/ring_buffer.h> |
5e61f270 | 38 | #include <linux/version.h> |
fb84b822 | 39 | #include <sys/epoll.h> |
63f2f5ee | 40 | #include <sys/ioctl.h> |
fb84b822 | 41 | #include <sys/mman.h> |
f367540c JS |
42 | #include <sys/stat.h> |
43 | #include <sys/types.h> | |
44 | #include <sys/vfs.h> | |
ddc7c304 | 45 | #include <sys/utsname.h> |
dc3a2d25 | 46 | #include <sys/resource.h> |
1a5e3fb1 WN |
47 | #include <libelf.h> |
48 | #include <gelf.h> | |
166750bc | 49 | #include <zlib.h> |
1b76c13e WN |
50 | |
51 | #include "libbpf.h" | |
52d3352e | 52 | #include "bpf.h" |
8a138aed | 53 | #include "btf.h" |
6d41907c | 54 | #include "str_error.h" |
d7c4b398 | 55 | #include "libbpf_internal.h" |
ddc7c304 | 56 | #include "hashmap.h" |
67234743 | 57 | #include "bpf_gen_internal.h" |
b3f59d66 | 58 | |
f367540c JS |
59 | #ifndef BPF_FS_MAGIC |
60 | #define BPF_FS_MAGIC 0xcafe4a11 | |
61 | #endif | |
62 | ||
9c0f8cbd AN |
63 | #define BPF_INSN_SZ (sizeof(struct bpf_insn)) |
64 | ||
ff466b58 AI |
65 | /* vsprintf() in __base_pr() uses nonliteral format string. It may break |
66 | * compilation if user enables corresponding warning. Disable it explicitly. | |
67 | */ | |
68 | #pragma GCC diagnostic ignored "-Wformat-nonliteral" | |
69 | ||
b3f59d66 WN |
70 | #define __printf(a, b) __attribute__((format(printf, a, b))) |
71 | ||
590a0088 | 72 | static struct bpf_map *bpf_object__add_map(struct bpf_object *obj); |
aea28a60 | 73 | static bool prog_is_subprog(const struct bpf_object *obj, const struct bpf_program *prog); |
590a0088 | 74 | |
a8a1f7d0 SF |
75 | static int __base_pr(enum libbpf_print_level level, const char *format, |
76 | va_list args) | |
b3f59d66 | 77 | { |
6f1ae8b6 YS |
78 | if (level == LIBBPF_DEBUG) |
79 | return 0; | |
80 | ||
a8a1f7d0 | 81 | return vfprintf(stderr, format, args); |
b3f59d66 WN |
82 | } |
83 | ||
a8a1f7d0 | 84 | static libbpf_print_fn_t __libbpf_pr = __base_pr; |
b3f59d66 | 85 | |
e87fd8ba | 86 | libbpf_print_fn_t libbpf_set_print(libbpf_print_fn_t fn) |
b3f59d66 | 87 | { |
e87fd8ba AN |
88 | libbpf_print_fn_t old_print_fn = __libbpf_pr; |
89 | ||
6f1ae8b6 | 90 | __libbpf_pr = fn; |
e87fd8ba | 91 | return old_print_fn; |
b3f59d66 | 92 | } |
1a5e3fb1 | 93 | |
8461ef8b YS |
94 | __printf(2, 3) |
95 | void libbpf_print(enum libbpf_print_level level, const char *format, ...) | |
96 | { | |
97 | va_list args; | |
98 | ||
6f1ae8b6 YS |
99 | if (!__libbpf_pr) |
100 | return; | |
101 | ||
8461ef8b | 102 | va_start(args, format); |
6f1ae8b6 | 103 | __libbpf_pr(level, format, args); |
8461ef8b YS |
104 | va_end(args); |
105 | } | |
106 | ||
/* If a BPF operation failed with -EPERM while running as root, the
 * likely culprit is the RLIMIT_MEMLOCK limit; print a hint with the
 * current limit, human-formatted.
 */
static void pr_perm_msg(int err)
{
	struct rlimit rl;
	char what[100];

	if (err != -EPERM || geteuid() != 0)
		return;
	if (getrlimit(RLIMIT_MEMLOCK, &rl))
		return;
	if (rl.rlim_cur == RLIM_INFINITY)
		return;	/* unlimited memlock can't be the problem */

	if (rl.rlim_cur < 1024)
		snprintf(what, sizeof(what), "%zu bytes", (size_t)rl.rlim_cur);
	else if (rl.rlim_cur < 1024*1024)
		snprintf(what, sizeof(what), "%.1f KiB", (double)rl.rlim_cur / 1024);
	else
		snprintf(what, sizeof(what), "%.1f MiB", (double)rl.rlim_cur / (1024*1024));

	pr_warn("permission error while running as root; try raising 'ulimit -l'? current value: %s\n",
		what);
}
132 | ||
6371ca3b WN |
133 | #define STRERR_BUFSIZE 128 |
134 | ||
1a5e3fb1 WN |
135 | /* Copied from tools/perf/util/util.h */ |
136 | #ifndef zfree | |
137 | # define zfree(ptr) ({ free(*ptr); *ptr = NULL; }) | |
138 | #endif | |
139 | ||
140 | #ifndef zclose | |
141 | # define zclose(fd) ({ \ | |
142 | int ___err = 0; \ | |
143 | if ((fd) >= 0) \ | |
144 | ___err = close((fd)); \ | |
145 | fd = -1; \ | |
146 | ___err; }) | |
147 | #endif | |
148 | ||
/* Convert a host pointer to the __u64 representation used in BPF
 * syscall attributes (round-trips through unsigned long to avoid
 * sign-extension on 32-bit hosts).
 */
static inline __u64 ptr_to_u64(const void *ptr)
{
	unsigned long addr = (unsigned long)ptr;

	return (__u64)addr;
}
153 | ||
5981881d AN |
154 | /* this goes away in libbpf 1.0 */ |
155 | enum libbpf_strict_mode libbpf_mode = LIBBPF_STRICT_NONE; | |
156 | ||
157 | int libbpf_set_strict_mode(enum libbpf_strict_mode mode) | |
158 | { | |
159 | /* __LIBBPF_STRICT_LAST is the last power-of-2 value used + 1, so to | |
160 | * get all possible values we compensate last +1, and then (2*x - 1) | |
161 | * to get the bit mask | |
162 | */ | |
163 | if (mode != LIBBPF_STRICT_ALL | |
164 | && (mode & ~((__LIBBPF_STRICT_LAST - 1) * 2 - 1))) | |
165 | return errno = EINVAL, -EINVAL; | |
166 | ||
167 | libbpf_mode = mode; | |
168 | return 0; | |
169 | } | |
170 | ||
/* Kernel features that libbpf probes for at runtime; results are
 * cached and queried via kernel_supports() (declared below).
 */
enum kern_feature_id {
	/* v4.14: kernel support for program & map names. */
	FEAT_PROG_NAME,
	/* v5.2: kernel support for global data sections. */
	FEAT_GLOBAL_DATA,
	/* BTF support */
	FEAT_BTF,
	/* BTF_KIND_FUNC and BTF_KIND_FUNC_PROTO support */
	FEAT_BTF_FUNC,
	/* BTF_KIND_VAR and BTF_KIND_DATASEC support */
	FEAT_BTF_DATASEC,
	/* BTF_FUNC_GLOBAL is supported */
	FEAT_BTF_GLOBAL_FUNC,
	/* BPF_F_MMAPABLE is supported for arrays */
	FEAT_ARRAY_MMAP,
	/* kernel support for expected_attach_type in BPF_PROG_LOAD */
	FEAT_EXP_ATTACH_TYPE,
	/* bpf_probe_read_{kernel,user}[_str] helpers */
	FEAT_PROBE_READ_KERN,
	/* BPF_PROG_BIND_MAP is supported */
	FEAT_PROG_BIND_MAP,
	/* Kernel support for module BTFs */
	FEAT_MODULE_BTF,
	/* BTF_KIND_FLOAT support */
	FEAT_BTF_FLOAT,
	/* BPF perf link support */
	FEAT_PERF_LINK,
	/* BTF_KIND_DECL_TAG support */
	FEAT_BTF_DECL_TAG,
	__FEAT_CNT,	/* number of features; must stay last */
};
202 | ||
9ca1f56a | 203 | static bool kernel_supports(const struct bpf_object *obj, enum kern_feature_id feat_id); |
47b6cb4d | 204 | |
/* Kinds of relocation an instruction inside a BPF program may need. */
enum reloc_type {
	RELO_LD64,
	RELO_CALL,
	RELO_DATA,
	RELO_EXTERN_VAR,
	RELO_EXTERN_FUNC,
	RELO_SUBPROG_ADDR,
};

/* One relocation record attached to a bpf_program. */
struct reloc_desc {
	enum reloc_type type;
	int insn_idx;	/* index of the instruction to patch */
	int map_idx;	/* presumably map index for map-referencing relos — see relocation code */
	int sym_off;	/* symbol offset within its section */
};
220 | ||
struct bpf_sec_def;

/* Per-SEC() handler callbacks; `cookie` comes from sec_def->cookie. */
typedef int (*init_fn_t)(struct bpf_program *prog, long cookie);
typedef int (*preload_fn_t)(struct bpf_program *prog, struct bpf_prog_load_params *attr, long cookie);
typedef struct bpf_link *(*attach_fn_t)(const struct bpf_program *prog, long cookie);

/* stored as sec_def->cookie for all libbpf-supported SEC()s */
enum sec_def_flags {
	SEC_NONE = 0,
	/* expected_attach_type is optional, if kernel doesn't support that */
	SEC_EXP_ATTACH_OPT = 1,
	/* legacy, only used by libbpf_get_type_names() and
	 * libbpf_attach_type_by_name(), not used by libbpf itself at all.
	 * This used to be associated with cgroup (and few other) BPF programs
	 * that were attachable through BPF_PROG_ATTACH command. Pretty
	 * meaningless nowadays, though.
	 */
	SEC_ATTACHABLE = 2,
	SEC_ATTACHABLE_OPT = SEC_ATTACHABLE | SEC_EXP_ATTACH_OPT,
	/* attachment target is specified through BTF ID in either kernel or
	 * other BPF program's BTF object */
	SEC_ATTACH_BTF = 4,
	/* BPF program type allows sleeping/blocking in kernel */
	SEC_SLEEPABLE = 8,
	/* allow non-strict prefix matching */
	SEC_SLOPPY_PFX = 16,
};

/* Describes one supported SEC() name and how programs using it are
 * typed, loaded, and attached.
 */
struct bpf_sec_def {
	const char *sec;	/* section name (prefix) to match */
	enum bpf_prog_type prog_type;
	enum bpf_attach_type expected_attach_type;
	long cookie;	/* sec_def_flags bitmask for libbpf-defined SEC()s */

	init_fn_t init_fn;	/* optional per-program init hook */
	preload_fn_t preload_fn;	/* optional hook before kernel load */
	attach_fn_t attach_fn;	/* optional auto-attach implementation */
};
259 | ||
/*
 * bpf_prog should be a better name but it has been used in
 * linux/filter.h.
 */
struct bpf_program {
	/* matching SEC() definition handler, if any */
	const struct bpf_sec_def *sec_def;
	/* name of the ELF section this program lives in */
	char *sec_name;
	/* index of that ELF section */
	size_t sec_idx;
	/* this program's instruction offset (in number of instructions)
	 * within its containing ELF section
	 */
	size_t sec_insn_off;
	/* number of original instructions in ELF section belonging to this
	 * program, not taking into account subprogram instructions possibly
	 * appended later during relocation
	 */
	size_t sec_insn_cnt;
	/* Offset (in number of instructions) of the start of instructions
	 * belonging to this BPF program within its containing main BPF
	 * program. For the entry-point (main) BPF program, this is always
	 * zero. For a sub-program, this gets reset before each of main BPF
	 * programs are processed and relocated and is used to determine
	 * whether sub-program was already appended to the main program, and
	 * if yes, at which instruction offset.
	 */
	size_t sub_insn_off;

	/* program (function) name, taken from the ELF symbol */
	char *name;
	/* sec_name with / replaced by _; makes recursive pinning
	 * in bpf_object__pin_programs easier
	 */
	char *pin_name;

	/* instructions that belong to BPF program; insns[0] is located at
	 * sec_insn_off instruction within its ELF section in ELF file, so
	 * when mapping ELF file instruction index to the local instruction,
	 * one needs to subtract sec_insn_off; and vice versa.
	 */
	struct bpf_insn *insns;
	/* actual number of instructions in this BPF program's image; for
	 * entry-point BPF programs this includes the size of main program
	 * itself plus all the used sub-programs, appended at the end
	 */
	size_t insns_cnt;

	/* relocations to apply to this program's instructions */
	struct reloc_desc *reloc_desc;
	int nr_reloc;
	/* verifier log verbosity for this program's load */
	int log_level;

	/* per-instance program FDs; nr == -1 means "never loaded"
	 * (see bpf_program__unload())
	 */
	struct {
		int nr;
		int *fds;
	} instances;
	bpf_program_prep_t preprocessor;

	/* back-pointer to the containing object */
	struct bpf_object *obj;
	/* user-attached private data and its destructor */
	void *priv;
	bpf_program_clear_priv_t clear_priv;

	/* whether this program should be loaded into the kernel */
	bool load;
	/* mark the BTF FUNC as static; set for global/weak symbols with
	 * hidden/internal visibility (see bpf_object__add_programs())
	 */
	bool mark_btf_static;
	enum bpf_prog_type type;
	enum bpf_attach_type expected_attach_type;
	int prog_ifindex;
	__u32 attach_btf_obj_fd;
	__u32 attach_btf_id;
	__u32 attach_prog_fd;
	/* func_info/line_info records and their sizes/counts; presumably
	 * sourced from .BTF.ext and passed to the kernel at load time —
	 * TODO confirm against the load path
	 */
	void *func_info;
	__u32 func_info_rec_size;
	__u32 func_info_cnt;

	void *line_info;
	__u32 line_info_rec_size;
	__u32 line_info_cnt;
	__u32 prog_flags;
};
336 | ||
/* State for one BPF_MAP_TYPE_STRUCT_OPS map's value. */
struct bpf_struct_ops {
	const char *tname;	/* name of the struct_ops type */
	const struct btf_type *type;
	struct bpf_program **progs;	/* one program per function member */
	__u32 *kern_func_off;
	/* e.g. struct tcp_congestion_ops in bpf_prog's btf format */
	void *data;
	/* e.g. struct bpf_struct_ops_tcp_congestion_ops in
	 * btf_vmlinux's format.
	 * struct bpf_struct_ops_tcp_congestion_ops {
	 *	[... some other kernel fields ...]
	 *	struct tcp_congestion_ops data;
	 * }
	 * kern_vdata-size == sizeof(struct bpf_struct_ops_tcp_congestion_ops)
	 * bpf_map__init_kern_struct_ops() will populate the "kern_vdata"
	 * from "data".
	 */
	void *kern_vdata;
	__u32 type_id;	/* BTF type ID of the struct_ops type */
};
357 | ||
ac9d1389 AN |
358 | #define DATA_SEC ".data" |
359 | #define BSS_SEC ".bss" | |
360 | #define RODATA_SEC ".rodata" | |
81bfdd08 | 361 | #define KCONFIG_SEC ".kconfig" |
1c0c7074 | 362 | #define KSYMS_SEC ".ksyms" |
590a0088 | 363 | #define STRUCT_OPS_SEC ".struct_ops" |
ac9d1389 | 364 | |
/* Which internal/special category a map belongs to, if any. */
enum libbpf_map_type {
	LIBBPF_MAP_UNSPEC,
	LIBBPF_MAP_DATA,	/* backs .data* sections */
	LIBBPF_MAP_BSS,		/* backs .bss */
	LIBBPF_MAP_RODATA,	/* backs .rodata* sections */
	LIBBPF_MAP_KCONFIG,	/* backs .kconfig externs */
};

struct bpf_map {
	char *name;
	/* real_name is defined for special internal maps (.rodata*,
	 * .data*, .bss, .kconfig) and preserves their original ELF section
	 * name. This is important to be able to find corresponding BTF
	 * DATASEC information.
	 */
	char *real_name;
	int fd;	/* kernel map FD once created/reused */
	int sec_idx;	/* ELF section the map was defined in */
	size_t sec_offset;	/* offset of the definition within that section */
	int map_ifindex;
	int inner_map_fd;
	struct bpf_map_def def;
	__u32 numa_node;
	__u32 btf_var_idx;
	__u32 btf_key_type_id;
	__u32 btf_value_type_id;
	__u32 btf_vmlinux_value_type_id;
	/* user-attached private data and its destructor */
	void *priv;
	bpf_map_clear_priv_t clear_priv;
	enum libbpf_map_type libbpf_type;
	void *mmaped;	/* mmap()'ed region for internal data maps */
	struct bpf_struct_ops *st_ops;
	/* prototype map and slot values for map-in-map initialization */
	struct bpf_map *inner_map;
	void **init_slots;
	int init_slots_sz;
	char *pin_path;
	bool pinned;
	bool reused;
};
404 | ||
/* Kind of an extern variable referenced by BPF code. */
enum extern_type {
	EXT_UNKNOWN,
	EXT_KCFG,	/* Kconfig value extern */
	EXT_KSYM,	/* kernel symbol extern */
};

/* Resolved C type of a Kconfig extern. */
enum kcfg_type {
	KCFG_UNKNOWN,
	KCFG_CHAR,
	KCFG_BOOL,
	KCFG_INT,
	KCFG_TRISTATE,
	KCFG_CHAR_ARR,
};

/* Description of one extern symbol found in the BPF object. */
struct extern_desc {
	enum extern_type type;
	int sym_idx;	/* ELF symbol index */
	int btf_id;	/* BTF ID of the extern variable */
	int sec_btf_id;	/* BTF ID of its containing DATASEC */
	const char *name;
	bool is_set;	/* whether the extern's value was resolved */
	bool is_weak;
	union {
		struct {
			enum kcfg_type type;
			int sz;
			int align;
			int data_off;	/* offset within the .kconfig map value */
			bool is_signed;
		} kcfg;
		struct {
			unsigned long long addr;

			/* target btf_id of the corresponding kernel var. */
			int kernel_btf_obj_fd;
			int kernel_btf_id;

			/* local btf_id of the ksym extern's type. */
			__u32 type_id;
			/* BTF fd index to be patched in for insn->off, this is
			 * 0 for vmlinux BTF, index in obj->fd_array for module
			 * BTF
			 */
			__s16 btf_fd_idx;
		} ksym;
	};
};
453 | ||
9a208eff WN |
454 | static LIST_HEAD(bpf_objects_list); |
455 | ||
/* Lazily-loaded handle for one kernel module's BTF. */
struct module_btf {
	struct btf *btf;
	char *name;	/* module name */
	__u32 id;	/* kernel-assigned BTF object ID */
	int fd;
	int fd_array_idx;	/* slot in obj->fd_array, once assigned */
};
463 | ||
/* Classification of ELF sections libbpf tracks per-section state for. */
enum sec_type {
	SEC_UNUSED = 0,
	SEC_RELO,
	SEC_BSS,
	SEC_DATA,
	SEC_RODATA,
};

/* Per-ELF-section bookkeeping. */
struct elf_sec_desc {
	enum sec_type sec_type;
	Elf64_Shdr *shdr;
	Elf_Data *data;
};

/* Transient ELF parsing state; only valid while efile.elf is open. */
struct elf_state {
	int fd;
	const void *obj_buf;	/* in-memory object image, if any */
	size_t obj_buf_sz;
	Elf *elf;
	Elf64_Ehdr *ehdr;
	Elf_Data *symbols;	/* symbol table contents */
	Elf_Data *st_ops_data;	/* .struct_ops section contents */
	size_t shstrndx; /* section index for section name strings */
	size_t strtabidx;
	struct elf_sec_desc *secs;	/* indexed by section index */
	int sec_cnt;
	/* section indices of the various special sections (-1/unset if absent) */
	int maps_shndx;
	int btf_maps_shndx;
	__u32 btf_maps_sec_btf_id;
	int text_shndx;
	int symbols_shndx;
	int st_ops_shndx;
};
497 | ||
/* Represents one opened BPF ELF object: its programs, maps, externs,
 * BTF data and the transient ELF parsing state.
 */
struct bpf_object {
	char name[BPF_OBJ_NAME_LEN];
	char license[64];
	__u32 kern_version;

	struct bpf_program *programs;
	size_t nr_programs;
	struct bpf_map *maps;
	size_t nr_maps;
	size_t maps_cap;	/* allocated capacity of maps array */

	char *kconfig;	/* extra kconfig overrides supplied by the user */
	struct extern_desc *externs;
	int nr_extern;
	int kconfig_map_idx;	/* index of the .kconfig map in maps, if any */

	bool loaded;	/* whether the object was loaded into the kernel */
	bool has_subcalls;
	bool has_rodata;

	/* set when generating a loader program instead of doing syscalls */
	struct bpf_gen *gen_loader;

	/* Information when doing ELF related work. Only valid if efile.elf is not NULL */
	struct elf_state efile;
	/*
	 * All loaded bpf_object are linked in a list, which is
	 * hidden to caller. bpf_objects__<func> handlers deal with
	 * all objects.
	 */
	struct list_head list;

	struct btf *btf;
	struct btf_ext *btf_ext;

	/* Parse and load BTF vmlinux if any of the programs in the object need
	 * it at load time.
	 */
	struct btf *btf_vmlinux;
	/* Path to the custom BTF to be used for BPF CO-RE relocations as an
	 * override for vmlinux BTF.
	 */
	char *btf_custom_path;
	/* vmlinux BTF override for CO-RE relocations */
	struct btf *btf_vmlinux_override;
	/* Lazily initialized kernel module BTFs */
	struct module_btf *btf_modules;
	bool btf_modules_loaded;
	size_t btf_module_cnt;
	size_t btf_module_cap;

	/* user-attached private data and its destructor */
	void *priv;
	bpf_object_clear_priv_t clear_priv;

	/* array of module BTF FDs referenced by instructions (see
	 * extern_desc.ksym.btf_fd_idx)
	 */
	int *fd_array;
	size_t fd_array_cap;
	size_t fd_array_cnt;

	char path[];	/* path the object was opened from; flexible array */
};
1a5e3fb1 | 557 | |
88a82120 AN |
558 | static const char *elf_sym_str(const struct bpf_object *obj, size_t off); |
559 | static const char *elf_sec_str(const struct bpf_object *obj, size_t off); | |
560 | static Elf_Scn *elf_sec_by_idx(const struct bpf_object *obj, size_t idx); | |
561 | static Elf_Scn *elf_sec_by_name(const struct bpf_object *obj, const char *name); | |
ad23b723 | 562 | static Elf64_Shdr *elf_sec_hdr(const struct bpf_object *obj, Elf_Scn *scn); |
88a82120 AN |
563 | static const char *elf_sec_name(const struct bpf_object *obj, Elf_Scn *scn); |
564 | static Elf_Data *elf_sec_data(const struct bpf_object *obj, Elf_Scn *scn); | |
ad23b723 AN |
565 | static Elf64_Sym *elf_sym_by_idx(const struct bpf_object *obj, size_t idx); |
566 | static Elf64_Rel *elf_rel_by_idx(Elf_Data *data, size_t idx); | |
88a82120 | 567 | |
29cd77f4 | 568 | void bpf_program__unload(struct bpf_program *prog) |
55cffde2 | 569 | { |
b580563e WN |
570 | int i; |
571 | ||
55cffde2 WN |
572 | if (!prog) |
573 | return; | |
574 | ||
b580563e WN |
575 | /* |
576 | * If the object is opened but the program was never loaded, | |
577 | * it is possible that prog->instances.nr == -1. | |
578 | */ | |
579 | if (prog->instances.nr > 0) { | |
580 | for (i = 0; i < prog->instances.nr; i++) | |
581 | zclose(prog->instances.fds[i]); | |
582 | } else if (prog->instances.nr != -1) { | |
be18010e KW |
583 | pr_warn("Internal error: instances.nr is %d\n", |
584 | prog->instances.nr); | |
b580563e WN |
585 | } |
586 | ||
587 | prog->instances.nr = -1; | |
588 | zfree(&prog->instances.fds); | |
2993e051 | 589 | |
2993e051 | 590 | zfree(&prog->func_info); |
07a09d1b | 591 | zfree(&prog->line_info); |
55cffde2 WN |
592 | } |
593 | ||
a5b8bd47 WN |
594 | static void bpf_program__exit(struct bpf_program *prog) |
595 | { | |
596 | if (!prog) | |
597 | return; | |
598 | ||
aa9b1ac3 WN |
599 | if (prog->clear_priv) |
600 | prog->clear_priv(prog, prog->priv); | |
601 | ||
602 | prog->priv = NULL; | |
603 | prog->clear_priv = NULL; | |
604 | ||
55cffde2 | 605 | bpf_program__unload(prog); |
88cda1c9 | 606 | zfree(&prog->name); |
52109584 | 607 | zfree(&prog->sec_name); |
33a2c75c | 608 | zfree(&prog->pin_name); |
a5b8bd47 | 609 | zfree(&prog->insns); |
34090915 WN |
610 | zfree(&prog->reloc_desc); |
611 | ||
612 | prog->nr_reloc = 0; | |
a5b8bd47 | 613 | prog->insns_cnt = 0; |
c1122392 | 614 | prog->sec_idx = -1; |
a5b8bd47 WN |
615 | } |
616 | ||
33a2c75c SF |
617 | static char *__bpf_program__pin_name(struct bpf_program *prog) |
618 | { | |
619 | char *name, *p; | |
620 | ||
52109584 | 621 | name = p = strdup(prog->sec_name); |
33a2c75c SF |
622 | while ((p = strchr(p, '/'))) |
623 | *p = '_'; | |
624 | ||
625 | return name; | |
626 | } | |
627 | ||
/* True only for a BPF-to-BPF (pseudo) call: BPF_CALL jump with
 * src_reg == BPF_PSEUDO_CALL and zeroed dst_reg/off.
 */
static bool insn_is_subprog_call(const struct bpf_insn *insn)
{
	if (BPF_CLASS(insn->code) != BPF_JMP || BPF_OP(insn->code) != BPF_CALL)
		return false;
	if (BPF_SRC(insn->code) != BPF_K || insn->src_reg != BPF_PSEUDO_CALL)
		return false;

	return insn->dst_reg == 0 && insn->off == 0;
}
637 | ||
/* True iff the instruction opcode is exactly BPF_JMP | BPF_CALL. */
static bool is_call_insn(const struct bpf_insn *insn)
{
	const __u8 call_code = BPF_JMP | BPF_CALL;

	return insn->code == call_code;
}
642 | ||
/* True for the ld_imm64 form that loads the address of a BPF
 * subprogram (src_reg == BPF_PSEUDO_FUNC).
 */
static bool insn_is_pseudo_func(struct bpf_insn *insn)
{
	if (!is_ldimm64_insn(insn))
		return false;

	return insn->src_reg == BPF_PSEUDO_FUNC;
}
647 | ||
/* Initialize one bpf_program in place from its ELF section data.
 * `sec_off`/`insn_data_sz` are in bytes and must be multiples of the
 * instruction size. On failure everything allocated so far is released
 * via bpf_program__exit(); returns 0 or a negative errno.
 */
static int
bpf_object__init_prog(struct bpf_object *obj, struct bpf_program *prog,
		      const char *name, size_t sec_idx, const char *sec_name,
		      size_t sec_off, void *insn_data, size_t insn_data_sz)
{
	/* program must be non-empty and insn-aligned in both size and offset */
	if (insn_data_sz == 0 || insn_data_sz % BPF_INSN_SZ || sec_off % BPF_INSN_SZ) {
		pr_warn("sec '%s': corrupted program '%s', offset %zu, size %zu\n",
			sec_name, name, sec_off, insn_data_sz);
		return -EINVAL;
	}

	memset(prog, 0, sizeof(*prog));
	prog->obj = obj;

	prog->sec_idx = sec_idx;
	prog->sec_insn_off = sec_off / BPF_INSN_SZ;
	prog->sec_insn_cnt = insn_data_sz / BPF_INSN_SZ;
	/* insns_cnt can later be increased by appending used subprograms */
	prog->insns_cnt = prog->sec_insn_cnt;

	prog->type = BPF_PROG_TYPE_UNSPEC;
	prog->load = true;

	/* -1 marks "never loaded" (see bpf_program__unload()) */
	prog->instances.fds = NULL;
	prog->instances.nr = -1;

	prog->sec_name = strdup(sec_name);
	if (!prog->sec_name)
		goto errout;

	prog->name = strdup(name);
	if (!prog->name)
		goto errout;

	/* pin_name derives from sec_name, which must be set by now */
	prog->pin_name = __bpf_program__pin_name(prog);
	if (!prog->pin_name)
		goto errout;

	/* keep a private copy of the program's instructions */
	prog->insns = malloc(insn_data_sz);
	if (!prog->insns)
		goto errout;
	memcpy(prog->insns, insn_data, insn_data_sz);

	return 0;
errout:
	pr_warn("sec '%s': failed to allocate memory for prog '%s'\n", sec_name, name);
	bpf_program__exit(prog);
	return -ENOMEM;
}
697 | ||
/* Scan the symbol table for STT_FUNC symbols that live in the given
 * ELF section and create one bpf_program per such function, growing
 * obj->programs. Returns 0 or a negative error code.
 */
static int
bpf_object__add_programs(struct bpf_object *obj, Elf_Data *sec_data,
			 const char *sec_name, int sec_idx)
{
	Elf_Data *symbols = obj->efile.symbols;
	struct bpf_program *prog, *progs;
	void *data = sec_data->d_buf;
	size_t sec_sz = sec_data->d_size, sec_off, prog_sz, nr_syms;
	int nr_progs, err, i;
	const char *name;
	Elf64_Sym *sym;

	progs = obj->programs;
	nr_progs = obj->nr_programs;
	nr_syms = symbols->d_size / sizeof(Elf64_Sym);
	sec_off = 0;

	for (i = 0; i < nr_syms; i++) {
		sym = elf_sym_by_idx(obj, i);

		/* only function symbols defined in this very section */
		if (sym->st_shndx != sec_idx)
			continue;
		if (ELF64_ST_TYPE(sym->st_info) != STT_FUNC)
			continue;

		prog_sz = sym->st_size;
		sec_off = sym->st_value;

		name = elf_sym_str(obj, sym->st_name);
		if (!name) {
			pr_warn("sec '%s': failed to get symbol name for offset %zu\n",
				sec_name, sec_off);
			return -LIBBPF_ERRNO__FORMAT;
		}

		/* program body must fit entirely inside the section */
		if (sec_off + prog_sz > sec_sz) {
			pr_warn("sec '%s': program at offset %zu crosses section boundary\n",
				sec_name, sec_off);
			return -LIBBPF_ERRNO__FORMAT;
		}

		/* static entry-point programs (outside .text) are not supported */
		if (sec_idx != obj->efile.text_shndx && ELF64_ST_BIND(sym->st_info) == STB_LOCAL) {
			pr_warn("sec '%s': program '%s' is static and not supported\n", sec_name, name);
			return -ENOTSUP;
		}

		pr_debug("sec '%s': found program '%s' at insn offset %zu (%zu bytes), code size %zu insns (%zu bytes)\n",
			 sec_name, name, sec_off / BPF_INSN_SZ, sec_off, prog_sz / BPF_INSN_SZ, prog_sz);

		progs = libbpf_reallocarray(progs, nr_progs + 1, sizeof(*progs));
		if (!progs) {
			/*
			 * In this case the original obj->programs
			 * is still valid, so don't need special treat for
			 * bpf_close_object().
			 */
			pr_warn("sec '%s': failed to alloc memory for new program '%s'\n",
				sec_name, name);
			return -ENOMEM;
		}
		obj->programs = progs;

		prog = &progs[nr_progs];

		err = bpf_object__init_prog(obj, prog, name, sec_idx, sec_name,
					    sec_off, data + sec_off, prog_sz);
		if (err)
			return err;

		/* if function is a global/weak symbol, but has restricted
		 * (STV_HIDDEN or STV_INTERNAL) visibility, mark its BTF FUNC
		 * as static to enable more permissive BPF verification mode
		 * with more outside context available to BPF verifier
		 */
		if (ELF64_ST_BIND(sym->st_info) != STB_LOCAL
		    && (ELF64_ST_VISIBILITY(sym->st_other) == STV_HIDDEN
			|| ELF64_ST_VISIBILITY(sym->st_other) == STV_INTERNAL))
			prog->mark_btf_static = true;

		nr_progs++;
		obj->nr_programs = nr_progs;
	}

	return 0;
}
783 | ||
/* Best-effort detection of the running kernel version as a
 * KERNEL_VERSION() triple parsed from uname's release string.
 * Returns 0 when the release string can't be obtained or parsed.
 */
static __u32 get_kernel_version(void)
{
	__u32 major, minor, patch;
	struct utsname info;

	/* uname() can fail; treat that as "unknown version" rather than
	 * parsing an uninitialized info.release buffer
	 */
	if (uname(&info))
		return 0;
	if (sscanf(info.release, "%u.%u.%u", &major, &minor, &patch) != 3)
		return 0;
	return KERNEL_VERSION(major, minor, patch);
}
794 | ||
590a0088 MKL |
795 | static const struct btf_member * |
796 | find_member_by_offset(const struct btf_type *t, __u32 bit_offset) | |
797 | { | |
798 | struct btf_member *m; | |
799 | int i; | |
800 | ||
801 | for (i = 0, m = btf_members(t); i < btf_vlen(t); i++, m++) { | |
802 | if (btf_member_bit_offset(t, i) == bit_offset) | |
803 | return m; | |
804 | } | |
805 | ||
806 | return NULL; | |
807 | } | |
808 | ||
809 | static const struct btf_member * | |
810 | find_member_by_name(const struct btf *btf, const struct btf_type *t, | |
811 | const char *name) | |
812 | { | |
813 | struct btf_member *m; | |
814 | int i; | |
815 | ||
816 | for (i = 0, m = btf_members(t); i < btf_vlen(t); i++, m++) { | |
817 | if (!strcmp(btf__name_by_offset(btf, m->name_off), name)) | |
818 | return m; | |
819 | } | |
820 | ||
821 | return NULL; | |
822 | } | |
823 | ||
#define STRUCT_OPS_VALUE_PREFIX "bpf_struct_ops_"
static int find_btf_by_prefix_kind(const struct btf *btf, const char *prefix,
				   const char *name, __u32 kind);

/* For a struct_ops type named `tname` (e.g. tcp_congestion_ops), look
 * up in kernel BTF both the type itself and its kernel-side map-value
 * wrapper struct (bpf_struct_ops_<tname>), plus the wrapper's member
 * that embeds the type. All five out-parameters are filled on success;
 * returns 0 or a negative error.
 */
static int
find_struct_ops_kern_types(const struct btf *btf, const char *tname,
			   const struct btf_type **type, __u32 *type_id,
			   const struct btf_type **vtype, __u32 *vtype_id,
			   const struct btf_member **data_member)
{
	const struct btf_type *kern_type, *kern_vtype;
	const struct btf_member *kern_data_member;
	__s32 kern_vtype_id, kern_type_id;
	__u32 i;

	kern_type_id = btf__find_by_name_kind(btf, tname, BTF_KIND_STRUCT);
	if (kern_type_id < 0) {
		pr_warn("struct_ops init_kern: struct %s is not found in kernel BTF\n",
			tname);
		return kern_type_id;
	}
	kern_type = btf__type_by_id(btf, kern_type_id);

	/* Find the corresponding "map_value" type that will be used
	 * in map_update(BPF_MAP_TYPE_STRUCT_OPS). For example,
	 * find "struct bpf_struct_ops_tcp_congestion_ops" from the
	 * btf_vmlinux.
	 */
	kern_vtype_id = find_btf_by_prefix_kind(btf, STRUCT_OPS_VALUE_PREFIX,
						tname, BTF_KIND_STRUCT);
	if (kern_vtype_id < 0) {
		pr_warn("struct_ops init_kern: struct %s%s is not found in kernel BTF\n",
			STRUCT_OPS_VALUE_PREFIX, tname);
		return kern_vtype_id;
	}
	kern_vtype = btf__type_by_id(btf, kern_vtype_id);

	/* Find "struct tcp_congestion_ops" from
	 * struct bpf_struct_ops_tcp_congestion_ops {
	 *	[ ... ]
	 *	struct tcp_congestion_ops data;
	 * }
	 */
	kern_data_member = btf_members(kern_vtype);
	for (i = 0; i < btf_vlen(kern_vtype); i++, kern_data_member++) {
		if (kern_data_member->type == kern_type_id)
			break;
	}
	if (i == btf_vlen(kern_vtype)) {
		pr_warn("struct_ops init_kern: struct %s data is not found in struct %s%s\n",
			tname, STRUCT_OPS_VALUE_PREFIX, tname);
		return -EINVAL;
	}

	*type = kern_type;
	*type_id = kern_type_id;
	*vtype = kern_vtype;
	*vtype_id = kern_vtype_id;
	*data_member = kern_data_member;

	return 0;
}
886 | ||
887 | static bool bpf_map__is_struct_ops(const struct bpf_map *map) | |
888 | { | |
889 | return map->def.type == BPF_MAP_TYPE_STRUCT_OPS; | |
890 | } | |
891 | ||
/* Init the map's fields that depend on kern_btf.
 *
 * Translates the object's view of the struct_ops struct (st_ops->data, laid
 * out per the object's BTF) into the kernel's layout (st_ops->kern_vdata,
 * laid out per kernel BTF), matching members by name.  Function-pointer
 * members are wired to their BPF programs instead of being copied.
 * Returns 0 on success, -ENOMEM/-ENOTSUP on failure.
 */
static int bpf_map__init_kern_struct_ops(struct bpf_map *map,
					 const struct btf *btf,
					 const struct btf *kern_btf)
{
	const struct btf_member *member, *kern_member, *kern_data_member;
	const struct btf_type *type, *kern_type, *kern_vtype;
	__u32 i, kern_type_id, kern_vtype_id, kern_data_off;
	struct bpf_struct_ops *st_ops;
	void *data, *kern_data;
	const char *tname;
	int err;

	st_ops = map->st_ops;
	type = st_ops->type;
	tname = st_ops->tname;
	err = find_struct_ops_kern_types(kern_btf, tname,
					 &kern_type, &kern_type_id,
					 &kern_vtype, &kern_vtype_id,
					 &kern_data_member);
	if (err)
		return err;

	pr_debug("struct_ops init_kern %s: type_id:%u kern_type_id:%u kern_vtype_id:%u\n",
		 map->name, st_ops->type_id, kern_type_id, kern_vtype_id);

	/* the kernel-side map value is the wrapper struct, not the raw
	 * struct_ops struct, so its size/BTF id describe the map value
	 */
	map->def.value_size = kern_vtype->size;
	map->btf_vmlinux_value_type_id = kern_vtype_id;

	st_ops->kern_vdata = calloc(1, kern_vtype->size);
	if (!st_ops->kern_vdata)
		return -ENOMEM;

	data = st_ops->data;
	kern_data_off = kern_data_member->offset / 8; /* BTF offsets are in bits */
	kern_data = st_ops->kern_vdata + kern_data_off;

	member = btf_members(type);
	for (i = 0; i < btf_vlen(type); i++, member++) {
		const struct btf_type *mtype, *kern_mtype;
		__u32 mtype_id, kern_mtype_id;
		void *mdata, *kern_mdata;
		__s64 msize, kern_msize;
		__u32 moff, kern_moff;
		__u32 kern_member_idx;
		const char *mname;

		/* every object-side member must exist kernel-side too */
		mname = btf__name_by_offset(btf, member->name_off);
		kern_member = find_member_by_name(kern_btf, kern_type, mname);
		if (!kern_member) {
			pr_warn("struct_ops init_kern %s: Cannot find member %s in kernel BTF\n",
				map->name, mname);
			return -ENOTSUP;
		}

		kern_member_idx = kern_member - btf_members(kern_type);
		if (btf_member_bitfield_size(type, i) ||
		    btf_member_bitfield_size(kern_type, kern_member_idx)) {
			pr_warn("struct_ops init_kern %s: bitfield %s is not supported\n",
				map->name, mname);
			return -ENOTSUP;
		}

		moff = member->offset / 8;
		kern_moff = kern_member->offset / 8;

		mdata = data + moff;
		kern_mdata = kern_data + kern_moff;

		/* both sides must resolve to the same BTF kind */
		mtype = skip_mods_and_typedefs(btf, member->type, &mtype_id);
		kern_mtype = skip_mods_and_typedefs(kern_btf, kern_member->type,
						    &kern_mtype_id);
		if (BTF_INFO_KIND(mtype->info) !=
		    BTF_INFO_KIND(kern_mtype->info)) {
			pr_warn("struct_ops init_kern %s: Unmatched member type %s %u != %u(kernel)\n",
				map->name, mname, BTF_INFO_KIND(mtype->info),
				BTF_INFO_KIND(kern_mtype->info));
			return -ENOTSUP;
		}

		if (btf_is_ptr(mtype)) {
			/* func ptr member: attach the BPF program (if one was
			 * assigned to this slot) instead of copying data
			 */
			struct bpf_program *prog;

			prog = st_ops->progs[i];
			if (!prog)
				continue;

			kern_mtype = skip_mods_and_typedefs(kern_btf,
							    kern_mtype->type,
							    &kern_mtype_id);

			/* mtype->type must be a func_proto which was
			 * guaranteed in bpf_object__collect_st_ops_relos(),
			 * so only check kern_mtype for func_proto here.
			 */
			if (!btf_is_func_proto(kern_mtype)) {
				pr_warn("struct_ops init_kern %s: kernel member %s is not a func ptr\n",
					map->name, mname);
				return -ENOTSUP;
			}

			/* member index doubles as the expected attach type */
			prog->attach_btf_id = kern_type_id;
			prog->expected_attach_type = kern_member_idx;

			st_ops->kern_func_off[i] = kern_data_off + kern_moff;

			pr_debug("struct_ops init_kern %s: func ptr %s is set to prog %s from data(+%u) to kern_data(+%u)\n",
				 map->name, mname, prog->name, moff,
				 kern_moff);

			continue;
		}

		/* data member: sizes must match exactly before the raw copy */
		msize = btf__resolve_size(btf, mtype_id);
		kern_msize = btf__resolve_size(kern_btf, kern_mtype_id);
		if (msize < 0 || kern_msize < 0 || msize != kern_msize) {
			pr_warn("struct_ops init_kern %s: Error in size of member %s: %zd != %zd(kernel)\n",
				map->name, mname, (ssize_t)msize,
				(ssize_t)kern_msize);
			return -ENOTSUP;
		}

		pr_debug("struct_ops init_kern %s: copy %s %u bytes from data(+%u) to kern_data(+%u)\n",
			 map->name, mname, (unsigned int)msize,
			 moff, kern_moff);
		memcpy(kern_mdata, mdata, msize);
	}

	return 0;
}
1022 | ||
1023 | static int bpf_object__init_kern_struct_ops_maps(struct bpf_object *obj) | |
1024 | { | |
590a0088 MKL |
1025 | struct bpf_map *map; |
1026 | size_t i; | |
1027 | int err; | |
1028 | ||
1029 | for (i = 0; i < obj->nr_maps; i++) { | |
1030 | map = &obj->maps[i]; | |
1031 | ||
1032 | if (!bpf_map__is_struct_ops(map)) | |
1033 | continue; | |
1034 | ||
a6ed02ca KS |
1035 | err = bpf_map__init_kern_struct_ops(map, obj->btf, |
1036 | obj->btf_vmlinux); | |
1037 | if (err) | |
590a0088 | 1038 | return err; |
590a0088 MKL |
1039 | } |
1040 | ||
590a0088 MKL |
1041 | return 0; |
1042 | } | |
1043 | ||
/* Parse the ".struct_ops" ELF section through its BTF DATASEC and create one
 * BPF_MAP_TYPE_STRUCT_OPS map per variable declared there.  Each map caches
 * the variable's resolved struct type and a copy of its section data in
 * map->st_ops; kernel-side details are filled in later by
 * bpf_object__init_kern_struct_ops_maps().  Allocations made here are
 * released when the object itself is torn down.
 */
static int bpf_object__init_struct_ops_maps(struct bpf_object *obj)
{
	const struct btf_type *type, *datasec;
	const struct btf_var_secinfo *vsi;
	struct bpf_struct_ops *st_ops;
	const char *tname, *var_name;
	__s32 type_id, datasec_id;
	const struct btf *btf;
	struct bpf_map *map;
	__u32 i;

	/* no .struct_ops section in this object -> nothing to do */
	if (obj->efile.st_ops_shndx == -1)
		return 0;

	btf = obj->btf;
	datasec_id = btf__find_by_name_kind(btf, STRUCT_OPS_SEC,
					    BTF_KIND_DATASEC);
	if (datasec_id < 0) {
		pr_warn("struct_ops init: DATASEC %s not found\n",
			STRUCT_OPS_SEC);
		return -EINVAL;
	}

	datasec = btf__type_by_id(btf, datasec_id);
	vsi = btf_var_secinfos(datasec);
	for (i = 0; i < btf_vlen(datasec); i++, vsi++) {
		/* vsi->type is the VAR; the variable's name names the map */
		type = btf__type_by_id(obj->btf, vsi->type);
		var_name = btf__name_by_offset(obj->btf, type->name_off);

		type_id = btf__resolve_type(obj->btf, vsi->type);
		if (type_id < 0) {
			pr_warn("struct_ops init: Cannot resolve var type_id %u in DATASEC %s\n",
				vsi->type, STRUCT_OPS_SEC);
			return -EINVAL;
		}

		/* the variable's resolved type must be a named struct */
		type = btf__type_by_id(obj->btf, type_id);
		tname = btf__name_by_offset(obj->btf, type->name_off);
		if (!tname[0]) {
			pr_warn("struct_ops init: anonymous type is not supported\n");
			return -ENOTSUP;
		}
		if (!btf_is_struct(type)) {
			pr_warn("struct_ops init: %s is not a struct\n", tname);
			return -EINVAL;
		}

		map = bpf_object__add_map(obj);
		if (IS_ERR(map))
			return PTR_ERR(map);

		map->sec_idx = obj->efile.st_ops_shndx;
		map->sec_offset = vsi->offset;
		map->name = strdup(var_name);
		if (!map->name)
			return -ENOMEM;

		map->def.type = BPF_MAP_TYPE_STRUCT_OPS;
		map->def.key_size = sizeof(int);
		map->def.value_size = type->size;
		map->def.max_entries = 1;

		map->st_ops = calloc(1, sizeof(*map->st_ops));
		if (!map->st_ops)
			return -ENOMEM;
		st_ops = map->st_ops;
		st_ops->data = malloc(type->size);
		/* one program slot and one kernel func offset per member */
		st_ops->progs = calloc(btf_vlen(type), sizeof(*st_ops->progs));
		st_ops->kern_func_off = malloc(btf_vlen(type) *
					       sizeof(*st_ops->kern_func_off));
		if (!st_ops->data || !st_ops->progs || !st_ops->kern_func_off)
			return -ENOMEM;

		/* variable must lie entirely within the section's data */
		if (vsi->offset + type->size > obj->efile.st_ops_data->d_size) {
			pr_warn("struct_ops init: var %s is beyond the end of DATASEC %s\n",
				var_name, STRUCT_OPS_SEC);
			return -EINVAL;
		}

		memcpy(st_ops->data,
		       obj->efile.st_ops_data->d_buf + vsi->offset,
		       type->size);
		st_ops->tname = tname;
		st_ops->type = type;
		st_ops->type_id = type_id;

		pr_debug("struct_ops init: struct %s(type_id=%u) %s found at offset %u\n",
			 tname, type_id, var_name, vsi->offset);
	}

	return 0;
}
1136 | ||
/* Allocate and minimally initialize a bpf_object for @path, optionally
 * backed by an in-memory ELF image (@obj_buf of @obj_buf_sz bytes).  The
 * object name is @obj_name when provided, otherwise derived from path's
 * basename truncated at the first '.'.  The new object is linked into the
 * global bpf_objects_list.  Returns ERR_PTR(-ENOMEM) on failure.
 */
static struct bpf_object *bpf_object__new(const char *path,
					  const void *obj_buf,
					  size_t obj_buf_sz,
					  const char *obj_name)
{
	struct bpf_object *obj;
	char *end;

	/* path is stored in trailing storage allocated with the object */
	obj = calloc(1, sizeof(struct bpf_object) + strlen(path) + 1);
	if (!obj) {
		pr_warn("alloc memory failed for %s\n", path);
		return ERR_PTR(-ENOMEM);
	}

	strcpy(obj->path, path);
	if (obj_name) {
		strncpy(obj->name, obj_name, sizeof(obj->name) - 1);
		obj->name[sizeof(obj->name) - 1] = 0;
	} else {
		/* Using basename() GNU version which doesn't modify arg. */
		strncpy(obj->name, basename((void *)path),
			sizeof(obj->name) - 1);
		end = strchr(obj->name, '.');
		if (end)
			*end = 0;
	}

	obj->efile.fd = -1;
	/*
	 * Caller of this function should also call
	 * bpf_object__elf_finish() after data collection to return
	 * obj_buf to user. If not, we should duplicate the buffer to
	 * avoid user freeing them before elf finish.
	 */
	obj->efile.obj_buf = obj_buf;
	obj->efile.obj_buf_sz = obj_buf_sz;
	/* section indices are discovered during ELF parsing; -1 = absent */
	obj->efile.maps_shndx = -1;
	obj->efile.btf_maps_shndx = -1;
	obj->efile.st_ops_shndx = -1;
	obj->kconfig_map_idx = -1;

	obj->kern_version = get_kernel_version();
	obj->loaded = false;

	INIT_LIST_HEAD(&obj->list);
	list_add(&obj->list, &bpf_objects_list);
	return obj;
}
1185 | ||
/* Release all ELF parsing state of @obj: the libelf handle, cached section
 * pointers/descriptors, and the file descriptor.  The caller-owned obj_buf
 * reference is dropped (not freed).  Safe to call when no ELF is open.
 *
 * Fix: the original re-tested obj->efile.elf inside the body even though the
 * early return above already guarantees it is non-NULL; the redundant check
 * is removed.
 */
static void bpf_object__elf_finish(struct bpf_object *obj)
{
	if (!obj->efile.elf)
		return;

	elf_end(obj->efile.elf);
	obj->efile.elf = NULL;
	obj->efile.symbols = NULL;
	obj->efile.st_ops_data = NULL;

	zfree(&obj->efile.secs);
	obj->efile.sec_cnt = 0;
	zclose(obj->efile.fd);
	obj->efile.obj_buf = NULL;
	obj->efile.obj_buf_sz = 0;
}
1204 | ||
/* Open the object's ELF image (from the in-memory buffer when one was
 * supplied, otherwise from obj->path) and validate it: must be a 64-bit
 * relocatable (ET_REL) BPF object with a readable section-name string
 * table.  On any failure, partially initialized ELF state is torn down via
 * bpf_object__elf_finish().  Returns 0 or a negative libbpf/errno code.
 */
static int bpf_object__elf_init(struct bpf_object *obj)
{
	Elf64_Ehdr *ehdr;
	int err = 0;
	Elf *elf;

	/* double-init would leak the existing libelf handle */
	if (obj->efile.elf) {
		pr_warn("elf: init internal error\n");
		return -LIBBPF_ERRNO__LIBELF;
	}

	if (obj->efile.obj_buf_sz > 0) {
		/*
		 * obj_buf should have been validated by
		 * bpf_object__open_buffer().
		 */
		elf = elf_memory((char *)obj->efile.obj_buf, obj->efile.obj_buf_sz);
	} else {
		obj->efile.fd = open(obj->path, O_RDONLY);
		if (obj->efile.fd < 0) {
			char errmsg[STRERR_BUFSIZE], *cp;

			/* capture errno before any other call clobbers it */
			err = -errno;
			cp = libbpf_strerror_r(err, errmsg, sizeof(errmsg));
			pr_warn("elf: failed to open %s: %s\n", obj->path, cp);
			return err;
		}

		elf = elf_begin(obj->efile.fd, ELF_C_READ_MMAP, NULL);
	}

	if (!elf) {
		pr_warn("elf: failed to open %s as ELF file: %s\n", obj->path, elf_errmsg(-1));
		err = -LIBBPF_ERRNO__LIBELF;
		goto errout;
	}

	/* from here on, errout can clean up through obj->efile.elf */
	obj->efile.elf = elf;

	if (elf_kind(elf) != ELF_K_ELF) {
		err = -LIBBPF_ERRNO__FORMAT;
		pr_warn("elf: '%s' is not a proper ELF object\n", obj->path);
		goto errout;
	}

	if (gelf_getclass(elf) != ELFCLASS64) {
		err = -LIBBPF_ERRNO__FORMAT;
		pr_warn("elf: '%s' is not a 64-bit ELF object\n", obj->path);
		goto errout;
	}

	obj->efile.ehdr = ehdr = elf64_getehdr(elf);
	if (!obj->efile.ehdr) {
		pr_warn("elf: failed to get ELF header from %s: %s\n", obj->path, elf_errmsg(-1));
		err = -LIBBPF_ERRNO__FORMAT;
		goto errout;
	}

	if (elf_getshdrstrndx(elf, &obj->efile.shstrndx)) {
		pr_warn("elf: failed to get section names section index for %s: %s\n",
			obj->path, elf_errmsg(-1));
		err = -LIBBPF_ERRNO__FORMAT;
		goto errout;
	}

	/* Elf is corrupted/truncated, avoid calling elf_strptr. */
	if (!elf_rawdata(elf_getscn(elf, obj->efile.shstrndx), NULL)) {
		pr_warn("elf: failed to get section names strings from %s: %s\n",
			obj->path, elf_errmsg(-1));
		err = -LIBBPF_ERRNO__FORMAT;
		goto errout;
	}

	/* Old LLVM set e_machine to EM_NONE */
	if (ehdr->e_type != ET_REL || (ehdr->e_machine && ehdr->e_machine != EM_BPF)) {
		pr_warn("elf: %s is not a valid eBPF object file\n", obj->path);
		err = -LIBBPF_ERRNO__FORMAT;
		goto errout;
	}

	return 0;
errout:
	bpf_object__elf_finish(obj);
	return err;
}
1290 | ||
12ef5634 | 1291 | static int bpf_object__check_endianness(struct bpf_object *obj) |
cc4228d5 | 1292 | { |
cdb2f920 | 1293 | #if __BYTE_ORDER == __LITTLE_ENDIAN |
ad23b723 | 1294 | if (obj->efile.ehdr->e_ident[EI_DATA] == ELFDATA2LSB) |
12ef5634 | 1295 | return 0; |
cdb2f920 | 1296 | #elif __BYTE_ORDER == __BIG_ENDIAN |
ad23b723 | 1297 | if (obj->efile.ehdr->e_ident[EI_DATA] == ELFDATA2MSB) |
12ef5634 AN |
1298 | return 0; |
1299 | #else | |
1300 | # error "Unrecognized __BYTE_ORDER__" | |
1301 | #endif | |
88a82120 | 1302 | pr_warn("elf: endianness mismatch in %s.\n", obj->path); |
6371ca3b | 1303 | return -LIBBPF_ERRNO__ENDIAN; |
cc4228d5 WN |
1304 | } |
1305 | ||
/* Record the license string from the object's "license" section.
 * Copies at most sizeof(obj->license) - 1 bytes; obj->license was
 * zero-initialized when the object was calloc'd, so the result is always
 * NUL-terminated.  Always returns 0.
 */
static int
bpf_object__init_license(struct bpf_object *obj, void *data, size_t size)
{
	memcpy(obj->license, data, min(size, sizeof(obj->license) - 1));
	pr_debug("license of %s is %s\n", obj->path, obj->license);
	return 0;
}
1313 | ||
54b8625c JF |
1314 | static int |
1315 | bpf_object__init_kversion(struct bpf_object *obj, void *data, size_t size) | |
1316 | { | |
1317 | __u32 kver; | |
1318 | ||
1319 | if (size != sizeof(kver)) { | |
be18010e | 1320 | pr_warn("invalid kver section in %s\n", obj->path); |
54b8625c JF |
1321 | return -LIBBPF_ERRNO__FORMAT; |
1322 | } | |
1323 | memcpy(&kver, data, sizeof(kver)); | |
1324 | obj->kern_version = kver; | |
1325 | pr_debug("kernel version of %s is %x\n", obj->path, obj->kern_version); | |
1326 | return 0; | |
1327 | } | |
1328 | ||
addb9fc9 NS |
/* True for map types whose values are themselves maps (map-in-map). */
static bool bpf_map_type__is_map_in_map(enum bpf_map_type type)
{
	return type == BPF_MAP_TYPE_ARRAY_OF_MAPS ||
	       type == BPF_MAP_TYPE_HASH_OF_MAPS;
}
1336 | ||
/* Find ELF section @name and report its size via @size.
 * Returns 0 when the section exists (including when its size is 0, since
 * ret is set to 0 once data is found), -EINVAL for a NULL name, and -ENOENT
 * when the section is missing or has no data descriptor.
 */
static int find_elf_sec_sz(const struct bpf_object *obj, const char *name, __u32 *size)
{
	int ret = -ENOENT;
	Elf_Data *data;
	Elf_Scn *scn;

	*size = 0;
	if (!name)
		return -EINVAL;

	scn = elf_sec_by_name(obj, name);
	data = elf_sec_data(obj, scn);
	if (data) {
		ret = 0; /* found it */
		*size = data->d_size;
	}

	return *size ? 0 : ret;
}
1356 | ||
/* Find the section offset (st_value) of global data variable @name in the
 * object's ELF symbol table.  Only STB_GLOBAL / STT_OBJECT symbols are
 * considered.  Returns 0 and sets *off on success; -EINVAL on bad
 * arguments, -EIO when a symbol name cannot be resolved, -ENOENT when no
 * matching variable exists.
 */
static int find_elf_var_offset(const struct bpf_object *obj, const char *name, __u32 *off)
{
	Elf_Data *symbols = obj->efile.symbols;
	const char *sname;
	size_t si;

	if (!name || !off)
		return -EINVAL;

	for (si = 0; si < symbols->d_size / sizeof(Elf64_Sym); si++) {
		Elf64_Sym *sym = elf_sym_by_idx(obj, si);

		/* skip local symbols, functions, sections, etc. */
		if (ELF64_ST_BIND(sym->st_info) != STB_GLOBAL ||
		    ELF64_ST_TYPE(sym->st_info) != STT_OBJECT)
			continue;

		sname = elf_sym_str(obj, sym->st_name);
		if (!sname) {
			pr_warn("failed to get sym name string for var %s\n", name);
			return -EIO;
		}
		if (strcmp(name, sname) == 0) {
			/* for ET_REL files st_value is an offset into the
			 * symbol's section
			 */
			*off = sym->st_value;
			return 0;
		}
	}

	return -ENOENT;
}
1386 | ||
/* Append a new zero-initialized slot to obj->maps and return it, growing
 * the array ~1.5x (minimum capacity 4) when full.  Returns
 * ERR_PTR(-ENOMEM) on allocation failure.  NOTE: growth may relocate
 * obj->maps, invalidating bpf_map pointers returned earlier.
 */
static struct bpf_map *bpf_object__add_map(struct bpf_object *obj)
{
	struct bpf_map *new_maps;
	size_t new_cap;
	int i;

	if (obj->nr_maps < obj->maps_cap)
		return &obj->maps[obj->nr_maps++];

	new_cap = max((size_t)4, obj->maps_cap * 3 / 2);
	new_maps = libbpf_reallocarray(obj->maps, new_cap, sizeof(*obj->maps));
	if (!new_maps) {
		pr_warn("alloc maps for object failed\n");
		return ERR_PTR(-ENOMEM);
	}

	obj->maps_cap = new_cap;
	obj->maps = new_maps;

	/* zero out new maps */
	memset(obj->maps + obj->nr_maps, 0,
	       (obj->maps_cap - obj->nr_maps) * sizeof(*obj->maps));
	/*
	 * fill all fd with -1 so won't close incorrect fd (fd=0 is stdin)
	 * when failure (zclose won't close negative fd)).
	 */
	for (i = obj->nr_maps; i < obj->maps_cap; i++) {
		obj->maps[i].fd = -1;
		obj->maps[i].inner_map_fd = -1;
	}

	return &obj->maps[obj->nr_maps++];
}
1420 | ||
eba9c5f4 AN |
1421 | static size_t bpf_map_mmap_sz(const struct bpf_map *map) |
1422 | { | |
1423 | long page_sz = sysconf(_SC_PAGE_SIZE); | |
1424 | size_t map_sz; | |
1425 | ||
c701917e | 1426 | map_sz = (size_t)roundup(map->def.value_size, 8) * map->def.max_entries; |
eba9c5f4 AN |
1427 | map_sz = roundup(map_sz, page_sz); |
1428 | return map_sz; | |
1429 | } | |
1430 | ||
/* Build the kernel-visible (<= BPF_OBJ_NAME_LEN - 1 chars) name for an
 * internal map backing ELF section @real_name.  Returns a strdup'd string
 * the caller owns, or NULL on allocation failure.
 */
static char *internal_map_name(struct bpf_object *obj, const char *real_name)
{
	char map_name[BPF_OBJ_NAME_LEN], *p;
	int pfx_len, sfx_len = max((size_t)7, strlen(real_name));

	/* This is one of the more confusing parts of libbpf for various
	 * reasons, some of which are historical. The original idea for naming
	 * internal names was to include as much of BPF object name prefix as
	 * possible, so that it can be distinguished from similar internal
	 * maps of a different BPF object.
	 * As an example, let's say we have bpf_object named 'my_object_name'
	 * and internal map corresponding to '.rodata' ELF section. The final
	 * map name advertised to user and to the kernel will be
	 * 'my_objec.rodata', taking first 8 characters of object name and
	 * entire 7 characters of '.rodata'.
	 * Somewhat confusingly, if internal map ELF section name is shorter
	 * than 7 characters, e.g., '.bss', we still reserve 7 characters
	 * for the suffix, even though we only have 4 actual characters, and
	 * resulting map will be called 'my_objec.bss', not even using all 15
	 * characters allowed by the kernel. Oh well, at least the truncated
	 * object name is somewhat consistent in this case. But if the map
	 * name is '.kconfig', we'll still have entirety of '.kconfig' added
	 * (8 chars) and thus will be left with only first 7 characters of the
	 * object name ('my_obje'). Happy guessing, user, that the final map
	 * name will be "my_obje.kconfig".
	 * Now, with libbpf starting to support arbitrarily named .rodata.*
	 * and .data.* data sections, it's possible that ELF section name is
	 * longer than allowed 15 chars, so we now need to be careful to take
	 * only up to 15 first characters of ELF name, taking no BPF object
	 * name characters at all. So '.rodata.abracadabra' will result in
	 * '.rodata.abracad' kernel and user-visible name.
	 * We need to keep this convoluted logic intact for .data, .bss and
	 * .rodata maps, but for new custom .data.custom and .rodata.custom
	 * maps we use their ELF names as is, not prepending bpf_object name
	 * in front. We still need to truncate them to 15 characters for the
	 * kernel. Full name can be recovered for such maps by using DATASEC
	 * BTF type associated with such map's value type, though.
	 */
	if (sfx_len >= BPF_OBJ_NAME_LEN)
		sfx_len = BPF_OBJ_NAME_LEN - 1;

	/* if there are two or more dots in map name, it's a custom dot map */
	if (strchr(real_name + 1, '.') != NULL)
		pfx_len = 0;
	else
		pfx_len = min((size_t)BPF_OBJ_NAME_LEN - sfx_len - 1, strlen(obj->name));

	snprintf(map_name, sizeof(map_name), "%.*s%.*s", pfx_len, obj->name,
		 sfx_len, real_name);

	/* sanitise map name to characters allowed by kernel */
	for (p = map_name; *p && p < map_name + sizeof(map_name); p++)
		if (!isalnum(*p) && *p != '_' && *p != '.')
			*p = '_';

	return strdup(map_name);
}
1488 | ||
/* Create a libbpf-internal single-entry ARRAY map backing a .data/.rodata/
 * .bss/.kconfig ELF section.  The map's content buffer is an anonymous
 * shared mmap (the map is BPF_F_MMAPABLE), seeded from @data when non-NULL;
 * .bss passes NULL and relies on the zero-filled mapping.  Returns 0 or a
 * negative errno.
 */
static int
bpf_object__init_internal_map(struct bpf_object *obj, enum libbpf_map_type type,
			      const char *real_name, int sec_idx, void *data, size_t data_sz)
{
	struct bpf_map_def *def;
	struct bpf_map *map;
	int err;

	map = bpf_object__add_map(obj);
	if (IS_ERR(map))
		return PTR_ERR(map);

	map->libbpf_type = type;
	map->sec_idx = sec_idx;
	map->sec_offset = 0;
	/* full ELF section name is kept alongside the truncated kernel name */
	map->real_name = strdup(real_name);
	map->name = internal_map_name(obj, real_name);
	if (!map->real_name || !map->name) {
		zfree(&map->real_name);
		zfree(&map->name);
		return -ENOMEM;
	}

	def = &map->def;
	def->type = BPF_MAP_TYPE_ARRAY;
	def->key_size = sizeof(int);
	def->value_size = data_sz;
	def->max_entries = 1;
	/* read-only sections are read-only from the BPF program's side */
	def->map_flags = type == LIBBPF_MAP_RODATA || type == LIBBPF_MAP_KCONFIG
			 ? BPF_F_RDONLY_PROG : 0;
	def->map_flags |= BPF_F_MMAPABLE;

	pr_debug("map '%s' (global data): at sec_idx %d, offset %zu, flags %x.\n",
		 map->name, map->sec_idx, map->sec_offset, def->map_flags);

	map->mmaped = mmap(NULL, bpf_map_mmap_sz(map), PROT_READ | PROT_WRITE,
			   MAP_SHARED | MAP_ANONYMOUS, -1, 0);
	if (map->mmaped == MAP_FAILED) {
		err = -errno;
		map->mmaped = NULL;
		pr_warn("failed to alloc map '%s' content buffer: %d\n",
			map->name, err);
		zfree(&map->real_name);
		zfree(&map->name);
		return err;
	}

	if (data)
		memcpy(map->mmaped, data, data_sz);

	pr_debug("map %td is \"%s\"\n", map - obj->maps, map->name);
	return 0;
}
1542 | ||
bf829271 AN |
1543 | static int bpf_object__init_global_data_maps(struct bpf_object *obj) |
1544 | { | |
25bbbd7a | 1545 | struct elf_sec_desc *sec_desc; |
aed65917 | 1546 | const char *sec_name; |
25bbbd7a | 1547 | int err = 0, sec_idx; |
bf829271 | 1548 | |
bf829271 AN |
1549 | /* |
1550 | * Populate obj->maps with libbpf internal maps. | |
1551 | */ | |
25bbbd7a AN |
1552 | for (sec_idx = 1; sec_idx < obj->efile.sec_cnt; sec_idx++) { |
1553 | sec_desc = &obj->efile.secs[sec_idx]; | |
1554 | ||
1555 | switch (sec_desc->sec_type) { | |
1556 | case SEC_DATA: | |
aed65917 | 1557 | sec_name = elf_sec_name(obj, elf_sec_by_idx(obj, sec_idx)); |
25bbbd7a | 1558 | err = bpf_object__init_internal_map(obj, LIBBPF_MAP_DATA, |
aed65917 | 1559 | sec_name, sec_idx, |
25bbbd7a AN |
1560 | sec_desc->data->d_buf, |
1561 | sec_desc->data->d_size); | |
1562 | break; | |
1563 | case SEC_RODATA: | |
1564 | obj->has_rodata = true; | |
aed65917 | 1565 | sec_name = elf_sec_name(obj, elf_sec_by_idx(obj, sec_idx)); |
25bbbd7a | 1566 | err = bpf_object__init_internal_map(obj, LIBBPF_MAP_RODATA, |
aed65917 | 1567 | sec_name, sec_idx, |
25bbbd7a AN |
1568 | sec_desc->data->d_buf, |
1569 | sec_desc->data->d_size); | |
1570 | break; | |
1571 | case SEC_BSS: | |
aed65917 | 1572 | sec_name = elf_sec_name(obj, elf_sec_by_idx(obj, sec_idx)); |
25bbbd7a | 1573 | err = bpf_object__init_internal_map(obj, LIBBPF_MAP_BSS, |
aed65917 | 1574 | sec_name, sec_idx, |
25bbbd7a AN |
1575 | NULL, |
1576 | sec_desc->data->d_size); | |
1577 | break; | |
1578 | default: | |
1579 | /* skip */ | |
1580 | break; | |
1581 | } | |
bf829271 AN |
1582 | if (err) |
1583 | return err; | |
1584 | } | |
1585 | return 0; | |
1586 | } | |
1587 | ||
166750bc AN |
1588 | |
1589 | static struct extern_desc *find_extern_by_name(const struct bpf_object *obj, | |
1590 | const void *name) | |
1591 | { | |
1592 | int i; | |
1593 | ||
1594 | for (i = 0; i < obj->nr_extern; i++) { | |
1595 | if (strcmp(obj->externs[i].name, name) == 0) | |
1596 | return &obj->externs[i]; | |
1597 | } | |
1598 | return NULL; | |
1599 | } | |
1600 | ||
2e33efe3 AN |
1601 | static int set_kcfg_value_tri(struct extern_desc *ext, void *ext_val, |
1602 | char value) | |
166750bc | 1603 | { |
2e33efe3 AN |
1604 | switch (ext->kcfg.type) { |
1605 | case KCFG_BOOL: | |
166750bc | 1606 | if (value == 'm') { |
2e33efe3 | 1607 | pr_warn("extern (kcfg) %s=%c should be tristate or char\n", |
166750bc AN |
1608 | ext->name, value); |
1609 | return -EINVAL; | |
1610 | } | |
1611 | *(bool *)ext_val = value == 'y' ? true : false; | |
1612 | break; | |
2e33efe3 | 1613 | case KCFG_TRISTATE: |
166750bc AN |
1614 | if (value == 'y') |
1615 | *(enum libbpf_tristate *)ext_val = TRI_YES; | |
1616 | else if (value == 'm') | |
1617 | *(enum libbpf_tristate *)ext_val = TRI_MODULE; | |
1618 | else /* value == 'n' */ | |
1619 | *(enum libbpf_tristate *)ext_val = TRI_NO; | |
1620 | break; | |
2e33efe3 | 1621 | case KCFG_CHAR: |
166750bc AN |
1622 | *(char *)ext_val = value; |
1623 | break; | |
2e33efe3 AN |
1624 | case KCFG_UNKNOWN: |
1625 | case KCFG_INT: | |
1626 | case KCFG_CHAR_ARR: | |
166750bc | 1627 | default: |
2e33efe3 | 1628 | pr_warn("extern (kcfg) %s=%c should be bool, tristate, or char\n", |
166750bc AN |
1629 | ext->name, value); |
1630 | return -EINVAL; | |
1631 | } | |
1632 | ext->is_set = true; | |
1633 | return 0; | |
1634 | } | |
1635 | ||
2e33efe3 AN |
1636 | static int set_kcfg_value_str(struct extern_desc *ext, char *ext_val, |
1637 | const char *value) | |
166750bc AN |
1638 | { |
1639 | size_t len; | |
1640 | ||
2e33efe3 AN |
1641 | if (ext->kcfg.type != KCFG_CHAR_ARR) { |
1642 | pr_warn("extern (kcfg) %s=%s should be char array\n", ext->name, value); | |
166750bc AN |
1643 | return -EINVAL; |
1644 | } | |
1645 | ||
1646 | len = strlen(value); | |
1647 | if (value[len - 1] != '"') { | |
2e33efe3 | 1648 | pr_warn("extern (kcfg) '%s': invalid string config '%s'\n", |
166750bc AN |
1649 | ext->name, value); |
1650 | return -EINVAL; | |
1651 | } | |
1652 | ||
1653 | /* strip quotes */ | |
1654 | len -= 2; | |
2e33efe3 AN |
1655 | if (len >= ext->kcfg.sz) { |
1656 | pr_warn("extern (kcfg) '%s': long string config %s of (%zu bytes) truncated to %d bytes\n", | |
1657 | ext->name, value, len, ext->kcfg.sz - 1); | |
1658 | len = ext->kcfg.sz - 1; | |
166750bc AN |
1659 | } |
1660 | memcpy(ext_val, value + 1, len); | |
1661 | ext_val[len] = '\0'; | |
1662 | ext->is_set = true; | |
1663 | return 0; | |
1664 | } | |
1665 | ||
/* Parse @value as an unsigned 64-bit integer (base auto-detected by
 * strtoull: 0x.. hex, 0.. octal, otherwise decimal). The entire string
 * must be consumed. Returns 0 on success, negative error code otherwise.
 */
static int parse_u64(const char *value, __u64 *res)
{
	char *end_ptr;
	int err;

	errno = 0;
	*res = strtoull(value, &end_ptr, 0);
	if (errno) {
		err = -errno;
		pr_warn("failed to parse '%s' as integer: %d\n", value, err);
		return err;
	}
	/* reject trailing garbage after the number */
	if (*end_ptr != '\0') {
		pr_warn("failed to parse '%s' as integer completely\n", value);
		return -EINVAL;
	}
	return 0;
}
1684 | ||
2e33efe3 | 1685 | static bool is_kcfg_value_in_range(const struct extern_desc *ext, __u64 v) |
166750bc | 1686 | { |
2e33efe3 | 1687 | int bit_sz = ext->kcfg.sz * 8; |
166750bc | 1688 | |
2e33efe3 | 1689 | if (ext->kcfg.sz == 8) |
166750bc AN |
1690 | return true; |
1691 | ||
1692 | /* Validate that value stored in u64 fits in integer of `ext->sz` | |
1693 | * bytes size without any loss of information. If the target integer | |
1694 | * is signed, we rely on the following limits of integer type of | |
1695 | * Y bits and subsequent transformation: | |
1696 | * | |
1697 | * -2^(Y-1) <= X <= 2^(Y-1) - 1 | |
1698 | * 0 <= X + 2^(Y-1) <= 2^Y - 1 | |
1699 | * 0 <= X + 2^(Y-1) < 2^Y | |
1700 | * | |
1701 | * For unsigned target integer, check that all the (64 - Y) bits are | |
1702 | * zero. | |
1703 | */ | |
2e33efe3 | 1704 | if (ext->kcfg.is_signed) |
166750bc AN |
1705 | return v + (1ULL << (bit_sz - 1)) < (1ULL << bit_sz); |
1706 | else | |
1707 | return (v >> bit_sz) == 0; | |
1708 | } | |
1709 | ||
2e33efe3 AN |
1710 | static int set_kcfg_value_num(struct extern_desc *ext, void *ext_val, |
1711 | __u64 value) | |
166750bc | 1712 | { |
2e33efe3 AN |
1713 | if (ext->kcfg.type != KCFG_INT && ext->kcfg.type != KCFG_CHAR) { |
1714 | pr_warn("extern (kcfg) %s=%llu should be integer\n", | |
7745ff98 | 1715 | ext->name, (unsigned long long)value); |
166750bc AN |
1716 | return -EINVAL; |
1717 | } | |
2e33efe3 AN |
1718 | if (!is_kcfg_value_in_range(ext, value)) { |
1719 | pr_warn("extern (kcfg) %s=%llu value doesn't fit in %d bytes\n", | |
1720 | ext->name, (unsigned long long)value, ext->kcfg.sz); | |
166750bc AN |
1721 | return -ERANGE; |
1722 | } | |
2e33efe3 | 1723 | switch (ext->kcfg.sz) { |
166750bc AN |
1724 | case 1: *(__u8 *)ext_val = value; break; |
1725 | case 2: *(__u16 *)ext_val = value; break; | |
1726 | case 4: *(__u32 *)ext_val = value; break; | |
1727 | case 8: *(__u64 *)ext_val = value; break; | |
1728 | default: | |
1729 | return -EINVAL; | |
1730 | } | |
1731 | ext->is_set = true; | |
1732 | return 0; | |
1733 | } | |
1734 | ||
/* Process one "CONFIG_FOO=value" line from a Kconfig source and, if a
 * matching unset kcfg extern exists in @obj, write the parsed value into
 * the extern's slot inside @data (the .kconfig map image).
 * Non-CONFIG_ lines and externs that are absent or already set are
 * silently skipped. @buf is modified in place (newline trimmed, '='
 * temporarily replaced by NUL to split key from value).
 * Returns 0 on success or skip, negative error code on parse failure.
 */
static int bpf_object__process_kconfig_line(struct bpf_object *obj,
					    char *buf, void *data)
{
	struct extern_desc *ext;
	char *sep, *value;
	int len, err = 0;
	void *ext_val;
	__u64 num;

	if (!str_has_pfx(buf, "CONFIG_"))
		return 0;

	sep = strchr(buf, '=');
	if (!sep) {
		pr_warn("failed to parse '%s': no separator\n", buf);
		return -EINVAL;
	}

	/* Trim ending '\n' */
	len = strlen(buf);
	if (buf[len - 1] == '\n')
		buf[len - 1] = '\0';
	/* Split on '=' and ensure that a value is present. */
	*sep = '\0';
	if (!sep[1]) {
		/* restore '=' so the warning shows the original line */
		*sep = '=';
		pr_warn("failed to parse '%s': no value\n", buf);
		return -EINVAL;
	}

	/* buf now holds just the CONFIG_* key (NUL-terminated at sep) */
	ext = find_extern_by_name(obj, buf);
	if (!ext || ext->is_set)
		return 0;

	ext_val = data + ext->kcfg.data_off;
	value = sep + 1;

	/* dispatch on the first character of the value */
	switch (*value) {
	case 'y': case 'n': case 'm':
		err = set_kcfg_value_tri(ext, ext_val, *value);
		break;
	case '"':
		err = set_kcfg_value_str(ext, ext_val, value);
		break;
	default:
		/* assume integer */
		err = parse_u64(value, &num);
		if (err) {
			pr_warn("extern (kcfg) %s=%s should be integer\n",
				ext->name, value);
			return err;
		}
		err = set_kcfg_value_num(ext, ext_val, num);
		break;
	}
	if (err)
		return err;
	pr_debug("extern (kcfg) %s=%s\n", ext->name, value);
	return 0;
}
1795 | ||
/* Populate kcfg externs in @data from the running kernel's configuration.
 * Tries /boot/config-$(uname -r) first, then falls back to
 * /proc/config.gz. Returns 0 on success, negative error code otherwise.
 */
static int bpf_object__read_kconfig_file(struct bpf_object *obj, void *data)
{
	char buf[PATH_MAX];
	struct utsname uts;
	int len, err = 0;
	gzFile file;

	/* NOTE(review): uname() return value is not checked here; on the
	 * (unlikely) failure path uts.release would be uninitialized.
	 */
	uname(&uts);
	len = snprintf(buf, PATH_MAX, "/boot/config-%s", uts.release);
	if (len < 0)
		return -EINVAL;
	else if (len >= PATH_MAX)
		return -ENAMETOOLONG;

	/* gzopen also accepts uncompressed files. */
	file = gzopen(buf, "r");
	if (!file)
		file = gzopen("/proc/config.gz", "r");

	if (!file) {
		pr_warn("failed to open system Kconfig\n");
		return -ENOENT;
	}

	/* buf is reused as the per-line read buffer from here on */
	while (gzgets(file, buf, sizeof(buf))) {
		err = bpf_object__process_kconfig_line(obj, buf, data);
		if (err) {
			pr_warn("error parsing system Kconfig line '%s': %d\n",
				buf, err);
			goto out;
		}
	}

out:
	gzclose(file);
	return err;
}
166750bc | 1833 | |
/* Populate kcfg externs in @data from an in-memory, newline-separated
 * "CONFIG_FOO=val" string supplied by the caller via bpf_object open
 * options. Returns 0 on success, negative error code otherwise.
 */
static int bpf_object__read_kconfig_mem(struct bpf_object *obj,
					const char *config, void *data)
{
	char buf[PATH_MAX];
	int err = 0;
	FILE *file;

	/* wrap the string in a stdio stream so the line-based parser can
	 * be shared with the file-based reader
	 */
	file = fmemopen((void *)config, strlen(config), "r");
	if (!file) {
		err = -errno;
		pr_warn("failed to open in-memory Kconfig: %d\n", err);
		return err;
	}

	while (fgets(buf, sizeof(buf), file)) {
		err = bpf_object__process_kconfig_line(obj, buf, data);
		if (err) {
			pr_warn("error parsing in-memory Kconfig line '%s': %d\n",
				buf, err);
			break;
		}
	}

	fclose(file);
	return err;
}
1860 | ||
81bfdd08 | 1861 | static int bpf_object__init_kconfig_map(struct bpf_object *obj) |
166750bc | 1862 | { |
2e33efe3 | 1863 | struct extern_desc *last_ext = NULL, *ext; |
166750bc | 1864 | size_t map_sz; |
2e33efe3 | 1865 | int i, err; |
166750bc | 1866 | |
2e33efe3 AN |
1867 | for (i = 0; i < obj->nr_extern; i++) { |
1868 | ext = &obj->externs[i]; | |
1869 | if (ext->type == EXT_KCFG) | |
1870 | last_ext = ext; | |
1871 | } | |
166750bc | 1872 | |
2e33efe3 AN |
1873 | if (!last_ext) |
1874 | return 0; | |
166750bc | 1875 | |
2e33efe3 | 1876 | map_sz = last_ext->kcfg.data_off + last_ext->kcfg.sz; |
81bfdd08 | 1877 | err = bpf_object__init_internal_map(obj, LIBBPF_MAP_KCONFIG, |
aed65917 | 1878 | ".kconfig", obj->efile.symbols_shndx, |
166750bc AN |
1879 | NULL, map_sz); |
1880 | if (err) | |
1881 | return err; | |
1882 | ||
81bfdd08 | 1883 | obj->kconfig_map_idx = obj->nr_maps - 1; |
166750bc AN |
1884 | |
1885 | return 0; | |
1886 | } | |
1887 | ||
/* Parse legacy (non-BTF) map definitions from the ELF "maps" section.
 * Each symbol in that section names one struct bpf_map_def; all
 * definitions are assumed to be equally sized. @strict controls whether
 * unrecognized trailing non-zero bytes in an oversized definition are a
 * hard error. Returns 0 on success, negative error code otherwise.
 */
static int bpf_object__init_user_maps(struct bpf_object *obj, bool strict)
{
	Elf_Data *symbols = obj->efile.symbols;
	int i, map_def_sz = 0, nr_maps = 0, nr_syms;
	Elf_Data *data = NULL;
	Elf_Scn *scn;

	if (obj->efile.maps_shndx < 0)
		return 0;

	if (!symbols)
		return -EINVAL;

	scn = elf_sec_by_idx(obj, obj->efile.maps_shndx);
	data = elf_sec_data(obj, scn);
	if (!scn || !data) {
		pr_warn("elf: failed to get legacy map definitions for %s\n",
			obj->path);
		return -EINVAL;
	}

	/*
	 * Count number of maps. Each map has a name.
	 * Array of maps is not supported: only the first element is
	 * considered.
	 *
	 * TODO: Detect array of map and report error.
	 */
	nr_syms = symbols->d_size / sizeof(Elf64_Sym);
	for (i = 0; i < nr_syms; i++) {
		Elf64_Sym *sym = elf_sym_by_idx(obj, i);

		if (sym->st_shndx != obj->efile.maps_shndx)
			continue;
		/* skip the section symbol itself */
		if (ELF64_ST_TYPE(sym->st_info) == STT_SECTION)
			continue;
		nr_maps++;
	}
	/* Assume equally sized map definitions */
	pr_debug("elf: found %d legacy map definitions (%zd bytes) in %s\n",
		 nr_maps, data->d_size, obj->path);

	if (!data->d_size || nr_maps == 0 || (data->d_size % nr_maps) != 0) {
		pr_warn("elf: unable to determine legacy map definition size in %s\n",
			obj->path);
		return -EINVAL;
	}
	map_def_sz = data->d_size / nr_maps;

	/* Fill obj->maps using data in "maps" section. */
	for (i = 0; i < nr_syms; i++) {
		Elf64_Sym *sym = elf_sym_by_idx(obj, i);
		const char *map_name;
		struct bpf_map_def *def;
		struct bpf_map *map;

		if (sym->st_shndx != obj->efile.maps_shndx)
			continue;
		if (ELF64_ST_TYPE(sym->st_info) == STT_SECTION)
			continue;

		map = bpf_object__add_map(obj);
		if (IS_ERR(map))
			return PTR_ERR(map);

		map_name = elf_sym_str(obj, sym->st_name);
		if (!map_name) {
			pr_warn("failed to get map #%d name sym string for obj %s\n",
				i, obj->path);
			return -LIBBPF_ERRNO__FORMAT;
		}

		/* static (STB_LOCAL) legacy maps were never supported */
		if (ELF64_ST_BIND(sym->st_info) == STB_LOCAL) {
			pr_warn("map '%s' (legacy): static maps are not supported\n", map_name);
			return -ENOTSUP;
		}

		map->libbpf_type = LIBBPF_MAP_UNSPEC;
		map->sec_idx = sym->st_shndx;
		map->sec_offset = sym->st_value;
		pr_debug("map '%s' (legacy): at sec_idx %d, offset %zu.\n",
			 map_name, map->sec_idx, map->sec_offset);
		/* definition must lie fully within the section data */
		if (sym->st_value + map_def_sz > data->d_size) {
			pr_warn("corrupted maps section in %s: last map \"%s\" too small\n",
				obj->path, map_name);
			return -EINVAL;
		}

		map->name = strdup(map_name);
		if (!map->name) {
			pr_warn("map '%s': failed to alloc map name\n", map_name);
			return -ENOMEM;
		}
		pr_debug("map %d is \"%s\"\n", i, map->name);
		def = (struct bpf_map_def *)(data->d_buf + sym->st_value);
		/*
		 * If the definition of the map in the object file fits in
		 * bpf_map_def, copy it. Any extra fields in our version
		 * of bpf_map_def will default to zero as a result of the
		 * calloc above.
		 */
		if (map_def_sz <= sizeof(struct bpf_map_def)) {
			memcpy(&map->def, def, map_def_sz);
		} else {
			/*
			 * Here the map structure being read is bigger than what
			 * we expect, truncate if the excess bits are all zero.
			 * If they are not zero, reject this map as
			 * incompatible.
			 */
			char *b;

			for (b = ((char *)def) + sizeof(struct bpf_map_def);
			     b < ((char *)def) + map_def_sz; b++) {
				if (*b != 0) {
					pr_warn("maps section in %s: \"%s\" has unrecognized, non-zero options\n",
						obj->path, map_name);
					if (strict)
						return -EINVAL;
				}
			}
			memcpy(&map->def, def, sizeof(struct bpf_map_def));
		}
	}
	return 0;
}
4708bbda | 2014 | |
42869d28 | 2015 | const struct btf_type * |
ddc7c304 | 2016 | skip_mods_and_typedefs(const struct btf *btf, __u32 id, __u32 *res_id) |
abd29c93 AN |
2017 | { |
2018 | const struct btf_type *t = btf__type_by_id(btf, id); | |
8837fe5d | 2019 | |
ddc7c304 AN |
2020 | if (res_id) |
2021 | *res_id = id; | |
2022 | ||
2023 | while (btf_is_mod(t) || btf_is_typedef(t)) { | |
2024 | if (res_id) | |
2025 | *res_id = t->type; | |
2026 | t = btf__type_by_id(btf, t->type); | |
abd29c93 | 2027 | } |
ddc7c304 AN |
2028 | |
2029 | return t; | |
abd29c93 AN |
2030 | } |
2031 | ||
590a0088 MKL |
2032 | static const struct btf_type * |
2033 | resolve_func_ptr(const struct btf *btf, __u32 id, __u32 *res_id) | |
2034 | { | |
2035 | const struct btf_type *t; | |
2036 | ||
2037 | t = skip_mods_and_typedefs(btf, id, NULL); | |
2038 | if (!btf_is_ptr(t)) | |
2039 | return NULL; | |
2040 | ||
2041 | t = skip_mods_and_typedefs(btf, t->type, res_id); | |
2042 | ||
2043 | return btf_is_func_proto(t) ? t : NULL; | |
2044 | } | |
2045 | ||
774e132e | 2046 | static const char *__btf_kind_str(__u16 kind) |
81ba0889 | 2047 | { |
774e132e | 2048 | switch (kind) { |
81ba0889 AN |
2049 | case BTF_KIND_UNKN: return "void"; |
2050 | case BTF_KIND_INT: return "int"; | |
2051 | case BTF_KIND_PTR: return "ptr"; | |
2052 | case BTF_KIND_ARRAY: return "array"; | |
2053 | case BTF_KIND_STRUCT: return "struct"; | |
2054 | case BTF_KIND_UNION: return "union"; | |
2055 | case BTF_KIND_ENUM: return "enum"; | |
2056 | case BTF_KIND_FWD: return "fwd"; | |
2057 | case BTF_KIND_TYPEDEF: return "typedef"; | |
2058 | case BTF_KIND_VOLATILE: return "volatile"; | |
2059 | case BTF_KIND_CONST: return "const"; | |
2060 | case BTF_KIND_RESTRICT: return "restrict"; | |
2061 | case BTF_KIND_FUNC: return "func"; | |
2062 | case BTF_KIND_FUNC_PROTO: return "func_proto"; | |
2063 | case BTF_KIND_VAR: return "var"; | |
2064 | case BTF_KIND_DATASEC: return "datasec"; | |
22541a9e | 2065 | case BTF_KIND_FLOAT: return "float"; |
223f903e | 2066 | case BTF_KIND_DECL_TAG: return "decl_tag"; |
81ba0889 AN |
2067 | default: return "unknown"; |
2068 | } | |
2069 | } | |
2070 | ||
/* Human-readable name of a BTF type's kind. */
const char *btf_kind_str(const struct btf_type *t)
{
	__u16 kind = btf_kind(t);

	return __btf_kind_str(kind);
}
2075 | ||
ef99b02b AN |
2076 | /* |
2077 | * Fetch integer attribute of BTF map definition. Such attributes are | |
2078 | * represented using a pointer to an array, in which dimensionality of array | |
2079 | * encodes specified integer value. E.g., int (*type)[BPF_MAP_TYPE_ARRAY]; | |
2080 | * encodes `type => BPF_MAP_TYPE_ARRAY` key/value pair completely using BTF | |
2081 | * type definition, while using only sizeof(void *) space in ELF data section. | |
2082 | */ | |
2083 | static bool get_map_field_int(const char *map_name, const struct btf *btf, | |
8983b731 AN |
2084 | const struct btf_member *m, __u32 *res) |
2085 | { | |
ddc7c304 | 2086 | const struct btf_type *t = skip_mods_and_typedefs(btf, m->type, NULL); |
abd29c93 | 2087 | const char *name = btf__name_by_offset(btf, m->name_off); |
ef99b02b AN |
2088 | const struct btf_array *arr_info; |
2089 | const struct btf_type *arr_t; | |
abd29c93 | 2090 | |
b03bc685 | 2091 | if (!btf_is_ptr(t)) { |
81ba0889 AN |
2092 | pr_warn("map '%s': attr '%s': expected PTR, got %s.\n", |
2093 | map_name, name, btf_kind_str(t)); | |
abd29c93 AN |
2094 | return false; |
2095 | } | |
ef99b02b AN |
2096 | |
2097 | arr_t = btf__type_by_id(btf, t->type); | |
2098 | if (!arr_t) { | |
be18010e KW |
2099 | pr_warn("map '%s': attr '%s': type [%u] not found.\n", |
2100 | map_name, name, t->type); | |
abd29c93 AN |
2101 | return false; |
2102 | } | |
b03bc685 | 2103 | if (!btf_is_array(arr_t)) { |
81ba0889 AN |
2104 | pr_warn("map '%s': attr '%s': expected ARRAY, got %s.\n", |
2105 | map_name, name, btf_kind_str(arr_t)); | |
abd29c93 AN |
2106 | return false; |
2107 | } | |
b03bc685 | 2108 | arr_info = btf_array(arr_t); |
ef99b02b | 2109 | *res = arr_info->nelems; |
abd29c93 AN |
2110 | return true; |
2111 | } | |
2112 | ||
57a00f41 THJ |
2113 | static int build_map_pin_path(struct bpf_map *map, const char *path) |
2114 | { | |
2115 | char buf[PATH_MAX]; | |
6e9cab2e | 2116 | int len; |
57a00f41 THJ |
2117 | |
2118 | if (!path) | |
2119 | path = "/sys/fs/bpf"; | |
2120 | ||
2121 | len = snprintf(buf, PATH_MAX, "%s/%s", path, bpf_map__name(map)); | |
2122 | if (len < 0) | |
2123 | return -EINVAL; | |
2124 | else if (len >= PATH_MAX) | |
2125 | return -ENAMETOOLONG; | |
2126 | ||
6e9cab2e | 2127 | return bpf_map__set_pin_path(map, buf); |
57a00f41 THJ |
2128 | } |
2129 | ||
/* Parse a BTF-defined map definition (a struct whose members are named
 * "type", "max_entries", "key"/"key_size", "value"/"value_size",
 * "values", "pinning", etc.) into *map_def. Each recognized member also
 * sets the matching MAP_DEF_* bit in map_def->parts so callers can tell
 * which attributes were explicitly specified.
 *
 * @inner_def receives the parsed definition of a map-in-map's inner map;
 * passing inner_def == NULL marks this invocation as parsing an inner
 * definition itself, which forbids further nesting and pinning.
 * @strict makes unknown members a hard error instead of a debug skip.
 * Returns 0 on success, negative error code otherwise.
 */
int parse_btf_map_def(const char *map_name, struct btf *btf,
		      const struct btf_type *def_t, bool strict,
		      struct btf_map_def *map_def, struct btf_map_def *inner_def)
{
	const struct btf_type *t;
	const struct btf_member *m;
	bool is_inner = inner_def == NULL;
	int vlen, i;

	vlen = btf_vlen(def_t);
	m = btf_members(def_t);
	for (i = 0; i < vlen; i++, m++) {
		const char *name = btf__name_by_offset(btf, m->name_off);

		if (!name) {
			pr_warn("map '%s': invalid field #%d.\n", map_name, i);
			return -EINVAL;
		}
		if (strcmp(name, "type") == 0) {
			if (!get_map_field_int(map_name, btf, m, &map_def->map_type))
				return -EINVAL;
			map_def->parts |= MAP_DEF_MAP_TYPE;
		} else if (strcmp(name, "max_entries") == 0) {
			if (!get_map_field_int(map_name, btf, m, &map_def->max_entries))
				return -EINVAL;
			map_def->parts |= MAP_DEF_MAX_ENTRIES;
		} else if (strcmp(name, "map_flags") == 0) {
			if (!get_map_field_int(map_name, btf, m, &map_def->map_flags))
				return -EINVAL;
			map_def->parts |= MAP_DEF_MAP_FLAGS;
		} else if (strcmp(name, "numa_node") == 0) {
			if (!get_map_field_int(map_name, btf, m, &map_def->numa_node))
				return -EINVAL;
			map_def->parts |= MAP_DEF_NUMA_NODE;
		} else if (strcmp(name, "key_size") == 0) {
			__u32 sz;

			if (!get_map_field_int(map_name, btf, m, &sz))
				return -EINVAL;
			/* must agree with a size derived from "key", if seen */
			if (map_def->key_size && map_def->key_size != sz) {
				pr_warn("map '%s': conflicting key size %u != %u.\n",
					map_name, map_def->key_size, sz);
				return -EINVAL;
			}
			map_def->key_size = sz;
			map_def->parts |= MAP_DEF_KEY_SIZE;
		} else if (strcmp(name, "key") == 0) {
			__s64 sz;

			/* "key" is a pointer to the key type; its size is
			 * derived from the pointed-to BTF type
			 */
			t = btf__type_by_id(btf, m->type);
			if (!t) {
				pr_warn("map '%s': key type [%d] not found.\n",
					map_name, m->type);
				return -EINVAL;
			}
			if (!btf_is_ptr(t)) {
				pr_warn("map '%s': key spec is not PTR: %s.\n",
					map_name, btf_kind_str(t));
				return -EINVAL;
			}
			sz = btf__resolve_size(btf, t->type);
			if (sz < 0) {
				pr_warn("map '%s': can't determine key size for type [%u]: %zd.\n",
					map_name, t->type, (ssize_t)sz);
				return sz;
			}
			if (map_def->key_size && map_def->key_size != sz) {
				pr_warn("map '%s': conflicting key size %u != %zd.\n",
					map_name, map_def->key_size, (ssize_t)sz);
				return -EINVAL;
			}
			map_def->key_size = sz;
			map_def->key_type_id = t->type;
			map_def->parts |= MAP_DEF_KEY_SIZE | MAP_DEF_KEY_TYPE;
		} else if (strcmp(name, "value_size") == 0) {
			__u32 sz;

			if (!get_map_field_int(map_name, btf, m, &sz))
				return -EINVAL;
			if (map_def->value_size && map_def->value_size != sz) {
				pr_warn("map '%s': conflicting value size %u != %u.\n",
					map_name, map_def->value_size, sz);
				return -EINVAL;
			}
			map_def->value_size = sz;
			map_def->parts |= MAP_DEF_VALUE_SIZE;
		} else if (strcmp(name, "value") == 0) {
			__s64 sz;

			t = btf__type_by_id(btf, m->type);
			if (!t) {
				pr_warn("map '%s': value type [%d] not found.\n",
					map_name, m->type);
				return -EINVAL;
			}
			if (!btf_is_ptr(t)) {
				pr_warn("map '%s': value spec is not PTR: %s.\n",
					map_name, btf_kind_str(t));
				return -EINVAL;
			}
			sz = btf__resolve_size(btf, t->type);
			if (sz < 0) {
				pr_warn("map '%s': can't determine value size for type [%u]: %zd.\n",
					map_name, t->type, (ssize_t)sz);
				return sz;
			}
			if (map_def->value_size && map_def->value_size != sz) {
				pr_warn("map '%s': conflicting value size %u != %zd.\n",
					map_name, map_def->value_size, (ssize_t)sz);
				return -EINVAL;
			}
			map_def->value_size = sz;
			map_def->value_type_id = t->type;
			map_def->parts |= MAP_DEF_VALUE_SIZE | MAP_DEF_VALUE_TYPE;
		}
		else if (strcmp(name, "values") == 0) {
			/* map-in-map: "values" is a zero-sized array of
			 * pointers to the inner map's definition struct
			 */
			char inner_map_name[128];
			int err;

			if (is_inner) {
				pr_warn("map '%s': multi-level inner maps not supported.\n",
					map_name);
				return -ENOTSUP;
			}
			if (i != vlen - 1) {
				pr_warn("map '%s': '%s' member should be last.\n",
					map_name, name);
				return -EINVAL;
			}
			if (!bpf_map_type__is_map_in_map(map_def->map_type)) {
				pr_warn("map '%s': should be map-in-map.\n",
					map_name);
				return -ENOTSUP;
			}
			/* outer map values are inner map FDs (u32) */
			if (map_def->value_size && map_def->value_size != 4) {
				pr_warn("map '%s': conflicting value size %u != 4.\n",
					map_name, map_def->value_size);
				return -EINVAL;
			}
			map_def->value_size = 4;
			t = btf__type_by_id(btf, m->type);
			if (!t) {
				pr_warn("map '%s': map-in-map inner type [%d] not found.\n",
					map_name, m->type);
				return -EINVAL;
			}
			if (!btf_is_array(t) || btf_array(t)->nelems) {
				pr_warn("map '%s': map-in-map inner spec is not a zero-sized array.\n",
					map_name);
				return -EINVAL;
			}
			t = skip_mods_and_typedefs(btf, btf_array(t)->type, NULL);
			if (!btf_is_ptr(t)) {
				pr_warn("map '%s': map-in-map inner def is of unexpected kind %s.\n",
					map_name, btf_kind_str(t));
				return -EINVAL;
			}
			t = skip_mods_and_typedefs(btf, t->type, NULL);
			if (!btf_is_struct(t)) {
				pr_warn("map '%s': map-in-map inner def is of unexpected kind %s.\n",
					map_name, btf_kind_str(t));
				return -EINVAL;
			}

			/* recurse once; inner_def == NULL stops nesting */
			snprintf(inner_map_name, sizeof(inner_map_name), "%s.inner", map_name);
			err = parse_btf_map_def(inner_map_name, btf, t, strict, inner_def, NULL);
			if (err)
				return err;

			map_def->parts |= MAP_DEF_INNER_MAP;
		} else if (strcmp(name, "pinning") == 0) {
			__u32 val;

			if (is_inner) {
				pr_warn("map '%s': inner def can't be pinned.\n", map_name);
				return -EINVAL;
			}
			if (!get_map_field_int(map_name, btf, m, &val))
				return -EINVAL;
			if (val != LIBBPF_PIN_NONE && val != LIBBPF_PIN_BY_NAME) {
				pr_warn("map '%s': invalid pinning value %u.\n",
					map_name, val);
				return -EINVAL;
			}
			map_def->pinning = val;
			map_def->parts |= MAP_DEF_PINNING;
		} else {
			if (strict) {
				pr_warn("map '%s': unknown field '%s'.\n", map_name, name);
				return -ENOTSUP;
			}
			pr_debug("map '%s': ignoring unknown field '%s'.\n", map_name, name);
		}
	}

	if (map_def->map_type == BPF_MAP_TYPE_UNSPEC) {
		pr_warn("map '%s': map type isn't specified.\n", map_name);
		return -EINVAL;
	}

	return 0;
}
2332 | ||
/* Copy a parsed BTF map definition into the bpf_map's runtime fields,
 * logging (at debug level) each attribute that was explicitly set,
 * as recorded in def->parts.
 */
static void fill_map_from_def(struct bpf_map *map, const struct btf_map_def *def)
{
	map->def.type = def->map_type;
	map->def.key_size = def->key_size;
	map->def.value_size = def->value_size;
	map->def.max_entries = def->max_entries;
	map->def.map_flags = def->map_flags;

	map->numa_node = def->numa_node;
	map->btf_key_type_id = def->key_type_id;
	map->btf_value_type_id = def->value_type_id;

	if (def->parts & MAP_DEF_MAP_TYPE)
		pr_debug("map '%s': found type = %u.\n", map->name, def->map_type);

	if (def->parts & MAP_DEF_KEY_TYPE)
		pr_debug("map '%s': found key [%u], sz = %u.\n",
			 map->name, def->key_type_id, def->key_size);
	else if (def->parts & MAP_DEF_KEY_SIZE)
		pr_debug("map '%s': found key_size = %u.\n", map->name, def->key_size);

	if (def->parts & MAP_DEF_VALUE_TYPE)
		pr_debug("map '%s': found value [%u], sz = %u.\n",
			 map->name, def->value_type_id, def->value_size);
	else if (def->parts & MAP_DEF_VALUE_SIZE)
		pr_debug("map '%s': found value_size = %u.\n", map->name, def->value_size);

	if (def->parts & MAP_DEF_MAX_ENTRIES)
		pr_debug("map '%s': found max_entries = %u.\n", map->name, def->max_entries);
	if (def->parts & MAP_DEF_MAP_FLAGS)
		pr_debug("map '%s': found map_flags = %u.\n", map->name, def->map_flags);
	if (def->parts & MAP_DEF_PINNING)
		pr_debug("map '%s': found pinning = %u.\n", map->name, def->pinning);
	if (def->parts & MAP_DEF_NUMA_NODE)
		pr_debug("map '%s': found numa_node = %u.\n", map->name, def->numa_node);

	if (def->parts & MAP_DEF_INNER_MAP)
		pr_debug("map '%s': found inner map definition.\n", map->name);
}
2372 | ||
/* Human-readable name for a BTF variable's linkage kind. */
static const char *btf_var_linkage_str(__u32 linkage)
{
	if (linkage == BTF_VAR_STATIC)
		return "static";
	if (linkage == BTF_VAR_GLOBAL_ALLOCATED)
		return "global";
	if (linkage == BTF_VAR_GLOBAL_EXTERN)
		return "extern";
	return "unknown";
}
2382 | ||
41017e56 AN |
2383 | static int bpf_object__init_user_btf_map(struct bpf_object *obj, |
2384 | const struct btf_type *sec, | |
2385 | int var_idx, int sec_idx, | |
2386 | const Elf_Data *data, bool strict, | |
2387 | const char *pin_root_path) | |
2388 | { | |
c7ef5ec9 | 2389 | struct btf_map_def map_def = {}, inner_def = {}; |
41017e56 AN |
2390 | const struct btf_type *var, *def; |
2391 | const struct btf_var_secinfo *vi; | |
2392 | const struct btf_var *var_extra; | |
2393 | const char *map_name; | |
2394 | struct bpf_map *map; | |
c7ef5ec9 | 2395 | int err; |
41017e56 AN |
2396 | |
2397 | vi = btf_var_secinfos(sec) + var_idx; | |
2398 | var = btf__type_by_id(obj->btf, vi->type); | |
2399 | var_extra = btf_var(var); | |
2400 | map_name = btf__name_by_offset(obj->btf, var->name_off); | |
2401 | ||
2402 | if (map_name == NULL || map_name[0] == '\0') { | |
2403 | pr_warn("map #%d: empty name.\n", var_idx); | |
2404 | return -EINVAL; | |
2405 | } | |
2406 | if ((__u64)vi->offset + vi->size > data->d_size) { | |
2407 | pr_warn("map '%s' BTF data is corrupted.\n", map_name); | |
2408 | return -EINVAL; | |
2409 | } | |
2410 | if (!btf_is_var(var)) { | |
81ba0889 AN |
2411 | pr_warn("map '%s': unexpected var kind %s.\n", |
2412 | map_name, btf_kind_str(var)); | |
41017e56 AN |
2413 | return -EINVAL; |
2414 | } | |
c1cccec9 AN |
2415 | if (var_extra->linkage != BTF_VAR_GLOBAL_ALLOCATED) { |
2416 | pr_warn("map '%s': unsupported map linkage %s.\n", | |
2417 | map_name, btf_var_linkage_str(var_extra->linkage)); | |
41017e56 AN |
2418 | return -EOPNOTSUPP; |
2419 | } | |
2420 | ||
2421 | def = skip_mods_and_typedefs(obj->btf, var->type, NULL); | |
2422 | if (!btf_is_struct(def)) { | |
81ba0889 AN |
2423 | pr_warn("map '%s': unexpected def kind %s.\n", |
2424 | map_name, btf_kind_str(var)); | |
41017e56 AN |
2425 | return -EINVAL; |
2426 | } | |
2427 | if (def->size > vi->size) { | |
2428 | pr_warn("map '%s': invalid def size.\n", map_name); | |
2429 | return -EINVAL; | |
2430 | } | |
2431 | ||
2432 | map = bpf_object__add_map(obj); | |
2433 | if (IS_ERR(map)) | |
2434 | return PTR_ERR(map); | |
2435 | map->name = strdup(map_name); | |
2436 | if (!map->name) { | |
2437 | pr_warn("map '%s': failed to alloc map name.\n", map_name); | |
2438 | return -ENOMEM; | |
2439 | } | |
2440 | map->libbpf_type = LIBBPF_MAP_UNSPEC; | |
2441 | map->def.type = BPF_MAP_TYPE_UNSPEC; | |
2442 | map->sec_idx = sec_idx; | |
2443 | map->sec_offset = vi->offset; | |
646f02ff | 2444 | map->btf_var_idx = var_idx; |
41017e56 AN |
2445 | pr_debug("map '%s': at sec_idx %d, offset %zu.\n", |
2446 | map_name, map->sec_idx, map->sec_offset); | |
2447 | ||
c7ef5ec9 AN |
2448 | err = parse_btf_map_def(map->name, obj->btf, def, strict, &map_def, &inner_def); |
2449 | if (err) | |
2450 | return err; | |
2451 | ||
2452 | fill_map_from_def(map, &map_def); | |
2453 | ||
2454 | if (map_def.pinning == LIBBPF_PIN_BY_NAME) { | |
2455 | err = build_map_pin_path(map, pin_root_path); | |
2456 | if (err) { | |
2457 | pr_warn("map '%s': couldn't build pin path.\n", map->name); | |
2458 | return err; | |
2459 | } | |
2460 | } | |
2461 | ||
2462 | if (map_def.parts & MAP_DEF_INNER_MAP) { | |
2463 | map->inner_map = calloc(1, sizeof(*map->inner_map)); | |
2464 | if (!map->inner_map) | |
2465 | return -ENOMEM; | |
2466 | map->inner_map->fd = -1; | |
2467 | map->inner_map->sec_idx = sec_idx; | |
2468 | map->inner_map->name = malloc(strlen(map_name) + sizeof(".inner") + 1); | |
2469 | if (!map->inner_map->name) | |
2470 | return -ENOMEM; | |
2471 | sprintf(map->inner_map->name, "%s.inner", map_name); | |
2472 | ||
2473 | fill_map_from_def(map->inner_map, &inner_def); | |
2474 | } | |
2475 | ||
2476 | return 0; | |
41017e56 AN |
2477 | } |
2478 | ||
/* Parse all BTF-defined map definitions out of the ".maps" ELF section.
 *
 * Finds the DATASEC BTF type describing ".maps", remembers its BTF type ID
 * in obj->efile.btf_maps_sec_btf_id, then hands each contained variable
 * (one per map definition) to bpf_object__init_user_btf_map().
 *
 * Returns 0 on success; -EINVAL on missing/corrupted section data,
 * -ENOENT if no ".maps" DATASEC exists in the object's BTF, or the first
 * error from per-map parsing.
 */
static int bpf_object__init_user_btf_maps(struct bpf_object *obj, bool strict,
					  const char *pin_root_path)
{
	const struct btf_type *sec = NULL;
	int nr_types, i, vlen, err;
	const struct btf_type *t;
	const char *name;
	Elf_Data *data;
	Elf_Scn *scn;

	/* no ".maps" section recorded during ELF collection -> nothing to do */
	if (obj->efile.btf_maps_shndx < 0)
		return 0;

	scn = elf_sec_by_idx(obj, obj->efile.btf_maps_shndx);
	data = elf_sec_data(obj, scn);
	if (!scn || !data) {
		pr_warn("elf: failed to get %s map definitions for %s\n",
			MAPS_ELF_SEC, obj->path);
		return -EINVAL;
	}

	/* BTF type IDs are 1-based; scan for the DATASEC named ".maps" */
	nr_types = btf__get_nr_types(obj->btf);
	for (i = 1; i <= nr_types; i++) {
		t = btf__type_by_id(obj->btf, i);
		if (!btf_is_datasec(t))
			continue;
		name = btf__name_by_offset(obj->btf, t->name_off);
		if (strcmp(name, MAPS_ELF_SEC) == 0) {
			sec = t;
			obj->efile.btf_maps_sec_btf_id = i;
			break;
		}
	}

	if (!sec) {
		pr_warn("DATASEC '%s' not found.\n", MAPS_ELF_SEC);
		return -ENOENT;
	}

	/* each DATASEC member variable is one map definition */
	vlen = btf_vlen(sec);
	for (i = 0; i < vlen; i++) {
		err = bpf_object__init_user_btf_map(obj, sec, i,
						    obj->efile.btf_maps_shndx,
						    data, strict,
						    pin_root_path);
		if (err)
			return err;
	}

	return 0;
}
2530 | ||
0d13bfce | 2531 | static int bpf_object__init_maps(struct bpf_object *obj, |
01af3bf0 | 2532 | const struct bpf_object_open_opts *opts) |
bf829271 | 2533 | { |
166750bc AN |
2534 | const char *pin_root_path; |
2535 | bool strict; | |
bf829271 | 2536 | int err; |
8837fe5d | 2537 | |
166750bc AN |
2538 | strict = !OPTS_GET(opts, relaxed_maps, false); |
2539 | pin_root_path = OPTS_GET(opts, pin_root_path, NULL); | |
bf829271 | 2540 | |
166750bc AN |
2541 | err = bpf_object__init_user_maps(obj, strict); |
2542 | err = err ?: bpf_object__init_user_btf_maps(obj, strict, pin_root_path); | |
2543 | err = err ?: bpf_object__init_global_data_maps(obj); | |
81bfdd08 | 2544 | err = err ?: bpf_object__init_kconfig_map(obj); |
590a0088 | 2545 | err = err ?: bpf_object__init_struct_ops_maps(obj); |
bf829271 | 2546 | |
3b3af91c | 2547 | return err; |
561bbcca WN |
2548 | } |
2549 | ||
e3d91b0c JDB |
2550 | static bool section_have_execinstr(struct bpf_object *obj, int idx) |
2551 | { | |
ad23b723 | 2552 | Elf64_Shdr *sh; |
e3d91b0c | 2553 | |
ad23b723 AN |
2554 | sh = elf_sec_hdr(obj, elf_sec_by_idx(obj, idx)); |
2555 | if (!sh) | |
e3d91b0c JDB |
2556 | return false; |
2557 | ||
ad23b723 | 2558 | return sh->sh_flags & SHF_EXECINSTR; |
e3d91b0c JDB |
2559 | } |
2560 | ||
0f0e55d8 AN |
2561 | static bool btf_needs_sanitization(struct bpf_object *obj) |
2562 | { | |
9ca1f56a AS |
2563 | bool has_func_global = kernel_supports(obj, FEAT_BTF_GLOBAL_FUNC); |
2564 | bool has_datasec = kernel_supports(obj, FEAT_BTF_DATASEC); | |
2565 | bool has_float = kernel_supports(obj, FEAT_BTF_FLOAT); | |
2566 | bool has_func = kernel_supports(obj, FEAT_BTF_FUNC); | |
223f903e | 2567 | bool has_decl_tag = kernel_supports(obj, FEAT_BTF_DECL_TAG); |
0f0e55d8 | 2568 | |
223f903e | 2569 | return !has_func || !has_datasec || !has_func_global || !has_float || !has_decl_tag; |
0f0e55d8 AN |
2570 | } |
2571 | ||
/* Rewrite, in place, every BTF kind in @btf that the running kernel does not
 * understand into an older kind it does accept, so the (cloned) BTF can
 * still be loaded. The replacements preserve type sizes/layout so kernel
 * BTF validation keeps passing. @btf is expected to be a disposable copy.
 */
static void bpf_object__sanitize_btf(struct bpf_object *obj, struct btf *btf)
{
	bool has_func_global = kernel_supports(obj, FEAT_BTF_GLOBAL_FUNC);
	bool has_datasec = kernel_supports(obj, FEAT_BTF_DATASEC);
	bool has_float = kernel_supports(obj, FEAT_BTF_FLOAT);
	bool has_func = kernel_supports(obj, FEAT_BTF_FUNC);
	bool has_decl_tag = kernel_supports(obj, FEAT_BTF_DECL_TAG);
	struct btf_type *t;
	int i, j, vlen;

	for (i = 1; i <= btf__get_nr_types(btf); i++) {
		/* cast away const: we deliberately mutate this BTF copy */
		t = (struct btf_type *)btf__type_by_id(btf, i);

		if ((!has_datasec && btf_is_var(t)) || (!has_decl_tag && btf_is_decl_tag(t))) {
			/* replace VAR/DECL_TAG with INT */
			t->info = BTF_INFO_ENC(BTF_KIND_INT, 0, 0);
			/*
			 * using size = 1 is the safest choice, 4 will be too
			 * big and cause kernel BTF validation failure if
			 * original variable took less than 4 bytes
			 */
			t->size = 1;
			/* INT metadata word immediately follows btf_type */
			*(int *)(t + 1) = BTF_INT_ENC(0, 0, 8);
		} else if (!has_datasec && btf_is_datasec(t)) {
			/* replace DATASEC with STRUCT */
			const struct btf_var_secinfo *v = btf_var_secinfos(t);
			struct btf_member *m = btf_members(t);
			struct btf_type *vt;
			char *name;

			/* section names like ".data" aren't valid struct
			 * names; replace '.' with '_'
			 */
			name = (char *)btf__name_by_offset(btf, t->name_off);
			while (*name) {
				if (*name == '.')
					*name = '_';
				name++;
			}

			vlen = btf_vlen(t);
			t->info = BTF_INFO_ENC(BTF_KIND_STRUCT, 0, vlen);
			for (j = 0; j < vlen; j++, v++, m++) {
				/* order of field assignments is important:
				 * btf_var_secinfo and btf_member overlap in
				 * memory, so write each member field only
				 * after its secinfo source was consumed
				 */
				m->offset = v->offset * 8;
				m->type = v->type;
				/* preserve variable name as member name */
				vt = (void *)btf__type_by_id(btf, v->type);
				m->name_off = vt->name_off;
			}
		} else if (!has_func && btf_is_func_proto(t)) {
			/* replace FUNC_PROTO with ENUM */
			vlen = btf_vlen(t);
			t->info = BTF_INFO_ENC(BTF_KIND_ENUM, 0, vlen);
			t->size = sizeof(__u32); /* kernel enforced */
		} else if (!has_func && btf_is_func(t)) {
			/* replace FUNC with TYPEDEF */
			t->info = BTF_INFO_ENC(BTF_KIND_TYPEDEF, 0, 0);
		} else if (!has_func_global && btf_is_func(t)) {
			/* replace BTF_FUNC_GLOBAL with BTF_FUNC_STATIC */
			t->info = BTF_INFO_ENC(BTF_KIND_FUNC, 0, 0);
		} else if (!has_float && btf_is_float(t)) {
			/* replace FLOAT with an equally-sized empty STRUCT;
			 * since C compilers do not accept e.g. "float" as a
			 * valid struct name, make it anonymous
			 */
			t->name_off = 0;
			t->info = BTF_INFO_ENC(BTF_KIND_STRUCT, 0, 0);
		}
	}
}
2640 | ||
b35f14f4 | 2641 | static bool libbpf_needs_btf(const struct bpf_object *obj) |
abd29c93 | 2642 | { |
b35f14f4 AN |
2643 | return obj->efile.btf_maps_shndx >= 0 || |
2644 | obj->efile.st_ops_shndx >= 0 || | |
2645 | obj->nr_extern > 0; | |
2646 | } | |
2647 | ||
/* Kernel-side BTF upload is mandatory only for objects with a struct_ops
 * section; for everything else a failed BTF load is tolerated (see
 * bpf_object__sanitize_and_load_btf()).
 */
static bool kernel_needs_btf(const struct bpf_object *obj)
{
	return obj->efile.st_ops_shndx >= 0;
}
2652 | ||
/* Parse the raw .BTF and (optionally) .BTF.ext ELF section contents into
 * obj->btf / obj->btf_ext.
 *
 * A malformed or missing .BTF.ext is tolerated (warned and ignored);
 * missing/corrupted .BTF is only an error if libbpf_needs_btf() says the
 * object cannot be processed without it.
 */
static int bpf_object__init_btf(struct bpf_object *obj,
				Elf_Data *btf_data,
				Elf_Data *btf_ext_data)
{
	/* stays -ENOENT if no .BTF data was passed in at all */
	int err = -ENOENT;

	if (btf_data) {
		obj->btf = btf__new(btf_data->d_buf, btf_data->d_size);
		err = libbpf_get_error(obj->btf);
		if (err) {
			obj->btf = NULL;
			pr_warn("Error loading ELF section %s: %d.\n", BTF_ELF_SEC, err);
			goto out;
		}
		/* enforce 8-byte pointers for BPF-targeted BTFs */
		btf__set_pointer_size(obj->btf, 8);
	}
	if (btf_ext_data) {
		/* .BTF.ext is only meaningful relative to a parsed .BTF */
		if (!obj->btf) {
			pr_debug("Ignore ELF section %s because its depending ELF section %s is not found.\n",
				 BTF_EXT_ELF_SEC, BTF_ELF_SEC);
			goto out;
		}
		obj->btf_ext = btf_ext__new(btf_ext_data->d_buf, btf_ext_data->d_size);
		err = libbpf_get_error(obj->btf_ext);
		if (err) {
			pr_warn("Error loading ELF section %s: %d. Ignored and continue.\n",
				BTF_EXT_ELF_SEC, err);
			obj->btf_ext = NULL;
			goto out;
		}
	}
out:
	if (err && libbpf_needs_btf(obj)) {
		pr_warn("BTF is required, but is missing or corrupted.\n");
		return err;
	}
	return 0;
}
2692 | ||
b96c07f3 AN |
2693 | static int compare_vsi_off(const void *_a, const void *_b) |
2694 | { | |
2695 | const struct btf_var_secinfo *a = _a; | |
2696 | const struct btf_var_secinfo *b = _b; | |
2697 | ||
2698 | return a->offset - b->offset; | |
2699 | } | |
2700 | ||
2701 | static int btf_fixup_datasec(struct bpf_object *obj, struct btf *btf, | |
2702 | struct btf_type *t) | |
2703 | { | |
2704 | __u32 size = 0, off = 0, i, vars = btf_vlen(t); | |
2705 | const char *name = btf__name_by_offset(btf, t->name_off); | |
2706 | const struct btf_type *t_var; | |
2707 | struct btf_var_secinfo *vsi; | |
2708 | const struct btf_var *var; | |
2709 | int ret; | |
2710 | ||
2711 | if (!name) { | |
2712 | pr_debug("No name found in string section for DATASEC kind.\n"); | |
2713 | return -ENOENT; | |
2714 | } | |
2715 | ||
2716 | /* .extern datasec size and var offsets were set correctly during | |
2717 | * extern collection step, so just skip straight to sorting variables | |
2718 | */ | |
2719 | if (t->size) | |
2720 | goto sort_vars; | |
2721 | ||
2722 | ret = find_elf_sec_sz(obj, name, &size); | |
2723 | if (ret || !size || (t->size && t->size != size)) { | |
2724 | pr_debug("Invalid size for section %s: %u bytes\n", name, size); | |
2725 | return -ENOENT; | |
2726 | } | |
2727 | ||
2728 | t->size = size; | |
2729 | ||
2730 | for (i = 0, vsi = btf_var_secinfos(t); i < vars; i++, vsi++) { | |
2731 | t_var = btf__type_by_id(btf, vsi->type); | |
2732 | var = btf_var(t_var); | |
2733 | ||
2734 | if (!btf_is_var(t_var)) { | |
2735 | pr_debug("Non-VAR type seen in section %s\n", name); | |
2736 | return -EINVAL; | |
2737 | } | |
2738 | ||
2739 | if (var->linkage == BTF_VAR_STATIC) | |
2740 | continue; | |
2741 | ||
2742 | name = btf__name_by_offset(btf, t_var->name_off); | |
2743 | if (!name) { | |
2744 | pr_debug("No name found in string section for VAR kind\n"); | |
2745 | return -ENOENT; | |
2746 | } | |
2747 | ||
2748 | ret = find_elf_var_offset(obj, name, &off); | |
2749 | if (ret) { | |
2750 | pr_debug("No offset found in symbol table for VAR %s\n", | |
2751 | name); | |
2752 | return -ENOENT; | |
2753 | } | |
2754 | ||
2755 | vsi->offset = off; | |
2756 | } | |
2757 | ||
2758 | sort_vars: | |
2759 | qsort(btf_var_secinfos(t), vars, sizeof(*vsi), compare_vsi_off); | |
2760 | return 0; | |
2761 | } | |
2762 | ||
2763 | static int btf_finalize_data(struct bpf_object *obj, struct btf *btf) | |
2764 | { | |
2765 | int err = 0; | |
2766 | __u32 i, n = btf__get_nr_types(btf); | |
2767 | ||
2768 | for (i = 1; i <= n; i++) { | |
2769 | struct btf_type *t = btf_type_by_id(btf, i); | |
2770 | ||
2771 | /* Loader needs to fix up some of the things compiler | |
2772 | * couldn't get its hands on while emitting BTF. This | |
2773 | * is section size and global variable offset. We use | |
2774 | * the info from the ELF itself for this purpose. | |
2775 | */ | |
2776 | if (btf_is_datasec(t)) { | |
2777 | err = btf_fixup_datasec(obj, btf, t); | |
2778 | if (err) | |
2779 | break; | |
2780 | } | |
2781 | } | |
2782 | ||
2783 | return libbpf_err(err); | |
2784 | } | |
2785 | ||
/* Non-static entry point; thin wrapper around btf_finalize_data(). */
int btf__finalize_data(struct bpf_object *obj, struct btf *btf)
{
	return btf_finalize_data(obj, btf);
}
2790 | ||
166750bc AN |
2791 | static int bpf_object__finalize_btf(struct bpf_object *obj) |
2792 | { | |
2793 | int err; | |
2794 | ||
2795 | if (!obj->btf) | |
2796 | return 0; | |
2797 | ||
b96c07f3 | 2798 | err = btf_finalize_data(obj, obj->btf); |
bfc96656 AN |
2799 | if (err) { |
2800 | pr_warn("Error finalizing %s: %d.\n", BTF_ELF_SEC, err); | |
2801 | return err; | |
166750bc | 2802 | } |
bfc96656 | 2803 | |
166750bc AN |
2804 | return 0; |
2805 | } | |
2806 | ||
fe62de31 | 2807 | static bool prog_needs_vmlinux_btf(struct bpf_program *prog) |
a6ed02ca | 2808 | { |
1e092a03 KS |
2809 | if (prog->type == BPF_PROG_TYPE_STRUCT_OPS || |
2810 | prog->type == BPF_PROG_TYPE_LSM) | |
a6ed02ca KS |
2811 | return true; |
2812 | ||
2813 | /* BPF_PROG_TYPE_TRACING programs which do not attach to other programs | |
2814 | * also need vmlinux BTF | |
2815 | */ | |
2816 | if (prog->type == BPF_PROG_TYPE_TRACING && !prog->attach_prog_fd) | |
2817 | return true; | |
2818 | ||
2819 | return false; | |
2820 | } | |
2821 | ||
/* Decide whether the object as a whole requires vmlinux (kernel) BTF:
 * CO-RE relocations without a custom BTF path, typed ksym externs, or any
 * loadable program whose type needs it (see prog_needs_vmlinux_btf()).
 */
static bool obj_needs_vmlinux_btf(const struct bpf_object *obj)
{
	struct bpf_program *prog;
	int i;

	/* CO-RE relocations need kernel BTF, only when btf_custom_path
	 * is not specified
	 */
	if (obj->btf_ext && obj->btf_ext->core_relo_info.len && !obj->btf_custom_path)
		return true;

	/* Support for typed ksyms needs kernel BTF */
	for (i = 0; i < obj->nr_extern; i++) {
		const struct extern_desc *ext;

		ext = &obj->externs[i];
		if (ext->type == EXT_KSYM && ext->ksym.type_id)
			return true;
	}

	bpf_object__for_each_program(prog, obj) {
		/* programs marked not-to-load can't create the requirement */
		if (!prog->load)
			continue;
		if (prog_needs_vmlinux_btf(prog))
			return true;
	}

	return false;
}
2851 | ||
2852 | static int bpf_object__load_vmlinux_btf(struct bpf_object *obj, bool force) | |
2853 | { | |
2854 | int err; | |
2855 | ||
2856 | /* btf_vmlinux could be loaded earlier */ | |
67234743 | 2857 | if (obj->btf_vmlinux || obj->gen_loader) |
fe62de31 AN |
2858 | return 0; |
2859 | ||
2860 | if (!force && !obj_needs_vmlinux_btf(obj)) | |
192b6638 AN |
2861 | return 0; |
2862 | ||
a710eed3 | 2863 | obj->btf_vmlinux = btf__load_vmlinux_btf(); |
e9fc3ce9 AN |
2864 | err = libbpf_get_error(obj->btf_vmlinux); |
2865 | if (err) { | |
192b6638 AN |
2866 | pr_warn("Error loading vmlinux BTF: %d\n", err); |
2867 | obj->btf_vmlinux = NULL; | |
2868 | return err; | |
2869 | } | |
a6ed02ca KS |
2870 | return 0; |
2871 | } | |
2872 | ||
/* Upload the object's BTF into the kernel, sanitizing a temporary clone
 * first if the kernel lacks support for some BTF kinds. On success the
 * resulting kernel fd always ends up on obj->btf (moved from the clone when
 * one was used). A failed upload is fatal only when kernel_needs_btf().
 */
static int bpf_object__sanitize_and_load_btf(struct bpf_object *obj)
{
	struct btf *kern_btf = obj->btf;
	bool btf_mandatory, sanitize;
	int i, err = 0;

	if (!obj->btf)
		return 0;

	if (!kernel_supports(obj, FEAT_BTF)) {
		if (kernel_needs_btf(obj)) {
			err = -EOPNOTSUPP;
			goto report;
		}
		pr_debug("Kernel doesn't support BTF, skipping uploading it.\n");
		return 0;
	}

	/* Even though some subprogs are global/weak, user might prefer more
	 * permissive BPF verification process that BPF verifier performs for
	 * static functions, taking into account more context from the caller
	 * functions. In such case, they need to mark such subprogs with
	 * __attribute__((visibility("hidden"))) and libbpf will adjust
	 * corresponding FUNC BTF type to be marked as static and trigger more
	 * involved BPF verification process.
	 */
	for (i = 0; i < obj->nr_programs; i++) {
		struct bpf_program *prog = &obj->programs[i];
		struct btf_type *t;
		const char *name;
		int j, n;

		if (!prog->mark_btf_static || !prog_is_subprog(obj, prog))
			continue;

		/* find the FUNC BTF type matching this subprog by name and
		 * demote its linkage from global to static
		 */
		n = btf__get_nr_types(obj->btf);
		for (j = 1; j <= n; j++) {
			t = btf_type_by_id(obj->btf, j);
			if (!btf_is_func(t) || btf_func_linkage(t) != BTF_FUNC_GLOBAL)
				continue;

			name = btf__str_by_offset(obj->btf, t->name_off);
			if (strcmp(name, prog->name) != 0)
				continue;

			t->info = btf_type_info(BTF_KIND_FUNC, BTF_FUNC_STATIC, 0);
			break;
		}
	}

	sanitize = btf_needs_sanitization(obj);
	if (sanitize) {
		const void *raw_data;
		__u32 sz;

		/* clone BTF to sanitize a copy and leave the original intact */
		raw_data = btf__get_raw_data(obj->btf, &sz);
		kern_btf = btf__new(raw_data, sz);
		err = libbpf_get_error(kern_btf);
		if (err)
			return err;

		/* enforce 8-byte pointers for BPF-targeted BTFs */
		/* NOTE(review): pointer size is set on the original obj->btf
		 * while sanitization mutates the clone — confirm this is the
		 * intended target
		 */
		btf__set_pointer_size(obj->btf, 8);
		bpf_object__sanitize_btf(obj, kern_btf);
	}

	if (obj->gen_loader) {
		__u32 raw_size = 0;
		const void *raw_data = btf__get_raw_data(kern_btf, &raw_size);

		if (!raw_data)
			return -ENOMEM;
		bpf_gen__load_btf(obj->gen_loader, raw_data, raw_size);
		/* Pretend to have valid FD to pass various fd >= 0 checks.
		 * This fd == 0 will not be used with any syscall and will be reset to -1 eventually.
		 */
		btf__set_fd(kern_btf, 0);
	} else {
		err = btf__load_into_kernel(kern_btf);
	}
	if (sanitize) {
		if (!err) {
			/* move fd to libbpf's BTF */
			btf__set_fd(obj->btf, btf__fd(kern_btf));
			btf__set_fd(kern_btf, -1);
		}
		btf__free(kern_btf);
	}
report:
	if (err) {
		btf_mandatory = kernel_needs_btf(obj);
		pr_warn("Error loading .BTF into kernel: %d. %s\n", err,
			btf_mandatory ? "BTF is mandatory, can't proceed."
				      : "BTF is optional, ignoring.");
		if (!btf_mandatory)
			err = 0;
	}
	return err;
}
2973 | ||
88a82120 AN |
2974 | static const char *elf_sym_str(const struct bpf_object *obj, size_t off) |
2975 | { | |
2976 | const char *name; | |
2977 | ||
2978 | name = elf_strptr(obj->efile.elf, obj->efile.strtabidx, off); | |
2979 | if (!name) { | |
2980 | pr_warn("elf: failed to get section name string at offset %zu from %s: %s\n", | |
2981 | off, obj->path, elf_errmsg(-1)); | |
2982 | return NULL; | |
2983 | } | |
2984 | ||
2985 | return name; | |
2986 | } | |
2987 | ||
2988 | static const char *elf_sec_str(const struct bpf_object *obj, size_t off) | |
2989 | { | |
2990 | const char *name; | |
2991 | ||
2992 | name = elf_strptr(obj->efile.elf, obj->efile.shstrndx, off); | |
2993 | if (!name) { | |
2994 | pr_warn("elf: failed to get section name string at offset %zu from %s: %s\n", | |
2995 | off, obj->path, elf_errmsg(-1)); | |
2996 | return NULL; | |
2997 | } | |
2998 | ||
2999 | return name; | |
3000 | } | |
3001 | ||
3002 | static Elf_Scn *elf_sec_by_idx(const struct bpf_object *obj, size_t idx) | |
3003 | { | |
3004 | Elf_Scn *scn; | |
3005 | ||
3006 | scn = elf_getscn(obj->efile.elf, idx); | |
3007 | if (!scn) { | |
3008 | pr_warn("elf: failed to get section(%zu) from %s: %s\n", | |
3009 | idx, obj->path, elf_errmsg(-1)); | |
3010 | return NULL; | |
3011 | } | |
3012 | return scn; | |
3013 | } | |
3014 | ||
3015 | static Elf_Scn *elf_sec_by_name(const struct bpf_object *obj, const char *name) | |
3016 | { | |
3017 | Elf_Scn *scn = NULL; | |
3018 | Elf *elf = obj->efile.elf; | |
3019 | const char *sec_name; | |
3020 | ||
3021 | while ((scn = elf_nextscn(elf, scn)) != NULL) { | |
3022 | sec_name = elf_sec_name(obj, scn); | |
3023 | if (!sec_name) | |
3024 | return NULL; | |
3025 | ||
3026 | if (strcmp(sec_name, name) != 0) | |
3027 | continue; | |
3028 | ||
3029 | return scn; | |
3030 | } | |
3031 | return NULL; | |
3032 | } | |
3033 | ||
ad23b723 | 3034 | static Elf64_Shdr *elf_sec_hdr(const struct bpf_object *obj, Elf_Scn *scn) |
88a82120 | 3035 | { |
ad23b723 AN |
3036 | Elf64_Shdr *shdr; |
3037 | ||
88a82120 | 3038 | if (!scn) |
ad23b723 | 3039 | return NULL; |
88a82120 | 3040 | |
ad23b723 AN |
3041 | shdr = elf64_getshdr(scn); |
3042 | if (!shdr) { | |
88a82120 AN |
3043 | pr_warn("elf: failed to get section(%zu) header from %s: %s\n", |
3044 | elf_ndxscn(scn), obj->path, elf_errmsg(-1)); | |
ad23b723 | 3045 | return NULL; |
88a82120 AN |
3046 | } |
3047 | ||
ad23b723 | 3048 | return shdr; |
88a82120 AN |
3049 | } |
3050 | ||
3051 | static const char *elf_sec_name(const struct bpf_object *obj, Elf_Scn *scn) | |
3052 | { | |
3053 | const char *name; | |
ad23b723 | 3054 | Elf64_Shdr *sh; |
88a82120 AN |
3055 | |
3056 | if (!scn) | |
3057 | return NULL; | |
3058 | ||
ad23b723 AN |
3059 | sh = elf_sec_hdr(obj, scn); |
3060 | if (!sh) | |
88a82120 AN |
3061 | return NULL; |
3062 | ||
ad23b723 | 3063 | name = elf_sec_str(obj, sh->sh_name); |
88a82120 AN |
3064 | if (!name) { |
3065 | pr_warn("elf: failed to get section(%zu) name from %s: %s\n", | |
3066 | elf_ndxscn(scn), obj->path, elf_errmsg(-1)); | |
3067 | return NULL; | |
3068 | } | |
3069 | ||
3070 | return name; | |
3071 | } | |
3072 | ||
3073 | static Elf_Data *elf_sec_data(const struct bpf_object *obj, Elf_Scn *scn) | |
3074 | { | |
3075 | Elf_Data *data; | |
3076 | ||
3077 | if (!scn) | |
3078 | return NULL; | |
3079 | ||
3080 | data = elf_getdata(scn, 0); | |
3081 | if (!data) { | |
3082 | pr_warn("elf: failed to get section(%zu) %s data from %s: %s\n", | |
3083 | elf_ndxscn(scn), elf_sec_name(obj, scn) ?: "<?>", | |
3084 | obj->path, elf_errmsg(-1)); | |
3085 | return NULL; | |
3086 | } | |
3087 | ||
3088 | return data; | |
3089 | } | |
3090 | ||
ad23b723 AN |
3091 | static Elf64_Sym *elf_sym_by_idx(const struct bpf_object *obj, size_t idx) |
3092 | { | |
3093 | if (idx >= obj->efile.symbols->d_size / sizeof(Elf64_Sym)) | |
3094 | return NULL; | |
3095 | ||
3096 | return (Elf64_Sym *)obj->efile.symbols->d_buf + idx; | |
3097 | } | |
3098 | ||
3099 | static Elf64_Rel *elf_rel_by_idx(Elf_Data *data, size_t idx) | |
3100 | { | |
3101 | if (idx >= data->d_size / sizeof(Elf64_Rel)) | |
3102 | return NULL; | |
3103 | ||
3104 | return (Elf64_Rel *)data->d_buf + idx; | |
3105 | } | |
3106 | ||
/* Does @name look like a DWARF debug section (".debug_" prefix)? */
static bool is_sec_name_dwarf(const char *name)
{
	/* approximation, but the actual list is too long */
	return strncmp(name, ".debug_", sizeof(".debug_") - 1) == 0;
}
3112 | ||
ad23b723 | 3113 | static bool ignore_elf_section(Elf64_Shdr *hdr, const char *name) |
50e09460 AN |
3114 | { |
3115 | /* no special handling of .strtab */ | |
3116 | if (hdr->sh_type == SHT_STRTAB) | |
3117 | return true; | |
3118 | ||
3119 | /* ignore .llvm_addrsig section as well */ | |
faf6ed32 | 3120 | if (hdr->sh_type == SHT_LLVM_ADDRSIG) |
50e09460 AN |
3121 | return true; |
3122 | ||
3123 | /* no subprograms will lead to an empty .text section, ignore it */ | |
3124 | if (hdr->sh_type == SHT_PROGBITS && hdr->sh_size == 0 && | |
3125 | strcmp(name, ".text") == 0) | |
3126 | return true; | |
3127 | ||
3128 | /* DWARF sections */ | |
3129 | if (is_sec_name_dwarf(name)) | |
3130 | return true; | |
3131 | ||
13d35a0c | 3132 | if (str_has_pfx(name, ".rel")) { |
50e09460 AN |
3133 | name += sizeof(".rel") - 1; |
3134 | /* DWARF section relocations */ | |
3135 | if (is_sec_name_dwarf(name)) | |
3136 | return true; | |
3137 | ||
3138 | /* .BTF and .BTF.ext don't need relocations */ | |
3139 | if (strcmp(name, BTF_ELF_SEC) == 0 || | |
3140 | strcmp(name, BTF_EXT_ELF_SEC) == 0) | |
3141 | return true; | |
3142 | } | |
3143 | ||
3144 | return false; | |
3145 | } | |
3146 | ||
db2b8b06 AN |
3147 | static int cmp_progs(const void *_a, const void *_b) |
3148 | { | |
3149 | const struct bpf_program *a = _a; | |
3150 | const struct bpf_program *b = _b; | |
3151 | ||
3152 | if (a->sec_idx != b->sec_idx) | |
3153 | return a->sec_idx < b->sec_idx ? -1 : 1; | |
3154 | ||
3155 | /* sec_insn_off can't be the same within the section */ | |
3156 | return a->sec_insn_off < b->sec_insn_off ? -1 : 1; | |
3157 | } | |
3158 | ||
/* Walk the object's ELF file and record everything the loader needs:
 * symbol table (pass 1), then (pass 2) license/version, map sections, BTF
 * data, program code, global data sections, and relocation sections.
 * Finishes by sorting programs and parsing BTF. Returns 0 or a negative
 * error (mostly -LIBBPF_ERRNO__FORMAT on malformed ELF).
 */
static int bpf_object__elf_collect(struct bpf_object *obj)
{
	struct elf_sec_desc *sec_desc;
	Elf *elf = obj->efile.elf;
	Elf_Data *btf_ext_data = NULL;
	Elf_Data *btf_data = NULL;
	int idx = 0, err = 0;
	const char *name;
	Elf_Data *data;
	Elf_Scn *scn;
	Elf64_Shdr *sh;

	/* ELF section indices are 1-based, so allocate +1 element to keep
	 * indexing simple. Also include 0th invalid section into sec_cnt for
	 * simpler and more traditional iteration logic.
	 */
	obj->efile.sec_cnt = 1 + obj->efile.ehdr->e_shnum;
	obj->efile.secs = calloc(obj->efile.sec_cnt, sizeof(*obj->efile.secs));
	if (!obj->efile.secs)
		return -ENOMEM;

	/* a bunch of ELF parsing functionality depends on processing symbols,
	 * so do the first pass and find the symbol table
	 */
	scn = NULL;
	while ((scn = elf_nextscn(elf, scn)) != NULL) {
		sh = elf_sec_hdr(obj, scn);
		if (!sh)
			return -LIBBPF_ERRNO__FORMAT;

		if (sh->sh_type == SHT_SYMTAB) {
			if (obj->efile.symbols) {
				pr_warn("elf: multiple symbol tables in %s\n", obj->path);
				return -LIBBPF_ERRNO__FORMAT;
			}

			data = elf_sec_data(obj, scn);
			if (!data)
				return -LIBBPF_ERRNO__FORMAT;

			idx = elf_ndxscn(scn);

			obj->efile.symbols = data;
			obj->efile.symbols_shndx = idx;
			/* sh_link of SYMTAB points at its string table */
			obj->efile.strtabidx = sh->sh_link;
		}
	}

	if (!obj->efile.symbols) {
		pr_warn("elf: couldn't find symbol table in %s, stripped object file?\n",
			obj->path);
		return -ENOENT;
	}

	/* second pass: classify each section by name and/or type */
	scn = NULL;
	while ((scn = elf_nextscn(elf, scn)) != NULL) {
		idx = elf_ndxscn(scn);
		sec_desc = &obj->efile.secs[idx];

		sh = elf_sec_hdr(obj, scn);
		if (!sh)
			return -LIBBPF_ERRNO__FORMAT;

		name = elf_sec_str(obj, sh->sh_name);
		if (!name)
			return -LIBBPF_ERRNO__FORMAT;

		if (ignore_elf_section(sh, name))
			continue;

		data = elf_sec_data(obj, scn);
		if (!data)
			return -LIBBPF_ERRNO__FORMAT;

		pr_debug("elf: section(%d) %s, size %ld, link %d, flags %lx, type=%d\n",
			 idx, name, (unsigned long)data->d_size,
			 (int)sh->sh_link, (unsigned long)sh->sh_flags,
			 (int)sh->sh_type);

		if (strcmp(name, "license") == 0) {
			err = bpf_object__init_license(obj, data->d_buf, data->d_size);
			if (err)
				return err;
		} else if (strcmp(name, "version") == 0) {
			err = bpf_object__init_kversion(obj, data->d_buf, data->d_size);
			if (err)
				return err;
		} else if (strcmp(name, "maps") == 0) {
			/* legacy (non-BTF) map definitions */
			obj->efile.maps_shndx = idx;
		} else if (strcmp(name, MAPS_ELF_SEC) == 0) {
			/* BTF-defined ".maps" definitions */
			obj->efile.btf_maps_shndx = idx;
		} else if (strcmp(name, BTF_ELF_SEC) == 0) {
			btf_data = data;
		} else if (strcmp(name, BTF_EXT_ELF_SEC) == 0) {
			btf_ext_data = data;
		} else if (sh->sh_type == SHT_SYMTAB) {
			/* already processed during the first pass above */
		} else if (sh->sh_type == SHT_PROGBITS && data->d_size > 0) {
			if (sh->sh_flags & SHF_EXECINSTR) {
				if (strcmp(name, ".text") == 0)
					obj->efile.text_shndx = idx;
				err = bpf_object__add_programs(obj, data, name, idx);
				if (err)
					return err;
			} else if (strcmp(name, DATA_SEC) == 0 ||
				   str_has_pfx(name, DATA_SEC ".")) {
				sec_desc->sec_type = SEC_DATA;
				sec_desc->shdr = sh;
				sec_desc->data = data;
			} else if (strcmp(name, RODATA_SEC) == 0 ||
				   str_has_pfx(name, RODATA_SEC ".")) {
				sec_desc->sec_type = SEC_RODATA;
				sec_desc->shdr = sh;
				sec_desc->data = data;
			} else if (strcmp(name, STRUCT_OPS_SEC) == 0) {
				obj->efile.st_ops_data = data;
				obj->efile.st_ops_shndx = idx;
			} else {
				pr_info("elf: skipping unrecognized data section(%d) %s\n",
					idx, name);
			}
		} else if (sh->sh_type == SHT_REL) {
			int targ_sec_idx = sh->sh_info; /* points to other section */

			/* Only do relo for section with exec instructions */
			if (!section_have_execinstr(obj, targ_sec_idx) &&
			    strcmp(name, ".rel" STRUCT_OPS_SEC) &&
			    strcmp(name, ".rel" MAPS_ELF_SEC)) {
				pr_info("elf: skipping relo section(%d) %s for section(%d) %s\n",
					idx, name, targ_sec_idx,
					elf_sec_name(obj, elf_sec_by_idx(obj, targ_sec_idx)) ?: "<?>");
				continue;
			}

			sec_desc->sec_type = SEC_RELO;
			sec_desc->shdr = sh;
			sec_desc->data = data;
		} else if (sh->sh_type == SHT_NOBITS && strcmp(name, BSS_SEC) == 0) {
			sec_desc->sec_type = SEC_BSS;
			sec_desc->shdr = sh;
			sec_desc->data = data;
		} else {
			pr_info("elf: skipping section(%d) %s (size %zu)\n", idx, name,
				(size_t)sh->sh_size);
		}
	}

	/* strtabidx of 0 means "no string table" in ELF; also reject indices
	 * beyond the last section seen
	 */
	if (!obj->efile.strtabidx || obj->efile.strtabidx > idx) {
		pr_warn("elf: symbol strings section missing or invalid in %s\n", obj->path);
		return -LIBBPF_ERRNO__FORMAT;
	}

	/* sort BPF programs by section name and in-section instruction offset
	 * for faster search */
	qsort(obj->programs, obj->nr_programs, sizeof(*obj->programs), cmp_progs);

	return bpf_object__init_btf(obj, btf_data, btf_ext_data);
}
3317 | ||
/* An extern symbol is an undefined (SHN_UNDEF), untyped (STT_NOTYPE)
 * symbol with either global or weak binding.
 */
static bool sym_is_extern(const Elf64_Sym *sym)
{
	int bind = ELF64_ST_BIND(sym->st_info);

	if (sym->st_shndx != SHN_UNDEF)
		return false;
	if (ELF64_ST_TYPE(sym->st_info) != STT_NOTYPE)
		return false;
	return bind == STB_GLOBAL || bind == STB_WEAK;
}
3326 | ||
/* Check whether a symbol refers to a BPF subprogram: it must reside in
 * the given .text section and be either a local STT_SECTION symbol (how
 * static functions are relocated) or a global STT_FUNC symbol.
 */
static bool sym_is_subprog(const Elf64_Sym *sym, int text_shndx)
{
	int bind = ELF64_ST_BIND(sym->st_info);
	int type = ELF64_ST_TYPE(sym->st_info);

	if (sym->st_shndx != text_shndx)
		return false;

	return (bind == STB_LOCAL && type == STT_SECTION) ||
	       (bind == STB_GLOBAL && type == STT_FUNC);
}
3343 | ||
166750bc AN |
3344 | static int find_extern_btf_id(const struct btf *btf, const char *ext_name) |
3345 | { | |
3346 | const struct btf_type *t; | |
5bd022ec | 3347 | const char *tname; |
166750bc AN |
3348 | int i, n; |
3349 | ||
3350 | if (!btf) | |
3351 | return -ESRCH; | |
3352 | ||
3353 | n = btf__get_nr_types(btf); | |
3354 | for (i = 1; i <= n; i++) { | |
3355 | t = btf__type_by_id(btf, i); | |
3356 | ||
5bd022ec | 3357 | if (!btf_is_var(t) && !btf_is_func(t)) |
166750bc AN |
3358 | continue; |
3359 | ||
5bd022ec MKL |
3360 | tname = btf__name_by_offset(btf, t->name_off); |
3361 | if (strcmp(tname, ext_name)) | |
166750bc AN |
3362 | continue; |
3363 | ||
5bd022ec MKL |
3364 | if (btf_is_var(t) && |
3365 | btf_var(t)->linkage != BTF_VAR_GLOBAL_EXTERN) | |
3366 | return -EINVAL; | |
3367 | ||
3368 | if (btf_is_func(t) && btf_func_linkage(t) != BTF_FUNC_EXTERN) | |
166750bc AN |
3369 | return -EINVAL; |
3370 | ||
3371 | return i; | |
3372 | } | |
3373 | ||
3374 | return -ENOENT; | |
3375 | } | |
3376 | ||
2e33efe3 AN |
3377 | static int find_extern_sec_btf_id(struct btf *btf, int ext_btf_id) { |
3378 | const struct btf_var_secinfo *vs; | |
3379 | const struct btf_type *t; | |
3380 | int i, j, n; | |
3381 | ||
3382 | if (!btf) | |
3383 | return -ESRCH; | |
3384 | ||
3385 | n = btf__get_nr_types(btf); | |
3386 | for (i = 1; i <= n; i++) { | |
3387 | t = btf__type_by_id(btf, i); | |
3388 | ||
3389 | if (!btf_is_datasec(t)) | |
3390 | continue; | |
3391 | ||
3392 | vs = btf_var_secinfos(t); | |
3393 | for (j = 0; j < btf_vlen(t); j++, vs++) { | |
3394 | if (vs->type == ext_btf_id) | |
3395 | return i; | |
3396 | } | |
3397 | } | |
3398 | ||
3399 | return -ENOENT; | |
3400 | } | |
3401 | ||
3402 | static enum kcfg_type find_kcfg_type(const struct btf *btf, int id, | |
3403 | bool *is_signed) | |
166750bc AN |
3404 | { |
3405 | const struct btf_type *t; | |
3406 | const char *name; | |
3407 | ||
3408 | t = skip_mods_and_typedefs(btf, id, NULL); | |
3409 | name = btf__name_by_offset(btf, t->name_off); | |
3410 | ||
3411 | if (is_signed) | |
3412 | *is_signed = false; | |
3413 | switch (btf_kind(t)) { | |
3414 | case BTF_KIND_INT: { | |
3415 | int enc = btf_int_encoding(t); | |
3416 | ||
3417 | if (enc & BTF_INT_BOOL) | |
2e33efe3 | 3418 | return t->size == 1 ? KCFG_BOOL : KCFG_UNKNOWN; |
166750bc AN |
3419 | if (is_signed) |
3420 | *is_signed = enc & BTF_INT_SIGNED; | |
3421 | if (t->size == 1) | |
2e33efe3 | 3422 | return KCFG_CHAR; |
166750bc | 3423 | if (t->size < 1 || t->size > 8 || (t->size & (t->size - 1))) |
2e33efe3 AN |
3424 | return KCFG_UNKNOWN; |
3425 | return KCFG_INT; | |
166750bc AN |
3426 | } |
3427 | case BTF_KIND_ENUM: | |
3428 | if (t->size != 4) | |
2e33efe3 | 3429 | return KCFG_UNKNOWN; |
166750bc | 3430 | if (strcmp(name, "libbpf_tristate")) |
2e33efe3 AN |
3431 | return KCFG_UNKNOWN; |
3432 | return KCFG_TRISTATE; | |
166750bc AN |
3433 | case BTF_KIND_ARRAY: |
3434 | if (btf_array(t)->nelems == 0) | |
2e33efe3 AN |
3435 | return KCFG_UNKNOWN; |
3436 | if (find_kcfg_type(btf, btf_array(t)->type, NULL) != KCFG_CHAR) | |
3437 | return KCFG_UNKNOWN; | |
3438 | return KCFG_CHAR_ARR; | |
166750bc | 3439 | default: |
2e33efe3 | 3440 | return KCFG_UNKNOWN; |
166750bc AN |
3441 | } |
3442 | } | |
3443 | ||
3444 | static int cmp_externs(const void *_a, const void *_b) | |
3445 | { | |
3446 | const struct extern_desc *a = _a; | |
3447 | const struct extern_desc *b = _b; | |
3448 | ||
2e33efe3 AN |
3449 | if (a->type != b->type) |
3450 | return a->type < b->type ? -1 : 1; | |
3451 | ||
3452 | if (a->type == EXT_KCFG) { | |
3453 | /* descending order by alignment requirements */ | |
3454 | if (a->kcfg.align != b->kcfg.align) | |
3455 | return a->kcfg.align > b->kcfg.align ? -1 : 1; | |
3456 | /* ascending order by size, within same alignment class */ | |
3457 | if (a->kcfg.sz != b->kcfg.sz) | |
3458 | return a->kcfg.sz < b->kcfg.sz ? -1 : 1; | |
3459 | } | |
3460 | ||
166750bc AN |
3461 | /* resolve ties by name */ |
3462 | return strcmp(a->name, b->name); | |
3463 | } | |
3464 | ||
1c0c7074 AN |
/* Return the type ID of the first 32-bit integer BTF type, or 0 if none
 * exists.
 */
static int find_int_btf_id(const struct btf *btf)
{
	int i, n = btf__get_nr_types(btf);

	for (i = 1; i <= n; i++) {
		const struct btf_type *t = btf__type_by_id(btf, i);

		if (btf_is_int(t) && btf_int_bits(t) == 32)
			return i;
	}

	return 0;
}
3480 | ||
5bd022ec MKL |
/* If the .ksyms DATASEC contains any extern function, add a dummy int
 * variable named "dummy_ksym" to BTF. It is later used to replace each
 * extern func's DATASEC entry (a DATASEC can only reference VARs for the
 * kernel's purposes), and its name string is reused for unnamed func
 * params.
 * Returns the new var's BTF ID (> 0), 0 when nothing needed to be added
 * (no BTF, no .ksyms DATASEC, or no funcs in it), or a negative error if
 * btf__add_var() failed.
 */
static int add_dummy_ksym_var(struct btf *btf)
{
	int i, int_btf_id, sec_btf_id, dummy_var_btf_id;
	const struct btf_var_secinfo *vs;
	const struct btf_type *sec;

	if (!btf)
		return 0;

	sec_btf_id = btf__find_by_name_kind(btf, KSYMS_SEC,
					    BTF_KIND_DATASEC);
	if (sec_btf_id < 0)
		return 0;

	/* look for at least one FUNC entry in the .ksyms DATASEC */
	sec = btf__type_by_id(btf, sec_btf_id);
	vs = btf_var_secinfos(sec);
	for (i = 0; i < btf_vlen(sec); i++, vs++) {
		const struct btf_type *vt;

		vt = btf__type_by_id(btf, vs->type);
		if (btf_is_func(vt))
			break;
	}

	/* No func in ksyms sec.  No need to add dummy var. */
	if (i == btf_vlen(sec))
		return 0;

	/* the dummy var is typed as an existing 32-bit int (ID 0 if none) */
	int_btf_id = find_int_btf_id(btf);
	dummy_var_btf_id = btf__add_var(btf,
					"dummy_ksym",
					BTF_VAR_GLOBAL_ALLOCATED,
					int_btf_id);
	if (dummy_var_btf_id < 0)
		pr_warn("cannot create a dummy_ksym var\n");

	return dummy_var_btf_id;
}
3519 | ||
166750bc AN |
/* Scan the ELF symbol table for extern symbols, build obj->externs
 * descriptors for them (classified as EXT_KCFG or EXT_KSYM based on the
 * DATASEC they belong to), then rewrite the corresponding BTF DATASECs
 * in place so the object passes kernel verification:
 *  - .ksyms extern VARs are turned into allocated int vars; extern FUNC
 *    entries are replaced by the dummy var added by add_dummy_ksym_var();
 *  - .kconfig externs get offsets assigned within the .kconfig map.
 * Returns 0 on success, negative error otherwise.
 */
static int bpf_object__collect_externs(struct bpf_object *obj)
{
	struct btf_type *sec, *kcfg_sec = NULL, *ksym_sec = NULL;
	const struct btf_type *t;
	struct extern_desc *ext;
	int i, n, off, dummy_var_btf_id;
	const char *ext_name, *sec_name;
	Elf_Scn *scn;
	Elf64_Shdr *sh;

	if (!obj->efile.symbols)
		return 0;

	scn = elf_sec_by_idx(obj, obj->efile.symbols_shndx);
	sh = elf_sec_hdr(obj, scn);
	if (!sh)
		return -LIBBPF_ERRNO__FORMAT;

	dummy_var_btf_id = add_dummy_ksym_var(obj->btf);
	if (dummy_var_btf_id < 0)
		return dummy_var_btf_id;

	n = sh->sh_size / sh->sh_entsize;
	pr_debug("looking for externs among %d symbols...\n", n);

	for (i = 0; i < n; i++) {
		Elf64_Sym *sym = elf_sym_by_idx(obj, i);

		if (!sym)
			return -LIBBPF_ERRNO__FORMAT;
		if (!sym_is_extern(sym))
			continue;
		ext_name = elf_sym_str(obj, sym->st_name);
		if (!ext_name || !ext_name[0])
			continue;

		/* grow the externs array by one and zero the new slot */
		ext = obj->externs;
		ext = libbpf_reallocarray(ext, obj->nr_extern + 1, sizeof(*ext));
		if (!ext)
			return -ENOMEM;
		obj->externs = ext;
		ext = &ext[obj->nr_extern];
		memset(ext, 0, sizeof(*ext));
		obj->nr_extern++;

		ext->btf_id = find_extern_btf_id(obj->btf, ext_name);
		if (ext->btf_id <= 0) {
			pr_warn("failed to find BTF for extern '%s': %d\n",
				ext_name, ext->btf_id);
			return ext->btf_id;
		}
		t = btf__type_by_id(obj->btf, ext->btf_id);
		ext->name = btf__name_by_offset(obj->btf, t->name_off);
		ext->sym_idx = i;
		ext->is_weak = ELF64_ST_BIND(sym->st_info) == STB_WEAK;

		/* which DATASEC does this extern live in? */
		ext->sec_btf_id = find_extern_sec_btf_id(obj->btf, ext->btf_id);
		if (ext->sec_btf_id <= 0) {
			pr_warn("failed to find BTF for extern '%s' [%d] section: %d\n",
				ext_name, ext->btf_id, ext->sec_btf_id);
			return ext->sec_btf_id;
		}
		sec = (void *)btf__type_by_id(obj->btf, ext->sec_btf_id);
		sec_name = btf__name_by_offset(obj->btf, sec->name_off);

		if (strcmp(sec_name, KCONFIG_SEC) == 0) {
			/* extern functions make no sense for Kconfig values */
			if (btf_is_func(t)) {
				pr_warn("extern function %s is unsupported under %s section\n",
					ext->name, KCONFIG_SEC);
				return -ENOTSUP;
			}
			kcfg_sec = sec;
			ext->type = EXT_KCFG;
			ext->kcfg.sz = btf__resolve_size(obj->btf, t->type);
			if (ext->kcfg.sz <= 0) {
				pr_warn("failed to resolve size of extern (kcfg) '%s': %d\n",
					ext_name, ext->kcfg.sz);
				return ext->kcfg.sz;
			}
			ext->kcfg.align = btf__align_of(obj->btf, t->type);
			if (ext->kcfg.align <= 0) {
				pr_warn("failed to determine alignment of extern (kcfg) '%s': %d\n",
					ext_name, ext->kcfg.align);
				return -EINVAL;
			}
			ext->kcfg.type = find_kcfg_type(obj->btf, t->type,
						        &ext->kcfg.is_signed);
			if (ext->kcfg.type == KCFG_UNKNOWN) {
				pr_warn("extern (kcfg) '%s' type is unsupported\n", ext_name);
				return -ENOTSUP;
			}
		} else if (strcmp(sec_name, KSYMS_SEC) == 0) {
			ksym_sec = sec;
			ext->type = EXT_KSYM;
			skip_mods_and_typedefs(obj->btf, t->type,
					       &ext->ksym.type_id);
		} else {
			pr_warn("unrecognized extern section '%s'\n", sec_name);
			return -ENOTSUP;
		}
	}
	pr_debug("collected %d externs total\n", obj->nr_extern);

	if (!obj->nr_extern)
		return 0;

	/* sort externs by type, for kcfg ones also by (align, size, name) */
	qsort(obj->externs, obj->nr_extern, sizeof(*ext), cmp_externs);

	/* for .ksyms section, we need to turn all externs into allocated
	 * variables in BTF to pass kernel verification; we do this by
	 * pretending that each extern is a 8-byte variable
	 */
	if (ksym_sec) {
		/* find existing 4-byte integer type in BTF to use for fake
		 * extern variables in DATASEC
		 */
		int int_btf_id = find_int_btf_id(obj->btf);
		/* For extern function, a dummy_var added earlier
		 * will be used to replace the vs->type and
		 * its name string will be used to refill
		 * the missing param's name.
		 */
		const struct btf_type *dummy_var;

		dummy_var = btf__type_by_id(obj->btf, dummy_var_btf_id);
		for (i = 0; i < obj->nr_extern; i++) {
			ext = &obj->externs[i];
			if (ext->type != EXT_KSYM)
				continue;
			pr_debug("extern (ksym) #%d: symbol %d, name %s\n",
				 i, ext->sym_idx, ext->name);
		}

		/* rewrite each DATASEC entry: vars become allocated ints,
		 * funcs are swapped for the dummy var; each entry occupies
		 * sizeof(int) bytes at increasing offsets
		 */
		sec = ksym_sec;
		n = btf_vlen(sec);
		for (i = 0, off = 0; i < n; i++, off += sizeof(int)) {
			struct btf_var_secinfo *vs = btf_var_secinfos(sec) + i;
			struct btf_type *vt;

			vt = (void *)btf__type_by_id(obj->btf, vs->type);
			ext_name = btf__name_by_offset(obj->btf, vt->name_off);
			ext = find_extern_by_name(obj, ext_name);
			if (!ext) {
				pr_warn("failed to find extern definition for BTF %s '%s'\n",
					btf_kind_str(vt), ext_name);
				return -ESRCH;
			}
			if (btf_is_func(vt)) {
				const struct btf_type *func_proto;
				struct btf_param *param;
				int j;

				func_proto = btf__type_by_id(obj->btf,
							     vt->type);
				param = btf_params(func_proto);
				/* Reuse the dummy_var string if the
				 * func proto does not have param name.
				 */
				for (j = 0; j < btf_vlen(func_proto); j++)
					if (param[j].type && !param[j].name_off)
						param[j].name_off =
							dummy_var->name_off;
				vs->type = dummy_var_btf_id;
				/* mark the func itself as global */
				vt->info &= ~0xffff;
				vt->info |= BTF_FUNC_GLOBAL;
			} else {
				btf_var(vt)->linkage = BTF_VAR_GLOBAL_ALLOCATED;
				vt->type = int_btf_id;
			}
			vs->offset = off;
			vs->size = sizeof(int);
		}
		sec->size = off;
	}

	if (kcfg_sec) {
		sec = kcfg_sec;
		/* for kcfg externs calculate their offsets within a .kconfig map */
		off = 0;
		for (i = 0; i < obj->nr_extern; i++) {
			ext = &obj->externs[i];
			if (ext->type != EXT_KCFG)
				continue;

			ext->kcfg.data_off = roundup(off, ext->kcfg.align);
			off = ext->kcfg.data_off + ext->kcfg.sz;
			pr_debug("extern (kcfg) #%d: symbol %d, off %u, name %s\n",
				 i, ext->sym_idx, ext->kcfg.data_off, ext->name);
		}
		sec->size = off;
		n = btf_vlen(sec);
		/* mark kcfg vars as allocated and record their map offsets */
		for (i = 0; i < n; i++) {
			struct btf_var_secinfo *vs = btf_var_secinfos(sec) + i;

			t = btf__type_by_id(obj->btf, vs->type);
			ext_name = btf__name_by_offset(obj->btf, t->name_off);
			ext = find_extern_by_name(obj, ext_name);
			if (!ext) {
				pr_warn("failed to find extern definition for BTF var '%s'\n",
					ext_name);
				return -ESRCH;
			}
			btf_var(t)->linkage = BTF_VAR_GLOBAL_ALLOCATED;
			vs->offset = ext->kcfg.data_off;
		}
	}
	return 0;
}
3729 | ||
6d4b198b | 3730 | struct bpf_program * |
a324aae3 AN |
3731 | bpf_object__find_program_by_title(const struct bpf_object *obj, |
3732 | const char *title) | |
6d4b198b JK |
3733 | { |
3734 | struct bpf_program *pos; | |
3735 | ||
3736 | bpf_object__for_each_program(pos, obj) { | |
52109584 | 3737 | if (pos->sec_name && !strcmp(pos->sec_name, title)) |
6d4b198b JK |
3738 | return pos; |
3739 | } | |
e9fc3ce9 | 3740 | return errno = ENOENT, NULL; |
6d4b198b JK |
3741 | } |
3742 | ||
c3c55696 AN |
3743 | static bool prog_is_subprog(const struct bpf_object *obj, |
3744 | const struct bpf_program *prog) | |
3745 | { | |
197afc63 AN |
3746 | /* For legacy reasons, libbpf supports an entry-point BPF programs |
3747 | * without SEC() attribute, i.e., those in the .text section. But if | |
3748 | * there are 2 or more such programs in the .text section, they all | |
3749 | * must be subprograms called from entry-point BPF programs in | |
3750 | * designated SEC()'tions, otherwise there is no way to distinguish | |
3751 | * which of those programs should be loaded vs which are a subprogram. | |
3752 | * Similarly, if there is a function/program in .text and at least one | |
3753 | * other BPF program with custom SEC() attribute, then we just assume | |
3754 | * .text programs are subprograms (even if they are not called from | |
3755 | * other programs), because libbpf never explicitly supported mixing | |
3756 | * SEC()-designated BPF programs and .text entry-point BPF programs. | |
3757 | */ | |
3758 | return prog->sec_idx == obj->efile.text_shndx && obj->nr_programs > 1; | |
c3c55696 AN |
3759 | } |
3760 | ||
01af3bf0 AN |
3761 | struct bpf_program * |
3762 | bpf_object__find_program_by_name(const struct bpf_object *obj, | |
3763 | const char *name) | |
3764 | { | |
3765 | struct bpf_program *prog; | |
3766 | ||
3767 | bpf_object__for_each_program(prog, obj) { | |
c3c55696 AN |
3768 | if (prog_is_subprog(obj, prog)) |
3769 | continue; | |
01af3bf0 AN |
3770 | if (!strcmp(prog->name, name)) |
3771 | return prog; | |
3772 | } | |
e9fc3ce9 | 3773 | return errno = ENOENT, NULL; |
01af3bf0 AN |
3774 | } |
3775 | ||
d859900c DB |
3776 | static bool bpf_object__shndx_is_data(const struct bpf_object *obj, |
3777 | int shndx) | |
3778 | { | |
25bbbd7a AN |
3779 | switch (obj->efile.secs[shndx].sec_type) { |
3780 | case SEC_BSS: | |
3781 | case SEC_DATA: | |
3782 | case SEC_RODATA: | |
3783 | return true; | |
3784 | default: | |
3785 | return false; | |
3786 | } | |
d859900c DB |
3787 | } |
3788 | ||
3789 | static bool bpf_object__shndx_is_maps(const struct bpf_object *obj, | |
3790 | int shndx) | |
3791 | { | |
abd29c93 AN |
3792 | return shndx == obj->efile.maps_shndx || |
3793 | shndx == obj->efile.btf_maps_shndx; | |
d859900c DB |
3794 | } |
3795 | ||
d859900c DB |
3796 | static enum libbpf_map_type |
3797 | bpf_object__section_to_libbpf_map_type(const struct bpf_object *obj, int shndx) | |
3798 | { | |
25bbbd7a AN |
3799 | if (shndx == obj->efile.symbols_shndx) |
3800 | return LIBBPF_MAP_KCONFIG; | |
3801 | ||
3802 | switch (obj->efile.secs[shndx].sec_type) { | |
3803 | case SEC_BSS: | |
d859900c | 3804 | return LIBBPF_MAP_BSS; |
25bbbd7a AN |
3805 | case SEC_DATA: |
3806 | return LIBBPF_MAP_DATA; | |
3807 | case SEC_RODATA: | |
d859900c | 3808 | return LIBBPF_MAP_RODATA; |
25bbbd7a | 3809 | default: |
d859900c | 3810 | return LIBBPF_MAP_UNSPEC; |
25bbbd7a | 3811 | } |
d859900c DB |
3812 | } |
3813 | ||
1f8e2bcb AN |
/* Record a single relocation against instruction insn_idx of prog into
 * reloc_desc. The relocation kind is classified in this order:
 * extern (RELO_EXTERN_FUNC/RELO_EXTERN_VAR), subprogram call (RELO_CALL),
 * subprogram address (RELO_SUBPROG_ADDR), generic map reference
 * (RELO_LD64), and global data (RELO_DATA).
 * Returns 0 on success, -LIBBPF_ERRNO__RELOC on any malformed relocation.
 */
static int bpf_program__record_reloc(struct bpf_program *prog,
				     struct reloc_desc *reloc_desc,
				     __u32 insn_idx, const char *sym_name,
				     const Elf64_Sym *sym, const Elf64_Rel *rel)
{
	struct bpf_insn *insn = &prog->insns[insn_idx];
	size_t map_idx, nr_maps = prog->obj->nr_maps;
	struct bpf_object *obj = prog->obj;
	__u32 shdr_idx = sym->st_shndx;
	enum libbpf_map_type type;
	const char *sym_sec_name;
	struct bpf_map *map;

	/* only call and ldimm64 instructions can be relocated */
	if (!is_call_insn(insn) && !is_ldimm64_insn(insn)) {
		pr_warn("prog '%s': invalid relo against '%s' for insns[%d].code 0x%x\n",
			prog->name, sym_name, insn_idx, insn->code);
		return -LIBBPF_ERRNO__RELOC;
	}

	/* extern relocation: find the matching extern_desc by symbol index */
	if (sym_is_extern(sym)) {
		int sym_idx = ELF64_R_SYM(rel->r_info);
		int i, n = obj->nr_extern;
		struct extern_desc *ext;

		for (i = 0; i < n; i++) {
			ext = &obj->externs[i];
			if (ext->sym_idx == sym_idx)
				break;
		}
		if (i >= n) {
			pr_warn("prog '%s': extern relo failed to find extern for '%s' (%d)\n",
				prog->name, sym_name, sym_idx);
			return -LIBBPF_ERRNO__RELOC;
		}
		pr_debug("prog '%s': found extern #%d '%s' (sym %d) for insn #%u\n",
			 prog->name, i, ext->name, ext->sym_idx, insn_idx);
		if (insn->code == (BPF_JMP | BPF_CALL))
			reloc_desc->type = RELO_EXTERN_FUNC;
		else
			reloc_desc->type = RELO_EXTERN_VAR;
		reloc_desc->insn_idx = insn_idx;
		reloc_desc->sym_off = i; /* sym_off stores extern index */
		return 0;
	}

	/* sub-program call relocation */
	if (is_call_insn(insn)) {
		if (insn->src_reg != BPF_PSEUDO_CALL) {
			pr_warn("prog '%s': incorrect bpf_call opcode\n", prog->name);
			return -LIBBPF_ERRNO__RELOC;
		}
		/* text_shndx can be 0, if no default "main" program exists */
		if (!shdr_idx || shdr_idx != obj->efile.text_shndx) {
			sym_sec_name = elf_sec_name(obj, elf_sec_by_idx(obj, shdr_idx));
			pr_warn("prog '%s': bad call relo against '%s' in section '%s'\n",
				prog->name, sym_name, sym_sec_name);
			return -LIBBPF_ERRNO__RELOC;
		}
		/* call target must be instruction-aligned within .text */
		if (sym->st_value % BPF_INSN_SZ) {
			pr_warn("prog '%s': bad call relo against '%s' at offset %zu\n",
				prog->name, sym_name, (size_t)sym->st_value);
			return -LIBBPF_ERRNO__RELOC;
		}
		reloc_desc->type = RELO_CALL;
		reloc_desc->insn_idx = insn_idx;
		reloc_desc->sym_off = sym->st_value;
		return 0;
	}

	/* remaining kinds need a real section; reserved indices are invalid */
	if (!shdr_idx || shdr_idx >= SHN_LORESERVE) {
		pr_warn("prog '%s': invalid relo against '%s' in special section 0x%x; forgot to initialize global var?..\n",
			prog->name, sym_name, shdr_idx);
		return -LIBBPF_ERRNO__RELOC;
	}

	/* loading subprog addresses */
	if (sym_is_subprog(sym, obj->efile.text_shndx)) {
		/* global_func: sym->st_value = offset in the section, insn->imm = 0.
		 * local_func: sym->st_value = 0, insn->imm = offset in the section.
		 */
		if ((sym->st_value % BPF_INSN_SZ) || (insn->imm % BPF_INSN_SZ)) {
			pr_warn("prog '%s': bad subprog addr relo against '%s' at offset %zu+%d\n",
				prog->name, sym_name, (size_t)sym->st_value, insn->imm);
			return -LIBBPF_ERRNO__RELOC;
		}

		reloc_desc->type = RELO_SUBPROG_ADDR;
		reloc_desc->insn_idx = insn_idx;
		reloc_desc->sym_off = sym->st_value;
		return 0;
	}

	type = bpf_object__section_to_libbpf_map_type(obj, shdr_idx);
	sym_sec_name = elf_sec_name(obj, elf_sec_by_idx(obj, shdr_idx));

	/* generic map reference relocation */
	if (type == LIBBPF_MAP_UNSPEC) {
		if (!bpf_object__shndx_is_maps(obj, shdr_idx)) {
			pr_warn("prog '%s': bad map relo against '%s' in section '%s'\n",
				prog->name, sym_name, sym_sec_name);
			return -LIBBPF_ERRNO__RELOC;
		}
		/* the map is identified by (section, offset) of its symbol */
		for (map_idx = 0; map_idx < nr_maps; map_idx++) {
			map = &obj->maps[map_idx];
			if (map->libbpf_type != type ||
			    map->sec_idx != sym->st_shndx ||
			    map->sec_offset != sym->st_value)
				continue;
			pr_debug("prog '%s': found map %zd (%s, sec %d, off %zu) for insn #%u\n",
				 prog->name, map_idx, map->name, map->sec_idx,
				 map->sec_offset, insn_idx);
			break;
		}
		if (map_idx >= nr_maps) {
			pr_warn("prog '%s': map relo failed to find map for section '%s', off %zu\n",
				prog->name, sym_sec_name, (size_t)sym->st_value);
			return -LIBBPF_ERRNO__RELOC;
		}
		reloc_desc->type = RELO_LD64;
		reloc_desc->insn_idx = insn_idx;
		reloc_desc->map_idx = map_idx;
		reloc_desc->sym_off = 0; /* sym->st_value determines map_idx */
		return 0;
	}

	/* global data map relocation */
	if (!bpf_object__shndx_is_data(obj, shdr_idx)) {
		pr_warn("prog '%s': bad data relo against section '%s'\n",
			prog->name, sym_sec_name);
		return -LIBBPF_ERRNO__RELOC;
	}
	/* data maps are matched on (type, section) only; the symbol's value
	 * becomes the offset within the map (sym_off below)
	 */
	for (map_idx = 0; map_idx < nr_maps; map_idx++) {
		map = &obj->maps[map_idx];
		if (map->libbpf_type != type || map->sec_idx != sym->st_shndx)
			continue;
		pr_debug("prog '%s': found data map %zd (%s, sec %d, off %zu) for insn %u\n",
			 prog->name, map_idx, map->name, map->sec_idx,
			 map->sec_offset, insn_idx);
		break;
	}
	if (map_idx >= nr_maps) {
		pr_warn("prog '%s': data relo failed to find map for section '%s'\n",
			prog->name, sym_sec_name);
		return -LIBBPF_ERRNO__RELOC;
	}

	reloc_desc->type = RELO_DATA;
	reloc_desc->insn_idx = insn_idx;
	reloc_desc->map_idx = map_idx;
	reloc_desc->sym_off = sym->st_value;
	return 0;
}
3966 | ||
db2b8b06 AN |
3967 | static bool prog_contains_insn(const struct bpf_program *prog, size_t insn_idx) |
3968 | { | |
3969 | return insn_idx >= prog->sec_insn_off && | |
3970 | insn_idx < prog->sec_insn_off + prog->sec_insn_cnt; | |
3971 | } | |
3972 | ||
/* Binary-search obj->programs (sorted by section index and in-section
 * instruction offset, see the qsort with cmp_progs) for the program that
 * contains instruction insn_idx of section sec_idx.
 * Returns the program, or NULL if no program covers that instruction.
 */
static struct bpf_program *find_prog_by_sec_insn(const struct bpf_object *obj,
						 size_t sec_idx, size_t insn_idx)
{
	int l = 0, r = obj->nr_programs - 1, m;
	struct bpf_program *prog;

	/* upper-biased binary search: find the last program starting at or
	 * before (sec_idx, insn_idx); note the +1 in the midpoint to avoid
	 * an infinite loop when r == l + 1
	 */
	while (l < r) {
		m = l + (r - l + 1) / 2;
		prog = &obj->programs[m];

		if (prog->sec_idx < sec_idx ||
		    (prog->sec_idx == sec_idx && prog->sec_insn_off <= insn_idx))
			l = m;
		else
			r = m - 1;
	}
	/* matching program could be at index l, but it still might be the
	 * wrong one, so we need to double check conditions for the last time
	 */
	prog = &obj->programs[l];
	if (prog->sec_idx == sec_idx && prog_contains_insn(prog, insn_idx))
		return prog;
	return NULL;
}
3997 | ||
34090915 | 3998 | static int |
ad23b723 | 3999 | bpf_object__collect_prog_relos(struct bpf_object *obj, Elf64_Shdr *shdr, Elf_Data *data) |
34090915 | 4000 | { |
9c0f8cbd AN |
4001 | const char *relo_sec_name, *sec_name; |
4002 | size_t sec_idx = shdr->sh_info; | |
c3c55696 AN |
4003 | struct bpf_program *prog; |
4004 | struct reloc_desc *relos; | |
1f8e2bcb | 4005 | int err, i, nrels; |
c3c55696 AN |
4006 | const char *sym_name; |
4007 | __u32 insn_idx; | |
6245947c AN |
4008 | Elf_Scn *scn; |
4009 | Elf_Data *scn_data; | |
ad23b723 AN |
4010 | Elf64_Sym *sym; |
4011 | Elf64_Rel *rel; | |
34090915 | 4012 | |
6245947c AN |
4013 | scn = elf_sec_by_idx(obj, sec_idx); |
4014 | scn_data = elf_sec_data(obj, scn); | |
4015 | ||
9c0f8cbd | 4016 | relo_sec_name = elf_sec_str(obj, shdr->sh_name); |
6245947c | 4017 | sec_name = elf_sec_name(obj, scn); |
9c0f8cbd AN |
4018 | if (!relo_sec_name || !sec_name) |
4019 | return -EINVAL; | |
4020 | ||
4021 | pr_debug("sec '%s': collecting relocation for section(%zu) '%s'\n", | |
4022 | relo_sec_name, sec_idx, sec_name); | |
34090915 WN |
4023 | nrels = shdr->sh_size / shdr->sh_entsize; |
4024 | ||
34090915 | 4025 | for (i = 0; i < nrels; i++) { |
ad23b723 AN |
4026 | rel = elf_rel_by_idx(data, i); |
4027 | if (!rel) { | |
9c0f8cbd | 4028 | pr_warn("sec '%s': failed to get relo #%d\n", relo_sec_name, i); |
6371ca3b | 4029 | return -LIBBPF_ERRNO__FORMAT; |
34090915 | 4030 | } |
ad23b723 AN |
4031 | |
4032 | sym = elf_sym_by_idx(obj, ELF64_R_SYM(rel->r_info)); | |
4033 | if (!sym) { | |
9c0f8cbd | 4034 | pr_warn("sec '%s': symbol 0x%zx not found for relo #%d\n", |
ad23b723 | 4035 | relo_sec_name, (size_t)ELF64_R_SYM(rel->r_info), i); |
6371ca3b | 4036 | return -LIBBPF_ERRNO__FORMAT; |
34090915 | 4037 | } |
6245947c | 4038 | |
ad23b723 | 4039 | if (rel->r_offset % BPF_INSN_SZ || rel->r_offset >= scn_data->d_size) { |
9c0f8cbd | 4040 | pr_warn("sec '%s': invalid offset 0x%zx for relo #%d\n", |
ad23b723 | 4041 | relo_sec_name, (size_t)ELF64_R_SYM(rel->r_info), i); |
1f8e2bcb | 4042 | return -LIBBPF_ERRNO__FORMAT; |
9c0f8cbd | 4043 | } |
d859900c | 4044 | |
ad23b723 | 4045 | insn_idx = rel->r_offset / BPF_INSN_SZ; |
c3c55696 AN |
4046 | /* relocations against static functions are recorded as |
4047 | * relocations against the section that contains a function; | |
4048 | * in such case, symbol will be STT_SECTION and sym.st_name | |
4049 | * will point to empty string (0), so fetch section name | |
4050 | * instead | |
4051 | */ | |
ad23b723 AN |
4052 | if (ELF64_ST_TYPE(sym->st_info) == STT_SECTION && sym->st_name == 0) |
4053 | sym_name = elf_sec_name(obj, elf_sec_by_idx(obj, sym->st_shndx)); | |
c3c55696 | 4054 | else |
ad23b723 | 4055 | sym_name = elf_sym_str(obj, sym->st_name); |
c3c55696 | 4056 | sym_name = sym_name ?: "<?"; |
d859900c | 4057 | |
9c0f8cbd AN |
4058 | pr_debug("sec '%s': relo #%d: insn #%u against '%s'\n", |
4059 | relo_sec_name, i, insn_idx, sym_name); | |
666810e8 | 4060 | |
c3c55696 AN |
4061 | prog = find_prog_by_sec_insn(obj, sec_idx, insn_idx); |
4062 | if (!prog) { | |
6245947c | 4063 | pr_debug("sec '%s': relo #%d: couldn't find program in section '%s' for insn #%u, probably overridden weak function, skipping...\n", |
c3c55696 | 4064 | relo_sec_name, i, sec_name, insn_idx); |
6245947c | 4065 | continue; |
c3c55696 AN |
4066 | } |
4067 | ||
4068 | relos = libbpf_reallocarray(prog->reloc_desc, | |
4069 | prog->nr_reloc + 1, sizeof(*relos)); | |
4070 | if (!relos) | |
4071 | return -ENOMEM; | |
4072 | prog->reloc_desc = relos; | |
4073 | ||
4074 | /* adjust insn_idx to local BPF program frame of reference */ | |
4075 | insn_idx -= prog->sec_insn_off; | |
4076 | err = bpf_program__record_reloc(prog, &relos[prog->nr_reloc], | |
ad23b723 | 4077 | insn_idx, sym_name, sym, rel); |
1f8e2bcb AN |
4078 | if (err) |
4079 | return err; | |
c3c55696 AN |
4080 | |
4081 | prog->nr_reloc++; | |
34090915 WN |
4082 | } |
4083 | return 0; | |
4084 | } | |
4085 | ||
abd29c93 | 4086 | static int bpf_map_find_btf_info(struct bpf_object *obj, struct bpf_map *map) |
8a138aed MKL |
4087 | { |
4088 | struct bpf_map_def *def = &map->def; | |
d859900c | 4089 | __u32 key_type_id = 0, value_type_id = 0; |
96408c43 | 4090 | int ret; |
8a138aed | 4091 | |
590a0088 MKL |
4092 | /* if it's BTF-defined map, we don't need to search for type IDs. |
4093 | * For struct_ops map, it does not need btf_key_type_id and | |
4094 | * btf_value_type_id. | |
4095 | */ | |
4096 | if (map->sec_idx == obj->efile.btf_maps_shndx || | |
4097 | bpf_map__is_struct_ops(map)) | |
abd29c93 AN |
4098 | return 0; |
4099 | ||
d859900c | 4100 | if (!bpf_map__is_internal(map)) { |
abd29c93 | 4101 | ret = btf__get_map_kv_tids(obj->btf, map->name, def->key_size, |
d859900c DB |
4102 | def->value_size, &key_type_id, |
4103 | &value_type_id); | |
4104 | } else { | |
4105 | /* | |
4106 | * LLVM annotates global data differently in BTF, that is, | |
4107 | * only as '.data', '.bss' or '.rodata'. | |
4108 | */ | |
aed65917 | 4109 | ret = btf__find_by_name(obj->btf, map->real_name); |
d859900c DB |
4110 | } |
4111 | if (ret < 0) | |
96408c43 | 4112 | return ret; |
8a138aed | 4113 | |
96408c43 | 4114 | map->btf_key_type_id = key_type_id; |
d859900c DB |
4115 | map->btf_value_type_id = bpf_map__is_internal(map) ? |
4116 | ret : value_type_id; | |
8a138aed MKL |
4117 | return 0; |
4118 | } | |
4119 | ||
97eb3138 MP |
4120 | static int bpf_get_map_info_from_fdinfo(int fd, struct bpf_map_info *info) |
4121 | { | |
4122 | char file[PATH_MAX], buff[4096]; | |
4123 | FILE *fp; | |
4124 | __u32 val; | |
4125 | int err; | |
4126 | ||
4127 | snprintf(file, sizeof(file), "/proc/%d/fdinfo/%d", getpid(), fd); | |
4128 | memset(info, 0, sizeof(*info)); | |
4129 | ||
4130 | fp = fopen(file, "r"); | |
4131 | if (!fp) { | |
4132 | err = -errno; | |
4133 | pr_warn("failed to open %s: %d. No procfs support?\n", file, | |
4134 | err); | |
4135 | return err; | |
4136 | } | |
4137 | ||
4138 | while (fgets(buff, sizeof(buff), fp)) { | |
4139 | if (sscanf(buff, "map_type:\t%u", &val) == 1) | |
4140 | info->type = val; | |
4141 | else if (sscanf(buff, "key_size:\t%u", &val) == 1) | |
4142 | info->key_size = val; | |
4143 | else if (sscanf(buff, "value_size:\t%u", &val) == 1) | |
4144 | info->value_size = val; | |
4145 | else if (sscanf(buff, "max_entries:\t%u", &val) == 1) | |
4146 | info->max_entries = val; | |
4147 | else if (sscanf(buff, "map_flags:\t%i", &val) == 1) | |
4148 | info->map_flags = val; | |
4149 | } | |
4150 | ||
4151 | fclose(fp); | |
4152 | ||
4153 | return 0; | |
4154 | } | |
4155 | ||
/* Make an existing kernel map (referenced by 'fd') back this bpf_map
 * instead of creating a new one at load time. The map's definition is
 * overwritten with the live map's parameters so later logic sees the
 * reused map's real properties.
 * Returns 0 on success; negative error (also mirrored into errno via
 * libbpf_err()) on failure. On failure the original map is untouched.
 */
int bpf_map__reuse_fd(struct bpf_map *map, int fd)
{
	struct bpf_map_info info = {};
	__u32 len = sizeof(info);
	int new_fd, err;
	char *new_name;

	err = bpf_obj_get_info_by_fd(fd, &info, &len);
	/* older kernels reject BPF_OBJ_GET_INFO_BY_FD with EINVAL;
	 * fall back to parsing /proc fdinfo in that case
	 */
	if (err && errno == EINVAL)
		err = bpf_get_map_info_from_fdinfo(fd, &info);
	if (err)
		return libbpf_err(err);

	new_name = strdup(info.name);
	if (!new_name)
		return libbpf_err(-errno);

	/* reserve a known-valid fd number first... */
	new_fd = open("/", O_RDONLY | O_CLOEXEC);
	if (new_fd < 0) {
		err = -errno;
		goto err_free_new_name;
	}

	/* ...then dup3() the caller's fd onto it so the map fd we keep is
	 * O_CLOEXEC and owned by us, independent of the caller's fd
	 */
	new_fd = dup3(fd, new_fd, O_CLOEXEC);
	if (new_fd < 0) {
		err = -errno;
		goto err_close_new_fd;
	}

	/* drop whatever fd this map previously held (zclose tolerates -1) */
	err = zclose(map->fd);
	if (err) {
		err = -errno;
		goto err_close_new_fd;
	}
	free(map->name);

	/* adopt the existing map's identity, definition, and BTF type IDs */
	map->fd = new_fd;
	map->name = new_name;
	map->def.type = info.type;
	map->def.key_size = info.key_size;
	map->def.value_size = info.value_size;
	map->def.max_entries = info.max_entries;
	map->def.map_flags = info.map_flags;
	map->btf_key_type_id = info.btf_key_type_id;
	map->btf_value_type_id = info.btf_value_type_id;
	map->reused = true;

	return 0;

err_close_new_fd:
	close(new_fd);
err_free_new_name:
	free(new_name);
	return libbpf_err(err);
}
4211 | ||
/* Public accessor: maximum number of entries from the map definition. */
__u32 bpf_map__max_entries(const struct bpf_map *map)
{
	return map->def.max_entries;
}
1a11a4c7 | 4216 | |
b3278099 AN |
4217 | struct bpf_map *bpf_map__inner_map(struct bpf_map *map) |
4218 | { | |
4219 | if (!bpf_map_type__is_map_in_map(map->def.type)) | |
e9fc3ce9 | 4220 | return errno = EINVAL, NULL; |
b3278099 AN |
4221 | |
4222 | return map->inner_map; | |
4223 | } | |
4224 | ||
1bdb6c9a AN |
4225 | int bpf_map__set_max_entries(struct bpf_map *map, __u32 max_entries) |
4226 | { | |
1a11a4c7 | 4227 | if (map->fd >= 0) |
e9fc3ce9 | 4228 | return libbpf_err(-EBUSY); |
1a11a4c7 | 4229 | map->def.max_entries = max_entries; |
1a11a4c7 AI |
4230 | return 0; |
4231 | } | |
4232 | ||
1bdb6c9a AN |
4233 | int bpf_map__resize(struct bpf_map *map, __u32 max_entries) |
4234 | { | |
4235 | if (!map || !max_entries) | |
e9fc3ce9 | 4236 | return libbpf_err(-EINVAL); |
1bdb6c9a AN |
4237 | |
4238 | return bpf_map__set_max_entries(map, max_entries); | |
4239 | } | |
4240 | ||
47eff617 | 4241 | static int |
fd9eef1a | 4242 | bpf_object__probe_loading(struct bpf_object *obj) |
47eff617 SF |
4243 | { |
4244 | struct bpf_load_program_attr attr; | |
4245 | char *cp, errmsg[STRERR_BUFSIZE]; | |
4246 | struct bpf_insn insns[] = { | |
4247 | BPF_MOV64_IMM(BPF_REG_0, 0), | |
4248 | BPF_EXIT_INSN(), | |
4249 | }; | |
4250 | int ret; | |
4251 | ||
f9bceaa5 SF |
4252 | if (obj->gen_loader) |
4253 | return 0; | |
4254 | ||
47eff617 SF |
4255 | /* make sure basic loading works */ |
4256 | ||
4257 | memset(&attr, 0, sizeof(attr)); | |
4258 | attr.prog_type = BPF_PROG_TYPE_SOCKET_FILTER; | |
4259 | attr.insns = insns; | |
4260 | attr.insns_cnt = ARRAY_SIZE(insns); | |
4261 | attr.license = "GPL"; | |
4262 | ||
4263 | ret = bpf_load_program_xattr(&attr, NULL, 0); | |
5c10a3db JE |
4264 | if (ret < 0) { |
4265 | attr.prog_type = BPF_PROG_TYPE_TRACEPOINT; | |
4266 | ret = bpf_load_program_xattr(&attr, NULL, 0); | |
4267 | } | |
47eff617 | 4268 | if (ret < 0) { |
fd9eef1a EC |
4269 | ret = errno; |
4270 | cp = libbpf_strerror_r(ret, errmsg, sizeof(errmsg)); | |
4271 | pr_warn("Error in %s():%s(%d). Couldn't load trivial BPF " | |
4272 | "program. Make sure your kernel supports BPF " | |
4273 | "(CONFIG_BPF_SYSCALL=y) and/or that RLIMIT_MEMLOCK is " | |
4274 | "set to big enough value.\n", __func__, cp, ret); | |
4275 | return -ret; | |
47eff617 SF |
4276 | } |
4277 | close(ret); | |
4278 | ||
fd9eef1a EC |
4279 | return 0; |
4280 | } | |
4281 | ||
/* Consume a probe's resulting fd (closing it if valid) and report
 * whether the probe produced one: 1 for fd >= 0, 0 otherwise.
 */
static int probe_fd(int fd)
{
	bool ok = fd >= 0;

	if (ok)
		close(fd);
	return ok;
}
4288 | ||
47b6cb4d | 4289 | static int probe_kern_prog_name(void) |
fd9eef1a EC |
4290 | { |
4291 | struct bpf_load_program_attr attr; | |
4292 | struct bpf_insn insns[] = { | |
4293 | BPF_MOV64_IMM(BPF_REG_0, 0), | |
4294 | BPF_EXIT_INSN(), | |
4295 | }; | |
4296 | int ret; | |
4297 | ||
4298 | /* make sure loading with name works */ | |
47eff617 | 4299 | |
fd9eef1a EC |
4300 | memset(&attr, 0, sizeof(attr)); |
4301 | attr.prog_type = BPF_PROG_TYPE_SOCKET_FILTER; | |
4302 | attr.insns = insns; | |
4303 | attr.insns_cnt = ARRAY_SIZE(insns); | |
4304 | attr.license = "GPL"; | |
47eff617 SF |
4305 | attr.name = "test"; |
4306 | ret = bpf_load_program_xattr(&attr, NULL, 0); | |
bb180fb2 | 4307 | return probe_fd(ret); |
47eff617 SF |
4308 | } |
4309 | ||
47b6cb4d | 4310 | static int probe_kern_global_data(void) |
8837fe5d DB |
4311 | { |
4312 | struct bpf_load_program_attr prg_attr; | |
4313 | struct bpf_create_map_attr map_attr; | |
4314 | char *cp, errmsg[STRERR_BUFSIZE]; | |
4315 | struct bpf_insn insns[] = { | |
4316 | BPF_LD_MAP_VALUE(BPF_REG_1, 0, 16), | |
4317 | BPF_ST_MEM(BPF_DW, BPF_REG_1, 0, 42), | |
4318 | BPF_MOV64_IMM(BPF_REG_0, 0), | |
4319 | BPF_EXIT_INSN(), | |
4320 | }; | |
4321 | int ret, map; | |
4322 | ||
4323 | memset(&map_attr, 0, sizeof(map_attr)); | |
4324 | map_attr.map_type = BPF_MAP_TYPE_ARRAY; | |
4325 | map_attr.key_size = sizeof(int); | |
4326 | map_attr.value_size = 32; | |
4327 | map_attr.max_entries = 1; | |
4328 | ||
4329 | map = bpf_create_map_xattr(&map_attr); | |
4330 | if (map < 0) { | |
23ab656b THJ |
4331 | ret = -errno; |
4332 | cp = libbpf_strerror_r(ret, errmsg, sizeof(errmsg)); | |
be18010e | 4333 | pr_warn("Error in %s():%s(%d). Couldn't create simple array map.\n", |
23ab656b THJ |
4334 | __func__, cp, -ret); |
4335 | return ret; | |
8837fe5d DB |
4336 | } |
4337 | ||
4338 | insns[0].imm = map; | |
4339 | ||
4340 | memset(&prg_attr, 0, sizeof(prg_attr)); | |
4341 | prg_attr.prog_type = BPF_PROG_TYPE_SOCKET_FILTER; | |
4342 | prg_attr.insns = insns; | |
4343 | prg_attr.insns_cnt = ARRAY_SIZE(insns); | |
4344 | prg_attr.license = "GPL"; | |
4345 | ||
4346 | ret = bpf_load_program_xattr(&prg_attr, NULL, 0); | |
47b6cb4d | 4347 | close(map); |
bb180fb2 | 4348 | return probe_fd(ret); |
8837fe5d DB |
4349 | } |
4350 | ||
/* Feature probe: does the kernel accept BPF_BTF_LOAD at all?
 * Loads a minimal raw BTF blob containing a single 'int' type;
 * 1 if the kernel accepts it, 0 otherwise.
 */
static int probe_kern_btf(void)
{
	static const char strs[] = "\0int";
	__u32 types[] = {
		/* int */
		BTF_TYPE_INT_ENC(1, BTF_INT_SIGNED, 0, 32, 4),
	};

	return probe_fd(libbpf__load_raw_btf((char *)types, sizeof(types),
					     strs, sizeof(strs)));
}
4362 | ||
/* Feature probe: does the kernel's BTF support BTF_KIND_FUNC /
 * BTF_KIND_FUNC_PROTO? Encodes "void x(int a) {}" as raw BTF and
 * checks whether it loads; 1 if accepted, 0 otherwise.
 */
static int probe_kern_btf_func(void)
{
	static const char strs[] = "\0int\0x\0a";
	/* void x(int a) {} */
	__u32 types[] = {
		/* int */
		BTF_TYPE_INT_ENC(1, BTF_INT_SIGNED, 0, 32, 4), /* [1] */
		/* FUNC_PROTO */                                /* [2] */
		BTF_TYPE_ENC(0, BTF_INFO_ENC(BTF_KIND_FUNC_PROTO, 0, 1), 0),
		BTF_PARAM_ENC(7, 1),
		/* FUNC x */                                    /* [3] */
		BTF_TYPE_ENC(5, BTF_INFO_ENC(BTF_KIND_FUNC, 0, 0), 2),
	};

	return probe_fd(libbpf__load_raw_btf((char *)types, sizeof(types),
					     strs, sizeof(strs)));
}
4380 | ||
/* Feature probe: does the kernel's BTF accept BTF_FUNC_GLOBAL linkage
 * on a BTF_KIND_FUNC? Same shape as probe_kern_btf_func() but with
 * global linkage encoded in vlen; 1 if accepted, 0 otherwise.
 */
static int probe_kern_btf_func_global(void)
{
	static const char strs[] = "\0int\0x\0a";
	/* static void x(int a) {} */
	__u32 types[] = {
		/* int */
		BTF_TYPE_INT_ENC(1, BTF_INT_SIGNED, 0, 32, 4), /* [1] */
		/* FUNC_PROTO */                                /* [2] */
		BTF_TYPE_ENC(0, BTF_INFO_ENC(BTF_KIND_FUNC_PROTO, 0, 1), 0),
		BTF_PARAM_ENC(7, 1),
		/* FUNC x BTF_FUNC_GLOBAL */                    /* [3] */
		BTF_TYPE_ENC(5, BTF_INFO_ENC(BTF_KIND_FUNC, 0, BTF_FUNC_GLOBAL), 2),
	};

	return probe_fd(libbpf__load_raw_btf((char *)types, sizeof(types),
					     strs, sizeof(strs)));
}
4398 | ||
/* Feature probe: does the kernel's BTF support BTF_KIND_VAR and
 * BTF_KIND_DATASEC (needed for global data maps)? Encodes a single
 * static int variable in a '.data'-like section; 1 if accepted, 0 otherwise.
 */
static int probe_kern_btf_datasec(void)
{
	static const char strs[] = "\0x\0.data";
	/* static int a; */
	__u32 types[] = {
		/* int */
		BTF_TYPE_INT_ENC(0, BTF_INT_SIGNED, 0, 32, 4), /* [1] */
		/* VAR x */                                     /* [2] */
		BTF_TYPE_ENC(1, BTF_INFO_ENC(BTF_KIND_VAR, 0, 0), 1),
		BTF_VAR_STATIC,
		/* DATASEC val */                               /* [3] */
		BTF_TYPE_ENC(3, BTF_INFO_ENC(BTF_KIND_DATASEC, 0, 1), 4),
		BTF_VAR_SECINFO_ENC(2, 0, 4),
	};

	return probe_fd(libbpf__load_raw_btf((char *)types, sizeof(types),
					     strs, sizeof(strs)));
}
4417 | ||
/* Feature probe: does the kernel's BTF support BTF_KIND_FLOAT?
 * Loads a blob with a single 4-byte float type; 1 if accepted, 0 otherwise.
 */
static int probe_kern_btf_float(void)
{
	static const char strs[] = "\0float";
	__u32 types[] = {
		/* float */
		BTF_TYPE_FLOAT_ENC(1, 4),
	};

	return probe_fd(libbpf__load_raw_btf((char *)types, sizeof(types),
					     strs, sizeof(strs)));
}
4429 | ||
/* Feature probe: does the kernel's BTF support BTF_KIND_DECL_TAG?
 * Attaches a decl tag to a static var; 1 if the blob loads, 0 otherwise.
 */
static int probe_kern_btf_decl_tag(void)
{
	static const char strs[] = "\0tag";
	__u32 types[] = {
		/* int */
		BTF_TYPE_INT_ENC(0, BTF_INT_SIGNED, 0, 32, 4), /* [1] */
		/* VAR x */                                     /* [2] */
		BTF_TYPE_ENC(1, BTF_INFO_ENC(BTF_KIND_VAR, 0, 0), 1),
		BTF_VAR_STATIC,
		/* attr */
		BTF_TYPE_DECL_TAG_ENC(1, 2, -1),
	};

	return probe_fd(libbpf__load_raw_btf((char *)types, sizeof(types),
					     strs, sizeof(strs)));
}
4446 | ||
47b6cb4d | 4447 | static int probe_kern_array_mmap(void) |
7fe74b43 AN |
4448 | { |
4449 | struct bpf_create_map_attr attr = { | |
4450 | .map_type = BPF_MAP_TYPE_ARRAY, | |
4451 | .map_flags = BPF_F_MMAPABLE, | |
4452 | .key_size = sizeof(int), | |
4453 | .value_size = sizeof(int), | |
4454 | .max_entries = 1, | |
4455 | }; | |
7fe74b43 | 4456 | |
bb180fb2 | 4457 | return probe_fd(bpf_create_map_xattr(&attr)); |
7fe74b43 AN |
4458 | } |
4459 | ||
47b6cb4d | 4460 | static int probe_kern_exp_attach_type(void) |
25498a19 AN |
4461 | { |
4462 | struct bpf_load_program_attr attr; | |
4463 | struct bpf_insn insns[] = { | |
4464 | BPF_MOV64_IMM(BPF_REG_0, 0), | |
4465 | BPF_EXIT_INSN(), | |
4466 | }; | |
25498a19 AN |
4467 | |
4468 | memset(&attr, 0, sizeof(attr)); | |
4469 | /* use any valid combination of program type and (optional) | |
4470 | * non-zero expected attach type (i.e., not a BPF_CGROUP_INET_INGRESS) | |
4471 | * to see if kernel supports expected_attach_type field for | |
4472 | * BPF_PROG_LOAD command | |
4473 | */ | |
4474 | attr.prog_type = BPF_PROG_TYPE_CGROUP_SOCK; | |
4475 | attr.expected_attach_type = BPF_CGROUP_INET_SOCK_CREATE; | |
4476 | attr.insns = insns; | |
4477 | attr.insns_cnt = ARRAY_SIZE(insns); | |
4478 | attr.license = "GPL"; | |
4479 | ||
bb180fb2 | 4480 | return probe_fd(bpf_load_program_xattr(&attr, NULL, 0)); |
25498a19 AN |
4481 | } |
4482 | ||
109cea5a AN |
4483 | static int probe_kern_probe_read_kernel(void) |
4484 | { | |
4485 | struct bpf_load_program_attr attr; | |
4486 | struct bpf_insn insns[] = { | |
4487 | BPF_MOV64_REG(BPF_REG_1, BPF_REG_10), /* r1 = r10 (fp) */ | |
4488 | BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -8), /* r1 += -8 */ | |
4489 | BPF_MOV64_IMM(BPF_REG_2, 8), /* r2 = 8 */ | |
4490 | BPF_MOV64_IMM(BPF_REG_3, 0), /* r3 = 0 */ | |
4491 | BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_probe_read_kernel), | |
4492 | BPF_EXIT_INSN(), | |
4493 | }; | |
4494 | ||
4495 | memset(&attr, 0, sizeof(attr)); | |
4496 | attr.prog_type = BPF_PROG_TYPE_KPROBE; | |
4497 | attr.insns = insns; | |
4498 | attr.insns_cnt = ARRAY_SIZE(insns); | |
4499 | attr.license = "GPL"; | |
4500 | ||
4501 | return probe_fd(bpf_load_program_xattr(&attr, NULL, 0)); | |
4502 | } | |
4503 | ||
5d23328d YZ |
4504 | static int probe_prog_bind_map(void) |
4505 | { | |
4506 | struct bpf_load_program_attr prg_attr; | |
4507 | struct bpf_create_map_attr map_attr; | |
4508 | char *cp, errmsg[STRERR_BUFSIZE]; | |
4509 | struct bpf_insn insns[] = { | |
4510 | BPF_MOV64_IMM(BPF_REG_0, 0), | |
4511 | BPF_EXIT_INSN(), | |
4512 | }; | |
4513 | int ret, map, prog; | |
4514 | ||
4515 | memset(&map_attr, 0, sizeof(map_attr)); | |
4516 | map_attr.map_type = BPF_MAP_TYPE_ARRAY; | |
4517 | map_attr.key_size = sizeof(int); | |
4518 | map_attr.value_size = 32; | |
4519 | map_attr.max_entries = 1; | |
4520 | ||
4521 | map = bpf_create_map_xattr(&map_attr); | |
4522 | if (map < 0) { | |
4523 | ret = -errno; | |
4524 | cp = libbpf_strerror_r(ret, errmsg, sizeof(errmsg)); | |
4525 | pr_warn("Error in %s():%s(%d). Couldn't create simple array map.\n", | |
4526 | __func__, cp, -ret); | |
4527 | return ret; | |
4528 | } | |
4529 | ||
4530 | memset(&prg_attr, 0, sizeof(prg_attr)); | |
4531 | prg_attr.prog_type = BPF_PROG_TYPE_SOCKET_FILTER; | |
4532 | prg_attr.insns = insns; | |
4533 | prg_attr.insns_cnt = ARRAY_SIZE(insns); | |
4534 | prg_attr.license = "GPL"; | |
4535 | ||
4536 | prog = bpf_load_program_xattr(&prg_attr, NULL, 0); | |
4537 | if (prog < 0) { | |
4538 | close(map); | |
4539 | return 0; | |
4540 | } | |
4541 | ||
4542 | ret = bpf_prog_bind_map(prog, map, NULL); | |
4543 | ||
4544 | close(map); | |
4545 | close(prog); | |
4546 | ||
4547 | return ret >= 0; | |
4548 | } | |
4549 | ||
4f33a53d AN |
4550 | static int probe_module_btf(void) |
4551 | { | |
4552 | static const char strs[] = "\0int"; | |
4553 | __u32 types[] = { | |
4554 | /* int */ | |
4555 | BTF_TYPE_INT_ENC(1, BTF_INT_SIGNED, 0, 32, 4), | |
4556 | }; | |
4557 | struct bpf_btf_info info; | |
4558 | __u32 len = sizeof(info); | |
4559 | char name[16]; | |
4560 | int fd, err; | |
4561 | ||
4562 | fd = libbpf__load_raw_btf((char *)types, sizeof(types), strs, sizeof(strs)); | |
4563 | if (fd < 0) | |
4564 | return 0; /* BTF not supported at all */ | |
4565 | ||
4566 | memset(&info, 0, sizeof(info)); | |
4567 | info.name = ptr_to_u64(name); | |
4568 | info.name_len = sizeof(name); | |
4569 | ||
4570 | /* check that BPF_OBJ_GET_INFO_BY_FD supports specifying name pointer; | |
4571 | * kernel's module BTF support coincides with support for | |
4572 | * name/name_len fields in struct bpf_btf_info. | |
4573 | */ | |
4574 | err = bpf_obj_get_info_by_fd(fd, &info, &len); | |
4575 | close(fd); | |
4576 | return !err; | |
4577 | } | |
4578 | ||
668ace0e AN |
4579 | static int probe_perf_link(void) |
4580 | { | |
4581 | struct bpf_load_program_attr attr; | |
4582 | struct bpf_insn insns[] = { | |
4583 | BPF_MOV64_IMM(BPF_REG_0, 0), | |
4584 | BPF_EXIT_INSN(), | |
4585 | }; | |
4586 | int prog_fd, link_fd, err; | |
4587 | ||
4588 | memset(&attr, 0, sizeof(attr)); | |
4589 | attr.prog_type = BPF_PROG_TYPE_TRACEPOINT; | |
4590 | attr.insns = insns; | |
4591 | attr.insns_cnt = ARRAY_SIZE(insns); | |
4592 | attr.license = "GPL"; | |
4593 | prog_fd = bpf_load_program_xattr(&attr, NULL, 0); | |
4594 | if (prog_fd < 0) | |
4595 | return -errno; | |
4596 | ||
4597 | /* use invalid perf_event FD to get EBADF, if link is supported; | |
4598 | * otherwise EINVAL should be returned | |
4599 | */ | |
4600 | link_fd = bpf_link_create(prog_fd, -1, BPF_PERF_EVENT, NULL); | |
4601 | err = -errno; /* close() can clobber errno */ | |
4602 | ||
4603 | if (link_fd >= 0) | |
4604 | close(link_fd); | |
4605 | close(prog_fd); | |
4606 | ||
4607 | return link_fd < 0 && err == -EBADF; | |
4608 | } | |
4609 | ||
47b6cb4d AN |
/* tri-state result cached per feature after its first probe run */
enum kern_feature_result {
	FEAT_UNKNOWN = 0,
	FEAT_SUPPORTED = 1,
	FEAT_MISSING = 2,
};

/* probe returns >0 if supported, 0 if not, <0 on probe failure */
typedef int (*feature_probe_fn)(void);

/* lazily-evaluated table of kernel feature probes, indexed by
 * enum kern_feature_id; 'res' starts as FEAT_UNKNOWN and is filled
 * in by kernel_supports() on first use
 */
static struct kern_feature_desc {
	const char *desc;
	feature_probe_fn probe;
	enum kern_feature_result res;
} feature_probes[__FEAT_CNT] = {
	[FEAT_PROG_NAME] = {
		"BPF program name", probe_kern_prog_name,
	},
	[FEAT_GLOBAL_DATA] = {
		"global variables", probe_kern_global_data,
	},
	[FEAT_BTF] = {
		"minimal BTF", probe_kern_btf,
	},
	[FEAT_BTF_FUNC] = {
		"BTF functions", probe_kern_btf_func,
	},
	[FEAT_BTF_GLOBAL_FUNC] = {
		"BTF global function", probe_kern_btf_func_global,
	},
	[FEAT_BTF_DATASEC] = {
		"BTF data section and variable", probe_kern_btf_datasec,
	},
	[FEAT_ARRAY_MMAP] = {
		"ARRAY map mmap()", probe_kern_array_mmap,
	},
	[FEAT_EXP_ATTACH_TYPE] = {
		"BPF_PROG_LOAD expected_attach_type attribute",
		probe_kern_exp_attach_type,
	},
	[FEAT_PROBE_READ_KERN] = {
		"bpf_probe_read_kernel() helper", probe_kern_probe_read_kernel,
	},
	[FEAT_PROG_BIND_MAP] = {
		"BPF_PROG_BIND_MAP support", probe_prog_bind_map,
	},
	[FEAT_MODULE_BTF] = {
		"module BTF support", probe_module_btf,
	},
	[FEAT_BTF_FLOAT] = {
		"BTF_KIND_FLOAT support", probe_kern_btf_float,
	},
	[FEAT_PERF_LINK] = {
		"BPF perf link support", probe_perf_link,
	},
	[FEAT_BTF_DECL_TAG] = {
		"BTF_KIND_DECL_TAG support", probe_kern_btf_decl_tag,
	},
};
8837fe5d | 4667 | |
/* Query (and lazily probe + cache) whether the running kernel supports
 * feature 'feat_id'. Probe failures are logged and conservatively
 * treated as "missing". READ_ONCE/WRITE_ONCE keep the cached result
 * consistent if multiple threads race on the first probe; worst case
 * the probe runs more than once with the same outcome.
 */
static bool kernel_supports(const struct bpf_object *obj, enum kern_feature_id feat_id)
{
	struct kern_feature_desc *feat = &feature_probes[feat_id];
	int ret;

	if (obj->gen_loader)
		/* To generate loader program assume the latest kernel
		 * to avoid doing extra prog_load, map_create syscalls.
		 */
		return true;

	if (READ_ONCE(feat->res) == FEAT_UNKNOWN) {
		ret = feat->probe();
		if (ret > 0) {
			WRITE_ONCE(feat->res, FEAT_SUPPORTED);
		} else if (ret == 0) {
			WRITE_ONCE(feat->res, FEAT_MISSING);
		} else {
			pr_warn("Detection of kernel %s support failed: %d\n", feat->desc, ret);
			WRITE_ONCE(feat->res, FEAT_MISSING);
		}
	}

	return READ_ONCE(feat->res) == FEAT_SUPPORTED;
}
4693 | ||
57a00f41 THJ |
4694 | static bool map_is_reuse_compat(const struct bpf_map *map, int map_fd) |
4695 | { | |
4696 | struct bpf_map_info map_info = {}; | |
4697 | char msg[STRERR_BUFSIZE]; | |
4698 | __u32 map_info_len; | |
97eb3138 | 4699 | int err; |
57a00f41 THJ |
4700 | |
4701 | map_info_len = sizeof(map_info); | |
4702 | ||
97eb3138 MP |
4703 | err = bpf_obj_get_info_by_fd(map_fd, &map_info, &map_info_len); |
4704 | if (err && errno == EINVAL) | |
4705 | err = bpf_get_map_info_from_fdinfo(map_fd, &map_info); | |
4706 | if (err) { | |
4707 | pr_warn("failed to get map info for map FD %d: %s\n", map_fd, | |
4708 | libbpf_strerror_r(errno, msg, sizeof(msg))); | |
57a00f41 THJ |
4709 | return false; |
4710 | } | |
4711 | ||
4712 | return (map_info.type == map->def.type && | |
4713 | map_info.key_size == map->def.key_size && | |
4714 | map_info.value_size == map->def.value_size && | |
4715 | map_info.max_entries == map->def.max_entries && | |
4716 | map_info.map_flags == map->def.map_flags); | |
4717 | } | |
4718 | ||
4719 | static int | |
4720 | bpf_object__reuse_map(struct bpf_map *map) | |
4721 | { | |
4722 | char *cp, errmsg[STRERR_BUFSIZE]; | |
4723 | int err, pin_fd; | |
4724 | ||
4725 | pin_fd = bpf_obj_get(map->pin_path); | |
4726 | if (pin_fd < 0) { | |
4727 | err = -errno; | |
4728 | if (err == -ENOENT) { | |
4729 | pr_debug("found no pinned map to reuse at '%s'\n", | |
4730 | map->pin_path); | |
4731 | return 0; | |
4732 | } | |
4733 | ||
4734 | cp = libbpf_strerror_r(-err, errmsg, sizeof(errmsg)); | |
4735 | pr_warn("couldn't retrieve pinned map '%s': %s\n", | |
4736 | map->pin_path, cp); | |
4737 | return err; | |
4738 | } | |
4739 | ||
4740 | if (!map_is_reuse_compat(map, pin_fd)) { | |
4741 | pr_warn("couldn't reuse pinned map at '%s': parameter mismatch\n", | |
4742 | map->pin_path); | |
4743 | close(pin_fd); | |
4744 | return -EINVAL; | |
4745 | } | |
4746 | ||
4747 | err = bpf_map__reuse_fd(map, pin_fd); | |
4748 | if (err) { | |
4749 | close(pin_fd); | |
4750 | return err; | |
4751 | } | |
4752 | map->pinned = true; | |
4753 | pr_debug("reused pinned map at '%s'\n", map->pin_path); | |
4754 | ||
4755 | return 0; | |
4756 | } | |
4757 | ||
d859900c DB |
4758 | static int |
4759 | bpf_object__populate_internal_map(struct bpf_object *obj, struct bpf_map *map) | |
4760 | { | |
166750bc | 4761 | enum libbpf_map_type map_type = map->libbpf_type; |
d859900c DB |
4762 | char *cp, errmsg[STRERR_BUFSIZE]; |
4763 | int err, zero = 0; | |
d859900c | 4764 | |
67234743 AS |
4765 | if (obj->gen_loader) { |
4766 | bpf_gen__map_update_elem(obj->gen_loader, map - obj->maps, | |
4767 | map->mmaped, map->def.value_size); | |
4768 | if (map_type == LIBBPF_MAP_RODATA || map_type == LIBBPF_MAP_KCONFIG) | |
4769 | bpf_gen__map_freeze(obj->gen_loader, map - obj->maps); | |
4770 | return 0; | |
4771 | } | |
eba9c5f4 AN |
4772 | err = bpf_map_update_elem(map->fd, &zero, map->mmaped, 0); |
4773 | if (err) { | |
4774 | err = -errno; | |
4775 | cp = libbpf_strerror_r(err, errmsg, sizeof(errmsg)); | |
4776 | pr_warn("Error setting initial map(%s) contents: %s\n", | |
4777 | map->name, cp); | |
4778 | return err; | |
4779 | } | |
d859900c | 4780 | |
81bfdd08 AN |
4781 | /* Freeze .rodata and .kconfig map as read-only from syscall side. */ |
4782 | if (map_type == LIBBPF_MAP_RODATA || map_type == LIBBPF_MAP_KCONFIG) { | |
d859900c DB |
4783 | err = bpf_map_freeze(map->fd); |
4784 | if (err) { | |
eba9c5f4 AN |
4785 | err = -errno; |
4786 | cp = libbpf_strerror_r(err, errmsg, sizeof(errmsg)); | |
be18010e KW |
4787 | pr_warn("Error freezing map(%s) as read-only: %s\n", |
4788 | map->name, cp); | |
eba9c5f4 | 4789 | return err; |
d859900c DB |
4790 | } |
4791 | } | |
eba9c5f4 | 4792 | return 0; |
d859900c DB |
4793 | } |
4794 | ||
2d39d7c5 AN |
4795 | static void bpf_map__destroy(struct bpf_map *map); |
4796 | ||
/* Create one map in the kernel (or record its creation for gen_loader).
 * Handles PERF_EVENT_ARRAY auto-sizing, struct_ops vmlinux value types,
 * BTF attachment (with a no-BTF retry on kernels that reject it for this
 * map type), and recursive creation of a map-in-map's inner template.
 * 'is_inner' marks recursion for the inner template, which gets no
 * persistent slot in obj->maps. Returns 0 on success, -errno on failure.
 */
static int bpf_object__create_map(struct bpf_object *obj, struct bpf_map *map, bool is_inner)
{
	struct bpf_create_map_attr create_attr;
	struct bpf_map_def *def = &map->def;
	int err = 0;

	memset(&create_attr, 0, sizeof(create_attr));

	/* only pass a name if the kernel understands the attribute */
	if (kernel_supports(obj, FEAT_PROG_NAME))
		create_attr.name = map->name;
	create_attr.map_ifindex = map->map_ifindex;
	create_attr.map_type = def->type;
	create_attr.map_flags = def->map_flags;
	create_attr.key_size = def->key_size;
	create_attr.value_size = def->value_size;
	create_attr.numa_node = map->numa_node;

	/* an unsized perf event array defaults to one slot per possible CPU */
	if (def->type == BPF_MAP_TYPE_PERF_EVENT_ARRAY && !def->max_entries) {
		int nr_cpus;

		nr_cpus = libbpf_num_possible_cpus();
		if (nr_cpus < 0) {
			pr_warn("map '%s': failed to determine number of system CPUs: %d\n",
				map->name, nr_cpus);
			return nr_cpus;
		}
		pr_debug("map '%s': setting size to %d\n", map->name, nr_cpus);
		create_attr.max_entries = nr_cpus;
	} else {
		create_attr.max_entries = def->max_entries;
	}

	if (bpf_map__is_struct_ops(map))
		create_attr.btf_vmlinux_value_type_id =
			map->btf_vmlinux_value_type_id;

	/* attach BTF type info if the object has loaded BTF and the
	 * key/value type IDs can be resolved for this map
	 */
	create_attr.btf_fd = 0;
	create_attr.btf_key_type_id = 0;
	create_attr.btf_value_type_id = 0;
	if (obj->btf && btf__fd(obj->btf) >= 0 && !bpf_map_find_btf_info(obj, map)) {
		create_attr.btf_fd = btf__fd(obj->btf);
		create_attr.btf_key_type_id = map->btf_key_type_id;
		create_attr.btf_value_type_id = map->btf_value_type_id;
	}

	if (bpf_map_type__is_map_in_map(def->type)) {
		if (map->inner_map) {
			/* create the inner template first so its fd can be
			 * passed as inner_map_fd of the outer map
			 */
			err = bpf_object__create_map(obj, map->inner_map, true);
			if (err) {
				pr_warn("map '%s': failed to create inner map: %d\n",
					map->name, err);
				return err;
			}
			map->inner_map_fd = bpf_map__fd(map->inner_map);
		}
		if (map->inner_map_fd >= 0)
			create_attr.inner_map_fd = map->inner_map_fd;
	}

	/* these map types never support per-element BTF type info;
	 * strip it to avoid a guaranteed kernel rejection
	 */
	switch (def->type) {
	case BPF_MAP_TYPE_PERF_EVENT_ARRAY:
	case BPF_MAP_TYPE_CGROUP_ARRAY:
	case BPF_MAP_TYPE_STACK_TRACE:
	case BPF_MAP_TYPE_ARRAY_OF_MAPS:
	case BPF_MAP_TYPE_HASH_OF_MAPS:
	case BPF_MAP_TYPE_DEVMAP:
	case BPF_MAP_TYPE_DEVMAP_HASH:
	case BPF_MAP_TYPE_CPUMAP:
	case BPF_MAP_TYPE_XSKMAP:
	case BPF_MAP_TYPE_SOCKMAP:
	case BPF_MAP_TYPE_SOCKHASH:
	case BPF_MAP_TYPE_QUEUE:
	case BPF_MAP_TYPE_STACK:
	case BPF_MAP_TYPE_RINGBUF:
		create_attr.btf_fd = 0;
		create_attr.btf_key_type_id = 0;
		create_attr.btf_value_type_id = 0;
		map->btf_key_type_id = 0;
		map->btf_value_type_id = 0;
		/* fallthrough */
	default:
		break;
	}

	if (obj->gen_loader) {
		bpf_gen__map_create(obj->gen_loader, &create_attr, is_inner ? -1 : map - obj->maps);
		/* Pretend to have valid FD to pass various fd >= 0 checks.
		 * This fd == 0 will not be used with any syscall and will be reset to -1 eventually.
		 */
		map->fd = 0;
	} else {
		map->fd = bpf_create_map_xattr(&create_attr);
	}
	/* older kernels may reject BTF for some map types; retry once
	 * without any BTF type info attached
	 */
	if (map->fd < 0 && (create_attr.btf_key_type_id ||
			    create_attr.btf_value_type_id)) {
		char *cp, errmsg[STRERR_BUFSIZE];

		err = -errno;
		cp = libbpf_strerror_r(err, errmsg, sizeof(errmsg));
		pr_warn("Error in bpf_create_map_xattr(%s):%s(%d). Retrying without BTF.\n",
			map->name, cp, err);
		create_attr.btf_fd = 0;
		create_attr.btf_key_type_id = 0;
		create_attr.btf_value_type_id = 0;
		map->btf_key_type_id = 0;
		map->btf_value_type_id = 0;
		map->fd = bpf_create_map_xattr(&create_attr);
	}

	err = map->fd < 0 ? -errno : 0;

	/* the inner template served its purpose (or creation failed);
	 * destroy and free it either way
	 */
	if (bpf_map_type__is_map_in_map(def->type) && map->inner_map) {
		if (obj->gen_loader)
			map->inner_map->fd = -1;
		bpf_map__destroy(map->inner_map);
		zfree(&map->inner_map);
	}

	return err;
}
4916 | ||
67234743 | 4917 | static int init_map_slots(struct bpf_object *obj, struct bpf_map *map) |
a0f2b7ac HL |
4918 | { |
4919 | const struct bpf_map *targ_map; | |
4920 | unsigned int i; | |
67234743 | 4921 | int fd, err = 0; |
a0f2b7ac HL |
4922 | |
4923 | for (i = 0; i < map->init_slots_sz; i++) { | |
4924 | if (!map->init_slots[i]) | |
4925 | continue; | |
4926 | ||
4927 | targ_map = map->init_slots[i]; | |
4928 | fd = bpf_map__fd(targ_map); | |
67234743 | 4929 | if (obj->gen_loader) { |
edc0571c | 4930 | pr_warn("// TODO map_update_elem: idx %td key %d value==map_idx %td\n", |
67234743 AS |
4931 | map - obj->maps, i, targ_map - obj->maps); |
4932 | return -ENOTSUP; | |
4933 | } else { | |
4934 | err = bpf_map_update_elem(map->fd, &i, &fd, 0); | |
4935 | } | |
a0f2b7ac HL |
4936 | if (err) { |
4937 | err = -errno; | |
4938 | pr_warn("map '%s': failed to initialize slot [%d] to map '%s' fd=%d: %d\n", | |
4939 | map->name, i, targ_map->name, | |
4940 | fd, err); | |
4941 | return err; | |
4942 | } | |
4943 | pr_debug("map '%s': slot [%d] set to map '%s' fd=%d\n", | |
4944 | map->name, i, targ_map->name, fd); | |
4945 | } | |
4946 | ||
4947 | zfree(&map->init_slots); | |
4948 | map->init_slots_sz = 0; | |
4949 | ||
4950 | return 0; | |
4951 | } | |
4952 | ||
52d3352e WN |
/* Create, reuse, or skip every map declared in @obj.
 *
 * For each map:
 *   - with a pin_path set, first try to reuse an already-pinned map;
 *   - with a preset fd (from reuse or the caller), skip creation;
 *   - otherwise create the map, write initial contents of internal maps
 *     (global data / kconfig), and fill preset map-in-map slots;
 *   - finally auto-pin if requested; an -EEXIST race on pinning is retried
 *     exactly once through the reuse path.
 *
 * On failure, fds of all maps created so far are closed and a negative
 * error is returned.
 */
static int
bpf_object__create_maps(struct bpf_object *obj)
{
	struct bpf_map *map;
	char *cp, errmsg[STRERR_BUFSIZE];
	unsigned int i, j;
	int err;
	bool retried;

	for (i = 0; i < obj->nr_maps; i++) {
		map = &obj->maps[i];

		retried = false;
retry:
		if (map->pin_path) {
			/* prefer an already-pinned map over creating a new one */
			err = bpf_object__reuse_map(map);
			if (err) {
				pr_warn("map '%s': error reusing pinned map\n",
					map->name);
				goto err_out;
			}
			/* the retry pass after a lost pinning race must find
			 * the winner's map; a missing fd here is fatal
			 */
			if (retried && map->fd < 0) {
				pr_warn("map '%s': cannot find pinned map\n",
					map->name);
				err = -ENOENT;
				goto err_out;
			}
		}

		if (map->fd >= 0) {
			/* fd preset by the caller or by the reuse path above */
			pr_debug("map '%s': skipping creation (preset fd=%d)\n",
				 map->name, map->fd);
		} else {
			err = bpf_object__create_map(obj, map, false);
			if (err)
				goto err_out;

			pr_debug("map '%s': created successfully, fd=%d\n",
				 map->name, map->fd);

			/* internal maps get their initial contents written
			 * right after creation
			 */
			if (bpf_map__is_internal(map)) {
				err = bpf_object__populate_internal_map(obj, map);
				if (err < 0) {
					zclose(map->fd);
					goto err_out;
				}
			}

			/* set up declaratively-initialized inner-map slots */
			if (map->init_slots_sz) {
				err = init_map_slots(obj, map);
				if (err < 0) {
					zclose(map->fd);
					goto err_out;
				}
			}
		}

		if (map->pin_path && !map->pinned) {
			err = bpf_map__pin(map, NULL);
			if (err) {
				zclose(map->fd);
				/* someone else pinned first: retry once and
				 * pick up their map via the reuse path
				 */
				if (!retried && err == -EEXIST) {
					retried = true;
					goto retry;
				}
				pr_warn("map '%s': failed to auto-pin at '%s': %d\n",
					map->name, map->pin_path, err);
				goto err_out;
			}
		}
	}

	return 0;

err_out:
	cp = libbpf_strerror_r(err, errmsg, sizeof(errmsg));
	pr_warn("map '%s': failed to create: %s(%d)\n", map->name, cp, err);
	pr_perm_msg(err);
	/* undo: close fds of every map created before the failing one */
	for (j = 0; j < i; j++)
		zclose(obj->maps[j].fd);
	return err;
}
5035 | ||
ddc7c304 AN |
/* Check whether s points at an X___Y flavor separator: exactly three
 * underscores bracketed by non-underscore characters (reads s[0..4]).
 */
static bool bpf_core_is_flavor_sep(const char *s)
{
	if (s[0] == '_' || s[4] == '_')
		return false; /* X and Y themselves must not be underscores */
	return s[1] == '_' && s[2] == '_' && s[3] == '_';
}

/* Given 'some_struct_name___with_flavor' return the length of a name prefix
 * before last triple underscore. Struct name part after last triple
 * underscore is ignored by BPF CO-RE relocation during relocation matching.
 */
size_t bpf_core_essential_name_len(const char *name)
{
	size_t full_len = strlen(name);
	int pos;

	/* scan right-to-left for the last flavor separator */
	for (pos = (int)full_len - 5; pos >= 0; pos--) {
		if (bpf_core_is_flavor_sep(name + pos))
			return pos + 1;
	}
	return full_len; /* no flavor suffix: whole name is essential */
}
5059 | ||
301ba4d7 | 5060 | static void bpf_core_free_cands(struct bpf_core_cand_list *cands) |
ddc7c304 | 5061 | { |
0f7515ca AN |
5062 | free(cands->cands); |
5063 | free(cands); | |
ddc7c304 AN |
5064 | } |
5065 | ||
/* Scan @targ_btf types with IDs in [targ_start_id..nr_types] for potential
 * CO-RE relocation targets for @local_cand and append each match to @cands.
 *
 * A target matches when it has the same BTF kind as the local type and the
 * same "essential" name, i.e. names compare equal after stripping any
 * '___flavor' suffix on either side. @local_essent_len is the precomputed
 * essential length of the local name; @targ_btf_name is used for logging
 * only. Returns 0 on success, -ENOMEM if growing the candidate array fails.
 */
static int bpf_core_add_cands(struct bpf_core_cand *local_cand,
			      size_t local_essent_len,
			      const struct btf *targ_btf,
			      const char *targ_btf_name,
			      int targ_start_id,
			      struct bpf_core_cand_list *cands)
{
	struct bpf_core_cand *new_cands, *cand;
	const struct btf_type *t;
	const char *targ_name;
	size_t targ_essent_len;
	int n, i;

	n = btf__get_nr_types(targ_btf);
	for (i = targ_start_id; i <= n; i++) {
		t = btf__type_by_id(targ_btf, i);
		if (btf_kind(t) != btf_kind(local_cand->t))
			continue;

		/* anonymous types can never match by name */
		targ_name = btf__name_by_offset(targ_btf, t->name_off);
		if (str_is_empty(targ_name))
			continue;

		/* essential (flavor-stripped) name lengths must agree... */
		targ_essent_len = bpf_core_essential_name_len(targ_name);
		if (targ_essent_len != local_essent_len)
			continue;

		/* ...and the essential prefixes must be byte-identical */
		if (strncmp(local_cand->name, targ_name, local_essent_len) != 0)
			continue;

		pr_debug("CO-RE relocating [%d] %s %s: found target candidate [%d] %s %s in [%s]\n",
			 local_cand->id, btf_kind_str(local_cand->t),
			 local_cand->name, i, btf_kind_str(t), targ_name,
			 targ_btf_name);
		/* grow by one; on failure the old array stays owned by cands */
		new_cands = libbpf_reallocarray(cands->cands, cands->len + 1,
					       sizeof(*cands->cands));
		if (!new_cands)
			return -ENOMEM;

		cand = &new_cands[cands->len];
		cand->btf = targ_btf;
		cand->t = t;
		cand->name = targ_name;
		cand->id = i;

		cands->cands = new_cands;
		cands->len++;
	}
	return 0;
}
5116 | ||
4f33a53d AN |
/* Load the BTF objects of all currently loaded kernel modules and cache
 * them in obj->btf_modules, for use as additional CO-RE candidate sources.
 *
 * Runs at most once per object (btf_modules_loaded latches even when no
 * module BTFs exist). No-op in gen_loader mode and on kernels without
 * module BTF support. Returns 0 on success or when there is nothing to do,
 * negative error otherwise.
 */
static int load_module_btfs(struct bpf_object *obj)
{
	struct bpf_btf_info info;
	struct module_btf *mod_btf;
	struct btf *btf;
	char name[64];
	__u32 id = 0, len;
	int err, fd;

	if (obj->btf_modules_loaded)
		return 0;

	if (obj->gen_loader)
		return 0;

	/* don't do this again, even if we find no module BTFs */
	obj->btf_modules_loaded = true;

	/* kernel too old to support module BTFs */
	if (!kernel_supports(obj, FEAT_MODULE_BTF))
		return 0;

	/* iterate over all BTF objects registered with the kernel */
	while (true) {
		err = bpf_btf_get_next_id(id, &id);
		if (err && errno == ENOENT)
			return 0; /* reached the end of the ID space */
		if (err) {
			err = -errno;
			pr_warn("failed to iterate BTF objects: %d\n", err);
			return err;
		}

		fd = bpf_btf_get_fd_by_id(id);
		if (fd < 0) {
			if (errno == ENOENT)
				continue; /* expected race: BTF was unloaded */
			err = -errno;
			pr_warn("failed to get BTF object #%d FD: %d\n", id, err);
			return err;
		}

		/* query the BTF object's name to tell modules from vmlinux */
		len = sizeof(info);
		memset(&info, 0, sizeof(info));
		info.name = ptr_to_u64(name);
		info.name_len = sizeof(name);

		err = bpf_obj_get_info_by_fd(fd, &info, &len);
		if (err) {
			err = -errno;
			pr_warn("failed to get BTF object #%d info: %d\n", id, err);
			goto err_out;
		}

		/* ignore non-module BTFs */
		if (!info.kernel_btf || strcmp(name, "vmlinux") == 0) {
			close(fd);
			continue;
		}

		/* parse module BTF as split BTF on top of vmlinux base */
		btf = btf_get_from_fd(fd, obj->btf_vmlinux);
		err = libbpf_get_error(btf);
		if (err) {
			pr_warn("failed to load module [%s]'s BTF object #%d: %d\n",
				name, id, err);
			goto err_out;
		}

		err = libbpf_ensure_mem((void **)&obj->btf_modules, &obj->btf_module_cap,
					sizeof(*obj->btf_modules), obj->btf_module_cnt + 1);
		if (err)
			goto err_out;

		mod_btf = &obj->btf_modules[obj->btf_module_cnt++];

		/* fd is kept open and owned by the cached entry from here on */
		mod_btf->btf = btf;
		mod_btf->id = id;
		mod_btf->fd = fd;
		mod_btf->name = strdup(name);
		if (!mod_btf->name) {
			err = -ENOMEM;
			goto err_out;
		}
		continue;

err_out:
		close(fd);
		return err;
	}

	return 0;
}
5208 | ||
/* Build the list of candidate target types for local type @local_type_id.
 *
 * vmlinux BTF (or the user-supplied override BTF, if set) is searched
 * first; module BTFs are consulted only when vmlinux yields no candidates
 * and no override is in effect. Returns a (possibly empty) candidate list
 * owned by the caller, or ERR_PTR on failure.
 */
static struct bpf_core_cand_list *
bpf_core_find_cands(struct bpf_object *obj, const struct btf *local_btf, __u32 local_type_id)
{
	struct bpf_core_cand local_cand = {};
	struct bpf_core_cand_list *cands;
	const struct btf *main_btf;
	size_t local_essent_len;
	int err, i;

	local_cand.btf = local_btf;
	local_cand.t = btf__type_by_id(local_btf, local_type_id);
	if (!local_cand.t)
		return ERR_PTR(-EINVAL);

	/* anonymous local types can't be matched against target BTF */
	local_cand.name = btf__name_by_offset(local_btf, local_cand.t->name_off);
	if (str_is_empty(local_cand.name))
		return ERR_PTR(-EINVAL);
	local_essent_len = bpf_core_essential_name_len(local_cand.name);

	cands = calloc(1, sizeof(*cands));
	if (!cands)
		return ERR_PTR(-ENOMEM);

	/* Attempt to find target candidates in vmlinux BTF first */
	main_btf = obj->btf_vmlinux_override ?: obj->btf_vmlinux;
	err = bpf_core_add_cands(&local_cand, local_essent_len, main_btf, "vmlinux", 1, cands);
	if (err)
		goto err_out;

	/* if vmlinux BTF has any candidate, don't go for module BTFs */
	if (cands->len)
		return cands;

	/* if vmlinux BTF was overridden, don't attempt to load module BTFs */
	if (obj->btf_vmlinux_override)
		return cands;

	/* now look through module BTFs, trying to still find candidates */
	err = load_module_btfs(obj);
	if (err)
		goto err_out;

	for (i = 0; i < obj->btf_module_cnt; i++) {
		/* module BTF type IDs start right after vmlinux's type IDs */
		err = bpf_core_add_cands(&local_cand, local_essent_len,
					 obj->btf_modules[i].btf,
					 obj->btf_modules[i].name,
					 btf__get_nr_types(obj->btf_vmlinux) + 1,
					 cands);
		if (err)
			goto err_out;
	}

	return cands;
err_out:
	bpf_core_free_cands(cands);
	return ERR_PTR(err);
}
5266 | ||
3fc32f40 AN |
/* Check local and target types for compatibility. This check is used for
 * type-based CO-RE relocations and follow slightly different rules than
 * field-based relocations. This function assumes that root types were already
 * checked for name match. Beyond that initial root-level name check, names
 * are completely ignored. Compatibility rules are as follows:
 * - any two STRUCTs/UNIONs/FWDs/ENUMs/INTs are considered compatible, but
 *   kind should match for local and target types (i.e., STRUCT is not
 *   compatible with UNION);
 * - for ENUMs, the size is ignored;
 * - for INT, size and signedness are ignored;
 * - for ARRAY, dimensionality is ignored, element types are checked for
 *   compatibility recursively;
 * - CONST/VOLATILE/RESTRICT modifiers are ignored;
 * - TYPEDEFs/PTRs are compatible if types they pointing to are compatible;
 * - FUNC_PROTOs are compatible if they have compatible signature: same
 *   number of input args and compatible return and argument types.
 * These rules are not set in stone and probably will be adjusted as we get
 * more experience with using BPF CO-RE relocations.
 *
 * Returns 1 if compatible, 0 if not, negative error on malformed input or
 * when the recursion depth limit is exceeded.
 */
int bpf_core_types_are_compat(const struct btf *local_btf, __u32 local_id,
			      const struct btf *targ_btf, __u32 targ_id)
{
	const struct btf_type *local_type, *targ_type;
	int depth = 32; /* max recursion depth */

	/* caller made sure that names match (ignoring flavor suffix) */
	local_type = btf__type_by_id(local_btf, local_id);
	targ_type = btf__type_by_id(targ_btf, targ_id);
	if (btf_kind(local_type) != btf_kind(targ_type))
		return 0;

recur:
	/* each goto-based "tail call" back here burns one unit of depth,
	 * bounding traversal of pathological/cyclic type chains
	 */
	depth--;
	if (depth < 0)
		return -EINVAL;

	local_type = skip_mods_and_typedefs(local_btf, local_id, &local_id);
	targ_type = skip_mods_and_typedefs(targ_btf, targ_id, &targ_id);
	if (!local_type || !targ_type)
		return -EINVAL;

	if (btf_kind(local_type) != btf_kind(targ_type))
		return 0;

	switch (btf_kind(local_type)) {
	case BTF_KIND_UNKN:
	case BTF_KIND_STRUCT:
	case BTF_KIND_UNION:
	case BTF_KIND_ENUM:
	case BTF_KIND_FWD:
		/* same kind is sufficient for these (see rules above) */
		return 1;
	case BTF_KIND_INT:
		/* just reject deprecated bitfield-like integers; all other
		 * integers are by default compatible between each other
		 */
		return btf_int_offset(local_type) == 0 && btf_int_offset(targ_type) == 0;
	case BTF_KIND_PTR:
		/* compare pointee types */
		local_id = local_type->type;
		targ_id = targ_type->type;
		goto recur;
	case BTF_KIND_ARRAY:
		/* dimensionality ignored: compare element types only */
		local_id = btf_array(local_type)->type;
		targ_id = btf_array(targ_type)->type;
		goto recur;
	case BTF_KIND_FUNC_PROTO: {
		struct btf_param *local_p = btf_params(local_type);
		struct btf_param *targ_p = btf_params(targ_type);
		__u16 local_vlen = btf_vlen(local_type);
		__u16 targ_vlen = btf_vlen(targ_type);
		int i, err;

		if (local_vlen != targ_vlen)
			return 0;

		/* every argument pair must be compatible */
		for (i = 0; i < local_vlen; i++, local_p++, targ_p++) {
			skip_mods_and_typedefs(local_btf, local_p->type, &local_id);
			skip_mods_and_typedefs(targ_btf, targ_p->type, &targ_id);
			err = bpf_core_types_are_compat(local_btf, local_id, targ_btf, targ_id);
			if (err <= 0)
				return err;
		}

		/* tail recurse for return type check */
		skip_mods_and_typedefs(local_btf, local_type->type, &local_id);
		skip_mods_and_typedefs(targ_btf, targ_type->type, &targ_id);
		goto recur;
	}
	default:
		pr_warn("unexpected kind %s relocated, local [%d], target [%d]\n",
			btf_kind_str(local_type), local_id, targ_id);
		return 0;
	}
}
5360 | ||
ddc7c304 AN |
/* Hash callback for the candidate cache: keys are BTF type IDs packed
 * directly into the pointer value, so the "hash" is the ID itself.
 */
static size_t bpf_core_hash_fn(const void *key, void *ctx)
{
	return (size_t)key;
}

/* Equality callback for the candidate cache: plain pointer-value compare
 * (sufficient because keys are packed integers, not heap objects).
 */
static bool bpf_core_equal_fn(const void *k1, const void *k2, void *ctx)
{
	return k1 == k2;
}

/* Pack a 32-bit BTF type ID into a void * for use as a hashmap key. */
static void *u32_as_hash_key(__u32 x)
{
	return (void *)(uintptr_t)x;
}
5375 | ||
3ee4f533 AS |
5376 | static int bpf_core_apply_relo(struct bpf_program *prog, |
5377 | const struct bpf_core_relo *relo, | |
5378 | int relo_idx, | |
5379 | const struct btf *local_btf, | |
5380 | struct hashmap *cand_cache) | |
5381 | { | |
5382 | const void *type_key = u32_as_hash_key(relo->type_id); | |
301ba4d7 | 5383 | struct bpf_core_cand_list *cands = NULL; |
3ee4f533 AS |
5384 | const char *prog_name = prog->name; |
5385 | const struct btf_type *local_type; | |
5386 | const char *local_name; | |
5387 | __u32 local_id = relo->type_id; | |
5388 | struct bpf_insn *insn; | |
5389 | int insn_idx, err; | |
5390 | ||
5391 | if (relo->insn_off % BPF_INSN_SZ) | |
5392 | return -EINVAL; | |
5393 | insn_idx = relo->insn_off / BPF_INSN_SZ; | |
5394 | /* adjust insn_idx from section frame of reference to the local | |
5395 | * program's frame of reference; (sub-)program code is not yet | |
5396 | * relocated, so it's enough to just subtract in-section offset | |
5397 | */ | |
5398 | insn_idx = insn_idx - prog->sec_insn_off; | |
5399 | if (insn_idx > prog->insns_cnt) | |
5400 | return -EINVAL; | |
5401 | insn = &prog->insns[insn_idx]; | |
5402 | ||
5403 | local_type = btf__type_by_id(local_btf, local_id); | |
5404 | if (!local_type) | |
5405 | return -EINVAL; | |
5406 | ||
5407 | local_name = btf__name_by_offset(local_btf, local_type->name_off); | |
5408 | if (!local_name) | |
5409 | return -EINVAL; | |
5410 | ||
5411 | if (prog->obj->gen_loader) { | |
5412 | pr_warn("// TODO core_relo: prog %td insn[%d] %s kind %d\n", | |
5413 | prog - prog->obj->programs, relo->insn_off / 8, | |
5414 | local_name, relo->kind); | |
5415 | return -ENOTSUP; | |
5416 | } | |
5417 | ||
5418 | if (relo->kind != BPF_TYPE_ID_LOCAL && | |
5419 | !hashmap__find(cand_cache, type_key, (void **)&cands)) { | |
5420 | cands = bpf_core_find_cands(prog->obj, local_btf, local_id); | |
5421 | if (IS_ERR(cands)) { | |
5422 | pr_warn("prog '%s': relo #%d: target candidate search failed for [%d] %s %s: %ld\n", | |
5423 | prog_name, relo_idx, local_id, btf_kind_str(local_type), | |
5424 | local_name, PTR_ERR(cands)); | |
5425 | return PTR_ERR(cands); | |
5426 | } | |
5427 | err = hashmap__set(cand_cache, type_key, cands, NULL, NULL); | |
5428 | if (err) { | |
5429 | bpf_core_free_cands(cands); | |
5430 | return err; | |
5431 | } | |
5432 | } | |
5433 | ||
5434 | return bpf_core_apply_relo_insn(prog_name, insn, insn_idx, relo, relo_idx, local_btf, cands); | |
5435 | } | |
5436 | ||
ddc7c304 | 5437 | static int |
28b93c64 | 5438 | bpf_object__relocate_core(struct bpf_object *obj, const char *targ_btf_path) |
ddc7c304 AN |
5439 | { |
5440 | const struct btf_ext_info_sec *sec; | |
28b93c64 | 5441 | const struct bpf_core_relo *rec; |
ddc7c304 AN |
5442 | const struct btf_ext_info *seg; |
5443 | struct hashmap_entry *entry; | |
5444 | struct hashmap *cand_cache = NULL; | |
5445 | struct bpf_program *prog; | |
ddc7c304 | 5446 | const char *sec_name; |
db2b8b06 | 5447 | int i, err = 0, insn_idx, sec_idx; |
ddc7c304 | 5448 | |
28b93c64 AN |
5449 | if (obj->btf_ext->core_relo_info.len == 0) |
5450 | return 0; | |
5451 | ||
0f7515ca AN |
5452 | if (targ_btf_path) { |
5453 | obj->btf_vmlinux_override = btf__parse(targ_btf_path, NULL); | |
e9fc3ce9 AN |
5454 | err = libbpf_get_error(obj->btf_vmlinux_override); |
5455 | if (err) { | |
0f7515ca AN |
5456 | pr_warn("failed to parse target BTF: %d\n", err); |
5457 | return err; | |
5458 | } | |
ddc7c304 AN |
5459 | } |
5460 | ||
5461 | cand_cache = hashmap__new(bpf_core_hash_fn, bpf_core_equal_fn, NULL); | |
5462 | if (IS_ERR(cand_cache)) { | |
5463 | err = PTR_ERR(cand_cache); | |
5464 | goto out; | |
5465 | } | |
5466 | ||
28b93c64 | 5467 | seg = &obj->btf_ext->core_relo_info; |
ddc7c304 AN |
5468 | for_each_btf_ext_sec(seg, sec) { |
5469 | sec_name = btf__name_by_offset(obj->btf, sec->sec_name_off); | |
5470 | if (str_is_empty(sec_name)) { | |
5471 | err = -EINVAL; | |
5472 | goto out; | |
5473 | } | |
db2b8b06 AN |
5474 | /* bpf_object's ELF is gone by now so it's not easy to find |
5475 | * section index by section name, but we can find *any* | |
5476 | * bpf_program within desired section name and use it's | |
5477 | * prog->sec_idx to do a proper search by section index and | |
5478 | * instruction offset | |
5479 | */ | |
9c82a63c AN |
5480 | prog = NULL; |
5481 | for (i = 0; i < obj->nr_programs; i++) { | |
db2b8b06 | 5482 | prog = &obj->programs[i]; |
52109584 | 5483 | if (strcmp(prog->sec_name, sec_name) == 0) |
9c82a63c | 5484 | break; |
9c82a63c | 5485 | } |
ddc7c304 | 5486 | if (!prog) { |
db2b8b06 AN |
5487 | pr_warn("sec '%s': failed to find a BPF program\n", sec_name); |
5488 | return -ENOENT; | |
ddc7c304 | 5489 | } |
db2b8b06 | 5490 | sec_idx = prog->sec_idx; |
ddc7c304 | 5491 | |
9c0f8cbd | 5492 | pr_debug("sec '%s': found %d CO-RE relocations\n", |
ddc7c304 AN |
5493 | sec_name, sec->num_info); |
5494 | ||
5495 | for_each_btf_ext_rec(seg, sec, i, rec) { | |
db2b8b06 AN |
5496 | insn_idx = rec->insn_off / BPF_INSN_SZ; |
5497 | prog = find_prog_by_sec_insn(obj, sec_idx, insn_idx); | |
5498 | if (!prog) { | |
5499 | pr_warn("sec '%s': failed to find program at insn #%d for CO-RE offset relocation #%d\n", | |
5500 | sec_name, insn_idx, i); | |
5501 | err = -EINVAL; | |
5502 | goto out; | |
5503 | } | |
47f7cf63 AN |
5504 | /* no need to apply CO-RE relocation if the program is |
5505 | * not going to be loaded | |
5506 | */ | |
5507 | if (!prog->load) | |
5508 | continue; | |
db2b8b06 | 5509 | |
0f7515ca | 5510 | err = bpf_core_apply_relo(prog, rec, i, obj->btf, cand_cache); |
ddc7c304 | 5511 | if (err) { |
be18010e | 5512 | pr_warn("prog '%s': relo #%d: failed to relocate: %d\n", |
9c0f8cbd | 5513 | prog->name, i, err); |
ddc7c304 AN |
5514 | goto out; |
5515 | } | |
5516 | } | |
5517 | } | |
5518 | ||
5519 | out: | |
4f33a53d | 5520 | /* obj->btf_vmlinux and module BTFs are freed after object load */ |
0f7515ca AN |
5521 | btf__free(obj->btf_vmlinux_override); |
5522 | obj->btf_vmlinux_override = NULL; | |
5523 | ||
ddc7c304 AN |
5524 | if (!IS_ERR_OR_NULL(cand_cache)) { |
5525 | hashmap__for_each_entry(cand_cache, entry, i) { | |
5526 | bpf_core_free_cands(entry->value); | |
5527 | } | |
5528 | hashmap__free(cand_cache); | |
5529 | } | |
5530 | return err; | |
5531 | } | |
5532 | ||
c3c55696 AN |
/* Relocate data references within program code:
 *  - map references;
 *  - global variable references;
 *  - extern references.
 *
 * Each relocation descriptor patches one or two instruction slots in place:
 * in gen_loader mode instructions carry map/extern *indices* (resolved by
 * the loader program later), otherwise they carry actual map fds / resolved
 * ksym values. Returns 0 on success, -EINVAL on malformed relocations.
 */
static int
bpf_object__relocate_data(struct bpf_object *obj, struct bpf_program *prog)
{
	int i;

	for (i = 0; i < prog->nr_reloc; i++) {
		struct reloc_desc *relo = &prog->reloc_desc[i];
		struct bpf_insn *insn = &prog->insns[relo->insn_idx];
		struct extern_desc *ext;

		switch (relo->type) {
		case RELO_LD64:
			/* direct map reference (ld_imm64 of a map) */
			if (obj->gen_loader) {
				insn[0].src_reg = BPF_PSEUDO_MAP_IDX;
				insn[0].imm = relo->map_idx;
			} else {
				insn[0].src_reg = BPF_PSEUDO_MAP_FD;
				insn[0].imm = obj->maps[relo->map_idx].fd;
			}
			break;
		case RELO_DATA:
			/* global variable: map value address + offset within it */
			insn[1].imm = insn[0].imm + relo->sym_off;
			if (obj->gen_loader) {
				insn[0].src_reg = BPF_PSEUDO_MAP_IDX_VALUE;
				insn[0].imm = relo->map_idx;
			} else {
				insn[0].src_reg = BPF_PSEUDO_MAP_VALUE;
				insn[0].imm = obj->maps[relo->map_idx].fd;
			}
			break;
		case RELO_EXTERN_VAR:
			ext = &obj->externs[relo->sym_off];
			if (ext->type == EXT_KCFG) {
				/* kconfig externs live at an offset inside
				 * the object's kconfig map value
				 */
				if (obj->gen_loader) {
					insn[0].src_reg = BPF_PSEUDO_MAP_IDX_VALUE;
					insn[0].imm = obj->kconfig_map_idx;
				} else {
					insn[0].src_reg = BPF_PSEUDO_MAP_VALUE;
					insn[0].imm = obj->maps[obj->kconfig_map_idx].fd;
				}
				insn[1].imm = ext->kcfg.data_off;
			} else /* EXT_KSYM */ {
				if (ext->ksym.type_id && ext->is_set) { /* typed ksyms */
					insn[0].src_reg = BPF_PSEUDO_BTF_ID;
					insn[0].imm = ext->ksym.kernel_btf_id;
					insn[1].imm = ext->ksym.kernel_btf_obj_fd;
				} else { /* typeless ksyms or unresolved typed ksyms */
					/* 64-bit address split across the
					 * ld_imm64 instruction pair
					 */
					insn[0].imm = (__u32)ext->ksym.addr;
					insn[1].imm = ext->ksym.addr >> 32;
				}
			}
			break;
		case RELO_EXTERN_FUNC:
			ext = &obj->externs[relo->sym_off];
			insn[0].src_reg = BPF_PSEUDO_KFUNC_CALL;
			if (ext->is_set) {
				insn[0].imm = ext->ksym.kernel_btf_id;
				insn[0].off = ext->ksym.btf_fd_idx;
			} else { /* unresolved weak kfunc */
				insn[0].imm = 0;
				insn[0].off = 0;
			}
			break;
		case RELO_SUBPROG_ADDR:
			/* sanity check only: address-of-subprog insns are
			 * expected to be marked during earlier passes
			 */
			if (insn[0].src_reg != BPF_PSEUDO_FUNC) {
				pr_warn("prog '%s': relo #%d: bad insn\n",
					prog->name, i);
				return -EINVAL;
			}
			/* handled already */
			break;
		case RELO_CALL:
			/* handled already */
			break;
		default:
			pr_warn("prog '%s': relo #%d: bad relo type %d\n",
				prog->name, i, relo->type);
			return -EINVAL;
		}
	}

	return 0;
}
5621 | ||
8505e870 AN |
/* Copy the slice of .BTF.ext records (func or line info) that falls within
 * @prog's instruction range and append it to the main program's accumulated
 * buffer (*prog_info, *prog_rec_cnt, *prog_rec_sz).
 *
 * Appended records have their instruction offsets converted from byte
 * units (as emitted by Clang) to 8-byte instruction units (as expected by
 * the kernel), rebased to the program's relocated position.
 *
 * Returns 0 on success, -ENOENT when no records cover this program,
 * -ENOMEM on allocation failure, -EINVAL on a bad section name offset.
 */
static int adjust_prog_btf_ext_info(const struct bpf_object *obj,
				    const struct bpf_program *prog,
				    const struct btf_ext_info *ext_info,
				    void **prog_info, __u32 *prog_rec_cnt,
				    __u32 *prog_rec_sz)
{
	void *copy_start = NULL, *copy_end = NULL;
	void *rec, *rec_end, *new_prog_info;
	const struct btf_ext_info_sec *sec;
	size_t old_sz, new_sz;
	const char *sec_name;
	int i, off_adj;

	for_each_btf_ext_sec(ext_info, sec) {
		sec_name = btf__name_by_offset(obj->btf, sec->sec_name_off);
		if (!sec_name)
			return -EINVAL;
		if (strcmp(sec_name, prog->sec_name) != 0)
			continue;

		/* find the contiguous run of records that fall inside this
		 * (sub-)program's original in-section instruction range;
		 * records are keyed by a leading __u32 byte offset
		 */
		for_each_btf_ext_rec(ext_info, sec, i, rec) {
			__u32 insn_off = *(__u32 *)rec / BPF_INSN_SZ;

			if (insn_off < prog->sec_insn_off)
				continue;
			if (insn_off >= prog->sec_insn_off + prog->sec_insn_cnt)
				break;

			if (!copy_start)
				copy_start = rec;
			copy_end = rec + ext_info->rec_size;
		}

		if (!copy_start)
			return -ENOENT;

		/* append func/line info of a given (sub-)program to the main
		 * program func/line info
		 */
		old_sz = (size_t)(*prog_rec_cnt) * ext_info->rec_size;
		new_sz = old_sz + (copy_end - copy_start);
		new_prog_info = realloc(*prog_info, new_sz);
		if (!new_prog_info)
			return -ENOMEM;
		*prog_info = new_prog_info;
		*prog_rec_cnt = new_sz / ext_info->rec_size;
		memcpy(new_prog_info + old_sz, copy_start, copy_end - copy_start);

		/* Kernel instruction offsets are in units of 8-byte
		 * instructions, while .BTF.ext instruction offsets generated
		 * by Clang are in units of bytes. So convert Clang offsets
		 * into kernel offsets and adjust offset according to program
		 * relocated position.
		 */
		off_adj = prog->sub_insn_off - prog->sec_insn_off;
		rec = new_prog_info + old_sz;
		rec_end = new_prog_info + new_sz;
		for (; rec < rec_end; rec += ext_info->rec_size) {
			__u32 *insn_off = rec;

			*insn_off = *insn_off / BPF_INSN_SZ + off_adj;
		}
		*prog_rec_sz = ext_info->rec_size;
		return 0;
	}

	return -ENOENT;
}
5690 | ||
/* Append @prog's .BTF.ext func and line info to @main_prog.
 *
 * Degrades gracefully: missing .BTF.ext, a kernel without BTF func info
 * support, or absent info for the main program itself merely skip the
 * corresponding info kind. A subprogram missing info that the main program
 * *does* have is a hard error (the kernel needs complete coverage).
 */
static int
reloc_prog_func_and_line_info(const struct bpf_object *obj,
			      struct bpf_program *main_prog,
			      const struct bpf_program *prog)
{
	int err;

	/* no .BTF.ext relocation if .BTF.ext is missing or kernel doesn't
	 * support func/line info
	 */
	if (!obj->btf_ext || !kernel_supports(obj, FEAT_BTF_FUNC))
		return 0;

	/* only attempt func info relocation if main program's func_info
	 * relocation was successful
	 */
	if (main_prog != prog && !main_prog->func_info)
		goto line_info;

	err = adjust_prog_btf_ext_info(obj, prog, &obj->btf_ext->func_info,
				       &main_prog->func_info,
				       &main_prog->func_info_cnt,
				       &main_prog->func_info_rec_size);
	if (err) {
		if (err != -ENOENT) {
			pr_warn("prog '%s': error relocating .BTF.ext function info: %d\n",
				prog->name, err);
			return err;
		}
		if (main_prog->func_info) {
			/*
			 * Some info has already been found but has problem
			 * in the last btf_ext reloc. Must have to error out.
			 */
			pr_warn("prog '%s': missing .BTF.ext function info.\n", prog->name);
			return err;
		}
		/* Have problem loading the very first info. Ignore the rest. */
		pr_warn("prog '%s': missing .BTF.ext function info for the main program, skipping all of .BTF.ext func info.\n",
			prog->name);
	}

line_info:
	/* don't relocate line info if main program's relocation failed */
	if (main_prog != prog && !main_prog->line_info)
		return 0;

	err = adjust_prog_btf_ext_info(obj, prog, &obj->btf_ext->line_info,
				       &main_prog->line_info,
				       &main_prog->line_info_cnt,
				       &main_prog->line_info_rec_size);
	if (err) {
		if (err != -ENOENT) {
			pr_warn("prog '%s': error relocating .BTF.ext line info: %d\n",
				prog->name, err);
			return err;
		}
		if (main_prog->line_info) {
			/*
			 * Some info has already been found but has problem
			 * in the last btf_ext reloc. Must have to error out.
			 */
			pr_warn("prog '%s': missing .BTF.ext line info.\n", prog->name);
			return err;
		}
		/* Have problem loading the very first info. Ignore the rest. */
		pr_warn("prog '%s': missing .BTF.ext line info for the main program, skipping all of .BTF.ext line info.\n",
			prog->name);
	}
	return 0;
}
5762 | ||
c3c55696 AN |
5763 | static int cmp_relo_by_insn_idx(const void *key, const void *elem) |
5764 | { | |
5765 | size_t insn_idx = *(const size_t *)key; | |
5766 | const struct reloc_desc *relo = elem; | |
5767 | ||
5768 | if (insn_idx == relo->insn_idx) | |
5769 | return 0; | |
5770 | return insn_idx < relo->insn_idx ? -1 : 1; | |
5771 | } | |
5772 | ||
/* Binary-search @prog's relocation descriptors (kept sorted by insn_idx)
 * for the one targeting instruction @insn_idx; NULL if there is none.
 */
static struct reloc_desc *find_prog_insn_relo(const struct bpf_program *prog, size_t insn_idx)
{
	return bsearch(&insn_idx, prog->reloc_desc, prog->nr_reloc,
		       sizeof(*prog->reloc_desc), cmp_relo_by_insn_idx);
}
5778 | ||
b1268826 AS |
5779 | static int append_subprog_relos(struct bpf_program *main_prog, struct bpf_program *subprog) |
5780 | { | |
5781 | int new_cnt = main_prog->nr_reloc + subprog->nr_reloc; | |
5782 | struct reloc_desc *relos; | |
5783 | int i; | |
5784 | ||
5785 | if (main_prog == subprog) | |
5786 | return 0; | |
5787 | relos = libbpf_reallocarray(main_prog->reloc_desc, new_cnt, sizeof(*relos)); | |
5788 | if (!relos) | |
5789 | return -ENOMEM; | |
5790 | memcpy(relos + main_prog->nr_reloc, subprog->reloc_desc, | |
5791 | sizeof(*relos) * subprog->nr_reloc); | |
5792 | ||
5793 | for (i = main_prog->nr_reloc; i < new_cnt; i++) | |
5794 | relos[i].insn_idx += subprog->sub_insn_off; | |
5795 | /* After insn_idx adjustment the 'relos' array is still sorted | |
5796 | * by insn_idx and doesn't break bsearch. | |
5797 | */ | |
5798 | main_prog->reloc_desc = relos; | |
5799 | main_prog->nr_reloc = new_cnt; | |
5800 | return 0; | |
5801 | } | |
5802 | ||
c3c55696 AN |
/* Recursively append and relocate all subprograms (transitively) called by
 * prog into main_prog. prog is the program currently being processed (it is
 * main_prog itself on the initial call); by this point its instructions have
 * already been copied into main_prog->insns at offset prog->sub_insn_off.
 * Returns 0 on success, negative libbpf error code otherwise.
 */
static int
bpf_object__reloc_code(struct bpf_object *obj, struct bpf_program *main_prog,
		       struct bpf_program *prog)
{
	size_t sub_insn_idx, insn_idx, new_cnt;
	struct bpf_program *subprog;
	struct bpf_insn *insns, *insn;
	struct reloc_desc *relo;
	int err;

	/* adjust/copy .BTF.ext func/line info for prog into main_prog first */
	err = reloc_prog_func_and_line_info(obj, main_prog, prog);
	if (err)
		return err;

	for (insn_idx = 0; insn_idx < prog->sec_insn_cnt; insn_idx++) {
		/* prog's copy of instructions lives inside main_prog->insns,
		 * starting at prog->sub_insn_off
		 */
		insn = &main_prog->insns[prog->sub_insn_off + insn_idx];
		if (!insn_is_subprog_call(insn) && !insn_is_pseudo_func(insn))
			continue;

		relo = find_prog_insn_relo(prog, insn_idx);
		if (relo && relo->type == RELO_EXTERN_FUNC)
			/* kfunc relocations will be handled later
			 * in bpf_object__relocate_data()
			 */
			continue;
		if (relo && relo->type != RELO_CALL && relo->type != RELO_SUBPROG_ADDR) {
			pr_warn("prog '%s': unexpected relo for insn #%zu, type %d\n",
				prog->name, insn_idx, relo->type);
			return -LIBBPF_ERRNO__RELOC;
		}
		if (relo) {
			/* sub-program instruction index is a combination of
			 * an offset of a symbol pointed to by relocation and
			 * call instruction's imm field; for global functions,
			 * call always has imm = -1, but for static functions
			 * relocation is against STT_SECTION and insn->imm
			 * points to a start of a static function
			 *
			 * for subprog addr relocation, the relo->sym_off + insn->imm is
			 * the byte offset in the corresponding section.
			 */
			if (relo->type == RELO_CALL)
				sub_insn_idx = relo->sym_off / BPF_INSN_SZ + insn->imm + 1;
			else
				sub_insn_idx = (relo->sym_off + insn->imm) / BPF_INSN_SZ;
		} else if (insn_is_pseudo_func(insn)) {
			/*
			 * RELO_SUBPROG_ADDR relo is always emitted even if both
			 * functions are in the same section, so it shouldn't reach here.
			 */
			pr_warn("prog '%s': missing subprog addr relo for insn #%zu\n",
				prog->name, insn_idx);
			return -LIBBPF_ERRNO__RELOC;
		} else {
			/* if subprogram call is to a static function within
			 * the same ELF section, there won't be any relocation
			 * emitted, but it also means there is no additional
			 * offset necessary, insns->imm is relative to
			 * instruction's original position within the section
			 */
			sub_insn_idx = prog->sec_insn_off + insn_idx + insn->imm + 1;
		}

		/* we enforce that sub-programs should be in .text section */
		subprog = find_prog_by_sec_insn(obj, obj->efile.text_shndx, sub_insn_idx);
		if (!subprog) {
			pr_warn("prog '%s': no .text section found yet sub-program call exists\n",
				prog->name);
			return -LIBBPF_ERRNO__RELOC;
		}

		/* if it's the first call instruction calling into this
		 * subprogram (meaning this subprog hasn't been processed
		 * yet) within the context of current main program:
		 *   - append it at the end of main program's instruction block;
		 *   - process it recursively, while current program is put on hold;
		 *   - if that subprogram calls some other not yet processed
		 *     subprogram, same thing will happen recursively until
		 *     there are no more unprocessed subprograms left to append
		 *     and relocate.
		 */
		if (subprog->sub_insn_off == 0) {
			subprog->sub_insn_off = main_prog->insns_cnt;

			new_cnt = main_prog->insns_cnt + subprog->insns_cnt;
			insns = libbpf_reallocarray(main_prog->insns, new_cnt, sizeof(*insns));
			if (!insns) {
				pr_warn("prog '%s': failed to realloc prog code\n", main_prog->name);
				return -ENOMEM;
			}
			main_prog->insns = insns;
			main_prog->insns_cnt = new_cnt;

			memcpy(main_prog->insns + subprog->sub_insn_off, subprog->insns,
			       subprog->insns_cnt * sizeof(*insns));

			pr_debug("prog '%s': added %zu insns from sub-prog '%s'\n",
				 main_prog->name, subprog->insns_cnt, subprog->name);

			/* The subprog insns are now appended. Append its relos too. */
			err = append_subprog_relos(main_prog, subprog);
			if (err)
				return err;
			err = bpf_object__reloc_code(obj, main_prog, subprog);
			if (err)
				return err;
		}

		/* main_prog->insns memory could have been re-allocated, so
		 * calculate pointer again
		 */
		insn = &main_prog->insns[prog->sub_insn_off + insn_idx];
		/* calculate correct instruction position within current main
		 * prog; each main prog can have a different set of
		 * subprograms appended (potentially in different order as
		 * well), so position of any subprog can be different for
		 * different main programs
		 */
		insn->imm = subprog->sub_insn_off - (prog->sub_insn_off + insn_idx) - 1;

		pr_debug("prog '%s': insn #%zu relocated, imm %d points to subprog '%s' (now at %zu offset)\n",
			 prog->name, insn_idx, insn->imm, subprog->name, subprog->sub_insn_off);
	}

	return 0;
}
5928 | ||
5929 | /* | |
5930 | * Relocate sub-program calls. | |
5931 | * | |
5932 | * Algorithm operates as follows. Each entry-point BPF program (referred to as | |
5933 | * main prog) is processed separately. For each subprog (non-entry functions, | |
5934 | * that can be called from either entry progs or other subprogs) gets their | |
5935 | * sub_insn_off reset to zero. This serves as indicator that this subprogram | |
5936 | * hasn't been yet appended and relocated within current main prog. Once its | |
5937 | * relocated, sub_insn_off will point at the position within current main prog | |
5938 | * where given subprog was appended. This will further be used to relocate all | |
5939 | * the call instructions jumping into this subprog. | |
5940 | * | |
5941 | * We start with main program and process all call instructions. If the call | |
5942 | * is into a subprog that hasn't been processed (i.e., subprog->sub_insn_off | |
5943 | * is zero), subprog instructions are appended at the end of main program's | |
5944 | * instruction array. Then main program is "put on hold" while we recursively | |
5945 | * process newly appended subprogram. If that subprogram calls into another | |
5946 | * subprogram that hasn't been appended, new subprogram is appended again to | |
5947 | * the *main* prog's instructions (subprog's instructions are always left | |
5948 | * untouched, as they need to be in unmodified state for subsequent main progs | |
5949 | * and subprog instructions are always sent only as part of a main prog) and | |
5950 | * the process continues recursively. Once all the subprogs called from a main | |
5951 | * prog or any of its subprogs are appended (and relocated), all their | |
5952 | * positions within finalized instructions array are known, so it's easy to | |
5953 | * rewrite call instructions with correct relative offsets, corresponding to | |
5954 | * desired target subprog. | |
5955 | * | |
 5956 | * It's important to realize that some subprogs might not be called from some | |
5957 | * main prog and any of its called/used subprogs. Those will keep their | |
5958 | * subprog->sub_insn_off as zero at all times and won't be appended to current | |
5959 | * main prog and won't be relocated within the context of current main prog. | |
5960 | * They might still be used from other main progs later. | |
5961 | * | |
5962 | * Visually this process can be shown as below. Suppose we have two main | |
5963 | * programs mainA and mainB and BPF object contains three subprogs: subA, | |
5964 | * subB, and subC. mainA calls only subA, mainB calls only subC, but subA and | |
5965 | * subC both call subB: | |
5966 | * | |
5967 | * +--------+ +-------+ | |
5968 | * | v v | | |
5969 | * +--+---+ +--+-+-+ +---+--+ | |
5970 | * | subA | | subB | | subC | | |
5971 | * +--+---+ +------+ +---+--+ | |
5972 | * ^ ^ | |
5973 | * | | | |
5974 | * +---+-------+ +------+----+ | |
5975 | * | mainA | | mainB | | |
5976 | * +-----------+ +-----------+ | |
5977 | * | |
5978 | * We'll start relocating mainA, will find subA, append it and start | |
5979 | * processing sub A recursively: | |
5980 | * | |
5981 | * +-----------+------+ | |
5982 | * | mainA | subA | | |
5983 | * +-----------+------+ | |
5984 | * | |
5985 | * At this point we notice that subB is used from subA, so we append it and | |
5986 | * relocate (there are no further subcalls from subB): | |
5987 | * | |
5988 | * +-----------+------+------+ | |
5989 | * | mainA | subA | subB | | |
5990 | * +-----------+------+------+ | |
5991 | * | |
5992 | * At this point, we relocate subA calls, then go one level up and finish with | |
 5993 | * relocating mainA calls. mainA is done. | |
5994 | * | |
5995 | * For mainB process is similar but results in different order. We start with | |
5996 | * mainB and skip subA and subB, as mainB never calls them (at least | |
5997 | * directly), but we see subC is needed, so we append and start processing it: | |
5998 | * | |
5999 | * +-----------+------+ | |
6000 | * | mainB | subC | | |
6001 | * +-----------+------+ | |
6002 | * Now we see subC needs subB, so we go back to it, append and relocate it: | |
6003 | * | |
6004 | * +-----------+------+------+ | |
6005 | * | mainB | subC | subB | | |
6006 | * +-----------+------+------+ | |
6007 | * | |
6008 | * At this point we unwind recursion, relocate calls in subC, then in mainB. | |
6009 | */ | |
6010 | static int | |
6011 | bpf_object__relocate_calls(struct bpf_object *obj, struct bpf_program *prog) | |
6012 | { | |
6013 | struct bpf_program *subprog; | |
d3d93e34 | 6014 | int i, err; |
c3c55696 | 6015 | |
c3c55696 AN |
6016 | /* mark all subprogs as not relocated (yet) within the context of |
6017 | * current main program | |
6018 | */ | |
6019 | for (i = 0; i < obj->nr_programs; i++) { | |
6020 | subprog = &obj->programs[i]; | |
6021 | if (!prog_is_subprog(obj, subprog)) | |
6022 | continue; | |
6023 | ||
6024 | subprog->sub_insn_off = 0; | |
c3c55696 AN |
6025 | } |
6026 | ||
6027 | err = bpf_object__reloc_code(obj, prog, prog); | |
6028 | if (err) | |
6029 | return err; | |
6030 | ||
6031 | ||
8a47a6c5 WN |
6032 | return 0; |
6033 | } | |
6034 | ||
67234743 AS |
6035 | static void |
6036 | bpf_object__free_relocs(struct bpf_object *obj) | |
6037 | { | |
6038 | struct bpf_program *prog; | |
6039 | int i; | |
6040 | ||
6041 | /* free up relocation descriptors */ | |
6042 | for (i = 0; i < obj->nr_programs; i++) { | |
6043 | prog = &obj->programs[i]; | |
6044 | zfree(&prog->reloc_desc); | |
6045 | prog->nr_reloc = 0; | |
6046 | } | |
6047 | } | |
6048 | ||
8a47a6c5 | 6049 | static int |
ddc7c304 | 6050 | bpf_object__relocate(struct bpf_object *obj, const char *targ_btf_path) |
8a47a6c5 WN |
6051 | { |
6052 | struct bpf_program *prog; | |
b1268826 | 6053 | size_t i, j; |
8a47a6c5 WN |
6054 | int err; |
6055 | ||
ddc7c304 AN |
6056 | if (obj->btf_ext) { |
6057 | err = bpf_object__relocate_core(obj, targ_btf_path); | |
6058 | if (err) { | |
be18010e KW |
6059 | pr_warn("failed to perform CO-RE relocations: %d\n", |
6060 | err); | |
ddc7c304 AN |
6061 | return err; |
6062 | } | |
6063 | } | |
b1268826 AS |
6064 | |
6065 | /* Before relocating calls pre-process relocations and mark | |
6066 | * few ld_imm64 instructions that points to subprogs. | |
6067 | * Otherwise bpf_object__reloc_code() later would have to consider | |
6068 | * all ld_imm64 insns as relocation candidates. That would | |
6069 | * reduce relocation speed, since amount of find_prog_insn_relo() | |
6070 | * would increase and most of them will fail to find a relo. | |
9173cac3 AN |
6071 | */ |
6072 | for (i = 0; i < obj->nr_programs; i++) { | |
6073 | prog = &obj->programs[i]; | |
b1268826 AS |
6074 | for (j = 0; j < prog->nr_reloc; j++) { |
6075 | struct reloc_desc *relo = &prog->reloc_desc[j]; | |
6076 | struct bpf_insn *insn = &prog->insns[relo->insn_idx]; | |
6077 | ||
6078 | /* mark the insn, so it's recognized by insn_is_pseudo_func() */ | |
6079 | if (relo->type == RELO_SUBPROG_ADDR) | |
6080 | insn[0].src_reg = BPF_PSEUDO_FUNC; | |
9173cac3 | 6081 | } |
9173cac3 | 6082 | } |
b1268826 AS |
6083 | |
6084 | /* relocate subprogram calls and append used subprograms to main | |
c3c55696 AN |
6085 | * programs; each copy of subprogram code needs to be relocated |
6086 | * differently for each main program, because its code location might | |
b1268826 AS |
6087 | * have changed. |
6088 | * Append subprog relos to main programs to allow data relos to be | |
6089 | * processed after text is completely relocated. | |
9173cac3 | 6090 | */ |
8a47a6c5 WN |
6091 | for (i = 0; i < obj->nr_programs; i++) { |
6092 | prog = &obj->programs[i]; | |
c3c55696 AN |
6093 | /* sub-program's sub-calls are relocated within the context of |
6094 | * its main program only | |
6095 | */ | |
6096 | if (prog_is_subprog(obj, prog)) | |
9173cac3 | 6097 | continue; |
8a47a6c5 | 6098 | |
c3c55696 | 6099 | err = bpf_object__relocate_calls(obj, prog); |
8a47a6c5 | 6100 | if (err) { |
9c0f8cbd AN |
6101 | pr_warn("prog '%s': failed to relocate calls: %d\n", |
6102 | prog->name, err); | |
8a47a6c5 WN |
6103 | return err; |
6104 | } | |
6105 | } | |
b1268826 AS |
6106 | /* Process data relos for main programs */ |
6107 | for (i = 0; i < obj->nr_programs; i++) { | |
6108 | prog = &obj->programs[i]; | |
6109 | if (prog_is_subprog(obj, prog)) | |
6110 | continue; | |
6111 | err = bpf_object__relocate_data(obj, prog); | |
6112 | if (err) { | |
6113 | pr_warn("prog '%s': failed to relocate data references: %d\n", | |
6114 | prog->name, err); | |
6115 | return err; | |
6116 | } | |
6117 | } | |
67234743 AS |
6118 | if (!obj->gen_loader) |
6119 | bpf_object__free_relocs(obj); | |
8a47a6c5 WN |
6120 | return 0; |
6121 | } | |
6122 | ||
646f02ff | 6123 | static int bpf_object__collect_st_ops_relos(struct bpf_object *obj, |
ad23b723 | 6124 | Elf64_Shdr *shdr, Elf_Data *data); |
646f02ff AN |
6125 | |
/* Collect relocations from the .maps (BTF-defined maps) section. These
 * relocations describe map-in-map initialization: a slot in an outer map's
 * "values" array points at an inner map's variable. For each relocation,
 * find the outer map it falls into, resolve the inner (target) map by the
 * relocation symbol's name, and record it in the outer map's init_slots.
 */
static int bpf_object__collect_map_relos(struct bpf_object *obj,
					 Elf64_Shdr *shdr, Elf_Data *data)
{
	const int bpf_ptr_sz = 8, host_ptr_sz = sizeof(void *);
	int i, j, nrels, new_sz;
	const struct btf_var_secinfo *vi = NULL;
	const struct btf_type *sec, *var, *def;
	struct bpf_map *map = NULL, *targ_map;
	const struct btf_member *member;
	const char *name, *mname;
	unsigned int moff;
	Elf64_Sym *sym;
	Elf64_Rel *rel;
	void *tmp;

	if (!obj->efile.btf_maps_sec_btf_id || !obj->btf)
		return -EINVAL;
	/* BTF DATASEC type describing variables in the .maps section */
	sec = btf__type_by_id(obj->btf, obj->efile.btf_maps_sec_btf_id);
	if (!sec)
		return -EINVAL;

	nrels = shdr->sh_size / shdr->sh_entsize;
	for (i = 0; i < nrels; i++) {
		rel = elf_rel_by_idx(data, i);
		if (!rel) {
			pr_warn(".maps relo #%d: failed to get ELF relo\n", i);
			return -LIBBPF_ERRNO__FORMAT;
		}

		/* the relocation's symbol is the inner (target) map variable */
		sym = elf_sym_by_idx(obj, ELF64_R_SYM(rel->r_info));
		if (!sym) {
			pr_warn(".maps relo #%d: symbol %zx not found\n",
				i, (size_t)ELF64_R_SYM(rel->r_info));
			return -LIBBPF_ERRNO__FORMAT;
		}
		name = elf_sym_str(obj, sym->st_name) ?: "<?>";
		if (sym->st_shndx != obj->efile.btf_maps_shndx) {
			pr_warn(".maps relo #%d: '%s' isn't a BTF-defined map\n",
				i, name);
			return -LIBBPF_ERRNO__RELOC;
		}

		pr_debug(".maps relo #%d: for %zd value %zd rel->r_offset %zu name %d ('%s')\n",
			 i, (ssize_t)(rel->r_info >> 32), (size_t)sym->st_value,
			 (size_t)rel->r_offset, sym->st_name, name);

		/* find the outer map whose variable's byte range within the
		 * .maps section covers the relocated pointer
		 */
		for (j = 0; j < obj->nr_maps; j++) {
			map = &obj->maps[j];
			if (map->sec_idx != obj->efile.btf_maps_shndx)
				continue;

			vi = btf_var_secinfos(sec) + map->btf_var_idx;
			if (vi->offset <= rel->r_offset &&
			    rel->r_offset + bpf_ptr_sz <= vi->offset + vi->size)
				break;
		}
		if (j == obj->nr_maps) {
			pr_warn(".maps relo #%d: cannot find map '%s' at rel->r_offset %zu\n",
				i, name, (size_t)rel->r_offset);
			return -EINVAL;
		}

		/* only map-in-map types can have inner maps to initialize */
		if (!bpf_map_type__is_map_in_map(map->def.type))
			return -EINVAL;
		if (map->def.type == BPF_MAP_TYPE_HASH_OF_MAPS &&
		    map->def.key_size != sizeof(int)) {
			pr_warn(".maps relo #%d: hash-of-maps '%s' should have key size %zu.\n",
				i, map->name, sizeof(int));
			return -EINVAL;
		}

		/* resolve inner map by the relocation symbol's name */
		targ_map = bpf_object__find_map_by_name(obj, name);
		if (!targ_map)
			return -ESRCH;

		/* the outer map definition's last member must be the
		 * "values" array holding inner map pointers
		 */
		var = btf__type_by_id(obj->btf, vi->type);
		def = skip_mods_and_typedefs(obj->btf, var->type, NULL);
		if (btf_vlen(def) == 0)
			return -EINVAL;
		member = btf_members(def) + btf_vlen(def) - 1;
		mname = btf__name_by_offset(obj->btf, member->name_off);
		if (strcmp(mname, "values"))
			return -EINVAL;

		/* compute the slot index from the relocation's byte offset
		 * within the "values" array
		 */
		moff = btf_member_bit_offset(def, btf_vlen(def) - 1) / 8;
		if (rel->r_offset - vi->offset < moff)
			return -EINVAL;

		moff = rel->r_offset - vi->offset - moff;
		/* here we use BPF pointer size, which is always 64 bit, as we
		 * are parsing ELF that was built for BPF target
		 */
		if (moff % bpf_ptr_sz)
			return -EINVAL;
		moff /= bpf_ptr_sz;
		/* grow init_slots on demand; slots hold host pointers */
		if (moff >= map->init_slots_sz) {
			new_sz = moff + 1;
			tmp = libbpf_reallocarray(map->init_slots, new_sz, host_ptr_sz);
			if (!tmp)
				return -ENOMEM;
			map->init_slots = tmp;
			memset(map->init_slots + map->init_slots_sz, 0,
			       (new_sz - map->init_slots_sz) * host_ptr_sz);
			map->init_slots_sz = new_sz;
		}
		map->init_slots[moff] = targ_map;

		pr_debug(".maps relo #%d: map '%s' slot [%d] points to map '%s'\n",
			 i, map->name, moff, name);
	}

	return 0;
}
590a0088 | 6239 | |
c3c55696 | 6240 | static int cmp_relocs(const void *_a, const void *_b) |
34090915 | 6241 | { |
c3c55696 AN |
6242 | const struct reloc_desc *a = _a; |
6243 | const struct reloc_desc *b = _b; | |
34090915 | 6244 | |
c3c55696 AN |
6245 | if (a->insn_idx != b->insn_idx) |
6246 | return a->insn_idx < b->insn_idx ? -1 : 1; | |
6247 | ||
6248 | /* no two relocations should have the same insn_idx, but ... */ | |
6249 | if (a->type != b->type) | |
6250 | return a->type < b->type ? -1 : 1; | |
6251 | ||
6252 | return 0; | |
6253 | } | |
6254 | ||
6255 | static int bpf_object__collect_relos(struct bpf_object *obj) | |
6256 | { | |
6257 | int i, err; | |
34090915 | 6258 | |
25bbbd7a AN |
6259 | for (i = 0; i < obj->efile.sec_cnt; i++) { |
6260 | struct elf_sec_desc *sec_desc = &obj->efile.secs[i]; | |
6261 | Elf64_Shdr *shdr; | |
6262 | Elf_Data *data; | |
6263 | int idx; | |
6264 | ||
6265 | if (sec_desc->sec_type != SEC_RELO) | |
6266 | continue; | |
6267 | ||
6268 | shdr = sec_desc->shdr; | |
6269 | data = sec_desc->data; | |
6270 | idx = shdr->sh_info; | |
34090915 WN |
6271 | |
6272 | if (shdr->sh_type != SHT_REL) { | |
be18010e | 6273 | pr_warn("internal error at %d\n", __LINE__); |
6371ca3b | 6274 | return -LIBBPF_ERRNO__INTERNAL; |
34090915 WN |
6275 | } |
6276 | ||
c3c55696 | 6277 | if (idx == obj->efile.st_ops_shndx) |
646f02ff | 6278 | err = bpf_object__collect_st_ops_relos(obj, shdr, data); |
c3c55696 | 6279 | else if (idx == obj->efile.btf_maps_shndx) |
646f02ff | 6280 | err = bpf_object__collect_map_relos(obj, shdr, data); |
c3c55696 AN |
6281 | else |
6282 | err = bpf_object__collect_prog_relos(obj, shdr, data); | |
34090915 | 6283 | if (err) |
6371ca3b | 6284 | return err; |
34090915 | 6285 | } |
c3c55696 AN |
6286 | |
6287 | for (i = 0; i < obj->nr_programs; i++) { | |
6288 | struct bpf_program *p = &obj->programs[i]; | |
c139e40a | 6289 | |
c3c55696 AN |
6290 | if (!p->nr_reloc) |
6291 | continue; | |
6292 | ||
6293 | qsort(p->reloc_desc, p->nr_reloc, sizeof(*p->reloc_desc), cmp_relocs); | |
6294 | } | |
34090915 WN |
6295 | return 0; |
6296 | } | |
6297 | ||
109cea5a AN |
/* Check whether insn is a call to a BPF helper (BPF_JMP|BPF_CALL with
 * immediate operand and zeroed registers); if so, store the helper's ID
 * in *func_id and return true.
 */
static bool insn_is_helper_call(struct bpf_insn *insn, enum bpf_func_id *func_id)
{
	bool is_helper = BPF_CLASS(insn->code) == BPF_JMP &&
			 BPF_OP(insn->code) == BPF_CALL &&
			 BPF_SRC(insn->code) == BPF_K &&
			 insn->src_reg == 0 &&
			 insn->dst_reg == 0;

	if (is_helper)
		*func_id = insn->imm;
	return is_helper;
}
6310 | ||
42869d28 | 6311 | static int bpf_object__sanitize_prog(struct bpf_object *obj, struct bpf_program *prog) |
109cea5a AN |
6312 | { |
6313 | struct bpf_insn *insn = prog->insns; | |
6314 | enum bpf_func_id func_id; | |
6315 | int i; | |
6316 | ||
67234743 AS |
6317 | if (obj->gen_loader) |
6318 | return 0; | |
6319 | ||
109cea5a AN |
6320 | for (i = 0; i < prog->insns_cnt; i++, insn++) { |
6321 | if (!insn_is_helper_call(insn, &func_id)) | |
6322 | continue; | |
6323 | ||
6324 | /* on kernels that don't yet support | |
6325 | * bpf_probe_read_{kernel,user}[_str] helpers, fall back | |
6326 | * to bpf_probe_read() which works well for old kernels | |
6327 | */ | |
6328 | switch (func_id) { | |
6329 | case BPF_FUNC_probe_read_kernel: | |
6330 | case BPF_FUNC_probe_read_user: | |
9ca1f56a | 6331 | if (!kernel_supports(obj, FEAT_PROBE_READ_KERN)) |
109cea5a AN |
6332 | insn->imm = BPF_FUNC_probe_read; |
6333 | break; | |
6334 | case BPF_FUNC_probe_read_kernel_str: | |
6335 | case BPF_FUNC_probe_read_user_str: | |
9ca1f56a | 6336 | if (!kernel_supports(obj, FEAT_PROBE_READ_KERN)) |
109cea5a AN |
6337 | insn->imm = BPF_FUNC_probe_read_str; |
6338 | break; | |
6339 | default: | |
6340 | break; | |
6341 | } | |
6342 | } | |
6343 | return 0; | |
6344 | } | |
6345 | ||
15ea31fa AN |
6346 | static int libbpf_find_attach_btf_id(struct bpf_program *prog, const char *attach_name, |
6347 | int *btf_obj_fd, int *btf_type_id); | |
12d9466d AN |
6348 | |
6349 | /* this is called as prog->sec_def->preload_fn for libbpf-supported sec_defs */ | |
6350 | static int libbpf_preload_prog(struct bpf_program *prog, | |
6351 | struct bpf_prog_load_params *attr, long cookie) | |
6352 | { | |
15ea31fa AN |
6353 | enum sec_def_flags def = cookie; |
6354 | ||
12d9466d | 6355 | /* old kernels might not support specifying expected_attach_type */ |
15ea31fa | 6356 | if ((def & SEC_EXP_ATTACH_OPT) && !kernel_supports(prog->obj, FEAT_EXP_ATTACH_TYPE)) |
12d9466d AN |
6357 | attr->expected_attach_type = 0; |
6358 | ||
15ea31fa | 6359 | if (def & SEC_SLEEPABLE) |
12d9466d AN |
6360 | attr->prog_flags |= BPF_F_SLEEPABLE; |
6361 | ||
6362 | if ((prog->type == BPF_PROG_TYPE_TRACING || | |
6363 | prog->type == BPF_PROG_TYPE_LSM || | |
6364 | prog->type == BPF_PROG_TYPE_EXT) && !prog->attach_btf_id) { | |
6365 | int btf_obj_fd = 0, btf_type_id = 0, err; | |
15ea31fa | 6366 | const char *attach_name; |
12d9466d | 6367 | |
15ea31fa AN |
6368 | attach_name = strchr(prog->sec_name, '/') + 1; |
6369 | err = libbpf_find_attach_btf_id(prog, attach_name, &btf_obj_fd, &btf_type_id); | |
12d9466d AN |
6370 | if (err) |
6371 | return err; | |
6372 | ||
6373 | /* cache resolved BTF FD and BTF type ID in the prog */ | |
6374 | prog->attach_btf_obj_fd = btf_obj_fd; | |
6375 | prog->attach_btf_id = btf_type_id; | |
6376 | ||
6377 | /* but by now libbpf common logic is not utilizing | |
6378 | * prog->atach_btf_obj_fd/prog->attach_btf_id anymore because | |
6379 | * this callback is called after attrs were populated by | |
6380 | * libbpf, so this callback has to update attr explicitly here | |
6381 | */ | |
6382 | attr->attach_btf_obj_fd = btf_obj_fd; | |
6383 | attr->attach_btf_id = btf_type_id; | |
6384 | } | |
6385 | return 0; | |
6386 | } | |
6387 | ||
55cffde2 | 6388 | static int |
2993e051 | 6389 | load_program(struct bpf_program *prog, struct bpf_insn *insns, int insns_cnt, |
f0187f0b | 6390 | char *license, __u32 kern_version, int *pfd) |
55cffde2 | 6391 | { |
6aef10a4 | 6392 | struct bpf_prog_load_params load_attr = {}; |
25bbbd7a | 6393 | struct bpf_object *obj = prog->obj; |
1ce6a9fc | 6394 | char *cp, errmsg[STRERR_BUFSIZE]; |
8395f320 SF |
6395 | size_t log_buf_size = 0; |
6396 | char *log_buf = NULL; | |
12d9466d | 6397 | int btf_fd, ret, err; |
55cffde2 | 6398 | |
80b2b5c3 AM |
6399 | if (prog->type == BPF_PROG_TYPE_UNSPEC) { |
6400 | /* | |
6401 | * The program type must be set. Most likely we couldn't find a proper | |
6402 | * section definition at load time, and thus we didn't infer the type. | |
6403 | */ | |
6404 | pr_warn("prog '%s': missing BPF prog type, check ELF section name '%s'\n", | |
6405 | prog->name, prog->sec_name); | |
6406 | return -EINVAL; | |
6407 | } | |
6408 | ||
fba01a06 AN |
6409 | if (!insns || !insns_cnt) |
6410 | return -EINVAL; | |
6411 | ||
2993e051 | 6412 | load_attr.prog_type = prog->type; |
12d9466d | 6413 | load_attr.expected_attach_type = prog->expected_attach_type; |
25bbbd7a | 6414 | if (kernel_supports(obj, FEAT_PROG_NAME)) |
5b32a23e | 6415 | load_attr.name = prog->name; |
d7be143b | 6416 | load_attr.insns = insns; |
6aef10a4 | 6417 | load_attr.insn_cnt = insns_cnt; |
d7be143b | 6418 | load_attr.license = license; |
6aef10a4 | 6419 | load_attr.attach_btf_id = prog->attach_btf_id; |
12d9466d AN |
6420 | load_attr.attach_prog_fd = prog->attach_prog_fd; |
6421 | load_attr.attach_btf_obj_fd = prog->attach_btf_obj_fd; | |
6aef10a4 AN |
6422 | load_attr.attach_btf_id = prog->attach_btf_id; |
6423 | load_attr.kern_version = kern_version; | |
6424 | load_attr.prog_ifindex = prog->prog_ifindex; | |
6425 | ||
0f0e55d8 | 6426 | /* specify func_info/line_info only if kernel supports them */ |
25bbbd7a AN |
6427 | btf_fd = bpf_object__btf_fd(obj); |
6428 | if (btf_fd >= 0 && kernel_supports(obj, FEAT_BTF_FUNC)) { | |
0f0e55d8 AN |
6429 | load_attr.prog_btf_fd = btf_fd; |
6430 | load_attr.func_info = prog->func_info; | |
6431 | load_attr.func_info_rec_size = prog->func_info_rec_size; | |
6432 | load_attr.func_info_cnt = prog->func_info_cnt; | |
6433 | load_attr.line_info = prog->line_info; | |
6434 | load_attr.line_info_rec_size = prog->line_info_rec_size; | |
6435 | load_attr.line_info_cnt = prog->line_info_cnt; | |
6436 | } | |
da11b417 | 6437 | load_attr.log_level = prog->log_level; |
04656198 | 6438 | load_attr.prog_flags = prog->prog_flags; |
25bbbd7a | 6439 | load_attr.fd_array = obj->fd_array; |
55cffde2 | 6440 | |
12d9466d AN |
6441 | /* adjust load_attr if sec_def provides custom preload callback */ |
6442 | if (prog->sec_def && prog->sec_def->preload_fn) { | |
6443 | err = prog->sec_def->preload_fn(prog, &load_attr, prog->sec_def->cookie); | |
6444 | if (err < 0) { | |
6445 | pr_warn("prog '%s': failed to prepare load attributes: %d\n", | |
6446 | prog->name, err); | |
6447 | return err; | |
6448 | } | |
6449 | } | |
6450 | ||
25bbbd7a AN |
6451 | if (obj->gen_loader) { |
6452 | bpf_gen__prog_load(obj->gen_loader, &load_attr, | |
6453 | prog - obj->programs); | |
67234743 AS |
6454 | *pfd = -1; |
6455 | return 0; | |
6456 | } | |
da11b417 | 6457 | retry_load: |
8395f320 SF |
6458 | if (log_buf_size) { |
6459 | log_buf = malloc(log_buf_size); | |
6460 | if (!log_buf) | |
6461 | return -ENOMEM; | |
6462 | ||
6463 | *log_buf = 0; | |
6464 | } | |
55cffde2 | 6465 | |
6aef10a4 AN |
6466 | load_attr.log_buf = log_buf; |
6467 | load_attr.log_buf_sz = log_buf_size; | |
6468 | ret = libbpf__bpf_prog_load(&load_attr); | |
55cffde2 WN |
6469 | |
6470 | if (ret >= 0) { | |
8395f320 | 6471 | if (log_buf && load_attr.log_level) |
da11b417 | 6472 | pr_debug("verifier log:\n%s", log_buf); |
5d23328d | 6473 | |
25bbbd7a AN |
6474 | if (obj->has_rodata && kernel_supports(obj, FEAT_PROG_BIND_MAP)) { |
6475 | struct bpf_map *map; | |
6476 | int i; | |
6477 | ||
6478 | for (i = 0; i < obj->nr_maps; i++) { | |
6479 | map = &prog->obj->maps[i]; | |
6480 | if (map->libbpf_type != LIBBPF_MAP_RODATA) | |
6481 | continue; | |
5d23328d | 6482 | |
25bbbd7a AN |
6483 | if (bpf_prog_bind_map(ret, bpf_map__fd(map), NULL)) { |
6484 | cp = libbpf_strerror_r(errno, errmsg, sizeof(errmsg)); | |
6485 | pr_warn("prog '%s': failed to bind .rodata map: %s\n", | |
6486 | prog->name, cp); | |
6487 | /* Don't fail hard if can't bind rodata. */ | |
6488 | } | |
5d23328d YZ |
6489 | } |
6490 | } | |
6491 | ||
55cffde2 WN |
6492 | *pfd = ret; |
6493 | ret = 0; | |
6494 | goto out; | |
6495 | } | |
6496 | ||
8395f320 SF |
6497 | if (!log_buf || errno == ENOSPC) { |
6498 | log_buf_size = max((size_t)BPF_LOG_BUF_SIZE, | |
6499 | log_buf_size << 1); | |
6500 | ||
da11b417 AS |
6501 | free(log_buf); |
6502 | goto retry_load; | |
6503 | } | |
ef05afa6 | 6504 | ret = errno ? -errno : -LIBBPF_ERRNO__LOAD; |
24d6a808 | 6505 | cp = libbpf_strerror_r(errno, errmsg, sizeof(errmsg)); |
be18010e | 6506 | pr_warn("load bpf program failed: %s\n", cp); |
dc3a2d25 | 6507 | pr_perm_msg(ret); |
55cffde2 | 6508 | |
6371ca3b WN |
6509 | if (log_buf && log_buf[0] != '\0') { |
6510 | ret = -LIBBPF_ERRNO__VERIFY; | |
be18010e KW |
6511 | pr_warn("-- BEGIN DUMP LOG ---\n"); |
6512 | pr_warn("\n%s\n", log_buf); | |
6513 | pr_warn("-- END LOG --\n"); | |
6aef10a4 | 6514 | } else if (load_attr.insn_cnt >= BPF_MAXINSNS) { |
be18010e | 6515 | pr_warn("Program too large (%zu insns), at most %d insns\n", |
6aef10a4 | 6516 | load_attr.insn_cnt, BPF_MAXINSNS); |
705fa219 | 6517 | ret = -LIBBPF_ERRNO__PROG2BIG; |
4f33ddb4 | 6518 | } else if (load_attr.prog_type != BPF_PROG_TYPE_KPROBE) { |
705fa219 | 6519 | /* Wrong program type? */ |
4f33ddb4 | 6520 | int fd; |
705fa219 | 6521 | |
4f33ddb4 THJ |
6522 | load_attr.prog_type = BPF_PROG_TYPE_KPROBE; |
6523 | load_attr.expected_attach_type = 0; | |
6aef10a4 AN |
6524 | load_attr.log_buf = NULL; |
6525 | load_attr.log_buf_sz = 0; | |
6526 | fd = libbpf__bpf_prog_load(&load_attr); | |
4f33ddb4 THJ |
6527 | if (fd >= 0) { |
6528 | close(fd); | |
6529 | ret = -LIBBPF_ERRNO__PROGTYPE; | |
6530 | goto out; | |
6531 | } | |
55cffde2 WN |
6532 | } |
6533 | ||
6534 | out: | |
6535 | free(log_buf); | |
6536 | return ret; | |
6537 | } | |
6538 | ||
67234743 AS |
/* Record all ksym externs referenced by @prog's relocations with the
 * gen_loader, so the light skeleton can resolve them at load time.
 * Returns 0 on success, -ENOTSUP for typeless ksym variables (not
 * representable by the loader yet).
 */
static int bpf_program__record_externs(struct bpf_program *prog)
{
	struct bpf_object *obj = prog->obj;
	int i;

	for (i = 0; i < prog->nr_reloc; i++) {
		struct reloc_desc *relo = &prog->reloc_desc[i];
		/* for extern relos, sym_off indexes into obj->externs */
		struct extern_desc *ext = &obj->externs[relo->sym_off];

		switch (relo->type) {
		case RELO_EXTERN_VAR:
			/* only ksym variables are recorded; kconfig externs
			 * are materialized elsewhere
			 */
			if (ext->type != EXT_KSYM)
				continue;
			if (!ext->ksym.type_id) {
				pr_warn("typeless ksym %s is not supported yet\n",
					ext->name);
				return -ENOTSUP;
			}
			bpf_gen__record_extern(obj->gen_loader, ext->name, ext->is_weak,
					       BTF_KIND_VAR, relo->insn_idx);
			break;
		case RELO_EXTERN_FUNC:
			bpf_gen__record_extern(obj->gen_loader, ext->name, ext->is_weak,
					       BTF_KIND_FUNC, relo->insn_idx);
			break;
		default:
			/* non-extern relocations are handled elsewhere */
			continue;
		}
	}
	return 0;
}
6570 | ||
/* Load one bpf_program into the kernel, honoring the legacy multi-instance
 * preprocessor API. Without a preprocessor a single instance is loaded
 * directly; with one, each instance's instructions are generated by the
 * preprocessor callback first. On return (success or failure) the original
 * instruction buffer is freed — a program cannot be re-loaded afterwards.
 */
int bpf_program__load(struct bpf_program *prog, char *license, __u32 kern_ver)
{
	int err = 0, fd, i;

	if (prog->obj->loaded) {
		pr_warn("prog '%s': can't load after object was loaded\n", prog->name);
		return libbpf_err(-EINVAL);
	}

	/* lazily set up the single-instance fd array */
	if (prog->instances.nr < 0 || !prog->instances.fds) {
		if (prog->preprocessor) {
			/* a preprocessor requires instances to have been
			 * set up via bpf_program__set_prep() beforehand
			 */
			pr_warn("Internal error: can't load program '%s'\n",
				prog->name);
			return libbpf_err(-LIBBPF_ERRNO__INTERNAL);
		}

		prog->instances.fds = malloc(sizeof(int));
		if (!prog->instances.fds) {
			pr_warn("Not enough memory for BPF fds\n");
			return libbpf_err(-ENOMEM);
		}
		prog->instances.nr = 1;
		prog->instances.fds[0] = -1;
	}

	if (!prog->preprocessor) {
		/* warn but proceed: only instance 0 is used below */
		if (prog->instances.nr != 1) {
			pr_warn("prog '%s': inconsistent nr(%d) != 1\n",
				prog->name, prog->instances.nr);
		}
		if (prog->obj->gen_loader)
			bpf_program__record_externs(prog);
		err = load_program(prog, prog->insns, prog->insns_cnt,
				   license, kern_ver, &fd);
		if (!err)
			prog->instances.fds[0] = fd;
		goto out;
	}

	/* legacy path: one kernel program per preprocessed instance */
	for (i = 0; i < prog->instances.nr; i++) {
		struct bpf_prog_prep_result result;
		bpf_program_prep_t preprocessor = prog->preprocessor;

		memset(&result, 0, sizeof(result));
		err = preprocessor(prog, i, prog->insns,
				   prog->insns_cnt, &result);
		if (err) {
			pr_warn("Preprocessing the %dth instance of program '%s' failed\n",
				i, prog->name);
			goto out;
		}

		/* preprocessor may legitimately skip an instance */
		if (!result.new_insn_ptr || !result.new_insn_cnt) {
			pr_debug("Skip loading the %dth instance of program '%s'\n",
				 i, prog->name);
			prog->instances.fds[i] = -1;
			if (result.pfd)
				*result.pfd = -1;
			continue;
		}

		err = load_program(prog, result.new_insn_ptr,
				   result.new_insn_cnt, license, kern_ver, &fd);
		if (err) {
			pr_warn("Loading the %dth instance of program '%s' failed\n",
				i, prog->name);
			goto out;
		}

		if (result.pfd)
			*result.pfd = fd;
		prog->instances.fds[i] = fd;
	}
out:
	if (err)
		pr_warn("failed to load program '%s'\n", prog->name);
	/* instructions are consumed either way; loading is one-shot */
	zfree(&prog->insns);
	prog->insns_cnt = 0;
	return libbpf_err(err);
}
6651 | ||
6652 | static int | |
60276f98 | 6653 | bpf_object__load_progs(struct bpf_object *obj, int log_level) |
55cffde2 | 6654 | { |
d9297581 | 6655 | struct bpf_program *prog; |
55cffde2 WN |
6656 | size_t i; |
6657 | int err; | |
6658 | ||
109cea5a AN |
6659 | for (i = 0; i < obj->nr_programs; i++) { |
6660 | prog = &obj->programs[i]; | |
6661 | err = bpf_object__sanitize_prog(obj, prog); | |
6662 | if (err) | |
6663 | return err; | |
6664 | } | |
6665 | ||
55cffde2 | 6666 | for (i = 0; i < obj->nr_programs; i++) { |
d9297581 | 6667 | prog = &obj->programs[i]; |
c3c55696 | 6668 | if (prog_is_subprog(obj, prog)) |
48cca7e4 | 6669 | continue; |
d9297581 | 6670 | if (!prog->load) { |
9c0f8cbd | 6671 | pr_debug("prog '%s': skipped loading\n", prog->name); |
d9297581 AN |
6672 | continue; |
6673 | } | |
6674 | prog->log_level |= log_level; | |
6675 | err = bpf_program__load(prog, obj->license, obj->kern_version); | |
55cffde2 WN |
6676 | if (err) |
6677 | return err; | |
6678 | } | |
67234743 AS |
6679 | if (obj->gen_loader) |
6680 | bpf_object__free_relocs(obj); | |
55cffde2 WN |
6681 | return 0; |
6682 | } | |
6683 | ||
25498a19 AN |
6684 | static const struct bpf_sec_def *find_sec_def(const char *sec_name); |
6685 | ||
91b4d1d1 AN |
/* Post-open initialization of all programs: guess each program's type and
 * expected attach type from its ELF section name, propagate attach_prog_fd
 * from open opts for tracing/ext programs, and run any per-section init
 * callback. Programs with unrecognized sections are left for the user to
 * configure manually.
 */
static int bpf_object_init_progs(struct bpf_object *obj, const struct bpf_object_open_opts *opts)
{
	struct bpf_program *prog;
	int err;

	bpf_object__for_each_program(prog, obj) {
		prog->sec_def = find_sec_def(prog->sec_name);
		if (!prog->sec_def) {
			/* couldn't guess, but user might manually specify */
			pr_debug("prog '%s': unrecognized ELF section name '%s'\n",
				 prog->name, prog->sec_name);
			continue;
		}

		bpf_program__set_type(prog, prog->sec_def->prog_type);
		bpf_program__set_expected_attach_type(prog, prog->sec_def->expected_attach_type);

/* attach_prog_fd is a deprecated field being accessed internally;
 * silence the deprecation warning for this one access
 */
#pragma GCC diagnostic push
#pragma GCC diagnostic ignored "-Wdeprecated-declarations"
		if (prog->sec_def->prog_type == BPF_PROG_TYPE_TRACING ||
		    prog->sec_def->prog_type == BPF_PROG_TYPE_EXT)
			prog->attach_prog_fd = OPTS_GET(opts, attach_prog_fd, 0);
#pragma GCC diagnostic pop

		/* sec_def can have custom callback which should be called
		 * after bpf_program is initialized to adjust its properties
		 */
		if (prog->sec_def->init_fn) {
			err = prog->sec_def->init_fn(prog, prog->sec_def->cookie);
			if (err < 0) {
				pr_warn("prog '%s': failed to initialize: %d\n",
					prog->name, err);
				return err;
			}
		}
	}

	return 0;
}
6725 | ||
/* Common open path for both file-backed and memory-backed objects.
 * Parses the ELF, collects externs/maps/programs/relocations, and returns
 * a fully "opened" (but not yet loaded) bpf_object. Returns ERR_PTR() on
 * failure; callers wrap with libbpf_ptr()/libbpf_err_ptr() as needed.
 */
static struct bpf_object *
__bpf_object__open(const char *path, const void *obj_buf, size_t obj_buf_sz,
		   const struct bpf_object_open_opts *opts)
{
	const char *obj_name, *kconfig, *btf_tmp_path;
	struct bpf_object *obj;
	char tmp_name[64];
	int err;

	if (elf_version(EV_CURRENT) == EV_NONE) {
		pr_warn("failed to init libelf for %s\n",
			path ? : "(mem buf)");
		return ERR_PTR(-LIBBPF_ERRNO__LIBELF);
	}

	if (!OPTS_VALID(opts, bpf_object_open_opts))
		return ERR_PTR(-EINVAL);

	obj_name = OPTS_GET(opts, object_name, NULL);
	if (obj_buf) {
		/* memory-backed object: synthesize a name from the buffer
		 * address/size if the user didn't provide one, and use the
		 * name as the "path" for diagnostics
		 */
		if (!obj_name) {
			snprintf(tmp_name, sizeof(tmp_name), "%lx-%lx",
				 (unsigned long)obj_buf,
				 (unsigned long)obj_buf_sz);
			obj_name = tmp_name;
		}
		path = obj_name;
		pr_debug("loading object '%s' from buffer\n", obj_name);
	}

	obj = bpf_object__new(path, obj_buf, obj_buf_sz, obj_name);
	if (IS_ERR(obj))
		return obj;

	/* duplicate user-provided custom BTF path; obj owns the copy */
	btf_tmp_path = OPTS_GET(opts, btf_custom_path, NULL);
	if (btf_tmp_path) {
		if (strlen(btf_tmp_path) >= PATH_MAX) {
			err = -ENAMETOOLONG;
			goto out;
		}
		obj->btf_custom_path = strdup(btf_tmp_path);
		if (!obj->btf_custom_path) {
			err = -ENOMEM;
			goto out;
		}
	}

	/* duplicate extra kconfig overrides, if any */
	kconfig = OPTS_GET(opts, kconfig, NULL);
	if (kconfig) {
		obj->kconfig = strdup(kconfig);
		if (!obj->kconfig) {
			err = -ENOMEM;
			goto out;
		}
	}

	/* open pipeline; each step runs only if all previous ones succeeded */
	err = bpf_object__elf_init(obj);
	err = err ? : bpf_object__check_endianness(obj);
	err = err ? : bpf_object__elf_collect(obj);
	err = err ? : bpf_object__collect_externs(obj);
	err = err ? : bpf_object__finalize_btf(obj);
	err = err ? : bpf_object__init_maps(obj, opts);
	err = err ? : bpf_object_init_progs(obj, opts);
	err = err ? : bpf_object__collect_relos(obj);
	if (err)
		goto out;

	/* ELF data is fully ingested; release libelf resources early */
	bpf_object__elf_finish(obj);

	return obj;
out:
	bpf_object__close(obj);
	return ERR_PTR(err);
}
6800 | ||
5e61f270 AN |
6801 | static struct bpf_object * |
6802 | __bpf_object__open_xattr(struct bpf_object_open_attr *attr, int flags) | |
1a5e3fb1 | 6803 | { |
e00aca65 | 6804 | DECLARE_LIBBPF_OPTS(bpf_object_open_opts, opts, |
291ee02b AN |
6805 | .relaxed_maps = flags & MAPS_RELAX_COMPAT, |
6806 | ); | |
6807 | ||
1a5e3fb1 | 6808 | /* param validation */ |
07f2d4ea | 6809 | if (!attr->file) |
1a5e3fb1 WN |
6810 | return NULL; |
6811 | ||
07f2d4ea | 6812 | pr_debug("loading %s\n", attr->file); |
291ee02b | 6813 | return __bpf_object__open(attr->file, NULL, 0, &opts); |
c034a177 JF |
6814 | } |
6815 | ||
/* Public legacy entry point; converts internal ERR_PTR convention to the
 * NULL + errno convention via libbpf_ptr().
 */
struct bpf_object *bpf_object__open_xattr(struct bpf_object_open_attr *attr)
{
	return libbpf_ptr(__bpf_object__open_xattr(attr, 0));
}
6820 | ||
6821 | struct bpf_object *bpf_object__open(const char *path) | |
6822 | { | |
6823 | struct bpf_object_open_attr attr = { | |
6824 | .file = path, | |
6825 | .prog_type = BPF_PROG_TYPE_UNSPEC, | |
6826 | }; | |
1a5e3fb1 | 6827 | |
e9fc3ce9 | 6828 | return libbpf_ptr(__bpf_object__open_xattr(&attr, 0)); |
6c956392 WN |
6829 | } |
6830 | ||
2ce8450e | 6831 | struct bpf_object * |
01af3bf0 | 6832 | bpf_object__open_file(const char *path, const struct bpf_object_open_opts *opts) |
2ce8450e | 6833 | { |
2ce8450e | 6834 | if (!path) |
e9fc3ce9 | 6835 | return libbpf_err_ptr(-EINVAL); |
2ce8450e AN |
6836 | |
6837 | pr_debug("loading %s\n", path); | |
6838 | ||
e9fc3ce9 | 6839 | return libbpf_ptr(__bpf_object__open(path, NULL, 0, opts)); |
2ce8450e AN |
6840 | } |
6841 | ||
6842 | struct bpf_object * | |
6843 | bpf_object__open_mem(const void *obj_buf, size_t obj_buf_sz, | |
01af3bf0 | 6844 | const struct bpf_object_open_opts *opts) |
6c956392 | 6845 | { |
2ce8450e | 6846 | if (!obj_buf || obj_buf_sz == 0) |
e9fc3ce9 | 6847 | return libbpf_err_ptr(-EINVAL); |
6c956392 | 6848 | |
e9fc3ce9 | 6849 | return libbpf_ptr(__bpf_object__open(NULL, obj_buf, obj_buf_sz, opts)); |
2ce8450e AN |
6850 | } |
6851 | ||
6852 | struct bpf_object * | |
6853 | bpf_object__open_buffer(const void *obj_buf, size_t obj_buf_sz, | |
6854 | const char *name) | |
6855 | { | |
e00aca65 | 6856 | DECLARE_LIBBPF_OPTS(bpf_object_open_opts, opts, |
2ce8450e AN |
6857 | .object_name = name, |
6858 | /* wrong default, but backwards-compatible */ | |
6859 | .relaxed_maps = true, | |
6860 | ); | |
6861 | ||
6862 | /* returning NULL is wrong, but backwards-compatible */ | |
6863 | if (!obj_buf || obj_buf_sz == 0) | |
e9fc3ce9 | 6864 | return errno = EINVAL, NULL; |
6c956392 | 6865 | |
e9fc3ce9 | 6866 | return libbpf_ptr(__bpf_object__open(NULL, obj_buf, obj_buf_sz, &opts)); |
1a5e3fb1 WN |
6867 | } |
6868 | ||
/* Release all kernel resources held by a loaded object: close every map fd
 * and unload every program. The object itself stays valid and can be
 * inspected afterwards.
 */
static int bpf_object_unload(struct bpf_object *obj)
{
	size_t i;

	if (!obj)
		return libbpf_err(-EINVAL);

	for (i = 0; i < obj->nr_maps; i++) {
		zclose(obj->maps[i].fd);
		/* struct_ops maps carry a kernel-side shadow copy */
		if (obj->maps[i].st_ops)
			zfree(&obj->maps[i].st_ops->kern_vdata);
	}

	for (i = 0; i < obj->nr_programs; i++)
		bpf_program__unload(&obj->programs[i]);

	return 0;
}

/* public name is an ELF alias so internal callers can use the non-__
 * variant without going through the ABI symbol
 */
int bpf_object__unload(struct bpf_object *obj) __attribute__((alias("bpf_object_unload")));
6889 | ||
0d13bfce AN |
6890 | static int bpf_object__sanitize_maps(struct bpf_object *obj) |
6891 | { | |
6892 | struct bpf_map *m; | |
6893 | ||
6894 | bpf_object__for_each_map(m, obj) { | |
6895 | if (!bpf_map__is_internal(m)) | |
6896 | continue; | |
9ca1f56a | 6897 | if (!kernel_supports(obj, FEAT_GLOBAL_DATA)) { |
0d13bfce AN |
6898 | pr_warn("kernel doesn't support global data\n"); |
6899 | return -ENOTSUP; | |
6900 | } | |
9ca1f56a | 6901 | if (!kernel_supports(obj, FEAT_ARRAY_MMAP)) |
0d13bfce AN |
6902 | m->def.map_flags ^= BPF_F_MMAPABLE; |
6903 | } | |
6904 | ||
6905 | return 0; | |
6906 | } | |
6907 | ||
1c0c7074 AN |
/* Resolve typeless ksym extern addresses by scanning /proc/kallsyms.
 * Only externs whose BTF entry is a VAR are considered. A symbol seen
 * twice with different addresses is treated as ambiguous and rejected.
 * Returns 0 on success, negative error otherwise.
 */
static int bpf_object__read_kallsyms_file(struct bpf_object *obj)
{
	char sym_type, sym_name[500];
	unsigned long long sym_addr;
	const struct btf_type *t;
	struct extern_desc *ext;
	int ret, err = 0;
	FILE *f;

	f = fopen("/proc/kallsyms", "r");
	if (!f) {
		err = -errno;
		pr_warn("failed to open /proc/kallsyms: %d\n", err);
		return err;
	}

	while (true) {
		/* format: "<addr> <type> <name>[ <module>]"; trailing
		 * fields beyond the name are discarded by %*[^\n]
		 */
		ret = fscanf(f, "%llx %c %499s%*[^\n]\n",
			     &sym_addr, &sym_type, sym_name);
		if (ret == EOF && feof(f))
			break;
		if (ret != 3) {
			pr_warn("failed to read kallsyms entry: %d\n", ret);
			err = -EINVAL;
			goto out;
		}

		ext = find_extern_by_name(obj, sym_name);
		if (!ext || ext->type != EXT_KSYM)
			continue;

		/* func ksyms are resolved via BTF, not kallsyms */
		t = btf__type_by_id(obj->btf, ext->btf_id);
		if (!btf_is_var(t))
			continue;

		if (ext->is_set && ext->ksym.addr != sym_addr) {
			pr_warn("extern (ksym) '%s' resolution is ambiguous: 0x%llx or 0x%llx\n",
				sym_name, ext->ksym.addr, sym_addr);
			err = -EINVAL;
			goto out;
		}
		if (!ext->is_set) {
			ext->is_set = true;
			ext->ksym.addr = sym_addr;
			pr_debug("extern (ksym) %s=0x%llx\n", sym_name, sym_addr);
		}
	}

out:
	fclose(f);
	return err;
}
6960 | ||
774e132e MKL |
/* Look up @ksym_name of BTF @kind, first in vmlinux BTF and then, on a
 * miss, in each loaded kernel module's BTF. On success returns the type id
 * and sets *res_btf to the containing BTF and *res_mod_btf to the module
 * BTF descriptor (NULL if found in vmlinux). Returns -ESRCH if not found.
 */
static int find_ksym_btf_id(struct bpf_object *obj, const char *ksym_name,
			    __u16 kind, struct btf **res_btf,
			    struct module_btf **res_mod_btf)
{
	struct module_btf *mod_btf;
	struct btf *btf;
	int i, id, err;

	btf = obj->btf_vmlinux;
	mod_btf = NULL;
	id = btf__find_by_name_kind(btf, ksym_name, kind);

	if (id == -ENOENT) {
		/* lazily load module BTFs only when vmlinux lookup misses */
		err = load_module_btfs(obj);
		if (err)
			return err;

		for (i = 0; i < obj->btf_module_cnt; i++) {
			/* we assume module_btf's BTF FD is always >0 */
			mod_btf = &obj->btf_modules[i];
			btf = mod_btf->btf;
			/* _own: search only the module's types, not the
			 * vmlinux base it was split from
			 */
			id = btf__find_by_name_kind_own(btf, ksym_name, kind);
			if (id != -ENOENT)
				break;
		}
	}
	if (id <= 0)
		return -ESRCH;

	*res_btf = btf;
	*res_mod_btf = mod_btf;
	return id;
}
6994 | ||
/* Resolve a typed ksym variable extern against kernel/module BTF: find the
 * kernel-side variable, verify its type is compatible with the local BTF
 * type, and record the kernel BTF object fd + type id on the extern.
 * Weak externs that aren't found resolve successfully to "unset".
 */
static int bpf_object__resolve_ksym_var_btf_id(struct bpf_object *obj,
					       struct extern_desc *ext)
{
	const struct btf_type *targ_var, *targ_type;
	__u32 targ_type_id, local_type_id;
	struct module_btf *mod_btf = NULL;
	const char *targ_var_name;
	struct btf *btf = NULL;
	int id, err;

	id = find_ksym_btf_id(obj, ext->name, BTF_KIND_VAR, &btf, &mod_btf);
	if (id < 0) {
		/* missing weak ksym is not an error; it stays unset */
		if (id == -ESRCH && ext->is_weak)
			return 0;
		pr_warn("extern (var ksym) '%s': not found in kernel BTF\n",
			ext->name);
		return id;
	}

	/* find local type_id */
	local_type_id = ext->ksym.type_id;

	/* find target type_id; compare underlying types, not the VARs */
	targ_var = btf__type_by_id(btf, id);
	targ_var_name = btf__name_by_offset(btf, targ_var->name_off);
	targ_type = skip_mods_and_typedefs(btf, targ_var->type, &targ_type_id);

	err = bpf_core_types_are_compat(obj->btf, local_type_id,
					btf, targ_type_id);
	if (err <= 0) {
		const struct btf_type *local_type;
		const char *targ_name, *local_name;

		local_type = btf__type_by_id(obj->btf, local_type_id);
		local_name = btf__name_by_offset(obj->btf, local_type->name_off);
		targ_name = btf__name_by_offset(btf, targ_type->name_off);

		pr_warn("extern (var ksym) '%s': incompatible types, expected [%d] %s %s, but kernel has [%d] %s %s\n",
			ext->name, local_type_id,
			btf_kind_str(local_type), local_name, targ_type_id,
			btf_kind_str(targ_type), targ_name);
		return -EINVAL;
	}

	ext->is_set = true;
	/* fd 0 means "vmlinux BTF" by convention */
	ext->ksym.kernel_btf_obj_fd = mod_btf ? mod_btf->fd : 0;
	ext->ksym.kernel_btf_id = id;
	pr_debug("extern (var ksym) '%s': resolved to [%d] %s %s\n",
		 ext->name, id, btf_kind_str(targ_var), targ_var_name);

	return 0;
}
d370bbe1 | 7047 | |
5bd022ec MKL |
7048 | static int bpf_object__resolve_ksym_func_btf_id(struct bpf_object *obj, |
7049 | struct extern_desc *ext) | |
7050 | { | |
7051 | int local_func_proto_id, kfunc_proto_id, kfunc_id; | |
9dbe6015 | 7052 | struct module_btf *mod_btf = NULL; |
5bd022ec MKL |
7053 | const struct btf_type *kern_func; |
7054 | struct btf *kern_btf = NULL; | |
9dbe6015 | 7055 | int ret; |
5bd022ec MKL |
7056 | |
7057 | local_func_proto_id = ext->ksym.type_id; | |
7058 | ||
9dbe6015 | 7059 | kfunc_id = find_ksym_btf_id(obj, ext->name, BTF_KIND_FUNC, &kern_btf, &mod_btf); |
5bd022ec | 7060 | if (kfunc_id < 0) { |
466b2e13 KKD |
7061 | if (kfunc_id == -ESRCH && ext->is_weak) |
7062 | return 0; | |
7063 | pr_warn("extern (func ksym) '%s': not found in kernel or module BTFs\n", | |
5bd022ec MKL |
7064 | ext->name); |
7065 | return kfunc_id; | |
7066 | } | |
7067 | ||
5bd022ec MKL |
7068 | kern_func = btf__type_by_id(kern_btf, kfunc_id); |
7069 | kfunc_proto_id = kern_func->type; | |
7070 | ||
7071 | ret = bpf_core_types_are_compat(obj->btf, local_func_proto_id, | |
7072 | kern_btf, kfunc_proto_id); | |
7073 | if (ret <= 0) { | |
7074 | pr_warn("extern (func ksym) '%s': func_proto [%d] incompatible with kernel [%d]\n", | |
7075 | ext->name, local_func_proto_id, kfunc_proto_id); | |
7076 | return -EINVAL; | |
7077 | } | |
7078 | ||
9dbe6015 KKD |
7079 | /* set index for module BTF fd in fd_array, if unset */ |
7080 | if (mod_btf && !mod_btf->fd_array_idx) { | |
7081 | /* insn->off is s16 */ | |
7082 | if (obj->fd_array_cnt == INT16_MAX) { | |
7083 | pr_warn("extern (func ksym) '%s': module BTF fd index %d too big to fit in bpf_insn offset\n", | |
7084 | ext->name, mod_btf->fd_array_idx); | |
7085 | return -E2BIG; | |
7086 | } | |
7087 | /* Cannot use index 0 for module BTF fd */ | |
7088 | if (!obj->fd_array_cnt) | |
7089 | obj->fd_array_cnt = 1; | |
7090 | ||
7091 | ret = libbpf_ensure_mem((void **)&obj->fd_array, &obj->fd_array_cap, sizeof(int), | |
7092 | obj->fd_array_cnt + 1); | |
7093 | if (ret) | |
7094 | return ret; | |
7095 | mod_btf->fd_array_idx = obj->fd_array_cnt; | |
7096 | /* we assume module BTF FD is always >0 */ | |
7097 | obj->fd_array[obj->fd_array_cnt++] = mod_btf->fd; | |
7098 | } | |
7099 | ||
5bd022ec | 7100 | ext->is_set = true; |
5bd022ec | 7101 | ext->ksym.kernel_btf_id = kfunc_id; |
9dbe6015 | 7102 | ext->ksym.btf_fd_idx = mod_btf ? mod_btf->fd_array_idx : 0; |
5bd022ec MKL |
7103 | pr_debug("extern (func ksym) '%s': resolved to kernel [%d]\n", |
7104 | ext->name, kfunc_id); | |
7105 | ||
7106 | return 0; | |
7107 | } | |
7108 | ||
933d1aa3 MKL |
7109 | static int bpf_object__resolve_ksyms_btf_id(struct bpf_object *obj) |
7110 | { | |
5bd022ec | 7111 | const struct btf_type *t; |
933d1aa3 MKL |
7112 | struct extern_desc *ext; |
7113 | int i, err; | |
7114 | ||
7115 | for (i = 0; i < obj->nr_extern; i++) { | |
7116 | ext = &obj->externs[i]; | |
7117 | if (ext->type != EXT_KSYM || !ext->ksym.type_id) | |
7118 | continue; | |
7119 | ||
67234743 AS |
7120 | if (obj->gen_loader) { |
7121 | ext->is_set = true; | |
7122 | ext->ksym.kernel_btf_obj_fd = 0; | |
7123 | ext->ksym.kernel_btf_id = 0; | |
7124 | continue; | |
7125 | } | |
5bd022ec MKL |
7126 | t = btf__type_by_id(obj->btf, ext->btf_id); |
7127 | if (btf_is_var(t)) | |
7128 | err = bpf_object__resolve_ksym_var_btf_id(obj, ext); | |
7129 | else | |
7130 | err = bpf_object__resolve_ksym_func_btf_id(obj, ext); | |
933d1aa3 MKL |
7131 | if (err) |
7132 | return err; | |
d370bbe1 HL |
7133 | } |
7134 | return 0; | |
7135 | } | |
7136 | ||
/* Resolve all extern symbols of @obj before load: kconfig externs from
 * @extra_kconfig overrides and/or the running kernel's config, typeless
 * ksyms from /proc/kallsyms, and typed ksyms from kernel/module BTF.
 * Unresolved strong externs fail with -ESRCH; weak ones default to zero.
 */
static int bpf_object__resolve_externs(struct bpf_object *obj,
				       const char *extra_kconfig)
{
	bool need_config = false, need_kallsyms = false;
	bool need_vmlinux_btf = false;
	struct extern_desc *ext;
	void *kcfg_data = NULL;
	int err, i;

	if (obj->nr_extern == 0)
		return 0;

	/* kconfig extern values live in the .kconfig map's mmaped area */
	if (obj->kconfig_map_idx >= 0)
		kcfg_data = obj->maps[obj->kconfig_map_idx].mmaped;

	/* pass 1: classify externs and resolve LINUX_KERNEL_VERSION inline */
	for (i = 0; i < obj->nr_extern; i++) {
		ext = &obj->externs[i];

		if (ext->type == EXT_KCFG &&
		    strcmp(ext->name, "LINUX_KERNEL_VERSION") == 0) {
			void *ext_val = kcfg_data + ext->kcfg.data_off;
			__u32 kver = get_kernel_version();

			if (!kver) {
				pr_warn("failed to get kernel version\n");
				return -EINVAL;
			}
			err = set_kcfg_value_num(ext, ext_val, kver);
			if (err)
				return err;
			pr_debug("extern (kcfg) %s=0x%x\n", ext->name, kver);
		} else if (ext->type == EXT_KCFG && str_has_pfx(ext->name, "CONFIG_")) {
			need_config = true;
		} else if (ext->type == EXT_KSYM) {
			/* typed ksyms resolve via BTF, typeless via kallsyms */
			if (ext->ksym.type_id)
				need_vmlinux_btf = true;
			else
				need_kallsyms = true;
		} else {
			pr_warn("unrecognized extern '%s'\n", ext->name);
			return -EINVAL;
		}
	}
	/* user-provided kconfig overrides take precedence; only fall back
	 * to the system config if something is still unset afterwards
	 */
	if (need_config && extra_kconfig) {
		err = bpf_object__read_kconfig_mem(obj, extra_kconfig, kcfg_data);
		if (err)
			return -EINVAL;
		need_config = false;
		for (i = 0; i < obj->nr_extern; i++) {
			ext = &obj->externs[i];
			if (ext->type == EXT_KCFG && !ext->is_set) {
				need_config = true;
				break;
			}
		}
	}
	if (need_config) {
		err = bpf_object__read_kconfig_file(obj, kcfg_data);
		if (err)
			return -EINVAL;
	}
	if (need_kallsyms) {
		err = bpf_object__read_kallsyms_file(obj);
		if (err)
			return -EINVAL;
	}
	if (need_vmlinux_btf) {
		err = bpf_object__resolve_ksyms_btf_id(obj);
		if (err)
			return -EINVAL;
	}
	/* pass 2: enforce that strong externs actually got resolved */
	for (i = 0; i < obj->nr_extern; i++) {
		ext = &obj->externs[i];

		if (!ext->is_set && !ext->is_weak) {
			pr_warn("extern %s (strong) not resolved\n", ext->name);
			return -ESRCH;
		} else if (!ext->is_set) {
			pr_debug("extern %s (weak) not resolved, defaulting to zero\n",
				 ext->name);
		}
	}

	return 0;
}
7222 | ||
/* Load an opened object into the kernel: probe basic BPF support, resolve
 * externs, sanitize/load BTF, create maps, relocate and load programs.
 * Loading is one-shot; the object is marked loaded whether or not it
 * succeeded. On failure, maps auto-pinned during this load are unpinned
 * and all kernel resources are released.
 */
int bpf_object__load_xattr(struct bpf_object_load_attr *attr)
{
	struct bpf_object *obj;
	int err, i;

	if (!attr)
		return libbpf_err(-EINVAL);
	obj = attr->obj;
	if (!obj)
		return libbpf_err(-EINVAL);

	if (obj->loaded) {
		pr_warn("object '%s': load can't be attempted twice\n", obj->name);
		return libbpf_err(-EINVAL);
	}

	if (obj->gen_loader)
		bpf_gen__init(obj->gen_loader, attr->log_level);

	/* load pipeline; each step runs only if all previous ones succeeded */
	err = bpf_object__probe_loading(obj);
	err = err ? : bpf_object__load_vmlinux_btf(obj, false);
	err = err ? : bpf_object__resolve_externs(obj, obj->kconfig);
	err = err ? : bpf_object__sanitize_and_load_btf(obj);
	err = err ? : bpf_object__sanitize_maps(obj);
	err = err ? : bpf_object__init_kern_struct_ops_maps(obj);
	err = err ? : bpf_object__create_maps(obj);
	/* per-object custom BTF path overrides the per-load one */
	err = err ? : bpf_object__relocate(obj, obj->btf_custom_path ? : attr->target_btf_path);
	err = err ? : bpf_object__load_progs(obj, attr->log_level);

	if (obj->gen_loader) {
		/* reset FDs: gen_loader only records actions, the fds it
		 * saw are placeholders, not real kernel objects
		 */
		btf__set_fd(obj->btf, -1);
		for (i = 0; i < obj->nr_maps; i++)
			obj->maps[i].fd = -1;
		if (!err)
			err = bpf_gen__finish(obj->gen_loader);
	}

	/* clean up fd_array */
	zfree(&obj->fd_array);

	/* clean up module BTFs */
	for (i = 0; i < obj->btf_module_cnt; i++) {
		close(obj->btf_modules[i].fd);
		btf__free(obj->btf_modules[i].btf);
		free(obj->btf_modules[i].name);
	}
	free(obj->btf_modules);

	/* clean up vmlinux BTF */
	btf__free(obj->btf_vmlinux);
	obj->btf_vmlinux = NULL;

	obj->loaded = true; /* doesn't matter if successfully or not */

	if (err)
		goto out;

	return 0;
out:
	/* unpin any maps that were auto-pinned during load */
	for (i = 0; i < obj->nr_maps; i++)
		if (obj->maps[i].pinned && !obj->maps[i].reused)
			bpf_map__unpin(&obj->maps[i], NULL);

	bpf_object_unload(obj);
	pr_warn("failed to load object '%s'\n", obj->path);
	return libbpf_err(err);
}
7292 | ||
60276f98 QM |
7293 | int bpf_object__load(struct bpf_object *obj) |
7294 | { | |
7295 | struct bpf_object_load_attr attr = { | |
7296 | .obj = obj, | |
7297 | }; | |
7298 | ||
7299 | return bpf_object__load_xattr(&attr); | |
7300 | } | |
7301 | ||
196f8487 THJ |
7302 | static int make_parent_dir(const char *path) |
7303 | { | |
7304 | char *cp, errmsg[STRERR_BUFSIZE]; | |
7305 | char *dname, *dir; | |
7306 | int err = 0; | |
7307 | ||
7308 | dname = strdup(path); | |
7309 | if (dname == NULL) | |
7310 | return -ENOMEM; | |
7311 | ||
7312 | dir = dirname(dname); | |
7313 | if (mkdir(dir, 0700) && errno != EEXIST) | |
7314 | err = -errno; | |
7315 | ||
7316 | free(dname); | |
7317 | if (err) { | |
7318 | cp = libbpf_strerror_r(-err, errmsg, sizeof(errmsg)); | |
7319 | pr_warn("failed to mkdir %s: %s\n", path, cp); | |
7320 | } | |
7321 | return err; | |
7322 | } | |
7323 | ||
f367540c JS |
7324 | static int check_path(const char *path) |
7325 | { | |
1ce6a9fc | 7326 | char *cp, errmsg[STRERR_BUFSIZE]; |
f367540c JS |
7327 | struct statfs st_fs; |
7328 | char *dname, *dir; | |
7329 | int err = 0; | |
7330 | ||
7331 | if (path == NULL) | |
7332 | return -EINVAL; | |
7333 | ||
7334 | dname = strdup(path); | |
7335 | if (dname == NULL) | |
7336 | return -ENOMEM; | |
7337 | ||
7338 | dir = dirname(dname); | |
7339 | if (statfs(dir, &st_fs)) { | |
24d6a808 | 7340 | cp = libbpf_strerror_r(errno, errmsg, sizeof(errmsg)); |
be18010e | 7341 | pr_warn("failed to statfs %s: %s\n", dir, cp); |
f367540c JS |
7342 | err = -errno; |
7343 | } | |
7344 | free(dname); | |
7345 | ||
7346 | if (!err && st_fs.f_type != BPF_FS_MAGIC) { | |
be18010e | 7347 | pr_warn("specified path %s is not on BPF FS\n", path); |
f367540c JS |
7348 | err = -EINVAL; |
7349 | } | |
7350 | ||
7351 | return err; | |
7352 | } | |
7353 | ||
/* Pin one loaded instance of @prog at @path on bpffs, creating the parent
 * directory if needed. @instance indexes prog->instances.fds.
 * Returns 0 on success, negative error (also set as errno) on failure.
 */
int bpf_program__pin_instance(struct bpf_program *prog, const char *path,
			      int instance)
{
	char *cp, errmsg[STRERR_BUFSIZE];
	int err;

	err = make_parent_dir(path);
	if (err)
		return libbpf_err(err);

	/* path must live on a BPF filesystem for pinning to work */
	err = check_path(path);
	if (err)
		return libbpf_err(err);

	if (prog == NULL) {
		pr_warn("invalid program pointer\n");
		return libbpf_err(-EINVAL);
	}

	if (instance < 0 || instance >= prog->instances.nr) {
		pr_warn("invalid prog instance %d of prog %s (max %d)\n",
			instance, prog->name, prog->instances.nr);
		return libbpf_err(-EINVAL);
	}

	if (bpf_obj_pin(prog->instances.fds[instance], path)) {
		/* capture errno before any call that might clobber it */
		err = -errno;
		cp = libbpf_strerror_r(err, errmsg, sizeof(errmsg));
		pr_warn("failed to pin program: %s\n", cp);
		return libbpf_err(err);
	}
	pr_debug("pinned program '%s'\n", path);

	return 0;
}
7389 | ||
0c19a9fb SF |
/* Remove the bpffs pin of a single program instance at @path.
 * Returns 0 or a negative error (errno also set via libbpf_err()).
 */
int bpf_program__unpin_instance(struct bpf_program *prog, const char *path,
				int instance)
{
	int err;

	err = check_path(path);
	if (err)
		return libbpf_err(err);

	if (prog == NULL) {
		pr_warn("invalid program pointer\n");
		return libbpf_err(-EINVAL);
	}

	if (instance < 0 || instance >= prog->instances.nr) {
		pr_warn("invalid prog instance %d of prog %s (max %d)\n",
			instance, prog->name, prog->instances.nr);
		return libbpf_err(-EINVAL);
	}

	/* unlinking the pin drops the FS reference to the program */
	err = unlink(path);
	if (err != 0)
		return libbpf_err(-errno);

	pr_debug("unpinned program '%s'\n", path);

	return 0;
}
7418 | ||
f367540c JS |
/* Pin a program at @path. For a single instance the FD is pinned
 * directly at @path; with multiple instances @path becomes a directory
 * containing one pin per instance ("<path>/0", "<path>/1", ...).
 * On failure, instance pins created so far are rolled back and the
 * directory removed.
 */
int bpf_program__pin(struct bpf_program *prog, const char *path)
{
	int i, err;

	err = make_parent_dir(path);
	if (err)
		return libbpf_err(err);

	err = check_path(path);
	if (err)
		return libbpf_err(err);

	if (prog == NULL) {
		pr_warn("invalid program pointer\n");
		return libbpf_err(-EINVAL);
	}

	if (prog->instances.nr <= 0) {
		pr_warn("no instances of prog %s to pin\n", prog->name);
		return libbpf_err(-EINVAL);
	}

	if (prog->instances.nr == 1) {
		/* don't create subdirs when pinning single instance */
		return bpf_program__pin_instance(prog, path, 0);
	}

	for (i = 0; i < prog->instances.nr; i++) {
		char buf[PATH_MAX];
		int len;

		len = snprintf(buf, PATH_MAX, "%s/%d", path, i);
		if (len < 0) {
			err = -EINVAL;
			goto err_unpin;
		} else if (len >= PATH_MAX) {
			err = -ENAMETOOLONG;
			goto err_unpin;
		}

		err = bpf_program__pin_instance(prog, buf, i);
		if (err)
			goto err_unpin;
	}

	return 0;

err_unpin:
	/* roll back: unpin every instance pinned before the failure */
	for (i = i - 1; i >= 0; i--) {
		char buf[PATH_MAX];
		int len;

		len = snprintf(buf, PATH_MAX, "%s/%d", path, i);
		if (len < 0)
			continue;
		else if (len >= PATH_MAX)
			continue;

		bpf_program__unpin_instance(prog, buf, i);
	}

	/* best-effort removal of the (now empty) pin directory */
	rmdir(path);

	return libbpf_err(err);
}
7484 | ||
/* Undo bpf_program__pin(): remove the single pin at @path, or the
 * per-instance pins under the @path directory plus the directory
 * itself for multi-instance programs.
 */
int bpf_program__unpin(struct bpf_program *prog, const char *path)
{
	int i, err;

	err = check_path(path);
	if (err)
		return libbpf_err(err);

	if (prog == NULL) {
		pr_warn("invalid program pointer\n");
		return libbpf_err(-EINVAL);
	}

	if (prog->instances.nr <= 0) {
		pr_warn("no instances of prog %s to pin\n", prog->name);
		return libbpf_err(-EINVAL);
	}

	if (prog->instances.nr == 1) {
		/* don't create subdirs when pinning single instance */
		return bpf_program__unpin_instance(prog, path, 0);
	}

	for (i = 0; i < prog->instances.nr; i++) {
		char buf[PATH_MAX];
		int len;

		len = snprintf(buf, PATH_MAX, "%s/%d", path, i);
		if (len < 0)
			return libbpf_err(-EINVAL);
		else if (len >= PATH_MAX)
			return libbpf_err(-ENAMETOOLONG);

		err = bpf_program__unpin_instance(prog, buf, i);
		if (err)
			return err;
	}

	/* the per-instance pin directory must now be empty */
	err = rmdir(path);
	if (err)
		return libbpf_err(-errno);

	return 0;
}
7529 | ||
b6989f35 JS |
/* Pin @map at @path on bpffs. If the map already has a pin_path set,
 * @path may be NULL or must match it; a map already pinned at the same
 * path is a no-op, while re-pinning an explicitly-pathed pinned map is
 * rejected. On first pin, @path is recorded as the map's pin_path.
 */
int bpf_map__pin(struct bpf_map *map, const char *path)
{
	char *cp, errmsg[STRERR_BUFSIZE];
	int err;

	if (map == NULL) {
		pr_warn("invalid map pointer\n");
		return libbpf_err(-EINVAL);
	}

	if (map->pin_path) {
		/* a pre-declared pin path must not conflict with @path */
		if (path && strcmp(path, map->pin_path)) {
			pr_warn("map '%s' already has pin path '%s' different from '%s'\n",
				bpf_map__name(map), map->pin_path, path);
			return libbpf_err(-EINVAL);
		} else if (map->pinned) {
			pr_debug("map '%s' already pinned at '%s'; not re-pinning\n",
				 bpf_map__name(map), map->pin_path);
			return 0;
		}
	} else {
		if (!path) {
			pr_warn("missing a path to pin map '%s' at\n",
				bpf_map__name(map));
			return libbpf_err(-EINVAL);
		} else if (map->pinned) {
			pr_warn("map '%s' already pinned\n", bpf_map__name(map));
			return libbpf_err(-EEXIST);
		}

		/* remember the pin location for later unpin/re-pin */
		map->pin_path = strdup(path);
		if (!map->pin_path) {
			err = -errno;
			goto out_err;
		}
	}

	err = make_parent_dir(map->pin_path);
	if (err)
		return libbpf_err(err);

	err = check_path(map->pin_path);
	if (err)
		return libbpf_err(err);

	if (bpf_obj_pin(map->fd, map->pin_path)) {
		err = -errno;
		goto out_err;
	}

	map->pinned = true;
	pr_debug("pinned map '%s'\n", map->pin_path);

	return 0;

out_err:
	cp = libbpf_strerror_r(-err, errmsg, sizeof(errmsg));
	pr_warn("failed to pin map: %s\n", cp);
	return libbpf_err(err);
}
7590 | ||
0c19a9fb SF |
/* Remove @map's bpffs pin. @path may be NULL if the map has a recorded
 * pin_path; if both are given they must match. Clears the pinned flag
 * on success.
 */
int bpf_map__unpin(struct bpf_map *map, const char *path)
{
	int err;

	if (map == NULL) {
		pr_warn("invalid map pointer\n");
		return libbpf_err(-EINVAL);
	}

	if (map->pin_path) {
		if (path && strcmp(path, map->pin_path)) {
			pr_warn("map '%s' already has pin path '%s' different from '%s'\n",
				bpf_map__name(map), map->pin_path, path);
			return libbpf_err(-EINVAL);
		}
		/* fall back to the recorded pin location */
		path = map->pin_path;
	} else if (!path) {
		pr_warn("no path to unpin map '%s' from\n",
			bpf_map__name(map));
		return libbpf_err(-EINVAL);
	}

	err = check_path(path);
	if (err)
		return libbpf_err(err);

	err = unlink(path);
	if (err != 0)
		return libbpf_err(-errno);

	map->pinned = false;
	pr_debug("unpinned map '%s' from '%s'\n", bpf_map__name(map), path);

	return 0;
}
7626 | ||
4580b25f THJ |
7627 | int bpf_map__set_pin_path(struct bpf_map *map, const char *path) |
7628 | { | |
7629 | char *new = NULL; | |
7630 | ||
7631 | if (path) { | |
7632 | new = strdup(path); | |
7633 | if (!new) | |
e9fc3ce9 | 7634 | return libbpf_err(-errno); |
4580b25f THJ |
7635 | } |
7636 | ||
7637 | free(map->pin_path); | |
7638 | map->pin_path = new; | |
7639 | return 0; | |
7640 | } | |
7641 | ||
/* Legacy accessor for the map's pin path; superseded by bpf_map__pin_path(). */
const char *bpf_map__get_pin_path(const struct bpf_map *map)
{
	return map->pin_path;
}
7646 | ||
e244d34d EL |
/* Return the bpffs path the map is (or will be) pinned at, or NULL. */
const char *bpf_map__pin_path(const struct bpf_map *map)
{
	return map->pin_path;
}
7651 | ||
4580b25f THJ |
/* True if the map is currently pinned on bpffs. */
bool bpf_map__is_pinned(const struct bpf_map *map)
{
	return map->pinned;
}
7656 | ||
9cf309c5 THJ |
/* bpffs disallows periods in path names; replace each '.' with '_'
 * in-place.
 */
static void sanitize_pin_path(char *s)
{
	char *dot;

	for (dot = strchr(s, '.'); dot; dot = strchr(dot + 1, '.'))
		*dot = '_';
}
7666 | ||
/* Pin all of @obj's maps. With non-NULL @path each map is pinned at
 * "<path>/<map_name>" (dots sanitized for bpffs); with NULL @path only
 * maps that declared their own pin_path are pinned. On failure, maps
 * pinned so far are unpinned again.
 */
int bpf_object__pin_maps(struct bpf_object *obj, const char *path)
{
	struct bpf_map *map;
	int err;

	if (!obj)
		return libbpf_err(-ENOENT);

	/* maps only have FDs to pin after the object is loaded */
	if (!obj->loaded) {
		pr_warn("object not yet loaded; load it first\n");
		return libbpf_err(-ENOENT);
	}

	bpf_object__for_each_map(map, obj) {
		char *pin_path = NULL;
		char buf[PATH_MAX];

		if (path) {
			int len;

			len = snprintf(buf, PATH_MAX, "%s/%s", path,
				       bpf_map__name(map));
			if (len < 0) {
				err = -EINVAL;
				goto err_unpin_maps;
			} else if (len >= PATH_MAX) {
				err = -ENAMETOOLONG;
				goto err_unpin_maps;
			}
			sanitize_pin_path(buf);
			pin_path = buf;
		} else if (!map->pin_path) {
			continue;
		}

		err = bpf_map__pin(map, pin_path);
		if (err)
			goto err_unpin_maps;
	}

	return 0;

err_unpin_maps:
	/* walk backwards from the failing map, undoing earlier pins */
	while ((map = bpf_map__prev(map, obj))) {
		if (!map->pin_path)
			continue;

		bpf_map__unpin(map, NULL);
	}

	return libbpf_err(err);
}
7719 | ||
/* Unpin all of @obj's maps. Path resolution mirrors
 * bpf_object__pin_maps(): non-NULL @path unpins "<path>/<map_name>"
 * (sanitized), NULL @path unpins only maps with a recorded pin_path.
 */
int bpf_object__unpin_maps(struct bpf_object *obj, const char *path)
{
	struct bpf_map *map;
	int err;

	if (!obj)
		return libbpf_err(-ENOENT);

	bpf_object__for_each_map(map, obj) {
		char *pin_path = NULL;
		char buf[PATH_MAX];

		if (path) {
			int len;

			len = snprintf(buf, PATH_MAX, "%s/%s", path,
				       bpf_map__name(map));
			if (len < 0)
				return libbpf_err(-EINVAL);
			else if (len >= PATH_MAX)
				return libbpf_err(-ENAMETOOLONG);
			sanitize_pin_path(buf);
			pin_path = buf;
		} else if (!map->pin_path) {
			continue;
		}

		err = bpf_map__unpin(map, pin_path);
		if (err)
			return libbpf_err(err);
	}

	return 0;
}
7754 | ||
/* Pin every program of @obj at "<path>/<pin_name>". On failure,
 * programs pinned so far are unpinned again (reverse iteration from
 * the failing program).
 */
int bpf_object__pin_programs(struct bpf_object *obj, const char *path)
{
	struct bpf_program *prog;
	int err;

	if (!obj)
		return libbpf_err(-ENOENT);

	/* programs only have FDs to pin after the object is loaded */
	if (!obj->loaded) {
		pr_warn("object not yet loaded; load it first\n");
		return libbpf_err(-ENOENT);
	}

	bpf_object__for_each_program(prog, obj) {
		char buf[PATH_MAX];
		int len;

		len = snprintf(buf, PATH_MAX, "%s/%s", path,
			       prog->pin_name);
		if (len < 0) {
			err = -EINVAL;
			goto err_unpin_programs;
		} else if (len >= PATH_MAX) {
			err = -ENAMETOOLONG;
			goto err_unpin_programs;
		}

		err = bpf_program__pin(prog, buf);
		if (err)
			goto err_unpin_programs;
	}

	return 0;

err_unpin_programs:
	/* roll back pins created before the failure */
	while ((prog = bpf_program__prev(prog, obj))) {
		char buf[PATH_MAX];
		int len;

		len = snprintf(buf, PATH_MAX, "%s/%s", path,
			       prog->pin_name);
		if (len < 0)
			continue;
		else if (len >= PATH_MAX)
			continue;

		bpf_program__unpin(prog, buf);
	}

	return libbpf_err(err);
}
7806 | ||
/* Unpin every program of @obj from "<path>/<pin_name>". */
int bpf_object__unpin_programs(struct bpf_object *obj, const char *path)
{
	struct bpf_program *prog;
	int err;

	if (!obj)
		return libbpf_err(-ENOENT);

	bpf_object__for_each_program(prog, obj) {
		char buf[PATH_MAX];
		int len;

		len = snprintf(buf, PATH_MAX, "%s/%s", path,
			       prog->pin_name);
		if (len < 0)
			return libbpf_err(-EINVAL);
		else if (len >= PATH_MAX)
			return libbpf_err(-ENAMETOOLONG);

		err = bpf_program__unpin(prog, buf);
		if (err)
			return libbpf_err(err);
	}

	return 0;
}
7833 | ||
0c19a9fb SF |
/* Pin all maps and all programs of @obj under @path. If pinning the
 * programs fails, already-pinned maps are unpinned so a failed call
 * leaves no residue on bpffs.
 */
int bpf_object__pin(struct bpf_object *obj, const char *path)
{
	int err = bpf_object__pin_maps(obj, path);

	if (err)
		return libbpf_err(err);

	err = bpf_object__pin_programs(obj, path);
	if (!err)
		return 0;

	/* undo the map pins on program-pin failure */
	bpf_object__unpin_maps(obj, path);
	return libbpf_err(err);
}
7850 | ||
2d39d7c5 AN |
/* Free all resources owned by @map: private data, (recursively) the
 * inner map template, init slots, mmap'ed region, struct_ops state,
 * name strings, pin path, and the map FD.
 */
static void bpf_map__destroy(struct bpf_map *map)
{
	/* let the user tear down their private data first */
	if (map->clear_priv)
		map->clear_priv(map, map->priv);
	map->priv = NULL;
	map->clear_priv = NULL;

	/* map-in-map: recursively destroy the inner map template */
	if (map->inner_map) {
		bpf_map__destroy(map->inner_map);
		zfree(&map->inner_map);
	}

	zfree(&map->init_slots);
	map->init_slots_sz = 0;

	/* unmap memory-mapped (global data) maps */
	if (map->mmaped) {
		munmap(map->mmaped, bpf_map_mmap_sz(map));
		map->mmaped = NULL;
	}

	if (map->st_ops) {
		zfree(&map->st_ops->data);
		zfree(&map->st_ops->progs);
		zfree(&map->st_ops->kern_func_off);
		zfree(&map->st_ops);
	}

	zfree(&map->name);
	zfree(&map->real_name);
	zfree(&map->pin_path);

	if (map->fd >= 0)
		zclose(map->fd);
}
7885 | ||
1a5e3fb1 WN |
/* Tear down @obj completely: user private data, gen-loader state, ELF
 * state, loaded kernel objects, BTF, all maps and programs, and the
 * object itself. Safe to call with NULL or an ERR_PTR value.
 */
void bpf_object__close(struct bpf_object *obj)
{
	size_t i;

	if (IS_ERR_OR_NULL(obj))
		return;

	if (obj->clear_priv)
		obj->clear_priv(obj, obj->priv);

	bpf_gen__free(obj->gen_loader);
	bpf_object__elf_finish(obj);
	/* detach/close kernel-side programs before freeing metadata */
	bpf_object_unload(obj);
	btf__free(obj->btf);
	btf_ext__free(obj->btf_ext);

	for (i = 0; i < obj->nr_maps; i++)
		bpf_map__destroy(&obj->maps[i]);

	zfree(&obj->btf_custom_path);
	zfree(&obj->kconfig);
	zfree(&obj->externs);
	obj->nr_extern = 0;

	zfree(&obj->maps);
	obj->nr_maps = 0;

	if (obj->programs && obj->nr_programs) {
		for (i = 0; i < obj->nr_programs; i++)
			bpf_program__exit(&obj->programs[i]);
	}
	zfree(&obj->programs);

	/* remove from the global bpf_objects_list before freeing */
	list_del(&obj->list);
	free(obj);
}
aa9b1ac3 | 7922 | |
9a208eff WN |
/* Iterate over all open bpf_objects (global bpf_objects_list).
 * Pass NULL to start; returns NULL when the list is exhausted.
 */
struct bpf_object *
bpf_object__next(struct bpf_object *prev)
{
	struct bpf_object *next;

	if (!prev)
		next = list_first_entry(&bpf_objects_list,
					struct bpf_object,
					list);
	else
		next = list_next_entry(prev, list);

	/* Empty list is noticed here so don't need checking on entry. */
	if (&next->list == &bpf_objects_list)
		return NULL;

	return next;
}
7941 | ||
a324aae3 | 7942 | const char *bpf_object__name(const struct bpf_object *obj) |
acf860ae | 7943 | { |
e9fc3ce9 | 7944 | return obj ? obj->name : libbpf_err_ptr(-EINVAL); |
acf860ae WN |
7945 | } |
7946 | ||
a324aae3 | 7947 | unsigned int bpf_object__kversion(const struct bpf_object *obj) |
45825d8a | 7948 | { |
a7fe0450 | 7949 | return obj ? obj->kern_version : 0; |
45825d8a WN |
7950 | } |
7951 | ||
a324aae3 | 7952 | struct btf *bpf_object__btf(const struct bpf_object *obj) |
789f6bab AI |
7953 | { |
7954 | return obj ? obj->btf : NULL; | |
7955 | } | |
7956 | ||
8a138aed MKL |
7957 | int bpf_object__btf_fd(const struct bpf_object *obj) |
7958 | { | |
7959 | return obj->btf ? btf__fd(obj->btf) : -1; | |
7960 | } | |
7961 | ||
155f556d RDT |
/* Override the kernel version the object will be loaded with.
 * Rejected once the object is already loaded.
 */
int bpf_object__set_kversion(struct bpf_object *obj, __u32 kern_version)
{
	if (obj->loaded)
		return libbpf_err(-EINVAL);

	obj->kern_version = kern_version;

	return 0;
}
7971 | ||
10931d24 WN |
/* Attach user private data to @obj; any previous private data is
 * released via its clear callback first.
 */
int bpf_object__set_priv(struct bpf_object *obj, void *priv,
			 bpf_object_clear_priv_t clear_priv)
{
	if (obj->priv && obj->clear_priv)
		obj->clear_priv(obj, obj->priv);

	obj->priv = priv;
	obj->clear_priv = clear_priv;
	return 0;
}
7982 | ||
a324aae3 | 7983 | void *bpf_object__priv(const struct bpf_object *obj) |
10931d24 | 7984 | { |
e9fc3ce9 | 7985 | return obj ? obj->priv : libbpf_err_ptr(-EINVAL); |
10931d24 WN |
7986 | } |
7987 | ||
67234743 AS |
7988 | int bpf_object__gen_loader(struct bpf_object *obj, struct gen_loader_opts *opts) |
7989 | { | |
7990 | struct bpf_gen *gen; | |
7991 | ||
7992 | if (!opts) | |
7993 | return -EFAULT; | |
7994 | if (!OPTS_VALID(opts, gen_loader_opts)) | |
7995 | return -EINVAL; | |
7996 | gen = calloc(sizeof(*gen), 1); | |
7997 | if (!gen) | |
7998 | return -ENOMEM; | |
7999 | gen->opts = opts; | |
8000 | obj->gen_loader = gen; | |
8001 | return 0; | |
8002 | } | |
8003 | ||
/* Step one program forward or backward within @obj's programs array.
 * @p == NULL starts iteration from the appropriate end; returns NULL
 * when iteration is exhausted or @p does not belong to @obj.
 */
static struct bpf_program *
__bpf_program__iter(const struct bpf_program *p, const struct bpf_object *obj,
		    bool forward)
{
	size_t nr_programs = obj->nr_programs;
	ssize_t idx;

	if (!nr_programs)
		return NULL;

	if (!p)
		/* Iter from the beginning */
		return forward ? &obj->programs[0] :
			&obj->programs[nr_programs - 1];

	if (p->obj != obj) {
		pr_warn("error: program handler doesn't match object\n");
		/* set errno and return NULL in one expression */
		return errno = EINVAL, NULL;
	}

	/* programs are contiguous, so pointer arithmetic gives the index */
	idx = (p - obj->programs) + (forward ? 1 : -1);
	if (idx >= obj->nr_programs || idx < 0)
		return NULL;
	return &obj->programs[idx];
}
8029 | ||
/* Legacy alias for bpf_object__next_program() (argument order differs). */
struct bpf_program *
bpf_program__next(struct bpf_program *prev, const struct bpf_object *obj)
{
	return bpf_object__next_program(obj, prev);
}
8035 | ||
8036 | struct bpf_program * | |
8037 | bpf_object__next_program(const struct bpf_object *obj, struct bpf_program *prev) | |
eac7d845 JK |
8038 | { |
8039 | struct bpf_program *prog = prev; | |
8040 | ||
8041 | do { | |
a83d6e76 | 8042 | prog = __bpf_program__iter(prog, obj, true); |
c3c55696 | 8043 | } while (prog && prog_is_subprog(obj, prog)); |
0c19a9fb SF |
8044 | |
8045 | return prog; | |
8046 | } | |
8047 | ||
/* Legacy alias for bpf_object__prev_program() (argument order differs). */
struct bpf_program *
bpf_program__prev(struct bpf_program *next, const struct bpf_object *obj)
{
	return bpf_object__prev_program(obj, next);
}
8053 | ||
8054 | struct bpf_program * | |
8055 | bpf_object__prev_program(const struct bpf_object *obj, struct bpf_program *next) | |
0c19a9fb SF |
8056 | { |
8057 | struct bpf_program *prog = next; | |
8058 | ||
0c19a9fb | 8059 | do { |
a83d6e76 | 8060 | prog = __bpf_program__iter(prog, obj, false); |
c3c55696 | 8061 | } while (prog && prog_is_subprog(obj, prog)); |
eac7d845 JK |
8062 | |
8063 | return prog; | |
8064 | } | |
8065 | ||
edb13ed4 ACM |
/* Attach user private data to @prog; previous private data is released
 * via its clear callback first.
 */
int bpf_program__set_priv(struct bpf_program *prog, void *priv,
			  bpf_program_clear_priv_t clear_priv)
{
	if (prog->priv && prog->clear_priv)
		prog->clear_priv(prog, prog->priv);

	prog->priv = priv;
	prog->clear_priv = clear_priv;
	return 0;
}
8076 | ||
a324aae3 | 8077 | void *bpf_program__priv(const struct bpf_program *prog) |
aa9b1ac3 | 8078 | { |
e9fc3ce9 | 8079 | return prog ? prog->priv : libbpf_err_ptr(-EINVAL); |
aa9b1ac3 WN |
8080 | } |
8081 | ||
9aba3613 JK |
/* Set the network interface index used for offloaded program loading. */
void bpf_program__set_ifindex(struct bpf_program *prog, __u32 ifindex)
{
	prog->prog_ifindex = ifindex;
}
8086 | ||
01af3bf0 AN |
/* Return the program's function name (not its ELF section name). */
const char *bpf_program__name(const struct bpf_program *prog)
{
	return prog->name;
}
8091 | ||
52109584 AN |
/* Return the ELF section name the program was defined in. */
const char *bpf_program__section_name(const struct bpf_program *prog)
{
	return prog->sec_name;
}
8096 | ||
/* Legacy accessor for the program's section name. If @needs_copy is
 * true, returns a strdup'ed copy that the CALLER owns and must free;
 * otherwise returns the internal string.
 */
const char *bpf_program__title(const struct bpf_program *prog, bool needs_copy)
{
	const char *title;

	title = prog->sec_name;
	if (needs_copy) {
		/* ownership of the duplicate transfers to the caller */
		title = strdup(title);
		if (!title) {
			pr_warn("failed to strdup program title\n");
			return libbpf_err_ptr(-ENOMEM);
		}
	}

	return title;
}
8112 | ||
d9297581 AN |
/* True if the program will be loaded with the rest of the object. */
bool bpf_program__autoload(const struct bpf_program *prog)
{
	return prog->load;
}
8117 | ||
/* Enable/disable loading of this program with the object. Rejected
 * once the owning object is already loaded.
 */
int bpf_program__set_autoload(struct bpf_program *prog, bool autoload)
{
	if (prog->obj->loaded)
		return libbpf_err(-EINVAL);

	prog->load = autoload;
	return 0;
}
8126 | ||
/* Return the FD of the program's first (index 0) instance. */
int bpf_program__fd(const struct bpf_program *prog)
{
	return bpf_program__nth_fd(prog, 0);
}
8131 | ||
1a734efe THJ |
/* Return the program's size in bytes (instruction count * insn size). */
size_t bpf_program__size(const struct bpf_program *prog)
{
	return prog->insns_cnt * BPF_INSN_SZ;
}
8136 | ||
b580563e WN |
/* Install a pre-processor that will produce @nr_instances variants of
 * the program at load time. Must be called before loading; allocates
 * the per-instance FD array.
 */
int bpf_program__set_prep(struct bpf_program *prog, int nr_instances,
			  bpf_program_prep_t prep)
{
	int *instances_fds;

	if (nr_instances <= 0 || !prep)
		return libbpf_err(-EINVAL);

	if (prog->instances.nr > 0 || prog->instances.fds) {
		pr_warn("Can't set pre-processor after loading\n");
		return libbpf_err(-EINVAL);
	}

	instances_fds = malloc(sizeof(int) * nr_instances);
	if (!instances_fds) {
		pr_warn("alloc memory failed for fds\n");
		return libbpf_err(-ENOMEM);
	}

	/* fill all fd with -1: byte pattern 0xFF yields int value -1 */
	memset(instances_fds, -1, sizeof(int) * nr_instances);

	prog->instances.nr = nr_instances;
	prog->instances.fds = instances_fds;
	prog->preprocessor = prep;
	return 0;
}
8164 | ||
/* Return the FD of program instance @n, or a negative error if @n is
 * out of range or that instance was never created/loaded.
 */
int bpf_program__nth_fd(const struct bpf_program *prog, int n)
{
	int fd;

	if (!prog)
		return libbpf_err(-EINVAL);

	if (n >= prog->instances.nr || n < 0) {
		pr_warn("Can't get the %dth fd from program %s: only %d instances\n",
			n, prog->name, prog->instances.nr);
		return libbpf_err(-EINVAL);
	}

	/* -1 marks an instance the preprocessor chose not to create */
	fd = prog->instances.fds[n];
	if (fd < 0) {
		pr_warn("%dth instance of program '%s' is invalid\n",
			n, prog->name);
		return libbpf_err(-ENOENT);
	}

	return fd;
}
9d759a9b | 8187 | |
/* Return the program's BPF program type. */
enum bpf_prog_type bpf_program__get_type(const struct bpf_program *prog)
{
	return prog->type;
}
8192 | ||
/* Set the program's BPF program type (overrides section-based guess). */
void bpf_program__set_type(struct bpf_program *prog, enum bpf_prog_type type)
{
	prog->type = type;
}
8197 | ||
a324aae3 | 8198 | static bool bpf_program__is_type(const struct bpf_program *prog, |
5f44e4c8 WN |
8199 | enum bpf_prog_type type) |
8200 | { | |
8201 | return prog ? (prog->type == type) : false; | |
8202 | } | |
8203 | ||
a324aae3 AN |
/* Generate the bpf_program__set_<NAME>() and bpf_program__is_<NAME>()
 * accessor pair for a given program type.
 */
#define BPF_PROG_TYPE_FNS(NAME, TYPE)				\
int bpf_program__set_##NAME(struct bpf_program *prog)		\
{								\
	if (!prog)						\
		return libbpf_err(-EINVAL);			\
	bpf_program__set_type(prog, TYPE);			\
	return 0;						\
}								\
								\
bool bpf_program__is_##NAME(const struct bpf_program *prog)	\
{								\
	return bpf_program__is_type(prog, TYPE);		\
}								\

BPF_PROG_TYPE_FNS(socket_filter, BPF_PROG_TYPE_SOCKET_FILTER);
BPF_PROG_TYPE_FNS(lsm, BPF_PROG_TYPE_LSM);
BPF_PROG_TYPE_FNS(kprobe, BPF_PROG_TYPE_KPROBE);
BPF_PROG_TYPE_FNS(sched_cls, BPF_PROG_TYPE_SCHED_CLS);
BPF_PROG_TYPE_FNS(sched_act, BPF_PROG_TYPE_SCHED_ACT);
BPF_PROG_TYPE_FNS(tracepoint, BPF_PROG_TYPE_TRACEPOINT);
BPF_PROG_TYPE_FNS(raw_tracepoint, BPF_PROG_TYPE_RAW_TRACEPOINT);
BPF_PROG_TYPE_FNS(xdp, BPF_PROG_TYPE_XDP);
BPF_PROG_TYPE_FNS(perf_event, BPF_PROG_TYPE_PERF_EVENT);
BPF_PROG_TYPE_FNS(tracing, BPF_PROG_TYPE_TRACING);
BPF_PROG_TYPE_FNS(struct_ops, BPF_PROG_TYPE_STRUCT_OPS);
BPF_PROG_TYPE_FNS(extension, BPF_PROG_TYPE_EXT);
BPF_PROG_TYPE_FNS(sk_lookup, BPF_PROG_TYPE_SK_LOOKUP);
5f44e4c8 | 8231 | |
/* Return the attach type this program expects to be attached with. */
enum bpf_attach_type
bpf_program__get_expected_attach_type(const struct bpf_program *prog)
{
	return prog->expected_attach_type;
}
8237 | ||
16962b24 JF |
/* Override the attach type the program expects to be attached with. */
void bpf_program__set_expected_attach_type(struct bpf_program *prog,
					   enum bpf_attach_type type)
{
	prog->expected_attach_type = type;
}
8243 | ||
/* Build one entry of the section_defs[] table: maps an ELF section
 * name prefix to a program type, expected attach type, flags (stored
 * in .cookie) and optional attach callback passed via __VA_ARGS__.
 */
#define SEC_DEF(sec_pfx, ptype, atype, flags, ...) {			    \
	.sec = sec_pfx,							    \
	.prog_type = BPF_PROG_TYPE_##ptype,				    \
	.expected_attach_type = atype,					    \
	.cookie = (long)(flags),					    \
	.preload_fn = libbpf_preload_prog,				    \
	__VA_ARGS__							    \
}
8252 | ||
12d9466d AN |
8253 | static struct bpf_link *attach_kprobe(const struct bpf_program *prog, long cookie); |
8254 | static struct bpf_link *attach_tp(const struct bpf_program *prog, long cookie); | |
8255 | static struct bpf_link *attach_raw_tp(const struct bpf_program *prog, long cookie); | |
8256 | static struct bpf_link *attach_trace(const struct bpf_program *prog, long cookie); | |
8257 | static struct bpf_link *attach_lsm(const struct bpf_program *prog, long cookie); | |
8258 | static struct bpf_link *attach_iter(const struct bpf_program *prog, long cookie); | |
d7a18ea7 | 8259 | |
/* Table mapping ELF section names to BPF program/attach types.
 * Matching rules (implemented in find_sec_def()):
 *   - names ending in '/' match as a prefix of SEC("type/extras");
 *   - names ending in '+' match SEC("type") exactly or SEC("type/extras");
 *   - SEC_SLOPPY_PFX entries allow loose prefix matching unless
 *     LIBBPF_STRICT_SEC_NAME mode is on;
 *   - everything else must match exactly.
 * Entries with an attach callback support skeleton auto-attach.
 */
static const struct bpf_sec_def section_defs[] = {
	SEC_DEF("socket",		SOCKET_FILTER, 0, SEC_NONE | SEC_SLOPPY_PFX),
	SEC_DEF("sk_reuseport/migrate",	SK_REUSEPORT, BPF_SK_REUSEPORT_SELECT_OR_MIGRATE, SEC_ATTACHABLE | SEC_SLOPPY_PFX),
	SEC_DEF("sk_reuseport",		SK_REUSEPORT, BPF_SK_REUSEPORT_SELECT, SEC_ATTACHABLE | SEC_SLOPPY_PFX),
	SEC_DEF("kprobe/",		KPROBE,	0, SEC_NONE, attach_kprobe),
	SEC_DEF("uprobe/",		KPROBE,	0, SEC_NONE),
	SEC_DEF("kretprobe/",		KPROBE, 0, SEC_NONE, attach_kprobe),
	SEC_DEF("uretprobe/",		KPROBE, 0, SEC_NONE),
	SEC_DEF("tc",			SCHED_CLS, 0, SEC_NONE),
	SEC_DEF("classifier",		SCHED_CLS, 0, SEC_NONE | SEC_SLOPPY_PFX),
	SEC_DEF("action",		SCHED_ACT, 0, SEC_NONE | SEC_SLOPPY_PFX),
	SEC_DEF("tracepoint/",		TRACEPOINT, 0, SEC_NONE, attach_tp),
	SEC_DEF("tp/",			TRACEPOINT, 0, SEC_NONE, attach_tp),
	SEC_DEF("raw_tracepoint/",	RAW_TRACEPOINT, 0, SEC_NONE, attach_raw_tp),
	SEC_DEF("raw_tp/",		RAW_TRACEPOINT, 0, SEC_NONE, attach_raw_tp),
	SEC_DEF("raw_tracepoint.w/",	RAW_TRACEPOINT_WRITABLE, 0, SEC_NONE, attach_raw_tp),
	SEC_DEF("raw_tp.w/",		RAW_TRACEPOINT_WRITABLE, 0, SEC_NONE, attach_raw_tp),
	SEC_DEF("tp_btf/",		TRACING, BPF_TRACE_RAW_TP, SEC_ATTACH_BTF, attach_trace),
	SEC_DEF("fentry/",		TRACING, BPF_TRACE_FENTRY, SEC_ATTACH_BTF, attach_trace),
	SEC_DEF("fmod_ret/",		TRACING, BPF_MODIFY_RETURN, SEC_ATTACH_BTF, attach_trace),
	SEC_DEF("fexit/",		TRACING, BPF_TRACE_FEXIT, SEC_ATTACH_BTF, attach_trace),
	SEC_DEF("fentry.s/",		TRACING, BPF_TRACE_FENTRY, SEC_ATTACH_BTF | SEC_SLEEPABLE, attach_trace),
	SEC_DEF("fmod_ret.s/",		TRACING, BPF_MODIFY_RETURN, SEC_ATTACH_BTF | SEC_SLEEPABLE, attach_trace),
	SEC_DEF("fexit.s/",		TRACING, BPF_TRACE_FEXIT, SEC_ATTACH_BTF | SEC_SLEEPABLE, attach_trace),
	SEC_DEF("freplace/",		EXT, 0, SEC_ATTACH_BTF, attach_trace),
	SEC_DEF("lsm/",			LSM, BPF_LSM_MAC, SEC_ATTACH_BTF, attach_lsm),
	SEC_DEF("lsm.s/",		LSM, BPF_LSM_MAC, SEC_ATTACH_BTF | SEC_SLEEPABLE, attach_lsm),
	SEC_DEF("iter/",		TRACING, BPF_TRACE_ITER, SEC_ATTACH_BTF, attach_iter),
	SEC_DEF("syscall",		SYSCALL, 0, SEC_SLEEPABLE),
	SEC_DEF("xdp_devmap/",		XDP, BPF_XDP_DEVMAP, SEC_ATTACHABLE),
	SEC_DEF("xdp_cpumap/",		XDP, BPF_XDP_CPUMAP, SEC_ATTACHABLE),
	SEC_DEF("xdp",			XDP, BPF_XDP, SEC_ATTACHABLE_OPT | SEC_SLOPPY_PFX),
	SEC_DEF("perf_event",		PERF_EVENT, 0, SEC_NONE | SEC_SLOPPY_PFX),
	SEC_DEF("lwt_in",		LWT_IN, 0, SEC_NONE | SEC_SLOPPY_PFX),
	SEC_DEF("lwt_out",		LWT_OUT, 0, SEC_NONE | SEC_SLOPPY_PFX),
	SEC_DEF("lwt_xmit",		LWT_XMIT, 0, SEC_NONE | SEC_SLOPPY_PFX),
	SEC_DEF("lwt_seg6local",	LWT_SEG6LOCAL, 0, SEC_NONE | SEC_SLOPPY_PFX),
	SEC_DEF("cgroup_skb/ingress",	CGROUP_SKB, BPF_CGROUP_INET_INGRESS, SEC_ATTACHABLE_OPT | SEC_SLOPPY_PFX),
	SEC_DEF("cgroup_skb/egress",	CGROUP_SKB, BPF_CGROUP_INET_EGRESS, SEC_ATTACHABLE_OPT | SEC_SLOPPY_PFX),
	SEC_DEF("cgroup/skb",		CGROUP_SKB, 0, SEC_NONE | SEC_SLOPPY_PFX),
	SEC_DEF("cgroup/sock_create",	CGROUP_SOCK, BPF_CGROUP_INET_SOCK_CREATE, SEC_ATTACHABLE | SEC_SLOPPY_PFX),
	SEC_DEF("cgroup/sock_release",	CGROUP_SOCK, BPF_CGROUP_INET_SOCK_RELEASE, SEC_ATTACHABLE | SEC_SLOPPY_PFX),
	SEC_DEF("cgroup/sock",		CGROUP_SOCK, BPF_CGROUP_INET_SOCK_CREATE, SEC_ATTACHABLE_OPT | SEC_SLOPPY_PFX),
	SEC_DEF("cgroup/post_bind4",	CGROUP_SOCK, BPF_CGROUP_INET4_POST_BIND, SEC_ATTACHABLE | SEC_SLOPPY_PFX),
	SEC_DEF("cgroup/post_bind6",	CGROUP_SOCK, BPF_CGROUP_INET6_POST_BIND, SEC_ATTACHABLE | SEC_SLOPPY_PFX),
	SEC_DEF("cgroup/dev",		CGROUP_DEVICE, BPF_CGROUP_DEVICE, SEC_ATTACHABLE_OPT | SEC_SLOPPY_PFX),
	SEC_DEF("sockops",		SOCK_OPS, BPF_CGROUP_SOCK_OPS, SEC_ATTACHABLE_OPT | SEC_SLOPPY_PFX),
	SEC_DEF("sk_skb/stream_parser",	SK_SKB, BPF_SK_SKB_STREAM_PARSER, SEC_ATTACHABLE_OPT | SEC_SLOPPY_PFX),
	SEC_DEF("sk_skb/stream_verdict",SK_SKB, BPF_SK_SKB_STREAM_VERDICT, SEC_ATTACHABLE_OPT | SEC_SLOPPY_PFX),
	SEC_DEF("sk_skb",		SK_SKB, 0, SEC_NONE | SEC_SLOPPY_PFX),
	SEC_DEF("sk_msg",		SK_MSG, BPF_SK_MSG_VERDICT, SEC_ATTACHABLE_OPT | SEC_SLOPPY_PFX),
	SEC_DEF("lirc_mode2",		LIRC_MODE2, BPF_LIRC_MODE2, SEC_ATTACHABLE_OPT | SEC_SLOPPY_PFX),
	SEC_DEF("flow_dissector",	FLOW_DISSECTOR, BPF_FLOW_DISSECTOR, SEC_ATTACHABLE_OPT | SEC_SLOPPY_PFX),
	SEC_DEF("cgroup/bind4",		CGROUP_SOCK_ADDR, BPF_CGROUP_INET4_BIND, SEC_ATTACHABLE | SEC_SLOPPY_PFX),
	SEC_DEF("cgroup/bind6",		CGROUP_SOCK_ADDR, BPF_CGROUP_INET6_BIND, SEC_ATTACHABLE | SEC_SLOPPY_PFX),
	SEC_DEF("cgroup/connect4",	CGROUP_SOCK_ADDR, BPF_CGROUP_INET4_CONNECT, SEC_ATTACHABLE | SEC_SLOPPY_PFX),
	SEC_DEF("cgroup/connect6",	CGROUP_SOCK_ADDR, BPF_CGROUP_INET6_CONNECT, SEC_ATTACHABLE | SEC_SLOPPY_PFX),
	SEC_DEF("cgroup/sendmsg4",	CGROUP_SOCK_ADDR, BPF_CGROUP_UDP4_SENDMSG, SEC_ATTACHABLE | SEC_SLOPPY_PFX),
	SEC_DEF("cgroup/sendmsg6",	CGROUP_SOCK_ADDR, BPF_CGROUP_UDP6_SENDMSG, SEC_ATTACHABLE | SEC_SLOPPY_PFX),
	SEC_DEF("cgroup/recvmsg4",	CGROUP_SOCK_ADDR, BPF_CGROUP_UDP4_RECVMSG, SEC_ATTACHABLE | SEC_SLOPPY_PFX),
	SEC_DEF("cgroup/recvmsg6",	CGROUP_SOCK_ADDR, BPF_CGROUP_UDP6_RECVMSG, SEC_ATTACHABLE | SEC_SLOPPY_PFX),
	SEC_DEF("cgroup/getpeername4",	CGROUP_SOCK_ADDR, BPF_CGROUP_INET4_GETPEERNAME, SEC_ATTACHABLE | SEC_SLOPPY_PFX),
	SEC_DEF("cgroup/getpeername6",	CGROUP_SOCK_ADDR, BPF_CGROUP_INET6_GETPEERNAME, SEC_ATTACHABLE | SEC_SLOPPY_PFX),
	SEC_DEF("cgroup/getsockname4",	CGROUP_SOCK_ADDR, BPF_CGROUP_INET4_GETSOCKNAME, SEC_ATTACHABLE | SEC_SLOPPY_PFX),
	SEC_DEF("cgroup/getsockname6",	CGROUP_SOCK_ADDR, BPF_CGROUP_INET6_GETSOCKNAME, SEC_ATTACHABLE | SEC_SLOPPY_PFX),
	SEC_DEF("cgroup/sysctl",	CGROUP_SYSCTL, BPF_CGROUP_SYSCTL, SEC_ATTACHABLE | SEC_SLOPPY_PFX),
	SEC_DEF("cgroup/getsockopt",	CGROUP_SOCKOPT, BPF_CGROUP_GETSOCKOPT, SEC_ATTACHABLE | SEC_SLOPPY_PFX),
	SEC_DEF("cgroup/setsockopt",	CGROUP_SOCKOPT, BPF_CGROUP_SETSOCKOPT, SEC_ATTACHABLE | SEC_SLOPPY_PFX),
	SEC_DEF("struct_ops+",		STRUCT_OPS, 0, SEC_NONE),
	SEC_DEF("sk_lookup",		SK_LOOKUP, BPF_SK_LOOKUP, SEC_ATTACHABLE | SEC_SLOPPY_PFX),
};
d7be143b | 8331 | |
c76e4c22 TS |
8332 | #define MAX_TYPE_NAME_SIZE 32 |
8333 | ||
d7a18ea7 AN |
8334 | static const struct bpf_sec_def *find_sec_def(const char *sec_name) |
8335 | { | |
dd94d45c AN |
8336 | const struct bpf_sec_def *sec_def; |
8337 | enum sec_def_flags sec_flags; | |
8338 | int i, n = ARRAY_SIZE(section_defs), len; | |
8339 | bool strict = libbpf_mode & LIBBPF_STRICT_SEC_NAME; | |
d7a18ea7 AN |
8340 | |
8341 | for (i = 0; i < n; i++) { | |
dd94d45c AN |
8342 | sec_def = §ion_defs[i]; |
8343 | sec_flags = sec_def->cookie; | |
8344 | len = strlen(sec_def->sec); | |
8345 | ||
8346 | /* "type/" always has to have proper SEC("type/extras") form */ | |
8347 | if (sec_def->sec[len - 1] == '/') { | |
8348 | if (str_has_pfx(sec_name, sec_def->sec)) | |
8349 | return sec_def; | |
8350 | continue; | |
8351 | } | |
8352 | ||
8353 | /* "type+" means it can be either exact SEC("type") or | |
8354 | * well-formed SEC("type/extras") with proper '/' separator | |
8355 | */ | |
8356 | if (sec_def->sec[len - 1] == '+') { | |
8357 | len--; | |
8358 | /* not even a prefix */ | |
8359 | if (strncmp(sec_name, sec_def->sec, len) != 0) | |
8360 | continue; | |
8361 | /* exact match or has '/' separator */ | |
8362 | if (sec_name[len] == '\0' || sec_name[len] == '/') | |
8363 | return sec_def; | |
8364 | continue; | |
8365 | } | |
8366 | ||
8367 | /* SEC_SLOPPY_PFX definitions are allowed to be just prefix | |
8368 | * matches, unless strict section name mode | |
8369 | * (LIBBPF_STRICT_SEC_NAME) is enabled, in which case the | |
8370 | * match has to be exact. | |
8371 | */ | |
8372 | if ((sec_flags & SEC_SLOPPY_PFX) && !strict) { | |
8373 | if (str_has_pfx(sec_name, sec_def->sec)) | |
8374 | return sec_def; | |
8375 | continue; | |
8376 | } | |
8377 | ||
8378 | /* Definitions not marked SEC_SLOPPY_PFX (e.g., | |
8379 | * SEC("syscall")) are exact matches in both modes. | |
8380 | */ | |
8381 | if (strcmp(sec_name, sec_def->sec) == 0) | |
8382 | return sec_def; | |
d7a18ea7 AN |
8383 | } |
8384 | return NULL; | |
8385 | } | |
8386 | ||
c76e4c22 TS |
8387 | static char *libbpf_get_type_names(bool attach_type) |
8388 | { | |
d7a18ea7 | 8389 | int i, len = ARRAY_SIZE(section_defs) * MAX_TYPE_NAME_SIZE; |
c76e4c22 TS |
8390 | char *buf; |
8391 | ||
8392 | buf = malloc(len); | |
8393 | if (!buf) | |
8394 | return NULL; | |
8395 | ||
8396 | buf[0] = '\0'; | |
8397 | /* Forge string buf with all available names */ | |
d7a18ea7 | 8398 | for (i = 0; i < ARRAY_SIZE(section_defs); i++) { |
15ea31fa AN |
8399 | const struct bpf_sec_def *sec_def = §ion_defs[i]; |
8400 | ||
8401 | if (attach_type) { | |
8402 | if (sec_def->preload_fn != libbpf_preload_prog) | |
8403 | continue; | |
8404 | ||
8405 | if (!(sec_def->cookie & SEC_ATTACHABLE)) | |
8406 | continue; | |
8407 | } | |
c76e4c22 | 8408 | |
d7a18ea7 | 8409 | if (strlen(buf) + strlen(section_defs[i].sec) + 2 > len) { |
c76e4c22 TS |
8410 | free(buf); |
8411 | return NULL; | |
8412 | } | |
8413 | strcat(buf, " "); | |
d7a18ea7 | 8414 | strcat(buf, section_defs[i].sec); |
c76e4c22 TS |
8415 | } |
8416 | ||
8417 | return buf; | |
8418 | } | |
8419 | ||
b60df2a0 JK |
8420 | int libbpf_prog_type_by_name(const char *name, enum bpf_prog_type *prog_type, |
8421 | enum bpf_attach_type *expected_attach_type) | |
583c9009 | 8422 | { |
d7a18ea7 | 8423 | const struct bpf_sec_def *sec_def; |
c76e4c22 | 8424 | char *type_names; |
583c9009 | 8425 | |
b60df2a0 | 8426 | if (!name) |
e9fc3ce9 | 8427 | return libbpf_err(-EINVAL); |
583c9009 | 8428 | |
d7a18ea7 AN |
8429 | sec_def = find_sec_def(name); |
8430 | if (sec_def) { | |
8431 | *prog_type = sec_def->prog_type; | |
8432 | *expected_attach_type = sec_def->expected_attach_type; | |
b60df2a0 JK |
8433 | return 0; |
8434 | } | |
d7a18ea7 | 8435 | |
4a3d6c6a | 8436 | pr_debug("failed to guess program type from ELF section '%s'\n", name); |
c76e4c22 TS |
8437 | type_names = libbpf_get_type_names(false); |
8438 | if (type_names != NULL) { | |
3f519353 | 8439 | pr_debug("supported section(type) names are:%s\n", type_names); |
c76e4c22 TS |
8440 | free(type_names); |
8441 | } | |
8442 | ||
e9fc3ce9 | 8443 | return libbpf_err(-ESRCH); |
b60df2a0 | 8444 | } |
583c9009 | 8445 | |
590a0088 MKL |
8446 | static struct bpf_map *find_struct_ops_map_by_offset(struct bpf_object *obj, |
8447 | size_t offset) | |
8448 | { | |
8449 | struct bpf_map *map; | |
8450 | size_t i; | |
8451 | ||
8452 | for (i = 0; i < obj->nr_maps; i++) { | |
8453 | map = &obj->maps[i]; | |
8454 | if (!bpf_map__is_struct_ops(map)) | |
8455 | continue; | |
8456 | if (map->sec_offset <= offset && | |
8457 | offset - map->sec_offset < map->def.value_size) | |
8458 | return map; | |
8459 | } | |
8460 | ||
8461 | return NULL; | |
8462 | } | |
8463 | ||
/* Collect the reloc from ELF and populate the st_ops->progs[].
 *
 * Walks every relocation in @shdr/@data (a .struct_ops reloc section),
 * resolves each to (struct_ops map, member index, BPF program) and records
 * the program in st_ops->progs[member_idx]. Also stamps the program's
 * attach_btf_id/expected_attach_type the first time it is seen.
 * Returns 0 on success, negative libbpf/errno code on malformed input.
 */
static int bpf_object__collect_st_ops_relos(struct bpf_object *obj,
					    Elf64_Shdr *shdr, Elf_Data *data)
{
	const struct btf_member *member;
	struct bpf_struct_ops *st_ops;
	struct bpf_program *prog;
	unsigned int shdr_idx;
	const struct btf *btf;
	struct bpf_map *map;
	unsigned int moff, insn_idx;
	const char *name;
	__u32 member_idx;
	Elf64_Sym *sym;
	Elf64_Rel *rel;
	int i, nrels;

	btf = obj->btf;
	nrels = shdr->sh_size / shdr->sh_entsize;
	for (i = 0; i < nrels; i++) {
		rel = elf_rel_by_idx(data, i);
		if (!rel) {
			pr_warn("struct_ops reloc: failed to get %d reloc\n", i);
			return -LIBBPF_ERRNO__FORMAT;
		}

		/* the symbol this relocation refers to (a BPF function) */
		sym = elf_sym_by_idx(obj, ELF64_R_SYM(rel->r_info));
		if (!sym) {
			pr_warn("struct_ops reloc: symbol %zx not found\n",
				(size_t)ELF64_R_SYM(rel->r_info));
			return -LIBBPF_ERRNO__FORMAT;
		}

		/* reloc offset locates which struct_ops map variable it hits */
		name = elf_sym_str(obj, sym->st_name) ?: "<?>";
		map = find_struct_ops_map_by_offset(obj, rel->r_offset);
		if (!map) {
			pr_warn("struct_ops reloc: cannot find map at rel->r_offset %zu\n",
				(size_t)rel->r_offset);
			return -EINVAL;
		}

		/* moff: byte offset of the relocated field within the map value */
		moff = rel->r_offset - map->sec_offset;
		shdr_idx = sym->st_shndx;
		st_ops = map->st_ops;
		pr_debug("struct_ops reloc %s: for %lld value %lld shdr_idx %u rel->r_offset %zu map->sec_offset %zu name %d (\'%s\')\n",
			 map->name,
			 (long long)(rel->r_info >> 32),
			 (long long)sym->st_value,
			 shdr_idx, (size_t)rel->r_offset,
			 map->sec_offset, sym->st_name, name);

		if (shdr_idx >= SHN_LORESERVE) {
			pr_warn("struct_ops reloc %s: rel->r_offset %zu shdr_idx %u unsupported non-static function\n",
				map->name, (size_t)rel->r_offset, shdr_idx);
			return -LIBBPF_ERRNO__RELOC;
		}
		/* symbol value must be aligned to a BPF instruction */
		if (sym->st_value % BPF_INSN_SZ) {
			pr_warn("struct_ops reloc %s: invalid target program offset %llu\n",
				map->name, (unsigned long long)sym->st_value);
			return -LIBBPF_ERRNO__FORMAT;
		}
		insn_idx = sym->st_value / BPF_INSN_SZ;

		/* map field offset (in bits) -> BTF member of the struct_ops type */
		member = find_member_by_offset(st_ops->type, moff * 8);
		if (!member) {
			pr_warn("struct_ops reloc %s: cannot find member at moff %u\n",
				map->name, moff);
			return -EINVAL;
		}
		member_idx = member - btf_members(st_ops->type);
		name = btf__name_by_offset(btf, member->name_off);

		if (!resolve_func_ptr(btf, member->type, NULL)) {
			pr_warn("struct_ops reloc %s: cannot relocate non func ptr %s\n",
				map->name, name);
			return -EINVAL;
		}

		prog = find_prog_by_sec_insn(obj, shdr_idx, insn_idx);
		if (!prog) {
			pr_warn("struct_ops reloc %s: cannot find prog at shdr_idx %u to relocate func ptr %s\n",
				map->name, shdr_idx, name);
			return -EINVAL;
		}

		/* prevent the use of BPF prog with invalid type */
		if (prog->type != BPF_PROG_TYPE_STRUCT_OPS) {
			pr_warn("struct_ops reloc %s: prog %s is not struct_ops BPF program\n",
				map->name, prog->name);
			return -EINVAL;
		}

		/* if we haven't yet processed this BPF program, record proper
		 * attach_btf_id and member_idx
		 */
		if (!prog->attach_btf_id) {
			prog->attach_btf_id = st_ops->type_id;
			prog->expected_attach_type = member_idx;
		}

		/* struct_ops BPF prog can be re-used between multiple
		 * .struct_ops as long as it's the same struct_ops struct
		 * definition and the same function pointer field
		 */
		if (prog->attach_btf_id != st_ops->type_id ||
		    prog->expected_attach_type != member_idx) {
			pr_warn("struct_ops reloc %s: cannot use prog %s in sec %s with type %u attach_btf_id %u expected_attach_type %u for func ptr %s\n",
				map->name, prog->name, prog->sec_name, prog->type,
				prog->attach_btf_id, prog->expected_attach_type, name);
			return -EINVAL;
		}

		st_ops->progs[member_idx] = prog;
	}

	return 0;
}
8581 | ||
/* Kernel-side BTF naming prefixes for specific attach points: raw
 * tracepoints are typedefs "btf_trace_<name>", LSM hooks are funcs
 * "bpf_lsm_<name>", iterators are funcs "bpf_iter_<name>".
 */
#define BTF_TRACE_PREFIX "btf_trace_"
#define BTF_LSM_PREFIX "bpf_lsm_"
#define BTF_ITER_PREFIX "bpf_iter_"
/* max length of "<prefix><name>" built in find_btf_by_prefix_kind() */
#define BTF_MAX_NAME_SIZE 128
8586 | ||
67234743 AS |
8587 | void btf_get_kernel_prefix_kind(enum bpf_attach_type attach_type, |
8588 | const char **prefix, int *kind) | |
8589 | { | |
8590 | switch (attach_type) { | |
8591 | case BPF_TRACE_RAW_TP: | |
8592 | *prefix = BTF_TRACE_PREFIX; | |
8593 | *kind = BTF_KIND_TYPEDEF; | |
8594 | break; | |
8595 | case BPF_LSM_MAC: | |
8596 | *prefix = BTF_LSM_PREFIX; | |
8597 | *kind = BTF_KIND_FUNC; | |
8598 | break; | |
8599 | case BPF_TRACE_ITER: | |
8600 | *prefix = BTF_ITER_PREFIX; | |
8601 | *kind = BTF_KIND_FUNC; | |
8602 | break; | |
8603 | default: | |
8604 | *prefix = ""; | |
8605 | *kind = BTF_KIND_FUNC; | |
8606 | } | |
8607 | } | |
8608 | ||
a6ed02ca KS |
8609 | static int find_btf_by_prefix_kind(const struct btf *btf, const char *prefix, |
8610 | const char *name, __u32 kind) | |
8611 | { | |
8612 | char btf_type_name[BTF_MAX_NAME_SIZE]; | |
8613 | int ret; | |
8614 | ||
8615 | ret = snprintf(btf_type_name, sizeof(btf_type_name), | |
8616 | "%s%s", prefix, name); | |
8617 | /* snprintf returns the number of characters written excluding the | |
c139e40a | 8618 | * terminating null. So, if >= BTF_MAX_NAME_SIZE are written, it |
a6ed02ca KS |
8619 | * indicates truncation. |
8620 | */ | |
8621 | if (ret < 0 || ret >= sizeof(btf_type_name)) | |
8622 | return -ENAMETOOLONG; | |
8623 | return btf__find_by_name_kind(btf, btf_type_name, kind); | |
8624 | } | |
8625 | ||
91abb4a6 AN |
8626 | static inline int find_attach_btf_id(struct btf *btf, const char *name, |
8627 | enum bpf_attach_type attach_type) | |
a6ed02ca | 8628 | { |
67234743 AS |
8629 | const char *prefix; |
8630 | int kind; | |
a6ed02ca | 8631 | |
67234743 AS |
8632 | btf_get_kernel_prefix_kind(attach_type, &prefix, &kind); |
8633 | return find_btf_by_prefix_kind(btf, prefix, name, kind); | |
a6ed02ca KS |
8634 | } |
8635 | ||
b8c54ea4 AS |
8636 | int libbpf_find_vmlinux_btf_id(const char *name, |
8637 | enum bpf_attach_type attach_type) | |
12a8654b | 8638 | { |
a6ed02ca | 8639 | struct btf *btf; |
3521ffa2 | 8640 | int err; |
12a8654b | 8641 | |
a710eed3 | 8642 | btf = btf__load_vmlinux_btf(); |
e9fc3ce9 AN |
8643 | err = libbpf_get_error(btf); |
8644 | if (err) { | |
12a8654b | 8645 | pr_warn("vmlinux BTF is not found\n"); |
e9fc3ce9 | 8646 | return libbpf_err(err); |
12a8654b AS |
8647 | } |
8648 | ||
91abb4a6 AN |
8649 | err = find_attach_btf_id(btf, name, attach_type); |
8650 | if (err <= 0) | |
8651 | pr_warn("%s is not found in vmlinux BTF\n", name); | |
8652 | ||
3521ffa2 | 8653 | btf__free(btf); |
e9fc3ce9 | 8654 | return libbpf_err(err); |
b8c54ea4 AS |
8655 | } |
8656 | ||
/* Look up BTF FUNC @name in the BTF of an already-loaded BPF program
 * (@attach_prog_fd), for freplace/fentry-to-prog style attachment.
 * Returns the BTF type ID (> 0) or a negative error.
 */
static int libbpf_find_prog_btf_id(const char *name, __u32 attach_prog_fd)
{
	struct bpf_prog_info info = {};
	__u32 info_len = sizeof(info);
	struct btf *btf;
	int err;

	err = bpf_obj_get_info_by_fd(attach_prog_fd, &info, &info_len);
	if (err) {
		pr_warn("failed bpf_obj_get_info_by_fd for FD %d: %d\n",
			attach_prog_fd, err);
		return err;
	}

	/* default error for the goto-out paths below */
	err = -EINVAL;
	if (!info.btf_id) {
		pr_warn("The target program doesn't have BTF\n");
		goto out;
	}
	btf = btf__load_from_kernel_by_id(info.btf_id);
	err = libbpf_get_error(btf);
	if (err) {
		pr_warn("Failed to get BTF %d of the program: %d\n", info.btf_id, err);
		goto out;
	}
	/* on success err holds the (positive) BTF type ID */
	err = btf__find_by_name_kind(btf, name, BTF_KIND_FUNC);
	btf__free(btf);
	if (err <= 0) {
		pr_warn("%s is not found in prog's BTF\n", name);
		goto out;
	}
out:
	return err;
}
8691 | ||
91abb4a6 AN |
8692 | static int find_kernel_btf_id(struct bpf_object *obj, const char *attach_name, |
8693 | enum bpf_attach_type attach_type, | |
8694 | int *btf_obj_fd, int *btf_type_id) | |
8695 | { | |
8696 | int ret, i; | |
8697 | ||
8698 | ret = find_attach_btf_id(obj->btf_vmlinux, attach_name, attach_type); | |
8699 | if (ret > 0) { | |
8700 | *btf_obj_fd = 0; /* vmlinux BTF */ | |
8701 | *btf_type_id = ret; | |
8702 | return 0; | |
8703 | } | |
8704 | if (ret != -ENOENT) | |
8705 | return ret; | |
8706 | ||
8707 | ret = load_module_btfs(obj); | |
8708 | if (ret) | |
8709 | return ret; | |
8710 | ||
8711 | for (i = 0; i < obj->btf_module_cnt; i++) { | |
8712 | const struct module_btf *mod = &obj->btf_modules[i]; | |
8713 | ||
8714 | ret = find_attach_btf_id(mod->btf, attach_name, attach_type); | |
8715 | if (ret > 0) { | |
8716 | *btf_obj_fd = mod->fd; | |
8717 | *btf_type_id = ret; | |
8718 | return 0; | |
8719 | } | |
8720 | if (ret == -ENOENT) | |
8721 | continue; | |
8722 | ||
8723 | return ret; | |
8724 | } | |
8725 | ||
8726 | return -ESRCH; | |
8727 | } | |
8728 | ||
/* Resolve the BTF (object fd, type id) pair that @prog should attach to,
 * named by @attach_name. Three sources, in order: the target BPF program's
 * own BTF (when prog->attach_prog_fd is set), a recorded placeholder when
 * generating a loader program (gen_loader), or kernel/module BTF.
 * Returns 0 on success, negative error otherwise.
 */
static int libbpf_find_attach_btf_id(struct bpf_program *prog, const char *attach_name,
				     int *btf_obj_fd, int *btf_type_id)
{
	enum bpf_attach_type attach_type = prog->expected_attach_type;
	__u32 attach_prog_fd = prog->attach_prog_fd;
	int err = 0;

	/* BPF program's BTF ID */
	if (attach_prog_fd) {
		err = libbpf_find_prog_btf_id(attach_name, attach_prog_fd);
		if (err < 0) {
			pr_warn("failed to find BPF program (FD %d) BTF ID for '%s': %d\n",
				 attach_prog_fd, attach_name, err);
			return err;
		}
		*btf_obj_fd = 0;
		*btf_type_id = err;
		return 0;
	}

	/* kernel/module BTF ID */
	if (prog->obj->gen_loader) {
		/* loader-generation mode: record the target for later; the
		 * placeholder id 1 stands in for the real type id
		 */
		bpf_gen__record_attach_target(prog->obj->gen_loader, attach_name, attach_type);
		*btf_obj_fd = 0;
		*btf_type_id = 1;
	} else {
		err = find_kernel_btf_id(prog->obj, attach_name, attach_type, btf_obj_fd, btf_type_id);
	}
	if (err) {
		pr_warn("failed to find kernel BTF type ID of '%s': %d\n", attach_name, err);
		return err;
	}
	return 0;
}
8763 | ||
956b620f AI |
8764 | int libbpf_attach_type_by_name(const char *name, |
8765 | enum bpf_attach_type *attach_type) | |
8766 | { | |
c76e4c22 | 8767 | char *type_names; |
b6291a6f | 8768 | const struct bpf_sec_def *sec_def; |
956b620f AI |
8769 | |
8770 | if (!name) | |
e9fc3ce9 | 8771 | return libbpf_err(-EINVAL); |
956b620f | 8772 | |
b6291a6f AN |
8773 | sec_def = find_sec_def(name); |
8774 | if (!sec_def) { | |
8775 | pr_debug("failed to guess attach type based on ELF section name '%s'\n", name); | |
8776 | type_names = libbpf_get_type_names(true); | |
8777 | if (type_names != NULL) { | |
8778 | pr_debug("attachable section(type) names are:%s\n", type_names); | |
8779 | free(type_names); | |
8780 | } | |
8781 | ||
8782 | return libbpf_err(-EINVAL); | |
c76e4c22 TS |
8783 | } |
8784 | ||
15ea31fa AN |
8785 | if (sec_def->preload_fn != libbpf_preload_prog) |
8786 | return libbpf_err(-EINVAL); | |
8787 | if (!(sec_def->cookie & SEC_ATTACHABLE)) | |
b6291a6f AN |
8788 | return libbpf_err(-EINVAL); |
8789 | ||
8790 | *attach_type = sec_def->expected_attach_type; | |
8791 | return 0; | |
956b620f AI |
8792 | } |
8793 | ||
/* Return the map's kernel fd, or -EINVAL (via libbpf_err/errno) for NULL. */
int bpf_map__fd(const struct bpf_map *map)
{
	return map ? map->fd : libbpf_err(-EINVAL);
}

/* Return a pointer to the map's definition, or an error-encoded pointer
 * (with errno set) for NULL.
 */
const struct bpf_map_def *bpf_map__def(const struct bpf_map *map)
{
	return map ? &map->def : libbpf_err_ptr(-EINVAL);
}
8803 | ||
static bool map_uses_real_name(const struct bpf_map *map)
{
	/* Since libbpf started to support custom .data.* and .rodata.* maps,
	 * their user-visible name differs from kernel-visible name. Users see
	 * such map's corresponding ELF section name as a map name.
	 * This check distinguishes .data/.rodata from .data.* and .rodata.*
	 * maps to know which name has to be returned to the user.
	 */
	if (map->libbpf_type == LIBBPF_MAP_DATA && strcmp(map->real_name, DATA_SEC) != 0)
		return true;
	if (map->libbpf_type == LIBBPF_MAP_RODATA && strcmp(map->real_name, RODATA_SEC) != 0)
		return true;
	return false;
}

/* Return the user-visible map name: the real (ELF section) name for custom
 * .data.*/.rodata.* maps, the (possibly truncated) kernel name otherwise;
 * NULL for a NULL map.
 */
const char *bpf_map__name(const struct bpf_map *map)
{
	if (!map)
		return NULL;

	if (map_uses_real_name(map))
		return map->real_name;

	return map->name;
}
8829 | ||
/* Getters/setters for map creation parameters. All setters refuse to
 * change anything once the map has been created in the kernel (fd >= 0),
 * returning -EBUSY.
 */

enum bpf_map_type bpf_map__type(const struct bpf_map *map)
{
	return map->def.type;
}

int bpf_map__set_type(struct bpf_map *map, enum bpf_map_type type)
{
	if (map->fd >= 0)
		return libbpf_err(-EBUSY);
	map->def.type = type;
	return 0;
}

__u32 bpf_map__map_flags(const struct bpf_map *map)
{
	return map->def.map_flags;
}

int bpf_map__set_map_flags(struct bpf_map *map, __u32 flags)
{
	if (map->fd >= 0)
		return libbpf_err(-EBUSY);
	map->def.map_flags = flags;
	return 0;
}

__u32 bpf_map__numa_node(const struct bpf_map *map)
{
	return map->numa_node;
}

int bpf_map__set_numa_node(struct bpf_map *map, __u32 numa_node)
{
	if (map->fd >= 0)
		return libbpf_err(-EBUSY);
	map->numa_node = numa_node;
	return 0;
}

__u32 bpf_map__key_size(const struct bpf_map *map)
{
	return map->def.key_size;
}

int bpf_map__set_key_size(struct bpf_map *map, __u32 size)
{
	if (map->fd >= 0)
		return libbpf_err(-EBUSY);
	map->def.key_size = size;
	return 0;
}

__u32 bpf_map__value_size(const struct bpf_map *map)
{
	return map->def.value_size;
}

int bpf_map__set_value_size(struct bpf_map *map, __u32 size)
{
	if (map->fd >= 0)
		return libbpf_err(-EBUSY);
	map->def.value_size = size;
	return 0;
}

/* BTF type IDs of key/value; 0 (no BTF) for a NULL map */
__u32 bpf_map__btf_key_type_id(const struct bpf_map *map)
{
	return map ? map->btf_key_type_id : 0;
}

__u32 bpf_map__btf_value_type_id(const struct bpf_map *map)
{
	return map ? map->btf_value_type_id : 0;
}
8904 | ||
/* Attach opaque user data to a map, releasing any previous private data via
 * its clear_priv callback first. @clear_priv (may be NULL) will be invoked
 * when the private data is replaced or torn down.
 */
int bpf_map__set_priv(struct bpf_map *map, void *priv,
		      bpf_map_clear_priv_t clear_priv)
{
	if (!map)
		return libbpf_err(-EINVAL);

	if (map->priv) {
		if (map->clear_priv)
			map->clear_priv(map, map->priv);
	}

	map->priv = priv;
	map->clear_priv = clear_priv;
	return 0;
}

/* Return the user data previously set with bpf_map__set_priv(). */
void *bpf_map__priv(const struct bpf_map *map)
{
	return map ? map->priv : libbpf_err_ptr(-EINVAL);
}
8925 | ||
/* Pre-populate an internal (mmap-ed) map's value before object load.
 * Rejects non-mmaped maps, .kconfig maps, a @size that doesn't exactly
 * match value_size, and maps already created in the kernel (fd >= 0).
 */
int bpf_map__set_initial_value(struct bpf_map *map,
			       const void *data, size_t size)
{
	if (!map->mmaped || map->libbpf_type == LIBBPF_MAP_KCONFIG ||
	    size != map->def.value_size || map->fd >= 0)
		return libbpf_err(-EINVAL);

	memcpy(map->mmaped, data, size);
	return 0;
}

/* Return a pointer to the mmap-ed initial value (and its size via @psize),
 * or NULL if the map is not mmap-ed.
 */
const void *bpf_map__initial_value(struct bpf_map *map, size_t *psize)
{
	if (!map->mmaped)
		return NULL;
	*psize = map->def.value_size;
	return map->mmaped;
}
8944 | ||
/* PERF_EVENT_ARRAY maps are never offloaded to a device. */
bool bpf_map__is_offload_neutral(const struct bpf_map *map)
{
	return map->def.type == BPF_MAP_TYPE_PERF_EVENT_ARRAY;
}

/* Internal maps are the libbpf-generated .data/.rodata/.bss/.kconfig etc.
 * maps (anything with a libbpf_type other than LIBBPF_MAP_UNSPEC).
 */
bool bpf_map__is_internal(const struct bpf_map *map)
{
	return map->libbpf_type != LIBBPF_MAP_UNSPEC;
}

__u32 bpf_map__ifindex(const struct bpf_map *map)
{
	return map->map_ifindex;
}

/* Set the offload target ifindex; only allowed before map creation. */
int bpf_map__set_ifindex(struct bpf_map *map, __u32 ifindex)
{
	if (map->fd >= 0)
		return libbpf_err(-EBUSY);
	map->map_ifindex = ifindex;
	return 0;
}
8967 | ||
/* For map-in-map types, set the fd of an existing map to use as the inner
 * map template. Can be set at most once; drops any declaratively-defined
 * inner map definition in favor of @fd.
 */
int bpf_map__set_inner_map_fd(struct bpf_map *map, int fd)
{
	if (!bpf_map_type__is_map_in_map(map->def.type)) {
		pr_warn("error: unsupported map type\n");
		return libbpf_err(-EINVAL);
	}
	if (map->inner_map_fd != -1) {
		pr_warn("error: inner_map_fd already specified\n");
		return libbpf_err(-EINVAL);
	}
	zfree(&map->inner_map);
	map->inner_map_fd = fd;
	return 0;
}
8982 | ||
0c19a9fb | 8983 | static struct bpf_map * |
a324aae3 | 8984 | __bpf_map__iter(const struct bpf_map *m, const struct bpf_object *obj, int i) |
9d759a9b | 8985 | { |
0c19a9fb | 8986 | ssize_t idx; |
9d759a9b WN |
8987 | struct bpf_map *s, *e; |
8988 | ||
8989 | if (!obj || !obj->maps) | |
e9fc3ce9 | 8990 | return errno = EINVAL, NULL; |
9d759a9b WN |
8991 | |
8992 | s = obj->maps; | |
8993 | e = obj->maps + obj->nr_maps; | |
8994 | ||
0c19a9fb | 8995 | if ((m < s) || (m >= e)) { |
be18010e KW |
8996 | pr_warn("error in %s: map handler doesn't belong to object\n", |
8997 | __func__); | |
e9fc3ce9 | 8998 | return errno = EINVAL, NULL; |
9d759a9b WN |
8999 | } |
9000 | ||
0c19a9fb SF |
9001 | idx = (m - obj->maps) + i; |
9002 | if (idx >= obj->nr_maps || idx < 0) | |
9d759a9b WN |
9003 | return NULL; |
9004 | return &obj->maps[idx]; | |
9005 | } | |
561bbcca | 9006 | |
/* Legacy alias of bpf_object__next_map() (note the swapped argument order). */
struct bpf_map *
bpf_map__next(const struct bpf_map *prev, const struct bpf_object *obj)
{
	return bpf_object__next_map(obj, prev);
}

/* Forward-iterate @obj's maps; pass NULL to get the first map, NULL return
 * means the end was reached.
 */
struct bpf_map *
bpf_object__next_map(const struct bpf_object *obj, const struct bpf_map *prev)
{
	if (prev == NULL)
		return obj->maps;

	return __bpf_map__iter(prev, obj, 1);
}

/* Legacy alias of bpf_object__prev_map() (note the swapped argument order). */
struct bpf_map *
bpf_map__prev(const struct bpf_map *next, const struct bpf_object *obj)
{
	return bpf_object__prev_map(obj, next);
}

/* Backward-iterate @obj's maps; pass NULL to get the last map, NULL return
 * means the beginning was reached (or the object has no maps).
 */
struct bpf_map *
bpf_object__prev_map(const struct bpf_object *obj, const struct bpf_map *next)
{
	if (next == NULL) {
		if (!obj->nr_maps)
			return NULL;
		return obj->maps + obj->nr_maps - 1;
	}

	return __bpf_map__iter(next, obj, -1);
}
9039 | ||
561bbcca | 9040 | struct bpf_map * |
a324aae3 | 9041 | bpf_object__find_map_by_name(const struct bpf_object *obj, const char *name) |
561bbcca WN |
9042 | { |
9043 | struct bpf_map *pos; | |
9044 | ||
f74a53d9 | 9045 | bpf_object__for_each_map(pos, obj) { |
26071635 AN |
9046 | /* if it's a special internal map name (which always starts |
9047 | * with dot) then check if that special name matches the | |
9048 | * real map name (ELF section name) | |
9049 | */ | |
9050 | if (name[0] == '.') { | |
9051 | if (pos->real_name && strcmp(pos->real_name, name) == 0) | |
9052 | return pos; | |
9053 | continue; | |
9054 | } | |
9055 | /* otherwise map name has to be an exact match */ | |
aed65917 AN |
9056 | if (map_uses_real_name(pos)) { |
9057 | if (strcmp(pos->real_name, name) == 0) | |
9058 | return pos; | |
9059 | continue; | |
9060 | } | |
9061 | if (strcmp(pos->name, name) == 0) | |
561bbcca WN |
9062 | return pos; |
9063 | } | |
e9fc3ce9 | 9064 | return errno = ENOENT, NULL; |
561bbcca | 9065 | } |
5a6acad1 | 9066 | |
/* Look up a map by name and return its FD; on lookup failure the NULL
 * result is handed to bpf_map__fd(), which presumably yields the error
 * value — NOTE(review): confirm bpf_map__fd()'s NULL handling elsewhere
 * in this file.
 */
int
bpf_object__find_map_fd_by_name(const struct bpf_object *obj, const char *name)
{
	return bpf_map__fd(bpf_object__find_map_by_name(obj, name));
}
9072 | ||
/* Deprecated stub: lookup by offset is not supported; always returns an
 * encoded -ENOTSUP error pointer.
 */
struct bpf_map *
bpf_object__find_map_by_offset(struct bpf_object *obj, size_t offset)
{
	return libbpf_err_ptr(-ENOTSUP);
}
e28ff1a8 JS |
9078 | |
/* Extract a negative error code from a libbpf API result.
 * Returns 0 for a valid pointer; for an encoded error pointer the error is
 * copied into errno and returned; for NULL the failing API is expected to
 * have set errno already, so that value is returned.
 */
long libbpf_get_error(const void *ptr)
{
	if (IS_ERR(ptr)) {
		/* error code is encoded directly in the pointer value */
		errno = -PTR_ERR(ptr);
		return -errno;
	}

	/* libbpf never returns NULL on success and always sets errno on
	 * error, so for NULL just report the errno left by the failing API
	 */
	return ptr ? 0 : -errno;
}
6f6d33f3 JF |
9094 | |
9095 | int bpf_prog_load(const char *file, enum bpf_prog_type type, | |
9096 | struct bpf_object **pobj, int *prog_fd) | |
d7be143b AI |
9097 | { |
9098 | struct bpf_prog_load_attr attr; | |
9099 | ||
9100 | memset(&attr, 0, sizeof(struct bpf_prog_load_attr)); | |
9101 | attr.file = file; | |
9102 | attr.prog_type = type; | |
9103 | attr.expected_attach_type = 0; | |
9104 | ||
9105 | return bpf_prog_load_xattr(&attr, pobj, prog_fd); | |
9106 | } | |
9107 | ||
/* Open, configure and load a BPF object file described by @attr.
 * On success stores the loaded object in @pobj and the FD of the first
 * program in @prog_fd; returns 0. On failure returns a negative error
 * (open failures are collapsed to -ENOENT).
 */
int bpf_prog_load_xattr(const struct bpf_prog_load_attr *attr,
			struct bpf_object **pobj, int *prog_fd)
{
	struct bpf_object_open_attr open_attr = {};
	struct bpf_program *prog, *first_prog = NULL;
	struct bpf_object *obj;
	struct bpf_map *map;
	int err;

	if (!attr)
		return libbpf_err(-EINVAL);
	if (!attr->file)
		return libbpf_err(-EINVAL);

	open_attr.file = attr->file;
	open_attr.prog_type = attr->prog_type;

	obj = bpf_object__open_xattr(&open_attr);
	err = libbpf_get_error(obj);
	if (err)
		/* note: the specific open error is discarded here */
		return libbpf_err(-ENOENT);

	bpf_object__for_each_program(prog, obj) {
		enum bpf_attach_type attach_type = attr->expected_attach_type;
		/*
		 * to preserve backwards compatibility, bpf_prog_load treats
		 * attr->prog_type, if specified, as an override to whatever
		 * bpf_object__open guessed
		 */
		if (attr->prog_type != BPF_PROG_TYPE_UNSPEC) {
			bpf_program__set_type(prog, attr->prog_type);
			bpf_program__set_expected_attach_type(prog,
							      attach_type);
		}
		if (bpf_program__get_type(prog) == BPF_PROG_TYPE_UNSPEC) {
			/*
			 * we haven't guessed from section name and user
			 * didn't provide a fallback type, too bad...
			 */
			bpf_object__close(obj);
			return libbpf_err(-EINVAL);
		}

		/* propagate per-load knobs to every program */
		prog->prog_ifindex = attr->ifindex;
		prog->log_level = attr->log_level;
		prog->prog_flags |= attr->prog_flags;
		if (!first_prog)
			first_prog = prog;
	}

	/* offload-capable maps inherit the target ifindex */
	bpf_object__for_each_map(map, obj) {
		if (!bpf_map__is_offload_neutral(map))
			map->map_ifindex = attr->ifindex;
	}

	if (!first_prog) {
		pr_warn("object file doesn't contain bpf program\n");
		bpf_object__close(obj);
		return libbpf_err(-ENOENT);
	}

	err = bpf_object__load(obj);
	if (err) {
		bpf_object__close(obj);
		return libbpf_err(err);
	}

	*pobj = obj;
	*prog_fd = bpf_program__fd(first_prog);
	return 0;
}
d0cabbb0 | 9179 | |
/* Generic handle representing an attachment of a BPF program to some hook.
 * Concrete link types embed this struct and provide their own callbacks.
 */
struct bpf_link {
	int (*detach)(struct bpf_link *link);	/* detach from the hook; may be NULL */
	void (*dealloc)(struct bpf_link *link);	/* custom free; plain free() if NULL */
	char *pin_path;		/* NULL, if not pinned */
	int fd;			/* hook FD, -1 if not applicable */
	bool disconnected;	/* if set, destroy skips detaching (see bpf_link__disconnect) */
};
9187 | ||
cc4f864b AN |
9188 | /* Replace link's underlying BPF program with the new one */ |
9189 | int bpf_link__update_program(struct bpf_link *link, struct bpf_program *prog) | |
9190 | { | |
e9fc3ce9 | 9191 | int ret; |
c139e40a | 9192 | |
e9fc3ce9 AN |
9193 | ret = bpf_link_update(bpf_link__fd(link), bpf_program__fd(prog), NULL); |
9194 | return libbpf_err_errno(ret); | |
cc4f864b AN |
9195 | } |
9196 | ||
d6958706 AN |
/* Release "ownership" of underlying BPF resource (typically, BPF program
 * attached to some BPF hook, e.g., tracepoint, kprobe, etc). Disconnected
 * link, when destructed through bpf_link__destroy() call won't attempt to
 * detach/unregisted that BPF resource. This is useful in situations where,
 * say, attached BPF program has to outlive userspace program that attached it
 * in the system. Depending on type of BPF program, though, there might be
 * additional steps (like pinning BPF program in BPF FS) necessary to ensure
 * exit of userspace program doesn't trigger automatic detachment and clean up
 * inside the kernel.
 */
void bpf_link__disconnect(struct bpf_link *link)
{
	/* checked by bpf_link__destroy() before calling link->detach */
	link->disconnected = true;
}
9211 | ||
1c2e9efc AN |
9212 | int bpf_link__destroy(struct bpf_link *link) |
9213 | { | |
d6958706 | 9214 | int err = 0; |
1c2e9efc | 9215 | |
50450fc7 | 9216 | if (IS_ERR_OR_NULL(link)) |
1c2e9efc AN |
9217 | return 0; |
9218 | ||
d6958706 AN |
9219 | if (!link->disconnected && link->detach) |
9220 | err = link->detach(link); | |
c016b68e AN |
9221 | if (link->pin_path) |
9222 | free(link->pin_path); | |
d88b71d4 AN |
9223 | if (link->dealloc) |
9224 | link->dealloc(link); | |
9225 | else | |
9226 | free(link); | |
1c2e9efc | 9227 | |
e9fc3ce9 | 9228 | return libbpf_err(err); |
1c2e9efc AN |
9229 | } |
9230 | ||
c016b68e AN |
/* Return the link's hook FD (-1 if not applicable for this link type). */
int bpf_link__fd(const struct bpf_link *link)
{
	return link->fd;
}
9235 | ||
/* Return the BPF FS path the link is pinned at, or NULL if not pinned. */
const char *bpf_link__pin_path(const struct bpf_link *link)
{
	return link->pin_path;
}
9240 | ||
/* detach callback for FD-backed links: closing the FD releases the hook */
static int bpf_link__detach_fd(struct bpf_link *link)
{
	return libbpf_err_errno(close(link->fd));
}
9245 | ||
9246 | struct bpf_link *bpf_link__open(const char *path) | |
9247 | { | |
9248 | struct bpf_link *link; | |
9249 | int fd; | |
9250 | ||
9251 | fd = bpf_obj_get(path); | |
9252 | if (fd < 0) { | |
9253 | fd = -errno; | |
9254 | pr_warn("failed to open link at %s: %d\n", path, fd); | |
e9fc3ce9 | 9255 | return libbpf_err_ptr(fd); |
c016b68e AN |
9256 | } |
9257 | ||
9258 | link = calloc(1, sizeof(*link)); | |
9259 | if (!link) { | |
9260 | close(fd); | |
e9fc3ce9 | 9261 | return libbpf_err_ptr(-ENOMEM); |
c016b68e AN |
9262 | } |
9263 | link->detach = &bpf_link__detach_fd; | |
9264 | link->fd = fd; | |
9265 | ||
9266 | link->pin_path = strdup(path); | |
9267 | if (!link->pin_path) { | |
9268 | bpf_link__destroy(link); | |
e9fc3ce9 | 9269 | return libbpf_err_ptr(-ENOMEM); |
c016b68e AN |
9270 | } |
9271 | ||
9272 | return link; | |
9273 | } | |
9274 | ||
2e49527e AN |
/* Force-detach the kernel side of the link via BPF_LINK_DETACH.
 * Returns 0 on success, -errno on failure.
 */
int bpf_link__detach(struct bpf_link *link)
{
	return bpf_link_detach(link->fd) ? -errno : 0;
}
9279 | ||
c016b68e AN |
9280 | int bpf_link__pin(struct bpf_link *link, const char *path) |
9281 | { | |
9282 | int err; | |
9283 | ||
9284 | if (link->pin_path) | |
e9fc3ce9 | 9285 | return libbpf_err(-EBUSY); |
c016b68e AN |
9286 | err = make_parent_dir(path); |
9287 | if (err) | |
e9fc3ce9 | 9288 | return libbpf_err(err); |
c016b68e AN |
9289 | err = check_path(path); |
9290 | if (err) | |
e9fc3ce9 | 9291 | return libbpf_err(err); |
c016b68e AN |
9292 | |
9293 | link->pin_path = strdup(path); | |
9294 | if (!link->pin_path) | |
e9fc3ce9 | 9295 | return libbpf_err(-ENOMEM); |
c016b68e AN |
9296 | |
9297 | if (bpf_obj_pin(link->fd, link->pin_path)) { | |
9298 | err = -errno; | |
9299 | zfree(&link->pin_path); | |
e9fc3ce9 | 9300 | return libbpf_err(err); |
c016b68e AN |
9301 | } |
9302 | ||
9303 | pr_debug("link fd=%d: pinned at %s\n", link->fd, link->pin_path); | |
9304 | return 0; | |
9305 | } | |
9306 | ||
9307 | int bpf_link__unpin(struct bpf_link *link) | |
9308 | { | |
9309 | int err; | |
9310 | ||
9311 | if (!link->pin_path) | |
e9fc3ce9 | 9312 | return libbpf_err(-EINVAL); |
c016b68e AN |
9313 | |
9314 | err = unlink(link->pin_path); | |
9315 | if (err != 0) | |
af0efa05 | 9316 | return -errno; |
c016b68e AN |
9317 | |
9318 | pr_debug("link fd=%d: unpinned from %s\n", link->fd, link->pin_path); | |
9319 | zfree(&link->pin_path); | |
9320 | return 0; | |
9321 | } | |
63f2f5ee | 9322 | |
668ace0e AN |
/* perf-event-backed link (kprobe/uprobe/tracepoint/perf event).
 * Embeds the generic bpf_link as its first member so that
 * container_of() recovers it from a struct bpf_link pointer.
 */
struct bpf_link_perf {
	struct bpf_link link;
	int perf_event_fd;	/* may differ from link.fd when BPF_PERF_EVENT links are used */
	/* legacy kprobe support: keep track of probe identifier and type */
	char *legacy_probe_name;
	bool legacy_is_kprobe;
	bool legacy_is_retprobe;
};

/* forward decls: legacy tracefs-based probe removal, defined further below */
static int remove_kprobe_event_legacy(const char *probe_name, bool retprobe);
static int remove_uprobe_event_legacy(const char *probe_name, bool retprobe);
46ed5fc3 | 9334 | |
668ace0e | 9335 | static int bpf_link_perf_detach(struct bpf_link *link) |
63f2f5ee | 9336 | { |
668ace0e AN |
9337 | struct bpf_link_perf *perf_link = container_of(link, struct bpf_link_perf, link); |
9338 | int err = 0; | |
63f2f5ee | 9339 | |
668ace0e | 9340 | if (ioctl(perf_link->perf_event_fd, PERF_EVENT_IOC_DISABLE, 0) < 0) |
63f2f5ee AN |
9341 | err = -errno; |
9342 | ||
668ace0e AN |
9343 | if (perf_link->perf_event_fd != link->fd) |
9344 | close(perf_link->perf_event_fd); | |
c016b68e | 9345 | close(link->fd); |
668ace0e | 9346 | |
cc10623c | 9347 | /* legacy uprobe/kprobe needs to be removed after perf event fd closure */ |
46ed5fc3 AN |
9348 | if (perf_link->legacy_probe_name) { |
9349 | if (perf_link->legacy_is_kprobe) { | |
9350 | err = remove_kprobe_event_legacy(perf_link->legacy_probe_name, | |
9351 | perf_link->legacy_is_retprobe); | |
cc10623c AN |
9352 | } else { |
9353 | err = remove_uprobe_event_legacy(perf_link->legacy_probe_name, | |
9354 | perf_link->legacy_is_retprobe); | |
46ed5fc3 AN |
9355 | } |
9356 | } | |
ca304b40 RDT |
9357 | |
9358 | return err; | |
63f2f5ee AN |
9359 | } |
9360 | ||
668ace0e AN |
9361 | static void bpf_link_perf_dealloc(struct bpf_link *link) |
9362 | { | |
9363 | struct bpf_link_perf *perf_link = container_of(link, struct bpf_link_perf, link); | |
9364 | ||
ca304b40 | 9365 | free(perf_link->legacy_probe_name); |
668ace0e AN |
9366 | free(perf_link); |
9367 | } | |
9368 | ||
/* Attach loaded program @prog to an already-opened perf event FD @pfd.
 * Prefers a proper BPF link (BPF_PERF_EVENT) when the kernel supports it;
 * otherwise falls back to the PERF_EVENT_IOC_SET_BPF ioctl (which cannot
 * carry a bpf_cookie). Enables the event before returning.
 * Returns an encoded error pointer on failure.
 */
struct bpf_link *bpf_program__attach_perf_event_opts(const struct bpf_program *prog, int pfd,
						     const struct bpf_perf_event_opts *opts)
{
	char errmsg[STRERR_BUFSIZE];
	struct bpf_link_perf *link;
	int prog_fd, link_fd = -1, err;

	if (!OPTS_VALID(opts, bpf_perf_event_opts))
		return libbpf_err_ptr(-EINVAL);

	if (pfd < 0) {
		pr_warn("prog '%s': invalid perf event FD %d\n",
			prog->name, pfd);
		return libbpf_err_ptr(-EINVAL);
	}
	prog_fd = bpf_program__fd(prog);
	if (prog_fd < 0) {
		pr_warn("prog '%s': can't attach BPF program w/o FD (did you load it?)\n",
			prog->name);
		return libbpf_err_ptr(-EINVAL);
	}

	link = calloc(1, sizeof(*link));
	if (!link)
		return libbpf_err_ptr(-ENOMEM);
	link->link.detach = &bpf_link_perf_detach;
	link->link.dealloc = &bpf_link_perf_dealloc;
	link->perf_event_fd = pfd;

	if (kernel_supports(prog->obj, FEAT_PERF_LINK)) {
		/* modern path: kernel-managed link, supports bpf_cookie */
		DECLARE_LIBBPF_OPTS(bpf_link_create_opts, link_opts,
			.perf_event.bpf_cookie = OPTS_GET(opts, bpf_cookie, 0));

		link_fd = bpf_link_create(prog_fd, pfd, BPF_PERF_EVENT, &link_opts);
		if (link_fd < 0) {
			err = -errno;
			pr_warn("prog '%s': failed to create BPF link for perf_event FD %d: %d (%s)\n",
				prog->name, pfd,
				err, libbpf_strerror_r(err, errmsg, sizeof(errmsg)));
			goto err_out;
		}
		link->link.fd = link_fd;
	} else {
		/* legacy ioctl path: no cookie support */
		if (OPTS_GET(opts, bpf_cookie, 0)) {
			pr_warn("prog '%s': user context value is not supported\n", prog->name);
			err = -EOPNOTSUPP;
			goto err_out;
		}

		if (ioctl(pfd, PERF_EVENT_IOC_SET_BPF, prog_fd) < 0) {
			err = -errno;
			pr_warn("prog '%s': failed to attach to perf_event FD %d: %s\n",
				prog->name, pfd, libbpf_strerror_r(err, errmsg, sizeof(errmsg)));
			if (err == -EPROTO)
				pr_warn("prog '%s': try add PERF_SAMPLE_CALLCHAIN to or remove exclude_callchain_[kernel|user] from pfd %d\n",
					prog->name, pfd);
			goto err_out;
		}
		link->link.fd = pfd;
	}
	if (ioctl(pfd, PERF_EVENT_IOC_ENABLE, 0) < 0) {
		err = -errno;
		pr_warn("prog '%s': failed to enable perf_event FD %d: %s\n",
			prog->name, pfd, libbpf_strerror_r(err, errmsg, sizeof(errmsg)));
		goto err_out;
	}

	return &link->link;
err_out:
	if (link_fd >= 0)
		close(link_fd);
	free(link);
	return libbpf_err_ptr(err);
}
9443 | ||
/* Convenience wrapper: attach to perf event @pfd with default options. */
struct bpf_link *bpf_program__attach_perf_event(const struct bpf_program *prog, int pfd)
{
	return bpf_program__attach_perf_event_opts(prog, pfd, NULL);
}
9448 | ||
b2650027 AN |
/*
 * this function is expected to parse integer in the range of [0, 2^31-1] from
 * given file using scanf format string fmt. If actual parsed value is
 * negative, the result might be indistinguishable from error
 */
static int parse_uint_from_file(const char *file, const char *fmt)
{
	char buf[STRERR_BUFSIZE];
	int err, ret;
	FILE *f;

	f = fopen(file, "r");
	if (!f) {
		err = -errno;
		pr_debug("failed to open '%s': %s\n", file,
			 libbpf_strerror_r(err, buf, sizeof(buf)));
		return err;
	}
	err = fscanf(f, fmt, &ret);
	if (err != 1) {
		/* NOTE(review): if fscanf() returns 0 (match failure, not EOF)
		 * errno may still be 0, making the returned value 0 look like
		 * a successfully parsed result — confirm whether callers care
		 */
		err = err == EOF ? -EIO : -errno;
		pr_debug("failed to parse '%s': %s\n", file,
			libbpf_strerror_r(err, buf, sizeof(buf)));
		fclose(f);
		return err;
	}
	fclose(f);
	return ret;
}
9478 | ||
/* PMU type id of the "kprobe" perf event source; negative if unsupported */
static int determine_kprobe_perf_type(void)
{
	return parse_uint_from_file("/sys/bus/event_source/devices/kprobe/type",
				    "%d\n");
}
9485 | ||
/* PMU type id of the "uprobe" perf event source; negative if unsupported */
static int determine_uprobe_perf_type(void)
{
	return parse_uint_from_file("/sys/bus/event_source/devices/uprobe/type",
				    "%d\n");
}
9492 | ||
/* config bit that marks a kprobe perf event as a retprobe */
static int determine_kprobe_retprobe_bit(void)
{
	return parse_uint_from_file("/sys/bus/event_source/devices/kprobe/format/retprobe",
				    "config:%d\n");
}
9499 | ||
/* config bit that marks a uprobe perf event as a retprobe */
static int determine_uprobe_retprobe_bit(void)
{
	return parse_uint_from_file("/sys/bus/event_source/devices/uprobe/format/retprobe",
				    "config:%d\n");
}
9506 | ||
5e3b8356 AN |
#define PERF_UPROBE_REF_CTR_OFFSET_BITS 32
#define PERF_UPROBE_REF_CTR_OFFSET_SHIFT 32

/* Open a [k,u][ret]probe perf event via the modern PMU interface.
 * @name/@offset are kprobe_func+kprobe_addr or uprobe_path+probe_offset
 * depending on @uprobe; @ref_ctr_off is the uprobe ref-counter offset
 * (must fit in 32 bits). Returns the perf event FD, or a negative error.
 */
static int perf_event_open_probe(bool uprobe, bool retprobe, const char *name,
				 uint64_t offset, int pid, size_t ref_ctr_off)
{
	struct perf_event_attr attr = {};
	char errmsg[STRERR_BUFSIZE];
	int type, pfd, err;

	if (ref_ctr_off >= (1ULL << PERF_UPROBE_REF_CTR_OFFSET_BITS))
		return -EINVAL;

	type = uprobe ? determine_uprobe_perf_type()
		      : determine_kprobe_perf_type();
	if (type < 0) {
		pr_warn("failed to determine %s perf type: %s\n",
			uprobe ? "uprobe" : "kprobe",
			libbpf_strerror_r(type, errmsg, sizeof(errmsg)));
		return type;
	}
	if (retprobe) {
		int bit = uprobe ? determine_uprobe_retprobe_bit()
				 : determine_kprobe_retprobe_bit();

		if (bit < 0) {
			pr_warn("failed to determine %s retprobe bit: %s\n",
				uprobe ? "uprobe" : "kprobe",
				libbpf_strerror_r(bit, errmsg, sizeof(errmsg)));
			return bit;
		}
		attr.config |= 1 << bit;
	}
	attr.size = sizeof(attr);
	attr.type = type;
	/* ref-counter offset lives in the upper 32 bits of config */
	attr.config |= (__u64)ref_ctr_off << PERF_UPROBE_REF_CTR_OFFSET_SHIFT;
	attr.config1 = ptr_to_u64(name); /* kprobe_func or uprobe_path */
	attr.config2 = offset;		 /* kprobe_addr or probe_offset */

	/* pid filter is meaningful only for uprobes */
	pfd = syscall(__NR_perf_event_open, &attr,
		      pid < 0 ? -1 : pid /* pid */,
		      pid == -1 ? 0 : -1 /* cpu */,
		      -1 /* group_fd */, PERF_FLAG_FD_CLOEXEC);
	if (pfd < 0) {
		err = -errno;
		pr_warn("%s perf_event_open() failed: %s\n",
			uprobe ? "uprobe" : "kprobe",
			libbpf_strerror_r(err, errmsg, sizeof(errmsg)));
		return err;
	}
	return pfd;
}
9560 | ||
46ed5fc3 AN |
/* printf-style append of one formatted record to an existing file.
 * The file is NOT created (no O_CREAT) — it must already exist, as the
 * tracefs control files do. Returns 0 on success, -errno on failure.
 */
static int append_to_file(const char *file, const char *fmt, ...)
{
	va_list args;
	int fd, err = 0;

	fd = open(file, O_WRONLY | O_APPEND, 0);
	if (fd < 0)
		return -errno;

	va_start(args, fmt);
	if (vdprintf(fd, fmt, args) < 0)
		err = -errno;
	va_end(args);

	close(fd);
	return err;
}
9580 | ||
/* Build a per-process unique legacy kprobe event name of the form
 * libbpf_<pid>_<kfunc>_0x<offset>, truncated to @buf_sz.
 */
static void gen_kprobe_legacy_event_name(char *buf, size_t buf_sz,
					 const char *kfunc_name, size_t offset)
{
	snprintf(buf, buf_sz, "libbpf_%u_%s_0x%zx", getpid(), kfunc_name, offset);
}
9586 | ||
/* Register a [k|kret]probe named @probe_name at @kfunc_name+@offset via the
 * legacy tracefs kprobe_events interface.
 */
static int add_kprobe_event_legacy(const char *probe_name, bool retprobe,
				   const char *kfunc_name, size_t offset)
{
	return append_to_file("/sys/kernel/debug/tracing/kprobe_events",
			      "%c:%s/%s %s+0x%zx",
			      retprobe ? 'r' : 'p',
			      retprobe ? "kretprobes" : "kprobes",
			      probe_name, kfunc_name, offset);
}
9597 | ||
/* Unregister a legacy tracefs [k|kret]probe added by add_kprobe_event_legacy() */
static int remove_kprobe_event_legacy(const char *probe_name, bool retprobe)
{
	return append_to_file("/sys/kernel/debug/tracing/kprobe_events",
			      "-:%s/%s",
			      retprobe ? "kretprobes" : "kprobes", probe_name);
}
9604 | ||
/* Read the tracepoint id of a legacy kprobe event registered under
 * events/[kretprobes|kprobes]/<probe_name>/id in tracefs.
 */
static int determine_kprobe_perf_type_legacy(const char *probe_name, bool retprobe)
{
	char path[256];

	snprintf(path, sizeof(path),
		 "/sys/kernel/debug/tracing/events/%s/%s/id",
		 retprobe ? "kretprobes" : "kprobes", probe_name);

	return parse_uint_from_file(path, "%d\n");
}
9615 | ||
9616 | static int perf_event_kprobe_open_legacy(const char *probe_name, bool retprobe, | |
9617 | const char *kfunc_name, size_t offset, int pid) | |
ca304b40 RDT |
9618 | { |
9619 | struct perf_event_attr attr = {}; | |
9620 | char errmsg[STRERR_BUFSIZE]; | |
9621 | int type, pfd, err; | |
9622 | ||
46ed5fc3 | 9623 | err = add_kprobe_event_legacy(probe_name, retprobe, kfunc_name, offset); |
ca304b40 | 9624 | if (err < 0) { |
46ed5fc3 AN |
9625 | pr_warn("failed to add legacy kprobe event for '%s+0x%zx': %s\n", |
9626 | kfunc_name, offset, | |
ca304b40 RDT |
9627 | libbpf_strerror_r(err, errmsg, sizeof(errmsg))); |
9628 | return err; | |
9629 | } | |
46ed5fc3 | 9630 | type = determine_kprobe_perf_type_legacy(probe_name, retprobe); |
ca304b40 | 9631 | if (type < 0) { |
46ed5fc3 AN |
9632 | pr_warn("failed to determine legacy kprobe event id for '%s+0x%zx': %s\n", |
9633 | kfunc_name, offset, | |
ca304b40 RDT |
9634 | libbpf_strerror_r(type, errmsg, sizeof(errmsg))); |
9635 | return type; | |
9636 | } | |
9637 | attr.size = sizeof(attr); | |
9638 | attr.config = type; | |
9639 | attr.type = PERF_TYPE_TRACEPOINT; | |
9640 | ||
9641 | pfd = syscall(__NR_perf_event_open, &attr, | |
9642 | pid < 0 ? -1 : pid, /* pid */ | |
9643 | pid == -1 ? 0 : -1, /* cpu */ | |
9644 | -1 /* group_fd */, PERF_FLAG_FD_CLOEXEC); | |
9645 | if (pfd < 0) { | |
9646 | err = -errno; | |
9647 | pr_warn("legacy kprobe perf_event_open() failed: %s\n", | |
9648 | libbpf_strerror_r(err, errmsg, sizeof(errmsg))); | |
9649 | return err; | |
9650 | } | |
9651 | return pfd; | |
9652 | } | |
9653 | ||
da97553e | 9654 | struct bpf_link * |
942025c9 | 9655 | bpf_program__attach_kprobe_opts(const struct bpf_program *prog, |
ac0ed488 | 9656 | const char *func_name, |
47faff37 | 9657 | const struct bpf_kprobe_opts *opts) |
b2650027 | 9658 | { |
47faff37 | 9659 | DECLARE_LIBBPF_OPTS(bpf_perf_event_opts, pe_opts); |
b2650027 | 9660 | char errmsg[STRERR_BUFSIZE]; |
ca304b40 | 9661 | char *legacy_probe = NULL; |
b2650027 | 9662 | struct bpf_link *link; |
46ed5fc3 | 9663 | size_t offset; |
ca304b40 | 9664 | bool retprobe, legacy; |
b2650027 AN |
9665 | int pfd, err; |
9666 | ||
da97553e JO |
9667 | if (!OPTS_VALID(opts, bpf_kprobe_opts)) |
9668 | return libbpf_err_ptr(-EINVAL); | |
9669 | ||
9670 | retprobe = OPTS_GET(opts, retprobe, false); | |
9671 | offset = OPTS_GET(opts, offset, 0); | |
47faff37 | 9672 | pe_opts.bpf_cookie = OPTS_GET(opts, bpf_cookie, 0); |
da97553e | 9673 | |
ca304b40 RDT |
9674 | legacy = determine_kprobe_perf_type() < 0; |
9675 | if (!legacy) { | |
9676 | pfd = perf_event_open_probe(false /* uprobe */, retprobe, | |
9677 | func_name, offset, | |
9678 | -1 /* pid */, 0 /* ref_ctr_off */); | |
9679 | } else { | |
46ed5fc3 AN |
9680 | char probe_name[256]; |
9681 | ||
9682 | gen_kprobe_legacy_event_name(probe_name, sizeof(probe_name), | |
9683 | func_name, offset); | |
9684 | ||
ca304b40 RDT |
9685 | legacy_probe = strdup(func_name); |
9686 | if (!legacy_probe) | |
9687 | return libbpf_err_ptr(-ENOMEM); | |
9688 | ||
46ed5fc3 | 9689 | pfd = perf_event_kprobe_open_legacy(legacy_probe, retprobe, func_name, |
ca304b40 RDT |
9690 | offset, -1 /* pid */); |
9691 | } | |
b2650027 | 9692 | if (pfd < 0) { |
46ed5fc3 AN |
9693 | err = -errno; |
9694 | pr_warn("prog '%s': failed to create %s '%s+0x%zx' perf event: %s\n", | |
9695 | prog->name, retprobe ? "kretprobe" : "kprobe", | |
9696 | func_name, offset, | |
303a2572 AN |
9697 | libbpf_strerror_r(err, errmsg, sizeof(errmsg))); |
9698 | goto err_out; | |
b2650027 | 9699 | } |
47faff37 | 9700 | link = bpf_program__attach_perf_event_opts(prog, pfd, &pe_opts); |
e9fc3ce9 AN |
9701 | err = libbpf_get_error(link); |
9702 | if (err) { | |
b2650027 | 9703 | close(pfd); |
46ed5fc3 AN |
9704 | pr_warn("prog '%s': failed to attach to %s '%s+0x%zx': %s\n", |
9705 | prog->name, retprobe ? "kretprobe" : "kprobe", | |
9706 | func_name, offset, | |
be18010e | 9707 | libbpf_strerror_r(err, errmsg, sizeof(errmsg))); |
303a2572 | 9708 | goto err_out; |
b2650027 | 9709 | } |
ca304b40 RDT |
9710 | if (legacy) { |
9711 | struct bpf_link_perf *perf_link = container_of(link, struct bpf_link_perf, link); | |
9712 | ||
9713 | perf_link->legacy_probe_name = legacy_probe; | |
46ed5fc3 | 9714 | perf_link->legacy_is_kprobe = true; |
ca304b40 RDT |
9715 | perf_link->legacy_is_retprobe = retprobe; |
9716 | } | |
9717 | ||
b2650027 | 9718 | return link; |
303a2572 AN |
9719 | err_out: |
9720 | free(legacy_probe); | |
9721 | return libbpf_err_ptr(err); | |
b2650027 AN |
9722 | } |
9723 | ||
/* Convenience wrapper: attach a [k|kret]probe on @func_name with only the
 * retprobe flag set; all other options take their defaults.
 */
struct bpf_link *bpf_program__attach_kprobe(const struct bpf_program *prog,
					    bool retprobe,
					    const char *func_name)
{
	DECLARE_LIBBPF_OPTS(bpf_kprobe_opts, opts,
		.retprobe = retprobe,
	);

	return bpf_program__attach_kprobe_opts(prog, func_name, &opts);
}
9734 | ||
/* SEC("kprobe/...")/SEC("kretprobe/...") auto-attach handler: parse the
 * function name and optional "+<offset>" out of the section name and
 * delegate to bpf_program__attach_kprobe_opts().
 */
static struct bpf_link *attach_kprobe(const struct bpf_program *prog, long cookie)
{
	DECLARE_LIBBPF_OPTS(bpf_kprobe_opts, opts);
	unsigned long offset = 0;
	struct bpf_link *link;
	const char *func_name;
	char *func;
	int n, err;

	opts.retprobe = str_has_pfx(prog->sec_name, "kretprobe/");
	if (opts.retprobe)
		func_name = prog->sec_name + sizeof("kretprobe/") - 1;
	else
		func_name = prog->sec_name + sizeof("kprobe/") - 1;

	/* %m makes sscanf() allocate 'func'; it must be freed on every path
	 * where n >= 1
	 */
	n = sscanf(func_name, "%m[a-zA-Z0-9_.]+%li", &func, &offset);
	if (n < 1) {
		err = -EINVAL;
		pr_warn("kprobe name is invalid: %s\n", func_name);
		return libbpf_err_ptr(err);
	}
	if (opts.retprobe && offset != 0) {
		free(func);
		err = -EINVAL;
		pr_warn("kretprobes do not support offset specification\n");
		return libbpf_err_ptr(err);
	}

	opts.offset = offset;
	link = bpf_program__attach_kprobe_opts(prog, func, &opts);
	free(func);
	return link;
}
9768 | ||
cc10623c AN |
/* Build a per-process unique legacy uprobe event name of the form
 * libbpf_<pid>_<binary_path>_0x<offset>, then replace every
 * non-alphanumeric character (tracefs event names allow [A-Za-z0-9_]).
 *
 * Fix: isalnum() was called on a plain char; for bytes > 0x7f on platforms
 * where char is signed that passes a negative value, which is undefined
 * behavior (CERT STR37-C). Cast through unsigned char.
 */
static void gen_uprobe_legacy_event_name(char *buf, size_t buf_sz,
					 const char *binary_path, uint64_t offset)
{
	int i;

	snprintf(buf, buf_sz, "libbpf_%u_%s_0x%zx", getpid(), binary_path, (size_t)offset);

	/* sanitize binary_path in the probe name */
	for (i = 0; buf[i]; i++) {
		if (!isalnum((unsigned char)buf[i]))
			buf[i] = '_';
	}
}
9782 | ||
/* Register a [u|uret]probe named @probe_name at @binary_path:@offset via
 * the legacy tracefs uprobe_events interface.
 */
static inline int add_uprobe_event_legacy(const char *probe_name, bool retprobe,
					  const char *binary_path, size_t offset)
{
	return append_to_file("/sys/kernel/debug/tracing/uprobe_events",
			      "%c:%s/%s %s:0x%zx",
			      retprobe ? 'r' : 'p',
			      retprobe ? "uretprobes" : "uprobes",
			      probe_name, binary_path, offset);
}
9793 | ||
/* Unregister a legacy tracefs [u|uret]probe added by add_uprobe_event_legacy() */
static inline int remove_uprobe_event_legacy(const char *probe_name, bool retprobe)
{
	return append_to_file("/sys/kernel/debug/tracing/uprobe_events",
			      "-:%s/%s",
			      retprobe ? "uretprobes" : "uprobes", probe_name);
}
9800 | ||
/* Read the tracepoint id of a legacy uprobe event registered under
 * events/[uretprobes|uprobes]/<probe_name>/id in tracefs.
 */
static int determine_uprobe_perf_type_legacy(const char *probe_name, bool retprobe)
{
	char path[512];

	snprintf(path, sizeof(path),
		 "/sys/kernel/debug/tracing/events/%s/%s/id",
		 retprobe ? "uretprobes" : "uprobes", probe_name);

	return parse_uint_from_file(path, "%d\n");
}
9811 | ||
9812 | static int perf_event_uprobe_open_legacy(const char *probe_name, bool retprobe, | |
9813 | const char *binary_path, size_t offset, int pid) | |
9814 | { | |
9815 | struct perf_event_attr attr; | |
9816 | int type, pfd, err; | |
9817 | ||
9818 | err = add_uprobe_event_legacy(probe_name, retprobe, binary_path, offset); | |
9819 | if (err < 0) { | |
9820 | pr_warn("failed to add legacy uprobe event for %s:0x%zx: %d\n", | |
9821 | binary_path, (size_t)offset, err); | |
9822 | return err; | |
9823 | } | |
9824 | type = determine_uprobe_perf_type_legacy(probe_name, retprobe); | |
9825 | if (type < 0) { | |
9826 | pr_warn("failed to determine legacy uprobe event id for %s:0x%zx: %d\n", | |
9827 | binary_path, offset, err); | |
9828 | return type; | |
9829 | } | |
9830 | ||
9831 | memset(&attr, 0, sizeof(attr)); | |
9832 | attr.size = sizeof(attr); | |
9833 | attr.config = type; | |
9834 | attr.type = PERF_TYPE_TRACEPOINT; | |
9835 | ||
9836 | pfd = syscall(__NR_perf_event_open, &attr, | |
9837 | pid < 0 ? -1 : pid, /* pid */ | |
9838 | pid == -1 ? 0 : -1, /* cpu */ | |
9839 | -1 /* group_fd */, PERF_FLAG_FD_CLOEXEC); | |
9840 | if (pfd < 0) { | |
9841 | err = -errno; | |
9842 | pr_warn("legacy uprobe perf_event_open() failed: %d\n", err); | |
9843 | return err; | |
9844 | } | |
9845 | return pfd; | |
9846 | } | |
9847 | ||
/* Attach a BPF program as a uprobe/uretprobe at binary_path+func_offset.
 *
 * Prefers the modern PERF_TYPE_UPROBE perf PMU; falls back to the legacy
 * tracefs uprobe_events interface when the PMU is unavailable (in which
 * case ref_ctr_offset is unsupported and rejected with -EINVAL).
 *
 * Returns a new bpf_link on success, or an error pointer (see
 * libbpf_err_ptr) on failure.
 */
LIBBPF_API struct bpf_link *
bpf_program__attach_uprobe_opts(const struct bpf_program *prog, pid_t pid,
				const char *binary_path, size_t func_offset,
				const struct bpf_uprobe_opts *opts)
{
	DECLARE_LIBBPF_OPTS(bpf_perf_event_opts, pe_opts);
	char errmsg[STRERR_BUFSIZE], *legacy_probe = NULL;
	struct bpf_link *link;
	size_t ref_ctr_off;
	int pfd, err;
	bool retprobe, legacy;

	if (!OPTS_VALID(opts, bpf_uprobe_opts))
		return libbpf_err_ptr(-EINVAL);

	retprobe = OPTS_GET(opts, retprobe, false);
	ref_ctr_off = OPTS_GET(opts, ref_ctr_offset, 0);
	pe_opts.bpf_cookie = OPTS_GET(opts, bpf_cookie, 0);

	/* no PERF_TYPE_UPROBE PMU means we must use legacy tracefs events */
	legacy = determine_uprobe_perf_type() < 0;
	if (!legacy) {
		pfd = perf_event_open_probe(true /* uprobe */, retprobe, binary_path,
					    func_offset, pid, ref_ctr_off);
	} else {
		char probe_name[512];

		/* legacy interface has no ref_ctr_offset support */
		if (ref_ctr_off)
			return libbpf_err_ptr(-EINVAL);

		gen_uprobe_legacy_event_name(probe_name, sizeof(probe_name),
					     binary_path, func_offset);

		legacy_probe = strdup(probe_name);
		if (!legacy_probe)
			return libbpf_err_ptr(-ENOMEM);

		pfd = perf_event_uprobe_open_legacy(legacy_probe, retprobe,
						    binary_path, func_offset, pid);
	}
	if (pfd < 0) {
		/* NOTE(review): in the legacy branch pfd is a direct error
		 * code, so errno may not correspond to the real failure here
		 * — presumably err should be pfd in that case; verify.
		 */
		err = -errno;
		pr_warn("prog '%s': failed to create %s '%s:0x%zx' perf event: %s\n",
			prog->name, retprobe ? "uretprobe" : "uprobe",
			binary_path, func_offset,
			libbpf_strerror_r(err, errmsg, sizeof(errmsg)));
		goto err_out;
	}

	link = bpf_program__attach_perf_event_opts(prog, pfd, &pe_opts);
	err = libbpf_get_error(link);
	if (err) {
		close(pfd);
		pr_warn("prog '%s': failed to attach to %s '%s:0x%zx': %s\n",
			prog->name, retprobe ? "uretprobe" : "uprobe",
			binary_path, func_offset,
			libbpf_strerror_r(err, errmsg, sizeof(errmsg)));
		goto err_out;
	}
	if (legacy) {
		struct bpf_link_perf *perf_link = container_of(link, struct bpf_link_perf, link);

		/* ownership of legacy_probe passes to the link; it is freed
		 * (and the tracefs event removed) on link destruction
		 */
		perf_link->legacy_probe_name = legacy_probe;
		perf_link->legacy_is_kprobe = false;
		perf_link->legacy_is_retprobe = retprobe;
	}
	return link;
err_out:
	free(legacy_probe);
	return libbpf_err_ptr(err);

}
9919 | ||
942025c9 | 9920 | struct bpf_link *bpf_program__attach_uprobe(const struct bpf_program *prog, |
47faff37 AN |
9921 | bool retprobe, pid_t pid, |
9922 | const char *binary_path, | |
9923 | size_t func_offset) | |
9924 | { | |
9925 | DECLARE_LIBBPF_OPTS(bpf_uprobe_opts, opts, .retprobe = retprobe); | |
9926 | ||
9927 | return bpf_program__attach_uprobe_opts(prog, pid, binary_path, func_offset, &opts); | |
9928 | } | |
9929 | ||
f6de59c1 AN |
9930 | static int determine_tracepoint_id(const char *tp_category, |
9931 | const char *tp_name) | |
9932 | { | |
9933 | char file[PATH_MAX]; | |
9934 | int ret; | |
9935 | ||
9936 | ret = snprintf(file, sizeof(file), | |
9937 | "/sys/kernel/debug/tracing/events/%s/%s/id", | |
9938 | tp_category, tp_name); | |
9939 | if (ret < 0) | |
9940 | return -errno; | |
9941 | if (ret >= sizeof(file)) { | |
9942 | pr_debug("tracepoint %s/%s path is too long\n", | |
9943 | tp_category, tp_name); | |
9944 | return -E2BIG; | |
9945 | } | |
9946 | return parse_uint_from_file(file, "%d\n"); | |
9947 | } | |
9948 | ||
9949 | static int perf_event_open_tracepoint(const char *tp_category, | |
9950 | const char *tp_name) | |
9951 | { | |
9952 | struct perf_event_attr attr = {}; | |
9953 | char errmsg[STRERR_BUFSIZE]; | |
9954 | int tp_id, pfd, err; | |
9955 | ||
9956 | tp_id = determine_tracepoint_id(tp_category, tp_name); | |
9957 | if (tp_id < 0) { | |
be18010e KW |
9958 | pr_warn("failed to determine tracepoint '%s/%s' perf event ID: %s\n", |
9959 | tp_category, tp_name, | |
9960 | libbpf_strerror_r(tp_id, errmsg, sizeof(errmsg))); | |
f6de59c1 AN |
9961 | return tp_id; |
9962 | } | |
9963 | ||
9964 | attr.type = PERF_TYPE_TRACEPOINT; | |
9965 | attr.size = sizeof(attr); | |
9966 | attr.config = tp_id; | |
9967 | ||
9968 | pfd = syscall(__NR_perf_event_open, &attr, -1 /* pid */, 0 /* cpu */, | |
9969 | -1 /* group_fd */, PERF_FLAG_FD_CLOEXEC); | |
9970 | if (pfd < 0) { | |
9971 | err = -errno; | |
be18010e KW |
9972 | pr_warn("tracepoint '%s/%s' perf_event_open() failed: %s\n", |
9973 | tp_category, tp_name, | |
9974 | libbpf_strerror_r(err, errmsg, sizeof(errmsg))); | |
f6de59c1 AN |
9975 | return err; |
9976 | } | |
9977 | return pfd; | |
9978 | } | |
9979 | ||
942025c9 | 9980 | struct bpf_link *bpf_program__attach_tracepoint_opts(const struct bpf_program *prog, |
47faff37 AN |
9981 | const char *tp_category, |
9982 | const char *tp_name, | |
9983 | const struct bpf_tracepoint_opts *opts) | |
f6de59c1 | 9984 | { |
47faff37 | 9985 | DECLARE_LIBBPF_OPTS(bpf_perf_event_opts, pe_opts); |
f6de59c1 AN |
9986 | char errmsg[STRERR_BUFSIZE]; |
9987 | struct bpf_link *link; | |
9988 | int pfd, err; | |
9989 | ||
47faff37 AN |
9990 | if (!OPTS_VALID(opts, bpf_tracepoint_opts)) |
9991 | return libbpf_err_ptr(-EINVAL); | |
9992 | ||
9993 | pe_opts.bpf_cookie = OPTS_GET(opts, bpf_cookie, 0); | |
9994 | ||
f6de59c1 AN |
9995 | pfd = perf_event_open_tracepoint(tp_category, tp_name); |
9996 | if (pfd < 0) { | |
52109584 AN |
9997 | pr_warn("prog '%s': failed to create tracepoint '%s/%s' perf event: %s\n", |
9998 | prog->name, tp_category, tp_name, | |
be18010e | 9999 | libbpf_strerror_r(pfd, errmsg, sizeof(errmsg))); |
e9fc3ce9 | 10000 | return libbpf_err_ptr(pfd); |
f6de59c1 | 10001 | } |
47faff37 | 10002 | link = bpf_program__attach_perf_event_opts(prog, pfd, &pe_opts); |
e9fc3ce9 AN |
10003 | err = libbpf_get_error(link); |
10004 | if (err) { | |
f6de59c1 | 10005 | close(pfd); |
52109584 AN |
10006 | pr_warn("prog '%s': failed to attach to tracepoint '%s/%s': %s\n", |
10007 | prog->name, tp_category, tp_name, | |
be18010e | 10008 | libbpf_strerror_r(err, errmsg, sizeof(errmsg))); |
e9fc3ce9 | 10009 | return libbpf_err_ptr(err); |
f6de59c1 AN |
10010 | } |
10011 | return link; | |
10012 | } | |
10013 | ||
/* Attach prog to a tracepoint with default options (no bpf_cookie). */
struct bpf_link *bpf_program__attach_tracepoint(const struct bpf_program *prog,
						const char *tp_category,
						const char *tp_name)
{
	return bpf_program__attach_tracepoint_opts(prog, tp_category, tp_name, NULL);
}
10020 | ||
12d9466d | 10021 | static struct bpf_link *attach_tp(const struct bpf_program *prog, long cookie) |
d7a18ea7 AN |
10022 | { |
10023 | char *sec_name, *tp_cat, *tp_name; | |
10024 | struct bpf_link *link; | |
10025 | ||
52109584 | 10026 | sec_name = strdup(prog->sec_name); |
d7a18ea7 | 10027 | if (!sec_name) |
e9fc3ce9 | 10028 | return libbpf_err_ptr(-ENOMEM); |
d7a18ea7 | 10029 | |
13d35a0c AN |
10030 | /* extract "tp/<category>/<name>" or "tracepoint/<category>/<name>" */ |
10031 | if (str_has_pfx(prog->sec_name, "tp/")) | |
10032 | tp_cat = sec_name + sizeof("tp/") - 1; | |
10033 | else | |
10034 | tp_cat = sec_name + sizeof("tracepoint/") - 1; | |
d7a18ea7 AN |
10035 | tp_name = strchr(tp_cat, '/'); |
10036 | if (!tp_name) { | |
e9fc3ce9 AN |
10037 | free(sec_name); |
10038 | return libbpf_err_ptr(-EINVAL); | |
d7a18ea7 AN |
10039 | } |
10040 | *tp_name = '\0'; | |
10041 | tp_name++; | |
10042 | ||
10043 | link = bpf_program__attach_tracepoint(prog, tp_cat, tp_name); | |
d7a18ea7 AN |
10044 | free(sec_name); |
10045 | return link; | |
10046 | } | |
10047 | ||
942025c9 | 10048 | struct bpf_link *bpf_program__attach_raw_tracepoint(const struct bpf_program *prog, |
84bf5e1f AN |
10049 | const char *tp_name) |
10050 | { | |
10051 | char errmsg[STRERR_BUFSIZE]; | |
c016b68e | 10052 | struct bpf_link *link; |
84bf5e1f AN |
10053 | int prog_fd, pfd; |
10054 | ||
10055 | prog_fd = bpf_program__fd(prog); | |
10056 | if (prog_fd < 0) { | |
52109584 | 10057 | pr_warn("prog '%s': can't attach before loaded\n", prog->name); |
e9fc3ce9 | 10058 | return libbpf_err_ptr(-EINVAL); |
84bf5e1f AN |
10059 | } |
10060 | ||
d6958706 | 10061 | link = calloc(1, sizeof(*link)); |
84bf5e1f | 10062 | if (!link) |
e9fc3ce9 | 10063 | return libbpf_err_ptr(-ENOMEM); |
c016b68e | 10064 | link->detach = &bpf_link__detach_fd; |
84bf5e1f AN |
10065 | |
10066 | pfd = bpf_raw_tracepoint_open(tp_name, prog_fd); | |
10067 | if (pfd < 0) { | |
10068 | pfd = -errno; | |
10069 | free(link); | |
52109584 AN |
10070 | pr_warn("prog '%s': failed to attach to raw tracepoint '%s': %s\n", |
10071 | prog->name, tp_name, libbpf_strerror_r(pfd, errmsg, sizeof(errmsg))); | |
e9fc3ce9 | 10072 | return libbpf_err_ptr(pfd); |
84bf5e1f AN |
10073 | } |
10074 | link->fd = pfd; | |
c016b68e | 10075 | return link; |
84bf5e1f AN |
10076 | } |
10077 | ||
12d9466d | 10078 | static struct bpf_link *attach_raw_tp(const struct bpf_program *prog, long cookie) |
d7a18ea7 | 10079 | { |
ccaf12d6 HT |
10080 | static const char *const prefixes[] = { |
10081 | "raw_tp/", | |
10082 | "raw_tracepoint/", | |
10083 | "raw_tp.w/", | |
10084 | "raw_tracepoint.w/", | |
10085 | }; | |
10086 | size_t i; | |
10087 | const char *tp_name = NULL; | |
13d35a0c | 10088 | |
ccaf12d6 HT |
10089 | for (i = 0; i < ARRAY_SIZE(prefixes); i++) { |
10090 | if (str_has_pfx(prog->sec_name, prefixes[i])) { | |
10091 | tp_name = prog->sec_name + strlen(prefixes[i]); | |
10092 | break; | |
10093 | } | |
10094 | } | |
10095 | if (!tp_name) { | |
10096 | pr_warn("prog '%s': invalid section name '%s'\n", | |
10097 | prog->name, prog->sec_name); | |
10098 | return libbpf_err_ptr(-EINVAL); | |
10099 | } | |
d7a18ea7 AN |
10100 | |
10101 | return bpf_program__attach_raw_tracepoint(prog, tp_name); | |
10102 | } | |
10103 | ||
1e092a03 | 10104 | /* Common logic for all BPF program types that attach to a btf_id */ |
942025c9 | 10105 | static struct bpf_link *bpf_program__attach_btf_id(const struct bpf_program *prog) |
b8c54ea4 AS |
10106 | { |
10107 | char errmsg[STRERR_BUFSIZE]; | |
c016b68e | 10108 | struct bpf_link *link; |
b8c54ea4 AS |
10109 | int prog_fd, pfd; |
10110 | ||
10111 | prog_fd = bpf_program__fd(prog); | |
10112 | if (prog_fd < 0) { | |
52109584 | 10113 | pr_warn("prog '%s': can't attach before loaded\n", prog->name); |
e9fc3ce9 | 10114 | return libbpf_err_ptr(-EINVAL); |
b8c54ea4 AS |
10115 | } |
10116 | ||
d6958706 | 10117 | link = calloc(1, sizeof(*link)); |
b8c54ea4 | 10118 | if (!link) |
e9fc3ce9 | 10119 | return libbpf_err_ptr(-ENOMEM); |
c016b68e | 10120 | link->detach = &bpf_link__detach_fd; |
b8c54ea4 AS |
10121 | |
10122 | pfd = bpf_raw_tracepoint_open(NULL, prog_fd); | |
10123 | if (pfd < 0) { | |
10124 | pfd = -errno; | |
10125 | free(link); | |
52109584 AN |
10126 | pr_warn("prog '%s': failed to attach: %s\n", |
10127 | prog->name, libbpf_strerror_r(pfd, errmsg, sizeof(errmsg))); | |
e9fc3ce9 | 10128 | return libbpf_err_ptr(pfd); |
b8c54ea4 AS |
10129 | } |
10130 | link->fd = pfd; | |
10131 | return (struct bpf_link *)link; | |
10132 | } | |
10133 | ||
/* Attach an fentry/fexit/fmod_ret-style program; the BTF target was
 * resolved at load time, so this is just the common btf_id attach.
 */
struct bpf_link *bpf_program__attach_trace(const struct bpf_program *prog)
{
	return bpf_program__attach_btf_id(prog);
}
10138 | ||
/* Attach an LSM program; like trace programs, the BTF hook was resolved
 * at load time, so this is the common btf_id attach.
 */
struct bpf_link *bpf_program__attach_lsm(const struct bpf_program *prog)
{
	return bpf_program__attach_btf_id(prog);
}
10143 | ||
/* SEC_DEF handler for trace-type sections; cookie is unused. */
static struct bpf_link *attach_trace(const struct bpf_program *prog, long cookie)
{
	return bpf_program__attach_trace(prog);
}
10148 | ||
/* SEC_DEF handler for lsm/ sections; cookie is unused. */
static struct bpf_link *attach_lsm(const struct bpf_program *prog, long cookie)
{
	return bpf_program__attach_lsm(prog);
}
10153 | ||
d60d81ac | 10154 | static struct bpf_link * |
942025c9 | 10155 | bpf_program__attach_fd(const struct bpf_program *prog, int target_fd, int btf_id, |
d60d81ac | 10156 | const char *target_name) |
cc4f864b | 10157 | { |
a5359091 THJ |
10158 | DECLARE_LIBBPF_OPTS(bpf_link_create_opts, opts, |
10159 | .target_btf_id = btf_id); | |
cc4f864b AN |
10160 | enum bpf_attach_type attach_type; |
10161 | char errmsg[STRERR_BUFSIZE]; | |
10162 | struct bpf_link *link; | |
10163 | int prog_fd, link_fd; | |
10164 | ||
10165 | prog_fd = bpf_program__fd(prog); | |
10166 | if (prog_fd < 0) { | |
52109584 | 10167 | pr_warn("prog '%s': can't attach before loaded\n", prog->name); |
e9fc3ce9 | 10168 | return libbpf_err_ptr(-EINVAL); |
cc4f864b AN |
10169 | } |
10170 | ||
10171 | link = calloc(1, sizeof(*link)); | |
10172 | if (!link) | |
e9fc3ce9 | 10173 | return libbpf_err_ptr(-ENOMEM); |
cc4f864b AN |
10174 | link->detach = &bpf_link__detach_fd; |
10175 | ||
10176 | attach_type = bpf_program__get_expected_attach_type(prog); | |
a5359091 | 10177 | link_fd = bpf_link_create(prog_fd, target_fd, attach_type, &opts); |
cc4f864b AN |
10178 | if (link_fd < 0) { |
10179 | link_fd = -errno; | |
10180 | free(link); | |
52109584 AN |
10181 | pr_warn("prog '%s': failed to attach to %s: %s\n", |
10182 | prog->name, target_name, | |
cc4f864b | 10183 | libbpf_strerror_r(link_fd, errmsg, sizeof(errmsg))); |
e9fc3ce9 | 10184 | return libbpf_err_ptr(link_fd); |
cc4f864b AN |
10185 | } |
10186 | link->fd = link_fd; | |
10187 | return link; | |
10188 | } | |
10189 | ||
/* Attach prog to the cgroup referenced by cgroup_fd (no target btf_id). */
struct bpf_link *
bpf_program__attach_cgroup(const struct bpf_program *prog, int cgroup_fd)
{
	return bpf_program__attach_fd(prog, cgroup_fd, 0, "cgroup");
}
10195 | ||
/* Attach prog to the network namespace referenced by netns_fd. */
struct bpf_link *
bpf_program__attach_netns(const struct bpf_program *prog, int netns_fd)
{
	return bpf_program__attach_fd(prog, netns_fd, 0, "netns");
}
10201 | ||
/* Attach an XDP program to the network device with the given ifindex. */
struct bpf_link *bpf_program__attach_xdp(const struct bpf_program *prog, int ifindex)
{
	/* target_fd/target_ifindex use the same field in LINK_CREATE */
	return bpf_program__attach_fd(prog, ifindex, 0, "xdp");
}
10207 | ||
942025c9 | 10208 | struct bpf_link *bpf_program__attach_freplace(const struct bpf_program *prog, |
a5359091 THJ |
10209 | int target_fd, |
10210 | const char *attach_func_name) | |
10211 | { | |
10212 | int btf_id; | |
10213 | ||
10214 | if (!!target_fd != !!attach_func_name) { | |
10215 | pr_warn("prog '%s': supply none or both of target_fd and attach_func_name\n", | |
10216 | prog->name); | |
e9fc3ce9 | 10217 | return libbpf_err_ptr(-EINVAL); |
a5359091 THJ |
10218 | } |
10219 | ||
10220 | if (prog->type != BPF_PROG_TYPE_EXT) { | |
10221 | pr_warn("prog '%s': only BPF_PROG_TYPE_EXT can attach as freplace", | |
10222 | prog->name); | |
e9fc3ce9 | 10223 | return libbpf_err_ptr(-EINVAL); |
a5359091 THJ |
10224 | } |
10225 | ||
10226 | if (target_fd) { | |
10227 | btf_id = libbpf_find_prog_btf_id(attach_func_name, target_fd); | |
10228 | if (btf_id < 0) | |
e9fc3ce9 | 10229 | return libbpf_err_ptr(btf_id); |
a5359091 THJ |
10230 | |
10231 | return bpf_program__attach_fd(prog, target_fd, btf_id, "freplace"); | |
10232 | } else { | |
10233 | /* no target, so use raw_tracepoint_open for compatibility | |
10234 | * with old kernels | |
10235 | */ | |
10236 | return bpf_program__attach_trace(prog); | |
10237 | } | |
dc8698ca AN |
10238 | } |
10239 | ||
c09add2f | 10240 | struct bpf_link * |
942025c9 | 10241 | bpf_program__attach_iter(const struct bpf_program *prog, |
c09add2f YS |
10242 | const struct bpf_iter_attach_opts *opts) |
10243 | { | |
cd31039a | 10244 | DECLARE_LIBBPF_OPTS(bpf_link_create_opts, link_create_opts); |
c09add2f YS |
10245 | char errmsg[STRERR_BUFSIZE]; |
10246 | struct bpf_link *link; | |
10247 | int prog_fd, link_fd; | |
cd31039a | 10248 | __u32 target_fd = 0; |
c09add2f YS |
10249 | |
10250 | if (!OPTS_VALID(opts, bpf_iter_attach_opts)) | |
e9fc3ce9 | 10251 | return libbpf_err_ptr(-EINVAL); |
c09add2f | 10252 | |
74fc097d YS |
10253 | link_create_opts.iter_info = OPTS_GET(opts, link_info, (void *)0); |
10254 | link_create_opts.iter_info_len = OPTS_GET(opts, link_info_len, 0); | |
cd31039a | 10255 | |
c09add2f YS |
10256 | prog_fd = bpf_program__fd(prog); |
10257 | if (prog_fd < 0) { | |
52109584 | 10258 | pr_warn("prog '%s': can't attach before loaded\n", prog->name); |
e9fc3ce9 | 10259 | return libbpf_err_ptr(-EINVAL); |
c09add2f YS |
10260 | } |
10261 | ||
10262 | link = calloc(1, sizeof(*link)); | |
10263 | if (!link) | |
e9fc3ce9 | 10264 | return libbpf_err_ptr(-ENOMEM); |
c09add2f YS |
10265 | link->detach = &bpf_link__detach_fd; |
10266 | ||
cd31039a YS |
10267 | link_fd = bpf_link_create(prog_fd, target_fd, BPF_TRACE_ITER, |
10268 | &link_create_opts); | |
c09add2f YS |
10269 | if (link_fd < 0) { |
10270 | link_fd = -errno; | |
10271 | free(link); | |
52109584 AN |
10272 | pr_warn("prog '%s': failed to attach to iterator: %s\n", |
10273 | prog->name, libbpf_strerror_r(link_fd, errmsg, sizeof(errmsg))); | |
e9fc3ce9 | 10274 | return libbpf_err_ptr(link_fd); |
c09add2f YS |
10275 | } |
10276 | link->fd = link_fd; | |
10277 | return link; | |
10278 | } | |
10279 | ||
/* SEC_DEF handler for iter/ sections; cookie is unused. */
static struct bpf_link *attach_iter(const struct bpf_program *prog, long cookie)
{
	return bpf_program__attach_iter(prog, NULL);
}
10284 | ||
/* Auto-attach prog using the attach function registered for its section
 * definition. Returns -ESRCH if the section has no auto-attach support.
 */
struct bpf_link *bpf_program__attach(const struct bpf_program *prog)
{
	if (!prog->sec_def || !prog->sec_def->attach_fn)
		return libbpf_err_ptr(-ESRCH);

	return prog->sec_def->attach_fn(prog, prog->sec_def->cookie);
}
10292 | ||
590a0088 MKL |
10293 | static int bpf_link__detach_struct_ops(struct bpf_link *link) |
10294 | { | |
590a0088 MKL |
10295 | __u32 zero = 0; |
10296 | ||
c016b68e | 10297 | if (bpf_map_delete_elem(link->fd, &zero)) |
590a0088 MKL |
10298 | return -errno; |
10299 | ||
10300 | return 0; | |
10301 | } | |
10302 | ||
942025c9 | 10303 | struct bpf_link *bpf_map__attach_struct_ops(const struct bpf_map *map) |
590a0088 MKL |
10304 | { |
10305 | struct bpf_struct_ops *st_ops; | |
c016b68e | 10306 | struct bpf_link *link; |
590a0088 MKL |
10307 | __u32 i, zero = 0; |
10308 | int err; | |
10309 | ||
10310 | if (!bpf_map__is_struct_ops(map) || map->fd == -1) | |
e9fc3ce9 | 10311 | return libbpf_err_ptr(-EINVAL); |
590a0088 MKL |
10312 | |
10313 | link = calloc(1, sizeof(*link)); | |
10314 | if (!link) | |
e9fc3ce9 | 10315 | return libbpf_err_ptr(-EINVAL); |
590a0088 MKL |
10316 | |
10317 | st_ops = map->st_ops; | |
10318 | for (i = 0; i < btf_vlen(st_ops->type); i++) { | |
10319 | struct bpf_program *prog = st_ops->progs[i]; | |
10320 | void *kern_data; | |
10321 | int prog_fd; | |
10322 | ||
10323 | if (!prog) | |
10324 | continue; | |
10325 | ||
10326 | prog_fd = bpf_program__fd(prog); | |
10327 | kern_data = st_ops->kern_vdata + st_ops->kern_func_off[i]; | |
10328 | *(unsigned long *)kern_data = prog_fd; | |
10329 | } | |
10330 | ||
10331 | err = bpf_map_update_elem(map->fd, &zero, st_ops->kern_vdata, 0); | |
10332 | if (err) { | |
10333 | err = -errno; | |
10334 | free(link); | |
e9fc3ce9 | 10335 | return libbpf_err_ptr(err); |
590a0088 MKL |
10336 | } |
10337 | ||
c016b68e | 10338 | link->detach = bpf_link__detach_struct_ops; |
590a0088 MKL |
10339 | link->fd = map->fd; |
10340 | ||
c016b68e | 10341 | return link; |
590a0088 MKL |
10342 | } |
10343 | ||
/* Drain all pending records from a perf ring buffer mmap'ed at mmap_mem.
 *
 * For each record, fn(record, private_data) is invoked; iteration stops
 * early if fn returns anything other than LIBBPF_PERF_EVENT_CONT.
 * Records that wrap around the ring end are reassembled into a heap
 * buffer owned by the caller via *copy_mem / *copy_size (grown on demand,
 * caller frees). The consumer tail is published back to the kernel once
 * at the end.
 *
 * mmap_size must be the power-of-two data area size; page_size is the
 * size of the leading metadata page.
 */
enum bpf_perf_event_ret
bpf_perf_event_read_simple(void *mmap_mem, size_t mmap_size, size_t page_size,
			   void **copy_mem, size_t *copy_size,
			   bpf_perf_event_print_t fn, void *private_data)
{
	struct perf_event_mmap_page *header = mmap_mem;
	__u64 data_head = ring_buffer_read_head(header);
	__u64 data_tail = header->data_tail;
	void *base = ((__u8 *)header) + page_size;
	int ret = LIBBPF_PERF_EVENT_CONT;
	struct perf_event_header *ehdr;
	size_t ehdr_size;

	while (data_head != data_tail) {
		/* mask gives the offset within the power-of-two ring */
		ehdr = base + (data_tail & (mmap_size - 1));
		ehdr_size = ehdr->size;

		if (((void *)ehdr) + ehdr_size > base + mmap_size) {
			/* record wraps past the end of the ring: stitch the
			 * two pieces together in the reusable copy buffer
			 */
			void *copy_start = ehdr;
			size_t len_first = base + mmap_size - copy_start;
			size_t len_secnd = ehdr_size - len_first;

			if (*copy_size < ehdr_size) {
				free(*copy_mem);
				*copy_mem = malloc(ehdr_size);
				if (!*copy_mem) {
					*copy_size = 0;
					ret = LIBBPF_PERF_EVENT_ERROR;
					break;
				}
				*copy_size = ehdr_size;
			}

			memcpy(*copy_mem, copy_start, len_first);
			memcpy(*copy_mem + len_first, base, len_secnd);
			ehdr = *copy_mem;
		}

		ret = fn(ehdr, private_data);
		data_tail += ehdr_size;
		if (ret != LIBBPF_PERF_EVENT_CONT)
			break;
	}

	/* publish consumed position so the kernel can reuse the space */
	ring_buffer_write_tail(header, data_tail);
	return libbpf_err(ret);
}
34be1646 | 10391 | |
fb84b822 AN |
10392 | struct perf_buffer; |
10393 | ||
10394 | struct perf_buffer_params { | |
10395 | struct perf_event_attr *attr; | |
10396 | /* if event_cb is specified, it takes precendence */ | |
10397 | perf_buffer_event_fn event_cb; | |
10398 | /* sample_cb and lost_cb are higher-level common-case callbacks */ | |
10399 | perf_buffer_sample_fn sample_cb; | |
10400 | perf_buffer_lost_fn lost_cb; | |
10401 | void *ctx; | |
10402 | int cpu_cnt; | |
10403 | int *cpus; | |
10404 | int *map_keys; | |
10405 | }; | |
10406 | ||
10407 | struct perf_cpu_buf { | |
10408 | struct perf_buffer *pb; | |
10409 | void *base; /* mmap()'ed memory */ | |
10410 | void *buf; /* for reconstructing segmented data */ | |
10411 | size_t buf_size; | |
10412 | int fd; | |
10413 | int cpu; | |
10414 | int map_key; | |
10415 | }; | |
10416 | ||
10417 | struct perf_buffer { | |
10418 | perf_buffer_event_fn event_cb; | |
10419 | perf_buffer_sample_fn sample_cb; | |
10420 | perf_buffer_lost_fn lost_cb; | |
10421 | void *ctx; /* passed into callbacks */ | |
10422 | ||
10423 | size_t page_size; | |
10424 | size_t mmap_size; | |
10425 | struct perf_cpu_buf **cpu_bufs; | |
10426 | struct epoll_event *events; | |
783b8f01 | 10427 | int cpu_cnt; /* number of allocated CPU buffers */ |
fb84b822 AN |
10428 | int epoll_fd; /* perf event FD */ |
10429 | int map_fd; /* BPF_MAP_TYPE_PERF_EVENT_ARRAY BPF map FD */ | |
10430 | }; | |
10431 | ||
10432 | static void perf_buffer__free_cpu_buf(struct perf_buffer *pb, | |
10433 | struct perf_cpu_buf *cpu_buf) | |
10434 | { | |
10435 | if (!cpu_buf) | |
10436 | return; | |
10437 | if (cpu_buf->base && | |
10438 | munmap(cpu_buf->base, pb->mmap_size + pb->page_size)) | |
be18010e | 10439 | pr_warn("failed to munmap cpu_buf #%d\n", cpu_buf->cpu); |
fb84b822 AN |
10440 | if (cpu_buf->fd >= 0) { |
10441 | ioctl(cpu_buf->fd, PERF_EVENT_IOC_DISABLE, 0); | |
10442 | close(cpu_buf->fd); | |
10443 | } | |
10444 | free(cpu_buf->buf); | |
10445 | free(cpu_buf); | |
10446 | } | |
10447 | ||
10448 | void perf_buffer__free(struct perf_buffer *pb) | |
10449 | { | |
10450 | int i; | |
10451 | ||
50450fc7 | 10452 | if (IS_ERR_OR_NULL(pb)) |
fb84b822 AN |
10453 | return; |
10454 | if (pb->cpu_bufs) { | |
601b05ca | 10455 | for (i = 0; i < pb->cpu_cnt; i++) { |
fb84b822 AN |
10456 | struct perf_cpu_buf *cpu_buf = pb->cpu_bufs[i]; |
10457 | ||
601b05ca EC |
10458 | if (!cpu_buf) |
10459 | continue; | |
10460 | ||
fb84b822 AN |
10461 | bpf_map_delete_elem(pb->map_fd, &cpu_buf->map_key); |
10462 | perf_buffer__free_cpu_buf(pb, cpu_buf); | |
10463 | } | |
10464 | free(pb->cpu_bufs); | |
10465 | } | |
10466 | if (pb->epoll_fd >= 0) | |
10467 | close(pb->epoll_fd); | |
10468 | free(pb->events); | |
10469 | free(pb); | |
10470 | } | |
10471 | ||
/* Create and enable one per-CPU perf buffer: open the perf event on the
 * given CPU, mmap its ring (data area + one metadata page), and enable
 * the event. map_key records which PERF_EVENT_ARRAY slot this buffer
 * belongs to. Returns ERR_PTR on failure; partial state is cleaned up
 * via perf_buffer__free_cpu_buf().
 */
static struct perf_cpu_buf *
perf_buffer__open_cpu_buf(struct perf_buffer *pb, struct perf_event_attr *attr,
			  int cpu, int map_key)
{
	struct perf_cpu_buf *cpu_buf;
	char msg[STRERR_BUFSIZE];
	int err;

	cpu_buf = calloc(1, sizeof(*cpu_buf));
	if (!cpu_buf)
		return ERR_PTR(-ENOMEM);

	cpu_buf->pb = pb;
	cpu_buf->cpu = cpu;
	cpu_buf->map_key = map_key;

	/* all processes (-1 pid) on this specific CPU */
	cpu_buf->fd = syscall(__NR_perf_event_open, attr, -1 /* pid */, cpu,
			      -1, PERF_FLAG_FD_CLOEXEC);
	if (cpu_buf->fd < 0) {
		err = -errno;
		pr_warn("failed to open perf buffer event on cpu #%d: %s\n",
			cpu, libbpf_strerror_r(err, msg, sizeof(msg)));
		goto error;
	}

	cpu_buf->base = mmap(NULL, pb->mmap_size + pb->page_size,
			     PROT_READ | PROT_WRITE, MAP_SHARED,
			     cpu_buf->fd, 0);
	if (cpu_buf->base == MAP_FAILED) {
		/* reset so the error path doesn't try to munmap MAP_FAILED */
		cpu_buf->base = NULL;
		err = -errno;
		pr_warn("failed to mmap perf buffer on cpu #%d: %s\n",
			cpu, libbpf_strerror_r(err, msg, sizeof(msg)));
		goto error;
	}

	if (ioctl(cpu_buf->fd, PERF_EVENT_IOC_ENABLE, 0) < 0) {
		err = -errno;
		pr_warn("failed to enable perf buffer event on cpu #%d: %s\n",
			cpu, libbpf_strerror_r(err, msg, sizeof(msg)));
		goto error;
	}

	return cpu_buf;

error:
	perf_buffer__free_cpu_buf(pb, cpu_buf);
	return (struct perf_cpu_buf *)ERR_PTR(err);
}
10521 | ||
10522 | static struct perf_buffer *__perf_buffer__new(int map_fd, size_t page_cnt, | |
10523 | struct perf_buffer_params *p); | |
10524 | ||
10525 | struct perf_buffer *perf_buffer__new(int map_fd, size_t page_cnt, | |
10526 | const struct perf_buffer_opts *opts) | |
10527 | { | |
10528 | struct perf_buffer_params p = {}; | |
4be6e05c ACM |
10529 | struct perf_event_attr attr = { 0, }; |
10530 | ||
65bb2e0f | 10531 | attr.config = PERF_COUNT_SW_BPF_OUTPUT; |
4be6e05c ACM |
10532 | attr.type = PERF_TYPE_SOFTWARE; |
10533 | attr.sample_type = PERF_SAMPLE_RAW; | |
10534 | attr.sample_period = 1; | |
10535 | attr.wakeup_events = 1; | |
fb84b822 AN |
10536 | |
10537 | p.attr = &attr; | |
10538 | p.sample_cb = opts ? opts->sample_cb : NULL; | |
10539 | p.lost_cb = opts ? opts->lost_cb : NULL; | |
10540 | p.ctx = opts ? opts->ctx : NULL; | |
10541 | ||
e9fc3ce9 | 10542 | return libbpf_ptr(__perf_buffer__new(map_fd, page_cnt, &p)); |
fb84b822 AN |
10543 | } |
10544 | ||
10545 | struct perf_buffer * | |
10546 | perf_buffer__new_raw(int map_fd, size_t page_cnt, | |
10547 | const struct perf_buffer_raw_opts *opts) | |
10548 | { | |
10549 | struct perf_buffer_params p = {}; | |
10550 | ||
10551 | p.attr = opts->attr; | |
10552 | p.event_cb = opts->event_cb; | |
10553 | p.ctx = opts->ctx; | |
10554 | p.cpu_cnt = opts->cpu_cnt; | |
10555 | p.cpus = opts->cpus; | |
10556 | p.map_keys = opts->map_keys; | |
10557 | ||
e9fc3ce9 | 10558 | return libbpf_ptr(__perf_buffer__new(map_fd, page_cnt, &p)); |
fb84b822 AN |
10559 | } |
10560 | ||
/* Common constructor behind perf_buffer__new() and perf_buffer__new_raw().
 *
 * Validates the map (best-effort), creates the epoll instance, and opens
 * one per-CPU buffer for each requested CPU (or all online CPUs when
 * p->cpu_cnt <= 0), registering each perf FD both in the BPF map and in
 * the epoll set. Returns ERR_PTR on failure; on error all partially
 * created state is released via perf_buffer__free().
 */
static struct perf_buffer *__perf_buffer__new(int map_fd, size_t page_cnt,
					      struct perf_buffer_params *p)
{
	const char *online_cpus_file = "/sys/devices/system/cpu/online";
	struct bpf_map_info map;
	char msg[STRERR_BUFSIZE];
	struct perf_buffer *pb;
	bool *online = NULL;
	__u32 map_info_len;
	int err, i, j, n;

	/* ring size must be a power of two pages */
	if (page_cnt & (page_cnt - 1)) {
		pr_warn("page count should be power of two, but is %zu\n",
			page_cnt);
		return ERR_PTR(-EINVAL);
	}

	/* best-effort sanity checks */
	memset(&map, 0, sizeof(map));
	map_info_len = sizeof(map);
	err = bpf_obj_get_info_by_fd(map_fd, &map, &map_info_len);
	if (err) {
		err = -errno;
		/* if BPF_OBJ_GET_INFO_BY_FD is supported, will return
		 * -EBADFD, -EFAULT, or -E2BIG on real error
		 */
		if (err != -EINVAL) {
			pr_warn("failed to get map info for map FD %d: %s\n",
				map_fd, libbpf_strerror_r(err, msg, sizeof(msg)));
			return ERR_PTR(err);
		}
		pr_debug("failed to get map info for FD %d; API not supported? Ignoring...\n",
			 map_fd);
	} else {
		if (map.type != BPF_MAP_TYPE_PERF_EVENT_ARRAY) {
			pr_warn("map '%s' should be BPF_MAP_TYPE_PERF_EVENT_ARRAY\n",
				map.name);
			return ERR_PTR(-EINVAL);
		}
	}

	pb = calloc(1, sizeof(*pb));
	if (!pb)
		return ERR_PTR(-ENOMEM);

	pb->event_cb = p->event_cb;
	pb->sample_cb = p->sample_cb;
	pb->lost_cb = p->lost_cb;
	pb->ctx = p->ctx;

	pb->page_size = getpagesize();
	pb->mmap_size = pb->page_size * page_cnt;
	pb->map_fd = map_fd;

	pb->epoll_fd = epoll_create1(EPOLL_CLOEXEC);
	if (pb->epoll_fd < 0) {
		err = -errno;
		pr_warn("failed to create epoll instance: %s\n",
			libbpf_strerror_r(err, msg, sizeof(msg)));
		goto error;
	}

	if (p->cpu_cnt > 0) {
		pb->cpu_cnt = p->cpu_cnt;
	} else {
		pb->cpu_cnt = libbpf_num_possible_cpus();
		if (pb->cpu_cnt < 0) {
			err = pb->cpu_cnt;
			goto error;
		}
		/* don't create more buffers than the map can index */
		if (map.max_entries && map.max_entries < pb->cpu_cnt)
			pb->cpu_cnt = map.max_entries;
	}

	pb->events = calloc(pb->cpu_cnt, sizeof(*pb->events));
	if (!pb->events) {
		err = -ENOMEM;
		pr_warn("failed to allocate events: out of memory\n");
		goto error;
	}
	pb->cpu_bufs = calloc(pb->cpu_cnt, sizeof(*pb->cpu_bufs));
	if (!pb->cpu_bufs) {
		err = -ENOMEM;
		pr_warn("failed to allocate buffers: out of memory\n");
		goto error;
	}

	err = parse_cpu_mask_file(online_cpus_file, &online, &n);
	if (err) {
		pr_warn("failed to get online CPU mask: %d\n", err);
		goto error;
	}

	/* i walks requested CPUs, j counts actually created buffers */
	for (i = 0, j = 0; i < pb->cpu_cnt; i++) {
		struct perf_cpu_buf *cpu_buf;
		int cpu, map_key;

		cpu = p->cpu_cnt > 0 ? p->cpus[i] : i;
		map_key = p->cpu_cnt > 0 ? p->map_keys[i] : i;

		/* in case user didn't explicitly requested particular CPUs to
		 * be attached to, skip offline/not present CPUs
		 */
		if (p->cpu_cnt <= 0 && (cpu >= n || !online[cpu]))
			continue;

		cpu_buf = perf_buffer__open_cpu_buf(pb, p->attr, cpu, map_key);
		if (IS_ERR(cpu_buf)) {
			err = PTR_ERR(cpu_buf);
			goto error;
		}

		pb->cpu_bufs[j] = cpu_buf;

		err = bpf_map_update_elem(pb->map_fd, &map_key,
					  &cpu_buf->fd, 0);
		if (err) {
			err = -errno;
			pr_warn("failed to set cpu #%d, key %d -> perf FD %d: %s\n",
				cpu, map_key, cpu_buf->fd,
				libbpf_strerror_r(err, msg, sizeof(msg)));
			goto error;
		}

		pb->events[j].events = EPOLLIN;
		pb->events[j].data.ptr = cpu_buf;
		if (epoll_ctl(pb->epoll_fd, EPOLL_CTL_ADD, cpu_buf->fd,
			      &pb->events[j]) < 0) {
			err = -errno;
			pr_warn("failed to epoll_ctl cpu #%d perf FD %d: %s\n",
				cpu, cpu_buf->fd,
				libbpf_strerror_r(err, msg, sizeof(msg)));
			goto error;
		}
		j++;
	}
	/* shrink to the number of buffers actually created */
	pb->cpu_cnt = j;
	free(online);

	return pb;

error:
	free(online);
	if (pb)
		perf_buffer__free(pb);
	return ERR_PTR(err);
}
10708 | ||
/* In-memory layout of a PERF_RECORD_SAMPLE record as it appears in the
 * perf ring buffer: generic perf header, payload byte count, then
 * `size` bytes of raw data (flexible array member).
 */
struct perf_sample_raw {
	struct perf_event_header header;
	uint32_t size;		/* number of valid bytes in data[] */
	char data[];
};
10714 | ||
/* In-memory layout of a PERF_RECORD_LOST record, reported by the kernel
 * when ring buffer space ran out and samples were dropped.
 */
struct perf_sample_lost {
	struct perf_event_header header;
	uint64_t id;		/* event ID the losses belong to */
	uint64_t lost;		/* number of dropped samples */
	uint64_t sample_id;
};
10721 | ||
10722 | static enum bpf_perf_event_ret | |
10723 | perf_buffer__process_record(struct perf_event_header *e, void *ctx) | |
10724 | { | |
10725 | struct perf_cpu_buf *cpu_buf = ctx; | |
10726 | struct perf_buffer *pb = cpu_buf->pb; | |
10727 | void *data = e; | |
10728 | ||
10729 | /* user wants full control over parsing perf event */ | |
10730 | if (pb->event_cb) | |
10731 | return pb->event_cb(pb->ctx, cpu_buf->cpu, e); | |
10732 | ||
10733 | switch (e->type) { | |
10734 | case PERF_RECORD_SAMPLE: { | |
10735 | struct perf_sample_raw *s = data; | |
10736 | ||
10737 | if (pb->sample_cb) | |
10738 | pb->sample_cb(pb->ctx, cpu_buf->cpu, s->data, s->size); | |
10739 | break; | |
10740 | } | |
10741 | case PERF_RECORD_LOST: { | |
10742 | struct perf_sample_lost *s = data; | |
10743 | ||
10744 | if (pb->lost_cb) | |
10745 | pb->lost_cb(pb->ctx, cpu_buf->cpu, s->lost); | |
10746 | break; | |
10747 | } | |
10748 | default: | |
be18010e | 10749 | pr_warn("unknown perf sample type %d\n", e->type); |
fb84b822 AN |
10750 | return LIBBPF_PERF_EVENT_ERROR; |
10751 | } | |
10752 | return LIBBPF_PERF_EVENT_CONT; | |
10753 | } | |
10754 | ||
10755 | static int perf_buffer__process_records(struct perf_buffer *pb, | |
10756 | struct perf_cpu_buf *cpu_buf) | |
10757 | { | |
10758 | enum bpf_perf_event_ret ret; | |
10759 | ||
10760 | ret = bpf_perf_event_read_simple(cpu_buf->base, pb->mmap_size, | |
10761 | pb->page_size, &cpu_buf->buf, | |
10762 | &cpu_buf->buf_size, | |
10763 | perf_buffer__process_record, cpu_buf); | |
10764 | if (ret != LIBBPF_PERF_EVENT_CONT) | |
10765 | return ret; | |
10766 | return 0; | |
10767 | } | |
10768 | ||
dca5612f AN |
/* Expose the epoll FD that multiplexes all per-CPU ring buffer FDs, so
 * callers can integrate perf_buffer readiness into their own event loop
 * instead of calling perf_buffer__poll().
 */
int perf_buffer__epoll_fd(const struct perf_buffer *pb)
{
	return pb->epoll_fd;
}
10773 | ||
fb84b822 AN |
10774 | int perf_buffer__poll(struct perf_buffer *pb, int timeout_ms) |
10775 | { | |
10776 | int i, cnt, err; | |
10777 | ||
10778 | cnt = epoll_wait(pb->epoll_fd, pb->events, pb->cpu_cnt, timeout_ms); | |
e9fc3ce9 | 10779 | if (cnt < 0) |
af0efa05 | 10780 | return -errno; |
e9fc3ce9 | 10781 | |
fb84b822 AN |
10782 | for (i = 0; i < cnt; i++) { |
10783 | struct perf_cpu_buf *cpu_buf = pb->events[i].data.ptr; | |
10784 | ||
10785 | err = perf_buffer__process_records(pb, cpu_buf); | |
10786 | if (err) { | |
be18010e | 10787 | pr_warn("error while processing records: %d\n", err); |
e9fc3ce9 | 10788 | return libbpf_err(err); |
fb84b822 AN |
10789 | } |
10790 | } | |
e9fc3ce9 | 10791 | return cnt; |
fb84b822 AN |
10792 | } |
10793 | ||
dca5612f AN |
/* Return number of PERF_EVENT_ARRAY map slots set up by this perf_buffer
 * manager. Valid slot indices for perf_buffer__buffer_fd() and
 * perf_buffer__consume_buffer() are [0, cnt).
 */
size_t perf_buffer__buffer_cnt(const struct perf_buffer *pb)
{
	return pb->cpu_cnt;
}
10801 | ||
10802 | /* | |
10803 | * Return perf_event FD of a ring buffer in *buf_idx* slot of | |
10804 | * PERF_EVENT_ARRAY BPF map. This FD can be polled for new data using | |
10805 | * select()/poll()/epoll() Linux syscalls. | |
10806 | */ | |
10807 | int perf_buffer__buffer_fd(const struct perf_buffer *pb, size_t buf_idx) | |
10808 | { | |
10809 | struct perf_cpu_buf *cpu_buf; | |
10810 | ||
10811 | if (buf_idx >= pb->cpu_cnt) | |
e9fc3ce9 | 10812 | return libbpf_err(-EINVAL); |
dca5612f AN |
10813 | |
10814 | cpu_buf = pb->cpu_bufs[buf_idx]; | |
10815 | if (!cpu_buf) | |
e9fc3ce9 | 10816 | return libbpf_err(-ENOENT); |
dca5612f AN |
10817 | |
10818 | return cpu_buf->fd; | |
10819 | } | |
10820 | ||
10821 | /* | |
10822 | * Consume data from perf ring buffer corresponding to slot *buf_idx* in | |
10823 | * PERF_EVENT_ARRAY BPF map without waiting/polling. If there is no data to | |
10824 | * consume, do nothing and return success. | |
10825 | * Returns: | |
10826 | * - 0 on success; | |
10827 | * - <0 on failure. | |
10828 | */ | |
10829 | int perf_buffer__consume_buffer(struct perf_buffer *pb, size_t buf_idx) | |
10830 | { | |
10831 | struct perf_cpu_buf *cpu_buf; | |
10832 | ||
10833 | if (buf_idx >= pb->cpu_cnt) | |
e9fc3ce9 | 10834 | return libbpf_err(-EINVAL); |
dca5612f AN |
10835 | |
10836 | cpu_buf = pb->cpu_bufs[buf_idx]; | |
10837 | if (!cpu_buf) | |
e9fc3ce9 | 10838 | return libbpf_err(-ENOENT); |
dca5612f AN |
10839 | |
10840 | return perf_buffer__process_records(pb, cpu_buf); | |
10841 | } | |
10842 | ||
272d51af EC |
10843 | int perf_buffer__consume(struct perf_buffer *pb) |
10844 | { | |
10845 | int i, err; | |
10846 | ||
10847 | for (i = 0; i < pb->cpu_cnt; i++) { | |
10848 | struct perf_cpu_buf *cpu_buf = pb->cpu_bufs[i]; | |
10849 | ||
10850 | if (!cpu_buf) | |
10851 | continue; | |
10852 | ||
10853 | err = perf_buffer__process_records(pb, cpu_buf); | |
10854 | if (err) { | |
dca5612f | 10855 | pr_warn("perf_buffer: failed to process records in buffer #%d: %d\n", i, err); |
e9fc3ce9 | 10856 | return libbpf_err(err); |
272d51af EC |
10857 | } |
10858 | } | |
10859 | return 0; | |
10860 | } | |
10861 | ||
34be1646 SL |
/* Describes where one variable-length array lives inside struct
 * bpf_prog_info, so all arrays can be handled generically by offset.
 */
struct bpf_prog_info_array_desc {
	int	array_offset;	/* e.g. offset of jited_prog_insns */
	int	count_offset;	/* e.g. offset of jited_prog_len */
	int	size_offset;	/* > 0: offset of rec size,
				 * < 0: fix size of -size_offset
				 */
};
10869 | ||
/* One descriptor per BPF_PROG_INFO_* array kind, mapping it to the
 * offsets of its pointer, element-count, and record-size fields inside
 * struct bpf_prog_info. Negative third values encode a fixed per-record
 * size of -value bytes (see bpf_prog_info_read_offset_u32()).
 */
static struct bpf_prog_info_array_desc bpf_prog_info_array_desc[] = {
	[BPF_PROG_INFO_JITED_INSNS] = {
		offsetof(struct bpf_prog_info, jited_prog_insns),
		offsetof(struct bpf_prog_info, jited_prog_len),
		-1,
	},
	[BPF_PROG_INFO_XLATED_INSNS] = {
		offsetof(struct bpf_prog_info, xlated_prog_insns),
		offsetof(struct bpf_prog_info, xlated_prog_len),
		-1,
	},
	[BPF_PROG_INFO_MAP_IDS] = {
		offsetof(struct bpf_prog_info, map_ids),
		offsetof(struct bpf_prog_info, nr_map_ids),
		-(int)sizeof(__u32),
	},
	[BPF_PROG_INFO_JITED_KSYMS] = {
		offsetof(struct bpf_prog_info, jited_ksyms),
		offsetof(struct bpf_prog_info, nr_jited_ksyms),
		-(int)sizeof(__u64),
	},
	[BPF_PROG_INFO_JITED_FUNC_LENS] = {
		offsetof(struct bpf_prog_info, jited_func_lens),
		offsetof(struct bpf_prog_info, nr_jited_func_lens),
		-(int)sizeof(__u32),
	},
	[BPF_PROG_INFO_FUNC_INFO] = {
		offsetof(struct bpf_prog_info, func_info),
		offsetof(struct bpf_prog_info, nr_func_info),
		offsetof(struct bpf_prog_info, func_info_rec_size),
	},
	[BPF_PROG_INFO_LINE_INFO] = {
		offsetof(struct bpf_prog_info, line_info),
		offsetof(struct bpf_prog_info, nr_line_info),
		offsetof(struct bpf_prog_info, line_info_rec_size),
	},
	[BPF_PROG_INFO_JITED_LINE_INFO] = {
		offsetof(struct bpf_prog_info, jited_line_info),
		offsetof(struct bpf_prog_info, nr_jited_line_info),
		offsetof(struct bpf_prog_info, jited_line_info_rec_size),
	},
	[BPF_PROG_INFO_PROG_TAGS] = {
		offsetof(struct bpf_prog_info, prog_tags),
		offsetof(struct bpf_prog_info, nr_prog_tags),
		-(int)sizeof(__u8) * BPF_TAG_SIZE,
	},

};
10918 | ||
8983b731 AN |
10919 | static __u32 bpf_prog_info_read_offset_u32(struct bpf_prog_info *info, |
10920 | int offset) | |
34be1646 SL |
10921 | { |
10922 | __u32 *array = (__u32 *)info; | |
10923 | ||
10924 | if (offset >= 0) | |
10925 | return array[offset / sizeof(__u32)]; | |
10926 | return -(int)offset; | |
10927 | } | |
10928 | ||
8983b731 AN |
10929 | static __u64 bpf_prog_info_read_offset_u64(struct bpf_prog_info *info, |
10930 | int offset) | |
34be1646 SL |
10931 | { |
10932 | __u64 *array = (__u64 *)info; | |
10933 | ||
10934 | if (offset >= 0) | |
10935 | return array[offset / sizeof(__u64)]; | |
10936 | return -(int)offset; | |
10937 | } | |
10938 | ||
10939 | static void bpf_prog_info_set_offset_u32(struct bpf_prog_info *info, int offset, | |
10940 | __u32 val) | |
10941 | { | |
10942 | __u32 *array = (__u32 *)info; | |
10943 | ||
10944 | if (offset >= 0) | |
10945 | array[offset / sizeof(__u32)] = val; | |
10946 | } | |
10947 | ||
10948 | static void bpf_prog_info_set_offset_u64(struct bpf_prog_info *info, int offset, | |
10949 | __u64 val) | |
10950 | { | |
10951 | __u64 *array = (__u64 *)info; | |
10952 | ||
10953 | if (offset >= 0) | |
10954 | array[offset / sizeof(__u64)] = val; | |
10955 | } | |
10956 | ||
/* Fetch bpf_prog_info for program *fd* together with all variable-length
 * arrays selected by the *arrays* bitmask (BPF_PROG_INFO_* bits), packed
 * into a single contiguous heap allocation so the result is easy to copy
 * or serialize. Returns a malloc'ed bpf_prog_info_linear (caller frees)
 * or a libbpf error pointer on failure.
 */
struct bpf_prog_info_linear *
bpf_program__get_prog_info_linear(int fd, __u64 arrays)
{
	struct bpf_prog_info_linear *info_linear;
	struct bpf_prog_info info = {};
	__u32 info_len = sizeof(info);
	__u32 data_len = 0;
	int i, err;
	void *ptr;

	/* reject bits beyond the last known array kind */
	if (arrays >> BPF_PROG_INFO_LAST_ARRAY)
		return libbpf_err_ptr(-EINVAL);

	/* step 1: get array dimensions */
	err = bpf_obj_get_info_by_fd(fd, &info, &info_len);
	if (err) {
		pr_debug("can't get prog info: %s", strerror(errno));
		return libbpf_err_ptr(-EFAULT);
	}

	/* step 2: calculate total size of all arrays */
	for (i = BPF_PROG_INFO_FIRST_ARRAY; i < BPF_PROG_INFO_LAST_ARRAY; ++i) {
		bool include_array = (arrays & (1UL << i)) > 0;
		struct bpf_prog_info_array_desc *desc;
		__u32 count, size;

		desc = bpf_prog_info_array_desc + i;

		/* kernel is too old to support this field */
		if (info_len < desc->array_offset + sizeof(__u32) ||
		    info_len < desc->count_offset + sizeof(__u32) ||
		    (desc->size_offset > 0 && info_len < desc->size_offset))
			include_array = false;

		if (!include_array) {
			arrays &= ~(1UL << i);	/* clear the bit */
			continue;
		}

		count = bpf_prog_info_read_offset_u32(&info, desc->count_offset);
		size = bpf_prog_info_read_offset_u32(&info, desc->size_offset);

		/* NOTE(review): count * size is 32-bit arithmetic; presumably
		 * kernel-reported array sizes stay far below 4GB — confirm
		 */
		data_len += count * size;
	}

	/* step 3: allocate continuous memory */
	data_len = roundup(data_len, sizeof(__u64));
	info_linear = malloc(sizeof(struct bpf_prog_info_linear) + data_len);
	if (!info_linear)
		return libbpf_err_ptr(-ENOMEM);

	/* step 4: fill data to info_linear->info */
	info_linear->arrays = arrays;
	memset(&info_linear->info, 0, sizeof(info));
	ptr = info_linear->data;

	/* carve a slice of info_linear->data for each requested array and
	 * point the corresponding bpf_prog_info pointer field at it
	 */
	for (i = BPF_PROG_INFO_FIRST_ARRAY; i < BPF_PROG_INFO_LAST_ARRAY; ++i) {
		struct bpf_prog_info_array_desc *desc;
		__u32 count, size;

		if ((arrays & (1UL << i)) == 0)
			continue;

		desc = bpf_prog_info_array_desc + i;
		count = bpf_prog_info_read_offset_u32(&info, desc->count_offset);
		size = bpf_prog_info_read_offset_u32(&info, desc->size_offset);
		bpf_prog_info_set_offset_u32(&info_linear->info,
					     desc->count_offset, count);
		bpf_prog_info_set_offset_u32(&info_linear->info,
					     desc->size_offset, size);
		bpf_prog_info_set_offset_u64(&info_linear->info,
					     desc->array_offset,
					     ptr_to_u64(ptr));
		ptr += count * size;
	}

	/* step 5: call syscall again to get required arrays */
	err = bpf_obj_get_info_by_fd(fd, &info_linear->info, &info_len);
	if (err) {
		pr_debug("can't get prog info: %s", strerror(errno));
		free(info_linear);
		return libbpf_err_ptr(-EFAULT);
	}

	/* step 6: verify the data */
	for (i = BPF_PROG_INFO_FIRST_ARRAY; i < BPF_PROG_INFO_LAST_ARRAY; ++i) {
		struct bpf_prog_info_array_desc *desc;
		__u32 v1, v2;

		if ((arrays & (1UL << i)) == 0)
			continue;

		/* warn (but don't fail) if the second syscall reported
		 * different dimensions than the first one did
		 */
		desc = bpf_prog_info_array_desc + i;
		v1 = bpf_prog_info_read_offset_u32(&info, desc->count_offset);
		v2 = bpf_prog_info_read_offset_u32(&info_linear->info,
						   desc->count_offset);
		if (v1 != v2)
			pr_warn("%s: mismatch in element count\n", __func__);

		v1 = bpf_prog_info_read_offset_u32(&info, desc->size_offset);
		v2 = bpf_prog_info_read_offset_u32(&info_linear->info,
						   desc->size_offset);
		if (v1 != v2)
			pr_warn("%s: mismatch in rec size\n", __func__);
	}

	/* step 7: update info_len and data_len */
	info_linear->info_len = sizeof(struct bpf_prog_info);
	info_linear->data_len = data_len;

	return info_linear;
}
11069 | ||
11070 | void bpf_program__bpil_addr_to_offs(struct bpf_prog_info_linear *info_linear) | |
11071 | { | |
11072 | int i; | |
11073 | ||
11074 | for (i = BPF_PROG_INFO_FIRST_ARRAY; i < BPF_PROG_INFO_LAST_ARRAY; ++i) { | |
11075 | struct bpf_prog_info_array_desc *desc; | |
11076 | __u64 addr, offs; | |
11077 | ||
11078 | if ((info_linear->arrays & (1UL << i)) == 0) | |
11079 | continue; | |
11080 | ||
11081 | desc = bpf_prog_info_array_desc + i; | |
11082 | addr = bpf_prog_info_read_offset_u64(&info_linear->info, | |
11083 | desc->array_offset); | |
11084 | offs = addr - ptr_to_u64(info_linear->data); | |
11085 | bpf_prog_info_set_offset_u64(&info_linear->info, | |
11086 | desc->array_offset, offs); | |
11087 | } | |
11088 | } | |
11089 | ||
11090 | void bpf_program__bpil_offs_to_addr(struct bpf_prog_info_linear *info_linear) | |
11091 | { | |
11092 | int i; | |
11093 | ||
11094 | for (i = BPF_PROG_INFO_FIRST_ARRAY; i < BPF_PROG_INFO_LAST_ARRAY; ++i) { | |
11095 | struct bpf_prog_info_array_desc *desc; | |
11096 | __u64 addr, offs; | |
11097 | ||
11098 | if ((info_linear->arrays & (1UL << i)) == 0) | |
11099 | continue; | |
11100 | ||
11101 | desc = bpf_prog_info_array_desc + i; | |
11102 | offs = bpf_prog_info_read_offset_u64(&info_linear->info, | |
11103 | desc->array_offset); | |
11104 | addr = offs + ptr_to_u64(info_linear->data); | |
11105 | bpf_prog_info_set_offset_u64(&info_linear->info, | |
11106 | desc->array_offset, addr); | |
11107 | } | |
11108 | } | |
6446b315 | 11109 | |
ff26ce5c EC |
/* Set the BTF-based attach target of a BPF program before its object is
 * loaded. Behavior depends on arguments:
 *   - attach_prog_fd > 0, attach_func_name == NULL: just remember the
 *     FD; the BTF ID is resolved later during program load;
 *   - attach_prog_fd > 0 and attach_func_name: resolve BTF ID inside
 *     the target program's BTF right away;
 *   - attach_prog_fd == 0 and attach_func_name: resolve against kernel
 *     (vmlinux/module) BTF.
 * Returns 0 on success, negative error otherwise; rejects NULL prog,
 * negative FD, missing func name (FD-less mode), or an already-loaded
 * object.
 */
int bpf_program__set_attach_target(struct bpf_program *prog,
				   int attach_prog_fd,
				   const char *attach_func_name)
{
	int btf_obj_fd = 0, btf_id = 0, err;

	if (!prog || attach_prog_fd < 0)
		return libbpf_err(-EINVAL);

	/* attach target feeds into load-time relocation; too late now */
	if (prog->obj->loaded)
		return libbpf_err(-EINVAL);

	if (attach_prog_fd && !attach_func_name) {
		/* remember attach_prog_fd and let bpf_program__load() find
		 * BTF ID during the program load
		 */
		prog->attach_prog_fd = attach_prog_fd;
		return 0;
	}

	if (attach_prog_fd) {
		btf_id = libbpf_find_prog_btf_id(attach_func_name,
						 attach_prog_fd);
		if (btf_id < 0)
			return libbpf_err(btf_id);
	} else {
		if (!attach_func_name)
			return libbpf_err(-EINVAL);

		/* load btf_vmlinux, if not yet */
		err = bpf_object__load_vmlinux_btf(prog->obj, true);
		if (err)
			return libbpf_err(err);
		err = find_kernel_btf_id(prog->obj, attach_func_name,
					 prog->expected_attach_type,
					 &btf_obj_fd, &btf_id);
		if (err)
			return libbpf_err(err);
	}

	prog->attach_btf_id = btf_id;
	prog->attach_btf_obj_fd = btf_obj_fd;
	prog->attach_prog_fd = attach_prog_fd;
	return 0;
}
11155 | ||
/* Parse a CPU list string like "0-7,9,11-12\n" (the format used by
 * /sys/devices/system/cpu/{possible,online}) into a heap-allocated bool
 * array: (*mask)[cpu] is true iff cpu was listed. On success returns 0
 * and fills *mask/*mask_sz (caller must free(*mask)); on failure returns
 * a negative errno-style code and leaves *mask NULL.
 */
int parse_cpu_mask_str(const char *s, bool **mask, int *mask_sz)
{
	int err = 0, n, len, start, end = -1;
	bool *tmp;

	*mask = NULL;
	*mask_sz = 0;

	/* Each sub string separated by ',' has format \d+-\d+ or \d+ */
	while (*s) {
		if (*s == ',' || *s == '\n') {
			s++;
			continue;
		}
		/* %n records consumed chars, so len covers whichever of
		 * "\d+" or "\d+-\d+" actually matched
		 */
		n = sscanf(s, "%d%n-%d%n", &start, &len, &end, &len);
		if (n <= 0 || n > 2) {
			pr_warn("Failed to get CPU range %s: %d\n", s, n);
			err = -EINVAL;
			goto cleanup;
		} else if (n == 1) {
			end = start;
		}
		if (start < 0 || start > end) {
			pr_warn("Invalid CPU range [%d,%d] in %s\n",
				start, end, s);
			err = -EINVAL;
			goto cleanup;
		}
		/* Only ever grow the mask. Unconditionally realloc'ing to
		 * end + 1 (as before) would, for out-of-order input like
		 * "5,1", shrink the array and hand memset() a negative
		 * (huge after size_t conversion) gap length.
		 */
		if (end + 1 > *mask_sz) {
			tmp = realloc(*mask, end + 1);
			if (!tmp) {
				err = -ENOMEM;
				goto cleanup;
			}
			*mask = tmp;
			/* zero the gap between old size and new end */
			memset(tmp + *mask_sz, 0, end + 1 - *mask_sz);
			*mask_sz = end + 1;
		}
		memset(*mask + start, 1, end - start + 1);
		s += len;
	}
	if (!*mask_sz) {
		pr_warn("Empty CPU range\n");
		return -EINVAL;
	}
	return 0;
cleanup:
	free(*mask);
	*mask = NULL;
	return err;
}
11205 | ||
/* Read a CPU list from file *fcpu* (e.g. /sys/devices/system/cpu/possible)
 * and parse it via parse_cpu_mask_str(). Returns 0 and fills
 * *mask/*mask_sz on success (caller frees *mask), negative error code
 * otherwise; -E2BIG if the file doesn't fit the 128-byte buffer.
 */
int parse_cpu_mask_file(const char *fcpu, bool **mask, int *mask_sz)
{
	char buf[128];
	int fd, err, len;

	fd = open(fcpu, O_RDONLY);
	if (fd < 0) {
		err = -errno;
		pr_warn("Failed to open cpu mask file %s: %d\n", fcpu, err);
		return err;
	}

	len = read(fd, buf, sizeof(buf));
	close(fd);
	if (len <= 0) {
		/* empty file is -EINVAL; read error maps errno */
		err = len ? -errno : -EINVAL;
		pr_warn("Failed to read cpu mask from %s: %d\n", fcpu, err);
		return err;
	}
	if (len >= sizeof(buf)) {
		pr_warn("CPU mask is too big in file %s\n", fcpu);
		return -E2BIG;
	}

	buf[len] = '\0';
	return parse_cpu_mask_str(buf, mask, mask_sz);
}
11232 | ||
/* Return the number of possible CPUs per
 * /sys/devices/system/cpu/possible. The result is computed once and
 * cached in a static (READ_ONCE/WRITE_ONCE for concurrent callers);
 * returns a negative error if the sysfs file can't be parsed.
 */
int libbpf_num_possible_cpus(void)
{
	static const char *fcpu = "/sys/devices/system/cpu/possible";
	static int cpus;
	bool *mask;
	int err, n, idx, cnt;

	/* fast path: previously computed value */
	cnt = READ_ONCE(cpus);
	if (cnt > 0)
		return cnt;

	err = parse_cpu_mask_file(fcpu, &mask, &n);
	if (err)
		return libbpf_err(err);

	cnt = 0;
	for (idx = 0; idx < n; idx++) {
		if (mask[idx])
			cnt++;
	}
	free(mask);

	WRITE_ONCE(cpus, cnt);
	return cnt;
}
d66562fb AN |
11258 | |
/* Open the BPF object embedded in a bpftool-generated skeleton and wire
 * the skeleton's map/program pointers (and mmaped data-section pointers)
 * to the resulting bpf_object. Returns 0 or a negative error code.
 */
int bpf_object__open_skeleton(struct bpf_object_skeleton *s,
			      const struct bpf_object_open_opts *opts)
{
	DECLARE_LIBBPF_OPTS(bpf_object_open_opts, skel_opts,
		.object_name = s->name,
	);
	struct bpf_object *obj;
	int i, err;

	/* Attempt to preserve opts->object_name, unless overriden by user
	 * explicitly. Overwriting object name for skeletons is discouraged,
	 * as it breaks global data maps, because they contain object name
	 * prefix as their own map name prefix. When skeleton is generated,
	 * bpftool is making an assumption that this name will stay the same.
	 */
	if (opts) {
		memcpy(&skel_opts, opts, sizeof(*opts));
		if (!opts->object_name)
			skel_opts.object_name = s->name;
	}

	obj = bpf_object__open_mem(s->data, s->data_sz, &skel_opts);
	err = libbpf_get_error(obj);
	if (err) {
		pr_warn("failed to initialize skeleton BPF object '%s': %d\n",
			s->name, err);
		return libbpf_err(err);
	}

	*s->obj = obj;

	/* resolve each skeleton map by name and expose its mmaped region */
	for (i = 0; i < s->map_cnt; i++) {
		struct bpf_map **map = s->maps[i].map;
		const char *name = s->maps[i].name;
		void **mmaped = s->maps[i].mmaped;

		*map = bpf_object__find_map_by_name(obj, name);
		if (!*map) {
			pr_warn("failed to find skeleton map '%s'\n", name);
			return libbpf_err(-ESRCH);
		}

		/* externs shouldn't be pre-setup from user code */
		if (mmaped && (*map)->libbpf_type != LIBBPF_MAP_KCONFIG)
			*mmaped = (*map)->mmaped;
	}

	/* resolve each skeleton program by name */
	for (i = 0; i < s->prog_cnt; i++) {
		struct bpf_program **prog = s->progs[i].prog;
		const char *name = s->progs[i].name;

		*prog = bpf_object__find_program_by_name(obj, name);
		if (!*prog) {
			pr_warn("failed to find skeleton program '%s'\n", name);
			return libbpf_err(-ESRCH);
		}
	}

	return 0;
}
11319 | ||
/* Load the skeleton's BPF object into the kernel, then re-mmap() every
 * BPF_F_MMAPABLE map over its initialization image so the skeleton's
 * data-section pointers keep working against kernel-backed memory.
 * Returns 0 or a negative error code.
 */
int bpf_object__load_skeleton(struct bpf_object_skeleton *s)
{
	int i, err;

	err = bpf_object__load(*s->obj);
	if (err) {
		pr_warn("failed to load BPF skeleton '%s': %d\n", s->name, err);
		return libbpf_err(err);
	}

	for (i = 0; i < s->map_cnt; i++) {
		struct bpf_map *map = *s->maps[i].map;
		size_t mmap_sz = bpf_map_mmap_sz(map);
		int prot, map_fd = bpf_map__fd(map);
		void **mmaped = s->maps[i].mmaped;

		/* skeleton tracks no user-visible pointer for this map */
		if (!mmaped)
			continue;

		if (!(map->def.map_flags & BPF_F_MMAPABLE)) {
			*mmaped = NULL;
			continue;
		}

		/* user-space mapping is read-only for BPF_F_RDONLY_PROG
		 * maps, read-write otherwise
		 */
		if (map->def.map_flags & BPF_F_RDONLY_PROG)
			prot = PROT_READ;
		else
			prot = PROT_READ | PROT_WRITE;

		/* Remap anonymous mmap()-ed "map initialization image" as
		 * a BPF map-backed mmap()-ed memory, but preserving the same
		 * memory address. This will cause kernel to change process'
		 * page table to point to a different piece of kernel memory,
		 * but from userspace point of view memory address (and its
		 * contents, being identical at this point) will stay the
		 * same. This mapping will be released by bpf_object__close()
		 * as per normal clean up procedure, so we don't need to worry
		 * about it from skeleton's clean up perspective.
		 */
		*mmaped = mmap(map->mmaped, mmap_sz, prot,
				MAP_SHARED | MAP_FIXED, map_fd, 0);
		if (*mmaped == MAP_FAILED) {
			err = -errno;
			*mmaped = NULL;
			pr_warn("failed to re-mmap() map '%s': %d\n",
				bpf_map__name(map), err);
			return libbpf_err(err);
		}
	}

	return 0;
}
11372 | ||
11373 | int bpf_object__attach_skeleton(struct bpf_object_skeleton *s) | |
11374 | { | |
e9fc3ce9 | 11375 | int i, err; |
d66562fb AN |
11376 | |
11377 | for (i = 0; i < s->prog_cnt; i++) { | |
11378 | struct bpf_program *prog = *s->progs[i].prog; | |
11379 | struct bpf_link **link = s->progs[i].link; | |
d66562fb | 11380 | |
d9297581 AN |
11381 | if (!prog->load) |
11382 | continue; | |
11383 | ||
5532dfd4 AN |
11384 | /* auto-attaching not supported for this program */ |
11385 | if (!prog->sec_def || !prog->sec_def->attach_fn) | |
d66562fb AN |
11386 | continue; |
11387 | ||
942025c9 | 11388 | *link = bpf_program__attach(prog); |
e9fc3ce9 AN |
11389 | err = libbpf_get_error(*link); |
11390 | if (err) { | |
11391 | pr_warn("failed to auto-attach program '%s': %d\n", | |
11392 | bpf_program__name(prog), err); | |
11393 | return libbpf_err(err); | |
d66562fb AN |
11394 | } |
11395 | } | |
11396 | ||
11397 | return 0; | |
11398 | } | |
11399 | ||
11400 | void bpf_object__detach_skeleton(struct bpf_object_skeleton *s) | |
11401 | { | |
11402 | int i; | |
11403 | ||
11404 | for (i = 0; i < s->prog_cnt; i++) { | |
11405 | struct bpf_link **link = s->progs[i].link; | |
11406 | ||
50450fc7 | 11407 | bpf_link__destroy(*link); |
d66562fb AN |
11408 | *link = NULL; |
11409 | } | |
11410 | } | |
11411 | ||
11412 | void bpf_object__destroy_skeleton(struct bpf_object_skeleton *s) | |
11413 | { | |
11414 | if (s->progs) | |
11415 | bpf_object__detach_skeleton(s); | |
11416 | if (s->obj) | |
11417 | bpf_object__close(*s->obj); | |
11418 | free(s->maps); | |
11419 | free(s->progs); | |
11420 | free(s); | |
11421 | } |