libbpf: Add generic BTF type shallow copy API
tools/lib/bpf/libbpf.c (linux-2.6-block.git)
// SPDX-License-Identifier: (LGPL-2.1 OR BSD-2-Clause)

/*
 * Common eBPF ELF object loading operations.
 *
 * Copyright (C) 2013-2015 Alexei Starovoitov <ast@kernel.org>
 * Copyright (C) 2015 Wang Nan <wangnan0@huawei.com>
 * Copyright (C) 2015 Huawei Inc.
 * Copyright (C) 2017 Nicira, Inc.
 * Copyright (C) 2019 Isovalent, Inc.
 */

#ifndef _GNU_SOURCE
#define _GNU_SOURCE
#endif
#include <stdlib.h>
#include <stdio.h>
#include <stdarg.h>
#include <libgen.h>
#include <inttypes.h>
#include <limits.h>
#include <string.h>
#include <unistd.h>
#include <endian.h>
#include <fcntl.h>
#include <errno.h>
#include <ctype.h>
#include <asm/unistd.h>
#include <linux/err.h>
#include <linux/kernel.h>
#include <linux/bpf.h>
#include <linux/btf.h>
#include <linux/filter.h>
#include <linux/list.h>
#include <linux/limits.h>
#include <linux/perf_event.h>
#include <linux/ring_buffer.h>
#include <linux/version.h>
#include <sys/epoll.h>
#include <sys/ioctl.h>
#include <sys/mman.h>
#include <sys/stat.h>
#include <sys/types.h>
#include <sys/vfs.h>
#include <sys/utsname.h>
#include <sys/resource.h>
#include <libelf.h>
#include <gelf.h>
#include <zlib.h>

#include "libbpf.h"
#include "bpf.h"
#include "btf.h"
#include "str_error.h"
#include "libbpf_internal.h"
#include "hashmap.h"

#ifndef EM_BPF
#define EM_BPF 247
#endif

#ifndef BPF_FS_MAGIC
#define BPF_FS_MAGIC 0xcafe4a11
#endif

#define BPF_INSN_SZ (sizeof(struct bpf_insn))

/* vsprintf() in __base_pr() uses a nonliteral format string. It may break
 * compilation if the user enables the corresponding warning. Disable it
 * explicitly.
 */
#pragma GCC diagnostic ignored "-Wformat-nonliteral"

#define __printf(a, b) __attribute__((format(printf, a, b)))

static struct bpf_map *bpf_object__add_map(struct bpf_object *obj);
static const struct btf_type *
skip_mods_and_typedefs(const struct btf *btf, __u32 id, __u32 *res_id);

static int __base_pr(enum libbpf_print_level level, const char *format,
		     va_list args)
{
	if (level == LIBBPF_DEBUG)
		return 0;

	return vfprintf(stderr, format, args);
}

static libbpf_print_fn_t __libbpf_pr = __base_pr;

libbpf_print_fn_t libbpf_set_print(libbpf_print_fn_t fn)
{
	libbpf_print_fn_t old_print_fn = __libbpf_pr;

	__libbpf_pr = fn;
	return old_print_fn;
}

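/* Usage sketch (illustrative, not part of this file): a caller can install
 * its own print callback and later restore the previous one. The callback
 * name below is hypothetical.
 *
 *	static int my_pr(enum libbpf_print_level lvl, const char *fmt,
 *			 va_list args)
 *	{
 *		return lvl == LIBBPF_DEBUG ? 0 : vfprintf(stderr, fmt, args);
 *	}
 *
 *	libbpf_print_fn_t old_fn = libbpf_set_print(my_pr);
 *	...
 *	libbpf_set_print(old_fn);	// restore previous callback
 */
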
__printf(2, 3)
void libbpf_print(enum libbpf_print_level level, const char *format, ...)
{
	va_list args;

	if (!__libbpf_pr)
		return;

	va_start(args, format);
	__libbpf_pr(level, format, args);
	va_end(args);
}

static void pr_perm_msg(int err)
{
	struct rlimit limit;
	char buf[100];

	if (err != -EPERM || geteuid() != 0)
		return;

	err = getrlimit(RLIMIT_MEMLOCK, &limit);
	if (err)
		return;

	if (limit.rlim_cur == RLIM_INFINITY)
		return;

	if (limit.rlim_cur < 1024)
		snprintf(buf, sizeof(buf), "%zu bytes", (size_t)limit.rlim_cur);
	else if (limit.rlim_cur < 1024*1024)
		snprintf(buf, sizeof(buf), "%.1f KiB", (double)limit.rlim_cur / 1024);
	else
		snprintf(buf, sizeof(buf), "%.1f MiB", (double)limit.rlim_cur / (1024*1024));

	pr_warn("permission error while running as root; try raising 'ulimit -l'? current value: %s\n",
		buf);
}

#define STRERR_BUFSIZE 128

/* Copied from tools/perf/util/util.h */
#ifndef zfree
# define zfree(ptr) ({ free(*ptr); *ptr = NULL; })
#endif

#ifndef zclose
# define zclose(fd) ({			\
	int ___err = 0;			\
	if ((fd) >= 0)			\
		___err = close((fd));	\
	fd = -1;			\
	___err; })
#endif

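/* Usage note (illustrative): both helpers reset their argument, so a
 * repeated free/close is harmless:
 *
 *	zclose(map->fd);	// closes fd if >= 0, then sets it to -1
 *	zclose(map->fd);	// now a no-op, evaluates to 0
 *	zfree(&map->name);	// free()s and NULLs the pointer
 */
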
static inline __u64 ptr_to_u64(const void *ptr)
{
	return (__u64) (unsigned long) ptr;
}

enum kern_feature_id {
	/* v4.14: kernel support for program & map names. */
	FEAT_PROG_NAME,
	/* v5.2: kernel support for global data sections. */
	FEAT_GLOBAL_DATA,
	/* BTF support */
	FEAT_BTF,
	/* BTF_KIND_FUNC and BTF_KIND_FUNC_PROTO support */
	FEAT_BTF_FUNC,
	/* BTF_KIND_VAR and BTF_KIND_DATASEC support */
	FEAT_BTF_DATASEC,
	/* BTF_FUNC_GLOBAL is supported */
	FEAT_BTF_GLOBAL_FUNC,
	/* BPF_F_MMAPABLE is supported for arrays */
	FEAT_ARRAY_MMAP,
	/* kernel support for expected_attach_type in BPF_PROG_LOAD */
	FEAT_EXP_ATTACH_TYPE,
	/* bpf_probe_read_{kernel,user}[_str] helpers */
	FEAT_PROBE_READ_KERN,
	/* BPF_PROG_BIND_MAP is supported */
	FEAT_PROG_BIND_MAP,
	/* Kernel support for module BTFs */
	FEAT_MODULE_BTF,
	/* BTF_KIND_FLOAT support */
	FEAT_BTF_FLOAT,
	__FEAT_CNT,
};

static bool kernel_supports(enum kern_feature_id feat_id);

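/* Usage sketch (illustrative): the loading code gates optional kernel
 * functionality on these feature probes, along the lines of:
 *
 *	if (!kernel_supports(FEAT_GLOBAL_DATA)) {
 *		pr_warn("kernel doesn't support global data\n");
 *		return -ENOTSUP;
 *	}
 */
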
enum reloc_type {
	RELO_LD64,
	RELO_CALL,
	RELO_DATA,
	RELO_EXTERN,
	RELO_SUBPROG_ADDR,
};

struct reloc_desc {
	enum reloc_type type;
	int insn_idx;
	int map_idx;
	int sym_off;
	bool processed;
};

struct bpf_sec_def;

typedef struct bpf_link *(*attach_fn_t)(const struct bpf_sec_def *sec,
					struct bpf_program *prog);

struct bpf_sec_def {
	const char *sec;
	size_t len;
	enum bpf_prog_type prog_type;
	enum bpf_attach_type expected_attach_type;
	bool is_exp_attach_type_optional;
	bool is_attachable;
	bool is_attach_btf;
	bool is_sleepable;
	attach_fn_t attach_fn;
};

/*
 * bpf_prog would be a better name, but that is already taken by
 * linux/filter.h.
 */
struct bpf_program {
	const struct bpf_sec_def *sec_def;
	char *sec_name;
	size_t sec_idx;
	/* this program's instruction offset (in number of instructions)
	 * within its containing ELF section
	 */
	size_t sec_insn_off;
	/* number of original instructions in ELF section belonging to this
	 * program, not taking into account subprogram instructions possibly
	 * appended later during relocation
	 */
	size_t sec_insn_cnt;
	/* Offset (in number of instructions) of the start of instructions
	 * belonging to this BPF program within its containing main BPF
	 * program. For the entry-point (main) BPF program, this is always
	 * zero. For a sub-program, this gets reset before each main BPF
	 * program is processed and relocated, and is used to determine
	 * whether the sub-program was already appended to the main program,
	 * and if yes, at which instruction offset.
	 */
	size_t sub_insn_off;

	char *name;
	/* sec_name with / replaced by _; makes recursive pinning
	 * in bpf_object__pin_programs easier
	 */
	char *pin_name;

	/* instructions that belong to BPF program; insns[0] is located at
	 * sec_insn_off instruction within its ELF section in ELF file, so
	 * when mapping ELF file instruction index to the local instruction,
	 * one needs to subtract sec_insn_off; and vice versa.
	 */
	struct bpf_insn *insns;
	/* actual number of instructions in this BPF program's image; for
	 * entry-point BPF programs this includes the size of main program
	 * itself plus all the used sub-programs, appended at the end
	 */
	size_t insns_cnt;

	struct reloc_desc *reloc_desc;
	int nr_reloc;
	int log_level;

	struct {
		int nr;
		int *fds;
	} instances;
	bpf_program_prep_t preprocessor;

	struct bpf_object *obj;
	void *priv;
	bpf_program_clear_priv_t clear_priv;

	bool load;
	enum bpf_prog_type type;
	enum bpf_attach_type expected_attach_type;
	int prog_ifindex;
	__u32 attach_btf_obj_fd;
	__u32 attach_btf_id;
	__u32 attach_prog_fd;
	void *func_info;
	__u32 func_info_rec_size;
	__u32 func_info_cnt;

	void *line_info;
	__u32 line_info_rec_size;
	__u32 line_info_cnt;
	__u32 prog_flags;
};

struct bpf_struct_ops {
	const char *tname;
	const struct btf_type *type;
	struct bpf_program **progs;
	__u32 *kern_func_off;
	/* e.g. struct tcp_congestion_ops in bpf_prog's btf format */
	void *data;
	/* e.g. struct bpf_struct_ops_tcp_congestion_ops in
	 * btf_vmlinux's format.
	 * struct bpf_struct_ops_tcp_congestion_ops {
	 *	[... some other kernel fields ...]
	 *	struct tcp_congestion_ops data;
	 * }
	 * kern_vdata-size == sizeof(struct bpf_struct_ops_tcp_congestion_ops)
	 * bpf_map__init_kern_struct_ops() will populate the "kern_vdata"
	 * from "data".
	 */
	void *kern_vdata;
	__u32 type_id;
};

#define DATA_SEC ".data"
#define BSS_SEC ".bss"
#define RODATA_SEC ".rodata"
#define KCONFIG_SEC ".kconfig"
#define KSYMS_SEC ".ksyms"
#define STRUCT_OPS_SEC ".struct_ops"

enum libbpf_map_type {
	LIBBPF_MAP_UNSPEC,
	LIBBPF_MAP_DATA,
	LIBBPF_MAP_BSS,
	LIBBPF_MAP_RODATA,
	LIBBPF_MAP_KCONFIG,
};

static const char * const libbpf_type_to_btf_name[] = {
	[LIBBPF_MAP_DATA]	= DATA_SEC,
	[LIBBPF_MAP_BSS]	= BSS_SEC,
	[LIBBPF_MAP_RODATA]	= RODATA_SEC,
	[LIBBPF_MAP_KCONFIG]	= KCONFIG_SEC,
};

struct bpf_map {
	char *name;
	int fd;
	int sec_idx;
	size_t sec_offset;
	int map_ifindex;
	int inner_map_fd;
	struct bpf_map_def def;
	__u32 numa_node;
	__u32 btf_var_idx;
	__u32 btf_key_type_id;
	__u32 btf_value_type_id;
	__u32 btf_vmlinux_value_type_id;
	void *priv;
	bpf_map_clear_priv_t clear_priv;
	enum libbpf_map_type libbpf_type;
	void *mmaped;
	struct bpf_struct_ops *st_ops;
	struct bpf_map *inner_map;
	void **init_slots;
	int init_slots_sz;
	char *pin_path;
	bool pinned;
	bool reused;
};

enum extern_type {
	EXT_UNKNOWN,
	EXT_KCFG,
	EXT_KSYM,
};

enum kcfg_type {
	KCFG_UNKNOWN,
	KCFG_CHAR,
	KCFG_BOOL,
	KCFG_INT,
	KCFG_TRISTATE,
	KCFG_CHAR_ARR,
};

struct extern_desc {
	enum extern_type type;
	int sym_idx;
	int btf_id;
	int sec_btf_id;
	const char *name;
	bool is_set;
	bool is_weak;
	union {
		struct {
			enum kcfg_type type;
			int sz;
			int align;
			int data_off;
			bool is_signed;
		} kcfg;
		struct {
			unsigned long long addr;

			/* target btf_id of the corresponding kernel var. */
			int kernel_btf_obj_fd;
			int kernel_btf_id;

			/* local btf_id of the ksym extern's type. */
			__u32 type_id;
		} ksym;
	};
};

static LIST_HEAD(bpf_objects_list);

struct module_btf {
	struct btf *btf;
	char *name;
	__u32 id;
	int fd;
};

struct bpf_object {
	char name[BPF_OBJ_NAME_LEN];
	char license[64];
	__u32 kern_version;

	struct bpf_program *programs;
	size_t nr_programs;
	struct bpf_map *maps;
	size_t nr_maps;
	size_t maps_cap;

	char *kconfig;
	struct extern_desc *externs;
	int nr_extern;
	int kconfig_map_idx;
	int rodata_map_idx;

	bool loaded;
	bool has_subcalls;

	/*
	 * Information for ELF-related work. Only valid if fd is valid.
	 */
	struct {
		int fd;
		const void *obj_buf;
		size_t obj_buf_sz;
		Elf *elf;
		GElf_Ehdr ehdr;
		Elf_Data *symbols;
		Elf_Data *data;
		Elf_Data *rodata;
		Elf_Data *bss;
		Elf_Data *st_ops_data;
		size_t shstrndx; /* section index for section name strings */
		size_t strtabidx;
		struct {
			GElf_Shdr shdr;
			Elf_Data *data;
		} *reloc_sects;
		int nr_reloc_sects;
		int maps_shndx;
		int btf_maps_shndx;
		__u32 btf_maps_sec_btf_id;
		int text_shndx;
		int symbols_shndx;
		int data_shndx;
		int rodata_shndx;
		int bss_shndx;
		int st_ops_shndx;
	} efile;
	/*
	 * All loaded bpf_objects are linked in a list, which is hidden from
	 * the caller. bpf_objects__<func> handlers deal with all objects.
	 */
	struct list_head list;

	struct btf *btf;
	struct btf_ext *btf_ext;

	/* Parse and load BTF vmlinux if any of the programs in the object need
	 * it at load time.
	 */
	struct btf *btf_vmlinux;
	/* vmlinux BTF override for CO-RE relocations */
	struct btf *btf_vmlinux_override;
	/* Lazily initialized kernel module BTFs */
	struct module_btf *btf_modules;
	bool btf_modules_loaded;
	size_t btf_module_cnt;
	size_t btf_module_cap;

	void *priv;
	bpf_object_clear_priv_t clear_priv;

	char path[];
};
#define obj_elf_valid(o) ((o)->efile.elf)

static const char *elf_sym_str(const struct bpf_object *obj, size_t off);
static const char *elf_sec_str(const struct bpf_object *obj, size_t off);
static Elf_Scn *elf_sec_by_idx(const struct bpf_object *obj, size_t idx);
static Elf_Scn *elf_sec_by_name(const struct bpf_object *obj, const char *name);
static int elf_sec_hdr(const struct bpf_object *obj, Elf_Scn *scn, GElf_Shdr *hdr);
static const char *elf_sec_name(const struct bpf_object *obj, Elf_Scn *scn);
static Elf_Data *elf_sec_data(const struct bpf_object *obj, Elf_Scn *scn);
static int elf_sym_by_sec_off(const struct bpf_object *obj, size_t sec_idx,
			      size_t off, __u32 sym_type, GElf_Sym *sym);

void bpf_program__unload(struct bpf_program *prog)
{
	int i;

	if (!prog)
		return;

	/*
	 * If the object is opened but the program was never loaded,
	 * it is possible that prog->instances.nr == -1.
	 */
	if (prog->instances.nr > 0) {
		for (i = 0; i < prog->instances.nr; i++)
			zclose(prog->instances.fds[i]);
	} else if (prog->instances.nr != -1) {
		pr_warn("Internal error: instances.nr is %d\n",
			prog->instances.nr);
	}

	prog->instances.nr = -1;
	zfree(&prog->instances.fds);

	zfree(&prog->func_info);
	zfree(&prog->line_info);
}

static void bpf_program__exit(struct bpf_program *prog)
{
	if (!prog)
		return;

	if (prog->clear_priv)
		prog->clear_priv(prog, prog->priv);

	prog->priv = NULL;
	prog->clear_priv = NULL;

	bpf_program__unload(prog);
	zfree(&prog->name);
	zfree(&prog->sec_name);
	zfree(&prog->pin_name);
	zfree(&prog->insns);
	zfree(&prog->reloc_desc);

	prog->nr_reloc = 0;
	prog->insns_cnt = 0;
	prog->sec_idx = -1;
}

static char *__bpf_program__pin_name(struct bpf_program *prog)
{
	char *name, *p;

	name = p = strdup(prog->sec_name);
	while ((p = strchr(p, '/')))
		*p = '_';

	return name;
}

static bool insn_is_subprog_call(const struct bpf_insn *insn)
{
	return BPF_CLASS(insn->code) == BPF_JMP &&
	       BPF_OP(insn->code) == BPF_CALL &&
	       BPF_SRC(insn->code) == BPF_K &&
	       insn->src_reg == BPF_PSEUDO_CALL &&
	       insn->dst_reg == 0 &&
	       insn->off == 0;
}

static bool is_ldimm64(struct bpf_insn *insn)
{
	return insn->code == (BPF_LD | BPF_IMM | BPF_DW);
}

static bool insn_is_pseudo_func(struct bpf_insn *insn)
{
	return is_ldimm64(insn) && insn->src_reg == BPF_PSEUDO_FUNC;
}

static int
bpf_object__init_prog(struct bpf_object *obj, struct bpf_program *prog,
		      const char *name, size_t sec_idx, const char *sec_name,
		      size_t sec_off, void *insn_data, size_t insn_data_sz)
{
	if (insn_data_sz == 0 || insn_data_sz % BPF_INSN_SZ || sec_off % BPF_INSN_SZ) {
		pr_warn("sec '%s': corrupted program '%s', offset %zu, size %zu\n",
			sec_name, name, sec_off, insn_data_sz);
		return -EINVAL;
	}

	memset(prog, 0, sizeof(*prog));
	prog->obj = obj;

	prog->sec_idx = sec_idx;
	prog->sec_insn_off = sec_off / BPF_INSN_SZ;
	prog->sec_insn_cnt = insn_data_sz / BPF_INSN_SZ;
	/* insns_cnt can later be increased by appending used subprograms */
	prog->insns_cnt = prog->sec_insn_cnt;

	prog->type = BPF_PROG_TYPE_UNSPEC;
	prog->load = true;

	prog->instances.fds = NULL;
	prog->instances.nr = -1;

	prog->sec_name = strdup(sec_name);
	if (!prog->sec_name)
		goto errout;

	prog->name = strdup(name);
	if (!prog->name)
		goto errout;

	prog->pin_name = __bpf_program__pin_name(prog);
	if (!prog->pin_name)
		goto errout;

	prog->insns = malloc(insn_data_sz);
	if (!prog->insns)
		goto errout;
	memcpy(prog->insns, insn_data, insn_data_sz);

	return 0;
errout:
	pr_warn("sec '%s': failed to allocate memory for prog '%s'\n", sec_name, name);
	bpf_program__exit(prog);
	return -ENOMEM;
}

static int
bpf_object__add_programs(struct bpf_object *obj, Elf_Data *sec_data,
			 const char *sec_name, int sec_idx)
{
	struct bpf_program *prog, *progs;
	void *data = sec_data->d_buf;
	size_t sec_sz = sec_data->d_size, sec_off, prog_sz;
	int nr_progs, err;
	const char *name;
	GElf_Sym sym;

	progs = obj->programs;
	nr_progs = obj->nr_programs;
	sec_off = 0;

	while (sec_off < sec_sz) {
		if (elf_sym_by_sec_off(obj, sec_idx, sec_off, STT_FUNC, &sym)) {
			pr_warn("sec '%s': failed to find program symbol at offset %zu\n",
				sec_name, sec_off);
			return -LIBBPF_ERRNO__FORMAT;
		}

		prog_sz = sym.st_size;

		name = elf_sym_str(obj, sym.st_name);
		if (!name) {
			pr_warn("sec '%s': failed to get symbol name for offset %zu\n",
				sec_name, sec_off);
			return -LIBBPF_ERRNO__FORMAT;
		}

		if (sec_off + prog_sz > sec_sz) {
			pr_warn("sec '%s': program at offset %zu crosses section boundary\n",
				sec_name, sec_off);
			return -LIBBPF_ERRNO__FORMAT;
		}

		pr_debug("sec '%s': found program '%s' at insn offset %zu (%zu bytes), code size %zu insns (%zu bytes)\n",
			 sec_name, name, sec_off / BPF_INSN_SZ, sec_off, prog_sz / BPF_INSN_SZ, prog_sz);

		progs = libbpf_reallocarray(progs, nr_progs + 1, sizeof(*progs));
		if (!progs) {
			/*
			 * In this case the original obj->programs is still
			 * valid, so no special treatment is needed in
			 * bpf_close_object().
			 */
			pr_warn("sec '%s': failed to alloc memory for new program '%s'\n",
				sec_name, name);
			return -ENOMEM;
		}
		obj->programs = progs;

		prog = &progs[nr_progs];

		err = bpf_object__init_prog(obj, prog, name, sec_idx, sec_name,
					    sec_off, data + sec_off, prog_sz);
		if (err)
			return err;

		nr_progs++;
		obj->nr_programs = nr_progs;

		sec_off += prog_sz;
	}

	return 0;
}

static __u32 get_kernel_version(void)
{
	__u32 major, minor, patch;
	struct utsname info;

	uname(&info);
	if (sscanf(info.release, "%u.%u.%u", &major, &minor, &patch) != 3)
		return 0;
	return KERNEL_VERSION(major, minor, patch);
}

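/* Worked example (illustrative): for a running kernel whose uname release is
 * "5.11.4-arch1", sscanf() yields major=5, minor=11, patch=4, and
 * KERNEL_VERSION(5, 11, 4) packs them as (5 << 16) + (11 << 8) + 4. A release
 * string that doesn't match "%u.%u.%u" yields 0.
 */
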
static const struct btf_member *
find_member_by_offset(const struct btf_type *t, __u32 bit_offset)
{
	struct btf_member *m;
	int i;

	for (i = 0, m = btf_members(t); i < btf_vlen(t); i++, m++) {
		if (btf_member_bit_offset(t, i) == bit_offset)
			return m;
	}

	return NULL;
}

static const struct btf_member *
find_member_by_name(const struct btf *btf, const struct btf_type *t,
		    const char *name)
{
	struct btf_member *m;
	int i;

	for (i = 0, m = btf_members(t); i < btf_vlen(t); i++, m++) {
		if (!strcmp(btf__name_by_offset(btf, m->name_off), name))
			return m;
	}

	return NULL;
}

#define STRUCT_OPS_VALUE_PREFIX "bpf_struct_ops_"
static int find_btf_by_prefix_kind(const struct btf *btf, const char *prefix,
				   const char *name, __u32 kind);

static int
find_struct_ops_kern_types(const struct btf *btf, const char *tname,
			   const struct btf_type **type, __u32 *type_id,
			   const struct btf_type **vtype, __u32 *vtype_id,
			   const struct btf_member **data_member)
{
	const struct btf_type *kern_type, *kern_vtype;
	const struct btf_member *kern_data_member;
	__s32 kern_vtype_id, kern_type_id;
	__u32 i;

	kern_type_id = btf__find_by_name_kind(btf, tname, BTF_KIND_STRUCT);
	if (kern_type_id < 0) {
		pr_warn("struct_ops init_kern: struct %s is not found in kernel BTF\n",
			tname);
		return kern_type_id;
	}
	kern_type = btf__type_by_id(btf, kern_type_id);

	/* Find the corresponding "map_value" type that will be used
	 * in map_update(BPF_MAP_TYPE_STRUCT_OPS). For example,
	 * find "struct bpf_struct_ops_tcp_congestion_ops" from the
	 * btf_vmlinux.
	 */
	kern_vtype_id = find_btf_by_prefix_kind(btf, STRUCT_OPS_VALUE_PREFIX,
						tname, BTF_KIND_STRUCT);
	if (kern_vtype_id < 0) {
		pr_warn("struct_ops init_kern: struct %s%s is not found in kernel BTF\n",
			STRUCT_OPS_VALUE_PREFIX, tname);
		return kern_vtype_id;
	}
	kern_vtype = btf__type_by_id(btf, kern_vtype_id);

	/* Find "struct tcp_congestion_ops" from
	 * struct bpf_struct_ops_tcp_congestion_ops {
	 *	[ ... ]
	 *	struct tcp_congestion_ops data;
	 * }
	 */
	kern_data_member = btf_members(kern_vtype);
	for (i = 0; i < btf_vlen(kern_vtype); i++, kern_data_member++) {
		if (kern_data_member->type == kern_type_id)
			break;
	}
	if (i == btf_vlen(kern_vtype)) {
		pr_warn("struct_ops init_kern: struct %s data is not found in struct %s%s\n",
			tname, STRUCT_OPS_VALUE_PREFIX, tname);
		return -EINVAL;
	}

	*type = kern_type;
	*type_id = kern_type_id;
	*vtype = kern_vtype;
	*vtype_id = kern_vtype_id;
	*data_member = kern_data_member;

	return 0;
}

static bool bpf_map__is_struct_ops(const struct bpf_map *map)
{
	return map->def.type == BPF_MAP_TYPE_STRUCT_OPS;
}

/* Init the map's fields that depend on kern_btf */
static int bpf_map__init_kern_struct_ops(struct bpf_map *map,
					 const struct btf *btf,
					 const struct btf *kern_btf)
{
	const struct btf_member *member, *kern_member, *kern_data_member;
	const struct btf_type *type, *kern_type, *kern_vtype;
	__u32 i, kern_type_id, kern_vtype_id, kern_data_off;
	struct bpf_struct_ops *st_ops;
	void *data, *kern_data;
	const char *tname;
	int err;

	st_ops = map->st_ops;
	type = st_ops->type;
	tname = st_ops->tname;
	err = find_struct_ops_kern_types(kern_btf, tname,
					 &kern_type, &kern_type_id,
					 &kern_vtype, &kern_vtype_id,
					 &kern_data_member);
	if (err)
		return err;

	pr_debug("struct_ops init_kern %s: type_id:%u kern_type_id:%u kern_vtype_id:%u\n",
		 map->name, st_ops->type_id, kern_type_id, kern_vtype_id);

	map->def.value_size = kern_vtype->size;
	map->btf_vmlinux_value_type_id = kern_vtype_id;

	st_ops->kern_vdata = calloc(1, kern_vtype->size);
	if (!st_ops->kern_vdata)
		return -ENOMEM;

	data = st_ops->data;
	kern_data_off = kern_data_member->offset / 8;
	kern_data = st_ops->kern_vdata + kern_data_off;

	member = btf_members(type);
	for (i = 0; i < btf_vlen(type); i++, member++) {
		const struct btf_type *mtype, *kern_mtype;
		__u32 mtype_id, kern_mtype_id;
		void *mdata, *kern_mdata;
		__s64 msize, kern_msize;
		__u32 moff, kern_moff;
		__u32 kern_member_idx;
		const char *mname;

		mname = btf__name_by_offset(btf, member->name_off);
		kern_member = find_member_by_name(kern_btf, kern_type, mname);
		if (!kern_member) {
			pr_warn("struct_ops init_kern %s: Cannot find member %s in kernel BTF\n",
				map->name, mname);
			return -ENOTSUP;
		}

		kern_member_idx = kern_member - btf_members(kern_type);
		if (btf_member_bitfield_size(type, i) ||
		    btf_member_bitfield_size(kern_type, kern_member_idx)) {
			pr_warn("struct_ops init_kern %s: bitfield %s is not supported\n",
				map->name, mname);
			return -ENOTSUP;
		}

		moff = member->offset / 8;
		kern_moff = kern_member->offset / 8;

		mdata = data + moff;
		kern_mdata = kern_data + kern_moff;

		mtype = skip_mods_and_typedefs(btf, member->type, &mtype_id);
		kern_mtype = skip_mods_and_typedefs(kern_btf, kern_member->type,
						    &kern_mtype_id);
		if (BTF_INFO_KIND(mtype->info) !=
		    BTF_INFO_KIND(kern_mtype->info)) {
			pr_warn("struct_ops init_kern %s: Unmatched member type %s %u != %u(kernel)\n",
				map->name, mname, BTF_INFO_KIND(mtype->info),
				BTF_INFO_KIND(kern_mtype->info));
			return -ENOTSUP;
		}

		if (btf_is_ptr(mtype)) {
			struct bpf_program *prog;

			prog = st_ops->progs[i];
			if (!prog)
				continue;

			kern_mtype = skip_mods_and_typedefs(kern_btf,
							    kern_mtype->type,
							    &kern_mtype_id);

			/* mtype->type must be a func_proto which was
			 * guaranteed in bpf_object__collect_st_ops_relos(),
			 * so only check kern_mtype for func_proto here.
			 */
			if (!btf_is_func_proto(kern_mtype)) {
				pr_warn("struct_ops init_kern %s: kernel member %s is not a func ptr\n",
					map->name, mname);
				return -ENOTSUP;
			}

			prog->attach_btf_id = kern_type_id;
			prog->expected_attach_type = kern_member_idx;

			st_ops->kern_func_off[i] = kern_data_off + kern_moff;

			pr_debug("struct_ops init_kern %s: func ptr %s is set to prog %s from data(+%u) to kern_data(+%u)\n",
				 map->name, mname, prog->name, moff,
				 kern_moff);

			continue;
		}

		msize = btf__resolve_size(btf, mtype_id);
		kern_msize = btf__resolve_size(kern_btf, kern_mtype_id);
		if (msize < 0 || kern_msize < 0 || msize != kern_msize) {
			pr_warn("struct_ops init_kern %s: Error in size of member %s: %zd != %zd(kernel)\n",
				map->name, mname, (ssize_t)msize,
				(ssize_t)kern_msize);
			return -ENOTSUP;
		}

		pr_debug("struct_ops init_kern %s: copy %s %u bytes from data(+%u) to kern_data(+%u)\n",
			 map->name, mname, (unsigned int)msize,
			 moff, kern_moff);
		memcpy(kern_mdata, mdata, msize);
	}

	return 0;
}

static int bpf_object__init_kern_struct_ops_maps(struct bpf_object *obj)
{
	struct bpf_map *map;
	size_t i;
	int err;

	for (i = 0; i < obj->nr_maps; i++) {
		map = &obj->maps[i];

		if (!bpf_map__is_struct_ops(map))
			continue;

		err = bpf_map__init_kern_struct_ops(map, obj->btf,
						    obj->btf_vmlinux);
		if (err)
			return err;
	}

	return 0;
}

static int bpf_object__init_struct_ops_maps(struct bpf_object *obj)
{
	const struct btf_type *type, *datasec;
	const struct btf_var_secinfo *vsi;
	struct bpf_struct_ops *st_ops;
	const char *tname, *var_name;
	__s32 type_id, datasec_id;
	const struct btf *btf;
	struct bpf_map *map;
	__u32 i;

	if (obj->efile.st_ops_shndx == -1)
		return 0;

	btf = obj->btf;
	datasec_id = btf__find_by_name_kind(btf, STRUCT_OPS_SEC,
					    BTF_KIND_DATASEC);
	if (datasec_id < 0) {
		pr_warn("struct_ops init: DATASEC %s not found\n",
			STRUCT_OPS_SEC);
		return -EINVAL;
	}

	datasec = btf__type_by_id(btf, datasec_id);
	vsi = btf_var_secinfos(datasec);
	for (i = 0; i < btf_vlen(datasec); i++, vsi++) {
		type = btf__type_by_id(obj->btf, vsi->type);
		var_name = btf__name_by_offset(obj->btf, type->name_off);

		type_id = btf__resolve_type(obj->btf, vsi->type);
		if (type_id < 0) {
			pr_warn("struct_ops init: Cannot resolve var type_id %u in DATASEC %s\n",
				vsi->type, STRUCT_OPS_SEC);
			return -EINVAL;
		}

		type = btf__type_by_id(obj->btf, type_id);
		tname = btf__name_by_offset(obj->btf, type->name_off);
		if (!tname[0]) {
			pr_warn("struct_ops init: anonymous type is not supported\n");
			return -ENOTSUP;
		}
		if (!btf_is_struct(type)) {
			pr_warn("struct_ops init: %s is not a struct\n", tname);
			return -EINVAL;
		}

		map = bpf_object__add_map(obj);
		if (IS_ERR(map))
			return PTR_ERR(map);

		map->sec_idx = obj->efile.st_ops_shndx;
		map->sec_offset = vsi->offset;
		map->name = strdup(var_name);
		if (!map->name)
			return -ENOMEM;

		map->def.type = BPF_MAP_TYPE_STRUCT_OPS;
		map->def.key_size = sizeof(int);
		map->def.value_size = type->size;
		map->def.max_entries = 1;

		map->st_ops = calloc(1, sizeof(*map->st_ops));
		if (!map->st_ops)
			return -ENOMEM;
		st_ops = map->st_ops;
		st_ops->data = malloc(type->size);
		st_ops->progs = calloc(btf_vlen(type), sizeof(*st_ops->progs));
		st_ops->kern_func_off = malloc(btf_vlen(type) *
					       sizeof(*st_ops->kern_func_off));
		if (!st_ops->data || !st_ops->progs || !st_ops->kern_func_off)
			return -ENOMEM;

		if (vsi->offset + type->size > obj->efile.st_ops_data->d_size) {
			pr_warn("struct_ops init: var %s is beyond the end of DATASEC %s\n",
				var_name, STRUCT_OPS_SEC);
			return -EINVAL;
		}

		memcpy(st_ops->data,
		       obj->efile.st_ops_data->d_buf + vsi->offset,
		       type->size);
		st_ops->tname = tname;
		st_ops->type = type;
		st_ops->type_id = type_id;

		pr_debug("struct_ops init: struct %s(type_id=%u) %s found at offset %u\n",
			 tname, type_id, var_name, vsi->offset);
	}

	return 0;
}

static struct bpf_object *bpf_object__new(const char *path,
					  const void *obj_buf,
					  size_t obj_buf_sz,
					  const char *obj_name)
{
	struct bpf_object *obj;
	char *end;

	obj = calloc(1, sizeof(struct bpf_object) + strlen(path) + 1);
	if (!obj) {
		pr_warn("alloc memory failed for %s\n", path);
		return ERR_PTR(-ENOMEM);
	}

	strcpy(obj->path, path);
	if (obj_name) {
		strncpy(obj->name, obj_name, sizeof(obj->name) - 1);
		obj->name[sizeof(obj->name) - 1] = 0;
	} else {
		/* Using basename() GNU version which doesn't modify arg. */
		strncpy(obj->name, basename((void *)path),
			sizeof(obj->name) - 1);
		end = strchr(obj->name, '.');
		if (end)
			*end = 0;
	}

	obj->efile.fd = -1;
	/*
	 * The caller of this function should also call
	 * bpf_object__elf_finish() after data collection to return
	 * obj_buf to the user. If not, we would have to duplicate the
	 * buffer to avoid the user freeing it before ELF processing
	 * is finished.
	 */
	obj->efile.obj_buf = obj_buf;
	obj->efile.obj_buf_sz = obj_buf_sz;
	obj->efile.maps_shndx = -1;
	obj->efile.btf_maps_shndx = -1;
	obj->efile.data_shndx = -1;
	obj->efile.rodata_shndx = -1;
	obj->efile.bss_shndx = -1;
	obj->efile.st_ops_shndx = -1;
	obj->kconfig_map_idx = -1;
	obj->rodata_map_idx = -1;

	obj->kern_version = get_kernel_version();
	obj->loaded = false;

	INIT_LIST_HEAD(&obj->list);
	list_add(&obj->list, &bpf_objects_list);
	return obj;
}

static void bpf_object__elf_finish(struct bpf_object *obj)
{
	if (!obj_elf_valid(obj))
		return;

	if (obj->efile.elf) {
		elf_end(obj->efile.elf);
		obj->efile.elf = NULL;
	}
	obj->efile.symbols = NULL;
	obj->efile.data = NULL;
	obj->efile.rodata = NULL;
	obj->efile.bss = NULL;
	obj->efile.st_ops_data = NULL;

	zfree(&obj->efile.reloc_sects);
	obj->efile.nr_reloc_sects = 0;
	zclose(obj->efile.fd);
	obj->efile.obj_buf = NULL;
	obj->efile.obj_buf_sz = 0;
}

/* if libelf is old and doesn't support mmap(), fall back to read() */
#ifndef ELF_C_READ_MMAP
#define ELF_C_READ_MMAP ELF_C_READ
#endif

static int bpf_object__elf_init(struct bpf_object *obj)
{
	int err = 0;
	GElf_Ehdr *ep;

	if (obj_elf_valid(obj)) {
		pr_warn("elf: init internal error\n");
		return -LIBBPF_ERRNO__LIBELF;
	}

	if (obj->efile.obj_buf_sz > 0) {
		/*
		 * obj_buf should have been validated by
		 * bpf_object__open_buffer().
		 */
		obj->efile.elf = elf_memory((char *)obj->efile.obj_buf,
					    obj->efile.obj_buf_sz);
	} else {
		obj->efile.fd = open(obj->path, O_RDONLY);
		if (obj->efile.fd < 0) {
			char errmsg[STRERR_BUFSIZE], *cp;

			err = -errno;
			cp = libbpf_strerror_r(err, errmsg, sizeof(errmsg));
			pr_warn("elf: failed to open %s: %s\n", obj->path, cp);
			return err;
		}

		obj->efile.elf = elf_begin(obj->efile.fd, ELF_C_READ_MMAP, NULL);
	}

	if (!obj->efile.elf) {
		pr_warn("elf: failed to open %s as ELF file: %s\n", obj->path, elf_errmsg(-1));
		err = -LIBBPF_ERRNO__LIBELF;
		goto errout;
	}

	if (!gelf_getehdr(obj->efile.elf, &obj->efile.ehdr)) {
		pr_warn("elf: failed to get ELF header from %s: %s\n", obj->path, elf_errmsg(-1));
		err = -LIBBPF_ERRNO__FORMAT;
		goto errout;
	}
	ep = &obj->efile.ehdr;

	if (elf_getshdrstrndx(obj->efile.elf, &obj->efile.shstrndx)) {
		pr_warn("elf: failed to get section names section index for %s: %s\n",
			obj->path, elf_errmsg(-1));
		err = -LIBBPF_ERRNO__FORMAT;
		goto errout;
	}

	/* Elf is corrupted/truncated, avoid calling elf_strptr. */
	if (!elf_rawdata(elf_getscn(obj->efile.elf, obj->efile.shstrndx), NULL)) {
		pr_warn("elf: failed to get section names strings from %s: %s\n",
			obj->path, elf_errmsg(-1));
		return -LIBBPF_ERRNO__FORMAT;
	}

	/* Old LLVM set e_machine to EM_NONE */
	if (ep->e_type != ET_REL ||
	    (ep->e_machine && ep->e_machine != EM_BPF)) {
		pr_warn("elf: %s is not a valid eBPF object file\n", obj->path);
		err = -LIBBPF_ERRNO__FORMAT;
		goto errout;
	}

	return 0;
errout:
	bpf_object__elf_finish(obj);
	return err;
}

static int bpf_object__check_endianness(struct bpf_object *obj)
{
#if __BYTE_ORDER == __LITTLE_ENDIAN
	if (obj->efile.ehdr.e_ident[EI_DATA] == ELFDATA2LSB)
		return 0;
#elif __BYTE_ORDER == __BIG_ENDIAN
	if (obj->efile.ehdr.e_ident[EI_DATA] == ELFDATA2MSB)
		return 0;
#else
# error "Unrecognized __BYTE_ORDER__"
#endif
	pr_warn("elf: endianness mismatch in %s.\n", obj->path);
	return -LIBBPF_ERRNO__ENDIAN;
}

static int
bpf_object__init_license(struct bpf_object *obj, void *data, size_t size)
{
	memcpy(obj->license, data, min(size, sizeof(obj->license) - 1));
	pr_debug("license of %s is %s\n", obj->path, obj->license);
	return 0;
}

static int
bpf_object__init_kversion(struct bpf_object *obj, void *data, size_t size)
{
	__u32 kver;

	if (size != sizeof(kver)) {
		pr_warn("invalid kver section in %s\n", obj->path);
		return -LIBBPF_ERRNO__FORMAT;
	}
	memcpy(&kver, data, sizeof(kver));
	obj->kern_version = kver;
	pr_debug("kernel version of %s is %x\n", obj->path, obj->kern_version);
	return 0;
}

static bool bpf_map_type__is_map_in_map(enum bpf_map_type type)
{
	if (type == BPF_MAP_TYPE_ARRAY_OF_MAPS ||
	    type == BPF_MAP_TYPE_HASH_OF_MAPS)
		return true;
	return false;
}

int bpf_object__section_size(const struct bpf_object *obj, const char *name,
			     __u32 *size)
{
	int ret = -ENOENT;

	*size = 0;
	if (!name) {
		return -EINVAL;
	} else if (!strcmp(name, DATA_SEC)) {
		if (obj->efile.data)
			*size = obj->efile.data->d_size;
	} else if (!strcmp(name, BSS_SEC)) {
		if (obj->efile.bss)
			*size = obj->efile.bss->d_size;
	} else if (!strcmp(name, RODATA_SEC)) {
		if (obj->efile.rodata)
			*size = obj->efile.rodata->d_size;
	} else if (!strcmp(name, STRUCT_OPS_SEC)) {
		if (obj->efile.st_ops_data)
			*size = obj->efile.st_ops_data->d_size;
	} else {
		Elf_Scn *scn = elf_sec_by_name(obj, name);
		Elf_Data *data = elf_sec_data(obj, scn);

		if (data) {
			ret = 0; /* found it */
			*size = data->d_size;
		}
	}

	return *size ? 0 : ret;
}

int bpf_object__variable_offset(const struct bpf_object *obj, const char *name,
				__u32 *off)
{
	Elf_Data *symbols = obj->efile.symbols;
	const char *sname;
	size_t si;

	if (!name || !off)
		return -EINVAL;

	for (si = 0; si < symbols->d_size / sizeof(GElf_Sym); si++) {
		GElf_Sym sym;

		if (!gelf_getsym(symbols, si, &sym))
			continue;
		if (GELF_ST_BIND(sym.st_info) != STB_GLOBAL ||
		    GELF_ST_TYPE(sym.st_info) != STT_OBJECT)
			continue;

		sname = elf_sym_str(obj, sym.st_name);
		if (!sname) {
			pr_warn("failed to get sym name string for var %s\n",
				name);
			return -EIO;
		}
		if (strcmp(name, sname) == 0) {
			*off = sym.st_value;
			return 0;
		}
	}

	return -ENOENT;
}

static struct bpf_map *bpf_object__add_map(struct bpf_object *obj)
{
	struct bpf_map *new_maps;
	size_t new_cap;
	int i;

	if (obj->nr_maps < obj->maps_cap)
		return &obj->maps[obj->nr_maps++];

	new_cap = max((size_t)4, obj->maps_cap * 3 / 2);
	new_maps = libbpf_reallocarray(obj->maps, new_cap, sizeof(*obj->maps));
	if (!new_maps) {
		pr_warn("alloc maps for object failed\n");
		return ERR_PTR(-ENOMEM);
	}

	obj->maps_cap = new_cap;
	obj->maps = new_maps;

	/* zero out new maps */
	memset(obj->maps + obj->nr_maps, 0,
	       (obj->maps_cap - obj->nr_maps) * sizeof(*obj->maps));
	/*
	 * Fill all fds with -1 so that on failure we won't close an
	 * incorrect fd (fd=0 is stdin); zclose won't close a negative fd.
	 */
	for (i = obj->nr_maps; i < obj->maps_cap; i++) {
		obj->maps[i].fd = -1;
		obj->maps[i].inner_map_fd = -1;
	}

	return &obj->maps[obj->nr_maps++];
}

static size_t bpf_map_mmap_sz(const struct bpf_map *map)
{
	long page_sz = sysconf(_SC_PAGE_SIZE);
	size_t map_sz;

	map_sz = (size_t)roundup(map->def.value_size, 8) * map->def.max_entries;
	map_sz = roundup(map_sz, page_sz);
	return map_sz;
}

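/* Worked example (illustrative): with value_size = 1001 and max_entries = 1,
 * the value size is first rounded up to an 8-byte multiple (1001 -> 1008),
 * giving map_sz = 1008, which is then rounded up to the page size (typically
 * 4096), so the mmap()ed region spans one full page.
 */
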
static char *internal_map_name(struct bpf_object *obj,
			       enum libbpf_map_type type)
{
	char map_name[BPF_OBJ_NAME_LEN], *p;
	const char *sfx = libbpf_type_to_btf_name[type];
	int sfx_len = max((size_t)7, strlen(sfx));
	int pfx_len = min((size_t)BPF_OBJ_NAME_LEN - sfx_len - 1,
			  strlen(obj->name));

	snprintf(map_name, sizeof(map_name), "%.*s%.*s", pfx_len, obj->name,
		 sfx_len, libbpf_type_to_btf_name[type]);

	/* sanitise map name to characters allowed by kernel */
	for (p = map_name; *p && p < map_name + sizeof(map_name); p++)
		if (!isalnum(*p) && *p != '_' && *p != '.')
			*p = '_';

	return strdup(map_name);
}
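
/* Worked example (illustrative): for an object named "test" and type
 * LIBBPF_MAP_RODATA the suffix is ".rodata", so the generated name is
 * "test.rodata". With BPF_OBJ_NAME_LEN == 16, a longer object name such as
 * "test_prog" is truncated to "test_pro.rodata" so that prefix + suffix
 * still fit; disallowed characters are replaced with '_'.
 */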

static int
bpf_object__init_internal_map(struct bpf_object *obj, enum libbpf_map_type type,
			      int sec_idx, void *data, size_t data_sz)
{
	struct bpf_map_def *def;
	struct bpf_map *map;
	int err;

	map = bpf_object__add_map(obj);
	if (IS_ERR(map))
		return PTR_ERR(map);

	map->libbpf_type = type;
	map->sec_idx = sec_idx;
	map->sec_offset = 0;
	map->name = internal_map_name(obj, type);
	if (!map->name) {
		pr_warn("failed to alloc map name\n");
		return -ENOMEM;
	}

	def = &map->def;
	def->type = BPF_MAP_TYPE_ARRAY;
	def->key_size = sizeof(int);
	def->value_size = data_sz;
	def->max_entries = 1;
	def->map_flags = type == LIBBPF_MAP_RODATA || type == LIBBPF_MAP_KCONFIG
			 ? BPF_F_RDONLY_PROG : 0;
	def->map_flags |= BPF_F_MMAPABLE;

	pr_debug("map '%s' (global data): at sec_idx %d, offset %zu, flags %x.\n",
		 map->name, map->sec_idx, map->sec_offset, def->map_flags);

	map->mmaped = mmap(NULL, bpf_map_mmap_sz(map), PROT_READ | PROT_WRITE,
			   MAP_SHARED | MAP_ANONYMOUS, -1, 0);
	if (map->mmaped == MAP_FAILED) {
		err = -errno;
		map->mmaped = NULL;
		pr_warn("failed to alloc map '%s' content buffer: %d\n",
			map->name, err);
		zfree(&map->name);
		return err;
	}

	if (data)
		memcpy(map->mmaped, data, data_sz);

	pr_debug("map %td is \"%s\"\n", map - obj->maps, map->name);
	return 0;
}

static int bpf_object__init_global_data_maps(struct bpf_object *obj)
{
	int err;

	/*
	 * Populate obj->maps with libbpf internal maps.
	 */
	if (obj->efile.data_shndx >= 0) {
		err = bpf_object__init_internal_map(obj, LIBBPF_MAP_DATA,
						    obj->efile.data_shndx,
						    obj->efile.data->d_buf,
						    obj->efile.data->d_size);
		if (err)
			return err;
	}
	if (obj->efile.rodata_shndx >= 0) {
		err = bpf_object__init_internal_map(obj, LIBBPF_MAP_RODATA,
						    obj->efile.rodata_shndx,
						    obj->efile.rodata->d_buf,
						    obj->efile.rodata->d_size);
		if (err)
			return err;

		obj->rodata_map_idx = obj->nr_maps - 1;
	}
	if (obj->efile.bss_shndx >= 0) {
		err = bpf_object__init_internal_map(obj, LIBBPF_MAP_BSS,
						    obj->efile.bss_shndx,
						    NULL,
						    obj->efile.bss->d_size);
		if (err)
			return err;
	}
	return 0;
}

static struct extern_desc *find_extern_by_name(const struct bpf_object *obj,
					       const void *name)
{
	int i;

	for (i = 0; i < obj->nr_extern; i++) {
		if (strcmp(obj->externs[i].name, name) == 0)
			return &obj->externs[i];
	}
	return NULL;
}

static int set_kcfg_value_tri(struct extern_desc *ext, void *ext_val,
			      char value)
{
	switch (ext->kcfg.type) {
	case KCFG_BOOL:
		if (value == 'm') {
			pr_warn("extern (kcfg) %s=%c should be tristate or char\n",
				ext->name, value);
			return -EINVAL;
		}
		*(bool *)ext_val = value == 'y' ? true : false;
		break;
	case KCFG_TRISTATE:
		if (value == 'y')
			*(enum libbpf_tristate *)ext_val = TRI_YES;
		else if (value == 'm')
			*(enum libbpf_tristate *)ext_val = TRI_MODULE;
		else /* value == 'n' */
			*(enum libbpf_tristate *)ext_val = TRI_NO;
		break;
	case KCFG_CHAR:
		*(char *)ext_val = value;
		break;
	case KCFG_UNKNOWN:
	case KCFG_INT:
	case KCFG_CHAR_ARR:
	default:
		pr_warn("extern (kcfg) %s=%c should be bool, tristate, or char\n",
			ext->name, value);
		return -EINVAL;
	}
	ext->is_set = true;
	return 0;
}

static int set_kcfg_value_str(struct extern_desc *ext, char *ext_val,
			      const char *value)
{
	size_t len;

	if (ext->kcfg.type != KCFG_CHAR_ARR) {
		pr_warn("extern (kcfg) %s=%s should be char array\n", ext->name, value);
		return -EINVAL;
	}

	len = strlen(value);
	if (value[len - 1] != '"') {
		pr_warn("extern (kcfg) '%s': invalid string config '%s'\n",
			ext->name, value);
		return -EINVAL;
	}

	/* strip quotes */
	len -= 2;
	if (len >= ext->kcfg.sz) {
		pr_warn("extern (kcfg) '%s': long string config %s of (%zu bytes) truncated to %d bytes\n",
			ext->name, value, len, ext->kcfg.sz - 1);
		len = ext->kcfg.sz - 1;
	}
	memcpy(ext_val, value + 1, len);
	ext_val[len] = '\0';
	ext->is_set = true;
	return 0;
}

static int parse_u64(const char *value, __u64 *res)
{
	char *value_end;
	int err;

	errno = 0;
	*res = strtoull(value, &value_end, 0);
	if (errno) {
		err = -errno;
		pr_warn("failed to parse '%s' as integer: %d\n", value, err);
		return err;
	}
	if (*value_end) {
		pr_warn("failed to parse '%s' as integer completely\n", value);
		return -EINVAL;
	}
	return 0;
}

static bool is_kcfg_value_in_range(const struct extern_desc *ext, __u64 v)
{
	int bit_sz = ext->kcfg.sz * 8;

	if (ext->kcfg.sz == 8)
		return true;

	/* Validate that value stored in u64 fits in integer of `ext->sz`
	 * bytes size without any loss of information. If the target integer
	 * is signed, we rely on the following limits of integer type of
	 * Y bits and subsequent transformation:
	 *
	 *     -2^(Y-1) <= X           <= 2^(Y-1) - 1
	 *      0 <= X + 2^(Y-1)       <= 2^Y - 1
	 *      0 <= X + 2^(Y-1)       <  2^Y
	 *
	 * For unsigned target integer, check that all the (64 - Y) bits are
	 * zero.
	 */
	if (ext->kcfg.is_signed)
		return v + (1ULL << (bit_sz - 1)) < (1ULL << bit_sz);
	else
		return (v >> bit_sz) == 0;
}

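/* Worked example (illustrative): for a signed 1-byte target (Y = 8 bits),
 * valid values are -128..127. For v = (__u64)-1 (i.e. X = -1), the check
 * computes v + 2^7, which wraps around to 127 < 256 and passes; for v = 128,
 * 128 + 128 = 256 is not < 256, so the value is rejected as out of range.
 */
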
static int set_kcfg_value_num(struct extern_desc *ext, void *ext_val,
			      __u64 value)
{
	if (ext->kcfg.type != KCFG_INT && ext->kcfg.type != KCFG_CHAR) {
		pr_warn("extern (kcfg) %s=%llu should be integer\n",
			ext->name, (unsigned long long)value);
		return -EINVAL;
	}
	if (!is_kcfg_value_in_range(ext, value)) {
		pr_warn("extern (kcfg) %s=%llu value doesn't fit in %d bytes\n",
			ext->name, (unsigned long long)value, ext->kcfg.sz);
		return -ERANGE;
	}
	switch (ext->kcfg.sz) {
	case 1: *(__u8 *)ext_val = value; break;
	case 2: *(__u16 *)ext_val = value; break;
	case 4: *(__u32 *)ext_val = value; break;
	case 8: *(__u64 *)ext_val = value; break;
	default:
		return -EINVAL;
	}
	ext->is_set = true;
	return 0;
}

static int bpf_object__process_kconfig_line(struct bpf_object *obj,
					    char *buf, void *data)
{
	struct extern_desc *ext;
	char *sep, *value;
	int len, err = 0;
	void *ext_val;
	__u64 num;

	if (strncmp(buf, "CONFIG_", 7))
		return 0;

	sep = strchr(buf, '=');
	if (!sep) {
		pr_warn("failed to parse '%s': no separator\n", buf);
		return -EINVAL;
	}

	/* Trim ending '\n' */
	len = strlen(buf);
	if (buf[len - 1] == '\n')
		buf[len - 1] = '\0';
	/* Split on '=' and ensure that a value is present. */
	*sep = '\0';
	if (!sep[1]) {
		*sep = '=';
		pr_warn("failed to parse '%s': no value\n", buf);
		return -EINVAL;
	}

	ext = find_extern_by_name(obj, buf);
	if (!ext || ext->is_set)
		return 0;

	ext_val = data + ext->kcfg.data_off;
	value = sep + 1;

	switch (*value) {
	case 'y': case 'n': case 'm':
		err = set_kcfg_value_tri(ext, ext_val, *value);
		break;
	case '"':
		err = set_kcfg_value_str(ext, ext_val, value);
		break;
	default:
		/* assume integer */
		err = parse_u64(value, &num);
		if (err) {
			pr_warn("extern (kcfg) %s=%s should be integer\n",
				ext->name, value);
			return err;
		}
		err = set_kcfg_value_num(ext, ext_val, num);
		break;
	}
	if (err)
		return err;
	pr_debug("extern (kcfg) %s=%s\n", ext->name, value);
	return 0;
}

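/* Worked example (illustrative): given the Kconfig lines
 *
 *	CONFIG_BPF_SYSCALL=y			-> bool/tristate, set_kcfg_value_tri()
 *	CONFIG_DEFAULT_HOSTNAME="(none)"	-> char array, set_kcfg_value_str()
 *	CONFIG_HZ=250				-> integer, parse_u64() + set_kcfg_value_num()
 *
 * only externs actually declared by the BPF object are filled in; anything
 * else is skipped by the find_extern_by_name() lookup above.
 */
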
1686static int bpf_object__read_kconfig_file(struct bpf_object *obj, void *data)
1687{
1688 char buf[PATH_MAX];
1689 struct utsname uts;
1690 int len, err = 0;
1691 gzFile file;
1692
1693 uname(&uts);
1694 len = snprintf(buf, PATH_MAX, "/boot/config-%s", uts.release);
1695 if (len < 0)
1696 return -EINVAL;
1697 else if (len >= PATH_MAX)
1698 return -ENAMETOOLONG;
1699
1700 /* gzopen also accepts uncompressed files. */
1701 file = gzopen(buf, "r");
1702 if (!file)
1703 file = gzopen("/proc/config.gz", "r");
1704
166750bc 1705 if (!file) {
8601fd42 1706 pr_warn("failed to open system Kconfig\n");
166750bc
AN
1707 return -ENOENT;
1708 }
1709
1710 while (gzgets(file, buf, sizeof(buf))) {
8601fd42
AN
1711 err = bpf_object__process_kconfig_line(obj, buf, data);
1712 if (err) {
1713 pr_warn("error parsing system Kconfig line '%s': %d\n",
1714 buf, err);
166750bc
AN
1715 goto out;
1716 }
8601fd42 1717 }
166750bc 1718
8601fd42
AN
1719out:
1720 gzclose(file);
1721 return err;
1722}
166750bc 1723
8601fd42
AN
1724static int bpf_object__read_kconfig_mem(struct bpf_object *obj,
1725 const char *config, void *data)
1726{
1727 char buf[PATH_MAX];
1728 int err = 0;
1729 FILE *file;
166750bc 1730
8601fd42
AN
1731 file = fmemopen((void *)config, strlen(config), "r");
1732 if (!file) {
1733 err = -errno;
1734 pr_warn("failed to open in-memory Kconfig: %d\n", err);
1735 return err;
1736 }
1737
1738 while (fgets(buf, sizeof(buf), file)) {
1739 err = bpf_object__process_kconfig_line(obj, buf, data);
1740 if (err) {
1741 pr_warn("error parsing in-memory Kconfig line '%s': %d\n",
1742 buf, err);
166750bc
AN
1743 break;
1744 }
166750bc
AN
1745 }
1746
8601fd42 1747 fclose(file);
166750bc
AN
1748 return err;
1749}
1750
81bfdd08 1751static int bpf_object__init_kconfig_map(struct bpf_object *obj)
166750bc 1752{
2e33efe3 1753 struct extern_desc *last_ext = NULL, *ext;
166750bc 1754 size_t map_sz;
2e33efe3 1755 int i, err;
166750bc 1756
2e33efe3
AN
1757 for (i = 0; i < obj->nr_extern; i++) {
1758 ext = &obj->externs[i];
1759 if (ext->type == EXT_KCFG)
1760 last_ext = ext;
1761 }
166750bc 1762
2e33efe3
AN
1763 if (!last_ext)
1764 return 0;
166750bc 1765
2e33efe3 1766 map_sz = last_ext->kcfg.data_off + last_ext->kcfg.sz;
81bfdd08 1767 err = bpf_object__init_internal_map(obj, LIBBPF_MAP_KCONFIG,
166750bc
AN
1768 obj->efile.symbols_shndx,
1769 NULL, map_sz);
1770 if (err)
1771 return err;
1772
81bfdd08 1773 obj->kconfig_map_idx = obj->nr_maps - 1;
166750bc
AN
1774
1775 return 0;
1776}
1777
bf829271 1778static int bpf_object__init_user_maps(struct bpf_object *obj, bool strict)
561bbcca 1779{
561bbcca 1780 Elf_Data *symbols = obj->efile.symbols;
bf829271 1781 int i, map_def_sz = 0, nr_maps = 0, nr_syms;
d859900c 1782 Elf_Data *data = NULL;
bf829271
AN
1783 Elf_Scn *scn;
1784
1785 if (obj->efile.maps_shndx < 0)
1786 return 0;
561bbcca 1787
4708bbda
EL
1788 if (!symbols)
1789 return -EINVAL;
1790
88a82120
AN
1791
1792 scn = elf_sec_by_idx(obj, obj->efile.maps_shndx);
1793 data = elf_sec_data(obj, scn);
bf829271 1794 if (!scn || !data) {
88a82120
AN
1795 pr_warn("elf: failed to get legacy map definitions for %s\n",
1796 obj->path);
bf829271 1797 return -EINVAL;
4708bbda 1798 }
561bbcca 1799
4708bbda
EL
1800 /*
1801 * Count number of maps. Each map has a name.
1802 * Array of maps is not supported: only the first element is
1803 * considered.
1804 *
1805 * TODO: Detect array of map and report error.
1806 */
bf829271
AN
1807 nr_syms = symbols->d_size / sizeof(GElf_Sym);
1808 for (i = 0; i < nr_syms; i++) {
561bbcca 1809 GElf_Sym sym;
4708bbda
EL
1810
1811 if (!gelf_getsym(symbols, i, &sym))
1812 continue;
1813 if (sym.st_shndx != obj->efile.maps_shndx)
1814 continue;
1815 nr_maps++;
1816 }
b13c5c14 1817 /* Assume equally sized map definitions */
88a82120
AN
1818 pr_debug("elf: found %d legacy map definitions (%zd bytes) in %s\n",
1819 nr_maps, data->d_size, obj->path);
bf829271 1820
98e527af 1821 if (!data->d_size || nr_maps == 0 || (data->d_size % nr_maps) != 0) {
88a82120
AN
1822 pr_warn("elf: unable to determine legacy map definition size in %s\n",
1823 obj->path);
bf829271 1824 return -EINVAL;
addb9fc9 1825 }
98e527af 1826 map_def_sz = data->d_size / nr_maps;
4708bbda 1827
bf829271
AN
1828 /* Fill obj->maps using data in "maps" section. */
1829 for (i = 0; i < nr_syms; i++) {
4708bbda 1830 GElf_Sym sym;
561bbcca 1831 const char *map_name;
4708bbda 1832 struct bpf_map_def *def;
bf829271 1833 struct bpf_map *map;
561bbcca
WN
1834
1835 if (!gelf_getsym(symbols, i, &sym))
1836 continue;
666810e8 1837 if (sym.st_shndx != obj->efile.maps_shndx)
561bbcca
WN
1838 continue;
1839
bf829271
AN
1840 map = bpf_object__add_map(obj);
1841 if (IS_ERR(map))
1842 return PTR_ERR(map);
1843
88a82120 1844 map_name = elf_sym_str(obj, sym.st_name);
c51829bb 1845 if (!map_name) {
be18010e
KW
1846 pr_warn("failed to get map #%d name sym string for obj %s\n",
1847 i, obj->path);
c51829bb
AN
1848 return -LIBBPF_ERRNO__FORMAT;
1849 }
d859900c 1850
bf829271 1851 map->libbpf_type = LIBBPF_MAP_UNSPEC;
db48814b
AN
1852 map->sec_idx = sym.st_shndx;
1853 map->sec_offset = sym.st_value;
1854 pr_debug("map '%s' (legacy): at sec_idx %d, offset %zu.\n",
1855 map_name, map->sec_idx, map->sec_offset);
b13c5c14 1856 if (sym.st_value + map_def_sz > data->d_size) {
be18010e
KW
1857 pr_warn("corrupted maps section in %s: last map \"%s\" too small\n",
1858 obj->path, map_name);
4708bbda 1859 return -EINVAL;
561bbcca 1860 }
4708bbda 1861
bf829271
AN
1862 map->name = strdup(map_name);
1863 if (!map->name) {
be18010e 1864 pr_warn("failed to alloc map name\n");
973170e6
WN
1865 return -ENOMEM;
1866 }
bf829271 1867 pr_debug("map %d is \"%s\"\n", i, map->name);
4708bbda 1868 def = (struct bpf_map_def *)(data->d_buf + sym.st_value);
b13c5c14
CG
1869 /*
1870 * If the definition of the map in the object file fits in
1871 * bpf_map_def, copy it. Any extra fields in our version
1872 * of bpf_map_def will default to zero as a result of the
1873 * calloc above.
1874 */
1875 if (map_def_sz <= sizeof(struct bpf_map_def)) {
bf829271 1876 memcpy(&map->def, def, map_def_sz);
b13c5c14
CG
1877 } else {
1878 /*
1879 * Here the map structure being read is bigger than what
1880 * we expect; truncate it if the excess bytes are all zero.
1881 * If they are not zero, reject this map as
1882 * incompatible.
1883 */
1884 char *b;
8983b731 1885
b13c5c14
CG
1886 for (b = ((char *)def) + sizeof(struct bpf_map_def);
1887 b < ((char *)def) + map_def_sz; b++) {
1888 if (*b != 0) {
8983b731 1889 pr_warn("maps section in %s: \"%s\" has unrecognized, non-zero options\n",
be18010e 1890 obj->path, map_name);
c034a177
JF
1891 if (strict)
1892 return -EINVAL;
b13c5c14
CG
1893 }
1894 }
bf829271 1895 memcpy(&map->def, def, sizeof(struct bpf_map_def));
b13c5c14 1896 }
561bbcca 1897 }
bf829271
AN
1898 return 0;
1899}
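
/* Illustrative legacy definition that the loop above parses (assumed
 * user-side BPF code; "my_map" is an example name):
 *
 *	struct bpf_map_def SEC("maps") my_map = {
 *		.type = BPF_MAP_TYPE_ARRAY,
 *		.key_size = sizeof(int),
 *		.value_size = sizeof(long),
 *		.max_entries = 256,
 *	};
 */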
4708bbda 1900
ddc7c304
AN
1901static const struct btf_type *
1902skip_mods_and_typedefs(const struct btf *btf, __u32 id, __u32 *res_id)
abd29c93
AN
1903{
1904 const struct btf_type *t = btf__type_by_id(btf, id);
8837fe5d 1905
ddc7c304
AN
1906 if (res_id)
1907 *res_id = id;
1908
1909 while (btf_is_mod(t) || btf_is_typedef(t)) {
1910 if (res_id)
1911 *res_id = t->type;
1912 t = btf__type_by_id(btf, t->type);
abd29c93 1913 }
ddc7c304
AN
1914
1915 return t;
abd29c93
AN
1916}
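
/* For example (illustrative types): given `typedef const volatile int cvi_t;`,
 * calling skip_mods_and_typedefs() on cvi_t's type ID walks the
 * TYPEDEF -> CONST -> VOLATILE chain and returns the underlying INT type,
 * leaving the INT's ID in *res_id when res_id is non-NULL.
 */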
1917
590a0088
MKL
1918static const struct btf_type *
1919resolve_func_ptr(const struct btf *btf, __u32 id, __u32 *res_id)
1920{
1921 const struct btf_type *t;
1922
1923 t = skip_mods_and_typedefs(btf, id, NULL);
1924 if (!btf_is_ptr(t))
1925 return NULL;
1926
1927 t = skip_mods_and_typedefs(btf, t->type, res_id);
1928
1929 return btf_is_func_proto(t) ? t : NULL;
1930}
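
/* For example (illustrative member): for a struct_ops-style field declared
 * as `int (*init)(void);`, resolve_func_ptr() peels the PTR (plus any
 * mods/typedefs around it) and returns the FUNC_PROTO type; for anything
 * that is not a function pointer it returns NULL.
 */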
1931
81ba0889
AN
1932static const char *btf_kind_str(const struct btf_type *t)
1933{
1934 switch (btf_kind(t)) {
1935 case BTF_KIND_UNKN: return "void";
1936 case BTF_KIND_INT: return "int";
1937 case BTF_KIND_PTR: return "ptr";
1938 case BTF_KIND_ARRAY: return "array";
1939 case BTF_KIND_STRUCT: return "struct";
1940 case BTF_KIND_UNION: return "union";
1941 case BTF_KIND_ENUM: return "enum";
1942 case BTF_KIND_FWD: return "fwd";
1943 case BTF_KIND_TYPEDEF: return "typedef";
1944 case BTF_KIND_VOLATILE: return "volatile";
1945 case BTF_KIND_CONST: return "const";
1946 case BTF_KIND_RESTRICT: return "restrict";
1947 case BTF_KIND_FUNC: return "func";
1948 case BTF_KIND_FUNC_PROTO: return "func_proto";
1949 case BTF_KIND_VAR: return "var";
1950 case BTF_KIND_DATASEC: return "datasec";
22541a9e 1951 case BTF_KIND_FLOAT: return "float";
81ba0889
AN
1952 default: return "unknown";
1953 }
1954}
1955
ef99b02b
AN
1956/*
1957 * Fetch an integer attribute of a BTF map definition. Such attributes are
1958 * represented using a pointer to an array, in which the number of array
1959 * elements encodes the integer value. E.g., int (*type)[BPF_MAP_TYPE_ARRAY];
1960 * encodes `type => BPF_MAP_TYPE_ARRAY` key/value pair completely using BTF
1961 * type definition, while using only sizeof(void *) space in ELF data section.
1962 */
1963static bool get_map_field_int(const char *map_name, const struct btf *btf,
8983b731
AN
1964 const struct btf_member *m, __u32 *res)
1965{
ddc7c304 1966 const struct btf_type *t = skip_mods_and_typedefs(btf, m->type, NULL);
abd29c93 1967 const char *name = btf__name_by_offset(btf, m->name_off);
ef99b02b
AN
1968 const struct btf_array *arr_info;
1969 const struct btf_type *arr_t;
abd29c93 1970
b03bc685 1971 if (!btf_is_ptr(t)) {
81ba0889
AN
1972 pr_warn("map '%s': attr '%s': expected PTR, got %s.\n",
1973 map_name, name, btf_kind_str(t));
abd29c93
AN
1974 return false;
1975 }
ef99b02b
AN
1976
1977 arr_t = btf__type_by_id(btf, t->type);
1978 if (!arr_t) {
be18010e
KW
1979 pr_warn("map '%s': attr '%s': type [%u] not found.\n",
1980 map_name, name, t->type);
abd29c93
AN
1981 return false;
1982 }
b03bc685 1983 if (!btf_is_array(arr_t)) {
81ba0889
AN
1984 pr_warn("map '%s': attr '%s': expected ARRAY, got %s.\n",
1985 map_name, name, btf_kind_str(arr_t));
abd29c93
AN
1986 return false;
1987 }
b03bc685 1988 arr_info = btf_array(arr_t);
ef99b02b 1989 *res = arr_info->nelems;
abd29c93
AN
1990 return true;
1991}
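
/* Illustrative note: BPF program source usually spells such attributes via
 * helper macros from tools/lib/bpf/bpf_helpers.h, e.g.
 *
 *	#define __uint(name, val) int (*name)[val]
 *
 * so `__uint(max_entries, 64);` in a map definition becomes
 * `int (*max_entries)[64];`, and get_map_field_int() recovers 64 from the
 * array's nelems above.
 */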
1992
57a00f41
THJ
1993static int build_map_pin_path(struct bpf_map *map, const char *path)
1994{
1995 char buf[PATH_MAX];
6e9cab2e 1996 int len;
57a00f41
THJ
1997
1998 if (!path)
1999 path = "/sys/fs/bpf";
2000
2001 len = snprintf(buf, PATH_MAX, "%s/%s", path, bpf_map__name(map));
2002 if (len < 0)
2003 return -EINVAL;
2004 else if (len >= PATH_MAX)
2005 return -ENAMETOOLONG;
2006
6e9cab2e 2007 return bpf_map__set_pin_path(map, buf);
57a00f41
THJ
2008}
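
/* For example: with a NULL pin_root_path, a map named "my_map" (an
 * illustrative name) that requests LIBBPF_PIN_BY_NAME ends up with the
 * pin path "/sys/fs/bpf/my_map".
 */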
2009
41017e56
AN
2010
2011static int parse_btf_map_def(struct bpf_object *obj,
2012 struct bpf_map *map,
2013 const struct btf_type *def,
646f02ff 2014 bool strict, bool is_inner,
41017e56 2015 const char *pin_root_path)
abd29c93 2016{
41017e56 2017 const struct btf_type *t;
abd29c93 2018 const struct btf_member *m;
abd29c93
AN
2019 int vlen, i;
2020
b03bc685
AN
2021 vlen = btf_vlen(def);
2022 m = btf_members(def);
abd29c93
AN
2023 for (i = 0; i < vlen; i++, m++) {
2024 const char *name = btf__name_by_offset(obj->btf, m->name_off);
2025
2026 if (!name) {
41017e56 2027 pr_warn("map '%s': invalid field #%d.\n", map->name, i);
abd29c93
AN
2028 return -EINVAL;
2029 }
2030 if (strcmp(name, "type") == 0) {
41017e56 2031 if (!get_map_field_int(map->name, obj->btf, m,
ef99b02b 2032 &map->def.type))
abd29c93
AN
2033 return -EINVAL;
2034 pr_debug("map '%s': found type = %u.\n",
41017e56 2035 map->name, map->def.type);
abd29c93 2036 } else if (strcmp(name, "max_entries") == 0) {
41017e56 2037 if (!get_map_field_int(map->name, obj->btf, m,
ef99b02b 2038 &map->def.max_entries))
abd29c93
AN
2039 return -EINVAL;
2040 pr_debug("map '%s': found max_entries = %u.\n",
41017e56 2041 map->name, map->def.max_entries);
abd29c93 2042 } else if (strcmp(name, "map_flags") == 0) {
41017e56 2043 if (!get_map_field_int(map->name, obj->btf, m,
ef99b02b 2044 &map->def.map_flags))
abd29c93
AN
2045 return -EINVAL;
2046 pr_debug("map '%s': found map_flags = %u.\n",
41017e56 2047 map->name, map->def.map_flags);
1bdb6c9a
AN
2048 } else if (strcmp(name, "numa_node") == 0) {
2049 if (!get_map_field_int(map->name, obj->btf, m, &map->numa_node))
2050 return -EINVAL;
2051 pr_debug("map '%s': found numa_node = %u.\n", map->name, map->numa_node);
abd29c93
AN
2052 } else if (strcmp(name, "key_size") == 0) {
2053 __u32 sz;
2054
41017e56 2055 if (!get_map_field_int(map->name, obj->btf, m, &sz))
abd29c93
AN
2056 return -EINVAL;
2057 pr_debug("map '%s': found key_size = %u.\n",
41017e56 2058 map->name, sz);
abd29c93 2059 if (map->def.key_size && map->def.key_size != sz) {
be18010e 2060 pr_warn("map '%s': conflicting key size %u != %u.\n",
41017e56 2061 map->name, map->def.key_size, sz);
abd29c93
AN
2062 return -EINVAL;
2063 }
2064 map->def.key_size = sz;
2065 } else if (strcmp(name, "key") == 0) {
2066 __s64 sz;
2067
2068 t = btf__type_by_id(obj->btf, m->type);
2069 if (!t) {
be18010e 2070 pr_warn("map '%s': key type [%d] not found.\n",
41017e56 2071 map->name, m->type);
abd29c93
AN
2072 return -EINVAL;
2073 }
b03bc685 2074 if (!btf_is_ptr(t)) {
81ba0889
AN
2075 pr_warn("map '%s': key spec is not PTR: %s.\n",
2076 map->name, btf_kind_str(t));
abd29c93
AN
2077 return -EINVAL;
2078 }
2079 sz = btf__resolve_size(obj->btf, t->type);
2080 if (sz < 0) {
679152d3 2081 pr_warn("map '%s': can't determine key size for type [%u]: %zd.\n",
41017e56 2082 map->name, t->type, (ssize_t)sz);
abd29c93
AN
2083 return sz;
2084 }
679152d3 2085 pr_debug("map '%s': found key [%u], sz = %zd.\n",
41017e56 2086 map->name, t->type, (ssize_t)sz);
abd29c93 2087 if (map->def.key_size && map->def.key_size != sz) {
679152d3 2088 pr_warn("map '%s': conflicting key size %u != %zd.\n",
41017e56 2089 map->name, map->def.key_size, (ssize_t)sz);
abd29c93
AN
2090 return -EINVAL;
2091 }
2092 map->def.key_size = sz;
2093 map->btf_key_type_id = t->type;
2094 } else if (strcmp(name, "value_size") == 0) {
2095 __u32 sz;
2096
41017e56 2097 if (!get_map_field_int(map->name, obj->btf, m, &sz))
abd29c93
AN
2098 return -EINVAL;
2099 pr_debug("map '%s': found value_size = %u.\n",
41017e56 2100 map->name, sz);
abd29c93 2101 if (map->def.value_size && map->def.value_size != sz) {
be18010e 2102 pr_warn("map '%s': conflicting value size %u != %u.\n",
41017e56 2103 map->name, map->def.value_size, sz);
abd29c93
AN
2104 return -EINVAL;
2105 }
2106 map->def.value_size = sz;
2107 } else if (strcmp(name, "value") == 0) {
2108 __s64 sz;
2109
2110 t = btf__type_by_id(obj->btf, m->type);
2111 if (!t) {
be18010e 2112 pr_warn("map '%s': value type [%d] not found.\n",
41017e56 2113 map->name, m->type);
abd29c93
AN
2114 return -EINVAL;
2115 }
b03bc685 2116 if (!btf_is_ptr(t)) {
81ba0889
AN
2117 pr_warn("map '%s': value spec is not PTR: %s.\n",
2118 map->name, btf_kind_str(t));
abd29c93
AN
2119 return -EINVAL;
2120 }
2121 sz = btf__resolve_size(obj->btf, t->type);
2122 if (sz < 0) {
679152d3 2123 pr_warn("map '%s': can't determine value size for type [%u]: %zd.\n",
41017e56 2124 map->name, t->type, (ssize_t)sz);
abd29c93
AN
2125 return sz;
2126 }
679152d3 2127 pr_debug("map '%s': found value [%u], sz = %zd.\n",
41017e56 2128 map->name, t->type, (ssize_t)sz);
abd29c93 2129 if (map->def.value_size && map->def.value_size != sz) {
679152d3 2130 pr_warn("map '%s': conflicting value size %u != %zd.\n",
41017e56 2131 map->name, map->def.value_size, (ssize_t)sz);
abd29c93
AN
2132 return -EINVAL;
2133 }
2134 map->def.value_size = sz;
2135 map->btf_value_type_id = t->type;
646f02ff
AN
2136 } else if (strcmp(name, "values") == 0) {
2138 int err;
2139
2140 if (is_inner) {
2141 pr_warn("map '%s': multi-level inner maps not supported.\n",
2142 map->name);
2143 return -ENOTSUP;
2144 }
2145 if (i != vlen - 1) {
2146 pr_warn("map '%s': '%s' member should be last.\n",
2147 map->name, name);
2148 return -EINVAL;
2149 }
2150 if (!bpf_map_type__is_map_in_map(map->def.type)) {
2151 pr_warn("map '%s': should be map-in-map.\n",
2152 map->name);
2153 return -ENOTSUP;
2154 }
2155 if (map->def.value_size && map->def.value_size != 4) {
2156 pr_warn("map '%s': conflicting value size %u != 4.\n",
2157 map->name, map->def.value_size);
2158 return -EINVAL;
2159 }
2160 map->def.value_size = 4;
2161 t = btf__type_by_id(obj->btf, m->type);
2162 if (!t) {
2163 pr_warn("map '%s': map-in-map inner type [%d] not found.\n",
2164 map->name, m->type);
2165 return -EINVAL;
2166 }
2167 if (!btf_is_array(t) || btf_array(t)->nelems) {
2168 pr_warn("map '%s': map-in-map inner spec is not a zero-sized array.\n",
2169 map->name);
2170 return -EINVAL;
2171 }
2172 t = skip_mods_and_typedefs(obj->btf, btf_array(t)->type,
2173 NULL);
2174 if (!btf_is_ptr(t)) {
81ba0889
AN
2175 pr_warn("map '%s': map-in-map inner def is of unexpected kind %s.\n",
2176 map->name, btf_kind_str(t));
646f02ff
AN
2177 return -EINVAL;
2178 }
2179 t = skip_mods_and_typedefs(obj->btf, t->type, NULL);
2180 if (!btf_is_struct(t)) {
81ba0889
AN
2181 pr_warn("map '%s': map-in-map inner def is of unexpected kind %s.\n",
2182 map->name, btf_kind_str(t));
646f02ff
AN
2183 return -EINVAL;
2184 }
2185
2186 map->inner_map = calloc(1, sizeof(*map->inner_map));
2187 if (!map->inner_map)
2188 return -ENOMEM;
2189 map->inner_map->sec_idx = obj->efile.btf_maps_shndx;
2190 map->inner_map->name = malloc(strlen(map->name) +
2191 sizeof(".inner") + 1);
2192 if (!map->inner_map->name)
2193 return -ENOMEM;
2194 sprintf(map->inner_map->name, "%s.inner", map->name);
2195
2196 err = parse_btf_map_def(obj, map->inner_map, t, strict,
2197 true /* is_inner */, NULL);
2198 if (err)
2199 return err;
57a00f41
THJ
2200 } else if (strcmp(name, "pinning") == 0) {
2201 __u32 val;
2202 int err;
2203
646f02ff
AN
2204 if (is_inner) {
2205 pr_debug("map '%s': inner def can't be pinned.\n",
2206 map->name);
2207 return -EINVAL;
2208 }
41017e56 2209 if (!get_map_field_int(map->name, obj->btf, m, &val))
57a00f41
THJ
2210 return -EINVAL;
2211 pr_debug("map '%s': found pinning = %u.\n",
41017e56 2212 map->name, val);
57a00f41
THJ
2213
2214 if (val != LIBBPF_PIN_NONE &&
2215 val != LIBBPF_PIN_BY_NAME) {
2216 pr_warn("map '%s': invalid pinning value %u.\n",
41017e56 2217 map->name, val);
57a00f41
THJ
2218 return -EINVAL;
2219 }
2220 if (val == LIBBPF_PIN_BY_NAME) {
2221 err = build_map_pin_path(map, pin_root_path);
2222 if (err) {
2223 pr_warn("map '%s': couldn't build pin path.\n",
41017e56 2224 map->name);
57a00f41
THJ
2225 return err;
2226 }
2227 }
abd29c93
AN
2228 } else {
2229 if (strict) {
be18010e 2230 pr_warn("map '%s': unknown field '%s'.\n",
41017e56 2231 map->name, name);
abd29c93
AN
2232 return -ENOTSUP;
2233 }
2234 pr_debug("map '%s': ignoring unknown field '%s'.\n",
41017e56 2235 map->name, name);
abd29c93
AN
2236 }
2237 }
2238
2239 if (map->def.type == BPF_MAP_TYPE_UNSPEC) {
41017e56 2240 pr_warn("map '%s': map type isn't specified.\n", map->name);
abd29c93
AN
2241 return -EINVAL;
2242 }
2243
2244 return 0;
2245}
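
/* Illustrative BTF-defined map that this parser accepts (assumed user-side
 * declaration using the __uint()/__type() helpers from
 * tools/lib/bpf/bpf_helpers.h; names are examples):
 *
 *	struct {
 *		__uint(type, BPF_MAP_TYPE_HASH);
 *		__uint(max_entries, 1024);
 *		__type(key, __u32);
 *		__type(value, __u64);
 *		__uint(pinning, LIBBPF_PIN_BY_NAME);
 *	} my_map SEC(".maps");
 *
 * Each field arrives here as a BTF struct member and is matched by name in
 * the if/else chain above.
 */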
2246
41017e56
AN
2247static int bpf_object__init_user_btf_map(struct bpf_object *obj,
2248 const struct btf_type *sec,
2249 int var_idx, int sec_idx,
2250 const Elf_Data *data, bool strict,
2251 const char *pin_root_path)
2252{
2253 const struct btf_type *var, *def;
2254 const struct btf_var_secinfo *vi;
2255 const struct btf_var *var_extra;
2256 const char *map_name;
2257 struct bpf_map *map;
2258
2259 vi = btf_var_secinfos(sec) + var_idx;
2260 var = btf__type_by_id(obj->btf, vi->type);
2261 var_extra = btf_var(var);
2262 map_name = btf__name_by_offset(obj->btf, var->name_off);
2263
2264 if (map_name == NULL || map_name[0] == '\0') {
2265 pr_warn("map #%d: empty name.\n", var_idx);
2266 return -EINVAL;
2267 }
2268 if ((__u64)vi->offset + vi->size > data->d_size) {
2269 pr_warn("map '%s' BTF data is corrupted.\n", map_name);
2270 return -EINVAL;
2271 }
2272 if (!btf_is_var(var)) {
81ba0889
AN
2273 pr_warn("map '%s': unexpected var kind %s.\n",
2274 map_name, btf_kind_str(var));
41017e56
AN
2275 return -EINVAL;
2276 }
2277 if (var_extra->linkage != BTF_VAR_GLOBAL_ALLOCATED &&
2278 var_extra->linkage != BTF_VAR_STATIC) {
2279 pr_warn("map '%s': unsupported var linkage %u.\n",
2280 map_name, var_extra->linkage);
2281 return -EOPNOTSUPP;
2282 }
2283
2284 def = skip_mods_and_typedefs(obj->btf, var->type, NULL);
2285 if (!btf_is_struct(def)) {
81ba0889
AN
2286 pr_warn("map '%s': unexpected def kind %s.\n",
2287 map_name, btf_kind_str(var));
41017e56
AN
2288 return -EINVAL;
2289 }
2290 if (def->size > vi->size) {
2291 pr_warn("map '%s': invalid def size.\n", map_name);
2292 return -EINVAL;
2293 }
2294
2295 map = bpf_object__add_map(obj);
2296 if (IS_ERR(map))
2297 return PTR_ERR(map);
2298 map->name = strdup(map_name);
2299 if (!map->name) {
2300 pr_warn("map '%s': failed to alloc map name.\n", map_name);
2301 return -ENOMEM;
2302 }
2303 map->libbpf_type = LIBBPF_MAP_UNSPEC;
2304 map->def.type = BPF_MAP_TYPE_UNSPEC;
2305 map->sec_idx = sec_idx;
2306 map->sec_offset = vi->offset;
646f02ff 2307 map->btf_var_idx = var_idx;
41017e56
AN
2308 pr_debug("map '%s': at sec_idx %d, offset %zu.\n",
2309 map_name, map->sec_idx, map->sec_offset);
2310
646f02ff 2311 return parse_btf_map_def(obj, map, def, strict, false, pin_root_path);
41017e56
AN
2312}
2313
57a00f41
THJ
2314static int bpf_object__init_user_btf_maps(struct bpf_object *obj, bool strict,
2315 const char *pin_root_path)
abd29c93
AN
2316{
2317 const struct btf_type *sec = NULL;
2318 int nr_types, i, vlen, err;
2319 const struct btf_type *t;
2320 const char *name;
2321 Elf_Data *data;
2322 Elf_Scn *scn;
2323
2324 if (obj->efile.btf_maps_shndx < 0)
2325 return 0;
2326
88a82120
AN
2327 scn = elf_sec_by_idx(obj, obj->efile.btf_maps_shndx);
2328 data = elf_sec_data(obj, scn);
abd29c93 2329 if (!scn || !data) {
88a82120
AN
2330 pr_warn("elf: failed to get %s map definitions for %s\n",
2331 MAPS_ELF_SEC, obj->path);
abd29c93
AN
2332 return -EINVAL;
2333 }
2334
2335 nr_types = btf__get_nr_types(obj->btf);
2336 for (i = 1; i <= nr_types; i++) {
2337 t = btf__type_by_id(obj->btf, i);
b03bc685 2338 if (!btf_is_datasec(t))
abd29c93
AN
2339 continue;
2340 name = btf__name_by_offset(obj->btf, t->name_off);
2341 if (strcmp(name, MAPS_ELF_SEC) == 0) {
2342 sec = t;
646f02ff 2343 obj->efile.btf_maps_sec_btf_id = i;
abd29c93
AN
2344 break;
2345 }
2346 }
2347
2348 if (!sec) {
be18010e 2349 pr_warn("DATASEC '%s' not found.\n", MAPS_ELF_SEC);
abd29c93
AN
2350 return -ENOENT;
2351 }
2352
b03bc685 2353 vlen = btf_vlen(sec);
abd29c93
AN
2354 for (i = 0; i < vlen; i++) {
2355 err = bpf_object__init_user_btf_map(obj, sec, i,
2356 obj->efile.btf_maps_shndx,
8983b731
AN
2357 data, strict,
2358 pin_root_path);
abd29c93
AN
2359 if (err)
2360 return err;
2361 }
2362
2363 return 0;
2364}
2365
0d13bfce 2366static int bpf_object__init_maps(struct bpf_object *obj,
01af3bf0 2367 const struct bpf_object_open_opts *opts)
bf829271 2368{
166750bc
AN
2369 const char *pin_root_path;
2370 bool strict;
bf829271 2371 int err;
8837fe5d 2372
166750bc
AN
2373 strict = !OPTS_GET(opts, relaxed_maps, false);
2374 pin_root_path = OPTS_GET(opts, pin_root_path, NULL);
bf829271 2375
166750bc
AN
2376 err = bpf_object__init_user_maps(obj, strict);
2377 err = err ?: bpf_object__init_user_btf_maps(obj, strict, pin_root_path);
2378 err = err ?: bpf_object__init_global_data_maps(obj);
81bfdd08 2379 err = err ?: bpf_object__init_kconfig_map(obj);
590a0088 2380 err = err ?: bpf_object__init_struct_ops_maps(obj);
bf829271
AN
2381 if (err)
2382 return err;
2383
bf829271 2384 return 0;
561bbcca
WN
2385}
2386
e3d91b0c
JDB
2387static bool section_have_execinstr(struct bpf_object *obj, int idx)
2388{
e3d91b0c
JDB
2389 GElf_Shdr sh;
2390
88a82120 2391 if (elf_sec_hdr(obj, elf_sec_by_idx(obj, idx), &sh))
e3d91b0c
JDB
2392 return false;
2393
88a82120 2394 return sh.sh_flags & SHF_EXECINSTR;
e3d91b0c
JDB
2395}
2396
0f0e55d8
AN
2397static bool btf_needs_sanitization(struct bpf_object *obj)
2398{
47b6cb4d
AN
2399 bool has_func_global = kernel_supports(FEAT_BTF_GLOBAL_FUNC);
2400 bool has_datasec = kernel_supports(FEAT_BTF_DATASEC);
22541a9e 2401 bool has_float = kernel_supports(FEAT_BTF_FLOAT);
47b6cb4d 2402 bool has_func = kernel_supports(FEAT_BTF_FUNC);
0f0e55d8 2403
22541a9e 2404 return !has_func || !has_datasec || !has_func_global || !has_float;
0f0e55d8
AN
2405}
2406
2407static void bpf_object__sanitize_btf(struct bpf_object *obj, struct btf *btf)
d7c4b398 2408{
47b6cb4d
AN
2409 bool has_func_global = kernel_supports(FEAT_BTF_GLOBAL_FUNC);
2410 bool has_datasec = kernel_supports(FEAT_BTF_DATASEC);
22541a9e 2411 bool has_float = kernel_supports(FEAT_BTF_FLOAT);
47b6cb4d 2412 bool has_func = kernel_supports(FEAT_BTF_FUNC);
d7c4b398
AN
2413 struct btf_type *t;
2414 int i, j, vlen;
d7c4b398 2415
d7c4b398
AN
2416 for (i = 1; i <= btf__get_nr_types(btf); i++) {
2417 t = (struct btf_type *)btf__type_by_id(btf, i);
d7c4b398 2418
b03bc685 2419 if (!has_datasec && btf_is_var(t)) {
d7c4b398
AN
2420 /* replace VAR with INT */
2421 t->info = BTF_INFO_ENC(BTF_KIND_INT, 0, 0);
1d4126c4
AN
2422 /*
2423 * using size = 1 is the safest choice; 4 would be too
2424 * big and cause a kernel BTF validation failure if the
2425 * original variable took fewer than 4 bytes
2426 */
2427 t->size = 1;
708852dc 2428 *(int *)(t + 1) = BTF_INT_ENC(0, 0, 8);
b03bc685 2429 } else if (!has_datasec && btf_is_datasec(t)) {
d7c4b398 2430 /* replace DATASEC with STRUCT */
b03bc685
AN
2431 const struct btf_var_secinfo *v = btf_var_secinfos(t);
2432 struct btf_member *m = btf_members(t);
d7c4b398
AN
2433 struct btf_type *vt;
2434 char *name;
2435
2436 name = (char *)btf__name_by_offset(btf, t->name_off);
2437 while (*name) {
2438 if (*name == '.')
2439 *name = '_';
2440 name++;
2441 }
2442
b03bc685 2443 vlen = btf_vlen(t);
d7c4b398
AN
2444 t->info = BTF_INFO_ENC(BTF_KIND_STRUCT, 0, vlen);
2445 for (j = 0; j < vlen; j++, v++, m++) {
2446 /* order of field assignments is important */
2447 m->offset = v->offset * 8;
2448 m->type = v->type;
2449 /* preserve variable name as member name */
2450 vt = (void *)btf__type_by_id(btf, v->type);
2451 m->name_off = vt->name_off;
2452 }
b03bc685 2453 } else if (!has_func && btf_is_func_proto(t)) {
d7c4b398 2454 /* replace FUNC_PROTO with ENUM */
b03bc685 2455 vlen = btf_vlen(t);
d7c4b398
AN
2456 t->info = BTF_INFO_ENC(BTF_KIND_ENUM, 0, vlen);
2457 t->size = sizeof(__u32); /* kernel enforced */
b03bc685 2458 } else if (!has_func && btf_is_func(t)) {
d7c4b398
AN
2459 /* replace FUNC with TYPEDEF */
2460 t->info = BTF_INFO_ENC(BTF_KIND_TYPEDEF, 0, 0);
2d3eb67f
AS
2461 } else if (!has_func_global && btf_is_func(t)) {
2462 /* replace BTF_FUNC_GLOBAL with BTF_FUNC_STATIC */
2463 t->info = BTF_INFO_ENC(BTF_KIND_FUNC, 0, 0);
22541a9e
IL
2464 } else if (!has_float && btf_is_float(t)) {
2465 /* replace FLOAT with an equally-sized empty STRUCT;
2466 * since C compilers do not accept e.g. "float" as a
2467 * valid struct name, make it anonymous
2468 */
2469 t->name_off = 0;
2470 t->info = BTF_INFO_ENC(BTF_KIND_STRUCT, 0, 0);
d7c4b398
AN
2471 }
2472 }
2473}
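
/* Example of the DATASEC rewrite above (illustrative): on a kernel without
 * BTF_KIND_DATASEC support, a ".data" DATASEC containing one `int x`
 * variable is converted in place into the equivalent of
 * `struct _data { int x; };` ('.' becomes '_'), which older BTF validators
 * accept.
 */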
2474
b35f14f4 2475static bool libbpf_needs_btf(const struct bpf_object *obj)
abd29c93 2476{
b35f14f4
AN
2477 return obj->efile.btf_maps_shndx >= 0 ||
2478 obj->efile.st_ops_shndx >= 0 ||
2479 obj->nr_extern > 0;
2480}
2481
2482static bool kernel_needs_btf(const struct bpf_object *obj)
2483{
2484 return obj->efile.st_ops_shndx >= 0;
abd29c93
AN
2485}
2486
063183bf 2487static int bpf_object__init_btf(struct bpf_object *obj,
9c6660d0
AN
2488 Elf_Data *btf_data,
2489 Elf_Data *btf_ext_data)
2490{
b7d7f3e1 2491 int err = -ENOENT;
9c6660d0
AN
2492
2493 if (btf_data) {
2494 obj->btf = btf__new(btf_data->d_buf, btf_data->d_size);
2495 if (IS_ERR(obj->btf)) {
b7d7f3e1
AN
2496 err = PTR_ERR(obj->btf);
2497 obj->btf = NULL;
be18010e
KW
2498 pr_warn("Error loading ELF section %s: %d.\n",
2499 BTF_ELF_SEC, err);
9c6660d0
AN
2500 goto out;
2501 }
4c01925f
AN
2502 /* enforce 8-byte pointers for BPF-targeted BTFs */
2503 btf__set_pointer_size(obj->btf, 8);
b7d7f3e1 2504 err = 0;
9c6660d0
AN
2505 }
2506 if (btf_ext_data) {
2507 if (!obj->btf) {
2508 pr_debug("Ignoring ELF section %s because the ELF section %s it depends on is not found.\n",
2509 BTF_EXT_ELF_SEC, BTF_ELF_SEC);
2510 goto out;
2511 }
2512 obj->btf_ext = btf_ext__new(btf_ext_data->d_buf,
2513 btf_ext_data->d_size);
2514 if (IS_ERR(obj->btf_ext)) {
be18010e
KW
2515 pr_warn("Error loading ELF section %s: %ld. Ignoring it and continuing.\n",
2516 BTF_EXT_ELF_SEC, PTR_ERR(obj->btf_ext));
9c6660d0
AN
2517 obj->btf_ext = NULL;
2518 goto out;
2519 }
9c6660d0
AN
2520 }
2521out:
b35f14f4 2522 if (err && libbpf_needs_btf(obj)) {
be18010e 2523 pr_warn("BTF is required, but is missing or corrupted.\n");
b7d7f3e1 2524 return err;
abd29c93 2525 }
9c6660d0
AN
2526 return 0;
2527}
2528
166750bc
AN
2529static int bpf_object__finalize_btf(struct bpf_object *obj)
2530{
2531 int err;
2532
2533 if (!obj->btf)
2534 return 0;
2535
2536 err = btf__finalize_data(obj, obj->btf);
bfc96656
AN
2537 if (err) {
2538 pr_warn("Error finalizing %s: %d.\n", BTF_ELF_SEC, err);
2539 return err;
166750bc 2540 }
bfc96656 2541
166750bc
AN
2542 return 0;
2543}
2544
fe62de31 2545static bool prog_needs_vmlinux_btf(struct bpf_program *prog)
a6ed02ca 2546{
1e092a03
KS
2547 if (prog->type == BPF_PROG_TYPE_STRUCT_OPS ||
2548 prog->type == BPF_PROG_TYPE_LSM)
a6ed02ca
KS
2549 return true;
2550
2551 /* BPF_PROG_TYPE_TRACING programs which do not attach to other programs
2552 * also need vmlinux BTF
2553 */
2554 if (prog->type == BPF_PROG_TYPE_TRACING && !prog->attach_prog_fd)
2555 return true;
2556
2557 return false;
2558}
2559
fe62de31 2560static bool obj_needs_vmlinux_btf(const struct bpf_object *obj)
a6ed02ca
KS
2561{
2562 struct bpf_program *prog;
fe62de31 2563 int i;
a6ed02ca 2564
192b6638 2565 /* CO-RE relocations need kernel BTF */
28b93c64 2566 if (obj->btf_ext && obj->btf_ext->core_relo_info.len)
fe62de31 2567 return true;
192b6638 2568
d370bbe1
HL
2569 /* Support for typed ksyms needs kernel BTF */
2570 for (i = 0; i < obj->nr_extern; i++) {
2571 const struct extern_desc *ext;
2572
2573 ext = &obj->externs[i];
fe62de31
AN
2574 if (ext->type == EXT_KSYM && ext->ksym.type_id)
2575 return true;
d370bbe1
HL
2576 }
2577
a6ed02ca 2578 bpf_object__for_each_program(prog, obj) {
d9297581
AN
2579 if (!prog->load)
2580 continue;
fe62de31
AN
2581 if (prog_needs_vmlinux_btf(prog))
2582 return true;
a6ed02ca
KS
2583 }
2584
fe62de31
AN
2585 return false;
2586}
2587
2588static int bpf_object__load_vmlinux_btf(struct bpf_object *obj, bool force)
2589{
2590 int err;
2591
2592 /* btf_vmlinux could be loaded earlier */
2593 if (obj->btf_vmlinux)
2594 return 0;
2595
2596 if (!force && !obj_needs_vmlinux_btf(obj))
192b6638
AN
2597 return 0;
2598
2599 obj->btf_vmlinux = libbpf_find_kernel_btf();
2600 if (IS_ERR(obj->btf_vmlinux)) {
2601 err = PTR_ERR(obj->btf_vmlinux);
2602 pr_warn("Error loading vmlinux BTF: %d\n", err);
2603 obj->btf_vmlinux = NULL;
2604 return err;
2605 }
a6ed02ca
KS
2606 return 0;
2607}
2608
063183bf
AN
2609static int bpf_object__sanitize_and_load_btf(struct bpf_object *obj)
2610{
0f0e55d8
AN
2611 struct btf *kern_btf = obj->btf;
2612 bool btf_mandatory, sanitize;
063183bf
AN
2613 int err = 0;
2614
2615 if (!obj->btf)
2616 return 0;
2617
68b08647
AN
2618 if (!kernel_supports(FEAT_BTF)) {
2619 if (kernel_needs_btf(obj)) {
2620 err = -EOPNOTSUPP;
2621 goto report;
2622 }
2623 pr_debug("Kernel doesn't support BTF, skipping its upload.\n");
2624 return 0;
2625 }
2626
0f0e55d8
AN
2627 sanitize = btf_needs_sanitization(obj);
2628 if (sanitize) {
5c3320d7 2629 const void *raw_data;
0f0e55d8 2630 __u32 sz;
063183bf 2631
0f0e55d8 2632 /* clone BTF to sanitize a copy and leave the original intact */
5c3320d7
AN
2633 raw_data = btf__get_raw_data(obj->btf, &sz);
2634 kern_btf = btf__new(raw_data, sz);
0f0e55d8
AN
2635 if (IS_ERR(kern_btf))
2636 return PTR_ERR(kern_btf);
04efe591 2637
4c01925f
AN
2638 /* enforce 8-byte pointers for BPF-targeted BTFs */
2639 btf__set_pointer_size(obj->btf, 8);
0f0e55d8 2640 bpf_object__sanitize_btf(obj, kern_btf);
063183bf 2641 }
0f0e55d8
AN
2642
2643 err = btf__load(kern_btf);
2644 if (sanitize) {
2645 if (!err) {
2646 /* move fd to libbpf's BTF */
2647 btf__set_fd(obj->btf, btf__fd(kern_btf));
2648 btf__set_fd(kern_btf, -1);
2649 }
2650 btf__free(kern_btf);
2651 }
68b08647 2652report:
0f0e55d8
AN
2653 if (err) {
2654 btf_mandatory = kernel_needs_btf(obj);
2655 pr_warn("Error loading .BTF into kernel: %d. %s\n", err,
2656 btf_mandatory ? "BTF is mandatory, can't proceed."
2657 : "BTF is optional, ignoring.");
2658 if (!btf_mandatory)
2659 err = 0;
2660 }
2661 return err;
063183bf
AN
2662}
2663
88a82120
AN
2664static const char *elf_sym_str(const struct bpf_object *obj, size_t off)
2665{
2666 const char *name;
2667
2668 name = elf_strptr(obj->efile.elf, obj->efile.strtabidx, off);
2669 if (!name) {
2670 pr_warn("elf: failed to get section name string at offset %zu from %s: %s\n",
2671 off, obj->path, elf_errmsg(-1));
2672 return NULL;
2673 }
2674
2675 return name;
2676}
2677
2678static const char *elf_sec_str(const struct bpf_object *obj, size_t off)
2679{
2680 const char *name;
2681
2682 name = elf_strptr(obj->efile.elf, obj->efile.shstrndx, off);
2683 if (!name) {
2684 pr_warn("elf: failed to get section name string at offset %zu from %s: %s\n",
2685 off, obj->path, elf_errmsg(-1));
2686 return NULL;
2687 }
2688
2689 return name;
2690}
2691
2692static Elf_Scn *elf_sec_by_idx(const struct bpf_object *obj, size_t idx)
2693{
2694 Elf_Scn *scn;
2695
2696 scn = elf_getscn(obj->efile.elf, idx);
2697 if (!scn) {
2698 pr_warn("elf: failed to get section(%zu) from %s: %s\n",
2699 idx, obj->path, elf_errmsg(-1));
2700 return NULL;
2701 }
2702 return scn;
2703}
2704
2705static Elf_Scn *elf_sec_by_name(const struct bpf_object *obj, const char *name)
2706{
2707 Elf_Scn *scn = NULL;
2708 Elf *elf = obj->efile.elf;
2709 const char *sec_name;
2710
2711 while ((scn = elf_nextscn(elf, scn)) != NULL) {
2712 sec_name = elf_sec_name(obj, scn);
2713 if (!sec_name)
2714 return NULL;
2715
2716 if (strcmp(sec_name, name) != 0)
2717 continue;
2718
2719 return scn;
2720 }
2721 return NULL;
2722}
2723
2724static int elf_sec_hdr(const struct bpf_object *obj, Elf_Scn *scn, GElf_Shdr *hdr)
2725{
2726 if (!scn)
2727 return -EINVAL;
2728
2729 if (gelf_getshdr(scn, hdr) != hdr) {
2730 pr_warn("elf: failed to get section(%zu) header from %s: %s\n",
2731 elf_ndxscn(scn), obj->path, elf_errmsg(-1));
2732 return -EINVAL;
2733 }
2734
2735 return 0;
2736}
2737
2738static const char *elf_sec_name(const struct bpf_object *obj, Elf_Scn *scn)
2739{
2740 const char *name;
2741 GElf_Shdr sh;
2742
2743 if (!scn)
2744 return NULL;
2745
2746 if (elf_sec_hdr(obj, scn, &sh))
2747 return NULL;
2748
2749 name = elf_sec_str(obj, sh.sh_name);
2750 if (!name) {
2751 pr_warn("elf: failed to get section(%zu) name from %s: %s\n",
2752 elf_ndxscn(scn), obj->path, elf_errmsg(-1));
2753 return NULL;
2754 }
2755
2756 return name;
2757}
2758
2759static Elf_Data *elf_sec_data(const struct bpf_object *obj, Elf_Scn *scn)
2760{
2761 Elf_Data *data;
2762
2763 if (!scn)
2764 return NULL;
2765
2766 data = elf_getdata(scn, 0);
2767 if (!data) {
2768 pr_warn("elf: failed to get section(%zu) %s data from %s: %s\n",
2769 elf_ndxscn(scn), elf_sec_name(obj, scn) ?: "<?>",
2770 obj->path, elf_errmsg(-1));
2771 return NULL;
2772 }
2773
2774 return data;
2775}
2776
c1122392
AN
2777static int elf_sym_by_sec_off(const struct bpf_object *obj, size_t sec_idx,
2778 size_t off, __u32 sym_type, GElf_Sym *sym)
2779{
2780 Elf_Data *symbols = obj->efile.symbols;
2781 size_t n = symbols->d_size / sizeof(GElf_Sym);
2782 int i;
2783
2784 for (i = 0; i < n; i++) {
2785 if (!gelf_getsym(symbols, i, sym))
2786 continue;
2787 if (sym->st_shndx != sec_idx || sym->st_value != off)
2788 continue;
2789 if (GELF_ST_TYPE(sym->st_info) != sym_type)
2790 continue;
2791 return 0;
2792 }
2793
2794 return -ENOENT;
2795}
2796
50e09460
AN
2797static bool is_sec_name_dwarf(const char *name)
2798{
2799 /* approximation, but the actual list is too long */
2800 return strncmp(name, ".debug_", sizeof(".debug_") - 1) == 0;
2801}
2802
2803static bool ignore_elf_section(GElf_Shdr *hdr, const char *name)
2804{
2805 /* no special handling of .strtab */
2806 if (hdr->sh_type == SHT_STRTAB)
2807 return true;
2808
2809 /* ignore .llvm_addrsig section as well */
2810 if (hdr->sh_type == 0x6FFF4C03 /* SHT_LLVM_ADDRSIG */)
2811 return true;
2812
2813 /* having no subprograms leads to an empty .text section, so ignore it */
2814 if (hdr->sh_type == SHT_PROGBITS && hdr->sh_size == 0 &&
2815 strcmp(name, ".text") == 0)
2816 return true;
2817
2818 /* DWARF sections */
2819 if (is_sec_name_dwarf(name))
2820 return true;
2821
2822 if (strncmp(name, ".rel", sizeof(".rel") - 1) == 0) {
2823 name += sizeof(".rel") - 1;
2824 /* DWARF section relocations */
2825 if (is_sec_name_dwarf(name))
2826 return true;
2827
2828 /* .BTF and .BTF.ext don't need relocations */
2829 if (strcmp(name, BTF_ELF_SEC) == 0 ||
2830 strcmp(name, BTF_EXT_ELF_SEC) == 0)
2831 return true;
2832 }
2833
2834 return false;
2835}
2836
db2b8b06
AN
2837static int cmp_progs(const void *_a, const void *_b)
2838{
2839 const struct bpf_program *a = _a;
2840 const struct bpf_program *b = _b;
2841
2842 if (a->sec_idx != b->sec_idx)
2843 return a->sec_idx < b->sec_idx ? -1 : 1;
2844
2845 /* sec_insn_off can't be the same within the section */
2846 return a->sec_insn_off < b->sec_insn_off ? -1 : 1;
2847}
2848
0d13bfce 2849static int bpf_object__elf_collect(struct bpf_object *obj)
29603665
WN
2850{
2851 Elf *elf = obj->efile.elf;
f0187f0b 2852 Elf_Data *btf_ext_data = NULL;
1713d68b 2853 Elf_Data *btf_data = NULL;
666810e8 2854 int idx = 0, err = 0;
0201c575
AN
2855 const char *name;
2856 Elf_Data *data;
2857 Elf_Scn *scn;
2858 GElf_Shdr sh;
29603665 2859
0201c575
AN
2860 /* a bunch of ELF parsing functionality depends on processing symbols,
2861 * so do the first pass and find the symbol table
2862 */
2863 scn = NULL;
29603665 2864 while ((scn = elf_nextscn(elf, scn)) != NULL) {
0201c575
AN
2865 if (elf_sec_hdr(obj, scn, &sh))
2866 return -LIBBPF_ERRNO__FORMAT;
2867
2868 if (sh.sh_type == SHT_SYMTAB) {
2869 if (obj->efile.symbols) {
2870 pr_warn("elf: multiple symbol tables in %s\n", obj->path);
2871 return -LIBBPF_ERRNO__FORMAT;
2872 }
29603665 2873
0201c575
AN
2874 data = elf_sec_data(obj, scn);
2875 if (!data)
2876 return -LIBBPF_ERRNO__FORMAT;
2877
2878 obj->efile.symbols = data;
2879 obj->efile.symbols_shndx = elf_ndxscn(scn);
2880 obj->efile.strtabidx = sh.sh_link;
2881 }
2882 }
2883
2884 scn = NULL;
2885 while ((scn = elf_nextscn(elf, scn)) != NULL) {
29603665 2886 idx++;
88a82120
AN
2887
2888 if (elf_sec_hdr(obj, scn, &sh))
01b29d1d 2889 return -LIBBPF_ERRNO__FORMAT;
29603665 2890
88a82120
AN
2891 name = elf_sec_str(obj, sh.sh_name);
2892 if (!name)
01b29d1d 2893 return -LIBBPF_ERRNO__FORMAT;
29603665 2894
50e09460
AN
2895 if (ignore_elf_section(&sh, name))
2896 continue;
2897
88a82120
AN
2898 data = elf_sec_data(obj, scn);
2899 if (!data)
01b29d1d 2900 return -LIBBPF_ERRNO__FORMAT;
88a82120
AN
2901
2902 pr_debug("elf: section(%d) %s, size %ld, link %d, flags %lx, type=%d\n",
077c066a 2903 idx, name, (unsigned long)data->d_size,
29603665
WN
2904 (int)sh.sh_link, (unsigned long)sh.sh_flags,
2905 (int)sh.sh_type);
cb1e5e96 2906
1713d68b 2907 if (strcmp(name, "license") == 0) {
88a82120 2908 err = bpf_object__init_license(obj, data->d_buf, data->d_size);
01b29d1d
AN
2909 if (err)
2910 return err;
1713d68b 2911 } else if (strcmp(name, "version") == 0) {
88a82120 2912 err = bpf_object__init_kversion(obj, data->d_buf, data->d_size);
54b8625c
JF
2913 if (err)
2914 return err;
1713d68b 2915 } else if (strcmp(name, "maps") == 0) {
666810e8 2916 obj->efile.maps_shndx = idx;
abd29c93
AN
2917 } else if (strcmp(name, MAPS_ELF_SEC) == 0) {
2918 obj->efile.btf_maps_shndx = idx;
1713d68b
DB
2919 } else if (strcmp(name, BTF_ELF_SEC) == 0) {
2920 btf_data = data;
2993e051 2921 } else if (strcmp(name, BTF_EXT_ELF_SEC) == 0) {
f0187f0b 2922 btf_ext_data = data;
8a138aed 2923 } else if (sh.sh_type == SHT_SYMTAB) {
0201c575 2924 /* already processed during the first pass above */
f8c7a4d4
JS
2925 } else if (sh.sh_type == SHT_PROGBITS && data->d_size > 0) {
2926 if (sh.sh_flags & SHF_EXECINSTR) {
2927 if (strcmp(name, ".text") == 0)
2928 obj->efile.text_shndx = idx;
c1122392 2929 err = bpf_object__add_programs(obj, data, name, idx);
88a82120 2930 if (err)
01b29d1d 2931 return err;
ac9d1389 2932 } else if (strcmp(name, DATA_SEC) == 0) {
d859900c
DB
2933 obj->efile.data = data;
2934 obj->efile.data_shndx = idx;
ac9d1389 2935 } else if (strcmp(name, RODATA_SEC) == 0) {
d859900c
DB
2936 obj->efile.rodata = data;
2937 obj->efile.rodata_shndx = idx;
590a0088
MKL
2938 } else if (strcmp(name, STRUCT_OPS_SEC) == 0) {
2939 obj->efile.st_ops_data = data;
2940 obj->efile.st_ops_shndx = idx;
d859900c 2941 } else {
50e09460
AN
2942 pr_info("elf: skipping unrecognized data section(%d) %s\n",
2943 idx, name);
a5b8bd47 2944 }
b62f06e8 2945 } else if (sh.sh_type == SHT_REL) {
1f8e2bcb
AN
2946 int nr_sects = obj->efile.nr_reloc_sects;
2947 void *sects = obj->efile.reloc_sects;
e3d91b0c
JDB
2948 int sec = sh.sh_info; /* points to other section */
2949
2950 /* Only do relo for section with exec instructions */
590a0088 2951 if (!section_have_execinstr(obj, sec) &&
646f02ff
AN
2952 strcmp(name, ".rel" STRUCT_OPS_SEC) &&
2953 strcmp(name, ".rel" MAPS_ELF_SEC)) {
50e09460
AN
2954 pr_info("elf: skipping relo section(%d) %s for section(%d) %s\n",
2955 idx, name, sec,
2956 elf_sec_name(obj, elf_sec_by_idx(obj, sec)) ?: "<?>");
e3d91b0c
JDB
2957 continue;
2958 }
b62f06e8 2959
029258d7
AN
2960 sects = libbpf_reallocarray(sects, nr_sects + 1,
2961 sizeof(*obj->efile.reloc_sects));
88a82120 2962 if (!sects)
01b29d1d 2963 return -ENOMEM;
b62f06e8 2964
1f8e2bcb
AN
2965 obj->efile.reloc_sects = sects;
2966 obj->efile.nr_reloc_sects++;
b62f06e8 2967
1f8e2bcb
AN
2968 obj->efile.reloc_sects[nr_sects].shdr = sh;
2969 obj->efile.reloc_sects[nr_sects].data = data;
88a82120 2970 } else if (sh.sh_type == SHT_NOBITS && strcmp(name, BSS_SEC) == 0) {
d859900c
DB
2971 obj->efile.bss = data;
2972 obj->efile.bss_shndx = idx;
077c066a 2973 } else {
2e80be60
AN
2974 pr_info("elf: skipping section(%d) %s (size %zu)\n", idx, name,
2975 (size_t)sh.sh_size);
bec7d68c 2976 }
29603665 2977 }
561bbcca 2978
d3a3aa0c 2979 if (!obj->efile.strtabidx || obj->efile.strtabidx > idx) {
88a82120 2980 pr_warn("elf: symbol strings section missing or invalid in %s\n", obj->path);
f102154d 2981 return -LIBBPF_ERRNO__FORMAT;
77ba9a5b 2982 }
db2b8b06
AN
2983
2984 /* sort BPF programs by section index and in-section instruction offset
2985 * for faster search */
2986 qsort(obj->programs, obj->nr_programs, sizeof(*obj->programs), cmp_progs);
2987
0d13bfce 2988 return bpf_object__init_btf(obj, btf_data, btf_ext_data);
29603665
WN
2989}
2990
166750bc
AN
2991static bool sym_is_extern(const GElf_Sym *sym)
2992{
2993 int bind = GELF_ST_BIND(sym->st_info);
2994 /* externs are symbols w/ type=NOTYPE, bind=GLOBAL|WEAK, section=UND */
2995 return sym->st_shndx == SHN_UNDEF &&
2996 (bind == STB_GLOBAL || bind == STB_WEAK) &&
2997 GELF_ST_TYPE(sym->st_info) == STT_NOTYPE;
2998}
2999
53eddb5e
YS
3000static bool sym_is_subprog(const GElf_Sym *sym, int text_shndx)
3001{
3002 int bind = GELF_ST_BIND(sym->st_info);
3003 int type = GELF_ST_TYPE(sym->st_info);
3004
3005 /* in .text section */
3006 if (sym->st_shndx != text_shndx)
3007 return false;
3008
3009 /* local function */
3010 if (bind == STB_LOCAL && type == STT_SECTION)
3011 return true;
3012
3013 /* global function */
3014 return bind == STB_GLOBAL && type == STT_FUNC;
3015}
3016
166750bc
AN
3017static int find_extern_btf_id(const struct btf *btf, const char *ext_name)
3018{
3019 const struct btf_type *t;
3020 const char *var_name;
3021 int i, n;
3022
3023 if (!btf)
3024 return -ESRCH;
3025
3026 n = btf__get_nr_types(btf);
3027 for (i = 1; i <= n; i++) {
3028 t = btf__type_by_id(btf, i);
3029
3030 if (!btf_is_var(t))
3031 continue;
3032
3033 var_name = btf__name_by_offset(btf, t->name_off);
3034 if (strcmp(var_name, ext_name))
3035 continue;
3036
3037 if (btf_var(t)->linkage != BTF_VAR_GLOBAL_EXTERN)
3038 return -EINVAL;
3039
3040 return i;
3041 }
3042
3043 return -ENOENT;
3044}
3045
2e33efe3
AN
3046static int find_extern_sec_btf_id(struct btf *btf, int ext_btf_id)
{
3047 const struct btf_var_secinfo *vs;
3048 const struct btf_type *t;
3049 int i, j, n;
3050
3051 if (!btf)
3052 return -ESRCH;
3053
3054 n = btf__get_nr_types(btf);
3055 for (i = 1; i <= n; i++) {
3056 t = btf__type_by_id(btf, i);
3057
3058 if (!btf_is_datasec(t))
3059 continue;
3060
3061 vs = btf_var_secinfos(t);
3062 for (j = 0; j < btf_vlen(t); j++, vs++) {
3063 if (vs->type == ext_btf_id)
3064 return i;
3065 }
3066 }
3067
3068 return -ENOENT;
3069}
3070
3071static enum kcfg_type find_kcfg_type(const struct btf *btf, int id,
3072 bool *is_signed)
166750bc
AN
3073{
3074 const struct btf_type *t;
3075 const char *name;
3076
3077 t = skip_mods_and_typedefs(btf, id, NULL);
3078 name = btf__name_by_offset(btf, t->name_off);
3079
3080 if (is_signed)
3081 *is_signed = false;
3082 switch (btf_kind(t)) {
3083 case BTF_KIND_INT: {
3084 int enc = btf_int_encoding(t);
3085
3086 if (enc & BTF_INT_BOOL)
2e33efe3 3087 return t->size == 1 ? KCFG_BOOL : KCFG_UNKNOWN;
166750bc
AN
3088 if (is_signed)
3089 *is_signed = enc & BTF_INT_SIGNED;
3090 if (t->size == 1)
2e33efe3 3091 return KCFG_CHAR;
166750bc 3092 if (t->size < 1 || t->size > 8 || (t->size & (t->size - 1)))
2e33efe3
AN
3093 return KCFG_UNKNOWN;
3094 return KCFG_INT;
166750bc
AN
3095 }
3096 case BTF_KIND_ENUM:
3097 if (t->size != 4)
2e33efe3 3098 return KCFG_UNKNOWN;
166750bc 3099 if (strcmp(name, "libbpf_tristate"))
2e33efe3
AN
3100 return KCFG_UNKNOWN;
3101 return KCFG_TRISTATE;
166750bc
AN
3102 case BTF_KIND_ARRAY:
3103 if (btf_array(t)->nelems == 0)
2e33efe3
AN
3104 return KCFG_UNKNOWN;
3105 if (find_kcfg_type(btf, btf_array(t)->type, NULL) != KCFG_CHAR)
3106 return KCFG_UNKNOWN;
3107 return KCFG_CHAR_ARR;
166750bc 3108 default:
2e33efe3 3109 return KCFG_UNKNOWN;
166750bc
AN
3110 }
3111}
3112
3113static int cmp_externs(const void *_a, const void *_b)
3114{
3115 const struct extern_desc *a = _a;
3116 const struct extern_desc *b = _b;
3117
2e33efe3
AN
3118 if (a->type != b->type)
3119 return a->type < b->type ? -1 : 1;
3120
3121 if (a->type == EXT_KCFG) {
3122 /* descending order by alignment requirements */
3123 if (a->kcfg.align != b->kcfg.align)
3124 return a->kcfg.align > b->kcfg.align ? -1 : 1;
3125 /* ascending order by size, within same alignment class */
3126 if (a->kcfg.sz != b->kcfg.sz)
3127 return a->kcfg.sz < b->kcfg.sz ? -1 : 1;
3128 }
3129
166750bc
AN
3130 /* resolve ties by name */
3131 return strcmp(a->name, b->name);
3132}
3133
1c0c7074
AN
3134static int find_int_btf_id(const struct btf *btf)
3135{
3136 const struct btf_type *t;
3137 int i, n;
3138
3139 n = btf__get_nr_types(btf);
3140 for (i = 1; i <= n; i++) {
3141 t = btf__type_by_id(btf, i);
3142
3143 if (btf_is_int(t) && btf_int_bits(t) == 32)
3144 return i;
3145 }
3146
3147 return 0;
3148}
3149
166750bc
AN
3150static int bpf_object__collect_externs(struct bpf_object *obj)
3151{
1c0c7074 3152 struct btf_type *sec, *kcfg_sec = NULL, *ksym_sec = NULL;
166750bc
AN
3153 const struct btf_type *t;
3154 struct extern_desc *ext;
2e33efe3
AN
3155 int i, n, off;
3156 const char *ext_name, *sec_name;
166750bc
AN
3157 Elf_Scn *scn;
3158 GElf_Shdr sh;
3159
3160 if (!obj->efile.symbols)
3161 return 0;
3162
88a82120
AN
3163 scn = elf_sec_by_idx(obj, obj->efile.symbols_shndx);
3164 if (elf_sec_hdr(obj, scn, &sh))
166750bc 3165 return -LIBBPF_ERRNO__FORMAT;
166750bc 3166
88a82120 3167 n = sh.sh_size / sh.sh_entsize;
166750bc 3168 pr_debug("looking for externs among %d symbols...\n", n);
88a82120 3169
166750bc
AN
3170 for (i = 0; i < n; i++) {
3171 GElf_Sym sym;
3172
3173 if (!gelf_getsym(obj->efile.symbols, i, &sym))
3174 return -LIBBPF_ERRNO__FORMAT;
3175 if (!sym_is_extern(&sym))
3176 continue;
88a82120 3177 ext_name = elf_sym_str(obj, sym.st_name);
166750bc
AN
3178 if (!ext_name || !ext_name[0])
3179 continue;
3180
3181 ext = obj->externs;
029258d7 3182 ext = libbpf_reallocarray(ext, obj->nr_extern + 1, sizeof(*ext));
166750bc
AN
3183 if (!ext)
3184 return -ENOMEM;
3185 obj->externs = ext;
3186 ext = &ext[obj->nr_extern];
3187 memset(ext, 0, sizeof(*ext));
3188 obj->nr_extern++;
3189
3190 ext->btf_id = find_extern_btf_id(obj->btf, ext_name);
3191 if (ext->btf_id <= 0) {
3192 pr_warn("failed to find BTF for extern '%s': %d\n",
3193 ext_name, ext->btf_id);
3194 return ext->btf_id;
3195 }
3196 t = btf__type_by_id(obj->btf, ext->btf_id);
3197 ext->name = btf__name_by_offset(obj->btf, t->name_off);
3198 ext->sym_idx = i;
3199 ext->is_weak = GELF_ST_BIND(sym.st_info) == STB_WEAK;
2e33efe3
AN
3200
3201 ext->sec_btf_id = find_extern_sec_btf_id(obj->btf, ext->btf_id);
3202 if (ext->sec_btf_id <= 0) {
3203 pr_warn("failed to find BTF for extern '%s' [%d] section: %d\n",
3204 ext_name, ext->btf_id, ext->sec_btf_id);
3205 return ext->sec_btf_id;
166750bc 3206 }
2e33efe3
AN
3207 sec = (void *)btf__type_by_id(obj->btf, ext->sec_btf_id);
3208 sec_name = btf__name_by_offset(obj->btf, sec->name_off);
3209
3210 if (strcmp(sec_name, KCONFIG_SEC) == 0) {
3211 kcfg_sec = sec;
3212 ext->type = EXT_KCFG;
3213 ext->kcfg.sz = btf__resolve_size(obj->btf, t->type);
3214 if (ext->kcfg.sz <= 0) {
3215 pr_warn("failed to resolve size of extern (kcfg) '%s': %d\n",
3216 ext_name, ext->kcfg.sz);
3217 return ext->kcfg.sz;
3218 }
3219 ext->kcfg.align = btf__align_of(obj->btf, t->type);
3220 if (ext->kcfg.align <= 0) {
3221 pr_warn("failed to determine alignment of extern (kcfg) '%s': %d\n",
3222 ext_name, ext->kcfg.align);
3223 return -EINVAL;
3224 }
3225 ext->kcfg.type = find_kcfg_type(obj->btf, t->type,
3226 &ext->kcfg.is_signed);
3227 if (ext->kcfg.type == KCFG_UNKNOWN) {
3228 pr_warn("extern (kcfg) '%s' type is unsupported\n", ext_name);
3229 return -ENOTSUP;
3230 }
1c0c7074 3231 } else if (strcmp(sec_name, KSYMS_SEC) == 0) {
1c0c7074
AN
3232 ksym_sec = sec;
3233 ext->type = EXT_KSYM;
d370bbe1
HL
3234 skip_mods_and_typedefs(obj->btf, t->type,
3235 &ext->ksym.type_id);
2e33efe3
AN
3236 } else {
3237 pr_warn("unrecognized extern section '%s'\n", sec_name);
166750bc
AN
3238 return -ENOTSUP;
3239 }
3240 }
3241 pr_debug("collected %d externs total\n", obj->nr_extern);
3242
3243 if (!obj->nr_extern)
3244 return 0;
3245
2e33efe3 3246 /* sort externs by type, for kcfg ones also by (align, size, name) */
166750bc 3247 qsort(obj->externs, obj->nr_extern, sizeof(*ext), cmp_externs);
166750bc 3248
1c0c7074
AN
3249 /* for .ksyms section, we need to turn all externs into allocated
3250 * variables in BTF to pass kernel verification; we do this by
3251 * pretending that each extern is a 4-byte integer variable
3252 */
3253 if (ksym_sec) {
3254 /* find existing 4-byte integer type in BTF to use for fake
3255 * extern variables in DATASEC
3256 */
3257 int int_btf_id = find_int_btf_id(obj->btf);
3258
3259 for (i = 0; i < obj->nr_extern; i++) {
3260 ext = &obj->externs[i];
3261 if (ext->type != EXT_KSYM)
3262 continue;
3263 pr_debug("extern (ksym) #%d: symbol %d, name %s\n",
3264 i, ext->sym_idx, ext->name);
3265 }
3266
3267 sec = ksym_sec;
3268 n = btf_vlen(sec);
3269 for (i = 0, off = 0; i < n; i++, off += sizeof(int)) {
3270 struct btf_var_secinfo *vs = btf_var_secinfos(sec) + i;
3271 struct btf_type *vt;
3272
3273 vt = (void *)btf__type_by_id(obj->btf, vs->type);
3274 ext_name = btf__name_by_offset(obj->btf, vt->name_off);
3275 ext = find_extern_by_name(obj, ext_name);
3276 if (!ext) {
3277 pr_warn("failed to find extern definition for BTF var '%s'\n",
3278 ext_name);
3279 return -ESRCH;
3280 }
3281 btf_var(vt)->linkage = BTF_VAR_GLOBAL_ALLOCATED;
3282 vt->type = int_btf_id;
3283 vs->offset = off;
3284 vs->size = sizeof(int);
3285 }
3286 sec->size = off;
3287 }
3288
2e33efe3
AN
3289 if (kcfg_sec) {
3290 sec = kcfg_sec;
3291 /* for kcfg externs calculate their offsets within a .kconfig map */
3292 off = 0;
3293 for (i = 0; i < obj->nr_extern; i++) {
3294 ext = &obj->externs[i];
3295 if (ext->type != EXT_KCFG)
3296 continue;
166750bc 3297
2e33efe3
AN
3298 ext->kcfg.data_off = roundup(off, ext->kcfg.align);
3299 off = ext->kcfg.data_off + ext->kcfg.sz;
1c0c7074 3300 pr_debug("extern (kcfg) #%d: symbol %d, off %u, name %s\n",
2e33efe3
AN
3301 i, ext->sym_idx, ext->kcfg.data_off, ext->name);
3302 }
3303 sec->size = off;
3304 n = btf_vlen(sec);
3305 for (i = 0; i < n; i++) {
3306 struct btf_var_secinfo *vs = btf_var_secinfos(sec) + i;
3307
3308 t = btf__type_by_id(obj->btf, vs->type);
3309 ext_name = btf__name_by_offset(obj->btf, t->name_off);
3310 ext = find_extern_by_name(obj, ext_name);
3311 if (!ext) {
3312 pr_warn("failed to find extern definition for BTF var '%s'\n",
3313 ext_name);
3314 return -ESRCH;
3315 }
3316 btf_var(t)->linkage = BTF_VAR_GLOBAL_ALLOCATED;
3317 vs->offset = ext->kcfg.data_off;
166750bc 3318 }
166750bc 3319 }
166750bc
AN
3320 return 0;
3321}
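
/* Illustrative externs that this function collects (assumed user-side
 * declarations; __kconfig/__ksym are the section attributes from
 * tools/lib/bpf/bpf_helpers.h):
 *
 *	extern unsigned int CONFIG_HZ __kconfig;    -> EXT_KCFG
 *	extern const struct rq runqueues __ksym;    -> EXT_KSYM (typed)
 */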
3322
6d4b198b 3323struct bpf_program *
a324aae3
AN
3324bpf_object__find_program_by_title(const struct bpf_object *obj,
3325 const char *title)
6d4b198b
JK
3326{
3327 struct bpf_program *pos;
3328
3329 bpf_object__for_each_program(pos, obj) {
52109584 3330 if (pos->sec_name && !strcmp(pos->sec_name, title))
6d4b198b
JK
3331 return pos;
3332 }
3333 return NULL;
3334}
3335
c3c55696
AN
3336static bool prog_is_subprog(const struct bpf_object *obj,
3337 const struct bpf_program *prog)
3338{
197afc63
AN
3339 /* For legacy reasons, libbpf supports entry-point BPF programs
3340 * without SEC() attribute, i.e., those in the .text section. But if
3341 * there are 2 or more such programs in the .text section, they all
3342 * must be subprograms called from entry-point BPF programs in
3343 * designated SEC()'tions; otherwise there is no way to distinguish
3344 * which of those programs should be loaded vs which are subprograms.
3345 * Similarly, if there is a function/program in .text and at least one
3346 * other BPF program with custom SEC() attribute, then we just assume
3347 * .text programs are subprograms (even if they are not called from
3348 * other programs), because libbpf never explicitly supported mixing
3349 * SEC()-designated BPF programs and .text entry-point BPF programs.
3350 */
3351 return prog->sec_idx == obj->efile.text_shndx && obj->nr_programs > 1;
c3c55696
AN
3352}
3353
01af3bf0
AN
3354struct bpf_program *
3355bpf_object__find_program_by_name(const struct bpf_object *obj,
3356 const char *name)
3357{
3358 struct bpf_program *prog;
3359
3360 bpf_object__for_each_program(prog, obj) {
c3c55696
AN
3361 if (prog_is_subprog(obj, prog))
3362 continue;
01af3bf0
AN
3363 if (!strcmp(prog->name, name))
3364 return prog;
3365 }
3366 return NULL;
3367}
3368
d859900c
DB
3369static bool bpf_object__shndx_is_data(const struct bpf_object *obj,
3370 int shndx)
3371{
3372 return shndx == obj->efile.data_shndx ||
3373 shndx == obj->efile.bss_shndx ||
3374 shndx == obj->efile.rodata_shndx;
3375}
3376
3377static bool bpf_object__shndx_is_maps(const struct bpf_object *obj,
3378 int shndx)
3379{
abd29c93
AN
3380 return shndx == obj->efile.maps_shndx ||
3381 shndx == obj->efile.btf_maps_shndx;
d859900c
DB
3382}
3383
d859900c
DB
3384static enum libbpf_map_type
3385bpf_object__section_to_libbpf_map_type(const struct bpf_object *obj, int shndx)
3386{
3387 if (shndx == obj->efile.data_shndx)
3388 return LIBBPF_MAP_DATA;
3389 else if (shndx == obj->efile.bss_shndx)
3390 return LIBBPF_MAP_BSS;
3391 else if (shndx == obj->efile.rodata_shndx)
3392 return LIBBPF_MAP_RODATA;
166750bc 3393 else if (shndx == obj->efile.symbols_shndx)
81bfdd08 3394 return LIBBPF_MAP_KCONFIG;
d859900c
DB
3395 else
3396 return LIBBPF_MAP_UNSPEC;
3397}
3398
1f8e2bcb
AN
3399static int bpf_program__record_reloc(struct bpf_program *prog,
3400 struct reloc_desc *reloc_desc,
9c0f8cbd 3401 __u32 insn_idx, const char *sym_name,
1f8e2bcb
AN
3402 const GElf_Sym *sym, const GElf_Rel *rel)
3403{
3404 struct bpf_insn *insn = &prog->insns[insn_idx];
3405 size_t map_idx, nr_maps = prog->obj->nr_maps;
3406 struct bpf_object *obj = prog->obj;
3407 __u32 shdr_idx = sym->st_shndx;
3408 enum libbpf_map_type type;
9c0f8cbd 3409 const char *sym_sec_name;
1f8e2bcb
AN
3410 struct bpf_map *map;
3411
c3c55696
AN
3412 reloc_desc->processed = false;
3413
1f8e2bcb
AN
3414 /* sub-program call relocation */
3415 if (insn->code == (BPF_JMP | BPF_CALL)) {
3416 if (insn->src_reg != BPF_PSEUDO_CALL) {
9c0f8cbd 3417 pr_warn("prog '%s': incorrect bpf_call opcode\n", prog->name);
1f8e2bcb
AN
3418 return -LIBBPF_ERRNO__RELOC;
3419 }
3420 /* text_shndx can be 0, if no default "main" program exists */
3421 if (!shdr_idx || shdr_idx != obj->efile.text_shndx) {
9c0f8cbd
AN
3422 sym_sec_name = elf_sec_name(obj, elf_sec_by_idx(obj, shdr_idx));
3423 pr_warn("prog '%s': bad call relo against '%s' in section '%s'\n",
3424 prog->name, sym_name, sym_sec_name);
1f8e2bcb
AN
3425 return -LIBBPF_ERRNO__RELOC;
3426 }
9c0f8cbd
AN
3427 if (sym->st_value % BPF_INSN_SZ) {
3428 pr_warn("prog '%s': bad call relo against '%s' at offset %zu\n",
3429 prog->name, sym_name, (size_t)sym->st_value);
1f8e2bcb
AN
3430 return -LIBBPF_ERRNO__RELOC;
3431 }
3432 reloc_desc->type = RELO_CALL;
3433 reloc_desc->insn_idx = insn_idx;
53f8dd43 3434 reloc_desc->sym_off = sym->st_value;
1f8e2bcb
AN
3435 return 0;
3436 }
3437
b8f871fa 3438 if (!is_ldimm64(insn)) {
9c0f8cbd
AN
3439 pr_warn("prog '%s': invalid relo against '%s' for insns[%d].code 0x%x\n",
3440 prog->name, sym_name, insn_idx, insn->code);
1f8e2bcb
AN
3441 return -LIBBPF_ERRNO__RELOC;
3442 }
166750bc
AN
3443
3444 if (sym_is_extern(sym)) {
3445 int sym_idx = GELF_R_SYM(rel->r_info);
3446 int i, n = obj->nr_extern;
3447 struct extern_desc *ext;
3448
3449 for (i = 0; i < n; i++) {
3450 ext = &obj->externs[i];
3451 if (ext->sym_idx == sym_idx)
3452 break;
3453 }
3454 if (i >= n) {
9c0f8cbd
AN
3455 pr_warn("prog '%s': extern relo failed to find extern for '%s' (%d)\n",
3456 prog->name, sym_name, sym_idx);
166750bc
AN
3457 return -LIBBPF_ERRNO__RELOC;
3458 }
9c0f8cbd
AN
3459 pr_debug("prog '%s': found extern #%d '%s' (sym %d) for insn #%u\n",
3460 prog->name, i, ext->name, ext->sym_idx, insn_idx);
166750bc
AN
3461 reloc_desc->type = RELO_EXTERN;
3462 reloc_desc->insn_idx = insn_idx;
2e33efe3 3463 reloc_desc->sym_off = i; /* sym_off stores extern index */
166750bc
AN
3464 return 0;
3465 }
3466
1f8e2bcb 3467 if (!shdr_idx || shdr_idx >= SHN_LORESERVE) {
9c0f8cbd
AN
3468 pr_warn("prog '%s': invalid relo against '%s' in special section 0x%x; forgot to initialize global var?..\n",
3469 prog->name, sym_name, shdr_idx);
1f8e2bcb
AN
3470 return -LIBBPF_ERRNO__RELOC;
3471 }
3472
53eddb5e
YS
3473 /* loading subprog addresses */
3474 if (sym_is_subprog(sym, obj->efile.text_shndx)) {
3475 /* global_func: sym->st_value = offset in the section, insn->imm = 0.
3476 * local_func: sym->st_value = 0, insn->imm = offset in the section.
3477 */
3478 if ((sym->st_value % BPF_INSN_SZ) || (insn->imm % BPF_INSN_SZ)) {
3479 pr_warn("prog '%s': bad subprog addr relo against '%s' at offset %zu+%d\n",
3480 prog->name, sym_name, (size_t)sym->st_value, insn->imm);
3481 return -LIBBPF_ERRNO__RELOC;
3482 }
3483
3484 reloc_desc->type = RELO_SUBPROG_ADDR;
3485 reloc_desc->insn_idx = insn_idx;
3486 reloc_desc->sym_off = sym->st_value;
3487 return 0;
3488 }
3489
1f8e2bcb 3490 type = bpf_object__section_to_libbpf_map_type(obj, shdr_idx);
9c0f8cbd 3491 sym_sec_name = elf_sec_name(obj, elf_sec_by_idx(obj, shdr_idx));
1f8e2bcb
AN
3492
3493 /* generic map reference relocation */
3494 if (type == LIBBPF_MAP_UNSPEC) {
3495 if (!bpf_object__shndx_is_maps(obj, shdr_idx)) {
9c0f8cbd
AN
3496 pr_warn("prog '%s': bad map relo against '%s' in section '%s'\n",
3497 prog->name, sym_name, sym_sec_name);
1f8e2bcb
AN
3498 return -LIBBPF_ERRNO__RELOC;
3499 }
3500 for (map_idx = 0; map_idx < nr_maps; map_idx++) {
3501 map = &obj->maps[map_idx];
3502 if (map->libbpf_type != type ||
3503 map->sec_idx != sym->st_shndx ||
3504 map->sec_offset != sym->st_value)
3505 continue;
9c0f8cbd
AN
3506 pr_debug("prog '%s': found map %zd (%s, sec %d, off %zu) for insn #%u\n",
3507 prog->name, map_idx, map->name, map->sec_idx,
1f8e2bcb
AN
3508 map->sec_offset, insn_idx);
3509 break;
3510 }
3511 if (map_idx >= nr_maps) {
9c0f8cbd
AN
3512 pr_warn("prog '%s': map relo failed to find map for section '%s', off %zu\n",
3513 prog->name, sym_sec_name, (size_t)sym->st_value);
1f8e2bcb
AN
3514 return -LIBBPF_ERRNO__RELOC;
3515 }
3516 reloc_desc->type = RELO_LD64;
3517 reloc_desc->insn_idx = insn_idx;
3518 reloc_desc->map_idx = map_idx;
53f8dd43 3519 reloc_desc->sym_off = 0; /* sym->st_value determines map_idx */
1f8e2bcb
AN
3520 return 0;
3521 }
3522
3523 /* global data map relocation */
3524 if (!bpf_object__shndx_is_data(obj, shdr_idx)) {
9c0f8cbd
AN
3525 pr_warn("prog '%s': bad data relo against section '%s'\n",
3526 prog->name, sym_sec_name);
1f8e2bcb 3527 return -LIBBPF_ERRNO__RELOC;
1f8e2bcb 3528 }
1f8e2bcb
AN
3529 for (map_idx = 0; map_idx < nr_maps; map_idx++) {
3530 map = &obj->maps[map_idx];
3531 if (map->libbpf_type != type)
3532 continue;
9c0f8cbd
AN
3533 pr_debug("prog '%s': found data map %zd (%s, sec %d, off %zu) for insn %u\n",
3534 prog->name, map_idx, map->name, map->sec_idx,
3535 map->sec_offset, insn_idx);
1f8e2bcb
AN
3536 break;
3537 }
3538 if (map_idx >= nr_maps) {
9c0f8cbd
AN
3539 pr_warn("prog '%s': data relo failed to find map for section '%s'\n",
3540 prog->name, sym_sec_name);
1f8e2bcb
AN
3541 return -LIBBPF_ERRNO__RELOC;
3542 }
3543
3544 reloc_desc->type = RELO_DATA;
3545 reloc_desc->insn_idx = insn_idx;
3546 reloc_desc->map_idx = map_idx;
53f8dd43 3547 reloc_desc->sym_off = sym->st_value;
1f8e2bcb
AN
3548 return 0;
3549}
3550
db2b8b06
AN
3551static bool prog_contains_insn(const struct bpf_program *prog, size_t insn_idx)
3552{
3553 return insn_idx >= prog->sec_insn_off &&
3554 insn_idx < prog->sec_insn_off + prog->sec_insn_cnt;
3555}
3556
3557static struct bpf_program *find_prog_by_sec_insn(const struct bpf_object *obj,
3558 size_t sec_idx, size_t insn_idx)
3559{
3560 int l = 0, r = obj->nr_programs - 1, m;
3561 struct bpf_program *prog;
3562
3563 while (l < r) {
3564 m = l + (r - l + 1) / 2;
3565 prog = &obj->programs[m];
3566
3567 if (prog->sec_idx < sec_idx ||
3568 (prog->sec_idx == sec_idx && prog->sec_insn_off <= insn_idx))
3569 l = m;
3570 else
3571 r = m - 1;
3572 }
3573	/* matching program could be at index l, but it still might be the
3574	 * wrong one, so we need to double-check conditions one last time
3575 */
3576 prog = &obj->programs[l];
3577 if (prog->sec_idx == sec_idx && prog_contains_insn(prog, insn_idx))
3578 return prog;
3579 return NULL;
3580}
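/* [Editorial sketch, not part of libbpf] The search above relies on
 * obj->programs being sorted by (sec_idx, sec_insn_off) and finds the
 * rightmost program starting at or before insn_idx. The same
 * upper-bound-biased binary search in isolation, over a sorted int array:
 */
static int rightmost_le(const int *arr, int n, int key)
{
	int l = 0, r = n - 1, m;

	if (n == 0)
		return -1;
	while (l < r) {
		m = l + (r - l + 1) / 2; /* bias up; plain l = m could loop forever */
		if (arr[m] <= key)
			l = m;		/* arr[m] is a valid candidate, keep it */
		else
			r = m - 1;
	}
	return arr[l] <= key ? l : -1;	/* final double-check, as above */
}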
3581
34090915 3582static int
c3c55696 3583bpf_object__collect_prog_relos(struct bpf_object *obj, GElf_Shdr *shdr, Elf_Data *data)
34090915 3584{
48cca7e4 3585 Elf_Data *symbols = obj->efile.symbols;
9c0f8cbd
AN
3586 const char *relo_sec_name, *sec_name;
3587 size_t sec_idx = shdr->sh_info;
c3c55696
AN
3588 struct bpf_program *prog;
3589 struct reloc_desc *relos;
1f8e2bcb 3590 int err, i, nrels;
c3c55696
AN
3591 const char *sym_name;
3592 __u32 insn_idx;
3593 GElf_Sym sym;
3594 GElf_Rel rel;
34090915 3595
9c0f8cbd
AN
3596 relo_sec_name = elf_sec_str(obj, shdr->sh_name);
3597 sec_name = elf_sec_name(obj, elf_sec_by_idx(obj, sec_idx));
3598 if (!relo_sec_name || !sec_name)
3599 return -EINVAL;
3600
3601 pr_debug("sec '%s': collecting relocation for section(%zu) '%s'\n",
3602 relo_sec_name, sec_idx, sec_name);
34090915
WN
3603 nrels = shdr->sh_size / shdr->sh_entsize;
3604
34090915 3605 for (i = 0; i < nrels; i++) {
34090915 3606 if (!gelf_getrel(data, i, &rel)) {
9c0f8cbd 3607 pr_warn("sec '%s': failed to get relo #%d\n", relo_sec_name, i);
6371ca3b 3608 return -LIBBPF_ERRNO__FORMAT;
34090915 3609 }
399dc65e 3610 if (!gelf_getsym(symbols, GELF_R_SYM(rel.r_info), &sym)) {
9c0f8cbd
AN
3611 pr_warn("sec '%s': symbol 0x%zx not found for relo #%d\n",
3612 relo_sec_name, (size_t)GELF_R_SYM(rel.r_info), i);
6371ca3b 3613 return -LIBBPF_ERRNO__FORMAT;
34090915 3614 }
9c0f8cbd
AN
3615 if (rel.r_offset % BPF_INSN_SZ) {
3616 pr_warn("sec '%s': invalid offset 0x%zx for relo #%d\n",
3617 				relo_sec_name, (size_t)rel.r_offset, i);
1f8e2bcb 3618 return -LIBBPF_ERRNO__FORMAT;
9c0f8cbd 3619 }
d859900c 3620
9c0f8cbd 3621 insn_idx = rel.r_offset / BPF_INSN_SZ;
c3c55696
AN
3622 /* relocations against static functions are recorded as
3623 * relocations against the section that contains a function;
3624		 * in such a case, the symbol will be STT_SECTION and sym.st_name
3625		 * will point to an empty string (0), so fetch the section name
3626		 * instead
3627 */
3628 if (GELF_ST_TYPE(sym.st_info) == STT_SECTION && sym.st_name == 0)
3629 sym_name = elf_sec_name(obj, elf_sec_by_idx(obj, sym.st_shndx));
3630 else
3631 sym_name = elf_sym_str(obj, sym.st_name);
3632 sym_name = sym_name ?: "<?";
d859900c 3633
9c0f8cbd
AN
3634 pr_debug("sec '%s': relo #%d: insn #%u against '%s'\n",
3635 relo_sec_name, i, insn_idx, sym_name);
666810e8 3636
c3c55696
AN
3637 prog = find_prog_by_sec_insn(obj, sec_idx, insn_idx);
3638 if (!prog) {
3639 pr_warn("sec '%s': relo #%d: program not found in section '%s' for insn #%u\n",
3640 relo_sec_name, i, sec_name, insn_idx);
3641 return -LIBBPF_ERRNO__RELOC;
3642 }
3643
3644 relos = libbpf_reallocarray(prog->reloc_desc,
3645 prog->nr_reloc + 1, sizeof(*relos));
3646 if (!relos)
3647 return -ENOMEM;
3648 prog->reloc_desc = relos;
3649
3650 /* adjust insn_idx to local BPF program frame of reference */
3651 insn_idx -= prog->sec_insn_off;
3652 err = bpf_program__record_reloc(prog, &relos[prog->nr_reloc],
9c0f8cbd 3653 insn_idx, sym_name, &sym, &rel);
1f8e2bcb
AN
3654 if (err)
3655 return err;
c3c55696
AN
3656
3657 prog->nr_reloc++;
34090915
WN
3658 }
3659 return 0;
3660}
3661
abd29c93 3662static int bpf_map_find_btf_info(struct bpf_object *obj, struct bpf_map *map)
8a138aed
MKL
3663{
3664 struct bpf_map_def *def = &map->def;
d859900c 3665 __u32 key_type_id = 0, value_type_id = 0;
96408c43 3666 int ret;
8a138aed 3667
590a0088
MKL
3668	/* if it's a BTF-defined map, we don't need to search for type IDs.
3669	 * A struct_ops map needs neither btf_key_type_id nor
3670	 * btf_value_type_id.
3671 */
3672 if (map->sec_idx == obj->efile.btf_maps_shndx ||
3673 bpf_map__is_struct_ops(map))
abd29c93
AN
3674 return 0;
3675
d859900c 3676 if (!bpf_map__is_internal(map)) {
abd29c93 3677 ret = btf__get_map_kv_tids(obj->btf, map->name, def->key_size,
d859900c
DB
3678 def->value_size, &key_type_id,
3679 &value_type_id);
3680 } else {
3681 /*
3682 * LLVM annotates global data differently in BTF, that is,
3683 * only as '.data', '.bss' or '.rodata'.
3684 */
abd29c93 3685 ret = btf__find_by_name(obj->btf,
d859900c
DB
3686 libbpf_type_to_btf_name[map->libbpf_type]);
3687 }
3688 if (ret < 0)
96408c43 3689 return ret;
8a138aed 3690
96408c43 3691 map->btf_key_type_id = key_type_id;
d859900c
DB
3692 map->btf_value_type_id = bpf_map__is_internal(map) ?
3693 ret : value_type_id;
8a138aed
MKL
3694 return 0;
3695}
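/* [Editorial sketch, BPF-side C; 'my_hash' is a hypothetical map, macros
 * come from <bpf/bpf_helpers.h>] For BTF-defined maps the function above
 * returns early, because key/value types are spelled out in the map
 * definition itself and recorded in BTF by the compiler:
 */
struct {
	__uint(type, BPF_MAP_TYPE_HASH);
	__uint(max_entries, 128);
	__type(key, __u32);
	__type(value, __u64);
} my_hash SEC(".maps");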
3696
26736eb9
JK
3697int bpf_map__reuse_fd(struct bpf_map *map, int fd)
3698{
3699 struct bpf_map_info info = {};
3700 __u32 len = sizeof(info);
3701 int new_fd, err;
3702 char *new_name;
3703
3704 err = bpf_obj_get_info_by_fd(fd, &info, &len);
3705 if (err)
3706 return err;
3707
3708 new_name = strdup(info.name);
3709 if (!new_name)
3710 return -errno;
3711
3712 new_fd = open("/", O_RDONLY | O_CLOEXEC);
d1b4574a
THJ
3713 if (new_fd < 0) {
3714 err = -errno;
26736eb9 3715 goto err_free_new_name;
d1b4574a 3716 }
26736eb9
JK
3717
3718 new_fd = dup3(fd, new_fd, O_CLOEXEC);
d1b4574a
THJ
3719 if (new_fd < 0) {
3720 err = -errno;
26736eb9 3721 goto err_close_new_fd;
d1b4574a 3722 }
26736eb9
JK
3723
3724 err = zclose(map->fd);
d1b4574a
THJ
3725 if (err) {
3726 err = -errno;
26736eb9 3727 goto err_close_new_fd;
d1b4574a 3728 }
26736eb9
JK
3729 free(map->name);
3730
3731 map->fd = new_fd;
3732 map->name = new_name;
3733 map->def.type = info.type;
3734 map->def.key_size = info.key_size;
3735 map->def.value_size = info.value_size;
3736 map->def.max_entries = info.max_entries;
3737 map->def.map_flags = info.map_flags;
3738 map->btf_key_type_id = info.btf_key_type_id;
3739 map->btf_value_type_id = info.btf_value_type_id;
ec6d5f47 3740 map->reused = true;
26736eb9
JK
3741
3742 return 0;
3743
3744err_close_new_fd:
3745 close(new_fd);
3746err_free_new_name:
3747 free(new_name);
d1b4574a 3748 return err;
26736eb9
JK
3749}
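/* [Editorial sketch; map name and pin path are hypothetical] Typical
 * bpf_map__reuse_fd() usage: adopt an already pinned map before
 * bpf_object__load(), so the object skips creating its own copy:
 */
static int adopt_pinned_map(struct bpf_object *obj)
{
	struct bpf_map *map = bpf_object__find_map_by_name(obj, "my_map");
	int fd, err;

	if (!map)
		return -ENOENT;
	fd = bpf_obj_get("/sys/fs/bpf/my_map");
	if (fd < 0)
		return -errno;
	err = bpf_map__reuse_fd(map, fd); /* dup()s the fd internally */
	close(fd);
	return err;
}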
3750
1bdb6c9a 3751__u32 bpf_map__max_entries(const struct bpf_map *map)
1a11a4c7 3752{
1bdb6c9a
AN
3753 return map->def.max_entries;
3754}
1a11a4c7 3755
1bdb6c9a
AN
3756int bpf_map__set_max_entries(struct bpf_map *map, __u32 max_entries)
3757{
1a11a4c7
AI
3758 if (map->fd >= 0)
3759 return -EBUSY;
1a11a4c7 3760 map->def.max_entries = max_entries;
1a11a4c7
AI
3761 return 0;
3762}
3763
1bdb6c9a
AN
3764int bpf_map__resize(struct bpf_map *map, __u32 max_entries)
3765{
3766 if (!map || !max_entries)
3767 return -EINVAL;
3768
3769 return bpf_map__set_max_entries(map, max_entries);
3770}
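/* [Editorial sketch; object and map names are hypothetical] max_entries
 * can only be changed while map->fd < 0, i.e. after open but before load:
 */
static int bump_map_size(void)
{
	struct bpf_object *obj = bpf_object__open("prog.bpf.o");
	struct bpf_map *map;

	if (libbpf_get_error(obj))
		return -EINVAL;
	map = bpf_object__find_map_by_name(obj, "events");
	if (map)
		bpf_map__set_max_entries(map, 4096); /* -EBUSY once created */
	return bpf_object__load(obj);
}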
3771
47eff617 3772static int
fd9eef1a 3773bpf_object__probe_loading(struct bpf_object *obj)
47eff617
SF
3774{
3775 struct bpf_load_program_attr attr;
3776 char *cp, errmsg[STRERR_BUFSIZE];
3777 struct bpf_insn insns[] = {
3778 BPF_MOV64_IMM(BPF_REG_0, 0),
3779 BPF_EXIT_INSN(),
3780 };
3781 int ret;
3782
3783 /* make sure basic loading works */
3784
3785 memset(&attr, 0, sizeof(attr));
3786 attr.prog_type = BPF_PROG_TYPE_SOCKET_FILTER;
3787 attr.insns = insns;
3788 attr.insns_cnt = ARRAY_SIZE(insns);
3789 attr.license = "GPL";
3790
3791 ret = bpf_load_program_xattr(&attr, NULL, 0);
3792 if (ret < 0) {
fd9eef1a
EC
3793 ret = errno;
3794 cp = libbpf_strerror_r(ret, errmsg, sizeof(errmsg));
3795 pr_warn("Error in %s():%s(%d). Couldn't load trivial BPF "
3796 "program. Make sure your kernel supports BPF "
3797 "(CONFIG_BPF_SYSCALL=y) and/or that RLIMIT_MEMLOCK is "
3798 "set to big enough value.\n", __func__, cp, ret);
3799 return -ret;
47eff617
SF
3800 }
3801 close(ret);
3802
fd9eef1a
EC
3803 return 0;
3804}
3805
bb180fb2
AN
3806static int probe_fd(int fd)
3807{
3808 if (fd >= 0)
3809 close(fd);
3810 return fd >= 0;
3811}
3812
47b6cb4d 3813static int probe_kern_prog_name(void)
fd9eef1a
EC
3814{
3815 struct bpf_load_program_attr attr;
3816 struct bpf_insn insns[] = {
3817 BPF_MOV64_IMM(BPF_REG_0, 0),
3818 BPF_EXIT_INSN(),
3819 };
3820 int ret;
3821
3822 /* make sure loading with name works */
47eff617 3823
fd9eef1a
EC
3824 memset(&attr, 0, sizeof(attr));
3825 attr.prog_type = BPF_PROG_TYPE_SOCKET_FILTER;
3826 attr.insns = insns;
3827 attr.insns_cnt = ARRAY_SIZE(insns);
3828 attr.license = "GPL";
47eff617
SF
3829 attr.name = "test";
3830 ret = bpf_load_program_xattr(&attr, NULL, 0);
bb180fb2 3831 return probe_fd(ret);
47eff617
SF
3832}
3833
47b6cb4d 3834static int probe_kern_global_data(void)
8837fe5d
DB
3835{
3836 struct bpf_load_program_attr prg_attr;
3837 struct bpf_create_map_attr map_attr;
3838 char *cp, errmsg[STRERR_BUFSIZE];
3839 struct bpf_insn insns[] = {
3840 BPF_LD_MAP_VALUE(BPF_REG_1, 0, 16),
3841 BPF_ST_MEM(BPF_DW, BPF_REG_1, 0, 42),
3842 BPF_MOV64_IMM(BPF_REG_0, 0),
3843 BPF_EXIT_INSN(),
3844 };
3845 int ret, map;
3846
3847 memset(&map_attr, 0, sizeof(map_attr));
3848 map_attr.map_type = BPF_MAP_TYPE_ARRAY;
3849 map_attr.key_size = sizeof(int);
3850 map_attr.value_size = 32;
3851 map_attr.max_entries = 1;
3852
3853 map = bpf_create_map_xattr(&map_attr);
3854 if (map < 0) {
23ab656b
THJ
3855 ret = -errno;
3856 cp = libbpf_strerror_r(ret, errmsg, sizeof(errmsg));
be18010e 3857 pr_warn("Error in %s():%s(%d). Couldn't create simple array map.\n",
23ab656b
THJ
3858 __func__, cp, -ret);
3859 return ret;
8837fe5d
DB
3860 }
3861
3862 insns[0].imm = map;
3863
3864 memset(&prg_attr, 0, sizeof(prg_attr));
3865 prg_attr.prog_type = BPF_PROG_TYPE_SOCKET_FILTER;
3866 prg_attr.insns = insns;
3867 prg_attr.insns_cnt = ARRAY_SIZE(insns);
3868 prg_attr.license = "GPL";
3869
3870 ret = bpf_load_program_xattr(&prg_attr, NULL, 0);
47b6cb4d 3871 close(map);
bb180fb2 3872 return probe_fd(ret);
8837fe5d
DB
3873}
3874
68b08647
AN
3875static int probe_kern_btf(void)
3876{
3877 static const char strs[] = "\0int";
3878 __u32 types[] = {
3879 /* int */
3880 BTF_TYPE_INT_ENC(1, BTF_INT_SIGNED, 0, 32, 4),
3881 };
3882
3883 return probe_fd(libbpf__load_raw_btf((char *)types, sizeof(types),
3884 strs, sizeof(strs)));
3885}
3886
47b6cb4d 3887static int probe_kern_btf_func(void)
d7c4b398 3888{
8983b731 3889 static const char strs[] = "\0int\0x\0a";
d7c4b398
AN
3890 /* void x(int a) {} */
3891 __u32 types[] = {
3892 /* int */
3893 BTF_TYPE_INT_ENC(1, BTF_INT_SIGNED, 0, 32, 4), /* [1] */
3894 /* FUNC_PROTO */ /* [2] */
3895 BTF_TYPE_ENC(0, BTF_INFO_ENC(BTF_KIND_FUNC_PROTO, 0, 1), 0),
3896 BTF_PARAM_ENC(7, 1),
3897 /* FUNC x */ /* [3] */
3898 BTF_TYPE_ENC(5, BTF_INFO_ENC(BTF_KIND_FUNC, 0, 0), 2),
3899 };
d7c4b398 3900
bb180fb2
AN
3901 return probe_fd(libbpf__load_raw_btf((char *)types, sizeof(types),
3902 strs, sizeof(strs)));
d7c4b398
AN
3903}
3904
47b6cb4d 3905static int probe_kern_btf_func_global(void)
2d3eb67f
AS
3906{
3907 static const char strs[] = "\0int\0x\0a";
3908 /* static void x(int a) {} */
3909 __u32 types[] = {
3910 /* int */
3911 BTF_TYPE_INT_ENC(1, BTF_INT_SIGNED, 0, 32, 4), /* [1] */
3912 /* FUNC_PROTO */ /* [2] */
3913 BTF_TYPE_ENC(0, BTF_INFO_ENC(BTF_KIND_FUNC_PROTO, 0, 1), 0),
3914 BTF_PARAM_ENC(7, 1),
3915 /* FUNC x BTF_FUNC_GLOBAL */ /* [3] */
3916 BTF_TYPE_ENC(5, BTF_INFO_ENC(BTF_KIND_FUNC, 0, BTF_FUNC_GLOBAL), 2),
3917 };
2d3eb67f 3918
bb180fb2
AN
3919 return probe_fd(libbpf__load_raw_btf((char *)types, sizeof(types),
3920 strs, sizeof(strs)));
2d3eb67f
AS
3921}
3922
47b6cb4d 3923static int probe_kern_btf_datasec(void)
d7c4b398 3924{
8983b731 3925 static const char strs[] = "\0x\0.data";
d7c4b398
AN
3926 /* static int a; */
3927 __u32 types[] = {
3928 /* int */
3929 BTF_TYPE_INT_ENC(0, BTF_INT_SIGNED, 0, 32, 4), /* [1] */
3930 /* VAR x */ /* [2] */
3931 BTF_TYPE_ENC(1, BTF_INFO_ENC(BTF_KIND_VAR, 0, 0), 1),
3932 BTF_VAR_STATIC,
3933 /* DATASEC val */ /* [3] */
3934 BTF_TYPE_ENC(3, BTF_INFO_ENC(BTF_KIND_DATASEC, 0, 1), 4),
3935 BTF_VAR_SECINFO_ENC(2, 0, 4),
3936 };
cfd49210 3937
bb180fb2
AN
3938 return probe_fd(libbpf__load_raw_btf((char *)types, sizeof(types),
3939 strs, sizeof(strs)));
d7c4b398
AN
3940}
3941
22541a9e
IL
3942static int probe_kern_btf_float(void)
3943{
3944 static const char strs[] = "\0float";
3945 __u32 types[] = {
3946 /* float */
3947 BTF_TYPE_FLOAT_ENC(1, 4),
3948 };
3949
3950 return probe_fd(libbpf__load_raw_btf((char *)types, sizeof(types),
3951 strs, sizeof(strs)));
3952}
3953
47b6cb4d 3954static int probe_kern_array_mmap(void)
7fe74b43
AN
3955{
3956 struct bpf_create_map_attr attr = {
3957 .map_type = BPF_MAP_TYPE_ARRAY,
3958 .map_flags = BPF_F_MMAPABLE,
3959 .key_size = sizeof(int),
3960 .value_size = sizeof(int),
3961 .max_entries = 1,
3962 };
7fe74b43 3963
bb180fb2 3964 return probe_fd(bpf_create_map_xattr(&attr));
7fe74b43
AN
3965}
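/* [Editorial sketch] What BPF_F_MMAPABLE buys: array values can be mapped
 * straight into user-space memory and read/updated without syscalls
 * (assumes a map created with the attributes probed above):
 */
static int *mmap_array_value(int map_fd)
{
	void *mem = mmap(NULL, sysconf(_SC_PAGE_SIZE), PROT_READ | PROT_WRITE,
			 MAP_SHARED, map_fd, 0);

	return mem == MAP_FAILED ? NULL : (int *)mem;
}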
3966
47b6cb4d 3967static int probe_kern_exp_attach_type(void)
25498a19
AN
3968{
3969 struct bpf_load_program_attr attr;
3970 struct bpf_insn insns[] = {
3971 BPF_MOV64_IMM(BPF_REG_0, 0),
3972 BPF_EXIT_INSN(),
3973 };
25498a19
AN
3974
3975 memset(&attr, 0, sizeof(attr));
3976 /* use any valid combination of program type and (optional)
3977	 * non-zero expected attach type (i.e., not BPF_CGROUP_INET_INGRESS, which is 0)
3978 * to see if kernel supports expected_attach_type field for
3979 * BPF_PROG_LOAD command
3980 */
3981 attr.prog_type = BPF_PROG_TYPE_CGROUP_SOCK;
3982 attr.expected_attach_type = BPF_CGROUP_INET_SOCK_CREATE;
3983 attr.insns = insns;
3984 attr.insns_cnt = ARRAY_SIZE(insns);
3985 attr.license = "GPL";
3986
bb180fb2 3987 return probe_fd(bpf_load_program_xattr(&attr, NULL, 0));
25498a19
AN
3988}
3989
109cea5a
AN
3990static int probe_kern_probe_read_kernel(void)
3991{
3992 struct bpf_load_program_attr attr;
3993 struct bpf_insn insns[] = {
3994 BPF_MOV64_REG(BPF_REG_1, BPF_REG_10), /* r1 = r10 (fp) */
3995 BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, -8), /* r1 += -8 */
3996 BPF_MOV64_IMM(BPF_REG_2, 8), /* r2 = 8 */
3997 BPF_MOV64_IMM(BPF_REG_3, 0), /* r3 = 0 */
3998 BPF_RAW_INSN(BPF_JMP | BPF_CALL, 0, 0, 0, BPF_FUNC_probe_read_kernel),
3999 BPF_EXIT_INSN(),
4000 };
4001
4002 memset(&attr, 0, sizeof(attr));
4003 attr.prog_type = BPF_PROG_TYPE_KPROBE;
4004 attr.insns = insns;
4005 attr.insns_cnt = ARRAY_SIZE(insns);
4006 attr.license = "GPL";
4007
4008 return probe_fd(bpf_load_program_xattr(&attr, NULL, 0));
4009}
4010
5d23328d
YZ
4011static int probe_prog_bind_map(void)
4012{
4013 struct bpf_load_program_attr prg_attr;
4014 struct bpf_create_map_attr map_attr;
4015 char *cp, errmsg[STRERR_BUFSIZE];
4016 struct bpf_insn insns[] = {
4017 BPF_MOV64_IMM(BPF_REG_0, 0),
4018 BPF_EXIT_INSN(),
4019 };
4020 int ret, map, prog;
4021
4022 memset(&map_attr, 0, sizeof(map_attr));
4023 map_attr.map_type = BPF_MAP_TYPE_ARRAY;
4024 map_attr.key_size = sizeof(int);
4025 map_attr.value_size = 32;
4026 map_attr.max_entries = 1;
4027
4028 map = bpf_create_map_xattr(&map_attr);
4029 if (map < 0) {
4030 ret = -errno;
4031 cp = libbpf_strerror_r(ret, errmsg, sizeof(errmsg));
4032 pr_warn("Error in %s():%s(%d). Couldn't create simple array map.\n",
4033 __func__, cp, -ret);
4034 return ret;
4035 }
4036
4037 memset(&prg_attr, 0, sizeof(prg_attr));
4038 prg_attr.prog_type = BPF_PROG_TYPE_SOCKET_FILTER;
4039 prg_attr.insns = insns;
4040 prg_attr.insns_cnt = ARRAY_SIZE(insns);
4041 prg_attr.license = "GPL";
4042
4043 prog = bpf_load_program_xattr(&prg_attr, NULL, 0);
4044 if (prog < 0) {
4045 close(map);
4046 return 0;
4047 }
4048
4049 ret = bpf_prog_bind_map(prog, map, NULL);
4050
4051 close(map);
4052 close(prog);
4053
4054 return ret >= 0;
4055}
4056
4f33a53d
AN
4057static int probe_module_btf(void)
4058{
4059 static const char strs[] = "\0int";
4060 __u32 types[] = {
4061 /* int */
4062 BTF_TYPE_INT_ENC(1, BTF_INT_SIGNED, 0, 32, 4),
4063 };
4064 struct bpf_btf_info info;
4065 __u32 len = sizeof(info);
4066 char name[16];
4067 int fd, err;
4068
4069 fd = libbpf__load_raw_btf((char *)types, sizeof(types), strs, sizeof(strs));
4070 if (fd < 0)
4071 return 0; /* BTF not supported at all */
4072
4073 memset(&info, 0, sizeof(info));
4074 info.name = ptr_to_u64(name);
4075 info.name_len = sizeof(name);
4076
4077	/* check that BPF_OBJ_GET_INFO_BY_FD supports specifying a name pointer;
4078 * kernel's module BTF support coincides with support for
4079 * name/name_len fields in struct bpf_btf_info.
4080 */
4081 err = bpf_obj_get_info_by_fd(fd, &info, &len);
4082 close(fd);
4083 return !err;
4084}
4085
47b6cb4d
AN
4086enum kern_feature_result {
4087 FEAT_UNKNOWN = 0,
4088 FEAT_SUPPORTED = 1,
4089 FEAT_MISSING = 2,
4090};
4091
4092typedef int (*feature_probe_fn)(void);
4093
4094static struct kern_feature_desc {
4095 const char *desc;
4096 feature_probe_fn probe;
4097 enum kern_feature_result res;
4098} feature_probes[__FEAT_CNT] = {
4099 [FEAT_PROG_NAME] = {
4100 "BPF program name", probe_kern_prog_name,
4101 },
4102 [FEAT_GLOBAL_DATA] = {
4103 "global variables", probe_kern_global_data,
4104 },
68b08647
AN
4105 [FEAT_BTF] = {
4106 "minimal BTF", probe_kern_btf,
4107 },
47b6cb4d
AN
4108 [FEAT_BTF_FUNC] = {
4109 "BTF functions", probe_kern_btf_func,
4110 },
4111 [FEAT_BTF_GLOBAL_FUNC] = {
4112 "BTF global function", probe_kern_btf_func_global,
4113 },
4114 [FEAT_BTF_DATASEC] = {
4115 "BTF data section and variable", probe_kern_btf_datasec,
4116 },
4117 [FEAT_ARRAY_MMAP] = {
4118 "ARRAY map mmap()", probe_kern_array_mmap,
4119 },
4120 [FEAT_EXP_ATTACH_TYPE] = {
4121 "BPF_PROG_LOAD expected_attach_type attribute",
4122 probe_kern_exp_attach_type,
4123 },
109cea5a
AN
4124 [FEAT_PROBE_READ_KERN] = {
4125 "bpf_probe_read_kernel() helper", probe_kern_probe_read_kernel,
5d23328d
YZ
4126 },
4127 [FEAT_PROG_BIND_MAP] = {
4128 "BPF_PROG_BIND_MAP support", probe_prog_bind_map,
4f33a53d
AN
4129 },
4130 [FEAT_MODULE_BTF] = {
4131 "module BTF support", probe_module_btf,
4132 },
22541a9e
IL
4133 [FEAT_BTF_FLOAT] = {
4134 "BTF_KIND_FLOAT support", probe_kern_btf_float,
4135 },
47b6cb4d 4136};
8837fe5d 4137
47b6cb4d
AN
4138static bool kernel_supports(enum kern_feature_id feat_id)
4139{
4140 struct kern_feature_desc *feat = &feature_probes[feat_id];
4141 int ret;
4142
4143 if (READ_ONCE(feat->res) == FEAT_UNKNOWN) {
4144 ret = feat->probe();
4145 if (ret > 0) {
4146 WRITE_ONCE(feat->res, FEAT_SUPPORTED);
4147 } else if (ret == 0) {
4148 WRITE_ONCE(feat->res, FEAT_MISSING);
4149 } else {
4150 pr_warn("Detection of kernel %s support failed: %d\n", feat->desc, ret);
4151 WRITE_ONCE(feat->res, FEAT_MISSING);
4152 }
8837fe5d
DB
4153 }
4154
47b6cb4d 4155 return READ_ONCE(feat->res) == FEAT_SUPPORTED;
47eff617
SF
4156}
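/* [Editorial sketch; function name is hypothetical] Probe results are
 * computed lazily and cached in feature_probes[], so call sites can
 * branch on them cheaply and repeatedly:
 */
static bool btf_needs_datasec_sanitization(void)
{
	/* e.g., old kernels need DATASEC downgraded to a plain STRUCT */
	return !kernel_supports(FEAT_BTF_DATASEC);
}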
4157
57a00f41
THJ
4158static bool map_is_reuse_compat(const struct bpf_map *map, int map_fd)
4159{
4160 struct bpf_map_info map_info = {};
4161 char msg[STRERR_BUFSIZE];
4162 __u32 map_info_len;
4163
4164 map_info_len = sizeof(map_info);
4165
4166 if (bpf_obj_get_info_by_fd(map_fd, &map_info, &map_info_len)) {
4167 pr_warn("failed to get map info for map FD %d: %s\n",
4168 map_fd, libbpf_strerror_r(errno, msg, sizeof(msg)));
4169 return false;
4170 }
4171
4172 return (map_info.type == map->def.type &&
4173 map_info.key_size == map->def.key_size &&
4174 map_info.value_size == map->def.value_size &&
4175 map_info.max_entries == map->def.max_entries &&
4176 map_info.map_flags == map->def.map_flags);
4177}
4178
4179static int
4180bpf_object__reuse_map(struct bpf_map *map)
4181{
4182 char *cp, errmsg[STRERR_BUFSIZE];
4183 int err, pin_fd;
4184
4185 pin_fd = bpf_obj_get(map->pin_path);
4186 if (pin_fd < 0) {
4187 err = -errno;
4188 if (err == -ENOENT) {
4189 pr_debug("found no pinned map to reuse at '%s'\n",
4190 map->pin_path);
4191 return 0;
4192 }
4193
4194 cp = libbpf_strerror_r(-err, errmsg, sizeof(errmsg));
4195 pr_warn("couldn't retrieve pinned map '%s': %s\n",
4196 map->pin_path, cp);
4197 return err;
4198 }
4199
4200 if (!map_is_reuse_compat(map, pin_fd)) {
4201 pr_warn("couldn't reuse pinned map at '%s': parameter mismatch\n",
4202 map->pin_path);
4203 close(pin_fd);
4204 return -EINVAL;
4205 }
4206
4207 err = bpf_map__reuse_fd(map, pin_fd);
4208 if (err) {
4209 close(pin_fd);
4210 return err;
4211 }
4212 map->pinned = true;
4213 pr_debug("reused pinned map at '%s'\n", map->pin_path);
4214
4215 return 0;
4216}
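/* [Editorial sketch; map name and pin path are hypothetical] Setting a pin
 * path before load drives the reuse logic above: a compatible pinned map is
 * adopted, otherwise a fresh one is created and pinned during load:
 */
static int share_map_via_pinning(struct bpf_object *obj)
{
	struct bpf_map *map = bpf_object__find_map_by_name(obj, "shared_map");
	int err;

	if (!map)
		return -ENOENT;
	err = bpf_map__set_pin_path(map, "/sys/fs/bpf/shared_map");
	if (err)
		return err;
	return bpf_object__load(obj);
}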
4217
d859900c
DB
4218static int
4219bpf_object__populate_internal_map(struct bpf_object *obj, struct bpf_map *map)
4220{
166750bc 4221 enum libbpf_map_type map_type = map->libbpf_type;
d859900c
DB
4222 char *cp, errmsg[STRERR_BUFSIZE];
4223 int err, zero = 0;
d859900c 4224
eba9c5f4
AN
4225 err = bpf_map_update_elem(map->fd, &zero, map->mmaped, 0);
4226 if (err) {
4227 err = -errno;
4228 cp = libbpf_strerror_r(err, errmsg, sizeof(errmsg));
4229 pr_warn("Error setting initial map(%s) contents: %s\n",
4230 map->name, cp);
4231 return err;
4232 }
d859900c 4233
81bfdd08
AN
4234	/* Freeze .rodata and .kconfig maps as read-only from the syscall side. */
4235 if (map_type == LIBBPF_MAP_RODATA || map_type == LIBBPF_MAP_KCONFIG) {
d859900c
DB
4236 err = bpf_map_freeze(map->fd);
4237 if (err) {
eba9c5f4
AN
4238 err = -errno;
4239 cp = libbpf_strerror_r(err, errmsg, sizeof(errmsg));
be18010e
KW
4240 pr_warn("Error freezing map(%s) as read-only: %s\n",
4241 map->name, cp);
eba9c5f4 4242 return err;
d859900c
DB
4243 }
4244 }
eba9c5f4 4245 return 0;
d859900c
DB
4246}
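/* [Editorial sketch; function is hypothetical] What the freeze above means
 * for user space: later syscall-side writes fail with EPERM, while BPF
 * programs can still read the (now immutable) data:
 */
static int poke_frozen_map(int frozen_fd)
{
	int zero = 0, val = 1;

	/* expected to fail with errno == EPERM after bpf_map_freeze() */
	return bpf_map_update_elem(frozen_fd, &zero, &val, 0);
}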
4247
2d39d7c5
AN
4248static void bpf_map__destroy(struct bpf_map *map);
4249
4250static int bpf_object__create_map(struct bpf_object *obj, struct bpf_map *map)
4251{
4252 struct bpf_create_map_attr create_attr;
4253 struct bpf_map_def *def = &map->def;
4254
4255 memset(&create_attr, 0, sizeof(create_attr));
4256
47b6cb4d 4257 if (kernel_supports(FEAT_PROG_NAME))
2d39d7c5
AN
4258 create_attr.name = map->name;
4259 create_attr.map_ifindex = map->map_ifindex;
4260 create_attr.map_type = def->type;
4261 create_attr.map_flags = def->map_flags;
4262 create_attr.key_size = def->key_size;
4263 create_attr.value_size = def->value_size;
1bdb6c9a 4264 create_attr.numa_node = map->numa_node;
2d39d7c5
AN
4265
4266 if (def->type == BPF_MAP_TYPE_PERF_EVENT_ARRAY && !def->max_entries) {
4267 int nr_cpus;
4268
4269 nr_cpus = libbpf_num_possible_cpus();
4270 if (nr_cpus < 0) {
4271 pr_warn("map '%s': failed to determine number of system CPUs: %d\n",
4272 map->name, nr_cpus);
4273 return nr_cpus;
4274 }
4275 pr_debug("map '%s': setting size to %d\n", map->name, nr_cpus);
4276 create_attr.max_entries = nr_cpus;
4277 } else {
4278 create_attr.max_entries = def->max_entries;
4279 }
4280
4281 if (bpf_map__is_struct_ops(map))
4282 create_attr.btf_vmlinux_value_type_id =
4283 map->btf_vmlinux_value_type_id;
4284
4285 create_attr.btf_fd = 0;
4286 create_attr.btf_key_type_id = 0;
4287 create_attr.btf_value_type_id = 0;
0f0e55d8 4288 if (obj->btf && btf__fd(obj->btf) >= 0 && !bpf_map_find_btf_info(obj, map)) {
2d39d7c5
AN
4289 create_attr.btf_fd = btf__fd(obj->btf);
4290 create_attr.btf_key_type_id = map->btf_key_type_id;
4291 create_attr.btf_value_type_id = map->btf_value_type_id;
4292 }
4293
646f02ff
AN
4294 if (bpf_map_type__is_map_in_map(def->type)) {
4295 if (map->inner_map) {
4296 int err;
4297
4298 err = bpf_object__create_map(obj, map->inner_map);
4299 if (err) {
4300 pr_warn("map '%s': failed to create inner map: %d\n",
4301 map->name, err);
4302 return err;
4303 }
4304 map->inner_map_fd = bpf_map__fd(map->inner_map);
4305 }
4306 if (map->inner_map_fd >= 0)
4307 create_attr.inner_map_fd = map->inner_map_fd;
4308 }
4309
2d39d7c5
AN
4310 map->fd = bpf_create_map_xattr(&create_attr);
4311 if (map->fd < 0 && (create_attr.btf_key_type_id ||
4312 create_attr.btf_value_type_id)) {
4313 char *cp, errmsg[STRERR_BUFSIZE];
4314 int err = -errno;
4315
4316 cp = libbpf_strerror_r(err, errmsg, sizeof(errmsg));
4317 pr_warn("Error in bpf_create_map_xattr(%s):%s(%d). Retrying without BTF.\n",
4318 map->name, cp, err);
4319 create_attr.btf_fd = 0;
4320 create_attr.btf_key_type_id = 0;
4321 create_attr.btf_value_type_id = 0;
4322 map->btf_key_type_id = 0;
4323 map->btf_value_type_id = 0;
4324 map->fd = bpf_create_map_xattr(&create_attr);
4325 }
4326
4327 if (map->fd < 0)
4328 return -errno;
4329
646f02ff
AN
4330 if (bpf_map_type__is_map_in_map(def->type) && map->inner_map) {
4331 bpf_map__destroy(map->inner_map);
4332 zfree(&map->inner_map);
4333 }
4334
2d39d7c5
AN
4335 return 0;
4336}
4337
a0f2b7ac
HL
4338static int init_map_slots(struct bpf_map *map)
4339{
4340 const struct bpf_map *targ_map;
4341 unsigned int i;
4342 int fd, err;
4343
4344 for (i = 0; i < map->init_slots_sz; i++) {
4345 if (!map->init_slots[i])
4346 continue;
4347
4348 targ_map = map->init_slots[i];
4349 fd = bpf_map__fd(targ_map);
4350 err = bpf_map_update_elem(map->fd, &i, &fd, 0);
4351 if (err) {
4352 err = -errno;
4353 pr_warn("map '%s': failed to initialize slot [%d] to map '%s' fd=%d: %d\n",
4354 map->name, i, targ_map->name,
4355 fd, err);
4356 return err;
4357 }
4358 pr_debug("map '%s': slot [%d] set to map '%s' fd=%d\n",
4359 map->name, i, targ_map->name, fd);
4360 }
4361
4362 zfree(&map->init_slots);
4363 map->init_slots_sz = 0;
4364
4365 return 0;
4366}
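/* [Editorial sketch, BPF-side C; map names are hypothetical] init_slots
 * comes from declaratively initialized map-in-maps: the .values initializer
 * below is what libbpf turns into map->init_slots for the outer map:
 */
struct inner_map {
	__uint(type, BPF_MAP_TYPE_ARRAY);
	__uint(max_entries, 1);
	__type(key, int);
	__type(value, int);
} inner_a SEC(".maps"), inner_b SEC(".maps");

struct {
	__uint(type, BPF_MAP_TYPE_ARRAY_OF_MAPS);
	__uint(max_entries, 3);
	__uint(key_size, sizeof(int));
	__array(values, struct inner_map);
} outer SEC(".maps") = {
	.values = { [0] = &inner_a, [2] = &inner_b }, /* slot 1 stays empty */
};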
4367
52d3352e
WN
4368static int
4369bpf_object__create_maps(struct bpf_object *obj)
4370{
2d39d7c5
AN
4371 struct bpf_map *map;
4372 char *cp, errmsg[STRERR_BUFSIZE];
4373 unsigned int i, j;
8a138aed 4374 int err;
52d3352e 4375
9d759a9b 4376 for (i = 0; i < obj->nr_maps; i++) {
2d39d7c5 4377 map = &obj->maps[i];
8a138aed 4378
57a00f41
THJ
4379 if (map->pin_path) {
4380 err = bpf_object__reuse_map(map);
4381 if (err) {
2d39d7c5 4382 pr_warn("map '%s': error reusing pinned map\n",
57a00f41 4383 map->name);
2d39d7c5 4384 goto err_out;
57a00f41
THJ
4385 }
4386 }
4387
26736eb9 4388 if (map->fd >= 0) {
2d39d7c5 4389 pr_debug("map '%s': skipping creation (preset fd=%d)\n",
26736eb9 4390 map->name, map->fd);
2c193d32
HL
4391 } else {
4392 err = bpf_object__create_map(obj, map);
4393 if (err)
d859900c 4394 goto err_out;
d859900c 4395
2c193d32
HL
4396 pr_debug("map '%s': created successfully, fd=%d\n",
4397 map->name, map->fd);
646f02ff 4398
2c193d32
HL
4399 if (bpf_map__is_internal(map)) {
4400 err = bpf_object__populate_internal_map(obj, map);
4401 if (err < 0) {
4402 zclose(map->fd);
4403 goto err_out;
4404 }
d859900c 4405 }
646f02ff 4406
2c193d32
HL
4407 if (map->init_slots_sz) {
4408 err = init_map_slots(map);
4409 if (err < 0) {
4410 zclose(map->fd);
646f02ff
AN
4411 goto err_out;
4412 }
646f02ff 4413 }
646f02ff
AN
4414 }
4415
57a00f41
THJ
4416 if (map->pin_path && !map->pinned) {
4417 err = bpf_map__pin(map, NULL);
4418 if (err) {
2d39d7c5
AN
4419 pr_warn("map '%s': failed to auto-pin at '%s': %d\n",
4420 map->name, map->pin_path, err);
4421 zclose(map->fd);
4422 goto err_out;
57a00f41
THJ
4423 }
4424 }
52d3352e
WN
4425 }
4426
52d3352e 4427 return 0;
2d39d7c5
AN
4428
4429err_out:
4430 cp = libbpf_strerror_r(err, errmsg, sizeof(errmsg));
4431 pr_warn("map '%s': failed to create: %s(%d)\n", map->name, cp, err);
4432 pr_perm_msg(err);
4433 for (j = 0; j < i; j++)
4434 zclose(obj->maps[j].fd);
4435 return err;
52d3352e
WN
4436}
4437
ddc7c304
AN
4438#define BPF_CORE_SPEC_MAX_LEN 64
4439
4440/* represents BPF CO-RE field or array element accessor */
4441struct bpf_core_accessor {
4442 __u32 type_id; /* struct/union type or array element type */
4443 __u32 idx; /* field index or array index */
4444 const char *name; /* field name or NULL for array accessor */
4445};
4446
4447struct bpf_core_spec {
4448 const struct btf *btf;
4449 /* high-level spec: named fields and array indices only */
4450 struct bpf_core_accessor spec[BPF_CORE_SPEC_MAX_LEN];
28b93c64
AN
4451	/* original unresolved (no skip_mods_and_typedefs) root type ID */
4452 __u32 root_type_id;
4453 /* CO-RE relocation kind */
4454 enum bpf_core_relo_kind relo_kind;
ddc7c304
AN
4455 /* high-level spec length */
4456 int len;
4457 /* raw, low-level spec: 1-to-1 with accessor spec string */
4458 int raw_spec[BPF_CORE_SPEC_MAX_LEN];
4459 /* raw spec length */
4460 int raw_len;
ee26dade
AN
4461 /* field bit offset represented by spec */
4462 __u32 bit_offset;
ddc7c304
AN
4463};
4464
4465static bool str_is_empty(const char *s)
4466{
4467 return !s || !s[0];
4468}
4469
1b484b30
AN
4470static bool is_flex_arr(const struct btf *btf,
4471 const struct bpf_core_accessor *acc,
4472 const struct btf_array *arr)
4473{
4474 const struct btf_type *t;
4475
4476 /* not a flexible array, if not inside a struct or has non-zero size */
4477 if (!acc->name || arr->nelems > 0)
4478 return false;
4479
4480 /* has to be the last member of enclosing struct */
4481 t = btf__type_by_id(btf, acc->type_id);
4482 return acc->idx == btf_vlen(t) - 1;
4483}
4484
28b93c64
AN
4485static const char *core_relo_kind_str(enum bpf_core_relo_kind kind)
4486{
4487 switch (kind) {
4488 case BPF_FIELD_BYTE_OFFSET: return "byte_off";
4489 case BPF_FIELD_BYTE_SIZE: return "byte_sz";
4490 case BPF_FIELD_EXISTS: return "field_exists";
4491 case BPF_FIELD_SIGNED: return "signed";
4492 case BPF_FIELD_LSHIFT_U64: return "lshift_u64";
4493 case BPF_FIELD_RSHIFT_U64: return "rshift_u64";
3fc32f40
AN
4494 case BPF_TYPE_ID_LOCAL: return "local_type_id";
4495 case BPF_TYPE_ID_TARGET: return "target_type_id";
4496 case BPF_TYPE_EXISTS: return "type_exists";
4497 case BPF_TYPE_SIZE: return "type_size";
eacaaed7
AN
4498 case BPF_ENUMVAL_EXISTS: return "enumval_exists";
4499 case BPF_ENUMVAL_VALUE: return "enumval_value";
28b93c64
AN
4500 default: return "unknown";
4501 }
4502}
4503
4504static bool core_relo_is_field_based(enum bpf_core_relo_kind kind)
4505{
4506 switch (kind) {
4507 case BPF_FIELD_BYTE_OFFSET:
4508 case BPF_FIELD_BYTE_SIZE:
4509 case BPF_FIELD_EXISTS:
4510 case BPF_FIELD_SIGNED:
4511 case BPF_FIELD_LSHIFT_U64:
4512 case BPF_FIELD_RSHIFT_U64:
4513 return true;
4514 default:
4515 return false;
4516 }
4517}
4518
3fc32f40
AN
4519static bool core_relo_is_type_based(enum bpf_core_relo_kind kind)
4520{
4521 switch (kind) {
4522 case BPF_TYPE_ID_LOCAL:
4523 case BPF_TYPE_ID_TARGET:
4524 case BPF_TYPE_EXISTS:
4525 case BPF_TYPE_SIZE:
4526 return true;
4527 default:
4528 return false;
4529 }
4530}
4531
eacaaed7
AN
4532static bool core_relo_is_enumval_based(enum bpf_core_relo_kind kind)
4533{
4534 switch (kind) {
4535 case BPF_ENUMVAL_EXISTS:
4536 case BPF_ENUMVAL_VALUE:
4537 return true;
4538 default:
4539 return false;
4540 }
4541}
4542
ddc7c304 4543/*
28b93c64 4544 * Turn bpf_core_relo into a low- and high-level spec representation,
ddc7c304 4545 * validating correctness along the way, as well as calculating resulting
ee26dade
AN
4546 * field bit offset, specified by accessor string. Low-level spec captures
4547 * every single level of nesting, including traversing anonymous
ddc7c304
AN
4548 * struct/union members. High-level one only captures semantically meaningful
4549 * "turning points": named fields and array indicies.
4550 * E.g., for this case:
4551 *
4552 * struct sample {
4553 * int __unimportant;
4554 * struct {
4555 * int __1;
4556 * int __2;
4557 * int a[7];
4558 * };
4559 * };
4560 *
4561 * struct sample *s = ...;
4562 *
4563 * int x = &s->a[3]; // access string = '0:1:2:3'
4564 *
4565 * Low-level spec has 1:1 mapping with each element of access string (it's
4566 * just a parsed access string representation): [0, 1, 2, 3].
4567 *
4568 * High-level spec will capture only 3 points:
4569 * - initial zero-index access by pointer (&s->... is the same as &s[0]...);
4570 * - field 'a' access (corresponds to '2' in low-level spec);
4571 * - array element #3 access (corresponds to '3' in low-level spec).
4572 *
3fc32f40
AN
4573 * Type-based relocations (TYPE_EXISTS/TYPE_SIZE,
4574 * TYPE_ID_LOCAL/TYPE_ID_TARGET) don't capture any field information. Their
4575 * spec and raw_spec are kept empty.
eacaaed7
AN
4576 *
4577 * Enum value-based relocations (ENUMVAL_EXISTS/ENUMVAL_VALUE) use the access
4578 * string to specify the enumerator's value index that needs to be relocated.
ddc7c304 4579 */
28b93c64 4580static int bpf_core_parse_spec(const struct btf *btf,
ddc7c304
AN
4581 __u32 type_id,
4582 const char *spec_str,
28b93c64 4583 enum bpf_core_relo_kind relo_kind,
ddc7c304
AN
4584 struct bpf_core_spec *spec)
4585{
4586 int access_idx, parsed_len, i;
1b484b30 4587 struct bpf_core_accessor *acc;
ddc7c304
AN
4588 const struct btf_type *t;
4589 const char *name;
4590 __u32 id;
4591 __s64 sz;
4592
4593 if (str_is_empty(spec_str) || *spec_str == ':')
4594 return -EINVAL;
4595
4596 memset(spec, 0, sizeof(*spec));
4597 spec->btf = btf;
28b93c64
AN
4598 spec->root_type_id = type_id;
4599 spec->relo_kind = relo_kind;
ddc7c304 4600
3fc32f40
AN
4601 /* type-based relocations don't have a field access string */
4602 if (core_relo_is_type_based(relo_kind)) {
4603 if (strcmp(spec_str, "0"))
4604 return -EINVAL;
4605 return 0;
4606 }
4607
ddc7c304
AN
4608 /* parse spec_str="0:1:2:3:4" into array raw_spec=[0, 1, 2, 3, 4] */
4609 while (*spec_str) {
4610 if (*spec_str == ':')
4611 ++spec_str;
4612 if (sscanf(spec_str, "%d%n", &access_idx, &parsed_len) != 1)
4613 return -EINVAL;
4614 if (spec->raw_len == BPF_CORE_SPEC_MAX_LEN)
4615 return -E2BIG;
4616 spec_str += parsed_len;
4617 spec->raw_spec[spec->raw_len++] = access_idx;
4618 }
4619
4620 if (spec->raw_len == 0)
4621 return -EINVAL;
4622
ddc7c304
AN
4623 t = skip_mods_and_typedefs(btf, type_id, &id);
4624 if (!t)
4625 return -EINVAL;
4626
4627 access_idx = spec->raw_spec[0];
eacaaed7
AN
4628 acc = &spec->spec[0];
4629 acc->type_id = id;
4630 acc->idx = access_idx;
ddc7c304
AN
4631 spec->len++;
4632
eacaaed7
AN
4633 if (core_relo_is_enumval_based(relo_kind)) {
4634 if (!btf_is_enum(t) || spec->raw_len > 1 || access_idx >= btf_vlen(t))
4635 return -EINVAL;
4636
4637 /* record enumerator name in a first accessor */
4638 acc->name = btf__name_by_offset(btf, btf_enum(t)[access_idx].name_off);
4639 return 0;
4640 }
4641
28b93c64
AN
4642 if (!core_relo_is_field_based(relo_kind))
4643 return -EINVAL;
4644
ddc7c304
AN
4645 sz = btf__resolve_size(btf, id);
4646 if (sz < 0)
4647 return sz;
ee26dade 4648 spec->bit_offset = access_idx * sz * 8;
ddc7c304
AN
4649
4650 for (i = 1; i < spec->raw_len; i++) {
4651 t = skip_mods_and_typedefs(btf, id, &id);
4652 if (!t)
4653 return -EINVAL;
4654
4655 access_idx = spec->raw_spec[i];
1b484b30 4656 acc = &spec->spec[spec->len];
ddc7c304
AN
4657
4658 if (btf_is_composite(t)) {
4659 const struct btf_member *m;
ee26dade 4660 __u32 bit_offset;
ddc7c304
AN
4661
4662 if (access_idx >= btf_vlen(t))
4663 return -EINVAL;
ddc7c304 4664
ee26dade
AN
4665 bit_offset = btf_member_bit_offset(t, access_idx);
4666 spec->bit_offset += bit_offset;
ddc7c304
AN
4667
4668 m = btf_members(t) + access_idx;
4669 if (m->name_off) {
4670 name = btf__name_by_offset(btf, m->name_off);
4671 if (str_is_empty(name))
4672 return -EINVAL;
4673
1b484b30
AN
4674 acc->type_id = id;
4675 acc->idx = access_idx;
4676 acc->name = name;
ddc7c304
AN
4677 spec->len++;
4678 }
4679
4680 id = m->type;
4681 } else if (btf_is_array(t)) {
4682 const struct btf_array *a = btf_array(t);
1b484b30 4683 bool flex;
ddc7c304
AN
4684
4685 t = skip_mods_and_typedefs(btf, a->type, &id);
1b484b30
AN
4686 if (!t)
4687 return -EINVAL;
4688
4689 flex = is_flex_arr(btf, acc - 1, a);
4690 if (!flex && access_idx >= a->nelems)
ddc7c304
AN
4691 return -EINVAL;
4692
4693 spec->spec[spec->len].type_id = id;
4694 spec->spec[spec->len].idx = access_idx;
4695 spec->len++;
4696
4697 sz = btf__resolve_size(btf, id);
4698 if (sz < 0)
4699 return sz;
ee26dade 4700 spec->bit_offset += access_idx * sz * 8;
ddc7c304 4701 } else {
81ba0889
AN
4702 pr_warn("relo for [%u] %s (at idx %d) captures type [%d] of unexpected kind %s\n",
4703 type_id, spec_str, i, id, btf_kind_str(t));
ddc7c304
AN
4704 return -EINVAL;
4705 }
4706 }
4707
4708 return 0;
4709}
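/* [Editorial worked example] For 'struct sample' from the comment above,
 * parsing spec_str "0:1:2:3" (assuming 4-byte int and no padding) yields:
 *
 *	raw_spec = [0, 1, 2, 3], raw_len = 4
 *	spec     = [initial &s[0] deref, field 'a', array idx 3], len = 3
 *	bit_offset = (0 * sizeof(struct sample)	  // initial access
 *		      + 12			  // 'a' sits after 3 ints
 *		      + 3 * 4) * 8		  // element a[3]
 *		   = 24 * 8 = 192
 */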
4710
4711static bool bpf_core_is_flavor_sep(const char *s)
4712{
4713 /* check X___Y name pattern, where X and Y are not underscores */
4714 return s[0] != '_' && /* X */
4715 s[1] == '_' && s[2] == '_' && s[3] == '_' && /* ___ */
4716 s[4] != '_'; /* Y */
4717}
4718
4719/* Given 'some_struct_name___with_flavor', return the length of the name prefix
4720 * before the last triple underscore. The struct name part after the last
4721 * triple underscore is ignored by BPF CO-RE relocation during matching.
4722 */
4723static size_t bpf_core_essential_name_len(const char *name)
4724{
4725 size_t n = strlen(name);
4726 int i;
4727
4728 for (i = n - 5; i >= 0; i--) {
4729 if (bpf_core_is_flavor_sep(name + i))
4730 return i + 1;
4731 }
4732 return n;
4733}
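/* [Editorial examples] Flavor handling in practice:
 *	bpf_core_essential_name_len("task_struct")        -> 11 (no flavor)
 *	bpf_core_essential_name_len("task_struct___v510") -> 11 ("task_struct")
 *	bpf_core_essential_name_len("some___x___y")       -> 8  ("some___x";
 *						only the last separator counts)
 */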
4734
0f7515ca
AN
4735struct core_cand
4736{
4737 const struct btf *btf;
4738 const struct btf_type *t;
4739 const char *name;
4740 __u32 id;
4741};
4742
4743/* dynamically sized list of type IDs and its associated struct btf */
4744struct core_cand_list {
4745 struct core_cand *cands;
ddc7c304
AN
4746 int len;
4747};
4748
0f7515ca 4749static void bpf_core_free_cands(struct core_cand_list *cands)
ddc7c304 4750{
0f7515ca
AN
4751 free(cands->cands);
4752 free(cands);
ddc7c304
AN
4753}
4754
0f7515ca
AN
4755static int bpf_core_add_cands(struct core_cand *local_cand,
4756 size_t local_essent_len,
4757 const struct btf *targ_btf,
4758 const char *targ_btf_name,
4759 int targ_start_id,
4760 struct core_cand_list *cands)
ddc7c304 4761{
0f7515ca
AN
4762 struct core_cand *new_cands, *cand;
4763 const struct btf_type *t;
4764 const char *targ_name;
4765 size_t targ_essent_len;
4766 int n, i;
ddc7c304
AN
4767
4768 n = btf__get_nr_types(targ_btf);
0f7515ca 4769 for (i = targ_start_id; i <= n; i++) {
ddc7c304 4770 t = btf__type_by_id(targ_btf, i);
0f7515ca 4771 if (btf_kind(t) != btf_kind(local_cand->t))
ddc7c304
AN
4772 continue;
4773
3fc32f40
AN
4774 targ_name = btf__name_by_offset(targ_btf, t->name_off);
4775 if (str_is_empty(targ_name))
d121e1d3
AN
4776 continue;
4777
ddc7c304
AN
4778 targ_essent_len = bpf_core_essential_name_len(targ_name);
4779 if (targ_essent_len != local_essent_len)
4780 continue;
4781
0f7515ca
AN
4782 if (strncmp(local_cand->name, targ_name, local_essent_len) != 0)
4783 continue;
4784
4785 pr_debug("CO-RE relocating [%d] %s %s: found target candidate [%d] %s %s in [%s]\n",
4786 local_cand->id, btf_kind_str(local_cand->t),
4787 local_cand->name, i, btf_kind_str(t), targ_name,
4788 targ_btf_name);
4789 new_cands = libbpf_reallocarray(cands->cands, cands->len + 1,
4790 sizeof(*cands->cands));
4791 if (!new_cands)
4792 return -ENOMEM;
4793
4794 cand = &new_cands[cands->len];
4795 cand->btf = targ_btf;
4796 cand->t = t;
4797 cand->name = targ_name;
4798 cand->id = i;
4799
4800 cands->cands = new_cands;
4801 cands->len++;
ddc7c304 4802 }
0f7515ca
AN
4803 return 0;
4804}
4805
4f33a53d
AN
4806static int load_module_btfs(struct bpf_object *obj)
4807{
4808 struct bpf_btf_info info;
4809 struct module_btf *mod_btf;
4810 struct btf *btf;
4811 char name[64];
4812 __u32 id = 0, len;
4813 int err, fd;
4814
4815 if (obj->btf_modules_loaded)
4816 return 0;
4817
4818 /* don't do this again, even if we find no module BTFs */
4819 obj->btf_modules_loaded = true;
4820
4821 /* kernel too old to support module BTFs */
4822 if (!kernel_supports(FEAT_MODULE_BTF))
4823 return 0;
4824
4825 while (true) {
4826 err = bpf_btf_get_next_id(id, &id);
4827 if (err && errno == ENOENT)
4828 return 0;
4829 if (err) {
4830 err = -errno;
4831 pr_warn("failed to iterate BTF objects: %d\n", err);
4832 return err;
ddc7c304 4833 }
4f33a53d
AN
4834
4835 fd = bpf_btf_get_fd_by_id(id);
4836 if (fd < 0) {
4837 if (errno == ENOENT)
4838 continue; /* expected race: BTF was unloaded */
4839 err = -errno;
4840 pr_warn("failed to get BTF object #%d FD: %d\n", id, err);
4841 return err;
4842 }
4843
4844 len = sizeof(info);
4845 memset(&info, 0, sizeof(info));
4846 info.name = ptr_to_u64(name);
4847 info.name_len = sizeof(name);
4848
4849 err = bpf_obj_get_info_by_fd(fd, &info, &len);
4850 if (err) {
4851 err = -errno;
4852 pr_warn("failed to get BTF object #%d info: %d\n", id, err);
91abb4a6 4853 goto err_out;
4f33a53d
AN
4854 }
4855
4856 /* ignore non-module BTFs */
4857 if (!info.kernel_btf || strcmp(name, "vmlinux") == 0) {
4858 close(fd);
4859 continue;
4860 }
4861
4862 btf = btf_get_from_fd(fd, obj->btf_vmlinux);
4f33a53d
AN
4863 if (IS_ERR(btf)) {
4864 pr_warn("failed to load module [%s]'s BTF object #%d: %ld\n",
4865 name, id, PTR_ERR(btf));
91abb4a6
AN
4866 err = PTR_ERR(btf);
4867 goto err_out;
4f33a53d
AN
4868 }
4869
3b029e06
AN
4870 err = libbpf_ensure_mem((void **)&obj->btf_modules, &obj->btf_module_cap,
4871 sizeof(*obj->btf_modules), obj->btf_module_cnt + 1);
4f33a53d 4872 if (err)
91abb4a6 4873 goto err_out;
4f33a53d
AN
4874
4875 mod_btf = &obj->btf_modules[obj->btf_module_cnt++];
4876
4877 mod_btf->btf = btf;
4878 mod_btf->id = id;
91abb4a6 4879 mod_btf->fd = fd;
4f33a53d 4880 mod_btf->name = strdup(name);
91abb4a6
AN
4881 if (!mod_btf->name) {
4882 err = -ENOMEM;
4883 goto err_out;
4884 }
4885 continue;
4886
4887err_out:
4888 close(fd);
4889 return err;
ddc7c304 4890 }
4f33a53d
AN
4891
4892 return 0;
4893}
4894
0f7515ca
AN
4895static struct core_cand_list *
4896bpf_core_find_cands(struct bpf_object *obj, const struct btf *local_btf, __u32 local_type_id)
4897{
4898 struct core_cand local_cand = {};
4899 struct core_cand_list *cands;
4f33a53d 4900 const struct btf *main_btf;
0f7515ca 4901 size_t local_essent_len;
4f33a53d 4902 int err, i;
0f7515ca
AN
4903
4904 local_cand.btf = local_btf;
4905 local_cand.t = btf__type_by_id(local_btf, local_type_id);
4906 if (!local_cand.t)
4907 return ERR_PTR(-EINVAL);
4908
4909 local_cand.name = btf__name_by_offset(local_btf, local_cand.t->name_off);
4910 if (str_is_empty(local_cand.name))
4911 return ERR_PTR(-EINVAL);
4912 local_essent_len = bpf_core_essential_name_len(local_cand.name);
4913
4914 cands = calloc(1, sizeof(*cands));
4915 if (!cands)
4916 return ERR_PTR(-ENOMEM);
4917
4918 /* Attempt to find target candidates in vmlinux BTF first */
4f33a53d
AN
4919 main_btf = obj->btf_vmlinux_override ?: obj->btf_vmlinux;
4920 err = bpf_core_add_cands(&local_cand, local_essent_len, main_btf, "vmlinux", 1, cands);
4921 if (err)
4922 goto err_out;
4923
4924	/* if vmlinux BTF has any candidate, don't go for module BTFs */
4925 if (cands->len)
4926 return cands;
4927
4928 /* if vmlinux BTF was overridden, don't attempt to load module BTFs */
4929 if (obj->btf_vmlinux_override)
4930 return cands;
4931
4932	/* now look through module BTFs, still trying to find candidates */
4933 err = load_module_btfs(obj);
4934 if (err)
4935 goto err_out;
4936
4937 for (i = 0; i < obj->btf_module_cnt; i++) {
4938 err = bpf_core_add_cands(&local_cand, local_essent_len,
4939 obj->btf_modules[i].btf,
4940 obj->btf_modules[i].name,
4941 btf__get_nr_types(obj->btf_vmlinux) + 1,
4942 cands);
4943 if (err)
4944 goto err_out;
0f7515ca
AN
4945 }
4946
4947 return cands;
ddc7c304 4948err_out:
4f33a53d 4949 bpf_core_free_cands(cands);
ddc7c304
AN
4950 return ERR_PTR(err);
4951}
4952
3fc32f40
AN
4953/* Check two types for compatibility for the purpose of field access
4954 * relocation. const/volatile/restrict and typedefs are skipped to ensure we
4955 * are relocating semantically compatible entities:
ddc7c304 4956 * - any two STRUCTs/UNIONs are compatible and can be mixed;
94f060e9 4957 * - any two FWDs are compatible, if their names match (modulo flavor suffix);
ddc7c304 4958 * - any two PTRs are always compatible;
94f060e9
AN
4959 * - for ENUMs, names should be the same (ignoring flavor suffix) or at
4960 * least one of the enums should be anonymous;
ddc7c304 4961 * - for ENUMs, check sizes, names are ignored;
ee26dade 4962 * - for INT, size and signedness are ignored;
ddc7c304
AN
4963 * - for ARRAY, dimensionality is ignored, element types are checked for
4964 * compatibility recursively;
4965 * - everything else shouldn't ever be a target of relocation.
4966 * These rules are not set in stone and probably will be adjusted as we get
4967 * more experience with using BPF CO-RE relocations.
4968 */
4969static int bpf_core_fields_are_compat(const struct btf *local_btf,
4970 __u32 local_id,
4971 const struct btf *targ_btf,
4972 __u32 targ_id)
4973{
4974 const struct btf_type *local_type, *targ_type;
4975
4976recur:
4977 local_type = skip_mods_and_typedefs(local_btf, local_id, &local_id);
4978 targ_type = skip_mods_and_typedefs(targ_btf, targ_id, &targ_id);
4979 if (!local_type || !targ_type)
4980 return -EINVAL;
4981
4982 if (btf_is_composite(local_type) && btf_is_composite(targ_type))
4983 return 1;
4984 if (btf_kind(local_type) != btf_kind(targ_type))
4985 return 0;
4986
4987 switch (btf_kind(local_type)) {
ddc7c304
AN
4988 case BTF_KIND_PTR:
4989 return 1;
94f060e9
AN
4990 case BTF_KIND_FWD:
4991 case BTF_KIND_ENUM: {
4992 const char *local_name, *targ_name;
4993 size_t local_len, targ_len;
4994
4995 local_name = btf__name_by_offset(local_btf,
4996 local_type->name_off);
4997 targ_name = btf__name_by_offset(targ_btf, targ_type->name_off);
4998 local_len = bpf_core_essential_name_len(local_name);
4999 targ_len = bpf_core_essential_name_len(targ_name);
5000 /* one of them is anonymous or both w/ same flavor-less names */
5001 return local_len == 0 || targ_len == 0 ||
5002 (local_len == targ_len &&
5003 strncmp(local_name, targ_name, local_len) == 0);
5004 }
ddc7c304 5005 case BTF_KIND_INT:
ee26dade
AN
5006 /* just reject deprecated bitfield-like integers; all other
5007		 * integers are by default compatible with each other
5008 */
ddc7c304 5009 return btf_int_offset(local_type) == 0 &&
ee26dade 5010 btf_int_offset(targ_type) == 0;
ddc7c304
AN
5011 case BTF_KIND_ARRAY:
5012 local_id = btf_array(local_type)->type;
5013 targ_id = btf_array(targ_type)->type;
5014 goto recur;
5015 default:
be18010e
KW
5016 pr_warn("unexpected kind %d relocated, local [%d], target [%d]\n",
5017 btf_kind(local_type), local_id, targ_id);
ddc7c304
AN
5018 return 0;
5019 }
5020}
5021
5022/*
5023 * Given a single high-level named field accessor in a local type, find the
5024 * corresponding high-level accessor for a target type. Along the way,
5025 * maintain low-level spec for target as well. Also keep updating target
ee26dade 5026 * bit offset.
ddc7c304
AN
5027 *
5028 * Searching is performed through recursive exhaustive enumeration of all
5029 * fields of a struct/union. If there are any anonymous (embedded)
5030 * structs/unions, they are recursively searched as well. If a field with the
5031 * desired name is found, check compatibility between local and target types
5032 * before returning the result.
5033 *
5034 * 1 is returned if the field is found.
5035 * 0 is returned if no compatible field is found.
5036 * <0 is returned on error.
5037 */
5038static int bpf_core_match_member(const struct btf *local_btf,
5039 const struct bpf_core_accessor *local_acc,
5040 const struct btf *targ_btf,
5041 __u32 targ_id,
5042 struct bpf_core_spec *spec,
5043 __u32 *next_targ_id)
5044{
5045 const struct btf_type *local_type, *targ_type;
5046 const struct btf_member *local_member, *m;
5047 const char *local_name, *targ_name;
5048 __u32 local_id;
5049 int i, n, found;
5050
5051 targ_type = skip_mods_and_typedefs(targ_btf, targ_id, &targ_id);
5052 if (!targ_type)
5053 return -EINVAL;
5054 if (!btf_is_composite(targ_type))
5055 return 0;
5056
5057 local_id = local_acc->type_id;
5058 local_type = btf__type_by_id(local_btf, local_id);
5059 local_member = btf_members(local_type) + local_acc->idx;
5060 local_name = btf__name_by_offset(local_btf, local_member->name_off);
5061
5062 n = btf_vlen(targ_type);
5063 m = btf_members(targ_type);
5064 for (i = 0; i < n; i++, m++) {
ee26dade 5065 __u32 bit_offset;
ddc7c304 5066
ee26dade 5067 bit_offset = btf_member_bit_offset(targ_type, i);
ddc7c304
AN
5068
5069 /* too deep struct/union/array nesting */
5070 if (spec->raw_len == BPF_CORE_SPEC_MAX_LEN)
5071 return -E2BIG;
5072
5073 /* speculate this member will be the good one */
ee26dade 5074 spec->bit_offset += bit_offset;
ddc7c304
AN
5075 spec->raw_spec[spec->raw_len++] = i;
5076
5077 targ_name = btf__name_by_offset(targ_btf, m->name_off);
5078 if (str_is_empty(targ_name)) {
5079 /* embedded struct/union, we need to go deeper */
5080 found = bpf_core_match_member(local_btf, local_acc,
5081 targ_btf, m->type,
5082 spec, next_targ_id);
5083 if (found) /* either found or error */
5084 return found;
5085 } else if (strcmp(local_name, targ_name) == 0) {
5086 /* matching named field */
5087 struct bpf_core_accessor *targ_acc;
5088
5089 targ_acc = &spec->spec[spec->len++];
5090 targ_acc->type_id = targ_id;
5091 targ_acc->idx = i;
5092 targ_acc->name = targ_name;
5093
5094 *next_targ_id = m->type;
5095 found = bpf_core_fields_are_compat(local_btf,
5096 local_member->type,
5097 targ_btf, m->type);
5098 if (!found)
5099 spec->len--; /* pop accessor */
5100 return found;
5101 }
5102 /* member turned out not to be what we looked for */
ee26dade 5103 spec->bit_offset -= bit_offset;
ddc7c304
AN
5104 spec->raw_len--;
5105 }
5106
5107 return 0;
5108}
5109
3fc32f40
AN
5110/* Check local and target types for compatibility. This check is used for
5111 * type-based CO-RE relocations and follows slightly different rules than
5112 * field-based relocations. This function assumes that root types were already
5113 * checked for name match. Beyond that initial root-level name check, names
5114 * are completely ignored. Compatibility rules are as follows:
5115 * - any two STRUCTs/UNIONs/FWDs/ENUMs/INTs are considered compatible, but
5116 * kind should match for local and target types (i.e., STRUCT is not
5117 * compatible with UNION);
5118 * - for ENUMs, the size is ignored;
5119 * - for INT, size and signedness are ignored;
5120 * - for ARRAY, dimensionality is ignored, element types are checked for
5121 * compatibility recursively;
5122 * - CONST/VOLATILE/RESTRICT modifiers are ignored;
5123 * - TYPEDEFs/PTRs are compatible if the types they point to are compatible;
5124 * - FUNC_PROTOs are compatible if they have compatible signature: same
5125 * number of input args and compatible return and argument types.
5126 * These rules are not set in stone and probably will be adjusted as we get
5127 * more experience with using BPF CO-RE relocations.
5128 */
5129static int bpf_core_types_are_compat(const struct btf *local_btf, __u32 local_id,
5130 const struct btf *targ_btf, __u32 targ_id)
5131{
5132 const struct btf_type *local_type, *targ_type;
5133 int depth = 32; /* max recursion depth */
5134
5135 /* caller made sure that names match (ignoring flavor suffix) */
5136 local_type = btf__type_by_id(local_btf, local_id);
f872e4bc 5137 targ_type = btf__type_by_id(targ_btf, targ_id);
3fc32f40
AN
5138 if (btf_kind(local_type) != btf_kind(targ_type))
5139 return 0;
5140
5141recur:
5142 depth--;
5143 if (depth < 0)
5144 return -EINVAL;
5145
5146 local_type = skip_mods_and_typedefs(local_btf, local_id, &local_id);
5147 targ_type = skip_mods_and_typedefs(targ_btf, targ_id, &targ_id);
5148 if (!local_type || !targ_type)
5149 return -EINVAL;
5150
5151 if (btf_kind(local_type) != btf_kind(targ_type))
5152 return 0;
5153
5154 switch (btf_kind(local_type)) {
5155 case BTF_KIND_UNKN:
5156 case BTF_KIND_STRUCT:
5157 case BTF_KIND_UNION:
5158 case BTF_KIND_ENUM:
5159 case BTF_KIND_FWD:
5160 return 1;
5161 case BTF_KIND_INT:
5162 /* just reject deprecated bitfield-like integers; all other
5163		 * integers are by default compatible with each other
5164 */
5165 return btf_int_offset(local_type) == 0 && btf_int_offset(targ_type) == 0;
5166 case BTF_KIND_PTR:
5167 local_id = local_type->type;
5168 targ_id = targ_type->type;
5169 goto recur;
5170 case BTF_KIND_ARRAY:
5171 local_id = btf_array(local_type)->type;
5172 targ_id = btf_array(targ_type)->type;
5173 goto recur;
5174 case BTF_KIND_FUNC_PROTO: {
5175 struct btf_param *local_p = btf_params(local_type);
5176 struct btf_param *targ_p = btf_params(targ_type);
5177 __u16 local_vlen = btf_vlen(local_type);
5178 __u16 targ_vlen = btf_vlen(targ_type);
5179 int i, err;
5180
5181 if (local_vlen != targ_vlen)
5182 return 0;
5183
5184 for (i = 0; i < local_vlen; i++, local_p++, targ_p++) {
5185 skip_mods_and_typedefs(local_btf, local_p->type, &local_id);
5186 skip_mods_and_typedefs(targ_btf, targ_p->type, &targ_id);
5187 err = bpf_core_types_are_compat(local_btf, local_id, targ_btf, targ_id);
5188 if (err <= 0)
5189 return err;
5190 }
5191
5192 /* tail recurse for return type check */
5193 skip_mods_and_typedefs(local_btf, local_type->type, &local_id);
5194 skip_mods_and_typedefs(targ_btf, targ_type->type, &targ_id);
5195 goto recur;
5196 }
5197 default:
5198 pr_warn("unexpected kind %s relocated, local [%d], target [%d]\n",
5199 btf_kind_str(local_type), local_id, targ_id);
5200 return 0;
5201 }
5202}
5203
ddc7c304
AN
5204/*
5205 * Try to match local spec to a target type and, if successful, produce full
ee26dade 5206 * target spec (high-level, low-level + bit offset).
ddc7c304
AN
5207 */
5208static int bpf_core_spec_match(struct bpf_core_spec *local_spec,
5209 const struct btf *targ_btf, __u32 targ_id,
5210 struct bpf_core_spec *targ_spec)
5211{
5212 const struct btf_type *targ_type;
5213 const struct bpf_core_accessor *local_acc;
5214 struct bpf_core_accessor *targ_acc;
5215 int i, sz, matched;
5216
5217 memset(targ_spec, 0, sizeof(*targ_spec));
5218 targ_spec->btf = targ_btf;
28b93c64
AN
5219 targ_spec->root_type_id = targ_id;
5220 targ_spec->relo_kind = local_spec->relo_kind;
ddc7c304 5221
3fc32f40
AN
5222 if (core_relo_is_type_based(local_spec->relo_kind)) {
5223 return bpf_core_types_are_compat(local_spec->btf,
5224 local_spec->root_type_id,
5225 targ_btf, targ_id);
5226 }
5227
ddc7c304
AN
5228 local_acc = &local_spec->spec[0];
5229 targ_acc = &targ_spec->spec[0];
5230
eacaaed7
AN
5231 if (core_relo_is_enumval_based(local_spec->relo_kind)) {
5232 size_t local_essent_len, targ_essent_len;
5233 const struct btf_enum *e;
5234 const char *targ_name;
5235
5236 /* has to resolve to an enum */
5237 targ_type = skip_mods_and_typedefs(targ_spec->btf, targ_id, &targ_id);
5238 if (!btf_is_enum(targ_type))
5239 return 0;
5240
5241 local_essent_len = bpf_core_essential_name_len(local_acc->name);
5242
5243 for (i = 0, e = btf_enum(targ_type); i < btf_vlen(targ_type); i++, e++) {
5244 targ_name = btf__name_by_offset(targ_spec->btf, e->name_off);
5245 targ_essent_len = bpf_core_essential_name_len(targ_name);
5246 if (targ_essent_len != local_essent_len)
5247 continue;
5248 if (strncmp(local_acc->name, targ_name, local_essent_len) == 0) {
5249 targ_acc->type_id = targ_id;
5250 targ_acc->idx = i;
5251 targ_acc->name = targ_name;
5252 targ_spec->len++;
5253 targ_spec->raw_spec[targ_spec->raw_len] = targ_acc->idx;
5254 targ_spec->raw_len++;
5255 return 1;
5256 }
5257 }
5258 return 0;
5259 }
5260
5261 if (!core_relo_is_field_based(local_spec->relo_kind))
5262 return -EINVAL;
5263
ddc7c304
AN
5264 for (i = 0; i < local_spec->len; i++, local_acc++, targ_acc++) {
5265 targ_type = skip_mods_and_typedefs(targ_spec->btf, targ_id,
5266 &targ_id);
5267 if (!targ_type)
5268 return -EINVAL;
5269
5270 if (local_acc->name) {
5271 matched = bpf_core_match_member(local_spec->btf,
5272 local_acc,
5273 targ_btf, targ_id,
5274 targ_spec, &targ_id);
5275 if (matched <= 0)
5276 return matched;
5277 } else {
5278 /* for i=0, targ_id is already treated as array element
5279 * type (because it's the original struct), for others
5280 * we should find array element type first
5281 */
5282 if (i > 0) {
5283 const struct btf_array *a;
1b484b30 5284 bool flex;
ddc7c304
AN
5285
5286 if (!btf_is_array(targ_type))
5287 return 0;
5288
5289 a = btf_array(targ_type);
1b484b30
AN
5290 flex = is_flex_arr(targ_btf, targ_acc - 1, a);
5291 if (!flex && local_acc->idx >= a->nelems)
ddc7c304
AN
5292 return 0;
5293 if (!skip_mods_and_typedefs(targ_btf, a->type,
5294 &targ_id))
5295 return -EINVAL;
5296 }
5297
5298 /* too deep struct/union/array nesting */
5299 if (targ_spec->raw_len == BPF_CORE_SPEC_MAX_LEN)
5300 return -E2BIG;
5301
5302 targ_acc->type_id = targ_id;
5303 targ_acc->idx = local_acc->idx;
5304 targ_acc->name = NULL;
5305 targ_spec->len++;
5306 targ_spec->raw_spec[targ_spec->raw_len] = targ_acc->idx;
5307 targ_spec->raw_len++;
5308
5309 sz = btf__resolve_size(targ_btf, targ_id);
5310 if (sz < 0)
5311 return sz;
ee26dade 5312 targ_spec->bit_offset += local_acc->idx * sz * 8;
ddc7c304
AN
5313 }
5314 }
5315
5316 return 1;
5317}
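/* Example (illustrative, not part of libbpf): a standalone sketch of the
 * array-accessor offset math used in bpf_core_spec_match() above. All values
 * are made up; for an accessor a[idx] over elements of byte size 'sz', the
 * spec's bit offset advances by idx * sz * 8.
 */
#include <stdio.h>

int main(void)
{
	unsigned int idx = 3, elem_sz = 4;	/* e.g., int a[8], access a[3] */
	unsigned int bit_off = idx * elem_sz * 8;

	printf("a[%u] starts at bit offset %u (byte %u)\n",
	       idx, bit_off, bit_off / 8);
	return 0;	/* prints: a[3] starts at bit offset 96 (byte 12) */
}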
5318
ee26dade 5319static int bpf_core_calc_field_relo(const struct bpf_program *prog,
28b93c64 5320 const struct bpf_core_relo *relo,
ee26dade 5321 const struct bpf_core_spec *spec,
a66345bc
AN
5322 __u32 *val, __u32 *field_sz, __u32 *type_id,
5323 bool *validate)
ee26dade 5324{
353c788c
AN
5325 const struct bpf_core_accessor *acc;
5326 const struct btf_type *t;
a66345bc 5327 __u32 byte_off, byte_sz, bit_off, bit_sz, field_type_id;
ee26dade
AN
5328 const struct btf_member *m;
5329 const struct btf_type *mt;
5330 bool bitfield;
94f060e9 5331 __s64 sz;
ee26dade 5332
a66345bc
AN
5333 *field_sz = 0;
5334
353c788c
AN
5335 if (relo->kind == BPF_FIELD_EXISTS) {
5336 *val = spec ? 1 : 0;
5337 return 0;
5338 }
5339
5340 if (!spec)
5341 return -EUCLEAN; /* request instruction poisoning */
5342
5343 acc = &spec->spec[spec->len - 1];
5344 t = btf__type_by_id(spec->btf, acc->type_id);
5345
ee26dade
AN
5346 /* a[n] accessor needs special handling */
5347 if (!acc->name) {
94f060e9
AN
5348 if (relo->kind == BPF_FIELD_BYTE_OFFSET) {
5349 *val = spec->bit_offset / 8;
a66345bc
AN
5350 /* remember field size for load/store mem size */
5351 sz = btf__resolve_size(spec->btf, acc->type_id);
5352 if (sz < 0)
5353 return -EINVAL;
5354 *field_sz = sz;
5355 *type_id = acc->type_id;
94f060e9
AN
5356 } else if (relo->kind == BPF_FIELD_BYTE_SIZE) {
5357 sz = btf__resolve_size(spec->btf, acc->type_id);
5358 if (sz < 0)
5359 return -EINVAL;
5360 *val = sz;
5361 } else {
5362 pr_warn("prog '%s': relo %d at insn #%d can't be applied to array access\n",
52109584 5363 prog->name, relo->kind, relo->insn_off / 8);
ee26dade
AN
5364 return -EINVAL;
5365 }
ee26dade
AN
5366 if (validate)
5367 *validate = true;
5368 return 0;
5369 }
5370
5371 m = btf_members(t) + acc->idx;
a66345bc 5372 mt = skip_mods_and_typedefs(spec->btf, m->type, &field_type_id);
ee26dade
AN
5373 bit_off = spec->bit_offset;
5374 bit_sz = btf_member_bitfield_size(t, acc->idx);
5375
5376 bitfield = bit_sz > 0;
5377 if (bitfield) {
5378 byte_sz = mt->size;
5379 byte_off = bit_off / 8 / byte_sz * byte_sz;
5380 /* figure out smallest int size necessary for bitfield load */
5381 while (bit_off + bit_sz - byte_off * 8 > byte_sz * 8) {
5382 if (byte_sz >= 8) {
5383 /* bitfield can't be read with 64-bit read */
5384 pr_warn("prog '%s': relo %d at insn #%d can't be satisfied for bitfield\n",
52109584 5385 prog->name, relo->kind, relo->insn_off / 8);
ee26dade
AN
5386 return -E2BIG;
5387 }
5388 byte_sz *= 2;
5389 byte_off = bit_off / 8 / byte_sz * byte_sz;
5390 }
5391 } else {
a66345bc 5392 sz = btf__resolve_size(spec->btf, field_type_id);
94f060e9
AN
5393 if (sz < 0)
5394 return -EINVAL;
5395 byte_sz = sz;
ee26dade
AN
5396 byte_off = spec->bit_offset / 8;
5397 bit_sz = byte_sz * 8;
5398 }
5399
5400 /* for bitfields, all the relocatable aspects are ambiguous and we
5401 * might disagree with compiler, so turn off validation of expected
5402 * value, except for signedness
5403 */
5404 if (validate)
5405 *validate = !bitfield;
5406
5407 switch (relo->kind) {
5408 case BPF_FIELD_BYTE_OFFSET:
5409 *val = byte_off;
a66345bc
AN
5410 if (!bitfield) {
5411 *field_sz = byte_sz;
5412 *type_id = field_type_id;
5413 }
ee26dade
AN
5414 break;
5415 case BPF_FIELD_BYTE_SIZE:
5416 *val = byte_sz;
5417 break;
5418 case BPF_FIELD_SIGNED:
5419 /* enums will be assumed unsigned */
5420 *val = btf_is_enum(mt) ||
5421 (btf_int_encoding(mt) & BTF_INT_SIGNED);
5422 if (validate)
5423 *validate = true; /* signedness is never ambiguous */
5424 break;
5425 case BPF_FIELD_LSHIFT_U64:
5426#if __BYTE_ORDER == __LITTLE_ENDIAN
5427 *val = 64 - (bit_off + bit_sz - byte_off * 8);
5428#else
5429 *val = (8 - byte_sz) * 8 + (bit_off - byte_off * 8);
5430#endif
5431 break;
5432 case BPF_FIELD_RSHIFT_U64:
5433 *val = 64 - bit_sz;
5434 if (validate)
5435 *validate = true; /* right shift is never ambiguous */
5436 break;
5437 case BPF_FIELD_EXISTS:
5438 default:
353c788c 5439 return -EOPNOTSUPP;
ee26dade
AN
5440 }
5441
5442 return 0;
5443}
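/* Standalone sketch of the bitfield load-sizing loop in
 * bpf_core_calc_field_relo() above: find the smallest naturally-aligned
 * power-of-two window that covers the whole bitfield. The offsets here are
 * made up for illustration.
 */
#include <stdio.h>

int main(void)
{
	unsigned int bit_off = 28, bit_sz = 8;	/* bitfield spans bits 28..35 */
	unsigned int byte_sz = 4;		/* start from underlying int size */
	unsigned int byte_off = bit_off / 8 / byte_sz * byte_sz;

	while (bit_off + bit_sz - byte_off * 8 > byte_sz * 8) {
		byte_sz *= 2;	/* 4 -> 8; 8 bytes is the BPF read maximum */
		byte_off = bit_off / 8 / byte_sz * byte_sz;
	}
	printf("load %u bytes at byte offset %u\n", byte_sz, byte_off);
	return 0;	/* prints: load 8 bytes at byte offset 0 */
}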
5444
3fc32f40
AN
5445static int bpf_core_calc_type_relo(const struct bpf_core_relo *relo,
5446 const struct bpf_core_spec *spec,
5447 __u32 *val)
5448{
5449 __s64 sz;
5450
5451 /* type-based relos return zero when target type is not found */
5452 if (!spec) {
5453 *val = 0;
5454 return 0;
5455 }
5456
5457 switch (relo->kind) {
5458 case BPF_TYPE_ID_TARGET:
5459 *val = spec->root_type_id;
5460 break;
5461 case BPF_TYPE_EXISTS:
5462 *val = 1;
5463 break;
5464 case BPF_TYPE_SIZE:
5465 sz = btf__resolve_size(spec->btf, spec->root_type_id);
5466 if (sz < 0)
5467 return -EINVAL;
5468 *val = sz;
5469 break;
5470 case BPF_TYPE_ID_LOCAL:
5471 /* BPF_TYPE_ID_LOCAL is handled specially and shouldn't get here */
5472 default:
5473 return -EOPNOTSUPP;
5474 }
5475
5476 return 0;
5477}
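/* BPF-program-side sketch (hypothetical type; assumes the bpf_core_read.h
 * macros of this libbpf generation): these helpers emit the
 * BPF_TYPE_EXISTS/BPF_TYPE_SIZE relos resolved above. Compile with clang
 * -target bpf.
 */
#include <bpf/bpf_core_read.h>

struct pelt_avg { unsigned long load_avg; };	/* local stand-in definition */

int detect_pelt(void)
{
	if (!bpf_core_type_exists(struct pelt_avg))	/* BPF_TYPE_EXISTS */
		return 0;
	return bpf_core_type_size(struct pelt_avg);	/* BPF_TYPE_SIZE */
}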
5478
eacaaed7
AN
5479static int bpf_core_calc_enumval_relo(const struct bpf_core_relo *relo,
5480 const struct bpf_core_spec *spec,
5481 __u32 *val)
5482{
5483 const struct btf_type *t;
5484 const struct btf_enum *e;
5485
5486 switch (relo->kind) {
5487 case BPF_ENUMVAL_EXISTS:
5488 *val = spec ? 1 : 0;
5489 break;
5490 case BPF_ENUMVAL_VALUE:
5491 if (!spec)
5492 return -EUCLEAN; /* request instruction poisoning */
5493 t = btf__type_by_id(spec->btf, spec->spec[0].type_id);
5494 e = btf_enum(t) + spec->spec[0].idx;
5495 *val = e->val;
5496 break;
5497 default:
5498 return -EOPNOTSUPP;
5499 }
5500
5501 return 0;
5502}
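/* BPF-program-side sketch for the enumval kinds above (hypothetical enum;
 * assumes the bpf_core_read.h enumval macros shipped alongside these relo
 * kinds). The resolved value comes from the *target* BTF at load time.
 */
#include <bpf/bpf_core_read.h>

enum pid_type___local { PIDTYPE_MAX___stand_in = 1 };	/* local stand-in */

int max_pid_type(void)
{
	if (bpf_core_enum_value_exists(enum pid_type___local,
				       PIDTYPE_MAX___stand_in))	/* BPF_ENUMVAL_EXISTS */
		return bpf_core_enum_value(enum pid_type___local,
					   PIDTYPE_MAX___stand_in);	/* BPF_ENUMVAL_VALUE */
	return -1;
}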
5503
353c788c
AN
5504struct bpf_core_relo_res
5505{
5506 /* expected value in the instruction, unless validate == false */
5507 __u32 orig_val;
5508 /* new value that needs to be patched up to */
5509 __u32 new_val;
5510 /* relocation unsuccessful, poison instruction, but don't fail load */
5511 bool poison;
5512 /* some relocations can't be validated against orig_val */
5513 bool validate;
a66345bc
AN
 5514	/* for field byte offset relocations of the forms:
 5515	 *     *(T *)(rX + <off>) = rY
 5516	 *     rX = *(T *)(rY + <off>),
 5517	 * we remember the original and resolved field sizes to adjust direct
 5518	 * memory loads of pointers and integers; this is necessary for 32-bit
 5519	 * host kernel architectures, but also makes it possible to automatically
 5520	 * relocate fields that were resized from, e.g., u32 to u64, etc.
5521 */
5522 bool fail_memsz_adjust;
5523 __u32 orig_sz;
5524 __u32 orig_type_id;
5525 __u32 new_sz;
5526 __u32 new_type_id;
353c788c
AN
5527};
5528
5529/* Calculate original and target relocation values, given local and target
5530 * specs and relocation kind. These values are calculated for each candidate.
5531 * If there are multiple candidates, resulting values should all be consistent
5532 * with each other. Otherwise, libbpf will refuse to proceed due to ambiguity.
 5533 * If the instruction has to be poisoned, res->poison will be set to true.
5534 */
5535static int bpf_core_calc_relo(const struct bpf_program *prog,
5536 const struct bpf_core_relo *relo,
5537 int relo_idx,
5538 const struct bpf_core_spec *local_spec,
5539 const struct bpf_core_spec *targ_spec,
5540 struct bpf_core_relo_res *res)
5541{
5542 int err = -EOPNOTSUPP;
5543
5544 res->orig_val = 0;
5545 res->new_val = 0;
5546 res->poison = false;
5547 res->validate = true;
a66345bc
AN
5548 res->fail_memsz_adjust = false;
5549 res->orig_sz = res->new_sz = 0;
5550 res->orig_type_id = res->new_type_id = 0;
353c788c
AN
5551
5552 if (core_relo_is_field_based(relo->kind)) {
a66345bc
AN
5553 err = bpf_core_calc_field_relo(prog, relo, local_spec,
5554 &res->orig_val, &res->orig_sz,
5555 &res->orig_type_id, &res->validate);
5556 err = err ?: bpf_core_calc_field_relo(prog, relo, targ_spec,
5557 &res->new_val, &res->new_sz,
5558 &res->new_type_id, NULL);
5559 if (err)
5560 goto done;
5561 /* Validate if it's safe to adjust load/store memory size.
5562 * Adjustments are performed only if original and new memory
5563 * sizes differ.
5564 */
5565 res->fail_memsz_adjust = false;
5566 if (res->orig_sz != res->new_sz) {
5567 const struct btf_type *orig_t, *new_t;
5568
5569 orig_t = btf__type_by_id(local_spec->btf, res->orig_type_id);
5570 new_t = btf__type_by_id(targ_spec->btf, res->new_type_id);
5571
5572 /* There are two use cases in which it's safe to
5573 * adjust load/store's mem size:
 5574			 *   - reading a 32-bit kernel pointer, while on the BPF
 5575			 *     side pointers are always 64-bit; in this case
 5576			 *     it's safe to "downsize" instruction size due to
 5577			 *     the pointer being treated as an unsigned integer
 5578			 *     with zero-extended upper 32 bits;
 5579			 *   - reading unsigned integers, again because
 5580			 *     zero-extension preserves the value correctly.
 5581			 *
 5582			 * In all other cases it's incorrect to attempt to
 5583			 * load/store the field because the read value will be
 5584			 * incorrect, so we poison the relocated instruction.
5585 */
5586 if (btf_is_ptr(orig_t) && btf_is_ptr(new_t))
5587 goto done;
5588 if (btf_is_int(orig_t) && btf_is_int(new_t) &&
5589 btf_int_encoding(orig_t) != BTF_INT_SIGNED &&
5590 btf_int_encoding(new_t) != BTF_INT_SIGNED)
5591 goto done;
5592
5593 /* mark as invalid mem size adjustment, but this will
5594 * only be checked for LDX/STX/ST insns
5595 */
5596 res->fail_memsz_adjust = true;
5597 }
3fc32f40
AN
5598 } else if (core_relo_is_type_based(relo->kind)) {
5599 err = bpf_core_calc_type_relo(relo, local_spec, &res->orig_val);
5600 err = err ?: bpf_core_calc_type_relo(relo, targ_spec, &res->new_val);
eacaaed7
AN
5601 } else if (core_relo_is_enumval_based(relo->kind)) {
5602 err = bpf_core_calc_enumval_relo(relo, local_spec, &res->orig_val);
5603 err = err ?: bpf_core_calc_enumval_relo(relo, targ_spec, &res->new_val);
353c788c
AN
5604 }
5605
a66345bc 5606done:
353c788c
AN
5607 if (err == -EUCLEAN) {
5608 /* EUCLEAN is used to signal instruction poisoning request */
5609 res->poison = true;
5610 err = 0;
5611 } else if (err == -EOPNOTSUPP) {
5612 /* EOPNOTSUPP means unknown/unsupported relocation */
5613 pr_warn("prog '%s': relo #%d: unrecognized CO-RE relocation %s (%d) at insn #%d\n",
52109584
AN
5614 prog->name, relo_idx, core_relo_kind_str(relo->kind),
5615 relo->kind, relo->insn_off / 8);
353c788c
AN
5616 }
5617
5618 return err;
5619}
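/* Standalone sketch of the mem-size adjustment rule applied above (the flag
 * names are illustrative; they mirror the btf_is_ptr()/btf_int_encoding()
 * checks): pointers and unsigned integers may be resized because
 * zero-extension preserves their value, everything else gets poisoned.
 */
#include <stdbool.h>

static bool memsz_adjust_ok(bool orig_ptr, bool new_ptr,
			    bool orig_uint, bool new_uint)
{
	if (orig_ptr && new_ptr)
		return true;	/* BPF-side pointers are 64-bit, zero-extended */
	if (orig_uint && new_uint)
		return true;	/* unsigned ints zero-extend safely */
	return false;		/* e.g., signed ints: the sign bit would move */
}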
5620
5621/*
 5622 * Turn an instruction for which CO-RE relocation failed into an invalid one
 5623 * with a distinct signature.
5624 */
5625static void bpf_core_poison_insn(struct bpf_program *prog, int relo_idx,
5626 int insn_idx, struct bpf_insn *insn)
5627{
5628 pr_debug("prog '%s': relo #%d: substituting insn #%d w/ invalid insn\n",
52109584 5629 prog->name, relo_idx, insn_idx);
353c788c
AN
5630 insn->code = BPF_JMP | BPF_CALL;
5631 insn->dst_reg = 0;
5632 insn->src_reg = 0;
5633 insn->off = 0;
 5634	/* if this instruction is reachable (not dead code),
 5635	 * the verifier will complain with the following message:
5636 * invalid func unknown#195896080
5637 */
5638 insn->imm = 195896080; /* => 0xbad2310 => "bad relo" */
5639}
5640
a66345bc
AN
5641static int insn_bpf_size_to_bytes(struct bpf_insn *insn)
5642{
5643 switch (BPF_SIZE(insn->code)) {
5644 case BPF_DW: return 8;
5645 case BPF_W: return 4;
5646 case BPF_H: return 2;
5647 case BPF_B: return 1;
5648 default: return -1;
5649 }
5650}
5651
5652static int insn_bytes_to_bpf_size(__u32 sz)
5653{
5654 switch (sz) {
5655 case 8: return BPF_DW;
5656 case 4: return BPF_W;
5657 case 2: return BPF_H;
5658 case 1: return BPF_B;
5659 default: return -1;
5660 }
5661}
5662
ddc7c304
AN
5663/*
5664 * Patch relocatable BPF instruction.
62561eb4
AN
5665 *
 5666 * The patched value is determined by the relocation kind and target specification.
353c788c 5667 * For existence relocations, the target spec will be NULL if the field/type is not found.
62561eb4
AN
 5668 * The expected insn->imm value is determined using the relocation kind and
 5669 * local spec, and is checked before patching the instruction. If the actual
 5670 * insn->imm value is wrong, bail out with an error.
ddc7c304 5671 *
a66345bc 5672 * Currently supported classes of BPF instructions (illustrated after this function) are:
ddc7c304
AN
5673 * 1. rX = <imm> (assignment with immediate operand);
5674 * 2. rX += <imm> (arithmetic operations with immediate operand);
a66345bc
AN
5675 * 3. rX = <imm64> (load with 64-bit immediate value);
5676 * 4. rX = *(T *)(rY + <off>), where T is one of {u8, u16, u32, u64};
5677 * 5. *(T *)(rX + <off>) = rY, where T is one of {u8, u16, u32, u64};
5678 * 6. *(T *)(rX + <off>) = <imm>, where T is one of {u8, u16, u32, u64}.
ddc7c304 5679 */
353c788c 5680static int bpf_core_patch_insn(struct bpf_program *prog,
28b93c64 5681 const struct bpf_core_relo *relo,
d7a25270 5682 int relo_idx,
353c788c 5683 const struct bpf_core_relo_res *res)
ddc7c304 5684{
62561eb4 5685 __u32 orig_val, new_val;
ddc7c304 5686 struct bpf_insn *insn;
353c788c 5687 int insn_idx;
ddc7c304
AN
5688 __u8 class;
5689
9c0f8cbd 5690 if (relo->insn_off % BPF_INSN_SZ)
62561eb4 5691 return -EINVAL;
9c0f8cbd 5692 insn_idx = relo->insn_off / BPF_INSN_SZ;
db2b8b06
AN
5693 /* adjust insn_idx from section frame of reference to the local
5694 * program's frame of reference; (sub-)program code is not yet
5695 * relocated, so it's enough to just subtract in-section offset
5696 */
5697 insn_idx = insn_idx - prog->sec_insn_off;
d7a25270
AN
5698 insn = &prog->insns[insn_idx];
5699 class = BPF_CLASS(insn->code);
62561eb4 5700
353c788c 5701 if (res->poison) {
a66345bc 5702poison:
eacaaed7
AN
5703 /* poison second part of ldimm64 to avoid confusing error from
5704 * verifier about "unknown opcode 00"
5705 */
5706 if (is_ldimm64(insn))
5707 bpf_core_poison_insn(prog, relo_idx, insn_idx + 1, insn + 1);
353c788c 5708 bpf_core_poison_insn(prog, relo_idx, insn_idx, insn);
d7a25270 5709 return 0;
62561eb4 5710 }
ddc7c304 5711
353c788c
AN
5712 orig_val = res->orig_val;
5713 new_val = res->new_val;
5714
8ab9da57
AN
5715 switch (class) {
5716 case BPF_ALU:
5717 case BPF_ALU64:
ddc7c304
AN
5718 if (BPF_SRC(insn->code) != BPF_K)
5719 return -EINVAL;
353c788c 5720 if (res->validate && insn->imm != orig_val) {
d7a25270 5721 pr_warn("prog '%s': relo #%d: unexpected insn #%d (ALU/ALU64) value: got %u, exp %u -> %u\n",
52109584 5722 prog->name, relo_idx,
d7a25270 5723 insn_idx, insn->imm, orig_val, new_val);
ddc7c304 5724 return -EINVAL;
ee26dade
AN
5725 }
5726 orig_val = insn->imm;
62561eb4 5727 insn->imm = new_val;
d7a25270 5728 pr_debug("prog '%s': relo #%d: patched insn #%d (ALU/ALU64) imm %u -> %u\n",
52109584 5729 prog->name, relo_idx, insn_idx,
d7a25270 5730 orig_val, new_val);
8ab9da57
AN
5731 break;
5732 case BPF_LDX:
5733 case BPF_ST:
5734 case BPF_STX:
353c788c 5735 if (res->validate && insn->off != orig_val) {
eacaaed7 5736 pr_warn("prog '%s': relo #%d: unexpected insn #%d (LDX/ST/STX) value: got %u, exp %u -> %u\n",
52109584 5737 prog->name, relo_idx, insn_idx, insn->off, orig_val, new_val);
8ab9da57
AN
5738 return -EINVAL;
5739 }
5740 if (new_val > SHRT_MAX) {
d7a25270 5741 pr_warn("prog '%s': relo #%d: insn #%d (LDX/ST/STX) value too big: %u\n",
52109584 5742 prog->name, relo_idx, insn_idx, new_val);
8ab9da57
AN
5743 return -ERANGE;
5744 }
a66345bc
AN
5745 if (res->fail_memsz_adjust) {
5746 pr_warn("prog '%s': relo #%d: insn #%d (LDX/ST/STX) accesses field incorrectly. "
5747 "Make sure you are accessing pointers, unsigned integers, or fields of matching type and size.\n",
5748 prog->name, relo_idx, insn_idx);
5749 goto poison;
5750 }
5751
8ab9da57
AN
5752 orig_val = insn->off;
5753 insn->off = new_val;
d7a25270 5754 pr_debug("prog '%s': relo #%d: patched insn #%d (LDX/ST/STX) off %u -> %u\n",
52109584 5755 prog->name, relo_idx, insn_idx, orig_val, new_val);
a66345bc
AN
5756
5757 if (res->new_sz != res->orig_sz) {
5758 int insn_bytes_sz, insn_bpf_sz;
5759
5760 insn_bytes_sz = insn_bpf_size_to_bytes(insn);
5761 if (insn_bytes_sz != res->orig_sz) {
5762 pr_warn("prog '%s': relo #%d: insn #%d (LDX/ST/STX) unexpected mem size: got %d, exp %u\n",
5763 prog->name, relo_idx, insn_idx, insn_bytes_sz, res->orig_sz);
5764 return -EINVAL;
5765 }
5766
5767 insn_bpf_sz = insn_bytes_to_bpf_size(res->new_sz);
5768 if (insn_bpf_sz < 0) {
5769 pr_warn("prog '%s': relo #%d: insn #%d (LDX/ST/STX) invalid new mem size: %u\n",
5770 prog->name, relo_idx, insn_idx, res->new_sz);
5771 return -EINVAL;
5772 }
5773
5774 insn->code = BPF_MODE(insn->code) | insn_bpf_sz | BPF_CLASS(insn->code);
5775 pr_debug("prog '%s': relo #%d: patched insn #%d (LDX/ST/STX) mem_sz %u -> %u\n",
5776 prog->name, relo_idx, insn_idx, res->orig_sz, res->new_sz);
5777 }
8ab9da57 5778 break;
eacaaed7
AN
5779 case BPF_LD: {
5780 __u64 imm;
5781
5782 if (!is_ldimm64(insn) ||
5783 insn[0].src_reg != 0 || insn[0].off != 0 ||
5784 insn_idx + 1 >= prog->insns_cnt ||
5785 insn[1].code != 0 || insn[1].dst_reg != 0 ||
5786 insn[1].src_reg != 0 || insn[1].off != 0) {
5787 pr_warn("prog '%s': relo #%d: insn #%d (LDIMM64) has unexpected form\n",
52109584 5788 prog->name, relo_idx, insn_idx);
eacaaed7
AN
5789 return -EINVAL;
5790 }
5791
5792 imm = insn[0].imm + ((__u64)insn[1].imm << 32);
5793 if (res->validate && imm != orig_val) {
5794 pr_warn("prog '%s': relo #%d: unexpected insn #%d (LDIMM64) value: got %llu, exp %u -> %u\n",
52109584 5795 prog->name, relo_idx,
2e80be60
AN
5796 insn_idx, (unsigned long long)imm,
5797 orig_val, new_val);
eacaaed7
AN
5798 return -EINVAL;
5799 }
5800
5801 insn[0].imm = new_val;
5802 insn[1].imm = 0; /* currently only 32-bit values are supported */
5803 pr_debug("prog '%s': relo #%d: patched insn #%d (LDIMM64) imm64 %llu -> %u\n",
52109584 5804 prog->name, relo_idx, insn_idx,
2e80be60 5805 (unsigned long long)imm, new_val);
eacaaed7
AN
5806 break;
5807 }
8ab9da57 5808 default:
eacaaed7 5809 pr_warn("prog '%s': relo #%d: trying to relocate unrecognized insn #%d, code:0x%x, src:0x%x, dst:0x%x, off:0x%x, imm:0x%x\n",
52109584
AN
5810 prog->name, relo_idx, insn_idx, insn->code,
5811 insn->src_reg, insn->dst_reg, insn->off, insn->imm);
ddc7c304
AN
5812 return -EINVAL;
5813 }
62561eb4 5814
ddc7c304
AN
5815 return 0;
5816}
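/* Illustrative sketch of the six relocatable instruction shapes listed in
 * the comment above bpf_core_patch_insn() (register, offset and immediate
 * values are arbitrary; uses the insn-building macros from the tools copy
 * of linux/filter.h). Note BPF_LD_IMM64 expands to two instructions.
 */
#include <linux/bpf.h>
#include <linux/filter.h>

void show_relocatable_shapes(void)
{
	struct bpf_insn shapes[] = {
		BPF_MOV64_IMM(BPF_REG_1, 16),			/* 1. rX = <imm> */
		BPF_ALU64_IMM(BPF_ADD, BPF_REG_1, 8),		/* 2. rX += <imm> */
		BPF_LD_IMM64(BPF_REG_2, 42),			/* 3. rX = <imm64> */
		BPF_LDX_MEM(BPF_W, BPF_REG_0, BPF_REG_1, 0),	/* 4. rX = *(u32 *)(rY + <off>) */
		BPF_STX_MEM(BPF_W, BPF_REG_1, BPF_REG_0, 0),	/* 5. *(u32 *)(rX + <off>) = rY */
		BPF_ST_MEM(BPF_W, BPF_REG_1, 0, 7),		/* 6. *(u32 *)(rX + <off>) = <imm> */
	};

	(void)shapes;
}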
5817
ddc7c304
AN
5818/* Output spec definition in the format:
5819 * [<type-id>] (<type-name>) + <raw-spec> => <offset>@<spec>,
5820 * where <spec> is a C-syntax view of recorded field access, e.g.: x.a[3].b
5821 */
5822static void bpf_core_dump_spec(int level, const struct bpf_core_spec *spec)
5823{
5824 const struct btf_type *t;
eacaaed7 5825 const struct btf_enum *e;
ddc7c304
AN
5826 const char *s;
5827 __u32 type_id;
5828 int i;
5829
28b93c64 5830 type_id = spec->root_type_id;
ddc7c304
AN
5831 t = btf__type_by_id(spec->btf, type_id);
5832 s = btf__name_by_offset(spec->btf, t->name_off);
ddc7c304 5833
28b93c64
AN
5834 libbpf_print(level, "[%u] %s %s", type_id, btf_kind_str(t), str_is_empty(s) ? "<anon>" : s);
5835
3fc32f40
AN
5836 if (core_relo_is_type_based(spec->relo_kind))
5837 return;
5838
eacaaed7
AN
5839 if (core_relo_is_enumval_based(spec->relo_kind)) {
5840 t = skip_mods_and_typedefs(spec->btf, type_id, NULL);
5841 e = btf_enum(t) + spec->raw_spec[0];
5842 s = btf__name_by_offset(spec->btf, e->name_off);
5843
5844 libbpf_print(level, "::%s = %u", s, e->val);
5845 return;
5846 }
5847
28b93c64
AN
5848 if (core_relo_is_field_based(spec->relo_kind)) {
5849 for (i = 0; i < spec->len; i++) {
5850 if (spec->spec[i].name)
5851 libbpf_print(level, ".%s", spec->spec[i].name);
5852 else if (i > 0 || spec->spec[i].idx > 0)
5853 libbpf_print(level, "[%u]", spec->spec[i].idx);
5854 }
ddc7c304 5855
28b93c64
AN
5856 libbpf_print(level, " (");
5857 for (i = 0; i < spec->raw_len; i++)
5858 libbpf_print(level, "%s%d", i == 0 ? "" : ":", spec->raw_spec[i]);
ddc7c304 5859
28b93c64
AN
5860 if (spec->bit_offset % 8)
5861 libbpf_print(level, " @ offset %u.%u)",
5862 spec->bit_offset / 8, spec->bit_offset % 8);
ddc7c304 5863 else
28b93c64 5864 libbpf_print(level, " @ offset %u)", spec->bit_offset / 8);
3fc32f40 5865 return;
ddc7c304 5866 }
ddc7c304
AN
5867}
5868
5869static size_t bpf_core_hash_fn(const void *key, void *ctx)
5870{
5871 return (size_t)key;
5872}
5873
5874static bool bpf_core_equal_fn(const void *k1, const void *k2, void *ctx)
5875{
5876 return k1 == k2;
5877}
5878
5879static void *u32_as_hash_key(__u32 x)
5880{
5881 return (void *)(uintptr_t)x;
5882}
5883
5884/*
5885 * CO-RE relocate single instruction.
5886 *
5887 * The outline and important points of the algorithm:
5888 * 1. For given local type, find corresponding candidate target types.
5889 * Candidate type is a type with the same "essential" name, ignoring
5890 * everything after last triple underscore (___). E.g., `sample`,
5891 * `sample___flavor_one`, `sample___flavor_another_one`, are all candidates
5892 * for each other. Names with triple underscore are referred to as
 5893 *    "flavors" and are useful, among other things, to allow specifying or
 5894 *    supporting incompatible variations of the same kernel struct, which
5895 * might differ between different kernel versions and/or build
5896 * configurations.
5897 *
5898 * N.B. Struct "flavors" could be generated by bpftool's BTF-to-C
 5899 *    converter, when the deduplicated BTF of a kernel still contains more than
 5900 *    one different type with the same name. In that case, ___2, ___3, etc.
 5901 *    are appended starting from the second name conflict. But such flavors are
 5902 *    also useful when defined "locally", in a BPF program, to extract the same
 5903 *    data despite incompatible changes between different kernel
 5904 *    versions/configurations. For instance, to handle field renames between
 5905 *    kernel versions, one can use two flavors of the struct name with the
 5906 *    same common name and use conditional relocations to extract that field,
 5907 *    depending on the target kernel version (a flavor sketch follows bpf_core_apply_relo() below).
5908 * 2. For each candidate type, try to match local specification to this
5909 * candidate target type. Matching involves finding corresponding
5910 * high-level spec accessors, meaning that all named fields should match,
 5911 *    and all array accesses should be within the actual bounds. Also,
5912 * types should be compatible (see bpf_core_fields_are_compat for details).
5913 * 3. It is supported and expected that there might be multiple flavors
5914 * matching the spec. As long as all the specs resolve to the same set of
511bb008 5915 * offsets across all candidates, there is no error. If there is any
ddc7c304
AN
 5916 *    ambiguity, CO-RE relocation will fail. This is necessary to accommodate
 5917 *    imperfection of BTF deduplication, which can cause slight duplication of
 5918 *    the same BTF type, if some directly or indirectly referenced (by
 5919 *    pointer) type gets resolved to different actual types in different
 5920 *    object files. If such a situation occurs, deduplicated BTF will end up
5921 * with two (or more) structurally identical types, which differ only in
5922 * types they refer to through pointer. This should be OK in most cases and
5923 * is not an error.
 5924 * 4. Candidate type search is performed by linearly scanning through all
5925 * types in target BTF. It is anticipated that this is overall more
5926 * efficient memory-wise and not significantly worse (if not better)
5927 * CPU-wise compared to prebuilding a map from all local type names to
 5928 *    a list of candidate type names. It's also sped up by caching the resolved
 5929 *    list of matching candidates for each local "root" type ID that has at
28b93c64 5930 *    least one bpf_core_relo associated with it. This list is shared
ddc7c304
AN
5931 * between multiple relocations for the same type ID and is updated as some
5932 * of the candidates are pruned due to structural incompatibility.
5933 */
3fc32f40
AN
5934static int bpf_core_apply_relo(struct bpf_program *prog,
5935 const struct bpf_core_relo *relo,
5936 int relo_idx,
5937 const struct btf *local_btf,
3fc32f40 5938 struct hashmap *cand_cache)
ddc7c304 5939{
3418c56d 5940 struct bpf_core_spec local_spec, cand_spec, targ_spec = {};
ddc7c304 5941 const void *type_key = u32_as_hash_key(relo->type_id);
353c788c 5942 struct bpf_core_relo_res cand_res, targ_res;
28b93c64
AN
5943 const struct btf_type *local_type;
5944 const char *local_name;
0f7515ca
AN
5945 struct core_cand_list *cands = NULL;
5946 __u32 local_id;
ddc7c304
AN
5947 const char *spec_str;
5948 int i, j, err;
5949
5950 local_id = relo->type_id;
5951 local_type = btf__type_by_id(local_btf, local_id);
5952 if (!local_type)
5953 return -EINVAL;
5954
5955 local_name = btf__name_by_offset(local_btf, local_type->name_off);
3fc32f40 5956 if (!local_name)
ddc7c304
AN
5957 return -EINVAL;
5958
5959 spec_str = btf__name_by_offset(local_btf, relo->access_str_off);
5960 if (str_is_empty(spec_str))
5961 return -EINVAL;
5962
28b93c64 5963 err = bpf_core_parse_spec(local_btf, local_id, spec_str, relo->kind, &local_spec);
ddc7c304 5964 if (err) {
28b93c64 5965 pr_warn("prog '%s': relo #%d: parsing [%d] %s %s + %s failed: %d\n",
52109584 5966 prog->name, relo_idx, local_id, btf_kind_str(local_type),
3fc32f40
AN
5967 str_is_empty(local_name) ? "<anon>" : local_name,
5968 spec_str, err);
ddc7c304
AN
5969 return -EINVAL;
5970 }
5971
52109584 5972 pr_debug("prog '%s': relo #%d: kind <%s> (%d), spec is ", prog->name,
28b93c64 5973 relo_idx, core_relo_kind_str(relo->kind), relo->kind);
ddc7c304
AN
5974 bpf_core_dump_spec(LIBBPF_DEBUG, &local_spec);
5975 libbpf_print(LIBBPF_DEBUG, "\n");
5976
3fc32f40
AN
5977 /* TYPE_ID_LOCAL relo is special and doesn't need candidate search */
5978 if (relo->kind == BPF_TYPE_ID_LOCAL) {
5979 targ_res.validate = true;
5980 targ_res.poison = false;
5981 targ_res.orig_val = local_spec.root_type_id;
5982 targ_res.new_val = local_spec.root_type_id;
5983 goto patch_insn;
5984 }
5985
5986 /* libbpf doesn't support candidate search for anonymous types */
5987 if (str_is_empty(spec_str)) {
5988 pr_warn("prog '%s': relo #%d: <%s> (%d) relocation doesn't support anonymous types\n",
52109584 5989 prog->name, relo_idx, core_relo_kind_str(relo->kind), relo->kind);
3fc32f40
AN
5990 return -EOPNOTSUPP;
5991 }
5992
0f7515ca
AN
5993 if (!hashmap__find(cand_cache, type_key, (void **)&cands)) {
5994 cands = bpf_core_find_cands(prog->obj, local_btf, local_id);
5995 if (IS_ERR(cands)) {
4f33a53d 5996 pr_warn("prog '%s': relo #%d: target candidate search failed for [%d] %s %s: %ld\n",
52109584 5997 prog->name, relo_idx, local_id, btf_kind_str(local_type),
0f7515ca
AN
5998 local_name, PTR_ERR(cands));
5999 return PTR_ERR(cands);
ddc7c304 6000 }
0f7515ca 6001 err = hashmap__set(cand_cache, type_key, cands, NULL, NULL);
ddc7c304 6002 if (err) {
0f7515ca 6003 bpf_core_free_cands(cands);
ddc7c304
AN
6004 return err;
6005 }
6006 }
6007
0f7515ca
AN
6008 for (i = 0, j = 0; i < cands->len; i++) {
6009 err = bpf_core_spec_match(&local_spec, cands->cands[i].btf,
6010 cands->cands[i].id, &cand_spec);
ddc7c304 6011 if (err < 0) {
28b93c64 6012 pr_warn("prog '%s': relo #%d: error matching candidate #%d ",
52109584 6013 prog->name, relo_idx, i);
28b93c64
AN
6014 bpf_core_dump_spec(LIBBPF_WARN, &cand_spec);
6015 libbpf_print(LIBBPF_WARN, ": %d\n", err);
ddc7c304
AN
6016 return err;
6017 }
28b93c64 6018
52109584 6019 pr_debug("prog '%s': relo #%d: %s candidate #%d ", prog->name,
28b93c64
AN
6020 relo_idx, err == 0 ? "non-matching" : "matching", i);
6021 bpf_core_dump_spec(LIBBPF_DEBUG, &cand_spec);
6022 libbpf_print(LIBBPF_DEBUG, "\n");
6023
ddc7c304
AN
6024 if (err == 0)
6025 continue;
6026
353c788c
AN
6027 err = bpf_core_calc_relo(prog, relo, relo_idx, &local_spec, &cand_spec, &cand_res);
6028 if (err)
6029 return err;
6030
ddc7c304 6031 if (j == 0) {
353c788c 6032 targ_res = cand_res;
ddc7c304 6033 targ_spec = cand_spec;
ee26dade 6034 } else if (cand_spec.bit_offset != targ_spec.bit_offset) {
353c788c
AN
6035 /* if there are many field relo candidates, they
6036 * should all resolve to the same bit offset
ddc7c304 6037 */
353c788c 6038 pr_warn("prog '%s': relo #%d: field offset ambiguity: %u != %u\n",
52109584 6039 prog->name, relo_idx, cand_spec.bit_offset,
ee26dade 6040 targ_spec.bit_offset);
ddc7c304 6041 return -EINVAL;
353c788c
AN
6042 } else if (cand_res.poison != targ_res.poison || cand_res.new_val != targ_res.new_val) {
6043 /* all candidates should result in the same relocation
6044 * decision and value, otherwise it's dangerous to
6045 * proceed due to ambiguity
6046 */
6047 pr_warn("prog '%s': relo #%d: relocation decision ambiguity: %s %u != %s %u\n",
52109584 6048 prog->name, relo_idx,
353c788c
AN
6049 cand_res.poison ? "failure" : "success", cand_res.new_val,
6050 targ_res.poison ? "failure" : "success", targ_res.new_val);
6051 return -EINVAL;
ddc7c304
AN
6052 }
6053
0f7515ca 6054 cands->cands[j++] = cands->cands[i];
ddc7c304
AN
6055 }
6056
62561eb4 6057 /*
d7a25270
AN
 6058	 * For a BPF_FIELD_EXISTS relo, or when the BPF program being used has
 6059	 * field existence checks or kernel version/config checks, it's expected
 6060	 * that we might not find any candidates. In that case, if the field
 6061	 * wasn't found in any candidate, the list of candidates shouldn't
 6062	 * change at all; we'll just handle the relocation appropriately,
 6063	 * depending on the relo's kind.
62561eb4
AN
6064 */
6065 if (j > 0)
0f7515ca 6066 cands->len = j;
62561eb4 6067
d7a25270
AN
6068 /*
 6069	 * If no candidates were found, it might be either a programmer error or
 6070	 * an expected case, depending on whether the instruction with the
 6071	 * relocation is guarded in some way that makes it unreachable (dead
 6072	 * code) if the relocation can't be resolved. This is handled in
3fc32f40 6073 * bpf_core_patch_insn() uniformly by replacing that instruction with
d7a25270
AN
6074 * BPF helper call insn (using invalid helper ID). If that instruction
6075 * is indeed unreachable, then it will be ignored and eliminated by
6076 * verifier. If it was an error, then verifier will complain and point
6077 * to a specific instruction number in its log.
6078 */
353c788c 6079 if (j == 0) {
28b93c64 6080 pr_debug("prog '%s': relo #%d: no matching targets found\n",
52109584 6081 prog->name, relo_idx);
ddc7c304 6082
353c788c
AN
6083 /* calculate single target relo result explicitly */
6084 err = bpf_core_calc_relo(prog, relo, relo_idx, &local_spec, NULL, &targ_res);
6085 if (err)
6086 return err;
6087 }
6088
3fc32f40 6089patch_insn:
353c788c
AN
6090 /* bpf_core_patch_insn() should know how to handle missing targ_spec */
6091 err = bpf_core_patch_insn(prog, relo, relo_idx, &targ_res);
ddc7c304 6092 if (err) {
be18010e 6093 pr_warn("prog '%s': relo #%d: failed to patch insn at offset %d: %d\n",
52109584 6094 prog->name, relo_idx, relo->insn_off, err);
ddc7c304
AN
6095 return -EINVAL;
6096 }
6097
6098 return 0;
6099}
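/* Illustrative "flavor" definitions as they would appear on the BPF program
 * side (struct and field names are made up): both locals are candidates for
 * a kernel 'struct fwd_info'; relos against the non-matching flavor are
 * either poisoned into dead code or guarded by existence checks.
 */
struct fwd_info___old {
	int in_dev;
} __attribute__((preserve_access_index));

struct fwd_info___new {
	int input_dev;
} __attribute__((preserve_access_index));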
6100
6101static int
28b93c64 6102bpf_object__relocate_core(struct bpf_object *obj, const char *targ_btf_path)
ddc7c304
AN
6103{
6104 const struct btf_ext_info_sec *sec;
28b93c64 6105 const struct bpf_core_relo *rec;
ddc7c304
AN
6106 const struct btf_ext_info *seg;
6107 struct hashmap_entry *entry;
6108 struct hashmap *cand_cache = NULL;
6109 struct bpf_program *prog;
ddc7c304 6110 const char *sec_name;
db2b8b06 6111 int i, err = 0, insn_idx, sec_idx;
ddc7c304 6112
28b93c64
AN
6113 if (obj->btf_ext->core_relo_info.len == 0)
6114 return 0;
6115
0f7515ca
AN
6116 if (targ_btf_path) {
6117 obj->btf_vmlinux_override = btf__parse(targ_btf_path, NULL);
6118 if (IS_ERR_OR_NULL(obj->btf_vmlinux_override)) {
6119 err = PTR_ERR(obj->btf_vmlinux_override);
6120 pr_warn("failed to parse target BTF: %d\n", err);
6121 return err;
6122 }
ddc7c304
AN
6123 }
6124
6125 cand_cache = hashmap__new(bpf_core_hash_fn, bpf_core_equal_fn, NULL);
6126 if (IS_ERR(cand_cache)) {
6127 err = PTR_ERR(cand_cache);
6128 goto out;
6129 }
6130
28b93c64 6131 seg = &obj->btf_ext->core_relo_info;
ddc7c304
AN
6132 for_each_btf_ext_sec(seg, sec) {
6133 sec_name = btf__name_by_offset(obj->btf, sec->sec_name_off);
6134 if (str_is_empty(sec_name)) {
6135 err = -EINVAL;
6136 goto out;
6137 }
db2b8b06
AN
6138 /* bpf_object's ELF is gone by now so it's not easy to find
6139 * section index by section name, but we can find *any*
 6140		 * bpf_program within the desired section and use its
6141 * prog->sec_idx to do a proper search by section index and
6142 * instruction offset
6143 */
9c82a63c
AN
6144 prog = NULL;
6145 for (i = 0; i < obj->nr_programs; i++) {
db2b8b06 6146 prog = &obj->programs[i];
52109584 6147 if (strcmp(prog->sec_name, sec_name) == 0)
9c82a63c 6148 break;
9c82a63c 6149 }
ddc7c304 6150 if (!prog) {
db2b8b06
AN
6151 pr_warn("sec '%s': failed to find a BPF program\n", sec_name);
6152 return -ENOENT;
ddc7c304 6153 }
db2b8b06 6154 sec_idx = prog->sec_idx;
ddc7c304 6155
9c0f8cbd 6156 pr_debug("sec '%s': found %d CO-RE relocations\n",
ddc7c304
AN
6157 sec_name, sec->num_info);
6158
6159 for_each_btf_ext_rec(seg, sec, i, rec) {
db2b8b06
AN
6160 insn_idx = rec->insn_off / BPF_INSN_SZ;
6161 prog = find_prog_by_sec_insn(obj, sec_idx, insn_idx);
6162 if (!prog) {
6163 pr_warn("sec '%s': failed to find program at insn #%d for CO-RE offset relocation #%d\n",
6164 sec_name, insn_idx, i);
6165 err = -EINVAL;
6166 goto out;
6167 }
47f7cf63
AN
6168 /* no need to apply CO-RE relocation if the program is
6169 * not going to be loaded
6170 */
6171 if (!prog->load)
6172 continue;
db2b8b06 6173
0f7515ca 6174 err = bpf_core_apply_relo(prog, rec, i, obj->btf, cand_cache);
ddc7c304 6175 if (err) {
be18010e 6176 pr_warn("prog '%s': relo #%d: failed to relocate: %d\n",
9c0f8cbd 6177 prog->name, i, err);
ddc7c304
AN
6178 goto out;
6179 }
6180 }
6181 }
6182
6183out:
4f33a53d 6184 /* obj->btf_vmlinux and module BTFs are freed after object load */
0f7515ca
AN
6185 btf__free(obj->btf_vmlinux_override);
6186 obj->btf_vmlinux_override = NULL;
6187
ddc7c304
AN
6188 if (!IS_ERR_OR_NULL(cand_cache)) {
6189 hashmap__for_each_entry(cand_cache, entry, i) {
6190 bpf_core_free_cands(entry->value);
6191 }
6192 hashmap__free(cand_cache);
6193 }
6194 return err;
6195}
6196
c3c55696
AN
6197/* Relocate data references within program code:
6198 * - map references;
6199 * - global variable references;
6200 * - extern references.
6201 */
48cca7e4 6202static int
c3c55696 6203bpf_object__relocate_data(struct bpf_object *obj, struct bpf_program *prog)
8a47a6c5 6204{
c3c55696 6205 int i;
8a47a6c5
WN
6206
6207 for (i = 0; i < prog->nr_reloc; i++) {
53f8dd43 6208 struct reloc_desc *relo = &prog->reloc_desc[i];
166750bc 6209 struct bpf_insn *insn = &prog->insns[relo->insn_idx];
2e33efe3 6210 struct extern_desc *ext;
8a47a6c5 6211
166750bc
AN
6212 switch (relo->type) {
6213 case RELO_LD64:
6214 insn[0].src_reg = BPF_PSEUDO_MAP_FD;
6215 insn[0].imm = obj->maps[relo->map_idx].fd;
c3c55696 6216 relo->processed = true;
166750bc
AN
6217 break;
6218 case RELO_DATA:
6219 insn[0].src_reg = BPF_PSEUDO_MAP_VALUE;
6220 insn[1].imm = insn[0].imm + relo->sym_off;
53f8dd43 6221 insn[0].imm = obj->maps[relo->map_idx].fd;
c3c55696 6222 relo->processed = true;
166750bc
AN
6223 break;
6224 case RELO_EXTERN:
2e33efe3 6225 ext = &obj->externs[relo->sym_off];
1c0c7074
AN
6226 if (ext->type == EXT_KCFG) {
6227 insn[0].src_reg = BPF_PSEUDO_MAP_VALUE;
6228 insn[0].imm = obj->maps[obj->kconfig_map_idx].fd;
6229 insn[1].imm = ext->kcfg.data_off;
6230 } else /* EXT_KSYM */ {
d370bbe1
HL
6231 if (ext->ksym.type_id) { /* typed ksyms */
6232 insn[0].src_reg = BPF_PSEUDO_BTF_ID;
284d2587
AN
6233 insn[0].imm = ext->ksym.kernel_btf_id;
6234 insn[1].imm = ext->ksym.kernel_btf_obj_fd;
d370bbe1
HL
6235 } else { /* typeless ksyms */
6236 insn[0].imm = (__u32)ext->ksym.addr;
6237 insn[1].imm = ext->ksym.addr >> 32;
6238 }
1c0c7074 6239 }
c3c55696 6240 relo->processed = true;
166750bc 6241 break;
53eddb5e
YS
6242 case RELO_SUBPROG_ADDR:
6243 insn[0].src_reg = BPF_PSEUDO_FUNC;
6244 /* will be handled as a follow up pass */
6245 break;
166750bc 6246 case RELO_CALL:
c3c55696 6247 /* will be handled as a follow up pass */
166750bc
AN
6248 break;
6249 default:
9c0f8cbd
AN
6250 pr_warn("prog '%s': relo #%d: bad relo type %d\n",
6251 prog->name, i, relo->type);
166750bc 6252 return -EINVAL;
8a47a6c5 6253 }
8a47a6c5
WN
6254 }
6255
c3c55696
AN
6256 return 0;
6257}
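/* Sketch of what the RELO_LD64 case above produces (the fd value is made
 * up): a two-insn ldimm64 whose src_reg marks it as a map reference,
 * letting the kernel translate the fd into a map pointer at load time.
 */
#include <linux/bpf.h>

struct bpf_insn ld_map_fd[2] = {
	{ .code = BPF_LD | BPF_DW | BPF_IMM, .dst_reg = BPF_REG_1,
	  .src_reg = BPF_PSEUDO_MAP_FD, .imm = 5 /* map fd */ },
	{0},	/* second half carries the upper 32 bits of the immediate (zero here) */
};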
6258
8505e870
AN
6259static int adjust_prog_btf_ext_info(const struct bpf_object *obj,
6260 const struct bpf_program *prog,
6261 const struct btf_ext_info *ext_info,
6262 void **prog_info, __u32 *prog_rec_cnt,
6263 __u32 *prog_rec_sz)
6264{
6265 void *copy_start = NULL, *copy_end = NULL;
6266 void *rec, *rec_end, *new_prog_info;
6267 const struct btf_ext_info_sec *sec;
6268 size_t old_sz, new_sz;
6269 const char *sec_name;
6270 int i, off_adj;
6271
6272 for_each_btf_ext_sec(ext_info, sec) {
6273 sec_name = btf__name_by_offset(obj->btf, sec->sec_name_off);
6274 if (!sec_name)
6275 return -EINVAL;
52109584 6276 if (strcmp(sec_name, prog->sec_name) != 0)
8505e870
AN
6277 continue;
6278
6279 for_each_btf_ext_rec(ext_info, sec, i, rec) {
6280 __u32 insn_off = *(__u32 *)rec / BPF_INSN_SZ;
6281
6282 if (insn_off < prog->sec_insn_off)
6283 continue;
6284 if (insn_off >= prog->sec_insn_off + prog->sec_insn_cnt)
6285 break;
6286
6287 if (!copy_start)
6288 copy_start = rec;
6289 copy_end = rec + ext_info->rec_size;
6290 }
6291
6292 if (!copy_start)
6293 return -ENOENT;
6294
6295 /* append func/line info of a given (sub-)program to the main
6296 * program func/line info
6297 */
8eb62958 6298 old_sz = (size_t)(*prog_rec_cnt) * ext_info->rec_size;
8505e870
AN
6299 new_sz = old_sz + (copy_end - copy_start);
6300 new_prog_info = realloc(*prog_info, new_sz);
6301 if (!new_prog_info)
6302 return -ENOMEM;
6303 *prog_info = new_prog_info;
6304 *prog_rec_cnt = new_sz / ext_info->rec_size;
6305 memcpy(new_prog_info + old_sz, copy_start, copy_end - copy_start);
6306
6307 /* Kernel instruction offsets are in units of 8-byte
6308 * instructions, while .BTF.ext instruction offsets generated
6309 * by Clang are in units of bytes. So convert Clang offsets
6310 * into kernel offsets and adjust offset according to program
6311 * relocated position.
6312 */
6313 off_adj = prog->sub_insn_off - prog->sec_insn_off;
6314 rec = new_prog_info + old_sz;
6315 rec_end = new_prog_info + new_sz;
6316 for (; rec < rec_end; rec += ext_info->rec_size) {
6317 __u32 *insn_off = rec;
6318
6319 *insn_off = *insn_off / BPF_INSN_SZ + off_adj;
6320 }
6321 *prog_rec_sz = ext_info->rec_size;
6322 return 0;
6323 }
6324
6325 return -ENOENT;
6326}
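/* Worked example of the offset conversion above (numbers are made up): a
 * .BTF.ext record at byte offset 24 within its section is insn #3; if that
 * subprog was appended at main-prog insn #100 but started at section insn
 * #2, the adjusted record points at insn 3 + (100 - 2) = 101.
 */
#include <stdio.h>

int main(void)
{
	unsigned int rec_byte_off = 24;			/* from a .BTF.ext record */
	unsigned int sub_insn_off = 100, sec_insn_off = 2;
	unsigned int insn_off = rec_byte_off / 8;	/* bytes -> insns */

	printf("%u\n", insn_off + (sub_insn_off - sec_insn_off));	/* 101 */
	return 0;
}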
6327
6328static int
6329reloc_prog_func_and_line_info(const struct bpf_object *obj,
6330 struct bpf_program *main_prog,
6331 const struct bpf_program *prog)
6332{
6333 int err;
6334
6335 /* no .BTF.ext relocation if .BTF.ext is missing or kernel doesn't
 6336	 * support func/line info
6337 */
6338 if (!obj->btf_ext || !kernel_supports(FEAT_BTF_FUNC))
6339 return 0;
6340
6341 /* only attempt func info relocation if main program's func_info
6342 * relocation was successful
6343 */
6344 if (main_prog != prog && !main_prog->func_info)
6345 goto line_info;
6346
6347 err = adjust_prog_btf_ext_info(obj, prog, &obj->btf_ext->func_info,
6348 &main_prog->func_info,
6349 &main_prog->func_info_cnt,
6350 &main_prog->func_info_rec_size);
6351 if (err) {
6352 if (err != -ENOENT) {
6353 pr_warn("prog '%s': error relocating .BTF.ext function info: %d\n",
6354 prog->name, err);
6355 return err;
6356 }
6357 if (main_prog->func_info) {
6358 /*
 6359			 * Some info has already been found, but there was a
 6360			 * problem with the last btf_ext reloc. We must error out.
6361 */
6362 pr_warn("prog '%s': missing .BTF.ext function info.\n", prog->name);
6363 return err;
6364 }
 6365		/* Failed to load the very first info; ignore the rest. */
6366 pr_warn("prog '%s': missing .BTF.ext function info for the main program, skipping all of .BTF.ext func info.\n",
6367 prog->name);
6368 }
6369
6370line_info:
6371 /* don't relocate line info if main program's relocation failed */
6372 if (main_prog != prog && !main_prog->line_info)
6373 return 0;
6374
6375 err = adjust_prog_btf_ext_info(obj, prog, &obj->btf_ext->line_info,
6376 &main_prog->line_info,
6377 &main_prog->line_info_cnt,
6378 &main_prog->line_info_rec_size);
6379 if (err) {
6380 if (err != -ENOENT) {
6381 pr_warn("prog '%s': error relocating .BTF.ext line info: %d\n",
6382 prog->name, err);
6383 return err;
6384 }
6385 if (main_prog->line_info) {
6386 /*
 6387			 * Some info has already been found, but there was a
 6388			 * problem with the last btf_ext reloc. We must error out.
6389 */
6390 pr_warn("prog '%s': missing .BTF.ext line info.\n", prog->name);
6391 return err;
6392 }
 6393		/* Failed to load the very first info; ignore the rest. */
6394 pr_warn("prog '%s': missing .BTF.ext line info for the main program, skipping all of .BTF.ext line info.\n",
6395 prog->name);
6396 }
6397 return 0;
6398}
6399
c3c55696
AN
6400static int cmp_relo_by_insn_idx(const void *key, const void *elem)
6401{
6402 size_t insn_idx = *(const size_t *)key;
6403 const struct reloc_desc *relo = elem;
6404
6405 if (insn_idx == relo->insn_idx)
6406 return 0;
6407 return insn_idx < relo->insn_idx ? -1 : 1;
6408}
6409
6410static struct reloc_desc *find_prog_insn_relo(const struct bpf_program *prog, size_t insn_idx)
6411{
6412 return bsearch(&insn_idx, prog->reloc_desc, prog->nr_reloc,
6413 sizeof(*prog->reloc_desc), cmp_relo_by_insn_idx);
6414}
6415
6416static int
6417bpf_object__reloc_code(struct bpf_object *obj, struct bpf_program *main_prog,
6418 struct bpf_program *prog)
6419{
6420 size_t sub_insn_idx, insn_idx, new_cnt;
6421 struct bpf_program *subprog;
6422 struct bpf_insn *insns, *insn;
6423 struct reloc_desc *relo;
6424 int err;
6425
6426 err = reloc_prog_func_and_line_info(obj, main_prog, prog);
6427 if (err)
6428 return err;
6429
6430 for (insn_idx = 0; insn_idx < prog->sec_insn_cnt; insn_idx++) {
6431 insn = &main_prog->insns[prog->sub_insn_off + insn_idx];
53eddb5e 6432 if (!insn_is_subprog_call(insn) && !insn_is_pseudo_func(insn))
c3c55696
AN
6433 continue;
6434
6435 relo = find_prog_insn_relo(prog, insn_idx);
53eddb5e 6436 if (relo && relo->type != RELO_CALL && relo->type != RELO_SUBPROG_ADDR) {
c3c55696
AN
6437 pr_warn("prog '%s': unexpected relo for insn #%zu, type %d\n",
6438 prog->name, insn_idx, relo->type);
6439 return -LIBBPF_ERRNO__RELOC;
6440 }
6441 if (relo) {
6442 /* sub-program instruction index is a combination of
6443 * an offset of a symbol pointed to by relocation and
6444 * call instruction's imm field; for global functions,
6445 * call always has imm = -1, but for static functions
6446 * relocation is against STT_SECTION and insn->imm
6447 * points to a start of a static function
53eddb5e
YS
6448 *
6449 * for subprog addr relocation, the relo->sym_off + insn->imm is
6450 * the byte offset in the corresponding section.
c3c55696 6451 */
53eddb5e
YS
6452 if (relo->type == RELO_CALL)
6453 sub_insn_idx = relo->sym_off / BPF_INSN_SZ + insn->imm + 1;
6454 else
6455 sub_insn_idx = (relo->sym_off + insn->imm) / BPF_INSN_SZ;
6456 } else if (insn_is_pseudo_func(insn)) {
6457 /*
6458 * RELO_SUBPROG_ADDR relo is always emitted even if both
6459 * functions are in the same section, so it shouldn't reach here.
6460 */
6461 pr_warn("prog '%s': missing subprog addr relo for insn #%zu\n",
6462 prog->name, insn_idx);
6463 return -LIBBPF_ERRNO__RELOC;
c3c55696
AN
6464 } else {
6465 /* if subprogram call is to a static function within
6466 * the same ELF section, there won't be any relocation
6467 * emitted, but it also means there is no additional
6468 * offset necessary, insns->imm is relative to
6469 * instruction's original position within the section
6470 */
6471 sub_insn_idx = prog->sec_insn_off + insn_idx + insn->imm + 1;
6472 }
6473
6474 /* we enforce that sub-programs should be in .text section */
6475 subprog = find_prog_by_sec_insn(obj, obj->efile.text_shndx, sub_insn_idx);
6476 if (!subprog) {
6477 pr_warn("prog '%s': no .text section found yet sub-program call exists\n",
6478 prog->name);
6479 return -LIBBPF_ERRNO__RELOC;
6480 }
6481
6482 /* if it's the first call instruction calling into this
6483 * subprogram (meaning this subprog hasn't been processed
 6484		 * yet) within the context of the current main program:
 6485		 *   - append it at the end of the main program's instruction block;
 6486		 *   - process it recursively, while the current program is put on hold;
 6487		 *   - if that subprogram calls some other not-yet-processed
 6488		 *   subprogram, the same thing happens recursively until
 6489		 *   there are no more unprocessed subprograms left to append
6490 * and relocate.
6491 */
6492 if (subprog->sub_insn_off == 0) {
6493 subprog->sub_insn_off = main_prog->insns_cnt;
6494
6495 new_cnt = main_prog->insns_cnt + subprog->insns_cnt;
6496 insns = libbpf_reallocarray(main_prog->insns, new_cnt, sizeof(*insns));
6497 if (!insns) {
6498 pr_warn("prog '%s': failed to realloc prog code\n", main_prog->name);
6499 return -ENOMEM;
6500 }
6501 main_prog->insns = insns;
6502 main_prog->insns_cnt = new_cnt;
6503
6504 memcpy(main_prog->insns + subprog->sub_insn_off, subprog->insns,
6505 subprog->insns_cnt * sizeof(*insns));
6506
6507 pr_debug("prog '%s': added %zu insns from sub-prog '%s'\n",
6508 main_prog->name, subprog->insns_cnt, subprog->name);
6509
6510 err = bpf_object__reloc_code(obj, main_prog, subprog);
6511 if (err)
6512 return err;
6513 }
6514
6515 /* main_prog->insns memory could have been re-allocated, so
6516 * calculate pointer again
6517 */
6518 insn = &main_prog->insns[prog->sub_insn_off + insn_idx];
6519 /* calculate correct instruction position within current main
6520 * prog; each main prog can have a different set of
6521 * subprograms appended (potentially in different order as
6522 * well), so position of any subprog can be different for
6523 * different main programs */
6524 insn->imm = subprog->sub_insn_off - (prog->sub_insn_off + insn_idx) - 1;
6525
6526 if (relo)
6527 relo->processed = true;
6528
6529 pr_debug("prog '%s': insn #%zu relocated, imm %d points to subprog '%s' (now at %zu offset)\n",
6530 prog->name, insn_idx, insn->imm, subprog->name, subprog->sub_insn_off);
6531 }
6532
6533 return 0;
6534}
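/* Worked example of the call imm fixup above (positions are made up):
 * relative call offsets are counted from the instruction *after* the call,
 * so a call at main-prog insn #10 targeting a subprog appended at insn #50
 * gets imm = 50 - 10 - 1 = 39.
 */
#include <stdio.h>

int main(void)
{
	long sub_insn_off = 50;	/* where the subprog was appended */
	long call_idx = 10;	/* absolute position of the call insn */

	printf("imm = %ld\n", sub_insn_off - call_idx - 1);	/* imm = 39 */
	return 0;
}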
6535
6536/*
6537 * Relocate sub-program calls.
6538 *
 6539 * The algorithm operates as follows. Each entry-point BPF program (referred to
 6540 * as a main prog) is processed separately. Each subprog (a non-entry function
 6541 * that can be called from either entry progs or other subprogs) gets its
 6542 * sub_insn_off reset to zero. This serves as an indicator that this subprogram
 6543 * hasn't yet been appended and relocated within the current main prog. Once it's
 6544 * relocated, sub_insn_off will point at the position within the current main prog
6545 * where given subprog was appended. This will further be used to relocate all
6546 * the call instructions jumping into this subprog.
6547 *
6548 * We start with main program and process all call instructions. If the call
6549 * is into a subprog that hasn't been processed (i.e., subprog->sub_insn_off
6550 * is zero), subprog instructions are appended at the end of main program's
6551 * instruction array. Then main program is "put on hold" while we recursively
6552 * process newly appended subprogram. If that subprogram calls into another
6553 * subprogram that hasn't been appended, new subprogram is appended again to
6554 * the *main* prog's instructions (subprog's instructions are always left
 6555 * untouched, as they need to stay in unmodified state for subsequent main progs,
 6556 * and subprog instructions are sent to the kernel only as part of a main prog) and
6557 * the process continues recursively. Once all the subprogs called from a main
6558 * prog or any of its subprogs are appended (and relocated), all their
6559 * positions within finalized instructions array are known, so it's easy to
6560 * rewrite call instructions with correct relative offsets, corresponding to
6561 * desired target subprog.
6562 *
 6563 * It's important to realize that some subprogs might not be called from a given
 6564 * main prog or any of its called/used subprogs. Those will keep their
6565 * subprog->sub_insn_off as zero at all times and won't be appended to current
6566 * main prog and won't be relocated within the context of current main prog.
6567 * They might still be used from other main progs later.
6568 *
6569 * Visually this process can be shown as below. Suppose we have two main
6570 * programs mainA and mainB and BPF object contains three subprogs: subA,
6571 * subB, and subC. mainA calls only subA, mainB calls only subC, but subA and
6572 * subC both call subB:
6573 *
6574 * +--------+ +-------+
6575 * | v v |
6576 * +--+---+ +--+-+-+ +---+--+
6577 * | subA | | subB | | subC |
6578 * +--+---+ +------+ +---+--+
6579 * ^ ^
6580 * | |
6581 * +---+-------+ +------+----+
6582 * | mainA | | mainB |
6583 * +-----------+ +-----------+
6584 *
6585 * We'll start relocating mainA, will find subA, append it and start
6586 * processing sub A recursively:
6587 *
6588 * +-----------+------+
6589 * | mainA | subA |
6590 * +-----------+------+
6591 *
6592 * At this point we notice that subB is used from subA, so we append it and
6593 * relocate (there are no further subcalls from subB):
6594 *
6595 * +-----------+------+------+
6596 * | mainA | subA | subB |
6597 * +-----------+------+------+
6598 *
6599 * At this point, we relocate subA calls, then go one level up and finish with
 6600 * relocating mainA calls. mainA is done.
6601 *
 6602 * For mainB, the process is similar but results in a different order. We start with
6603 * mainB and skip subA and subB, as mainB never calls them (at least
6604 * directly), but we see subC is needed, so we append and start processing it:
6605 *
6606 * +-----------+------+
6607 * | mainB | subC |
6608 * +-----------+------+
6609 * Now we see subC needs subB, so we go back to it, append and relocate it:
6610 *
6611 * +-----------+------+------+
6612 * | mainB | subC | subB |
6613 * +-----------+------+------+
6614 *
6615 * At this point we unwind recursion, relocate calls in subC, then in mainB.
6616 */
6617static int
6618bpf_object__relocate_calls(struct bpf_object *obj, struct bpf_program *prog)
6619{
6620 struct bpf_program *subprog;
6621 int i, j, err;
6622
c3c55696
AN
6623 /* mark all subprogs as not relocated (yet) within the context of
6624 * current main program
6625 */
6626 for (i = 0; i < obj->nr_programs; i++) {
6627 subprog = &obj->programs[i];
6628 if (!prog_is_subprog(obj, subprog))
6629 continue;
6630
6631 subprog->sub_insn_off = 0;
6632 for (j = 0; j < subprog->nr_reloc; j++)
6633 if (subprog->reloc_desc[j].type == RELO_CALL)
6634 subprog->reloc_desc[j].processed = false;
6635 }
6636
6637 err = bpf_object__reloc_code(obj, prog, prog);
6638 if (err)
6639 return err;
6640
6641
8a47a6c5
WN
6642 return 0;
6643}
6644
8a47a6c5 6645static int
ddc7c304 6646bpf_object__relocate(struct bpf_object *obj, const char *targ_btf_path)
8a47a6c5
WN
6647{
6648 struct bpf_program *prog;
6649 size_t i;
6650 int err;
6651
ddc7c304
AN
6652 if (obj->btf_ext) {
6653 err = bpf_object__relocate_core(obj, targ_btf_path);
6654 if (err) {
be18010e
KW
6655 pr_warn("failed to perform CO-RE relocations: %d\n",
6656 err);
ddc7c304
AN
6657 return err;
6658 }
6659 }
c3c55696
AN
6660 /* relocate data references first for all programs and sub-programs,
6661 * as they don't change relative to code locations, so subsequent
6662 * subprogram processing won't need to re-calculate any of them
9173cac3
AN
6663 */
6664 for (i = 0; i < obj->nr_programs; i++) {
6665 prog = &obj->programs[i];
c3c55696 6666 err = bpf_object__relocate_data(obj, prog);
9173cac3 6667 if (err) {
9c0f8cbd
AN
6668 pr_warn("prog '%s': failed to relocate data references: %d\n",
6669 prog->name, err);
9173cac3
AN
6670 return err;
6671 }
9173cac3 6672 }
c3c55696
AN
6673 /* now relocate subprogram calls and append used subprograms to main
6674 * programs; each copy of subprogram code needs to be relocated
6675 * differently for each main program, because its code location might
6676 * have changed
9173cac3 6677 */
8a47a6c5
WN
6678 for (i = 0; i < obj->nr_programs; i++) {
6679 prog = &obj->programs[i];
c3c55696
AN
6680 /* sub-program's sub-calls are relocated within the context of
6681 * its main program only
6682 */
6683 if (prog_is_subprog(obj, prog))
9173cac3 6684 continue;
8a47a6c5 6685
c3c55696 6686 err = bpf_object__relocate_calls(obj, prog);
8a47a6c5 6687 if (err) {
9c0f8cbd
AN
6688 pr_warn("prog '%s': failed to relocate calls: %d\n",
6689 prog->name, err);
8a47a6c5
WN
6690 return err;
6691 }
6692 }
c3c55696
AN
6693 /* free up relocation descriptors */
6694 for (i = 0; i < obj->nr_programs; i++) {
6695 prog = &obj->programs[i];
6696 zfree(&prog->reloc_desc);
6697 prog->nr_reloc = 0;
6698 }
8a47a6c5
WN
6699 return 0;
6700}
6701
646f02ff
AN
6702static int bpf_object__collect_st_ops_relos(struct bpf_object *obj,
6703 GElf_Shdr *shdr, Elf_Data *data);
6704
6705static int bpf_object__collect_map_relos(struct bpf_object *obj,
6706 GElf_Shdr *shdr, Elf_Data *data)
6707{
15728ad3
AN
6708 const int bpf_ptr_sz = 8, host_ptr_sz = sizeof(void *);
6709 int i, j, nrels, new_sz;
063e6881 6710 const struct btf_var_secinfo *vi = NULL;
646f02ff 6711 const struct btf_type *sec, *var, *def;
3168c158 6712 struct bpf_map *map = NULL, *targ_map;
646f02ff 6713 const struct btf_member *member;
646f02ff
AN
6714 const char *name, *mname;
6715 Elf_Data *symbols;
6716 unsigned int moff;
6717 GElf_Sym sym;
6718 GElf_Rel rel;
6719 void *tmp;
6720
6721 if (!obj->efile.btf_maps_sec_btf_id || !obj->btf)
6722 return -EINVAL;
6723 sec = btf__type_by_id(obj->btf, obj->efile.btf_maps_sec_btf_id);
6724 if (!sec)
6725 return -EINVAL;
6726
6727 symbols = obj->efile.symbols;
6728 nrels = shdr->sh_size / shdr->sh_entsize;
6729 for (i = 0; i < nrels; i++) {
6730 if (!gelf_getrel(data, i, &rel)) {
6731 pr_warn(".maps relo #%d: failed to get ELF relo\n", i);
6732 return -LIBBPF_ERRNO__FORMAT;
6733 }
6734 if (!gelf_getsym(symbols, GELF_R_SYM(rel.r_info), &sym)) {
6735 pr_warn(".maps relo #%d: symbol %zx not found\n",
6736 i, (size_t)GELF_R_SYM(rel.r_info));
6737 return -LIBBPF_ERRNO__FORMAT;
6738 }
88a82120 6739 name = elf_sym_str(obj, sym.st_name) ?: "<?>";
646f02ff
AN
6740 if (sym.st_shndx != obj->efile.btf_maps_shndx) {
6741 pr_warn(".maps relo #%d: '%s' isn't a BTF-defined map\n",
6742 i, name);
6743 return -LIBBPF_ERRNO__RELOC;
6744 }
6745
6746 pr_debug(".maps relo #%d: for %zd value %zd rel.r_offset %zu name %d ('%s')\n",
6747 i, (ssize_t)(rel.r_info >> 32), (size_t)sym.st_value,
6748 (size_t)rel.r_offset, sym.st_name, name);
6749
6750 for (j = 0; j < obj->nr_maps; j++) {
6751 map = &obj->maps[j];
6752 if (map->sec_idx != obj->efile.btf_maps_shndx)
6753 continue;
6754
6755 vi = btf_var_secinfos(sec) + map->btf_var_idx;
6756 if (vi->offset <= rel.r_offset &&
15728ad3 6757 rel.r_offset + bpf_ptr_sz <= vi->offset + vi->size)
646f02ff
AN
6758 break;
6759 }
6760 if (j == obj->nr_maps) {
6761 pr_warn(".maps relo #%d: cannot find map '%s' at rel.r_offset %zu\n",
6762 i, name, (size_t)rel.r_offset);
6763 return -EINVAL;
6764 }
6765
6766 if (!bpf_map_type__is_map_in_map(map->def.type))
6767 return -EINVAL;
6768 if (map->def.type == BPF_MAP_TYPE_HASH_OF_MAPS &&
6769 map->def.key_size != sizeof(int)) {
6770 pr_warn(".maps relo #%d: hash-of-maps '%s' should have key size %zu.\n",
6771 i, map->name, sizeof(int));
6772 return -EINVAL;
6773 }
6774
6775 targ_map = bpf_object__find_map_by_name(obj, name);
6776 if (!targ_map)
6777 return -ESRCH;
6778
6779 var = btf__type_by_id(obj->btf, vi->type);
6780 def = skip_mods_and_typedefs(obj->btf, var->type, NULL);
6781 if (btf_vlen(def) == 0)
6782 return -EINVAL;
6783 member = btf_members(def) + btf_vlen(def) - 1;
6784 mname = btf__name_by_offset(obj->btf, member->name_off);
6785 if (strcmp(mname, "values"))
6786 return -EINVAL;
6787
6788 moff = btf_member_bit_offset(def, btf_vlen(def) - 1) / 8;
6789 if (rel.r_offset - vi->offset < moff)
6790 return -EINVAL;
6791
6792 moff = rel.r_offset - vi->offset - moff;
15728ad3
AN
6793 /* here we use BPF pointer size, which is always 64 bit, as we
6794 * are parsing ELF that was built for BPF target
6795 */
6796 if (moff % bpf_ptr_sz)
646f02ff 6797 return -EINVAL;
15728ad3 6798 moff /= bpf_ptr_sz;
646f02ff
AN
6799 if (moff >= map->init_slots_sz) {
6800 new_sz = moff + 1;
029258d7 6801 tmp = libbpf_reallocarray(map->init_slots, new_sz, host_ptr_sz);
646f02ff
AN
6802 if (!tmp)
6803 return -ENOMEM;
6804 map->init_slots = tmp;
6805 memset(map->init_slots + map->init_slots_sz, 0,
15728ad3 6806 (new_sz - map->init_slots_sz) * host_ptr_sz);
646f02ff
AN
6807 map->init_slots_sz = new_sz;
6808 }
6809 map->init_slots[moff] = targ_map;
6810
6811 pr_debug(".maps relo #%d: map '%s' slot [%d] points to map '%s'\n",
6812 i, map->name, moff, name);
6813 }
6814
6815 return 0;
6816}
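/* BPF-program-side sketch of the declarations these .maps relocations come
 * from (map names are hypothetical; assumes the __uint/__type/__array/SEC
 * macros from bpf_helpers.h): each initialized 'values' slot turns into one
 * init_slots entry filled in above.
 */
#include <linux/bpf.h>
#include <bpf/bpf_helpers.h>

struct inner_map {
	__uint(type, BPF_MAP_TYPE_ARRAY);
	__uint(max_entries, 1);
	__type(key, int);
	__type(value, int);
} inner_a SEC(".maps");

struct {
	__uint(type, BPF_MAP_TYPE_ARRAY_OF_MAPS);
	__uint(max_entries, 4);
	__type(key, int);
	__array(values, struct inner_map);
} outer SEC(".maps") = {
	.values = { [0] = &inner_a },	/* slot 0 -> inner_a's fd at load time */
};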
590a0088 6817
c3c55696 6818static int cmp_relocs(const void *_a, const void *_b)
34090915 6819{
c3c55696
AN
6820 const struct reloc_desc *a = _a;
6821 const struct reloc_desc *b = _b;
34090915 6822
c3c55696
AN
6823 if (a->insn_idx != b->insn_idx)
6824 return a->insn_idx < b->insn_idx ? -1 : 1;
6825
6826 /* no two relocations should have the same insn_idx, but ... */
6827 if (a->type != b->type)
6828 return a->type < b->type ? -1 : 1;
6829
6830 return 0;
6831}
6832
6833static int bpf_object__collect_relos(struct bpf_object *obj)
6834{
6835 int i, err;
34090915 6836
1f8e2bcb
AN
6837 for (i = 0; i < obj->efile.nr_reloc_sects; i++) {
6838 GElf_Shdr *shdr = &obj->efile.reloc_sects[i].shdr;
6839 Elf_Data *data = obj->efile.reloc_sects[i].data;
34090915 6840 int idx = shdr->sh_info;
6841
6842 if (shdr->sh_type != SHT_REL) {
be18010e 6843 pr_warn("internal error at %d\n", __LINE__);
6371ca3b 6844 return -LIBBPF_ERRNO__INTERNAL;
6845 }
6846
c3c55696 6847 if (idx == obj->efile.st_ops_shndx)
646f02ff 6848 err = bpf_object__collect_st_ops_relos(obj, shdr, data);
c3c55696 6849 else if (idx == obj->efile.btf_maps_shndx)
646f02ff 6850 err = bpf_object__collect_map_relos(obj, shdr, data);
6851 else
6852 err = bpf_object__collect_prog_relos(obj, shdr, data);
34090915 6853 if (err)
6371ca3b 6854 return err;
34090915 6855 }
6856
6857 for (i = 0; i < obj->nr_programs; i++) {
6858 struct bpf_program *p = &obj->programs[i];
6859
6860 if (!p->nr_reloc)
6861 continue;
6862
6863 qsort(p->reloc_desc, p->nr_reloc, sizeof(*p->reloc_desc), cmp_relocs);
6864 }
6865 return 0;
6866}
6867
6868static bool insn_is_helper_call(struct bpf_insn *insn, enum bpf_func_id *func_id)
6869{
9b2f6fec 6870 if (BPF_CLASS(insn->code) == BPF_JMP &&
6871 BPF_OP(insn->code) == BPF_CALL &&
6872 BPF_SRC(insn->code) == BPF_K &&
6873 insn->src_reg == 0 &&
6874 insn->dst_reg == 0) {
6875 *func_id = insn->imm;
6876 return true;
6877 }
6878 return false;
6879}
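/* For reference, the tests above match the canonical encoding of a BPF
 * helper call per the BPF instruction set; e.g. a call to
 * bpf_probe_read_kernel() is, roughly:
 *
 *	(struct bpf_insn) {
 *		.code    = BPF_JMP | BPF_CALL,
 *		.dst_reg = 0,
 *		.src_reg = 0,	// non-zero would mean e.g. BPF_PSEUDO_CALL
 *		.off     = 0,
 *		.imm     = BPF_FUNC_probe_read_kernel,	// helper ID
 *	}
 */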
6880
6881static int bpf_object__sanitize_prog(struct bpf_object* obj, struct bpf_program *prog)
6882{
6883 struct bpf_insn *insn = prog->insns;
6884 enum bpf_func_id func_id;
6885 int i;
6886
6887 for (i = 0; i < prog->insns_cnt; i++, insn++) {
6888 if (!insn_is_helper_call(insn, &func_id))
6889 continue;
6890
6891 /* on kernels that don't yet support
6892 * bpf_probe_read_{kernel,user}[_str] helpers, fall back
6893 * to bpf_probe_read() which works well for old kernels
6894 */
6895 switch (func_id) {
6896 case BPF_FUNC_probe_read_kernel:
6897 case BPF_FUNC_probe_read_user:
6898 if (!kernel_supports(FEAT_PROBE_READ_KERN))
6899 insn->imm = BPF_FUNC_probe_read;
6900 break;
6901 case BPF_FUNC_probe_read_kernel_str:
6902 case BPF_FUNC_probe_read_user_str:
6903 if (!kernel_supports(FEAT_PROBE_READ_KERN))
6904 insn->imm = BPF_FUNC_probe_read_str;
6905 break;
6906 default:
6907 break;
6908 }
6909 }
6910 return 0;
6911}
6912
55cffde2 6913static int
2993e051 6914load_program(struct bpf_program *prog, struct bpf_insn *insns, int insns_cnt,
f0187f0b 6915 char *license, __u32 kern_version, int *pfd)
55cffde2 6916{
6aef10a4 6917 struct bpf_prog_load_params load_attr = {};
1ce6a9fc 6918 char *cp, errmsg[STRERR_BUFSIZE];
6919 size_t log_buf_size = 0;
6920 char *log_buf = NULL;
5d01ab7b 6921 int btf_fd, ret;
55cffde2 6922
6923 if (prog->type == BPF_PROG_TYPE_UNSPEC) {
6924 /*
6925 * The program type must be set. Most likely we couldn't find a proper
6926 * section definition at load time, and thus we didn't infer the type.
6927 */
6928 pr_warn("prog '%s': missing BPF prog type, check ELF section name '%s'\n",
6929 prog->name, prog->sec_name);
6930 return -EINVAL;
6931 }
6932
6933 if (!insns || !insns_cnt)
6934 return -EINVAL;
6935
2993e051 6936 load_attr.prog_type = prog->type;
25498a19 6937 /* old kernels might not support specifying expected_attach_type */
47b6cb4d 6938 if (!kernel_supports(FEAT_EXP_ATTACH_TYPE) && prog->sec_def &&
6939 prog->sec_def->is_exp_attach_type_optional)
6940 load_attr.expected_attach_type = 0;
6941 else
6942 load_attr.expected_attach_type = prog->expected_attach_type;
47b6cb4d 6943 if (kernel_supports(FEAT_PROG_NAME))
5b32a23e 6944 load_attr.name = prog->name;
d7be143b 6945 load_attr.insns = insns;
6aef10a4 6946 load_attr.insn_cnt = insns_cnt;
d7be143b 6947 load_attr.license = license;
6aef10a4 6948 load_attr.attach_btf_id = prog->attach_btf_id;
91abb4a6 6949 if (prog->attach_prog_fd)
e7bf94db 6950 load_attr.attach_prog_fd = prog->attach_prog_fd;
6951 else
6952 load_attr.attach_btf_obj_fd = prog->attach_btf_obj_fd;
6954 load_attr.kern_version = kern_version;
6955 load_attr.prog_ifindex = prog->prog_ifindex;
6956
6957 /* specify func_info/line_info only if kernel supports them */
6958 btf_fd = bpf_object__btf_fd(prog->obj);
47b6cb4d 6959 if (btf_fd >= 0 && kernel_supports(FEAT_BTF_FUNC)) {
6960 load_attr.prog_btf_fd = btf_fd;
6961 load_attr.func_info = prog->func_info;
6962 load_attr.func_info_rec_size = prog->func_info_rec_size;
6963 load_attr.func_info_cnt = prog->func_info_cnt;
6964 load_attr.line_info = prog->line_info;
6965 load_attr.line_info_rec_size = prog->line_info_rec_size;
6966 load_attr.line_info_cnt = prog->line_info_cnt;
6967 }
da11b417 6968 load_attr.log_level = prog->log_level;
04656198 6969 load_attr.prog_flags = prog->prog_flags;
55cffde2 6970
da11b417 6971retry_load:
6972 if (log_buf_size) {
6973 log_buf = malloc(log_buf_size);
6974 if (!log_buf)
6975 return -ENOMEM;
6976
6977 *log_buf = 0;
6978 }
55cffde2 6979
6980 load_attr.log_buf = log_buf;
6981 load_attr.log_buf_sz = log_buf_size;
6982 ret = libbpf__bpf_prog_load(&load_attr);
6983
6984 if (ret >= 0) {
8395f320 6985 if (log_buf && load_attr.log_level)
da11b417 6986 pr_debug("verifier log:\n%s", log_buf);
6987
6988 if (prog->obj->rodata_map_idx >= 0 &&
6989 kernel_supports(FEAT_PROG_BIND_MAP)) {
6990 struct bpf_map *rodata_map =
6991 &prog->obj->maps[prog->obj->rodata_map_idx];
6992
6993 if (bpf_prog_bind_map(ret, bpf_map__fd(rodata_map), NULL)) {
6994 cp = libbpf_strerror_r(errno, errmsg, sizeof(errmsg));
6995 pr_warn("prog '%s': failed to bind .rodata map: %s\n",
6996 prog->name, cp);
6997 /* Don't fail hard if can't bind rodata. */
6998 }
6999 }
7000
7001 *pfd = ret;
7002 ret = 0;
7003 goto out;
7004 }
7005
7006 if (!log_buf || errno == ENOSPC) {
7007 log_buf_size = max((size_t)BPF_LOG_BUF_SIZE,
7008 log_buf_size << 1);
7009
7010 free(log_buf);
7011 goto retry_load;
7012 }
ef05afa6 7013 ret = errno ? -errno : -LIBBPF_ERRNO__LOAD;
24d6a808 7014 cp = libbpf_strerror_r(errno, errmsg, sizeof(errmsg));
be18010e 7015 pr_warn("load bpf program failed: %s\n", cp);
dc3a2d25 7016 pr_perm_msg(ret);
55cffde2 7017
7018 if (log_buf && log_buf[0] != '\0') {
7019 ret = -LIBBPF_ERRNO__VERIFY;
7020 pr_warn("-- BEGIN DUMP LOG ---\n");
7021 pr_warn("\n%s\n", log_buf);
7022 pr_warn("-- END LOG --\n");
6aef10a4 7023 } else if (load_attr.insn_cnt >= BPF_MAXINSNS) {
be18010e 7024 pr_warn("Program too large (%zu insns), at most %d insns\n",
6aef10a4 7025 load_attr.insn_cnt, BPF_MAXINSNS);
705fa219 7026 ret = -LIBBPF_ERRNO__PROG2BIG;
4f33ddb4 7027 } else if (load_attr.prog_type != BPF_PROG_TYPE_KPROBE) {
705fa219 7028 /* Wrong program type? */
4f33ddb4 7029 int fd;
705fa219 7030
7031 load_attr.prog_type = BPF_PROG_TYPE_KPROBE;
7032 load_attr.expected_attach_type = 0;
7033 load_attr.log_buf = NULL;
7034 load_attr.log_buf_sz = 0;
7035 fd = libbpf__bpf_prog_load(&load_attr);
7036 if (fd >= 0) {
7037 close(fd);
7038 ret = -LIBBPF_ERRNO__PROGTYPE;
7039 goto out;
7040 }
7041 }
7042
7043out:
7044 free(log_buf);
7045 return ret;
7046}
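/* The retry_load logic above reduces to this pattern (hedged sketch;
 * load_attr setup elided): start without a log buffer, and on failure
 * allocate or double it so the final attempt captures the whole
 * verifier log.
 *
 *	size_t sz = 0;
 *	char *buf = NULL;
 *	int fd;
 *
 *	for (;;) {
 *		load_attr.log_buf = buf;
 *		load_attr.log_buf_sz = sz;
 *		fd = libbpf__bpf_prog_load(&load_attr);
 *		if (fd >= 0 || (buf && errno != ENOSPC))
 *			break;
 *		sz = max((size_t)BPF_LOG_BUF_SIZE, sz << 1);
 *		free(buf);
 *		buf = malloc(sz);
 *		if (!buf)
 *			break;
 *	}
 */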
7047
91abb4a6 7048static int libbpf_find_attach_btf_id(struct bpf_program *prog, int *btf_obj_fd, int *btf_type_id);
7049
7050int bpf_program__load(struct bpf_program *prog, char *license, __u32 kern_ver)
55cffde2 7051{
91abb4a6 7052 int err = 0, fd, i;
13acb508 7053
d9297581 7054 if (prog->obj->loaded) {
52109584 7055 pr_warn("prog '%s': can't load after object was loaded\n", prog->name);
7056 return -EINVAL;
7057 }
7058
ff26ce5c 7059 if ((prog->type == BPF_PROG_TYPE_TRACING ||
1e092a03 7060 prog->type == BPF_PROG_TYPE_LSM ||
ff26ce5c 7061 prog->type == BPF_PROG_TYPE_EXT) && !prog->attach_btf_id) {
7062 int btf_obj_fd = 0, btf_type_id = 0;
7063
7064 err = libbpf_find_attach_btf_id(prog, &btf_obj_fd, &btf_type_id);
7065 if (err)
7066 return err;
7067
7068 prog->attach_btf_obj_fd = btf_obj_fd;
7069 prog->attach_btf_id = btf_type_id;
13acb508 7070 }
55cffde2 7071
7072 if (prog->instances.nr < 0 || !prog->instances.fds) {
7073 if (prog->preprocessor) {
be18010e 7074 pr_warn("Internal error: can't load program '%s'\n",
52109584 7075 prog->name);
7076 return -LIBBPF_ERRNO__INTERNAL;
7077 }
55cffde2 7078
7079 prog->instances.fds = malloc(sizeof(int));
7080 if (!prog->instances.fds) {
be18010e 7081 pr_warn("Not enough memory for BPF fds\n");
7082 return -ENOMEM;
7083 }
7084 prog->instances.nr = 1;
7085 prog->instances.fds[0] = -1;
7086 }
7087
7088 if (!prog->preprocessor) {
7089 if (prog->instances.nr != 1) {
7090 pr_warn("prog '%s': inconsistent nr(%d) != 1\n",
7091 prog->name, prog->instances.nr);
b580563e 7092 }
2993e051 7093 err = load_program(prog, prog->insns, prog->insns_cnt,
13acb508 7094 license, kern_ver, &fd);
7095 if (!err)
7096 prog->instances.fds[0] = fd;
7097 goto out;
7098 }
7099
7100 for (i = 0; i < prog->instances.nr; i++) {
7101 struct bpf_prog_prep_result result;
7102 bpf_program_prep_t preprocessor = prog->preprocessor;
7103
1ad9cbb8 7104 memset(&result, 0, sizeof(result));
7105 err = preprocessor(prog, i, prog->insns,
7106 prog->insns_cnt, &result);
7107 if (err) {
be18010e 7108 pr_warn("Preprocessing the %dth instance of program '%s' failed\n",
52109584 7109 i, prog->name);
7110 goto out;
7111 }
7112
7113 if (!result.new_insn_ptr || !result.new_insn_cnt) {
7114 pr_debug("Skip loading the %dth instance of program '%s'\n",
52109584 7115 i, prog->name);
7116 prog->instances.fds[i] = -1;
7117 if (result.pfd)
7118 *result.pfd = -1;
7119 continue;
7120 }
7121
2993e051 7122 err = load_program(prog, result.new_insn_ptr,
13acb508 7123 result.new_insn_cnt, license, kern_ver, &fd);
b580563e 7124 if (err) {
be18010e 7125 pr_warn("Loading the %dth instance of program '%s' failed\n",
52109584 7126 i, prog->name);
7127 goto out;
7128 }
7129
7130 if (result.pfd)
7131 *result.pfd = fd;
7132 prog->instances.fds[i] = fd;
7133 }
7134out:
55cffde2 7135 if (err)
52109584 7136 pr_warn("failed to load program '%s'\n", prog->name);
7137 zfree(&prog->insns);
7138 prog->insns_cnt = 0;
7139 return err;
7140}
7141
7142static int
60276f98 7143bpf_object__load_progs(struct bpf_object *obj, int log_level)
55cffde2 7144{
d9297581 7145 struct bpf_program *prog;
7146 size_t i;
7147 int err;
7148
7149 for (i = 0; i < obj->nr_programs; i++) {
7150 prog = &obj->programs[i];
7151 err = bpf_object__sanitize_prog(obj, prog);
7152 if (err)
7153 return err;
7154 }
7155
55cffde2 7156 for (i = 0; i < obj->nr_programs; i++) {
d9297581 7157 prog = &obj->programs[i];
c3c55696 7158 if (prog_is_subprog(obj, prog))
48cca7e4 7159 continue;
d9297581 7160 if (!prog->load) {
9c0f8cbd 7161 pr_debug("prog '%s': skipped loading\n", prog->name);
7162 continue;
7163 }
7164 prog->log_level |= log_level;
7165 err = bpf_program__load(prog, obj->license, obj->kern_version);
7166 if (err)
7167 return err;
7168 }
7169 return 0;
7170}
7171
7172static const struct bpf_sec_def *find_sec_def(const char *sec_name);
7173
1a5e3fb1 7174static struct bpf_object *
5e61f270 7175__bpf_object__open(const char *path, const void *obj_buf, size_t obj_buf_sz,
01af3bf0 7176 const struct bpf_object_open_opts *opts)
1a5e3fb1 7177{
8601fd42 7178 const char *obj_name, *kconfig;
dd4436bb 7179 struct bpf_program *prog;
1a5e3fb1 7180 struct bpf_object *obj;
291ee02b 7181 char tmp_name[64];
6371ca3b 7182 int err;
7183
7184 if (elf_version(EV_CURRENT) == EV_NONE) {
7185 pr_warn("failed to init libelf for %s\n",
7186 path ? : "(mem buf)");
6371ca3b 7187 return ERR_PTR(-LIBBPF_ERRNO__LIBELF);
7188 }
7189
7190 if (!OPTS_VALID(opts, bpf_object_open_opts))
7191 return ERR_PTR(-EINVAL);
7192
1aace10f 7193 obj_name = OPTS_GET(opts, object_name, NULL);
7194 if (obj_buf) {
7195 if (!obj_name) {
7196 snprintf(tmp_name, sizeof(tmp_name), "%lx-%lx",
7197 (unsigned long)obj_buf,
7198 (unsigned long)obj_buf_sz);
7199 obj_name = tmp_name;
7200 }
7201 path = obj_name;
7202 pr_debug("loading object '%s' from buffer\n", obj_name);
7203 }
7204
2ce8450e 7205 obj = bpf_object__new(path, obj_buf, obj_buf_sz, obj_name);
7206 if (IS_ERR(obj))
7207 return obj;
1a5e3fb1 7208
7209 kconfig = OPTS_GET(opts, kconfig, NULL);
7210 if (kconfig) {
7211 obj->kconfig = strdup(kconfig);
7212 if (!obj->kconfig)
7213 return ERR_PTR(-ENOMEM);
7214 }
291ee02b 7215
7216 err = bpf_object__elf_init(obj);
7217 err = err ? : bpf_object__check_endianness(obj);
7218 err = err ? : bpf_object__elf_collect(obj);
7219 err = err ? : bpf_object__collect_externs(obj);
7220 err = err ? : bpf_object__finalize_btf(obj);
0d13bfce 7221 err = err ? : bpf_object__init_maps(obj, opts);
c3c55696 7222 err = err ? : bpf_object__collect_relos(obj);
7223 if (err)
7224 goto out;
1a5e3fb1 7225 bpf_object__elf_finish(obj);
7226
7227 bpf_object__for_each_program(prog, obj) {
52109584 7228 prog->sec_def = find_sec_def(prog->sec_name);
80b2b5c3 7229 if (!prog->sec_def) {
dd4436bb 7230 /* couldn't guess, but user might manually specify */
7231 pr_debug("prog '%s': unrecognized ELF section name '%s'\n",
7232 prog->name, prog->sec_name);
dd4436bb 7233 continue;
80b2b5c3 7234 }
dd4436bb 7235
7236 if (prog->sec_def->is_sleepable)
7237 prog->prog_flags |= BPF_F_SLEEPABLE;
7238 bpf_program__set_type(prog, prog->sec_def->prog_type);
7239 bpf_program__set_expected_attach_type(prog,
7240 prog->sec_def->expected_attach_type);
7241
7242 if (prog->sec_def->prog_type == BPF_PROG_TYPE_TRACING ||
7243 prog->sec_def->prog_type == BPF_PROG_TYPE_EXT)
166750bc 7244 prog->attach_prog_fd = OPTS_GET(opts, attach_prog_fd, 0);
7245 }
7246
7247 return obj;
7248out:
7249 bpf_object__close(obj);
6371ca3b 7250 return ERR_PTR(err);
7251}
7252
7253static struct bpf_object *
7254__bpf_object__open_xattr(struct bpf_object_open_attr *attr, int flags)
1a5e3fb1 7255{
e00aca65 7256 DECLARE_LIBBPF_OPTS(bpf_object_open_opts, opts,
7257 .relaxed_maps = flags & MAPS_RELAX_COMPAT,
7258 );
7259
1a5e3fb1 7260 /* param validation */
07f2d4ea 7261 if (!attr->file)
7262 return NULL;
7263
07f2d4ea 7264 pr_debug("loading %s\n", attr->file);
291ee02b 7265 return __bpf_object__open(attr->file, NULL, 0, &opts);
7266}
7267
7268struct bpf_object *bpf_object__open_xattr(struct bpf_object_open_attr *attr)
7269{
7270 return __bpf_object__open_xattr(attr, 0);
7271}
7272
7273struct bpf_object *bpf_object__open(const char *path)
7274{
7275 struct bpf_object_open_attr attr = {
7276 .file = path,
7277 .prog_type = BPF_PROG_TYPE_UNSPEC,
7278 };
1a5e3fb1 7279
07f2d4ea 7280 return bpf_object__open_xattr(&attr);
7281}
7282
2ce8450e 7283struct bpf_object *
01af3bf0 7284bpf_object__open_file(const char *path, const struct bpf_object_open_opts *opts)
2ce8450e 7285{
7286 if (!path)
7287 return ERR_PTR(-EINVAL);
7288
7289 pr_debug("loading %s\n", path);
7290
291ee02b 7291 return __bpf_object__open(path, NULL, 0, opts);
7292}
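/* Caller-side sketch of the opts-based open API; "prog.bpf.o" and the
 * option values are illustrative, not from this file:
 *
 *	DECLARE_LIBBPF_OPTS(bpf_object_open_opts, opts,
 *		.object_name = "my_obj",
 *		.kconfig = "CONFIG_MY_FEATURE=y",
 *	);
 *	struct bpf_object *obj;
 *
 *	obj = bpf_object__open_file("prog.bpf.o", &opts);
 *	if (IS_ERR(obj))
 *		return PTR_ERR(obj);
 */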
7293
7294struct bpf_object *
7295bpf_object__open_mem(const void *obj_buf, size_t obj_buf_sz,
01af3bf0 7296 const struct bpf_object_open_opts *opts)
6c956392 7297{
7298 if (!obj_buf || obj_buf_sz == 0)
7299 return ERR_PTR(-EINVAL);
6c956392 7300
291ee02b 7301 return __bpf_object__open(NULL, obj_buf, obj_buf_sz, opts);
7302}
7303
7304struct bpf_object *
7305bpf_object__open_buffer(const void *obj_buf, size_t obj_buf_sz,
7306 const char *name)
7307{
e00aca65 7308 DECLARE_LIBBPF_OPTS(bpf_object_open_opts, opts,
7309 .object_name = name,
7310 /* wrong default, but backwards-compatible */
7311 .relaxed_maps = true,
7312 );
7313
7314 /* returning NULL is wrong, but backwards-compatible */
7315 if (!obj_buf || obj_buf_sz == 0)
7316 return NULL;
6c956392 7317
2ce8450e 7318 return bpf_object__open_mem(obj_buf, obj_buf_sz, &opts);
7319}
7320
7321int bpf_object__unload(struct bpf_object *obj)
7322{
7323 size_t i;
7324
7325 if (!obj)
7326 return -EINVAL;
7327
590a0088 7328 for (i = 0; i < obj->nr_maps; i++) {
9d759a9b 7329 zclose(obj->maps[i].fd);
7330 if (obj->maps[i].st_ops)
7331 zfree(&obj->maps[i].st_ops->kern_vdata);
7332 }
52d3352e 7333
7334 for (i = 0; i < obj->nr_programs; i++)
7335 bpf_program__unload(&obj->programs[i]);
7336
7337 return 0;
7338}
7339
7340static int bpf_object__sanitize_maps(struct bpf_object *obj)
7341{
7342 struct bpf_map *m;
7343
7344 bpf_object__for_each_map(m, obj) {
7345 if (!bpf_map__is_internal(m))
7346 continue;
47b6cb4d 7347 if (!kernel_supports(FEAT_GLOBAL_DATA)) {
7348 pr_warn("kernel doesn't support global data\n");
7349 return -ENOTSUP;
7350 }
47b6cb4d 7351 if (!kernel_supports(FEAT_ARRAY_MMAP))
7352 m->def.map_flags ^= BPF_F_MMAPABLE;
7353 }
7354
7355 return 0;
7356}
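/* Note on the XOR above: internal maps (.data/.rodata/.bss/.kconfig) are
 * always created with BPF_F_MMAPABLE set, so on kernels without
 * FEAT_ARRAY_MMAP the XOR effectively clears that flag before the map is
 * created.
 */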
7357
7358static int bpf_object__read_kallsyms_file(struct bpf_object *obj)
7359{
7360 char sym_type, sym_name[500];
7361 unsigned long long sym_addr;
7362 struct extern_desc *ext;
7363 int ret, err = 0;
7364 FILE *f;
7365
7366 f = fopen("/proc/kallsyms", "r");
7367 if (!f) {
7368 err = -errno;
7369 pr_warn("failed to open /proc/kallsyms: %d\n", err);
7370 return err;
7371 }
7372
7373 while (true) {
7374 ret = fscanf(f, "%llx %c %499s%*[^\n]\n",
7375 &sym_addr, &sym_type, sym_name);
7376 if (ret == EOF && feof(f))
7377 break;
7378 if (ret != 3) {
135c783f 7379 pr_warn("failed to read kallsyms entry: %d\n", ret);
7380 err = -EINVAL;
7381 goto out;
7382 }
7383
7384 ext = find_extern_by_name(obj, sym_name);
7385 if (!ext || ext->type != EXT_KSYM)
7386 continue;
7387
7388 if (ext->is_set && ext->ksym.addr != sym_addr) {
7389 pr_warn("extern (ksym) '%s' resolution is ambiguous: 0x%llx or 0x%llx\n",
7390 sym_name, ext->ksym.addr, sym_addr);
7391 err = -EINVAL;
7392 goto out;
7393 }
7394 if (!ext->is_set) {
7395 ext->is_set = true;
7396 ext->ksym.addr = sym_addr;
7397 pr_debug("extern (ksym) %s=0x%llx\n", sym_name, sym_addr);
7398 }
7399 }
7400
7401out:
7402 fclose(f);
7403 return err;
7404}
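/* Shape of the /proc/kallsyms lines parsed above (addresses are
 * illustrative):
 *
 *	ffffffff81000000 T startup_64
 *	ffffffffc0123000 t helper_fn	[some_module]
 *
 * The fscanf() format reads the address, the one-character symbol type
 * and up to 499 bytes of name; "%*[^\n]" discards any trailing module
 * tag so it never pollutes sym_name.
 */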
7405
7406static int bpf_object__resolve_ksyms_btf_id(struct bpf_object *obj)
7407{
7408 struct extern_desc *ext;
7409 struct btf *btf;
7410 int i, j, id, btf_fd, err;
7411
7412 for (i = 0; i < obj->nr_extern; i++) {
7413 const struct btf_type *targ_var, *targ_type;
7414 __u32 targ_type_id, local_type_id;
7415 const char *targ_var_name;
7416 int ret;
7417
7418 ext = &obj->externs[i];
7419 if (ext->type != EXT_KSYM || !ext->ksym.type_id)
7420 continue;
7421
7422 btf = obj->btf_vmlinux;
7423 btf_fd = 0;
7424 id = btf__find_by_name_kind(btf, ext->name, BTF_KIND_VAR);
7425 if (id == -ENOENT) {
7426 err = load_module_btfs(obj);
7427 if (err)
7428 return err;
7429
7430 for (j = 0; j < obj->btf_module_cnt; j++) {
7431 btf = obj->btf_modules[j].btf;
7432 /* we assume module BTF FD is always >0 */
7433 btf_fd = obj->btf_modules[j].fd;
7434 id = btf__find_by_name_kind(btf, ext->name, BTF_KIND_VAR);
7435 if (id != -ENOENT)
7436 break;
7437 }
7438 }
d370bbe1 7439 if (id <= 0) {
284d2587 7440 pr_warn("extern (ksym) '%s': failed to find BTF ID in kernel BTF(s).\n",
7441 ext->name);
7442 return -ESRCH;
7443 }
7444
7445 /* find local type_id */
7446 local_type_id = ext->ksym.type_id;
7447
7448 /* find target type_id */
7449 targ_var = btf__type_by_id(btf, id);
7450 targ_var_name = btf__name_by_offset(btf, targ_var->name_off);
7451 targ_type = skip_mods_and_typedefs(btf, targ_var->type, &targ_type_id);
7452
7453 ret = bpf_core_types_are_compat(obj->btf, local_type_id,
284d2587 7454 btf, targ_type_id);
7455 if (ret <= 0) {
7456 const struct btf_type *local_type;
7457 const char *targ_name, *local_name;
7458
7459 local_type = btf__type_by_id(obj->btf, local_type_id);
7460 local_name = btf__name_by_offset(obj->btf, local_type->name_off);
7461 targ_name = btf__name_by_offset(btf, targ_type->name_off);
7462
7463 pr_warn("extern (ksym) '%s': incompatible types, expected [%d] %s %s, but kernel has [%d] %s %s\n",
7464 ext->name, local_type_id,
7465 btf_kind_str(local_type), local_name, targ_type_id,
7466 btf_kind_str(targ_type), targ_name);
7467 return -EINVAL;
7468 }
7469
7470 ext->is_set = true;
7471 ext->ksym.kernel_btf_obj_fd = btf_fd;
7472 ext->ksym.kernel_btf_id = id;
7473 pr_debug("extern (ksym) '%s': resolved to [%d] %s %s\n",
7474 ext->name, id, btf_kind_str(targ_var), targ_var_name);
7475 }
7476 return 0;
7477}
7478
166750bc 7479static int bpf_object__resolve_externs(struct bpf_object *obj,
8601fd42 7480 const char *extra_kconfig)
166750bc 7481{
1c0c7074 7482 bool need_config = false, need_kallsyms = false;
d370bbe1 7483 bool need_vmlinux_btf = false;
166750bc 7484 struct extern_desc *ext;
2e33efe3 7485 void *kcfg_data = NULL;
166750bc 7486 int err, i;
7487
7488 if (obj->nr_extern == 0)
7489 return 0;
7490
7491 if (obj->kconfig_map_idx >= 0)
7492 kcfg_data = obj->maps[obj->kconfig_map_idx].mmaped;
7493
7494 for (i = 0; i < obj->nr_extern; i++) {
7495 ext = &obj->externs[i];
7496
7497 if (ext->type == EXT_KCFG &&
7498 strcmp(ext->name, "LINUX_KERNEL_VERSION") == 0) {
7499 void *ext_val = kcfg_data + ext->kcfg.data_off;
7500 __u32 kver = get_kernel_version();
7501
7502 if (!kver) {
7503 pr_warn("failed to get kernel version\n");
7504 return -EINVAL;
7505 }
2e33efe3 7506 err = set_kcfg_value_num(ext, ext_val, kver);
7507 if (err)
7508 return err;
7509 pr_debug("extern (kcfg) %s=0x%x\n", ext->name, kver);
7510 } else if (ext->type == EXT_KCFG &&
7511 strncmp(ext->name, "CONFIG_", 7) == 0) {
166750bc 7512 need_config = true;
1c0c7074 7513 } else if (ext->type == EXT_KSYM) {
7514 if (ext->ksym.type_id)
7515 need_vmlinux_btf = true;
7516 else
7517 need_kallsyms = true;
7518 } else {
7519 pr_warn("unrecognized extern '%s'\n", ext->name);
7520 return -EINVAL;
7521 }
7522 }
8601fd42 7523 if (need_config && extra_kconfig) {
2e33efe3 7524 err = bpf_object__read_kconfig_mem(obj, extra_kconfig, kcfg_data);
7525 if (err)
7526 return -EINVAL;
7527 need_config = false;
7528 for (i = 0; i < obj->nr_extern; i++) {
7529 ext = &obj->externs[i];
2e33efe3 7530 if (ext->type == EXT_KCFG && !ext->is_set) {
7531 need_config = true;
7532 break;
7533 }
7534 }
7535 }
166750bc 7536 if (need_config) {
2e33efe3 7537 err = bpf_object__read_kconfig_file(obj, kcfg_data);
7538 if (err)
7539 return -EINVAL;
7540 }
7541 if (need_kallsyms) {
7542 err = bpf_object__read_kallsyms_file(obj);
7543 if (err)
7544 return -EINVAL;
7545 }
7546 if (need_vmlinux_btf) {
7547 err = bpf_object__resolve_ksyms_btf_id(obj);
7548 if (err)
7549 return -EINVAL;
7550 }
7551 for (i = 0; i < obj->nr_extern; i++) {
7552 ext = &obj->externs[i];
7553
7554 if (!ext->is_set && !ext->is_weak) {
7555 pr_warn("extern %s (strong) not resolved\n", ext->name);
7556 return -ESRCH;
7557 } else if (!ext->is_set) {
7558 pr_debug("extern %s (weak) not resolved, defaulting to zero\n",
7559 ext->name);
7560 }
7561 }
7562
7563 return 0;
7564}
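/* What these externs look like on the BPF program side (illustrative
 * declarations; __kconfig and __ksym are attribute macros from
 * bpf_helpers.h):
 *
 *	extern unsigned int LINUX_KERNEL_VERSION __kconfig;
 *	extern int CONFIG_HZ __kconfig;
 *	extern const void bpf_prog_active __ksym;
 *
 * Kconfig values are materialized in the mmap'ed .kconfig map; ksyms are
 * resolved from /proc/kallsyms (untyped) or kernel BTF (typed), as the
 * code above decides via ext->ksym.type_id.
 */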
7565
60276f98 7566int bpf_object__load_xattr(struct bpf_object_load_attr *attr)
52d3352e 7567{
60276f98 7568 struct bpf_object *obj;
ec6d5f47 7569 int err, i;
6371ca3b 7570
7571 if (!attr)
7572 return -EINVAL;
7573 obj = attr->obj;
7574 if (!obj)
7575 return -EINVAL;
7576
7577 if (obj->loaded) {
d9297581 7578 pr_warn("object '%s': load can't be attempted twice\n", obj->name);
7579 return -EINVAL;
7580 }
7581
fd9eef1a 7582 err = bpf_object__probe_loading(obj);
fe62de31 7583 err = err ? : bpf_object__load_vmlinux_btf(obj, false);
8601fd42 7584 err = err ? : bpf_object__resolve_externs(obj, obj->kconfig);
7585 err = err ? : bpf_object__sanitize_and_load_btf(obj);
7586 err = err ? : bpf_object__sanitize_maps(obj);
590a0088 7587 err = err ? : bpf_object__init_kern_struct_ops_maps(obj);
7588 err = err ? : bpf_object__create_maps(obj);
7589 err = err ? : bpf_object__relocate(obj, attr->target_btf_path);
7590 err = err ? : bpf_object__load_progs(obj, attr->log_level);
a6ed02ca 7591
7592 /* clean up module BTFs */
7593 for (i = 0; i < obj->btf_module_cnt; i++) {
91abb4a6 7594 close(obj->btf_modules[i].fd);
7595 btf__free(obj->btf_modules[i].btf);
7596 free(obj->btf_modules[i].name);
7597 }
7598 free(obj->btf_modules);
7599
7600 /* clean up vmlinux BTF */
7601 btf__free(obj->btf_vmlinux);
7602 obj->btf_vmlinux = NULL;
7603
7604 obj->loaded = true; /* doesn't matter if successfully or not */
7605
7606 if (err)
7607 goto out;
7608
7609 return 0;
7610out:
7611 /* unpin any maps that were auto-pinned during load */
7612 for (i = 0; i < obj->nr_maps; i++)
7613 if (obj->maps[i].pinned && !obj->maps[i].reused)
7614 bpf_map__unpin(&obj->maps[i], NULL);
7615
52d3352e 7616 bpf_object__unload(obj);
be18010e 7617 pr_warn("failed to load object '%s'\n", obj->path);
6371ca3b 7618 return err;
7619}
7620
7621int bpf_object__load(struct bpf_object *obj)
7622{
7623 struct bpf_object_load_attr attr = {
7624 .obj = obj,
7625 };
7626
7627 return bpf_object__load_xattr(&attr);
7628}
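/* End-to-end usage sketch of the load path (error handling abbreviated;
 * the file name is illustrative):
 *
 *	struct bpf_object *obj = bpf_object__open("prog.bpf.o");
 *
 *	if (IS_ERR_OR_NULL(obj))
 *		return -EINVAL;
 *	if (bpf_object__load(obj)) {
 *		bpf_object__close(obj);
 *		return -EINVAL;
 *	}
 *	// ... find maps/programs, attach, and eventually:
 *	bpf_object__close(obj);
 */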
7629
7630static int make_parent_dir(const char *path)
7631{
7632 char *cp, errmsg[STRERR_BUFSIZE];
7633 char *dname, *dir;
7634 int err = 0;
7635
7636 dname = strdup(path);
7637 if (dname == NULL)
7638 return -ENOMEM;
7639
7640 dir = dirname(dname);
7641 if (mkdir(dir, 0700) && errno != EEXIST)
7642 err = -errno;
7643
7644 free(dname);
7645 if (err) {
7646 cp = libbpf_strerror_r(-err, errmsg, sizeof(errmsg));
7647 pr_warn("failed to mkdir %s: %s\n", path, cp);
7648 }
7649 return err;
7650}
7651
7652static int check_path(const char *path)
7653{
1ce6a9fc 7654 char *cp, errmsg[STRERR_BUFSIZE];
7655 struct statfs st_fs;
7656 char *dname, *dir;
7657 int err = 0;
7658
7659 if (path == NULL)
7660 return -EINVAL;
7661
7662 dname = strdup(path);
7663 if (dname == NULL)
7664 return -ENOMEM;
7665
7666 dir = dirname(dname);
7667 if (statfs(dir, &st_fs)) {
24d6a808 7668 cp = libbpf_strerror_r(errno, errmsg, sizeof(errmsg));
be18010e 7669 pr_warn("failed to statfs %s: %s\n", dir, cp);
7670 err = -errno;
7671 }
7672 free(dname);
7673
7674 if (!err && st_fs.f_type != BPF_FS_MAGIC) {
be18010e 7675 pr_warn("specified path %s is not on BPF FS\n", path);
7676 err = -EINVAL;
7677 }
7678
7679 return err;
7680}
7681
7682int bpf_program__pin_instance(struct bpf_program *prog, const char *path,
7683 int instance)
7684{
1ce6a9fc 7685 char *cp, errmsg[STRERR_BUFSIZE];
7686 int err;
7687
7688 err = make_parent_dir(path);
7689 if (err)
7690 return err;
7691
7692 err = check_path(path);
7693 if (err)
7694 return err;
7695
7696 if (prog == NULL) {
be18010e 7697 pr_warn("invalid program pointer\n");
7698 return -EINVAL;
7699 }
7700
7701 if (instance < 0 || instance >= prog->instances.nr) {
be18010e 7702 pr_warn("invalid prog instance %d of prog %s (max %d)\n",
52109584 7703 instance, prog->name, prog->instances.nr);
7704 return -EINVAL;
7705 }
7706
7707 if (bpf_obj_pin(prog->instances.fds[instance], path)) {
7708 err = -errno;
7709 cp = libbpf_strerror_r(err, errmsg, sizeof(errmsg));
be18010e 7710 pr_warn("failed to pin program: %s\n", cp);
23ab656b 7711 return err;
7712 }
7713 pr_debug("pinned program '%s'\n", path);
7714
7715 return 0;
7716}
7717
7718int bpf_program__unpin_instance(struct bpf_program *prog, const char *path,
7719 int instance)
7720{
7721 int err;
7722
7723 err = check_path(path);
7724 if (err)
7725 return err;
7726
7727 if (prog == NULL) {
be18010e 7728 pr_warn("invalid program pointer\n");
7729 return -EINVAL;
7730 }
7731
7732 if (instance < 0 || instance >= prog->instances.nr) {
be18010e 7733 pr_warn("invalid prog instance %d of prog %s (max %d)\n",
52109584 7734 instance, prog->name, prog->instances.nr);
7735 return -EINVAL;
7736 }
7737
7738 err = unlink(path);
7739 if (err != 0)
7740 return -errno;
7741 pr_debug("unpinned program '%s'\n", path);
7742
7743 return 0;
7744}
7745
7746int bpf_program__pin(struct bpf_program *prog, const char *path)
7747{
7748 int i, err;
7749
7750 err = make_parent_dir(path);
7751 if (err)
7752 return err;
7753
7754 err = check_path(path);
7755 if (err)
7756 return err;
7757
7758 if (prog == NULL) {
be18010e 7759 pr_warn("invalid program pointer\n");
7760 return -EINVAL;
7761 }
7762
7763 if (prog->instances.nr <= 0) {
52109584 7764 pr_warn("no instances of prog %s to pin\n", prog->name);
7765 return -EINVAL;
7766 }
7767
7768 if (prog->instances.nr == 1) {
7769 /* don't create subdirs when pinning single instance */
7770 return bpf_program__pin_instance(prog, path, 0);
7771 }
7772
7773 for (i = 0; i < prog->instances.nr; i++) {
7774 char buf[PATH_MAX];
7775 int len;
7776
7777 len = snprintf(buf, PATH_MAX, "%s/%d", path, i);
7778 if (len < 0) {
7779 err = -EINVAL;
7780 goto err_unpin;
7781 } else if (len >= PATH_MAX) {
7782 err = -ENAMETOOLONG;
7783 goto err_unpin;
7784 }
7785
7786 err = bpf_program__pin_instance(prog, buf, i);
7787 if (err)
7788 goto err_unpin;
7789 }
7790
7791 return 0;
7792
7793err_unpin:
7794 for (i = i - 1; i >= 0; i--) {
7795 char buf[PATH_MAX];
7796 int len;
7797
7798 len = snprintf(buf, PATH_MAX, "%s/%d", path, i);
7799 if (len < 0)
7800 continue;
7801 else if (len >= PATH_MAX)
7802 continue;
7803
7804 bpf_program__unpin_instance(prog, buf, i);
7805 }
7806
7807 rmdir(path);
7808
7809 return err;
7810}
7811
7812int bpf_program__unpin(struct bpf_program *prog, const char *path)
7813{
7814 int i, err;
7815
7816 err = check_path(path);
7817 if (err)
7818 return err;
7819
7820 if (prog == NULL) {
be18010e 7821 pr_warn("invalid program pointer\n");
7822 return -EINVAL;
7823 }
7824
7825 if (prog->instances.nr <= 0) {
52109584 7826 pr_warn("no instances of prog %s to unpin\n", prog->name);
0c19a9fb 7827 return -EINVAL;
7828 }
7829
7830 if (prog->instances.nr == 1) {
7831 /* don't create subdirs when pinning single instance */
7832 return bpf_program__unpin_instance(prog, path, 0);
7833 }
7834
7835 for (i = 0; i < prog->instances.nr; i++) {
7836 char buf[PATH_MAX];
7837 int len;
7838
7839 len = snprintf(buf, PATH_MAX, "%s/%d", path, i);
7840 if (len < 0)
7841 return -EINVAL;
7842 else if (len >= PATH_MAX)
7843 return -ENAMETOOLONG;
7844
0c19a9fb 7845 err = bpf_program__unpin_instance(prog, buf, i);
7846 if (err)
7847 return err;
7848 }
7849
7850 err = rmdir(path);
7851 if (err)
7852 return -errno;
7853
7854 return 0;
7855}
7856
7857int bpf_map__pin(struct bpf_map *map, const char *path)
7858{
1ce6a9fc 7859 char *cp, errmsg[STRERR_BUFSIZE];
7860 int err;
7861
b6989f35 7862 if (map == NULL) {
be18010e 7863 pr_warn("invalid map pointer\n");
7864 return -EINVAL;
7865 }
7866
7867 if (map->pin_path) {
7868 if (path && strcmp(path, map->pin_path)) {
7869 pr_warn("map '%s' already has pin path '%s' different from '%s'\n",
7870 bpf_map__name(map), map->pin_path, path);
7871 return -EINVAL;
7872 } else if (map->pinned) {
7873 pr_debug("map '%s' already pinned at '%s'; not re-pinning\n",
7874 bpf_map__name(map), map->pin_path);
7875 return 0;
7876 }
7877 } else {
7878 if (!path) {
7879 pr_warn("missing a path to pin map '%s' at\n",
7880 bpf_map__name(map));
7881 return -EINVAL;
7882 } else if (map->pinned) {
7883 pr_warn("map '%s' already pinned\n", bpf_map__name(map));
7884 return -EEXIST;
7885 }
7886
7887 map->pin_path = strdup(path);
7888 if (!map->pin_path) {
7889 err = -errno;
7890 goto out_err;
7891 }
7892 }
7893
7894 err = make_parent_dir(map->pin_path);
7895 if (err)
7896 return err;
7897
7898 err = check_path(map->pin_path);
7899 if (err)
7900 return err;
7901
7902 if (bpf_obj_pin(map->fd, map->pin_path)) {
7903 err = -errno;
7904 goto out_err;
7905 }
7906
7907 map->pinned = true;
7908 pr_debug("pinned map '%s'\n", map->pin_path);
0c19a9fb 7909
b6989f35 7910 return 0;
7911
7912out_err:
7913 cp = libbpf_strerror_r(-err, errmsg, sizeof(errmsg));
7914 pr_warn("failed to pin map: %s\n", cp);
7915 return err;
7916}
7917
7918int bpf_map__unpin(struct bpf_map *map, const char *path)
7919{
7920 int err;
7921
0c19a9fb 7922 if (map == NULL) {
be18010e 7923 pr_warn("invalid map pointer\n");
7924 return -EINVAL;
7925 }
7926
7927 if (map->pin_path) {
7928 if (path && strcmp(path, map->pin_path)) {
7929 pr_warn("map '%s' already has pin path '%s' different from '%s'\n",
7930 bpf_map__name(map), map->pin_path, path);
7931 return -EINVAL;
7932 }
7933 path = map->pin_path;
7934 } else if (!path) {
7935 pr_warn("no path to unpin map '%s' from\n",
7936 bpf_map__name(map));
7937 return -EINVAL;
7938 }
7939
7940 err = check_path(path);
7941 if (err)
7942 return err;
7943
7944 err = unlink(path);
7945 if (err != 0)
7946 return -errno;
7947
7948 map->pinned = false;
7949 pr_debug("unpinned map '%s' from '%s'\n", bpf_map__name(map), path);
7950
7951 return 0;
7952}
7953
7954int bpf_map__set_pin_path(struct bpf_map *map, const char *path)
7955{
7956 char *new = NULL;
7957
7958 if (path) {
7959 new = strdup(path);
7960 if (!new)
7961 return -errno;
7962 }
7963
7964 free(map->pin_path);
7965 map->pin_path = new;
7966 return 0;
7967}
7968
7969const char *bpf_map__get_pin_path(const struct bpf_map *map)
7970{
7971 return map->pin_path;
7972}
7973
7974bool bpf_map__is_pinned(const struct bpf_map *map)
7975{
7976 return map->pinned;
7977}
7978
7979static void sanitize_pin_path(char *s)
7980{
7981 /* bpffs disallows periods in path names */
7982 while (*s) {
7983 if (*s == '.')
7984 *s = '_';
7985 s++;
7986 }
7987}
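/* Example: an internal map named "my_prog.rodata" auto-pinned under
 * /sys/fs/bpf ends up at "/sys/fs/bpf/my_prog_rodata", since bpffs
 * rejects '.' in dentry names.
 */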
7988
0c19a9fb 7989int bpf_object__pin_maps(struct bpf_object *obj, const char *path)
d5148d85 7990{
7991 struct bpf_map *map;
7992 int err;
7993
7994 if (!obj)
7995 return -ENOENT;
7996
7997 if (!obj->loaded) {
be18010e 7998 pr_warn("object not yet loaded; load it first\n");
7999 return -ENOENT;
8000 }
8001
f74a53d9 8002 bpf_object__for_each_map(map, obj) {
4580b25f 8003 char *pin_path = NULL;
0c19a9fb 8004 char buf[PATH_MAX];
0c19a9fb 8005
8006 if (path) {
8007 int len;
8008
8009 len = snprintf(buf, PATH_MAX, "%s/%s", path,
8010 bpf_map__name(map));
8011 if (len < 0) {
8012 err = -EINVAL;
8013 goto err_unpin_maps;
8014 } else if (len >= PATH_MAX) {
8015 err = -ENAMETOOLONG;
8016 goto err_unpin_maps;
8017 }
9cf309c5 8018 sanitize_pin_path(buf);
8019 pin_path = buf;
8020 } else if (!map->pin_path) {
8021 continue;
8022 }
8023
4580b25f 8024 err = bpf_map__pin(map, pin_path);
8025 if (err)
8026 goto err_unpin_maps;
8027 }
8028
8029 return 0;
8030
8031err_unpin_maps:
8032 while ((map = bpf_map__prev(map, obj))) {
4580b25f 8033 if (!map->pin_path)
8034 continue;
8035
4580b25f 8036 bpf_map__unpin(map, NULL);
8037 }
8038
8039 return err;
8040}
8041
8042int bpf_object__unpin_maps(struct bpf_object *obj, const char *path)
8043{
8044 struct bpf_map *map;
8045 int err;
8046
8047 if (!obj)
8048 return -ENOENT;
8049
f74a53d9 8050 bpf_object__for_each_map(map, obj) {
4580b25f 8051 char *pin_path = NULL;
d5148d85 8052 char buf[PATH_MAX];
d5148d85 8053
8054 if (path) {
8055 int len;
8056
8057 len = snprintf(buf, PATH_MAX, "%s/%s", path,
8058 bpf_map__name(map));
8059 if (len < 0)
8060 return -EINVAL;
8061 else if (len >= PATH_MAX)
8062 return -ENAMETOOLONG;
9cf309c5 8063 sanitize_pin_path(buf);
8064 pin_path = buf;
8065 } else if (!map->pin_path) {
8066 continue;
8067 }
d5148d85 8068
4580b25f 8069 err = bpf_map__unpin(map, pin_path);
8070 if (err)
8071 return err;
8072 }
8073
8074 return 0;
8075}
8076
8077int bpf_object__pin_programs(struct bpf_object *obj, const char *path)
8078{
8079 struct bpf_program *prog;
8080 int err;
8081
8082 if (!obj)
8083 return -ENOENT;
8084
8085 if (!obj->loaded) {
be18010e 8086 pr_warn("object not yet loaded; load it first\n");
8087 return -ENOENT;
8088 }
8089
8090 bpf_object__for_each_program(prog, obj) {
8091 char buf[PATH_MAX];
8092 int len;
8093
8094 len = snprintf(buf, PATH_MAX, "%s/%s", path,
33a2c75c 8095 prog->pin_name);
8096 if (len < 0) {
8097 err = -EINVAL;
8098 goto err_unpin_programs;
8099 } else if (len >= PATH_MAX) {
8100 err = -ENAMETOOLONG;
8101 goto err_unpin_programs;
8102 }
8103
8104 err = bpf_program__pin(prog, buf);
8105 if (err)
8106 goto err_unpin_programs;
8107 }
8108
8109 return 0;
8110
8111err_unpin_programs:
8112 while ((prog = bpf_program__prev(prog, obj))) {
8113 char buf[PATH_MAX];
8114 int len;
8115
8116 len = snprintf(buf, PATH_MAX, "%s/%s", path,
33a2c75c 8117 prog->pin_name);
8118 if (len < 0)
8119 continue;
8120 else if (len >= PATH_MAX)
8121 continue;
8122
8123 bpf_program__unpin(prog, buf);
8124 }
8125
8126 return err;
8127}
8128
8129int bpf_object__unpin_programs(struct bpf_object *obj, const char *path)
8130{
8131 struct bpf_program *prog;
8132 int err;
8133
8134 if (!obj)
8135 return -ENOENT;
8136
8137 bpf_object__for_each_program(prog, obj) {
8138 char buf[PATH_MAX];
8139 int len;
8140
8141 len = snprintf(buf, PATH_MAX, "%s/%s", path,
33a2c75c 8142 prog->pin_name);
8143 if (len < 0)
8144 return -EINVAL;
8145 else if (len >= PATH_MAX)
8146 return -ENAMETOOLONG;
8147
0c19a9fb 8148 err = bpf_program__unpin(prog, buf);
8149 if (err)
8150 return err;
8151 }
8152
8153 return 0;
8154}
8155
8156int bpf_object__pin(struct bpf_object *obj, const char *path)
8157{
8158 int err;
8159
8160 err = bpf_object__pin_maps(obj, path);
8161 if (err)
8162 return err;
8163
8164 err = bpf_object__pin_programs(obj, path);
8165 if (err) {
8166 bpf_object__unpin_maps(obj, path);
8167 return err;
8168 }
8169
8170 return 0;
8171}
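/* Usage sketch ("/sys/fs/bpf/myapp" is illustrative): after a successful
 * load,
 *
 *	err = bpf_object__pin(obj, "/sys/fs/bpf/myapp");
 *
 * pins every map at <path>/<map name> and every program at
 * <path>/<pin_name>. If program pinning fails, the maps pinned in the
 * first step are unpinned again before the error is returned.
 */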
8172
8173static void bpf_map__destroy(struct bpf_map *map)
8174{
8175 if (map->clear_priv)
8176 map->clear_priv(map, map->priv);
8177 map->priv = NULL;
8178 map->clear_priv = NULL;
8179
8180 if (map->inner_map) {
8181 bpf_map__destroy(map->inner_map);
8182 zfree(&map->inner_map);
8183 }
8184
8185 zfree(&map->init_slots);
8186 map->init_slots_sz = 0;
8187
8188 if (map->mmaped) {
8189 munmap(map->mmaped, bpf_map_mmap_sz(map));
8190 map->mmaped = NULL;
8191 }
8192
8193 if (map->st_ops) {
8194 zfree(&map->st_ops->data);
8195 zfree(&map->st_ops->progs);
8196 zfree(&map->st_ops->kern_func_off);
8197 zfree(&map->st_ops);
8198 }
8199
8200 zfree(&map->name);
8201 zfree(&map->pin_path);
8202
8203 if (map->fd >= 0)
8204 zclose(map->fd);
8205}
8206
8207void bpf_object__close(struct bpf_object *obj)
8208{
8209 size_t i;
8210
50450fc7 8211 if (IS_ERR_OR_NULL(obj))
8212 return;
8213
8214 if (obj->clear_priv)
8215 obj->clear_priv(obj, obj->priv);
8216
1a5e3fb1 8217 bpf_object__elf_finish(obj);
52d3352e 8218 bpf_object__unload(obj);
8a138aed 8219 btf__free(obj->btf);
2993e051 8220 btf_ext__free(obj->btf_ext);
1a5e3fb1 8221
8222 for (i = 0; i < obj->nr_maps; i++)
8223 bpf_map__destroy(&obj->maps[i]);
d859900c 8224
8601fd42 8225 zfree(&obj->kconfig);
8226 zfree(&obj->externs);
8227 obj->nr_extern = 0;
8228
8229 zfree(&obj->maps);
8230 obj->nr_maps = 0;
8231
8232 if (obj->programs && obj->nr_programs) {
8233 for (i = 0; i < obj->nr_programs; i++)
8234 bpf_program__exit(&obj->programs[i]);
8235 }
8236 zfree(&obj->programs);
8237
9a208eff 8238 list_del(&obj->list);
8239 free(obj);
8240}
aa9b1ac3 8241
8242struct bpf_object *
8243bpf_object__next(struct bpf_object *prev)
8244{
8245 struct bpf_object *next;
8246
8247 if (!prev)
8248 next = list_first_entry(&bpf_objects_list,
8249 struct bpf_object,
8250 list);
8251 else
8252 next = list_next_entry(prev, list);
8253
8254 /* An empty list is detected here, so there is no need to check on entry. */
8255 if (&next->list == &bpf_objects_list)
8256 return NULL;
8257
8258 return next;
8259}
8260
a324aae3 8261const char *bpf_object__name(const struct bpf_object *obj)
acf860ae 8262{
c9e4c301 8263 return obj ? obj->name : ERR_PTR(-EINVAL);
8264}
8265
a324aae3 8266unsigned int bpf_object__kversion(const struct bpf_object *obj)
45825d8a 8267{
a7fe0450 8268 return obj ? obj->kern_version : 0;
8269}
8270
a324aae3 8271struct btf *bpf_object__btf(const struct bpf_object *obj)
8272{
8273 return obj ? obj->btf : NULL;
8274}
8275
8276int bpf_object__btf_fd(const struct bpf_object *obj)
8277{
8278 return obj->btf ? btf__fd(obj->btf) : -1;
8279}
8280
8281int bpf_object__set_priv(struct bpf_object *obj, void *priv,
8282 bpf_object_clear_priv_t clear_priv)
8283{
8284 if (obj->priv && obj->clear_priv)
8285 obj->clear_priv(obj, obj->priv);
8286
8287 obj->priv = priv;
8288 obj->clear_priv = clear_priv;
8289 return 0;
8290}
8291
a324aae3 8292void *bpf_object__priv(const struct bpf_object *obj)
8293{
8294 return obj ? obj->priv : ERR_PTR(-EINVAL);
8295}
8296
eac7d845 8297static struct bpf_program *
8298__bpf_program__iter(const struct bpf_program *p, const struct bpf_object *obj,
8299 bool forward)
aa9b1ac3 8300{
a83d6e76 8301 size_t nr_programs = obj->nr_programs;
0c19a9fb 8302 ssize_t idx;
aa9b1ac3 8303
a83d6e76 8304 if (!nr_programs)
aa9b1ac3 8305 return NULL;
aa9b1ac3 8306
8307 if (!p)
8308 /* Iter from the beginning */
8309 return forward ? &obj->programs[0] :
8310 &obj->programs[nr_programs - 1];
8311
0c19a9fb 8312 if (p->obj != obj) {
be18010e 8313 pr_warn("error: program handler doesn't match object\n");
8314 return NULL;
8315 }
8316
a83d6e76 8317 idx = (p - obj->programs) + (forward ? 1 : -1);
0c19a9fb 8318 if (idx >= obj->nr_programs || idx < 0)
8319 return NULL;
8320 return &obj->programs[idx];
8321}
8322
eac7d845 8323struct bpf_program *
a324aae3 8324bpf_program__next(struct bpf_program *prev, const struct bpf_object *obj)
8325{
8326 struct bpf_program *prog = prev;
8327
8328 do {
a83d6e76 8329 prog = __bpf_program__iter(prog, obj, true);
c3c55696 8330 } while (prog && prog_is_subprog(obj, prog));
8331
8332 return prog;
8333}
8334
8335struct bpf_program *
a324aae3 8336bpf_program__prev(struct bpf_program *next, const struct bpf_object *obj)
8337{
8338 struct bpf_program *prog = next;
8339
0c19a9fb 8340 do {
a83d6e76 8341 prog = __bpf_program__iter(prog, obj, false);
c3c55696 8342 } while (prog && prog_is_subprog(obj, prog));
8343
8344 return prog;
8345}
8346
8347int bpf_program__set_priv(struct bpf_program *prog, void *priv,
8348 bpf_program_clear_priv_t clear_priv)
8349{
8350 if (prog->priv && prog->clear_priv)
8351 prog->clear_priv(prog, prog->priv);
8352
8353 prog->priv = priv;
8354 prog->clear_priv = clear_priv;
8355 return 0;
8356}
8357
a324aae3 8358void *bpf_program__priv(const struct bpf_program *prog)
aa9b1ac3 8359{
be834ffb 8360 return prog ? prog->priv : ERR_PTR(-EINVAL);
8361}
8362
8363void bpf_program__set_ifindex(struct bpf_program *prog, __u32 ifindex)
8364{
8365 prog->prog_ifindex = ifindex;
8366}
8367
8368const char *bpf_program__name(const struct bpf_program *prog)
8369{
8370 return prog->name;
8371}
8372
8373const char *bpf_program__section_name(const struct bpf_program *prog)
8374{
8375 return prog->sec_name;
8376}
8377
a324aae3 8378const char *bpf_program__title(const struct bpf_program *prog, bool needs_copy)
8379{
8380 const char *title;
8381
52109584 8382 title = prog->sec_name;
715f8db9 8383 if (needs_copy) {
8384 title = strdup(title);
8385 if (!title) {
be18010e 8386 pr_warn("failed to strdup program title\n");
6371ca3b 8387 return ERR_PTR(-ENOMEM);
8388 }
8389 }
8390
8391 return title;
8392}
8393
8394bool bpf_program__autoload(const struct bpf_program *prog)
8395{
8396 return prog->load;
8397}
8398
8399int bpf_program__set_autoload(struct bpf_program *prog, bool autoload)
8400{
8401 if (prog->obj->loaded)
8402 return -EINVAL;
8403
8404 prog->load = autoload;
8405 return 0;
8406}
8407
a324aae3 8408int bpf_program__fd(const struct bpf_program *prog)
aa9b1ac3 8409{
8410 return bpf_program__nth_fd(prog, 0);
8411}
8412
8413size_t bpf_program__size(const struct bpf_program *prog)
8414{
9c0f8cbd 8415 return prog->insns_cnt * BPF_INSN_SZ;
8416}
8417
8418int bpf_program__set_prep(struct bpf_program *prog, int nr_instances,
8419 bpf_program_prep_t prep)
8420{
8421 int *instances_fds;
8422
8423 if (nr_instances <= 0 || !prep)
8424 return -EINVAL;
8425
8426 if (prog->instances.nr > 0 || prog->instances.fds) {
be18010e 8427 pr_warn("Can't set pre-processor after loading\n");
8428 return -EINVAL;
8429 }
8430
8431 instances_fds = malloc(sizeof(int) * nr_instances);
8432 if (!instances_fds) {
be18010e 8433 pr_warn("failed to allocate memory for fds\n");
8434 return -ENOMEM;
8435 }
8436
8437 /* fill all fds with -1 */
8438 memset(instances_fds, -1, sizeof(int) * nr_instances);
8439
8440 prog->instances.nr = nr_instances;
8441 prog->instances.fds = instances_fds;
8442 prog->preprocessor = prep;
8443 return 0;
8444}
8445
a324aae3 8446int bpf_program__nth_fd(const struct bpf_program *prog, int n)
8447{
8448 int fd;
8449
8450 if (!prog)
8451 return -EINVAL;
8452
b580563e 8453 if (n >= prog->instances.nr || n < 0) {
be18010e 8454 pr_warn("Can't get the %dth fd from program %s: only %d instances\n",
52109584 8455 n, prog->name, prog->instances.nr);
8456 return -EINVAL;
8457 }
8458
8459 fd = prog->instances.fds[n];
8460 if (fd < 0) {
be18010e 8461 pr_warn("%dth instance of program '%s' is invalid\n",
52109584 8462 n, prog->name);
8463 return -ENOENT;
8464 }
8465
8466 return fd;
aa9b1ac3 8467}
9d759a9b 8468
8469enum bpf_prog_type bpf_program__get_type(struct bpf_program *prog)
8470{
8471 return prog->type;
8472}
8473
dd26b7f5 8474void bpf_program__set_type(struct bpf_program *prog, enum bpf_prog_type type)
8475{
8476 prog->type = type;
8477}
8478
a324aae3 8479static bool bpf_program__is_type(const struct bpf_program *prog,
8480 enum bpf_prog_type type)
8481{
8482 return prog ? (prog->type == type) : false;
8483}
8484
8485#define BPF_PROG_TYPE_FNS(NAME, TYPE) \
8486int bpf_program__set_##NAME(struct bpf_program *prog) \
8487{ \
8488 if (!prog) \
8489 return -EINVAL; \
8490 bpf_program__set_type(prog, TYPE); \
8491 return 0; \
8492} \
8493 \
8494bool bpf_program__is_##NAME(const struct bpf_program *prog) \
8495{ \
8496 return bpf_program__is_type(prog, TYPE); \
8497} \
ed794073 8498
7803ba73 8499BPF_PROG_TYPE_FNS(socket_filter, BPF_PROG_TYPE_SOCKET_FILTER);
1e092a03 8500BPF_PROG_TYPE_FNS(lsm, BPF_PROG_TYPE_LSM);
ed794073 8501BPF_PROG_TYPE_FNS(kprobe, BPF_PROG_TYPE_KPROBE);
8502BPF_PROG_TYPE_FNS(sched_cls, BPF_PROG_TYPE_SCHED_CLS);
8503BPF_PROG_TYPE_FNS(sched_act, BPF_PROG_TYPE_SCHED_ACT);
ed794073 8504BPF_PROG_TYPE_FNS(tracepoint, BPF_PROG_TYPE_TRACEPOINT);
e14c93fd 8505BPF_PROG_TYPE_FNS(raw_tracepoint, BPF_PROG_TYPE_RAW_TRACEPOINT);
8506BPF_PROG_TYPE_FNS(xdp, BPF_PROG_TYPE_XDP);
8507BPF_PROG_TYPE_FNS(perf_event, BPF_PROG_TYPE_PERF_EVENT);
12a8654b 8508BPF_PROG_TYPE_FNS(tracing, BPF_PROG_TYPE_TRACING);
590a0088 8509BPF_PROG_TYPE_FNS(struct_ops, BPF_PROG_TYPE_STRUCT_OPS);
2db6eab1 8510BPF_PROG_TYPE_FNS(extension, BPF_PROG_TYPE_EXT);
499dd29d 8511BPF_PROG_TYPE_FNS(sk_lookup, BPF_PROG_TYPE_SK_LOOKUP);
5f44e4c8 8512
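/* Each BPF_PROG_TYPE_FNS(NAME, TYPE) instantiation above expands to a
 * setter/predicate pair; e.g. BPF_PROG_TYPE_FNS(xdp, BPF_PROG_TYPE_XDP)
 * yields:
 *
 *	int bpf_program__set_xdp(struct bpf_program *prog);
 *	bool bpf_program__is_xdp(const struct bpf_program *prog);
 */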
8513enum bpf_attach_type
8514bpf_program__get_expected_attach_type(struct bpf_program *prog)
8515{
8516 return prog->expected_attach_type;
8517}
8518
8519void bpf_program__set_expected_attach_type(struct bpf_program *prog,
8520 enum bpf_attach_type type)
8521{
8522 prog->expected_attach_type = type;
8523}
8524
8525#define BPF_PROG_SEC_IMPL(string, ptype, eatype, eatype_optional, \
8526 attachable, attach_btf) \
8527 { \
8528 .sec = string, \
8529 .len = sizeof(string) - 1, \
8530 .prog_type = ptype, \
8531 .expected_attach_type = eatype, \
8532 .is_exp_attach_type_optional = eatype_optional, \
8533 .is_attachable = attachable, \
8534 .is_attach_btf = attach_btf, \
8535 }
d7be143b 8536
956b620f 8537/* Programs that can NOT be attached. */
f75a697e 8538#define BPF_PROG_SEC(string, ptype) BPF_PROG_SEC_IMPL(string, ptype, 0, 0, 0, 0)
d7be143b 8539
8540/* Programs that can be attached. */
8541#define BPF_APROG_SEC(string, ptype, atype) \
25498a19 8542 BPF_PROG_SEC_IMPL(string, ptype, atype, true, 1, 0)
81efee75 8543
8544/* Programs that must specify expected attach type at load time. */
8545#define BPF_EAPROG_SEC(string, ptype, eatype) \
25498a19 8546 BPF_PROG_SEC_IMPL(string, ptype, eatype, false, 1, 0)
8547
8548/* Programs that use BTF to identify attach point */
12a8654b 8549#define BPF_PROG_BTF(string, ptype, eatype) \
25498a19 8550 BPF_PROG_SEC_IMPL(string, ptype, eatype, false, 0, 1)
8551
8552/* Programs that can be attached but attach type can't be identified by section
8553 * name. Kept for backward compatibility.
8554 */
8555#define BPF_APROG_COMPAT(string, ptype) BPF_PROG_SEC(string, ptype)
e50b0a6f 8556
8557#define SEC_DEF(sec_pfx, ptype, ...) { \
8558 .sec = sec_pfx, \
8559 .len = sizeof(sec_pfx) - 1, \
8560 .prog_type = BPF_PROG_TYPE_##ptype, \
8561 __VA_ARGS__ \
8562}
8563
8564static struct bpf_link *attach_kprobe(const struct bpf_sec_def *sec,
8565 struct bpf_program *prog);
8566static struct bpf_link *attach_tp(const struct bpf_sec_def *sec,
8567 struct bpf_program *prog);
8568static struct bpf_link *attach_raw_tp(const struct bpf_sec_def *sec,
8569 struct bpf_program *prog);
8570static struct bpf_link *attach_trace(const struct bpf_sec_def *sec,
8571 struct bpf_program *prog);
8572static struct bpf_link *attach_lsm(const struct bpf_sec_def *sec,
8573 struct bpf_program *prog);
8574static struct bpf_link *attach_iter(const struct bpf_sec_def *sec,
8575 struct bpf_program *prog);
d7a18ea7 8576
d7a18ea7 8577static const struct bpf_sec_def section_defs[] = {
956b620f 8578 BPF_PROG_SEC("socket", BPF_PROG_TYPE_SOCKET_FILTER),
67d69ccd 8579 BPF_PROG_SEC("sk_reuseport", BPF_PROG_TYPE_SK_REUSEPORT),
8580 SEC_DEF("kprobe/", KPROBE,
8581 .attach_fn = attach_kprobe),
32dff6db 8582 BPF_PROG_SEC("uprobe/", BPF_PROG_TYPE_KPROBE),
8583 SEC_DEF("kretprobe/", KPROBE,
8584 .attach_fn = attach_kprobe),
32dff6db 8585 BPF_PROG_SEC("uretprobe/", BPF_PROG_TYPE_KPROBE),
8586 BPF_PROG_SEC("classifier", BPF_PROG_TYPE_SCHED_CLS),
8587 BPF_PROG_SEC("action", BPF_PROG_TYPE_SCHED_ACT),
8588 SEC_DEF("tracepoint/", TRACEPOINT,
8589 .attach_fn = attach_tp),
8590 SEC_DEF("tp/", TRACEPOINT,
8591 .attach_fn = attach_tp),
8592 SEC_DEF("raw_tracepoint/", RAW_TRACEPOINT,
8593 .attach_fn = attach_raw_tp),
8594 SEC_DEF("raw_tp/", RAW_TRACEPOINT,
8595 .attach_fn = attach_raw_tp),
8596 SEC_DEF("tp_btf/", TRACING,
8597 .expected_attach_type = BPF_TRACE_RAW_TP,
8598 .is_attach_btf = true,
8599 .attach_fn = attach_trace),
8600 SEC_DEF("fentry/", TRACING,
8601 .expected_attach_type = BPF_TRACE_FENTRY,
8602 .is_attach_btf = true,
8603 .attach_fn = attach_trace),
8604 SEC_DEF("fmod_ret/", TRACING,
8605 .expected_attach_type = BPF_MODIFY_RETURN,
8606 .is_attach_btf = true,
8607 .attach_fn = attach_trace),
8608 SEC_DEF("fexit/", TRACING,
8609 .expected_attach_type = BPF_TRACE_FEXIT,
8610 .is_attach_btf = true,
8611 .attach_fn = attach_trace),
8612 SEC_DEF("fentry.s/", TRACING,
8613 .expected_attach_type = BPF_TRACE_FENTRY,
8614 .is_attach_btf = true,
8615 .is_sleepable = true,
8616 .attach_fn = attach_trace),
8617 SEC_DEF("fmod_ret.s/", TRACING,
8618 .expected_attach_type = BPF_MODIFY_RETURN,
8619 .is_attach_btf = true,
8620 .is_sleepable = true,
8621 .attach_fn = attach_trace),
8622 SEC_DEF("fexit.s/", TRACING,
8623 .expected_attach_type = BPF_TRACE_FEXIT,
8624 .is_attach_btf = true,
8625 .is_sleepable = true,
8626 .attach_fn = attach_trace),
8627 SEC_DEF("freplace/", EXT,
8628 .is_attach_btf = true,
8629 .attach_fn = attach_trace),
8630 SEC_DEF("lsm/", LSM,
8631 .is_attach_btf = true,
8632 .expected_attach_type = BPF_LSM_MAC,
8633 .attach_fn = attach_lsm),
8634 SEC_DEF("lsm.s/", LSM,
8635 .is_attach_btf = true,
8636 .is_sleepable = true,
8637 .expected_attach_type = BPF_LSM_MAC,
8638 .attach_fn = attach_lsm),
8639 SEC_DEF("iter/", TRACING,
8640 .expected_attach_type = BPF_TRACE_ITER,
8641 .is_attach_btf = true,
8642 .attach_fn = attach_iter),
7a64135f 8643 BPF_EAPROG_SEC("xdp_devmap/", BPF_PROG_TYPE_XDP,
27787970 8644 BPF_XDP_DEVMAP),
8645 BPF_EAPROG_SEC("xdp_cpumap/", BPF_PROG_TYPE_XDP,
8646 BPF_XDP_CPUMAP),
87f92ac4 8647 BPF_APROG_SEC("xdp", BPF_PROG_TYPE_XDP,
dc8698ca 8648 BPF_XDP),
8649 BPF_PROG_SEC("perf_event", BPF_PROG_TYPE_PERF_EVENT),
8650 BPF_PROG_SEC("lwt_in", BPF_PROG_TYPE_LWT_IN),
8651 BPF_PROG_SEC("lwt_out", BPF_PROG_TYPE_LWT_OUT),
8652 BPF_PROG_SEC("lwt_xmit", BPF_PROG_TYPE_LWT_XMIT),
8653 BPF_PROG_SEC("lwt_seg6local", BPF_PROG_TYPE_LWT_SEG6LOCAL),
8654 BPF_APROG_SEC("cgroup_skb/ingress", BPF_PROG_TYPE_CGROUP_SKB,
8655 BPF_CGROUP_INET_INGRESS),
8656 BPF_APROG_SEC("cgroup_skb/egress", BPF_PROG_TYPE_CGROUP_SKB,
8657 BPF_CGROUP_INET_EGRESS),
956b620f 8658 BPF_APROG_COMPAT("cgroup/skb", BPF_PROG_TYPE_CGROUP_SKB),
8659 BPF_EAPROG_SEC("cgroup/sock_create", BPF_PROG_TYPE_CGROUP_SOCK,
8660 BPF_CGROUP_INET_SOCK_CREATE),
8661 BPF_EAPROG_SEC("cgroup/sock_release", BPF_PROG_TYPE_CGROUP_SOCK,
8662 BPF_CGROUP_INET_SOCK_RELEASE),
8663 BPF_APROG_SEC("cgroup/sock", BPF_PROG_TYPE_CGROUP_SOCK,
8664 BPF_CGROUP_INET_SOCK_CREATE),
8665 BPF_EAPROG_SEC("cgroup/post_bind4", BPF_PROG_TYPE_CGROUP_SOCK,
8666 BPF_CGROUP_INET4_POST_BIND),
8667 BPF_EAPROG_SEC("cgroup/post_bind6", BPF_PROG_TYPE_CGROUP_SOCK,
8668 BPF_CGROUP_INET6_POST_BIND),
8669 BPF_APROG_SEC("cgroup/dev", BPF_PROG_TYPE_CGROUP_DEVICE,
8670 BPF_CGROUP_DEVICE),
8671 BPF_APROG_SEC("sockops", BPF_PROG_TYPE_SOCK_OPS,
8672 BPF_CGROUP_SOCK_OPS),
8673 BPF_APROG_SEC("sk_skb/stream_parser", BPF_PROG_TYPE_SK_SKB,
8674 BPF_SK_SKB_STREAM_PARSER),
8675 BPF_APROG_SEC("sk_skb/stream_verdict", BPF_PROG_TYPE_SK_SKB,
8676 BPF_SK_SKB_STREAM_VERDICT),
8677 BPF_APROG_COMPAT("sk_skb", BPF_PROG_TYPE_SK_SKB),
8678 BPF_APROG_SEC("sk_msg", BPF_PROG_TYPE_SK_MSG,
8679 BPF_SK_MSG_VERDICT),
8680 BPF_APROG_SEC("lirc_mode2", BPF_PROG_TYPE_LIRC_MODE2,
8681 BPF_LIRC_MODE2),
8682 BPF_APROG_SEC("flow_dissector", BPF_PROG_TYPE_FLOW_DISSECTOR,
8683 BPF_FLOW_DISSECTOR),
8684 BPF_EAPROG_SEC("cgroup/bind4", BPF_PROG_TYPE_CGROUP_SOCK_ADDR,
8685 BPF_CGROUP_INET4_BIND),
8686 BPF_EAPROG_SEC("cgroup/bind6", BPF_PROG_TYPE_CGROUP_SOCK_ADDR,
8687 BPF_CGROUP_INET6_BIND),
8688 BPF_EAPROG_SEC("cgroup/connect4", BPF_PROG_TYPE_CGROUP_SOCK_ADDR,
8689 BPF_CGROUP_INET4_CONNECT),
8690 BPF_EAPROG_SEC("cgroup/connect6", BPF_PROG_TYPE_CGROUP_SOCK_ADDR,
8691 BPF_CGROUP_INET6_CONNECT),
8692 BPF_EAPROG_SEC("cgroup/sendmsg4", BPF_PROG_TYPE_CGROUP_SOCK_ADDR,
8693 BPF_CGROUP_UDP4_SENDMSG),
8694 BPF_EAPROG_SEC("cgroup/sendmsg6", BPF_PROG_TYPE_CGROUP_SOCK_ADDR,
8695 BPF_CGROUP_UDP6_SENDMSG),
8696 BPF_EAPROG_SEC("cgroup/recvmsg4", BPF_PROG_TYPE_CGROUP_SOCK_ADDR,
8697 BPF_CGROUP_UDP4_RECVMSG),
8698 BPF_EAPROG_SEC("cgroup/recvmsg6", BPF_PROG_TYPE_CGROUP_SOCK_ADDR,
8699 BPF_CGROUP_UDP6_RECVMSG),
8700 BPF_EAPROG_SEC("cgroup/getpeername4", BPF_PROG_TYPE_CGROUP_SOCK_ADDR,
8701 BPF_CGROUP_INET4_GETPEERNAME),
8702 BPF_EAPROG_SEC("cgroup/getpeername6", BPF_PROG_TYPE_CGROUP_SOCK_ADDR,
8703 BPF_CGROUP_INET6_GETPEERNAME),
8704 BPF_EAPROG_SEC("cgroup/getsockname4", BPF_PROG_TYPE_CGROUP_SOCK_ADDR,
8705 BPF_CGROUP_INET4_GETSOCKNAME),
8706 BPF_EAPROG_SEC("cgroup/getsockname6", BPF_PROG_TYPE_CGROUP_SOCK_ADDR,
8707 BPF_CGROUP_INET6_GETSOCKNAME),
063cc9f0
AI
8708 BPF_EAPROG_SEC("cgroup/sysctl", BPF_PROG_TYPE_CGROUP_SYSCTL,
8709 BPF_CGROUP_SYSCTL),
4cdbfb59
SF
8710 BPF_EAPROG_SEC("cgroup/getsockopt", BPF_PROG_TYPE_CGROUP_SOCKOPT,
8711 BPF_CGROUP_GETSOCKOPT),
8712 BPF_EAPROG_SEC("cgroup/setsockopt", BPF_PROG_TYPE_CGROUP_SOCKOPT,
8713 BPF_CGROUP_SETSOCKOPT),
590a0088 8714 BPF_PROG_SEC("struct_ops", BPF_PROG_TYPE_STRUCT_OPS),
499dd29d
JS
8715 BPF_EAPROG_SEC("sk_lookup/", BPF_PROG_TYPE_SK_LOOKUP,
8716 BPF_SK_LOOKUP),
583c9009 8717};
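
/* Usage sketch (illustrative, not part of libbpf itself): BPF program
 * authors select an entry from the table above via the SEC() macro in
 * their BPF C source, e.g. (program name below is hypothetical):
 *
 *	SEC("cgroup/connect4")
 *	int connect4_prog(struct bpf_sock_addr *ctx)
 *	{
 *		return 1;
 *	}
 *
 * which lets libbpf infer BPF_PROG_TYPE_CGROUP_SOCK_ADDR and
 * BPF_CGROUP_INET4_CONNECT without any explicit setup by the caller.
 */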
d7be143b 8718
956b620f 8719#undef BPF_PROG_SEC_IMPL
583c9009 8720#undef BPF_PROG_SEC
956b620f
AI
8721#undef BPF_APROG_SEC
8722#undef BPF_EAPROG_SEC
8723#undef BPF_APROG_COMPAT
d7a18ea7 8724#undef SEC_DEF
583c9009 8725
c76e4c22
TS
8726#define MAX_TYPE_NAME_SIZE 32
8727
d7a18ea7
AN
8728static const struct bpf_sec_def *find_sec_def(const char *sec_name)
8729{
8730 int i, n = ARRAY_SIZE(section_defs);
8731
8732 for (i = 0; i < n; i++) {
8733 if (strncmp(sec_name,
8734 section_defs[i].sec, section_defs[i].len))
8735 continue;
8736 return &section_defs[i];
8737 }
8738 return NULL;
8739}
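
/* Matching sketch (illustrative): find_sec_def() returns the first entry,
 * in table order, whose fixed-length prefix matches, so everything after
 * the prefix is free-form, e.g. (assuming the usual "kprobe/" definition
 * earlier in this table):
 *
 *	const struct bpf_sec_def *def = find_sec_def("kprobe/do_unlinkat");
 *	// def->sec == "kprobe/"; the "do_unlinkat" suffix is later
 *	// interpreted by the attach handler as the target function name
 */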
8740
c76e4c22
TS
8741static char *libbpf_get_type_names(bool attach_type)
8742{
d7a18ea7 8743 int i, len = ARRAY_SIZE(section_defs) * MAX_TYPE_NAME_SIZE;
c76e4c22
TS
8744 char *buf;
8745
8746 buf = malloc(len);
8747 if (!buf)
8748 return NULL;
8749
8750 buf[0] = '\0';
 8751	/* Build string buf with all available names */

d7a18ea7
AN
8752 for (i = 0; i < ARRAY_SIZE(section_defs); i++) {
8753 if (attach_type && !section_defs[i].is_attachable)
c76e4c22
TS
8754 continue;
8755
d7a18ea7 8756 if (strlen(buf) + strlen(section_defs[i].sec) + 2 > len) {
c76e4c22
TS
8757 free(buf);
8758 return NULL;
8759 }
8760 strcat(buf, " ");
d7a18ea7 8761 strcat(buf, section_defs[i].sec);
c76e4c22
TS
8762 }
8763
8764 return buf;
8765}
8766
b60df2a0
JK
8767int libbpf_prog_type_by_name(const char *name, enum bpf_prog_type *prog_type,
8768 enum bpf_attach_type *expected_attach_type)
583c9009 8769{
d7a18ea7 8770 const struct bpf_sec_def *sec_def;
c76e4c22 8771 char *type_names;
583c9009 8772
b60df2a0
JK
8773 if (!name)
8774 return -EINVAL;
583c9009 8775
d7a18ea7
AN
8776 sec_def = find_sec_def(name);
8777 if (sec_def) {
8778 *prog_type = sec_def->prog_type;
8779 *expected_attach_type = sec_def->expected_attach_type;
b60df2a0
JK
8780 return 0;
8781 }
d7a18ea7 8782
4a3d6c6a 8783 pr_debug("failed to guess program type from ELF section '%s'\n", name);
c76e4c22
TS
8784 type_names = libbpf_get_type_names(false);
8785 if (type_names != NULL) {
3f519353 8786 pr_debug("supported section(type) names are:%s\n", type_names);
c76e4c22
TS
8787 free(type_names);
8788 }
8789
dd4436bb 8790 return -ESRCH;
b60df2a0 8791}
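
/* Usage sketch (illustrative): resolve program/attach types from a
 * section name without going through bpf_object__open():
 *
 *	enum bpf_prog_type prog_type;
 *	enum bpf_attach_type attach_type;
 *
 *	if (!libbpf_prog_type_by_name("cgroup/bind4", &prog_type,
 *				      &attach_type)) {
 *		// prog_type == BPF_PROG_TYPE_CGROUP_SOCK_ADDR
 *		// attach_type == BPF_CGROUP_INET4_BIND
 *	}
 */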
583c9009 8792
590a0088
MKL
8793static struct bpf_map *find_struct_ops_map_by_offset(struct bpf_object *obj,
8794 size_t offset)
8795{
8796 struct bpf_map *map;
8797 size_t i;
8798
8799 for (i = 0; i < obj->nr_maps; i++) {
8800 map = &obj->maps[i];
8801 if (!bpf_map__is_struct_ops(map))
8802 continue;
8803 if (map->sec_offset <= offset &&
8804 offset - map->sec_offset < map->def.value_size)
8805 return map;
8806 }
8807
8808 return NULL;
8809}
8810
 8811 /* Collect relocations from ELF and populate st_ops->progs[] */
646f02ff
AN
8812static int bpf_object__collect_st_ops_relos(struct bpf_object *obj,
8813 GElf_Shdr *shdr, Elf_Data *data)
590a0088
MKL
8814{
8815 const struct btf_member *member;
8816 struct bpf_struct_ops *st_ops;
8817 struct bpf_program *prog;
8818 unsigned int shdr_idx;
8819 const struct btf *btf;
8820 struct bpf_map *map;
8821 Elf_Data *symbols;
7e06aad5 8822 unsigned int moff, insn_idx;
590a0088 8823 const char *name;
1d1a3bcf 8824 __u32 member_idx;
590a0088
MKL
8825 GElf_Sym sym;
8826 GElf_Rel rel;
8827 int i, nrels;
8828
8829 symbols = obj->efile.symbols;
8830 btf = obj->btf;
8831 nrels = shdr->sh_size / shdr->sh_entsize;
8832 for (i = 0; i < nrels; i++) {
8833 if (!gelf_getrel(data, i, &rel)) {
8834 pr_warn("struct_ops reloc: failed to get %d reloc\n", i);
8835 return -LIBBPF_ERRNO__FORMAT;
8836 }
8837
8838 if (!gelf_getsym(symbols, GELF_R_SYM(rel.r_info), &sym)) {
8839 pr_warn("struct_ops reloc: symbol %zx not found\n",
8840 (size_t)GELF_R_SYM(rel.r_info));
8841 return -LIBBPF_ERRNO__FORMAT;
8842 }
8843
88a82120 8844 name = elf_sym_str(obj, sym.st_name) ?: "<?>";
590a0088
MKL
8845 map = find_struct_ops_map_by_offset(obj, rel.r_offset);
8846 if (!map) {
8847 pr_warn("struct_ops reloc: cannot find map at rel.r_offset %zu\n",
8848 (size_t)rel.r_offset);
8849 return -EINVAL;
8850 }
8851
8852 moff = rel.r_offset - map->sec_offset;
8853 shdr_idx = sym.st_shndx;
8854 st_ops = map->st_ops;
8855 pr_debug("struct_ops reloc %s: for %lld value %lld shdr_idx %u rel.r_offset %zu map->sec_offset %zu name %d (\'%s\')\n",
8856 map->name,
8857 (long long)(rel.r_info >> 32),
8858 (long long)sym.st_value,
8859 shdr_idx, (size_t)rel.r_offset,
8860 map->sec_offset, sym.st_name, name);
8861
8862 if (shdr_idx >= SHN_LORESERVE) {
8863 pr_warn("struct_ops reloc %s: rel.r_offset %zu shdr_idx %u unsupported non-static function\n",
8864 map->name, (size_t)rel.r_offset, shdr_idx);
8865 return -LIBBPF_ERRNO__RELOC;
8866 }
7e06aad5
AN
8867 if (sym.st_value % BPF_INSN_SZ) {
8868 pr_warn("struct_ops reloc %s: invalid target program offset %llu\n",
17e54b09 8869 map->name, (unsigned long long)sym.st_value);
7e06aad5
AN
8870 return -LIBBPF_ERRNO__FORMAT;
8871 }
8872 insn_idx = sym.st_value / BPF_INSN_SZ;
590a0088
MKL
8873
8874 member = find_member_by_offset(st_ops->type, moff * 8);
8875 if (!member) {
8876 pr_warn("struct_ops reloc %s: cannot find member at moff %u\n",
8877 map->name, moff);
8878 return -EINVAL;
8879 }
8880 member_idx = member - btf_members(st_ops->type);
8881 name = btf__name_by_offset(btf, member->name_off);
8882
8883 if (!resolve_func_ptr(btf, member->type, NULL)) {
8884 pr_warn("struct_ops reloc %s: cannot relocate non func ptr %s\n",
8885 map->name, name);
8886 return -EINVAL;
8887 }
8888
7e06aad5 8889 prog = find_prog_by_sec_insn(obj, shdr_idx, insn_idx);
590a0088
MKL
8890 if (!prog) {
8891 pr_warn("struct_ops reloc %s: cannot find prog at shdr_idx %u to relocate func ptr %s\n",
8892 map->name, shdr_idx, name);
8893 return -EINVAL;
8894 }
8895
8896 if (prog->type == BPF_PROG_TYPE_UNSPEC) {
8897 const struct bpf_sec_def *sec_def;
8898
52109584 8899 sec_def = find_sec_def(prog->sec_name);
590a0088
MKL
8900 if (sec_def &&
8901 sec_def->prog_type != BPF_PROG_TYPE_STRUCT_OPS) {
8902 /* for pr_warn */
8903 prog->type = sec_def->prog_type;
8904 goto invalid_prog;
8905 }
8906
8907 prog->type = BPF_PROG_TYPE_STRUCT_OPS;
8908 prog->attach_btf_id = st_ops->type_id;
8909 prog->expected_attach_type = member_idx;
8910 } else if (prog->type != BPF_PROG_TYPE_STRUCT_OPS ||
8911 prog->attach_btf_id != st_ops->type_id ||
8912 prog->expected_attach_type != member_idx) {
8913 goto invalid_prog;
8914 }
8915 st_ops->progs[member_idx] = prog;
8916 }
8917
8918 return 0;
8919
8920invalid_prog:
8921 pr_warn("struct_ops reloc %s: cannot use prog %s in sec %s with type %u attach_btf_id %u expected_attach_type %u for func ptr %s\n",
52109584 8922 map->name, prog->name, prog->sec_name, prog->type,
590a0088
MKL
8923 prog->attach_btf_id, prog->expected_attach_type, name);
8924 return -EINVAL;
8925}
8926
a6ed02ca 8927#define BTF_TRACE_PREFIX "btf_trace_"
1e092a03 8928#define BTF_LSM_PREFIX "bpf_lsm_"
21aef70e 8929#define BTF_ITER_PREFIX "bpf_iter_"
a6ed02ca
KS
8930#define BTF_MAX_NAME_SIZE 128
8931
8932static int find_btf_by_prefix_kind(const struct btf *btf, const char *prefix,
8933 const char *name, __u32 kind)
8934{
8935 char btf_type_name[BTF_MAX_NAME_SIZE];
8936 int ret;
8937
8938 ret = snprintf(btf_type_name, sizeof(btf_type_name),
8939 "%s%s", prefix, name);
 8940	/* snprintf returns the number of characters written excluding the
 8941	 * terminating null. So, if >= BTF_MAX_NAME_SIZE characters are
 8942	 * written, it indicates truncation.
 8943	 */
8944 if (ret < 0 || ret >= sizeof(btf_type_name))
8945 return -ENAMETOOLONG;
8946 return btf__find_by_name_kind(btf, btf_type_name, kind);
8947}
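
/* Composition sketch (illustrative): for a raw tracepoint target named
 * "sched_switch", the effective lookup is for the "btf_trace_sched_switch"
 * TYPEDEF in the given BTF:
 *
 *	int id = find_btf_by_prefix_kind(btf, BTF_TRACE_PREFIX,
 *					 "sched_switch", BTF_KIND_TYPEDEF);
 */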
8948
91abb4a6
AN
8949static inline int find_attach_btf_id(struct btf *btf, const char *name,
8950 enum bpf_attach_type attach_type)
a6ed02ca
KS
8951{
8952 int err;
8953
8954 if (attach_type == BPF_TRACE_RAW_TP)
8955 err = find_btf_by_prefix_kind(btf, BTF_TRACE_PREFIX, name,
8956 BTF_KIND_TYPEDEF);
1e092a03
KS
8957 else if (attach_type == BPF_LSM_MAC)
8958 err = find_btf_by_prefix_kind(btf, BTF_LSM_PREFIX, name,
8959 BTF_KIND_FUNC);
c09add2f
YS
8960 else if (attach_type == BPF_TRACE_ITER)
8961 err = find_btf_by_prefix_kind(btf, BTF_ITER_PREFIX, name,
8962 BTF_KIND_FUNC);
a6ed02ca
KS
8963 else
8964 err = btf__find_by_name_kind(btf, name, BTF_KIND_FUNC);
8965
8966 return err;
8967}
8968
b8c54ea4
AS
8969int libbpf_find_vmlinux_btf_id(const char *name,
8970 enum bpf_attach_type attach_type)
12a8654b 8971{
a6ed02ca 8972 struct btf *btf;
3521ffa2 8973 int err;
12a8654b 8974
a6ed02ca 8975 btf = libbpf_find_kernel_btf();
12a8654b
AS
8976 if (IS_ERR(btf)) {
8977 pr_warn("vmlinux BTF is not found\n");
8978 return -EINVAL;
8979 }
8980
91abb4a6
AN
8981 err = find_attach_btf_id(btf, name, attach_type);
8982 if (err <= 0)
8983 pr_warn("%s is not found in vmlinux BTF\n", name);
8984
3521ffa2
AN
8985 btf__free(btf);
8986 return err;
b8c54ea4
AS
8987}
8988
e7bf94db
AS
8989static int libbpf_find_prog_btf_id(const char *name, __u32 attach_prog_fd)
8990{
8991 struct bpf_prog_info_linear *info_linear;
8992 struct bpf_prog_info *info;
8993 struct btf *btf = NULL;
8994 int err = -EINVAL;
8995
8996 info_linear = bpf_program__get_prog_info_linear(attach_prog_fd, 0);
8997 if (IS_ERR_OR_NULL(info_linear)) {
8998 pr_warn("failed get_prog_info_linear for FD %d\n",
8999 attach_prog_fd);
9000 return -EINVAL;
9001 }
9002 info = &info_linear->info;
9003 if (!info->btf_id) {
9004 pr_warn("The target program doesn't have BTF\n");
9005 goto out;
9006 }
9007 if (btf__get_from_id(info->btf_id, &btf)) {
9008 pr_warn("Failed to get BTF of the program\n");
9009 goto out;
9010 }
9011 err = btf__find_by_name_kind(btf, name, BTF_KIND_FUNC);
9012 btf__free(btf);
9013 if (err <= 0) {
9014 pr_warn("%s is not found in prog's BTF\n", name);
9015 goto out;
9016 }
9017out:
9018 free(info_linear);
9019 return err;
9020}
9021
91abb4a6
AN
9022static int find_kernel_btf_id(struct bpf_object *obj, const char *attach_name,
9023 enum bpf_attach_type attach_type,
9024 int *btf_obj_fd, int *btf_type_id)
9025{
9026 int ret, i;
9027
9028 ret = find_attach_btf_id(obj->btf_vmlinux, attach_name, attach_type);
9029 if (ret > 0) {
9030 *btf_obj_fd = 0; /* vmlinux BTF */
9031 *btf_type_id = ret;
9032 return 0;
9033 }
9034 if (ret != -ENOENT)
9035 return ret;
9036
9037 ret = load_module_btfs(obj);
9038 if (ret)
9039 return ret;
9040
9041 for (i = 0; i < obj->btf_module_cnt; i++) {
9042 const struct module_btf *mod = &obj->btf_modules[i];
9043
9044 ret = find_attach_btf_id(mod->btf, attach_name, attach_type);
9045 if (ret > 0) {
9046 *btf_obj_fd = mod->fd;
9047 *btf_type_id = ret;
9048 return 0;
9049 }
9050 if (ret == -ENOENT)
9051 continue;
9052
9053 return ret;
9054 }
9055
9056 return -ESRCH;
9057}
9058
9059static int libbpf_find_attach_btf_id(struct bpf_program *prog, int *btf_obj_fd, int *btf_type_id)
b8c54ea4 9060{
a6ed02ca
KS
9061 enum bpf_attach_type attach_type = prog->expected_attach_type;
9062 __u32 attach_prog_fd = prog->attach_prog_fd;
91abb4a6
AN
9063 const char *name = prog->sec_name, *attach_name;
9064 const struct bpf_sec_def *sec = NULL;
b8c54ea4
AS
9065 int i, err;
9066
12a8654b 9067 if (!name)
b8c54ea4 9068 return -EINVAL;
12a8654b 9069
d7a18ea7
AN
9070 for (i = 0; i < ARRAY_SIZE(section_defs); i++) {
9071 if (!section_defs[i].is_attach_btf)
12a8654b 9072 continue;
d7a18ea7 9073 if (strncmp(name, section_defs[i].sec, section_defs[i].len))
12a8654b 9074 continue;
91abb4a6
AN
9075
9076 sec = &section_defs[i];
9077 break;
9078 }
9079
9080 if (!sec) {
9081 pr_warn("failed to identify BTF ID based on ELF section name '%s'\n", name);
9082 return -ESRCH;
9083 }
9084 attach_name = name + sec->len;
9085
9086 /* BPF program's BTF ID */
9087 if (attach_prog_fd) {
9088 err = libbpf_find_prog_btf_id(attach_name, attach_prog_fd);
9089 if (err < 0) {
9090 pr_warn("failed to find BPF program (FD %d) BTF ID for '%s': %d\n",
9091 attach_prog_fd, attach_name, err);
9092 return err;
9093 }
9094 *btf_obj_fd = 0;
9095 *btf_type_id = err;
9096 return 0;
9097 }
9098
9099 /* kernel/module BTF ID */
9100 err = find_kernel_btf_id(prog->obj, attach_name, attach_type, btf_obj_fd, btf_type_id);
9101 if (err) {
9102 pr_warn("failed to find kernel BTF type ID of '%s': %d\n", attach_name, err);
b8c54ea4 9103 return err;
12a8654b 9104 }
91abb4a6 9105 return 0;
12a8654b
AS
9106}
9107
956b620f
AI
9108int libbpf_attach_type_by_name(const char *name,
9109 enum bpf_attach_type *attach_type)
9110{
c76e4c22 9111 char *type_names;
956b620f
AI
9112 int i;
9113
9114 if (!name)
9115 return -EINVAL;
9116
d7a18ea7
AN
9117 for (i = 0; i < ARRAY_SIZE(section_defs); i++) {
9118 if (strncmp(name, section_defs[i].sec, section_defs[i].len))
956b620f 9119 continue;
d7a18ea7 9120 if (!section_defs[i].is_attachable)
956b620f 9121 return -EINVAL;
25498a19 9122 *attach_type = section_defs[i].expected_attach_type;
956b620f
AI
9123 return 0;
9124 }
4a3d6c6a 9125 pr_debug("failed to guess attach type based on ELF section name '%s'\n", name);
c76e4c22
TS
9126 type_names = libbpf_get_type_names(true);
9127 if (type_names != NULL) {
4a3d6c6a 9128 pr_debug("attachable section(type) names are:%s\n", type_names);
c76e4c22
TS
9129 free(type_names);
9130 }
9131
956b620f
AI
9132 return -EINVAL;
9133}
9134
a324aae3 9135int bpf_map__fd(const struct bpf_map *map)
9d759a9b 9136{
6e009e65 9137 return map ? map->fd : -EINVAL;
9d759a9b
WN
9138}
9139
a324aae3 9140const struct bpf_map_def *bpf_map__def(const struct bpf_map *map)
9d759a9b 9141{
53897a78 9142 return map ? &map->def : ERR_PTR(-EINVAL);
9d759a9b
WN
9143}
9144
a324aae3 9145const char *bpf_map__name(const struct bpf_map *map)
561bbcca 9146{
009ad5d5 9147 return map ? map->name : NULL;
561bbcca
WN
9148}
9149
1bdb6c9a
AN
9150enum bpf_map_type bpf_map__type(const struct bpf_map *map)
9151{
9152 return map->def.type;
9153}
9154
9155int bpf_map__set_type(struct bpf_map *map, enum bpf_map_type type)
9156{
9157 if (map->fd >= 0)
9158 return -EBUSY;
9159 map->def.type = type;
9160 return 0;
9161}
9162
9163__u32 bpf_map__map_flags(const struct bpf_map *map)
9164{
9165 return map->def.map_flags;
9166}
9167
9168int bpf_map__set_map_flags(struct bpf_map *map, __u32 flags)
9169{
9170 if (map->fd >= 0)
9171 return -EBUSY;
9172 map->def.map_flags = flags;
9173 return 0;
9174}
9175
9176__u32 bpf_map__numa_node(const struct bpf_map *map)
9177{
9178 return map->numa_node;
9179}
9180
9181int bpf_map__set_numa_node(struct bpf_map *map, __u32 numa_node)
9182{
9183 if (map->fd >= 0)
9184 return -EBUSY;
9185 map->numa_node = numa_node;
9186 return 0;
9187}
9188
9189__u32 bpf_map__key_size(const struct bpf_map *map)
9190{
9191 return map->def.key_size;
9192}
9193
9194int bpf_map__set_key_size(struct bpf_map *map, __u32 size)
9195{
9196 if (map->fd >= 0)
9197 return -EBUSY;
9198 map->def.key_size = size;
9199 return 0;
9200}
9201
9202__u32 bpf_map__value_size(const struct bpf_map *map)
9203{
9204 return map->def.value_size;
9205}
9206
9207int bpf_map__set_value_size(struct bpf_map *map, __u32 size)
9208{
9209 if (map->fd >= 0)
9210 return -EBUSY;
9211 map->def.value_size = size;
9212 return 0;
9213}
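
/* Usage sketch (illustrative; "my_map" is a hypothetical map name): the
 * setters above only succeed before the map is created in the kernel,
 * i.e. between bpf_object__open() and bpf_object__load(); afterwards
 * they return -EBUSY:
 *
 *	struct bpf_map *map = bpf_object__find_map_by_name(obj, "my_map");
 *	int err;
 *
 *	err = bpf_map__set_value_size(map, 128);
 *	if (err)	// -EBUSY once map->fd >= 0
 *		return err;
 */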
9214
5b891af7 9215__u32 bpf_map__btf_key_type_id(const struct bpf_map *map)
8a138aed 9216{
61746dbe 9217 return map ? map->btf_key_type_id : 0;
8a138aed
MKL
9218}
9219
5b891af7 9220__u32 bpf_map__btf_value_type_id(const struct bpf_map *map)
8a138aed 9221{
61746dbe 9222 return map ? map->btf_value_type_id : 0;
8a138aed
MKL
9223}
9224
edb13ed4
ACM
9225int bpf_map__set_priv(struct bpf_map *map, void *priv,
9226 bpf_map_clear_priv_t clear_priv)
9d759a9b
WN
9227{
9228 if (!map)
9229 return -EINVAL;
9230
9231 if (map->priv) {
9232 if (map->clear_priv)
9233 map->clear_priv(map, map->priv);
9234 }
9235
9236 map->priv = priv;
9237 map->clear_priv = clear_priv;
9238 return 0;
9239}
9240
a324aae3 9241void *bpf_map__priv(const struct bpf_map *map)
9d759a9b 9242{
b4cbfa56 9243 return map ? map->priv : ERR_PTR(-EINVAL);
9d759a9b
WN
9244}
9245
e2842be5
THJ
9246int bpf_map__set_initial_value(struct bpf_map *map,
9247 const void *data, size_t size)
9248{
9249 if (!map->mmaped || map->libbpf_type == LIBBPF_MAP_KCONFIG ||
9250 size != map->def.value_size || map->fd >= 0)
9251 return -EINVAL;
9252
9253 memcpy(map->mmaped, data, size);
9254 return 0;
9255}
9256
a324aae3 9257bool bpf_map__is_offload_neutral(const struct bpf_map *map)
f83fb22c
JK
9258{
9259 return map->def.type == BPF_MAP_TYPE_PERF_EVENT_ARRAY;
9260}
9261
a324aae3 9262bool bpf_map__is_internal(const struct bpf_map *map)
d859900c
DB
9263{
9264 return map->libbpf_type != LIBBPF_MAP_UNSPEC;
9265}
9266
1bdb6c9a
AN
9267__u32 bpf_map__ifindex(const struct bpf_map *map)
9268{
9269 return map->map_ifindex;
9270}
9271
9272int bpf_map__set_ifindex(struct bpf_map *map, __u32 ifindex)
9aba3613 9273{
1bdb6c9a
AN
9274 if (map->fd >= 0)
9275 return -EBUSY;
9aba3613 9276 map->map_ifindex = ifindex;
1bdb6c9a 9277 return 0;
9aba3613
JK
9278}
9279
addb9fc9
NS
9280int bpf_map__set_inner_map_fd(struct bpf_map *map, int fd)
9281{
9282 if (!bpf_map_type__is_map_in_map(map->def.type)) {
be18010e 9283 pr_warn("error: unsupported map type\n");
addb9fc9
NS
9284 return -EINVAL;
9285 }
9286 if (map->inner_map_fd != -1) {
be18010e 9287 pr_warn("error: inner_map_fd already specified\n");
addb9fc9
NS
9288 return -EINVAL;
9289 }
9290 map->inner_map_fd = fd;
9291 return 0;
9292}
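
/* Sketch (illustrative): for map-in-map types (BPF_MAP_TYPE_ARRAY_OF_MAPS,
 * BPF_MAP_TYPE_HASH_OF_MAPS) the kernel needs a template inner map FD at
 * creation time, which can be supplied before load ("outer_map" below is
 * hypothetical):
 *
 *	int inner_fd = bpf_create_map(BPF_MAP_TYPE_ARRAY, sizeof(__u32),
 *				      sizeof(__u32), 8, 0);
 *	bpf_map__set_inner_map_fd(outer_map, inner_fd);
 */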
9293
0c19a9fb 9294static struct bpf_map *
a324aae3 9295__bpf_map__iter(const struct bpf_map *m, const struct bpf_object *obj, int i)
9d759a9b 9296{
0c19a9fb 9297 ssize_t idx;
9d759a9b
WN
9298 struct bpf_map *s, *e;
9299
9300 if (!obj || !obj->maps)
9301 return NULL;
9302
9303 s = obj->maps;
9304 e = obj->maps + obj->nr_maps;
9305
0c19a9fb 9306 if ((m < s) || (m >= e)) {
be18010e
KW
9307 pr_warn("error in %s: map handler doesn't belong to object\n",
9308 __func__);
9d759a9b
WN
9309 return NULL;
9310 }
9311
0c19a9fb
SF
9312 idx = (m - obj->maps) + i;
9313 if (idx >= obj->nr_maps || idx < 0)
9d759a9b
WN
9314 return NULL;
9315 return &obj->maps[idx];
9316}
561bbcca 9317
0c19a9fb 9318struct bpf_map *
a324aae3 9319bpf_map__next(const struct bpf_map *prev, const struct bpf_object *obj)
0c19a9fb
SF
9320{
9321 if (prev == NULL)
9322 return obj->maps;
9323
9324 return __bpf_map__iter(prev, obj, 1);
9325}
9326
9327struct bpf_map *
a324aae3 9328bpf_map__prev(const struct bpf_map *next, const struct bpf_object *obj)
0c19a9fb
SF
9329{
9330 if (next == NULL) {
9331 if (!obj->nr_maps)
9332 return NULL;
9333 return obj->maps + obj->nr_maps - 1;
9334 }
9335
9336 return __bpf_map__iter(next, obj, -1);
9337}
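
/* These iterators back the bpf_object__for_each_map() convenience macro
 * from libbpf.h; a typical walk looks like this (sketch):
 *
 *	struct bpf_map *m;
 *
 *	bpf_object__for_each_map(m, obj)
 *		printf("map: %s\n", bpf_map__name(m));
 */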
9338
561bbcca 9339struct bpf_map *
a324aae3 9340bpf_object__find_map_by_name(const struct bpf_object *obj, const char *name)
561bbcca
WN
9341{
9342 struct bpf_map *pos;
9343
f74a53d9 9344 bpf_object__for_each_map(pos, obj) {
973170e6 9345 if (pos->name && !strcmp(pos->name, name))
561bbcca
WN
9346 return pos;
9347 }
9348 return NULL;
9349}
5a6acad1 9350
f3cea32d 9351int
a324aae3 9352bpf_object__find_map_fd_by_name(const struct bpf_object *obj, const char *name)
f3cea32d
MF
9353{
9354 return bpf_map__fd(bpf_object__find_map_by_name(obj, name));
9355}
9356
5a6acad1
WN
9357struct bpf_map *
9358bpf_object__find_map_by_offset(struct bpf_object *obj, size_t offset)
9359{
db48814b 9360 return ERR_PTR(-ENOTSUP);
5a6acad1 9361}
e28ff1a8
JS
9362
9363long libbpf_get_error(const void *ptr)
9364{
d98363b5 9365 return PTR_ERR_OR_ZERO(ptr);
e28ff1a8 9366}
6f6d33f3
JF
9367
9368int bpf_prog_load(const char *file, enum bpf_prog_type type,
9369 struct bpf_object **pobj, int *prog_fd)
d7be143b
AI
9370{
9371 struct bpf_prog_load_attr attr;
9372
9373 memset(&attr, 0, sizeof(struct bpf_prog_load_attr));
9374 attr.file = file;
9375 attr.prog_type = type;
9376 attr.expected_attach_type = 0;
9377
9378 return bpf_prog_load_xattr(&attr, pobj, prog_fd);
9379}
9380
9381int bpf_prog_load_xattr(const struct bpf_prog_load_attr *attr,
9382 struct bpf_object **pobj, int *prog_fd)
6f6d33f3 9383{
33bae185 9384 struct bpf_object_open_attr open_attr = {};
48cca7e4 9385 struct bpf_program *prog, *first_prog = NULL;
6f6d33f3 9386 struct bpf_object *obj;
f0307a7e 9387 struct bpf_map *map;
6f6d33f3
JF
9388 int err;
9389
d7be143b
AI
9390 if (!attr)
9391 return -EINVAL;
17387dd5
JK
9392 if (!attr->file)
9393 return -EINVAL;
d7be143b 9394
33bae185
LY
9395 open_attr.file = attr->file;
9396 open_attr.prog_type = attr->prog_type;
9397
07f2d4ea 9398 obj = bpf_object__open_xattr(&open_attr);
3597683c 9399 if (IS_ERR_OR_NULL(obj))
6f6d33f3
JF
9400 return -ENOENT;
9401
48cca7e4 9402 bpf_object__for_each_program(prog, obj) {
dd4436bb 9403 enum bpf_attach_type attach_type = attr->expected_attach_type;
48cca7e4 9404 /*
dd4436bb
AN
9405 * to preserve backwards compatibility, bpf_prog_load treats
9406 * attr->prog_type, if specified, as an override to whatever
9407 * bpf_object__open guessed
48cca7e4 9408 */
dd4436bb
AN
9409 if (attr->prog_type != BPF_PROG_TYPE_UNSPEC) {
9410 bpf_program__set_type(prog, attr->prog_type);
9411 bpf_program__set_expected_attach_type(prog,
9412 attach_type);
9413 }
9414 if (bpf_program__get_type(prog) == BPF_PROG_TYPE_UNSPEC) {
 9415	/*
 9416	 * we haven't guessed from the section name and the user
 9417	 * didn't provide a fallback type, too bad...
 9418	 */
9419 bpf_object__close(obj);
9420 return -EINVAL;
583c9009 9421 }
48cca7e4 9422
dd4436bb 9423 prog->prog_ifindex = attr->ifindex;
da11b417 9424 prog->log_level = attr->log_level;
2b288740 9425 prog->prog_flags |= attr->prog_flags;
69495d2a 9426 if (!first_prog)
48cca7e4
AS
9427 first_prog = prog;
9428 }
9429
f74a53d9 9430 bpf_object__for_each_map(map, obj) {
f83fb22c
JK
9431 if (!bpf_map__is_offload_neutral(map))
9432 map->map_ifindex = attr->ifindex;
f0307a7e
DB
9433 }
9434
48cca7e4 9435 if (!first_prog) {
be18010e 9436 pr_warn("object file doesn't contain bpf program\n");
48cca7e4
AS
9437 bpf_object__close(obj);
9438 return -ENOENT;
583c9009
RG
9439 }
9440
6f6d33f3
JF
9441 err = bpf_object__load(obj);
9442 if (err) {
9443 bpf_object__close(obj);
e411eb25 9444 return err;
6f6d33f3
JF
9445 }
9446
9447 *pobj = obj;
48cca7e4 9448 *prog_fd = bpf_program__fd(first_prog);
6f6d33f3
JF
9449 return 0;
9450}
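
/* Usage sketch (illustrative; "prog.o" is a hypothetical object file):
 * the one-shot open+load convenience path:
 *
 *	struct bpf_object *obj;
 *	int prog_fd;
 *
 *	if (!bpf_prog_load("prog.o", BPF_PROG_TYPE_XDP, &obj, &prog_fd)) {
 *		// success: obj is loaded, prog_fd refers to the first
 *		// program found in the object
 *	}
 */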
d0cabbb0 9451
1c2e9efc 9452struct bpf_link {
d6958706 9453 int (*detach)(struct bpf_link *link);
1c2e9efc 9454 int (*destroy)(struct bpf_link *link);
c016b68e
AN
9455 char *pin_path; /* NULL, if not pinned */
9456 int fd; /* hook FD, -1 if not applicable */
d6958706 9457 bool disconnected;
1c2e9efc
AN
9458};
9459
cc4f864b
AN
9460/* Replace link's underlying BPF program with the new one */
9461int bpf_link__update_program(struct bpf_link *link, struct bpf_program *prog)
9462{
9463 return bpf_link_update(bpf_link__fd(link), bpf_program__fd(prog), NULL);
9464}
9465
d6958706
AN
9466/* Release "ownership" of underlying BPF resource (typically, BPF program
9467 * attached to some BPF hook, e.g., tracepoint, kprobe, etc). Disconnected
9468 * link, when destructed through bpf_link__destroy() call won't attempt to
9469 * detach/unregisted that BPF resource. This is useful in situations where,
9470 * say, attached BPF program has to outlive userspace program that attached it
9471 * in the system. Depending on type of BPF program, though, there might be
9472 * additional steps (like pinning BPF program in BPF FS) necessary to ensure
9473 * exit of userspace program doesn't trigger automatic detachment and clean up
9474 * inside the kernel.
9475 */
9476void bpf_link__disconnect(struct bpf_link *link)
9477{
9478 link->disconnected = true;
9479}
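
/* Sketch (illustrative; the pin path is hypothetical): for attachments
 * backed by a kernel bpf_link FD, combining pinning with disconnect keeps
 * the attachment alive after the process exits:
 *
 *	if (!bpf_link__pin(link, "/sys/fs/bpf/mylink"))
 *		bpf_link__disconnect(link);
 *	bpf_link__destroy(link);	// frees memory, skips detach
 */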
9480
1c2e9efc
AN
9481int bpf_link__destroy(struct bpf_link *link)
9482{
d6958706 9483 int err = 0;
1c2e9efc 9484
50450fc7 9485 if (IS_ERR_OR_NULL(link))
1c2e9efc
AN
9486 return 0;
9487
d6958706
AN
9488 if (!link->disconnected && link->detach)
9489 err = link->detach(link);
9490 if (link->destroy)
9491 link->destroy(link);
c016b68e
AN
9492 if (link->pin_path)
9493 free(link->pin_path);
1c2e9efc
AN
9494 free(link);
9495
9496 return err;
9497}
9498
c016b68e
AN
9499int bpf_link__fd(const struct bpf_link *link)
9500{
9501 return link->fd;
9502}
9503
9504const char *bpf_link__pin_path(const struct bpf_link *link)
9505{
9506 return link->pin_path;
9507}
9508
9509static int bpf_link__detach_fd(struct bpf_link *link)
9510{
9511 return close(link->fd);
9512}
9513
9514struct bpf_link *bpf_link__open(const char *path)
9515{
9516 struct bpf_link *link;
9517 int fd;
9518
9519 fd = bpf_obj_get(path);
9520 if (fd < 0) {
9521 fd = -errno;
9522 pr_warn("failed to open link at %s: %d\n", path, fd);
9523 return ERR_PTR(fd);
9524 }
9525
9526 link = calloc(1, sizeof(*link));
9527 if (!link) {
9528 close(fd);
9529 return ERR_PTR(-ENOMEM);
9530 }
9531 link->detach = &bpf_link__detach_fd;
9532 link->fd = fd;
9533
9534 link->pin_path = strdup(path);
9535 if (!link->pin_path) {
9536 bpf_link__destroy(link);
9537 return ERR_PTR(-ENOMEM);
9538 }
9539
9540 return link;
9541}
9542
2e49527e
AN
9543int bpf_link__detach(struct bpf_link *link)
9544{
9545 return bpf_link_detach(link->fd) ? -errno : 0;
9546}
9547
c016b68e
AN
9548int bpf_link__pin(struct bpf_link *link, const char *path)
9549{
9550 int err;
9551
9552 if (link->pin_path)
9553 return -EBUSY;
9554 err = make_parent_dir(path);
9555 if (err)
9556 return err;
9557 err = check_path(path);
9558 if (err)
9559 return err;
9560
9561 link->pin_path = strdup(path);
9562 if (!link->pin_path)
9563 return -ENOMEM;
9564
9565 if (bpf_obj_pin(link->fd, link->pin_path)) {
9566 err = -errno;
9567 zfree(&link->pin_path);
9568 return err;
9569 }
9570
9571 pr_debug("link fd=%d: pinned at %s\n", link->fd, link->pin_path);
9572 return 0;
9573}
9574
9575int bpf_link__unpin(struct bpf_link *link)
9576{
9577 int err;
9578
9579 if (!link->pin_path)
9580 return -EINVAL;
9581
9582 err = unlink(link->pin_path);
9583 if (err != 0)
9584 return -errno;
9585
9586 pr_debug("link fd=%d: unpinned from %s\n", link->fd, link->pin_path);
9587 zfree(&link->pin_path);
9588 return 0;
9589}
63f2f5ee 9590
d6958706 9591static int bpf_link__detach_perf_event(struct bpf_link *link)
63f2f5ee 9592{
63f2f5ee
AN
9593 int err;
9594
c016b68e 9595 err = ioctl(link->fd, PERF_EVENT_IOC_DISABLE, 0);
63f2f5ee
AN
9596 if (err)
9597 err = -errno;
9598
c016b68e 9599 close(link->fd);
63f2f5ee
AN
9600 return err;
9601}
9602
9603struct bpf_link *bpf_program__attach_perf_event(struct bpf_program *prog,
9604 int pfd)
9605{
9606 char errmsg[STRERR_BUFSIZE];
c016b68e 9607 struct bpf_link *link;
63f2f5ee
AN
9608 int prog_fd, err;
9609
9610 if (pfd < 0) {
52109584
AN
9611 pr_warn("prog '%s': invalid perf event FD %d\n",
9612 prog->name, pfd);
63f2f5ee
AN
9613 return ERR_PTR(-EINVAL);
9614 }
9615 prog_fd = bpf_program__fd(prog);
9616 if (prog_fd < 0) {
52109584
AN
9617 pr_warn("prog '%s': can't attach BPF program w/o FD (did you load it?)\n",
9618 prog->name);
63f2f5ee
AN
9619 return ERR_PTR(-EINVAL);
9620 }
9621
d6958706 9622 link = calloc(1, sizeof(*link));
63f2f5ee
AN
9623 if (!link)
9624 return ERR_PTR(-ENOMEM);
c016b68e 9625 link->detach = &bpf_link__detach_perf_event;
63f2f5ee
AN
9626 link->fd = pfd;
9627
9628 if (ioctl(pfd, PERF_EVENT_IOC_SET_BPF, prog_fd) < 0) {
9629 err = -errno;
9630 free(link);
52109584
AN
9631 pr_warn("prog '%s': failed to attach to pfd %d: %s\n",
9632 prog->name, pfd, libbpf_strerror_r(err, errmsg, sizeof(errmsg)));
d4b4dd6c 9633 if (err == -EPROTO)
52109584
AN
9634 pr_warn("prog '%s': try add PERF_SAMPLE_CALLCHAIN to or remove exclude_callchain_[kernel|user] from pfd %d\n",
9635 prog->name, pfd);
63f2f5ee
AN
9636 return ERR_PTR(err);
9637 }
9638 if (ioctl(pfd, PERF_EVENT_IOC_ENABLE, 0) < 0) {
9639 err = -errno;
9640 free(link);
52109584
AN
9641 pr_warn("prog '%s': failed to enable pfd %d: %s\n",
9642 prog->name, pfd, libbpf_strerror_r(err, errmsg, sizeof(errmsg)));
63f2f5ee
AN
9643 return ERR_PTR(err);
9644 }
c016b68e 9645 return link;
63f2f5ee
AN
9646}
9647
b2650027
AN
 9648 /*
 9649  * this function is expected to parse an integer in the range of [0, 2^31-1]
 9650  * from the given file using scanf format string fmt. If the actual parsed
 9651  * value is negative, the result might be indistinguishable from an error
 9652  */
9653static int parse_uint_from_file(const char *file, const char *fmt)
9654{
9655 char buf[STRERR_BUFSIZE];
9656 int err, ret;
9657 FILE *f;
9658
9659 f = fopen(file, "r");
9660 if (!f) {
9661 err = -errno;
9662 pr_debug("failed to open '%s': %s\n", file,
9663 libbpf_strerror_r(err, buf, sizeof(buf)));
9664 return err;
9665 }
9666 err = fscanf(f, fmt, &ret);
9667 if (err != 1) {
9668 err = err == EOF ? -EIO : -errno;
9669 pr_debug("failed to parse '%s': %s\n", file,
9670 libbpf_strerror_r(err, buf, sizeof(buf)));
9671 fclose(f);
9672 return err;
9673 }
9674 fclose(f);
9675 return ret;
9676}
9677
9678static int determine_kprobe_perf_type(void)
9679{
9680 const char *file = "/sys/bus/event_source/devices/kprobe/type";
9681
9682 return parse_uint_from_file(file, "%d\n");
9683}
9684
9685static int determine_uprobe_perf_type(void)
9686{
9687 const char *file = "/sys/bus/event_source/devices/uprobe/type";
9688
9689 return parse_uint_from_file(file, "%d\n");
9690}
9691
9692static int determine_kprobe_retprobe_bit(void)
9693{
9694 const char *file = "/sys/bus/event_source/devices/kprobe/format/retprobe";
9695
9696 return parse_uint_from_file(file, "config:%d\n");
9697}
9698
9699static int determine_uprobe_retprobe_bit(void)
9700{
9701 const char *file = "/sys/bus/event_source/devices/uprobe/format/retprobe";
9702
9703 return parse_uint_from_file(file, "config:%d\n");
9704}
9705
9706static int perf_event_open_probe(bool uprobe, bool retprobe, const char *name,
9707 uint64_t offset, int pid)
9708{
9709 struct perf_event_attr attr = {};
9710 char errmsg[STRERR_BUFSIZE];
9711 int type, pfd, err;
9712
9713 type = uprobe ? determine_uprobe_perf_type()
9714 : determine_kprobe_perf_type();
9715 if (type < 0) {
be18010e
KW
9716 pr_warn("failed to determine %s perf type: %s\n",
9717 uprobe ? "uprobe" : "kprobe",
9718 libbpf_strerror_r(type, errmsg, sizeof(errmsg)));
b2650027
AN
9719 return type;
9720 }
9721 if (retprobe) {
9722 int bit = uprobe ? determine_uprobe_retprobe_bit()
9723 : determine_kprobe_retprobe_bit();
9724
9725 if (bit < 0) {
be18010e
KW
9726 pr_warn("failed to determine %s retprobe bit: %s\n",
9727 uprobe ? "uprobe" : "kprobe",
9728 libbpf_strerror_r(bit, errmsg, sizeof(errmsg)));
b2650027
AN
9729 return bit;
9730 }
9731 attr.config |= 1 << bit;
9732 }
9733 attr.size = sizeof(attr);
9734 attr.type = type;
36db2a94
AN
9735 attr.config1 = ptr_to_u64(name); /* kprobe_func or uprobe_path */
9736 attr.config2 = offset; /* kprobe_addr or probe_offset */
b2650027
AN
9737
9738 /* pid filter is meaningful only for uprobes */
9739 pfd = syscall(__NR_perf_event_open, &attr,
9740 pid < 0 ? -1 : pid /* pid */,
9741 pid == -1 ? 0 : -1 /* cpu */,
9742 -1 /* group_fd */, PERF_FLAG_FD_CLOEXEC);
9743 if (pfd < 0) {
9744 err = -errno;
be18010e
KW
9745 pr_warn("%s perf_event_open() failed: %s\n",
9746 uprobe ? "uprobe" : "kprobe",
9747 libbpf_strerror_r(err, errmsg, sizeof(errmsg)));
b2650027
AN
9748 return err;
9749 }
9750 return pfd;
9751}
9752
9753struct bpf_link *bpf_program__attach_kprobe(struct bpf_program *prog,
9754 bool retprobe,
9755 const char *func_name)
9756{
9757 char errmsg[STRERR_BUFSIZE];
9758 struct bpf_link *link;
9759 int pfd, err;
9760
9761 pfd = perf_event_open_probe(false /* uprobe */, retprobe, func_name,
9762 0 /* offset */, -1 /* pid */);
9763 if (pfd < 0) {
52109584
AN
9764 pr_warn("prog '%s': failed to create %s '%s' perf event: %s\n",
9765 prog->name, retprobe ? "kretprobe" : "kprobe", func_name,
be18010e 9766 libbpf_strerror_r(pfd, errmsg, sizeof(errmsg)));
b2650027
AN
9767 return ERR_PTR(pfd);
9768 }
9769 link = bpf_program__attach_perf_event(prog, pfd);
9770 if (IS_ERR(link)) {
9771 close(pfd);
9772 err = PTR_ERR(link);
52109584
AN
9773 pr_warn("prog '%s': failed to attach to %s '%s': %s\n",
9774 prog->name, retprobe ? "kretprobe" : "kprobe", func_name,
be18010e 9775 libbpf_strerror_r(err, errmsg, sizeof(errmsg)));
b2650027
AN
9776 return link;
9777 }
9778 return link;
9779}
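
/* Usage sketch (illustrative; "do_unlinkat" is just an example of an
 * existing kernel function):
 *
 *	struct bpf_link *link;
 *
 *	link = bpf_program__attach_kprobe(prog, false, "do_unlinkat");
 *	if (IS_ERR(link))
 *		return PTR_ERR(link);
 */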
9780
d7a18ea7
AN
9781static struct bpf_link *attach_kprobe(const struct bpf_sec_def *sec,
9782 struct bpf_program *prog)
9783{
9784 const char *func_name;
9785 bool retprobe;
9786
52109584 9787 func_name = prog->sec_name + sec->len;
d7a18ea7
AN
9788 retprobe = strcmp(sec->sec, "kretprobe/") == 0;
9789
9790 return bpf_program__attach_kprobe(prog, retprobe, func_name);
9791}
9792
b2650027
AN
9793struct bpf_link *bpf_program__attach_uprobe(struct bpf_program *prog,
9794 bool retprobe, pid_t pid,
9795 const char *binary_path,
9796 size_t func_offset)
9797{
9798 char errmsg[STRERR_BUFSIZE];
9799 struct bpf_link *link;
9800 int pfd, err;
9801
9802 pfd = perf_event_open_probe(true /* uprobe */, retprobe,
9803 binary_path, func_offset, pid);
9804 if (pfd < 0) {
52109584
AN
9805 pr_warn("prog '%s': failed to create %s '%s:0x%zx' perf event: %s\n",
9806 prog->name, retprobe ? "uretprobe" : "uprobe",
be18010e
KW
9807 binary_path, func_offset,
9808 libbpf_strerror_r(pfd, errmsg, sizeof(errmsg)));
b2650027
AN
9809 return ERR_PTR(pfd);
9810 }
9811 link = bpf_program__attach_perf_event(prog, pfd);
9812 if (IS_ERR(link)) {
9813 close(pfd);
9814 err = PTR_ERR(link);
52109584
AN
9815 pr_warn("prog '%s': failed to attach to %s '%s:0x%zx': %s\n",
9816 prog->name, retprobe ? "uretprobe" : "uprobe",
be18010e
KW
9817 binary_path, func_offset,
9818 libbpf_strerror_r(err, errmsg, sizeof(errmsg)));
b2650027
AN
9819 return link;
9820 }
9821 return link;
9822}
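
/* Usage sketch (illustrative; the binary path and 0x1234 offset are
 * hypothetical): attach a uretprobe for all processes (pid == -1):
 *
 *	link = bpf_program__attach_uprobe(prog, true, -1,
 *					  "/bin/bash", 0x1234);
 */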
9823
f6de59c1
AN
9824static int determine_tracepoint_id(const char *tp_category,
9825 const char *tp_name)
9826{
9827 char file[PATH_MAX];
9828 int ret;
9829
9830 ret = snprintf(file, sizeof(file),
9831 "/sys/kernel/debug/tracing/events/%s/%s/id",
9832 tp_category, tp_name);
9833 if (ret < 0)
9834 return -errno;
9835 if (ret >= sizeof(file)) {
9836 pr_debug("tracepoint %s/%s path is too long\n",
9837 tp_category, tp_name);
9838 return -E2BIG;
9839 }
9840 return parse_uint_from_file(file, "%d\n");
9841}
9842
9843static int perf_event_open_tracepoint(const char *tp_category,
9844 const char *tp_name)
9845{
9846 struct perf_event_attr attr = {};
9847 char errmsg[STRERR_BUFSIZE];
9848 int tp_id, pfd, err;
9849
9850 tp_id = determine_tracepoint_id(tp_category, tp_name);
9851 if (tp_id < 0) {
be18010e
KW
9852 pr_warn("failed to determine tracepoint '%s/%s' perf event ID: %s\n",
9853 tp_category, tp_name,
9854 libbpf_strerror_r(tp_id, errmsg, sizeof(errmsg)));
f6de59c1
AN
9855 return tp_id;
9856 }
9857
9858 attr.type = PERF_TYPE_TRACEPOINT;
9859 attr.size = sizeof(attr);
9860 attr.config = tp_id;
9861
9862 pfd = syscall(__NR_perf_event_open, &attr, -1 /* pid */, 0 /* cpu */,
9863 -1 /* group_fd */, PERF_FLAG_FD_CLOEXEC);
9864 if (pfd < 0) {
9865 err = -errno;
be18010e
KW
9866 pr_warn("tracepoint '%s/%s' perf_event_open() failed: %s\n",
9867 tp_category, tp_name,
9868 libbpf_strerror_r(err, errmsg, sizeof(errmsg)));
f6de59c1
AN
9869 return err;
9870 }
9871 return pfd;
9872}
9873
9874struct bpf_link *bpf_program__attach_tracepoint(struct bpf_program *prog,
9875 const char *tp_category,
9876 const char *tp_name)
9877{
9878 char errmsg[STRERR_BUFSIZE];
9879 struct bpf_link *link;
9880 int pfd, err;
9881
9882 pfd = perf_event_open_tracepoint(tp_category, tp_name);
9883 if (pfd < 0) {
52109584
AN
9884 pr_warn("prog '%s': failed to create tracepoint '%s/%s' perf event: %s\n",
9885 prog->name, tp_category, tp_name,
be18010e 9886 libbpf_strerror_r(pfd, errmsg, sizeof(errmsg)));
f6de59c1
AN
9887 return ERR_PTR(pfd);
9888 }
9889 link = bpf_program__attach_perf_event(prog, pfd);
9890 if (IS_ERR(link)) {
9891 close(pfd);
9892 err = PTR_ERR(link);
52109584
AN
9893 pr_warn("prog '%s': failed to attach to tracepoint '%s/%s': %s\n",
9894 prog->name, tp_category, tp_name,
be18010e 9895 libbpf_strerror_r(err, errmsg, sizeof(errmsg)));
f6de59c1
AN
9896 return link;
9897 }
9898 return link;
9899}
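
/* Usage sketch (illustrative): category/name mirror the tracefs layout,
 * e.g. /sys/kernel/debug/tracing/events/sched/sched_switch:
 *
 *	link = bpf_program__attach_tracepoint(prog, "sched", "sched_switch");
 */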
9900
d7a18ea7
AN
9901static struct bpf_link *attach_tp(const struct bpf_sec_def *sec,
9902 struct bpf_program *prog)
9903{
9904 char *sec_name, *tp_cat, *tp_name;
9905 struct bpf_link *link;
9906
52109584 9907 sec_name = strdup(prog->sec_name);
d7a18ea7
AN
9908 if (!sec_name)
9909 return ERR_PTR(-ENOMEM);
9910
9911 /* extract "tp/<category>/<name>" */
9912 tp_cat = sec_name + sec->len;
9913 tp_name = strchr(tp_cat, '/');
9914 if (!tp_name) {
9915 link = ERR_PTR(-EINVAL);
9916 goto out;
9917 }
9918 *tp_name = '\0';
9919 tp_name++;
9920
9921 link = bpf_program__attach_tracepoint(prog, tp_cat, tp_name);
9922out:
9923 free(sec_name);
9924 return link;
9925}
9926
84bf5e1f
AN
9927struct bpf_link *bpf_program__attach_raw_tracepoint(struct bpf_program *prog,
9928 const char *tp_name)
9929{
9930 char errmsg[STRERR_BUFSIZE];
c016b68e 9931 struct bpf_link *link;
84bf5e1f
AN
9932 int prog_fd, pfd;
9933
9934 prog_fd = bpf_program__fd(prog);
9935 if (prog_fd < 0) {
52109584 9936 pr_warn("prog '%s': can't attach before loaded\n", prog->name);
84bf5e1f
AN
9937 return ERR_PTR(-EINVAL);
9938 }
9939
d6958706 9940 link = calloc(1, sizeof(*link));
84bf5e1f
AN
9941 if (!link)
9942 return ERR_PTR(-ENOMEM);
c016b68e 9943 link->detach = &bpf_link__detach_fd;
84bf5e1f
AN
9944
9945 pfd = bpf_raw_tracepoint_open(tp_name, prog_fd);
9946 if (pfd < 0) {
9947 pfd = -errno;
9948 free(link);
52109584
AN
9949 pr_warn("prog '%s': failed to attach to raw tracepoint '%s': %s\n",
9950 prog->name, tp_name, libbpf_strerror_r(pfd, errmsg, sizeof(errmsg)));
84bf5e1f
AN
9951 return ERR_PTR(pfd);
9952 }
9953 link->fd = pfd;
c016b68e 9954 return link;
84bf5e1f
AN
9955}
9956
d7a18ea7
AN
9957static struct bpf_link *attach_raw_tp(const struct bpf_sec_def *sec,
9958 struct bpf_program *prog)
9959{
52109584 9960 const char *tp_name = prog->sec_name + sec->len;
d7a18ea7
AN
9961
9962 return bpf_program__attach_raw_tracepoint(prog, tp_name);
9963}
9964
1e092a03
KS
9965/* Common logic for all BPF program types that attach to a btf_id */
9966static struct bpf_link *bpf_program__attach_btf_id(struct bpf_program *prog)
b8c54ea4
AS
9967{
9968 char errmsg[STRERR_BUFSIZE];
c016b68e 9969 struct bpf_link *link;
b8c54ea4
AS
9970 int prog_fd, pfd;
9971
9972 prog_fd = bpf_program__fd(prog);
9973 if (prog_fd < 0) {
52109584 9974 pr_warn("prog '%s': can't attach before loaded\n", prog->name);
b8c54ea4
AS
9975 return ERR_PTR(-EINVAL);
9976 }
9977
d6958706 9978 link = calloc(1, sizeof(*link));
b8c54ea4
AS
9979 if (!link)
9980 return ERR_PTR(-ENOMEM);
c016b68e 9981 link->detach = &bpf_link__detach_fd;
b8c54ea4
AS
9982
9983 pfd = bpf_raw_tracepoint_open(NULL, prog_fd);
9984 if (pfd < 0) {
9985 pfd = -errno;
9986 free(link);
52109584
AN
9987 pr_warn("prog '%s': failed to attach: %s\n",
9988 prog->name, libbpf_strerror_r(pfd, errmsg, sizeof(errmsg)));
b8c54ea4
AS
9989 return ERR_PTR(pfd);
9990 }
9991 link->fd = pfd;
9992 return (struct bpf_link *)link;
9993}
9994
1e092a03
KS
9995struct bpf_link *bpf_program__attach_trace(struct bpf_program *prog)
9996{
9997 return bpf_program__attach_btf_id(prog);
9998}
9999
10000struct bpf_link *bpf_program__attach_lsm(struct bpf_program *prog)
10001{
10002 return bpf_program__attach_btf_id(prog);
10003}
10004
d7a18ea7
AN
10005static struct bpf_link *attach_trace(const struct bpf_sec_def *sec,
10006 struct bpf_program *prog)
10007{
10008 return bpf_program__attach_trace(prog);
10009}
10010
1e092a03
KS
10011static struct bpf_link *attach_lsm(const struct bpf_sec_def *sec,
10012 struct bpf_program *prog)
10013{
10014 return bpf_program__attach_lsm(prog);
10015}
10016
c09add2f
YS
10017static struct bpf_link *attach_iter(const struct bpf_sec_def *sec,
10018 struct bpf_program *prog)
10019{
10020 return bpf_program__attach_iter(prog, NULL);
10021}
10022
d60d81ac 10023static struct bpf_link *
a5359091 10024bpf_program__attach_fd(struct bpf_program *prog, int target_fd, int btf_id,
d60d81ac 10025 const char *target_name)
cc4f864b 10026{
a5359091
THJ
10027 DECLARE_LIBBPF_OPTS(bpf_link_create_opts, opts,
10028 .target_btf_id = btf_id);
cc4f864b
AN
10029 enum bpf_attach_type attach_type;
10030 char errmsg[STRERR_BUFSIZE];
10031 struct bpf_link *link;
10032 int prog_fd, link_fd;
10033
10034 prog_fd = bpf_program__fd(prog);
10035 if (prog_fd < 0) {
52109584 10036 pr_warn("prog '%s': can't attach before loaded\n", prog->name);
cc4f864b
AN
10037 return ERR_PTR(-EINVAL);
10038 }
10039
10040 link = calloc(1, sizeof(*link));
10041 if (!link)
10042 return ERR_PTR(-ENOMEM);
10043 link->detach = &bpf_link__detach_fd;
10044
10045 attach_type = bpf_program__get_expected_attach_type(prog);
a5359091 10046 link_fd = bpf_link_create(prog_fd, target_fd, attach_type, &opts);
cc4f864b
AN
10047 if (link_fd < 0) {
10048 link_fd = -errno;
10049 free(link);
52109584
AN
10050 pr_warn("prog '%s': failed to attach to %s: %s\n",
10051 prog->name, target_name,
cc4f864b
AN
10052 libbpf_strerror_r(link_fd, errmsg, sizeof(errmsg)));
10053 return ERR_PTR(link_fd);
10054 }
10055 link->fd = link_fd;
10056 return link;
10057}
10058
d60d81ac
JS
10059struct bpf_link *
10060bpf_program__attach_cgroup(struct bpf_program *prog, int cgroup_fd)
10061{
a5359091 10062 return bpf_program__attach_fd(prog, cgroup_fd, 0, "cgroup");
d60d81ac
JS
10063}
10064
10065struct bpf_link *
10066bpf_program__attach_netns(struct bpf_program *prog, int netns_fd)
10067{
a5359091 10068 return bpf_program__attach_fd(prog, netns_fd, 0, "netns");
d60d81ac
JS
10069}
10070
dc8698ca
AN
10071struct bpf_link *bpf_program__attach_xdp(struct bpf_program *prog, int ifindex)
10072{
10073 /* target_fd/target_ifindex use the same field in LINK_CREATE */
a5359091
THJ
10074 return bpf_program__attach_fd(prog, ifindex, 0, "xdp");
10075}
10076
10077struct bpf_link *bpf_program__attach_freplace(struct bpf_program *prog,
10078 int target_fd,
10079 const char *attach_func_name)
10080{
10081 int btf_id;
10082
10083 if (!!target_fd != !!attach_func_name) {
10084 pr_warn("prog '%s': supply none or both of target_fd and attach_func_name\n",
10085 prog->name);
10086 return ERR_PTR(-EINVAL);
10087 }
10088
10089 if (prog->type != BPF_PROG_TYPE_EXT) {
 10090	pr_warn("prog '%s': only BPF_PROG_TYPE_EXT can attach as freplace\n",
10091 prog->name);
10092 return ERR_PTR(-EINVAL);
10093 }
10094
10095 if (target_fd) {
10096 btf_id = libbpf_find_prog_btf_id(attach_func_name, target_fd);
10097 if (btf_id < 0)
10098 return ERR_PTR(btf_id);
10099
10100 return bpf_program__attach_fd(prog, target_fd, btf_id, "freplace");
10101 } else {
10102 /* no target, so use raw_tracepoint_open for compatibility
10103 * with old kernels
10104 */
10105 return bpf_program__attach_trace(prog);
10106 }
dc8698ca
AN
10107}
10108
c09add2f
YS
10109struct bpf_link *
10110bpf_program__attach_iter(struct bpf_program *prog,
10111 const struct bpf_iter_attach_opts *opts)
10112{
cd31039a 10113 DECLARE_LIBBPF_OPTS(bpf_link_create_opts, link_create_opts);
c09add2f
YS
10114 char errmsg[STRERR_BUFSIZE];
10115 struct bpf_link *link;
10116 int prog_fd, link_fd;
cd31039a 10117 __u32 target_fd = 0;
c09add2f
YS
10118
10119 if (!OPTS_VALID(opts, bpf_iter_attach_opts))
10120 return ERR_PTR(-EINVAL);
10121
74fc097d
YS
10122 link_create_opts.iter_info = OPTS_GET(opts, link_info, (void *)0);
10123 link_create_opts.iter_info_len = OPTS_GET(opts, link_info_len, 0);
cd31039a 10124
c09add2f
YS
10125 prog_fd = bpf_program__fd(prog);
10126 if (prog_fd < 0) {
52109584 10127 pr_warn("prog '%s': can't attach before loaded\n", prog->name);
c09add2f
YS
10128 return ERR_PTR(-EINVAL);
10129 }
10130
10131 link = calloc(1, sizeof(*link));
10132 if (!link)
10133 return ERR_PTR(-ENOMEM);
10134 link->detach = &bpf_link__detach_fd;
10135
cd31039a
YS
10136 link_fd = bpf_link_create(prog_fd, target_fd, BPF_TRACE_ITER,
10137 &link_create_opts);
c09add2f
YS
10138 if (link_fd < 0) {
10139 link_fd = -errno;
10140 free(link);
52109584
AN
10141 pr_warn("prog '%s': failed to attach to iterator: %s\n",
10142 prog->name, libbpf_strerror_r(link_fd, errmsg, sizeof(errmsg)));
c09add2f
YS
10143 return ERR_PTR(link_fd);
10144 }
10145 link->fd = link_fd;
10146 return link;
10147}
10148
d7a18ea7
AN
10149struct bpf_link *bpf_program__attach(struct bpf_program *prog)
10150{
10151 const struct bpf_sec_def *sec_def;
10152
52109584 10153 sec_def = find_sec_def(prog->sec_name);
d7a18ea7
AN
10154 if (!sec_def || !sec_def->attach_fn)
10155 return ERR_PTR(-ESRCH);
10156
10157 return sec_def->attach_fn(sec_def, prog);
10158}
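
/* Sketch (illustrative): this generic entry point dispatches on the
 * program's SEC() name, so for SEC("tracepoint/sched/sched_switch"):
 *
 *	link = bpf_program__attach(prog);	// ends up in attach_tp()
 *
 * Sections without an attach handler (e.g. "xdp") yield -ESRCH here and
 * must be attached through their dedicated APIs instead.
 */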
10159
590a0088
MKL
10160static int bpf_link__detach_struct_ops(struct bpf_link *link)
10161{
590a0088
MKL
10162 __u32 zero = 0;
10163
c016b68e 10164 if (bpf_map_delete_elem(link->fd, &zero))
590a0088
MKL
10165 return -errno;
10166
10167 return 0;
10168}
10169
10170struct bpf_link *bpf_map__attach_struct_ops(struct bpf_map *map)
10171{
10172 struct bpf_struct_ops *st_ops;
c016b68e 10173 struct bpf_link *link;
590a0088
MKL
10174 __u32 i, zero = 0;
10175 int err;
10176
10177 if (!bpf_map__is_struct_ops(map) || map->fd == -1)
10178 return ERR_PTR(-EINVAL);
10179
10180 link = calloc(1, sizeof(*link));
10181 if (!link)
10182 return ERR_PTR(-EINVAL);
10183
10184 st_ops = map->st_ops;
10185 for (i = 0; i < btf_vlen(st_ops->type); i++) {
10186 struct bpf_program *prog = st_ops->progs[i];
10187 void *kern_data;
10188 int prog_fd;
10189
10190 if (!prog)
10191 continue;
10192
10193 prog_fd = bpf_program__fd(prog);
10194 kern_data = st_ops->kern_vdata + st_ops->kern_func_off[i];
10195 *(unsigned long *)kern_data = prog_fd;
10196 }
10197
10198 err = bpf_map_update_elem(map->fd, &zero, st_ops->kern_vdata, 0);
10199 if (err) {
10200 err = -errno;
10201 free(link);
10202 return ERR_PTR(err);
10203 }
10204
c016b68e 10205 link->detach = bpf_link__detach_struct_ops;
590a0088
MKL
10206 link->fd = map->fd;
10207
c016b68e 10208 return link;
590a0088
MKL
10209}
10210
d0cabbb0 10211enum bpf_perf_event_ret
3dca2115
DB
10212bpf_perf_event_read_simple(void *mmap_mem, size_t mmap_size, size_t page_size,
10213 void **copy_mem, size_t *copy_size,
10214 bpf_perf_event_print_t fn, void *private_data)
d0cabbb0 10215{
3dca2115 10216 struct perf_event_mmap_page *header = mmap_mem;
a64af0ef 10217 __u64 data_head = ring_buffer_read_head(header);
d0cabbb0 10218 __u64 data_tail = header->data_tail;
3dca2115
DB
10219 void *base = ((__u8 *)header) + page_size;
10220 int ret = LIBBPF_PERF_EVENT_CONT;
10221 struct perf_event_header *ehdr;
10222 size_t ehdr_size;
10223
10224 while (data_head != data_tail) {
10225 ehdr = base + (data_tail & (mmap_size - 1));
10226 ehdr_size = ehdr->size;
10227
10228 if (((void *)ehdr) + ehdr_size > base + mmap_size) {
10229 void *copy_start = ehdr;
10230 size_t len_first = base + mmap_size - copy_start;
10231 size_t len_secnd = ehdr_size - len_first;
10232
10233 if (*copy_size < ehdr_size) {
10234 free(*copy_mem);
10235 *copy_mem = malloc(ehdr_size);
10236 if (!*copy_mem) {
10237 *copy_size = 0;
d0cabbb0
JK
10238 ret = LIBBPF_PERF_EVENT_ERROR;
10239 break;
10240 }
3dca2115 10241 *copy_size = ehdr_size;
d0cabbb0
JK
10242 }
10243
3dca2115
DB
10244 memcpy(*copy_mem, copy_start, len_first);
10245 memcpy(*copy_mem + len_first, base, len_secnd);
10246 ehdr = *copy_mem;
d0cabbb0
JK
10247 }
10248
3dca2115
DB
10249 ret = fn(ehdr, private_data);
10250 data_tail += ehdr_size;
d0cabbb0
JK
10251 if (ret != LIBBPF_PERF_EVENT_CONT)
10252 break;
d0cabbb0
JK
10253 }
10254
a64af0ef 10255 ring_buffer_write_tail(header, data_tail);
d0cabbb0
JK
10256 return ret;
10257}
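
/* Worked example for the wraparound handling above (illustrative):
 * mmap_size must be a power of two, so (data_tail & (mmap_size - 1)) is
 * the record's offset within the data pages. With mmap_size == 4096,
 * data_tail == 4090 and ehdr_size == 16, the record crosses the mmap
 * boundary: len_first == 6, len_secnd == 10, and the two segments are
 * stitched back together in *copy_mem before the callback runs.
 */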
34be1646 10258
fb84b822
AN
10259struct perf_buffer;
10260
10261struct perf_buffer_params {
10262 struct perf_event_attr *attr;
 10263	/* if event_cb is specified, it takes precedence */
10264 perf_buffer_event_fn event_cb;
10265 /* sample_cb and lost_cb are higher-level common-case callbacks */
10266 perf_buffer_sample_fn sample_cb;
10267 perf_buffer_lost_fn lost_cb;
10268 void *ctx;
10269 int cpu_cnt;
10270 int *cpus;
10271 int *map_keys;
10272};
10273
10274struct perf_cpu_buf {
10275 struct perf_buffer *pb;
10276 void *base; /* mmap()'ed memory */
10277 void *buf; /* for reconstructing segmented data */
10278 size_t buf_size;
10279 int fd;
10280 int cpu;
10281 int map_key;
10282};
10283
10284struct perf_buffer {
10285 perf_buffer_event_fn event_cb;
10286 perf_buffer_sample_fn sample_cb;
10287 perf_buffer_lost_fn lost_cb;
10288 void *ctx; /* passed into callbacks */
10289
10290 size_t page_size;
10291 size_t mmap_size;
10292 struct perf_cpu_buf **cpu_bufs;
10293 struct epoll_event *events;
783b8f01 10294 int cpu_cnt; /* number of allocated CPU buffers */
fb84b822
AN
 10295	int epoll_fd; /* epoll instance FD for polling per-CPU perf event FDs */
10296 int map_fd; /* BPF_MAP_TYPE_PERF_EVENT_ARRAY BPF map FD */
10297};
10298
10299static void perf_buffer__free_cpu_buf(struct perf_buffer *pb,
10300 struct perf_cpu_buf *cpu_buf)
10301{
10302 if (!cpu_buf)
10303 return;
10304 if (cpu_buf->base &&
10305 munmap(cpu_buf->base, pb->mmap_size + pb->page_size))
be18010e 10306 pr_warn("failed to munmap cpu_buf #%d\n", cpu_buf->cpu);
fb84b822
AN
10307 if (cpu_buf->fd >= 0) {
10308 ioctl(cpu_buf->fd, PERF_EVENT_IOC_DISABLE, 0);
10309 close(cpu_buf->fd);
10310 }
10311 free(cpu_buf->buf);
10312 free(cpu_buf);
10313}
10314
10315void perf_buffer__free(struct perf_buffer *pb)
10316{
10317 int i;
10318
50450fc7 10319 if (IS_ERR_OR_NULL(pb))
fb84b822
AN
10320 return;
10321 if (pb->cpu_bufs) {
601b05ca 10322 for (i = 0; i < pb->cpu_cnt; i++) {
fb84b822
AN
10323 struct perf_cpu_buf *cpu_buf = pb->cpu_bufs[i];
10324
601b05ca
EC
10325 if (!cpu_buf)
10326 continue;
10327
fb84b822
AN
10328 bpf_map_delete_elem(pb->map_fd, &cpu_buf->map_key);
10329 perf_buffer__free_cpu_buf(pb, cpu_buf);
10330 }
10331 free(pb->cpu_bufs);
10332 }
10333 if (pb->epoll_fd >= 0)
10334 close(pb->epoll_fd);
10335 free(pb->events);
10336 free(pb);
10337}
10338
10339static struct perf_cpu_buf *
10340perf_buffer__open_cpu_buf(struct perf_buffer *pb, struct perf_event_attr *attr,
10341 int cpu, int map_key)
10342{
10343 struct perf_cpu_buf *cpu_buf;
10344 char msg[STRERR_BUFSIZE];
10345 int err;
10346
10347 cpu_buf = calloc(1, sizeof(*cpu_buf));
10348 if (!cpu_buf)
10349 return ERR_PTR(-ENOMEM);
10350
10351 cpu_buf->pb = pb;
10352 cpu_buf->cpu = cpu;
10353 cpu_buf->map_key = map_key;
10354
10355 cpu_buf->fd = syscall(__NR_perf_event_open, attr, -1 /* pid */, cpu,
10356 -1, PERF_FLAG_FD_CLOEXEC);
10357 if (cpu_buf->fd < 0) {
10358 err = -errno;
be18010e
KW
10359 pr_warn("failed to open perf buffer event on cpu #%d: %s\n",
10360 cpu, libbpf_strerror_r(err, msg, sizeof(msg)));
fb84b822
AN
10361 goto error;
10362 }
10363
10364 cpu_buf->base = mmap(NULL, pb->mmap_size + pb->page_size,
10365 PROT_READ | PROT_WRITE, MAP_SHARED,
10366 cpu_buf->fd, 0);
10367 if (cpu_buf->base == MAP_FAILED) {
10368 cpu_buf->base = NULL;
10369 err = -errno;
be18010e
KW
10370 pr_warn("failed to mmap perf buffer on cpu #%d: %s\n",
10371 cpu, libbpf_strerror_r(err, msg, sizeof(msg)));
fb84b822
AN
10372 goto error;
10373 }
10374
10375 if (ioctl(cpu_buf->fd, PERF_EVENT_IOC_ENABLE, 0) < 0) {
10376 err = -errno;
be18010e
KW
10377 pr_warn("failed to enable perf buffer event on cpu #%d: %s\n",
10378 cpu, libbpf_strerror_r(err, msg, sizeof(msg)));
fb84b822
AN
10379 goto error;
10380 }
10381
10382 return cpu_buf;
10383
10384error:
10385 perf_buffer__free_cpu_buf(pb, cpu_buf);
10386 return (struct perf_cpu_buf *)ERR_PTR(err);
10387}
10388
10389static struct perf_buffer *__perf_buffer__new(int map_fd, size_t page_cnt,
10390 struct perf_buffer_params *p);
10391
10392struct perf_buffer *perf_buffer__new(int map_fd, size_t page_cnt,
10393 const struct perf_buffer_opts *opts)
10394{
10395 struct perf_buffer_params p = {};
4be6e05c
ACM
10396 struct perf_event_attr attr = { 0, };
10397
65bb2e0f 10398 attr.config = PERF_COUNT_SW_BPF_OUTPUT;
4be6e05c
ACM
10399 attr.type = PERF_TYPE_SOFTWARE;
10400 attr.sample_type = PERF_SAMPLE_RAW;
10401 attr.sample_period = 1;
10402 attr.wakeup_events = 1;
fb84b822
AN
10403
10404 p.attr = &attr;
10405 p.sample_cb = opts ? opts->sample_cb : NULL;
10406 p.lost_cb = opts ? opts->lost_cb : NULL;
10407 p.ctx = opts ? opts->ctx : NULL;
10408
10409 return __perf_buffer__new(map_fd, page_cnt, &p);
10410}
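
/* Usage sketch (illustrative; handle_sample is a hypothetical callback):
 *
 *	static void handle_sample(void *ctx, int cpu, void *data, __u32 size)
 *	{
 *		// consume one raw sample
 *	}
 *
 *	struct perf_buffer_opts opts = { .sample_cb = handle_sample };
 *	struct perf_buffer *pb = perf_buffer__new(map_fd, 8, &opts);
 *
 * page_cnt must be a power of two (checked in __perf_buffer__new()).
 */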
10411
10412struct perf_buffer *
10413perf_buffer__new_raw(int map_fd, size_t page_cnt,
10414 const struct perf_buffer_raw_opts *opts)
10415{
10416 struct perf_buffer_params p = {};
10417
10418 p.attr = opts->attr;
10419 p.event_cb = opts->event_cb;
10420 p.ctx = opts->ctx;
10421 p.cpu_cnt = opts->cpu_cnt;
10422 p.cpus = opts->cpus;
10423 p.map_keys = opts->map_keys;
10424
10425 return __perf_buffer__new(map_fd, page_cnt, &p);
10426}
10427
10428static struct perf_buffer *__perf_buffer__new(int map_fd, size_t page_cnt,
10429 struct perf_buffer_params *p)
10430{
783b8f01 10431 const char *online_cpus_file = "/sys/devices/system/cpu/online";
0e289487 10432 struct bpf_map_info map;
fb84b822
AN
10433 char msg[STRERR_BUFSIZE];
10434 struct perf_buffer *pb;
783b8f01 10435 bool *online = NULL;
fb84b822 10436 __u32 map_info_len;
783b8f01 10437 int err, i, j, n;
fb84b822
AN
10438
10439 if (page_cnt & (page_cnt - 1)) {
be18010e
KW
10440 pr_warn("page count should be power of two, but is %zu\n",
10441 page_cnt);
fb84b822
AN
10442 return ERR_PTR(-EINVAL);
10443 }
10444
0e289487
AN
10445 /* best-effort sanity checks */
10446 memset(&map, 0, sizeof(map));
fb84b822
AN
10447 map_info_len = sizeof(map);
10448 err = bpf_obj_get_info_by_fd(map_fd, &map, &map_info_len);
10449 if (err) {
10450 err = -errno;
0e289487
AN
 10451		/* if BPF_OBJ_GET_INFO_BY_FD is supported, it will return
 10452		 * -EBADFD, -EFAULT, or -E2BIG on a real error
 10453		 */
10454 if (err != -EINVAL) {
10455 pr_warn("failed to get map info for map FD %d: %s\n",
10456 map_fd, libbpf_strerror_r(err, msg, sizeof(msg)));
10457 return ERR_PTR(err);
10458 }
10459 pr_debug("failed to get map info for FD %d; API not supported? Ignoring...\n",
10460 map_fd);
10461 } else {
10462 if (map.type != BPF_MAP_TYPE_PERF_EVENT_ARRAY) {
10463 pr_warn("map '%s' should be BPF_MAP_TYPE_PERF_EVENT_ARRAY\n",
10464 map.name);
10465 return ERR_PTR(-EINVAL);
10466 }
fb84b822
AN
10467 }
10468
10469 pb = calloc(1, sizeof(*pb));
10470 if (!pb)
10471 return ERR_PTR(-ENOMEM);
10472
10473 pb->event_cb = p->event_cb;
10474 pb->sample_cb = p->sample_cb;
10475 pb->lost_cb = p->lost_cb;
10476 pb->ctx = p->ctx;
10477
10478 pb->page_size = getpagesize();
10479 pb->mmap_size = pb->page_size * page_cnt;
10480 pb->map_fd = map_fd;
10481
10482 pb->epoll_fd = epoll_create1(EPOLL_CLOEXEC);
10483 if (pb->epoll_fd < 0) {
10484 err = -errno;
be18010e
KW
10485 pr_warn("failed to create epoll instance: %s\n",
10486 libbpf_strerror_r(err, msg, sizeof(msg)));
fb84b822
AN
10487 goto error;
10488 }
10489
10490 if (p->cpu_cnt > 0) {
10491 pb->cpu_cnt = p->cpu_cnt;
10492 } else {
10493 pb->cpu_cnt = libbpf_num_possible_cpus();
10494 if (pb->cpu_cnt < 0) {
10495 err = pb->cpu_cnt;
10496 goto error;
10497 }
0e289487 10498 if (map.max_entries && map.max_entries < pb->cpu_cnt)
fb84b822
AN
10499 pb->cpu_cnt = map.max_entries;
10500 }
10501
10502 pb->events = calloc(pb->cpu_cnt, sizeof(*pb->events));
10503 if (!pb->events) {
10504 err = -ENOMEM;
be18010e 10505 pr_warn("failed to allocate events: out of memory\n");
fb84b822
AN
10506 goto error;
10507 }
10508 pb->cpu_bufs = calloc(pb->cpu_cnt, sizeof(*pb->cpu_bufs));
10509 if (!pb->cpu_bufs) {
10510 err = -ENOMEM;
be18010e 10511 pr_warn("failed to allocate buffers: out of memory\n");
fb84b822
AN
10512 goto error;
10513 }
10514
783b8f01
AN
10515 err = parse_cpu_mask_file(online_cpus_file, &online, &n);
10516 if (err) {
10517 pr_warn("failed to get online CPU mask: %d\n", err);
10518 goto error;
10519 }
10520
10521 for (i = 0, j = 0; i < pb->cpu_cnt; i++) {
fb84b822
AN
10522 struct perf_cpu_buf *cpu_buf;
10523 int cpu, map_key;
10524
10525 cpu = p->cpu_cnt > 0 ? p->cpus[i] : i;
10526 map_key = p->cpu_cnt > 0 ? p->map_keys[i] : i;
10527
783b8f01
AN
 10528		/* in case the user didn't explicitly request particular CPUs
 10529		 * to be attached to, skip offline/not-present CPUs
 10530		 */
10531 if (p->cpu_cnt <= 0 && (cpu >= n || !online[cpu]))
10532 continue;

		cpu_buf = perf_buffer__open_cpu_buf(pb, p->attr, cpu, map_key);
		if (IS_ERR(cpu_buf)) {
			err = PTR_ERR(cpu_buf);
			goto error;
		}

		pb->cpu_bufs[j] = cpu_buf;

		err = bpf_map_update_elem(pb->map_fd, &map_key,
					  &cpu_buf->fd, 0);
		if (err) {
			err = -errno;
			pr_warn("failed to set cpu #%d, key %d -> perf FD %d: %s\n",
				cpu, map_key, cpu_buf->fd,
				libbpf_strerror_r(err, msg, sizeof(msg)));
			goto error;
		}

		pb->events[j].events = EPOLLIN;
		pb->events[j].data.ptr = cpu_buf;
		if (epoll_ctl(pb->epoll_fd, EPOLL_CTL_ADD, cpu_buf->fd,
			      &pb->events[j]) < 0) {
			err = -errno;
			pr_warn("failed to epoll_ctl cpu #%d perf FD %d: %s\n",
				cpu, cpu_buf->fd,
				libbpf_strerror_r(err, msg, sizeof(msg)));
			goto error;
		}
		j++;
	}
	pb->cpu_cnt = j;
	free(online);

	return pb;

error:
	free(online);
	if (pb)
		perf_buffer__free(pb);
	return ERR_PTR(err);
}
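
/* Editor's note: a minimal usage sketch for the perf_buffer API above, not
 * part of libbpf itself. `map_fd` is assumed to be the FD of a
 * BPF_MAP_TYPE_PERF_EVENT_ARRAY map set up elsewhere; errors are returned
 * ERR_PTR-encoded in this version, so check with libbpf_get_error().
 */
#if 0
#include <stdio.h>
#include <bpf/libbpf.h>

static void on_sample(void *ctx, int cpu, void *data, __u32 size)
{
	/* `data` points at the raw bytes the BPF program emitted via
	 * bpf_perf_event_output(); the layout is defined by the BPF side
	 */
}

static void on_lost(void *ctx, int cpu, __u64 cnt)
{
	fprintf(stderr, "lost %llu samples on CPU #%d\n",
		(unsigned long long)cnt, cpu);
}

static struct perf_buffer *setup_perf_buf(int map_fd)
{
	struct perf_buffer_opts pb_opts = {
		.sample_cb = on_sample,
		.lost_cb = on_lost,
	};

	/* 8 pages of ring buffer per CPU; page_cnt must be a power of two */
	return perf_buffer__new(map_fd, 8, &pb_opts);
}
#endif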

struct perf_sample_raw {
	struct perf_event_header header;
	uint32_t size;
	char data[];
};

struct perf_sample_lost {
	struct perf_event_header header;
	uint64_t id;
	uint64_t lost;
	uint64_t sample_id;
};
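
/* Editor's note: the two structs above mirror the kernel's PERF_RECORD_SAMPLE
 * (with PERF_SAMPLE_RAW payload, as produced by bpf_perf_event_output()) and
 * PERF_RECORD_LOST record layouts, which is what the rings set up by
 * perf_buffer__new() deliver.
 */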

static enum bpf_perf_event_ret
perf_buffer__process_record(struct perf_event_header *e, void *ctx)
{
	struct perf_cpu_buf *cpu_buf = ctx;
	struct perf_buffer *pb = cpu_buf->pb;
	void *data = e;

	/* user wants full control over parsing perf event */
	if (pb->event_cb)
		return pb->event_cb(pb->ctx, cpu_buf->cpu, e);

	switch (e->type) {
	case PERF_RECORD_SAMPLE: {
		struct perf_sample_raw *s = data;

		if (pb->sample_cb)
			pb->sample_cb(pb->ctx, cpu_buf->cpu, s->data, s->size);
		break;
	}
	case PERF_RECORD_LOST: {
		struct perf_sample_lost *s = data;

		if (pb->lost_cb)
			pb->lost_cb(pb->ctx, cpu_buf->cpu, s->lost);
		break;
	}
	default:
		pr_warn("unknown perf sample type %d\n", e->type);
		return LIBBPF_PERF_EVENT_ERROR;
	}
	return LIBBPF_PERF_EVENT_CONT;
}

static int perf_buffer__process_records(struct perf_buffer *pb,
					struct perf_cpu_buf *cpu_buf)
{
	enum bpf_perf_event_ret ret;

	ret = bpf_perf_event_read_simple(cpu_buf->base, pb->mmap_size,
					 pb->page_size, &cpu_buf->buf,
					 &cpu_buf->buf_size,
					 perf_buffer__process_record, cpu_buf);
	if (ret != LIBBPF_PERF_EVENT_CONT)
		return ret;
	return 0;
}

int perf_buffer__epoll_fd(const struct perf_buffer *pb)
{
	return pb->epoll_fd;
}

int perf_buffer__poll(struct perf_buffer *pb, int timeout_ms)
{
	int i, cnt, err;

	cnt = epoll_wait(pb->epoll_fd, pb->events, pb->cpu_cnt, timeout_ms);
	for (i = 0; i < cnt; i++) {
		struct perf_cpu_buf *cpu_buf = pb->events[i].data.ptr;

		err = perf_buffer__process_records(pb, cpu_buf);
		if (err) {
			pr_warn("error while processing records: %d\n", err);
			return err;
		}
	}
	return cnt < 0 ? -errno : cnt;
}
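
/* Editor's note: a typical consumption loop on top of perf_buffer__poll()
 * (illustrative sketch; `pb` and the volatile `exiting` flag are assumed to
 * be set up by the caller):
 */
#if 0
	while (!exiting) {
		int n = perf_buffer__poll(pb, 100 /* timeout, ms */);

		/* -EINTR is expected when a signal is used to stop the loop */
		if (n < 0 && n != -EINTR) {
			fprintf(stderr, "polling perf buffer failed: %d\n", n);
			break;
		}
	}
	perf_buffer__free(pb);
#endif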

/* Return number of PERF_EVENT_ARRAY map slots set up by this perf_buffer
 * manager.
 */
size_t perf_buffer__buffer_cnt(const struct perf_buffer *pb)
{
	return pb->cpu_cnt;
}

/*
 * Return perf_event FD of a ring buffer in *buf_idx* slot of
 * PERF_EVENT_ARRAY BPF map. This FD can be polled for new data using
 * select()/poll()/epoll() Linux syscalls.
 */
int perf_buffer__buffer_fd(const struct perf_buffer *pb, size_t buf_idx)
{
	struct perf_cpu_buf *cpu_buf;

	if (buf_idx >= pb->cpu_cnt)
		return -EINVAL;

	cpu_buf = pb->cpu_bufs[buf_idx];
	if (!cpu_buf)
		return -ENOENT;

	return cpu_buf->fd;
}

/*
 * Consume data from perf ring buffer corresponding to slot *buf_idx* in
 * PERF_EVENT_ARRAY BPF map without waiting/polling. If there is no data to
 * consume, do nothing and return success.
 * Returns:
 *   - 0 on success;
 *   - <0 on failure.
 */
int perf_buffer__consume_buffer(struct perf_buffer *pb, size_t buf_idx)
{
	struct perf_cpu_buf *cpu_buf;

	if (buf_idx >= pb->cpu_cnt)
		return -EINVAL;

	cpu_buf = pb->cpu_bufs[buf_idx];
	if (!cpu_buf)
		return -ENOENT;

	return perf_buffer__process_records(pb, cpu_buf);
}
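
/* Editor's note: a sketch of driving individual ring buffers from an
 * application-owned event loop, using the two accessors above (illustrative;
 * error handling elided and the epoll registration left to the app):
 */
#if 0
	size_t i, n = perf_buffer__buffer_cnt(pb);

	for (i = 0; i < n; i++) {
		int fd = perf_buffer__buffer_fd(pb, i);

		/* register `fd` with the app's own epoll/select machinery;
		 * when it becomes readable, drain just that one buffer:
		 */
		perf_buffer__consume_buffer(pb, i);
	}
#endif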

int perf_buffer__consume(struct perf_buffer *pb)
{
	int i, err;

	for (i = 0; i < pb->cpu_cnt; i++) {
		struct perf_cpu_buf *cpu_buf = pb->cpu_bufs[i];

		if (!cpu_buf)
			continue;

		err = perf_buffer__process_records(pb, cpu_buf);
		if (err) {
			pr_warn("perf_buffer: failed to process records in buffer #%d: %d\n", i, err);
			return err;
		}
	}
	return 0;
}

struct bpf_prog_info_array_desc {
	int	array_offset;	/* e.g. offset of jited_prog_insns */
	int	count_offset;	/* e.g. offset of jited_prog_len */
	int	size_offset;	/* > 0: offset of rec size,
				 * < 0: fixed size of -size_offset
				 */
};

static struct bpf_prog_info_array_desc bpf_prog_info_array_desc[] = {
	[BPF_PROG_INFO_JITED_INSNS] = {
		offsetof(struct bpf_prog_info, jited_prog_insns),
		offsetof(struct bpf_prog_info, jited_prog_len),
		-1,
	},
	[BPF_PROG_INFO_XLATED_INSNS] = {
		offsetof(struct bpf_prog_info, xlated_prog_insns),
		offsetof(struct bpf_prog_info, xlated_prog_len),
		-1,
	},
	[BPF_PROG_INFO_MAP_IDS] = {
		offsetof(struct bpf_prog_info, map_ids),
		offsetof(struct bpf_prog_info, nr_map_ids),
		-(int)sizeof(__u32),
	},
	[BPF_PROG_INFO_JITED_KSYMS] = {
		offsetof(struct bpf_prog_info, jited_ksyms),
		offsetof(struct bpf_prog_info, nr_jited_ksyms),
		-(int)sizeof(__u64),
	},
	[BPF_PROG_INFO_JITED_FUNC_LENS] = {
		offsetof(struct bpf_prog_info, jited_func_lens),
		offsetof(struct bpf_prog_info, nr_jited_func_lens),
		-(int)sizeof(__u32),
	},
	[BPF_PROG_INFO_FUNC_INFO] = {
		offsetof(struct bpf_prog_info, func_info),
		offsetof(struct bpf_prog_info, nr_func_info),
		offsetof(struct bpf_prog_info, func_info_rec_size),
	},
	[BPF_PROG_INFO_LINE_INFO] = {
		offsetof(struct bpf_prog_info, line_info),
		offsetof(struct bpf_prog_info, nr_line_info),
		offsetof(struct bpf_prog_info, line_info_rec_size),
	},
	[BPF_PROG_INFO_JITED_LINE_INFO] = {
		offsetof(struct bpf_prog_info, jited_line_info),
		offsetof(struct bpf_prog_info, nr_jited_line_info),
		offsetof(struct bpf_prog_info, jited_line_info_rec_size),
	},
	[BPF_PROG_INFO_PROG_TAGS] = {
		offsetof(struct bpf_prog_info, prog_tags),
		offsetof(struct bpf_prog_info, nr_prog_tags),
		-(int)sizeof(__u8) * BPF_TAG_SIZE,
	},
};
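
/* Editor's note: a worked reading of the descriptor encoding above. For
 * BPF_PROG_INFO_MAP_IDS, size_offset is -(int)sizeof(__u32), i.e. every
 * record has a fixed size of 4 bytes; for BPF_PROG_INFO_FUNC_INFO,
 * size_offset is positive, so the per-record size is read from
 * info->func_info_rec_size at runtime.
 */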

static __u32 bpf_prog_info_read_offset_u32(struct bpf_prog_info *info,
					   int offset)
{
	__u32 *array = (__u32 *)info;

	if (offset >= 0)
		return array[offset / sizeof(__u32)];
	return -(int)offset;
}

static __u64 bpf_prog_info_read_offset_u64(struct bpf_prog_info *info,
					   int offset)
{
	__u64 *array = (__u64 *)info;

	if (offset >= 0)
		return array[offset / sizeof(__u64)];
	return -(int)offset;
}

static void bpf_prog_info_set_offset_u32(struct bpf_prog_info *info, int offset,
					 __u32 val)
{
	__u32 *array = (__u32 *)info;

	if (offset >= 0)
		array[offset / sizeof(__u32)] = val;
}

static void bpf_prog_info_set_offset_u64(struct bpf_prog_info *info, int offset,
					 __u64 val)
{
	__u64 *array = (__u64 *)info;

	if (offset >= 0)
		array[offset / sizeof(__u64)] = val;
}

struct bpf_prog_info_linear *
bpf_program__get_prog_info_linear(int fd, __u64 arrays)
{
	struct bpf_prog_info_linear *info_linear;
	struct bpf_prog_info info = {};
	__u32 info_len = sizeof(info);
	__u32 data_len = 0;
	int i, err;
	void *ptr;

	if (arrays >> BPF_PROG_INFO_LAST_ARRAY)
		return ERR_PTR(-EINVAL);

	/* step 1: get array dimensions */
	err = bpf_obj_get_info_by_fd(fd, &info, &info_len);
	if (err) {
		pr_debug("can't get prog info: %s\n", strerror(errno));
		return ERR_PTR(-EFAULT);
	}

	/* step 2: calculate total size of all arrays */
	for (i = BPF_PROG_INFO_FIRST_ARRAY; i < BPF_PROG_INFO_LAST_ARRAY; ++i) {
		bool include_array = (arrays & (1UL << i)) > 0;
		struct bpf_prog_info_array_desc *desc;
		__u32 count, size;

		desc = bpf_prog_info_array_desc + i;

		/* kernel is too old to support this field */
		if (info_len < desc->array_offset + sizeof(__u32) ||
		    info_len < desc->count_offset + sizeof(__u32) ||
		    (desc->size_offset > 0 && info_len < desc->size_offset))
			include_array = false;

		if (!include_array) {
			arrays &= ~(1UL << i);	/* clear the bit */
			continue;
		}

		count = bpf_prog_info_read_offset_u32(&info, desc->count_offset);
		size = bpf_prog_info_read_offset_u32(&info, desc->size_offset);

		data_len += count * size;
	}

	/* step 3: allocate contiguous memory */
	data_len = roundup(data_len, sizeof(__u64));
	info_linear = malloc(sizeof(struct bpf_prog_info_linear) + data_len);
	if (!info_linear)
		return ERR_PTR(-ENOMEM);

	/* step 4: fill in info_linear->info */
	info_linear->arrays = arrays;
	memset(&info_linear->info, 0, sizeof(info));
	ptr = info_linear->data;

	for (i = BPF_PROG_INFO_FIRST_ARRAY; i < BPF_PROG_INFO_LAST_ARRAY; ++i) {
		struct bpf_prog_info_array_desc *desc;
		__u32 count, size;

		if ((arrays & (1UL << i)) == 0)
			continue;

		desc = bpf_prog_info_array_desc + i;
		count = bpf_prog_info_read_offset_u32(&info, desc->count_offset);
		size = bpf_prog_info_read_offset_u32(&info, desc->size_offset);
		bpf_prog_info_set_offset_u32(&info_linear->info,
					     desc->count_offset, count);
		bpf_prog_info_set_offset_u32(&info_linear->info,
					     desc->size_offset, size);
		bpf_prog_info_set_offset_u64(&info_linear->info,
					     desc->array_offset,
					     ptr_to_u64(ptr));
		ptr += count * size;
	}

	/* step 5: call syscall again to get required arrays */
	err = bpf_obj_get_info_by_fd(fd, &info_linear->info, &info_len);
	if (err) {
		pr_debug("can't get prog info: %s\n", strerror(errno));
		free(info_linear);
		return ERR_PTR(-EFAULT);
	}

	/* step 6: verify the data */
	for (i = BPF_PROG_INFO_FIRST_ARRAY; i < BPF_PROG_INFO_LAST_ARRAY; ++i) {
		struct bpf_prog_info_array_desc *desc;
		__u32 v1, v2;

		if ((arrays & (1UL << i)) == 0)
			continue;

		desc = bpf_prog_info_array_desc + i;
		v1 = bpf_prog_info_read_offset_u32(&info, desc->count_offset);
		v2 = bpf_prog_info_read_offset_u32(&info_linear->info,
						   desc->count_offset);
		if (v1 != v2)
			pr_warn("%s: mismatch in element count\n", __func__);

		v1 = bpf_prog_info_read_offset_u32(&info, desc->size_offset);
		v2 = bpf_prog_info_read_offset_u32(&info_linear->info,
						   desc->size_offset);
		if (v1 != v2)
			pr_warn("%s: mismatch in rec size\n", __func__);
	}

	/* step 7: update info_len and data_len */
	info_linear->info_len = sizeof(struct bpf_prog_info);
	info_linear->data_len = data_len;

	return info_linear;
}
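
/* Editor's note: a usage sketch for the API above (illustrative; `prog_fd`
 * is assumed to be the FD of an already-loaded BPF program):
 */
#if 0
	__u64 arrays = (1UL << BPF_PROG_INFO_XLATED_INSNS) |
		       (1UL << BPF_PROG_INFO_MAP_IDS);
	struct bpf_prog_info_linear *info_linear;

	info_linear = bpf_program__get_prog_info_linear(prog_fd, arrays);
	if (libbpf_get_error(info_linear))
		return -1; /* handle error */

	/* the requested array pointers (e.g. info_linear->info.map_ids) now
	 * point into info_linear->data, so a single free() releases everything
	 */
	free(info_linear);
#endif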

void bpf_program__bpil_addr_to_offs(struct bpf_prog_info_linear *info_linear)
{
	int i;

	for (i = BPF_PROG_INFO_FIRST_ARRAY; i < BPF_PROG_INFO_LAST_ARRAY; ++i) {
		struct bpf_prog_info_array_desc *desc;
		__u64 addr, offs;

		if ((info_linear->arrays & (1UL << i)) == 0)
			continue;

		desc = bpf_prog_info_array_desc + i;
		addr = bpf_prog_info_read_offset_u64(&info_linear->info,
						     desc->array_offset);
		offs = addr - ptr_to_u64(info_linear->data);
		bpf_prog_info_set_offset_u64(&info_linear->info,
					     desc->array_offset, offs);
	}
}

void bpf_program__bpil_offs_to_addr(struct bpf_prog_info_linear *info_linear)
{
	int i;

	for (i = BPF_PROG_INFO_FIRST_ARRAY; i < BPF_PROG_INFO_LAST_ARRAY; ++i) {
		struct bpf_prog_info_array_desc *desc;
		__u64 addr, offs;

		if ((info_linear->arrays & (1UL << i)) == 0)
			continue;

		desc = bpf_prog_info_array_desc + i;
		offs = bpf_prog_info_read_offset_u64(&info_linear->info,
						     desc->array_offset);
		addr = offs + ptr_to_u64(info_linear->data);
		bpf_prog_info_set_offset_u64(&info_linear->info,
					     desc->array_offset, addr);
	}
}
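
/* Editor's note: the two helpers above exist to make the blob serializable,
 * e.g. the way perf embeds program info into perf.data. A sketch (`out_fd`
 * is a hypothetical output FD; error handling elided):
 */
#if 0
	/* make embedded pointers relocatable before writing the blob out... */
	bpf_program__bpil_addr_to_offs(info_linear);
	write(out_fd, info_linear, sizeof(*info_linear) + info_linear->data_len);
	/* ...and restore them to real addresses after reading it back in */
	bpf_program__bpil_offs_to_addr(info_linear);
#endif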

int bpf_program__set_attach_target(struct bpf_program *prog,
				   int attach_prog_fd,
				   const char *attach_func_name)
{
	int btf_obj_fd = 0, btf_id = 0, err;

	if (!prog || attach_prog_fd < 0 || !attach_func_name)
		return -EINVAL;

	if (prog->obj->loaded)
		return -EINVAL;

	if (attach_prog_fd) {
		btf_id = libbpf_find_prog_btf_id(attach_func_name,
						 attach_prog_fd);
		if (btf_id < 0)
			return btf_id;
	} else {
		/* load btf_vmlinux, if it's not loaded yet */
		err = bpf_object__load_vmlinux_btf(prog->obj, true);
		if (err)
			return err;
		err = find_kernel_btf_id(prog->obj, attach_func_name,
					 prog->expected_attach_type,
					 &btf_obj_fd, &btf_id);
		if (err)
			return err;
	}

	prog->attach_btf_id = btf_id;
	prog->attach_btf_obj_fd = btf_obj_fd;
	prog->attach_prog_fd = attach_prog_fd;
	return 0;
}
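
/* Editor's note: a usage sketch for the setter above (illustrative; `prog`
 * comes from an opened-but-not-yet-loaded object, `target_fd` is a
 * hypothetical FD of an already-loaded program):
 */
#if 0
	/* point an EXT (freplace) program at a subprogram of `target_fd` */
	err = bpf_program__set_attach_target(prog, target_fd, "some_subprog");

	/* or, with attach_prog_fd == 0, resolve the name against kernel
	 * (vmlinux) BTF instead, e.g. for an fentry program:
	 */
	err = bpf_program__set_attach_target(prog, 0, "tcp_v4_connect");
#endif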

int parse_cpu_mask_str(const char *s, bool **mask, int *mask_sz)
{
	int err = 0, n, len, start, end = -1;
	bool *tmp;

	*mask = NULL;
	*mask_sz = 0;

	/* Each substring separated by ',' has format \d+-\d+ or \d+ */
	while (*s) {
		if (*s == ',' || *s == '\n') {
			s++;
			continue;
		}
		n = sscanf(s, "%d%n-%d%n", &start, &len, &end, &len);
		if (n <= 0 || n > 2) {
			pr_warn("Failed to get CPU range %s: %d\n", s, n);
			err = -EINVAL;
			goto cleanup;
		} else if (n == 1) {
			end = start;
		}
		if (start < 0 || start > end) {
			pr_warn("Invalid CPU range [%d,%d] in %s\n",
				start, end, s);
			err = -EINVAL;
			goto cleanup;
		}
		tmp = realloc(*mask, end + 1);
		if (!tmp) {
			err = -ENOMEM;
			goto cleanup;
		}
		*mask = tmp;
		memset(tmp + *mask_sz, 0, start - *mask_sz);
		memset(tmp + start, 1, end - start + 1);
		*mask_sz = end + 1;
		s += len;
	}
	if (!*mask_sz) {
		pr_warn("Empty CPU range\n");
		return -EINVAL;
	}
	return 0;
cleanup:
	free(*mask);
	*mask = NULL;
	return err;
}
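
/* Editor's note: a worked example of the parser above. The input "0-2,4\n"
 * produces mask = {true, true, true, false, true} and mask_sz = 5:
 */
#if 0
	bool *mask;
	int n, err;

	err = parse_cpu_mask_str("0-2,4\n", &mask, &n);
	if (!err) {
		/* mask[0..2] and mask[4] are set, n == 5 */
		free(mask);
	}
#endif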

int parse_cpu_mask_file(const char *fcpu, bool **mask, int *mask_sz)
{
	int fd, err = 0, len;
	char buf[128];

	fd = open(fcpu, O_RDONLY);
	if (fd < 0) {
		err = -errno;
		pr_warn("Failed to open cpu mask file %s: %d\n", fcpu, err);
		return err;
	}
	len = read(fd, buf, sizeof(buf));
	close(fd);
	if (len <= 0) {
		err = len ? -errno : -EINVAL;
		pr_warn("Failed to read cpu mask from %s: %d\n", fcpu, err);
		return err;
	}
	if (len >= sizeof(buf)) {
		pr_warn("CPU mask is too big in file %s\n", fcpu);
		return -E2BIG;
	}
	buf[len] = '\0';

	return parse_cpu_mask_str(buf, mask, mask_sz);
}

int libbpf_num_possible_cpus(void)
{
	static const char *fcpu = "/sys/devices/system/cpu/possible";
	static int cpus;
	int err, n, i, tmp_cpus;
	bool *mask;

	tmp_cpus = READ_ONCE(cpus);
	if (tmp_cpus > 0)
		return tmp_cpus;

	err = parse_cpu_mask_file(fcpu, &mask, &n);
	if (err)
		return err;

	tmp_cpus = 0;
	for (i = 0; i < n; i++) {
		if (mask[i])
			tmp_cpus++;
	}
	free(mask);

	WRITE_ONCE(cpus, tmp_cpus);
	return tmp_cpus;
}
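
/* Editor's note: the canonical use case is sizing per-CPU value arrays for
 * per-CPU map lookups (sketch; `percpu_map_fd` and `key` are assumed to be
 * set up by the caller):
 */
#if 0
	int ncpus = libbpf_num_possible_cpus();

	if (ncpus < 0)
		return ncpus; /* handle error */

	__u64 values[ncpus]; /* one value slot per possible CPU */
	int err = bpf_map_lookup_elem(percpu_map_fd, &key, values);
#endif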

int bpf_object__open_skeleton(struct bpf_object_skeleton *s,
			      const struct bpf_object_open_opts *opts)
{
	DECLARE_LIBBPF_OPTS(bpf_object_open_opts, skel_opts,
		.object_name = s->name,
	);
	struct bpf_object *obj;
	int i;

	/* Attempt to use the skeleton's object name, unless the user
	 * explicitly overrides it via opts->object_name. Overwriting the
	 * object name for skeletons is discouraged, as it breaks global
	 * data maps, which carry the object name as their own map name
	 * prefix. When the skeleton is generated, bpftool assumes this
	 * name will stay the same.
	 */
	if (opts) {
		memcpy(&skel_opts, opts, sizeof(*opts));
		if (!opts->object_name)
			skel_opts.object_name = s->name;
	}

	obj = bpf_object__open_mem(s->data, s->data_sz, &skel_opts);
	if (IS_ERR(obj)) {
		pr_warn("failed to initialize skeleton BPF object '%s': %ld\n",
			s->name, PTR_ERR(obj));
		return PTR_ERR(obj);
	}

	*s->obj = obj;

	for (i = 0; i < s->map_cnt; i++) {
		struct bpf_map **map = s->maps[i].map;
		const char *name = s->maps[i].name;
		void **mmaped = s->maps[i].mmaped;

		*map = bpf_object__find_map_by_name(obj, name);
		if (!*map) {
			pr_warn("failed to find skeleton map '%s'\n", name);
			return -ESRCH;
		}

		/* externs shouldn't be set up ahead of time by user code */
		if (mmaped && (*map)->libbpf_type != LIBBPF_MAP_KCONFIG)
			*mmaped = (*map)->mmaped;
	}

	for (i = 0; i < s->prog_cnt; i++) {
		struct bpf_program **prog = s->progs[i].prog;
		const char *name = s->progs[i].name;

		*prog = bpf_object__find_program_by_name(obj, name);
		if (!*prog) {
			pr_warn("failed to find skeleton program '%s'\n", name);
			return -ESRCH;
		}
	}

	return 0;
}

int bpf_object__load_skeleton(struct bpf_object_skeleton *s)
{
	int i, err;

	err = bpf_object__load(*s->obj);
	if (err) {
		pr_warn("failed to load BPF skeleton '%s': %d\n", s->name, err);
		return err;
	}

	for (i = 0; i < s->map_cnt; i++) {
		struct bpf_map *map = *s->maps[i].map;
		size_t mmap_sz = bpf_map_mmap_sz(map);
		int prot, map_fd = bpf_map__fd(map);
		void **mmaped = s->maps[i].mmaped;

		if (!mmaped)
			continue;

		if (!(map->def.map_flags & BPF_F_MMAPABLE)) {
			*mmaped = NULL;
			continue;
		}

		if (map->def.map_flags & BPF_F_RDONLY_PROG)
			prot = PROT_READ;
		else
			prot = PROT_READ | PROT_WRITE;

		/* Remap the anonymous mmap()-ed "map initialization image"
		 * as BPF map-backed mmap()-ed memory, preserving the same
		 * memory address. This makes the kernel change the process's
		 * page table to point at a different piece of kernel memory,
		 * but from the userspace point of view the memory address
		 * (and its contents, identical at this point) stays the
		 * same. This mapping will be released by bpf_object__close()
		 * as part of the normal clean up procedure, so we don't need
		 * to worry about it from the skeleton's clean up perspective.
		 */
		*mmaped = mmap(map->mmaped, mmap_sz, prot,
			       MAP_SHARED | MAP_FIXED, map_fd, 0);
		if (*mmaped == MAP_FAILED) {
			err = -errno;
			*mmaped = NULL;
			pr_warn("failed to re-mmap() map '%s': %d\n",
				bpf_map__name(map), err);
			return err;
		}
	}

	return 0;
}

int bpf_object__attach_skeleton(struct bpf_object_skeleton *s)
{
	int i;

	for (i = 0; i < s->prog_cnt; i++) {
		struct bpf_program *prog = *s->progs[i].prog;
		struct bpf_link **link = s->progs[i].link;
		const struct bpf_sec_def *sec_def;

		if (!prog->load)
			continue;

		sec_def = find_sec_def(prog->sec_name);
		if (!sec_def || !sec_def->attach_fn)
			continue;

		*link = sec_def->attach_fn(sec_def, prog);
		if (IS_ERR(*link)) {
			pr_warn("failed to auto-attach program '%s': %ld\n",
				bpf_program__name(prog), PTR_ERR(*link));
			return PTR_ERR(*link);
		}
	}

	return 0;
}

void bpf_object__detach_skeleton(struct bpf_object_skeleton *s)
{
	int i;

	for (i = 0; i < s->prog_cnt; i++) {
		struct bpf_link **link = s->progs[i].link;

		bpf_link__destroy(*link);
		*link = NULL;
	}
}

void bpf_object__destroy_skeleton(struct bpf_object_skeleton *s)
{
	if (s->progs)
		bpf_object__detach_skeleton(s);
	if (s->obj)
		bpf_object__close(*s->obj);
	free(s->maps);
	free(s->progs);
	free(s);
}
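
/* Editor's note: applications normally reach the skeleton APIs above through
 * the thin wrappers that `bpftool gen skeleton` emits. A lifecycle sketch,
 * using a hypothetical skeleton named minimal_bpf:
 */
#if 0
	struct minimal_bpf *skel;

	skel = minimal_bpf__open();		/* bpf_object__open_skeleton() */
	if (!skel)
		return 1;
	if (minimal_bpf__load(skel))		/* bpf_object__load_skeleton() */
		goto cleanup;
	if (minimal_bpf__attach(skel))		/* bpf_object__attach_skeleton() */
		goto cleanup;
	/* skel->bss/skel->data/skel->rodata are now mmap()-ed and can be read
	 * (and, except for rodata, written) directly
	 */
cleanup:
	minimal_bpf__destroy(skel);		/* bpf_object__destroy_skeleton() */
#endif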