1 // SPDX-License-Identifier: (LGPL-2.1 OR BSD-2-Clause)
4 * Common eBPF ELF object loading operations.
6 * Copyright (C) 2013-2015 Alexei Starovoitov <ast@kernel.org>
7 * Copyright (C) 2015 Wang Nan <wangnan0@huawei.com>
8 * Copyright (C) 2015 Huawei Inc.
9 * Copyright (C) 2017 Nicira, Inc.
10 * Copyright (C) 2019 Isovalent, Inc.
28 #include <asm/unistd.h>
29 #include <linux/err.h>
30 #include <linux/kernel.h>
31 #include <linux/bpf.h>
32 #include <linux/btf.h>
33 #include <linux/filter.h>
34 #include <linux/limits.h>
35 #include <linux/perf_event.h>
36 #include <linux/bpf_perf_event.h>
37 #include <linux/ring_buffer.h>
38 #include <sys/epoll.h>
39 #include <sys/ioctl.h>
42 #include <sys/types.h>
44 #include <sys/utsname.h>
45 #include <sys/resource.h>
53 #include "str_error.h"
54 #include "libbpf_internal.h"
56 #include "bpf_gen_internal.h"
60 #define BPF_FS_MAGIC 0xcafe4a11
63 #define BPF_FS_DEFAULT_PATH "/sys/fs/bpf"
65 #define BPF_INSN_SZ (sizeof(struct bpf_insn))
67 /* vsprintf() in __base_pr() uses nonliteral format string. It may break
68 * compilation if the user enables the corresponding warning. Disable it explicitly.
70 #pragma GCC diagnostic ignored "-Wformat-nonliteral"
72 #define __printf(a, b) __attribute__((format(printf, a, b)))
74 static struct bpf_map *bpf_object__add_map(struct bpf_object *obj);
75 static bool prog_is_subprog(const struct bpf_object *obj, const struct bpf_program *prog);
76 static int map_set_def_max_entries(struct bpf_map *map);
78 static const char * const attach_type_name[] = {
79 [BPF_CGROUP_INET_INGRESS] = "cgroup_inet_ingress",
80 [BPF_CGROUP_INET_EGRESS] = "cgroup_inet_egress",
81 [BPF_CGROUP_INET_SOCK_CREATE] = "cgroup_inet_sock_create",
82 [BPF_CGROUP_INET_SOCK_RELEASE] = "cgroup_inet_sock_release",
83 [BPF_CGROUP_SOCK_OPS] = "cgroup_sock_ops",
84 [BPF_CGROUP_DEVICE] = "cgroup_device",
85 [BPF_CGROUP_INET4_BIND] = "cgroup_inet4_bind",
86 [BPF_CGROUP_INET6_BIND] = "cgroup_inet6_bind",
87 [BPF_CGROUP_INET4_CONNECT] = "cgroup_inet4_connect",
88 [BPF_CGROUP_INET6_CONNECT] = "cgroup_inet6_connect",
89 [BPF_CGROUP_UNIX_CONNECT] = "cgroup_unix_connect",
90 [BPF_CGROUP_INET4_POST_BIND] = "cgroup_inet4_post_bind",
91 [BPF_CGROUP_INET6_POST_BIND] = "cgroup_inet6_post_bind",
92 [BPF_CGROUP_INET4_GETPEERNAME] = "cgroup_inet4_getpeername",
93 [BPF_CGROUP_INET6_GETPEERNAME] = "cgroup_inet6_getpeername",
94 [BPF_CGROUP_UNIX_GETPEERNAME] = "cgroup_unix_getpeername",
95 [BPF_CGROUP_INET4_GETSOCKNAME] = "cgroup_inet4_getsockname",
96 [BPF_CGROUP_INET6_GETSOCKNAME] = "cgroup_inet6_getsockname",
97 [BPF_CGROUP_UNIX_GETSOCKNAME] = "cgroup_unix_getsockname",
98 [BPF_CGROUP_UDP4_SENDMSG] = "cgroup_udp4_sendmsg",
99 [BPF_CGROUP_UDP6_SENDMSG] = "cgroup_udp6_sendmsg",
100 [BPF_CGROUP_UNIX_SENDMSG] = "cgroup_unix_sendmsg",
101 [BPF_CGROUP_SYSCTL] = "cgroup_sysctl",
102 [BPF_CGROUP_UDP4_RECVMSG] = "cgroup_udp4_recvmsg",
103 [BPF_CGROUP_UDP6_RECVMSG] = "cgroup_udp6_recvmsg",
104 [BPF_CGROUP_UNIX_RECVMSG] = "cgroup_unix_recvmsg",
105 [BPF_CGROUP_GETSOCKOPT] = "cgroup_getsockopt",
106 [BPF_CGROUP_SETSOCKOPT] = "cgroup_setsockopt",
107 [BPF_SK_SKB_STREAM_PARSER] = "sk_skb_stream_parser",
108 [BPF_SK_SKB_STREAM_VERDICT] = "sk_skb_stream_verdict",
109 [BPF_SK_SKB_VERDICT] = "sk_skb_verdict",
110 [BPF_SK_MSG_VERDICT] = "sk_msg_verdict",
111 [BPF_LIRC_MODE2] = "lirc_mode2",
112 [BPF_FLOW_DISSECTOR] = "flow_dissector",
113 [BPF_TRACE_RAW_TP] = "trace_raw_tp",
114 [BPF_TRACE_FENTRY] = "trace_fentry",
115 [BPF_TRACE_FEXIT] = "trace_fexit",
116 [BPF_MODIFY_RETURN] = "modify_return",
117 [BPF_LSM_MAC] = "lsm_mac",
118 [BPF_LSM_CGROUP] = "lsm_cgroup",
119 [BPF_SK_LOOKUP] = "sk_lookup",
120 [BPF_TRACE_ITER] = "trace_iter",
121 [BPF_XDP_DEVMAP] = "xdp_devmap",
122 [BPF_XDP_CPUMAP] = "xdp_cpumap",
124 [BPF_SK_REUSEPORT_SELECT] = "sk_reuseport_select",
125 [BPF_SK_REUSEPORT_SELECT_OR_MIGRATE] = "sk_reuseport_select_or_migrate",
126 [BPF_PERF_EVENT] = "perf_event",
127 [BPF_TRACE_KPROBE_MULTI] = "trace_kprobe_multi",
128 [BPF_STRUCT_OPS] = "struct_ops",
129 [BPF_NETFILTER] = "netfilter",
130 [BPF_TCX_INGRESS] = "tcx_ingress",
131 [BPF_TCX_EGRESS] = "tcx_egress",
132 [BPF_TRACE_UPROBE_MULTI] = "trace_uprobe_multi",
133 [BPF_NETKIT_PRIMARY] = "netkit_primary",
134 [BPF_NETKIT_PEER] = "netkit_peer",
137 static const char * const link_type_name[] = {
138 [BPF_LINK_TYPE_UNSPEC] = "unspec",
139 [BPF_LINK_TYPE_RAW_TRACEPOINT] = "raw_tracepoint",
140 [BPF_LINK_TYPE_TRACING] = "tracing",
141 [BPF_LINK_TYPE_CGROUP] = "cgroup",
142 [BPF_LINK_TYPE_ITER] = "iter",
143 [BPF_LINK_TYPE_NETNS] = "netns",
144 [BPF_LINK_TYPE_XDP] = "xdp",
145 [BPF_LINK_TYPE_PERF_EVENT] = "perf_event",
146 [BPF_LINK_TYPE_KPROBE_MULTI] = "kprobe_multi",
147 [BPF_LINK_TYPE_STRUCT_OPS] = "struct_ops",
148 [BPF_LINK_TYPE_NETFILTER] = "netfilter",
149 [BPF_LINK_TYPE_TCX] = "tcx",
150 [BPF_LINK_TYPE_UPROBE_MULTI] = "uprobe_multi",
151 [BPF_LINK_TYPE_NETKIT] = "netkit",
154 static const char * const map_type_name[] = {
155 [BPF_MAP_TYPE_UNSPEC] = "unspec",
156 [BPF_MAP_TYPE_HASH] = "hash",
157 [BPF_MAP_TYPE_ARRAY] = "array",
158 [BPF_MAP_TYPE_PROG_ARRAY] = "prog_array",
159 [BPF_MAP_TYPE_PERF_EVENT_ARRAY] = "perf_event_array",
160 [BPF_MAP_TYPE_PERCPU_HASH] = "percpu_hash",
161 [BPF_MAP_TYPE_PERCPU_ARRAY] = "percpu_array",
162 [BPF_MAP_TYPE_STACK_TRACE] = "stack_trace",
163 [BPF_MAP_TYPE_CGROUP_ARRAY] = "cgroup_array",
164 [BPF_MAP_TYPE_LRU_HASH] = "lru_hash",
165 [BPF_MAP_TYPE_LRU_PERCPU_HASH] = "lru_percpu_hash",
166 [BPF_MAP_TYPE_LPM_TRIE] = "lpm_trie",
167 [BPF_MAP_TYPE_ARRAY_OF_MAPS] = "array_of_maps",
168 [BPF_MAP_TYPE_HASH_OF_MAPS] = "hash_of_maps",
169 [BPF_MAP_TYPE_DEVMAP] = "devmap",
170 [BPF_MAP_TYPE_DEVMAP_HASH] = "devmap_hash",
171 [BPF_MAP_TYPE_SOCKMAP] = "sockmap",
172 [BPF_MAP_TYPE_CPUMAP] = "cpumap",
173 [BPF_MAP_TYPE_XSKMAP] = "xskmap",
174 [BPF_MAP_TYPE_SOCKHASH] = "sockhash",
175 [BPF_MAP_TYPE_CGROUP_STORAGE] = "cgroup_storage",
176 [BPF_MAP_TYPE_REUSEPORT_SOCKARRAY] = "reuseport_sockarray",
177 [BPF_MAP_TYPE_PERCPU_CGROUP_STORAGE] = "percpu_cgroup_storage",
178 [BPF_MAP_TYPE_QUEUE] = "queue",
179 [BPF_MAP_TYPE_STACK] = "stack",
180 [BPF_MAP_TYPE_SK_STORAGE] = "sk_storage",
181 [BPF_MAP_TYPE_STRUCT_OPS] = "struct_ops",
182 [BPF_MAP_TYPE_RINGBUF] = "ringbuf",
183 [BPF_MAP_TYPE_INODE_STORAGE] = "inode_storage",
184 [BPF_MAP_TYPE_TASK_STORAGE] = "task_storage",
185 [BPF_MAP_TYPE_BLOOM_FILTER] = "bloom_filter",
186 [BPF_MAP_TYPE_USER_RINGBUF] = "user_ringbuf",
187 [BPF_MAP_TYPE_CGRP_STORAGE] = "cgrp_storage",
188 [BPF_MAP_TYPE_ARENA] = "arena",
191 static const char * const prog_type_name[] = {
192 [BPF_PROG_TYPE_UNSPEC] = "unspec",
193 [BPF_PROG_TYPE_SOCKET_FILTER] = "socket_filter",
194 [BPF_PROG_TYPE_KPROBE] = "kprobe",
195 [BPF_PROG_TYPE_SCHED_CLS] = "sched_cls",
196 [BPF_PROG_TYPE_SCHED_ACT] = "sched_act",
197 [BPF_PROG_TYPE_TRACEPOINT] = "tracepoint",
198 [BPF_PROG_TYPE_XDP] = "xdp",
199 [BPF_PROG_TYPE_PERF_EVENT] = "perf_event",
200 [BPF_PROG_TYPE_CGROUP_SKB] = "cgroup_skb",
201 [BPF_PROG_TYPE_CGROUP_SOCK] = "cgroup_sock",
202 [BPF_PROG_TYPE_LWT_IN] = "lwt_in",
203 [BPF_PROG_TYPE_LWT_OUT] = "lwt_out",
204 [BPF_PROG_TYPE_LWT_XMIT] = "lwt_xmit",
205 [BPF_PROG_TYPE_SOCK_OPS] = "sock_ops",
206 [BPF_PROG_TYPE_SK_SKB] = "sk_skb",
207 [BPF_PROG_TYPE_CGROUP_DEVICE] = "cgroup_device",
208 [BPF_PROG_TYPE_SK_MSG] = "sk_msg",
209 [BPF_PROG_TYPE_RAW_TRACEPOINT] = "raw_tracepoint",
210 [BPF_PROG_TYPE_CGROUP_SOCK_ADDR] = "cgroup_sock_addr",
211 [BPF_PROG_TYPE_LWT_SEG6LOCAL] = "lwt_seg6local",
212 [BPF_PROG_TYPE_LIRC_MODE2] = "lirc_mode2",
213 [BPF_PROG_TYPE_SK_REUSEPORT] = "sk_reuseport",
214 [BPF_PROG_TYPE_FLOW_DISSECTOR] = "flow_dissector",
215 [BPF_PROG_TYPE_CGROUP_SYSCTL] = "cgroup_sysctl",
216 [BPF_PROG_TYPE_RAW_TRACEPOINT_WRITABLE] = "raw_tracepoint_writable",
217 [BPF_PROG_TYPE_CGROUP_SOCKOPT] = "cgroup_sockopt",
218 [BPF_PROG_TYPE_TRACING] = "tracing",
219 [BPF_PROG_TYPE_STRUCT_OPS] = "struct_ops",
220 [BPF_PROG_TYPE_EXT] = "ext",
221 [BPF_PROG_TYPE_LSM] = "lsm",
222 [BPF_PROG_TYPE_SK_LOOKUP] = "sk_lookup",
223 [BPF_PROG_TYPE_SYSCALL] = "syscall",
224 [BPF_PROG_TYPE_NETFILTER] = "netfilter",
227 static int __base_pr(enum libbpf_print_level level, const char *format,
228 va_list args)
230 if (level == LIBBPF_DEBUG)
231 return 0;
233 return vfprintf(stderr, format, args);
236 static libbpf_print_fn_t __libbpf_pr = __base_pr;
238 libbpf_print_fn_t libbpf_set_print(libbpf_print_fn_t fn)
240 libbpf_print_fn_t old_print_fn;
242 old_print_fn = __atomic_exchange_n(&__libbpf_pr, fn, __ATOMIC_RELAXED);
248 void libbpf_print(enum libbpf_print_level level, const char *format, ...)
252 libbpf_print_fn_t print_fn;
254 print_fn = __atomic_load_n(&__libbpf_pr, __ATOMIC_RELAXED);
260 va_start(args, format);
261 print_fn(level, format, args);
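/* Usage sketch (illustrative, not part of libbpf): install a custom
 * printer that drops debug output; my_print_fn is a hypothetical user
 * callback.
 *
 *	static int my_print_fn(enum libbpf_print_level level,
 *			       const char *format, va_list args)
 *	{
 *		if (level == LIBBPF_DEBUG)
 *			return 0;
 *		return vfprintf(stderr, format, args);
 *	}
 *
 *	libbpf_print_fn_t old_fn = libbpf_set_print(my_print_fn);
 */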
267 static void pr_perm_msg(int err)
272 if (err != -EPERM || geteuid() != 0)
275 err = getrlimit(RLIMIT_MEMLOCK, &limit);
279 if (limit.rlim_cur == RLIM_INFINITY)
282 if (limit.rlim_cur < 1024)
283 snprintf(buf, sizeof(buf), "%zu bytes", (size_t)limit.rlim_cur);
284 else if (limit.rlim_cur < 1024*1024)
285 snprintf(buf, sizeof(buf), "%.1f KiB", (double)limit.rlim_cur / 1024);
287 snprintf(buf, sizeof(buf), "%.1f MiB", (double)limit.rlim_cur / (1024*1024));
289 pr_warn("permission error while running as root; try raising 'ulimit -l'? current value: %s\n",
293 #define STRERR_BUFSIZE 128
295 /* Copied from tools/perf/util/util.h */
297 # define zfree(ptr) ({ free(*ptr); *ptr = NULL; })
301 # define zclose(fd) ({ \
302 int ___err = 0; \
303 if ((fd) >= 0) \
304 ___err = close((fd)); \
305 fd = -1; \
306 ___err; })
309 static inline __u64 ptr_to_u64(const void *ptr)
311 return (__u64) (unsigned long) ptr;
314 int libbpf_set_strict_mode(enum libbpf_strict_mode mode)
316 /* as of v1.0 libbpf_set_strict_mode() is a no-op */
320 __u32 libbpf_major_version(void)
322 return LIBBPF_MAJOR_VERSION;
325 __u32 libbpf_minor_version(void)
327 return LIBBPF_MINOR_VERSION;
330 const char *libbpf_version_string(void)
334 return "v" _S(LIBBPF_MAJOR_VERSION) "." _S(LIBBPF_MINOR_VERSION);
350 enum reloc_type type;
353 const struct bpf_core_relo *core_relo; /* used when type == RELO_CORE */
362 /* stored as sec_def->cookie for all libbpf-supported SEC()s */
365 /* expected_attach_type is optional and can be dropped if the kernel doesn't support it */
366 SEC_EXP_ATTACH_OPT = 1,
367 /* legacy, only used by libbpf_get_type_names() and
368 * libbpf_attach_type_by_name(), not used by libbpf itself at all.
369 * This used to be associated with cgroup (and a few other) BPF programs
370 * that were attachable through BPF_PROG_ATTACH command. Pretty
371 * meaningless nowadays, though.
374 SEC_ATTACHABLE_OPT = SEC_ATTACHABLE | SEC_EXP_ATTACH_OPT,
375 /* attachment target is specified through BTF ID in either kernel or
376 * other BPF program's BTF object
379 /* BPF program type allows sleeping/blocking in kernel */
381 /* BPF program supports non-linear XDP buffers */
383 /* Set up proper attach type for USDT probes. */
389 enum bpf_prog_type prog_type;
390 enum bpf_attach_type expected_attach_type;
394 libbpf_prog_setup_fn_t prog_setup_fn;
395 libbpf_prog_prepare_load_fn_t prog_prepare_load_fn;
396 libbpf_prog_attach_fn_t prog_attach_fn;
400 * bpf_prog should be a better name but it has been used in
401 * linux/filter.h.
407 const struct bpf_sec_def *sec_def;
408 /* this program's instruction offset (in number of instructions)
409 * within its containing ELF section
412 /* number of original instructions in ELF section belonging to this
413 * program, not taking into account subprogram instructions possibly
414 * appended later during relocation
417 /* Offset (in number of instructions) of the start of instruction
418 * belonging to this BPF program within its containing main BPF
419 * program. For the entry-point (main) BPF program, this is always
420 * zero. For a sub-program, this gets reset before each of the main BPF
421 * programs is processed and relocated, and is used to determine
422 * whether the sub-program was already appended to the main program and,
423 * if yes, at which instruction offset.
427 /* instructions that belong to BPF program; insns[0] is located at
428 * sec_insn_off instruction within its ELF section in ELF file, so
429 * when mapping ELF file instruction index to the local instruction,
430 * one needs to subtract sec_insn_off; and vice versa.
432 struct bpf_insn *insns;
433 /* actual number of instructions in this BPF program's image; for
434 * entry-point BPF programs this includes the size of main program
435 * itself plus all the used sub-programs, appended at the end
439 struct reloc_desc *reloc_desc;
442 /* BPF verifier log settings */
447 struct bpf_object *obj;
453 bool mark_btf_static;
454 enum bpf_prog_type type;
455 enum bpf_attach_type expected_attach_type;
456 int exception_cb_idx;
459 __u32 attach_btf_obj_fd;
461 __u32 attach_prog_fd;
464 __u32 func_info_rec_size;
468 __u32 line_info_rec_size;
473 struct bpf_struct_ops {
475 const struct btf_type *type;
476 struct bpf_program **progs;
477 __u32 *kern_func_off;
478 /* e.g. struct tcp_congestion_ops in bpf_prog's btf format */
480 /* e.g. struct bpf_struct_ops_tcp_congestion_ops in
481 * btf_vmlinux's format.
482 * struct bpf_struct_ops_tcp_congestion_ops {
483 * [... some other kernel fields ...]
484 * struct tcp_congestion_ops data;
486 * kern_vdata's size == sizeof(struct bpf_struct_ops_tcp_congestion_ops)
487 * bpf_map__init_kern_struct_ops() will populate the "kern_vdata"
494 #define DATA_SEC ".data"
495 #define BSS_SEC ".bss"
496 #define RODATA_SEC ".rodata"
497 #define KCONFIG_SEC ".kconfig"
498 #define KSYMS_SEC ".ksyms"
499 #define STRUCT_OPS_SEC ".struct_ops"
500 #define STRUCT_OPS_LINK_SEC ".struct_ops.link"
501 #define ARENA_SEC ".addr_space.1"
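/* These section names come from SEC() annotations in BPF C code; e.g. a
 * struct_ops map is declared as a global instance (sketch, my_ops being
 * a hypothetical name):
 *
 *	SEC(".struct_ops.link")
 *	struct tcp_congestion_ops my_ops = {
 *		.name = "my_cong",
 *	};
 */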
503 enum libbpf_map_type {
513 unsigned int key_size;
514 unsigned int value_size;
515 unsigned int max_entries;
516 unsigned int map_flags;
520 struct bpf_object *obj;
522 /* real_name is defined for special internal maps (.rodata*,
523 * .data*, .bss, .kconfig) and preserves their original ELF section
524 * name. This is important to be able to find corresponding BTF
525 * DATASEC information.
533 struct bpf_map_def def;
537 __u32 btf_key_type_id;
538 __u32 btf_value_type_id;
539 __u32 btf_vmlinux_value_type_id;
540 enum libbpf_map_type libbpf_type;
542 struct bpf_struct_ops *st_ops;
543 struct bpf_map *inner_map;
569 enum extern_type type;
586 unsigned long long addr;
588 /* target btf_id of the corresponding kernel var. */
589 int kernel_btf_obj_fd;
592 /* local btf_id of the ksym extern's type. */
594 /* BTF fd index to be patched in for insn->off, this is
595 * 0 for vmlinux BTF, index in obj->fd_array for module
620 struct elf_sec_desc {
621 enum sec_type sec_type;
633 Elf_Data *arena_data;
634 size_t shstrndx; /* section index for section name strings */
636 struct elf_sec_desc *secs;
639 __u32 btf_maps_sec_btf_id;
643 int arena_data_shndx;
649 char name[BPF_OBJ_NAME_LEN];
653 struct bpf_program *programs;
655 struct bpf_map *maps;
660 struct extern_desc *externs;
668 struct bpf_gen *gen_loader;
670 /* Information when doing ELF related work. Only valid if efile.elf is not NULL */
671 struct elf_state efile;
674 struct btf_ext *btf_ext;
676 /* Parse and load BTF vmlinux if any of the programs in the object need
679 struct btf *btf_vmlinux;
680 /* Path to the custom BTF to be used for BPF CO-RE relocations as an
681 * override for vmlinux BTF.
683 char *btf_custom_path;
684 /* vmlinux BTF override for CO-RE relocations */
685 struct btf *btf_vmlinux_override;
686 /* Lazily initialized kernel module BTFs */
687 struct module_btf *btf_modules;
688 bool btf_modules_loaded;
689 size_t btf_module_cnt;
690 size_t btf_module_cap;
692 /* optional log settings passed to BPF_BTF_LOAD and BPF_PROG_LOAD commands */
701 struct usdt_manager *usdt_man;
703 struct bpf_map *arena_map;
705 size_t arena_data_sz;
707 struct kern_feature_cache *feat_cache;
714 static const char *elf_sym_str(const struct bpf_object *obj, size_t off);
715 static const char *elf_sec_str(const struct bpf_object *obj, size_t off);
716 static Elf_Scn *elf_sec_by_idx(const struct bpf_object *obj, size_t idx);
717 static Elf_Scn *elf_sec_by_name(const struct bpf_object *obj, const char *name);
718 static Elf64_Shdr *elf_sec_hdr(const struct bpf_object *obj, Elf_Scn *scn);
719 static const char *elf_sec_name(const struct bpf_object *obj, Elf_Scn *scn);
720 static Elf_Data *elf_sec_data(const struct bpf_object *obj, Elf_Scn *scn);
721 static Elf64_Sym *elf_sym_by_idx(const struct bpf_object *obj, size_t idx);
722 static Elf64_Rel *elf_rel_by_idx(Elf_Data *data, size_t idx);
724 void bpf_program__unload(struct bpf_program *prog)
731 zfree(&prog->func_info);
732 zfree(&prog->line_info);
735 static void bpf_program__exit(struct bpf_program *prog)
740 bpf_program__unload(prog);
742 zfree(&prog->sec_name);
744 zfree(&prog->reloc_desc);
751 static bool insn_is_subprog_call(const struct bpf_insn *insn)
753 return BPF_CLASS(insn->code) == BPF_JMP &&
754 BPF_OP(insn->code) == BPF_CALL &&
755 BPF_SRC(insn->code) == BPF_K &&
756 insn->src_reg == BPF_PSEUDO_CALL &&
757 insn->dst_reg == 0 &&
758 insn->off == 0;
761 static bool is_call_insn(const struct bpf_insn *insn)
763 return insn->code == (BPF_JMP | BPF_CALL);
766 static bool insn_is_pseudo_func(struct bpf_insn *insn)
768 return is_ldimm64_insn(insn) && insn->src_reg == BPF_PSEUDO_FUNC;
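/* For reference, a subprog call as emitted by the compiler satisfies
 * insn_is_subprog_call() above; a hand-built sketch:
 *
 *	struct bpf_insn call = {
 *		.code = BPF_JMP | BPF_CALL,
 *		.src_reg = BPF_PSEUDO_CALL,
 *		.imm = 1,	(insn delta to callee; hypothetical value)
 *	};
 */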
772 bpf_object__init_prog(struct bpf_object *obj, struct bpf_program *prog,
773 const char *name, size_t sec_idx, const char *sec_name,
774 size_t sec_off, void *insn_data, size_t insn_data_sz)
776 if (insn_data_sz == 0 || insn_data_sz % BPF_INSN_SZ || sec_off % BPF_INSN_SZ) {
777 pr_warn("sec '%s': corrupted program '%s', offset %zu, size %zu\n",
778 sec_name, name, sec_off, insn_data_sz);
782 memset(prog, 0, sizeof(*prog));
785 prog->sec_idx = sec_idx;
786 prog->sec_insn_off = sec_off / BPF_INSN_SZ;
787 prog->sec_insn_cnt = insn_data_sz / BPF_INSN_SZ;
788 /* insns_cnt can later be increased by appending used subprograms */
789 prog->insns_cnt = prog->sec_insn_cnt;
791 prog->type = BPF_PROG_TYPE_UNSPEC;
793 prog->exception_cb_idx = -1;
795 /* libbpf's convention for SEC("?abc...") is that it's just like
796 * SEC("abc...") but the corresponding bpf_program starts out with
797 * autoload set to false.
799 if (sec_name[0] == '?') {
800 prog->autoload = false;
801 /* from now on forget there was ? in section name */
804 prog->autoload = true;
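/* e.g., SEC("?xdp") creates the program with autoload disabled; user
 * code can opt back in before load via
 * bpf_program__set_autoload(prog, true).
 */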
807 prog->autoattach = true;
809 /* inherit object's log_level */
810 prog->log_level = obj->log_level;
812 prog->sec_name = strdup(sec_name);
816 prog->name = strdup(name);
820 prog->insns = malloc(insn_data_sz);
823 memcpy(prog->insns, insn_data, insn_data_sz);
827 pr_warn("sec '%s': failed to allocate memory for prog '%s'\n", sec_name, name);
828 bpf_program__exit(prog);
833 bpf_object__add_programs(struct bpf_object *obj, Elf_Data *sec_data,
834 const char *sec_name, int sec_idx)
836 Elf_Data *symbols = obj->efile.symbols;
837 struct bpf_program *prog, *progs;
838 void *data = sec_data->d_buf;
839 size_t sec_sz = sec_data->d_size, sec_off, prog_sz, nr_syms;
840 int nr_progs, err, i;
844 progs = obj->programs;
845 nr_progs = obj->nr_programs;
846 nr_syms = symbols->d_size / sizeof(Elf64_Sym);
848 for (i = 0; i < nr_syms; i++) {
849 sym = elf_sym_by_idx(obj, i);
851 if (sym->st_shndx != sec_idx)
853 if (ELF64_ST_TYPE(sym->st_info) != STT_FUNC)
856 prog_sz = sym->st_size;
857 sec_off = sym->st_value;
859 name = elf_sym_str(obj, sym->st_name);
861 pr_warn("sec '%s': failed to get symbol name for offset %zu\n",
863 return -LIBBPF_ERRNO__FORMAT;
866 if (sec_off + prog_sz > sec_sz) {
867 pr_warn("sec '%s': program at offset %zu crosses section boundary\n",
869 return -LIBBPF_ERRNO__FORMAT;
872 if (sec_idx != obj->efile.text_shndx && ELF64_ST_BIND(sym->st_info) == STB_LOCAL) {
873 pr_warn("sec '%s': program '%s' is static and not supported\n", sec_name, name);
877 pr_debug("sec '%s': found program '%s' at insn offset %zu (%zu bytes), code size %zu insns (%zu bytes)\n",
878 sec_name, name, sec_off / BPF_INSN_SZ, sec_off, prog_sz / BPF_INSN_SZ, prog_sz);
880 progs = libbpf_reallocarray(progs, nr_progs + 1, sizeof(*progs));
883 * In this case the original obj->programs
884 * is still valid, so no special treatment is needed in
885 * bpf_object__close().
887 pr_warn("sec '%s': failed to alloc memory for new program '%s'\n",
891 obj->programs = progs;
893 prog = &progs[nr_progs];
895 err = bpf_object__init_prog(obj, prog, name, sec_idx, sec_name,
896 sec_off, data + sec_off, prog_sz);
900 if (ELF64_ST_BIND(sym->st_info) != STB_LOCAL)
901 prog->sym_global = true;
903 /* if function is a global/weak symbol, but has restricted
904 * (STV_HIDDEN or STV_INTERNAL) visibility, mark its BTF FUNC
905 * as static to enable more permissive BPF verification mode
906 * with more outside context available to BPF verifier
908 if (prog->sym_global && (ELF64_ST_VISIBILITY(sym->st_other) == STV_HIDDEN
909 || ELF64_ST_VISIBILITY(sym->st_other) == STV_INTERNAL))
910 prog->mark_btf_static = true;
913 obj->nr_programs = nr_progs;
919 static const struct btf_member *
920 find_member_by_offset(const struct btf_type *t, __u32 bit_offset)
922 struct btf_member *m;
925 for (i = 0, m = btf_members(t); i < btf_vlen(t); i++, m++) {
926 if (btf_member_bit_offset(t, i) == bit_offset)
933 static const struct btf_member *
934 find_member_by_name(const struct btf *btf, const struct btf_type *t,
937 struct btf_member *m;
940 for (i = 0, m = btf_members(t); i < btf_vlen(t); i++, m++) {
941 if (!strcmp(btf__name_by_offset(btf, m->name_off), name))
948 static int find_ksym_btf_id(struct bpf_object *obj, const char *ksym_name,
949 __u16 kind, struct btf **res_btf,
950 struct module_btf **res_mod_btf);
952 #define STRUCT_OPS_VALUE_PREFIX "bpf_struct_ops_"
953 static int find_btf_by_prefix_kind(const struct btf *btf, const char *prefix,
954 const char *name, __u32 kind);
957 find_struct_ops_kern_types(struct bpf_object *obj, const char *tname_raw,
958 struct module_btf **mod_btf,
959 const struct btf_type **type, __u32 *type_id,
960 const struct btf_type **vtype, __u32 *vtype_id,
961 const struct btf_member **data_member)
963 const struct btf_type *kern_type, *kern_vtype;
964 const struct btf_member *kern_data_member;
966 __s32 kern_vtype_id, kern_type_id;
970 snprintf(tname, sizeof(tname), "%.*s",
971 (int)bpf_core_essential_name_len(tname_raw), tname_raw);
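/* e.g., a "flavored" local type name like "tcp_congestion_ops___v2"
 * (hypothetical) is looked up in kernel BTF by its essential name,
 * "tcp_congestion_ops".
 */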
973 kern_type_id = find_ksym_btf_id(obj, tname, BTF_KIND_STRUCT,
975 if (kern_type_id < 0) {
976 pr_warn("struct_ops init_kern: struct %s is not found in kernel BTF\n",
980 kern_type = btf__type_by_id(btf, kern_type_id);
982 /* Find the corresponding "map_value" type that will be used
983 * in map_update(BPF_MAP_TYPE_STRUCT_OPS). For example,
984 * find "struct bpf_struct_ops_tcp_congestion_ops" from the
987 kern_vtype_id = find_btf_by_prefix_kind(btf, STRUCT_OPS_VALUE_PREFIX,
988 tname, BTF_KIND_STRUCT);
989 if (kern_vtype_id < 0) {
990 pr_warn("struct_ops init_kern: struct %s%s is not found in kernel BTF\n",
991 STRUCT_OPS_VALUE_PREFIX, tname);
992 return kern_vtype_id;
994 kern_vtype = btf__type_by_id(btf, kern_vtype_id);
996 /* Find "struct tcp_congestion_ops" from
997 * struct bpf_struct_ops_tcp_congestion_ops {
999 * struct tcp_congestion_ops data;
1002 kern_data_member = btf_members(kern_vtype);
1003 for (i = 0; i < btf_vlen(kern_vtype); i++, kern_data_member++) {
1004 if (kern_data_member->type == kern_type_id)
1007 if (i == btf_vlen(kern_vtype)) {
1008 pr_warn("struct_ops init_kern: struct %s data is not found in struct %s%s\n",
1009 tname, STRUCT_OPS_VALUE_PREFIX, tname);
1014 *type_id = kern_type_id;
1015 *vtype = kern_vtype;
1016 *vtype_id = kern_vtype_id;
1017 *data_member = kern_data_member;
1022 static bool bpf_map__is_struct_ops(const struct bpf_map *map)
1024 return map->def.type == BPF_MAP_TYPE_STRUCT_OPS;
1027 static bool is_valid_st_ops_program(struct bpf_object *obj,
1028 const struct bpf_program *prog)
1032 for (i = 0; i < obj->nr_programs; i++) {
1033 if (&obj->programs[i] == prog)
1034 return prog->type == BPF_PROG_TYPE_STRUCT_OPS;
1040 /* For each struct_ops program P, referenced from some struct_ops map M,
1041 * enable P.autoload if there are Ms for which M.autocreate is true,
1042 * disable P.autoload if for all Ms M.autocreate is false.
1043 * Don't change P.autoload for programs that are not referenced from any maps.
1045 static int bpf_object_adjust_struct_ops_autoload(struct bpf_object *obj)
1047 struct bpf_program *prog, *slot_prog;
1048 struct bpf_map *map;
1051 for (i = 0; i < obj->nr_programs; ++i) {
1052 bool should_load = false;
1055 prog = &obj->programs[i];
1056 if (prog->type != BPF_PROG_TYPE_STRUCT_OPS)
1059 for (j = 0; j < obj->nr_maps; ++j) {
1060 map = &obj->maps[j];
1061 if (!bpf_map__is_struct_ops(map))
1064 vlen = btf_vlen(map->st_ops->type);
1065 for (k = 0; k < vlen; ++k) {
1066 slot_prog = map->st_ops->progs[k];
1067 if (prog != slot_prog)
1071 if (map->autocreate)
1076 prog->autoload = should_load;
1082 /* Init the map's fields that depend on kern_btf */
1083 static int bpf_map__init_kern_struct_ops(struct bpf_map *map)
1085 const struct btf_member *member, *kern_member, *kern_data_member;
1086 const struct btf_type *type, *kern_type, *kern_vtype;
1087 __u32 i, kern_type_id, kern_vtype_id, kern_data_off;
1088 struct bpf_object *obj = map->obj;
1089 const struct btf *btf = obj->btf;
1090 struct bpf_struct_ops *st_ops;
1091 const struct btf *kern_btf;
1092 struct module_btf *mod_btf;
1093 void *data, *kern_data;
1097 st_ops = map->st_ops;
1098 type = st_ops->type;
1099 tname = st_ops->tname;
1100 err = find_struct_ops_kern_types(obj, tname, &mod_btf,
1101 &kern_type, &kern_type_id,
1102 &kern_vtype, &kern_vtype_id,
1107 kern_btf = mod_btf ? mod_btf->btf : obj->btf_vmlinux;
1109 pr_debug("struct_ops init_kern %s: type_id:%u kern_type_id:%u kern_vtype_id:%u\n",
1110 map->name, st_ops->type_id, kern_type_id, kern_vtype_id);
1112 map->mod_btf_fd = mod_btf ? mod_btf->fd : -1;
1113 map->def.value_size = kern_vtype->size;
1114 map->btf_vmlinux_value_type_id = kern_vtype_id;
1116 st_ops->kern_vdata = calloc(1, kern_vtype->size);
1117 if (!st_ops->kern_vdata)
1120 data = st_ops->data;
1121 kern_data_off = kern_data_member->offset / 8;
1122 kern_data = st_ops->kern_vdata + kern_data_off;
1124 member = btf_members(type);
1125 for (i = 0; i < btf_vlen(type); i++, member++) {
1126 const struct btf_type *mtype, *kern_mtype;
1127 __u32 mtype_id, kern_mtype_id;
1128 void *mdata, *kern_mdata;
1129 __s64 msize, kern_msize;
1130 __u32 moff, kern_moff;
1131 __u32 kern_member_idx;
1134 mname = btf__name_by_offset(btf, member->name_off);
1135 kern_member = find_member_by_name(kern_btf, kern_type, mname);
1137 pr_warn("struct_ops init_kern %s: Cannot find member %s in kernel BTF\n",
1142 kern_member_idx = kern_member - btf_members(kern_type);
1143 if (btf_member_bitfield_size(type, i) ||
1144 btf_member_bitfield_size(kern_type, kern_member_idx)) {
1145 pr_warn("struct_ops init_kern %s: bitfield %s is not supported\n",
1150 moff = member->offset / 8;
1151 kern_moff = kern_member->offset / 8;
1153 mdata = data + moff;
1154 kern_mdata = kern_data + kern_moff;
1156 mtype = skip_mods_and_typedefs(btf, member->type, &mtype_id);
1157 kern_mtype = skip_mods_and_typedefs(kern_btf, kern_member->type,
1159 if (BTF_INFO_KIND(mtype->info) !=
1160 BTF_INFO_KIND(kern_mtype->info)) {
1161 pr_warn("struct_ops init_kern %s: Unmatched member type %s %u != %u(kernel)\n",
1162 map->name, mname, BTF_INFO_KIND(mtype->info),
1163 BTF_INFO_KIND(kern_mtype->info));
1167 if (btf_is_ptr(mtype)) {
1168 struct bpf_program *prog;
1170 /* Update the value from the shadow type */
1171 prog = *(void **)mdata;
1172 st_ops->progs[i] = prog;
1175 if (!is_valid_st_ops_program(obj, prog)) {
1176 pr_warn("struct_ops init_kern %s: member %s is not a struct_ops program\n",
1181 kern_mtype = skip_mods_and_typedefs(kern_btf,
1185 /* mtype->type must be a func_proto which was
1186 * guaranteed in bpf_object__collect_st_ops_relos(),
1187 * so only check kern_mtype for func_proto here.
1189 if (!btf_is_func_proto(kern_mtype)) {
1190 pr_warn("struct_ops init_kern %s: kernel member %s is not a func ptr\n",
1196 prog->attach_btf_obj_fd = mod_btf->fd;
1198 /* if we haven't yet processed this BPF program, record proper
1199 * attach_btf_id and member_idx
1201 if (!prog->attach_btf_id) {
1202 prog->attach_btf_id = kern_type_id;
1203 prog->expected_attach_type = kern_member_idx;
1206 /* struct_ops BPF prog can be re-used between multiple
1207 * .struct_ops & .struct_ops.link as long as it's the
1208 * same struct_ops struct definition and the same
1209 * function pointer field
1211 if (prog->attach_btf_id != kern_type_id) {
1212 pr_warn("struct_ops init_kern %s func ptr %s: invalid reuse of prog %s in sec %s with type %u: attach_btf_id %u != kern_type_id %u\n",
1213 map->name, mname, prog->name, prog->sec_name, prog->type,
1214 prog->attach_btf_id, kern_type_id);
1217 if (prog->expected_attach_type != kern_member_idx) {
1218 pr_warn("struct_ops init_kern %s func ptr %s: invalid reuse of prog %s in sec %s with type %u: expected_attach_type %u != kern_member_idx %u\n",
1219 map->name, mname, prog->name, prog->sec_name, prog->type,
1220 prog->expected_attach_type, kern_member_idx);
1224 st_ops->kern_func_off[i] = kern_data_off + kern_moff;
1226 pr_debug("struct_ops init_kern %s: func ptr %s is set to prog %s from data(+%u) to kern_data(+%u)\n",
1227 map->name, mname, prog->name, moff,
1233 msize = btf__resolve_size(btf, mtype_id);
1234 kern_msize = btf__resolve_size(kern_btf, kern_mtype_id);
1235 if (msize < 0 || kern_msize < 0 || msize != kern_msize) {
1236 pr_warn("struct_ops init_kern %s: Error in size of member %s: %zd != %zd(kernel)\n",
1237 map->name, mname, (ssize_t)msize,
1238 (ssize_t)kern_msize);
1242 pr_debug("struct_ops init_kern %s: copy %s %u bytes from data(+%u) to kern_data(+%u)\n",
1243 map->name, mname, (unsigned int)msize,
1245 memcpy(kern_mdata, mdata, msize);
1251 static int bpf_object__init_kern_struct_ops_maps(struct bpf_object *obj)
1253 struct bpf_map *map;
1257 for (i = 0; i < obj->nr_maps; i++) {
1258 map = &obj->maps[i];
1260 if (!bpf_map__is_struct_ops(map))
1263 if (!map->autocreate)
1266 err = bpf_map__init_kern_struct_ops(map);
1274 static int init_struct_ops_maps(struct bpf_object *obj, const char *sec_name,
1275 int shndx, Elf_Data *data)
1277 const struct btf_type *type, *datasec;
1278 const struct btf_var_secinfo *vsi;
1279 struct bpf_struct_ops *st_ops;
1280 const char *tname, *var_name;
1281 __s32 type_id, datasec_id;
1282 const struct btf *btf;
1283 struct bpf_map *map;
2290 datasec_id = btf__find_by_name_kind(btf, sec_name,
2291 BTF_KIND_DATASEC);
1292 if (datasec_id < 0) {
1293 pr_warn("struct_ops init: DATASEC %s not found\n",
1298 datasec = btf__type_by_id(btf, datasec_id);
1299 vsi = btf_var_secinfos(datasec);
1300 for (i = 0; i < btf_vlen(datasec); i++, vsi++) {
1301 type = btf__type_by_id(obj->btf, vsi->type);
1302 var_name = btf__name_by_offset(obj->btf, type->name_off);
1304 type_id = btf__resolve_type(obj->btf, vsi->type);
1306 pr_warn("struct_ops init: Cannot resolve var type_id %u in DATASEC %s\n",
1307 vsi->type, sec_name);
1311 type = btf__type_by_id(obj->btf, type_id);
1312 tname = btf__name_by_offset(obj->btf, type->name_off);
1314 pr_warn("struct_ops init: anonymous type is not supported\n");
1317 if (!btf_is_struct(type)) {
1318 pr_warn("struct_ops init: %s is not a struct\n", tname);
1322 map = bpf_object__add_map(obj);
1324 return PTR_ERR(map);
1326 map->sec_idx = shndx;
1327 map->sec_offset = vsi->offset;
1328 map->name = strdup(var_name);
1331 map->btf_value_type_id = type_id;
1333 /* Follow same convention as for programs autoload:
1334 * SEC("?.struct_ops") means map is not created by default.
1336 if (sec_name[0] == '?') {
1337 map->autocreate = false;
1338 /* from now on forget there was ? in section name */
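/* e.g., a SEC("?.struct_ops") map can still be enabled at runtime with
 * bpf_map__set_autocreate(map, true) before bpf_object__load().
 */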
1342 map->def.type = BPF_MAP_TYPE_STRUCT_OPS;
1343 map->def.key_size = sizeof(int);
1344 map->def.value_size = type->size;
1345 map->def.max_entries = 1;
1346 map->def.map_flags = strcmp(sec_name, STRUCT_OPS_LINK_SEC) == 0 ? BPF_F_LINK : 0;
1348 map->st_ops = calloc(1, sizeof(*map->st_ops));
1351 st_ops = map->st_ops;
1352 st_ops->data = malloc(type->size);
1353 st_ops->progs = calloc(btf_vlen(type), sizeof(*st_ops->progs));
1354 st_ops->kern_func_off = malloc(btf_vlen(type) *
1355 sizeof(*st_ops->kern_func_off));
1356 if (!st_ops->data || !st_ops->progs || !st_ops->kern_func_off)
1359 if (vsi->offset + type->size > data->d_size) {
1360 pr_warn("struct_ops init: var %s is beyond the end of DATASEC %s\n",
1361 var_name, sec_name);
1365 memcpy(st_ops->data,
1366 data->d_buf + vsi->offset,
1368 st_ops->tname = tname;
1369 st_ops->type = type;
1370 st_ops->type_id = type_id;
1372 pr_debug("struct_ops init: struct %s(type_id=%u) %s found at offset %u\n",
1373 tname, type_id, var_name, vsi->offset);
1379 static int bpf_object_init_struct_ops(struct bpf_object *obj)
1381 const char *sec_name;
1384 for (sec_idx = 0; sec_idx < obj->efile.sec_cnt; ++sec_idx) {
1385 struct elf_sec_desc *desc = &obj->efile.secs[sec_idx];
1387 if (desc->sec_type != SEC_ST_OPS)
1390 sec_name = elf_sec_name(obj, elf_sec_by_idx(obj, sec_idx));
1392 return -LIBBPF_ERRNO__FORMAT;
1394 err = init_struct_ops_maps(obj, sec_name, sec_idx, desc->data);
1402 static struct bpf_object *bpf_object__new(const char *path,
1403 const void *obj_buf,
1404 size_t obj_buf_sz,
1405 const char *obj_name)
1407 struct bpf_object *obj;
1410 obj = calloc(1, sizeof(struct bpf_object) + strlen(path) + 1);
1412 pr_warn("alloc memory failed for %s\n", path);
1413 return ERR_PTR(-ENOMEM);
1416 strcpy(obj->path, path);
1418 libbpf_strlcpy(obj->name, obj_name, sizeof(obj->name));
1420 /* Using basename() GNU version which doesn't modify arg. */
1421 libbpf_strlcpy(obj->name, basename((void *)path), sizeof(obj->name));
1422 end = strchr(obj->name, '.');
1429 * Caller of this function should also call
1430 * bpf_object__elf_finish() after data collection to return
1431 * obj_buf to the user. Otherwise the buffer would have to be
1432 * duplicated to avoid the user freeing it before ELF processing is done.
1434 obj->efile.obj_buf = obj_buf;
1435 obj->efile.obj_buf_sz = obj_buf_sz;
1436 obj->efile.btf_maps_shndx = -1;
1437 obj->kconfig_map_idx = -1;
1439 obj->kern_version = get_kernel_version();
1440 obj->loaded = false;
1445 static void bpf_object__elf_finish(struct bpf_object *obj)
1447 if (!obj->efile.elf)
1450 elf_end(obj->efile.elf);
1451 obj->efile.elf = NULL;
1452 obj->efile.symbols = NULL;
1453 obj->efile.arena_data = NULL;
1455 zfree(&obj->efile.secs);
1456 obj->efile.sec_cnt = 0;
1457 zclose(obj->efile.fd);
1458 obj->efile.obj_buf = NULL;
1459 obj->efile.obj_buf_sz = 0;
1462 static int bpf_object__elf_init(struct bpf_object *obj)
1468 if (obj->efile.elf) {
1469 pr_warn("elf: init internal error\n");
1470 return -LIBBPF_ERRNO__LIBELF;
1473 if (obj->efile.obj_buf_sz > 0) {
1474 /* obj_buf should have been validated by bpf_object__open_mem(). */
1475 elf = elf_memory((char *)obj->efile.obj_buf, obj->efile.obj_buf_sz);
1477 obj->efile.fd = open(obj->path, O_RDONLY | O_CLOEXEC);
1478 if (obj->efile.fd < 0) {
1479 char errmsg[STRERR_BUFSIZE], *cp;
1482 cp = libbpf_strerror_r(err, errmsg, sizeof(errmsg));
1483 pr_warn("elf: failed to open %s: %s\n", obj->path, cp);
1487 elf = elf_begin(obj->efile.fd, ELF_C_READ_MMAP, NULL);
1491 pr_warn("elf: failed to open %s as ELF file: %s\n", obj->path, elf_errmsg(-1));
1492 err = -LIBBPF_ERRNO__LIBELF;
1496 obj->efile.elf = elf;
1498 if (elf_kind(elf) != ELF_K_ELF) {
1499 err = -LIBBPF_ERRNO__FORMAT;
1500 pr_warn("elf: '%s' is not a proper ELF object\n", obj->path);
1504 if (gelf_getclass(elf) != ELFCLASS64) {
1505 err = -LIBBPF_ERRNO__FORMAT;
1506 pr_warn("elf: '%s' is not a 64-bit ELF object\n", obj->path);
1510 obj->efile.ehdr = ehdr = elf64_getehdr(elf);
1511 if (!obj->efile.ehdr) {
1512 pr_warn("elf: failed to get ELF header from %s: %s\n", obj->path, elf_errmsg(-1));
1513 err = -LIBBPF_ERRNO__FORMAT;
1517 if (elf_getshdrstrndx(elf, &obj->efile.shstrndx)) {
1518 pr_warn("elf: failed to get section names section index for %s: %s\n",
1519 obj->path, elf_errmsg(-1));
1520 err = -LIBBPF_ERRNO__FORMAT;
1524 /* ELF is corrupted/truncated, avoid calling elf_strptr. */
1525 if (!elf_rawdata(elf_getscn(elf, obj->efile.shstrndx), NULL)) {
1526 pr_warn("elf: failed to get section names strings from %s: %s\n",
1527 obj->path, elf_errmsg(-1));
1528 err = -LIBBPF_ERRNO__FORMAT;
1532 /* Old LLVM set e_machine to EM_NONE */
1533 if (ehdr->e_type != ET_REL || (ehdr->e_machine && ehdr->e_machine != EM_BPF)) {
1534 pr_warn("elf: %s is not a valid eBPF object file\n", obj->path);
1535 err = -LIBBPF_ERRNO__FORMAT;
1541 bpf_object__elf_finish(obj);
1545 static int bpf_object__check_endianness(struct bpf_object *obj)
1547 #if __BYTE_ORDER__ == __ORDER_LITTLE_ENDIAN__
1548 if (obj->efile.ehdr->e_ident[EI_DATA] == ELFDATA2LSB)
1550 #elif __BYTE_ORDER__ == __ORDER_BIG_ENDIAN__
1551 if (obj->efile.ehdr->e_ident[EI_DATA] == ELFDATA2MSB)
1554 # error "Unrecognized __BYTE_ORDER__"
1556 pr_warn("elf: endianness mismatch in %s.\n", obj->path);
1557 return -LIBBPF_ERRNO__ENDIAN;
1561 bpf_object__init_license(struct bpf_object *obj, void *data, size_t size)
1564 pr_warn("invalid license section in %s\n", obj->path);
1565 return -LIBBPF_ERRNO__FORMAT;
1567 /* libbpf_strlcpy() only copies first N - 1 bytes, so size + 1 won't
1568 * go over allowed ELF data section buffer
1570 libbpf_strlcpy(obj->license, data, min(size + 1, sizeof(obj->license)));
1571 pr_debug("license of %s is %s\n", obj->path, obj->license);
1576 bpf_object__init_kversion(struct bpf_object *obj, void *data, size_t size)
1580 if (!data || size != sizeof(kver)) {
1581 pr_warn("invalid kver section in %s\n", obj->path);
1582 return -LIBBPF_ERRNO__FORMAT;
1584 memcpy(&kver, data, sizeof(kver));
1585 obj->kern_version = kver;
1586 pr_debug("kernel version of %s is %x\n", obj->path, obj->kern_version);
1590 static bool bpf_map_type__is_map_in_map(enum bpf_map_type type)
1592 if (type == BPF_MAP_TYPE_ARRAY_OF_MAPS ||
1593 type == BPF_MAP_TYPE_HASH_OF_MAPS)
1598 static int find_elf_sec_sz(const struct bpf_object *obj, const char *name, __u32 *size)
1606 scn = elf_sec_by_name(obj, name);
1607 data = elf_sec_data(obj, scn);
1609 *size = data->d_size;
1610 return 0; /* found it */
1616 static Elf64_Sym *find_elf_var_sym(const struct bpf_object *obj, const char *name)
1618 Elf_Data *symbols = obj->efile.symbols;
1622 for (si = 0; si < symbols->d_size / sizeof(Elf64_Sym); si++) {
1623 Elf64_Sym *sym = elf_sym_by_idx(obj, si);
1625 if (ELF64_ST_TYPE(sym->st_info) != STT_OBJECT)
1628 if (ELF64_ST_BIND(sym->st_info) != STB_GLOBAL &&
1629 ELF64_ST_BIND(sym->st_info) != STB_WEAK)
1632 sname = elf_sym_str(obj, sym->st_name);
1634 pr_warn("failed to get sym name string for var %s\n", name);
1635 return ERR_PTR(-EIO);
1637 if (strcmp(name, sname) == 0)
1641 return ERR_PTR(-ENOENT);
1644 /* Some versions of Android don't provide memfd_create() in their libc
1645 * implementation, so avoid complications and just go straight to the
1646 * Linux syscall.
1648 static int sys_memfd_create(const char *name, unsigned flags)
1650 return syscall(__NR_memfd_create, name, flags);
1653 static int create_placeholder_fd(void)
1657 fd = ensure_good_fd(sys_memfd_create("libbpf-placeholder-fd", MFD_CLOEXEC));
1663 static struct bpf_map *bpf_object__add_map(struct bpf_object *obj)
1665 struct bpf_map *map;
1668 err = libbpf_ensure_mem((void **)&obj->maps, &obj->maps_cap,
1669 sizeof(*obj->maps), obj->nr_maps + 1);
1671 return ERR_PTR(err);
1673 map = &obj->maps[obj->nr_maps++];
1675 /* Preallocate map FD without actually creating BPF map just yet.
1676 * These map FD "placeholders" will be reused later without changing
1677 * FD value when map is actually created in the kernel.
1679 * This is useful to be able to perform BPF program relocations
1680 * without having to create BPF maps before that step. This allows us
1681 * to finalize and load BTF very late in BPF object's loading phase,
1682 * right before BPF maps have to be created and BPF programs have to
1683 * be loaded. By having these map FD placeholders we can perform all
1684 * the sanitizations, relocations, and any other adjustments before we
1685 * start creating actual BPF kernel objects (BTF, maps, progs).
1687 map->fd = create_placeholder_fd();
1689 return ERR_PTR(map->fd);
1690 map->inner_map_fd = -1;
1691 map->autocreate = true;
1696 static size_t array_map_mmap_sz(unsigned int value_sz, unsigned int max_entries)
1698 const long page_sz = sysconf(_SC_PAGE_SIZE);
1701 map_sz = (size_t)roundup(value_sz, 8) * max_entries;
1702 map_sz = roundup(map_sz, page_sz);
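/* Worked example, assuming 4KB pages: value_sz = 4, max_entries = 1000
 * gives roundup(4, 8) * 1000 = 8000 bytes, rounded up to 8192 bytes.
 */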
1706 static size_t bpf_map_mmap_sz(const struct bpf_map *map)
1708 const long page_sz = sysconf(_SC_PAGE_SIZE);
1710 switch (map->def.type) {
1711 case BPF_MAP_TYPE_ARRAY:
1712 return array_map_mmap_sz(map->def.value_size, map->def.max_entries);
1713 case BPF_MAP_TYPE_ARENA:
1714 return page_sz * map->def.max_entries;
1716 return 0; /* not supported */
1720 static int bpf_map_mmap_resize(struct bpf_map *map, size_t old_sz, size_t new_sz)
1727 if (old_sz == new_sz)
1730 mmaped = mmap(NULL, new_sz, PROT_READ | PROT_WRITE, MAP_SHARED | MAP_ANONYMOUS, -1, 0);
1731 if (mmaped == MAP_FAILED)
1734 memcpy(mmaped, map->mmaped, min(old_sz, new_sz));
1735 munmap(map->mmaped, old_sz);
1736 map->mmaped = mmaped;
1740 static char *internal_map_name(struct bpf_object *obj, const char *real_name)
1742 char map_name[BPF_OBJ_NAME_LEN], *p;
1743 int pfx_len, sfx_len = max((size_t)7, strlen(real_name));
1745 /* This is one of the more confusing parts of libbpf for various
1746 * reasons, some of which are historical. The original idea for naming
1747 * internal names was to include as much of BPF object name prefix as
1748 * possible, so that it can be distinguished from similar internal
1749 * maps of a different BPF object.
1750 * As an example, let's say we have bpf_object named 'my_object_name'
1751 * and internal map corresponding to '.rodata' ELF section. The final
1752 * map name advertised to user and to the kernel will be
1753 * 'my_objec.rodata', taking first 8 characters of object name and
1754 * entire 7 characters of '.rodata'.
1755 * Somewhat confusingly, if internal map ELF section name is shorter
1756 * than 7 characters, e.g., '.bss', we still reserve 7 characters
1757 * for the suffix, even though we only have 4 actual characters, and
1758 * resulting map will be called 'my_objec.bss', not even using all 15
1759 * characters allowed by the kernel. Oh well, at least the truncated
1760 * object name is somewhat consistent in this case. But if the map
1761 * name is '.kconfig', we'll still have entirety of '.kconfig' added
1762 * (8 chars) and thus will be left with only first 7 characters of the
1763 * object name ('my_obje'). Happy guessing, user, that the final map
1764 * name will be "my_obje.kconfig".
1765 * Now, with libbpf starting to support arbitrarily named .rodata.*
1766 * and .data.* data sections, it's possible that ELF section name is
1767 * longer than allowed 15 chars, so we now need to be careful to take
1768 * only up to 15 first characters of ELF name, taking no BPF object
1769 * name characters at all. So '.rodata.abracadabra' will result in
1770 * '.rodata.abracad' kernel and user-visible name.
1771 * We need to keep this convoluted logic intact for .data, .bss and
1772 * .rodata maps, but for new custom .data.custom and .rodata.custom
1773 * maps we use their ELF names as is, not prepending bpf_object name
1774 * in front. We still need to truncate them to 15 characters for the
1775 * kernel. Full name can be recovered for such maps by using DATASEC
1776 * BTF type associated with such map's value type, though.
1778 if (sfx_len >= BPF_OBJ_NAME_LEN)
1779 sfx_len = BPF_OBJ_NAME_LEN - 1;
1781 /* if there are two or more dots in map name, it's a custom dot map */
1782 if (strchr(real_name + 1, '.') != NULL)
1785 pfx_len = min((size_t)BPF_OBJ_NAME_LEN - sfx_len - 1, strlen(obj->name));
1787 snprintf(map_name, sizeof(map_name), "%.*s%.*s", pfx_len, obj->name,
1788 sfx_len, real_name);
1790 /* sanitise map name to characters allowed by kernel */
1791 for (p = map_name; *p && p < map_name + sizeof(map_name); p++)
1792 if (!isalnum(*p) && *p != '_' && *p != '.')
1795 return strdup(map_name);
1799 map_fill_btf_type_info(struct bpf_object *obj, struct bpf_map *map);
1801 /* Internal BPF map is mmap()'able only if at least one of corresponding
1802 * DATASEC's VARs are to be exposed through BPF skeleton. I.e., it's a GLOBAL
1803 * variable and it's not marked as __hidden (which turns it into, effectively,
1804 * a STATIC variable).
1806 static bool map_is_mmapable(struct bpf_object *obj, struct bpf_map *map)
1808 const struct btf_type *t, *vt;
1809 struct btf_var_secinfo *vsi;
1812 if (!map->btf_value_type_id)
1815 t = btf__type_by_id(obj->btf, map->btf_value_type_id);
1816 if (!btf_is_datasec(t))
1819 vsi = btf_var_secinfos(t);
1820 for (i = 0, n = btf_vlen(t); i < n; i++, vsi++) {
1821 vt = btf__type_by_id(obj->btf, vsi->type);
1822 if (!btf_is_var(vt))
1825 if (btf_var(vt)->linkage != BTF_VAR_STATIC)
1833 bpf_object__init_internal_map(struct bpf_object *obj, enum libbpf_map_type type,
1834 const char *real_name, int sec_idx, void *data, size_t data_sz)
1836 struct bpf_map_def *def;
1837 struct bpf_map *map;
1841 map = bpf_object__add_map(obj);
1843 return PTR_ERR(map);
1845 map->libbpf_type = type;
1846 map->sec_idx = sec_idx;
1847 map->sec_offset = 0;
1848 map->real_name = strdup(real_name);
1849 map->name = internal_map_name(obj, real_name);
1850 if (!map->real_name || !map->name) {
1851 zfree(&map->real_name);
1857 def->type = BPF_MAP_TYPE_ARRAY;
1858 def->key_size = sizeof(int);
1859 def->value_size = data_sz;
1860 def->max_entries = 1;
1861 def->map_flags = type == LIBBPF_MAP_RODATA || type == LIBBPF_MAP_KCONFIG
1862 ? BPF_F_RDONLY_PROG : 0;
1864 /* failures are fine because of maps like .rodata.str1.1 */
1865 (void) map_fill_btf_type_info(obj, map);
1867 if (map_is_mmapable(obj, map))
1868 def->map_flags |= BPF_F_MMAPABLE;
1870 pr_debug("map '%s' (global data): at sec_idx %d, offset %zu, flags %x.\n",
1871 map->name, map->sec_idx, map->sec_offset, def->map_flags);
1873 mmap_sz = bpf_map_mmap_sz(map);
1874 map->mmaped = mmap(NULL, mmap_sz, PROT_READ | PROT_WRITE,
1875 MAP_SHARED | MAP_ANONYMOUS, -1, 0);
1876 if (map->mmaped == MAP_FAILED) {
1879 pr_warn("failed to alloc map '%s' content buffer: %d\n",
1881 zfree(&map->real_name);
1887 memcpy(map->mmaped, data, data_sz);
1889 pr_debug("map %td is \"%s\"\n", map - obj->maps, map->name);
1893 static int bpf_object__init_global_data_maps(struct bpf_object *obj)
1895 struct elf_sec_desc *sec_desc;
1896 const char *sec_name;
1897 int err = 0, sec_idx;
1900 * Populate obj->maps with libbpf internal maps.
1902 for (sec_idx = 1; sec_idx < obj->efile.sec_cnt; sec_idx++) {
1903 sec_desc = &obj->efile.secs[sec_idx];
1905 /* Skip recognized sections with size 0. */
1906 if (!sec_desc->data || sec_desc->data->d_size == 0)
1909 switch (sec_desc->sec_type) {
1911 sec_name = elf_sec_name(obj, elf_sec_by_idx(obj, sec_idx));
1912 err = bpf_object__init_internal_map(obj, LIBBPF_MAP_DATA,
1914 sec_desc->data->d_buf,
1915 sec_desc->data->d_size);
1918 obj->has_rodata = true;
1919 sec_name = elf_sec_name(obj, elf_sec_by_idx(obj, sec_idx));
1920 err = bpf_object__init_internal_map(obj, LIBBPF_MAP_RODATA,
1922 sec_desc->data->d_buf,
1923 sec_desc->data->d_size);
1926 sec_name = elf_sec_name(obj, elf_sec_by_idx(obj, sec_idx));
1927 err = bpf_object__init_internal_map(obj, LIBBPF_MAP_BSS,
1930 sec_desc->data->d_size);
1943 static struct extern_desc *find_extern_by_name(const struct bpf_object *obj,
1948 for (i = 0; i < obj->nr_extern; i++) {
1949 if (strcmp(obj->externs[i].name, name) == 0)
1950 return &obj->externs[i];
1955 static int set_kcfg_value_tri(struct extern_desc *ext, void *ext_val,
1956 char value)
1958 switch (ext->kcfg.type) {
1961 pr_warn("extern (kcfg) '%s': value '%c' implies tristate or char type\n",
1965 *(bool *)ext_val = value == 'y' ? true : false;
1969 *(enum libbpf_tristate *)ext_val = TRI_YES;
1970 else if (value == 'm')
1971 *(enum libbpf_tristate *)ext_val = TRI_MODULE;
1972 else /* value == 'n' */
1973 *(enum libbpf_tristate *)ext_val = TRI_NO;
1976 *(char *)ext_val = value;
1982 pr_warn("extern (kcfg) '%s': value '%c' implies bool, tristate, or char type\n",
1990 static int set_kcfg_value_str(struct extern_desc *ext, char *ext_val,
1991 const char *value)
1995 if (ext->kcfg.type != KCFG_CHAR_ARR) {
1996 pr_warn("extern (kcfg) '%s': value '%s' implies char array type\n",
2001 len = strlen(value);
2002 if (value[len - 1] != '"') {
2003 pr_warn("extern (kcfg) '%s': invalid string config '%s'\n",
2010 if (len >= ext->kcfg.sz) {
2011 pr_warn("extern (kcfg) '%s': long string '%s' of (%zu bytes) truncated to %d bytes\n",
2012 ext->name, value, len, ext->kcfg.sz - 1);
2013 len = ext->kcfg.sz - 1;
2015 memcpy(ext_val, value + 1, len);
2016 ext_val[len] = '\0';
2021 static int parse_u64(const char *value, __u64 *res)
2027 *res = strtoull(value, &value_end, 0);
2030 pr_warn("failed to parse '%s' as integer: %d\n", value, err);
2034 pr_warn("failed to parse '%s' as integer completely\n", value);
2040 static bool is_kcfg_value_in_range(const struct extern_desc *ext, __u64 v)
2042 int bit_sz = ext->kcfg.sz * 8;
2044 if (ext->kcfg.sz == 8)
2047 /* Validate that value stored in u64 fits in integer of `ext->sz`
2048 * bytes size without any loss of information. If the target integer
2049 * is signed, we rely on the following limits of integer type of
2050 * Y bits and subsequent transformation:
2052 * -2^(Y-1) <= X <= 2^(Y-1) - 1
2053 * 0 <= X + 2^(Y-1) <= 2^Y - 1
2054 * 0 <= X + 2^(Y-1) < 2^Y
2056 * For unsigned target integer, check that all the (64 - Y) bits are
2059 if (ext->kcfg.is_signed)
2060 return v + (1ULL << (bit_sz - 1)) < (1ULL << bit_sz);
2062 return (v >> bit_sz) == 0;
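/* Worked example for a 1-byte signed kcfg extern (bit_sz = 8):
 * v = (__u64)-128 passes, as adding 2^7 wraps to 0, which is < 2^8;
 * v = 128 fails, as 128 + 2^7 == 256 is not < 2^8.
 */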
2065 static int set_kcfg_value_num(struct extern_desc *ext, void *ext_val,
2066 __u64 value)
2068 if (ext->kcfg.type != KCFG_INT && ext->kcfg.type != KCFG_CHAR &&
2069 ext->kcfg.type != KCFG_BOOL) {
2070 pr_warn("extern (kcfg) '%s': value '%llu' implies integer, char, or boolean type\n",
2071 ext->name, (unsigned long long)value);
2074 if (ext->kcfg.type == KCFG_BOOL && value > 1) {
2075 pr_warn("extern (kcfg) '%s': value '%llu' isn't boolean compatible\n",
2076 ext->name, (unsigned long long)value);
2080 if (!is_kcfg_value_in_range(ext, value)) {
2081 pr_warn("extern (kcfg) '%s': value '%llu' doesn't fit in %d bytes\n",
2082 ext->name, (unsigned long long)value, ext->kcfg.sz);
2085 switch (ext->kcfg.sz) {
2087 *(__u8 *)ext_val = value;
2090 *(__u16 *)ext_val = value;
2093 *(__u32 *)ext_val = value;
2096 *(__u64 *)ext_val = value;
2105 static int bpf_object__process_kconfig_line(struct bpf_object *obj,
2106 char *buf, void *data)
2108 struct extern_desc *ext;
2114 if (!str_has_pfx(buf, "CONFIG_"))
2117 sep = strchr(buf, '=');
2119 pr_warn("failed to parse '%s': no separator\n", buf);
2123 /* Trim ending '\n' */
2125 if (buf[len - 1] == '\n')
2126 buf[len - 1] = '\0';
2127 /* Split on '=' and ensure that a value is present. */
2131 pr_warn("failed to parse '%s': no value\n", buf);
2135 ext = find_extern_by_name(obj, buf);
2136 if (!ext || ext->is_set)
2139 ext_val = data + ext->kcfg.data_off;
2143 case 'y': case 'n': case 'm':
2144 err = set_kcfg_value_tri(ext, ext_val, *value);
2147 err = set_kcfg_value_str(ext, ext_val, value);
2150 /* assume integer */
2151 err = parse_u64(value, &num);
2153 pr_warn("extern (kcfg) '%s': value '%s' isn't a valid integer\n", ext->name, value);
2156 if (ext->kcfg.type != KCFG_INT && ext->kcfg.type != KCFG_CHAR) {
2157 pr_warn("extern (kcfg) '%s': value '%s' implies integer type\n", ext->name, value);
2160 err = set_kcfg_value_num(ext, ext_val, num);
2165 pr_debug("extern (kcfg) '%s': set to %s\n", ext->name, value);
2169 static int bpf_object__read_kconfig_file(struct bpf_object *obj, void *data)
2177 len = snprintf(buf, PATH_MAX, "/boot/config-%s", uts.release);
2180 else if (len >= PATH_MAX)
2181 return -ENAMETOOLONG;
2183 /* gzopen also accepts uncompressed files. */
2184 file = gzopen(buf, "re");
2186 file = gzopen("/proc/config.gz", "re");
2189 pr_warn("failed to open system Kconfig\n");
2193 while (gzgets(file, buf, sizeof(buf))) {
2194 err = bpf_object__process_kconfig_line(obj, buf, data);
2196 pr_warn("error parsing system Kconfig line '%s': %d\n",
2207 static int bpf_object__read_kconfig_mem(struct bpf_object *obj,
2208 const char *config, void *data)
2214 file = fmemopen((void *)config, strlen(config), "r");
2217 pr_warn("failed to open in-memory Kconfig: %d\n", err);
2221 while (fgets(buf, sizeof(buf), file)) {
2222 err = bpf_object__process_kconfig_line(obj, buf, data);
2224 pr_warn("error parsing in-memory Kconfig line '%s': %d\n",
2234 static int bpf_object__init_kconfig_map(struct bpf_object *obj)
2236 struct extern_desc *last_ext = NULL, *ext;
2240 for (i = 0; i < obj->nr_extern; i++) {
2241 ext = &obj->externs[i];
2242 if (ext->type == EXT_KCFG)
2249 map_sz = last_ext->kcfg.data_off + last_ext->kcfg.sz;
2250 err = bpf_object__init_internal_map(obj, LIBBPF_MAP_KCONFIG,
2251 ".kconfig", obj->efile.symbols_shndx,
2256 obj->kconfig_map_idx = obj->nr_maps - 1;
2261 const struct btf_type *
2262 skip_mods_and_typedefs(const struct btf *btf, __u32 id, __u32 *res_id)
2264 const struct btf_type *t = btf__type_by_id(btf, id);
2269 while (btf_is_mod(t) || btf_is_typedef(t)) {
2272 t = btf__type_by_id(btf, t->type);
2278 static const struct btf_type *
2279 resolve_func_ptr(const struct btf *btf, __u32 id, __u32 *res_id)
2281 const struct btf_type *t;
2283 t = skip_mods_and_typedefs(btf, id, NULL);
2287 t = skip_mods_and_typedefs(btf, t->type, res_id);
2289 return btf_is_func_proto(t) ? t : NULL;
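/* e.g., for a struct_ops member declared as
 * "u32 (*ssthresh)(struct sock *sk);", resolve_func_ptr() follows the
 * PTR down to its FUNC_PROTO; any non-func-pointer member yields NULL.
 */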
2292 static const char *__btf_kind_str(__u16 kind)
2295 case BTF_KIND_UNKN: return "void";
2296 case BTF_KIND_INT: return "int";
2297 case BTF_KIND_PTR: return "ptr";
2298 case BTF_KIND_ARRAY: return "array";
2299 case BTF_KIND_STRUCT: return "struct";
2300 case BTF_KIND_UNION: return "union";
2301 case BTF_KIND_ENUM: return "enum";
2302 case BTF_KIND_FWD: return "fwd";
2303 case BTF_KIND_TYPEDEF: return "typedef";
2304 case BTF_KIND_VOLATILE: return "volatile";
2305 case BTF_KIND_CONST: return "const";
2306 case BTF_KIND_RESTRICT: return "restrict";
2307 case BTF_KIND_FUNC: return "func";
2308 case BTF_KIND_FUNC_PROTO: return "func_proto";
2309 case BTF_KIND_VAR: return "var";
2310 case BTF_KIND_DATASEC: return "datasec";
2311 case BTF_KIND_FLOAT: return "float";
2312 case BTF_KIND_DECL_TAG: return "decl_tag";
2313 case BTF_KIND_TYPE_TAG: return "type_tag";
2314 case BTF_KIND_ENUM64: return "enum64";
2315 default: return "unknown";
2319 const char *btf_kind_str(const struct btf_type *t)
2321 return __btf_kind_str(btf_kind(t));
2325 * Fetch integer attribute of BTF map definition. Such attributes are
2326 * represented using a pointer to an array, in which dimensionality of array
2327 * encodes specified integer value. E.g., int (*type)[BPF_MAP_TYPE_ARRAY];
2328 * encodes `type => BPF_MAP_TYPE_ARRAY` key/value pair completely using BTF
2329 * type definition, while using only sizeof(void *) space in ELF data section.
2331 static bool get_map_field_int(const char *map_name, const struct btf *btf,
2332 const struct btf_member *m, __u32 *res)
2334 const struct btf_type *t = skip_mods_and_typedefs(btf, m->type, NULL);
2335 const char *name = btf__name_by_offset(btf, m->name_off);
2336 const struct btf_array *arr_info;
2337 const struct btf_type *arr_t;
2339 if (!btf_is_ptr(t)) {
2340 pr_warn("map '%s': attr '%s': expected PTR, got %s.\n",
2341 map_name, name, btf_kind_str(t));
2345 arr_t = btf__type_by_id(btf, t->type);
2347 pr_warn("map '%s': attr '%s': type [%u] not found.\n",
2348 map_name, name, t->type);
2351 if (!btf_is_array(arr_t)) {
2352 pr_warn("map '%s': attr '%s': expected ARRAY, got %s.\n",
2353 map_name, name, btf_kind_str(arr_t));
2356 arr_info = btf_array(arr_t);
2357 *res = arr_info->nelems;
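/* A BTF-defined map using this pointer-to-array encoding, as emitted by
 * the __uint()/__type() helpers from bpf_helpers.h (sketch):
 *
 *	struct {
 *		__uint(type, BPF_MAP_TYPE_ARRAY);
 *		__uint(max_entries, 256);
 *		__type(key, __u32);
 *		__type(value, __u64);
 *	} my_map SEC(".maps");
 */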
2361 static bool get_map_field_long(const char *map_name, const struct btf *btf,
2362 const struct btf_member *m, __u64 *res)
2364 const struct btf_type *t = skip_mods_and_typedefs(btf, m->type, NULL);
2365 const char *name = btf__name_by_offset(btf, m->name_off);
2367 if (btf_is_ptr(t)) {
2371 ret = get_map_field_int(map_name, btf, m, &res32);
2373 *res = (__u64)res32;
2377 if (!btf_is_enum(t) && !btf_is_enum64(t)) {
2378 pr_warn("map '%s': attr '%s': expected ENUM or ENUM64, got %s.\n",
2379 map_name, name, btf_kind_str(t));
2383 if (btf_vlen(t) != 1) {
2384 pr_warn("map '%s': attr '%s': invalid __ulong\n",
2389 if (btf_is_enum(t)) {
2390 const struct btf_enum *e = btf_enum(t);
2394 const struct btf_enum64 *e = btf_enum64(t);
2396 *res = btf_enum64_value(e);
2401 static int pathname_concat(char *buf, size_t buf_sz, const char *path, const char *name)
2405 len = snprintf(buf, buf_sz, "%s/%s", path, name);
2409 return -ENAMETOOLONG;
2414 static int build_map_pin_path(struct bpf_map *map, const char *path)
2420 path = BPF_FS_DEFAULT_PATH;
2422 err = pathname_concat(buf, sizeof(buf), path, bpf_map__name(map));
2426 return bpf_map__set_pin_path(map, buf);
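/* Worked example: for a map named "my_map" and a NULL path argument,
 * build_map_pin_path() produces "/sys/fs/bpf/my_map" (BPF_FS_DEFAULT_PATH
 * joined with the map name by pathname_concat()).
 */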
2429 /* should match definition in bpf_helpers.h */
2430 enum libbpf_pin_type {
2431 LIBBPF_PIN_NONE,
2432 /* PIN_BY_NAME: pin maps by name (in /sys/fs/bpf by default) */
2433 LIBBPF_PIN_BY_NAME,
2434 };
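/* Illustrative example of a BTF-defined map that parse_btf_map_def()
 * below understands, written with the usual bpf_helpers.h macros
 * (struct my_value is hypothetical):
 *
 *   struct {
 *       __uint(type, BPF_MAP_TYPE_HASH);
 *       __uint(max_entries, 1024);
 *       __type(key, __u32);
 *       __type(value, struct my_value);
 *       __uint(pinning, LIBBPF_PIN_BY_NAME);
 *   } my_map SEC(".maps");
 */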
2436 int parse_btf_map_def(const char *map_name, struct btf *btf,
2437 const struct btf_type *def_t, bool strict,
2438 struct btf_map_def *map_def, struct btf_map_def *inner_def)
2440 const struct btf_type *t;
2441 const struct btf_member *m;
2442 bool is_inner = inner_def == NULL;
2445 vlen = btf_vlen(def_t);
2446 m = btf_members(def_t);
2447 for (i = 0; i < vlen; i++, m++) {
2448 const char *name = btf__name_by_offset(btf, m->name_off);
2451 pr_warn("map '%s': invalid field #%d.\n", map_name, i);
2454 if (strcmp(name, "type") == 0) {
2455 if (!get_map_field_int(map_name, btf, m, &map_def->map_type))
2457 map_def->parts |= MAP_DEF_MAP_TYPE;
2458 } else if (strcmp(name, "max_entries") == 0) {
2459 if (!get_map_field_int(map_name, btf, m, &map_def->max_entries))
2461 map_def->parts |= MAP_DEF_MAX_ENTRIES;
2462 } else if (strcmp(name, "map_flags") == 0) {
2463 if (!get_map_field_int(map_name, btf, m, &map_def->map_flags))
2465 map_def->parts |= MAP_DEF_MAP_FLAGS;
2466 } else if (strcmp(name, "numa_node") == 0) {
2467 if (!get_map_field_int(map_name, btf, m, &map_def->numa_node))
2469 map_def->parts |= MAP_DEF_NUMA_NODE;
2470 } else if (strcmp(name, "key_size") == 0) {
2473 if (!get_map_field_int(map_name, btf, m, &sz))
2475 if (map_def->key_size && map_def->key_size != sz) {
2476 pr_warn("map '%s': conflicting key size %u != %u.\n",
2477 map_name, map_def->key_size, sz);
2480 map_def->key_size = sz;
2481 map_def->parts |= MAP_DEF_KEY_SIZE;
2482 } else if (strcmp(name, "key") == 0) {
2485 t = btf__type_by_id(btf, m->type);
2487 pr_warn("map '%s': key type [%d] not found.\n",
2491 if (!btf_is_ptr(t)) {
2492 pr_warn("map '%s': key spec is not PTR: %s.\n",
2493 map_name, btf_kind_str(t));
2496 sz = btf__resolve_size(btf, t->type);
2498 pr_warn("map '%s': can't determine key size for type [%u]: %zd.\n",
2499 map_name, t->type, (ssize_t)sz);
2502 if (map_def->key_size && map_def->key_size != sz) {
2503 pr_warn("map '%s': conflicting key size %u != %zd.\n",
2504 map_name, map_def->key_size, (ssize_t)sz);
2507 map_def->key_size = sz;
2508 map_def->key_type_id = t->type;
2509 map_def->parts |= MAP_DEF_KEY_SIZE | MAP_DEF_KEY_TYPE;
2510 } else if (strcmp(name, "value_size") == 0) {
2513 if (!get_map_field_int(map_name, btf, m, &sz))
2515 if (map_def->value_size && map_def->value_size != sz) {
2516 pr_warn("map '%s': conflicting value size %u != %u.\n",
2517 map_name, map_def->value_size, sz);
2520 map_def->value_size = sz;
2521 map_def->parts |= MAP_DEF_VALUE_SIZE;
2522 } else if (strcmp(name, "value") == 0) {
2525 t = btf__type_by_id(btf, m->type);
2527 pr_warn("map '%s': value type [%d] not found.\n",
2531 if (!btf_is_ptr(t)) {
2532 pr_warn("map '%s': value spec is not PTR: %s.\n",
2533 map_name, btf_kind_str(t));
2536 sz = btf__resolve_size(btf, t->type);
2538 pr_warn("map '%s': can't determine value size for type [%u]: %zd.\n",
2539 map_name, t->type, (ssize_t)sz);
2542 if (map_def->value_size && map_def->value_size != sz) {
2543 pr_warn("map '%s': conflicting value size %u != %zd.\n",
2544 map_name, map_def->value_size, (ssize_t)sz);
2547 map_def->value_size = sz;
2548 map_def->value_type_id = t->type;
2549 map_def->parts |= MAP_DEF_VALUE_SIZE | MAP_DEF_VALUE_TYPE;
2551 else if (strcmp(name, "values") == 0) {
2552 bool is_map_in_map = bpf_map_type__is_map_in_map(map_def->map_type);
2553 bool is_prog_array = map_def->map_type == BPF_MAP_TYPE_PROG_ARRAY;
2554 const char *desc = is_map_in_map ? "map-in-map inner" : "prog-array value";
2555 char inner_map_name[128];
2559 pr_warn("map '%s': multi-level inner maps not supported.\n",
2563 if (i != vlen - 1) {
2564 pr_warn("map '%s': '%s' member should be last.\n",
2568 if (!is_map_in_map && !is_prog_array) {
2569 pr_warn("map '%s': should be map-in-map or prog-array.\n",
2573 if (map_def->value_size && map_def->value_size != 4) {
2574 pr_warn("map '%s': conflicting value size %u != 4.\n",
2575 map_name, map_def->value_size);
2578 map_def->value_size = 4;
2579 t = btf__type_by_id(btf, m->type);
2581 pr_warn("map '%s': %s type [%d] not found.\n",
2582 map_name, desc, m->type);
2585 if (!btf_is_array(t) || btf_array(t)->nelems) {
2586 pr_warn("map '%s': %s spec is not a zero-sized array.\n",
2590 t = skip_mods_and_typedefs(btf, btf_array(t)->type, NULL);
2591 if (!btf_is_ptr(t)) {
2592 pr_warn("map '%s': %s def is of unexpected kind %s.\n",
2593 map_name, desc, btf_kind_str(t));
2596 t = skip_mods_and_typedefs(btf, t->type, NULL);
2597 if (is_prog_array) {
2598 if (!btf_is_func_proto(t)) {
2599 pr_warn("map '%s': prog-array value def is of unexpected kind %s.\n",
2600 map_name, btf_kind_str(t));
2605 if (!btf_is_struct(t)) {
2606 pr_warn("map '%s': map-in-map inner def is of unexpected kind %s.\n",
2607 map_name, btf_kind_str(t));
2611 snprintf(inner_map_name, sizeof(inner_map_name), "%s.inner", map_name);
2612 err = parse_btf_map_def(inner_map_name, btf, t, strict, inner_def, NULL);
2616 map_def->parts |= MAP_DEF_INNER_MAP;
2617 } else if (strcmp(name, "pinning") == 0) {
2621 pr_warn("map '%s': inner def can't be pinned.\n", map_name);
2624 if (!get_map_field_int(map_name, btf, m, &val))
2626 if (val != LIBBPF_PIN_NONE && val != LIBBPF_PIN_BY_NAME) {
2627 pr_warn("map '%s': invalid pinning value %u.\n",
2631 map_def->pinning = val;
2632 map_def->parts |= MAP_DEF_PINNING;
2633 } else if (strcmp(name, "map_extra") == 0) {
2636 if (!get_map_field_long(map_name, btf, m, &map_extra))
2638 map_def->map_extra = map_extra;
2639 map_def->parts |= MAP_DEF_MAP_EXTRA;
2642 pr_warn("map '%s': unknown field '%s'.\n", map_name, name);
2645 pr_debug("map '%s': ignoring unknown field '%s'.\n", map_name, name);
2649 if (map_def->map_type == BPF_MAP_TYPE_UNSPEC) {
2650 pr_warn("map '%s': map type isn't specified.\n", map_name);
2657 static size_t adjust_ringbuf_sz(size_t sz)
2659 __u32 page_sz = sysconf(_SC_PAGE_SIZE);
2662 /* if user forgot to set any size, make sure they see an error */
2665 /* Kernel expects BPF_MAP_TYPE_RINGBUF's max_entries to be
2666 * a power-of-2 multiple of the kernel's page size. If the user diligently
2667 * satisfied these conditions, pass the size through.
2669 if ((sz % page_sz) == 0 && is_pow_of_2(sz / page_sz))
2672 /* Otherwise find closest (page_sz * power_of_2) product bigger than
2673 * user-set size to satisfy both user size request and kernel
2674 * requirements and substitute correct max_entries for map creation.
2676 for (mul = 1; mul <= UINT_MAX / page_sz; mul <<= 1) {
2677 if (mul * page_sz > sz)
2678 return mul * page_sz;
2681 /* if it's impossible to satisfy the conditions (i.e., user size is
2682 * very close to UINT_MAX but is not a power-of-2 multiple of
2683 * page_size) then just return original size and let kernel reject it
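/* Worked example: with a 4096-byte page, a requested size of 1000000 is
 * not a power-of-2 multiple of the page size, so the loop above rounds it
 * up to 4096 * 256 = 1048576; a request of exactly 8192 (4096 * 2) passes
 * through unchanged.
 */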
2688 static bool map_is_ringbuf(const struct bpf_map *map)
2690 return map->def.type == BPF_MAP_TYPE_RINGBUF ||
2691 map->def.type == BPF_MAP_TYPE_USER_RINGBUF;
2694 static void fill_map_from_def(struct bpf_map *map, const struct btf_map_def *def)
2696 map->def.type = def->map_type;
2697 map->def.key_size = def->key_size;
2698 map->def.value_size = def->value_size;
2699 map->def.max_entries = def->max_entries;
2700 map->def.map_flags = def->map_flags;
2701 map->map_extra = def->map_extra;
2703 map->numa_node = def->numa_node;
2704 map->btf_key_type_id = def->key_type_id;
2705 map->btf_value_type_id = def->value_type_id;
2707 /* auto-adjust BPF ringbuf map max_entries to be a multiple of page size */
2708 if (map_is_ringbuf(map))
2709 map->def.max_entries = adjust_ringbuf_sz(map->def.max_entries);
2711 if (def->parts & MAP_DEF_MAP_TYPE)
2712 pr_debug("map '%s': found type = %u.\n", map->name, def->map_type);
2714 if (def->parts & MAP_DEF_KEY_TYPE)
2715 pr_debug("map '%s': found key [%u], sz = %u.\n",
2716 map->name, def->key_type_id, def->key_size);
2717 else if (def->parts & MAP_DEF_KEY_SIZE)
2718 pr_debug("map '%s': found key_size = %u.\n", map->name, def->key_size);
2720 if (def->parts & MAP_DEF_VALUE_TYPE)
2721 pr_debug("map '%s': found value [%u], sz = %u.\n",
2722 map->name, def->value_type_id, def->value_size);
2723 else if (def->parts & MAP_DEF_VALUE_SIZE)
2724 pr_debug("map '%s': found value_size = %u.\n", map->name, def->value_size);
2726 if (def->parts & MAP_DEF_MAX_ENTRIES)
2727 pr_debug("map '%s': found max_entries = %u.\n", map->name, def->max_entries);
2728 if (def->parts & MAP_DEF_MAP_FLAGS)
2729 pr_debug("map '%s': found map_flags = 0x%x.\n", map->name, def->map_flags);
2730 if (def->parts & MAP_DEF_MAP_EXTRA)
2731 pr_debug("map '%s': found map_extra = 0x%llx.\n", map->name,
2732 (unsigned long long)def->map_extra);
2733 if (def->parts & MAP_DEF_PINNING)
2734 pr_debug("map '%s': found pinning = %u.\n", map->name, def->pinning);
2735 if (def->parts & MAP_DEF_NUMA_NODE)
2736 pr_debug("map '%s': found numa_node = %u.\n", map->name, def->numa_node);
2738 if (def->parts & MAP_DEF_INNER_MAP)
2739 pr_debug("map '%s': found inner map definition.\n", map->name);
2742 static const char *btf_var_linkage_str(__u32 linkage)
2745 case BTF_VAR_STATIC: return "static";
2746 case BTF_VAR_GLOBAL_ALLOCATED: return "global";
2747 case BTF_VAR_GLOBAL_EXTERN: return "extern";
2748 default: return "unknown";
2752 static int bpf_object__init_user_btf_map(struct bpf_object *obj,
2753 const struct btf_type *sec,
2754 int var_idx, int sec_idx,
2755 const Elf_Data *data, bool strict,
2756 const char *pin_root_path)
2758 struct btf_map_def map_def = {}, inner_def = {};
2759 const struct btf_type *var, *def;
2760 const struct btf_var_secinfo *vi;
2761 const struct btf_var *var_extra;
2762 const char *map_name;
2763 struct bpf_map *map;
2766 vi = btf_var_secinfos(sec) + var_idx;
2767 var = btf__type_by_id(obj->btf, vi->type);
2768 var_extra = btf_var(var);
2769 map_name = btf__name_by_offset(obj->btf, var->name_off);
2771 if (map_name == NULL || map_name[0] == '\0') {
2772 pr_warn("map #%d: empty name.\n", var_idx);
2775 if ((__u64)vi->offset + vi->size > data->d_size) {
2776 pr_warn("map '%s' BTF data is corrupted.\n", map_name);
2779 if (!btf_is_var(var)) {
2780 pr_warn("map '%s': unexpected var kind %s.\n",
2781 map_name, btf_kind_str(var));
2784 if (var_extra->linkage != BTF_VAR_GLOBAL_ALLOCATED) {
2785 pr_warn("map '%s': unsupported map linkage %s.\n",
2786 map_name, btf_var_linkage_str(var_extra->linkage));
2790 def = skip_mods_and_typedefs(obj->btf, var->type, NULL);
2791 if (!btf_is_struct(def)) {
2792 pr_warn("map '%s': unexpected def kind %s.\n",
2793 map_name, btf_kind_str(def));
2796 if (def->size > vi->size) {
2797 pr_warn("map '%s': invalid def size.\n", map_name);
2801 map = bpf_object__add_map(obj);
2803 return PTR_ERR(map);
2804 map->name = strdup(map_name);
2806 pr_warn("map '%s': failed to alloc map name.\n", map_name);
2809 map->libbpf_type = LIBBPF_MAP_UNSPEC;
2810 map->def.type = BPF_MAP_TYPE_UNSPEC;
2811 map->sec_idx = sec_idx;
2812 map->sec_offset = vi->offset;
2813 map->btf_var_idx = var_idx;
2814 pr_debug("map '%s': at sec_idx %d, offset %zu.\n",
2815 map_name, map->sec_idx, map->sec_offset);
2817 err = parse_btf_map_def(map->name, obj->btf, def, strict, &map_def, &inner_def);
2821 fill_map_from_def(map, &map_def);
2823 if (map_def.pinning == LIBBPF_PIN_BY_NAME) {
2824 err = build_map_pin_path(map, pin_root_path);
2826 pr_warn("map '%s': couldn't build pin path.\n", map->name);
2831 if (map_def.parts & MAP_DEF_INNER_MAP) {
2832 map->inner_map = calloc(1, sizeof(*map->inner_map));
2833 if (!map->inner_map)
2835 map->inner_map->fd = create_placeholder_fd();
2836 if (map->inner_map->fd < 0)
2837 return map->inner_map->fd;
2838 map->inner_map->sec_idx = sec_idx;
2839 map->inner_map->name = malloc(strlen(map_name) + sizeof(".inner") + 1);
2840 if (!map->inner_map->name)
2842 sprintf(map->inner_map->name, "%s.inner", map_name);
2844 fill_map_from_def(map->inner_map, &inner_def);
2847 err = map_fill_btf_type_info(obj, map);
2854 static int init_arena_map_data(struct bpf_object *obj, struct bpf_map *map,
2855 const char *sec_name, int sec_idx,
2856 void *data, size_t data_sz)
2858 const long page_sz = sysconf(_SC_PAGE_SIZE);
2861 mmap_sz = bpf_map_mmap_sz(obj->arena_map);
2862 if (roundup(data_sz, page_sz) > mmap_sz) {
2863 pr_warn("elf: sec '%s': declared ARENA map size (%zu) is too small to hold global __arena variables of size %zu\n",
2864 sec_name, mmap_sz, data_sz);
2868 obj->arena_data = malloc(data_sz);
2869 if (!obj->arena_data)
2871 memcpy(obj->arena_data, data, data_sz);
2872 obj->arena_data_sz = data_sz;
2874 /* make bpf_map__init_value() work for ARENA maps */
2875 map->mmaped = obj->arena_data;
2880 static int bpf_object__init_user_btf_maps(struct bpf_object *obj, bool strict,
2881 const char *pin_root_path)
2883 const struct btf_type *sec = NULL;
2884 int nr_types, i, vlen, err;
2885 const struct btf_type *t;
2890 if (obj->efile.btf_maps_shndx < 0)
2893 scn = elf_sec_by_idx(obj, obj->efile.btf_maps_shndx);
2894 data = elf_sec_data(obj, scn);
2895 if (!scn || !data) {
2896 pr_warn("elf: failed to get %s map definitions for %s\n",
2897 MAPS_ELF_SEC, obj->path);
2901 nr_types = btf__type_cnt(obj->btf);
2902 for (i = 1; i < nr_types; i++) {
2903 t = btf__type_by_id(obj->btf, i);
2904 if (!btf_is_datasec(t))
2906 name = btf__name_by_offset(obj->btf, t->name_off);
2907 if (strcmp(name, MAPS_ELF_SEC) == 0) {
2909 obj->efile.btf_maps_sec_btf_id = i;
2915 pr_warn("DATASEC '%s' not found.\n", MAPS_ELF_SEC);
2919 vlen = btf_vlen(sec);
2920 for (i = 0; i < vlen; i++) {
2921 err = bpf_object__init_user_btf_map(obj, sec, i,
2922 obj->efile.btf_maps_shndx,
2929 for (i = 0; i < obj->nr_maps; i++) {
2930 struct bpf_map *map = &obj->maps[i];
2932 if (map->def.type != BPF_MAP_TYPE_ARENA)
2935 if (obj->arena_map) {
2936 pr_warn("map '%s': only a single ARENA map is supported (map '%s' is also ARENA)\n",
2937 map->name, obj->arena_map->name);
2940 obj->arena_map = map;
2942 if (obj->efile.arena_data) {
2943 err = init_arena_map_data(obj, map, ARENA_SEC, obj->efile.arena_data_shndx,
2944 obj->efile.arena_data->d_buf,
2945 obj->efile.arena_data->d_size);
2950 if (obj->efile.arena_data && !obj->arena_map) {
2951 pr_warn("elf: sec '%s': to use global __arena variables the ARENA map should be explicitly declared in SEC(\".maps\")\n",
2959 static int bpf_object__init_maps(struct bpf_object *obj,
2960 const struct bpf_object_open_opts *opts)
2962 const char *pin_root_path;
2966 strict = !OPTS_GET(opts, relaxed_maps, false);
2967 pin_root_path = OPTS_GET(opts, pin_root_path, NULL);
2969 err = bpf_object__init_user_btf_maps(obj, strict, pin_root_path);
2970 err = err ?: bpf_object__init_global_data_maps(obj);
2971 err = err ?: bpf_object__init_kconfig_map(obj);
2972 err = err ?: bpf_object_init_struct_ops(obj);
2977 static bool section_have_execinstr(struct bpf_object *obj, int idx)
2981 sh = elf_sec_hdr(obj, elf_sec_by_idx(obj, idx));
2985 return sh->sh_flags & SHF_EXECINSTR;
2988 static bool starts_with_qmark(const char *s)
2990 return s && s[0] == '?';
2993 static bool btf_needs_sanitization(struct bpf_object *obj)
2995 bool has_func_global = kernel_supports(obj, FEAT_BTF_GLOBAL_FUNC);
2996 bool has_datasec = kernel_supports(obj, FEAT_BTF_DATASEC);
2997 bool has_float = kernel_supports(obj, FEAT_BTF_FLOAT);
2998 bool has_func = kernel_supports(obj, FEAT_BTF_FUNC);
2999 bool has_decl_tag = kernel_supports(obj, FEAT_BTF_DECL_TAG);
3000 bool has_type_tag = kernel_supports(obj, FEAT_BTF_TYPE_TAG);
3001 bool has_enum64 = kernel_supports(obj, FEAT_BTF_ENUM64);
3002 bool has_qmark_datasec = kernel_supports(obj, FEAT_BTF_QMARK_DATASEC);
3004 return !has_func || !has_datasec || !has_func_global || !has_float ||
3005 !has_decl_tag || !has_type_tag || !has_enum64 || !has_qmark_datasec;
3008 static int bpf_object__sanitize_btf(struct bpf_object *obj, struct btf *btf)
3010 bool has_func_global = kernel_supports(obj, FEAT_BTF_GLOBAL_FUNC);
3011 bool has_datasec = kernel_supports(obj, FEAT_BTF_DATASEC);
3012 bool has_float = kernel_supports(obj, FEAT_BTF_FLOAT);
3013 bool has_func = kernel_supports(obj, FEAT_BTF_FUNC);
3014 bool has_decl_tag = kernel_supports(obj, FEAT_BTF_DECL_TAG);
3015 bool has_type_tag = kernel_supports(obj, FEAT_BTF_TYPE_TAG);
3016 bool has_enum64 = kernel_supports(obj, FEAT_BTF_ENUM64);
3017 bool has_qmark_datasec = kernel_supports(obj, FEAT_BTF_QMARK_DATASEC);
3018 int enum64_placeholder_id = 0;
3022 for (i = 1; i < btf__type_cnt(btf); i++) {
3023 t = (struct btf_type *)btf__type_by_id(btf, i);
3025 if ((!has_datasec && btf_is_var(t)) || (!has_decl_tag && btf_is_decl_tag(t))) {
3026 /* replace VAR/DECL_TAG with INT */
3027 t->info = BTF_INFO_ENC(BTF_KIND_INT, 0, 0);
3029 * using size = 1 is the safest choice, 4 will be too
3030 * big and cause kernel BTF validation failure if
3031 * original variable took less than 4 bytes
3034 *(int *)(t + 1) = BTF_INT_ENC(0, 0, 8);
3035 } else if (!has_datasec && btf_is_datasec(t)) {
3036 /* replace DATASEC with STRUCT */
3037 const struct btf_var_secinfo *v = btf_var_secinfos(t);
3038 struct btf_member *m = btf_members(t);
3039 struct btf_type *vt;
3042 name = (char *)btf__name_by_offset(btf, t->name_off);
3044 if (*name == '.' || *name == '?')
3050 t->info = BTF_INFO_ENC(BTF_KIND_STRUCT, 0, vlen);
3051 for (j = 0; j < vlen; j++, v++, m++) {
3052 /* order of field assignments is important */
3053 m->offset = v->offset * 8;
3055 /* preserve variable name as member name */
3056 vt = (void *)btf__type_by_id(btf, v->type);
3057 m->name_off = vt->name_off;
3059 } else if (!has_qmark_datasec && btf_is_datasec(t) &&
3060 starts_with_qmark(btf__name_by_offset(btf, t->name_off))) {
3061 /* replace '?' prefix with '_' for DATASEC names */
3064 name = (char *)btf__name_by_offset(btf, t->name_off);
3067 } else if (!has_func && btf_is_func_proto(t)) {
3068 /* replace FUNC_PROTO with ENUM */
3070 t->info = BTF_INFO_ENC(BTF_KIND_ENUM, 0, vlen);
3071 t->size = sizeof(__u32); /* kernel enforced */
3072 } else if (!has_func && btf_is_func(t)) {
3073 /* replace FUNC with TYPEDEF */
3074 t->info = BTF_INFO_ENC(BTF_KIND_TYPEDEF, 0, 0);
3075 } else if (!has_func_global && btf_is_func(t)) {
3076 /* replace BTF_FUNC_GLOBAL with BTF_FUNC_STATIC */
3077 t->info = BTF_INFO_ENC(BTF_KIND_FUNC, 0, 0);
3078 } else if (!has_float && btf_is_float(t)) {
3079 /* replace FLOAT with an equally-sized empty STRUCT;
3080 * since C compilers do not accept e.g. "float" as a
3081 * valid struct name, make it anonymous
3084 t->info = BTF_INFO_ENC(BTF_KIND_STRUCT, 0, 0);
3085 } else if (!has_type_tag && btf_is_type_tag(t)) {
3086 /* replace TYPE_TAG with a CONST */
3088 t->info = BTF_INFO_ENC(BTF_KIND_CONST, 0, 0);
3089 } else if (!has_enum64 && btf_is_enum(t)) {
3090 /* clear the kflag */
3091 t->info = btf_type_info(btf_kind(t), btf_vlen(t), false);
3092 } else if (!has_enum64 && btf_is_enum64(t)) {
3093 /* replace ENUM64 with a union */
3094 struct btf_member *m;
3096 if (enum64_placeholder_id == 0) {
3097 enum64_placeholder_id = btf__add_int(btf, "enum64_placeholder", 1, 0);
3098 if (enum64_placeholder_id < 0)
3099 return enum64_placeholder_id;
3101 t = (struct btf_type *)btf__type_by_id(btf, i);
3106 t->info = BTF_INFO_ENC(BTF_KIND_UNION, 0, vlen);
3107 for (j = 0; j < vlen; j++, m++) {
3108 m->type = enum64_placeholder_id;
3117 static bool libbpf_needs_btf(const struct bpf_object *obj)
3119 return obj->efile.btf_maps_shndx >= 0 ||
3120 obj->efile.has_st_ops ||
3124 static bool kernel_needs_btf(const struct bpf_object *obj)
3126 return obj->efile.has_st_ops;
3129 static int bpf_object__init_btf(struct bpf_object *obj,
3131 Elf_Data *btf_ext_data)
3136 obj->btf = btf__new(btf_data->d_buf, btf_data->d_size);
3137 err = libbpf_get_error(obj->btf);
3140 pr_warn("Error loading ELF section %s: %d.\n", BTF_ELF_SEC, err);
3143 /* enforce 8-byte pointers for BPF-targeted BTFs */
3144 btf__set_pointer_size(obj->btf, 8);
3147 struct btf_ext_info *ext_segs[3];
3148 int seg_num, sec_num;
3151 pr_debug("Ignoring ELF section %s because its dependent ELF section %s was not found.\n",
3152 BTF_EXT_ELF_SEC, BTF_ELF_SEC);
3155 obj->btf_ext = btf_ext__new(btf_ext_data->d_buf, btf_ext_data->d_size);
3156 err = libbpf_get_error(obj->btf_ext);
3158 pr_warn("Error loading ELF section %s: %d. Ignoring it and continuing.\n",
3159 BTF_EXT_ELF_SEC, err);
3160 obj->btf_ext = NULL;
3164 /* setup .BTF.ext to ELF section mapping */
3165 ext_segs[0] = &obj->btf_ext->func_info;
3166 ext_segs[1] = &obj->btf_ext->line_info;
3167 ext_segs[2] = &obj->btf_ext->core_relo_info;
3168 for (seg_num = 0; seg_num < ARRAY_SIZE(ext_segs); seg_num++) {
3169 struct btf_ext_info *seg = ext_segs[seg_num];
3170 const struct btf_ext_info_sec *sec;
3171 const char *sec_name;
3174 if (seg->sec_cnt == 0)
3177 seg->sec_idxs = calloc(seg->sec_cnt, sizeof(*seg->sec_idxs));
3178 if (!seg->sec_idxs) {
3184 for_each_btf_ext_sec(seg, sec) {
3185 /* preventively increment index to avoid doing
3186 * this before every continue below
3190 sec_name = btf__name_by_offset(obj->btf, sec->sec_name_off);
3191 if (str_is_empty(sec_name))
3193 scn = elf_sec_by_name(obj, sec_name);
3197 seg->sec_idxs[sec_num - 1] = elf_ndxscn(scn);
3202 if (err && libbpf_needs_btf(obj)) {
3203 pr_warn("BTF is required, but is missing or corrupted.\n");
3209 static int compare_vsi_off(const void *_a, const void *_b)
3211 const struct btf_var_secinfo *a = _a;
3212 const struct btf_var_secinfo *b = _b;
3214 return a->offset - b->offset;
3217 static int btf_fixup_datasec(struct bpf_object *obj, struct btf *btf,
3220 __u32 size = 0, i, vars = btf_vlen(t);
3221 const char *sec_name = btf__name_by_offset(btf, t->name_off);
3222 struct btf_var_secinfo *vsi;
3223 bool fixup_offsets = false;
3227 pr_debug("No name found in string section for DATASEC kind.\n");
3231 /* Extern-backing datasecs (.ksyms, .kconfig) have their size and
3232 * variable offsets set at the previous step. Further, not every
3233 * extern BTF VAR has corresponding ELF symbol preserved, so we skip
3234 * all fixups altogether for such sections and go straight to sorting
3235 * VARs within their DATASEC.
3237 if (strcmp(sec_name, KCONFIG_SEC) == 0 || strcmp(sec_name, KSYMS_SEC) == 0)
3240 /* Clang leaves DATASEC size and VAR offsets as zeroes, so we need to
3241 * fix this up. But BPF static linker already fixes this up and fills
3242 * all the sizes and offsets during static linking. So this step has
3243 * to be optional. But the STV_HIDDEN handling is non-optional for any
3244 * non-extern DATASEC, so the variable fixup loop below handles both
3245 * tasks at the same time, paying the cost of BTF VAR <-> ELF
3246 * symbol matching just once.
3249 err = find_elf_sec_sz(obj, sec_name, &size);
3251 pr_debug("sec '%s': failed to determine size from ELF: size %u, err %d\n",
3252 sec_name, size, err);
3257 fixup_offsets = true;
3260 for (i = 0, vsi = btf_var_secinfos(t); i < vars; i++, vsi++) {
3261 const struct btf_type *t_var;
3262 struct btf_var *var;
3263 const char *var_name;
3266 t_var = btf__type_by_id(btf, vsi->type);
3267 if (!t_var || !btf_is_var(t_var)) {
3268 pr_debug("sec '%s': unexpected non-VAR type found\n", sec_name);
3272 var = btf_var(t_var);
3273 if (var->linkage == BTF_VAR_STATIC || var->linkage == BTF_VAR_GLOBAL_EXTERN)
3276 var_name = btf__name_by_offset(btf, t_var->name_off);
3278 pr_debug("sec '%s': failed to find name of DATASEC's member #%d\n",
3283 sym = find_elf_var_sym(obj, var_name);
3285 pr_debug("sec '%s': failed to find ELF symbol for VAR '%s'\n",
3286 sec_name, var_name);
3291 vsi->offset = sym->st_value;
3293 /* if variable is a global/weak symbol, but has restricted
3294 * (STV_HIDDEN or STV_INTERNAL) visibility, mark its BTF VAR
3295 * as static. This follows similar logic for functions (BPF
3296 * subprogs) and influences libbpf's further decisions about
3297 * whether to make global data BPF array maps as
3300 if (ELF64_ST_VISIBILITY(sym->st_other) == STV_HIDDEN
3301 || ELF64_ST_VISIBILITY(sym->st_other) == STV_INTERNAL)
3302 var->linkage = BTF_VAR_STATIC;
3306 qsort(btf_var_secinfos(t), vars, sizeof(*vsi), compare_vsi_off);
3310 static int bpf_object_fixup_btf(struct bpf_object *obj)
3317 n = btf__type_cnt(obj->btf);
3318 for (i = 1; i < n; i++) {
3319 struct btf_type *t = btf_type_by_id(obj->btf, i);
3321 /* Loader needs to fix up some of the things compiler
3322 * couldn't get its hands on while emitting BTF. This
3323 * is section size and global variable offset. We use
3324 * the info from the ELF itself for this purpose.
3326 if (btf_is_datasec(t)) {
3327 err = btf_fixup_datasec(obj, obj->btf, t);
3336 static bool prog_needs_vmlinux_btf(struct bpf_program *prog)
3338 if (prog->type == BPF_PROG_TYPE_STRUCT_OPS ||
3339 prog->type == BPF_PROG_TYPE_LSM)
3342 /* BPF_PROG_TYPE_TRACING programs which do not attach to other programs
3343 * also need vmlinux BTF
3345 if (prog->type == BPF_PROG_TYPE_TRACING && !prog->attach_prog_fd)
3351 static bool map_needs_vmlinux_btf(struct bpf_map *map)
3353 return bpf_map__is_struct_ops(map);
3356 static bool obj_needs_vmlinux_btf(const struct bpf_object *obj)
3358 struct bpf_program *prog;
3359 struct bpf_map *map;
3362 /* CO-RE relocations need kernel BTF, only when btf_custom_path
3365 if (obj->btf_ext && obj->btf_ext->core_relo_info.len && !obj->btf_custom_path)
3368 /* Support for typed ksyms needs kernel BTF */
3369 for (i = 0; i < obj->nr_extern; i++) {
3370 const struct extern_desc *ext;
3372 ext = &obj->externs[i];
3373 if (ext->type == EXT_KSYM && ext->ksym.type_id)
3377 bpf_object__for_each_program(prog, obj) {
3378 if (!prog->autoload)
3380 if (prog_needs_vmlinux_btf(prog))
3384 bpf_object__for_each_map(map, obj) {
3385 if (map_needs_vmlinux_btf(map))
3392 static int bpf_object__load_vmlinux_btf(struct bpf_object *obj, bool force)
3396 /* btf_vmlinux could be loaded earlier */
3397 if (obj->btf_vmlinux || obj->gen_loader)
3400 if (!force && !obj_needs_vmlinux_btf(obj))
3403 obj->btf_vmlinux = btf__load_vmlinux_btf();
3404 err = libbpf_get_error(obj->btf_vmlinux);
3406 pr_warn("Error loading vmlinux BTF: %d\n", err);
3407 obj->btf_vmlinux = NULL;
3413 static int bpf_object__sanitize_and_load_btf(struct bpf_object *obj)
3415 struct btf *kern_btf = obj->btf;
3416 bool btf_mandatory, sanitize;
3422 if (!kernel_supports(obj, FEAT_BTF)) {
3423 if (kernel_needs_btf(obj)) {
3427 pr_debug("Kernel doesn't support BTF, skipping upload.\n");
3431 /* Even though some subprogs are global/weak, user might prefer more
3432 * permissive BPF verification process that BPF verifier performs for
3433 * static functions, taking into account more context from the caller
3434 * functions. In such a case, they need to mark such subprogs with
3435 * __attribute__((visibility("hidden"))) and libbpf will adjust
3436 * corresponding FUNC BTF type to be marked as static and trigger more
3437 * involved BPF verification process.
3439 for (i = 0; i < obj->nr_programs; i++) {
3440 struct bpf_program *prog = &obj->programs[i];
3445 if (!prog->mark_btf_static || !prog_is_subprog(obj, prog))
3448 n = btf__type_cnt(obj->btf);
3449 for (j = 1; j < n; j++) {
3450 t = btf_type_by_id(obj->btf, j);
3451 if (!btf_is_func(t) || btf_func_linkage(t) != BTF_FUNC_GLOBAL)
3454 name = btf__str_by_offset(obj->btf, t->name_off);
3455 if (strcmp(name, prog->name) != 0)
3458 t->info = btf_type_info(BTF_KIND_FUNC, BTF_FUNC_STATIC, 0);
3463 sanitize = btf_needs_sanitization(obj);
3465 const void *raw_data;
3468 /* clone BTF to sanitize a copy and leave the original intact */
3469 raw_data = btf__raw_data(obj->btf, &sz);
3470 kern_btf = btf__new(raw_data, sz);
3471 err = libbpf_get_error(kern_btf);
3475 /* enforce 8-byte pointers for BPF-targeted BTFs */
3476 btf__set_pointer_size(obj->btf, 8);
3477 err = bpf_object__sanitize_btf(obj, kern_btf);
3482 if (obj->gen_loader) {
3484 const void *raw_data = btf__raw_data(kern_btf, &raw_size);
3488 bpf_gen__load_btf(obj->gen_loader, raw_data, raw_size);
3489 /* Pretend to have valid FD to pass various fd >= 0 checks.
3490 * This fd == 0 will not be used with any syscall and will be reset to -1 eventually.
3492 btf__set_fd(kern_btf, 0);
3494 /* currently BPF_BTF_LOAD only supports log_level 1 */
3495 err = btf_load_into_kernel(kern_btf, obj->log_buf, obj->log_size,
3496 obj->log_level ? 1 : 0, obj->token_fd);
3500 /* move fd to libbpf's BTF */
3501 btf__set_fd(obj->btf, btf__fd(kern_btf));
3502 btf__set_fd(kern_btf, -1);
3504 btf__free(kern_btf);
3508 btf_mandatory = kernel_needs_btf(obj);
3509 pr_warn("Error loading .BTF into kernel: %d. %s\n", err,
3510 btf_mandatory ? "BTF is mandatory, can't proceed."
3511 : "BTF is optional, ignoring.");
3518 static const char *elf_sym_str(const struct bpf_object *obj, size_t off)
3522 name = elf_strptr(obj->efile.elf, obj->efile.strtabidx, off);
3524 pr_warn("elf: failed to get section name string at offset %zu from %s: %s\n",
3525 off, obj->path, elf_errmsg(-1));
3532 static const char *elf_sec_str(const struct bpf_object *obj, size_t off)
3536 name = elf_strptr(obj->efile.elf, obj->efile.shstrndx, off);
3538 pr_warn("elf: failed to get section name string at offset %zu from %s: %s\n",
3539 off, obj->path, elf_errmsg(-1));
3546 static Elf_Scn *elf_sec_by_idx(const struct bpf_object *obj, size_t idx)
3550 scn = elf_getscn(obj->efile.elf, idx);
3552 pr_warn("elf: failed to get section(%zu) from %s: %s\n",
3553 idx, obj->path, elf_errmsg(-1));
3559 static Elf_Scn *elf_sec_by_name(const struct bpf_object *obj, const char *name)
3561 Elf_Scn *scn = NULL;
3562 Elf *elf = obj->efile.elf;
3563 const char *sec_name;
3565 while ((scn = elf_nextscn(elf, scn)) != NULL) {
3566 sec_name = elf_sec_name(obj, scn);
3570 if (strcmp(sec_name, name) != 0)
3578 static Elf64_Shdr *elf_sec_hdr(const struct bpf_object *obj, Elf_Scn *scn)
3585 shdr = elf64_getshdr(scn);
3587 pr_warn("elf: failed to get section(%zu) header from %s: %s\n",
3588 elf_ndxscn(scn), obj->path, elf_errmsg(-1));
3595 static const char *elf_sec_name(const struct bpf_object *obj, Elf_Scn *scn)
3603 sh = elf_sec_hdr(obj, scn);
3607 name = elf_sec_str(obj, sh->sh_name);
3609 pr_warn("elf: failed to get section(%zu) name from %s: %s\n",
3610 elf_ndxscn(scn), obj->path, elf_errmsg(-1));
3617 static Elf_Data *elf_sec_data(const struct bpf_object *obj, Elf_Scn *scn)
3624 data = elf_getdata(scn, 0);
3626 pr_warn("elf: failed to get section(%zu) %s data from %s: %s\n",
3627 elf_ndxscn(scn), elf_sec_name(obj, scn) ?: "<?>",
3628 obj->path, elf_errmsg(-1));
3635 static Elf64_Sym *elf_sym_by_idx(const struct bpf_object *obj, size_t idx)
3637 if (idx >= obj->efile.symbols->d_size / sizeof(Elf64_Sym))
3640 return (Elf64_Sym *)obj->efile.symbols->d_buf + idx;
3643 static Elf64_Rel *elf_rel_by_idx(Elf_Data *data, size_t idx)
3645 if (idx >= data->d_size / sizeof(Elf64_Rel))
3648 return (Elf64_Rel *)data->d_buf + idx;
3651 static bool is_sec_name_dwarf(const char *name)
3653 /* approximation, but the actual list is too long */
3654 return str_has_pfx(name, ".debug_");
3657 static bool ignore_elf_section(Elf64_Shdr *hdr, const char *name)
3659 /* no special handling of .strtab */
3660 if (hdr->sh_type == SHT_STRTAB)
3663 /* ignore .llvm_addrsig section as well */
3664 if (hdr->sh_type == SHT_LLVM_ADDRSIG)
3667 /* no subprograms will lead to an empty .text section, ignore it */
3668 if (hdr->sh_type == SHT_PROGBITS && hdr->sh_size == 0 &&
3669 strcmp(name, ".text") == 0)
3672 /* DWARF sections */
3673 if (is_sec_name_dwarf(name))
3676 if (str_has_pfx(name, ".rel")) {
3677 name += sizeof(".rel") - 1;
3678 /* DWARF section relocations */
3679 if (is_sec_name_dwarf(name))
3682 /* .BTF and .BTF.ext don't need relocations */
3683 if (strcmp(name, BTF_ELF_SEC) == 0 ||
3684 strcmp(name, BTF_EXT_ELF_SEC) == 0)
3691 static int cmp_progs(const void *_a, const void *_b)
3693 const struct bpf_program *a = _a;
3694 const struct bpf_program *b = _b;
3696 if (a->sec_idx != b->sec_idx)
3697 return a->sec_idx < b->sec_idx ? -1 : 1;
3699 /* sec_insn_off can't be the same within the section */
3700 return a->sec_insn_off < b->sec_insn_off ? -1 : 1;
3703 static int bpf_object__elf_collect(struct bpf_object *obj)
3705 struct elf_sec_desc *sec_desc;
3706 Elf *elf = obj->efile.elf;
3707 Elf_Data *btf_ext_data = NULL;
3708 Elf_Data *btf_data = NULL;
3709 int idx = 0, err = 0;
3715 /* ELF section indices are 0-based, but sec #0 is special "invalid"
3716 * section. Since section count retrieved by elf_getshdrnum() does
3717 * include sec #0, it is already the necessary size of an array to keep
3718 * all the sections.
3720 if (elf_getshdrnum(obj->efile.elf, &obj->efile.sec_cnt)) {
3721 pr_warn("elf: failed to get the number of sections for %s: %s\n",
3722 obj->path, elf_errmsg(-1));
3723 return -LIBBPF_ERRNO__FORMAT;
3725 obj->efile.secs = calloc(obj->efile.sec_cnt, sizeof(*obj->efile.secs));
3726 if (!obj->efile.secs)
3729 /* a bunch of ELF parsing functionality depends on processing symbols,
3730 * so do the first pass and find the symbol table
3733 while ((scn = elf_nextscn(elf, scn)) != NULL) {
3734 sh = elf_sec_hdr(obj, scn);
3736 return -LIBBPF_ERRNO__FORMAT;
3738 if (sh->sh_type == SHT_SYMTAB) {
3739 if (obj->efile.symbols) {
3740 pr_warn("elf: multiple symbol tables in %s\n", obj->path);
3741 return -LIBBPF_ERRNO__FORMAT;
3744 data = elf_sec_data(obj, scn);
3746 return -LIBBPF_ERRNO__FORMAT;
3748 idx = elf_ndxscn(scn);
3750 obj->efile.symbols = data;
3751 obj->efile.symbols_shndx = idx;
3752 obj->efile.strtabidx = sh->sh_link;
3756 if (!obj->efile.symbols) {
3757 pr_warn("elf: couldn't find symbol table in %s, stripped object file?\n",
3763 while ((scn = elf_nextscn(elf, scn)) != NULL) {
3764 idx = elf_ndxscn(scn);
3765 sec_desc = &obj->efile.secs[idx];
3767 sh = elf_sec_hdr(obj, scn);
3769 return -LIBBPF_ERRNO__FORMAT;
3771 name = elf_sec_str(obj, sh->sh_name);
3773 return -LIBBPF_ERRNO__FORMAT;
3775 if (ignore_elf_section(sh, name))
3778 data = elf_sec_data(obj, scn);
3780 return -LIBBPF_ERRNO__FORMAT;
3782 pr_debug("elf: section(%d) %s, size %ld, link %d, flags %lx, type=%d\n",
3783 idx, name, (unsigned long)data->d_size,
3784 (int)sh->sh_link, (unsigned long)sh->sh_flags,
3787 if (strcmp(name, "license") == 0) {
3788 err = bpf_object__init_license(obj, data->d_buf, data->d_size);
3791 } else if (strcmp(name, "version") == 0) {
3792 err = bpf_object__init_kversion(obj, data->d_buf, data->d_size);
3795 } else if (strcmp(name, "maps") == 0) {
3796 pr_warn("elf: legacy map definitions in 'maps' section are not supported by libbpf v1.0+\n");
3798 } else if (strcmp(name, MAPS_ELF_SEC) == 0) {
3799 obj->efile.btf_maps_shndx = idx;
3800 } else if (strcmp(name, BTF_ELF_SEC) == 0) {
3801 if (sh->sh_type != SHT_PROGBITS)
3802 return -LIBBPF_ERRNO__FORMAT;
3804 } else if (strcmp(name, BTF_EXT_ELF_SEC) == 0) {
3805 if (sh->sh_type != SHT_PROGBITS)
3806 return -LIBBPF_ERRNO__FORMAT;
3807 btf_ext_data = data;
3808 } else if (sh->sh_type == SHT_SYMTAB) {
3809 /* already processed during the first pass above */
3810 } else if (sh->sh_type == SHT_PROGBITS && data->d_size > 0) {
3811 if (sh->sh_flags & SHF_EXECINSTR) {
3812 if (strcmp(name, ".text") == 0)
3813 obj->efile.text_shndx = idx;
3814 err = bpf_object__add_programs(obj, data, name, idx);
3817 } else if (strcmp(name, DATA_SEC) == 0 ||
3818 str_has_pfx(name, DATA_SEC ".")) {
3819 sec_desc->sec_type = SEC_DATA;
3820 sec_desc->shdr = sh;
3821 sec_desc->data = data;
3822 } else if (strcmp(name, RODATA_SEC) == 0 ||
3823 str_has_pfx(name, RODATA_SEC ".")) {
3824 sec_desc->sec_type = SEC_RODATA;
3825 sec_desc->shdr = sh;
3826 sec_desc->data = data;
3827 } else if (strcmp(name, STRUCT_OPS_SEC) == 0 ||
3828 strcmp(name, STRUCT_OPS_LINK_SEC) == 0 ||
3829 strcmp(name, "?" STRUCT_OPS_SEC) == 0 ||
3830 strcmp(name, "?" STRUCT_OPS_LINK_SEC) == 0) {
3831 sec_desc->sec_type = SEC_ST_OPS;
3832 sec_desc->shdr = sh;
3833 sec_desc->data = data;
3834 obj->efile.has_st_ops = true;
3835 } else if (strcmp(name, ARENA_SEC) == 0) {
3836 obj->efile.arena_data = data;
3837 obj->efile.arena_data_shndx = idx;
3839 pr_info("elf: skipping unrecognized data section(%d) %s\n",
3842 } else if (sh->sh_type == SHT_REL) {
3843 int targ_sec_idx = sh->sh_info; /* points to other section */
3845 if (sh->sh_entsize != sizeof(Elf64_Rel) ||
3846 targ_sec_idx >= obj->efile.sec_cnt)
3847 return -LIBBPF_ERRNO__FORMAT;
3849 /* Only do relo for section with exec instructions */
3850 if (!section_have_execinstr(obj, targ_sec_idx) &&
3851 strcmp(name, ".rel" STRUCT_OPS_SEC) &&
3852 strcmp(name, ".rel" STRUCT_OPS_LINK_SEC) &&
3853 strcmp(name, ".rel?" STRUCT_OPS_SEC) &&
3854 strcmp(name, ".rel?" STRUCT_OPS_LINK_SEC) &&
3855 strcmp(name, ".rel" MAPS_ELF_SEC)) {
3856 pr_info("elf: skipping relo section(%d) %s for section(%d) %s\n",
3857 idx, name, targ_sec_idx,
3858 elf_sec_name(obj, elf_sec_by_idx(obj, targ_sec_idx)) ?: "<?>");
3862 sec_desc->sec_type = SEC_RELO;
3863 sec_desc->shdr = sh;
3864 sec_desc->data = data;
3865 } else if (sh->sh_type == SHT_NOBITS && (strcmp(name, BSS_SEC) == 0 ||
3866 str_has_pfx(name, BSS_SEC "."))) {
3867 sec_desc->sec_type = SEC_BSS;
3868 sec_desc->shdr = sh;
3869 sec_desc->data = data;
3871 pr_info("elf: skipping section(%d) %s (size %zu)\n", idx, name,
3872 (size_t)sh->sh_size);
3876 if (!obj->efile.strtabidx || obj->efile.strtabidx > idx) {
3877 pr_warn("elf: symbol strings section missing or invalid in %s\n", obj->path);
3878 return -LIBBPF_ERRNO__FORMAT;
3881 /* sort BPF programs by section index and in-section instruction offset
3884 if (obj->nr_programs)
3885 qsort(obj->programs, obj->nr_programs, sizeof(*obj->programs), cmp_progs);
3887 return bpf_object__init_btf(obj, btf_data, btf_ext_data);
3890 static bool sym_is_extern(const Elf64_Sym *sym)
3892 int bind = ELF64_ST_BIND(sym->st_info);
3893 /* externs are symbols w/ type=NOTYPE, bind=GLOBAL|WEAK, section=UND */
3894 return sym->st_shndx == SHN_UNDEF &&
3895 (bind == STB_GLOBAL || bind == STB_WEAK) &&
3896 ELF64_ST_TYPE(sym->st_info) == STT_NOTYPE;
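/* Illustrative source-level declarations that produce such extern symbols
 * (using the __kconfig/__ksym section attributes from bpf_helpers.h):
 *
 *   extern int CONFIG_HZ __kconfig;
 *   extern void bpf_rcu_read_lock(void) __ksym;
 */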
3899 static bool sym_is_subprog(const Elf64_Sym *sym, int text_shndx)
3901 int bind = ELF64_ST_BIND(sym->st_info);
3902 int type = ELF64_ST_TYPE(sym->st_info);
3904 /* in .text section */
3905 if (sym->st_shndx != text_shndx)
3908 /* local function */
3909 if (bind == STB_LOCAL && type == STT_SECTION)
3912 /* global function */
3913 return bind == STB_GLOBAL && type == STT_FUNC;
3916 static int find_extern_btf_id(const struct btf *btf, const char *ext_name)
3918 const struct btf_type *t;
3925 n = btf__type_cnt(btf);
3926 for (i = 1; i < n; i++) {
3927 t = btf__type_by_id(btf, i);
3929 if (!btf_is_var(t) && !btf_is_func(t))
3932 tname = btf__name_by_offset(btf, t->name_off);
3933 if (strcmp(tname, ext_name))
3936 if (btf_is_var(t) &&
3937 btf_var(t)->linkage != BTF_VAR_GLOBAL_EXTERN)
3940 if (btf_is_func(t) && btf_func_linkage(t) != BTF_FUNC_EXTERN)
3949 static int find_extern_sec_btf_id(struct btf *btf, int ext_btf_id) {
3950 const struct btf_var_secinfo *vs;
3951 const struct btf_type *t;
3957 n = btf__type_cnt(btf);
3958 for (i = 1; i < n; i++) {
3959 t = btf__type_by_id(btf, i);
3961 if (!btf_is_datasec(t))
3964 vs = btf_var_secinfos(t);
3965 for (j = 0; j < btf_vlen(t); j++, vs++) {
3966 if (vs->type == ext_btf_id)
3974 static enum kcfg_type find_kcfg_type(const struct btf *btf, int id,
3977 const struct btf_type *t;
3980 t = skip_mods_and_typedefs(btf, id, NULL);
3981 name = btf__name_by_offset(btf, t->name_off);
3985 switch (btf_kind(t)) {
3986 case BTF_KIND_INT: {
3987 int enc = btf_int_encoding(t);
3989 if (enc & BTF_INT_BOOL)
3990 return t->size == 1 ? KCFG_BOOL : KCFG_UNKNOWN;
3992 *is_signed = enc & BTF_INT_SIGNED;
3995 if (t->size < 1 || t->size > 8 || (t->size & (t->size - 1)))
3996 return KCFG_UNKNOWN;
4001 return KCFG_UNKNOWN;
4002 if (strcmp(name, "libbpf_tristate"))
4003 return KCFG_UNKNOWN;
4004 return KCFG_TRISTATE;
4005 case BTF_KIND_ENUM64:
4006 if (strcmp(name, "libbpf_tristate"))
4007 return KCFG_UNKNOWN;
4008 return KCFG_TRISTATE;
4009 case BTF_KIND_ARRAY:
4010 if (btf_array(t)->nelems == 0)
4011 return KCFG_UNKNOWN;
4012 if (find_kcfg_type(btf, btf_array(t)->type, NULL) != KCFG_CHAR)
4013 return KCFG_UNKNOWN;
4014 return KCFG_CHAR_ARR;
4016 return KCFG_UNKNOWN;
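/* In practice this maps C declarations to kcfg types roughly as follows:
 * _Bool -> KCFG_BOOL, power-of-2 integers up to 8 bytes -> KCFG_INT,
 * enum libbpf_tristate -> KCFG_TRISTATE, char -> KCFG_CHAR, and
 * char[N] -> KCFG_CHAR_ARR; anything else is KCFG_UNKNOWN.
 */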
4020 static int cmp_externs(const void *_a, const void *_b)
4022 const struct extern_desc *a = _a;
4023 const struct extern_desc *b = _b;
4025 if (a->type != b->type)
4026 return a->type < b->type ? -1 : 1;
4028 if (a->type == EXT_KCFG) {
4029 /* descending order by alignment requirements */
4030 if (a->kcfg.align != b->kcfg.align)
4031 return a->kcfg.align > b->kcfg.align ? -1 : 1;
4032 /* ascending order by size, within same alignment class */
4033 if (a->kcfg.sz != b->kcfg.sz)
4034 return a->kcfg.sz < b->kcfg.sz ? -1 : 1;
4037 /* resolve ties by name */
4038 return strcmp(a->name, b->name);
4041 static int find_int_btf_id(const struct btf *btf)
4043 const struct btf_type *t;
4046 n = btf__type_cnt(btf);
4047 for (i = 1; i < n; i++) {
4048 t = btf__type_by_id(btf, i);
4050 if (btf_is_int(t) && btf_int_bits(t) == 32)
4057 static int add_dummy_ksym_var(struct btf *btf)
4059 int i, int_btf_id, sec_btf_id, dummy_var_btf_id;
4060 const struct btf_var_secinfo *vs;
4061 const struct btf_type *sec;
4066 sec_btf_id = btf__find_by_name_kind(btf, KSYMS_SEC,
4071 sec = btf__type_by_id(btf, sec_btf_id);
4072 vs = btf_var_secinfos(sec);
4073 for (i = 0; i < btf_vlen(sec); i++, vs++) {
4074 const struct btf_type *vt;
4076 vt = btf__type_by_id(btf, vs->type);
4077 if (btf_is_func(vt))
4081 /* No func in ksyms sec. No need to add dummy var. */
4082 if (i == btf_vlen(sec))
4085 int_btf_id = find_int_btf_id(btf);
4086 dummy_var_btf_id = btf__add_var(btf,
4088 BTF_VAR_GLOBAL_ALLOCATED,
4090 if (dummy_var_btf_id < 0)
4091 pr_warn("cannot create a dummy_ksym var\n");
4093 return dummy_var_btf_id;
4096 static int bpf_object__collect_externs(struct bpf_object *obj)
4098 struct btf_type *sec, *kcfg_sec = NULL, *ksym_sec = NULL;
4099 const struct btf_type *t;
4100 struct extern_desc *ext;
4101 int i, n, off, dummy_var_btf_id;
4102 const char *ext_name, *sec_name;
4103 size_t ext_essent_len;
4107 if (!obj->efile.symbols)
4110 scn = elf_sec_by_idx(obj, obj->efile.symbols_shndx);
4111 sh = elf_sec_hdr(obj, scn);
4112 if (!sh || sh->sh_entsize != sizeof(Elf64_Sym))
4113 return -LIBBPF_ERRNO__FORMAT;
4115 dummy_var_btf_id = add_dummy_ksym_var(obj->btf);
4116 if (dummy_var_btf_id < 0)
4117 return dummy_var_btf_id;
4119 n = sh->sh_size / sh->sh_entsize;
4120 pr_debug("looking for externs among %d symbols...\n", n);
4122 for (i = 0; i < n; i++) {
4123 Elf64_Sym *sym = elf_sym_by_idx(obj, i);
4126 return -LIBBPF_ERRNO__FORMAT;
4127 if (!sym_is_extern(sym))
4129 ext_name = elf_sym_str(obj, sym->st_name);
4130 if (!ext_name || !ext_name[0])
4134 ext = libbpf_reallocarray(ext, obj->nr_extern + 1, sizeof(*ext));
4138 ext = &ext[obj->nr_extern];
4139 memset(ext, 0, sizeof(*ext));
4142 ext->btf_id = find_extern_btf_id(obj->btf, ext_name);
4143 if (ext->btf_id <= 0) {
4144 pr_warn("failed to find BTF for extern '%s': %d\n",
4145 ext_name, ext->btf_id);
4148 t = btf__type_by_id(obj->btf, ext->btf_id);
4149 ext->name = btf__name_by_offset(obj->btf, t->name_off);
4151 ext->is_weak = ELF64_ST_BIND(sym->st_info) == STB_WEAK;
4153 ext_essent_len = bpf_core_essential_name_len(ext->name);
4154 ext->essent_name = NULL;
4155 if (ext_essent_len != strlen(ext->name)) {
4156 ext->essent_name = strndup(ext->name, ext_essent_len);
4157 if (!ext->essent_name)
4161 ext->sec_btf_id = find_extern_sec_btf_id(obj->btf, ext->btf_id);
4162 if (ext->sec_btf_id <= 0) {
4163 pr_warn("failed to find BTF for extern '%s' [%d] section: %d\n",
4164 ext_name, ext->btf_id, ext->sec_btf_id);
4165 return ext->sec_btf_id;
4167 sec = (void *)btf__type_by_id(obj->btf, ext->sec_btf_id);
4168 sec_name = btf__name_by_offset(obj->btf, sec->name_off);
4170 if (strcmp(sec_name, KCONFIG_SEC) == 0) {
4171 if (btf_is_func(t)) {
4172 pr_warn("extern function %s is unsupported under %s section\n",
4173 ext->name, KCONFIG_SEC);
4177 ext->type = EXT_KCFG;
4178 ext->kcfg.sz = btf__resolve_size(obj->btf, t->type);
4179 if (ext->kcfg.sz <= 0) {
4180 pr_warn("failed to resolve size of extern (kcfg) '%s': %d\n",
4181 ext_name, ext->kcfg.sz);
4182 return ext->kcfg.sz;
4184 ext->kcfg.align = btf__align_of(obj->btf, t->type);
4185 if (ext->kcfg.align <= 0) {
4186 pr_warn("failed to determine alignment of extern (kcfg) '%s': %d\n",
4187 ext_name, ext->kcfg.align);
4190 ext->kcfg.type = find_kcfg_type(obj->btf, t->type,
4191 &ext->kcfg.is_signed);
4192 if (ext->kcfg.type == KCFG_UNKNOWN) {
4193 pr_warn("extern (kcfg) '%s': type is unsupported\n", ext_name);
4196 } else if (strcmp(sec_name, KSYMS_SEC) == 0) {
4198 ext->type = EXT_KSYM;
4199 skip_mods_and_typedefs(obj->btf, t->type,
4200 &ext->ksym.type_id);
4202 pr_warn("unrecognized extern section '%s'\n", sec_name);
4206 pr_debug("collected %d externs total\n", obj->nr_extern);
4208 if (!obj->nr_extern)
4211 /* sort externs by type, for kcfg ones also by (align, size, name) */
4212 qsort(obj->externs, obj->nr_extern, sizeof(*ext), cmp_externs);
4214 /* for .ksyms section, we need to turn all externs into allocated
4215 * variables in BTF to pass kernel verification; we do this by
4216 * pretending that each extern is an int-sized (4-byte) variable
4219 /* find existing 4-byte integer type in BTF to use for fake
4220 * extern variables in DATASEC
4222 int int_btf_id = find_int_btf_id(obj->btf);
4223 /* For an extern function, the dummy_var added earlier
4224 * will be used to replace the vs->type, and
4225 * its name string will be used to fill in
4226 * any missing parameter names.
4228 const struct btf_type *dummy_var;
4230 dummy_var = btf__type_by_id(obj->btf, dummy_var_btf_id);
4231 for (i = 0; i < obj->nr_extern; i++) {
4232 ext = &obj->externs[i];
4233 if (ext->type != EXT_KSYM)
4235 pr_debug("extern (ksym) #%d: symbol %d, name %s\n",
4236 i, ext->sym_idx, ext->name);
4241 for (i = 0, off = 0; i < n; i++, off += sizeof(int)) {
4242 struct btf_var_secinfo *vs = btf_var_secinfos(sec) + i;
4243 struct btf_type *vt;
4245 vt = (void *)btf__type_by_id(obj->btf, vs->type);
4246 ext_name = btf__name_by_offset(obj->btf, vt->name_off);
4247 ext = find_extern_by_name(obj, ext_name);
4249 pr_warn("failed to find extern definition for BTF %s '%s'\n",
4250 btf_kind_str(vt), ext_name);
4253 if (btf_is_func(vt)) {
4254 const struct btf_type *func_proto;
4255 struct btf_param *param;
4258 func_proto = btf__type_by_id(obj->btf,
4260 param = btf_params(func_proto);
4261 /* Reuse the dummy_var name string if a
4262 * func proto param has no name.
4264 for (j = 0; j < btf_vlen(func_proto); j++)
4265 if (param[j].type && !param[j].name_off)
4266 param[j].name_off =
4267 dummy_var->name_off;
4268 vs->type = dummy_var_btf_id;
4269 vt->info &= ~0xffff;
4270 vt->info |= BTF_FUNC_GLOBAL;
4272 btf_var(vt)->linkage = BTF_VAR_GLOBAL_ALLOCATED;
4273 vt->type = int_btf_id;
4276 vs->size = sizeof(int);
4283 /* for kcfg externs calculate their offsets within a .kconfig map */
4285 for (i = 0; i < obj->nr_extern; i++) {
4286 ext = &obj->externs[i];
4287 if (ext->type != EXT_KCFG)
4290 ext->kcfg.data_off = roundup(off, ext->kcfg.align);
4291 off = ext->kcfg.data_off + ext->kcfg.sz;
4292 pr_debug("extern (kcfg) #%d: symbol %d, off %u, name %s\n",
4293 i, ext->sym_idx, ext->kcfg.data_off, ext->name);
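/* e.g. roundup(10, 8) = 16: the running offset is aligned up before each
 * kcfg extern is placed, and the descending-alignment sort above keeps
 * such padding holes to a minimum.
 */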
4297 for (i = 0; i < n; i++) {
4298 struct btf_var_secinfo *vs = btf_var_secinfos(sec) + i;
4300 t = btf__type_by_id(obj->btf, vs->type);
4301 ext_name = btf__name_by_offset(obj->btf, t->name_off);
4302 ext = find_extern_by_name(obj, ext_name);
4304 pr_warn("failed to find extern definition for BTF var '%s'\n",
4308 btf_var(t)->linkage = BTF_VAR_GLOBAL_ALLOCATED;
4309 vs->offset = ext->kcfg.data_off;
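/* A program in .text is a subprogram only when the object contains other
 * programs too; a lone .text program is treated as the main program.
 */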
4315 static bool prog_is_subprog(const struct bpf_object *obj, const struct bpf_program *prog)
4317 return prog->sec_idx == obj->efile.text_shndx && obj->nr_programs > 1;
4320 struct bpf_program *
4321 bpf_object__find_program_by_name(const struct bpf_object *obj,
4324 struct bpf_program *prog;
4326 bpf_object__for_each_program(prog, obj) {
4327 if (prog_is_subprog(obj, prog))
4329 if (!strcmp(prog->name, name))
4332 return errno = ENOENT, NULL;
4335 static bool bpf_object__shndx_is_data(const struct bpf_object *obj,
4338 switch (obj->efile.secs[shndx].sec_type) {
4348 static bool bpf_object__shndx_is_maps(const struct bpf_object *obj,
4351 return shndx == obj->efile.btf_maps_shndx;
4354 static enum libbpf_map_type
4355 bpf_object__section_to_libbpf_map_type(const struct bpf_object *obj, int shndx)
4357 if (shndx == obj->efile.symbols_shndx)
4358 return LIBBPF_MAP_KCONFIG;
4360 switch (obj->efile.secs[shndx].sec_type) {
4362 return LIBBPF_MAP_BSS;
4364 return LIBBPF_MAP_DATA;
4366 return LIBBPF_MAP_RODATA;
4368 return LIBBPF_MAP_UNSPEC;
4372 static int bpf_program__record_reloc(struct bpf_program *prog,
4373 struct reloc_desc *reloc_desc,
4374 __u32 insn_idx, const char *sym_name,
4375 const Elf64_Sym *sym, const Elf64_Rel *rel)
4377 struct bpf_insn *insn = &prog->insns[insn_idx];
4378 size_t map_idx, nr_maps = prog->obj->nr_maps;
4379 struct bpf_object *obj = prog->obj;
4380 __u32 shdr_idx = sym->st_shndx;
4381 enum libbpf_map_type type;
4382 const char *sym_sec_name;
4383 struct bpf_map *map;
4385 if (!is_call_insn(insn) && !is_ldimm64_insn(insn)) {
4386 pr_warn("prog '%s': invalid relo against '%s' for insns[%d].code 0x%x\n",
4387 prog->name, sym_name, insn_idx, insn->code);
4388 return -LIBBPF_ERRNO__RELOC;
4391 if (sym_is_extern(sym)) {
4392 int sym_idx = ELF64_R_SYM(rel->r_info);
4393 int i, n = obj->nr_extern;
4394 struct extern_desc *ext;
4396 for (i = 0; i < n; i++) {
4397 ext = &obj->externs[i];
4398 if (ext->sym_idx == sym_idx)
4402 pr_warn("prog '%s': extern relo failed to find extern for '%s' (%d)\n",
4403 prog->name, sym_name, sym_idx);
4404 return -LIBBPF_ERRNO__RELOC;
4406 pr_debug("prog '%s': found extern #%d '%s' (sym %d) for insn #%u\n",
4407 prog->name, i, ext->name, ext->sym_idx, insn_idx);
4408 if (insn->code == (BPF_JMP | BPF_CALL))
4409 reloc_desc->type = RELO_EXTERN_CALL;
4411 reloc_desc->type = RELO_EXTERN_LD64;
4412 reloc_desc->insn_idx = insn_idx;
4413 reloc_desc->ext_idx = i;
4417 /* sub-program call relocation */
4418 if (is_call_insn(insn)) {
4419 if (insn->src_reg != BPF_PSEUDO_CALL) {
4420 pr_warn("prog '%s': incorrect bpf_call opcode\n", prog->name);
4421 return -LIBBPF_ERRNO__RELOC;
4423 /* text_shndx can be 0, if no default "main" program exists */
4424 if (!shdr_idx || shdr_idx != obj->efile.text_shndx) {
4425 sym_sec_name = elf_sec_name(obj, elf_sec_by_idx(obj, shdr_idx));
4426 pr_warn("prog '%s': bad call relo against '%s' in section '%s'\n",
4427 prog->name, sym_name, sym_sec_name);
4428 return -LIBBPF_ERRNO__RELOC;
4430 if (sym->st_value % BPF_INSN_SZ) {
4431 pr_warn("prog '%s': bad call relo against '%s' at offset %zu\n",
4432 prog->name, sym_name, (size_t)sym->st_value);
4433 return -LIBBPF_ERRNO__RELOC;
4435 reloc_desc->type = RELO_CALL;
4436 reloc_desc->insn_idx = insn_idx;
4437 reloc_desc->sym_off = sym->st_value;
4441 if (!shdr_idx || shdr_idx >= SHN_LORESERVE) {
4442 pr_warn("prog '%s': invalid relo against '%s' in special section 0x%x; forgot to initialize global var?..\n",
4443 prog->name, sym_name, shdr_idx);
4444 return -LIBBPF_ERRNO__RELOC;
4447 /* loading subprog addresses */
4448 if (sym_is_subprog(sym, obj->efile.text_shndx)) {
4449 /* global_func: sym->st_value = offset in the section, insn->imm = 0.
4450 * local_func: sym->st_value = 0, insn->imm = offset in the section.
4452 if ((sym->st_value % BPF_INSN_SZ) || (insn->imm % BPF_INSN_SZ)) {
4453 pr_warn("prog '%s': bad subprog addr relo against '%s' at offset %zu+%d\n",
4454 prog->name, sym_name, (size_t)sym->st_value, insn->imm);
4455 return -LIBBPF_ERRNO__RELOC;
4458 reloc_desc->type = RELO_SUBPROG_ADDR;
4459 reloc_desc->insn_idx = insn_idx;
4460 reloc_desc->sym_off = sym->st_value;
4464 type = bpf_object__section_to_libbpf_map_type(obj, shdr_idx);
4465 sym_sec_name = elf_sec_name(obj, elf_sec_by_idx(obj, shdr_idx));
4467 /* arena data relocation */
4468 if (shdr_idx == obj->efile.arena_data_shndx) {
4469 reloc_desc->type = RELO_DATA;
4470 reloc_desc->insn_idx = insn_idx;
4471 reloc_desc->map_idx = obj->arena_map - obj->maps;
4472 reloc_desc->sym_off = sym->st_value;
4476 /* generic map reference relocation */
4477 if (type == LIBBPF_MAP_UNSPEC) {
4478 if (!bpf_object__shndx_is_maps(obj, shdr_idx)) {
4479 pr_warn("prog '%s': bad map relo against '%s' in section '%s'\n",
4480 prog->name, sym_name, sym_sec_name);
4481 return -LIBBPF_ERRNO__RELOC;
4483 for (map_idx = 0; map_idx < nr_maps; map_idx++) {
4484 map = &obj->maps[map_idx];
4485 if (map->libbpf_type != type ||
4486 map->sec_idx != sym->st_shndx ||
4487 map->sec_offset != sym->st_value)
4489 pr_debug("prog '%s': found map %zd (%s, sec %d, off %zu) for insn #%u\n",
4490 prog->name, map_idx, map->name, map->sec_idx,
4491 map->sec_offset, insn_idx);
4494 if (map_idx >= nr_maps) {
4495 pr_warn("prog '%s': map relo failed to find map for section '%s', off %zu\n",
4496 prog->name, sym_sec_name, (size_t)sym->st_value);
4497 return -LIBBPF_ERRNO__RELOC;
4499 reloc_desc->type = RELO_LD64;
4500 reloc_desc->insn_idx = insn_idx;
4501 reloc_desc->map_idx = map_idx;
4502 reloc_desc->sym_off = 0; /* sym->st_value determines map_idx */
4506 /* global data map relocation */
4507 if (!bpf_object__shndx_is_data(obj, shdr_idx)) {
4508 pr_warn("prog '%s': bad data relo against section '%s'\n",
4509 prog->name, sym_sec_name);
4510 return -LIBBPF_ERRNO__RELOC;
4512 for (map_idx = 0; map_idx < nr_maps; map_idx++) {
4513 map = &obj->maps[map_idx];
4514 if (map->libbpf_type != type || map->sec_idx != sym->st_shndx)
4516 pr_debug("prog '%s': found data map %zd (%s, sec %d, off %zu) for insn %u\n",
4517 prog->name, map_idx, map->name, map->sec_idx,
4518 map->sec_offset, insn_idx);
4521 if (map_idx >= nr_maps) {
4522 pr_warn("prog '%s': data relo failed to find map for section '%s'\n",
4523 prog->name, sym_sec_name);
4524 return -LIBBPF_ERRNO__RELOC;
4527 reloc_desc->type = RELO_DATA;
4528 reloc_desc->insn_idx = insn_idx;
4529 reloc_desc->map_idx = map_idx;
4530 reloc_desc->sym_off = sym->st_value;
4534 static bool prog_contains_insn(const struct bpf_program *prog, size_t insn_idx)
4536 return insn_idx >= prog->sec_insn_off &&
4537 insn_idx < prog->sec_insn_off + prog->sec_insn_cnt;
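/* Binary search for the program containing the given instruction offset;
 * relies on obj->programs being sorted by (sec_idx, sec_insn_off) via
 * cmp_progs() during ELF collection.
 */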
4540 static struct bpf_program *find_prog_by_sec_insn(const struct bpf_object *obj,
4541 size_t sec_idx, size_t insn_idx)
4543 int l = 0, r = obj->nr_programs - 1, m;
4544 struct bpf_program *prog;
4546 if (!obj->nr_programs)
4550 m = l + (r - l + 1) / 2;
4551 prog = &obj->programs[m];
4553 if (prog->sec_idx < sec_idx ||
4554 (prog->sec_idx == sec_idx && prog->sec_insn_off <= insn_idx))
4559 /* matching program could be at index l, but it still might be the
4560 * wrong one, so we need to double check conditions for the last time
4562 prog = &obj->programs[l];
4563 if (prog->sec_idx == sec_idx && prog_contains_insn(prog, insn_idx))
4569 bpf_object__collect_prog_relos(struct bpf_object *obj, Elf64_Shdr *shdr, Elf_Data *data)
4571 const char *relo_sec_name, *sec_name;
4572 size_t sec_idx = shdr->sh_info, sym_idx;
4573 struct bpf_program *prog;
4574 struct reloc_desc *relos;
4576 const char *sym_name;
4583 if (sec_idx >= obj->efile.sec_cnt)
4586 scn = elf_sec_by_idx(obj, sec_idx);
4587 scn_data = elf_sec_data(obj, scn);
4589 return -LIBBPF_ERRNO__FORMAT;
4591 relo_sec_name = elf_sec_str(obj, shdr->sh_name);
4592 sec_name = elf_sec_name(obj, scn);
4593 if (!relo_sec_name || !sec_name)
4596 pr_debug("sec '%s': collecting relocation for section(%zu) '%s'\n",
4597 relo_sec_name, sec_idx, sec_name);
4598 nrels = shdr->sh_size / shdr->sh_entsize;
4600 for (i = 0; i < nrels; i++) {
4601 rel = elf_rel_by_idx(data, i);
4603 pr_warn("sec '%s': failed to get relo #%d\n", relo_sec_name, i);
4604 return -LIBBPF_ERRNO__FORMAT;
4607 sym_idx = ELF64_R_SYM(rel->r_info);
4608 sym = elf_sym_by_idx(obj, sym_idx);
4610 pr_warn("sec '%s': symbol #%zu not found for relo #%d\n",
4611 relo_sec_name, sym_idx, i);
4612 return -LIBBPF_ERRNO__FORMAT;
4615 if (sym->st_shndx >= obj->efile.sec_cnt) {
4616 pr_warn("sec '%s': corrupted symbol #%zu pointing to invalid section #%zu for relo #%d\n",
4617 relo_sec_name, sym_idx, (size_t)sym->st_shndx, i);
4618 return -LIBBPF_ERRNO__FORMAT;
4621 if (rel->r_offset % BPF_INSN_SZ || rel->r_offset >= scn_data->d_size) {
4622 pr_warn("sec '%s': invalid offset 0x%zx for relo #%d\n",
4623 relo_sec_name, (size_t)rel->r_offset, i);
4624 return -LIBBPF_ERRNO__FORMAT;
4627 insn_idx = rel->r_offset / BPF_INSN_SZ;
4628 /* relocations against static functions are recorded as
4629 * relocations against the section that contains a function;
4630 * in such case, symbol will be STT_SECTION and sym.st_name
4631 * will point to empty string (0), so fetch section name
4634 if (ELF64_ST_TYPE(sym->st_info) == STT_SECTION && sym->st_name == 0)
4635 sym_name = elf_sec_name(obj, elf_sec_by_idx(obj, sym->st_shndx));
4637 sym_name = elf_sym_str(obj, sym->st_name);
4638 sym_name = sym_name ?: "<?";
4640 pr_debug("sec '%s': relo #%d: insn #%u against '%s'\n",
4641 relo_sec_name, i, insn_idx, sym_name);
4643 prog = find_prog_by_sec_insn(obj, sec_idx, insn_idx);
4645 pr_debug("sec '%s': relo #%d: couldn't find program in section '%s' for insn #%u, probably overridden weak function, skipping...\n",
4646 relo_sec_name, i, sec_name, insn_idx);
4650 relos = libbpf_reallocarray(prog->reloc_desc,
4651 prog->nr_reloc + 1, sizeof(*relos));
4654 prog->reloc_desc = relos;
4656 /* adjust insn_idx to local BPF program frame of reference */
4657 insn_idx -= prog->sec_insn_off;
4658 err = bpf_program__record_reloc(prog, &relos[prog->nr_reloc],
4659 insn_idx, sym_name, sym, rel);
4668 static int map_fill_btf_type_info(struct bpf_object *obj, struct bpf_map *map)
4675 /* if it's a BTF-defined map, we don't need to search for type IDs.
4676 * A struct_ops map does not need btf_key_type_id and
4677 * btf_value_type_id.
4679 if (map->sec_idx == obj->efile.btf_maps_shndx || bpf_map__is_struct_ops(map))
4683 * LLVM annotates global data differently in BTF, that is,
4684 * only as '.data', '.bss' or '.rodata'.
4686 if (!bpf_map__is_internal(map))
4689 id = btf__find_by_name(obj->btf, map->real_name);
4693 map->btf_key_type_id = 0;
4694 map->btf_value_type_id = id;
4698 static int bpf_get_map_info_from_fdinfo(int fd, struct bpf_map_info *info)
4700 char file[PATH_MAX], buff[4096];
4705 snprintf(file, sizeof(file), "/proc/%d/fdinfo/%d", getpid(), fd);
4706 memset(info, 0, sizeof(*info));
4708 fp = fopen(file, "re");
4711 pr_warn("failed to open %s: %d. No procfs support?\n", file,
4716 while (fgets(buff, sizeof(buff), fp)) {
4717 if (sscanf(buff, "map_type:\t%u", &val) == 1)
4719 else if (sscanf(buff, "key_size:\t%u", &val) == 1)
4720 info->key_size = val;
4721 else if (sscanf(buff, "value_size:\t%u", &val) == 1)
4722 info->value_size = val;
4723 else if (sscanf(buff, "max_entries:\t%u", &val) == 1)
4724 info->max_entries = val;
4725 else if (sscanf(buff, "map_flags:\t%i", &val) == 1)
4726 info->map_flags = val;
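/* For reference, a BPF map's fdinfo entry looks roughly like the
 * following (tab-separated; map_flags is printed in hex, which is why
 * %i is used above). Exact fields vary by kernel version:
 *
 *	map_type:	1
 *	key_size:	4
 *	value_size:	8
 *	max_entries:	128
 *	map_flags:	0x0
 */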
4734 bool bpf_map__autocreate(const struct bpf_map *map)
4736 return map->autocreate;
4739 int bpf_map__set_autocreate(struct bpf_map *map, bool autocreate)
4741 if (map->obj->loaded)
4742 return libbpf_err(-EBUSY);
4744 map->autocreate = autocreate;
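/* Usage sketch (illustrative, not part of libbpf): skip creation of an
 * optional map before loading the object; "my_map" is a hypothetical
 * map name.
 *
 *	struct bpf_map *map = bpf_object__find_map_by_name(obj, "my_map");
 *
 *	if (map)
 *		bpf_map__set_autocreate(map, false);
 *	bpf_object__load(obj);
 */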
4748 int bpf_map__reuse_fd(struct bpf_map *map, int fd)
4750 struct bpf_map_info info;
4751 __u32 len = sizeof(info), name_len;
4755 memset(&info, 0, len);
4756 err = bpf_map_get_info_by_fd(fd, &info, &len);
4757 if (err && errno == EINVAL)
4758 err = bpf_get_map_info_from_fdinfo(fd, &info);
4760 return libbpf_err(err);
4762 name_len = strlen(info.name);
4763 if (name_len == BPF_OBJ_NAME_LEN - 1 && strncmp(map->name, info.name, name_len) == 0)
4764 new_name = strdup(map->name);
4766 new_name = strdup(info.name);
4769 return libbpf_err(-errno);
4772 * Like dup(), but make sure the new FD is >= 3 and has O_CLOEXEC set.
4773 * This is similar to what we do in ensure_good_fd(), but without
4774 * closing original FD.
4776 new_fd = fcntl(fd, F_DUPFD_CLOEXEC, 3);
4779 goto err_free_new_name;
4782 err = reuse_fd(map->fd, new_fd);
4784 goto err_free_new_name;
4788 map->name = new_name;
4789 map->def.type = info.type;
4790 map->def.key_size = info.key_size;
4791 map->def.value_size = info.value_size;
4792 map->def.max_entries = info.max_entries;
4793 map->def.map_flags = info.map_flags;
4794 map->btf_key_type_id = info.btf_key_type_id;
4795 map->btf_value_type_id = info.btf_value_type_id;
4797 map->map_extra = info.map_extra;
4803 return libbpf_err(err);
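/* Usage sketch (illustrative, not part of libbpf): adopt an
 * already-pinned map before load; the pin path and map name are
 * hypothetical.
 *
 *	int pin_fd = bpf_obj_get("/sys/fs/bpf/my_map");
 *	struct bpf_map *map = bpf_object__find_map_by_name(obj, "my_map");
 *
 *	if (pin_fd >= 0 && map)
 *		bpf_map__reuse_fd(map, pin_fd);
 *	bpf_object__load(obj);
 */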
4806 __u32 bpf_map__max_entries(const struct bpf_map *map)
4808 return map->def.max_entries;
4811 struct bpf_map *bpf_map__inner_map(struct bpf_map *map)
4813 if (!bpf_map_type__is_map_in_map(map->def.type))
4814 return errno = EINVAL, NULL;
4816 return map->inner_map;
4819 int bpf_map__set_max_entries(struct bpf_map *map, __u32 max_entries)
4821 if (map->obj->loaded)
4822 return libbpf_err(-EBUSY);
4824 map->def.max_entries = max_entries;
4826 /* auto-adjust BPF ringbuf map max_entries to be a multiple of page size */
4827 if (map_is_ringbuf(map))
4828 map->def.max_entries = adjust_ringbuf_sz(map->def.max_entries);
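/* Usage sketch (illustrative, not part of libbpf): for a
 * BPF_MAP_TYPE_RINGBUF, max_entries is the buffer size in bytes, so
 * the request below asks for 1 MiB and is rounded up by
 * adjust_ringbuf_sz() as needed; "rb" is a hypothetical map name.
 *
 *	struct bpf_map *rb = bpf_object__find_map_by_name(obj, "rb");
 *
 *	if (rb)
 *		bpf_map__set_max_entries(rb, 1 << 20);
 */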
4833 static int bpf_object_prepare_token(struct bpf_object *obj)
4835 const char *bpffs_path;
4836 int bpffs_fd = -1, token_fd, err;
4838 enum libbpf_print_level level;
4840 /* token is explicitly prevented */
4841 if (obj->token_path && obj->token_path[0] == '\0') {
4842 pr_debug("object '%s': token is prevented, skipping...\n", obj->name);
4846 mandatory = obj->token_path != NULL;
4847 level = mandatory ? LIBBPF_WARN : LIBBPF_DEBUG;
4849 bpffs_path = obj->token_path ?: BPF_FS_DEFAULT_PATH;
4850 bpffs_fd = open(bpffs_path, O_DIRECTORY);
4853 __pr(level, "object '%s': failed (%d) to open BPF FS mount at '%s'%s\n",
4854 obj->name, err, bpffs_path,
4855 mandatory ? "" : ", skipping optional step...");
4856 return mandatory ? err : 0;
4859 token_fd = bpf_token_create(bpffs_fd, 0);
4862 if (!mandatory && token_fd == -ENOENT) {
4863 pr_debug("object '%s': BPF FS at '%s' doesn't have BPF token delegation set up, skipping...\n",
4864 obj->name, bpffs_path);
4867 __pr(level, "object '%s': failed (%d) to create BPF token from '%s'%s\n",
4868 obj->name, token_fd, bpffs_path,
4869 mandatory ? "" : ", skipping optional step...");
4870 return mandatory ? token_fd : 0;
4873 obj->feat_cache = calloc(1, sizeof(*obj->feat_cache));
4874 if (!obj->feat_cache) {
4879 obj->token_fd = token_fd;
4880 obj->feat_cache->token_fd = token_fd;
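/* Usage sketch (illustrative, not part of libbpf): the token behavior
 * above is controlled through bpf_object_open_opts; an explicit path
 * makes the token mandatory, while an empty string prevents token
 * creation altogether.
 *
 *	LIBBPF_OPTS(bpf_object_open_opts, opts,
 *		.bpf_token_path = "/sys/fs/bpf",
 *	);
 *	struct bpf_object *obj = bpf_object__open_file("prog.bpf.o", &opts);
 */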
4886 bpf_object__probe_loading(struct bpf_object *obj)
4888 char *cp, errmsg[STRERR_BUFSIZE];
4889 struct bpf_insn insns[] = {
4890 BPF_MOV64_IMM(BPF_REG_0, 0),
4893 int ret, insn_cnt = ARRAY_SIZE(insns);
4894 LIBBPF_OPTS(bpf_prog_load_opts, opts,
4895 .token_fd = obj->token_fd,
4896 .prog_flags = obj->token_fd ? BPF_F_TOKEN_FD : 0,
4899 if (obj->gen_loader)
4902 ret = bump_rlimit_memlock();
4904 pr_warn("Failed to bump RLIMIT_MEMLOCK (err = %d), you might need to do it explicitly!\n", ret);
4906 /* make sure basic loading works */
4907 ret = bpf_prog_load(BPF_PROG_TYPE_SOCKET_FILTER, NULL, "GPL", insns, insn_cnt, &opts);
4909 ret = bpf_prog_load(BPF_PROG_TYPE_TRACEPOINT, NULL, "GPL", insns, insn_cnt, &opts);
4912 cp = libbpf_strerror_r(ret, errmsg, sizeof(errmsg));
4913 pr_warn("Error in %s():%s(%d). Couldn't load trivial BPF "
4914 "program. Make sure your kernel supports BPF "
4915 "(CONFIG_BPF_SYSCALL=y) and/or that RLIMIT_MEMLOCK is "
4916 "set to big enough value.\n", __func__, cp, ret);
4924 bool kernel_supports(const struct bpf_object *obj, enum kern_feature_id feat_id)
4926 if (obj->gen_loader)
4927 /* To generate a loader program, assume the latest kernel
4928 * to avoid doing extra prog_load, map_create syscalls.
4933 return feat_supported(obj->feat_cache, feat_id);
4935 return feat_supported(NULL, feat_id);
4938 static bool map_is_reuse_compat(const struct bpf_map *map, int map_fd)
4940 struct bpf_map_info map_info;
4941 char msg[STRERR_BUFSIZE];
4942 __u32 map_info_len = sizeof(map_info);
4945 memset(&map_info, 0, map_info_len);
4946 err = bpf_map_get_info_by_fd(map_fd, &map_info, &map_info_len);
4947 if (err && errno == EINVAL)
4948 err = bpf_get_map_info_from_fdinfo(map_fd, &map_info);
4950 pr_warn("failed to get map info for map FD %d: %s\n", map_fd,
4951 libbpf_strerror_r(errno, msg, sizeof(msg)));
4955 return (map_info.type == map->def.type &&
4956 map_info.key_size == map->def.key_size &&
4957 map_info.value_size == map->def.value_size &&
4958 map_info.max_entries == map->def.max_entries &&
4959 map_info.map_flags == map->def.map_flags &&
4960 map_info.map_extra == map->map_extra);
4964 bpf_object__reuse_map(struct bpf_map *map)
4966 char *cp, errmsg[STRERR_BUFSIZE];
4969 pin_fd = bpf_obj_get(map->pin_path);
4972 if (err == -ENOENT) {
4973 pr_debug("found no pinned map to reuse at '%s'\n",
4978 cp = libbpf_strerror_r(-err, errmsg, sizeof(errmsg));
4979 pr_warn("couldn't retrieve pinned map '%s': %s\n",
4984 if (!map_is_reuse_compat(map, pin_fd)) {
4985 pr_warn("couldn't reuse pinned map at '%s': parameter mismatch\n",
4991 err = bpf_map__reuse_fd(map, pin_fd);
4997 pr_debug("reused pinned map at '%s'\n", map->pin_path);
5003 bpf_object__populate_internal_map(struct bpf_object *obj, struct bpf_map *map)
5005 enum libbpf_map_type map_type = map->libbpf_type;
5006 char *cp, errmsg[STRERR_BUFSIZE];
5009 if (obj->gen_loader) {
5010 bpf_gen__map_update_elem(obj->gen_loader, map - obj->maps,
5011 map->mmaped, map->def.value_size);
5012 if (map_type == LIBBPF_MAP_RODATA || map_type == LIBBPF_MAP_KCONFIG)
5013 bpf_gen__map_freeze(obj->gen_loader, map - obj->maps);
5017 err = bpf_map_update_elem(map->fd, &zero, map->mmaped, 0);
5020 cp = libbpf_strerror_r(err, errmsg, sizeof(errmsg));
5021 pr_warn("Error setting initial map(%s) contents: %s\n",
5026 /* Freeze .rodata and .kconfig map as read-only from syscall side. */
5027 if (map_type == LIBBPF_MAP_RODATA || map_type == LIBBPF_MAP_KCONFIG) {
5028 err = bpf_map_freeze(map->fd);
5031 cp = libbpf_strerror_r(err, errmsg, sizeof(errmsg));
5032 pr_warn("Error freezing map(%s) as read-only: %s\n",
5040 static void bpf_map__destroy(struct bpf_map *map);
5042 static bool map_is_created(const struct bpf_map *map)
5044 return map->obj->loaded || map->reused;
5047 static int bpf_object__create_map(struct bpf_object *obj, struct bpf_map *map, bool is_inner)
5049 LIBBPF_OPTS(bpf_map_create_opts, create_attr);
5050 struct bpf_map_def *def = &map->def;
5051 const char *map_name = NULL;
5052 int err = 0, map_fd;
5054 if (kernel_supports(obj, FEAT_PROG_NAME))
5055 map_name = map->name;
5056 create_attr.map_ifindex = map->map_ifindex;
5057 create_attr.map_flags = def->map_flags;
5058 create_attr.numa_node = map->numa_node;
5059 create_attr.map_extra = map->map_extra;
5060 create_attr.token_fd = obj->token_fd;
5062 create_attr.map_flags |= BPF_F_TOKEN_FD;
5064 if (bpf_map__is_struct_ops(map)) {
5065 create_attr.btf_vmlinux_value_type_id = map->btf_vmlinux_value_type_id;
5066 if (map->mod_btf_fd >= 0) {
5067 create_attr.value_type_btf_obj_fd = map->mod_btf_fd;
5068 create_attr.map_flags |= BPF_F_VTYPE_BTF_OBJ_FD;
5072 if (obj->btf && btf__fd(obj->btf) >= 0) {
5073 create_attr.btf_fd = btf__fd(obj->btf);
5074 create_attr.btf_key_type_id = map->btf_key_type_id;
5075 create_attr.btf_value_type_id = map->btf_value_type_id;
5078 if (bpf_map_type__is_map_in_map(def->type)) {
5079 if (map->inner_map) {
5080 err = map_set_def_max_entries(map->inner_map);
5083 err = bpf_object__create_map(obj, map->inner_map, true);
5085 pr_warn("map '%s': failed to create inner map: %d\n",
5089 map->inner_map_fd = map->inner_map->fd;
5091 if (map->inner_map_fd >= 0)
5092 create_attr.inner_map_fd = map->inner_map_fd;
5095 switch (def->type) {
5096 case BPF_MAP_TYPE_PERF_EVENT_ARRAY:
5097 case BPF_MAP_TYPE_CGROUP_ARRAY:
5098 case BPF_MAP_TYPE_STACK_TRACE:
5099 case BPF_MAP_TYPE_ARRAY_OF_MAPS:
5100 case BPF_MAP_TYPE_HASH_OF_MAPS:
5101 case BPF_MAP_TYPE_DEVMAP:
5102 case BPF_MAP_TYPE_DEVMAP_HASH:
5103 case BPF_MAP_TYPE_CPUMAP:
5104 case BPF_MAP_TYPE_XSKMAP:
5105 case BPF_MAP_TYPE_SOCKMAP:
5106 case BPF_MAP_TYPE_SOCKHASH:
5107 case BPF_MAP_TYPE_QUEUE:
5108 case BPF_MAP_TYPE_STACK:
5109 case BPF_MAP_TYPE_ARENA:
5110 create_attr.btf_fd = 0;
5111 create_attr.btf_key_type_id = 0;
5112 create_attr.btf_value_type_id = 0;
5113 map->btf_key_type_id = 0;
5114 map->btf_value_type_id = 0;
5116 case BPF_MAP_TYPE_STRUCT_OPS:
5117 create_attr.btf_value_type_id = 0;
5123 if (obj->gen_loader) {
5124 bpf_gen__map_create(obj->gen_loader, def->type, map_name,
5125 def->key_size, def->value_size, def->max_entries,
5126 &create_attr, is_inner ? -1 : map - obj->maps);
5127 /* We keep pretending we have a valid FD to pass various fd >= 0
5128 * checks by just keeping original placeholder FDs in place.
5129 * See bpf_object__add_map() comment.
5130 * This placeholder fd will not be used with any syscall and
5131 * will be reset to -1 eventually.
5135 map_fd = bpf_map_create(def->type, map_name,
5136 def->key_size, def->value_size,
5137 def->max_entries, &create_attr);
5139 if (map_fd < 0 && (create_attr.btf_key_type_id || create_attr.btf_value_type_id)) {
5140 char *cp, errmsg[STRERR_BUFSIZE];
5143 cp = libbpf_strerror_r(err, errmsg, sizeof(errmsg));
5144 pr_warn("Error in bpf_create_map_xattr(%s):%s(%d). Retrying without BTF.\n",
5145 map->name, cp, err);
5146 create_attr.btf_fd = 0;
5147 create_attr.btf_key_type_id = 0;
5148 create_attr.btf_value_type_id = 0;
5149 map->btf_key_type_id = 0;
5150 map->btf_value_type_id = 0;
5151 map_fd = bpf_map_create(def->type, map_name,
5152 def->key_size, def->value_size,
5153 def->max_entries, &create_attr);
5156 if (bpf_map_type__is_map_in_map(def->type) && map->inner_map) {
5157 if (obj->gen_loader)
5158 map->inner_map->fd = -1;
5159 bpf_map__destroy(map->inner_map);
5160 zfree(&map->inner_map);
5166 /* obj->gen_loader case, prevent reuse_fd() from closing map_fd */
5167 if (map->fd == map_fd)
5170 /* Keep placeholder FD value but now point it to the BPF map object.
5171 * This way everything that relied on this map's FD (e.g., relocated
5172 * ldimm64 instructions) will stay valid and won't need adjustments.
5173 * map->fd stays valid but now points to what map_fd points to.
5175 return reuse_fd(map->fd, map_fd);
5178 static int init_map_in_map_slots(struct bpf_object *obj, struct bpf_map *map)
5180 const struct bpf_map *targ_map;
5184 for (i = 0; i < map->init_slots_sz; i++) {
5185 if (!map->init_slots[i])
5188 targ_map = map->init_slots[i];
5191 if (obj->gen_loader) {
5192 bpf_gen__populate_outer_map(obj->gen_loader,
5194 targ_map - obj->maps);
5196 err = bpf_map_update_elem(map->fd, &i, &fd, 0);
5200 pr_warn("map '%s': failed to initialize slot [%d] to map '%s' fd=%d: %d\n",
5201 map->name, i, targ_map->name, fd, err);
5204 pr_debug("map '%s': slot [%d] set to map '%s' fd=%d\n",
5205 map->name, i, targ_map->name, fd);
5208 zfree(&map->init_slots);
5209 map->init_slots_sz = 0;
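/* The init_slots above come from BPF-side declarations along these
 * lines (a sketch using the usual BTF-defined map syntax; all names
 * are hypothetical):
 *
 *	struct inner {
 *		__uint(type, BPF_MAP_TYPE_ARRAY);
 *		__uint(max_entries, 1);
 *		__type(key, __u32);
 *		__type(value, __u64);
 *	} inner_a SEC(".maps");
 *
 *	struct {
 *		__uint(type, BPF_MAP_TYPE_ARRAY_OF_MAPS);
 *		__uint(max_entries, 2);
 *		__type(key, __u32);
 *		__array(values, struct inner);
 *	} outer SEC(".maps") = {
 *		.values = { [0] = &inner_a },
 *	};
 */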
5214 static int init_prog_array_slots(struct bpf_object *obj, struct bpf_map *map)
5216 const struct bpf_program *targ_prog;
5220 if (obj->gen_loader)
5223 for (i = 0; i < map->init_slots_sz; i++) {
5224 if (!map->init_slots[i])
5227 targ_prog = map->init_slots[i];
5228 fd = bpf_program__fd(targ_prog);
5230 err = bpf_map_update_elem(map->fd, &i, &fd, 0);
5233 pr_warn("map '%s': failed to initialize slot [%d] to prog '%s' fd=%d: %d\n",
5234 map->name, i, targ_prog->name, fd, err);
5237 pr_debug("map '%s': slot [%d] set to prog '%s' fd=%d\n",
5238 map->name, i, targ_prog->name, fd);
5241 zfree(&map->init_slots);
5242 map->init_slots_sz = 0;
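/* Similarly, PROG_ARRAY slots can be pre-initialized on the BPF side
 * (sketch; "tail_target" is a hypothetical program in the same object):
 *
 *	struct {
 *		__uint(type, BPF_MAP_TYPE_PROG_ARRAY);
 *		__uint(max_entries, 4);
 *		__uint(key_size, sizeof(__u32));
 *		__array(values, int (void *));
 *	} jmp_table SEC(".maps") = {
 *		.values = { [0] = &tail_target },
 *	};
 */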
5247 static int bpf_object_init_prog_arrays(struct bpf_object *obj)
5249 struct bpf_map *map;
5252 for (i = 0; i < obj->nr_maps; i++) {
5253 map = &obj->maps[i];
5255 if (!map->init_slots_sz || map->def.type != BPF_MAP_TYPE_PROG_ARRAY)
5258 err = init_prog_array_slots(obj, map);
5265 static int map_set_def_max_entries(struct bpf_map *map)
5267 if (map->def.type == BPF_MAP_TYPE_PERF_EVENT_ARRAY && !map->def.max_entries) {
5270 nr_cpus = libbpf_num_possible_cpus();
5272 pr_warn("map '%s': failed to determine number of system CPUs: %d\n",
5273 map->name, nr_cpus);
5276 pr_debug("map '%s': setting size to %d\n", map->name, nr_cpus);
5277 map->def.max_entries = nr_cpus;
5284 bpf_object__create_maps(struct bpf_object *obj)
5286 struct bpf_map *map;
5287 char *cp, errmsg[STRERR_BUFSIZE];
5292 for (i = 0; i < obj->nr_maps; i++) {
5293 map = &obj->maps[i];
5295 /* To support old kernels, we skip creating global data maps
5296 * (.rodata, .data, .kconfig, etc); later on, during program
5297 * loading, if we detect that at least one of the to-be-loaded
5298 * programs is referencing any global data map, we'll error
5299 * out with program name and relocation index logged.
5300 * This approach allows us to accommodate Clang emitting
5301 * unnecessary .rodata.str1.1 sections for string literals,
5302 * and it also allows CO-RE applications to use global
5303 * variables in some BPF programs, but not in others.
5304 * If those global variable-using programs are not loaded at
5305 * runtime due to bpf_program__set_autoload(prog, false),
5306 * bpf_object loading will succeed just fine even on old
5307 * kernels.
5309 if (bpf_map__is_internal(map) && !kernel_supports(obj, FEAT_GLOBAL_DATA))
5310 map->autocreate = false;
5312 if (!map->autocreate) {
5313 pr_debug("map '%s': skipped auto-creating...\n", map->name);
5317 err = map_set_def_max_entries(map);
5323 if (map->pin_path) {
5324 err = bpf_object__reuse_map(map);
5326 pr_warn("map '%s': error reusing pinned map\n",
5330 if (retried && map->fd < 0) {
5331 pr_warn("map '%s': cannot find pinned map\n",
5339 pr_debug("map '%s': skipping creation (preset fd=%d)\n",
5340 map->name, map->fd);
5342 err = bpf_object__create_map(obj, map, false);
5346 pr_debug("map '%s': created successfully, fd=%d\n",
5347 map->name, map->fd);
5349 if (bpf_map__is_internal(map)) {
5350 err = bpf_object__populate_internal_map(obj, map);
5354 if (map->def.type == BPF_MAP_TYPE_ARENA) {
5355 map->mmaped = mmap((void *)(long)map->map_extra,
5356 bpf_map_mmap_sz(map), PROT_READ | PROT_WRITE,
5357 map->map_extra ? MAP_SHARED | MAP_FIXED : MAP_SHARED,
5359 if (map->mmaped == MAP_FAILED) {
5362 pr_warn("map '%s': failed to mmap arena: %d\n",
5366 if (obj->arena_data) {
5367 memcpy(map->mmaped, obj->arena_data, obj->arena_data_sz);
5368 zfree(&obj->arena_data);
5371 if (map->init_slots_sz && map->def.type != BPF_MAP_TYPE_PROG_ARRAY) {
5372 err = init_map_in_map_slots(obj, map);
5378 if (map->pin_path && !map->pinned) {
5379 err = bpf_map__pin(map, NULL);
5381 if (!retried && err == -EEXIST) {
5385 pr_warn("map '%s': failed to auto-pin at '%s': %d\n",
5386 map->name, map->pin_path, err);
5395 cp = libbpf_strerror_r(err, errmsg, sizeof(errmsg));
5396 pr_warn("map '%s': failed to create: %s(%d)\n", map->name, cp, err);
5398 for (j = 0; j < i; j++)
5399 zclose(obj->maps[j].fd);
5403 static bool bpf_core_is_flavor_sep(const char *s)
5405 /* check X___Y name pattern, where X and Y are not underscores */
5406 return s[0] != '_' && /* X */
5407 s[1] == '_' && s[2] == '_' && s[3] == '_' && /* ___ */
5408 s[4] != '_'; /* Y */
5411 /* Given 'some_struct_name___with_flavor', return the length of the name
5412 * prefix before the last triple underscore. The struct name part after the
5413 * last triple underscore is ignored by BPF CO-RE during relocation matching.
5415 size_t bpf_core_essential_name_len(const char *name)
5417 size_t n = strlen(name);
5420 for (i = n - 5; i >= 0; i--) {
5421 if (bpf_core_is_flavor_sep(name + i))
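/* For example (illustrative): bpf_core_essential_name_len() maps
 * "task_struct___v518" to strlen("task_struct") == 11, so the
 * "___v518" flavor suffix never participates in candidate matching.
 */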
5427 void bpf_core_free_cands(struct bpf_core_cand_list *cands)
5436 int bpf_core_add_cands(struct bpf_core_cand *local_cand,
5437 size_t local_essent_len,
5438 const struct btf *targ_btf,
5439 const char *targ_btf_name,
5441 struct bpf_core_cand_list *cands)
5443 struct bpf_core_cand *new_cands, *cand;
5444 const struct btf_type *t, *local_t;
5445 const char *targ_name, *local_name;
5446 size_t targ_essent_len;
5449 local_t = btf__type_by_id(local_cand->btf, local_cand->id);
5450 local_name = btf__str_by_offset(local_cand->btf, local_t->name_off);
5452 n = btf__type_cnt(targ_btf);
5453 for (i = targ_start_id; i < n; i++) {
5454 t = btf__type_by_id(targ_btf, i);
5455 if (!btf_kind_core_compat(t, local_t))
5458 targ_name = btf__name_by_offset(targ_btf, t->name_off);
5459 if (str_is_empty(targ_name))
5462 targ_essent_len = bpf_core_essential_name_len(targ_name);
5463 if (targ_essent_len != local_essent_len)
5466 if (strncmp(local_name, targ_name, local_essent_len) != 0)
5469 pr_debug("CO-RE relocating [%d] %s %s: found target candidate [%d] %s %s in [%s]\n",
5470 local_cand->id, btf_kind_str(local_t),
5471 local_name, i, btf_kind_str(t), targ_name,
5473 new_cands = libbpf_reallocarray(cands->cands, cands->len + 1,
5474 sizeof(*cands->cands));
5478 cand = &new_cands[cands->len];
5479 cand->btf = targ_btf;
5482 cands->cands = new_cands;
5488 static int load_module_btfs(struct bpf_object *obj)
5490 struct bpf_btf_info info;
5491 struct module_btf *mod_btf;
5497 if (obj->btf_modules_loaded)
5500 if (obj->gen_loader)
5503 /* don't do this again, even if we find no module BTFs */
5504 obj->btf_modules_loaded = true;
5506 /* kernel too old to support module BTFs */
5507 if (!kernel_supports(obj, FEAT_MODULE_BTF))
5511 err = bpf_btf_get_next_id(id, &id);
5512 if (err && errno == ENOENT)
5514 if (err && errno == EPERM) {
5515 pr_debug("skipping module BTFs loading, missing privileges\n");
5520 pr_warn("failed to iterate BTF objects: %d\n", err);
5524 fd = bpf_btf_get_fd_by_id(id);
5526 if (errno == ENOENT)
5527 continue; /* expected race: BTF was unloaded */
5529 pr_warn("failed to get BTF object #%d FD: %d\n", id, err);
5534 memset(&info, 0, sizeof(info));
5535 info.name = ptr_to_u64(name);
5536 info.name_len = sizeof(name);
5538 err = bpf_btf_get_info_by_fd(fd, &info, &len);
5541 pr_warn("failed to get BTF object #%d info: %d\n", id, err);
5545 /* ignore non-module BTFs */
5546 if (!info.kernel_btf || strcmp(name, "vmlinux") == 0) {
5551 btf = btf_get_from_fd(fd, obj->btf_vmlinux);
5552 err = libbpf_get_error(btf);
5554 pr_warn("failed to load module [%s]'s BTF object #%d: %d\n",
5559 err = libbpf_ensure_mem((void **)&obj->btf_modules, &obj->btf_module_cap,
5560 sizeof(*obj->btf_modules), obj->btf_module_cnt + 1);
5564 mod_btf = &obj->btf_modules[obj->btf_module_cnt++];
5569 mod_btf->name = strdup(name);
5570 if (!mod_btf->name) {
5584 static struct bpf_core_cand_list *
5585 bpf_core_find_cands(struct bpf_object *obj, const struct btf *local_btf, __u32 local_type_id)
5587 struct bpf_core_cand local_cand = {};
5588 struct bpf_core_cand_list *cands;
5589 const struct btf *main_btf;
5590 const struct btf_type *local_t;
5591 const char *local_name;
5592 size_t local_essent_len;
5595 local_cand.btf = local_btf;
5596 local_cand.id = local_type_id;
5597 local_t = btf__type_by_id(local_btf, local_type_id);
5599 return ERR_PTR(-EINVAL);
5601 local_name = btf__name_by_offset(local_btf, local_t->name_off);
5602 if (str_is_empty(local_name))
5603 return ERR_PTR(-EINVAL);
5604 local_essent_len = bpf_core_essential_name_len(local_name);
5606 cands = calloc(1, sizeof(*cands));
5608 return ERR_PTR(-ENOMEM);
5610 /* Attempt to find target candidates in vmlinux BTF first */
5611 main_btf = obj->btf_vmlinux_override ?: obj->btf_vmlinux;
5612 err = bpf_core_add_cands(&local_cand, local_essent_len, main_btf, "vmlinux", 1, cands);
5616 /* if vmlinux BTF has any candidate, don't go for module BTFs */
5620 /* if vmlinux BTF was overridden, don't attempt to load module BTFs */
5621 if (obj->btf_vmlinux_override)
5624 /* now look through module BTFs, trying to still find candidates */
5625 err = load_module_btfs(obj);
5629 for (i = 0; i < obj->btf_module_cnt; i++) {
5630 err = bpf_core_add_cands(&local_cand, local_essent_len,
5631 obj->btf_modules[i].btf,
5632 obj->btf_modules[i].name,
5633 btf__type_cnt(obj->btf_vmlinux),
5641 bpf_core_free_cands(cands);
5642 return ERR_PTR(err);
5645 /* Check local and target types for compatibility. This check is used for
5646 * type-based CO-RE relocations and follows slightly different rules than
5647 * field-based relocations. This function assumes that root types were already
5648 * checked for name match. Beyond that initial root-level name check, names
5649 * are completely ignored. Compatibility rules are as follows:
5650 * - any two STRUCTs/UNIONs/FWDs/ENUMs/INTs are considered compatible, but
5651 * kind should match for local and target types (i.e., STRUCT is not
5652 * compatible with UNION);
5653 * - for ENUMs, the size is ignored;
5654 * - for INT, size and signedness are ignored;
5655 * - for ARRAY, dimensionality is ignored, element types are checked for
5656 * compatibility recursively;
5657 * - CONST/VOLATILE/RESTRICT modifiers are ignored;
5658 * - TYPEDEFs/PTRs are compatible if the types they point to are compatible;
5659 * - FUNC_PROTOs are compatible if they have compatible signature: same
5660 * number of input args and compatible return and argument types.
5661 * These rules are not set in stone and probably will be adjusted as we get
5662 * more experience with using BPF CO-RE relocations.
5664 int bpf_core_types_are_compat(const struct btf *local_btf, __u32 local_id,
5665 const struct btf *targ_btf, __u32 targ_id)
5667 return __bpf_core_types_are_compat(local_btf, local_id, targ_btf, targ_id, 32);
5670 int bpf_core_types_match(const struct btf *local_btf, __u32 local_id,
5671 const struct btf *targ_btf, __u32 targ_id)
5673 return __bpf_core_types_match(local_btf, local_id, targ_btf, targ_id, false, 32);
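/* A few illustrative consequences of the rules above:
 * - local 'struct sk_buff' is compatible with target 'struct sk_buff'
 *   regardless of their members (kinds match, root names were already
 *   checked);
 * - local 'int' is compatible with target 'long' (both are INTs; size
 *   and signedness are ignored);
 * - local 'int *' is not compatible with target 'struct foo *'
 *   (pointed-to kinds differ).
 */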
5676 static size_t bpf_core_hash_fn(const long key, void *ctx)
5681 static bool bpf_core_equal_fn(const long k1, const long k2, void *ctx)
5686 static int record_relo_core(struct bpf_program *prog,
5687 const struct bpf_core_relo *core_relo, int insn_idx)
5689 struct reloc_desc *relos, *relo;
5691 relos = libbpf_reallocarray(prog->reloc_desc,
5692 prog->nr_reloc + 1, sizeof(*relos));
5695 relo = &relos[prog->nr_reloc];
5696 relo->type = RELO_CORE;
5697 relo->insn_idx = insn_idx;
5698 relo->core_relo = core_relo;
5699 prog->reloc_desc = relos;
5704 static const struct bpf_core_relo *find_relo_core(struct bpf_program *prog, int insn_idx)
5706 struct reloc_desc *relo;
5709 for (i = 0; i < prog->nr_reloc; i++) {
5710 relo = &prog->reloc_desc[i];
5711 if (relo->type != RELO_CORE || relo->insn_idx != insn_idx)
5714 return relo->core_relo;
5720 static int bpf_core_resolve_relo(struct bpf_program *prog,
5721 const struct bpf_core_relo *relo,
5723 const struct btf *local_btf,
5724 struct hashmap *cand_cache,
5725 struct bpf_core_relo_res *targ_res)
5727 struct bpf_core_spec specs_scratch[3] = {};
5728 struct bpf_core_cand_list *cands = NULL;
5729 const char *prog_name = prog->name;
5730 const struct btf_type *local_type;
5731 const char *local_name;
5732 __u32 local_id = relo->type_id;
5735 local_type = btf__type_by_id(local_btf, local_id);
5739 local_name = btf__name_by_offset(local_btf, local_type->name_off);
5743 if (relo->kind != BPF_CORE_TYPE_ID_LOCAL &&
5744 !hashmap__find(cand_cache, local_id, &cands)) {
5745 cands = bpf_core_find_cands(prog->obj, local_btf, local_id);
5746 if (IS_ERR(cands)) {
5747 pr_warn("prog '%s': relo #%d: target candidate search failed for [%d] %s %s: %ld\n",
5748 prog_name, relo_idx, local_id, btf_kind_str(local_type),
5749 local_name, PTR_ERR(cands));
5750 return PTR_ERR(cands);
5752 err = hashmap__set(cand_cache, local_id, cands, NULL, NULL);
5754 bpf_core_free_cands(cands);
5759 return bpf_core_calc_relo_insn(prog_name, relo, relo_idx, local_btf, cands, specs_scratch,
5764 bpf_object__relocate_core(struct bpf_object *obj, const char *targ_btf_path)
5766 const struct btf_ext_info_sec *sec;
5767 struct bpf_core_relo_res targ_res;
5768 const struct bpf_core_relo *rec;
5769 const struct btf_ext_info *seg;
5770 struct hashmap_entry *entry;
5771 struct hashmap *cand_cache = NULL;
5772 struct bpf_program *prog;
5773 struct bpf_insn *insn;
5774 const char *sec_name;
5775 int i, err = 0, insn_idx, sec_idx, sec_num;
5777 if (obj->btf_ext->core_relo_info.len == 0)
5780 if (targ_btf_path) {
5781 obj->btf_vmlinux_override = btf__parse(targ_btf_path, NULL);
5782 err = libbpf_get_error(obj->btf_vmlinux_override);
5784 pr_warn("failed to parse target BTF: %d\n", err);
5789 cand_cache = hashmap__new(bpf_core_hash_fn, bpf_core_equal_fn, NULL);
5790 if (IS_ERR(cand_cache)) {
5791 err = PTR_ERR(cand_cache);
5795 seg = &obj->btf_ext->core_relo_info;
5797 for_each_btf_ext_sec(seg, sec) {
5798 sec_idx = seg->sec_idxs[sec_num];
5801 sec_name = btf__name_by_offset(obj->btf, sec->sec_name_off);
5802 if (str_is_empty(sec_name)) {
5807 pr_debug("sec '%s': found %d CO-RE relocations\n", sec_name, sec->num_info);
5809 for_each_btf_ext_rec(seg, sec, i, rec) {
5810 if (rec->insn_off % BPF_INSN_SZ)
5812 insn_idx = rec->insn_off / BPF_INSN_SZ;
5813 prog = find_prog_by_sec_insn(obj, sec_idx, insn_idx);
5815 /* When a __weak subprog is "overridden" by another instance
5816 * of the subprog from a different object file, the linker still
5817 * appends all the .BTF.ext info that used to belong to that
5818 * eliminated subprogram.
5819 * This is similar to what the x86-64 linker does for relocations.
5820 * So ignore such relocations, just like we ignore
5821 * subprog instructions when discovering subprograms.
5823 pr_debug("sec '%s': skipping CO-RE relocation #%d for insn #%d belonging to eliminated weak subprogram\n",
5824 sec_name, i, insn_idx);
5827 /* no need to apply CO-RE relocation if the program is
5828 * not going to be loaded
5830 if (!prog->autoload)
5833 /* adjust insn_idx from section frame of reference to the local
5834 * program's frame of reference; (sub-)program code is not yet
5835 * relocated, so it's enough to just subtract in-section offset
5837 insn_idx = insn_idx - prog->sec_insn_off;
5838 if (insn_idx >= prog->insns_cnt)
5840 insn = &prog->insns[insn_idx];
5842 err = record_relo_core(prog, rec, insn_idx);
5844 pr_warn("prog '%s': relo #%d: failed to record relocation: %d\n",
5845 prog->name, i, err);
5849 if (prog->obj->gen_loader)
5852 err = bpf_core_resolve_relo(prog, rec, i, obj->btf, cand_cache, &targ_res);
5854 pr_warn("prog '%s': relo #%d: failed to relocate: %d\n",
5855 prog->name, i, err);
5859 err = bpf_core_patch_insn(prog->name, insn, insn_idx, rec, i, &targ_res);
5861 pr_warn("prog '%s': relo #%d: failed to patch insn #%u: %d\n",
5862 prog->name, i, insn_idx, err);
5869 /* obj->btf_vmlinux and module BTFs are freed after object load */
5870 btf__free(obj->btf_vmlinux_override);
5871 obj->btf_vmlinux_override = NULL;
5873 if (!IS_ERR_OR_NULL(cand_cache)) {
5874 hashmap__for_each_entry(cand_cache, entry, i) {
5875 bpf_core_free_cands(entry->pvalue);
5877 hashmap__free(cand_cache);
5882 /* base map load ldimm64 special constant, used also for log fixup logic */
5883 #define POISON_LDIMM64_MAP_BASE 2001000000
5884 #define POISON_LDIMM64_MAP_PFX "200100"
5886 static void poison_map_ldimm64(struct bpf_program *prog, int relo_idx,
5887 int insn_idx, struct bpf_insn *insn,
5888 int map_idx, const struct bpf_map *map)
5892 pr_debug("prog '%s': relo #%d: poisoning insn #%d that loads map #%d '%s'\n",
5893 prog->name, relo_idx, insn_idx, map_idx, map->name);
5895 /* we turn a single ldimm64 into two identical invalid calls */
5896 for (i = 0; i < 2; i++) {
5897 insn->code = BPF_JMP | BPF_CALL;
5901 /* if this instruction is reachable (not dead code), the
5902 * verifier will complain with something like:
5903 * invalid func unknown#2001000123
5904 * where the lower 123 is the map index into the obj->maps[] array
5906 insn->imm = POISON_LDIMM64_MAP_BASE + map_idx;
5912 /* unresolved kfunc call special constant, used also for log fixup logic */
5913 #define POISON_CALL_KFUNC_BASE 2002000000
5914 #define POISON_CALL_KFUNC_PFX "2002"
5916 static void poison_kfunc_call(struct bpf_program *prog, int relo_idx,
5917 int insn_idx, struct bpf_insn *insn,
5918 int ext_idx, const struct extern_desc *ext)
5920 pr_debug("prog '%s': relo #%d: poisoning insn #%d that calls kfunc '%s'\n",
5921 prog->name, relo_idx, insn_idx, ext->name);
5923 /* we turn the kfunc call into an invalid helper call with an identifiable constant */
5924 insn->code = BPF_JMP | BPF_CALL;
5928 /* if this instruction is reachable (not dead code), the
5929 * verifier will complain with something like:
5930 * invalid func unknown#2002000123
5931 * where the lower 123 is the extern index into the obj->externs[] array
5933 insn->imm = POISON_CALL_KFUNC_BASE + ext_idx;
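/* Illustrative decoding of a poisoned imm seen in a verifier log line
 * such as "invalid func unknown#2002000042" (sketch, not libbpf code):
 *
 *	if (imm >= POISON_CALL_KFUNC_BASE)
 *		ext_idx = imm - POISON_CALL_KFUNC_BASE;
 *	else if (imm >= POISON_LDIMM64_MAP_BASE)
 *		map_idx = imm - POISON_LDIMM64_MAP_BASE;
 *
 * For the log line above this yields ext_idx == 42.
 */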
5936 /* Relocate data references within program code:
5937 * - map references;
5938 * - global variable references;
5939 * - extern references.
5942 bpf_object__relocate_data(struct bpf_object *obj, struct bpf_program *prog)
5946 for (i = 0; i < prog->nr_reloc; i++) {
5947 struct reloc_desc *relo = &prog->reloc_desc[i];
5948 struct bpf_insn *insn = &prog->insns[relo->insn_idx];
5949 const struct bpf_map *map;
5950 struct extern_desc *ext;
5952 switch (relo->type) {
5954 map = &obj->maps[relo->map_idx];
5955 if (obj->gen_loader) {
5956 insn[0].src_reg = BPF_PSEUDO_MAP_IDX;
5957 insn[0].imm = relo->map_idx;
5958 } else if (map->autocreate) {
5959 insn[0].src_reg = BPF_PSEUDO_MAP_FD;
5960 insn[0].imm = map->fd;
5962 poison_map_ldimm64(prog, i, relo->insn_idx, insn,
5963 relo->map_idx, map);
5967 map = &obj->maps[relo->map_idx];
5968 insn[1].imm = insn[0].imm + relo->sym_off;
5969 if (obj->gen_loader) {
5970 insn[0].src_reg = BPF_PSEUDO_MAP_IDX_VALUE;
5971 insn[0].imm = relo->map_idx;
5972 } else if (map->autocreate) {
5973 insn[0].src_reg = BPF_PSEUDO_MAP_VALUE;
5974 insn[0].imm = map->fd;
5976 poison_map_ldimm64(prog, i, relo->insn_idx, insn,
5977 relo->map_idx, map);
5980 case RELO_EXTERN_LD64:
5981 ext = &obj->externs[relo->ext_idx];
5982 if (ext->type == EXT_KCFG) {
5983 if (obj->gen_loader) {
5984 insn[0].src_reg = BPF_PSEUDO_MAP_IDX_VALUE;
5985 insn[0].imm = obj->kconfig_map_idx;
5987 insn[0].src_reg = BPF_PSEUDO_MAP_VALUE;
5988 insn[0].imm = obj->maps[obj->kconfig_map_idx].fd;
5990 insn[1].imm = ext->kcfg.data_off;
5991 } else /* EXT_KSYM */ {
5992 if (ext->ksym.type_id && ext->is_set) { /* typed ksyms */
5993 insn[0].src_reg = BPF_PSEUDO_BTF_ID;
5994 insn[0].imm = ext->ksym.kernel_btf_id;
5995 insn[1].imm = ext->ksym.kernel_btf_obj_fd;
5996 } else { /* typeless ksyms or unresolved typed ksyms */
5997 insn[0].imm = (__u32)ext->ksym.addr;
5998 insn[1].imm = ext->ksym.addr >> 32;
6002 case RELO_EXTERN_CALL:
6003 ext = &obj->externs[relo->ext_idx];
6004 insn[0].src_reg = BPF_PSEUDO_KFUNC_CALL;
6006 insn[0].imm = ext->ksym.kernel_btf_id;
6007 insn[0].off = ext->ksym.btf_fd_idx;
6008 } else { /* unresolved weak kfunc call */
6009 poison_kfunc_call(prog, i, relo->insn_idx, insn,
6010 relo->ext_idx, ext);
6013 case RELO_SUBPROG_ADDR:
6014 if (insn[0].src_reg != BPF_PSEUDO_FUNC) {
6015 pr_warn("prog '%s': relo #%d: bad insn\n",
6019 /* handled already */
6022 /* handled already */
6025 /* will be handled by bpf_program_record_relos() */
6028 pr_warn("prog '%s': relo #%d: bad relo type %d\n",
6029 prog->name, i, relo->type);
6037 static int adjust_prog_btf_ext_info(const struct bpf_object *obj,
6038 const struct bpf_program *prog,
6039 const struct btf_ext_info *ext_info,
6040 void **prog_info, __u32 *prog_rec_cnt,
6043 void *copy_start = NULL, *copy_end = NULL;
6044 void *rec, *rec_end, *new_prog_info;
6045 const struct btf_ext_info_sec *sec;
6046 size_t old_sz, new_sz;
6047 int i, sec_num, sec_idx, off_adj;
6050 for_each_btf_ext_sec(ext_info, sec) {
6051 sec_idx = ext_info->sec_idxs[sec_num];
6053 if (prog->sec_idx != sec_idx)
6056 for_each_btf_ext_rec(ext_info, sec, i, rec) {
6057 __u32 insn_off = *(__u32 *)rec / BPF_INSN_SZ;
6059 if (insn_off < prog->sec_insn_off)
6061 if (insn_off >= prog->sec_insn_off + prog->sec_insn_cnt)
6066 copy_end = rec + ext_info->rec_size;
6072 /* append func/line info of a given (sub-)program to the main
6073 * program func/line info
6075 old_sz = (size_t)(*prog_rec_cnt) * ext_info->rec_size;
6076 new_sz = old_sz + (copy_end - copy_start);
6077 new_prog_info = realloc(*prog_info, new_sz);
6080 *prog_info = new_prog_info;
6081 *prog_rec_cnt = new_sz / ext_info->rec_size;
6082 memcpy(new_prog_info + old_sz, copy_start, copy_end - copy_start);
6084 /* Kernel instruction offsets are in units of 8-byte
6085 * instructions, while .BTF.ext instruction offsets generated
6086 * by Clang are in units of bytes. So convert Clang offsets
6087 * into kernel offsets and adjust offset according to program
6088 * relocated position.
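 * For example, a .BTF.ext byte offset of 80 in a subprog with
 * sec_insn_off 100 appended at sub_insn_off 116 becomes
 * 80 / 8 + (116 - 100) = insn #26.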
6090 off_adj = prog->sub_insn_off - prog->sec_insn_off;
6091 rec = new_prog_info + old_sz;
6092 rec_end = new_prog_info + new_sz;
6093 for (; rec < rec_end; rec += ext_info->rec_size) {
6094 __u32 *insn_off = rec;
6096 *insn_off = *insn_off / BPF_INSN_SZ + off_adj;
6098 *prog_rec_sz = ext_info->rec_size;
6106 reloc_prog_func_and_line_info(const struct bpf_object *obj,
6107 struct bpf_program *main_prog,
6108 const struct bpf_program *prog)
6112 /* no .BTF.ext relocation if .BTF.ext is missing or kernel doesn't
6113 * support func/line info
6115 if (!obj->btf_ext || !kernel_supports(obj, FEAT_BTF_FUNC))
6118 /* only attempt func info relocation if main program's func_info
6119 * relocation was successful
6121 if (main_prog != prog && !main_prog->func_info)
6124 err = adjust_prog_btf_ext_info(obj, prog, &obj->btf_ext->func_info,
6125 &main_prog->func_info,
6126 &main_prog->func_info_cnt,
6127 &main_prog->func_info_rec_size);
6129 if (err != -ENOENT) {
6130 pr_warn("prog '%s': error relocating .BTF.ext function info: %d\n",
6134 if (main_prog->func_info) {
6136 * Some info has already been found, but there is a problem
6137 * with the last btf_ext reloc. We have to error out.
6139 pr_warn("prog '%s': missing .BTF.ext function info.\n", prog->name);
6142 /* We failed to load the very first info. Ignore the rest. */
6143 pr_warn("prog '%s': missing .BTF.ext function info for the main program, skipping all of .BTF.ext func info.\n",
6148 /* don't relocate line info if main program's relocation failed */
6149 if (main_prog != prog && !main_prog->line_info)
6152 err = adjust_prog_btf_ext_info(obj, prog, &obj->btf_ext->line_info,
6153 &main_prog->line_info,
6154 &main_prog->line_info_cnt,
6155 &main_prog->line_info_rec_size);
6157 if (err != -ENOENT) {
6158 pr_warn("prog '%s': error relocating .BTF.ext line info: %d\n",
6162 if (main_prog->line_info) {
6164 * Some info has already been found, but there is a problem
6165 * with the last btf_ext reloc. We have to error out.
6167 pr_warn("prog '%s': missing .BTF.ext line info.\n", prog->name);
6171 /* We failed to load the very first info. Ignore the rest. */
6171 pr_warn("prog '%s': missing .BTF.ext line info for the main program, skipping all of .BTF.ext line info.\n",
6177 static int cmp_relo_by_insn_idx(const void *key, const void *elem)
6179 size_t insn_idx = *(const size_t *)key;
6180 const struct reloc_desc *relo = elem;
6182 if (insn_idx == relo->insn_idx)
6184 return insn_idx < relo->insn_idx ? -1 : 1;
6187 static struct reloc_desc *find_prog_insn_relo(const struct bpf_program *prog, size_t insn_idx)
6189 if (!prog->nr_reloc)
6191 return bsearch(&insn_idx, prog->reloc_desc, prog->nr_reloc,
6192 sizeof(*prog->reloc_desc), cmp_relo_by_insn_idx);
6195 static int append_subprog_relos(struct bpf_program *main_prog, struct bpf_program *subprog)
6197 int new_cnt = main_prog->nr_reloc + subprog->nr_reloc;
6198 struct reloc_desc *relos;
6201 if (main_prog == subprog)
6203 relos = libbpf_reallocarray(main_prog->reloc_desc, new_cnt, sizeof(*relos));
6204 /* if new count is zero, reallocarray can return a valid NULL result;
6205 * in this case the previous pointer will be freed, so we *have to*
6206 * reassign the old pointer to the new value (even if it's NULL)
6208 if (!relos && new_cnt)
6210 if (subprog->nr_reloc)
6211 memcpy(relos + main_prog->nr_reloc, subprog->reloc_desc,
6212 sizeof(*relos) * subprog->nr_reloc);
6214 for (i = main_prog->nr_reloc; i < new_cnt; i++)
6215 relos[i].insn_idx += subprog->sub_insn_off;
6216 /* After insn_idx adjustment the 'relos' array is still sorted
6217 * by insn_idx and doesn't break bsearch.
6219 main_prog->reloc_desc = relos;
6220 main_prog->nr_reloc = new_cnt;
6225 bpf_object__append_subprog_code(struct bpf_object *obj, struct bpf_program *main_prog,
6226 struct bpf_program *subprog)
6228 struct bpf_insn *insns;
6232 subprog->sub_insn_off = main_prog->insns_cnt;
6234 new_cnt = main_prog->insns_cnt + subprog->insns_cnt;
6235 insns = libbpf_reallocarray(main_prog->insns, new_cnt, sizeof(*insns));
6237 pr_warn("prog '%s': failed to realloc prog code\n", main_prog->name);
6240 main_prog->insns = insns;
6241 main_prog->insns_cnt = new_cnt;
6243 memcpy(main_prog->insns + subprog->sub_insn_off, subprog->insns,
6244 subprog->insns_cnt * sizeof(*insns));
6246 pr_debug("prog '%s': added %zu insns from sub-prog '%s'\n",
6247 main_prog->name, subprog->insns_cnt, subprog->name);
6249 /* The subprog insns are now appended. Append its relos too. */
6250 err = append_subprog_relos(main_prog, subprog);
6257 bpf_object__reloc_code(struct bpf_object *obj, struct bpf_program *main_prog,
6258 struct bpf_program *prog)
6260 size_t sub_insn_idx, insn_idx;
6261 struct bpf_program *subprog;
6262 struct reloc_desc *relo;
6263 struct bpf_insn *insn;
6266 err = reloc_prog_func_and_line_info(obj, main_prog, prog);
6270 for (insn_idx = 0; insn_idx < prog->sec_insn_cnt; insn_idx++) {
6271 insn = &main_prog->insns[prog->sub_insn_off + insn_idx];
6272 if (!insn_is_subprog_call(insn) && !insn_is_pseudo_func(insn))
6275 relo = find_prog_insn_relo(prog, insn_idx);
6276 if (relo && relo->type == RELO_EXTERN_CALL)
6277 /* kfunc relocations will be handled later
6278 * in bpf_object__relocate_data()
6281 if (relo && relo->type != RELO_CALL && relo->type != RELO_SUBPROG_ADDR) {
6282 pr_warn("prog '%s': unexpected relo for insn #%zu, type %d\n",
6283 prog->name, insn_idx, relo->type);
6284 return -LIBBPF_ERRNO__RELOC;
6287 /* sub-program instruction index is a combination of
6288 * an offset of a symbol pointed to by relocation and
6289 * call instruction's imm field; for global functions,
6290 * call always has imm = -1, but for static functions
6291 * relocation is against STT_SECTION and insn->imm
6292 * points to the start of a static function
6294 * for subprog addr relocation, the relo->sym_off + insn->imm is
6295 * the byte offset in the corresponding section.
6297 if (relo->type == RELO_CALL)
6298 sub_insn_idx = relo->sym_off / BPF_INSN_SZ + insn->imm + 1;
6300 sub_insn_idx = (relo->sym_off + insn->imm) / BPF_INSN_SZ;
6301 } else if (insn_is_pseudo_func(insn)) {
6303 * RELO_SUBPROG_ADDR relo is always emitted even if both
6304 * functions are in the same section, so it shouldn't reach here.
6306 pr_warn("prog '%s': missing subprog addr relo for insn #%zu\n",
6307 prog->name, insn_idx);
6308 return -LIBBPF_ERRNO__RELOC;
6310 /* if subprogram call is to a static function within
6311 * the same ELF section, there won't be any relocation
6312 * emitted, but it also means there is no additional
6313 * offset necessary; insn->imm is relative to the
6314 * instruction's original position within the section
6316 sub_insn_idx = prog->sec_insn_off + insn_idx + insn->imm + 1;
6319 /* we enforce that sub-programs should be in .text section */
6320 subprog = find_prog_by_sec_insn(obj, obj->efile.text_shndx, sub_insn_idx);
6322 pr_warn("prog '%s': no .text section found yet sub-program call exists\n",
6324 return -LIBBPF_ERRNO__RELOC;
6327 /* if it's the first call instruction calling into this
6328 * subprogram (meaning this subprog hasn't been processed
6329 * yet) within the context of the current main program:
6330 * - append it at the end of the main program's instruction block;
6331 * - process it recursively, while the current program is put on hold;
6332 * - if that subprogram calls some other not-yet-processed
6333 * subprogram, the same thing will happen recursively until
6334 * there are no more unprocessed subprograms left to append
6337 if (subprog->sub_insn_off == 0) {
6338 err = bpf_object__append_subprog_code(obj, main_prog, subprog);
6341 err = bpf_object__reloc_code(obj, main_prog, subprog);
6346 /* main_prog->insns memory could have been re-allocated, so
6347 * calculate pointer again
6349 insn = &main_prog->insns[prog->sub_insn_off + insn_idx];
6350 /* calculate correct instruction position within current main
6351 * prog; each main prog can have a different set of
6352 * subprograms appended (potentially in different order as
6353 * well), so position of any subprog can be different for
6354 * different main programs
6356 insn->imm = subprog->sub_insn_off - (prog->sub_insn_off + insn_idx) - 1;
6358 pr_debug("prog '%s': insn #%zu relocated, imm %d points to subprog '%s' (now at %zu offset)\n",
6359 prog->name, insn_idx, insn->imm, subprog->name, subprog->sub_insn_off);
6366 * Relocate sub-program calls.
6368 * Algorithm operates as follows. Each entry-point BPF program (referred to as
6369 * main prog) is processed separately. Each subprog (a non-entry function
6370 * that can be called from either entry progs or other subprogs) gets its
6371 * sub_insn_off reset to zero. This serves as an indicator that this subprogram
6372 * hasn't yet been appended and relocated within the current main prog. Once it
6373 * is relocated, sub_insn_off will point at the position within the current main
6374 * prog where the given subprog was appended. This will further be used to relocate all
6375 * the call instructions jumping into this subprog.
6377 * We start with main program and process all call instructions. If the call
6378 * is into a subprog that hasn't been processed (i.e., subprog->sub_insn_off
6379 * is zero), subprog instructions are appended at the end of main program's
6380 * instruction array. Then main program is "put on hold" while we recursively
6381 * process newly appended subprogram. If that subprogram calls into another
6382 * subprogram that hasn't been appended, the new subprogram is appended again to
6383 * the *main* prog's instructions (a subprog's own instructions are always left
6384 * untouched, as they need to be in an unmodified state for subsequent main progs,
6385 * and subprog instructions are only ever appended as part of a main prog) and
6386 * the process continues recursively. Once all the subprogs called from a main
6387 * prog or any of its subprogs are appended (and relocated), all their
6388 * positions within finalized instructions array are known, so it's easy to
6389 * rewrite call instructions with correct relative offsets, corresponding to
6390 * desired target subprog.
6392 * It's important to realize that some subprogs might not be called from a given
6393 * main prog or any of its called/used subprogs. Those will keep their
6394 * subprog->sub_insn_off as zero at all times, won't be appended to the current
6395 * main prog, and won't be relocated within the context of the current main prog.
6396 * They might still be used from other main progs later.
6398 * Visually this process can be shown as below. Suppose we have two main
6399 * programs mainA and mainB and BPF object contains three subprogs: subA,
6400 * subB, and subC. mainA calls only subA, mainB calls only subC, but subA and
6401 * subC both call subB:
6403 * +--------+ +-------+
6405 * +--+---+ +--+-+-+ +---+--+
6406 * | subA | | subB | | subC |
6407 * +--+---+ +------+ +---+--+
6410 * +---+-------+ +------+----+
6411 * | mainA | | mainB |
6412 * +-----------+ +-----------+
6414 * We'll start relocating mainA, will find subA, append it and start
6415 * processing sub A recursively:
6417 * +-----------+------+
6419 * +-----------+------+
6421 * At this point we notice that subB is used from subA, so we append it and
6422 * relocate (there are no further subcalls from subB):
6424 * +-----------+------+------+
6425 * | mainA | subA | subB |
6426 * +-----------+------+------+
6428 * At this point, we relocate subA calls, then go one level up and finish with
6429 * relocating mainA calls. mainA is done.
6431 * For mainB process is similar but results in different order. We start with
6432 * mainB and skip subA and subB, as mainB never calls them (at least
6433 * directly), but we see subC is needed, so we append and start processing it:
6435 * +-----------+------+
6437 * +-----------+------+
6438 * Now we see subC needs subB, so we go back to it, append and relocate it:
6440 * +-----------+------+------+
6441 * | mainB | subC | subB |
6442 * +-----------+------+------+
6444 * At this point we unwind recursion, relocate calls in subC, then in mainB.
6447 bpf_object__relocate_calls(struct bpf_object *obj, struct bpf_program *prog)
6449 struct bpf_program *subprog;
6452 /* mark all subprogs as not relocated (yet) within the context of
6453 * current main program
6455 for (i = 0; i < obj->nr_programs; i++) {
6456 subprog = &obj->programs[i];
6457 if (!prog_is_subprog(obj, subprog))
6460 subprog->sub_insn_off = 0;
6463 err = bpf_object__reloc_code(obj, prog, prog);
6471 bpf_object__free_relocs(struct bpf_object *obj)
6473 struct bpf_program *prog;
6476 /* free up relocation descriptors */
6477 for (i = 0; i < obj->nr_programs; i++) {
6478 prog = &obj->programs[i];
6479 zfree(&prog->reloc_desc);
6484 static int cmp_relocs(const void *_a, const void *_b)
6486 const struct reloc_desc *a = _a;
6487 const struct reloc_desc *b = _b;
6489 if (a->insn_idx != b->insn_idx)
6490 return a->insn_idx < b->insn_idx ? -1 : 1;
6492 /* no two relocations should have the same insn_idx, but ... */
6493 if (a->type != b->type)
6494 return a->type < b->type ? -1 : 1;
6499 static void bpf_object__sort_relos(struct bpf_object *obj)
6503 for (i = 0; i < obj->nr_programs; i++) {
6504 struct bpf_program *p = &obj->programs[i];
6509 qsort(p->reloc_desc, p->nr_reloc, sizeof(*p->reloc_desc), cmp_relocs);
6513 static int bpf_prog_assign_exc_cb(struct bpf_object *obj, struct bpf_program *prog)
6515 const char *str = "exception_callback:";
6516 size_t pfx_len = strlen(str);
6519 if (!obj->btf || !kernel_supports(obj, FEAT_BTF_DECL_TAG))
6522 n = btf__type_cnt(obj->btf);
6523 for (i = 1; i < n; i++) {
6527 t = btf_type_by_id(obj->btf, i);
6528 if (!btf_is_decl_tag(t) || btf_decl_tag(t)->component_idx != -1)
6531 name = btf__str_by_offset(obj->btf, t->name_off);
6532 if (strncmp(name, str, pfx_len) != 0)
6535 t = btf_type_by_id(obj->btf, t->type);
6536 if (!btf_is_func(t) || btf_func_linkage(t) != BTF_FUNC_GLOBAL) {
6537 pr_warn("prog '%s': exception_callback:<value> decl tag not applied to the main program\n",
6541 if (strcmp(prog->name, btf__str_by_offset(obj->btf, t->name_off)) != 0)
6543 /* If multiple callbacks are specified for the same prog,
6544 * the verifier will eventually return an error for this
6545 * case, so simply skip appending a subprog.
6547 if (prog->exception_cb_idx >= 0) {
6548 prog->exception_cb_idx = -1;
6553 if (str_is_empty(name)) {
6554 pr_warn("prog '%s': exception_callback:<value> decl tag contains empty value\n",
6559 for (j = 0; j < obj->nr_programs; j++) {
6560 struct bpf_program *subprog = &obj->programs[j];
6562 if (!prog_is_subprog(obj, subprog))
6564 if (strcmp(name, subprog->name) != 0)
6566 /* Enforce non-hidden, as from the verifier's point of
6567 * view it expects global functions, whereas
6568 * mark_btf_static fixes up the linkage as static.
6570 if (!subprog->sym_global || subprog->mark_btf_static) {
6571 pr_warn("prog '%s': exception callback %s must be a global non-hidden function\n",
6572 prog->name, subprog->name);
6575 /* Let's see if we already saw a static exception callback with the same name */
6576 if (prog->exception_cb_idx >= 0) {
6577 pr_warn("prog '%s': multiple subprogs with same name as exception callback '%s'\n",
6578 prog->name, subprog->name);
6581 prog->exception_cb_idx = j;
6585 if (prog->exception_cb_idx >= 0)
6588 pr_warn("prog '%s': cannot find exception callback '%s'\n", prog->name, name);
6596 enum bpf_prog_type prog_type;
6597 const char *ctx_name;
6598 } global_ctx_map[] = {
6599 { BPF_PROG_TYPE_CGROUP_DEVICE, "bpf_cgroup_dev_ctx" },
6600 { BPF_PROG_TYPE_CGROUP_SKB, "__sk_buff" },
6601 { BPF_PROG_TYPE_CGROUP_SOCK, "bpf_sock" },
6602 { BPF_PROG_TYPE_CGROUP_SOCK_ADDR, "bpf_sock_addr" },
6603 { BPF_PROG_TYPE_CGROUP_SOCKOPT, "bpf_sockopt" },
6604 { BPF_PROG_TYPE_CGROUP_SYSCTL, "bpf_sysctl" },
6605 { BPF_PROG_TYPE_FLOW_DISSECTOR, "__sk_buff" },
6606 { BPF_PROG_TYPE_KPROBE, "bpf_user_pt_regs_t" },
6607 { BPF_PROG_TYPE_LWT_IN, "__sk_buff" },
6608 { BPF_PROG_TYPE_LWT_OUT, "__sk_buff" },
6609 { BPF_PROG_TYPE_LWT_SEG6LOCAL, "__sk_buff" },
6610 { BPF_PROG_TYPE_LWT_XMIT, "__sk_buff" },
6611 { BPF_PROG_TYPE_NETFILTER, "bpf_nf_ctx" },
6612 { BPF_PROG_TYPE_PERF_EVENT, "bpf_perf_event_data" },
6613 { BPF_PROG_TYPE_RAW_TRACEPOINT, "bpf_raw_tracepoint_args" },
6614 { BPF_PROG_TYPE_RAW_TRACEPOINT_WRITABLE, "bpf_raw_tracepoint_args" },
6615 { BPF_PROG_TYPE_SCHED_ACT, "__sk_buff" },
6616 { BPF_PROG_TYPE_SCHED_CLS, "__sk_buff" },
6617 { BPF_PROG_TYPE_SK_LOOKUP, "bpf_sk_lookup" },
6618 { BPF_PROG_TYPE_SK_MSG, "sk_msg_md" },
6619 { BPF_PROG_TYPE_SK_REUSEPORT, "sk_reuseport_md" },
6620 { BPF_PROG_TYPE_SK_SKB, "__sk_buff" },
6621 { BPF_PROG_TYPE_SOCK_OPS, "bpf_sock_ops" },
6622 { BPF_PROG_TYPE_SOCKET_FILTER, "__sk_buff" },
6623 { BPF_PROG_TYPE_XDP, "xdp_md" },
6624 /* all other program types don't have "named" context structs */
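/* BPF-side sketch of the __arg_ctx tag handled by the fixup logic
 * below; __arg_ctx comes from bpf_helpers.h and expands to
 * btf_decl_tag("arg:ctx"). Names are hypothetical.
 *
 *	__noinline int subhandler(void *ctx __arg_ctx)
 *	{
 *		return bpf_get_smp_processor_id();
 *	}
 *
 *	SEC("kprobe")
 *	int entry(struct pt_regs *ctx)
 *	{
 *		return subhandler(ctx);
 *	}
 */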
6627 /* forward declarations for arch-specific underlying types of bpf_user_pt_regs_t typedef,
6628 * for below __builtin_types_compatible_p() checks;
6629 * with this approach we don't need any extra arch-specific #ifdef guards
6632 struct user_pt_regs;
6633 struct user_regs_struct;
6635 static bool need_func_arg_type_fixup(const struct btf *btf, const struct bpf_program *prog,
6636 const char *subprog_name, int arg_idx,
6637 int arg_type_id, const char *ctx_name)
6639 const struct btf_type *t;
6642 /* check if existing parameter already matches verifier expectations */
6643 t = skip_mods_and_typedefs(btf, arg_type_id, NULL);
6647 /* typedef bpf_user_pt_regs_t is a special PITA case, valid for kprobe
6648 * and perf_event programs, so check this case early on and forget
6649 * about it for subsequent checks
6651 while (btf_is_mod(t))
6652 t = btf__type_by_id(btf, t->type);
6653 if (btf_is_typedef(t) &&
6654 (prog->type == BPF_PROG_TYPE_KPROBE || prog->type == BPF_PROG_TYPE_PERF_EVENT)) {
6655 tname = btf__str_by_offset(btf, t->name_off) ?: "<anon>";
6656 if (strcmp(tname, "bpf_user_pt_regs_t") == 0)
6657 return false; /* canonical type for kprobe/perf_event */
6660 /* now we can ignore typedefs moving forward */
6661 t = skip_mods_and_typedefs(btf, t->type, NULL);
6663 /* if it's `void *`, definitely fix up BTF info */
6667 /* if it's already proper canonical type, no need to fix up */
6668 tname = btf__str_by_offset(btf, t->name_off) ?: "<anon>";
6669 if (btf_is_struct(t) && strcmp(tname, ctx_name) == 0)
6673 switch (prog->type) {
6674 case BPF_PROG_TYPE_KPROBE:
6675 /* `struct pt_regs *` is expected, but we need to fix up */
6676 if (btf_is_struct(t) && strcmp(tname, "pt_regs") == 0)
6679 case BPF_PROG_TYPE_PERF_EVENT:
6680 if (__builtin_types_compatible_p(bpf_user_pt_regs_t, struct pt_regs) &&
6681 btf_is_struct(t) && strcmp(tname, "pt_regs") == 0)
6683 if (__builtin_types_compatible_p(bpf_user_pt_regs_t, struct user_pt_regs) &&
6684 btf_is_struct(t) && strcmp(tname, "user_pt_regs") == 0)
6686 if (__builtin_types_compatible_p(bpf_user_pt_regs_t, struct user_regs_struct) &&
6687 btf_is_struct(t) && strcmp(tname, "user_regs_struct") == 0)
6690 case BPF_PROG_TYPE_RAW_TRACEPOINT:
6691 case BPF_PROG_TYPE_RAW_TRACEPOINT_WRITABLE:
6692 /* allow u64* as ctx */
6693 if (btf_is_int(t) && t->size == 8)
6701 pr_warn("prog '%s': subprog '%s' arg#%d is expected to be of `struct %s *` type\n",
6702 prog->name, subprog_name, arg_idx, ctx_name);
6706 static int clone_func_btf_info(struct btf *btf, int orig_fn_id, struct bpf_program *prog)
6708 int fn_id, fn_proto_id, ret_type_id, orig_proto_id;
6709 int i, err, arg_cnt, fn_name_off, linkage;
6710 struct btf_type *fn_t, *fn_proto_t, *t;
6711 struct btf_param *p;
6713 /* caller already validated FUNC -> FUNC_PROTO validity */
6714 fn_t = btf_type_by_id(btf, orig_fn_id);
6715 fn_proto_t = btf_type_by_id(btf, fn_t->type);
6717 /* Note that each btf__add_xxx() operation invalidates
6718 * all btf_type and string pointers, so we need to be
6719 * very careful when cloning BTF types. BTF type
6720 * pointers always have to be refetched. And to avoid
6721 * problems with invalidated string pointers, we
6722 * add empty strings initially, then just fix up
6723 * name_off offsets in place. Offsets are stable for
6724 * existing strings, so that works out.
6726 fn_name_off = fn_t->name_off; /* we are about to invalidate fn_t */
6727 linkage = btf_func_linkage(fn_t);
6728 orig_proto_id = fn_t->type; /* original FUNC_PROTO ID */
6729 ret_type_id = fn_proto_t->type; /* fn_proto_t will be invalidated */
6730 arg_cnt = btf_vlen(fn_proto_t);
6732 /* clone FUNC_PROTO and its params */
6733 fn_proto_id = btf__add_func_proto(btf, ret_type_id);
6734 if (fn_proto_id < 0)
6737 for (i = 0; i < arg_cnt; i++) {
6740 /* copy original parameter data */
6741 t = btf_type_by_id(btf, orig_proto_id);
6742 p = &btf_params(t)[i];
6743 name_off = p->name_off;
6745 err = btf__add_func_param(btf, "", p->type);
6749 fn_proto_t = btf_type_by_id(btf, fn_proto_id);
6750 p = &btf_params(fn_proto_t)[i];
6751 p->name_off = name_off; /* use remembered str offset */
6754 /* clone FUNC now, btf__add_func() enforces non-empty name, so use
6755 * entry program's name as a placeholder, which we replace immediately
6756 * with original name_off */
6758 fn_id = btf__add_func(btf, prog->name, linkage, fn_proto_id);
6762 fn_t = btf_type_by_id(btf, fn_id);
6763 fn_t->name_off = fn_name_off; /* reuse original string */
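/* Added note: the invalidation rule documented above implies the following
 * usage pattern (illustrative sketch) whenever new types are added mid-edit:
 *
 *   fn_name_off = fn_t->name_off;            // save offsets, not pointers
 *   id = btf__add_func_proto(btf, ret_id);   // may realloc BTF type data
 *   fn_t = btf_type_by_id(btf, fn_id);       // refetch before any deref
 *
 * String offsets stay valid across additions; btf_type pointers do not.
 */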
6768 /* Check if main program or global subprog's function prototype has `arg:ctx`
6769 * argument tags, and, if necessary, substitute correct type to match what BPF
6770 * verifier would expect, taking into account specific program type. This
6771 * allows supporting the __arg_ctx tag transparently on old kernels that
6772 * don't yet have native support for it in the verifier, making the user's life much easier. */
6775 static int bpf_program_fixup_func_info(struct bpf_object *obj, struct bpf_program *prog)
6777 const char *ctx_name = NULL, *ctx_tag = "arg:ctx", *fn_name;
6778 struct bpf_func_info_min *func_rec;
6779 struct btf_type *fn_t, *fn_proto_t;
6780 struct btf *btf = obj->btf;
6781 const struct btf_type *t;
6782 struct btf_param *p;
6783 int ptr_id = 0, struct_id, tag_id, orig_fn_id;
6784 int i, n, arg_idx, arg_cnt, err, rec_idx;
6787 /* no .BTF.ext, no problem */
6788 if (!obj->btf_ext || !prog->func_info)
6791 /* don't do any fix ups if kernel natively supports __arg_ctx */
6792 if (kernel_supports(obj, FEAT_ARG_CTX_TAG))
6795 /* some BPF program types just don't have named context structs, so
6796 * this fallback mechanism doesn't work for them */
6798 for (i = 0; i < ARRAY_SIZE(global_ctx_map); i++) {
6799 if (global_ctx_map[i].prog_type != prog->type)
6801 ctx_name = global_ctx_map[i].ctx_name;
6807 /* remember original func BTF IDs to detect if we already cloned them */
6808 orig_ids = calloc(prog->func_info_cnt, sizeof(*orig_ids));
6811 for (i = 0; i < prog->func_info_cnt; i++) {
6812 func_rec = prog->func_info + prog->func_info_rec_size * i;
6813 orig_ids[i] = func_rec->type_id;
6816 /* go through each DECL_TAG with "arg:ctx" and see if it points to one
6817 * of our subprogs; if yes and subprog is global and needs adjustment,
6818 * clone and adjust FUNC -> FUNC_PROTO combo */
6820 for (i = 1, n = btf__type_cnt(btf); i < n; i++) {
6821 /* only DECL_TAG with "arg:ctx" value are interesting */
6822 t = btf__type_by_id(btf, i);
6823 if (!btf_is_decl_tag(t))
6825 if (strcmp(btf__str_by_offset(btf, t->name_off), ctx_tag) != 0)
6828 /* only global funcs need adjustment, if at all */
6829 orig_fn_id = t->type;
6830 fn_t = btf_type_by_id(btf, orig_fn_id);
6831 if (!btf_is_func(fn_t) || btf_func_linkage(fn_t) != BTF_FUNC_GLOBAL)
6834 /* sanity check FUNC -> FUNC_PROTO chain, just in case */
6835 fn_proto_t = btf_type_by_id(btf, fn_t->type);
6836 if (!fn_proto_t || !btf_is_func_proto(fn_proto_t))
6839 /* find corresponding func_info record */
6841 for (rec_idx = 0; rec_idx < prog->func_info_cnt; rec_idx++) {
6842 if (orig_ids[rec_idx] == t->type) {
6843 func_rec = prog->func_info + prog->func_info_rec_size * rec_idx;
6847 /* current main program doesn't call into this subprog */
6851 /* some more sanity checking of DECL_TAG */
6852 arg_cnt = btf_vlen(fn_proto_t);
6853 arg_idx = btf_decl_tag(t)->component_idx;
6854 if (arg_idx < 0 || arg_idx >= arg_cnt)
6857 /* check if we should fix up argument type */
6858 p = &btf_params(fn_proto_t)[arg_idx];
6859 fn_name = btf__str_by_offset(btf, fn_t->name_off) ?: "<anon>";
6860 if (!need_func_arg_type_fixup(btf, prog, fn_name, arg_idx, p->type, ctx_name))
6863 /* clone fn/fn_proto, unless we already did it for another arg */
6864 if (func_rec->type_id == orig_fn_id) {
6867 fn_id = clone_func_btf_info(btf, orig_fn_id, prog);
6873 /* point func_info record to a cloned FUNC type */
6874 func_rec->type_id = fn_id;
6877 /* create PTR -> STRUCT type chain to mark PTR_TO_CTX argument;
6878 * we do it just once per main BPF program, as all global
6879 * funcs share the same program type, so we need only one PTR -> STRUCT chain per program */
6883 struct_id = btf__add_struct(btf, ctx_name, 0);
6884 ptr_id = btf__add_ptr(btf, struct_id);
6885 if (ptr_id < 0 || struct_id < 0) {
6891 /* for completeness, clone DECL_TAG and point it to cloned param */
6892 tag_id = btf__add_decl_tag(btf, ctx_tag, func_rec->type_id, arg_idx);
6898 /* all the BTF manipulations invalidated pointers, refetch them */
6899 fn_t = btf_type_by_id(btf, func_rec->type_id);
6900 fn_proto_t = btf_type_by_id(btf, fn_t->type);
6902 /* fix up type ID pointed to by param */
6903 p = &btf_params(fn_proto_t)[arg_idx];
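/* Added summary sketch of the end state: after this loop, the cloned
 * FUNC -> FUNC_PROTO has the tagged argument's type pointed at the shared
 * PTR -> STRUCT <ctx_name> chain (the empty-sized struct created above),
 * which pre-__arg_ctx verifiers recognize as a PTR_TO_CTX argument.
 */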
6914 static int bpf_object__relocate(struct bpf_object *obj, const char *targ_btf_path)
6916 struct bpf_program *prog;
6921 err = bpf_object__relocate_core(obj, targ_btf_path);
6923 pr_warn("failed to perform CO-RE relocations: %d\n",
6927 bpf_object__sort_relos(obj);
6930 /* Before relocating calls, pre-process relocations and mark
6931 * the few ld_imm64 instructions that point to subprogs.
6932 * Otherwise bpf_object__reloc_code() later would have to consider
6933 * all ld_imm64 insns as relocation candidates. That would
6934 * reduce relocation speed, since the number of find_prog_insn_relo()
6935 * calls would increase and most of them would fail to find a relo. */
6937 for (i = 0; i < obj->nr_programs; i++) {
6938 prog = &obj->programs[i];
6939 for (j = 0; j < prog->nr_reloc; j++) {
6940 struct reloc_desc *relo = &prog->reloc_desc[j];
6941 struct bpf_insn *insn = &prog->insns[relo->insn_idx];
6943 /* mark the insn, so it's recognized by insn_is_pseudo_func() */
6944 if (relo->type == RELO_SUBPROG_ADDR)
6945 insn[0].src_reg = BPF_PSEUDO_FUNC;
6949 /* relocate subprogram calls and append used subprograms to main
6950 * programs; each copy of subprogram code needs to be relocated
6951 * differently for each main program, because its code location might differ.
6953 * Append subprog relos to main programs to allow data relos to be
6954 * processed after text is completely relocated. */
6956 for (i = 0; i < obj->nr_programs; i++) {
6957 prog = &obj->programs[i];
6958 /* sub-program's sub-calls are relocated within the context of
6959 * its main program only */
6961 if (prog_is_subprog(obj, prog))
6963 if (!prog->autoload)
6966 err = bpf_object__relocate_calls(obj, prog);
6968 pr_warn("prog '%s': failed to relocate calls: %d\n",
6973 err = bpf_prog_assign_exc_cb(obj, prog);
6976 /* Now, also append exception callback if it has not been done already. */
6977 if (prog->exception_cb_idx >= 0) {
6978 struct bpf_program *subprog = &obj->programs[prog->exception_cb_idx];
6980 /* Calling exception callback directly is disallowed, which the
6981 * verifier will reject later. In case it was processed already,
6982 * we can skip this step, otherwise for all other valid cases we
6983 * have to append exception callback now. */
6985 if (subprog->sub_insn_off == 0) {
6986 err = bpf_object__append_subprog_code(obj, prog, subprog);
6989 err = bpf_object__reloc_code(obj, prog, subprog);
6995 for (i = 0; i < obj->nr_programs; i++) {
6996 prog = &obj->programs[i];
6997 if (prog_is_subprog(obj, prog))
6999 if (!prog->autoload)
7002 /* Process data relos for main programs */
7003 err = bpf_object__relocate_data(obj, prog);
7005 pr_warn("prog '%s': failed to relocate data references: %d\n",
7010 /* Fix up .BTF.ext information, if necessary */
7011 err = bpf_program_fixup_func_info(obj, prog);
7013 pr_warn("prog '%s': failed to perform .BTF.ext fix ups: %d\n",
7022 static int bpf_object__collect_st_ops_relos(struct bpf_object *obj,
7023 Elf64_Shdr *shdr, Elf_Data *data);
7025 static int bpf_object__collect_map_relos(struct bpf_object *obj,
7026 Elf64_Shdr *shdr, Elf_Data *data)
7028 const int bpf_ptr_sz = 8, host_ptr_sz = sizeof(void *);
7029 int i, j, nrels, new_sz;
7030 const struct btf_var_secinfo *vi = NULL;
7031 const struct btf_type *sec, *var, *def;
7032 struct bpf_map *map = NULL, *targ_map = NULL;
7033 struct bpf_program *targ_prog = NULL;
7034 bool is_prog_array, is_map_in_map;
7035 const struct btf_member *member;
7036 const char *name, *mname, *type;
7042 if (!obj->efile.btf_maps_sec_btf_id || !obj->btf)
7044 sec = btf__type_by_id(obj->btf, obj->efile.btf_maps_sec_btf_id);
7048 nrels = shdr->sh_size / shdr->sh_entsize;
7049 for (i = 0; i < nrels; i++) {
7050 rel = elf_rel_by_idx(data, i);
7052 pr_warn(".maps relo #%d: failed to get ELF relo\n", i);
7053 return -LIBBPF_ERRNO__FORMAT;
7056 sym = elf_sym_by_idx(obj, ELF64_R_SYM(rel->r_info));
7058 pr_warn(".maps relo #%d: symbol %zx not found\n",
7059 i, (size_t)ELF64_R_SYM(rel->r_info));
7060 return -LIBBPF_ERRNO__FORMAT;
7062 name = elf_sym_str(obj, sym->st_name) ?: "<?>";
7064 pr_debug(".maps relo #%d: for %zd value %zd rel->r_offset %zu name %d ('%s')\n",
7065 i, (ssize_t)(rel->r_info >> 32), (size_t)sym->st_value,
7066 (size_t)rel->r_offset, sym->st_name, name);
7068 for (j = 0; j < obj->nr_maps; j++) {
7069 map = &obj->maps[j];
7070 if (map->sec_idx != obj->efile.btf_maps_shndx)
7073 vi = btf_var_secinfos(sec) + map->btf_var_idx;
7074 if (vi->offset <= rel->r_offset &&
7075 rel->r_offset + bpf_ptr_sz <= vi->offset + vi->size)
7078 if (j == obj->nr_maps) {
7079 pr_warn(".maps relo #%d: cannot find map '%s' at rel->r_offset %zu\n",
7080 i, name, (size_t)rel->r_offset);
7084 is_map_in_map = bpf_map_type__is_map_in_map(map->def.type);
7085 is_prog_array = map->def.type == BPF_MAP_TYPE_PROG_ARRAY;
7086 type = is_map_in_map ? "map" : "prog";
7087 if (is_map_in_map) {
7088 if (sym->st_shndx != obj->efile.btf_maps_shndx) {
7089 pr_warn(".maps relo #%d: '%s' isn't a BTF-defined map\n",
7091 return -LIBBPF_ERRNO__RELOC;
7093 if (map->def.type == BPF_MAP_TYPE_HASH_OF_MAPS &&
7094 map->def.key_size != sizeof(int)) {
7095 pr_warn(".maps relo #%d: hash-of-maps '%s' should have key size %zu.\n",
7096 i, map->name, sizeof(int));
7099 targ_map = bpf_object__find_map_by_name(obj, name);
7101 pr_warn(".maps relo #%d: '%s' isn't a valid map reference\n",
7105 } else if (is_prog_array) {
7106 targ_prog = bpf_object__find_program_by_name(obj, name);
7108 pr_warn(".maps relo #%d: '%s' isn't a valid program reference\n",
7112 if (targ_prog->sec_idx != sym->st_shndx ||
7113 targ_prog->sec_insn_off * 8 != sym->st_value ||
7114 prog_is_subprog(obj, targ_prog)) {
7115 pr_warn(".maps relo #%d: '%s' isn't an entry-point program\n",
7117 return -LIBBPF_ERRNO__RELOC;
7123 var = btf__type_by_id(obj->btf, vi->type);
7124 def = skip_mods_and_typedefs(obj->btf, var->type, NULL);
7125 if (btf_vlen(def) == 0)
7127 member = btf_members(def) + btf_vlen(def) - 1;
7128 mname = btf__name_by_offset(obj->btf, member->name_off);
7129 if (strcmp(mname, "values"))
7132 moff = btf_member_bit_offset(def, btf_vlen(def) - 1) / 8;
7133 if (rel->r_offset - vi->offset < moff)
7136 moff = rel->r_offset - vi->offset - moff;
7137 /* here we use BPF pointer size, which is always 64 bit, as we
7138 * are parsing ELF that was built for BPF target */
7140 if (moff % bpf_ptr_sz)
7143 if (moff >= map->init_slots_sz) {
7145 tmp = libbpf_reallocarray(map->init_slots, new_sz, host_ptr_sz);
7148 map->init_slots = tmp;
7149 memset(map->init_slots + map->init_slots_sz, 0,
7150 (new_sz - map->init_slots_sz) * host_ptr_sz);
7151 map->init_slots_sz = new_sz;
7153 map->init_slots[moff] = is_map_in_map ? (void *)targ_map : (void *)targ_prog;
7155 pr_debug(".maps relo #%d: map '%s' slot [%d] points to %s '%s'\n",
7156 i, map->name, moff, type, name);
7162 static int bpf_object__collect_relos(struct bpf_object *obj)
7166 for (i = 0; i < obj->efile.sec_cnt; i++) {
7167 struct elf_sec_desc *sec_desc = &obj->efile.secs[i];
7172 if (sec_desc->sec_type != SEC_RELO)
7175 shdr = sec_desc->shdr;
7176 data = sec_desc->data;
7177 idx = shdr->sh_info;
7179 if (shdr->sh_type != SHT_REL || idx < 0 || idx >= obj->efile.sec_cnt) {
7180 pr_warn("internal error at %d\n", __LINE__);
7181 return -LIBBPF_ERRNO__INTERNAL;
7184 if (obj->efile.secs[idx].sec_type == SEC_ST_OPS)
7185 err = bpf_object__collect_st_ops_relos(obj, shdr, data);
7186 else if (idx == obj->efile.btf_maps_shndx)
7187 err = bpf_object__collect_map_relos(obj, shdr, data);
7189 err = bpf_object__collect_prog_relos(obj, shdr, data);
7194 bpf_object__sort_relos(obj);
7198 static bool insn_is_helper_call(struct bpf_insn *insn, enum bpf_func_id *func_id)
7200 if (BPF_CLASS(insn->code) == BPF_JMP &&
7201 BPF_OP(insn->code) == BPF_CALL &&
7202 BPF_SRC(insn->code) == BPF_K &&
7203 insn->src_reg == 0 &&
7204 insn->dst_reg == 0) {
7205 *func_id = insn->imm;
7211 static int bpf_object__sanitize_prog(struct bpf_object *obj, struct bpf_program *prog)
7213 struct bpf_insn *insn = prog->insns;
7214 enum bpf_func_id func_id;
7217 if (obj->gen_loader)
7220 for (i = 0; i < prog->insns_cnt; i++, insn++) {
7221 if (!insn_is_helper_call(insn, &func_id))
7224 /* on kernels that don't yet support
7225 * bpf_probe_read_{kernel,user}[_str] helpers, fall back
7226 * to bpf_probe_read() which works well for old kernels */
7229 case BPF_FUNC_probe_read_kernel:
7230 case BPF_FUNC_probe_read_user:
7231 if (!kernel_supports(obj, FEAT_PROBE_READ_KERN))
7232 insn->imm = BPF_FUNC_probe_read;
7234 case BPF_FUNC_probe_read_kernel_str:
7235 case BPF_FUNC_probe_read_user_str:
7236 if (!kernel_supports(obj, FEAT_PROBE_READ_KERN))
7237 insn->imm = BPF_FUNC_probe_read_str;
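/* Added example of the rewrite above: on kernels lacking
 * FEAT_PROBE_READ_KERN (bpf_probe_read_kernel() and friends landed
 * circa Linux 5.5), an instruction such as
 *
 *   call bpf_probe_read_kernel   ; insn->imm == BPF_FUNC_probe_read_kernel
 *
 * is patched in place to
 *
 *   call bpf_probe_read          ; insn->imm == BPF_FUNC_probe_read
 *
 * Only the imm field changes; instruction count and offsets are untouched.
 */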
7246 static int libbpf_find_attach_btf_id(struct bpf_program *prog, const char *attach_name,
7247 int *btf_obj_fd, int *btf_type_id);
7249 /* this is called as prog->sec_def->prog_prepare_load_fn for libbpf-supported sec_defs */
7250 static int libbpf_prepare_prog_load(struct bpf_program *prog,
7251 struct bpf_prog_load_opts *opts, long cookie)
7253 enum sec_def_flags def = cookie;
7255 /* old kernels might not support specifying expected_attach_type */
7256 if ((def & SEC_EXP_ATTACH_OPT) && !kernel_supports(prog->obj, FEAT_EXP_ATTACH_TYPE))
7257 opts->expected_attach_type = 0;
7259 if (def & SEC_SLEEPABLE)
7260 opts->prog_flags |= BPF_F_SLEEPABLE;
7262 if (prog->type == BPF_PROG_TYPE_XDP && (def & SEC_XDP_FRAGS))
7263 opts->prog_flags |= BPF_F_XDP_HAS_FRAGS;
7265 /* special check for usdt to use uprobe_multi link */
7266 if ((def & SEC_USDT) && kernel_supports(prog->obj, FEAT_UPROBE_MULTI_LINK))
7267 prog->expected_attach_type = BPF_TRACE_UPROBE_MULTI;
7269 if ((def & SEC_ATTACH_BTF) && !prog->attach_btf_id) {
7270 int btf_obj_fd = 0, btf_type_id = 0, err;
7271 const char *attach_name;
7273 attach_name = strchr(prog->sec_name, '/');
7275 /* if BPF program is annotated with just SEC("fentry")
7276 * (or similar) without declaratively specifying
7277 * target, then it is expected that target will be
7278 * specified with bpf_program__set_attach_target() at
7279 * runtime before BPF object load step. If not, then
7280 * there is nothing to load into the kernel as BPF
7281 * verifier won't be able to validate BPF program
7282 * correctness anyway. */
7284 pr_warn("prog '%s': no BTF-based attach target is specified, use bpf_program__set_attach_target()\n",
7288 attach_name++; /* skip over / */
7290 err = libbpf_find_attach_btf_id(prog, attach_name, &btf_obj_fd, &btf_type_id);
7294 /* cache resolved BTF FD and BTF type ID in the prog */
7295 prog->attach_btf_obj_fd = btf_obj_fd;
7296 prog->attach_btf_id = btf_type_id;
7298 /* but by now libbpf common logic is not utilizing
7299 * prog->attach_btf_obj_fd/prog->attach_btf_id anymore because
7300 * this callback is called after opts were populated by
7301 * libbpf, so this callback has to update opts explicitly here */
7303 opts->attach_btf_obj_fd = btf_obj_fd;
7304 opts->attach_btf_id = btf_type_id;
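/* Added usage note: a program declared as SEC("fentry/do_unlinkat")
 * resolves its BTF attach target right here during load preparation,
 * whereas a bare SEC("fentry") program must be pointed at a target
 * first (illustrative sketch; "do_unlinkat" is just an example):
 *
 *   bpf_program__set_attach_target(prog, 0, "do_unlinkat");
 *   bpf_object__load(obj);
 *
 * otherwise the pr_warn() above fires and the load is rejected.
 */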
7309 static void fixup_verifier_log(struct bpf_program *prog, char *buf, size_t buf_sz);
7311 static int bpf_object_load_prog(struct bpf_object *obj, struct bpf_program *prog,
7312 struct bpf_insn *insns, int insns_cnt,
7313 const char *license, __u32 kern_version, int *prog_fd)
7315 LIBBPF_OPTS(bpf_prog_load_opts, load_attr);
7316 const char *prog_name = NULL;
7317 char *cp, errmsg[STRERR_BUFSIZE];
7318 size_t log_buf_size = 0;
7319 char *log_buf = NULL, *tmp;
7320 int btf_fd, ret, err;
7321 bool own_log_buf = true;
7322 __u32 log_level = prog->log_level;
7324 if (prog->type == BPF_PROG_TYPE_UNSPEC) {
7326 /* The program type must be set. Most likely we couldn't find a proper
7327 * section definition at load time, and thus we didn't infer the type. */
7329 pr_warn("prog '%s': missing BPF prog type, check ELF section name '%s'\n",
7330 prog->name, prog->sec_name);
7334 if (!insns || !insns_cnt)
7337 if (kernel_supports(obj, FEAT_PROG_NAME))
7338 prog_name = prog->name;
7339 load_attr.attach_prog_fd = prog->attach_prog_fd;
7340 load_attr.attach_btf_obj_fd = prog->attach_btf_obj_fd;
7341 load_attr.attach_btf_id = prog->attach_btf_id;
7342 load_attr.kern_version = kern_version;
7343 load_attr.prog_ifindex = prog->prog_ifindex;
7345 /* specify func_info/line_info only if kernel supports them */
7346 btf_fd = btf__fd(obj->btf);
7347 if (btf_fd >= 0 && kernel_supports(obj, FEAT_BTF_FUNC)) {
7348 load_attr.prog_btf_fd = btf_fd;
7349 load_attr.func_info = prog->func_info;
7350 load_attr.func_info_rec_size = prog->func_info_rec_size;
7351 load_attr.func_info_cnt = prog->func_info_cnt;
7352 load_attr.line_info = prog->line_info;
7353 load_attr.line_info_rec_size = prog->line_info_rec_size;
7354 load_attr.line_info_cnt = prog->line_info_cnt;
7356 load_attr.log_level = log_level;
7357 load_attr.prog_flags = prog->prog_flags;
7358 load_attr.fd_array = obj->fd_array;
7360 load_attr.token_fd = obj->token_fd;
7362 load_attr.prog_flags |= BPF_F_TOKEN_FD;
7364 /* adjust load_attr if sec_def provides custom preload callback */
7365 if (prog->sec_def && prog->sec_def->prog_prepare_load_fn) {
7366 err = prog->sec_def->prog_prepare_load_fn(prog, &load_attr, prog->sec_def->cookie);
7368 pr_warn("prog '%s': failed to prepare load attributes: %d\n",
7372 insns = prog->insns;
7373 insns_cnt = prog->insns_cnt;
7376 /* allow prog_prepare_load_fn to change expected_attach_type */
7377 load_attr.expected_attach_type = prog->expected_attach_type;
7379 if (obj->gen_loader) {
7380 bpf_gen__prog_load(obj->gen_loader, prog->type, prog->name,
7381 license, insns, insns_cnt, &load_attr,
7382 prog - obj->programs);
7388 /* if log_level is zero, we don't request logs initially even if
7389 * custom log_buf is specified; if the program load fails, then we'll
7390 * bump log_level to 1 and use either custom log_buf or we'll allocate
7391 * our own and retry the load to get details on what failed */
7394 if (prog->log_buf) {
7395 log_buf = prog->log_buf;
7396 log_buf_size = prog->log_size;
7397 own_log_buf = false;
7398 } else if (obj->log_buf) {
7399 log_buf = obj->log_buf;
7400 log_buf_size = obj->log_size;
7401 own_log_buf = false;
7403 log_buf_size = max((size_t)BPF_LOG_BUF_SIZE, log_buf_size * 2);
7404 tmp = realloc(log_buf, log_buf_size);
7415 load_attr.log_buf = log_buf;
7416 load_attr.log_size = log_buf_size;
7417 load_attr.log_level = log_level;
7419 ret = bpf_prog_load(prog->type, prog_name, license, insns, insns_cnt, &load_attr);
7421 if (log_level && own_log_buf) {
7422 pr_debug("prog '%s': -- BEGIN PROG LOAD LOG --\n%s-- END PROG LOAD LOG --\n",
7423 prog->name, log_buf);
7426 if (obj->has_rodata && kernel_supports(obj, FEAT_PROG_BIND_MAP)) {
7427 struct bpf_map *map;
7430 for (i = 0; i < obj->nr_maps; i++) {
7431 map = &prog->obj->maps[i];
7432 if (map->libbpf_type != LIBBPF_MAP_RODATA)
7435 if (bpf_prog_bind_map(ret, map->fd, NULL)) {
7436 cp = libbpf_strerror_r(errno, errmsg, sizeof(errmsg));
7437 pr_warn("prog '%s': failed to bind map '%s': %s\n",
7438 prog->name, map->real_name, cp);
7439 /* Don't fail hard if we can't bind rodata. */
7449 if (log_level == 0) {
7453 /* On ENOSPC, increase log buffer size and retry, unless custom
7454 * log_buf is specified.
7455 * Be careful to not overflow u32, though. Kernel's log buf size limit
7456 * isn't part of UAPI so it can always be bumped to full 4GB. So don't
7457 * multiply by 2 unless we are sure we'll fit within 32 bits.
7458 * Currently, we'll get -EINVAL when we reach (UINT_MAX >> 2). */
7460 if (own_log_buf && errno == ENOSPC && log_buf_size <= UINT_MAX / 2)
7465 /* post-process verifier log to improve error descriptions */
7466 fixup_verifier_log(prog, log_buf, log_buf_size);
7468 cp = libbpf_strerror_r(errno, errmsg, sizeof(errmsg));
7469 pr_warn("prog '%s': BPF program load failed: %s\n", prog->name, cp);
7472 if (own_log_buf && log_buf && log_buf[0] != '\0') {
7473 pr_warn("prog '%s': -- BEGIN PROG LOAD LOG --\n%s-- END PROG LOAD LOG --\n",
7474 prog->name, log_buf);
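/* Added usage sketch: callers who want the verifier log without libbpf's
 * own retry/allocation logic can provide a buffer up front, e.g.:
 *
 *   static char vlog[1024 * 1024];
 *   bpf_program__set_log_buf(prog, vlog, sizeof(vlog));
 *   bpf_program__set_log_level(prog, 1);
 *
 * then own_log_buf stays false above and the buffer is never resized
 * (an ENOSPC from the kernel is reported to the caller instead).
 */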
7483 static char *find_prev_line(char *buf, char *cur)
7487 if (cur == buf) /* end of a log buf */
7491 while (p - 1 >= buf && *(p - 1) != '\n')
7497 static void patch_log(char *buf, size_t buf_sz, size_t log_sz,
7498 char *orig, size_t orig_sz, const char *patch)
7500 /* size of the remaining log content to the right of the to-be-replaced part */
7501 size_t rem_sz = (buf + log_sz) - (orig + orig_sz);
7502 size_t patch_sz = strlen(patch);
7504 if (patch_sz != orig_sz) {
7505 /* If patch line(s) are longer than original piece of verifier log,
7506 * shift log contents by (patch_sz - orig_sz) bytes to the right
7507 * starting from after to-be-replaced part of the log.
7509 * If patch line(s) are shorter than original piece of verifier log,
7510 * shift log contents by (orig_sz - patch_sz) bytes to the left
7511 * starting from after to-be-replaced part of the log
7513 * We need to be careful about not overflowing available
7514 * buf_sz capacity. If that's the case, we'll truncate the end
7515 * of the original log, as necessary. */
7517 if (patch_sz > orig_sz) {
7518 if (orig + patch_sz >= buf + buf_sz) {
7519 /* patch is big enough to cover remaining space completely */
7520 patch_sz -= (orig + patch_sz) - (buf + buf_sz) + 1;
7522 } else if (patch_sz - orig_sz > buf_sz - log_sz) {
7523 /* patch causes part of remaining log to be truncated */
7524 rem_sz -= (patch_sz - orig_sz) - (buf_sz - log_sz);
7527 /* shift remaining log to the right by calculated amount */
7528 memmove(orig + patch_sz, orig + orig_sz, rem_sz);
7531 memcpy(orig, patch, patch_sz);
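/* Added worked example: with buf = "A\nBAD\nC\n" (log_sz = 9),
 * orig = "BAD\n" (orig_sz = 4), and patch = "GOOD\n" (patch_sz = 5),
 * the memmove() shifts "C\n\0" right by one byte and the memcpy()
 * drops the patch in place, yielding "A\nGOOD\nC\n"; if buf_sz were
 * too small, the tail of the log would be truncated instead.
 */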
7534 static void fixup_log_failed_core_relo(struct bpf_program *prog,
7535 char *buf, size_t buf_sz, size_t log_sz,
7536 char *line1, char *line2, char *line3)
7538 /* Expected log for failed and not properly guarded CO-RE relocation:
7539 * line1 -> 123: (85) call unknown#195896080
7540 * line2 -> invalid func unknown#195896080
7541 * line3 -> <anything else or end of buffer>
7543 * "123" is the index of the instruction that was poisoned. We extract
7544 * instruction index to find corresponding CO-RE relocation and
7545 * replace this part of the log with more relevant information about
7546 * failed CO-RE relocation. */
7548 const struct bpf_core_relo *relo;
7549 struct bpf_core_spec spec;
7550 char patch[512], spec_buf[256];
7551 int insn_idx, err, spec_len;
7553 if (sscanf(line1, "%d: (%*d) call unknown#195896080\n", &insn_idx) != 1)
7556 relo = find_relo_core(prog, insn_idx);
7560 err = bpf_core_parse_spec(prog->name, prog->obj->btf, relo, &spec);
7564 spec_len = bpf_core_format_spec(spec_buf, sizeof(spec_buf), &spec);
7565 snprintf(patch, sizeof(patch),
7566 "%d: <invalid CO-RE relocation>\n"
7567 "failed to resolve CO-RE relocation %s%s\n",
7568 insn_idx, spec_buf, spec_len >= sizeof(spec_buf) ? "..." : "");
7570 patch_log(buf, buf_sz, log_sz, line1, line3 - line1, patch);
7573 static void fixup_log_missing_map_load(struct bpf_program *prog,
7574 char *buf, size_t buf_sz, size_t log_sz,
7575 char *line1, char *line2, char *line3)
7577 /* Expected log for failed and not properly guarded map reference:
7578 * line1 -> 123: (85) call unknown#2001000345
7579 * line2 -> invalid func unknown#2001000345
7580 * line3 -> <anything else or end of buffer>
7582 * "123" is the index of the instruction that was poisoned.
7583 * "345" in "2001000345" is a map index in obj->maps to fetch map name.
7585 struct bpf_object *obj = prog->obj;
7586 const struct bpf_map *map;
7587 int insn_idx, map_idx;
7590 if (sscanf(line1, "%d: (%*d) call unknown#%d\n", &insn_idx, &map_idx) != 2)
7593 map_idx -= POISON_LDIMM64_MAP_BASE;
7594 if (map_idx < 0 || map_idx >= obj->nr_maps)
7596 map = &obj->maps[map_idx];
7598 snprintf(patch, sizeof(patch),
7599 "%d: <invalid BPF map reference>\n"
7600 "BPF map '%s' is referenced but wasn't created\n",
7601 insn_idx, map->name);
7603 patch_log(buf, buf_sz, log_sz, line1, line3 - line1, patch);
7606 static void fixup_log_missing_kfunc_call(struct bpf_program *prog,
7607 char *buf, size_t buf_sz, size_t log_sz,
7608 char *line1, char *line2, char *line3)
7610 /* Expected log for failed and not properly guarded kfunc call:
7611 * line1 -> 123: (85) call unknown#2002000345
7612 * line2 -> invalid func unknown#2002000345
7613 * line3 -> <anything else or end of buffer>
7615 * "123" is the index of the instruction that was poisoned.
7616 * "345" in "2002000345" is an extern index in obj->externs to fetch kfunc name.
7618 struct bpf_object *obj = prog->obj;
7619 const struct extern_desc *ext;
7620 int insn_idx, ext_idx;
7623 if (sscanf(line1, "%d: (%*d) call unknown#%d\n", &insn_idx, &ext_idx) != 2)
7626 ext_idx -= POISON_CALL_KFUNC_BASE;
7627 if (ext_idx < 0 || ext_idx >= obj->nr_extern)
7629 ext = &obj->externs[ext_idx];
7631 snprintf(patch, sizeof(patch),
7632 "%d: <invalid kfunc call>\n"
7633 "kfunc '%s' is referenced but wasn't resolved\n",
7634 insn_idx, ext->name);
7636 patch_log(buf, buf_sz, log_sz, line1, line3 - line1, patch);
7639 static void fixup_verifier_log(struct bpf_program *prog, char *buf, size_t buf_sz)
7641 /* look for familiar error patterns in last N lines of the log */
7642 const size_t max_last_line_cnt = 10;
7643 char *prev_line, *cur_line, *next_line;
7650 log_sz = strlen(buf) + 1;
7651 next_line = buf + log_sz - 1;
7653 for (i = 0; i < max_last_line_cnt; i++, next_line = cur_line) {
7654 cur_line = find_prev_line(buf, next_line);
7658 if (str_has_pfx(cur_line, "invalid func unknown#195896080\n")) {
7659 prev_line = find_prev_line(buf, cur_line);
7663 /* failed CO-RE relocation case */
7664 fixup_log_failed_core_relo(prog, buf, buf_sz, log_sz,
7665 prev_line, cur_line, next_line);
7667 } else if (str_has_pfx(cur_line, "invalid func unknown#"POISON_LDIMM64_MAP_PFX)) {
7668 prev_line = find_prev_line(buf, cur_line);
7672 /* reference to uncreated BPF map */
7673 fixup_log_missing_map_load(prog, buf, buf_sz, log_sz,
7674 prev_line, cur_line, next_line);
7676 } else if (str_has_pfx(cur_line, "invalid func unknown#"POISON_CALL_KFUNC_PFX)) {
7677 prev_line = find_prev_line(buf, cur_line);
7681 /* reference to unresolved kfunc */
7682 fixup_log_missing_kfunc_call(prog, buf, buf_sz, log_sz,
7683 prev_line, cur_line, next_line);
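/* Added recap of the poisoning scheme the three fixups above decode
 * (base constants per the comments above and libbpf_internal.h): a failed
 * CO-RE relocation becomes "call unknown#195896080" (0xbad2310, i.e.
 * "bad relo"), a skipped map load becomes
 * "call unknown#<2001000000 + map_idx>", and an unresolved kfunc becomes
 * "call unknown#<2002000000 + ext_idx>", which is why the sscanf() calls
 * on line1 can recover both the insn index and the object index.
 */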
7689 static int bpf_program_record_relos(struct bpf_program *prog)
7691 struct bpf_object *obj = prog->obj;
7694 for (i = 0; i < prog->nr_reloc; i++) {
7695 struct reloc_desc *relo = &prog->reloc_desc[i];
7696 struct extern_desc *ext = &obj->externs[relo->ext_idx];
7699 switch (relo->type) {
7700 case RELO_EXTERN_LD64:
7701 if (ext->type != EXT_KSYM)
7703 kind = btf_is_var(btf__type_by_id(obj->btf, ext->btf_id)) ?
7704 BTF_KIND_VAR : BTF_KIND_FUNC;
7705 bpf_gen__record_extern(obj->gen_loader, ext->name,
7706 ext->is_weak, !ext->ksym.type_id,
7707 true, kind, relo->insn_idx);
7709 case RELO_EXTERN_CALL:
7710 bpf_gen__record_extern(obj->gen_loader, ext->name,
7711 ext->is_weak, false, false, BTF_KIND_FUNC,
7715 struct bpf_core_relo cr = {
7716 .insn_off = relo->insn_idx * 8,
7717 .type_id = relo->core_relo->type_id,
7718 .access_str_off = relo->core_relo->access_str_off,
7719 .kind = relo->core_relo->kind,
7722 bpf_gen__record_relo_core(obj->gen_loader, &cr);
7733 bpf_object__load_progs(struct bpf_object *obj, int log_level)
7735 struct bpf_program *prog;
7739 for (i = 0; i < obj->nr_programs; i++) {
7740 prog = &obj->programs[i];
7741 err = bpf_object__sanitize_prog(obj, prog);
7746 for (i = 0; i < obj->nr_programs; i++) {
7747 prog = &obj->programs[i];
7748 if (prog_is_subprog(obj, prog))
7750 if (!prog->autoload) {
7751 pr_debug("prog '%s': skipped loading\n", prog->name);
7754 prog->log_level |= log_level;
7756 if (obj->gen_loader)
7757 bpf_program_record_relos(prog);
7759 err = bpf_object_load_prog(obj, prog, prog->insns, prog->insns_cnt,
7760 obj->license, obj->kern_version, &prog->fd);
7762 pr_warn("prog '%s': failed to load: %d\n", prog->name, err);
7767 bpf_object__free_relocs(obj);
7771 static const struct bpf_sec_def *find_sec_def(const char *sec_name);
7773 static int bpf_object_init_progs(struct bpf_object *obj, const struct bpf_object_open_opts *opts)
7775 struct bpf_program *prog;
7778 bpf_object__for_each_program(prog, obj) {
7779 prog->sec_def = find_sec_def(prog->sec_name);
7780 if (!prog->sec_def) {
7781 /* couldn't guess, but user might manually specify */
7782 pr_debug("prog '%s': unrecognized ELF section name '%s'\n",
7783 prog->name, prog->sec_name);
7787 prog->type = prog->sec_def->prog_type;
7788 prog->expected_attach_type = prog->sec_def->expected_attach_type;
7790 /* sec_def can have custom callback which should be called
7791 * after bpf_program is initialized to adjust its properties */
7793 if (prog->sec_def->prog_setup_fn) {
7794 err = prog->sec_def->prog_setup_fn(prog, prog->sec_def->cookie);
7796 pr_warn("prog '%s': failed to initialize: %d\n",
7806 static struct bpf_object *bpf_object_open(const char *path, const void *obj_buf, size_t obj_buf_sz,
7807 const struct bpf_object_open_opts *opts)
7809 const char *obj_name, *kconfig, *btf_tmp_path, *token_path;
7810 struct bpf_object *obj;
7817 if (elf_version(EV_CURRENT) == EV_NONE) {
7818 pr_warn("failed to init libelf for %s\n",
7819 path ? : "(mem buf)");
7820 return ERR_PTR(-LIBBPF_ERRNO__LIBELF);
7823 if (!OPTS_VALID(opts, bpf_object_open_opts))
7824 return ERR_PTR(-EINVAL);
7826 obj_name = OPTS_GET(opts, object_name, NULL);
7829 snprintf(tmp_name, sizeof(tmp_name), "%lx-%lx",
7830 (unsigned long)obj_buf,
7831 (unsigned long)obj_buf_sz);
7832 obj_name = tmp_name;
7835 pr_debug("loading object '%s' from buffer\n", obj_name);
7838 log_buf = OPTS_GET(opts, kernel_log_buf, NULL);
7839 log_size = OPTS_GET(opts, kernel_log_size, 0);
7840 log_level = OPTS_GET(opts, kernel_log_level, 0);
7841 if (log_size > UINT_MAX)
7842 return ERR_PTR(-EINVAL);
7843 if (log_size && !log_buf)
7844 return ERR_PTR(-EINVAL);
7846 token_path = OPTS_GET(opts, bpf_token_path, NULL);
7847 /* if user didn't specify bpf_token_path explicitly, check if
7848 * LIBBPF_BPF_TOKEN_PATH envvar was set and treat it as bpf_token_path */
7852 token_path = getenv("LIBBPF_BPF_TOKEN_PATH");
7853 if (token_path && strlen(token_path) >= PATH_MAX)
7854 return ERR_PTR(-ENAMETOOLONG);
7856 obj = bpf_object__new(path, obj_buf, obj_buf_sz, obj_name);
7860 obj->log_buf = log_buf;
7861 obj->log_size = log_size;
7862 obj->log_level = log_level;
7865 obj->token_path = strdup(token_path);
7866 if (!obj->token_path) {
7872 btf_tmp_path = OPTS_GET(opts, btf_custom_path, NULL);
7874 if (strlen(btf_tmp_path) >= PATH_MAX) {
7875 err = -ENAMETOOLONG;
7878 obj->btf_custom_path = strdup(btf_tmp_path);
7879 if (!obj->btf_custom_path) {
7885 kconfig = OPTS_GET(opts, kconfig, NULL);
7887 obj->kconfig = strdup(kconfig);
7888 if (!obj->kconfig) {
7894 err = bpf_object__elf_init(obj);
7895 err = err ? : bpf_object__check_endianness(obj);
7896 err = err ? : bpf_object__elf_collect(obj);
7897 err = err ? : bpf_object__collect_externs(obj);
7898 err = err ? : bpf_object_fixup_btf(obj);
7899 err = err ? : bpf_object__init_maps(obj, opts);
7900 err = err ? : bpf_object_init_progs(obj, opts);
7901 err = err ? : bpf_object__collect_relos(obj);
7905 bpf_object__elf_finish(obj);
7909 bpf_object__close(obj);
7910 return ERR_PTR(err);
7914 bpf_object__open_file(const char *path, const struct bpf_object_open_opts *opts)
7917 return libbpf_err_ptr(-EINVAL);
7919 pr_debug("loading %s\n", path);
7921 return libbpf_ptr(bpf_object_open(path, NULL, 0, opts));
7924 struct bpf_object *bpf_object__open(const char *path)
7926 return bpf_object__open_file(path, NULL);
7930 bpf_object__open_mem(const void *obj_buf, size_t obj_buf_sz,
7931 const struct bpf_object_open_opts *opts)
7933 if (!obj_buf || obj_buf_sz == 0)
7934 return libbpf_err_ptr(-EINVAL);
7936 return libbpf_ptr(bpf_object_open(NULL, obj_buf, obj_buf_sz, opts));
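/* Added usage sketch of the open/load lifecycle (libbpf 1.0 error
 * conventions: NULL return with errno set; "prog.bpf.o" is a placeholder
 * path):
 *
 *   struct bpf_object *obj = bpf_object__open("prog.bpf.o");
 *   if (!obj)
 *       return -errno;
 *   if (bpf_object__load(obj)) {
 *       bpf_object__close(obj);
 *       return -errno;
 *   }
 *   ...attach programs, use maps...
 *   bpf_object__close(obj);
 */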
7939 static int bpf_object_unload(struct bpf_object *obj)
7944 return libbpf_err(-EINVAL);
7946 for (i = 0; i < obj->nr_maps; i++) {
7947 zclose(obj->maps[i].fd);
7948 if (obj->maps[i].st_ops)
7949 zfree(&obj->maps[i].st_ops->kern_vdata);
7952 for (i = 0; i < obj->nr_programs; i++)
7953 bpf_program__unload(&obj->programs[i]);
7958 static int bpf_object__sanitize_maps(struct bpf_object *obj)
7962 bpf_object__for_each_map(m, obj) {
7963 if (!bpf_map__is_internal(m))
7965 if (!kernel_supports(obj, FEAT_ARRAY_MMAP))
7966 m->def.map_flags &= ~BPF_F_MMAPABLE;
7972 int libbpf_kallsyms_parse(kallsyms_cb_t cb, void *ctx)
7974 char sym_type, sym_name[500];
7975 unsigned long long sym_addr;
7979 f = fopen("/proc/kallsyms", "re");
7982 pr_warn("failed to open /proc/kallsyms: %d\n", err);
7987 ret = fscanf(f, "%llx %c %499s%*[^\n]\n",
7988 &sym_addr, &sym_type, sym_name);
7989 if (ret == EOF && feof(f))
7992 pr_warn("failed to read kallsyms entry: %d\n", ret);
7997 err = cb(sym_addr, sym_type, sym_name, ctx);
8006 static int kallsyms_cb(unsigned long long sym_addr, char sym_type,
8007 const char *sym_name, void *ctx)
8009 struct bpf_object *obj = ctx;
8010 const struct btf_type *t;
8011 struct extern_desc *ext;
8013 ext = find_extern_by_name(obj, sym_name);
8014 if (!ext || ext->type != EXT_KSYM)
8017 t = btf__type_by_id(obj->btf, ext->btf_id);
8021 if (ext->is_set && ext->ksym.addr != sym_addr) {
8022 pr_warn("extern (ksym) '%s': resolution is ambiguous: 0x%llx or 0x%llx\n",
8023 sym_name, ext->ksym.addr, sym_addr);
8028 ext->ksym.addr = sym_addr;
8029 pr_debug("extern (ksym) '%s': set to 0x%llx\n", sym_name, sym_addr);
8034 static int bpf_object__read_kallsyms_file(struct bpf_object *obj)
8036 return libbpf_kallsyms_parse(kallsyms_cb, obj);
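/* Added illustrative (hypothetical) callback for libbpf_kallsyms_parse():
 *
 *   static int dump_sym(unsigned long long addr, char type,
 *                       const char *name, void *ctx)
 *   {
 *       printf("%llx %c %s\n", addr, type, name);
 *       return 0;    // any non-zero value stops the iteration
 *   }
 *
 *   err = libbpf_kallsyms_parse(dump_sym, NULL);
 */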
8039 static int find_ksym_btf_id(struct bpf_object *obj, const char *ksym_name,
8040 __u16 kind, struct btf **res_btf,
8041 struct module_btf **res_mod_btf)
8043 struct module_btf *mod_btf;
8047 btf = obj->btf_vmlinux;
8049 id = btf__find_by_name_kind(btf, ksym_name, kind);
8051 if (id == -ENOENT) {
8052 err = load_module_btfs(obj);
8056 for (i = 0; i < obj->btf_module_cnt; i++) {
8057 /* we assume module_btf's BTF FD is always >0 */
8058 mod_btf = &obj->btf_modules[i];
8060 id = btf__find_by_name_kind_own(btf, ksym_name, kind);
8069 *res_mod_btf = mod_btf;
8073 static int bpf_object__resolve_ksym_var_btf_id(struct bpf_object *obj,
8074 struct extern_desc *ext)
8076 const struct btf_type *targ_var, *targ_type;
8077 __u32 targ_type_id, local_type_id;
8078 struct module_btf *mod_btf = NULL;
8079 const char *targ_var_name;
8080 struct btf *btf = NULL;
8083 id = find_ksym_btf_id(obj, ext->name, BTF_KIND_VAR, &btf, &mod_btf);
8085 if (id == -ESRCH && ext->is_weak)
8087 pr_warn("extern (var ksym) '%s': not found in kernel BTF\n",
8092 /* find local type_id */
8093 local_type_id = ext->ksym.type_id;
8095 /* find target type_id */
8096 targ_var = btf__type_by_id(btf, id);
8097 targ_var_name = btf__name_by_offset(btf, targ_var->name_off);
8098 targ_type = skip_mods_and_typedefs(btf, targ_var->type, &targ_type_id);
8100 err = bpf_core_types_are_compat(obj->btf, local_type_id,
8103 const struct btf_type *local_type;
8104 const char *targ_name, *local_name;
8106 local_type = btf__type_by_id(obj->btf, local_type_id);
8107 local_name = btf__name_by_offset(obj->btf, local_type->name_off);
8108 targ_name = btf__name_by_offset(btf, targ_type->name_off);
8110 pr_warn("extern (var ksym) '%s': incompatible types, expected [%d] %s %s, but kernel has [%d] %s %s\n",
8111 ext->name, local_type_id,
8112 btf_kind_str(local_type), local_name, targ_type_id,
8113 btf_kind_str(targ_type), targ_name);
8118 ext->ksym.kernel_btf_obj_fd = mod_btf ? mod_btf->fd : 0;
8119 ext->ksym.kernel_btf_id = id;
8120 pr_debug("extern (var ksym) '%s': resolved to [%d] %s %s\n",
8121 ext->name, id, btf_kind_str(targ_var), targ_var_name);
8126 static int bpf_object__resolve_ksym_func_btf_id(struct bpf_object *obj,
8127 struct extern_desc *ext)
8129 int local_func_proto_id, kfunc_proto_id, kfunc_id;
8130 struct module_btf *mod_btf = NULL;
8131 const struct btf_type *kern_func;
8132 struct btf *kern_btf = NULL;
8135 local_func_proto_id = ext->ksym.type_id;
8137 kfunc_id = find_ksym_btf_id(obj, ext->essent_name ?: ext->name, BTF_KIND_FUNC, &kern_btf,
8140 if (kfunc_id == -ESRCH && ext->is_weak)
8142 pr_warn("extern (func ksym) '%s': not found in kernel or module BTFs\n",
8147 kern_func = btf__type_by_id(kern_btf, kfunc_id);
8148 kfunc_proto_id = kern_func->type;
8150 ret = bpf_core_types_are_compat(obj->btf, local_func_proto_id,
8151 kern_btf, kfunc_proto_id);
8156 pr_warn("extern (func ksym) '%s': func_proto [%d] incompatible with %s [%d]\n",
8157 ext->name, local_func_proto_id,
8158 mod_btf ? mod_btf->name : "vmlinux", kfunc_proto_id);
8162 /* set index for module BTF fd in fd_array, if unset */
8163 if (mod_btf && !mod_btf->fd_array_idx) {
8164 /* insn->off is s16 */
8165 if (obj->fd_array_cnt == INT16_MAX) {
8166 pr_warn("extern (func ksym) '%s': module BTF fd index %d too big to fit in bpf_insn offset\n",
8167 ext->name, mod_btf->fd_array_idx);
8170 /* Cannot use index 0 for module BTF fd */
8171 if (!obj->fd_array_cnt)
8172 obj->fd_array_cnt = 1;
8174 ret = libbpf_ensure_mem((void **)&obj->fd_array, &obj->fd_array_cap, sizeof(int),
8175 obj->fd_array_cnt + 1);
8178 mod_btf->fd_array_idx = obj->fd_array_cnt;
8179 /* we assume module BTF FD is always >0 */
8180 obj->fd_array[obj->fd_array_cnt++] = mod_btf->fd;
8184 ext->ksym.kernel_btf_id = kfunc_id;
8185 ext->ksym.btf_fd_idx = mod_btf ? mod_btf->fd_array_idx : 0;
8186 /* Also set kernel_btf_obj_fd to make sure that bpf_object__relocate_data()
8187 * populates FD into ld_imm64 insn when it's used to point to kfunc.
8188 * {kernel_btf_id, btf_fd_idx} -> fixup bpf_call.
8189 * {kernel_btf_id, kernel_btf_obj_fd} -> fixup ld_imm64. */
8191 ext->ksym.kernel_btf_obj_fd = mod_btf ? mod_btf->fd : 0;
8192 pr_debug("extern (func ksym) '%s': resolved to %s [%d]\n",
8193 ext->name, mod_btf ? mod_btf->name : "vmlinux", kfunc_id);
8198 static int bpf_object__resolve_ksyms_btf_id(struct bpf_object *obj)
8200 const struct btf_type *t;
8201 struct extern_desc *ext;
8204 for (i = 0; i < obj->nr_extern; i++) {
8205 ext = &obj->externs[i];
8206 if (ext->type != EXT_KSYM || !ext->ksym.type_id)
8209 if (obj->gen_loader) {
8211 ext->ksym.kernel_btf_obj_fd = 0;
8212 ext->ksym.kernel_btf_id = 0;
8215 t = btf__type_by_id(obj->btf, ext->btf_id);
8217 err = bpf_object__resolve_ksym_var_btf_id(obj, ext);
8219 err = bpf_object__resolve_ksym_func_btf_id(obj, ext);
8226 static int bpf_object__resolve_externs(struct bpf_object *obj,
8227 const char *extra_kconfig)
8229 bool need_config = false, need_kallsyms = false;
8230 bool need_vmlinux_btf = false;
8231 struct extern_desc *ext;
8232 void *kcfg_data = NULL;
8235 if (obj->nr_extern == 0)
8238 if (obj->kconfig_map_idx >= 0)
8239 kcfg_data = obj->maps[obj->kconfig_map_idx].mmaped;
8241 for (i = 0; i < obj->nr_extern; i++) {
8242 ext = &obj->externs[i];
8244 if (ext->type == EXT_KSYM) {
8245 if (ext->ksym.type_id)
8246 need_vmlinux_btf = true;
8248 need_kallsyms = true;
8250 } else if (ext->type == EXT_KCFG) {
8251 void *ext_ptr = kcfg_data + ext->kcfg.data_off;
8254 /* Kconfig externs need actual /proc/config.gz */
8255 if (str_has_pfx(ext->name, "CONFIG_")) {
8260 /* Virtual kcfg externs are handled specially by libbpf */
8261 if (strcmp(ext->name, "LINUX_KERNEL_VERSION") == 0) {
8262 value = get_kernel_version();
8264 pr_warn("extern (kcfg) '%s': failed to get kernel version\n", ext->name);
8267 } else if (strcmp(ext->name, "LINUX_HAS_BPF_COOKIE") == 0) {
8268 value = kernel_supports(obj, FEAT_BPF_COOKIE);
8269 } else if (strcmp(ext->name, "LINUX_HAS_SYSCALL_WRAPPER") == 0) {
8270 value = kernel_supports(obj, FEAT_SYSCALL_WRAPPER);
8271 } else if (!str_has_pfx(ext->name, "LINUX_") || !ext->is_weak) {
8272 /* Currently libbpf supports only CONFIG_- and LINUX_-prefixed
8273 * __kconfig externs, where LINUX_ ones are virtual and filled out
8274 * specially by libbpf (their values don't come from Kconfig).
8275 * If a LINUX_xxx variable is not recognized by libbpf, but is marked
8276 * __weak, it defaults to zero, just like CONFIG_xxx externs do. */
8283 err = set_kcfg_value_num(ext, ext_ptr, value);
8286 pr_debug("extern (kcfg) '%s': set to 0x%llx\n",
8287 ext->name, (long long)value);
8289 pr_warn("extern '%s': unrecognized extern kind\n", ext->name);
8293 if (need_config && extra_kconfig) {
8294 err = bpf_object__read_kconfig_mem(obj, extra_kconfig, kcfg_data);
8297 need_config = false;
8298 for (i = 0; i < obj->nr_extern; i++) {
8299 ext = &obj->externs[i];
8300 if (ext->type == EXT_KCFG && !ext->is_set) {
8307 err = bpf_object__read_kconfig_file(obj, kcfg_data);
8311 if (need_kallsyms) {
8312 err = bpf_object__read_kallsyms_file(obj);
8316 if (need_vmlinux_btf) {
8317 err = bpf_object__resolve_ksyms_btf_id(obj);
8321 for (i = 0; i < obj->nr_extern; i++) {
8322 ext = &obj->externs[i];
8324 if (!ext->is_set && !ext->is_weak) {
8325 pr_warn("extern '%s' (strong): not resolved\n", ext->name);
8327 } else if (!ext->is_set) {
8328 pr_debug("extern '%s' (weak): not resolved, defaulting to zero\n",
8336 static void bpf_map_prepare_vdata(const struct bpf_map *map)
8338 struct bpf_struct_ops *st_ops;
8341 st_ops = map->st_ops;
8342 for (i = 0; i < btf_vlen(st_ops->type); i++) {
8343 struct bpf_program *prog = st_ops->progs[i];
8350 prog_fd = bpf_program__fd(prog);
8351 kern_data = st_ops->kern_vdata + st_ops->kern_func_off[i];
8352 *(unsigned long *)kern_data = prog_fd;
8356 static int bpf_object_prepare_struct_ops(struct bpf_object *obj)
8358 struct bpf_map *map;
8361 for (i = 0; i < obj->nr_maps; i++) {
8362 map = &obj->maps[i];
8364 if (!bpf_map__is_struct_ops(map))
8367 if (!map->autocreate)
8370 bpf_map_prepare_vdata(map);
8376 static int bpf_object_load(struct bpf_object *obj, int extra_log_level, const char *target_btf_path)
8381 return libbpf_err(-EINVAL);
8384 pr_warn("object '%s': load can't be attempted twice\n", obj->name);
8385 return libbpf_err(-EINVAL);
8388 if (obj->gen_loader)
8389 bpf_gen__init(obj->gen_loader, extra_log_level, obj->nr_programs, obj->nr_maps);
8391 err = bpf_object_prepare_token(obj);
8392 err = err ? : bpf_object__probe_loading(obj);
8393 err = err ? : bpf_object__load_vmlinux_btf(obj, false);
8394 err = err ? : bpf_object__resolve_externs(obj, obj->kconfig);
8395 err = err ? : bpf_object__sanitize_maps(obj);
8396 err = err ? : bpf_object__init_kern_struct_ops_maps(obj);
8397 err = err ? : bpf_object_adjust_struct_ops_autoload(obj);
8398 err = err ? : bpf_object__relocate(obj, obj->btf_custom_path ? : target_btf_path);
8399 err = err ? : bpf_object__sanitize_and_load_btf(obj);
8400 err = err ? : bpf_object__create_maps(obj);
8401 err = err ? : bpf_object__load_progs(obj, extra_log_level);
8402 err = err ? : bpf_object_init_prog_arrays(obj);
8403 err = err ? : bpf_object_prepare_struct_ops(obj);
8405 if (obj->gen_loader) {
8408 btf__set_fd(obj->btf, -1);
8410 err = bpf_gen__finish(obj->gen_loader, obj->nr_programs, obj->nr_maps);
8413 /* clean up fd_array */
8414 zfree(&obj->fd_array);
8416 /* clean up module BTFs */
8417 for (i = 0; i < obj->btf_module_cnt; i++) {
8418 close(obj->btf_modules[i].fd);
8419 btf__free(obj->btf_modules[i].btf);
8420 free(obj->btf_modules[i].name);
8422 free(obj->btf_modules);
8424 /* clean up vmlinux BTF */
8425 btf__free(obj->btf_vmlinux);
8426 obj->btf_vmlinux = NULL;
8428 obj->loaded = true; /* doesn't matter if successful or not */
8435 /* unpin any maps that were auto-pinned during load */
8436 for (i = 0; i < obj->nr_maps; i++)
8437 if (obj->maps[i].pinned && !obj->maps[i].reused)
8438 bpf_map__unpin(&obj->maps[i], NULL);
8440 bpf_object_unload(obj);
8441 pr_warn("failed to load object '%s'\n", obj->path);
8442 return libbpf_err(err);
8445 int bpf_object__load(struct bpf_object *obj)
8447 return bpf_object_load(obj, 0, NULL);
8450 static int make_parent_dir(const char *path)
8452 char *cp, errmsg[STRERR_BUFSIZE];
8456 dname = strdup(path);
8460 dir = dirname(dname);
8461 if (mkdir(dir, 0700) && errno != EEXIST)
8466 cp = libbpf_strerror_r(-err, errmsg, sizeof(errmsg));
8467 pr_warn("failed to mkdir %s: %s\n", path, cp);
8472 static int check_path(const char *path)
8474 char *cp, errmsg[STRERR_BUFSIZE];
8475 struct statfs st_fs;
8482 dname = strdup(path);
8486 dir = dirname(dname);
8487 if (statfs(dir, &st_fs)) {
8488 cp = libbpf_strerror_r(errno, errmsg, sizeof(errmsg));
8489 pr_warn("failed to statfs %s: %s\n", dir, cp);
8494 if (!err && st_fs.f_type != BPF_FS_MAGIC) {
8495 pr_warn("specified path %s is not on BPF FS\n", path);
8502 int bpf_program__pin(struct bpf_program *prog, const char *path)
8504 char *cp, errmsg[STRERR_BUFSIZE];
8508 pr_warn("prog '%s': can't pin program that wasn't loaded\n", prog->name);
8509 return libbpf_err(-EINVAL);
8512 err = make_parent_dir(path);
8514 return libbpf_err(err);
8516 err = check_path(path);
8518 return libbpf_err(err);
8520 if (bpf_obj_pin(prog->fd, path)) {
8522 cp = libbpf_strerror_r(err, errmsg, sizeof(errmsg));
8523 pr_warn("prog '%s': failed to pin at '%s': %s\n", prog->name, path, cp);
8524 return libbpf_err(err);
8527 pr_debug("prog '%s': pinned at '%s'\n", prog->name, path);
8531 int bpf_program__unpin(struct bpf_program *prog, const char *path)
8536 pr_warn("prog '%s': can't unpin program that wasn't loaded\n", prog->name);
8537 return libbpf_err(-EINVAL);
8540 err = check_path(path);
8542 return libbpf_err(err);
8546 return libbpf_err(-errno);
8548 pr_debug("prog '%s': unpinned from '%s'\n", prog->name, path);
8552 int bpf_map__pin(struct bpf_map *map, const char *path)
8554 char *cp, errmsg[STRERR_BUFSIZE];
8558 pr_warn("invalid map pointer\n");
8559 return libbpf_err(-EINVAL);
8562 if (map->pin_path) {
8563 if (path && strcmp(path, map->pin_path)) {
8564 pr_warn("map '%s' already has pin path '%s' different from '%s'\n",
8565 bpf_map__name(map), map->pin_path, path);
8566 return libbpf_err(-EINVAL);
8567 } else if (map->pinned) {
8568 pr_debug("map '%s' already pinned at '%s'; not re-pinning\n",
8569 bpf_map__name(map), map->pin_path);
8574 pr_warn("missing a path to pin map '%s' at\n",
8575 bpf_map__name(map));
8576 return libbpf_err(-EINVAL);
8577 } else if (map->pinned) {
8578 pr_warn("map '%s' already pinned\n", bpf_map__name(map));
8579 return libbpf_err(-EEXIST);
8582 map->pin_path = strdup(path);
8583 if (!map->pin_path) {
8589 err = make_parent_dir(map->pin_path);
8591 return libbpf_err(err);
8593 err = check_path(map->pin_path);
8595 return libbpf_err(err);
8597 if (bpf_obj_pin(map->fd, map->pin_path)) {
8603 pr_debug("pinned map '%s'\n", map->pin_path);
8608 cp = libbpf_strerror_r(-err, errmsg, sizeof(errmsg));
8609 pr_warn("failed to pin map: %s\n", cp);
8610 return libbpf_err(err);
8613 int bpf_map__unpin(struct bpf_map *map, const char *path)
8618 pr_warn("invalid map pointer\n");
8619 return libbpf_err(-EINVAL);
8622 if (map->pin_path) {
8623 if (path && strcmp(path, map->pin_path)) {
8624 pr_warn("map '%s' already has pin path '%s' different from '%s'\n",
8625 bpf_map__name(map), map->pin_path, path);
8626 return libbpf_err(-EINVAL);
8628 path = map->pin_path;
8630 pr_warn("no path to unpin map '%s' from\n",
8631 bpf_map__name(map));
8632 return libbpf_err(-EINVAL);
8635 err = check_path(path);
8637 return libbpf_err(err);
8641 return libbpf_err(-errno);
8643 map->pinned = false;
8644 pr_debug("unpinned map '%s' from '%s'\n", bpf_map__name(map), path);
8649 int bpf_map__set_pin_path(struct bpf_map *map, const char *path)
8656 return libbpf_err(-errno);
8659 free(map->pin_path);
8660 map->pin_path = new;
8664 __alias(bpf_map__pin_path)
8665 const char *bpf_map__get_pin_path(const struct bpf_map *map);
8667 const char *bpf_map__pin_path(const struct bpf_map *map)
8669 return map->pin_path;
8672 bool bpf_map__is_pinned(const struct bpf_map *map)
8677 static void sanitize_pin_path(char *s)
8679 /* bpffs disallows periods in path names */
8687 int bpf_object__pin_maps(struct bpf_object *obj, const char *path)
8689 struct bpf_map *map;
8693 return libbpf_err(-ENOENT);
8696 pr_warn("object not yet loaded; load it first\n");
8697 return libbpf_err(-ENOENT);
8700 bpf_object__for_each_map(map, obj) {
8701 char *pin_path = NULL;
8704 if (!map->autocreate)
8708 err = pathname_concat(buf, sizeof(buf), path, bpf_map__name(map));
8710 goto err_unpin_maps;
8711 sanitize_pin_path(buf);
8713 } else if (!map->pin_path) {
8717 err = bpf_map__pin(map, pin_path);
8719 goto err_unpin_maps;
8725 while ((map = bpf_object__prev_map(obj, map))) {
8729 bpf_map__unpin(map, NULL);
8732 return libbpf_err(err);
8735 int bpf_object__unpin_maps(struct bpf_object *obj, const char *path)
8737 struct bpf_map *map;
8741 return libbpf_err(-ENOENT);
8743 bpf_object__for_each_map(map, obj) {
8744 char *pin_path = NULL;
8748 err = pathname_concat(buf, sizeof(buf), path, bpf_map__name(map));
8750 return libbpf_err(err);
8751 sanitize_pin_path(buf);
8753 } else if (!map->pin_path) {
8757 err = bpf_map__unpin(map, pin_path);
8759 return libbpf_err(err);
8765 int bpf_object__pin_programs(struct bpf_object *obj, const char *path)
8767 struct bpf_program *prog;
8772 return libbpf_err(-ENOENT);
8775 pr_warn("object not yet loaded; load it first\n");
8776 return libbpf_err(-ENOENT);
8779 bpf_object__for_each_program(prog, obj) {
8780 err = pathname_concat(buf, sizeof(buf), path, prog->name);
8782 goto err_unpin_programs;
8784 err = bpf_program__pin(prog, buf);
8786 goto err_unpin_programs;
8792 while ((prog = bpf_object__prev_program(obj, prog))) {
8793 if (pathname_concat(buf, sizeof(buf), path, prog->name))
8796 bpf_program__unpin(prog, buf);
8799 return libbpf_err(err);
8802 int bpf_object__unpin_programs(struct bpf_object *obj, const char *path)
8804 struct bpf_program *prog;
8808 return libbpf_err(-ENOENT);
8810 bpf_object__for_each_program(prog, obj) {
8813 err = pathname_concat(buf, sizeof(buf), path, prog->name);
8815 return libbpf_err(err);
8817 err = bpf_program__unpin(prog, buf);
8819 return libbpf_err(err);
8825 int bpf_object__pin(struct bpf_object *obj, const char *path)
8829 err = bpf_object__pin_maps(obj, path);
8831 return libbpf_err(err);
8833 err = bpf_object__pin_programs(obj, path);
8835 bpf_object__unpin_maps(obj, path);
8836 return libbpf_err(err);
8842 int bpf_object__unpin(struct bpf_object *obj, const char *path)
8846 err = bpf_object__unpin_programs(obj, path);
8848 return libbpf_err(err);
8850 err = bpf_object__unpin_maps(obj, path);
8852 return libbpf_err(err);
8857 static void bpf_map__destroy(struct bpf_map *map)
8859 if (map->inner_map) {
8860 bpf_map__destroy(map->inner_map);
8861 zfree(&map->inner_map);
8864 zfree(&map->init_slots);
8865 map->init_slots_sz = 0;
8867 if (map->mmaped && map->mmaped != map->obj->arena_data)
8868 munmap(map->mmaped, bpf_map_mmap_sz(map));
8872 zfree(&map->st_ops->data);
8873 zfree(&map->st_ops->progs);
8874 zfree(&map->st_ops->kern_func_off);
8875 zfree(&map->st_ops);
8879 zfree(&map->real_name);
8880 zfree(&map->pin_path);
8886 void bpf_object__close(struct bpf_object *obj)
8890 if (IS_ERR_OR_NULL(obj))
8893 usdt_manager_free(obj->usdt_man);
8894 obj->usdt_man = NULL;
8896 bpf_gen__free(obj->gen_loader);
8897 bpf_object__elf_finish(obj);
8898 bpf_object_unload(obj);
8899 btf__free(obj->btf);
8900 btf__free(obj->btf_vmlinux);
8901 btf_ext__free(obj->btf_ext);
8903 for (i = 0; i < obj->nr_maps; i++)
8904 bpf_map__destroy(&obj->maps[i]);
8906 zfree(&obj->btf_custom_path);
8907 zfree(&obj->kconfig);
8909 for (i = 0; i < obj->nr_extern; i++)
8910 zfree(&obj->externs[i].essent_name);
8912 zfree(&obj->externs);
8918 if (obj->programs && obj->nr_programs) {
8919 for (i = 0; i < obj->nr_programs; i++)
8920 bpf_program__exit(&obj->programs[i]);
8922 zfree(&obj->programs);
8924 zfree(&obj->feat_cache);
8925 zfree(&obj->token_path);
8926 if (obj->token_fd > 0)
8927 close(obj->token_fd);
8929 zfree(&obj->arena_data);
8934 const char *bpf_object__name(const struct bpf_object *obj)
8936 return obj ? obj->name : libbpf_err_ptr(-EINVAL);
8939 unsigned int bpf_object__kversion(const struct bpf_object *obj)
8941 return obj ? obj->kern_version : 0;
8944 struct btf *bpf_object__btf(const struct bpf_object *obj)
8946 return obj ? obj->btf : NULL;
8949 int bpf_object__btf_fd(const struct bpf_object *obj)
8951 return obj->btf ? btf__fd(obj->btf) : -1;
8954 int bpf_object__set_kversion(struct bpf_object *obj, __u32 kern_version)
8957 return libbpf_err(-EINVAL);
8959 obj->kern_version = kern_version;
8964 int bpf_object__gen_loader(struct bpf_object *obj, struct gen_loader_opts *opts)
8966 struct bpf_gen *gen;
8970 if (!OPTS_VALID(opts, gen_loader_opts))
8972 gen = calloc(1, sizeof(*gen));
8976 obj->gen_loader = gen;
8980 static struct bpf_program *
8981 __bpf_program__iter(const struct bpf_program *p, const struct bpf_object *obj,
8984 size_t nr_programs = obj->nr_programs;
8991 /* Iterate from the beginning */
8992 return forward ? &obj->programs[0] :
8993 &obj->programs[nr_programs - 1];
8995 if (p->obj != obj) {
8996 pr_warn("error: program handler doesn't match object\n");
8997 return errno = EINVAL, NULL;
9000 idx = (p - obj->programs) + (forward ? 1 : -1);
9001 if (idx >= obj->nr_programs || idx < 0)
9003 return &obj->programs[idx];
9006 struct bpf_program *
9007 bpf_object__next_program(const struct bpf_object *obj, struct bpf_program *prev)
9009 struct bpf_program *prog = prev;
9012 prog = __bpf_program__iter(prog, obj, true);
9013 } while (prog && prog_is_subprog(obj, prog));
9018 struct bpf_program *
9019 bpf_object__prev_program(const struct bpf_object *obj, struct bpf_program *next)
9021 struct bpf_program *prog = next;
9024 prog = __bpf_program__iter(prog, obj, false);
9025 } while (prog && prog_is_subprog(obj, prog));
9030 void bpf_program__set_ifindex(struct bpf_program *prog, __u32 ifindex)
9032 prog->prog_ifindex = ifindex;
9035 const char *bpf_program__name(const struct bpf_program *prog)
9040 const char *bpf_program__section_name(const struct bpf_program *prog)
9042 return prog->sec_name;
9045 bool bpf_program__autoload(const struct bpf_program *prog)
9047 return prog->autoload;
9050 int bpf_program__set_autoload(struct bpf_program *prog, bool autoload)
9052 if (prog->obj->loaded)
9053 return libbpf_err(-EINVAL);
9055 prog->autoload = autoload;
9059 bool bpf_program__autoattach(const struct bpf_program *prog)
9061 return prog->autoattach;
9064 void bpf_program__set_autoattach(struct bpf_program *prog, bool autoattach)
9066 prog->autoattach = autoattach;
9069 const struct bpf_insn *bpf_program__insns(const struct bpf_program *prog)
9074 size_t bpf_program__insn_cnt(const struct bpf_program *prog)
9076 return prog->insns_cnt;
9079 int bpf_program__set_insns(struct bpf_program *prog,
9080 struct bpf_insn *new_insns, size_t new_insn_cnt)
9082 struct bpf_insn *insns;
9084 if (prog->obj->loaded)
9087 insns = libbpf_reallocarray(prog->insns, new_insn_cnt, sizeof(*insns));
9088 /* NULL is a valid return from reallocarray if the new count is zero */
9089 if (!insns && new_insn_cnt) {
9090 pr_warn("prog '%s': failed to realloc prog code\n", prog->name);
9093 memcpy(insns, new_insns, new_insn_cnt * sizeof(*insns));
9095 prog->insns = insns;
9096 prog->insns_cnt = new_insn_cnt;
9100 int bpf_program__fd(const struct bpf_program *prog)
9103 return libbpf_err(-EINVAL);
9106 return libbpf_err(-ENOENT);
9111 __alias(bpf_program__type)
9112 enum bpf_prog_type bpf_program__get_type(const struct bpf_program *prog);
9114 enum bpf_prog_type bpf_program__type(const struct bpf_program *prog)
9119 static size_t custom_sec_def_cnt;
9120 static struct bpf_sec_def *custom_sec_defs;
9121 static struct bpf_sec_def custom_fallback_def;
9122 static bool has_custom_fallback_def;
9123 static int last_custom_sec_def_handler_id;
9125 int bpf_program__set_type(struct bpf_program *prog, enum bpf_prog_type type)
9127 if (prog->obj->loaded)
9128 return libbpf_err(-EBUSY);
9130 /* if type is not changed, do nothing */
9131 if (prog->type == type)
9136 /* If the program type was changed, we need to reset the associated SEC()
9137 * handler, as it will be invalid now. The only exception is a generic
9138 * fallback handler, which by definition is program type-agnostic and
9139 * is a catch-all custom handler, optionally set by the application,
9140 * so it should be able to handle any type of BPF program. */
9142 if (prog->sec_def != &custom_fallback_def)
9143 prog->sec_def = NULL;
9147 __alias(bpf_program__expected_attach_type)
9148 enum bpf_attach_type bpf_program__get_expected_attach_type(const struct bpf_program *prog);
9150 enum bpf_attach_type bpf_program__expected_attach_type(const struct bpf_program *prog)
9152 return prog->expected_attach_type;
9155 int bpf_program__set_expected_attach_type(struct bpf_program *prog,
9156 enum bpf_attach_type type)
9158 if (prog->obj->loaded)
9159 return libbpf_err(-EBUSY);
9161 prog->expected_attach_type = type;
9165 __u32 bpf_program__flags(const struct bpf_program *prog)
9167 return prog->prog_flags;
9170 int bpf_program__set_flags(struct bpf_program *prog, __u32 flags)
9172 if (prog->obj->loaded)
9173 return libbpf_err(-EBUSY);
9175 prog->prog_flags = flags;
9179 __u32 bpf_program__log_level(const struct bpf_program *prog)
9181 return prog->log_level;
9184 int bpf_program__set_log_level(struct bpf_program *prog, __u32 log_level)
9186 if (prog->obj->loaded)
9187 return libbpf_err(-EBUSY);
9189 prog->log_level = log_level;
9193 const char *bpf_program__log_buf(const struct bpf_program *prog, size_t *log_size)
9195 *log_size = prog->log_size;
9196 return prog->log_buf;
9199 int bpf_program__set_log_buf(struct bpf_program *prog, char *log_buf, size_t log_size)
9201 if (log_size && !log_buf)
9203 if (log_size > UINT_MAX)
9205 if (prog->obj->loaded)
9208 prog->log_buf = log_buf;
9209 prog->log_size = log_size;
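/* Usage sketch (illustrative): capturing the verifier log into a
 * caller-owned buffer so it can be dumped when loading fails:
 *
 *	static char vlog[1024 * 1024];
 *
 *	bpf_program__set_log_buf(prog, vlog, sizeof(vlog));
 *	bpf_program__set_log_level(prog, 1);
 *	if (bpf_object__load(obj))
 *		fprintf(stderr, "verifier said:\n%s\n", vlog);
 */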
9213 #define SEC_DEF(sec_pfx, ptype, atype, flags, ...) { \
9214 .sec = (char *)sec_pfx, \
9215 .prog_type = BPF_PROG_TYPE_##ptype, \
9216 .expected_attach_type = atype, \
9217 .cookie = (long)(flags), \
9218 .prog_prepare_load_fn = libbpf_prepare_prog_load, \
9222 static int attach_kprobe(const struct bpf_program *prog, long cookie, struct bpf_link **link);
9223 static int attach_uprobe(const struct bpf_program *prog, long cookie, struct bpf_link **link);
9224 static int attach_ksyscall(const struct bpf_program *prog, long cookie, struct bpf_link **link);
9225 static int attach_usdt(const struct bpf_program *prog, long cookie, struct bpf_link **link);
9226 static int attach_tp(const struct bpf_program *prog, long cookie, struct bpf_link **link);
9227 static int attach_raw_tp(const struct bpf_program *prog, long cookie, struct bpf_link **link);
9228 static int attach_trace(const struct bpf_program *prog, long cookie, struct bpf_link **link);
9229 static int attach_kprobe_multi(const struct bpf_program *prog, long cookie, struct bpf_link **link);
9230 static int attach_uprobe_multi(const struct bpf_program *prog, long cookie, struct bpf_link **link);
9231 static int attach_lsm(const struct bpf_program *prog, long cookie, struct bpf_link **link);
9232 static int attach_iter(const struct bpf_program *prog, long cookie, struct bpf_link **link);
9234 static const struct bpf_sec_def section_defs[] = {
9235 SEC_DEF("socket", SOCKET_FILTER, 0, SEC_NONE),
9236 SEC_DEF("sk_reuseport/migrate", SK_REUSEPORT, BPF_SK_REUSEPORT_SELECT_OR_MIGRATE, SEC_ATTACHABLE),
9237 SEC_DEF("sk_reuseport", SK_REUSEPORT, BPF_SK_REUSEPORT_SELECT, SEC_ATTACHABLE),
9238 SEC_DEF("kprobe+", KPROBE, 0, SEC_NONE, attach_kprobe),
9239 SEC_DEF("uprobe+", KPROBE, 0, SEC_NONE, attach_uprobe),
9240 SEC_DEF("uprobe.s+", KPROBE, 0, SEC_SLEEPABLE, attach_uprobe),
9241 SEC_DEF("kretprobe+", KPROBE, 0, SEC_NONE, attach_kprobe),
9242 SEC_DEF("uretprobe+", KPROBE, 0, SEC_NONE, attach_uprobe),
9243 SEC_DEF("uretprobe.s+", KPROBE, 0, SEC_SLEEPABLE, attach_uprobe),
9244 SEC_DEF("kprobe.multi+", KPROBE, BPF_TRACE_KPROBE_MULTI, SEC_NONE, attach_kprobe_multi),
9245 SEC_DEF("kretprobe.multi+", KPROBE, BPF_TRACE_KPROBE_MULTI, SEC_NONE, attach_kprobe_multi),
9246 SEC_DEF("uprobe.multi+", KPROBE, BPF_TRACE_UPROBE_MULTI, SEC_NONE, attach_uprobe_multi),
9247 SEC_DEF("uretprobe.multi+", KPROBE, BPF_TRACE_UPROBE_MULTI, SEC_NONE, attach_uprobe_multi),
9248 SEC_DEF("uprobe.multi.s+", KPROBE, BPF_TRACE_UPROBE_MULTI, SEC_SLEEPABLE, attach_uprobe_multi),
9249 SEC_DEF("uretprobe.multi.s+", KPROBE, BPF_TRACE_UPROBE_MULTI, SEC_SLEEPABLE, attach_uprobe_multi),
9250 SEC_DEF("ksyscall+", KPROBE, 0, SEC_NONE, attach_ksyscall),
9251 SEC_DEF("kretsyscall+", KPROBE, 0, SEC_NONE, attach_ksyscall),
9252 SEC_DEF("usdt+", KPROBE, 0, SEC_USDT, attach_usdt),
9253 SEC_DEF("usdt.s+", KPROBE, 0, SEC_USDT | SEC_SLEEPABLE, attach_usdt),
9254 SEC_DEF("tc/ingress", SCHED_CLS, BPF_TCX_INGRESS, SEC_NONE), /* alias for tcx */
9255 SEC_DEF("tc/egress", SCHED_CLS, BPF_TCX_EGRESS, SEC_NONE), /* alias for tcx */
9256 SEC_DEF("tcx/ingress", SCHED_CLS, BPF_TCX_INGRESS, SEC_NONE),
9257 SEC_DEF("tcx/egress", SCHED_CLS, BPF_TCX_EGRESS, SEC_NONE),
9258 SEC_DEF("tc", SCHED_CLS, 0, SEC_NONE), /* deprecated / legacy, use tcx */
9259 SEC_DEF("classifier", SCHED_CLS, 0, SEC_NONE), /* deprecated / legacy, use tcx */
9260 SEC_DEF("action", SCHED_ACT, 0, SEC_NONE), /* deprecated / legacy, use tcx */
9261 SEC_DEF("netkit/primary", SCHED_CLS, BPF_NETKIT_PRIMARY, SEC_NONE),
9262 SEC_DEF("netkit/peer", SCHED_CLS, BPF_NETKIT_PEER, SEC_NONE),
9263 SEC_DEF("tracepoint+", TRACEPOINT, 0, SEC_NONE, attach_tp),
9264 SEC_DEF("tp+", TRACEPOINT, 0, SEC_NONE, attach_tp),
9265 SEC_DEF("raw_tracepoint+", RAW_TRACEPOINT, 0, SEC_NONE, attach_raw_tp),
9266 SEC_DEF("raw_tp+", RAW_TRACEPOINT, 0, SEC_NONE, attach_raw_tp),
9267 SEC_DEF("raw_tracepoint.w+", RAW_TRACEPOINT_WRITABLE, 0, SEC_NONE, attach_raw_tp),
9268 SEC_DEF("raw_tp.w+", RAW_TRACEPOINT_WRITABLE, 0, SEC_NONE, attach_raw_tp),
9269 SEC_DEF("tp_btf+", TRACING, BPF_TRACE_RAW_TP, SEC_ATTACH_BTF, attach_trace),
9270 SEC_DEF("fentry+", TRACING, BPF_TRACE_FENTRY, SEC_ATTACH_BTF, attach_trace),
9271 SEC_DEF("fmod_ret+", TRACING, BPF_MODIFY_RETURN, SEC_ATTACH_BTF, attach_trace),
9272 SEC_DEF("fexit+", TRACING, BPF_TRACE_FEXIT, SEC_ATTACH_BTF, attach_trace),
9273 SEC_DEF("fentry.s+", TRACING, BPF_TRACE_FENTRY, SEC_ATTACH_BTF | SEC_SLEEPABLE, attach_trace),
9274 SEC_DEF("fmod_ret.s+", TRACING, BPF_MODIFY_RETURN, SEC_ATTACH_BTF | SEC_SLEEPABLE, attach_trace),
9275 SEC_DEF("fexit.s+", TRACING, BPF_TRACE_FEXIT, SEC_ATTACH_BTF | SEC_SLEEPABLE, attach_trace),
9276 SEC_DEF("freplace+", EXT, 0, SEC_ATTACH_BTF, attach_trace),
9277 SEC_DEF("lsm+", LSM, BPF_LSM_MAC, SEC_ATTACH_BTF, attach_lsm),
9278 SEC_DEF("lsm.s+", LSM, BPF_LSM_MAC, SEC_ATTACH_BTF | SEC_SLEEPABLE, attach_lsm),
9279 SEC_DEF("lsm_cgroup+", LSM, BPF_LSM_CGROUP, SEC_ATTACH_BTF),
9280 SEC_DEF("iter+", TRACING, BPF_TRACE_ITER, SEC_ATTACH_BTF, attach_iter),
9281 SEC_DEF("iter.s+", TRACING, BPF_TRACE_ITER, SEC_ATTACH_BTF | SEC_SLEEPABLE, attach_iter),
9282 SEC_DEF("syscall", SYSCALL, 0, SEC_SLEEPABLE),
9283 SEC_DEF("xdp.frags/devmap", XDP, BPF_XDP_DEVMAP, SEC_XDP_FRAGS),
9284 SEC_DEF("xdp/devmap", XDP, BPF_XDP_DEVMAP, SEC_ATTACHABLE),
9285 SEC_DEF("xdp.frags/cpumap", XDP, BPF_XDP_CPUMAP, SEC_XDP_FRAGS),
9286 SEC_DEF("xdp/cpumap", XDP, BPF_XDP_CPUMAP, SEC_ATTACHABLE),
9287 SEC_DEF("xdp.frags", XDP, BPF_XDP, SEC_XDP_FRAGS),
9288 SEC_DEF("xdp", XDP, BPF_XDP, SEC_ATTACHABLE_OPT),
9289 SEC_DEF("perf_event", PERF_EVENT, 0, SEC_NONE),
9290 SEC_DEF("lwt_in", LWT_IN, 0, SEC_NONE),
9291 SEC_DEF("lwt_out", LWT_OUT, 0, SEC_NONE),
9292 SEC_DEF("lwt_xmit", LWT_XMIT, 0, SEC_NONE),
9293 SEC_DEF("lwt_seg6local", LWT_SEG6LOCAL, 0, SEC_NONE),
9294 SEC_DEF("sockops", SOCK_OPS, BPF_CGROUP_SOCK_OPS, SEC_ATTACHABLE_OPT),
9295 SEC_DEF("sk_skb/stream_parser", SK_SKB, BPF_SK_SKB_STREAM_PARSER, SEC_ATTACHABLE_OPT),
9296 SEC_DEF("sk_skb/stream_verdict",SK_SKB, BPF_SK_SKB_STREAM_VERDICT, SEC_ATTACHABLE_OPT),
9297 SEC_DEF("sk_skb", SK_SKB, 0, SEC_NONE),
9298 SEC_DEF("sk_msg", SK_MSG, BPF_SK_MSG_VERDICT, SEC_ATTACHABLE_OPT),
9299 SEC_DEF("lirc_mode2", LIRC_MODE2, BPF_LIRC_MODE2, SEC_ATTACHABLE_OPT),
9300 SEC_DEF("flow_dissector", FLOW_DISSECTOR, BPF_FLOW_DISSECTOR, SEC_ATTACHABLE_OPT),
9301 SEC_DEF("cgroup_skb/ingress", CGROUP_SKB, BPF_CGROUP_INET_INGRESS, SEC_ATTACHABLE_OPT),
9302 SEC_DEF("cgroup_skb/egress", CGROUP_SKB, BPF_CGROUP_INET_EGRESS, SEC_ATTACHABLE_OPT),
9303 SEC_DEF("cgroup/skb", CGROUP_SKB, 0, SEC_NONE),
9304 SEC_DEF("cgroup/sock_create", CGROUP_SOCK, BPF_CGROUP_INET_SOCK_CREATE, SEC_ATTACHABLE),
9305 SEC_DEF("cgroup/sock_release", CGROUP_SOCK, BPF_CGROUP_INET_SOCK_RELEASE, SEC_ATTACHABLE),
9306 SEC_DEF("cgroup/sock", CGROUP_SOCK, BPF_CGROUP_INET_SOCK_CREATE, SEC_ATTACHABLE_OPT),
9307 SEC_DEF("cgroup/post_bind4", CGROUP_SOCK, BPF_CGROUP_INET4_POST_BIND, SEC_ATTACHABLE),
9308 SEC_DEF("cgroup/post_bind6", CGROUP_SOCK, BPF_CGROUP_INET6_POST_BIND, SEC_ATTACHABLE),
9309 SEC_DEF("cgroup/bind4", CGROUP_SOCK_ADDR, BPF_CGROUP_INET4_BIND, SEC_ATTACHABLE),
9310 SEC_DEF("cgroup/bind6", CGROUP_SOCK_ADDR, BPF_CGROUP_INET6_BIND, SEC_ATTACHABLE),
9311 SEC_DEF("cgroup/connect4", CGROUP_SOCK_ADDR, BPF_CGROUP_INET4_CONNECT, SEC_ATTACHABLE),
9312 SEC_DEF("cgroup/connect6", CGROUP_SOCK_ADDR, BPF_CGROUP_INET6_CONNECT, SEC_ATTACHABLE),
9313 SEC_DEF("cgroup/connect_unix", CGROUP_SOCK_ADDR, BPF_CGROUP_UNIX_CONNECT, SEC_ATTACHABLE),
9314 SEC_DEF("cgroup/sendmsg4", CGROUP_SOCK_ADDR, BPF_CGROUP_UDP4_SENDMSG, SEC_ATTACHABLE),
9315 SEC_DEF("cgroup/sendmsg6", CGROUP_SOCK_ADDR, BPF_CGROUP_UDP6_SENDMSG, SEC_ATTACHABLE),
9316 SEC_DEF("cgroup/sendmsg_unix", CGROUP_SOCK_ADDR, BPF_CGROUP_UNIX_SENDMSG, SEC_ATTACHABLE),
9317 SEC_DEF("cgroup/recvmsg4", CGROUP_SOCK_ADDR, BPF_CGROUP_UDP4_RECVMSG, SEC_ATTACHABLE),
9318 SEC_DEF("cgroup/recvmsg6", CGROUP_SOCK_ADDR, BPF_CGROUP_UDP6_RECVMSG, SEC_ATTACHABLE),
9319 SEC_DEF("cgroup/recvmsg_unix", CGROUP_SOCK_ADDR, BPF_CGROUP_UNIX_RECVMSG, SEC_ATTACHABLE),
9320 SEC_DEF("cgroup/getpeername4", CGROUP_SOCK_ADDR, BPF_CGROUP_INET4_GETPEERNAME, SEC_ATTACHABLE),
9321 SEC_DEF("cgroup/getpeername6", CGROUP_SOCK_ADDR, BPF_CGROUP_INET6_GETPEERNAME, SEC_ATTACHABLE),
9322 SEC_DEF("cgroup/getpeername_unix", CGROUP_SOCK_ADDR, BPF_CGROUP_UNIX_GETPEERNAME, SEC_ATTACHABLE),
9323 SEC_DEF("cgroup/getsockname4", CGROUP_SOCK_ADDR, BPF_CGROUP_INET4_GETSOCKNAME, SEC_ATTACHABLE),
9324 SEC_DEF("cgroup/getsockname6", CGROUP_SOCK_ADDR, BPF_CGROUP_INET6_GETSOCKNAME, SEC_ATTACHABLE),
9325 SEC_DEF("cgroup/getsockname_unix", CGROUP_SOCK_ADDR, BPF_CGROUP_UNIX_GETSOCKNAME, SEC_ATTACHABLE),
9326 SEC_DEF("cgroup/sysctl", CGROUP_SYSCTL, BPF_CGROUP_SYSCTL, SEC_ATTACHABLE),
9327 SEC_DEF("cgroup/getsockopt", CGROUP_SOCKOPT, BPF_CGROUP_GETSOCKOPT, SEC_ATTACHABLE),
9328 SEC_DEF("cgroup/setsockopt", CGROUP_SOCKOPT, BPF_CGROUP_SETSOCKOPT, SEC_ATTACHABLE),
9329 SEC_DEF("cgroup/dev", CGROUP_DEVICE, BPF_CGROUP_DEVICE, SEC_ATTACHABLE_OPT),
9330 SEC_DEF("struct_ops+", STRUCT_OPS, 0, SEC_NONE),
9331 SEC_DEF("struct_ops.s+", STRUCT_OPS, 0, SEC_SLEEPABLE),
9332 SEC_DEF("sk_lookup", SK_LOOKUP, BPF_SK_LOOKUP, SEC_ATTACHABLE),
9333 SEC_DEF("netfilter", NETFILTER, BPF_NETFILTER, SEC_NONE),
9336 int libbpf_register_prog_handler(const char *sec,
9337 enum bpf_prog_type prog_type,
9338 enum bpf_attach_type exp_attach_type,
9339 const struct libbpf_prog_handler_opts *opts)
9341 struct bpf_sec_def *sec_def;
9343 if (!OPTS_VALID(opts, libbpf_prog_handler_opts))
9344 return libbpf_err(-EINVAL);
9346 if (last_custom_sec_def_handler_id == INT_MAX) /* prevent overflow */
9347 return libbpf_err(-E2BIG);
9350 sec_def = libbpf_reallocarray(custom_sec_defs, custom_sec_def_cnt + 1,
9353 return libbpf_err(-ENOMEM);
9355 custom_sec_defs = sec_def;
9356 sec_def = &custom_sec_defs[custom_sec_def_cnt];
9358 if (has_custom_fallback_def)
9359 return libbpf_err(-EBUSY);
9361 sec_def = &custom_fallback_def;
9364 sec_def->sec = sec ? strdup(sec) : NULL;
9365 if (sec && !sec_def->sec)
9366 return libbpf_err(-ENOMEM);
9368 sec_def->prog_type = prog_type;
9369 sec_def->expected_attach_type = exp_attach_type;
9370 sec_def->cookie = OPTS_GET(opts, cookie, 0);
9372 sec_def->prog_setup_fn = OPTS_GET(opts, prog_setup_fn, NULL);
9373 sec_def->prog_prepare_load_fn = OPTS_GET(opts, prog_prepare_load_fn, NULL);
9374 sec_def->prog_attach_fn = OPTS_GET(opts, prog_attach_fn, NULL);
9376 sec_def->handler_id = ++last_custom_sec_def_handler_id;
9379 custom_sec_def_cnt++;
9381 has_custom_fallback_def = true;
9383 return sec_def->handler_id;
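/* Usage sketch (illustrative): registering a custom handler for an
 * application-defined section prefix; "my.prog/" and my_attach_fn are
 * hypothetical names supplied by the application:
 *
 *	LIBBPF_OPTS(libbpf_prog_handler_opts, hopts,
 *		.prog_attach_fn = my_attach_fn,
 *	);
 *	int id = libbpf_register_prog_handler("my.prog/", BPF_PROG_TYPE_KPROBE,
 *					      0, &hopts);
 *
 *	if (id < 0)
 *		... handle error ...
 *	... later: libbpf_unregister_prog_handler(id); ...
 */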
9386 int libbpf_unregister_prog_handler(int handler_id)
9388 struct bpf_sec_def *sec_defs;
9391 if (handler_id <= 0)
9392 return libbpf_err(-EINVAL);
9394 if (has_custom_fallback_def && custom_fallback_def.handler_id == handler_id) {
9395 memset(&custom_fallback_def, 0, sizeof(custom_fallback_def));
9396 has_custom_fallback_def = false;
9400 for (i = 0; i < custom_sec_def_cnt; i++) {
9401 if (custom_sec_defs[i].handler_id == handler_id)
9405 if (i == custom_sec_def_cnt)
9406 return libbpf_err(-ENOENT);
9408 free(custom_sec_defs[i].sec);
9409 for (i = i + 1; i < custom_sec_def_cnt; i++)
9410 custom_sec_defs[i - 1] = custom_sec_defs[i];
9411 custom_sec_def_cnt--;
9413 /* try to shrink the array, but it's ok if we couldn't */
9414 sec_defs = libbpf_reallocarray(custom_sec_defs, custom_sec_def_cnt, sizeof(*sec_defs));
9415 /* if new count is zero, reallocarray can return a valid NULL result;
9416 * in this case the previous pointer will be freed, so we *have to*
9417 * reassign old pointer to the new value (even if it's NULL)
9419 if (sec_defs || custom_sec_def_cnt == 0)
9420 custom_sec_defs = sec_defs;
9425 static bool sec_def_matches(const struct bpf_sec_def *sec_def, const char *sec_name)
9427 size_t len = strlen(sec_def->sec);
9429 /* "type/" always has to have proper SEC("type/extras") form */
9430 if (sec_def->sec[len - 1] == '/') {
9431 if (str_has_pfx(sec_name, sec_def->sec))
9436 /* "type+" means it can be either exact SEC("type") or
9437 * well-formed SEC("type/extras") with proper '/' separator
9439 if (sec_def->sec[len - 1] == '+') {
9441 /* not even a prefix */
9442 if (strncmp(sec_name, sec_def->sec, len) != 0)
9444 /* exact match or has '/' separator */
9445 if (sec_name[len] == '\0' || sec_name[len] == '/')
9450 return strcmp(sec_name, sec_def->sec) == 0;
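/* Examples of the matching rules above (illustrative):
 *  - "kprobe+" matches SEC("kprobe") and SEC("kprobe/do_sys_open"),
 *    but not SEC("kprobes");
 *  - "tcx/ingress" (no trailing '+' or '/') matches only that exact
 *    section name;
 *  - a definition ending in '/' (e.g. a hypothetical custom "my.prog/")
 *    matches any SEC("my.prog/extras") by prefix.
 */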
9453 static const struct bpf_sec_def *find_sec_def(const char *sec_name)
9455 const struct bpf_sec_def *sec_def;
9458 n = custom_sec_def_cnt;
9459 for (i = 0; i < n; i++) {
9460 sec_def = &custom_sec_defs[i];
9461 if (sec_def_matches(sec_def, sec_name))
9465 n = ARRAY_SIZE(section_defs);
9466 for (i = 0; i < n; i++) {
9467 sec_def = &section_defs[i];
9468 if (sec_def_matches(sec_def, sec_name))
9472 if (has_custom_fallback_def)
9473 return &custom_fallback_def;
9478 #define MAX_TYPE_NAME_SIZE 32
9480 static char *libbpf_get_type_names(bool attach_type)
9482 int i, len = ARRAY_SIZE(section_defs) * MAX_TYPE_NAME_SIZE;
9490 /* Build a single string buffer listing all available names */
9491 for (i = 0; i < ARRAY_SIZE(section_defs); i++) {
9492 const struct bpf_sec_def *sec_def = &section_defs[i];
9495 if (sec_def->prog_prepare_load_fn != libbpf_prepare_prog_load)
9498 if (!(sec_def->cookie & SEC_ATTACHABLE))
9502 if (strlen(buf) + strlen(section_defs[i].sec) + 2 > len) {
9507 strcat(buf, section_defs[i].sec);
9513 int libbpf_prog_type_by_name(const char *name, enum bpf_prog_type *prog_type,
9514 enum bpf_attach_type *expected_attach_type)
9516 const struct bpf_sec_def *sec_def;
9520 return libbpf_err(-EINVAL);
9522 sec_def = find_sec_def(name);
9524 *prog_type = sec_def->prog_type;
9525 *expected_attach_type = sec_def->expected_attach_type;
9529 pr_debug("failed to guess program type from ELF section '%s'\n", name);
9530 type_names = libbpf_get_type_names(false);
9531 if (type_names != NULL) {
9532 pr_debug("supported section(type) names are:%s\n", type_names);
9536 return libbpf_err(-ESRCH);
9539 const char *libbpf_bpf_attach_type_str(enum bpf_attach_type t)
9541 if (t < 0 || t >= ARRAY_SIZE(attach_type_name))
9544 return attach_type_name[t];
9547 const char *libbpf_bpf_link_type_str(enum bpf_link_type t)
9549 if (t < 0 || t >= ARRAY_SIZE(link_type_name))
9552 return link_type_name[t];
9555 const char *libbpf_bpf_map_type_str(enum bpf_map_type t)
9557 if (t < 0 || t >= ARRAY_SIZE(map_type_name))
9560 return map_type_name[t];
9563 const char *libbpf_bpf_prog_type_str(enum bpf_prog_type t)
9565 if (t < 0 || t >= ARRAY_SIZE(prog_type_name))
9568 return prog_type_name[t];
9571 static struct bpf_map *find_struct_ops_map_by_offset(struct bpf_object *obj,
9575 struct bpf_map *map;
9578 for (i = 0; i < obj->nr_maps; i++) {
9579 map = &obj->maps[i];
9580 if (!bpf_map__is_struct_ops(map))
9582 if (map->sec_idx == sec_idx &&
9583 map->sec_offset <= offset &&
9584 offset - map->sec_offset < map->def.value_size)
9591 /* Collect the relocations from ELF, populate st_ops->progs[], and update
9592 * st_ops->data for the shadow type.
9594 static int bpf_object__collect_st_ops_relos(struct bpf_object *obj,
9595 Elf64_Shdr *shdr, Elf_Data *data)
9597 const struct btf_member *member;
9598 struct bpf_struct_ops *st_ops;
9599 struct bpf_program *prog;
9600 unsigned int shdr_idx;
9601 const struct btf *btf;
9602 struct bpf_map *map;
9603 unsigned int moff, insn_idx;
9611 nrels = shdr->sh_size / shdr->sh_entsize;
9612 for (i = 0; i < nrels; i++) {
9613 rel = elf_rel_by_idx(data, i);
9615 pr_warn("struct_ops reloc: failed to get %d reloc\n", i);
9616 return -LIBBPF_ERRNO__FORMAT;
9619 sym = elf_sym_by_idx(obj, ELF64_R_SYM(rel->r_info));
9621 pr_warn("struct_ops reloc: symbol %zx not found\n",
9622 (size_t)ELF64_R_SYM(rel->r_info));
9623 return -LIBBPF_ERRNO__FORMAT;
9626 name = elf_sym_str(obj, sym->st_name) ?: "<?>";
9627 map = find_struct_ops_map_by_offset(obj, shdr->sh_info, rel->r_offset);
9629 pr_warn("struct_ops reloc: cannot find map at rel->r_offset %zu\n",
9630 (size_t)rel->r_offset);
9634 moff = rel->r_offset - map->sec_offset;
9635 shdr_idx = sym->st_shndx;
9636 st_ops = map->st_ops;
9637 pr_debug("struct_ops reloc %s: for %lld value %lld shdr_idx %u rel->r_offset %zu map->sec_offset %zu name %d (\'%s\')\n",
9639 (long long)(rel->r_info >> 32),
9640 (long long)sym->st_value,
9641 shdr_idx, (size_t)rel->r_offset,
9642 map->sec_offset, sym->st_name, name);
9644 if (shdr_idx >= SHN_LORESERVE) {
9645 pr_warn("struct_ops reloc %s: rel->r_offset %zu shdr_idx %u unsupported non-static function\n",
9646 map->name, (size_t)rel->r_offset, shdr_idx);
9647 return -LIBBPF_ERRNO__RELOC;
9649 if (sym->st_value % BPF_INSN_SZ) {
9650 pr_warn("struct_ops reloc %s: invalid target program offset %llu\n",
9651 map->name, (unsigned long long)sym->st_value);
9652 return -LIBBPF_ERRNO__FORMAT;
9654 insn_idx = sym->st_value / BPF_INSN_SZ;
9656 member = find_member_by_offset(st_ops->type, moff * 8);
9658 pr_warn("struct_ops reloc %s: cannot find member at moff %u\n",
9662 member_idx = member - btf_members(st_ops->type);
9663 name = btf__name_by_offset(btf, member->name_off);
9665 if (!resolve_func_ptr(btf, member->type, NULL)) {
9666 pr_warn("struct_ops reloc %s: cannot relocate non func ptr %s\n",
9671 prog = find_prog_by_sec_insn(obj, shdr_idx, insn_idx);
9673 pr_warn("struct_ops reloc %s: cannot find prog at shdr_idx %u to relocate func ptr %s\n",
9674 map->name, shdr_idx, name);
9678 /* prevent the use of BPF prog with invalid type */
9679 if (prog->type != BPF_PROG_TYPE_STRUCT_OPS) {
9680 pr_warn("struct_ops reloc %s: prog %s is not struct_ops BPF program\n",
9681 map->name, prog->name);
9685 st_ops->progs[member_idx] = prog;
9687 /* st_ops->data will be exposed to users, being returned by
9688 * bpf_map__initial_value() as a pointer to the shadow
9689 * type. All function pointers in the original struct type
9690 * should be converted to a pointer to struct bpf_program
9691 * in the shadow type.
9693 *((struct bpf_program **)(st_ops->data + moff)) = prog;
9699 #define BTF_TRACE_PREFIX "btf_trace_"
9700 #define BTF_LSM_PREFIX "bpf_lsm_"
9701 #define BTF_ITER_PREFIX "bpf_iter_"
9702 #define BTF_MAX_NAME_SIZE 128
9704 void btf_get_kernel_prefix_kind(enum bpf_attach_type attach_type,
9705 const char **prefix, int *kind)
9707 switch (attach_type) {
9708 case BPF_TRACE_RAW_TP:
9709 *prefix = BTF_TRACE_PREFIX;
9710 *kind = BTF_KIND_TYPEDEF;
9713 case BPF_LSM_CGROUP:
9714 *prefix = BTF_LSM_PREFIX;
9715 *kind = BTF_KIND_FUNC;
9717 case BPF_TRACE_ITER:
9718 *prefix = BTF_ITER_PREFIX;
9719 *kind = BTF_KIND_FUNC;
9723 *kind = BTF_KIND_FUNC;
9727 static int find_btf_by_prefix_kind(const struct btf *btf, const char *prefix,
9728 const char *name, __u32 kind)
9730 char btf_type_name[BTF_MAX_NAME_SIZE];
9733 ret = snprintf(btf_type_name, sizeof(btf_type_name),
9734 "%s%s", prefix, name);
9735 /* snprintf returns the number of characters that would have been written,
9736 * excluding the terminating null. So a result >= BTF_MAX_NAME_SIZE
9737 * indicates truncation.
9739 if (ret < 0 || ret >= sizeof(btf_type_name))
9740 return -ENAMETOOLONG;
9741 return btf__find_by_name_kind(btf, btf_type_name, kind);
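/* Example (illustrative): for a SEC("tp_btf/sched_switch") program the
 * attach type is BPF_TRACE_RAW_TP, so the code above looks up typedef
 * "btf_trace_sched_switch" in vmlinux BTF; a SEC("fentry/do_unlinkat")
 * program uses the empty default prefix and is resolved as FUNC
 * "do_unlinkat".
 */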
9744 static inline int find_attach_btf_id(struct btf *btf, const char *name,
9745 enum bpf_attach_type attach_type)
9750 btf_get_kernel_prefix_kind(attach_type, &prefix, &kind);
9751 return find_btf_by_prefix_kind(btf, prefix, name, kind);
9754 int libbpf_find_vmlinux_btf_id(const char *name,
9755 enum bpf_attach_type attach_type)
9760 btf = btf__load_vmlinux_btf();
9761 err = libbpf_get_error(btf);
9763 pr_warn("vmlinux BTF is not found\n");
9764 return libbpf_err(err);
9767 err = find_attach_btf_id(btf, name, attach_type);
9769 pr_warn("%s is not found in vmlinux BTF\n", name);
9772 return libbpf_err(err);
9775 static int libbpf_find_prog_btf_id(const char *name, __u32 attach_prog_fd)
9777 struct bpf_prog_info info;
9778 __u32 info_len = sizeof(info);
9782 memset(&info, 0, info_len);
9783 err = bpf_prog_get_info_by_fd(attach_prog_fd, &info, &info_len);
9785 pr_warn("failed bpf_prog_get_info_by_fd for FD %d: %d\n",
9786 attach_prog_fd, err);
9792 pr_warn("The target program doesn't have BTF\n");
9795 btf = btf__load_from_kernel_by_id(info.btf_id);
9796 err = libbpf_get_error(btf);
9798 pr_warn("Failed to get BTF %d of the program: %d\n", info.btf_id, err);
9801 err = btf__find_by_name_kind(btf, name, BTF_KIND_FUNC);
9804 pr_warn("%s is not found in prog's BTF\n", name);
9811 static int find_kernel_btf_id(struct bpf_object *obj, const char *attach_name,
9812 enum bpf_attach_type attach_type,
9813 int *btf_obj_fd, int *btf_type_id)
9817 ret = find_attach_btf_id(obj->btf_vmlinux, attach_name, attach_type);
9819 *btf_obj_fd = 0; /* vmlinux BTF */
9826 ret = load_module_btfs(obj);
9830 for (i = 0; i < obj->btf_module_cnt; i++) {
9831 const struct module_btf *mod = &obj->btf_modules[i];
9833 ret = find_attach_btf_id(mod->btf, attach_name, attach_type);
9835 *btf_obj_fd = mod->fd;
9848 static int libbpf_find_attach_btf_id(struct bpf_program *prog, const char *attach_name,
9849 int *btf_obj_fd, int *btf_type_id)
9851 enum bpf_attach_type attach_type = prog->expected_attach_type;
9852 __u32 attach_prog_fd = prog->attach_prog_fd;
9855 /* BPF program's BTF ID */
9856 if (prog->type == BPF_PROG_TYPE_EXT || attach_prog_fd) {
9857 if (!attach_prog_fd) {
9858 pr_warn("prog '%s': attach program FD is not set\n", prog->name);
9861 err = libbpf_find_prog_btf_id(attach_name, attach_prog_fd);
9863 pr_warn("prog '%s': failed to find BPF program (FD %d) BTF ID for '%s': %d\n",
9864 prog->name, attach_prog_fd, attach_name, err);
9872 /* kernel/module BTF ID */
9873 if (prog->obj->gen_loader) {
9874 bpf_gen__record_attach_target(prog->obj->gen_loader, attach_name, attach_type);
9878 err = find_kernel_btf_id(prog->obj, attach_name,
9879 attach_type, btf_obj_fd,
9883 pr_warn("prog '%s': failed to find kernel BTF type ID of '%s': %d\n",
9884 prog->name, attach_name, err);
9890 int libbpf_attach_type_by_name(const char *name,
9891 enum bpf_attach_type *attach_type)
9894 const struct bpf_sec_def *sec_def;
9897 return libbpf_err(-EINVAL);
9899 sec_def = find_sec_def(name);
9901 pr_debug("failed to guess attach type based on ELF section name '%s'\n", name);
9902 type_names = libbpf_get_type_names(true);
9903 if (type_names != NULL) {
9904 pr_debug("attachable section(type) names are:%s\n", type_names);
9908 return libbpf_err(-EINVAL);
9911 if (sec_def->prog_prepare_load_fn != libbpf_prepare_prog_load)
9912 return libbpf_err(-EINVAL);
9913 if (!(sec_def->cookie & SEC_ATTACHABLE))
9914 return libbpf_err(-EINVAL);
9916 *attach_type = sec_def->expected_attach_type;
9920 int bpf_map__fd(const struct bpf_map *map)
9923 return libbpf_err(-EINVAL);
9924 if (!map_is_created(map))
9929 static bool map_uses_real_name(const struct bpf_map *map)
9931 /* Since libbpf started to support custom .data.* and .rodata.* maps,
9932 * their user-visible name differs from the kernel-visible name. Users see
9933 * such a map's corresponding ELF section name as the map name.
9934 * This check distinguishes .data/.rodata from .data.* and .rodata.*
9935 * maps to know which name has to be returned to the user.
9937 if (map->libbpf_type == LIBBPF_MAP_DATA && strcmp(map->real_name, DATA_SEC) != 0)
9939 if (map->libbpf_type == LIBBPF_MAP_RODATA && strcmp(map->real_name, RODATA_SEC) != 0)
9944 const char *bpf_map__name(const struct bpf_map *map)
9949 if (map_uses_real_name(map))
9950 return map->real_name;
9955 enum bpf_map_type bpf_map__type(const struct bpf_map *map)
9957 return map->def.type;
9960 int bpf_map__set_type(struct bpf_map *map, enum bpf_map_type type)
9962 if (map_is_created(map))
9963 return libbpf_err(-EBUSY);
9964 map->def.type = type;
9968 __u32 bpf_map__map_flags(const struct bpf_map *map)
9970 return map->def.map_flags;
9973 int bpf_map__set_map_flags(struct bpf_map *map, __u32 flags)
9975 if (map_is_created(map))
9976 return libbpf_err(-EBUSY);
9977 map->def.map_flags = flags;
9981 __u64 bpf_map__map_extra(const struct bpf_map *map)
9983 return map->map_extra;
9986 int bpf_map__set_map_extra(struct bpf_map *map, __u64 map_extra)
9988 if (map_is_created(map))
9989 return libbpf_err(-EBUSY);
9990 map->map_extra = map_extra;
9994 __u32 bpf_map__numa_node(const struct bpf_map *map)
9996 return map->numa_node;
9999 int bpf_map__set_numa_node(struct bpf_map *map, __u32 numa_node)
10001 if (map_is_created(map))
10002 return libbpf_err(-EBUSY);
10003 map->numa_node = numa_node;
10007 __u32 bpf_map__key_size(const struct bpf_map *map)
10009 return map->def.key_size;
10012 int bpf_map__set_key_size(struct bpf_map *map, __u32 size)
10014 if (map_is_created(map))
10015 return libbpf_err(-EBUSY);
10016 map->def.key_size = size;
10020 __u32 bpf_map__value_size(const struct bpf_map *map)
10022 return map->def.value_size;
10025 static int map_btf_datasec_resize(struct bpf_map *map, __u32 size)
10028 struct btf_type *datasec_type, *var_type;
10029 struct btf_var_secinfo *var;
10030 const struct btf_type *array_type;
10031 const struct btf_array *array;
10032 int vlen, element_sz, new_array_id;
10035 /* check btf existence */
10036 btf = bpf_object__btf(map->obj);
10040 /* verify map is datasec */
10041 datasec_type = btf_type_by_id(btf, bpf_map__btf_value_type_id(map));
10042 if (!btf_is_datasec(datasec_type)) {
10043 pr_warn("map '%s': cannot be resized, map value type is not a datasec\n",
10044 bpf_map__name(map));
10048 /* verify datasec has at least one var */
10049 vlen = btf_vlen(datasec_type);
10051 pr_warn("map '%s': cannot be resized, map value datasec is empty\n",
10052 bpf_map__name(map));
10056 /* verify last var in the datasec is an array */
10057 var = &btf_var_secinfos(datasec_type)[vlen - 1];
10058 var_type = btf_type_by_id(btf, var->type);
10059 array_type = skip_mods_and_typedefs(btf, var_type->type, NULL);
10060 if (!btf_is_array(array_type)) {
10061 pr_warn("map '%s': cannot be resized, last var must be an array\n",
10062 bpf_map__name(map));
10066 /* verify request size aligns with array */
10067 array = btf_array(array_type);
10068 element_sz = btf__resolve_size(btf, array->type);
10069 if (element_sz <= 0 || (size - var->offset) % element_sz != 0) {
10070 pr_warn("map '%s': cannot be resized, element size (%d) doesn't align with new total size (%u)\n",
10071 bpf_map__name(map), element_sz, size);
10075 /* create a new array based on the existing array, but with new length */
10076 nr_elements = (size - var->offset) / element_sz;
10077 new_array_id = btf__add_array(btf, array->index_type, array->type, nr_elements);
10078 if (new_array_id < 0)
10079 return new_array_id;
10081 /* adding a new btf type invalidates existing pointers to btf objects,
10082 * so refresh pointers before proceeding
10084 datasec_type = btf_type_by_id(btf, map->btf_value_type_id);
10085 var = &btf_var_secinfos(datasec_type)[vlen - 1];
10086 var_type = btf_type_by_id(btf, var->type);
10088 /* finally update btf info */
10089 datasec_type->size = size;
10090 var->size = size - var->offset;
10091 var_type->type = new_array_id;
10096 int bpf_map__set_value_size(struct bpf_map *map, __u32 size)
10098 if (map->obj->loaded || map->reused)
10099 return libbpf_err(-EBUSY);
10102 size_t mmap_old_sz, mmap_new_sz;
10105 if (map->def.type != BPF_MAP_TYPE_ARRAY)
10106 return -EOPNOTSUPP;
10108 mmap_old_sz = bpf_map_mmap_sz(map);
10109 mmap_new_sz = array_map_mmap_sz(size, map->def.max_entries);
10110 err = bpf_map_mmap_resize(map, mmap_old_sz, mmap_new_sz);
10112 pr_warn("map '%s': failed to resize memory-mapped region: %d\n",
10113 bpf_map__name(map), err);
10116 err = map_btf_datasec_resize(map, size);
10117 if (err && err != -ENOENT) {
10118 pr_warn("map '%s': failed to adjust resized BTF, clearing BTF key/value info: %d\n",
10119 bpf_map__name(map), err);
10120 map->btf_value_type_id = 0;
10121 map->btf_key_type_id = 0;
10125 map->def.value_size = size;
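/* Usage sketch (illustrative): growing the global-data array backing .bss
 * before load. Assumes the last variable in the .bss datasec is a
 * resizable array of ints (the names here are hypothetical):
 *
 *	struct bpf_map *map = bpf_object__find_map_by_name(obj, ".bss");
 *
 *	if (bpf_map__set_value_size(map, 1024 * sizeof(int)))
 *		... handle error ...
 */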
10129 __u32 bpf_map__btf_key_type_id(const struct bpf_map *map)
10131 return map ? map->btf_key_type_id : 0;
10134 __u32 bpf_map__btf_value_type_id(const struct bpf_map *map)
10136 return map ? map->btf_value_type_id : 0;
10139 int bpf_map__set_initial_value(struct bpf_map *map,
10140 const void *data, size_t size)
10144 if (map->obj->loaded || map->reused)
10145 return libbpf_err(-EBUSY);
10147 if (!map->mmaped || map->libbpf_type == LIBBPF_MAP_KCONFIG)
10148 return libbpf_err(-EINVAL);
10150 if (map->def.type == BPF_MAP_TYPE_ARENA)
10151 actual_sz = map->obj->arena_data_sz;
10153 actual_sz = map->def.value_size;
10154 if (size != actual_sz)
10155 return libbpf_err(-EINVAL);
10157 memcpy(map->mmaped, data, size);
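/* Usage sketch (illustrative): seeding .rodata constants before load.
 * "struct my_rodata" is a hypothetical layout that must match the map's
 * value size exactly (skeletons provide such a struct in practice):
 *
 *	struct my_rodata cfg = { .debug_on = true };
 *	struct bpf_map *map = bpf_object__find_map_by_name(obj, ".rodata");
 *
 *	if (bpf_map__set_initial_value(map, &cfg, sizeof(cfg)))
 *		... handle error ...
 */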
10161 void *bpf_map__initial_value(const struct bpf_map *map, size_t *psize)
10163 if (bpf_map__is_struct_ops(map)) {
10165 *psize = map->def.value_size;
10166 return map->st_ops->data;
10172 if (map->def.type == BPF_MAP_TYPE_ARENA)
10173 *psize = map->obj->arena_data_sz;
10175 *psize = map->def.value_size;
10177 return map->mmaped;
10180 bool bpf_map__is_internal(const struct bpf_map *map)
10182 return map->libbpf_type != LIBBPF_MAP_UNSPEC;
10185 __u32 bpf_map__ifindex(const struct bpf_map *map)
10187 return map->map_ifindex;
10190 int bpf_map__set_ifindex(struct bpf_map *map, __u32 ifindex)
10192 if (map_is_created(map))
10193 return libbpf_err(-EBUSY);
10194 map->map_ifindex = ifindex;
10198 int bpf_map__set_inner_map_fd(struct bpf_map *map, int fd)
10200 if (!bpf_map_type__is_map_in_map(map->def.type)) {
10201 pr_warn("error: unsupported map type\n");
10202 return libbpf_err(-EINVAL);
10204 if (map->inner_map_fd != -1) {
10205 pr_warn("error: inner_map_fd already specified\n");
10206 return libbpf_err(-EINVAL);
10208 if (map->inner_map) {
10209 bpf_map__destroy(map->inner_map);
10210 zfree(&map->inner_map);
10212 map->inner_map_fd = fd;
10216 static struct bpf_map *
10217 __bpf_map__iter(const struct bpf_map *m, const struct bpf_object *obj, int i)
10220 struct bpf_map *s, *e;
10222 if (!obj || !obj->maps)
10223 return errno = EINVAL, NULL;
10226 e = obj->maps + obj->nr_maps;
10228 if ((m < s) || (m >= e)) {
10229 pr_warn("error in %s: map handler doesn't belong to object\n",
10231 return errno = EINVAL, NULL;
10234 idx = (m - obj->maps) + i;
10235 if (idx >= obj->nr_maps || idx < 0)
10237 return &obj->maps[idx];
10241 bpf_object__next_map(const struct bpf_object *obj, const struct bpf_map *prev)
10246 return __bpf_map__iter(prev, obj, 1);
10250 bpf_object__prev_map(const struct bpf_object *obj, const struct bpf_map *next)
10252 if (next == NULL) {
10255 return obj->maps + obj->nr_maps - 1;
10258 return __bpf_map__iter(next, obj, -1);
10262 bpf_object__find_map_by_name(const struct bpf_object *obj, const char *name)
10264 struct bpf_map *pos;
10266 bpf_object__for_each_map(pos, obj) {
10267 /* if it's a special internal map name (which always starts
10268 * with a dot) then check if that special name matches the
10269 * real map name (ELF section name)
10271 if (name[0] == '.') {
10272 if (pos->real_name && strcmp(pos->real_name, name) == 0)
10276 /* otherwise map name has to be an exact match */
10277 if (map_uses_real_name(pos)) {
10278 if (strcmp(pos->real_name, name) == 0)
10282 if (strcmp(pos->name, name) == 0)
10285 return errno = ENOENT, NULL;
10289 bpf_object__find_map_fd_by_name(const struct bpf_object *obj, const char *name)
10291 return bpf_map__fd(bpf_object__find_map_by_name(obj, name));
10294 static int validate_map_op(const struct bpf_map *map, size_t key_sz,
10295 size_t value_sz, bool check_value_sz)
10297 if (!map_is_created(map)) /* map is not yet created */
10300 if (map->def.key_size != key_sz) {
10301 pr_warn("map '%s': unexpected key size %zu provided, expected %u\n",
10302 map->name, key_sz, map->def.key_size);
10306 if (!check_value_sz)
10309 switch (map->def.type) {
10310 case BPF_MAP_TYPE_PERCPU_ARRAY:
10311 case BPF_MAP_TYPE_PERCPU_HASH:
10312 case BPF_MAP_TYPE_LRU_PERCPU_HASH:
10313 case BPF_MAP_TYPE_PERCPU_CGROUP_STORAGE: {
10314 int num_cpu = libbpf_num_possible_cpus();
10315 size_t elem_sz = roundup(map->def.value_size, 8);
10317 if (value_sz != num_cpu * elem_sz) {
10318 pr_warn("map '%s': unexpected value size %zu provided for per-CPU map, expected %d * %zu = %zd\n",
10319 map->name, value_sz, num_cpu, elem_sz, num_cpu * elem_sz);
10325 if (map->def.value_size != value_sz) {
10326 pr_warn("map '%s': unexpected value size %zu provided, expected %u\n",
10327 map->name, value_sz, map->def.value_size);
10335 int bpf_map__lookup_elem(const struct bpf_map *map,
10336 const void *key, size_t key_sz,
10337 void *value, size_t value_sz, __u64 flags)
10341 err = validate_map_op(map, key_sz, value_sz, true);
10343 return libbpf_err(err);
10345 return bpf_map_lookup_elem_flags(map->fd, key, value, flags);
10348 int bpf_map__update_elem(const struct bpf_map *map,
10349 const void *key, size_t key_sz,
10350 const void *value, size_t value_sz, __u64 flags)
10354 err = validate_map_op(map, key_sz, value_sz, true);
10356 return libbpf_err(err);
10358 return bpf_map_update_elem(map->fd, key, value, flags);
10361 int bpf_map__delete_elem(const struct bpf_map *map,
10362 const void *key, size_t key_sz, __u64 flags)
10366 err = validate_map_op(map, key_sz, 0, false /* check_value_sz */);
10368 return libbpf_err(err);
10370 return bpf_map_delete_elem_flags(map->fd, key, flags);
10373 int bpf_map__lookup_and_delete_elem(const struct bpf_map *map,
10374 const void *key, size_t key_sz,
10375 void *value, size_t value_sz, __u64 flags)
10379 err = validate_map_op(map, key_sz, value_sz, true);
10381 return libbpf_err(err);
10383 return bpf_map_lookup_and_delete_elem_flags(map->fd, key, value, flags);
10386 int bpf_map__get_next_key(const struct bpf_map *map,
10387 const void *cur_key, void *next_key, size_t key_sz)
10391 err = validate_map_op(map, key_sz, 0, false /* check_value_sz */);
10393 return libbpf_err(err);
10395 return bpf_map_get_next_key(map->fd, cur_key, next_key);
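/* Usage sketch (illustrative): enumerating all keys of a created map with
 * __u32 keys; passing NULL as cur_key yields the first key, and -ENOENT
 * signals the end of iteration:
 *
 *	__u32 cur, next;
 *	int err = bpf_map__get_next_key(map, NULL, &next, sizeof(next));
 *
 *	while (!err) {
 *		cur = next;
 *		... use cur (e.g. with bpf_map__lookup_elem()) ...
 *		err = bpf_map__get_next_key(map, &cur, &next, sizeof(next));
 *	}
 */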
10398 long libbpf_get_error(const void *ptr)
10400 if (!IS_ERR_OR_NULL(ptr))
10404 errno = -PTR_ERR(ptr);
10406 /* If ptr == NULL, then errno should already be set by the failing
10407 * API, because libbpf never returns NULL on success and it now always
10408 * sets errno on error. So no extra errno handling for ptr == NULL
10414 /* Replace link's underlying BPF program with the new one */
10415 int bpf_link__update_program(struct bpf_link *link, struct bpf_program *prog)
10419 ret = bpf_link_update(bpf_link__fd(link), bpf_program__fd(prog), NULL);
10420 return libbpf_err_errno(ret);
10423 /* Release "ownership" of underlying BPF resource (typically, BPF program
10424 * attached to some BPF hook, e.g., tracepoint, kprobe, etc). A disconnected
10425 * link, when destroyed through a bpf_link__destroy() call, won't attempt to
10426 * detach/unregister that BPF resource. This is useful in situations where,
10427 * say, the attached BPF program has to outlive the userspace program that attached it
10428 * in the system. Depending on type of BPF program, though, there might be
10429 * additional steps (like pinning BPF program in BPF FS) necessary to ensure
10430 * exit of userspace program doesn't trigger automatic detachment and clean up
10431 * inside the kernel.
10433 void bpf_link__disconnect(struct bpf_link *link)
10435 link->disconnected = true;
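/* Usage sketch (illustrative): keeping an attachment alive after the
 * attaching process exits by pinning the link and disconnecting before
 * destruction ("/sys/fs/bpf/my_link" is a hypothetical pin path):
 *
 *	if (bpf_link__pin(link, "/sys/fs/bpf/my_link"))
 *		... handle error ...
 *	bpf_link__disconnect(link);
 *	bpf_link__destroy(link);   frees memory only; the pinned
 *				   kernel-side link stays attached
 */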
10438 int bpf_link__destroy(struct bpf_link *link)
10442 if (IS_ERR_OR_NULL(link))
10445 if (!link->disconnected && link->detach)
10446 err = link->detach(link);
10447 if (link->pin_path)
10448 free(link->pin_path);
10450 link->dealloc(link);
10454 return libbpf_err(err);
10457 int bpf_link__fd(const struct bpf_link *link)
10462 const char *bpf_link__pin_path(const struct bpf_link *link)
10464 return link->pin_path;
10467 static int bpf_link__detach_fd(struct bpf_link *link)
10469 return libbpf_err_errno(close(link->fd));
10472 struct bpf_link *bpf_link__open(const char *path)
10474 struct bpf_link *link;
10477 fd = bpf_obj_get(path);
10480 pr_warn("failed to open link at %s: %d\n", path, fd);
10481 return libbpf_err_ptr(fd);
10484 link = calloc(1, sizeof(*link));
10487 return libbpf_err_ptr(-ENOMEM);
10489 link->detach = &bpf_link__detach_fd;
10492 link->pin_path = strdup(path);
10493 if (!link->pin_path) {
10494 bpf_link__destroy(link);
10495 return libbpf_err_ptr(-ENOMEM);
10501 int bpf_link__detach(struct bpf_link *link)
10503 return bpf_link_detach(link->fd) ? -errno : 0;
10506 int bpf_link__pin(struct bpf_link *link, const char *path)
10510 if (link->pin_path)
10511 return libbpf_err(-EBUSY);
10512 err = make_parent_dir(path);
10514 return libbpf_err(err);
10515 err = check_path(path);
10517 return libbpf_err(err);
10519 link->pin_path = strdup(path);
10520 if (!link->pin_path)
10521 return libbpf_err(-ENOMEM);
10523 if (bpf_obj_pin(link->fd, link->pin_path)) {
10525 zfree(&link->pin_path);
10526 return libbpf_err(err);
10529 pr_debug("link fd=%d: pinned at %s\n", link->fd, link->pin_path);
10533 int bpf_link__unpin(struct bpf_link *link)
10537 if (!link->pin_path)
10538 return libbpf_err(-EINVAL);
10540 err = unlink(link->pin_path);
10544 pr_debug("link fd=%d: unpinned from %s\n", link->fd, link->pin_path);
10545 zfree(&link->pin_path);
10549 struct bpf_link_perf {
10550 struct bpf_link link;
10552 /* legacy kprobe support: keep track of probe identifier and type */
10553 char *legacy_probe_name;
10554 bool legacy_is_kprobe;
10555 bool legacy_is_retprobe;
10558 static int remove_kprobe_event_legacy(const char *probe_name, bool retprobe);
10559 static int remove_uprobe_event_legacy(const char *probe_name, bool retprobe);
10561 static int bpf_link_perf_detach(struct bpf_link *link)
10563 struct bpf_link_perf *perf_link = container_of(link, struct bpf_link_perf, link);
10566 if (ioctl(perf_link->perf_event_fd, PERF_EVENT_IOC_DISABLE, 0) < 0)
10569 if (perf_link->perf_event_fd != link->fd)
10570 close(perf_link->perf_event_fd);
10573 /* legacy uprobe/kprobe needs to be removed after perf event fd closure */
10574 if (perf_link->legacy_probe_name) {
10575 if (perf_link->legacy_is_kprobe) {
10576 err = remove_kprobe_event_legacy(perf_link->legacy_probe_name,
10577 perf_link->legacy_is_retprobe);
10579 err = remove_uprobe_event_legacy(perf_link->legacy_probe_name,
10580 perf_link->legacy_is_retprobe);
10587 static void bpf_link_perf_dealloc(struct bpf_link *link)
10589 struct bpf_link_perf *perf_link = container_of(link, struct bpf_link_perf, link);
10591 free(perf_link->legacy_probe_name);
10595 struct bpf_link *bpf_program__attach_perf_event_opts(const struct bpf_program *prog, int pfd,
10596 const struct bpf_perf_event_opts *opts)
10598 char errmsg[STRERR_BUFSIZE];
10599 struct bpf_link_perf *link;
10600 int prog_fd, link_fd = -1, err;
10601 bool force_ioctl_attach;
10603 if (!OPTS_VALID(opts, bpf_perf_event_opts))
10604 return libbpf_err_ptr(-EINVAL);
10607 pr_warn("prog '%s': invalid perf event FD %d\n",
10609 return libbpf_err_ptr(-EINVAL);
10611 prog_fd = bpf_program__fd(prog);
10613 pr_warn("prog '%s': can't attach BPF program w/o FD (did you load it?)\n",
10615 return libbpf_err_ptr(-EINVAL);
10618 link = calloc(1, sizeof(*link));
10620 return libbpf_err_ptr(-ENOMEM);
10621 link->link.detach = &bpf_link_perf_detach;
10622 link->link.dealloc = &bpf_link_perf_dealloc;
10623 link->perf_event_fd = pfd;
10625 force_ioctl_attach = OPTS_GET(opts, force_ioctl_attach, false);
10626 if (kernel_supports(prog->obj, FEAT_PERF_LINK) && !force_ioctl_attach) {
10627 DECLARE_LIBBPF_OPTS(bpf_link_create_opts, link_opts,
10628 .perf_event.bpf_cookie = OPTS_GET(opts, bpf_cookie, 0));
10630 link_fd = bpf_link_create(prog_fd, pfd, BPF_PERF_EVENT, &link_opts);
10633 pr_warn("prog '%s': failed to create BPF link for perf_event FD %d: %d (%s)\n",
10635 err, libbpf_strerror_r(err, errmsg, sizeof(errmsg)));
10638 link->link.fd = link_fd;
10640 if (OPTS_GET(opts, bpf_cookie, 0)) {
10641 pr_warn("prog '%s': user context value is not supported\n", prog->name);
10646 if (ioctl(pfd, PERF_EVENT_IOC_SET_BPF, prog_fd) < 0) {
10648 pr_warn("prog '%s': failed to attach to perf_event FD %d: %s\n",
10649 prog->name, pfd, libbpf_strerror_r(err, errmsg, sizeof(errmsg)));
10650 if (err == -EPROTO)
10651 pr_warn("prog '%s': try add PERF_SAMPLE_CALLCHAIN to or remove exclude_callchain_[kernel|user] from pfd %d\n",
10655 link->link.fd = pfd;
10657 if (ioctl(pfd, PERF_EVENT_IOC_ENABLE, 0) < 0) {
10659 pr_warn("prog '%s': failed to enable perf_event FD %d: %s\n",
10660 prog->name, pfd, libbpf_strerror_r(err, errmsg, sizeof(errmsg)));
10664 return &link->link;
10669 return libbpf_err_ptr(err);
10672 struct bpf_link *bpf_program__attach_perf_event(const struct bpf_program *prog, int pfd)
10674 return bpf_program__attach_perf_event_opts(prog, pfd, NULL);
10678 * this function is expected to parse an integer in the range of [0, 2^31-1]
10679 * from the given file using scanf format string fmt. If the actual parsed
10680 * value is negative, the result might be indistinguishable from an error
10682 static int parse_uint_from_file(const char *file, const char *fmt)
10684 char buf[STRERR_BUFSIZE];
10688 f = fopen(file, "re");
10691 pr_debug("failed to open '%s': %s\n", file,
10692 libbpf_strerror_r(err, buf, sizeof(buf)));
10695 err = fscanf(f, fmt, &ret);
10697 err = err == EOF ? -EIO : -errno;
10698 pr_debug("failed to parse '%s': %s\n", file,
10699 libbpf_strerror_r(err, buf, sizeof(buf)));
10707 static int determine_kprobe_perf_type(void)
10709 const char *file = "/sys/bus/event_source/devices/kprobe/type";
10711 return parse_uint_from_file(file, "%d\n");
10714 static int determine_uprobe_perf_type(void)
10716 const char *file = "/sys/bus/event_source/devices/uprobe/type";
10718 return parse_uint_from_file(file, "%d\n");
10721 static int determine_kprobe_retprobe_bit(void)
10723 const char *file = "/sys/bus/event_source/devices/kprobe/format/retprobe";
10725 return parse_uint_from_file(file, "config:%d\n");
10728 static int determine_uprobe_retprobe_bit(void)
10730 const char *file = "/sys/bus/event_source/devices/uprobe/format/retprobe";
10732 return parse_uint_from_file(file, "config:%d\n");
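/* Example (illustrative): on typical kernels the format/retprobe file
 * contains a line like "config:0", meaning the retprobe flag is bit 0 of
 * perf_event_attr.config; the parsed bit number is applied below via
 * attr.config |= 1 << bit.
 */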
10735 #define PERF_UPROBE_REF_CTR_OFFSET_BITS 32
10736 #define PERF_UPROBE_REF_CTR_OFFSET_SHIFT 32
10738 static int perf_event_open_probe(bool uprobe, bool retprobe, const char *name,
10739 uint64_t offset, int pid, size_t ref_ctr_off)
10741 const size_t attr_sz = sizeof(struct perf_event_attr);
10742 struct perf_event_attr attr;
10743 char errmsg[STRERR_BUFSIZE];
10746 if ((__u64)ref_ctr_off >= (1ULL << PERF_UPROBE_REF_CTR_OFFSET_BITS))
10749 memset(&attr, 0, attr_sz);
10751 type = uprobe ? determine_uprobe_perf_type()
10752 : determine_kprobe_perf_type();
10754 pr_warn("failed to determine %s perf type: %s\n",
10755 uprobe ? "uprobe" : "kprobe",
10756 libbpf_strerror_r(type, errmsg, sizeof(errmsg)));
10760 int bit = uprobe ? determine_uprobe_retprobe_bit()
10761 : determine_kprobe_retprobe_bit();
10764 pr_warn("failed to determine %s retprobe bit: %s\n",
10765 uprobe ? "uprobe" : "kprobe",
10766 libbpf_strerror_r(bit, errmsg, sizeof(errmsg)));
10769 attr.config |= 1 << bit;
10771 attr.size = attr_sz;
10773 attr.config |= (__u64)ref_ctr_off << PERF_UPROBE_REF_CTR_OFFSET_SHIFT;
10774 attr.config1 = ptr_to_u64(name); /* kprobe_func or uprobe_path */
10775 attr.config2 = offset; /* kprobe_addr or probe_offset */
10777 /* pid filter is meaningful only for uprobes */
10778 pfd = syscall(__NR_perf_event_open, &attr,
10779 pid < 0 ? -1 : pid /* pid */,
10780 pid == -1 ? 0 : -1 /* cpu */,
10781 -1 /* group_fd */, PERF_FLAG_FD_CLOEXEC);
10782 return pfd >= 0 ? pfd : -errno;
10785 static int append_to_file(const char *file, const char *fmt, ...)
10787 int fd, n, err = 0;
10792 n = vsnprintf(buf, sizeof(buf), fmt, ap);
10795 if (n < 0 || n >= sizeof(buf))
10798 fd = open(file, O_WRONLY | O_APPEND | O_CLOEXEC, 0);
10802 if (write(fd, buf, n) < 0)
10809 #define DEBUGFS "/sys/kernel/debug/tracing"
10810 #define TRACEFS "/sys/kernel/tracing"
10812 static bool use_debugfs(void)
10814 static int has_debugfs = -1;
10816 if (has_debugfs < 0)
10817 has_debugfs = faccessat(AT_FDCWD, DEBUGFS, F_OK, AT_EACCESS) == 0;
10819 return has_debugfs == 1;
10822 static const char *tracefs_path(void)
10824 return use_debugfs() ? DEBUGFS : TRACEFS;
10827 static const char *tracefs_kprobe_events(void)
10829 return use_debugfs() ? DEBUGFS"/kprobe_events" : TRACEFS"/kprobe_events";
10832 static const char *tracefs_uprobe_events(void)
10834 return use_debugfs() ? DEBUGFS"/uprobe_events" : TRACEFS"/uprobe_events";
10837 static const char *tracefs_available_filter_functions(void)
10839 return use_debugfs() ? DEBUGFS"/available_filter_functions"
10840 : TRACEFS"/available_filter_functions";
10843 static const char *tracefs_available_filter_functions_addrs(void)
10845 return use_debugfs() ? DEBUGFS"/available_filter_functions_addrs"
10846 : TRACEFS"/available_filter_functions_addrs";
10849 static void gen_kprobe_legacy_event_name(char *buf, size_t buf_sz,
10850 const char *kfunc_name, size_t offset)
10852 static int index = 0;
10855 snprintf(buf, buf_sz, "libbpf_%u_%s_0x%zx_%d", getpid(), kfunc_name, offset,
10856 __sync_fetch_and_add(&index, 1));
10858 /* sanitize the generated probe name (tracefs event names allow only alphanumerics and '_') */
10859 for (i = 0; buf[i]; i++) {
10860 if (!isalnum(buf[i]))
10865 static int add_kprobe_event_legacy(const char *probe_name, bool retprobe,
10866 const char *kfunc_name, size_t offset)
10868 return append_to_file(tracefs_kprobe_events(), "%c:%s/%s %s+0x%zx",
10869 retprobe ? 'r' : 'p',
10870 retprobe ? "kretprobes" : "kprobes",
10871 probe_name, kfunc_name, offset);
10874 static int remove_kprobe_event_legacy(const char *probe_name, bool retprobe)
10876 return append_to_file(tracefs_kprobe_events(), "-:%s/%s",
10877 retprobe ? "kretprobes" : "kprobes", probe_name);
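/* Example (illustrative): for probe_name "libbpf_1234_do_unlinkat_0x0_0",
 * kfunc "do_unlinkat" and offset 0, add_kprobe_event_legacy() appends
 *
 *	p:kprobes/libbpf_1234_do_unlinkat_0x0_0 do_unlinkat+0x0
 *
 * to kprobe_events, and remove_kprobe_event_legacy() later appends
 *
 *	-:kprobes/libbpf_1234_do_unlinkat_0x0_0
 */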
10880 static int determine_kprobe_perf_type_legacy(const char *probe_name, bool retprobe)
10884 snprintf(file, sizeof(file), "%s/events/%s/%s/id",
10885 tracefs_path(), retprobe ? "kretprobes" : "kprobes", probe_name);
10887 return parse_uint_from_file(file, "%d\n");
10890 static int perf_event_kprobe_open_legacy(const char *probe_name, bool retprobe,
10891 const char *kfunc_name, size_t offset, int pid)
10893 const size_t attr_sz = sizeof(struct perf_event_attr);
10894 struct perf_event_attr attr;
10895 char errmsg[STRERR_BUFSIZE];
10896 int type, pfd, err;
10898 err = add_kprobe_event_legacy(probe_name, retprobe, kfunc_name, offset);
10900 pr_warn("failed to add legacy kprobe event for '%s+0x%zx': %s\n",
10901 kfunc_name, offset,
10902 libbpf_strerror_r(err, errmsg, sizeof(errmsg)));
10905 type = determine_kprobe_perf_type_legacy(probe_name, retprobe);
10908 pr_warn("failed to determine legacy kprobe event id for '%s+0x%zx': %s\n",
10909 kfunc_name, offset,
10910 libbpf_strerror_r(err, errmsg, sizeof(errmsg)));
10911 goto err_clean_legacy;
10914 memset(&attr, 0, attr_sz);
10915 attr.size = attr_sz;
10916 attr.config = type;
10917 attr.type = PERF_TYPE_TRACEPOINT;
10919 pfd = syscall(__NR_perf_event_open, &attr,
10920 pid < 0 ? -1 : pid, /* pid */
10921 pid == -1 ? 0 : -1, /* cpu */
10922 -1 /* group_fd */, PERF_FLAG_FD_CLOEXEC);
10925 pr_warn("legacy kprobe perf_event_open() failed: %s\n",
10926 libbpf_strerror_r(err, errmsg, sizeof(errmsg)));
10927 goto err_clean_legacy;
10932 /* Clear the newly added legacy kprobe_event */
10933 remove_kprobe_event_legacy(probe_name, retprobe);
10937 static const char *arch_specific_syscall_pfx(void)
10939 #if defined(__x86_64__)
10941 #elif defined(__i386__)
10943 #elif defined(__s390x__)
10945 #elif defined(__s390__)
10947 #elif defined(__arm__)
10949 #elif defined(__aarch64__)
10951 #elif defined(__mips__)
10953 #elif defined(__riscv)
10955 #elif defined(__powerpc64__) /* check before __powerpc__, which ppc64 also defines */
10956 return "powerpc64";
10957 #elif defined(__powerpc__)
10958 return "powerpc";
10964 int probe_kern_syscall_wrapper(int token_fd)
10966 char syscall_name[64];
10967 const char *ksys_pfx;
10969 ksys_pfx = arch_specific_syscall_pfx();
10973 snprintf(syscall_name, sizeof(syscall_name), "__%s_sys_bpf", ksys_pfx);
10975 if (determine_kprobe_perf_type() >= 0) {
10978 pfd = perf_event_open_probe(false, false, syscall_name, 0, getpid(), 0);
10982 return pfd >= 0 ? 1 : 0;
10983 } else { /* legacy mode */
10984 char probe_name[128];
10986 gen_kprobe_legacy_event_name(probe_name, sizeof(probe_name), syscall_name, 0);
10987 if (add_kprobe_event_legacy(probe_name, false, syscall_name, 0) < 0)
10990 (void)remove_kprobe_event_legacy(probe_name, false);
10996 bpf_program__attach_kprobe_opts(const struct bpf_program *prog,
10997 const char *func_name,
10998 const struct bpf_kprobe_opts *opts)
11000 DECLARE_LIBBPF_OPTS(bpf_perf_event_opts, pe_opts);
11001 enum probe_attach_mode attach_mode;
11002 char errmsg[STRERR_BUFSIZE];
11003 char *legacy_probe = NULL;
11004 struct bpf_link *link;
11006 bool retprobe, legacy;
11009 if (!OPTS_VALID(opts, bpf_kprobe_opts))
11010 return libbpf_err_ptr(-EINVAL);
11012 attach_mode = OPTS_GET(opts, attach_mode, PROBE_ATTACH_MODE_DEFAULT);
11013 retprobe = OPTS_GET(opts, retprobe, false);
11014 offset = OPTS_GET(opts, offset, 0);
11015 pe_opts.bpf_cookie = OPTS_GET(opts, bpf_cookie, 0);
11017 legacy = determine_kprobe_perf_type() < 0;
11018 switch (attach_mode) {
11019 case PROBE_ATTACH_MODE_LEGACY:
11021 pe_opts.force_ioctl_attach = true;
11023 case PROBE_ATTACH_MODE_PERF:
11025 return libbpf_err_ptr(-ENOTSUP);
11026 pe_opts.force_ioctl_attach = true;
11028 case PROBE_ATTACH_MODE_LINK:
11029 if (legacy || !kernel_supports(prog->obj, FEAT_PERF_LINK))
11030 return libbpf_err_ptr(-ENOTSUP);
11032 case PROBE_ATTACH_MODE_DEFAULT:
11035 return libbpf_err_ptr(-EINVAL);
11039 pfd = perf_event_open_probe(false /* uprobe */, retprobe,
11041 -1 /* pid */, 0 /* ref_ctr_off */);
11043 char probe_name[256];
11045 gen_kprobe_legacy_event_name(probe_name, sizeof(probe_name),
11046 func_name, offset);
11048 legacy_probe = strdup(probe_name);
11050 return libbpf_err_ptr(-ENOMEM);
11052 pfd = perf_event_kprobe_open_legacy(legacy_probe, retprobe, func_name,
11053 offset, -1 /* pid */);
11057 pr_warn("prog '%s': failed to create %s '%s+0x%zx' perf event: %s\n",
11058 prog->name, retprobe ? "kretprobe" : "kprobe",
11060 libbpf_strerror_r(err, errmsg, sizeof(errmsg)));
11063 link = bpf_program__attach_perf_event_opts(prog, pfd, &pe_opts);
11064 err = libbpf_get_error(link);
11067 pr_warn("prog '%s': failed to attach to %s '%s+0x%zx': %s\n",
11068 prog->name, retprobe ? "kretprobe" : "kprobe",
11070 libbpf_strerror_r(err, errmsg, sizeof(errmsg)));
11071 goto err_clean_legacy;
11074 struct bpf_link_perf *perf_link = container_of(link, struct bpf_link_perf, link);
11076 perf_link->legacy_probe_name = legacy_probe;
11077 perf_link->legacy_is_kprobe = true;
11078 perf_link->legacy_is_retprobe = retprobe;
11085 remove_kprobe_event_legacy(legacy_probe, retprobe);
11087 free(legacy_probe);
11088 return libbpf_err_ptr(err);
11091 struct bpf_link *bpf_program__attach_kprobe(const struct bpf_program *prog,
11093 const char *func_name)
11095 DECLARE_LIBBPF_OPTS(bpf_kprobe_opts, opts,
11096 .retprobe = retprobe,
11099 return bpf_program__attach_kprobe_opts(prog, func_name, &opts);
11102 struct bpf_link *bpf_program__attach_ksyscall(const struct bpf_program *prog,
11103 const char *syscall_name,
11104 const struct bpf_ksyscall_opts *opts)
11106 LIBBPF_OPTS(bpf_kprobe_opts, kprobe_opts);
11107 char func_name[128];
11109 if (!OPTS_VALID(opts, bpf_ksyscall_opts))
11110 return libbpf_err_ptr(-EINVAL);
11112 if (kernel_supports(prog->obj, FEAT_SYSCALL_WRAPPER)) {
11113 /* arch_specific_syscall_pfx() should never return NULL here
11114 * because it is guarded by kernel_supports(). However, since the
11115 * compiler does not know that, keep an explicit fallback conditional
11118 snprintf(func_name, sizeof(func_name), "__%s_sys_%s",
11119 arch_specific_syscall_pfx() ? : "", syscall_name);
11121 snprintf(func_name, sizeof(func_name), "__se_sys_%s", syscall_name);
11124 kprobe_opts.retprobe = OPTS_GET(opts, retprobe, false);
11125 kprobe_opts.bpf_cookie = OPTS_GET(opts, bpf_cookie, 0);
11127 return bpf_program__attach_kprobe_opts(prog, func_name, &kprobe_opts);
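/* Example (illustrative): with kernel syscall wrappers available,
 * SEC("ksyscall/unlinkat") on x86-64 turns into a kprobe on
 * "__x64_sys_unlinkat"; without wrapper support it falls back to
 * "__se_sys_unlinkat".
 */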
11130 /* Adapted from perf/util/string.c */
11131 bool glob_match(const char *str, const char *pat)
11133 while (*str && *pat && *pat != '*') {
11134 if (*pat == '?') { /* Matches any single character */
11144 /* Check wild card */
11146 while (*pat == '*')
11148 if (!*pat) /* Tail wild card matches all */
11151 if (glob_match(str++, pat))
11154 return !*str && !*pat;
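/* Examples (illustrative): glob_match("tcp_v4_connect", "tcp_*") and
 * glob_match("tcp_v4_connect", "tcp_v?_connect") are true;
 * glob_match("udp_sendmsg", "tcp_*") is false. Only '*' and '?' are
 * special; there are no character classes.
 */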
11157 struct kprobe_multi_resolve {
11158 const char *pattern;
11159 unsigned long *addrs;
11164 struct avail_kallsyms_data {
11167 struct kprobe_multi_resolve *res;
11170 static int avail_func_cmp(const void *a, const void *b)
11172 return strcmp(*(const char **)a, *(const char **)b);
11175 static int avail_kallsyms_cb(unsigned long long sym_addr, char sym_type,
11176 const char *sym_name, void *ctx)
11178 struct avail_kallsyms_data *data = ctx;
11179 struct kprobe_multi_resolve *res = data->res;
11182 if (!bsearch(&sym_name, data->syms, data->cnt, sizeof(*data->syms), avail_func_cmp))
11185 err = libbpf_ensure_mem((void **)&res->addrs, &res->cap, sizeof(*res->addrs), res->cnt + 1);
11189 res->addrs[res->cnt++] = (unsigned long)sym_addr;
11193 static int libbpf_available_kallsyms_parse(struct kprobe_multi_resolve *res)
11195 const char *available_functions_file = tracefs_available_filter_functions();
11196 struct avail_kallsyms_data data;
11197 char sym_name[500];
11199 int err = 0, ret, i;
11200 char **syms = NULL;
11201 size_t cap = 0, cnt = 0;
11203 f = fopen(available_functions_file, "re");
11206 pr_warn("failed to open %s: %d\n", available_functions_file, err);
11213 ret = fscanf(f, "%499s%*[^\n]\n", sym_name);
11214 if (ret == EOF && feof(f))
11218 pr_warn("failed to parse available_filter_functions entry: %d\n", ret);
11223 if (!glob_match(sym_name, res->pattern))
11226 err = libbpf_ensure_mem((void **)&syms, &cap, sizeof(*syms), cnt + 1);
11230 name = strdup(sym_name);
11236 syms[cnt++] = name;
11239 /* no entries found, bail out */
11245 /* sort available functions */
11246 qsort(syms, cnt, sizeof(*syms), avail_func_cmp);
11251 libbpf_kallsyms_parse(avail_kallsyms_cb, &data);
11257 for (i = 0; i < cnt; i++)
11258 free((char *)syms[i]);
11265 static bool has_available_filter_functions_addrs(void)
11267 return access(tracefs_available_filter_functions_addrs(), R_OK) != -1;
11270 static int libbpf_available_kprobes_parse(struct kprobe_multi_resolve *res)
11272 const char *available_path = tracefs_available_filter_functions_addrs();
11273 char sym_name[500];
11276 unsigned long long sym_addr;
11278 f = fopen(available_path, "re");
11281 pr_warn("failed to open %s: %d\n", available_path, err);
11286 ret = fscanf(f, "%llx %499s%*[^\n]\n", &sym_addr, sym_name);
11287 if (ret == EOF && feof(f))
11291 pr_warn("failed to parse available_filter_functions_addrs entry: %d\n",
11297 if (!glob_match(sym_name, res->pattern))
11300 err = libbpf_ensure_mem((void **)&res->addrs, &res->cap,
11301 sizeof(*res->addrs), res->cnt + 1);
11305 res->addrs[res->cnt++] = (unsigned long)sym_addr;
11317 bpf_program__attach_kprobe_multi_opts(const struct bpf_program *prog,
11318 const char *pattern,
11319 const struct bpf_kprobe_multi_opts *opts)
11321 LIBBPF_OPTS(bpf_link_create_opts, lopts);
11322 struct kprobe_multi_resolve res = {
11323 .pattern = pattern,
11325 struct bpf_link *link = NULL;
11326 char errmsg[STRERR_BUFSIZE];
11327 const unsigned long *addrs;
11328 int err, link_fd, prog_fd;
11329 const __u64 *cookies;
11334 if (!OPTS_VALID(opts, bpf_kprobe_multi_opts))
11335 return libbpf_err_ptr(-EINVAL);
11337 syms = OPTS_GET(opts, syms, false);
11338 addrs = OPTS_GET(opts, addrs, false);
11339 cnt = OPTS_GET(opts, cnt, false);
11340 cookies = OPTS_GET(opts, cookies, false);
11342 if (!pattern && !addrs && !syms)
11343 return libbpf_err_ptr(-EINVAL);
11344 if (pattern && (addrs || syms || cookies || cnt))
11345 return libbpf_err_ptr(-EINVAL);
11346 if (!pattern && !cnt)
11347 return libbpf_err_ptr(-EINVAL);
11349 return libbpf_err_ptr(-EINVAL);
11352 if (has_available_filter_functions_addrs())
11353 err = libbpf_available_kprobes_parse(&res);
11355 err = libbpf_available_kallsyms_parse(&res);
11362 retprobe = OPTS_GET(opts, retprobe, false);
11364 lopts.kprobe_multi.syms = syms;
11365 lopts.kprobe_multi.addrs = addrs;
11366 lopts.kprobe_multi.cookies = cookies;
11367 lopts.kprobe_multi.cnt = cnt;
11368 lopts.kprobe_multi.flags = retprobe ? BPF_F_KPROBE_MULTI_RETURN : 0;
11370 link = calloc(1, sizeof(*link));
11375 link->detach = &bpf_link__detach_fd;
11377 prog_fd = bpf_program__fd(prog);
11378 link_fd = bpf_link_create(prog_fd, 0, BPF_TRACE_KPROBE_MULTI, &lopts);
11381 pr_warn("prog '%s': failed to attach: %s\n",
11382 prog->name, libbpf_strerror_r(err, errmsg, sizeof(errmsg)));
11385 link->fd = link_fd;
11392 return libbpf_err_ptr(err);
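/* A minimal usage sketch (user code; program name and pattern are
 * hypothetical):
 *
 *	LIBBPF_OPTS(bpf_kprobe_multi_opts, opts);
 *	struct bpf_link *link;
 *
 *	link = bpf_program__attach_kprobe_multi_opts(skel->progs.my_prog,
 *						     "tcp_*", &opts);
 *
 * On failure NULL is returned and errno describes the error.
 */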
11395 static int attach_kprobe(const struct bpf_program *prog, long cookie, struct bpf_link **link)
11397 DECLARE_LIBBPF_OPTS(bpf_kprobe_opts, opts);
11398 unsigned long offset = 0;
11399 const char *func_name;
11405 /* no auto-attach for SEC("kprobe") and SEC("kretprobe") */
11406 if (strcmp(prog->sec_name, "kprobe") == 0 || strcmp(prog->sec_name, "kretprobe") == 0)
11409 opts.retprobe = str_has_pfx(prog->sec_name, "kretprobe/");
11411 func_name = prog->sec_name + sizeof("kretprobe/") - 1;
11413 func_name = prog->sec_name + sizeof("kprobe/") - 1;
11415 n = sscanf(func_name, "%m[a-zA-Z0-9_.]+%li", &func, &offset);
11417 pr_warn("kprobe name is invalid: %s\n", func_name);
11420 if (opts.retprobe && offset != 0) {
11422 pr_warn("kretprobes do not support offset specification\n");
11426 opts.offset = offset;
11427 *link = bpf_program__attach_kprobe_opts(prog, func, &opts);
11429 return libbpf_get_error(*link);
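/* Illustrative SEC() names accepted by the parsing above (function names
 * hypothetical): SEC("kprobe/tcp_connect"), SEC("kprobe/tcp_connect+16"),
 * SEC("kretprobe/tcp_connect"). An offset is rejected for kretprobes.
 */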
11432 static int attach_ksyscall(const struct bpf_program *prog, long cookie, struct bpf_link **link)
11434 LIBBPF_OPTS(bpf_ksyscall_opts, opts);
11435 const char *syscall_name;
11439 /* no auto-attach for SEC("ksyscall") and SEC("kretsyscall") */
11440 if (strcmp(prog->sec_name, "ksyscall") == 0 || strcmp(prog->sec_name, "kretsyscall") == 0)
11443 opts.retprobe = str_has_pfx(prog->sec_name, "kretsyscall/");
11445 syscall_name = prog->sec_name + sizeof("kretsyscall/") - 1;
11447 syscall_name = prog->sec_name + sizeof("ksyscall/") - 1;
11449 *link = bpf_program__attach_ksyscall(prog, syscall_name, &opts);
11450 return *link ? 0 : -errno;
11453 static int attach_kprobe_multi(const struct bpf_program *prog, long cookie, struct bpf_link **link)
11455 LIBBPF_OPTS(bpf_kprobe_multi_opts, opts);
11462 /* no auto-attach for SEC("kprobe.multi") and SEC("kretprobe.multi") */
11463 if (strcmp(prog->sec_name, "kprobe.multi") == 0 ||
11464 strcmp(prog->sec_name, "kretprobe.multi") == 0)
11467 opts.retprobe = str_has_pfx(prog->sec_name, "kretprobe.multi/");
11469 spec = prog->sec_name + sizeof("kretprobe.multi/") - 1;
11471 spec = prog->sec_name + sizeof("kprobe.multi/") - 1;
11473 n = sscanf(spec, "%m[a-zA-Z0-9_.*?]", &pattern);
11475 pr_warn("kprobe multi pattern is invalid: %s\n", pattern);
11479 *link = bpf_program__attach_kprobe_multi_opts(prog, pattern, &opts);
11481 return libbpf_get_error(*link);
11484 static int attach_uprobe_multi(const struct bpf_program *prog, long cookie, struct bpf_link **link)
11486 char *probe_type = NULL, *binary_path = NULL, *func_name = NULL;
11487 LIBBPF_OPTS(bpf_uprobe_multi_opts, opts);
11488 int n, ret = -EINVAL;
11492 n = sscanf(prog->sec_name, "%m[^/]/%m[^:]:%m[^\n]",
11493 &probe_type, &binary_path, &func_name);
11496 /* handle SEC("u[ret]probe.multi") - format is valid, but auto-attach is impossible. */
11500 opts.retprobe = strcmp(probe_type, "uretprobe.multi") == 0;
11501 *link = bpf_program__attach_uprobe_multi(prog, -1, binary_path, func_name, &opts);
11502 ret = libbpf_get_error(*link);
11505 pr_warn("prog '%s': invalid format of section definition '%s'\n", prog->name,
11515 static void gen_uprobe_legacy_event_name(char *buf, size_t buf_sz,
11516 const char *binary_path, uint64_t offset)
11520 snprintf(buf, buf_sz, "libbpf_%u_%s_0x%zx", getpid(), binary_path, (size_t)offset);
11522 /* sanitize binary_path in the probe name */
11523 for (i = 0; buf[i]; i++) {
11524 if (!isalnum(buf[i]))
11529 static inline int add_uprobe_event_legacy(const char *probe_name, bool retprobe,
11530 const char *binary_path, size_t offset)
11532 return append_to_file(tracefs_uprobe_events(), "%c:%s/%s %s:0x%zx",
11533 retprobe ? 'r' : 'p',
11534 retprobe ? "uretprobes" : "uprobes",
11535 probe_name, binary_path, offset);
11538 static inline int remove_uprobe_event_legacy(const char *probe_name, bool retprobe)
11540 return append_to_file(tracefs_uprobe_events(), "-:%s/%s",
11541 retprobe ? "uretprobes" : "uprobes", probe_name);
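/* For illustration (PID, path, and offset hypothetical), the two helpers
 * above append lines like these to <tracefs>/uprobe_events:
 *
 *	p:uprobes/libbpf_1234__usr_bin_app_0x400 /usr/bin/app:0x400
 *	-:uprobes/libbpf_1234__usr_bin_app_0x400
 *
 * with the 'r:'/'uretprobes' spellings used for return probes.
 */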
11544 static int determine_uprobe_perf_type_legacy(const char *probe_name, bool retprobe)
11548 snprintf(file, sizeof(file), "%s/events/%s/%s/id",
11549 tracefs_path(), retprobe ? "uretprobes" : "uprobes", probe_name);
11551 return parse_uint_from_file(file, "%d\n");
11554 static int perf_event_uprobe_open_legacy(const char *probe_name, bool retprobe,
11555 const char *binary_path, size_t offset, int pid)
11557 const size_t attr_sz = sizeof(struct perf_event_attr);
11558 struct perf_event_attr attr;
11559 int type, pfd, err;
11561 err = add_uprobe_event_legacy(probe_name, retprobe, binary_path, offset);
11563 pr_warn("failed to add legacy uprobe event for %s:0x%zx: %d\n",
11564 binary_path, (size_t)offset, err);
11567 type = determine_uprobe_perf_type_legacy(probe_name, retprobe);
11570 pr_warn("failed to determine legacy uprobe event id for %s:0x%zx: %d\n",
11571 binary_path, offset, err);
11572 goto err_clean_legacy;
11575 memset(&attr, 0, attr_sz);
11576 attr.size = attr_sz;
11577 attr.config = type;
11578 attr.type = PERF_TYPE_TRACEPOINT;
11580 pfd = syscall(__NR_perf_event_open, &attr,
11581 pid < 0 ? -1 : pid, /* pid */
11582 pid == -1 ? 0 : -1, /* cpu */
11583 -1 /* group_fd */, PERF_FLAG_FD_CLOEXEC);
11586 pr_warn("legacy uprobe perf_event_open() failed: %d\n", err);
11587 goto err_clean_legacy;
11592 /* Clear the newly added legacy uprobe_event */
11593 remove_uprobe_event_legacy(probe_name, retprobe);
11597 /* Find offset of function name in archive specified by path. Currently
11598 * supported are .zip files that do not compress their contents, as used on
11599 * Android in the form of APKs, for example. "file_name" is the name of the ELF
11600 * file inside the archive. "func_name" matches symbol name or name@@LIB for
11601 * library functions.
11603 * An overview of the APK format is provided here:
11604 * https://en.wikipedia.org/w/index.php?title=Apk_(file_format)&oldid=1139099120#Package_contents
11606 static long elf_find_func_offset_from_archive(const char *archive_path, const char *file_name,
11607 const char *func_name)
11609 struct zip_archive *archive;
11610 struct zip_entry entry;
11614 archive = zip_archive_open(archive_path);
11615 if (IS_ERR(archive)) {
11616 ret = PTR_ERR(archive);
11617 pr_warn("zip: failed to open %s: %ld\n", archive_path, ret);
11621 ret = zip_archive_find_entry(archive, file_name, &entry);
11623 pr_warn("zip: could not find archive member %s in %s: %ld\n", file_name,
11624 archive_path, ret);
11627 pr_debug("zip: found entry for %s in %s at 0x%lx\n", file_name, archive_path,
11628 (unsigned long)entry.data_offset);
11630 if (entry.compression) {
11631 pr_warn("zip: entry %s of %s is compressed and cannot be handled\n", file_name,
11633 ret = -LIBBPF_ERRNO__FORMAT;
11637 elf = elf_memory((void *)entry.data, entry.data_length);
11639 pr_warn("elf: could not read elf file %s from %s: %s\n", file_name, archive_path,
11641 ret = -LIBBPF_ERRNO__LIBELF;
11645 ret = elf_find_func_offset(elf, file_name, func_name);
11647 pr_debug("elf: symbol address match for %s of %s in %s: 0x%x + 0x%lx = 0x%lx\n",
11648 func_name, file_name, archive_path, entry.data_offset, ret,
11649 ret + entry.data_offset);
11650 ret += entry.data_offset;
11655 zip_archive_close(archive);
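/* Callers reach this helper via binary paths of the form
 * "<archive>!/<elf-member>", e.g. the hypothetical
 * "/data/app/base.apk!/lib/arm64-v8a/libfoo.so"; the split is done in
 * bpf_program__attach_uprobe_opts() below.
 */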
11659 static const char *arch_specific_lib_paths(void)
11662 * Based on https://packages.debian.org/sid/libc6.
11664 * Assume that the traced program is built for the same architecture
11665 * as libbpf, which should cover the vast majority of cases.
11667 #if defined(__x86_64__)
11668 return "/lib/x86_64-linux-gnu";
11669 #elif defined(__i386__)
11670 return "/lib/i386-linux-gnu";
11671 #elif defined(__s390x__)
11672 return "/lib/s390x-linux-gnu";
11673 #elif defined(__s390__)
11674 return "/lib/s390-linux-gnu";
11675 #elif defined(__arm__) && defined(__SOFTFP__)
11676 return "/lib/arm-linux-gnueabi";
11677 #elif defined(__arm__) && !defined(__SOFTFP__)
11678 return "/lib/arm-linux-gnueabihf";
11679 #elif defined(__aarch64__)
11680 return "/lib/aarch64-linux-gnu";
11681 #elif defined(__mips__) && defined(__MIPSEL__) && _MIPS_SZLONG == 64
11682 return "/lib/mips64el-linux-gnuabi64";
11683 #elif defined(__mips__) && defined(__MIPSEL__) && _MIPS_SZLONG == 32
11684 return "/lib/mipsel-linux-gnu";
11685 #elif defined(__powerpc64__) && __BYTE_ORDER__ == __ORDER_LITTLE_ENDIAN__
11686 return "/lib/powerpc64le-linux-gnu";
11687 #elif defined(__sparc__) && defined(__arch64__)
11688 return "/lib/sparc64-linux-gnu";
11689 #elif defined(__riscv) && __riscv_xlen == 64
11690 return "/lib/riscv64-linux-gnu";
11696 /* Get full path to program/shared library. */
11697 static int resolve_full_path(const char *file, char *result, size_t result_sz)
11699 const char *search_paths[3] = {};
11702 if (str_has_sfx(file, ".so") || strstr(file, ".so.")) {
11703 search_paths[0] = getenv("LD_LIBRARY_PATH");
11704 search_paths[1] = "/usr/lib64:/usr/lib";
11705 search_paths[2] = arch_specific_lib_paths();
11708 search_paths[0] = getenv("PATH");
11709 search_paths[1] = "/usr/bin:/usr/sbin";
11710 perm = R_OK | X_OK;
11713 for (i = 0; i < ARRAY_SIZE(search_paths); i++) {
11716 if (!search_paths[i])
11718 for (s = search_paths[i]; s != NULL; s = strchr(s, ':')) {
11724 next_path = strchr(s, ':');
11725 seg_len = next_path ? next_path - s : strlen(s);
11728 snprintf(result, result_sz, "%.*s/%s", seg_len, s, file);
11729 /* ensure it has required permissions */
11730 if (faccessat(AT_FDCWD, result, perm, AT_EACCESS) < 0)
11732 pr_debug("resolved '%s' to '%s'\n", file, result);
11740 bpf_program__attach_uprobe_multi(const struct bpf_program *prog,
11743 const char *func_pattern,
11744 const struct bpf_uprobe_multi_opts *opts)
11746 const unsigned long *ref_ctr_offsets = NULL, *offsets = NULL;
11747 LIBBPF_OPTS(bpf_link_create_opts, lopts);
11748 unsigned long *resolved_offsets = NULL;
11749 int err = 0, link_fd, prog_fd;
11750 struct bpf_link *link = NULL;
11751 char errmsg[STRERR_BUFSIZE];
11752 char full_path[PATH_MAX];
11753 const __u64 *cookies;
11757 if (!OPTS_VALID(opts, bpf_uprobe_multi_opts))
11758 return libbpf_err_ptr(-EINVAL);
11760 syms = OPTS_GET(opts, syms, NULL);
11761 offsets = OPTS_GET(opts, offsets, NULL);
11762 ref_ctr_offsets = OPTS_GET(opts, ref_ctr_offsets, NULL);
11763 cookies = OPTS_GET(opts, cookies, NULL);
11764 cnt = OPTS_GET(opts, cnt, 0);
11767 * The user can specify two mutually exclusive sets of inputs:
11769 * 1) use only path/func_pattern/pid arguments
11771 * 2) use path/pid with allowed combinations of:
11772 * syms/offsets/ref_ctr_offsets/cookies/cnt
11774 * - syms and offsets are mutually exclusive
11775 * - ref_ctr_offsets and cookies are optional
11777 * Any other usage results in error.
11781 return libbpf_err_ptr(-EINVAL);
11782 if (!func_pattern && cnt == 0)
11783 return libbpf_err_ptr(-EINVAL);
11785 if (func_pattern) {
11786 if (syms || offsets || ref_ctr_offsets || cookies || cnt)
11787 return libbpf_err_ptr(-EINVAL);
11789 if (!!syms == !!offsets)
11790 return libbpf_err_ptr(-EINVAL);
11793 if (func_pattern) {
11794 if (!strchr(path, '/')) {
11795 err = resolve_full_path(path, full_path, sizeof(full_path));
11797 pr_warn("prog '%s': failed to resolve full path for '%s': %d\n",
11798 prog->name, path, err);
11799 return libbpf_err_ptr(err);
11804 err = elf_resolve_pattern_offsets(path, func_pattern,
11805 &resolved_offsets, &cnt);
11807 return libbpf_err_ptr(err);
11808 offsets = resolved_offsets;
11810 err = elf_resolve_syms_offsets(path, cnt, syms, &resolved_offsets, STT_FUNC);
11812 return libbpf_err_ptr(err);
11813 offsets = resolved_offsets;
11816 lopts.uprobe_multi.path = path;
11817 lopts.uprobe_multi.offsets = offsets;
11818 lopts.uprobe_multi.ref_ctr_offsets = ref_ctr_offsets;
11819 lopts.uprobe_multi.cookies = cookies;
11820 lopts.uprobe_multi.cnt = cnt;
11821 lopts.uprobe_multi.flags = OPTS_GET(opts, retprobe, false) ? BPF_F_UPROBE_MULTI_RETURN : 0;
11826 lopts.uprobe_multi.pid = pid;
11828 link = calloc(1, sizeof(*link));
11833 link->detach = &bpf_link__detach_fd;
11835 prog_fd = bpf_program__fd(prog);
11836 link_fd = bpf_link_create(prog_fd, 0, BPF_TRACE_UPROBE_MULTI, &lopts);
11839 pr_warn("prog '%s': failed to attach multi-uprobe: %s\n",
11840 prog->name, libbpf_strerror_r(err, errmsg, sizeof(errmsg)));
11843 link->fd = link_fd;
11844 free(resolved_offsets);
11848 free(resolved_offsets);
11850 return libbpf_err_ptr(err);
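/* A minimal usage sketch (user code; names hypothetical). Passing a
 * func_pattern with NULL opts selects the pattern-based mode validated
 * above; pid == -1 means "any process":
 *
 *	struct bpf_link *link;
 *
 *	link = bpf_program__attach_uprobe_multi(skel->progs.my_prog, -1,
 *						"/usr/bin/app", "foo_*", NULL);
 */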
11853 LIBBPF_API struct bpf_link *
11854 bpf_program__attach_uprobe_opts(const struct bpf_program *prog, pid_t pid,
11855 const char *binary_path, size_t func_offset,
11856 const struct bpf_uprobe_opts *opts)
11858 const char *archive_path = NULL, *archive_sep = NULL;
11859 char errmsg[STRERR_BUFSIZE], *legacy_probe = NULL;
11860 DECLARE_LIBBPF_OPTS(bpf_perf_event_opts, pe_opts);
11861 enum probe_attach_mode attach_mode;
11862 char full_path[PATH_MAX];
11863 struct bpf_link *link;
11864 size_t ref_ctr_off;
11866 bool retprobe, legacy;
11867 const char *func_name;
11869 if (!OPTS_VALID(opts, bpf_uprobe_opts))
11870 return libbpf_err_ptr(-EINVAL);
11872 attach_mode = OPTS_GET(opts, attach_mode, PROBE_ATTACH_MODE_DEFAULT);
11873 retprobe = OPTS_GET(opts, retprobe, false);
11874 ref_ctr_off = OPTS_GET(opts, ref_ctr_offset, 0);
11875 pe_opts.bpf_cookie = OPTS_GET(opts, bpf_cookie, 0);
11878 return libbpf_err_ptr(-EINVAL);
11880 /* Check if "binary_path" refers to an archive. */
11881 archive_sep = strstr(binary_path, "!/");
11883 full_path[0] = '\0';
11884 libbpf_strlcpy(full_path, binary_path,
11885 min(sizeof(full_path), (size_t)(archive_sep - binary_path + 1)));
11886 archive_path = full_path;
11887 binary_path = archive_sep + 2;
11888 } else if (!strchr(binary_path, '/')) {
11889 err = resolve_full_path(binary_path, full_path, sizeof(full_path));
11891 pr_warn("prog '%s': failed to resolve full path for '%s': %d\n",
11892 prog->name, binary_path, err);
11893 return libbpf_err_ptr(err);
11895 binary_path = full_path;
11897 func_name = OPTS_GET(opts, func_name, NULL);
11901 if (archive_path) {
11902 sym_off = elf_find_func_offset_from_archive(archive_path, binary_path,
11904 binary_path = archive_path;
11906 sym_off = elf_find_func_offset_from_file(binary_path, func_name);
11909 return libbpf_err_ptr(sym_off);
11910 func_offset += sym_off;
11913 legacy = determine_uprobe_perf_type() < 0;
11914 switch (attach_mode) {
11915 case PROBE_ATTACH_MODE_LEGACY:
11917 pe_opts.force_ioctl_attach = true;
11919 case PROBE_ATTACH_MODE_PERF:
11921 return libbpf_err_ptr(-ENOTSUP);
11922 pe_opts.force_ioctl_attach = true;
11924 case PROBE_ATTACH_MODE_LINK:
11925 if (legacy || !kernel_supports(prog->obj, FEAT_PERF_LINK))
11926 return libbpf_err_ptr(-ENOTSUP);
11928 case PROBE_ATTACH_MODE_DEFAULT:
11931 return libbpf_err_ptr(-EINVAL);
11935 pfd = perf_event_open_probe(true /* uprobe */, retprobe, binary_path,
11936 func_offset, pid, ref_ctr_off);
11938 char probe_name[PATH_MAX + 64];
11941 return libbpf_err_ptr(-EINVAL);
11943 gen_uprobe_legacy_event_name(probe_name, sizeof(probe_name),
11944 binary_path, func_offset);
11946 legacy_probe = strdup(probe_name);
11948 return libbpf_err_ptr(-ENOMEM);
11950 pfd = perf_event_uprobe_open_legacy(legacy_probe, retprobe,
11951 binary_path, func_offset, pid);
11955 pr_warn("prog '%s': failed to create %s '%s:0x%zx' perf event: %s\n",
11956 prog->name, retprobe ? "uretprobe" : "uprobe",
11957 binary_path, func_offset,
11958 libbpf_strerror_r(err, errmsg, sizeof(errmsg)));
11962 link = bpf_program__attach_perf_event_opts(prog, pfd, &pe_opts);
11963 err = libbpf_get_error(link);
11966 pr_warn("prog '%s': failed to attach to %s '%s:0x%zx': %s\n",
11967 prog->name, retprobe ? "uretprobe" : "uprobe",
11968 binary_path, func_offset,
11969 libbpf_strerror_r(err, errmsg, sizeof(errmsg)));
11970 goto err_clean_legacy;
11973 struct bpf_link_perf *perf_link = container_of(link, struct bpf_link_perf, link);
11975 perf_link->legacy_probe_name = legacy_probe;
11976 perf_link->legacy_is_kprobe = false;
11977 perf_link->legacy_is_retprobe = retprobe;
11983 remove_uprobe_event_legacy(legacy_probe, retprobe);
11985 free(legacy_probe);
11986 return libbpf_err_ptr(err);
11989 /* Format of u[ret]probe section definition supporting auto-attach:
11990 * u[ret]probe/binary:function[+offset]
11992 * binary can be an absolute/relative path or a filename; the latter is resolved to a
11993 * full binary path via bpf_program__attach_uprobe_opts.
11995 * Specifying uprobe+ ensures we carry out strict matching; either "uprobe" must be
11996 * specified (and auto-attach is not possible) or the above format is specified for auto-attach.
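/* Illustrative section names matching this format (binaries and functions
 * hypothetical):
 *	SEC("uprobe/libc.so.6:malloc")
 *	SEC("uprobe//usr/bin/app:main+0x10")
 *	SEC("uretprobe/libc.so.6:free")
 */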
11999 static int attach_uprobe(const struct bpf_program *prog, long cookie, struct bpf_link **link)
12001 DECLARE_LIBBPF_OPTS(bpf_uprobe_opts, opts);
12002 char *probe_type = NULL, *binary_path = NULL, *func_name = NULL, *func_off;
12003 int n, c, ret = -EINVAL;
12008 n = sscanf(prog->sec_name, "%m[^/]/%m[^:]:%m[^\n]",
12009 &probe_type, &binary_path, &func_name);
12012 /* handle SEC("u[ret]probe") - format is valid, but auto-attach is impossible. */
12016 pr_warn("prog '%s': section '%s' missing ':function[+offset]' specification\n",
12017 prog->name, prog->sec_name);
12020 /* check if user specified `+offset`; if so, it must be the last
12021 * part of the string - make sure sscanf read up to EOL
12023 func_off = strrchr(func_name, '+');
12025 n = sscanf(func_off, "+%li%n", &offset, &c);
12026 if (n == 1 && *(func_off + c) == '\0')
12027 func_off[0] = '\0';
12031 opts.retprobe = strcmp(probe_type, "uretprobe") == 0 ||
12032 strcmp(probe_type, "uretprobe.s") == 0;
12033 if (opts.retprobe && offset != 0) {
12034 pr_warn("prog '%s': uretprobes do not support offset specification\n",
12038 opts.func_name = func_name;
12039 *link = bpf_program__attach_uprobe_opts(prog, -1, binary_path, offset, &opts);
12040 ret = libbpf_get_error(*link);
12043 pr_warn("prog '%s': invalid format of section definition '%s'\n", prog->name,
12054 struct bpf_link *bpf_program__attach_uprobe(const struct bpf_program *prog,
12055 bool retprobe, pid_t pid,
12056 const char *binary_path,
12057 size_t func_offset)
12059 DECLARE_LIBBPF_OPTS(bpf_uprobe_opts, opts, .retprobe = retprobe);
12061 return bpf_program__attach_uprobe_opts(prog, pid, binary_path, func_offset, &opts);
12064 struct bpf_link *bpf_program__attach_usdt(const struct bpf_program *prog,
12065 pid_t pid, const char *binary_path,
12066 const char *usdt_provider, const char *usdt_name,
12067 const struct bpf_usdt_opts *opts)
12069 char resolved_path[512];
12070 struct bpf_object *obj = prog->obj;
12071 struct bpf_link *link;
12075 if (!OPTS_VALID(opts, bpf_usdt_opts))
12076 return libbpf_err_ptr(-EINVAL);
12078 if (bpf_program__fd(prog) < 0) {
12079 pr_warn("prog '%s': can't attach BPF program w/o FD (did you load it?)\n",
12081 return libbpf_err_ptr(-EINVAL);
12085 return libbpf_err_ptr(-EINVAL);
12087 if (!strchr(binary_path, '/')) {
12088 err = resolve_full_path(binary_path, resolved_path, sizeof(resolved_path));
12090 pr_warn("prog '%s': failed to resolve full path for '%s': %d\n",
12091 prog->name, binary_path, err);
12092 return libbpf_err_ptr(err);
12094 binary_path = resolved_path;
12097 /* USDT manager is instantiated lazily on first USDT attach. It will
12098 * be destroyed together with BPF object in bpf_object__close().
12100 if (IS_ERR(obj->usdt_man))
12101 return libbpf_ptr(obj->usdt_man);
12102 if (!obj->usdt_man) {
12103 obj->usdt_man = usdt_manager_new(obj);
12104 if (IS_ERR(obj->usdt_man))
12105 return libbpf_ptr(obj->usdt_man);
12108 usdt_cookie = OPTS_GET(opts, usdt_cookie, 0);
12109 link = usdt_manager_attach_usdt(obj->usdt_man, prog, pid, binary_path,
12110 usdt_provider, usdt_name, usdt_cookie);
12111 err = libbpf_get_error(link);
12113 return libbpf_err_ptr(err);
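/* A minimal usage sketch (user code; binary, provider, and probe names
 * hypothetical):
 *
 *	struct bpf_link *link;
 *
 *	link = bpf_program__attach_usdt(skel->progs.my_usdt, -1,
 *					"/usr/sbin/mysqld", "mysql",
 *					"query__start", NULL);
 */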
12117 static int attach_usdt(const struct bpf_program *prog, long cookie, struct bpf_link **link)
12119 char *path = NULL, *provider = NULL, *name = NULL;
12120 const char *sec_name;
12123 sec_name = bpf_program__section_name(prog);
12124 if (strcmp(sec_name, "usdt") == 0) {
12125 /* no auto-attach for just SEC("usdt") */
12130 n = sscanf(sec_name, "usdt/%m[^:]:%m[^:]:%m[^:]", &path, &provider, &name);
12132 pr_warn("invalid section '%s', expected SEC(\"usdt/<path>:<provider>:<name>\")\n",
12136 *link = bpf_program__attach_usdt(prog, -1 /* any process */, path,
12137 provider, name, NULL);
12138 err = libbpf_get_error(*link);
12146 static int determine_tracepoint_id(const char *tp_category,
12147 const char *tp_name)
12149 char file[PATH_MAX];
12152 ret = snprintf(file, sizeof(file), "%s/events/%s/%s/id",
12153 tracefs_path(), tp_category, tp_name);
12156 if (ret >= sizeof(file)) {
12157 pr_debug("tracepoint %s/%s path is too long\n",
12158 tp_category, tp_name);
12161 return parse_uint_from_file(file, "%d\n");
12164 static int perf_event_open_tracepoint(const char *tp_category,
12165 const char *tp_name)
12167 const size_t attr_sz = sizeof(struct perf_event_attr);
12168 struct perf_event_attr attr;
12169 char errmsg[STRERR_BUFSIZE];
12170 int tp_id, pfd, err;
12172 tp_id = determine_tracepoint_id(tp_category, tp_name);
12174 pr_warn("failed to determine tracepoint '%s/%s' perf event ID: %s\n",
12175 tp_category, tp_name,
12176 libbpf_strerror_r(tp_id, errmsg, sizeof(errmsg)));
12180 memset(&attr, 0, attr_sz);
12181 attr.type = PERF_TYPE_TRACEPOINT;
12182 attr.size = attr_sz;
12183 attr.config = tp_id;
12185 pfd = syscall(__NR_perf_event_open, &attr, -1 /* pid */, 0 /* cpu */,
12186 -1 /* group_fd */, PERF_FLAG_FD_CLOEXEC);
12189 pr_warn("tracepoint '%s/%s' perf_event_open() failed: %s\n",
12190 tp_category, tp_name,
12191 libbpf_strerror_r(err, errmsg, sizeof(errmsg)));
12197 struct bpf_link *bpf_program__attach_tracepoint_opts(const struct bpf_program *prog,
12198 const char *tp_category,
12199 const char *tp_name,
12200 const struct bpf_tracepoint_opts *opts)
12202 DECLARE_LIBBPF_OPTS(bpf_perf_event_opts, pe_opts);
12203 char errmsg[STRERR_BUFSIZE];
12204 struct bpf_link *link;
12207 if (!OPTS_VALID(opts, bpf_tracepoint_opts))
12208 return libbpf_err_ptr(-EINVAL);
12210 pe_opts.bpf_cookie = OPTS_GET(opts, bpf_cookie, 0);
12212 pfd = perf_event_open_tracepoint(tp_category, tp_name);
12214 pr_warn("prog '%s': failed to create tracepoint '%s/%s' perf event: %s\n",
12215 prog->name, tp_category, tp_name,
12216 libbpf_strerror_r(pfd, errmsg, sizeof(errmsg)));
12217 return libbpf_err_ptr(pfd);
12219 link = bpf_program__attach_perf_event_opts(prog, pfd, &pe_opts);
12220 err = libbpf_get_error(link);
12223 pr_warn("prog '%s': failed to attach to tracepoint '%s/%s': %s\n",
12224 prog->name, tp_category, tp_name,
12225 libbpf_strerror_r(err, errmsg, sizeof(errmsg)));
12226 return libbpf_err_ptr(err);
12231 struct bpf_link *bpf_program__attach_tracepoint(const struct bpf_program *prog,
12232 const char *tp_category,
12233 const char *tp_name)
12235 return bpf_program__attach_tracepoint_opts(prog, tp_category, tp_name, NULL);
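/* Example (user code): attach to a named tracepoint under
 * <tracefs>/events/<category>/<name>:
 *
 *	link = bpf_program__attach_tracepoint(prog, "syscalls",
 *					      "sys_enter_openat");
 */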
12238 static int attach_tp(const struct bpf_program *prog, long cookie, struct bpf_link **link)
12240 char *sec_name, *tp_cat, *tp_name;
12244 /* no auto-attach for SEC("tp") or SEC("tracepoint") */
12245 if (strcmp(prog->sec_name, "tp") == 0 || strcmp(prog->sec_name, "tracepoint") == 0)
12248 sec_name = strdup(prog->sec_name);
12252 /* extract "tp/<category>/<name>" or "tracepoint/<category>/<name>" */
12253 if (str_has_pfx(prog->sec_name, "tp/"))
12254 tp_cat = sec_name + sizeof("tp/") - 1;
12256 tp_cat = sec_name + sizeof("tracepoint/") - 1;
12257 tp_name = strchr(tp_cat, '/');
12265 *link = bpf_program__attach_tracepoint(prog, tp_cat, tp_name);
12267 return libbpf_get_error(*link);
12270 struct bpf_link *bpf_program__attach_raw_tracepoint(const struct bpf_program *prog,
12271 const char *tp_name)
12273 char errmsg[STRERR_BUFSIZE];
12274 struct bpf_link *link;
12277 prog_fd = bpf_program__fd(prog);
12279 pr_warn("prog '%s': can't attach before loaded\n", prog->name);
12280 return libbpf_err_ptr(-EINVAL);
12283 link = calloc(1, sizeof(*link));
12285 return libbpf_err_ptr(-ENOMEM);
12286 link->detach = &bpf_link__detach_fd;
12288 pfd = bpf_raw_tracepoint_open(tp_name, prog_fd);
12292 pr_warn("prog '%s': failed to attach to raw tracepoint '%s': %s\n",
12293 prog->name, tp_name, libbpf_strerror_r(pfd, errmsg, sizeof(errmsg)));
12294 return libbpf_err_ptr(pfd);
12300 static int attach_raw_tp(const struct bpf_program *prog, long cookie, struct bpf_link **link)
12302 static const char *const prefixes[] = {
12306 "raw_tracepoint.w",
12309 const char *tp_name = NULL;
12313 for (i = 0; i < ARRAY_SIZE(prefixes); i++) {
12316 if (!str_has_pfx(prog->sec_name, prefixes[i]))
12319 pfx_len = strlen(prefixes[i]);
12320 /* no auto-attach for a bare prefix, e.g. SEC("raw_tp") */
12321 if (prog->sec_name[pfx_len] == '\0')
12324 if (prog->sec_name[pfx_len] != '/')
12327 tp_name = prog->sec_name + pfx_len + 1;
12332 pr_warn("prog '%s': invalid section name '%s'\n",
12333 prog->name, prog->sec_name);
12337 *link = bpf_program__attach_raw_tracepoint(prog, tp_name);
12338 return libbpf_get_error(*link);
12341 /* Common logic for all BPF program types that attach to a btf_id */
12342 static struct bpf_link *bpf_program__attach_btf_id(const struct bpf_program *prog,
12343 const struct bpf_trace_opts *opts)
12345 LIBBPF_OPTS(bpf_link_create_opts, link_opts);
12346 char errmsg[STRERR_BUFSIZE];
12347 struct bpf_link *link;
12350 if (!OPTS_VALID(opts, bpf_trace_opts))
12351 return libbpf_err_ptr(-EINVAL);
12353 prog_fd = bpf_program__fd(prog);
12355 pr_warn("prog '%s': can't attach before loaded\n", prog->name);
12356 return libbpf_err_ptr(-EINVAL);
12359 link = calloc(1, sizeof(*link));
12361 return libbpf_err_ptr(-ENOMEM);
12362 link->detach = &bpf_link__detach_fd;
12364 /* libbpf is smart enough to redirect to BPF_RAW_TRACEPOINT_OPEN on old kernels */
12365 link_opts.tracing.cookie = OPTS_GET(opts, cookie, 0);
12366 pfd = bpf_link_create(prog_fd, 0, bpf_program__expected_attach_type(prog), &link_opts);
12370 pr_warn("prog '%s': failed to attach: %s\n",
12371 prog->name, libbpf_strerror_r(pfd, errmsg, sizeof(errmsg)));
12372 return libbpf_err_ptr(pfd);
12378 struct bpf_link *bpf_program__attach_trace(const struct bpf_program *prog)
12380 return bpf_program__attach_btf_id(prog, NULL);
12383 struct bpf_link *bpf_program__attach_trace_opts(const struct bpf_program *prog,
12384 const struct bpf_trace_opts *opts)
12386 return bpf_program__attach_btf_id(prog, opts);
12389 struct bpf_link *bpf_program__attach_lsm(const struct bpf_program *prog)
12391 return bpf_program__attach_btf_id(prog, NULL);
12394 static int attach_trace(const struct bpf_program *prog, long cookie, struct bpf_link **link)
12396 *link = bpf_program__attach_trace(prog);
12397 return libbpf_get_error(*link);
12400 static int attach_lsm(const struct bpf_program *prog, long cookie, struct bpf_link **link)
12402 *link = bpf_program__attach_lsm(prog);
12403 return libbpf_get_error(*link);
12406 static struct bpf_link *
12407 bpf_program_attach_fd(const struct bpf_program *prog,
12408 int target_fd, const char *target_name,
12409 const struct bpf_link_create_opts *opts)
12411 enum bpf_attach_type attach_type;
12412 char errmsg[STRERR_BUFSIZE];
12413 struct bpf_link *link;
12414 int prog_fd, link_fd;
12416 prog_fd = bpf_program__fd(prog);
12418 pr_warn("prog '%s': can't attach before loaded\n", prog->name);
12419 return libbpf_err_ptr(-EINVAL);
12422 link = calloc(1, sizeof(*link));
12424 return libbpf_err_ptr(-ENOMEM);
12425 link->detach = &bpf_link__detach_fd;
12427 attach_type = bpf_program__expected_attach_type(prog);
12428 link_fd = bpf_link_create(prog_fd, target_fd, attach_type, opts);
12432 pr_warn("prog '%s': failed to attach to %s: %s\n",
12433 prog->name, target_name,
12434 libbpf_strerror_r(link_fd, errmsg, sizeof(errmsg)));
12435 return libbpf_err_ptr(link_fd);
12437 link->fd = link_fd;
12442 bpf_program__attach_cgroup(const struct bpf_program *prog, int cgroup_fd)
12444 return bpf_program_attach_fd(prog, cgroup_fd, "cgroup", NULL);
12448 bpf_program__attach_netns(const struct bpf_program *prog, int netns_fd)
12450 return bpf_program_attach_fd(prog, netns_fd, "netns", NULL);
12453 struct bpf_link *bpf_program__attach_xdp(const struct bpf_program *prog, int ifindex)
12455 /* target_fd/target_ifindex use the same field in LINK_CREATE */
12456 return bpf_program_attach_fd(prog, ifindex, "xdp", NULL);
12460 bpf_program__attach_tcx(const struct bpf_program *prog, int ifindex,
12461 const struct bpf_tcx_opts *opts)
12463 LIBBPF_OPTS(bpf_link_create_opts, link_create_opts);
12467 if (!OPTS_VALID(opts, bpf_tcx_opts))
12468 return libbpf_err_ptr(-EINVAL);
12470 relative_id = OPTS_GET(opts, relative_id, 0);
12471 relative_fd = OPTS_GET(opts, relative_fd, 0);
12473 /* validate we don't have unexpected combinations of non-zero fields */
12475 pr_warn("prog '%s': target netdevice ifindex cannot be zero\n",
12477 return libbpf_err_ptr(-EINVAL);
12479 if (relative_fd && relative_id) {
12480 pr_warn("prog '%s': relative_fd and relative_id cannot be set at the same time\n",
12482 return libbpf_err_ptr(-EINVAL);
12485 link_create_opts.tcx.expected_revision = OPTS_GET(opts, expected_revision, 0);
12486 link_create_opts.tcx.relative_fd = relative_fd;
12487 link_create_opts.tcx.relative_id = relative_id;
12488 link_create_opts.flags = OPTS_GET(opts, flags, 0);
12490 /* target_fd/target_ifindex use the same field in LINK_CREATE */
12491 return bpf_program_attach_fd(prog, ifindex, "tcx", &link_create_opts);
12495 bpf_program__attach_netkit(const struct bpf_program *prog, int ifindex,
12496 const struct bpf_netkit_opts *opts)
12498 LIBBPF_OPTS(bpf_link_create_opts, link_create_opts);
12502 if (!OPTS_VALID(opts, bpf_netkit_opts))
12503 return libbpf_err_ptr(-EINVAL);
12505 relative_id = OPTS_GET(opts, relative_id, 0);
12506 relative_fd = OPTS_GET(opts, relative_fd, 0);
12508 /* validate we don't have unexpected combinations of non-zero fields */
12510 pr_warn("prog '%s': target netdevice ifindex cannot be zero\n",
12512 return libbpf_err_ptr(-EINVAL);
12514 if (relative_fd && relative_id) {
12515 pr_warn("prog '%s': relative_fd and relative_id cannot be set at the same time\n",
12517 return libbpf_err_ptr(-EINVAL);
12520 link_create_opts.netkit.expected_revision = OPTS_GET(opts, expected_revision, 0);
12521 link_create_opts.netkit.relative_fd = relative_fd;
12522 link_create_opts.netkit.relative_id = relative_id;
12523 link_create_opts.flags = OPTS_GET(opts, flags, 0);
12525 return bpf_program_attach_fd(prog, ifindex, "netkit", &link_create_opts);
12528 struct bpf_link *bpf_program__attach_freplace(const struct bpf_program *prog,
12530 const char *attach_func_name)
12534 if (!!target_fd != !!attach_func_name) {
12535 pr_warn("prog '%s': supply none or both of target_fd and attach_func_name\n",
12537 return libbpf_err_ptr(-EINVAL);
12540 if (prog->type != BPF_PROG_TYPE_EXT) {
12541 pr_warn("prog '%s': only BPF_PROG_TYPE_EXT can attach as freplace",
12543 return libbpf_err_ptr(-EINVAL);
12547 LIBBPF_OPTS(bpf_link_create_opts, target_opts);
12549 btf_id = libbpf_find_prog_btf_id(attach_func_name, target_fd);
12551 return libbpf_err_ptr(btf_id);
12553 target_opts.target_btf_id = btf_id;
12555 return bpf_program_attach_fd(prog, target_fd, "freplace",
12558 /* no target, so use raw_tracepoint_open for compatibility with fentry */
12561 return bpf_program__attach_trace(prog);
12566 bpf_program__attach_iter(const struct bpf_program *prog,
12567 const struct bpf_iter_attach_opts *opts)
12569 DECLARE_LIBBPF_OPTS(bpf_link_create_opts, link_create_opts);
12570 char errmsg[STRERR_BUFSIZE];
12571 struct bpf_link *link;
12572 int prog_fd, link_fd;
12573 __u32 target_fd = 0;
12575 if (!OPTS_VALID(opts, bpf_iter_attach_opts))
12576 return libbpf_err_ptr(-EINVAL);
12578 link_create_opts.iter_info = OPTS_GET(opts, link_info, (void *)0);
12579 link_create_opts.iter_info_len = OPTS_GET(opts, link_info_len, 0);
12581 prog_fd = bpf_program__fd(prog);
12583 pr_warn("prog '%s': can't attach before loaded\n", prog->name);
12584 return libbpf_err_ptr(-EINVAL);
12587 link = calloc(1, sizeof(*link));
12589 return libbpf_err_ptr(-ENOMEM);
12590 link->detach = &bpf_link__detach_fd;
12592 link_fd = bpf_link_create(prog_fd, target_fd, BPF_TRACE_ITER,
12593 &link_create_opts);
12597 pr_warn("prog '%s': failed to attach to iterator: %s\n",
12598 prog->name, libbpf_strerror_r(link_fd, errmsg, sizeof(errmsg)));
12599 return libbpf_err_ptr(link_fd);
12601 link->fd = link_fd;
12605 static int attach_iter(const struct bpf_program *prog, long cookie, struct bpf_link **link)
12607 *link = bpf_program__attach_iter(prog, NULL);
12608 return libbpf_get_error(*link);
12611 struct bpf_link *bpf_program__attach_netfilter(const struct bpf_program *prog,
12612 const struct bpf_netfilter_opts *opts)
12614 LIBBPF_OPTS(bpf_link_create_opts, lopts);
12615 struct bpf_link *link;
12616 int prog_fd, link_fd;
12618 if (!OPTS_VALID(opts, bpf_netfilter_opts))
12619 return libbpf_err_ptr(-EINVAL);
12621 prog_fd = bpf_program__fd(prog);
12623 pr_warn("prog '%s': can't attach before loaded\n", prog->name);
12624 return libbpf_err_ptr(-EINVAL);
12627 link = calloc(1, sizeof(*link));
12629 return libbpf_err_ptr(-ENOMEM);
12631 link->detach = &bpf_link__detach_fd;
12633 lopts.netfilter.pf = OPTS_GET(opts, pf, 0);
12634 lopts.netfilter.hooknum = OPTS_GET(opts, hooknum, 0);
12635 lopts.netfilter.priority = OPTS_GET(opts, priority, 0);
12636 lopts.netfilter.flags = OPTS_GET(opts, flags, 0);
12638 link_fd = bpf_link_create(prog_fd, 0, BPF_NETFILTER, &lopts);
12640 char errmsg[STRERR_BUFSIZE];
12644 pr_warn("prog '%s': failed to attach to netfilter: %s\n",
12645 prog->name, libbpf_strerror_r(link_fd, errmsg, sizeof(errmsg)));
12646 return libbpf_err_ptr(link_fd);
12648 link->fd = link_fd;
12653 struct bpf_link *bpf_program__attach(const struct bpf_program *prog)
12655 struct bpf_link *link = NULL;
12658 if (!prog->sec_def || !prog->sec_def->prog_attach_fn)
12659 return libbpf_err_ptr(-EOPNOTSUPP);
12661 err = prog->sec_def->prog_attach_fn(prog, prog->sec_def->cookie, &link);
12663 return libbpf_err_ptr(err);
12665 /* When calling bpf_program__attach() explicitly, auto-attach support
12666 * is expected to work, so NULL returned link is considered an error.
12667 * This is different for skeleton's attach, see comment in
12668 * bpf_object__attach_skeleton().
12671 return libbpf_err_ptr(-EOPNOTSUPP);
12676 struct bpf_link_struct_ops {
12677 struct bpf_link link;
12681 static int bpf_link__detach_struct_ops(struct bpf_link *link)
12683 struct bpf_link_struct_ops *st_link;
12686 st_link = container_of(link, struct bpf_link_struct_ops, link);
12688 if (st_link->map_fd < 0)
12689 /* w/o a real link */
12690 return bpf_map_delete_elem(link->fd, &zero);
12692 return close(link->fd);
12695 struct bpf_link *bpf_map__attach_struct_ops(const struct bpf_map *map)
12697 struct bpf_link_struct_ops *link;
12701 if (!bpf_map__is_struct_ops(map) || map->fd == -1)
12702 return libbpf_err_ptr(-EINVAL);
12704 link = calloc(1, sizeof(*link));
12706 return libbpf_err_ptr(-ENOMEM);
12708 /* kern_vdata should be prepared during the loading phase. */
12709 err = bpf_map_update_elem(map->fd, &zero, map->st_ops->kern_vdata, 0);
12710 /* It can be EBUSY if the map has been used to create or
12711 * update a link before. We don't allow updating the value of
12712 * a struct_ops once it is set. That ensures that the value
12713 * never changes. So, it is safe to skip EBUSY.
12715 if (err && (!(map->def.map_flags & BPF_F_LINK) || err != -EBUSY)) {
12717 return libbpf_err_ptr(err);
12720 link->link.detach = bpf_link__detach_struct_ops;
12722 if (!(map->def.map_flags & BPF_F_LINK)) {
12723 /* w/o a real link */
12724 link->link.fd = map->fd;
12726 return &link->link;
12729 fd = bpf_link_create(map->fd, 0, BPF_STRUCT_OPS, NULL);
12732 return libbpf_err_ptr(fd);
12735 link->link.fd = fd;
12736 link->map_fd = map->fd;
12738 return &link->link;
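/* A minimal usage sketch (user code; map name hypothetical):
 *
 *	struct bpf_link *link;
 *
 *	link = bpf_map__attach_struct_ops(skel->maps.my_ops);
 *
 * On failure NULL is returned and errno describes the error.
 */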
12742 * Swap the backing struct_ops map of a link with a new struct_ops map.
12744 int bpf_link__update_map(struct bpf_link *link, const struct bpf_map *map)
12746 struct bpf_link_struct_ops *st_ops_link;
12750 if (!bpf_map__is_struct_ops(map) || !map_is_created(map))
12753 st_ops_link = container_of(link, struct bpf_link_struct_ops, link);
12754 /* Ensure the type of a link is correct */
12755 if (st_ops_link->map_fd < 0)
12758 err = bpf_map_update_elem(map->fd, &zero, map->st_ops->kern_vdata, 0);
12759 /* It can be EBUSY if the map has been used to create or
12760 * update a link before. We don't allow updating the value of
12761 * a struct_ops once it is set. That ensures that the value
12762 * never changes. So, it is safe to skip EBUSY.
12764 if (err && err != -EBUSY)
12767 err = bpf_link_update(link->fd, map->fd, NULL);
12771 st_ops_link->map_fd = map->fd;
12776 typedef enum bpf_perf_event_ret (*bpf_perf_event_print_t)(struct perf_event_header *hdr,
12777 void *private_data);
12779 static enum bpf_perf_event_ret
12780 perf_event_read_simple(void *mmap_mem, size_t mmap_size, size_t page_size,
12781 void **copy_mem, size_t *copy_size,
12782 bpf_perf_event_print_t fn, void *private_data)
12784 struct perf_event_mmap_page *header = mmap_mem;
12785 __u64 data_head = ring_buffer_read_head(header);
12786 __u64 data_tail = header->data_tail;
12787 void *base = ((__u8 *)header) + page_size;
12788 int ret = LIBBPF_PERF_EVENT_CONT;
12789 struct perf_event_header *ehdr;
12792 while (data_head != data_tail) {
12793 ehdr = base + (data_tail & (mmap_size - 1));
12794 ehdr_size = ehdr->size;
12796 if (((void *)ehdr) + ehdr_size > base + mmap_size) {
12797 void *copy_start = ehdr;
12798 size_t len_first = base + mmap_size - copy_start;
12799 size_t len_second = ehdr_size - len_first;
12801 if (*copy_size < ehdr_size) {
12803 *copy_mem = malloc(ehdr_size);
12806 ret = LIBBPF_PERF_EVENT_ERROR;
12809 *copy_size = ehdr_size;
12812 memcpy(*copy_mem, copy_start, len_first);
12813 memcpy(*copy_mem + len_first, base, len_second);
12817 ret = fn(ehdr, private_data);
12818 data_tail += ehdr_size;
12819 if (ret != LIBBPF_PERF_EVENT_CONT)
12823 ring_buffer_write_tail(header, data_tail);
12824 return libbpf_err(ret);
12827 struct perf_buffer;
12829 struct perf_buffer_params {
12830 struct perf_event_attr *attr;
12831 /* if event_cb is specified, it takes precedence */
12832 perf_buffer_event_fn event_cb;
12833 /* sample_cb and lost_cb are higher-level common-case callbacks */
12834 perf_buffer_sample_fn sample_cb;
12835 perf_buffer_lost_fn lost_cb;
12842 struct perf_cpu_buf {
12843 struct perf_buffer *pb;
12844 void *base; /* mmap()'ed memory */
12845 void *buf; /* for reconstructing segmented data */
12852 struct perf_buffer {
12853 perf_buffer_event_fn event_cb;
12854 perf_buffer_sample_fn sample_cb;
12855 perf_buffer_lost_fn lost_cb;
12856 void *ctx; /* passed into callbacks */
12860 struct perf_cpu_buf **cpu_bufs;
12861 struct epoll_event *events;
12862 int cpu_cnt; /* number of allocated CPU buffers */
12863 int epoll_fd; /* epoll instance FD */
12864 int map_fd; /* BPF_MAP_TYPE_PERF_EVENT_ARRAY BPF map FD */
12867 static void perf_buffer__free_cpu_buf(struct perf_buffer *pb,
12868 struct perf_cpu_buf *cpu_buf)
12872 if (cpu_buf->base &&
12873 munmap(cpu_buf->base, pb->mmap_size + pb->page_size))
12874 pr_warn("failed to munmap cpu_buf #%d\n", cpu_buf->cpu);
12875 if (cpu_buf->fd >= 0) {
12876 ioctl(cpu_buf->fd, PERF_EVENT_IOC_DISABLE, 0);
12877 close(cpu_buf->fd);
12879 free(cpu_buf->buf);
12883 void perf_buffer__free(struct perf_buffer *pb)
12887 if (IS_ERR_OR_NULL(pb))
12889 if (pb->cpu_bufs) {
12890 for (i = 0; i < pb->cpu_cnt; i++) {
12891 struct perf_cpu_buf *cpu_buf = pb->cpu_bufs[i];
12896 bpf_map_delete_elem(pb->map_fd, &cpu_buf->map_key);
12897 perf_buffer__free_cpu_buf(pb, cpu_buf);
12899 free(pb->cpu_bufs);
12901 if (pb->epoll_fd >= 0)
12902 close(pb->epoll_fd);
12907 static struct perf_cpu_buf *
12908 perf_buffer__open_cpu_buf(struct perf_buffer *pb, struct perf_event_attr *attr,
12909 int cpu, int map_key)
12911 struct perf_cpu_buf *cpu_buf;
12912 char msg[STRERR_BUFSIZE];
12915 cpu_buf = calloc(1, sizeof(*cpu_buf));
12917 return ERR_PTR(-ENOMEM);
12920 cpu_buf->cpu = cpu;
12921 cpu_buf->map_key = map_key;
12923 cpu_buf->fd = syscall(__NR_perf_event_open, attr, -1 /* pid */, cpu,
12924 -1, PERF_FLAG_FD_CLOEXEC);
12925 if (cpu_buf->fd < 0) {
12927 pr_warn("failed to open perf buffer event on cpu #%d: %s\n",
12928 cpu, libbpf_strerror_r(err, msg, sizeof(msg)));
12932 cpu_buf->base = mmap(NULL, pb->mmap_size + pb->page_size,
12933 PROT_READ | PROT_WRITE, MAP_SHARED,
12935 if (cpu_buf->base == MAP_FAILED) {
12936 cpu_buf->base = NULL;
12938 pr_warn("failed to mmap perf buffer on cpu #%d: %s\n",
12939 cpu, libbpf_strerror_r(err, msg, sizeof(msg)));
12943 if (ioctl(cpu_buf->fd, PERF_EVENT_IOC_ENABLE, 0) < 0) {
12945 pr_warn("failed to enable perf buffer event on cpu #%d: %s\n",
12946 cpu, libbpf_strerror_r(err, msg, sizeof(msg)));
12953 perf_buffer__free_cpu_buf(pb, cpu_buf);
12954 return (struct perf_cpu_buf *)ERR_PTR(err);
12957 static struct perf_buffer *__perf_buffer__new(int map_fd, size_t page_cnt,
12958 struct perf_buffer_params *p);
12960 struct perf_buffer *perf_buffer__new(int map_fd, size_t page_cnt,
12961 perf_buffer_sample_fn sample_cb,
12962 perf_buffer_lost_fn lost_cb,
12964 const struct perf_buffer_opts *opts)
12966 const size_t attr_sz = sizeof(struct perf_event_attr);
12967 struct perf_buffer_params p = {};
12968 struct perf_event_attr attr;
12969 __u32 sample_period;
12971 if (!OPTS_VALID(opts, perf_buffer_opts))
12972 return libbpf_err_ptr(-EINVAL);
12974 sample_period = OPTS_GET(opts, sample_period, 1);
12975 if (!sample_period)
12978 memset(&attr, 0, attr_sz);
12979 attr.size = attr_sz;
12980 attr.config = PERF_COUNT_SW_BPF_OUTPUT;
12981 attr.type = PERF_TYPE_SOFTWARE;
12982 attr.sample_type = PERF_SAMPLE_RAW;
12983 attr.sample_period = sample_period;
12984 attr.wakeup_events = sample_period;
12987 p.sample_cb = sample_cb;
12988 p.lost_cb = lost_cb;
12991 return libbpf_ptr(__perf_buffer__new(map_fd, page_cnt, &p));
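/* A minimal usage sketch (user code; map and callback names are
 * hypothetical). page_cnt must be a power of two, as checked in
 * __perf_buffer__new() below:
 *
 *	static void on_sample(void *ctx, int cpu, void *data, __u32 size)
 *	{
 *		... handle one raw sample ...
 *	}
 *
 *	struct perf_buffer *pb;
 *
 *	pb = perf_buffer__new(bpf_map__fd(skel->maps.events), 64,
 *			      on_sample, NULL, NULL, NULL);
 *	while (perf_buffer__poll(pb, 100) >= 0)
 *		;
 *	perf_buffer__free(pb);
 */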
12994 struct perf_buffer *perf_buffer__new_raw(int map_fd, size_t page_cnt,
12995 struct perf_event_attr *attr,
12996 perf_buffer_event_fn event_cb, void *ctx,
12997 const struct perf_buffer_raw_opts *opts)
12999 struct perf_buffer_params p = {};
13002 return libbpf_err_ptr(-EINVAL);
13004 if (!OPTS_VALID(opts, perf_buffer_raw_opts))
13005 return libbpf_err_ptr(-EINVAL);
13008 p.event_cb = event_cb;
13010 p.cpu_cnt = OPTS_GET(opts, cpu_cnt, 0);
13011 p.cpus = OPTS_GET(opts, cpus, NULL);
13012 p.map_keys = OPTS_GET(opts, map_keys, NULL);
13014 return libbpf_ptr(__perf_buffer__new(map_fd, page_cnt, &p));
13017 static struct perf_buffer *__perf_buffer__new(int map_fd, size_t page_cnt,
13018 struct perf_buffer_params *p)
13020 const char *online_cpus_file = "/sys/devices/system/cpu/online";
13021 struct bpf_map_info map;
13022 char msg[STRERR_BUFSIZE];
13023 struct perf_buffer *pb;
13024 bool *online = NULL;
13025 __u32 map_info_len;
13028 if (page_cnt == 0 || (page_cnt & (page_cnt - 1))) {
13029 pr_warn("page count should be power of two, but is %zu\n",
13031 return ERR_PTR(-EINVAL);
13034 /* best-effort sanity checks */
13035 memset(&map, 0, sizeof(map));
13036 map_info_len = sizeof(map);
13037 err = bpf_map_get_info_by_fd(map_fd, &map, &map_info_len);
13040 /* if BPF_OBJ_GET_INFO_BY_FD is supported, it will return
13041 * -EBADFD, -EFAULT, or -E2BIG on real error
13043 if (err != -EINVAL) {
13044 pr_warn("failed to get map info for map FD %d: %s\n",
13045 map_fd, libbpf_strerror_r(err, msg, sizeof(msg)));
13046 return ERR_PTR(err);
13048 pr_debug("failed to get map info for FD %d; API not supported? Ignoring...\n",
13051 if (map.type != BPF_MAP_TYPE_PERF_EVENT_ARRAY) {
13052 pr_warn("map '%s' should be BPF_MAP_TYPE_PERF_EVENT_ARRAY\n",
13054 return ERR_PTR(-EINVAL);
13058 pb = calloc(1, sizeof(*pb));
13060 return ERR_PTR(-ENOMEM);
13062 pb->event_cb = p->event_cb;
13063 pb->sample_cb = p->sample_cb;
13064 pb->lost_cb = p->lost_cb;
13067 pb->page_size = getpagesize();
13068 pb->mmap_size = pb->page_size * page_cnt;
13069 pb->map_fd = map_fd;
13071 pb->epoll_fd = epoll_create1(EPOLL_CLOEXEC);
13072 if (pb->epoll_fd < 0) {
13074 pr_warn("failed to create epoll instance: %s\n",
13075 libbpf_strerror_r(err, msg, sizeof(msg)));
13079 if (p->cpu_cnt > 0) {
13080 pb->cpu_cnt = p->cpu_cnt;
13082 pb->cpu_cnt = libbpf_num_possible_cpus();
13083 if (pb->cpu_cnt < 0) {
13087 if (map.max_entries && map.max_entries < pb->cpu_cnt)
13088 pb->cpu_cnt = map.max_entries;
13091 pb->events = calloc(pb->cpu_cnt, sizeof(*pb->events));
13094 pr_warn("failed to allocate events: out of memory\n");
13097 pb->cpu_bufs = calloc(pb->cpu_cnt, sizeof(*pb->cpu_bufs));
13098 if (!pb->cpu_bufs) {
13100 pr_warn("failed to allocate buffers: out of memory\n");
13104 err = parse_cpu_mask_file(online_cpus_file, &online, &n);
13106 pr_warn("failed to get online CPU mask: %d\n", err);
13110 for (i = 0, j = 0; i < pb->cpu_cnt; i++) {
13111 struct perf_cpu_buf *cpu_buf;
13114 cpu = p->cpu_cnt > 0 ? p->cpus[i] : i;
13115 map_key = p->cpu_cnt > 0 ? p->map_keys[i] : i;
13117 /* in case user didn't explicitly request particular CPUs to
13118 * be attached to, skip offline/not-present CPUs
13120 if (p->cpu_cnt <= 0 && (cpu >= n || !online[cpu]))
13123 cpu_buf = perf_buffer__open_cpu_buf(pb, p->attr, cpu, map_key);
13124 if (IS_ERR(cpu_buf)) {
13125 err = PTR_ERR(cpu_buf);
13129 pb->cpu_bufs[j] = cpu_buf;
13131 err = bpf_map_update_elem(pb->map_fd, &map_key,
13135 pr_warn("failed to set cpu #%d, key %d -> perf FD %d: %s\n",
13136 cpu, map_key, cpu_buf->fd,
13137 libbpf_strerror_r(err, msg, sizeof(msg)));
13141 pb->events[j].events = EPOLLIN;
13142 pb->events[j].data.ptr = cpu_buf;
13143 if (epoll_ctl(pb->epoll_fd, EPOLL_CTL_ADD, cpu_buf->fd,
13144 &pb->events[j]) < 0) {
13146 pr_warn("failed to epoll_ctl cpu #%d perf FD %d: %s\n",
13148 libbpf_strerror_r(err, msg, sizeof(msg)));
13161 perf_buffer__free(pb);
13162 return ERR_PTR(err);
13165 struct perf_sample_raw {
13166 struct perf_event_header header;
13171 struct perf_sample_lost {
13172 struct perf_event_header header;
13175 uint64_t sample_id;
13178 static enum bpf_perf_event_ret
13179 perf_buffer__process_record(struct perf_event_header *e, void *ctx)
13181 struct perf_cpu_buf *cpu_buf = ctx;
13182 struct perf_buffer *pb = cpu_buf->pb;
13185 /* user wants full control over parsing perf event */
13187 return pb->event_cb(pb->ctx, cpu_buf->cpu, e);
13190 case PERF_RECORD_SAMPLE: {
13191 struct perf_sample_raw *s = data;
13194 pb->sample_cb(pb->ctx, cpu_buf->cpu, s->data, s->size);
13197 case PERF_RECORD_LOST: {
13198 struct perf_sample_lost *s = data;
13201 pb->lost_cb(pb->ctx, cpu_buf->cpu, s->lost);
13205 pr_warn("unknown perf sample type %d\n", e->type);
13206 return LIBBPF_PERF_EVENT_ERROR;
13208 return LIBBPF_PERF_EVENT_CONT;
13211 static int perf_buffer__process_records(struct perf_buffer *pb,
13212 struct perf_cpu_buf *cpu_buf)
13214 enum bpf_perf_event_ret ret;
13216 ret = perf_event_read_simple(cpu_buf->base, pb->mmap_size,
13217 pb->page_size, &cpu_buf->buf,
13218 &cpu_buf->buf_size,
13219 perf_buffer__process_record, cpu_buf);
13220 if (ret != LIBBPF_PERF_EVENT_CONT)
13225 int perf_buffer__epoll_fd(const struct perf_buffer *pb)
13227 return pb->epoll_fd;
13230 int perf_buffer__poll(struct perf_buffer *pb, int timeout_ms)
13234 cnt = epoll_wait(pb->epoll_fd, pb->events, pb->cpu_cnt, timeout_ms);
13238 for (i = 0; i < cnt; i++) {
13239 struct perf_cpu_buf *cpu_buf = pb->events[i].data.ptr;
13241 err = perf_buffer__process_records(pb, cpu_buf);
13243 pr_warn("error while processing records: %d\n", err);
13244 return libbpf_err(err);
13250 /* Return number of PERF_EVENT_ARRAY map slots set up by this perf_buffer
13253 size_t perf_buffer__buffer_cnt(const struct perf_buffer *pb)
13255 return pb->cpu_cnt;
13259 * Return perf_event FD of a ring buffer in *buf_idx* slot of
13260 * PERF_EVENT_ARRAY BPF map. This FD can be polled for new data using
13261 * select()/poll()/epoll() Linux syscalls.
13263 int perf_buffer__buffer_fd(const struct perf_buffer *pb, size_t buf_idx)
13265 struct perf_cpu_buf *cpu_buf;
13267 if (buf_idx >= pb->cpu_cnt)
13268 return libbpf_err(-EINVAL);
13270 cpu_buf = pb->cpu_bufs[buf_idx];
13272 return libbpf_err(-ENOENT);
13274 return cpu_buf->fd;
13277 int perf_buffer__buffer(struct perf_buffer *pb, int buf_idx, void **buf, size_t *buf_size)
13279 struct perf_cpu_buf *cpu_buf;
13281 if (buf_idx >= pb->cpu_cnt)
13282 return libbpf_err(-EINVAL);
13284 cpu_buf = pb->cpu_bufs[buf_idx];
13286 return libbpf_err(-ENOENT);
13288 *buf = cpu_buf->base;
13289 *buf_size = pb->mmap_size;
13294 * Consume data from perf ring buffer corresponding to slot *buf_idx* in
13295 * PERF_EVENT_ARRAY BPF map without waiting/polling. If there is no data to
13296 * consume, do nothing and return success.
13301 int perf_buffer__consume_buffer(struct perf_buffer *pb, size_t buf_idx)
13303 struct perf_cpu_buf *cpu_buf;
13305 if (buf_idx >= pb->cpu_cnt)
13306 return libbpf_err(-EINVAL);
13308 cpu_buf = pb->cpu_bufs[buf_idx];
13310 return libbpf_err(-ENOENT);
13312 return perf_buffer__process_records(pb, cpu_buf);
13315 int perf_buffer__consume(struct perf_buffer *pb)
13319 for (i = 0; i < pb->cpu_cnt; i++) {
13320 struct perf_cpu_buf *cpu_buf = pb->cpu_bufs[i];
13325 err = perf_buffer__process_records(pb, cpu_buf);
13327 pr_warn("perf_buffer: failed to process records in buffer #%d: %d\n", i, err);
13328 return libbpf_err(err);
13334 int bpf_program__set_attach_target(struct bpf_program *prog,
13335 int attach_prog_fd,
13336 const char *attach_func_name)
13338 int btf_obj_fd = 0, btf_id = 0, err;
13340 if (!prog || attach_prog_fd < 0)
13341 return libbpf_err(-EINVAL);
13343 if (prog->obj->loaded)
13344 return libbpf_err(-EINVAL);
13346 if (attach_prog_fd && !attach_func_name) {
13347 /* remember attach_prog_fd and let bpf_program__load() find
13348 * BTF ID during the program load
13350 prog->attach_prog_fd = attach_prog_fd;
13354 if (attach_prog_fd) {
13355 btf_id = libbpf_find_prog_btf_id(attach_func_name,
13358 return libbpf_err(btf_id);
13360 if (!attach_func_name)
13361 return libbpf_err(-EINVAL);
13363 /* load btf_vmlinux, if not yet */
13364 err = bpf_object__load_vmlinux_btf(prog->obj, true);
13366 return libbpf_err(err);
13367 err = find_kernel_btf_id(prog->obj, attach_func_name,
13368 prog->expected_attach_type,
13369 &btf_obj_fd, &btf_id);
13371 return libbpf_err(err);
13374 prog->attach_btf_id = btf_id;
13375 prog->attach_btf_obj_fd = btf_obj_fd;
13376 prog->attach_prog_fd = attach_prog_fd;
13380 int parse_cpu_mask_str(const char *s, bool **mask, int *mask_sz)
13382 int err = 0, n, len, start, end = -1;
13388 /* Each substring separated by ',' has format \d+-\d+ or \d+ */
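/* e.g. "0-3,5,7-8" selects CPUs 0..3, 5, 7 and 8 (illustrative) */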
13390 if (*s == ',' || *s == '\n') {
13394 n = sscanf(s, "%d%n-%d%n", &start, &len, &end, &len);
13395 if (n <= 0 || n > 2) {
13396 pr_warn("Failed to get CPU range %s: %d\n", s, n);
13399 } else if (n == 1) {
13402 if (start < 0 || start > end) {
13403 pr_warn("Invalid CPU range [%d,%d] in %s\n",
13408 tmp = realloc(*mask, end + 1);
13414 memset(tmp + *mask_sz, 0, start - *mask_sz);
13415 memset(tmp + start, 1, end - start + 1);
13416 *mask_sz = end + 1;
13420 pr_warn("Empty CPU range\n");
13430 int parse_cpu_mask_file(const char *fcpu, bool **mask, int *mask_sz)
13432 int fd, err = 0, len;
13435 fd = open(fcpu, O_RDONLY | O_CLOEXEC);
13438 pr_warn("Failed to open cpu mask file %s: %d\n", fcpu, err);
13441 len = read(fd, buf, sizeof(buf));
13444 err = len ? -errno : -EINVAL;
13445 pr_warn("Failed to read cpu mask from %s: %d\n", fcpu, err);
13448 if (len >= sizeof(buf)) {
13449 pr_warn("CPU mask is too big in file %s\n", fcpu);
13454 return parse_cpu_mask_str(buf, mask, mask_sz);
13457 int libbpf_num_possible_cpus(void)
13459 static const char *fcpu = "/sys/devices/system/cpu/possible";
13461 int err, n, i, tmp_cpus;
13464 tmp_cpus = READ_ONCE(cpus);
13468 err = parse_cpu_mask_file(fcpu, &mask, &n);
13470 return libbpf_err(err);
13473 for (i = 0; i < n; i++) {
13479 WRITE_ONCE(cpus, tmp_cpus);
13483 static int populate_skeleton_maps(const struct bpf_object *obj,
13484 struct bpf_map_skeleton *maps,
13489 for (i = 0; i < map_cnt; i++) {
13490 struct bpf_map **map = maps[i].map;
13491 const char *name = maps[i].name;
13492 void **mmaped = maps[i].mmaped;
13494 *map = bpf_object__find_map_by_name(obj, name);
13496 pr_warn("failed to find skeleton map '%s'\n", name);
13500 /* externs shouldn't be pre-setup from user code */
13501 if (mmaped && (*map)->libbpf_type != LIBBPF_MAP_KCONFIG)
13502 *mmaped = (*map)->mmaped;
13507 static int populate_skeleton_progs(const struct bpf_object *obj,
13508 struct bpf_prog_skeleton *progs,
13513 for (i = 0; i < prog_cnt; i++) {
13514 struct bpf_program **prog = progs[i].prog;
13515 const char *name = progs[i].name;
13517 *prog = bpf_object__find_program_by_name(obj, name);
13519 pr_warn("failed to find skeleton program '%s'\n", name);
13526 int bpf_object__open_skeleton(struct bpf_object_skeleton *s,
13527 const struct bpf_object_open_opts *opts)
13529 DECLARE_LIBBPF_OPTS(bpf_object_open_opts, skel_opts,
13530 .object_name = s->name,
13532 struct bpf_object *obj;
13535 /* Attempt to preserve opts->object_name, unless overridden by user
13536 * explicitly. Overwriting object name for skeletons is discouraged,
13537 * as it breaks global data maps, because they contain object name
13538 * prefix as their own map name prefix. When skeleton is generated,
13539 * bpftool is making an assumption that this name will stay the same.
13542 memcpy(&skel_opts, opts, sizeof(*opts));
13543 if (!opts->object_name)
13544 skel_opts.object_name = s->name;
13547 obj = bpf_object__open_mem(s->data, s->data_sz, &skel_opts);
13548 err = libbpf_get_error(obj);
13550 pr_warn("failed to initialize skeleton BPF object '%s': %d\n",
13552 return libbpf_err(err);
13556 err = populate_skeleton_maps(obj, s->maps, s->map_cnt);
13558 pr_warn("failed to populate skeleton maps for '%s': %d\n", s->name, err);
13559 return libbpf_err(err);
13562 err = populate_skeleton_progs(obj, s->progs, s->prog_cnt);
13564 pr_warn("failed to populate skeleton progs for '%s': %d\n", s->name, err);
13565 return libbpf_err(err);
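/* For illustration, a bpftool-generated skeleton (here the hypothetical
 * "my_bpf") drives this and the load/attach/destroy helpers below:
 *
 *	struct my_bpf *skel = my_bpf__open();
 *
 *	err = my_bpf__load(skel);
 *	if (!err)
 *		err = my_bpf__attach(skel);
 *	...
 *	my_bpf__destroy(skel);
 */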
13571 int bpf_object__open_subskeleton(struct bpf_object_subskeleton *s)
13573 int err, len, var_idx, i;
13574 const char *var_name;
13575 const struct bpf_map *map;
13578 const struct btf_type *map_type, *var_type;
13579 const struct bpf_var_skeleton *var_skel;
13580 struct btf_var_secinfo *var;
13583 return libbpf_err(-EINVAL);
13585 btf = bpf_object__btf(s->obj);
13587 pr_warn("subskeletons require BTF at runtime (object %s)\n",
13588 bpf_object__name(s->obj));
13589 return libbpf_err(-EINVAL);
13592 err = populate_skeleton_maps(s->obj, s->maps, s->map_cnt);
13594 pr_warn("failed to populate subskeleton maps: %d\n", err);
13595 return libbpf_err(err);
13598 err = populate_skeleton_progs(s->obj, s->progs, s->prog_cnt);
13600 pr_warn("failed to populate subskeleton maps: %d\n", err);
13601 return libbpf_err(err);
13604 for (var_idx = 0; var_idx < s->var_cnt; var_idx++) {
13605 var_skel = &s->vars[var_idx];
13606 map = *var_skel->map;
13607 map_type_id = bpf_map__btf_value_type_id(map);
13608 map_type = btf__type_by_id(btf, map_type_id);
13610 if (!btf_is_datasec(map_type)) {
13611 pr_warn("type for map '%1$s' is not a datasec: %2$s",
13612 bpf_map__name(map),
13613 __btf_kind_str(btf_kind(map_type)));
13614 return libbpf_err(-EINVAL);
13617 len = btf_vlen(map_type);
13618 var = btf_var_secinfos(map_type);
13619 for (i = 0; i < len; i++, var++) {
13620 var_type = btf__type_by_id(btf, var->type);
13621 var_name = btf__name_by_offset(btf, var_type->name_off);
13622 if (strcmp(var_name, var_skel->name) == 0) {
13623 *var_skel->addr = map->mmaped + var->offset;
13631 void bpf_object__destroy_subskeleton(struct bpf_object_subskeleton *s)
13641 int bpf_object__load_skeleton(struct bpf_object_skeleton *s)
13645 err = bpf_object__load(*s->obj);
13647 pr_warn("failed to load BPF skeleton '%s': %d\n", s->name, err);
13648 return libbpf_err(err);
13651 for (i = 0; i < s->map_cnt; i++) {
13652 struct bpf_map *map = *s->maps[i].map;
13653 size_t mmap_sz = bpf_map_mmap_sz(map);
13654 int prot, map_fd = map->fd;
13655 void **mmaped = s->maps[i].mmaped;
13660 if (!(map->def.map_flags & BPF_F_MMAPABLE)) {
13665 if (map->def.type == BPF_MAP_TYPE_ARENA) {
13666 *mmaped = map->mmaped;
13670 if (map->def.map_flags & BPF_F_RDONLY_PROG)
13673 prot = PROT_READ | PROT_WRITE;
13675 /* Remap anonymous mmap()-ed "map initialization image" as
13676 * BPF map-backed mmap()-ed memory, preserving the same
13677 * memory address. This will cause the kernel to change the
13678 * process' page table to point to a different piece of kernel
13679 * memory, but from the userspace point of view the memory address
13680 * (and its contents, being identical at this point) will stay the
13681 * same. This mapping will be released by bpf_object__close()
13682 * as per the normal clean-up procedure, so we don't need to worry
13683 * about it from the skeleton's clean-up perspective.
13685 *mmaped = mmap(map->mmaped, mmap_sz, prot, MAP_SHARED | MAP_FIXED, map_fd, 0);
13686 if (*mmaped == MAP_FAILED) {
13689 pr_warn("failed to re-mmap() map '%s': %d\n",
13690 bpf_map__name(map), err);
13691 return libbpf_err(err);
13698 int bpf_object__attach_skeleton(struct bpf_object_skeleton *s)
13702 for (i = 0; i < s->prog_cnt; i++) {
13703 struct bpf_program *prog = *s->progs[i].prog;
13704 struct bpf_link **link = s->progs[i].link;
13706 if (!prog->autoload || !prog->autoattach)
13709 /* auto-attaching not supported for this program */
13710 if (!prog->sec_def || !prog->sec_def->prog_attach_fn)
13713 /* if user already set the link manually, don't attempt auto-attach */
13717 err = prog->sec_def->prog_attach_fn(prog, prog->sec_def->cookie, link);
13719 pr_warn("prog '%s': failed to auto-attach: %d\n",
13720 bpf_program__name(prog), err);
13721 return libbpf_err(err);
13724 /* It's possible that for some SEC() definitions auto-attach
13725 * is supported in some cases (e.g., if definition completely
13726 * specifies target information), but is not in other cases.
13727 * SEC("uprobe") is one such case. If user specified target
13728 * binary and function name, such BPF program can be
13729 * auto-attached. But if not, it shouldn't cause the skeleton's
13730 * attach to fail. It should just be skipped.
13731 * attach_fn signals such case with returning 0 (no error) and
13732 * setting link to NULL.
13739 void bpf_object__detach_skeleton(struct bpf_object_skeleton *s)
13743 for (i = 0; i < s->prog_cnt; i++) {
13744 struct bpf_link **link = s->progs[i].link;
13746 bpf_link__destroy(*link);
13751 void bpf_object__destroy_skeleton(struct bpf_object_skeleton *s)
13757 bpf_object__detach_skeleton(s);
13759 bpf_object__close(*s->obj);