// SPDX-License-Identifier: (LGPL-2.1 OR BSD-2-Clause)

/*
 * Common eBPF ELF object loading operations.
 *
 * Copyright (C) 2013-2015 Alexei Starovoitov <ast@kernel.org>
 * Copyright (C) 2015 Wang Nan <wangnan0@huawei.com>
 * Copyright (C) 2015 Huawei Inc.
 * Copyright (C) 2017 Nicira, Inc.
 * Copyright (C) 2019 Isovalent, Inc.
 */
#include <stdlib.h>
#include <stdio.h>
#include <stdarg.h>
#include <libgen.h>
#include <string.h>
#include <unistd.h>
#include <endian.h>
#include <fcntl.h>
#include <errno.h>
#include <ctype.h>
#include <asm/unistd.h>
#include <linux/err.h>
#include <linux/kernel.h>
#include <linux/bpf.h>
#include <linux/btf.h>
#include <linux/filter.h>
#include <linux/limits.h>
#include <linux/perf_event.h>
#include <linux/bpf_perf_event.h>
#include <linux/ring_buffer.h>
#include <sys/epoll.h>
#include <sys/ioctl.h>
#include <sys/mman.h>
#include <sys/types.h>
#include <sys/utsname.h>
#include <sys/resource.h>
#include <libelf.h>
#include <gelf.h>
#include <zlib.h>

#include "libbpf.h"
#include "bpf.h"
#include "btf.h"
#include "str_error.h"
#include "libbpf_internal.h"
#include "hashmap.h"
#include "bpf_gen_internal.h"
#define BPF_FS_MAGIC		0xcafe4a11

#define BPF_FS_DEFAULT_PATH "/sys/fs/bpf"

#define BPF_INSN_SZ (sizeof(struct bpf_insn))
/* vfprintf() in __base_pr() uses nonliteral format string. It may break
 * compilation if user enables corresponding warning. Disable it explicitly.
 */
#pragma GCC diagnostic ignored "-Wformat-nonliteral"

#define __printf(a, b)	__attribute__((format(printf, a, b)))
static struct bpf_map *bpf_object__add_map(struct bpf_object *obj);
static bool prog_is_subprog(const struct bpf_object *obj, const struct bpf_program *prog);
static int map_set_def_max_entries(struct bpf_map *map);
static const char * const attach_type_name[] = {
	[BPF_CGROUP_INET_INGRESS]	= "cgroup_inet_ingress",
	[BPF_CGROUP_INET_EGRESS]	= "cgroup_inet_egress",
	[BPF_CGROUP_INET_SOCK_CREATE]	= "cgroup_inet_sock_create",
	[BPF_CGROUP_INET_SOCK_RELEASE]	= "cgroup_inet_sock_release",
	[BPF_CGROUP_SOCK_OPS]		= "cgroup_sock_ops",
	[BPF_CGROUP_DEVICE]		= "cgroup_device",
	[BPF_CGROUP_INET4_BIND]		= "cgroup_inet4_bind",
	[BPF_CGROUP_INET6_BIND]		= "cgroup_inet6_bind",
	[BPF_CGROUP_INET4_CONNECT]	= "cgroup_inet4_connect",
	[BPF_CGROUP_INET6_CONNECT]	= "cgroup_inet6_connect",
	[BPF_CGROUP_UNIX_CONNECT]	= "cgroup_unix_connect",
	[BPF_CGROUP_INET4_POST_BIND]	= "cgroup_inet4_post_bind",
	[BPF_CGROUP_INET6_POST_BIND]	= "cgroup_inet6_post_bind",
	[BPF_CGROUP_INET4_GETPEERNAME]	= "cgroup_inet4_getpeername",
	[BPF_CGROUP_INET6_GETPEERNAME]	= "cgroup_inet6_getpeername",
	[BPF_CGROUP_UNIX_GETPEERNAME]	= "cgroup_unix_getpeername",
	[BPF_CGROUP_INET4_GETSOCKNAME]	= "cgroup_inet4_getsockname",
	[BPF_CGROUP_INET6_GETSOCKNAME]	= "cgroup_inet6_getsockname",
	[BPF_CGROUP_UNIX_GETSOCKNAME]	= "cgroup_unix_getsockname",
	[BPF_CGROUP_UDP4_SENDMSG]	= "cgroup_udp4_sendmsg",
	[BPF_CGROUP_UDP6_SENDMSG]	= "cgroup_udp6_sendmsg",
	[BPF_CGROUP_UNIX_SENDMSG]	= "cgroup_unix_sendmsg",
	[BPF_CGROUP_SYSCTL]		= "cgroup_sysctl",
	[BPF_CGROUP_UDP4_RECVMSG]	= "cgroup_udp4_recvmsg",
	[BPF_CGROUP_UDP6_RECVMSG]	= "cgroup_udp6_recvmsg",
	[BPF_CGROUP_UNIX_RECVMSG]	= "cgroup_unix_recvmsg",
	[BPF_CGROUP_GETSOCKOPT]		= "cgroup_getsockopt",
	[BPF_CGROUP_SETSOCKOPT]		= "cgroup_setsockopt",
	[BPF_SK_SKB_STREAM_PARSER]	= "sk_skb_stream_parser",
	[BPF_SK_SKB_STREAM_VERDICT]	= "sk_skb_stream_verdict",
	[BPF_SK_SKB_VERDICT]		= "sk_skb_verdict",
	[BPF_SK_MSG_VERDICT]		= "sk_msg_verdict",
	[BPF_LIRC_MODE2]		= "lirc_mode2",
	[BPF_FLOW_DISSECTOR]		= "flow_dissector",
	[BPF_TRACE_RAW_TP]		= "trace_raw_tp",
	[BPF_TRACE_FENTRY]		= "trace_fentry",
	[BPF_TRACE_FEXIT]		= "trace_fexit",
	[BPF_MODIFY_RETURN]		= "modify_return",
	[BPF_LSM_MAC]			= "lsm_mac",
	[BPF_LSM_CGROUP]		= "lsm_cgroup",
	[BPF_SK_LOOKUP]			= "sk_lookup",
	[BPF_TRACE_ITER]		= "trace_iter",
	[BPF_XDP_DEVMAP]		= "xdp_devmap",
	[BPF_XDP_CPUMAP]		= "xdp_cpumap",
	[BPF_XDP]			= "xdp",
	[BPF_SK_REUSEPORT_SELECT]	= "sk_reuseport_select",
	[BPF_SK_REUSEPORT_SELECT_OR_MIGRATE]	= "sk_reuseport_select_or_migrate",
	[BPF_PERF_EVENT]		= "perf_event",
	[BPF_TRACE_KPROBE_MULTI]	= "trace_kprobe_multi",
	[BPF_STRUCT_OPS]		= "struct_ops",
	[BPF_NETFILTER]			= "netfilter",
	[BPF_TCX_INGRESS]		= "tcx_ingress",
	[BPF_TCX_EGRESS]		= "tcx_egress",
	[BPF_TRACE_UPROBE_MULTI]	= "trace_uprobe_multi",
	[BPF_NETKIT_PRIMARY]		= "netkit_primary",
	[BPF_NETKIT_PEER]		= "netkit_peer",
};
static const char * const link_type_name[] = {
	[BPF_LINK_TYPE_UNSPEC]			= "unspec",
	[BPF_LINK_TYPE_RAW_TRACEPOINT]		= "raw_tracepoint",
	[BPF_LINK_TYPE_TRACING]			= "tracing",
	[BPF_LINK_TYPE_CGROUP]			= "cgroup",
	[BPF_LINK_TYPE_ITER]			= "iter",
	[BPF_LINK_TYPE_NETNS]			= "netns",
	[BPF_LINK_TYPE_XDP]			= "xdp",
	[BPF_LINK_TYPE_PERF_EVENT]		= "perf_event",
	[BPF_LINK_TYPE_KPROBE_MULTI]		= "kprobe_multi",
	[BPF_LINK_TYPE_STRUCT_OPS]		= "struct_ops",
	[BPF_LINK_TYPE_NETFILTER]		= "netfilter",
	[BPF_LINK_TYPE_TCX]			= "tcx",
	[BPF_LINK_TYPE_UPROBE_MULTI]		= "uprobe_multi",
	[BPF_LINK_TYPE_NETKIT]			= "netkit",
};
static const char * const map_type_name[] = {
	[BPF_MAP_TYPE_UNSPEC]			= "unspec",
	[BPF_MAP_TYPE_HASH]			= "hash",
	[BPF_MAP_TYPE_ARRAY]			= "array",
	[BPF_MAP_TYPE_PROG_ARRAY]		= "prog_array",
	[BPF_MAP_TYPE_PERF_EVENT_ARRAY]		= "perf_event_array",
	[BPF_MAP_TYPE_PERCPU_HASH]		= "percpu_hash",
	[BPF_MAP_TYPE_PERCPU_ARRAY]		= "percpu_array",
	[BPF_MAP_TYPE_STACK_TRACE]		= "stack_trace",
	[BPF_MAP_TYPE_CGROUP_ARRAY]		= "cgroup_array",
	[BPF_MAP_TYPE_LRU_HASH]			= "lru_hash",
	[BPF_MAP_TYPE_LRU_PERCPU_HASH]		= "lru_percpu_hash",
	[BPF_MAP_TYPE_LPM_TRIE]			= "lpm_trie",
	[BPF_MAP_TYPE_ARRAY_OF_MAPS]		= "array_of_maps",
	[BPF_MAP_TYPE_HASH_OF_MAPS]		= "hash_of_maps",
	[BPF_MAP_TYPE_DEVMAP]			= "devmap",
	[BPF_MAP_TYPE_DEVMAP_HASH]		= "devmap_hash",
	[BPF_MAP_TYPE_SOCKMAP]			= "sockmap",
	[BPF_MAP_TYPE_CPUMAP]			= "cpumap",
	[BPF_MAP_TYPE_XSKMAP]			= "xskmap",
	[BPF_MAP_TYPE_SOCKHASH]			= "sockhash",
	[BPF_MAP_TYPE_CGROUP_STORAGE]		= "cgroup_storage",
	[BPF_MAP_TYPE_REUSEPORT_SOCKARRAY]	= "reuseport_sockarray",
	[BPF_MAP_TYPE_PERCPU_CGROUP_STORAGE]	= "percpu_cgroup_storage",
	[BPF_MAP_TYPE_QUEUE]			= "queue",
	[BPF_MAP_TYPE_STACK]			= "stack",
	[BPF_MAP_TYPE_SK_STORAGE]		= "sk_storage",
	[BPF_MAP_TYPE_STRUCT_OPS]		= "struct_ops",
	[BPF_MAP_TYPE_RINGBUF]			= "ringbuf",
	[BPF_MAP_TYPE_INODE_STORAGE]		= "inode_storage",
	[BPF_MAP_TYPE_TASK_STORAGE]		= "task_storage",
	[BPF_MAP_TYPE_BLOOM_FILTER]		= "bloom_filter",
	[BPF_MAP_TYPE_USER_RINGBUF]		= "user_ringbuf",
	[BPF_MAP_TYPE_CGRP_STORAGE]		= "cgrp_storage",
	[BPF_MAP_TYPE_ARENA]			= "arena",
};
static const char * const prog_type_name[] = {
	[BPF_PROG_TYPE_UNSPEC]			= "unspec",
	[BPF_PROG_TYPE_SOCKET_FILTER]		= "socket_filter",
	[BPF_PROG_TYPE_KPROBE]			= "kprobe",
	[BPF_PROG_TYPE_SCHED_CLS]		= "sched_cls",
	[BPF_PROG_TYPE_SCHED_ACT]		= "sched_act",
	[BPF_PROG_TYPE_TRACEPOINT]		= "tracepoint",
	[BPF_PROG_TYPE_XDP]			= "xdp",
	[BPF_PROG_TYPE_PERF_EVENT]		= "perf_event",
	[BPF_PROG_TYPE_CGROUP_SKB]		= "cgroup_skb",
	[BPF_PROG_TYPE_CGROUP_SOCK]		= "cgroup_sock",
	[BPF_PROG_TYPE_LWT_IN]			= "lwt_in",
	[BPF_PROG_TYPE_LWT_OUT]			= "lwt_out",
	[BPF_PROG_TYPE_LWT_XMIT]		= "lwt_xmit",
	[BPF_PROG_TYPE_SOCK_OPS]		= "sock_ops",
	[BPF_PROG_TYPE_SK_SKB]			= "sk_skb",
	[BPF_PROG_TYPE_CGROUP_DEVICE]		= "cgroup_device",
	[BPF_PROG_TYPE_SK_MSG]			= "sk_msg",
	[BPF_PROG_TYPE_RAW_TRACEPOINT]		= "raw_tracepoint",
	[BPF_PROG_TYPE_CGROUP_SOCK_ADDR]	= "cgroup_sock_addr",
	[BPF_PROG_TYPE_LWT_SEG6LOCAL]		= "lwt_seg6local",
	[BPF_PROG_TYPE_LIRC_MODE2]		= "lirc_mode2",
	[BPF_PROG_TYPE_SK_REUSEPORT]		= "sk_reuseport",
	[BPF_PROG_TYPE_FLOW_DISSECTOR]		= "flow_dissector",
	[BPF_PROG_TYPE_CGROUP_SYSCTL]		= "cgroup_sysctl",
	[BPF_PROG_TYPE_RAW_TRACEPOINT_WRITABLE]	= "raw_tracepoint_writable",
	[BPF_PROG_TYPE_CGROUP_SOCKOPT]		= "cgroup_sockopt",
	[BPF_PROG_TYPE_TRACING]			= "tracing",
	[BPF_PROG_TYPE_STRUCT_OPS]		= "struct_ops",
	[BPF_PROG_TYPE_EXT]			= "ext",
	[BPF_PROG_TYPE_LSM]			= "lsm",
	[BPF_PROG_TYPE_SK_LOOKUP]		= "sk_lookup",
	[BPF_PROG_TYPE_SYSCALL]			= "syscall",
	[BPF_PROG_TYPE_NETFILTER]		= "netfilter",
};
static int __base_pr(enum libbpf_print_level level, const char *format,
		     va_list args)
{
	if (level == LIBBPF_DEBUG)
		return 0;

	return vfprintf(stderr, format, args);
}
static libbpf_print_fn_t __libbpf_pr = __base_pr;

libbpf_print_fn_t libbpf_set_print(libbpf_print_fn_t fn)
{
	libbpf_print_fn_t old_print_fn;

	old_print_fn = __atomic_exchange_n(&__libbpf_pr, fn, __ATOMIC_RELAXED);

	return old_print_fn;
}
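/* Usage sketch (user code, not part of libbpf; names are illustrative):
 *
 *	static int my_print(enum libbpf_print_level level,
 *			    const char *fmt, va_list args)
 *	{
 *		if (level == LIBBPF_DEBUG)
 *			return 0;
 *		return vfprintf(stderr, fmt, args);
 *	}
 *
 *	libbpf_print_fn_t prev = libbpf_set_print(my_print);
 *
 * The previous callback is returned so the caller can restore it later.
 */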
__printf(2, 3)
void libbpf_print(enum libbpf_print_level level, const char *format, ...)
{
	va_list args;
	int old_errno;
	libbpf_print_fn_t print_fn;

	print_fn = __atomic_load_n(&__libbpf_pr, __ATOMIC_RELAXED);
	if (!print_fn)
		return;

	old_errno = errno;

	va_start(args, format);
	__libbpf_pr(level, format, args);
	va_end(args);

	errno = old_errno;
}
static void pr_perm_msg(int err)
{
	struct rlimit limit;
	char buf[100];

	if (err != -EPERM || geteuid() != 0)
		return;

	err = getrlimit(RLIMIT_MEMLOCK, &limit);
	if (err)
		return;

	if (limit.rlim_cur == RLIM_INFINITY)
		return;

	if (limit.rlim_cur < 1024)
		snprintf(buf, sizeof(buf), "%zu bytes", (size_t)limit.rlim_cur);
	else if (limit.rlim_cur < 1024*1024)
		snprintf(buf, sizeof(buf), "%.1f KiB", (double)limit.rlim_cur / 1024);
	else
		snprintf(buf, sizeof(buf), "%.1f MiB", (double)limit.rlim_cur / (1024*1024));

	pr_warn("permission error while running as root; try raising 'ulimit -l'? current value: %s\n",
		buf);
}

#define STRERR_BUFSIZE  128
/* Copied from tools/perf/util/util.h */
#ifndef zfree
# define zfree(ptr) ({ free(*ptr); *ptr = NULL; })
#endif

#ifndef zclose
# define zclose(fd) ({			\
	int ___err = 0;			\
	if ((fd) >= 0)			\
		___err = close((fd));	\
	fd = -1;			\
	___err; })
#endif

static inline __u64 ptr_to_u64(const void *ptr)
{
	return (__u64) (unsigned long) ptr;
}
int libbpf_set_strict_mode(enum libbpf_strict_mode mode)
{
	/* as of v1.0 libbpf_set_strict_mode() is a no-op */
	return 0;
}

__u32 libbpf_major_version(void)
{
	return LIBBPF_MAJOR_VERSION;
}

__u32 libbpf_minor_version(void)
{
	return LIBBPF_MINOR_VERSION;
}

const char *libbpf_version_string(void)
{
#define __S(X) #X
#define _S(X) __S(X)
	return "v" _S(LIBBPF_MAJOR_VERSION) "." _S(LIBBPF_MINOR_VERSION);
#undef _S
#undef __S
}
enum reloc_type {
	RELO_LD64,
	RELO_CALL,
	RELO_DATA,
	RELO_EXTERN_LD64,
	RELO_EXTERN_CALL,
	RELO_SUBPROG_ADDR,
	RELO_CORE,
};

struct reloc_desc {
	enum reloc_type type;
	int insn_idx;
	union {
		const struct bpf_core_relo *core_relo; /* used when type == RELO_CORE */
		struct {
			int map_idx;
			int sym_off;
			int ext_idx;
		};
	};
};
/* stored as sec_def->cookie for all libbpf-supported SEC()s */
enum sec_def_flags {
	SEC_NONE = 0,
	/* expected_attach_type is optional, if kernel doesn't support that */
	SEC_EXP_ATTACH_OPT = 1,
	/* legacy, only used by libbpf_get_type_names() and
	 * libbpf_attach_type_by_name(), not used by libbpf itself at all.
	 * This used to be associated with cgroup (and a few other) BPF
	 * programs that were attachable through the BPF_PROG_ATTACH command.
	 * Pretty meaningless nowadays, though.
	 */
	SEC_ATTACHABLE = 2,
	SEC_ATTACHABLE_OPT = SEC_ATTACHABLE | SEC_EXP_ATTACH_OPT,
	/* attachment target is specified through BTF ID in either kernel or
	 * other BPF program's BTF object
	 */
	SEC_ATTACH_BTF = 4,
	/* BPF program type allows sleeping/blocking in kernel */
	SEC_SLEEPABLE = 8,
	/* BPF program supports non-linear XDP buffer */
	SEC_XDP_FRAGS = 16,
	/* Setup proper attach type for usdt probes. */
	SEC_USDT = 32,
};
struct bpf_sec_def {
	char *sec;
	enum bpf_prog_type prog_type;
	enum bpf_attach_type expected_attach_type;
	long cookie;
	int handler_id;

	libbpf_prog_setup_fn_t prog_setup_fn;
	libbpf_prog_prepare_load_fn_t prog_prepare_load_fn;
	libbpf_prog_attach_fn_t prog_attach_fn;
};
/*
 * bpf_prog should be a better name but it has been used in
 * linux/filter.h.
 */
struct bpf_program {
	char *name;
	char *sec_name;
	size_t sec_idx;
	const struct bpf_sec_def *sec_def;
	/* this program's instruction offset (in number of instructions)
	 * within its containing ELF section
	 */
	size_t sec_insn_off;
	/* number of original instructions in ELF section belonging to this
	 * program, not taking into account subprogram instructions possibly
	 * appended later during relocation
	 */
	size_t sec_insn_cnt;
	/* Offset (in number of instructions) of the start of instruction
	 * belonging to this BPF program within its containing main BPF
	 * program. For the entry-point (main) BPF program, this is always
	 * zero. For a sub-program, this gets reset before each of main BPF
	 * programs are processed and relocated and is used to determine
	 * whether sub-program was already appended to the main program, and
	 * if yes, at which instruction offset.
	 */
	size_t sub_insn_off;

	/* instructions that belong to BPF program; insns[0] is located at
	 * sec_insn_off instruction within its ELF section in ELF file, so
	 * when mapping ELF file instruction index to the local instruction,
	 * one needs to subtract sec_insn_off; and vice versa.
	 */
	struct bpf_insn *insns;
	/* actual number of instructions in this BPF program's image; for
	 * entry-point BPF programs this includes the size of main program
	 * itself plus all the used sub-programs, appended at the end
	 */
	size_t insns_cnt;

	struct reloc_desc *reloc_desc;
	int nr_reloc;

	/* BPF verifier log settings */
	char *log_buf;
	size_t log_size;
	__u32 log_level;

	struct bpf_object *obj;

	int fd;
	bool autoload;
	bool autoattach;
	bool sym_global;
	bool mark_btf_static;
	enum bpf_prog_type type;
	enum bpf_attach_type expected_attach_type;
	int exception_cb_idx;

	int prog_ifindex;
	__u32 attach_btf_obj_fd;
	__u32 attach_btf_id;
	__u32 attach_prog_fd;

	void *func_info;
	__u32 func_info_rec_size;
	__u32 func_info_cnt;

	void *line_info;
	__u32 line_info_rec_size;
	__u32 line_info_cnt;
	__u32 prog_flags;
};
struct bpf_struct_ops {
	const char *tname;
	const struct btf_type *type;
	struct bpf_program **progs;
	__u32 *kern_func_off;
	/* e.g. struct tcp_congestion_ops in bpf_prog's btf format */
	void *data;
	/* e.g. struct bpf_struct_ops_tcp_congestion_ops in
	 *      btf_vmlinux's format.
	 * struct bpf_struct_ops_tcp_congestion_ops {
	 *	[... some other kernel fields ...]
	 *	struct tcp_congestion_ops data;
	 * }
	 * kern_vdata's size == sizeof(struct bpf_struct_ops_tcp_congestion_ops)
	 * bpf_map__init_kern_struct_ops() will populate the "kern_vdata"
	 * from "data".
	 */
	void *kern_vdata;
	__u32 type_id;
};
#define DATA_SEC ".data"
#define BSS_SEC ".bss"
#define RODATA_SEC ".rodata"
#define KCONFIG_SEC ".kconfig"
#define KSYMS_SEC ".ksyms"
#define STRUCT_OPS_SEC ".struct_ops"
#define STRUCT_OPS_LINK_SEC ".struct_ops.link"
enum libbpf_map_type {
	LIBBPF_MAP_UNSPEC,
	LIBBPF_MAP_DATA,
	LIBBPF_MAP_BSS,
	LIBBPF_MAP_RODATA,
	LIBBPF_MAP_KCONFIG,
};

struct bpf_map_def {
	unsigned int type;
	unsigned int key_size;
	unsigned int value_size;
	unsigned int max_entries;
	unsigned int map_flags;
};

struct bpf_map {
	struct bpf_object *obj;
	char *name;
	/* real_name is defined for special internal maps (.rodata*,
	 * .data*, .bss, .kconfig) and preserves their original ELF section
	 * name. This is important to be able to find corresponding BTF
	 * DATASEC information.
	 */
	char *real_name;
	int fd;
	int sec_idx;
	size_t sec_offset;
	int map_ifindex;
	int inner_map_fd;
	struct bpf_map_def def;
	__u32 numa_node;
	__u32 btf_var_idx;
	int mod_btf_fd;
	__u32 btf_key_type_id;
	__u32 btf_value_type_id;
	__u32 btf_vmlinux_value_type_id;
	enum libbpf_map_type libbpf_type;
	void *mmaped;
	struct bpf_struct_ops *st_ops;
	struct bpf_map *inner_map;
	void **init_slots;
	int init_slots_sz;
	char *pin_path;
	bool pinned;
	bool reused;
	bool autocreate;
	__u64 map_extra;
};
struct extern_desc {
	enum extern_type type;
	int sym_idx;
	int btf_id;
	int sec_btf_id;
	const char *name;
	char *essent_name;
	bool is_set;
	bool is_weak;
	union {
		struct {
			enum kcfg_type type;
			int sz;
			int align;
			int data_off;
			bool is_signed;
		} kcfg;
		struct {
			unsigned long long addr;

			/* target btf_id of the corresponding kernel var. */
			int kernel_btf_obj_fd;
			int kernel_btf_id;

			/* local btf_id of the ksym extern's type. */
			__u32 type_id;
			/* BTF fd index to be patched in for insn->off, this is
			 * 0 for vmlinux BTF, index in obj->fd_array for module
			 * BTF
			 */
			__s16 btf_fd_idx;
		} ksym;
	};
};
struct elf_sec_desc {
	enum sec_type sec_type;
	Elf64_Shdr *shdr;
	Elf_Data *data;
};

struct elf_state {
	int fd;
	const void *obj_buf;
	size_t obj_buf_sz;
	Elf *elf;
	Elf64_Ehdr *ehdr;
	Elf_Data *symbols;
	size_t shstrndx; /* section index for section name strings */
	size_t strtabidx;
	struct elf_sec_desc *secs;
	size_t sec_cnt;
	int btf_maps_shndx;
	__u32 btf_maps_sec_btf_id;
	int text_shndx;
	int symbols_shndx;
	bool has_st_ops;
};
struct bpf_object {
	char name[BPF_OBJ_NAME_LEN];
	char license[64];
	__u32 kern_version;

	struct bpf_program *programs;
	size_t nr_programs;
	struct bpf_map *maps;
	size_t nr_maps;
	size_t maps_cap;

	char *kconfig;
	struct extern_desc *externs;
	int nr_extern;
	int kconfig_map_idx;

	bool loaded;
	bool has_subcalls;
	bool has_rodata;

	struct bpf_gen *gen_loader;

	/* Information when doing ELF related work. Only valid if efile.elf is not NULL */
	struct elf_state efile;

	struct btf *btf;
	struct btf_ext *btf_ext;

	/* Parse and load BTF vmlinux if any of the programs in the object need
	 * it at load time.
	 */
	struct btf *btf_vmlinux;
	/* Path to the custom BTF to be used for BPF CO-RE relocations as an
	 * override for vmlinux BTF.
	 */
	char *btf_custom_path;
	/* vmlinux BTF override for CO-RE relocations */
	struct btf *btf_vmlinux_override;
	/* Lazily initialized kernel module BTFs */
	struct module_btf *btf_modules;
	bool btf_modules_loaded;
	size_t btf_module_cnt;
	size_t btf_module_cap;

	/* optional log settings passed to BPF_BTF_LOAD and BPF_PROG_LOAD commands */
	char *log_buf;
	size_t log_size;
	__u32 log_level;

	int *fd_array;
	size_t fd_array_cap;
	size_t fd_array_cnt;

	struct usdt_manager *usdt_man;

	struct kern_feature_cache *feat_cache;

	char path[];
};
static const char *elf_sym_str(const struct bpf_object *obj, size_t off);
static const char *elf_sec_str(const struct bpf_object *obj, size_t off);
static Elf_Scn *elf_sec_by_idx(const struct bpf_object *obj, size_t idx);
static Elf_Scn *elf_sec_by_name(const struct bpf_object *obj, const char *name);
static Elf64_Shdr *elf_sec_hdr(const struct bpf_object *obj, Elf_Scn *scn);
static const char *elf_sec_name(const struct bpf_object *obj, Elf_Scn *scn);
static Elf_Data *elf_sec_data(const struct bpf_object *obj, Elf_Scn *scn);
static Elf64_Sym *elf_sym_by_idx(const struct bpf_object *obj, size_t idx);
static Elf64_Rel *elf_rel_by_idx(Elf_Data *data, size_t idx);
void bpf_program__unload(struct bpf_program *prog)
{
	if (!prog)
		return;

	zclose(prog->fd);

	zfree(&prog->func_info);
	zfree(&prog->line_info);
}

static void bpf_program__exit(struct bpf_program *prog)
{
	if (!prog)
		return;

	bpf_program__unload(prog);
	zfree(&prog->name);
	zfree(&prog->sec_name);
	zfree(&prog->insns);
	zfree(&prog->reloc_desc);

	prog->nr_reloc = 0;
	prog->insns_cnt = 0;
	prog->sec_idx = -1;
}
static bool insn_is_subprog_call(const struct bpf_insn *insn)
{
	return BPF_CLASS(insn->code) == BPF_JMP &&
	       BPF_OP(insn->code) == BPF_CALL &&
	       BPF_SRC(insn->code) == BPF_K &&
	       insn->src_reg == BPF_PSEUDO_CALL &&
	       insn->dst_reg == 0 &&
	       insn->off == 0;
}

static bool is_call_insn(const struct bpf_insn *insn)
{
	return insn->code == (BPF_JMP | BPF_CALL);
}

static bool insn_is_pseudo_func(struct bpf_insn *insn)
{
	return is_ldimm64_insn(insn) && insn->src_reg == BPF_PSEUDO_FUNC;
}
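/* Example: a subprog call in BPF assembly is encoded with insn->code ==
 * (BPF_JMP | BPF_CALL), src_reg == BPF_PSEUDO_CALL, and imm holding the
 * relative instruction offset of the callee, while a helper call uses
 * src_reg == 0 with imm set to the helper ID; the predicates above tell
 * these encodings apart.
 */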
static int
bpf_object__init_prog(struct bpf_object *obj, struct bpf_program *prog,
		      const char *name, size_t sec_idx, const char *sec_name,
		      size_t sec_off, void *insn_data, size_t insn_data_sz)
{
	if (insn_data_sz == 0 || insn_data_sz % BPF_INSN_SZ || sec_off % BPF_INSN_SZ) {
		pr_warn("sec '%s': corrupted program '%s', offset %zu, size %zu\n",
			sec_name, name, sec_off, insn_data_sz);
		return -EINVAL;
	}

	memset(prog, 0, sizeof(*prog));
	prog->obj = obj;

	prog->sec_idx = sec_idx;
	prog->sec_insn_off = sec_off / BPF_INSN_SZ;
	prog->sec_insn_cnt = insn_data_sz / BPF_INSN_SZ;
	/* insns_cnt can later be increased by appending used subprograms */
	prog->insns_cnt = prog->sec_insn_cnt;

	prog->type = BPF_PROG_TYPE_UNSPEC;
	prog->fd = -1;
	prog->exception_cb_idx = -1;

	/* libbpf's convention for SEC("?abc...") is that it's just like
	 * SEC("abc...") but the corresponding bpf_program starts out with
	 * autoload set to false.
	 */
	if (sec_name[0] == '?') {
		prog->autoload = false;
		/* from now on forget there was ? in section name */
		sec_name++;
	} else {
		prog->autoload = true;
	}

	prog->autoattach = true;

	/* inherit object's log_level */
	prog->log_level = obj->log_level;

	prog->sec_name = strdup(sec_name);
	if (!prog->sec_name)
		goto errout;

	prog->name = strdup(name);
	if (!prog->name)
		goto errout;

	prog->insns = malloc(insn_data_sz);
	if (!prog->insns)
		goto errout;
	memcpy(prog->insns, insn_data, insn_data_sz);

	return 0;
errout:
	pr_warn("sec '%s': failed to allocate memory for prog '%s'\n", sec_name, name);
	bpf_program__exit(prog);
	return -ENOMEM;
}
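/* Example of the '?' convention (user-side sketch, hypothetical names):
 *
 *	SEC("?kprobe/do_unlinkat")
 *	int handle_unlink(struct pt_regs *ctx) { ... }
 *
 * opens with bpf_program__autoload(prog) == false; the user opts in
 * explicitly via bpf_program__set_autoload(prog, true) before
 * bpf_object__load().
 */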
static int
bpf_object__add_programs(struct bpf_object *obj, Elf_Data *sec_data,
			 const char *sec_name, int sec_idx)
{
	Elf_Data *symbols = obj->efile.symbols;
	struct bpf_program *prog, *progs;
	void *data = sec_data->d_buf;
	size_t sec_sz = sec_data->d_size, sec_off, prog_sz, nr_syms;
	int nr_progs, err, i;
	const char *name;
	Elf64_Sym *sym;

	progs = obj->programs;
	nr_progs = obj->nr_programs;
	nr_syms = symbols->d_size / sizeof(Elf64_Sym);

	for (i = 0; i < nr_syms; i++) {
		sym = elf_sym_by_idx(obj, i);

		if (sym->st_shndx != sec_idx)
			continue;
		if (ELF64_ST_TYPE(sym->st_info) != STT_FUNC)
			continue;

		prog_sz = sym->st_size;
		sec_off = sym->st_value;

		name = elf_sym_str(obj, sym->st_name);
		if (!name) {
			pr_warn("sec '%s': failed to get symbol name for offset %zu\n",
				sec_name, sec_off);
			return -LIBBPF_ERRNO__FORMAT;
		}

		if (sec_off + prog_sz > sec_sz) {
			pr_warn("sec '%s': program at offset %zu crosses section boundary\n",
				sec_name, sec_off);
			return -LIBBPF_ERRNO__FORMAT;
		}

		if (sec_idx != obj->efile.text_shndx && ELF64_ST_BIND(sym->st_info) == STB_LOCAL) {
			pr_warn("sec '%s': program '%s' is static and not supported\n", sec_name, name);
			return -ENOTSUP;
		}

		pr_debug("sec '%s': found program '%s' at insn offset %zu (%zu bytes), code size %zu insns (%zu bytes)\n",
			 sec_name, name, sec_off / BPF_INSN_SZ, sec_off, prog_sz / BPF_INSN_SZ, prog_sz);

		progs = libbpf_reallocarray(progs, nr_progs + 1, sizeof(*progs));
		if (!progs) {
			/*
			 * In this case the original obj->programs
			 * is still valid, so no special handling is
			 * needed in bpf_object__close().
			 */
			pr_warn("sec '%s': failed to alloc memory for new program '%s'\n",
				sec_name, name);
			return -ENOMEM;
		}
		obj->programs = progs;

		prog = &progs[nr_progs];

		err = bpf_object__init_prog(obj, prog, name, sec_idx, sec_name,
					    sec_off, data + sec_off, prog_sz);
		if (err)
			return err;

		if (ELF64_ST_BIND(sym->st_info) != STB_LOCAL)
			prog->sym_global = true;

		/* if function is a global/weak symbol, but has restricted
		 * (STV_HIDDEN or STV_INTERNAL) visibility, mark its BTF FUNC
		 * as static to enable more permissive BPF verification mode
		 * with more outside context available to BPF verifier
		 */
		if (prog->sym_global && (ELF64_ST_VISIBILITY(sym->st_other) == STV_HIDDEN
		    || ELF64_ST_VISIBILITY(sym->st_other) == STV_INTERNAL))
			prog->mark_btf_static = true;

		nr_progs++;
		obj->nr_programs = nr_progs;
	}

	return 0;
}
static const struct btf_member *
find_member_by_offset(const struct btf_type *t, __u32 bit_offset)
{
	struct btf_member *m;
	int i;

	for (i = 0, m = btf_members(t); i < btf_vlen(t); i++, m++) {
		if (btf_member_bit_offset(t, i) == bit_offset)
			return m;
	}

	return NULL;
}

static const struct btf_member *
find_member_by_name(const struct btf *btf, const struct btf_type *t,
		    const char *name)
{
	struct btf_member *m;
	int i;

	for (i = 0, m = btf_members(t); i < btf_vlen(t); i++, m++) {
		if (!strcmp(btf__name_by_offset(btf, m->name_off), name))
			return m;
	}

	return NULL;
}
static int find_ksym_btf_id(struct bpf_object *obj, const char *ksym_name,
			    __u16 kind, struct btf **res_btf,
			    struct module_btf **res_mod_btf);

#define STRUCT_OPS_VALUE_PREFIX "bpf_struct_ops_"
static int find_btf_by_prefix_kind(const struct btf *btf, const char *prefix,
				   const char *name, __u32 kind);
static int
find_struct_ops_kern_types(struct bpf_object *obj, const char *tname_raw,
			   struct module_btf **mod_btf,
			   const struct btf_type **type, __u32 *type_id,
			   const struct btf_type **vtype, __u32 *vtype_id,
			   const struct btf_member **data_member)
{
	const struct btf_type *kern_type, *kern_vtype;
	const struct btf_member *kern_data_member;
	struct btf *btf = NULL;
	__s32 kern_vtype_id, kern_type_id;
	char tname[256];
	__u32 i;

	snprintf(tname, sizeof(tname), "%.*s",
		 (int)bpf_core_essential_name_len(tname_raw), tname_raw);

	kern_type_id = find_ksym_btf_id(obj, tname, BTF_KIND_STRUCT,
					&btf, mod_btf);
	if (kern_type_id < 0) {
		pr_warn("struct_ops init_kern: struct %s is not found in kernel BTF\n",
			tname);
		return kern_type_id;
	}
	kern_type = btf__type_by_id(btf, kern_type_id);

	/* Find the corresponding "map_value" type that will be used
	 * in map_update(BPF_MAP_TYPE_STRUCT_OPS). For example,
	 * find "struct bpf_struct_ops_tcp_congestion_ops" from the
	 * btf_vmlinux.
	 */
	kern_vtype_id = find_btf_by_prefix_kind(btf, STRUCT_OPS_VALUE_PREFIX,
						tname, BTF_KIND_STRUCT);
	if (kern_vtype_id < 0) {
		pr_warn("struct_ops init_kern: struct %s%s is not found in kernel BTF\n",
			STRUCT_OPS_VALUE_PREFIX, tname);
		return kern_vtype_id;
	}
	kern_vtype = btf__type_by_id(btf, kern_vtype_id);

	/* Find "struct tcp_congestion_ops" from
	 * struct bpf_struct_ops_tcp_congestion_ops {
	 *	[ ... ]
	 *	struct tcp_congestion_ops data;
	 * }
	 */
	kern_data_member = btf_members(kern_vtype);
	for (i = 0; i < btf_vlen(kern_vtype); i++, kern_data_member++) {
		if (kern_data_member->type == kern_type_id)
			break;
	}
	if (i == btf_vlen(kern_vtype)) {
		pr_warn("struct_ops init_kern: struct %s data is not found in struct %s%s\n",
			tname, STRUCT_OPS_VALUE_PREFIX, tname);
		return -EINVAL;
	}

	*type = kern_type;
	*type_id = kern_type_id;
	*vtype = kern_vtype;
	*vtype_id = kern_vtype_id;
	*data_member = kern_data_member;

	return 0;
}
static bool bpf_map__is_struct_ops(const struct bpf_map *map)
{
	return map->def.type == BPF_MAP_TYPE_STRUCT_OPS;
}

static bool is_valid_st_ops_program(struct bpf_object *obj,
				    const struct bpf_program *prog)
{
	int i;

	for (i = 0; i < obj->nr_programs; i++) {
		if (&obj->programs[i] == prog)
			return prog->type == BPF_PROG_TYPE_STRUCT_OPS;
	}

	return false;
}
/* For each struct_ops program P, referenced from some struct_ops map M,
 * enable P.autoload if there are Ms for which M.autocreate is true,
 * disable P.autoload if for all Ms M.autocreate is false.
 * Don't change P.autoload for programs that are not referenced from any maps.
 */
static int bpf_object_adjust_struct_ops_autoload(struct bpf_object *obj)
{
	struct bpf_program *prog, *slot_prog;
	struct bpf_map *map;
	int i, j, k, vlen;

	for (i = 0; i < obj->nr_programs; ++i) {
		bool should_load = false;
		int use_cnt = 0;

		prog = &obj->programs[i];
		if (prog->type != BPF_PROG_TYPE_STRUCT_OPS)
			continue;

		for (j = 0; j < obj->nr_maps; ++j) {
			map = &obj->maps[j];
			if (!bpf_map__is_struct_ops(map))
				continue;

			vlen = btf_vlen(map->st_ops->type);
			for (k = 0; k < vlen; ++k) {
				slot_prog = map->st_ops->progs[k];
				if (prog != slot_prog)
					continue;

				use_cnt++;
				if (map->autocreate)
					should_load = true;
			}
		}
		if (use_cnt)
			prog->autoload = should_load;
	}

	return 0;
}
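/* Example: if prog P fills slots in maps M1 (autocreate == false) and M2
 * (autocreate == true), P.autoload becomes true; if both maps have
 * autocreate == false, P.autoload becomes false; a struct_ops prog not
 * referenced by any map keeps whatever autoload value the user set.
 */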
/* Init the map's fields that depend on kern_btf */
static int bpf_map__init_kern_struct_ops(struct bpf_map *map)
{
	const struct btf_member *member, *kern_member, *kern_data_member;
	const struct btf_type *type, *kern_type, *kern_vtype;
	__u32 i, kern_type_id, kern_vtype_id, kern_data_off;
	struct bpf_object *obj = map->obj;
	const struct btf *btf = obj->btf;
	struct bpf_struct_ops *st_ops;
	const struct btf *kern_btf;
	struct module_btf *mod_btf;
	void *data, *kern_data;
	const char *tname;
	int err;

	st_ops = map->st_ops;
	type = st_ops->type;
	tname = st_ops->tname;
	err = find_struct_ops_kern_types(obj, tname, &mod_btf,
					 &kern_type, &kern_type_id,
					 &kern_vtype, &kern_vtype_id,
					 &kern_data_member);
	if (err)
		return err;

	kern_btf = mod_btf ? mod_btf->btf : obj->btf_vmlinux;

	pr_debug("struct_ops init_kern %s: type_id:%u kern_type_id:%u kern_vtype_id:%u\n",
		 map->name, st_ops->type_id, kern_type_id, kern_vtype_id);

	map->mod_btf_fd = mod_btf ? mod_btf->fd : -1;
	map->def.value_size = kern_vtype->size;
	map->btf_vmlinux_value_type_id = kern_vtype_id;

	st_ops->kern_vdata = calloc(1, kern_vtype->size);
	if (!st_ops->kern_vdata)
		return -ENOMEM;

	data = st_ops->data;
	kern_data_off = kern_data_member->offset / 8;
	kern_data = st_ops->kern_vdata + kern_data_off;

	member = btf_members(type);
	for (i = 0; i < btf_vlen(type); i++, member++) {
		const struct btf_type *mtype, *kern_mtype;
		__u32 mtype_id, kern_mtype_id;
		void *mdata, *kern_mdata;
		__s64 msize, kern_msize;
		__u32 moff, kern_moff;
		__u32 kern_member_idx;
		const char *mname;

		mname = btf__name_by_offset(btf, member->name_off);
		kern_member = find_member_by_name(kern_btf, kern_type, mname);
		if (!kern_member) {
			pr_warn("struct_ops init_kern %s: Cannot find member %s in kernel BTF\n",
				map->name, mname);
			return -ENOTSUP;
		}

		kern_member_idx = kern_member - btf_members(kern_type);
		if (btf_member_bitfield_size(type, i) ||
		    btf_member_bitfield_size(kern_type, kern_member_idx)) {
			pr_warn("struct_ops init_kern %s: bitfield %s is not supported\n",
				map->name, mname);
			return -ENOTSUP;
		}

		moff = member->offset / 8;
		kern_moff = kern_member->offset / 8;

		mdata = data + moff;
		kern_mdata = kern_data + kern_moff;

		mtype = skip_mods_and_typedefs(btf, member->type, &mtype_id);
		kern_mtype = skip_mods_and_typedefs(kern_btf, kern_member->type,
						    &kern_mtype_id);
		if (BTF_INFO_KIND(mtype->info) !=
		    BTF_INFO_KIND(kern_mtype->info)) {
			pr_warn("struct_ops init_kern %s: Unmatched member type %s %u != %u(kernel)\n",
				map->name, mname, BTF_INFO_KIND(mtype->info),
				BTF_INFO_KIND(kern_mtype->info));
			return -ENOTSUP;
		}

		if (btf_is_ptr(mtype)) {
			struct bpf_program *prog;

			/* Update the value from the shadow type */
			prog = *(void **)mdata;
			st_ops->progs[i] = prog;
			if (!prog)
				continue;
			if (!is_valid_st_ops_program(obj, prog)) {
				pr_warn("struct_ops init_kern %s: member %s is not a struct_ops program\n",
					map->name, mname);
				return -ENOTSUP;
			}

			kern_mtype = skip_mods_and_typedefs(kern_btf,
							    kern_mtype->type,
							    &kern_mtype_id);

			/* mtype->type must be a func_proto which was
			 * guaranteed in bpf_object__collect_st_ops_relos(),
			 * so only check kern_mtype for func_proto here.
			 */
			if (!btf_is_func_proto(kern_mtype)) {
				pr_warn("struct_ops init_kern %s: kernel member %s is not a func ptr\n",
					map->name, mname);
				return -ENOTSUP;
			}

			if (mod_btf)
				prog->attach_btf_obj_fd = mod_btf->fd;

			/* if we haven't yet processed this BPF program, record proper
			 * attach_btf_id and member_idx
			 */
			if (!prog->attach_btf_id) {
				prog->attach_btf_id = kern_type_id;
				prog->expected_attach_type = kern_member_idx;
			}

			/* struct_ops BPF prog can be re-used between multiple
			 * .struct_ops & .struct_ops.link as long as it's the
			 * same struct_ops struct definition and the same
			 * function pointer field
			 */
			if (prog->attach_btf_id != kern_type_id) {
				pr_warn("struct_ops init_kern %s func ptr %s: invalid reuse of prog %s in sec %s with type %u: attach_btf_id %u != kern_type_id %u\n",
					map->name, mname, prog->name, prog->sec_name, prog->type,
					prog->attach_btf_id, kern_type_id);
				return -EINVAL;
			}
			if (prog->expected_attach_type != kern_member_idx) {
				pr_warn("struct_ops init_kern %s func ptr %s: invalid reuse of prog %s in sec %s with type %u: expected_attach_type %u != kern_member_idx %u\n",
					map->name, mname, prog->name, prog->sec_name, prog->type,
					prog->expected_attach_type, kern_member_idx);
				return -EINVAL;
			}

			st_ops->kern_func_off[i] = kern_data_off + kern_moff;

			pr_debug("struct_ops init_kern %s: func ptr %s is set to prog %s from data(+%u) to kern_data(+%u)\n",
				 map->name, mname, prog->name, moff,
				 kern_moff);

			continue;
		}

		msize = btf__resolve_size(btf, mtype_id);
		kern_msize = btf__resolve_size(kern_btf, kern_mtype_id);
		if (msize < 0 || kern_msize < 0 || msize != kern_msize) {
			pr_warn("struct_ops init_kern %s: Error in size of member %s: %zd != %zd(kernel)\n",
				map->name, mname, (ssize_t)msize,
				(ssize_t)kern_msize);
			return -ENOTSUP;
		}

		pr_debug("struct_ops init_kern %s: copy %s %u bytes from data(+%u) to kern_data(+%u)\n",
			 map->name, mname, (unsigned int)msize,
			 moff, kern_moff);
		memcpy(kern_mdata, mdata, msize);
	}

	return 0;
}
static int bpf_object__init_kern_struct_ops_maps(struct bpf_object *obj)
{
	struct bpf_map *map;
	size_t i;
	int err;

	for (i = 0; i < obj->nr_maps; i++) {
		map = &obj->maps[i];

		if (!bpf_map__is_struct_ops(map))
			continue;

		if (!map->autocreate)
			continue;

		err = bpf_map__init_kern_struct_ops(map);
		if (err)
			return err;
	}

	return 0;
}
static int init_struct_ops_maps(struct bpf_object *obj, const char *sec_name,
				int shndx, Elf_Data *data)
{
	const struct btf_type *type, *datasec;
	const struct btf_var_secinfo *vsi;
	struct bpf_struct_ops *st_ops;
	const char *tname, *var_name;
	__s32 type_id, datasec_id;
	const struct btf *btf;
	struct bpf_map *map;
	__u32 i;

	if (shndx == -1)
		return 0;

	btf = obj->btf;
	datasec_id = btf__find_by_name_kind(btf, sec_name,
					    BTF_KIND_DATASEC);
	if (datasec_id < 0) {
		pr_warn("struct_ops init: DATASEC %s not found\n",
			sec_name);
		return -EINVAL;
	}

	datasec = btf__type_by_id(btf, datasec_id);
	vsi = btf_var_secinfos(datasec);
	for (i = 0; i < btf_vlen(datasec); i++, vsi++) {
		type = btf__type_by_id(obj->btf, vsi->type);
		var_name = btf__name_by_offset(obj->btf, type->name_off);

		type_id = btf__resolve_type(obj->btf, vsi->type);
		if (type_id < 0) {
			pr_warn("struct_ops init: Cannot resolve var type_id %u in DATASEC %s\n",
				vsi->type, sec_name);
			return -EINVAL;
		}

		type = btf__type_by_id(obj->btf, type_id);
		tname = btf__name_by_offset(obj->btf, type->name_off);
		if (!tname[0]) {
			pr_warn("struct_ops init: anonymous type is not supported\n");
			return -ENOTSUP;
		}
		if (!btf_is_struct(type)) {
			pr_warn("struct_ops init: %s is not a struct\n", tname);
			return -EINVAL;
		}

		map = bpf_object__add_map(obj);
		if (IS_ERR(map))
			return PTR_ERR(map);

		map->sec_idx = shndx;
		map->sec_offset = vsi->offset;
		map->name = strdup(var_name);
		if (!map->name)
			return -ENOMEM;
		map->btf_value_type_id = type_id;

		/* Follow same convention as for programs autoload:
		 * SEC("?.struct_ops") means map is not created by default.
		 */
		if (sec_name[0] == '?') {
			map->autocreate = false;
			/* from now on forget there was ? in section name */
			sec_name++;
		}

		map->def.type = BPF_MAP_TYPE_STRUCT_OPS;
		map->def.key_size = sizeof(int);
		map->def.value_size = type->size;
		map->def.max_entries = 1;
		map->def.map_flags = strcmp(sec_name, STRUCT_OPS_LINK_SEC) == 0 ? BPF_F_LINK : 0;

		map->st_ops = calloc(1, sizeof(*map->st_ops));
		if (!map->st_ops)
			return -ENOMEM;
		st_ops = map->st_ops;
		st_ops->data = malloc(type->size);
		st_ops->progs = calloc(btf_vlen(type), sizeof(*st_ops->progs));
		st_ops->kern_func_off = malloc(btf_vlen(type) *
					       sizeof(*st_ops->kern_func_off));
		if (!st_ops->data || !st_ops->progs || !st_ops->kern_func_off)
			return -ENOMEM;

		if (vsi->offset + type->size > data->d_size) {
			pr_warn("struct_ops init: var %s is beyond the end of DATASEC %s\n",
				var_name, sec_name);
			return -EINVAL;
		}

		memcpy(st_ops->data,
		       data->d_buf + vsi->offset,
		       type->size);
		st_ops->tname = tname;
		st_ops->type = type;
		st_ops->type_id = type_id;

		pr_debug("struct_ops init: struct %s(type_id=%u) %s found at offset %u\n",
			 tname, type_id, var_name, vsi->offset);
	}

	return 0;
}
static int bpf_object_init_struct_ops(struct bpf_object *obj)
{
	const char *sec_name;
	int sec_idx, err;

	for (sec_idx = 0; sec_idx < obj->efile.sec_cnt; ++sec_idx) {
		struct elf_sec_desc *desc = &obj->efile.secs[sec_idx];

		if (desc->sec_type != SEC_ST_OPS)
			continue;

		sec_name = elf_sec_name(obj, elf_sec_by_idx(obj, sec_idx));
		if (!sec_name)
			return -LIBBPF_ERRNO__FORMAT;

		err = init_struct_ops_maps(obj, sec_name, sec_idx, desc->data);
		if (err)
			return err;
	}

	return 0;
}
static struct bpf_object *bpf_object__new(const char *path,
					  const void *obj_buf,
					  size_t obj_buf_sz,
					  const char *obj_name)
{
	struct bpf_object *obj;
	char *end;

	obj = calloc(1, sizeof(struct bpf_object) + strlen(path) + 1);
	if (!obj) {
		pr_warn("alloc memory failed for %s\n", path);
		return ERR_PTR(-ENOMEM);
	}

	strcpy(obj->path, path);
	if (obj_name) {
		libbpf_strlcpy(obj->name, obj_name, sizeof(obj->name));
	} else {
		/* Using basename() GNU version which doesn't modify arg. */
		libbpf_strlcpy(obj->name, basename((void *)path), sizeof(obj->name));
		end = strchr(obj->name, '.');
		if (end)
			*end = 0;
	}

	obj->efile.fd = -1;
	/*
	 * Caller of this function should also call
	 * bpf_object__elf_finish() after data collection to return
	 * obj_buf to user. If not, we should duplicate the buffer to
	 * avoid the user freeing it before ELF processing finishes.
	 */
	obj->efile.obj_buf = obj_buf;
	obj->efile.obj_buf_sz = obj_buf_sz;
	obj->efile.btf_maps_shndx = -1;
	obj->kconfig_map_idx = -1;

	obj->kern_version = get_kernel_version();
	obj->loaded = false;

	return obj;
}
static void bpf_object__elf_finish(struct bpf_object *obj)
{
	if (!obj->efile.elf)
		return;

	elf_end(obj->efile.elf);
	obj->efile.elf = NULL;
	obj->efile.symbols = NULL;

	zfree(&obj->efile.secs);
	obj->efile.sec_cnt = 0;
	zclose(obj->efile.fd);
	obj->efile.obj_buf = NULL;
	obj->efile.obj_buf_sz = 0;
}
static int bpf_object__elf_init(struct bpf_object *obj)
{
	Elf64_Ehdr *ehdr;
	int err = 0;
	Elf *elf;

	if (obj->efile.elf) {
		pr_warn("elf: init internal error\n");
		return -LIBBPF_ERRNO__LIBELF;
	}

	if (obj->efile.obj_buf_sz > 0) {
		/* obj_buf should have been validated by bpf_object__open_mem(). */
		elf = elf_memory((char *)obj->efile.obj_buf, obj->efile.obj_buf_sz);
	} else {
		obj->efile.fd = open(obj->path, O_RDONLY | O_CLOEXEC);
		if (obj->efile.fd < 0) {
			char errmsg[STRERR_BUFSIZE], *cp;

			err = -errno;
			cp = libbpf_strerror_r(err, errmsg, sizeof(errmsg));
			pr_warn("elf: failed to open %s: %s\n", obj->path, cp);
			return err;
		}

		elf = elf_begin(obj->efile.fd, ELF_C_READ_MMAP, NULL);
	}

	if (!elf) {
		pr_warn("elf: failed to open %s as ELF file: %s\n", obj->path, elf_errmsg(-1));
		err = -LIBBPF_ERRNO__LIBELF;
		goto errout;
	}

	obj->efile.elf = elf;

	if (elf_kind(elf) != ELF_K_ELF) {
		err = -LIBBPF_ERRNO__FORMAT;
		pr_warn("elf: '%s' is not a proper ELF object\n", obj->path);
		goto errout;
	}

	if (gelf_getclass(elf) != ELFCLASS64) {
		err = -LIBBPF_ERRNO__FORMAT;
		pr_warn("elf: '%s' is not a 64-bit ELF object\n", obj->path);
		goto errout;
	}

	obj->efile.ehdr = ehdr = elf64_getehdr(elf);
	if (!obj->efile.ehdr) {
		pr_warn("elf: failed to get ELF header from %s: %s\n", obj->path, elf_errmsg(-1));
		err = -LIBBPF_ERRNO__FORMAT;
		goto errout;
	}

	if (elf_getshdrstrndx(elf, &obj->efile.shstrndx)) {
		pr_warn("elf: failed to get section names section index for %s: %s\n",
			obj->path, elf_errmsg(-1));
		err = -LIBBPF_ERRNO__FORMAT;
		goto errout;
	}

	/* ELF is corrupted/truncated, avoid calling elf_strptr. */
	if (!elf_rawdata(elf_getscn(elf, obj->efile.shstrndx), NULL)) {
		pr_warn("elf: failed to get section names strings from %s: %s\n",
			obj->path, elf_errmsg(-1));
		err = -LIBBPF_ERRNO__FORMAT;
		goto errout;
	}

	/* Old LLVM set e_machine to EM_NONE */
	if (ehdr->e_type != ET_REL || (ehdr->e_machine && ehdr->e_machine != EM_BPF)) {
		pr_warn("elf: %s is not a valid eBPF object file\n", obj->path);
		err = -LIBBPF_ERRNO__FORMAT;
		goto errout;
	}

	return 0;
errout:
	bpf_object__elf_finish(obj);
	return err;
}
static int bpf_object__check_endianness(struct bpf_object *obj)
{
#if __BYTE_ORDER__ == __ORDER_LITTLE_ENDIAN__
	if (obj->efile.ehdr->e_ident[EI_DATA] == ELFDATA2LSB)
		return 0;
#elif __BYTE_ORDER__ == __ORDER_BIG_ENDIAN__
	if (obj->efile.ehdr->e_ident[EI_DATA] == ELFDATA2MSB)
		return 0;
#else
# error "Unrecognized __BYTE_ORDER__"
#endif
	pr_warn("elf: endianness mismatch in %s.\n", obj->path);
	return -LIBBPF_ERRNO__ENDIAN;
}
static int
bpf_object__init_license(struct bpf_object *obj, void *data, size_t size)
{
	if (!data) {
		pr_warn("invalid license section in %s\n", obj->path);
		return -LIBBPF_ERRNO__FORMAT;
	}
	/* libbpf_strlcpy() only copies first N - 1 bytes, so size + 1 won't
	 * go over allowed ELF data section buffer
	 */
	libbpf_strlcpy(obj->license, data, min(size + 1, sizeof(obj->license)));
	pr_debug("license of %s is %s\n", obj->path, obj->license);
	return 0;
}
static int
bpf_object__init_kversion(struct bpf_object *obj, void *data, size_t size)
{
	__u32 kver;

	if (!data || size != sizeof(kver)) {
		pr_warn("invalid kver section in %s\n", obj->path);
		return -LIBBPF_ERRNO__FORMAT;
	}
	memcpy(&kver, data, sizeof(kver));
	obj->kern_version = kver;
	pr_debug("kernel version of %s is %x\n", obj->path, obj->kern_version);
	return 0;
}
static bool bpf_map_type__is_map_in_map(enum bpf_map_type type)
{
	if (type == BPF_MAP_TYPE_ARRAY_OF_MAPS ||
	    type == BPF_MAP_TYPE_HASH_OF_MAPS)
		return true;
	return false;
}
static int find_elf_sec_sz(const struct bpf_object *obj, const char *name, __u32 *size)
{
	Elf_Data *data;
	Elf_Scn *scn;

	if (!name)
		return -EINVAL;

	scn = elf_sec_by_name(obj, name);
	data = elf_sec_data(obj, scn);
	if (data) {
		*size = data->d_size;
		return 0; /* found it */
	}

	return -ENOENT;
}
static Elf64_Sym *find_elf_var_sym(const struct bpf_object *obj, const char *name)
{
	Elf_Data *symbols = obj->efile.symbols;
	const char *sname;
	size_t si;

	for (si = 0; si < symbols->d_size / sizeof(Elf64_Sym); si++) {
		Elf64_Sym *sym = elf_sym_by_idx(obj, si);

		if (ELF64_ST_TYPE(sym->st_info) != STT_OBJECT)
			continue;

		if (ELF64_ST_BIND(sym->st_info) != STB_GLOBAL &&
		    ELF64_ST_BIND(sym->st_info) != STB_WEAK)
			continue;

		sname = elf_sym_str(obj, sym->st_name);
		if (!sname) {
			pr_warn("failed to get sym name string for var %s\n", name);
			return ERR_PTR(-EIO);
		}
		if (strcmp(name, sname) == 0)
			return sym;
	}

	return ERR_PTR(-ENOENT);
}
/* Some versions of Android don't provide memfd_create() in their libc
 * implementation, so avoid complications and just go straight to Linux
 * syscall.
 */
static int sys_memfd_create(const char *name, unsigned flags)
{
	return syscall(__NR_memfd_create, name, flags);
}

#ifndef MFD_CLOEXEC
#define MFD_CLOEXEC 0x0001U
#endif

static int create_placeholder_fd(void)
{
	int fd;

	fd = ensure_good_fd(sys_memfd_create("libbpf-placeholder-fd", MFD_CLOEXEC));
	if (fd < 0)
		return -errno;
	return fd;
}
static struct bpf_map *bpf_object__add_map(struct bpf_object *obj)
{
	struct bpf_map *map;
	int err;

	err = libbpf_ensure_mem((void **)&obj->maps, &obj->maps_cap,
				sizeof(*obj->maps), obj->nr_maps + 1);
	if (err)
		return ERR_PTR(err);

	map = &obj->maps[obj->nr_maps++];
	map->obj = obj;
	/* Preallocate map FD without actually creating BPF map just yet.
	 * These map FD "placeholders" will be reused later without changing
	 * FD value when map is actually created in the kernel.
	 *
	 * This is useful to be able to perform BPF program relocations
	 * without having to create BPF maps before that step. This allows us
	 * to finalize and load BTF very late in BPF object's loading phase,
	 * right before BPF maps have to be created and BPF programs have to
	 * be loaded. By having these map FD placeholders we can perform all
	 * the sanitizations, relocations, and any other adjustments before we
	 * start creating actual BPF kernel objects (BTF, maps, progs).
	 */
	map->fd = create_placeholder_fd();
	if (map->fd < 0)
		return ERR_PTR(map->fd);
	map->inner_map_fd = -1;
	map->autocreate = true;

	return map;
}
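/* Sketch of how a placeholder FD can later be pointed at the real map
 * (a simplified assumption of what happens at map-creation time, not a
 * verbatim excerpt):
 *
 *	int real_fd = bpf_map_create(def->type, map->name, ...);
 *	dup3(real_fd, map->fd, O_CLOEXEC);  // FD number stays stable
 *	close(real_fd);
 *
 * so any relocation that already captured map->fd remains valid.
 */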
static size_t array_map_mmap_sz(unsigned int value_sz, unsigned int max_entries)
{
	const long page_sz = sysconf(_SC_PAGE_SIZE);
	size_t map_sz;

	map_sz = (size_t)roundup(value_sz, 8) * max_entries;
	map_sz = roundup(map_sz, page_sz);
	return map_sz;
}

static size_t bpf_map_mmap_sz(const struct bpf_map *map)
{
	const long page_sz = sysconf(_SC_PAGE_SIZE);

	switch (map->def.type) {
	case BPF_MAP_TYPE_ARRAY:
		return array_map_mmap_sz(map->def.value_size, map->def.max_entries);
	case BPF_MAP_TYPE_ARENA:
		return page_sz * map->def.max_entries;
	default:
		return 0; /* not supported */
	}
}
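/* Worked example: an ARRAY map with value_size == 6 and max_entries ==
 * 1000 needs roundup(6, 8) * 1000 == 8000 bytes, rounded up to the page
 * size (4096 here) -> 8192 bytes. An ARENA map with max_entries == 3
 * maps exactly 3 pages.
 */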
static int bpf_map_mmap_resize(struct bpf_map *map, size_t old_sz, size_t new_sz)
{
	void *mmaped;

	if (!map->mmaped)
		return -EINVAL;

	if (old_sz == new_sz)
		return 0;

	mmaped = mmap(NULL, new_sz, PROT_READ | PROT_WRITE, MAP_SHARED | MAP_ANONYMOUS, -1, 0);
	if (mmaped == MAP_FAILED)
		return -errno;

	memcpy(mmaped, map->mmaped, min(old_sz, new_sz));
	munmap(map->mmaped, old_sz);
	map->mmaped = mmaped;
	return 0;
}
static char *internal_map_name(struct bpf_object *obj, const char *real_name)
{
	char map_name[BPF_OBJ_NAME_LEN], *p;
	int pfx_len, sfx_len = max((size_t)7, strlen(real_name));

	/* This is one of the more confusing parts of libbpf for various
	 * reasons, some of which are historical. The original idea for naming
	 * internal maps was to include as much of BPF object name prefix as
	 * possible, so that it can be distinguished from similar internal
	 * maps of a different BPF object.
	 * As an example, let's say we have bpf_object named 'my_object_name'
	 * and internal map corresponding to '.rodata' ELF section. The final
	 * map name advertised to user and to the kernel will be
	 * 'my_objec.rodata', taking first 8 characters of object name and
	 * entire 7 characters of '.rodata'.
	 * Somewhat confusingly, if internal map ELF section name is shorter
	 * than 7 characters, e.g., '.bss', we still reserve 7 characters
	 * for the suffix, even though we only have 4 actual characters, and
	 * resulting map will be called 'my_objec.bss', not even using all 15
	 * characters allowed by the kernel. Oh well, at least the truncated
	 * object name is somewhat consistent in this case. But if the map
	 * name is '.kconfig', we'll still have entirety of '.kconfig' added
	 * (8 chars) and thus will be left with only first 7 characters of the
	 * object name ('my_obje'). Happy guessing, user, that the final map
	 * name will be "my_obje.kconfig".
	 * Now, with libbpf starting to support arbitrarily named .rodata.*
	 * and .data.* data sections, it's possible that ELF section name is
	 * longer than allowed 15 chars, so we now need to be careful to take
	 * only up to 15 first characters of ELF name, taking no BPF object
	 * name characters at all. So '.rodata.abracadabra' will result in
	 * '.rodata.abracad' kernel and user-visible name.
	 * We need to keep this convoluted logic intact for .data, .bss and
	 * .rodata maps, but for new custom .data.custom and .rodata.custom
	 * maps we use their ELF names as is, not prepending bpf_object name
	 * in front. We still need to truncate them to 15 characters for the
	 * kernel. Full name can be recovered for such maps by using DATASEC
	 * BTF type associated with such map's value type, though.
	 */
	if (sfx_len >= BPF_OBJ_NAME_LEN)
		sfx_len = BPF_OBJ_NAME_LEN - 1;

	/* if there are two or more dots in map name, it's a custom dot map */
	if (strchr(real_name + 1, '.') != NULL)
		pfx_len = 0;
	else
		pfx_len = min((size_t)BPF_OBJ_NAME_LEN - sfx_len - 1, strlen(obj->name));

	snprintf(map_name, sizeof(map_name), "%.*s%.*s", pfx_len, obj->name,
		 sfx_len, real_name);

	/* sanitise map name to characters allowed by kernel */
	for (p = map_name; *p && p < map_name + sizeof(map_name); p++)
		if (!isalnum(*p) && *p != '_' && *p != '.')
			*p = '_';

	return strdup(map_name);
}
static int
map_fill_btf_type_info(struct bpf_object *obj, struct bpf_map *map);

/* Internal BPF map is mmap()'able only if at least one of corresponding
 * DATASEC's VARs are to be exposed through BPF skeleton. I.e., it's a GLOBAL
 * variable and it's not marked as __hidden (which turns it into, effectively,
 * a STATIC variable).
 */
static bool map_is_mmapable(struct bpf_object *obj, struct bpf_map *map)
{
	const struct btf_type *t, *vt;
	struct btf_var_secinfo *vsi;
	int i, n;

	if (!map->btf_value_type_id)
		return false;

	t = btf__type_by_id(obj->btf, map->btf_value_type_id);
	if (!btf_is_datasec(t))
		return false;

	vsi = btf_var_secinfos(t);
	for (i = 0, n = btf_vlen(t); i < n; i++, vsi++) {
		vt = btf__type_by_id(obj->btf, vsi->type);
		if (!btf_is_var(vt))
			continue;

		if (btf_var(vt)->linkage != BTF_VAR_STATIC)
			return true;
	}

	return false;
}
static int
bpf_object__init_internal_map(struct bpf_object *obj, enum libbpf_map_type type,
			      const char *real_name, int sec_idx, void *data, size_t data_sz)
{
	struct bpf_map_def *def;
	struct bpf_map *map;
	size_t mmap_sz;
	int err;

	map = bpf_object__add_map(obj);
	if (IS_ERR(map))
		return PTR_ERR(map);

	map->libbpf_type = type;
	map->sec_idx = sec_idx;
	map->sec_offset = 0;
	map->real_name = strdup(real_name);
	map->name = internal_map_name(obj, real_name);
	if (!map->real_name || !map->name) {
		zfree(&map->real_name);
		zfree(&map->name);
		return -ENOMEM;
	}

	def = &map->def;
	def->type = BPF_MAP_TYPE_ARRAY;
	def->key_size = sizeof(int);
	def->value_size = data_sz;
	def->max_entries = 1;
	def->map_flags = type == LIBBPF_MAP_RODATA || type == LIBBPF_MAP_KCONFIG
		? BPF_F_RDONLY_PROG : 0;

	/* failures are fine because of maps like .rodata.str1.1 */
	(void) map_fill_btf_type_info(obj, map);

	if (map_is_mmapable(obj, map))
		def->map_flags |= BPF_F_MMAPABLE;

	pr_debug("map '%s' (global data): at sec_idx %d, offset %zu, flags %x.\n",
		 map->name, map->sec_idx, map->sec_offset, def->map_flags);

	mmap_sz = bpf_map_mmap_sz(map);
	map->mmaped = mmap(NULL, mmap_sz, PROT_READ | PROT_WRITE,
			   MAP_SHARED | MAP_ANONYMOUS, -1, 0);
	if (map->mmaped == MAP_FAILED) {
		err = -errno;
		map->mmaped = NULL;
		pr_warn("failed to alloc map '%s' content buffer: %d\n",
			map->name, err);
		zfree(&map->real_name);
		zfree(&map->name);
		return err;
	}

	if (data)
		memcpy(map->mmaped, data, data_sz);

	pr_debug("map %td is \"%s\"\n", map - obj->maps, map->name);
	return 0;
}
static int bpf_object__init_global_data_maps(struct bpf_object *obj)
{
	struct elf_sec_desc *sec_desc;
	const char *sec_name;
	int err = 0, sec_idx;

	/*
	 * Populate obj->maps with libbpf internal maps.
	 */
	for (sec_idx = 1; sec_idx < obj->efile.sec_cnt; sec_idx++) {
		sec_desc = &obj->efile.secs[sec_idx];

		/* Skip recognized sections with size 0. */
		if (!sec_desc->data || sec_desc->data->d_size == 0)
			continue;

		switch (sec_desc->sec_type) {
		case SEC_DATA:
			sec_name = elf_sec_name(obj, elf_sec_by_idx(obj, sec_idx));
			err = bpf_object__init_internal_map(obj, LIBBPF_MAP_DATA,
							    sec_name, sec_idx,
							    sec_desc->data->d_buf,
							    sec_desc->data->d_size);
			break;
		case SEC_RODATA:
			obj->has_rodata = true;
			sec_name = elf_sec_name(obj, elf_sec_by_idx(obj, sec_idx));
			err = bpf_object__init_internal_map(obj, LIBBPF_MAP_RODATA,
							    sec_name, sec_idx,
							    sec_desc->data->d_buf,
							    sec_desc->data->d_size);
			break;
		case SEC_BSS:
			sec_name = elf_sec_name(obj, elf_sec_by_idx(obj, sec_idx));
			err = bpf_object__init_internal_map(obj, LIBBPF_MAP_BSS,
							    sec_name, sec_idx,
							    NULL,
							    sec_desc->data->d_size);
			break;
		default:
			/* skip */
			break;
		}
		if (err)
			return err;
	}
	return 0;
}
static struct extern_desc *find_extern_by_name(const struct bpf_object *obj,
					       const void *name)
{
	int i;

	for (i = 0; i < obj->nr_extern; i++) {
		if (strcmp(obj->externs[i].name, name) == 0)
			return &obj->externs[i];
	}
	return NULL;
}
static int set_kcfg_value_tri(struct extern_desc *ext, void *ext_val,
			      char value)
{
	switch (ext->kcfg.type) {
	case KCFG_BOOL:
		if (value == 'm') {
			pr_warn("extern (kcfg) '%s': value '%c' implies tristate or char type\n",
				ext->name, value);
			return -EINVAL;
		}
		*(bool *)ext_val = value == 'y' ? true : false;
		break;
	case KCFG_TRISTATE:
		if (value == 'y')
			*(enum libbpf_tristate *)ext_val = TRI_YES;
		else if (value == 'm')
			*(enum libbpf_tristate *)ext_val = TRI_MODULE;
		else /* value == 'n' */
			*(enum libbpf_tristate *)ext_val = TRI_NO;
		break;
	case KCFG_CHAR:
		*(char *)ext_val = value;
		break;
	case KCFG_UNKNOWN:
	case KCFG_INT:
	case KCFG_CHAR_ARR:
	default:
		pr_warn("extern (kcfg) '%s': value '%c' implies bool, tristate, or char type\n",
			ext->name, value);
		return -EINVAL;
	}
	ext->is_set = true;
	return 0;
}
static int set_kcfg_value_str(struct extern_desc *ext, char *ext_val,
			      const char *value)
{
	size_t len;

	if (ext->kcfg.type != KCFG_CHAR_ARR) {
		pr_warn("extern (kcfg) '%s': value '%s' implies char array type\n",
			ext->name, value);
		return -EINVAL;
	}

	len = strlen(value);
	if (value[len - 1] != '"') {
		pr_warn("extern (kcfg) '%s': invalid string config '%s'\n",
			ext->name, value);
		return -EINVAL;
	}

	/* strip quotes */
	len -= 2;
	if (len >= ext->kcfg.sz) {
		pr_warn("extern (kcfg) '%s': long string '%s' of (%zu bytes) truncated to %d bytes\n",
			ext->name, value, len, ext->kcfg.sz - 1);
		len = ext->kcfg.sz - 1;
	}
	memcpy(ext_val, value + 1, len);
	ext_val[len] = '\0';
	ext->is_set = true;
	return 0;
}
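/* Example: a Kconfig line like CONFIG_CC_VERSION_TEXT="gcc 12" arrives
 * here with value == "\"gcc 12\"" (quotes included, strlen 8); len is
 * reduced by 2 and the copy skips the leading quote, storing the
 * NUL-terminated string "gcc 12".
 */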
static int parse_u64(const char *value, __u64 *res)
{
	char *value_end;
	int err;

	errno = 0;
	*res = strtoull(value, &value_end, 0);
	if (errno) {
		err = -errno;
		pr_warn("failed to parse '%s' as integer: %d\n", value, err);
		return err;
	}
	if (*value_end) {
		pr_warn("failed to parse '%s' as integer completely\n", value);
		return -EINVAL;
	}
	return 0;
}
static bool is_kcfg_value_in_range(const struct extern_desc *ext, __u64 v)
{
	int bit_sz = ext->kcfg.sz * 8;

	if (ext->kcfg.sz == 8)
		return true;

	/* Validate that value stored in u64 fits in integer of `ext->sz`
	 * bytes size without any loss of information. If the target integer
	 * is signed, we rely on the following limits of integer type of
	 * Y bits and subsequent transformation:
	 *
	 *     -2^(Y-1) <= X           <= 2^(Y-1) - 1
	 *      0 <= X + 2^(Y-1)       <= 2^Y - 1
	 *      0 <= X + 2^(Y-1)       <  2^Y
	 *
	 * For unsigned target integer, check that all the (64 - Y) bits are
	 * zero.
	 */
	if (ext->kcfg.is_signed)
		return v + (1ULL << (bit_sz - 1)) < (1ULL << bit_sz);
	else
		return (v >> bit_sz) == 0;
}
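/* Worked example for a signed 1-byte target (bit_sz == 8): v == -128 is
 * stored as 0xffffffffffffff80; v + 0x80 wraps to 0, and 0 < 0x100, so
 * it fits. v == 128 gives 128 + 0x80 == 0x100, which is not < 0x100, so
 * it's rejected. For an unsigned 2-byte target, v >> 16 must be 0.
 */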
static int set_kcfg_value_num(struct extern_desc *ext, void *ext_val,
			      __u64 value)
{
	if (ext->kcfg.type != KCFG_INT && ext->kcfg.type != KCFG_CHAR &&
	    ext->kcfg.type != KCFG_BOOL) {
		pr_warn("extern (kcfg) '%s': value '%llu' implies integer, char, or boolean type\n",
			ext->name, (unsigned long long)value);
		return -EINVAL;
	}
	if (ext->kcfg.type == KCFG_BOOL && value > 1) {
		pr_warn("extern (kcfg) '%s': value '%llu' isn't boolean compatible\n",
			ext->name, (unsigned long long)value);
		return -EINVAL;
	}
	if (!is_kcfg_value_in_range(ext, value)) {
		pr_warn("extern (kcfg) '%s': value '%llu' doesn't fit in %d bytes\n",
			ext->name, (unsigned long long)value, ext->kcfg.sz);
		return -ERANGE;
	}
	switch (ext->kcfg.sz) {
	case 1:
		*(__u8 *)ext_val = value;
		break;
	case 2:
		*(__u16 *)ext_val = value;
		break;
	case 4:
		*(__u32 *)ext_val = value;
		break;
	case 8:
		*(__u64 *)ext_val = value;
		break;
	default:
		return -EINVAL;
	}
	ext->is_set = true;
	return 0;
}
static int bpf_object__process_kconfig_line(struct bpf_object *obj,
					    char *buf, void *data)
{
	struct extern_desc *ext;
	char *sep, *value;
	int len, err = 0;
	void *ext_val;
	__u64 num;

	if (!str_has_pfx(buf, "CONFIG_"))
		return 0;

	sep = strchr(buf, '=');
	if (!sep) {
		pr_warn("failed to parse '%s': no separator\n", buf);
		return -EINVAL;
	}

	/* Trim ending '\n' */
	len = strlen(buf);
	if (buf[len - 1] == '\n')
		buf[len - 1] = '\0';
	/* Split on '=' and ensure that a value is present. */
	*sep = '\0';
	if (!sep[1]) {
		*sep = '=';
		pr_warn("failed to parse '%s': no value\n", buf);
		return -EINVAL;
	}

	ext = find_extern_by_name(obj, buf);
	if (!ext || ext->is_set)
		return 0;

	ext_val = data + ext->kcfg.data_off;
	value = sep + 1;

	switch (*value) {
	case 'y': case 'n': case 'm':
		err = set_kcfg_value_tri(ext, ext_val, *value);
		break;
	case '"':
		err = set_kcfg_value_str(ext, ext_val, value);
		break;
	default:
		/* assume integer */
		err = parse_u64(value, &num);
		if (err) {
			pr_warn("extern (kcfg) '%s': value '%s' isn't a valid integer\n", ext->name, value);
			return err;
		}
		if (ext->kcfg.type != KCFG_INT && ext->kcfg.type != KCFG_CHAR) {
			pr_warn("extern (kcfg) '%s': value '%s' implies integer type\n", ext->name, value);
			return -EINVAL;
		}
		err = set_kcfg_value_num(ext, ext_val, num);
		break;
	}
	if (err)
		return err;
	pr_debug("extern (kcfg) '%s': set to %s\n", ext->name, value);
	return 0;
}
static int bpf_object__read_kconfig_file(struct bpf_object *obj, void *data)
{
	char buf[PATH_MAX];
	struct utsname uts;
	int len, err = 0;
	gzFile file;

	uname(&uts);
	len = snprintf(buf, PATH_MAX, "/boot/config-%s", uts.release);
	if (len < 0)
		return -EINVAL;
	else if (len >= PATH_MAX)
		return -ENAMETOOLONG;

	/* gzopen also accepts uncompressed files. */
	file = gzopen(buf, "re");
	if (!file)
		file = gzopen("/proc/config.gz", "re");

	if (!file) {
		pr_warn("failed to open system Kconfig\n");
		return -ENOENT;
	}

	while (gzgets(file, buf, sizeof(buf))) {
		err = bpf_object__process_kconfig_line(obj, buf, data);
		if (err) {
			pr_warn("error parsing system Kconfig line '%s': %d\n",
				buf, err);
			goto out;
		}
	}

out:
	gzclose(file);
	return err;
}
static int bpf_object__read_kconfig_mem(struct bpf_object *obj,
					const char *config, void *data)
{
	char buf[PATH_MAX];
	int err = 0;
	FILE *file;

	file = fmemopen((void *)config, strlen(config), "r");
	if (!file) {
		err = -errno;
		pr_warn("failed to open in-memory Kconfig: %d\n", err);
		return err;
	}

	while (fgets(buf, sizeof(buf), file)) {
		err = bpf_object__process_kconfig_line(obj, buf, data);
		if (err) {
			pr_warn("error parsing in-memory Kconfig line '%s': %d\n",
				buf, err);
			break;
		}
	}

	fclose(file);
	return err;
}
static int bpf_object__init_kconfig_map(struct bpf_object *obj)
{
	struct extern_desc *last_ext = NULL, *ext;
	size_t map_sz;
	int i, err;

	for (i = 0; i < obj->nr_extern; i++) {
		ext = &obj->externs[i];
		if (ext->type == EXT_KCFG)
			last_ext = ext;
	}

	if (!last_ext)
		return 0;

	map_sz = last_ext->kcfg.data_off + last_ext->kcfg.sz;
	err = bpf_object__init_internal_map(obj, LIBBPF_MAP_KCONFIG,
					    ".kconfig", obj->efile.symbols_shndx,
					    NULL, map_sz);
	if (err)
		return err;

	obj->kconfig_map_idx = obj->nr_maps - 1;

	return 0;
}
const struct btf_type *
skip_mods_and_typedefs(const struct btf *btf, __u32 id, __u32 *res_id)
{
	const struct btf_type *t = btf__type_by_id(btf, id);

	if (res_id)
		*res_id = id;

	while (btf_is_mod(t) || btf_is_typedef(t)) {
		if (res_id)
			*res_id = t->type;
		t = btf__type_by_id(btf, t->type);
	}

	return t;
}
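/* Example: for `typedef volatile long config_t;`, starting from the
 * TYPEDEF's BTF ID the loop skips TYPEDEF, then VOLATILE, and stops at
 * the underlying INT type, leaving *res_id at the INT's BTF ID.
 */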
static const struct btf_type *
resolve_func_ptr(const struct btf *btf, __u32 id, __u32 *res_id)
{
	const struct btf_type *t;

	t = skip_mods_and_typedefs(btf, id, NULL);
	if (!btf_is_ptr(t))
		return NULL;

	t = skip_mods_and_typedefs(btf, t->type, res_id);

	return btf_is_func_proto(t) ? t : NULL;
}
static const char *__btf_kind_str(__u16 kind)
{
	switch (kind) {
	case BTF_KIND_UNKN: return "void";
	case BTF_KIND_INT: return "int";
	case BTF_KIND_PTR: return "ptr";
	case BTF_KIND_ARRAY: return "array";
	case BTF_KIND_STRUCT: return "struct";
	case BTF_KIND_UNION: return "union";
	case BTF_KIND_ENUM: return "enum";
	case BTF_KIND_FWD: return "fwd";
	case BTF_KIND_TYPEDEF: return "typedef";
	case BTF_KIND_VOLATILE: return "volatile";
	case BTF_KIND_CONST: return "const";
	case BTF_KIND_RESTRICT: return "restrict";
	case BTF_KIND_FUNC: return "func";
	case BTF_KIND_FUNC_PROTO: return "func_proto";
	case BTF_KIND_VAR: return "var";
	case BTF_KIND_DATASEC: return "datasec";
	case BTF_KIND_FLOAT: return "float";
	case BTF_KIND_DECL_TAG: return "decl_tag";
	case BTF_KIND_TYPE_TAG: return "type_tag";
	case BTF_KIND_ENUM64: return "enum64";
	default: return "unknown";
	}
}

const char *btf_kind_str(const struct btf_type *t)
{
	return __btf_kind_str(btf_kind(t));
}
/*
 * Fetch integer attribute of BTF map definition. Such attributes are
 * represented using a pointer to an array, in which dimensionality of array
 * encodes specified integer value. E.g., int (*type)[BPF_MAP_TYPE_ARRAY];
 * encodes `type => BPF_MAP_TYPE_ARRAY` key/value pair completely using BTF
 * type definition, while using only sizeof(void *) space in ELF data section.
 */
static bool get_map_field_int(const char *map_name, const struct btf *btf,
			      const struct btf_member *m, __u32 *res)
{
	const struct btf_type *t = skip_mods_and_typedefs(btf, m->type, NULL);
	const char *name = btf__name_by_offset(btf, m->name_off);
	const struct btf_array *arr_info;
	const struct btf_type *arr_t;

	if (!btf_is_ptr(t)) {
		pr_warn("map '%s': attr '%s': expected PTR, got %s.\n",
			map_name, name, btf_kind_str(t));
		return false;
	}

	arr_t = btf__type_by_id(btf, t->type);
	if (!arr_t) {
		pr_warn("map '%s': attr '%s': type [%u] not found.\n",
			map_name, name, t->type);
		return false;
	}
	if (!btf_is_array(arr_t)) {
		pr_warn("map '%s': attr '%s': expected ARRAY, got %s.\n",
			map_name, name, btf_kind_str(arr_t));
		return false;
	}
	arr_info = btf_array(arr_t);
	*res = arr_info->nelems;
	return true;
}
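/* Example (following bpf_helpers.h conventions): __uint(max_entries, 16)
 * expands to `int (*max_entries)[16]`, a pointer to a 16-element array,
 * so the function above recovers 16 from the array's nelems while the
 * map definition occupies only sizeof(void *) of section data.
 */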
static bool get_map_field_long(const char *map_name, const struct btf *btf,
			       const struct btf_member *m, __u64 *res)
{
	const struct btf_type *t = skip_mods_and_typedefs(btf, m->type, NULL);
	const char *name = btf__name_by_offset(btf, m->name_off);

	if (btf_is_ptr(t)) {
		__u32 res32;
		bool ret;

		ret = get_map_field_int(map_name, btf, m, &res32);
		if (ret)
			*res = (__u64)res32;
		return ret;
	}

	if (!btf_is_enum(t) && !btf_is_enum64(t)) {
		pr_warn("map '%s': attr '%s': expected ENUM or ENUM64, got %s.\n",
			map_name, name, btf_kind_str(t));
		return false;
	}

	if (btf_vlen(t) != 1) {
		pr_warn("map '%s': attr '%s': invalid __ulong\n",
			map_name, name);
		return false;
	}

	if (btf_is_enum(t)) {
		const struct btf_enum *e = btf_enum(t);

		*res = e->val;
	} else {
		const struct btf_enum64 *e = btf_enum64(t);

		*res = btf_enum64_value(e);
	}
	return true;
}
static int pathname_concat(char *buf, size_t buf_sz, const char *path, const char *name)
{
	int len;

	len = snprintf(buf, buf_sz, "%s/%s", path, name);
	if (len < 0)
		return -EINVAL;
	if (len >= buf_sz)
		return -ENAMETOOLONG;

	return 0;
}
static int build_map_pin_path(struct bpf_map *map, const char *path)
{
	char buf[PATH_MAX];
	int err;

	if (!path)
		path = BPF_FS_DEFAULT_PATH;

	err = pathname_concat(buf, sizeof(buf), path, bpf_map__name(map));
	if (err)
		return err;

	return bpf_map__set_pin_path(map, buf);
}
2421 /* should match definition in bpf_helpers.h */
2422 enum libbpf_pin_type {
2423 LIBBPF_PIN_NONE,
2424 /* PIN_BY_NAME: pin maps by name (in /sys/fs/bpf by default) */
2425 LIBBPF_PIN_BY_NAME,
2426 };
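/* Illustrative BPF-side usage (map name hypothetical):
 *
 *	struct {
 *		__uint(type, BPF_MAP_TYPE_HASH);
 *		__uint(max_entries, 1024);
 *		__type(key, __u32);
 *		__type(value, __u64);
 *		__uint(pinning, LIBBPF_PIN_BY_NAME);
 *	} pinned_map SEC(".maps");
 *
 * With LIBBPF_PIN_BY_NAME the map is pinned at <pin_root_path>/pinned_map
 * (/sys/fs/bpf/pinned_map by default) and an already-pinned map is reused
 * on subsequent loads instead of creating a new one.
 */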
2428 int parse_btf_map_def(const char *map_name, struct btf *btf,
2429 const struct btf_type *def_t, bool strict,
2430 struct btf_map_def *map_def, struct btf_map_def *inner_def)
2432 const struct btf_type *t;
2433 const struct btf_member *m;
2434 bool is_inner = inner_def == NULL;
2437 vlen = btf_vlen(def_t);
2438 m = btf_members(def_t);
2439 for (i = 0; i < vlen; i++, m++) {
2440 const char *name = btf__name_by_offset(btf, m->name_off);
2443 pr_warn("map '%s': invalid field #%d.\n", map_name, i);
2446 if (strcmp(name, "type") == 0) {
2447 if (!get_map_field_int(map_name, btf, m, &map_def->map_type))
2449 map_def->parts |= MAP_DEF_MAP_TYPE;
2450 } else if (strcmp(name, "max_entries") == 0) {
2451 if (!get_map_field_int(map_name, btf, m, &map_def->max_entries))
2453 map_def->parts |= MAP_DEF_MAX_ENTRIES;
2454 } else if (strcmp(name, "map_flags") == 0) {
2455 if (!get_map_field_int(map_name, btf, m, &map_def->map_flags))
2457 map_def->parts |= MAP_DEF_MAP_FLAGS;
2458 } else if (strcmp(name, "numa_node") == 0) {
2459 if (!get_map_field_int(map_name, btf, m, &map_def->numa_node))
2461 map_def->parts |= MAP_DEF_NUMA_NODE;
2462 } else if (strcmp(name, "key_size") == 0) {
2465 if (!get_map_field_int(map_name, btf, m, &sz))
2467 if (map_def->key_size && map_def->key_size != sz) {
2468 pr_warn("map '%s': conflicting key size %u != %u.\n",
2469 map_name, map_def->key_size, sz);
2472 map_def->key_size = sz;
2473 map_def->parts |= MAP_DEF_KEY_SIZE;
2474 } else if (strcmp(name, "key") == 0) {
2477 t = btf__type_by_id(btf, m->type);
2479 pr_warn("map '%s': key type [%d] not found.\n",
2480 map_name, m->type);
2483 if (!btf_is_ptr(t)) {
2484 pr_warn("map '%s': key spec is not PTR: %s.\n",
2485 map_name, btf_kind_str(t));
2488 sz = btf__resolve_size(btf, t->type);
2490 pr_warn("map '%s': can't determine key size for type [%u]: %zd.\n",
2491 map_name, t->type, (ssize_t)sz);
2494 if (map_def->key_size && map_def->key_size != sz) {
2495 pr_warn("map '%s': conflicting key size %u != %zd.\n",
2496 map_name, map_def->key_size, (ssize_t)sz);
2499 map_def->key_size = sz;
2500 map_def->key_type_id = t->type;
2501 map_def->parts |= MAP_DEF_KEY_SIZE | MAP_DEF_KEY_TYPE;
2502 } else if (strcmp(name, "value_size") == 0) {
2505 if (!get_map_field_int(map_name, btf, m, &sz))
2507 if (map_def->value_size && map_def->value_size != sz) {
2508 pr_warn("map '%s': conflicting value size %u != %u.\n",
2509 map_name, map_def->value_size, sz);
2512 map_def->value_size = sz;
2513 map_def->parts |= MAP_DEF_VALUE_SIZE;
2514 } else if (strcmp(name, "value") == 0) {
2517 t = btf__type_by_id(btf, m->type);
2519 pr_warn("map '%s': value type [%d] not found.\n",
2520 map_name, m->type);
2523 if (!btf_is_ptr(t)) {
2524 pr_warn("map '%s': value spec is not PTR: %s.\n",
2525 map_name, btf_kind_str(t));
2528 sz = btf__resolve_size(btf, t->type);
2530 pr_warn("map '%s': can't determine value size for type [%u]: %zd.\n",
2531 map_name, t->type, (ssize_t)sz);
2534 if (map_def->value_size && map_def->value_size != sz) {
2535 pr_warn("map '%s': conflicting value size %u != %zd.\n",
2536 map_name, map_def->value_size, (ssize_t)sz);
2539 map_def->value_size = sz;
2540 map_def->value_type_id = t->type;
2541 map_def->parts |= MAP_DEF_VALUE_SIZE | MAP_DEF_VALUE_TYPE;
2543 else if (strcmp(name, "values") == 0) {
2544 bool is_map_in_map = bpf_map_type__is_map_in_map(map_def->map_type);
2545 bool is_prog_array = map_def->map_type == BPF_MAP_TYPE_PROG_ARRAY;
2546 const char *desc = is_map_in_map ? "map-in-map inner" : "prog-array value";
2547 char inner_map_name[128];
2551 pr_warn("map '%s': multi-level inner maps not supported.\n",
2552 map_name);
2555 if (i != vlen - 1) {
2556 pr_warn("map '%s': '%s' member should be last.\n",
2557 map_name, name);
2560 if (!is_map_in_map && !is_prog_array) {
2561 pr_warn("map '%s': should be map-in-map or prog-array.\n",
2562 map_name);
2565 if (map_def->value_size && map_def->value_size != 4) {
2566 pr_warn("map '%s': conflicting value size %u != 4.\n",
2567 map_name, map_def->value_size);
2570 map_def->value_size = 4;
2571 t = btf__type_by_id(btf, m->type);
2573 pr_warn("map '%s': %s type [%d] not found.\n",
2574 map_name, desc, m->type);
2577 if (!btf_is_array(t) || btf_array(t)->nelems) {
2578 pr_warn("map '%s': %s spec is not a zero-sized array.\n",
2579 map_name, desc);
2582 t = skip_mods_and_typedefs(btf, btf_array(t)->type, NULL);
2583 if (!btf_is_ptr(t)) {
2584 pr_warn("map '%s': %s def is of unexpected kind %s.\n",
2585 map_name, desc, btf_kind_str(t));
2588 t = skip_mods_and_typedefs(btf, t->type, NULL);
2589 if (is_prog_array) {
2590 if (!btf_is_func_proto(t)) {
2591 pr_warn("map '%s': prog-array value def is of unexpected kind %s.\n",
2592 map_name, btf_kind_str(t));
2597 if (!btf_is_struct(t)) {
2598 pr_warn("map '%s': map-in-map inner def is of unexpected kind %s.\n",
2599 map_name, btf_kind_str(t));
2603 snprintf(inner_map_name, sizeof(inner_map_name), "%s.inner", map_name);
2604 err = parse_btf_map_def(inner_map_name, btf, t, strict, inner_def, NULL);
2608 map_def->parts |= MAP_DEF_INNER_MAP;
2609 } else if (strcmp(name, "pinning") == 0) {
2613 pr_warn("map '%s': inner def can't be pinned.\n", map_name);
2616 if (!get_map_field_int(map_name, btf, m, &val))
2618 if (val != LIBBPF_PIN_NONE && val != LIBBPF_PIN_BY_NAME) {
2619 pr_warn("map '%s': invalid pinning value %u.\n",
2620 map_name, val);
2623 map_def->pinning = val;
2624 map_def->parts |= MAP_DEF_PINNING;
2625 } else if (strcmp(name, "map_extra") == 0) {
2628 if (!get_map_field_long(map_name, btf, m, &map_extra))
2630 map_def->map_extra = map_extra;
2631 map_def->parts |= MAP_DEF_MAP_EXTRA;
2634 pr_warn("map '%s': unknown field '%s'.\n", map_name, name);
2637 pr_debug("map '%s': ignoring unknown field '%s'.\n", map_name, name);
2641 if (map_def->map_type == BPF_MAP_TYPE_UNSPEC) {
2642 pr_warn("map '%s': map type isn't specified.\n", map_name);
2649 static size_t adjust_ringbuf_sz(size_t sz)
2651 __u32 page_sz = sysconf(_SC_PAGE_SIZE);
2654 /* if user forgot to set any size, make sure they see error */
2657 /* Kernel expects BPF_MAP_TYPE_RINGBUF's max_entries to be
2658 * a power-of-2 multiple of kernel's page size. If user diligently
2659 * satisfied these conditions, pass the size through.
2661 if ((sz % page_sz) == 0 && is_pow_of_2(sz / page_sz))
2664 /* Otherwise find closest (page_sz * power_of_2) product bigger than
2665 * user-set size to satisfy both user size request and kernel
2666 * requirements and substitute correct max_entries for map creation.
2668 for (mul = 1; mul <= UINT_MAX / page_sz; mul <<= 1) {
2669 if (mul * page_sz > sz)
2670 return mul * page_sz;
2673 /* if it's impossible to satisfy the conditions (i.e., user size is
2674 * very close to UINT_MAX but is not a power-of-2 multiple of
2675 * page_size) then just return original size and let kernel reject it
2676 */
2677 return sz;
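/* Worked example for adjust_ringbuf_sz() above, assuming a 4096-byte
 * page: a requested size of 16384 (4 pages, a power-of-2 multiple) passes
 * through unchanged, while 20000 rounds up to the next page_sz * 2^n
 * product, i.e. 32768 (4096 * 8).
 */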
2680 static bool map_is_ringbuf(const struct bpf_map *map)
2682 return map->def.type == BPF_MAP_TYPE_RINGBUF ||
2683 map->def.type == BPF_MAP_TYPE_USER_RINGBUF;
2686 static void fill_map_from_def(struct bpf_map *map, const struct btf_map_def *def)
2688 map->def.type = def->map_type;
2689 map->def.key_size = def->key_size;
2690 map->def.value_size = def->value_size;
2691 map->def.max_entries = def->max_entries;
2692 map->def.map_flags = def->map_flags;
2693 map->map_extra = def->map_extra;
2695 map->numa_node = def->numa_node;
2696 map->btf_key_type_id = def->key_type_id;
2697 map->btf_value_type_id = def->value_type_id;
2699 /* auto-adjust BPF ringbuf map max_entries to be a multiple of page size */
2700 if (map_is_ringbuf(map))
2701 map->def.max_entries = adjust_ringbuf_sz(map->def.max_entries);
2703 if (def->parts & MAP_DEF_MAP_TYPE)
2704 pr_debug("map '%s': found type = %u.\n", map->name, def->map_type);
2706 if (def->parts & MAP_DEF_KEY_TYPE)
2707 pr_debug("map '%s': found key [%u], sz = %u.\n",
2708 map->name, def->key_type_id, def->key_size);
2709 else if (def->parts & MAP_DEF_KEY_SIZE)
2710 pr_debug("map '%s': found key_size = %u.\n", map->name, def->key_size);
2712 if (def->parts & MAP_DEF_VALUE_TYPE)
2713 pr_debug("map '%s': found value [%u], sz = %u.\n",
2714 map->name, def->value_type_id, def->value_size);
2715 else if (def->parts & MAP_DEF_VALUE_SIZE)
2716 pr_debug("map '%s': found value_size = %u.\n", map->name, def->value_size);
2718 if (def->parts & MAP_DEF_MAX_ENTRIES)
2719 pr_debug("map '%s': found max_entries = %u.\n", map->name, def->max_entries);
2720 if (def->parts & MAP_DEF_MAP_FLAGS)
2721 pr_debug("map '%s': found map_flags = 0x%x.\n", map->name, def->map_flags);
2722 if (def->parts & MAP_DEF_MAP_EXTRA)
2723 pr_debug("map '%s': found map_extra = 0x%llx.\n", map->name,
2724 (unsigned long long)def->map_extra);
2725 if (def->parts & MAP_DEF_PINNING)
2726 pr_debug("map '%s': found pinning = %u.\n", map->name, def->pinning);
2727 if (def->parts & MAP_DEF_NUMA_NODE)
2728 pr_debug("map '%s': found numa_node = %u.\n", map->name, def->numa_node);
2730 if (def->parts & MAP_DEF_INNER_MAP)
2731 pr_debug("map '%s': found inner map definition.\n", map->name);
2734 static const char *btf_var_linkage_str(__u32 linkage)
2736 switch (linkage) {
2737 case BTF_VAR_STATIC: return "static";
2738 case BTF_VAR_GLOBAL_ALLOCATED: return "global";
2739 case BTF_VAR_GLOBAL_EXTERN: return "extern";
2740 default: return "unknown";
2744 static int bpf_object__init_user_btf_map(struct bpf_object *obj,
2745 const struct btf_type *sec,
2746 int var_idx, int sec_idx,
2747 const Elf_Data *data, bool strict,
2748 const char *pin_root_path)
2750 struct btf_map_def map_def = {}, inner_def = {};
2751 const struct btf_type *var, *def;
2752 const struct btf_var_secinfo *vi;
2753 const struct btf_var *var_extra;
2754 const char *map_name;
2755 struct bpf_map *map;
2758 vi = btf_var_secinfos(sec) + var_idx;
2759 var = btf__type_by_id(obj->btf, vi->type);
2760 var_extra = btf_var(var);
2761 map_name = btf__name_by_offset(obj->btf, var->name_off);
2763 if (map_name == NULL || map_name[0] == '\0') {
2764 pr_warn("map #%d: empty name.\n", var_idx);
2767 if ((__u64)vi->offset + vi->size > data->d_size) {
2768 pr_warn("map '%s' BTF data is corrupted.\n", map_name);
2771 if (!btf_is_var(var)) {
2772 pr_warn("map '%s': unexpected var kind %s.\n",
2773 map_name, btf_kind_str(var));
2776 if (var_extra->linkage != BTF_VAR_GLOBAL_ALLOCATED) {
2777 pr_warn("map '%s': unsupported map linkage %s.\n",
2778 map_name, btf_var_linkage_str(var_extra->linkage));
2782 def = skip_mods_and_typedefs(obj->btf, var->type, NULL);
2783 if (!btf_is_struct(def)) {
2784 pr_warn("map '%s': unexpected def kind %s.\n",
2785 map_name, btf_kind_str(def));
2788 if (def->size > vi->size) {
2789 pr_warn("map '%s': invalid def size.\n", map_name);
2793 map = bpf_object__add_map(obj);
2795 return PTR_ERR(map);
2796 map->name = strdup(map_name);
2798 pr_warn("map '%s': failed to alloc map name.\n", map_name);
2801 map->libbpf_type = LIBBPF_MAP_UNSPEC;
2802 map->def.type = BPF_MAP_TYPE_UNSPEC;
2803 map->sec_idx = sec_idx;
2804 map->sec_offset = vi->offset;
2805 map->btf_var_idx = var_idx;
2806 pr_debug("map '%s': at sec_idx %d, offset %zu.\n",
2807 map_name, map->sec_idx, map->sec_offset);
2809 err = parse_btf_map_def(map->name, obj->btf, def, strict, &map_def, &inner_def);
2813 fill_map_from_def(map, &map_def);
2815 if (map_def.pinning == LIBBPF_PIN_BY_NAME) {
2816 err = build_map_pin_path(map, pin_root_path);
2818 pr_warn("map '%s': couldn't build pin path.\n", map->name);
2823 if (map_def.parts & MAP_DEF_INNER_MAP) {
2824 map->inner_map = calloc(1, sizeof(*map->inner_map));
2825 if (!map->inner_map)
2827 map->inner_map->fd = create_placeholder_fd();
2828 if (map->inner_map->fd < 0)
2829 return map->inner_map->fd;
2830 map->inner_map->sec_idx = sec_idx;
2831 map->inner_map->name = malloc(strlen(map_name) + sizeof(".inner") + 1);
2832 if (!map->inner_map->name)
2834 sprintf(map->inner_map->name, "%s.inner", map_name);
2836 fill_map_from_def(map->inner_map, &inner_def);
2839 err = map_fill_btf_type_info(obj, map);
2846 static int bpf_object__init_user_btf_maps(struct bpf_object *obj, bool strict,
2847 const char *pin_root_path)
2849 const struct btf_type *sec = NULL;
2850 int nr_types, i, vlen, err;
2851 const struct btf_type *t;
2856 if (obj->efile.btf_maps_shndx < 0)
2859 scn = elf_sec_by_idx(obj, obj->efile.btf_maps_shndx);
2860 data = elf_sec_data(obj, scn);
2861 if (!scn || !data) {
2862 pr_warn("elf: failed to get %s map definitions for %s\n",
2863 MAPS_ELF_SEC, obj->path);
2867 nr_types = btf__type_cnt(obj->btf);
2868 for (i = 1; i < nr_types; i++) {
2869 t = btf__type_by_id(obj->btf, i);
2870 if (!btf_is_datasec(t))
2872 name = btf__name_by_offset(obj->btf, t->name_off);
2873 if (strcmp(name, MAPS_ELF_SEC) == 0) {
2875 obj->efile.btf_maps_sec_btf_id = i;
2881 pr_warn("DATASEC '%s' not found.\n", MAPS_ELF_SEC);
2885 vlen = btf_vlen(sec);
2886 for (i = 0; i < vlen; i++) {
2887 err = bpf_object__init_user_btf_map(obj, sec, i,
2888 obj->efile.btf_maps_shndx,
2898 static int bpf_object__init_maps(struct bpf_object *obj,
2899 const struct bpf_object_open_opts *opts)
2901 const char *pin_root_path;
2905 strict = !OPTS_GET(opts, relaxed_maps, false);
2906 pin_root_path = OPTS_GET(opts, pin_root_path, NULL);
2908 err = bpf_object__init_user_btf_maps(obj, strict, pin_root_path);
2909 err = err ?: bpf_object__init_global_data_maps(obj);
2910 err = err ?: bpf_object__init_kconfig_map(obj);
2911 err = err ?: bpf_object_init_struct_ops(obj);
2916 static bool section_have_execinstr(struct bpf_object *obj, int idx)
2920 sh = elf_sec_hdr(obj, elf_sec_by_idx(obj, idx));
2924 return sh->sh_flags & SHF_EXECINSTR;
2927 static bool starts_with_qmark(const char *s)
2929 return s && s[0] == '?';
2932 static bool btf_needs_sanitization(struct bpf_object *obj)
2934 bool has_func_global = kernel_supports(obj, FEAT_BTF_GLOBAL_FUNC);
2935 bool has_datasec = kernel_supports(obj, FEAT_BTF_DATASEC);
2936 bool has_float = kernel_supports(obj, FEAT_BTF_FLOAT);
2937 bool has_func = kernel_supports(obj, FEAT_BTF_FUNC);
2938 bool has_decl_tag = kernel_supports(obj, FEAT_BTF_DECL_TAG);
2939 bool has_type_tag = kernel_supports(obj, FEAT_BTF_TYPE_TAG);
2940 bool has_enum64 = kernel_supports(obj, FEAT_BTF_ENUM64);
2941 bool has_qmark_datasec = kernel_supports(obj, FEAT_BTF_QMARK_DATASEC);
2943 return !has_func || !has_datasec || !has_func_global || !has_float ||
2944 !has_decl_tag || !has_type_tag || !has_enum64 || !has_qmark_datasec;
2947 static int bpf_object__sanitize_btf(struct bpf_object *obj, struct btf *btf)
2949 bool has_func_global = kernel_supports(obj, FEAT_BTF_GLOBAL_FUNC);
2950 bool has_datasec = kernel_supports(obj, FEAT_BTF_DATASEC);
2951 bool has_float = kernel_supports(obj, FEAT_BTF_FLOAT);
2952 bool has_func = kernel_supports(obj, FEAT_BTF_FUNC);
2953 bool has_decl_tag = kernel_supports(obj, FEAT_BTF_DECL_TAG);
2954 bool has_type_tag = kernel_supports(obj, FEAT_BTF_TYPE_TAG);
2955 bool has_enum64 = kernel_supports(obj, FEAT_BTF_ENUM64);
2956 bool has_qmark_datasec = kernel_supports(obj, FEAT_BTF_QMARK_DATASEC);
2957 int enum64_placeholder_id = 0;
2961 for (i = 1; i < btf__type_cnt(btf); i++) {
2962 t = (struct btf_type *)btf__type_by_id(btf, i);
2964 if ((!has_datasec && btf_is_var(t)) || (!has_decl_tag && btf_is_decl_tag(t))) {
2965 /* replace VAR/DECL_TAG with INT */
2966 t->info = BTF_INFO_ENC(BTF_KIND_INT, 0, 0);
2968 * using size = 1 is the safest choice, 4 will be too
2969 * big and cause kernel BTF validation failure if
2970 * original variable took less than 4 bytes
2972 t->size = 1;
2973 *(int *)(t + 1) = BTF_INT_ENC(0, 0, 8);
2974 } else if (!has_datasec && btf_is_datasec(t)) {
2975 /* replace DATASEC with STRUCT */
2976 const struct btf_var_secinfo *v = btf_var_secinfos(t);
2977 struct btf_member *m = btf_members(t);
2978 struct btf_type *vt;
2981 name = (char *)btf__name_by_offset(btf, t->name_off);
2983 if (*name == '.' || *name == '?')
2989 t->info = BTF_INFO_ENC(BTF_KIND_STRUCT, 0, vlen);
2990 for (j = 0; j < vlen; j++, v++, m++) {
2991 /* order of field assignments is important */
2992 m->offset = v->offset * 8;
2994 /* preserve variable name as member name */
2995 vt = (void *)btf__type_by_id(btf, v->type);
2996 m->name_off = vt->name_off;
2998 } else if (!has_qmark_datasec && btf_is_datasec(t) &&
2999 starts_with_qmark(btf__name_by_offset(btf, t->name_off))) {
3000 /* replace '?' prefix with '_' for DATASEC names */
3003 name = (char *)btf__name_by_offset(btf, t->name_off);
3006 } else if (!has_func && btf_is_func_proto(t)) {
3007 /* replace FUNC_PROTO with ENUM */
3009 t->info = BTF_INFO_ENC(BTF_KIND_ENUM, 0, vlen);
3010 t->size = sizeof(__u32); /* kernel enforced */
3011 } else if (!has_func && btf_is_func(t)) {
3012 /* replace FUNC with TYPEDEF */
3013 t->info = BTF_INFO_ENC(BTF_KIND_TYPEDEF, 0, 0);
3014 } else if (!has_func_global && btf_is_func(t)) {
3015 /* replace BTF_FUNC_GLOBAL with BTF_FUNC_STATIC */
3016 t->info = BTF_INFO_ENC(BTF_KIND_FUNC, 0, 0);
3017 } else if (!has_float && btf_is_float(t)) {
3018 /* replace FLOAT with an equally-sized empty STRUCT;
3019 * since C compilers do not accept e.g. "float" as a
3020 * valid struct name, make it anonymous
3023 t->info = BTF_INFO_ENC(BTF_KIND_STRUCT, 0, 0);
3024 } else if (!has_type_tag && btf_is_type_tag(t)) {
3025 /* replace TYPE_TAG with a CONST */
3027 t->info = BTF_INFO_ENC(BTF_KIND_CONST, 0, 0);
3028 } else if (!has_enum64 && btf_is_enum(t)) {
3029 /* clear the kflag */
3030 t->info = btf_type_info(btf_kind(t), btf_vlen(t), false);
3031 } else if (!has_enum64 && btf_is_enum64(t)) {
3032 /* replace ENUM64 with a union */
3033 struct btf_member *m;
3035 if (enum64_placeholder_id == 0) {
3036 enum64_placeholder_id = btf__add_int(btf, "enum64_placeholder", 1, 0);
3037 if (enum64_placeholder_id < 0)
3038 return enum64_placeholder_id;
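/* note: btf__add_int() above may have reallocated the underlying BTF
 * type data, invalidating the cached t pointer, hence the re-fetch by
 * type id below
 */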
3040 t = (struct btf_type *)btf__type_by_id(btf, i);
3043 m = btf_members(t);
3044 vlen = btf_vlen(t);
3045 t->info = BTF_INFO_ENC(BTF_KIND_UNION, 0, vlen);
3046 for (j = 0; j < vlen; j++, m++) {
3047 m->type = enum64_placeholder_id;
3048 m->offset = 0;
3056 static bool libbpf_needs_btf(const struct bpf_object *obj)
3058 return obj->efile.btf_maps_shndx >= 0 ||
3059 obj->efile.has_st_ops ||
3063 static bool kernel_needs_btf(const struct bpf_object *obj)
3065 return obj->efile.has_st_ops;
3068 static int bpf_object__init_btf(struct bpf_object *obj,
3070 Elf_Data *btf_ext_data)
3075 obj->btf = btf__new(btf_data->d_buf, btf_data->d_size);
3076 err = libbpf_get_error(obj->btf);
3079 pr_warn("Error loading ELF section %s: %d.\n", BTF_ELF_SEC, err);
3082 /* enforce 8-byte pointers for BPF-targeted BTFs */
3083 btf__set_pointer_size(obj->btf, 8);
3086 struct btf_ext_info *ext_segs[3];
3087 int seg_num, sec_num;
3090 pr_debug("Ignoring ELF section %s because the ELF section it depends on, %s, is missing.\n",
3091 BTF_EXT_ELF_SEC, BTF_ELF_SEC);
3094 obj->btf_ext = btf_ext__new(btf_ext_data->d_buf, btf_ext_data->d_size);
3095 err = libbpf_get_error(obj->btf_ext);
3097 pr_warn("Error loading ELF section %s: %d. Ignored and continue.\n",
3098 BTF_EXT_ELF_SEC, err);
3099 obj->btf_ext = NULL;
3103 /* setup .BTF.ext to ELF section mapping */
3104 ext_segs[0] = &obj->btf_ext->func_info;
3105 ext_segs[1] = &obj->btf_ext->line_info;
3106 ext_segs[2] = &obj->btf_ext->core_relo_info;
3107 for (seg_num = 0; seg_num < ARRAY_SIZE(ext_segs); seg_num++) {
3108 struct btf_ext_info *seg = ext_segs[seg_num];
3109 const struct btf_ext_info_sec *sec;
3110 const char *sec_name;
3113 if (seg->sec_cnt == 0)
3116 seg->sec_idxs = calloc(seg->sec_cnt, sizeof(*seg->sec_idxs));
3117 if (!seg->sec_idxs) {
3123 for_each_btf_ext_sec(seg, sec) {
3124 /* preventively increment index to avoid doing
3125 * this before every continue below
3129 sec_name = btf__name_by_offset(obj->btf, sec->sec_name_off);
3130 if (str_is_empty(sec_name))
3132 scn = elf_sec_by_name(obj, sec_name);
3136 seg->sec_idxs[sec_num - 1] = elf_ndxscn(scn);
3141 if (err && libbpf_needs_btf(obj)) {
3142 pr_warn("BTF is required, but is missing or corrupted.\n");
3148 static int compare_vsi_off(const void *_a, const void *_b)
3150 const struct btf_var_secinfo *a = _a;
3151 const struct btf_var_secinfo *b = _b;
3153 return a->offset - b->offset;
3156 static int btf_fixup_datasec(struct bpf_object *obj, struct btf *btf,
3159 __u32 size = 0, i, vars = btf_vlen(t);
3160 const char *sec_name = btf__name_by_offset(btf, t->name_off);
3161 struct btf_var_secinfo *vsi;
3162 bool fixup_offsets = false;
3166 pr_debug("No name found in string section for DATASEC kind.\n");
3170 /* Extern-backing datasecs (.ksyms, .kconfig) have their size and
3171 * variable offsets set at the previous step. Further, not every
3172 * extern BTF VAR has corresponding ELF symbol preserved, so we skip
3173 * all fixups altogether for such sections and go straight to sorting
3174 * VARs within their DATASEC.
3176 if (strcmp(sec_name, KCONFIG_SEC) == 0 || strcmp(sec_name, KSYMS_SEC) == 0)
3179 /* Clang leaves DATASEC size and VAR offsets as zeroes, so we need to
3180 * fix this up. But BPF static linker already fixes this up and fills
3181 * all the sizes and offsets during static linking. So this step has
3182 * to be optional. But the STV_HIDDEN handling is non-optional for any
3183 * non-extern DATASEC, so the variable fixup loop below handles both
3184 * functions at the same time, paying the cost of BTF VAR <-> ELF
3185 * symbol matching just once.
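/* For example (illustrative), a BPF-side global like
 *
 *	int counter = 1;	// placed in .data
 *
 * is emitted by Clang as DATASEC(".data") with size 0 and the VAR's
 * offset 0; the code below takes the section size from the ELF section
 * header and copies the 'counter' ELF symbol's st_value into vsi->offset.
 */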
3188 err = find_elf_sec_sz(obj, sec_name, &size);
3190 pr_debug("sec '%s': failed to determine size from ELF: size %u, err %d\n",
3191 sec_name, size, err);
3196 fixup_offsets = true;
3199 for (i = 0, vsi = btf_var_secinfos(t); i < vars; i++, vsi++) {
3200 const struct btf_type *t_var;
3201 struct btf_var *var;
3202 const char *var_name;
3205 t_var = btf__type_by_id(btf, vsi->type);
3206 if (!t_var || !btf_is_var(t_var)) {
3207 pr_debug("sec '%s': unexpected non-VAR type found\n", sec_name);
3211 var = btf_var(t_var);
3212 if (var->linkage == BTF_VAR_STATIC || var->linkage == BTF_VAR_GLOBAL_EXTERN)
3215 var_name = btf__name_by_offset(btf, t_var->name_off);
3217 pr_debug("sec '%s': failed to find name of DATASEC's member #%d\n",
3218 sec_name, i);
3222 sym = find_elf_var_sym(obj, var_name);
3224 pr_debug("sec '%s': failed to find ELF symbol for VAR '%s'\n",
3225 sec_name, var_name);
3230 vsi->offset = sym->st_value;
3232 /* if variable is a global/weak symbol, but has restricted
3233 * (STV_HIDDEN or STV_INTERNAL) visibility, mark its BTF VAR
3234 * as static. This follows similar logic for functions (BPF
3235 * subprogs) and influences libbpf's further decisions about
3236 * whether to make global data BPF array maps as
3239 if (ELF64_ST_VISIBILITY(sym->st_other) == STV_HIDDEN
3240 || ELF64_ST_VISIBILITY(sym->st_other) == STV_INTERNAL)
3241 var->linkage = BTF_VAR_STATIC;
3245 qsort(btf_var_secinfos(t), vars, sizeof(*vsi), compare_vsi_off);
3249 static int bpf_object_fixup_btf(struct bpf_object *obj)
3256 n = btf__type_cnt(obj->btf);
3257 for (i = 1; i < n; i++) {
3258 struct btf_type *t = btf_type_by_id(obj->btf, i);
3260 /* Loader needs to fix up some of the things compiler
3261 * couldn't get its hands on while emitting BTF. This
3262 * is section size and global variable offset. We use
3263 * the info from the ELF itself for this purpose.
3265 if (btf_is_datasec(t)) {
3266 err = btf_fixup_datasec(obj, obj->btf, t);
3275 static bool prog_needs_vmlinux_btf(struct bpf_program *prog)
3277 if (prog->type == BPF_PROG_TYPE_STRUCT_OPS ||
3278 prog->type == BPF_PROG_TYPE_LSM)
3281 /* BPF_PROG_TYPE_TRACING programs which do not attach to other programs
3282 * also need vmlinux BTF
3284 if (prog->type == BPF_PROG_TYPE_TRACING && !prog->attach_prog_fd)
3290 static bool map_needs_vmlinux_btf(struct bpf_map *map)
3292 return bpf_map__is_struct_ops(map);
3295 static bool obj_needs_vmlinux_btf(const struct bpf_object *obj)
3297 struct bpf_program *prog;
3298 struct bpf_map *map;
3301 /* CO-RE relocations need kernel BTF, only when btf_custom_path
3304 if (obj->btf_ext && obj->btf_ext->core_relo_info.len && !obj->btf_custom_path)
3307 /* Support for typed ksyms needs kernel BTF */
3308 for (i = 0; i < obj->nr_extern; i++) {
3309 const struct extern_desc *ext;
3311 ext = &obj->externs[i];
3312 if (ext->type == EXT_KSYM && ext->ksym.type_id)
3316 bpf_object__for_each_program(prog, obj) {
3317 if (!prog->autoload)
3319 if (prog_needs_vmlinux_btf(prog))
3323 bpf_object__for_each_map(map, obj) {
3324 if (map_needs_vmlinux_btf(map))
3331 static int bpf_object__load_vmlinux_btf(struct bpf_object *obj, bool force)
3335 /* btf_vmlinux could be loaded earlier */
3336 if (obj->btf_vmlinux || obj->gen_loader)
3339 if (!force && !obj_needs_vmlinux_btf(obj))
3342 obj->btf_vmlinux = btf__load_vmlinux_btf();
3343 err = libbpf_get_error(obj->btf_vmlinux);
3345 pr_warn("Error loading vmlinux BTF: %d\n", err);
3346 obj->btf_vmlinux = NULL;
3352 static int bpf_object__sanitize_and_load_btf(struct bpf_object *obj)
3354 struct btf *kern_btf = obj->btf;
3355 bool btf_mandatory, sanitize;
3361 if (!kernel_supports(obj, FEAT_BTF)) {
3362 if (kernel_needs_btf(obj)) {
3366 pr_debug("Kernel doesn't support BTF, skipping uploading it.\n");
3370 /* Even though some subprogs are global/weak, user might prefer more
3371 * permissive BPF verification process that BPF verifier performs for
3372 * static functions, taking into account more context from the caller
3373 * functions. In such case, they need to mark such subprogs with
3374 * __attribute__((visibility("hidden"))) and libbpf will adjust
3375 * corresponding FUNC BTF type to be marked as static and trigger more
3376 * involved BPF verification process.
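/* Illustrative BPF-side marking (__hidden is bpf_helpers.h shorthand for
 * __attribute__((visibility("hidden"))); subprog name hypothetical):
 *
 *	__hidden int shared_helper(struct ctx_t *ctx) { ... }
 *
 * For such subprogs mark_btf_static is set and the loop below downgrades
 * the FUNC BTF linkage from BTF_FUNC_GLOBAL to BTF_FUNC_STATIC.
 */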
3378 for (i = 0; i < obj->nr_programs; i++) {
3379 struct bpf_program *prog = &obj->programs[i];
3384 if (!prog->mark_btf_static || !prog_is_subprog(obj, prog))
3387 n = btf__type_cnt(obj->btf);
3388 for (j = 1; j < n; j++) {
3389 t = btf_type_by_id(obj->btf, j);
3390 if (!btf_is_func(t) || btf_func_linkage(t) != BTF_FUNC_GLOBAL)
3393 name = btf__str_by_offset(obj->btf, t->name_off);
3394 if (strcmp(name, prog->name) != 0)
3397 t->info = btf_type_info(BTF_KIND_FUNC, BTF_FUNC_STATIC, 0);
3402 sanitize = btf_needs_sanitization(obj);
3404 const void *raw_data;
3407 /* clone BTF to sanitize a copy and leave the original intact */
3408 raw_data = btf__raw_data(obj->btf, &sz);
3409 kern_btf = btf__new(raw_data, sz);
3410 err = libbpf_get_error(kern_btf);
3414 /* enforce 8-byte pointers for BPF-targeted BTFs */
3415 btf__set_pointer_size(obj->btf, 8);
3416 err = bpf_object__sanitize_btf(obj, kern_btf);
3421 if (obj->gen_loader) {
3423 const void *raw_data = btf__raw_data(kern_btf, &raw_size);
3427 bpf_gen__load_btf(obj->gen_loader, raw_data, raw_size);
3428 /* Pretend to have valid FD to pass various fd >= 0 checks.
3429 * This fd == 0 will not be used with any syscall and will be reset to -1 eventually.
3431 btf__set_fd(kern_btf, 0);
3433 /* currently BPF_BTF_LOAD only supports log_level 1 */
3434 err = btf_load_into_kernel(kern_btf, obj->log_buf, obj->log_size,
3435 obj->log_level ? 1 : 0, obj->token_fd);
3439 /* move fd to libbpf's BTF */
3440 btf__set_fd(obj->btf, btf__fd(kern_btf));
3441 btf__set_fd(kern_btf, -1);
3443 btf__free(kern_btf);
3447 btf_mandatory = kernel_needs_btf(obj);
3448 pr_warn("Error loading .BTF into kernel: %d. %s\n", err,
3449 btf_mandatory ? "BTF is mandatory, can't proceed."
3450 : "BTF is optional, ignoring.");
3457 static const char *elf_sym_str(const struct bpf_object *obj, size_t off)
3461 name = elf_strptr(obj->efile.elf, obj->efile.strtabidx, off);
3463 pr_warn("elf: failed to get section name string at offset %zu from %s: %s\n",
3464 off, obj->path, elf_errmsg(-1));
3471 static const char *elf_sec_str(const struct bpf_object *obj, size_t off)
3475 name = elf_strptr(obj->efile.elf, obj->efile.shstrndx, off);
3477 pr_warn("elf: failed to get section name string at offset %zu from %s: %s\n",
3478 off, obj->path, elf_errmsg(-1));
3485 static Elf_Scn *elf_sec_by_idx(const struct bpf_object *obj, size_t idx)
3489 scn = elf_getscn(obj->efile.elf, idx);
3491 pr_warn("elf: failed to get section(%zu) from %s: %s\n",
3492 idx, obj->path, elf_errmsg(-1));
3498 static Elf_Scn *elf_sec_by_name(const struct bpf_object *obj, const char *name)
3500 Elf_Scn *scn = NULL;
3501 Elf *elf = obj->efile.elf;
3502 const char *sec_name;
3504 while ((scn = elf_nextscn(elf, scn)) != NULL) {
3505 sec_name = elf_sec_name(obj, scn);
3509 if (strcmp(sec_name, name) != 0)
3517 static Elf64_Shdr *elf_sec_hdr(const struct bpf_object *obj, Elf_Scn *scn)
3524 shdr = elf64_getshdr(scn);
3526 pr_warn("elf: failed to get section(%zu) header from %s: %s\n",
3527 elf_ndxscn(scn), obj->path, elf_errmsg(-1));
3534 static const char *elf_sec_name(const struct bpf_object *obj, Elf_Scn *scn)
3542 sh = elf_sec_hdr(obj, scn);
3546 name = elf_sec_str(obj, sh->sh_name);
3548 pr_warn("elf: failed to get section(%zu) name from %s: %s\n",
3549 elf_ndxscn(scn), obj->path, elf_errmsg(-1));
3556 static Elf_Data *elf_sec_data(const struct bpf_object *obj, Elf_Scn *scn)
3563 data = elf_getdata(scn, 0);
3565 pr_warn("elf: failed to get section(%zu) %s data from %s: %s\n",
3566 elf_ndxscn(scn), elf_sec_name(obj, scn) ?: "<?>",
3567 obj->path, elf_errmsg(-1));
3574 static Elf64_Sym *elf_sym_by_idx(const struct bpf_object *obj, size_t idx)
3576 if (idx >= obj->efile.symbols->d_size / sizeof(Elf64_Sym))
3579 return (Elf64_Sym *)obj->efile.symbols->d_buf + idx;
3582 static Elf64_Rel *elf_rel_by_idx(Elf_Data *data, size_t idx)
3584 if (idx >= data->d_size / sizeof(Elf64_Rel))
3587 return (Elf64_Rel *)data->d_buf + idx;
3590 static bool is_sec_name_dwarf(const char *name)
3592 /* approximation, but the actual list is too long */
3593 return str_has_pfx(name, ".debug_");
3596 static bool ignore_elf_section(Elf64_Shdr *hdr, const char *name)
3598 /* no special handling of .strtab */
3599 if (hdr->sh_type == SHT_STRTAB)
3602 /* ignore .llvm_addrsig section as well */
3603 if (hdr->sh_type == SHT_LLVM_ADDRSIG)
3606 /* no subprograms will lead to an empty .text section, ignore it */
3607 if (hdr->sh_type == SHT_PROGBITS && hdr->sh_size == 0 &&
3608 strcmp(name, ".text") == 0)
3611 /* DWARF sections */
3612 if (is_sec_name_dwarf(name))
3615 if (str_has_pfx(name, ".rel")) {
3616 name += sizeof(".rel") - 1;
3617 /* DWARF section relocations */
3618 if (is_sec_name_dwarf(name))
3621 /* .BTF and .BTF.ext don't need relocations */
3622 if (strcmp(name, BTF_ELF_SEC) == 0 ||
3623 strcmp(name, BTF_EXT_ELF_SEC) == 0)
3630 static int cmp_progs(const void *_a, const void *_b)
3632 const struct bpf_program *a = _a;
3633 const struct bpf_program *b = _b;
3635 if (a->sec_idx != b->sec_idx)
3636 return a->sec_idx < b->sec_idx ? -1 : 1;
3638 /* sec_insn_off can't be the same within the section */
3639 return a->sec_insn_off < b->sec_insn_off ? -1 : 1;
3642 static int bpf_object__elf_collect(struct bpf_object *obj)
3644 struct elf_sec_desc *sec_desc;
3645 Elf *elf = obj->efile.elf;
3646 Elf_Data *btf_ext_data = NULL;
3647 Elf_Data *btf_data = NULL;
3648 int idx = 0, err = 0;
3654 /* ELF section indices are 0-based, but sec #0 is special "invalid"
3655 * section. Since section count retrieved by elf_getshdrnum() does
3656 * include sec #0, it is already the necessary size of an array to keep
3659 if (elf_getshdrnum(obj->efile.elf, &obj->efile.sec_cnt)) {
3660 pr_warn("elf: failed to get the number of sections for %s: %s\n",
3661 obj->path, elf_errmsg(-1));
3662 return -LIBBPF_ERRNO__FORMAT;
3664 obj->efile.secs = calloc(obj->efile.sec_cnt, sizeof(*obj->efile.secs));
3665 if (!obj->efile.secs)
3668 /* a bunch of ELF parsing functionality depends on processing symbols,
3669 * so do the first pass and find the symbol table
3672 while ((scn = elf_nextscn(elf, scn)) != NULL) {
3673 sh = elf_sec_hdr(obj, scn);
3675 return -LIBBPF_ERRNO__FORMAT;
3677 if (sh->sh_type == SHT_SYMTAB) {
3678 if (obj->efile.symbols) {
3679 pr_warn("elf: multiple symbol tables in %s\n", obj->path);
3680 return -LIBBPF_ERRNO__FORMAT;
3683 data = elf_sec_data(obj, scn);
3685 return -LIBBPF_ERRNO__FORMAT;
3687 idx = elf_ndxscn(scn);
3689 obj->efile.symbols = data;
3690 obj->efile.symbols_shndx = idx;
3691 obj->efile.strtabidx = sh->sh_link;
3695 if (!obj->efile.symbols) {
3696 pr_warn("elf: couldn't find symbol table in %s, stripped object file?\n",
3697 obj->path);
3702 while ((scn = elf_nextscn(elf, scn)) != NULL) {
3703 idx = elf_ndxscn(scn);
3704 sec_desc = &obj->efile.secs[idx];
3706 sh = elf_sec_hdr(obj, scn);
3708 return -LIBBPF_ERRNO__FORMAT;
3710 name = elf_sec_str(obj, sh->sh_name);
3712 return -LIBBPF_ERRNO__FORMAT;
3714 if (ignore_elf_section(sh, name))
3717 data = elf_sec_data(obj, scn);
3719 return -LIBBPF_ERRNO__FORMAT;
3721 pr_debug("elf: section(%d) %s, size %ld, link %d, flags %lx, type=%d\n",
3722 idx, name, (unsigned long)data->d_size,
3723 (int)sh->sh_link, (unsigned long)sh->sh_flags,
3724 (int)sh->sh_type);
3726 if (strcmp(name, "license") == 0) {
3727 err = bpf_object__init_license(obj, data->d_buf, data->d_size);
3730 } else if (strcmp(name, "version") == 0) {
3731 err = bpf_object__init_kversion(obj, data->d_buf, data->d_size);
3734 } else if (strcmp(name, "maps") == 0) {
3735 pr_warn("elf: legacy map definitions in 'maps' section are not supported by libbpf v1.0+\n");
3737 } else if (strcmp(name, MAPS_ELF_SEC) == 0) {
3738 obj->efile.btf_maps_shndx = idx;
3739 } else if (strcmp(name, BTF_ELF_SEC) == 0) {
3740 if (sh->sh_type != SHT_PROGBITS)
3741 return -LIBBPF_ERRNO__FORMAT;
3742 btf_data = data;
3743 } else if (strcmp(name, BTF_EXT_ELF_SEC) == 0) {
3744 if (sh->sh_type != SHT_PROGBITS)
3745 return -LIBBPF_ERRNO__FORMAT;
3746 btf_ext_data = data;
3747 } else if (sh->sh_type == SHT_SYMTAB) {
3748 /* already processed during the first pass above */
3749 } else if (sh->sh_type == SHT_PROGBITS && data->d_size > 0) {
3750 if (sh->sh_flags & SHF_EXECINSTR) {
3751 if (strcmp(name, ".text") == 0)
3752 obj->efile.text_shndx = idx;
3753 err = bpf_object__add_programs(obj, data, name, idx);
3756 } else if (strcmp(name, DATA_SEC) == 0 ||
3757 str_has_pfx(name, DATA_SEC ".")) {
3758 sec_desc->sec_type = SEC_DATA;
3759 sec_desc->shdr = sh;
3760 sec_desc->data = data;
3761 } else if (strcmp(name, RODATA_SEC) == 0 ||
3762 str_has_pfx(name, RODATA_SEC ".")) {
3763 sec_desc->sec_type = SEC_RODATA;
3764 sec_desc->shdr = sh;
3765 sec_desc->data = data;
3766 } else if (strcmp(name, STRUCT_OPS_SEC) == 0 ||
3767 strcmp(name, STRUCT_OPS_LINK_SEC) == 0 ||
3768 strcmp(name, "?" STRUCT_OPS_SEC) == 0 ||
3769 strcmp(name, "?" STRUCT_OPS_LINK_SEC) == 0) {
3770 sec_desc->sec_type = SEC_ST_OPS;
3771 sec_desc->shdr = sh;
3772 sec_desc->data = data;
3773 obj->efile.has_st_ops = true;
3775 pr_info("elf: skipping unrecognized data section(%d) %s\n",
3776 idx, name);
3778 } else if (sh->sh_type == SHT_REL) {
3779 int targ_sec_idx = sh->sh_info; /* points to other section */
3781 if (sh->sh_entsize != sizeof(Elf64_Rel) ||
3782 targ_sec_idx >= obj->efile.sec_cnt)
3783 return -LIBBPF_ERRNO__FORMAT;
3785 /* Only do relo for section with exec instructions */
3786 if (!section_have_execinstr(obj, targ_sec_idx) &&
3787 strcmp(name, ".rel" STRUCT_OPS_SEC) &&
3788 strcmp(name, ".rel" STRUCT_OPS_LINK_SEC) &&
3789 strcmp(name, ".rel?" STRUCT_OPS_SEC) &&
3790 strcmp(name, ".rel?" STRUCT_OPS_LINK_SEC) &&
3791 strcmp(name, ".rel" MAPS_ELF_SEC)) {
3792 pr_info("elf: skipping relo section(%d) %s for section(%d) %s\n",
3793 idx, name, targ_sec_idx,
3794 elf_sec_name(obj, elf_sec_by_idx(obj, targ_sec_idx)) ?: "<?>");
3798 sec_desc->sec_type = SEC_RELO;
3799 sec_desc->shdr = sh;
3800 sec_desc->data = data;
3801 } else if (sh->sh_type == SHT_NOBITS && (strcmp(name, BSS_SEC) == 0 ||
3802 str_has_pfx(name, BSS_SEC "."))) {
3803 sec_desc->sec_type = SEC_BSS;
3804 sec_desc->shdr = sh;
3805 sec_desc->data = data;
3807 pr_info("elf: skipping section(%d) %s (size %zu)\n", idx, name,
3808 (size_t)sh->sh_size);
3812 if (!obj->efile.strtabidx || obj->efile.strtabidx > idx) {
3813 pr_warn("elf: symbol strings section missing or invalid in %s\n", obj->path);
3814 return -LIBBPF_ERRNO__FORMAT;
3817 /* sort BPF programs by section name and in-section instruction offset
3820 if (obj->nr_programs)
3821 qsort(obj->programs, obj->nr_programs, sizeof(*obj->programs), cmp_progs);
3823 return bpf_object__init_btf(obj, btf_data, btf_ext_data);
3826 static bool sym_is_extern(const Elf64_Sym *sym)
3828 int bind = ELF64_ST_BIND(sym->st_info);
3829 /* externs are symbols w/ type=NOTYPE, bind=GLOBAL|WEAK, section=UND */
3830 return sym->st_shndx == SHN_UNDEF &&
3831 (bind == STB_GLOBAL || bind == STB_WEAK) &&
3832 ELF64_ST_TYPE(sym->st_info) == STT_NOTYPE;
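/* Illustrative BPF-side declarations producing such symbols (names are
 * examples; __kconfig/__ksym/__weak come from bpf_helpers.h):
 *
 *	extern unsigned CONFIG_HZ __kconfig;		// .kconfig extern
 *	extern const struct rq runqueues __ksym __weak;	// typed .ksyms var
 *	extern void bpf_rcu_read_lock(void) __ksym;	// kfunc extern
 */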
3835 static bool sym_is_subprog(const Elf64_Sym *sym, int text_shndx)
3837 int bind = ELF64_ST_BIND(sym->st_info);
3838 int type = ELF64_ST_TYPE(sym->st_info);
3840 /* in .text section */
3841 if (sym->st_shndx != text_shndx)
3844 /* local function */
3845 if (bind == STB_LOCAL && type == STT_SECTION)
3848 /* global function */
3849 return bind == STB_GLOBAL && type == STT_FUNC;
3852 static int find_extern_btf_id(const struct btf *btf, const char *ext_name)
3854 const struct btf_type *t;
3861 n = btf__type_cnt(btf);
3862 for (i = 1; i < n; i++) {
3863 t = btf__type_by_id(btf, i);
3865 if (!btf_is_var(t) && !btf_is_func(t))
3868 tname = btf__name_by_offset(btf, t->name_off);
3869 if (strcmp(tname, ext_name))
3872 if (btf_is_var(t) &&
3873 btf_var(t)->linkage != BTF_VAR_GLOBAL_EXTERN)
3876 if (btf_is_func(t) && btf_func_linkage(t) != BTF_FUNC_EXTERN)
3885 static int find_extern_sec_btf_id(struct btf *btf, int ext_btf_id) {
3886 const struct btf_var_secinfo *vs;
3887 const struct btf_type *t;
3893 n = btf__type_cnt(btf);
3894 for (i = 1; i < n; i++) {
3895 t = btf__type_by_id(btf, i);
3897 if (!btf_is_datasec(t))
3900 vs = btf_var_secinfos(t);
3901 for (j = 0; j < btf_vlen(t); j++, vs++) {
3902 if (vs->type == ext_btf_id)
3910 static enum kcfg_type find_kcfg_type(const struct btf *btf, int id,
3913 const struct btf_type *t;
3916 t = skip_mods_and_typedefs(btf, id, NULL);
3917 name = btf__name_by_offset(btf, t->name_off);
3921 switch (btf_kind(t)) {
3922 case BTF_KIND_INT: {
3923 int enc = btf_int_encoding(t);
3925 if (enc & BTF_INT_BOOL)
3926 return t->size == 1 ? KCFG_BOOL : KCFG_UNKNOWN;
3927 if (is_signed)
3928 *is_signed = enc & BTF_INT_SIGNED;
3929 if (t->size == 1)
3930 return KCFG_CHAR;
3931 if (t->size < 1 || t->size > 8 || (t->size & (t->size - 1)))
3932 return KCFG_UNKNOWN;
3933 return KCFG_INT;
3935 case BTF_KIND_ENUM:
3936 if (t->size != 4)
3937 return KCFG_UNKNOWN;
3938 if (strcmp(name, "libbpf_tristate"))
3939 return KCFG_UNKNOWN;
3940 return KCFG_TRISTATE;
3941 case BTF_KIND_ENUM64:
3942 if (strcmp(name, "libbpf_tristate"))
3943 return KCFG_UNKNOWN;
3944 return KCFG_TRISTATE;
3945 case BTF_KIND_ARRAY:
3946 if (btf_array(t)->nelems == 0)
3947 return KCFG_UNKNOWN;
3948 if (find_kcfg_type(btf, btf_array(t)->type, NULL) != KCFG_CHAR)
3949 return KCFG_UNKNOWN;
3950 return KCFG_CHAR_ARR;
3951 default:
3952 return KCFG_UNKNOWN;
3956 static int cmp_externs(const void *_a, const void *_b)
3958 const struct extern_desc *a = _a;
3959 const struct extern_desc *b = _b;
3961 if (a->type != b->type)
3962 return a->type < b->type ? -1 : 1;
3964 if (a->type == EXT_KCFG) {
3965 /* descending order by alignment requirements */
3966 if (a->kcfg.align != b->kcfg.align)
3967 return a->kcfg.align > b->kcfg.align ? -1 : 1;
3968 /* ascending order by size, within same alignment class */
3969 if (a->kcfg.sz != b->kcfg.sz)
3970 return a->kcfg.sz < b->kcfg.sz ? -1 : 1;
3973 /* resolve ties by name */
3974 return strcmp(a->name, b->name);
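/* Example (illustrative): kcfg externs with (align, size) pairs of
 * (8, 8), (4, 4), (2, 2) and (1, 1) sort largest-alignment-first, so the
 * roundup()-based offset assignment in bpf_object__collect_externs() can
 * pack them into the .kconfig map tightly, avoiding padding.
 */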
3977 static int find_int_btf_id(const struct btf *btf)
3979 const struct btf_type *t;
3982 n = btf__type_cnt(btf);
3983 for (i = 1; i < n; i++) {
3984 t = btf__type_by_id(btf, i);
3986 if (btf_is_int(t) && btf_int_bits(t) == 32)
3993 static int add_dummy_ksym_var(struct btf *btf)
3995 int i, int_btf_id, sec_btf_id, dummy_var_btf_id;
3996 const struct btf_var_secinfo *vs;
3997 const struct btf_type *sec;
4002 sec_btf_id = btf__find_by_name_kind(btf, KSYMS_SEC,
4003 BTF_KIND_DATASEC);
4007 sec = btf__type_by_id(btf, sec_btf_id);
4008 vs = btf_var_secinfos(sec);
4009 for (i = 0; i < btf_vlen(sec); i++, vs++) {
4010 const struct btf_type *vt;
4012 vt = btf__type_by_id(btf, vs->type);
4013 if (btf_is_func(vt))
4017 /* No func in ksyms sec. No need to add dummy var. */
4018 if (i == btf_vlen(sec))
4021 int_btf_id = find_int_btf_id(btf);
4022 dummy_var_btf_id = btf__add_var(btf,
4023 "dummy_ksym",
4024 BTF_VAR_GLOBAL_ALLOCATED,
4025 int_btf_id);
4026 if (dummy_var_btf_id < 0)
4027 pr_warn("cannot create a dummy_ksym var\n");
4029 return dummy_var_btf_id;
4032 static int bpf_object__collect_externs(struct bpf_object *obj)
4034 struct btf_type *sec, *kcfg_sec = NULL, *ksym_sec = NULL;
4035 const struct btf_type *t;
4036 struct extern_desc *ext;
4037 int i, n, off, dummy_var_btf_id;
4038 const char *ext_name, *sec_name;
4039 size_t ext_essent_len;
4043 if (!obj->efile.symbols)
4046 scn = elf_sec_by_idx(obj, obj->efile.symbols_shndx);
4047 sh = elf_sec_hdr(obj, scn);
4048 if (!sh || sh->sh_entsize != sizeof(Elf64_Sym))
4049 return -LIBBPF_ERRNO__FORMAT;
4051 dummy_var_btf_id = add_dummy_ksym_var(obj->btf);
4052 if (dummy_var_btf_id < 0)
4053 return dummy_var_btf_id;
4055 n = sh->sh_size / sh->sh_entsize;
4056 pr_debug("looking for externs among %d symbols...\n", n);
4058 for (i = 0; i < n; i++) {
4059 Elf64_Sym *sym = elf_sym_by_idx(obj, i);
4062 return -LIBBPF_ERRNO__FORMAT;
4063 if (!sym_is_extern(sym))
4065 ext_name = elf_sym_str(obj, sym->st_name);
4066 if (!ext_name || !ext_name[0])
4070 ext = libbpf_reallocarray(ext, obj->nr_extern + 1, sizeof(*ext));
4074 ext = &ext[obj->nr_extern];
4075 memset(ext, 0, sizeof(*ext));
4078 ext->btf_id = find_extern_btf_id(obj->btf, ext_name);
4079 if (ext->btf_id <= 0) {
4080 pr_warn("failed to find BTF for extern '%s': %d\n",
4081 ext_name, ext->btf_id);
4084 t = btf__type_by_id(obj->btf, ext->btf_id);
4085 ext->name = btf__name_by_offset(obj->btf, t->name_off);
4087 ext->is_weak = ELF64_ST_BIND(sym->st_info) == STB_WEAK;
4089 ext_essent_len = bpf_core_essential_name_len(ext->name);
4090 ext->essent_name = NULL;
4091 if (ext_essent_len != strlen(ext->name)) {
4092 ext->essent_name = strndup(ext->name, ext_essent_len);
4093 if (!ext->essent_name)
4097 ext->sec_btf_id = find_extern_sec_btf_id(obj->btf, ext->btf_id);
4098 if (ext->sec_btf_id <= 0) {
4099 pr_warn("failed to find BTF for extern '%s' [%d] section: %d\n",
4100 ext_name, ext->btf_id, ext->sec_btf_id);
4101 return ext->sec_btf_id;
4103 sec = (void *)btf__type_by_id(obj->btf, ext->sec_btf_id);
4104 sec_name = btf__name_by_offset(obj->btf, sec->name_off);
4106 if (strcmp(sec_name, KCONFIG_SEC) == 0) {
4107 if (btf_is_func(t)) {
4108 pr_warn("extern function %s is unsupported under %s section\n",
4109 ext->name, KCONFIG_SEC);
4112 kcfg_sec = sec;
4113 ext->type = EXT_KCFG;
4114 ext->kcfg.sz = btf__resolve_size(obj->btf, t->type);
4115 if (ext->kcfg.sz <= 0) {
4116 pr_warn("failed to resolve size of extern (kcfg) '%s': %d\n",
4117 ext_name, ext->kcfg.sz);
4118 return ext->kcfg.sz;
4120 ext->kcfg.align = btf__align_of(obj->btf, t->type);
4121 if (ext->kcfg.align <= 0) {
4122 pr_warn("failed to determine alignment of extern (kcfg) '%s': %d\n",
4123 ext_name, ext->kcfg.align);
4126 ext->kcfg.type = find_kcfg_type(obj->btf, t->type,
4127 &ext->kcfg.is_signed);
4128 if (ext->kcfg.type == KCFG_UNKNOWN) {
4129 pr_warn("extern (kcfg) '%s': type is unsupported\n", ext_name);
4132 } else if (strcmp(sec_name, KSYMS_SEC) == 0) {
4133 ksym_sec = sec;
4134 ext->type = EXT_KSYM;
4135 skip_mods_and_typedefs(obj->btf, t->type,
4136 &ext->ksym.type_id);
4138 pr_warn("unrecognized extern section '%s'\n", sec_name);
4142 pr_debug("collected %d externs total\n", obj->nr_extern);
4144 if (!obj->nr_extern)
4147 /* sort externs by type, for kcfg ones also by (align, size, name) */
4148 qsort(obj->externs, obj->nr_extern, sizeof(*ext), cmp_externs);
4150 /* for .ksyms section, we need to turn all externs into allocated
4151 * variables in BTF to pass kernel verification; we do this by
4152 * pretending that each extern is an int-sized (4-byte) variable
4155 /* find existing 4-byte integer type in BTF to use for fake
4156 * extern variables in DATASEC
4158 int int_btf_id = find_int_btf_id(obj->btf);
4159 /* For extern function, a dummy_var added earlier
4160 * will be used to replace the vs->type and
4161 * its name string will be used to refill
4162 * the missing param's name.
4164 const struct btf_type *dummy_var;
4166 dummy_var = btf__type_by_id(obj->btf, dummy_var_btf_id);
4167 for (i = 0; i < obj->nr_extern; i++) {
4168 ext = &obj->externs[i];
4169 if (ext->type != EXT_KSYM)
4171 pr_debug("extern (ksym) #%d: symbol %d, name %s\n",
4172 i, ext->sym_idx, ext->name);
4177 for (i = 0, off = 0; i < n; i++, off += sizeof(int)) {
4178 struct btf_var_secinfo *vs = btf_var_secinfos(sec) + i;
4179 struct btf_type *vt;
4181 vt = (void *)btf__type_by_id(obj->btf, vs->type);
4182 ext_name = btf__name_by_offset(obj->btf, vt->name_off);
4183 ext = find_extern_by_name(obj, ext_name);
4185 pr_warn("failed to find extern definition for BTF %s '%s'\n",
4186 btf_kind_str(vt), ext_name);
4189 if (btf_is_func(vt)) {
4190 const struct btf_type *func_proto;
4191 struct btf_param *param;
4194 func_proto = btf__type_by_id(obj->btf,
4196 param = btf_params(func_proto);
4197 /* Reuse the dummy_var string if the
4198 * func proto does not have param name.
4200 for (j = 0; j < btf_vlen(func_proto); j++)
4201 if (param[j].type && !param[j].name_off)
4202 param[j].name_off = dummy_var->name_off;
4204 vs->type = dummy_var_btf_id;
4205 vt->info &= ~0xffff;
4206 vt->info |= BTF_FUNC_GLOBAL;
4208 btf_var(vt)->linkage = BTF_VAR_GLOBAL_ALLOCATED;
4209 vt->type = int_btf_id;
4212 vs->size = sizeof(int);
4219 /* for kcfg externs calculate their offsets within a .kconfig map */
4220 off = 0;
4221 for (i = 0; i < obj->nr_extern; i++) {
4222 ext = &obj->externs[i];
4223 if (ext->type != EXT_KCFG)
4226 ext->kcfg.data_off = roundup(off, ext->kcfg.align);
4227 off = ext->kcfg.data_off + ext->kcfg.sz;
4228 pr_debug("extern (kcfg) #%d: symbol %d, off %u, name %s\n",
4229 i, ext->sym_idx, ext->kcfg.data_off, ext->name);
4233 for (i = 0; i < n; i++) {
4234 struct btf_var_secinfo *vs = btf_var_secinfos(sec) + i;
4236 t = btf__type_by_id(obj->btf, vs->type);
4237 ext_name = btf__name_by_offset(obj->btf, t->name_off);
4238 ext = find_extern_by_name(obj, ext_name);
4240 pr_warn("failed to find extern definition for BTF var '%s'\n",
4241 ext_name);
4244 btf_var(t)->linkage = BTF_VAR_GLOBAL_ALLOCATED;
4245 vs->offset = ext->kcfg.data_off;
4251 static bool prog_is_subprog(const struct bpf_object *obj, const struct bpf_program *prog)
4253 return prog->sec_idx == obj->efile.text_shndx && obj->nr_programs > 1;
4256 struct bpf_program *
4257 bpf_object__find_program_by_name(const struct bpf_object *obj,
4260 struct bpf_program *prog;
4262 bpf_object__for_each_program(prog, obj) {
4263 if (prog_is_subprog(obj, prog))
4265 if (!strcmp(prog->name, name))
4268 return errno = ENOENT, NULL;
4271 static bool bpf_object__shndx_is_data(const struct bpf_object *obj,
4274 switch (obj->efile.secs[shndx].sec_type) {
4284 static bool bpf_object__shndx_is_maps(const struct bpf_object *obj,
4287 return shndx == obj->efile.btf_maps_shndx;
4290 static enum libbpf_map_type
4291 bpf_object__section_to_libbpf_map_type(const struct bpf_object *obj, int shndx)
4293 if (shndx == obj->efile.symbols_shndx)
4294 return LIBBPF_MAP_KCONFIG;
4296 switch (obj->efile.secs[shndx].sec_type) {
4298 return LIBBPF_MAP_BSS;
4300 return LIBBPF_MAP_DATA;
4302 return LIBBPF_MAP_RODATA;
4304 return LIBBPF_MAP_UNSPEC;
4308 static int bpf_program__record_reloc(struct bpf_program *prog,
4309 struct reloc_desc *reloc_desc,
4310 __u32 insn_idx, const char *sym_name,
4311 const Elf64_Sym *sym, const Elf64_Rel *rel)
4313 struct bpf_insn *insn = &prog->insns[insn_idx];
4314 size_t map_idx, nr_maps = prog->obj->nr_maps;
4315 struct bpf_object *obj = prog->obj;
4316 __u32 shdr_idx = sym->st_shndx;
4317 enum libbpf_map_type type;
4318 const char *sym_sec_name;
4319 struct bpf_map *map;
4321 if (!is_call_insn(insn) && !is_ldimm64_insn(insn)) {
4322 pr_warn("prog '%s': invalid relo against '%s' for insns[%d].code 0x%x\n",
4323 prog->name, sym_name, insn_idx, insn->code);
4324 return -LIBBPF_ERRNO__RELOC;
4327 if (sym_is_extern(sym)) {
4328 int sym_idx = ELF64_R_SYM(rel->r_info);
4329 int i, n = obj->nr_extern;
4330 struct extern_desc *ext;
4332 for (i = 0; i < n; i++) {
4333 ext = &obj->externs[i];
4334 if (ext->sym_idx == sym_idx)
4338 pr_warn("prog '%s': extern relo failed to find extern for '%s' (%d)\n",
4339 prog->name, sym_name, sym_idx);
4340 return -LIBBPF_ERRNO__RELOC;
4342 pr_debug("prog '%s': found extern #%d '%s' (sym %d) for insn #%u\n",
4343 prog->name, i, ext->name, ext->sym_idx, insn_idx);
4344 if (insn->code == (BPF_JMP | BPF_CALL))
4345 reloc_desc->type = RELO_EXTERN_CALL;
4347 reloc_desc->type = RELO_EXTERN_LD64;
4348 reloc_desc->insn_idx = insn_idx;
4349 reloc_desc->ext_idx = i;
4353 /* sub-program call relocation */
4354 if (is_call_insn(insn)) {
4355 if (insn->src_reg != BPF_PSEUDO_CALL) {
4356 pr_warn("prog '%s': incorrect bpf_call opcode\n", prog->name);
4357 return -LIBBPF_ERRNO__RELOC;
4359 /* text_shndx can be 0, if no default "main" program exists */
4360 if (!shdr_idx || shdr_idx != obj->efile.text_shndx) {
4361 sym_sec_name = elf_sec_name(obj, elf_sec_by_idx(obj, shdr_idx));
4362 pr_warn("prog '%s': bad call relo against '%s' in section '%s'\n",
4363 prog->name, sym_name, sym_sec_name);
4364 return -LIBBPF_ERRNO__RELOC;
4366 if (sym->st_value % BPF_INSN_SZ) {
4367 pr_warn("prog '%s': bad call relo against '%s' at offset %zu\n",
4368 prog->name, sym_name, (size_t)sym->st_value);
4369 return -LIBBPF_ERRNO__RELOC;
4371 reloc_desc->type = RELO_CALL;
4372 reloc_desc->insn_idx = insn_idx;
4373 reloc_desc->sym_off = sym->st_value;
4377 if (!shdr_idx || shdr_idx >= SHN_LORESERVE) {
4378 pr_warn("prog '%s': invalid relo against '%s' in special section 0x%x; forgot to initialize global var?..\n",
4379 prog->name, sym_name, shdr_idx);
4380 return -LIBBPF_ERRNO__RELOC;
4383 /* loading subprog addresses */
4384 if (sym_is_subprog(sym, obj->efile.text_shndx)) {
4385 /* global_func: sym->st_value = offset in the section, insn->imm = 0.
4386 * local_func: sym->st_value = 0, insn->imm = offset in the section.
4388 if ((sym->st_value % BPF_INSN_SZ) || (insn->imm % BPF_INSN_SZ)) {
4389 pr_warn("prog '%s': bad subprog addr relo against '%s' at offset %zu+%d\n",
4390 prog->name, sym_name, (size_t)sym->st_value, insn->imm);
4391 return -LIBBPF_ERRNO__RELOC;
4394 reloc_desc->type = RELO_SUBPROG_ADDR;
4395 reloc_desc->insn_idx = insn_idx;
4396 reloc_desc->sym_off = sym->st_value;
4400 type = bpf_object__section_to_libbpf_map_type(obj, shdr_idx);
4401 sym_sec_name = elf_sec_name(obj, elf_sec_by_idx(obj, shdr_idx));
4403 /* generic map reference relocation */
4404 if (type == LIBBPF_MAP_UNSPEC) {
4405 if (!bpf_object__shndx_is_maps(obj, shdr_idx)) {
4406 pr_warn("prog '%s': bad map relo against '%s' in section '%s'\n",
4407 prog->name, sym_name, sym_sec_name);
4408 return -LIBBPF_ERRNO__RELOC;
4410 for (map_idx = 0; map_idx < nr_maps; map_idx++) {
4411 map = &obj->maps[map_idx];
4412 if (map->libbpf_type != type ||
4413 map->sec_idx != sym->st_shndx ||
4414 map->sec_offset != sym->st_value)
4416 pr_debug("prog '%s': found map %zd (%s, sec %d, off %zu) for insn #%u\n",
4417 prog->name, map_idx, map->name, map->sec_idx,
4418 map->sec_offset, insn_idx);
4421 if (map_idx >= nr_maps) {
4422 pr_warn("prog '%s': map relo failed to find map for section '%s', off %zu\n",
4423 prog->name, sym_sec_name, (size_t)sym->st_value);
4424 return -LIBBPF_ERRNO__RELOC;
4426 reloc_desc->type = RELO_LD64;
4427 reloc_desc->insn_idx = insn_idx;
4428 reloc_desc->map_idx = map_idx;
4429 reloc_desc->sym_off = 0; /* sym->st_value determines map_idx */
4433 /* global data map relocation */
4434 if (!bpf_object__shndx_is_data(obj, shdr_idx)) {
4435 pr_warn("prog '%s': bad data relo against section '%s'\n",
4436 prog->name, sym_sec_name);
4437 return -LIBBPF_ERRNO__RELOC;
4439 for (map_idx = 0; map_idx < nr_maps; map_idx++) {
4440 map = &obj->maps[map_idx];
4441 if (map->libbpf_type != type || map->sec_idx != sym->st_shndx)
4443 pr_debug("prog '%s': found data map %zd (%s, sec %d, off %zu) for insn %u\n",
4444 prog->name, map_idx, map->name, map->sec_idx,
4445 map->sec_offset, insn_idx);
4448 if (map_idx >= nr_maps) {
4449 pr_warn("prog '%s': data relo failed to find map for section '%s'\n",
4450 prog->name, sym_sec_name);
4451 return -LIBBPF_ERRNO__RELOC;
4454 reloc_desc->type = RELO_DATA;
4455 reloc_desc->insn_idx = insn_idx;
4456 reloc_desc->map_idx = map_idx;
4457 reloc_desc->sym_off = sym->st_value;
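/* Illustrative origin of these relocations (hypothetical program):
 *
 *	SEC("tracepoint/sched/sched_switch")
 *	int handler(void *ctx)
 *	{
 *		__u32 key = 0;
 *
 *		return bpf_map_lookup_elem(&example_map, &key) ? 1 : 0;
 *	}
 *
 * Here '&example_map' compiles to a ldimm64 instruction carrying an ELF
 * relocation against the example_map symbol in .maps, recorded above as
 * RELO_LD64; a reference to a global in .data/.rodata/.bss is recorded
 * as RELO_DATA against the corresponding internal map instead.
 */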
4461 static bool prog_contains_insn(const struct bpf_program *prog, size_t insn_idx)
4463 return insn_idx >= prog->sec_insn_off &&
4464 insn_idx < prog->sec_insn_off + prog->sec_insn_cnt;
4467 static struct bpf_program *find_prog_by_sec_insn(const struct bpf_object *obj,
4468 size_t sec_idx, size_t insn_idx)
4470 int l = 0, r = obj->nr_programs - 1, m;
4471 struct bpf_program *prog;
4473 if (!obj->nr_programs)
4476 while (l < r) {
4477 m = l + (r - l + 1) / 2;
4478 prog = &obj->programs[m];
4480 if (prog->sec_idx < sec_idx ||
4481 (prog->sec_idx == sec_idx && prog->sec_insn_off <= insn_idx))
4482 l = m;
4483 else
4484 r = m - 1;
4486 /* matching program could be at index l, but it still might be the
4487 * wrong one, so we need to double check conditions for the last time
4489 prog = &obj->programs[l];
4490 if (prog->sec_idx == sec_idx && prog_contains_insn(prog, insn_idx))
4496 bpf_object__collect_prog_relos(struct bpf_object *obj, Elf64_Shdr *shdr, Elf_Data *data)
4498 const char *relo_sec_name, *sec_name;
4499 size_t sec_idx = shdr->sh_info, sym_idx;
4500 struct bpf_program *prog;
4501 struct reloc_desc *relos;
4503 const char *sym_name;
4510 if (sec_idx >= obj->efile.sec_cnt)
4513 scn = elf_sec_by_idx(obj, sec_idx);
4514 scn_data = elf_sec_data(obj, scn);
4516 return -LIBBPF_ERRNO__FORMAT;
4518 relo_sec_name = elf_sec_str(obj, shdr->sh_name);
4519 sec_name = elf_sec_name(obj, scn);
4520 if (!relo_sec_name || !sec_name)
4523 pr_debug("sec '%s': collecting relocation for section(%zu) '%s'\n",
4524 relo_sec_name, sec_idx, sec_name);
4525 nrels = shdr->sh_size / shdr->sh_entsize;
4527 for (i = 0; i < nrels; i++) {
4528 rel = elf_rel_by_idx(data, i);
4530 pr_warn("sec '%s': failed to get relo #%d\n", relo_sec_name, i);
4531 return -LIBBPF_ERRNO__FORMAT;
4534 sym_idx = ELF64_R_SYM(rel->r_info);
4535 sym = elf_sym_by_idx(obj, sym_idx);
4537 pr_warn("sec '%s': symbol #%zu not found for relo #%d\n",
4538 relo_sec_name, sym_idx, i);
4539 return -LIBBPF_ERRNO__FORMAT;
4542 if (sym->st_shndx >= obj->efile.sec_cnt) {
4543 pr_warn("sec '%s': corrupted symbol #%zu pointing to invalid section #%zu for relo #%d\n",
4544 relo_sec_name, sym_idx, (size_t)sym->st_shndx, i);
4545 return -LIBBPF_ERRNO__FORMAT;
4548 if (rel->r_offset % BPF_INSN_SZ || rel->r_offset >= scn_data->d_size) {
4549 pr_warn("sec '%s': invalid offset 0x%zx for relo #%d\n",
4550 relo_sec_name, (size_t)rel->r_offset, i);
4551 return -LIBBPF_ERRNO__FORMAT;
4554 insn_idx = rel->r_offset / BPF_INSN_SZ;
4555 /* relocations against static functions are recorded as
4556 * relocations against the section that contains a function;
4557 * in such case, symbol will be STT_SECTION and sym.st_name
4558 * will point to empty string (0), so fetch section name
4561 if (ELF64_ST_TYPE(sym->st_info) == STT_SECTION && sym->st_name == 0)
4562 sym_name = elf_sec_name(obj, elf_sec_by_idx(obj, sym->st_shndx));
4564 sym_name = elf_sym_str(obj, sym->st_name);
4565 sym_name = sym_name ?: "<?";
4567 pr_debug("sec '%s': relo #%d: insn #%u against '%s'\n",
4568 relo_sec_name, i, insn_idx, sym_name);
4570 prog = find_prog_by_sec_insn(obj, sec_idx, insn_idx);
4572 pr_debug("sec '%s': relo #%d: couldn't find program in section '%s' for insn #%u, probably overridden weak function, skipping...\n",
4573 relo_sec_name, i, sec_name, insn_idx);
4577 relos = libbpf_reallocarray(prog->reloc_desc,
4578 prog->nr_reloc + 1, sizeof(*relos));
4581 prog->reloc_desc = relos;
4583 /* adjust insn_idx to local BPF program frame of reference */
4584 insn_idx -= prog->sec_insn_off;
4585 err = bpf_program__record_reloc(prog, &relos[prog->nr_reloc],
4586 insn_idx, sym_name, sym, rel);
4595 static int map_fill_btf_type_info(struct bpf_object *obj, struct bpf_map *map)
4602 /* if it's BTF-defined map, we don't need to search for type IDs.
4603 * For struct_ops map, it does not need btf_key_type_id and
4604 * btf_value_type_id.
4606 if (map->sec_idx == obj->efile.btf_maps_shndx || bpf_map__is_struct_ops(map))
4610 * LLVM annotates global data differently in BTF, that is,
4611 * only as '.data', '.bss' or '.rodata'.
4613 if (!bpf_map__is_internal(map))
4616 id = btf__find_by_name(obj->btf, map->real_name);
4620 map->btf_key_type_id = 0;
4621 map->btf_value_type_id = id;
4625 static int bpf_get_map_info_from_fdinfo(int fd, struct bpf_map_info *info)
4627 char file[PATH_MAX], buff[4096];
4632 snprintf(file, sizeof(file), "/proc/%d/fdinfo/%d", getpid(), fd);
4633 memset(info, 0, sizeof(*info));
4635 fp = fopen(file, "re");
4638 pr_warn("failed to open %s: %d. No procfs support?\n", file,
4639 err);
4643 while (fgets(buff, sizeof(buff), fp)) {
4644 if (sscanf(buff, "map_type:\t%u", &val) == 1)
4646 else if (sscanf(buff, "key_size:\t%u", &val) == 1)
4647 info->key_size = val;
4648 else if (sscanf(buff, "value_size:\t%u", &val) == 1)
4649 info->value_size = val;
4650 else if (sscanf(buff, "max_entries:\t%u", &val) == 1)
4651 info->max_entries = val;
4652 else if (sscanf(buff, "map_flags:\t%i", &val) == 1)
4653 info->map_flags = val;
4661 bool bpf_map__autocreate(const struct bpf_map *map)
4663 return map->autocreate;
4666 int bpf_map__set_autocreate(struct bpf_map *map, bool autocreate)
4668 if (map->obj->loaded)
4669 return libbpf_err(-EBUSY);
4671 map->autocreate = autocreate;
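/* Usage sketch (hypothetical caller, not part of libbpf): opt out of
 * creating a map that the target kernel may not support, before load:
 *
 *	if (!kernel_is_new_enough)
 *		bpf_map__set_autocreate(skel->maps.optional_map, false);
 */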
4675 int bpf_map__reuse_fd(struct bpf_map *map, int fd)
4677 struct bpf_map_info info;
4678 __u32 len = sizeof(info), name_len;
4682 memset(&info, 0, len);
4683 err = bpf_map_get_info_by_fd(fd, &info, &len);
4684 if (err && errno == EINVAL)
4685 err = bpf_get_map_info_from_fdinfo(fd, &info);
4687 return libbpf_err(err);
4689 name_len = strlen(info.name);
4690 if (name_len == BPF_OBJ_NAME_LEN - 1 && strncmp(map->name, info.name, name_len) == 0)
4691 new_name = strdup(map->name);
4693 new_name = strdup(info.name);
4696 return libbpf_err(-errno);
4699 * Like dup(), but make sure new FD is >= 3 and has O_CLOEXEC set.
4700 * This is similar to what we do in ensure_good_fd(), but without
4701 * closing original FD.
4703 new_fd = fcntl(fd, F_DUPFD_CLOEXEC, 3);
4706 goto err_free_new_name;
4709 err = reuse_fd(map->fd, new_fd);
4711 goto err_free_new_name;
4715 map->name = new_name;
4716 map->def.type = info.type;
4717 map->def.key_size = info.key_size;
4718 map->def.value_size = info.value_size;
4719 map->def.max_entries = info.max_entries;
4720 map->def.map_flags = info.map_flags;
4721 map->btf_key_type_id = info.btf_key_type_id;
4722 map->btf_value_type_id = info.btf_value_type_id;
4724 map->map_extra = info.map_extra;
4730 return libbpf_err(err);
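/* Usage sketch (hypothetical, not part of libbpf): share a pinned map
 * across objects by reusing its FD before loading the object:
 *
 *	struct bpf_map *m = bpf_object__find_map_by_name(obj, "shared_map");
 *	int pin_fd = bpf_obj_get("/sys/fs/bpf/shared_map");
 *
 *	if (pin_fd >= 0)
 *		err = bpf_map__reuse_fd(m, pin_fd);
 */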
4733 __u32 bpf_map__max_entries(const struct bpf_map *map)
4735 return map->def.max_entries;
4738 struct bpf_map *bpf_map__inner_map(struct bpf_map *map)
4740 if (!bpf_map_type__is_map_in_map(map->def.type))
4741 return errno = EINVAL, NULL;
4743 return map->inner_map;
4746 int bpf_map__set_max_entries(struct bpf_map *map, __u32 max_entries)
4748 if (map->obj->loaded)
4749 return libbpf_err(-EBUSY);
4751 map->def.max_entries = max_entries;
4753 /* auto-adjust BPF ringbuf map max_entries to be a multiple of page size */
4754 if (map_is_ringbuf(map))
4755 map->def.max_entries = adjust_ringbuf_sz(map->def.max_entries);
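/* For example, with 4KB pages a requested BPF_MAP_TYPE_RINGBUF size of
 * 12KB is not a power-of-2 multiple of the page size, so it gets rounded
 * up to 16KB, which is what the kernel accepts for ringbuf maps.
 */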
4760 static int bpf_object_prepare_token(struct bpf_object *obj)
4762 const char *bpffs_path;
4763 int bpffs_fd = -1, token_fd, err;
4765 enum libbpf_print_level level;
4767 /* token is explicitly prevented */
4768 if (obj->token_path && obj->token_path[0] == '\0') {
4769 pr_debug("object '%s': token is prevented, skipping...\n", obj->name);
4773 mandatory = obj->token_path != NULL;
4774 level = mandatory ? LIBBPF_WARN : LIBBPF_DEBUG;
4776 bpffs_path = obj->token_path ?: BPF_FS_DEFAULT_PATH;
4777 bpffs_fd = open(bpffs_path, O_DIRECTORY | O_RDONLY);
4780 __pr(level, "object '%s': failed (%d) to open BPF FS mount at '%s'%s\n",
4781 obj->name, err, bpffs_path,
4782 mandatory ? "" : ", skipping optional step...");
4783 return mandatory ? err : 0;
4786 token_fd = bpf_token_create(bpffs_fd, 0);
4789 if (!mandatory && token_fd == -ENOENT) {
4790 pr_debug("object '%s': BPF FS at '%s' doesn't have BPF token delegation set up, skipping...\n",
4791 obj->name, bpffs_path);
4794 __pr(level, "object '%s': failed (%d) to create BPF token from '%s'%s\n",
4795 obj->name, token_fd, bpffs_path,
4796 mandatory ? "" : ", skipping optional step...");
4797 return mandatory ? token_fd : 0;
4800 obj->feat_cache = calloc(1, sizeof(*obj->feat_cache));
4801 if (!obj->feat_cache) {
4806 obj->token_fd = token_fd;
4807 obj->feat_cache->token_fd = token_fd;
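/* Probe that BPF program loading works at all by loading the trivial
 * two-instruction program below, equivalent to:
 *
 *	r0 = 0
 *	exit
 *
 * first as a socket filter, falling back to a tracepoint program.
 */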
4813 bpf_object__probe_loading(struct bpf_object *obj)
4815 char *cp, errmsg[STRERR_BUFSIZE];
4816 struct bpf_insn insns[] = {
4817 BPF_MOV64_IMM(BPF_REG_0, 0),
4820 int ret, insn_cnt = ARRAY_SIZE(insns);
4821 LIBBPF_OPTS(bpf_prog_load_opts, opts,
4822 .token_fd = obj->token_fd,
4823 .prog_flags = obj->token_fd ? BPF_F_TOKEN_FD : 0,
4826 if (obj->gen_loader)
4829 ret = bump_rlimit_memlock();
4831 pr_warn("Failed to bump RLIMIT_MEMLOCK (err = %d), you might need to do it explicitly!\n", ret);
4833 /* make sure basic loading works */
4834 ret = bpf_prog_load(BPF_PROG_TYPE_SOCKET_FILTER, NULL, "GPL", insns, insn_cnt, &opts);
4836 ret = bpf_prog_load(BPF_PROG_TYPE_TRACEPOINT, NULL, "GPL", insns, insn_cnt, &opts);
4839 cp = libbpf_strerror_r(ret, errmsg, sizeof(errmsg));
4840 pr_warn("Error in %s():%s(%d). Couldn't load trivial BPF "
4841 "program. Make sure your kernel supports BPF "
4842 "(CONFIG_BPF_SYSCALL=y) and/or that RLIMIT_MEMLOCK is "
4843 "set to big enough value.\n", __func__, cp, ret);
4851 bool kernel_supports(const struct bpf_object *obj, enum kern_feature_id feat_id)
4853 if (obj->gen_loader)
4854 /* To generate a loader program, assume the latest kernel
4855 * to avoid doing extra prog_load and map_create syscalls.
4860 return feat_supported(obj->feat_cache, feat_id);
4862 return feat_supported(NULL, feat_id);
4865 static bool map_is_reuse_compat(const struct bpf_map *map, int map_fd)
4867 struct bpf_map_info map_info;
4868 char msg[STRERR_BUFSIZE];
4869 __u32 map_info_len = sizeof(map_info);
4872 memset(&map_info, 0, map_info_len);
4873 err = bpf_map_get_info_by_fd(map_fd, &map_info, &map_info_len);
4874 if (err && errno == EINVAL)
4875 err = bpf_get_map_info_from_fdinfo(map_fd, &map_info);
4877 pr_warn("failed to get map info for map FD %d: %s\n", map_fd,
4878 libbpf_strerror_r(errno, msg, sizeof(msg)));
4882 return (map_info.type == map->def.type &&
4883 map_info.key_size == map->def.key_size &&
4884 map_info.value_size == map->def.value_size &&
4885 map_info.max_entries == map->def.max_entries &&
4886 map_info.map_flags == map->def.map_flags &&
4887 map_info.map_extra == map->map_extra);
4891 bpf_object__reuse_map(struct bpf_map *map)
4893 char *cp, errmsg[STRERR_BUFSIZE];
4896 pin_fd = bpf_obj_get(map->pin_path);
4899 if (err == -ENOENT) {
4900 pr_debug("found no pinned map to reuse at '%s'\n",
4905 cp = libbpf_strerror_r(-err, errmsg, sizeof(errmsg));
4906 pr_warn("couldn't retrieve pinned map '%s': %s\n",
4911 if (!map_is_reuse_compat(map, pin_fd)) {
4912 pr_warn("couldn't reuse pinned map at '%s': parameter mismatch\n",
4918 err = bpf_map__reuse_fd(map, pin_fd);
4924 pr_debug("reused pinned map at '%s'\n", map->pin_path);
4930 bpf_object__populate_internal_map(struct bpf_object *obj, struct bpf_map *map)
4932 enum libbpf_map_type map_type = map->libbpf_type;
4933 char *cp, errmsg[STRERR_BUFSIZE];
4936 if (obj->gen_loader) {
4937 bpf_gen__map_update_elem(obj->gen_loader, map - obj->maps,
4938 map->mmaped, map->def.value_size);
4939 if (map_type == LIBBPF_MAP_RODATA || map_type == LIBBPF_MAP_KCONFIG)
4940 bpf_gen__map_freeze(obj->gen_loader, map - obj->maps);
4943 err = bpf_map_update_elem(map->fd, &zero, map->mmaped, 0);
4946 cp = libbpf_strerror_r(err, errmsg, sizeof(errmsg));
4947 pr_warn("Error setting initial map(%s) contents: %s\n",
4952 /* Freeze .rodata and .kconfig map as read-only from syscall side. */
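/* Once frozen, syscall-side writes (e.g., bpf_map_update_elem() from user
 * space) fail with -EPERM, while BPF-program-side reads keep working,
 * which makes the contents effectively constant.
 */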
4953 if (map_type == LIBBPF_MAP_RODATA || map_type == LIBBPF_MAP_KCONFIG) {
4954 err = bpf_map_freeze(map->fd);
4957 cp = libbpf_strerror_r(err, errmsg, sizeof(errmsg));
4958 pr_warn("Error freezing map(%s) as read-only: %s\n",
4966 static void bpf_map__destroy(struct bpf_map *map);
4968 static bool map_is_created(const struct bpf_map *map)
4970 return map->obj->loaded || map->reused;
4973 static int bpf_object__create_map(struct bpf_object *obj, struct bpf_map *map, bool is_inner)
4975 LIBBPF_OPTS(bpf_map_create_opts, create_attr);
4976 struct bpf_map_def *def = &map->def;
4977 const char *map_name = NULL;
4978 int err = 0, map_fd;
4980 if (kernel_supports(obj, FEAT_PROG_NAME))
4981 map_name = map->name;
4982 create_attr.map_ifindex = map->map_ifindex;
4983 create_attr.map_flags = def->map_flags;
4984 create_attr.numa_node = map->numa_node;
4985 create_attr.map_extra = map->map_extra;
4986 create_attr.token_fd = obj->token_fd;
4988 create_attr.map_flags |= BPF_F_TOKEN_FD;
4990 if (bpf_map__is_struct_ops(map)) {
4991 create_attr.btf_vmlinux_value_type_id = map->btf_vmlinux_value_type_id;
4992 if (map->mod_btf_fd >= 0) {
4993 create_attr.value_type_btf_obj_fd = map->mod_btf_fd;
4994 create_attr.map_flags |= BPF_F_VTYPE_BTF_OBJ_FD;
4998 if (obj->btf && btf__fd(obj->btf) >= 0) {
4999 create_attr.btf_fd = btf__fd(obj->btf);
5000 create_attr.btf_key_type_id = map->btf_key_type_id;
5001 create_attr.btf_value_type_id = map->btf_value_type_id;
5004 if (bpf_map_type__is_map_in_map(def->type)) {
5005 if (map->inner_map) {
5006 err = map_set_def_max_entries(map->inner_map);
5009 err = bpf_object__create_map(obj, map->inner_map, true);
5011 pr_warn("map '%s': failed to create inner map: %d\n",
5015 map->inner_map_fd = map->inner_map->fd;
5017 if (map->inner_map_fd >= 0)
5018 create_attr.inner_map_fd = map->inner_map_fd;
5021 switch (def->type) {
5022 case BPF_MAP_TYPE_PERF_EVENT_ARRAY:
5023 case BPF_MAP_TYPE_CGROUP_ARRAY:
5024 case BPF_MAP_TYPE_STACK_TRACE:
5025 case BPF_MAP_TYPE_ARRAY_OF_MAPS:
5026 case BPF_MAP_TYPE_HASH_OF_MAPS:
5027 case BPF_MAP_TYPE_DEVMAP:
5028 case BPF_MAP_TYPE_DEVMAP_HASH:
5029 case BPF_MAP_TYPE_CPUMAP:
5030 case BPF_MAP_TYPE_XSKMAP:
5031 case BPF_MAP_TYPE_SOCKMAP:
5032 case BPF_MAP_TYPE_SOCKHASH:
5033 case BPF_MAP_TYPE_QUEUE:
5034 case BPF_MAP_TYPE_STACK:
5035 case BPF_MAP_TYPE_ARENA:
5036 create_attr.btf_fd = 0;
5037 create_attr.btf_key_type_id = 0;
5038 create_attr.btf_value_type_id = 0;
5039 map->btf_key_type_id = 0;
5040 map->btf_value_type_id = 0;
5042 case BPF_MAP_TYPE_STRUCT_OPS:
5043 create_attr.btf_value_type_id = 0;
5049 if (obj->gen_loader) {
5050 bpf_gen__map_create(obj->gen_loader, def->type, map_name,
5051 def->key_size, def->value_size, def->max_entries,
5052 &create_attr, is_inner ? -1 : map - obj->maps);
5053 /* We keep pretending we have a valid FD to pass various fd >= 0
5054 * checks by just keeping original placeholder FDs in place.
5055 * See bpf_object__add_map() comment.
5056 * This placeholder fd will not be used with any syscall and
5057 * will be reset to -1 eventually.
5061 map_fd = bpf_map_create(def->type, map_name,
5062 def->key_size, def->value_size,
5063 def->max_entries, &create_attr);
5065 if (map_fd < 0 && (create_attr.btf_key_type_id || create_attr.btf_value_type_id)) {
5066 char *cp, errmsg[STRERR_BUFSIZE];
5069 cp = libbpf_strerror_r(err, errmsg, sizeof(errmsg));
5070 pr_warn("Error in bpf_create_map_xattr(%s):%s(%d). Retrying without BTF.\n",
5071 map->name, cp, err);
5072 create_attr.btf_fd = 0;
5073 create_attr.btf_key_type_id = 0;
5074 create_attr.btf_value_type_id = 0;
5075 map->btf_key_type_id = 0;
5076 map->btf_value_type_id = 0;
5077 map_fd = bpf_map_create(def->type, map_name,
5078 def->key_size, def->value_size,
5079 def->max_entries, &create_attr);
5082 if (bpf_map_type__is_map_in_map(def->type) && map->inner_map) {
5083 if (obj->gen_loader)
5084 map->inner_map->fd = -1;
5085 bpf_map__destroy(map->inner_map);
5086 zfree(&map->inner_map);
5092 /* obj->gen_loader case, prevent reuse_fd() from closing map_fd */
5093 if (map->fd == map_fd)
5096 /* Keep placeholder FD value but now point it to the BPF map object.
5097 * This way everything that relied on this map's FD (e.g., relocated
5098 * ldimm64 instructions) will stay valid and won't need adjustments.
5099 * map->fd stays valid but now points to what map_fd points to.
5101 return reuse_fd(map->fd, map_fd);
5104 static int init_map_in_map_slots(struct bpf_object *obj, struct bpf_map *map)
5106 const struct bpf_map *targ_map;
5110 for (i = 0; i < map->init_slots_sz; i++) {
5111 if (!map->init_slots[i])
5114 targ_map = map->init_slots[i];
5117 if (obj->gen_loader) {
5118 bpf_gen__populate_outer_map(obj->gen_loader,
5120 targ_map - obj->maps);
5122 err = bpf_map_update_elem(map->fd, &i, &fd, 0);
5126 pr_warn("map '%s': failed to initialize slot [%d] to map '%s' fd=%d: %d\n",
5127 map->name, i, targ_map->name, fd, err);
5130 pr_debug("map '%s': slot [%d] set to map '%s' fd=%d\n",
5131 map->name, i, targ_map->name, fd);
5134 zfree(&map->init_slots);
5135 map->init_slots_sz = 0;
5140 static int init_prog_array_slots(struct bpf_object *obj, struct bpf_map *map)
5142 const struct bpf_program *targ_prog;
5146 if (obj->gen_loader)
5149 for (i = 0; i < map->init_slots_sz; i++) {
5150 if (!map->init_slots[i])
5153 targ_prog = map->init_slots[i];
5154 fd = bpf_program__fd(targ_prog);
5156 err = bpf_map_update_elem(map->fd, &i, &fd, 0);
5159 pr_warn("map '%s': failed to initialize slot [%d] to prog '%s' fd=%d: %d\n",
5160 map->name, i, targ_prog->name, fd, err);
5163 pr_debug("map '%s': slot [%d] set to prog '%s' fd=%d\n",
5164 map->name, i, targ_prog->name, fd);
5167 zfree(&map->init_slots);
5168 map->init_slots_sz = 0;
5173 static int bpf_object_init_prog_arrays(struct bpf_object *obj)
5175 struct bpf_map *map;
5178 for (i = 0; i < obj->nr_maps; i++) {
5179 map = &obj->maps[i];
5181 if (!map->init_slots_sz || map->def.type != BPF_MAP_TYPE_PROG_ARRAY)
5184 err = init_prog_array_slots(obj, map);
5191 static int map_set_def_max_entries(struct bpf_map *map)
5193 if (map->def.type == BPF_MAP_TYPE_PERF_EVENT_ARRAY && !map->def.max_entries) {
5196 nr_cpus = libbpf_num_possible_cpus();
5198 pr_warn("map '%s': failed to determine number of system CPUs: %d\n",
5199 map->name, nr_cpus);
5202 pr_debug("map '%s': setting size to %d\n", map->name, nr_cpus);
5203 map->def.max_entries = nr_cpus;
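/* E.g., on a system with 8 possible CPUs, a PERF_EVENT_ARRAY declared
 * with max_entries = 0 is sized to 8, one slot per possible CPU.
 */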
5210 bpf_object__create_maps(struct bpf_object *obj)
5212 struct bpf_map *map;
5213 char *cp, errmsg[STRERR_BUFSIZE];
5218 for (i = 0; i < obj->nr_maps; i++) {
5219 map = &obj->maps[i];
5221 /* To support old kernels, we skip creating global data maps
5222 * (.rodata, .data, .kconfig, etc); later on, during program
5223 * loading, if we detect that at least one of the to-be-loaded
5224 * programs is referencing any global data map, we'll error
5225 * out with program name and relocation index logged.
5226 * This approach allows us to accommodate Clang emitting
5227 * unnecessary .rodata.str1.1 sections for string literals,
5228 * but it also allows CO-RE applications that use
5229 * global variables in some BPF programs, but not others.
5230 * If those global variable-using programs are not loaded at
5231 * runtime due to bpf_program__set_autoload(prog, false),
5232 * bpf_object loading will succeed just fine even on old
5235 if (bpf_map__is_internal(map) && !kernel_supports(obj, FEAT_GLOBAL_DATA))
5236 map->autocreate = false;
5238 if (!map->autocreate) {
5239 pr_debug("map '%s': skipped auto-creating...\n", map->name);
5243 err = map_set_def_max_entries(map);
5249 if (map->pin_path) {
5250 err = bpf_object__reuse_map(map);
5252 pr_warn("map '%s': error reusing pinned map\n",
5256 if (retried && map->fd < 0) {
5257 pr_warn("map '%s': cannot find pinned map\n",
5265 pr_debug("map '%s': skipping creation (preset fd=%d)\n",
5266 map->name, map->fd);
5268 err = bpf_object__create_map(obj, map, false);
5272 pr_debug("map '%s': created successfully, fd=%d\n",
5273 map->name, map->fd);
5275 if (bpf_map__is_internal(map)) {
5276 err = bpf_object__populate_internal_map(obj, map);
5280 if (map->def.type == BPF_MAP_TYPE_ARENA) {
5281 map->mmaped = mmap((void *)map->map_extra, bpf_map_mmap_sz(map),
5282 PROT_READ | PROT_WRITE,
5283 map->map_extra ? MAP_SHARED | MAP_FIXED : MAP_SHARED,
5285 if (map->mmaped == MAP_FAILED) {
5288 pr_warn("map '%s': failed to mmap arena: %d\n",
5293 if (map->init_slots_sz && map->def.type != BPF_MAP_TYPE_PROG_ARRAY) {
5294 err = init_map_in_map_slots(obj, map);
5300 if (map->pin_path && !map->pinned) {
5301 err = bpf_map__pin(map, NULL);
5303 if (!retried && err == -EEXIST) {
5307 pr_warn("map '%s': failed to auto-pin at '%s': %d\n",
5308 map->name, map->pin_path, err);
5317 cp = libbpf_strerror_r(err, errmsg, sizeof(errmsg));
5318 pr_warn("map '%s': failed to create: %s(%d)\n", map->name, cp, err);
5320 for (j = 0; j < i; j++)
5321 zclose(obj->maps[j].fd);
5325 static bool bpf_core_is_flavor_sep(const char *s)
5327 /* check X___Y name pattern, where X and Y are not underscores */
5328 return s[0] != '_' && /* X */
5329 s[1] == '_' && s[2] == '_' && s[3] == '_' && /* ___ */
5330 s[4] != '_'; /* Y */
5333 /* Given 'some_struct_name___with_flavor' return the length of a name prefix
5334 * before last triple underscore. Struct name part after last triple
5335 * underscore is ignored by BPF CO-RE relocation during relocation matching.
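 * For example, for "task_struct___v510" this returns 11, the length of
 * "task_struct", so the flavored local type still matches the kernel's
 * plain "task_struct" during candidate search.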
5337 size_t bpf_core_essential_name_len(const char *name)
5339 size_t n = strlen(name);
5342 for (i = n - 5; i >= 0; i--) {
5343 if (bpf_core_is_flavor_sep(name + i))
5349 void bpf_core_free_cands(struct bpf_core_cand_list *cands)
5358 int bpf_core_add_cands(struct bpf_core_cand *local_cand,
5359 size_t local_essent_len,
5360 const struct btf *targ_btf,
5361 const char *targ_btf_name,
5363 struct bpf_core_cand_list *cands)
5365 struct bpf_core_cand *new_cands, *cand;
5366 const struct btf_type *t, *local_t;
5367 const char *targ_name, *local_name;
5368 size_t targ_essent_len;
5371 local_t = btf__type_by_id(local_cand->btf, local_cand->id);
5372 local_name = btf__str_by_offset(local_cand->btf, local_t->name_off);
5374 n = btf__type_cnt(targ_btf);
5375 for (i = targ_start_id; i < n; i++) {
5376 t = btf__type_by_id(targ_btf, i);
5377 if (!btf_kind_core_compat(t, local_t))
5380 targ_name = btf__name_by_offset(targ_btf, t->name_off);
5381 if (str_is_empty(targ_name))
5384 targ_essent_len = bpf_core_essential_name_len(targ_name);
5385 if (targ_essent_len != local_essent_len)
5388 if (strncmp(local_name, targ_name, local_essent_len) != 0)
5391 pr_debug("CO-RE relocating [%d] %s %s: found target candidate [%d] %s %s in [%s]\n",
5392 local_cand->id, btf_kind_str(local_t),
5393 local_name, i, btf_kind_str(t), targ_name,
5395 new_cands = libbpf_reallocarray(cands->cands, cands->len + 1,
5396 sizeof(*cands->cands));
5400 cand = &new_cands[cands->len];
5401 cand->btf = targ_btf;
5404 cands->cands = new_cands;
5410 static int load_module_btfs(struct bpf_object *obj)
5412 struct bpf_btf_info info;
5413 struct module_btf *mod_btf;
5419 if (obj->btf_modules_loaded)
5422 if (obj->gen_loader)
5425 /* don't do this again, even if we find no module BTFs */
5426 obj->btf_modules_loaded = true;
5428 /* kernel too old to support module BTFs */
5429 if (!kernel_supports(obj, FEAT_MODULE_BTF))
5433 err = bpf_btf_get_next_id(id, &id);
5434 if (err && errno == ENOENT)
5436 if (err && errno == EPERM) {
5437 pr_debug("skipping module BTFs loading, missing privileges\n");
5442 pr_warn("failed to iterate BTF objects: %d\n", err);
5446 fd = bpf_btf_get_fd_by_id(id);
5448 if (errno == ENOENT)
5449 continue; /* expected race: BTF was unloaded */
5451 pr_warn("failed to get BTF object #%d FD: %d\n", id, err);
5456 memset(&info, 0, sizeof(info));
5457 info.name = ptr_to_u64(name);
5458 info.name_len = sizeof(name);
5460 err = bpf_btf_get_info_by_fd(fd, &info, &len);
5463 pr_warn("failed to get BTF object #%d info: %d\n", id, err);
5467 /* ignore non-module BTFs */
5468 if (!info.kernel_btf || strcmp(name, "vmlinux") == 0) {
5473 btf = btf_get_from_fd(fd, obj->btf_vmlinux);
5474 err = libbpf_get_error(btf);
5476 pr_warn("failed to load module [%s]'s BTF object #%d: %d\n",
5481 err = libbpf_ensure_mem((void **)&obj->btf_modules, &obj->btf_module_cap,
5482 sizeof(*obj->btf_modules), obj->btf_module_cnt + 1);
5486 mod_btf = &obj->btf_modules[obj->btf_module_cnt++];
5491 mod_btf->name = strdup(name);
5492 if (!mod_btf->name) {
5506 static struct bpf_core_cand_list *
5507 bpf_core_find_cands(struct bpf_object *obj, const struct btf *local_btf, __u32 local_type_id)
5509 struct bpf_core_cand local_cand = {};
5510 struct bpf_core_cand_list *cands;
5511 const struct btf *main_btf;
5512 const struct btf_type *local_t;
5513 const char *local_name;
5514 size_t local_essent_len;
5517 local_cand.btf = local_btf;
5518 local_cand.id = local_type_id;
5519 local_t = btf__type_by_id(local_btf, local_type_id);
5521 return ERR_PTR(-EINVAL);
5523 local_name = btf__name_by_offset(local_btf, local_t->name_off);
5524 if (str_is_empty(local_name))
5525 return ERR_PTR(-EINVAL);
5526 local_essent_len = bpf_core_essential_name_len(local_name);
5528 cands = calloc(1, sizeof(*cands));
5530 return ERR_PTR(-ENOMEM);
5532 /* Attempt to find target candidates in vmlinux BTF first */
5533 main_btf = obj->btf_vmlinux_override ?: obj->btf_vmlinux;
5534 err = bpf_core_add_cands(&local_cand, local_essent_len, main_btf, "vmlinux", 1, cands);
5538 /* if vmlinux BTF has any candidate, don't go for module BTFs */
5542 /* if vmlinux BTF was overridden, don't attempt to load module BTFs */
5543 if (obj->btf_vmlinux_override)
5546 /* now look through module BTFs, trying to still find candidates */
5547 err = load_module_btfs(obj);
5551 for (i = 0; i < obj->btf_module_cnt; i++) {
5552 err = bpf_core_add_cands(&local_cand, local_essent_len,
5553 obj->btf_modules[i].btf,
5554 obj->btf_modules[i].name,
5555 btf__type_cnt(obj->btf_vmlinux),
5563 bpf_core_free_cands(cands);
5564 return ERR_PTR(err);
5567 /* Check local and target types for compatibility. This check is used for
5568 * type-based CO-RE relocations and follows slightly different rules than
5569 * field-based relocations. This function assumes that root types were already
5570 * checked for name match. Beyond that initial root-level name check, names
5571 * are completely ignored. Compatibility rules are as follows:
5572 * - any two STRUCTs/UNIONs/FWDs/ENUMs/INTs are considered compatible, but
5573 * kind should match for local and target types (i.e., STRUCT is not
5574 * compatible with UNION);
5575 * - for ENUMs, the size is ignored;
5576 * - for INT, size and signedness are ignored;
5577 * - for ARRAY, dimensionality is ignored, element types are checked for
5578 * compatibility recursively;
5579 * - CONST/VOLATILE/RESTRICT modifiers are ignored;
5580 * - TYPEDEFs/PTRs are compatible if the types they point to are compatible;
5581 * - FUNC_PROTOs are compatible if they have compatible signature: same
5582 * number of input args and compatible return and argument types.
5583 * These rules are not set in stone and probably will be adjusted as we get
5584 * more experience with using BPF CO-RE relocations.
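 * For example, a local 'struct task_struct___old' is compatible with a
 * target 'struct task_struct' (both are STRUCTs; the flavor suffix was
 * already handled during the name match), and a local 'int' is compatible
 * with a target 'long' (INT size and signedness are ignored), but a
 * STRUCT is never compatible with a UNION, even with identical members.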
5586 int bpf_core_types_are_compat(const struct btf *local_btf, __u32 local_id,
5587 const struct btf *targ_btf, __u32 targ_id)
5589 return __bpf_core_types_are_compat(local_btf, local_id, targ_btf, targ_id, 32);
5592 int bpf_core_types_match(const struct btf *local_btf, __u32 local_id,
5593 const struct btf *targ_btf, __u32 targ_id)
5595 return __bpf_core_types_match(local_btf, local_id, targ_btf, targ_id, false, 32);
5598 static size_t bpf_core_hash_fn(const long key, void *ctx)
5603 static bool bpf_core_equal_fn(const long k1, const long k2, void *ctx)
5608 static int record_relo_core(struct bpf_program *prog,
5609 const struct bpf_core_relo *core_relo, int insn_idx)
5611 struct reloc_desc *relos, *relo;
5613 relos = libbpf_reallocarray(prog->reloc_desc,
5614 prog->nr_reloc + 1, sizeof(*relos));
5617 relo = &relos[prog->nr_reloc];
5618 relo->type = RELO_CORE;
5619 relo->insn_idx = insn_idx;
5620 relo->core_relo = core_relo;
5621 prog->reloc_desc = relos;
5626 static const struct bpf_core_relo *find_relo_core(struct bpf_program *prog, int insn_idx)
5628 struct reloc_desc *relo;
5631 for (i = 0; i < prog->nr_reloc; i++) {
5632 relo = &prog->reloc_desc[i];
5633 if (relo->type != RELO_CORE || relo->insn_idx != insn_idx)
5636 return relo->core_relo;
5642 static int bpf_core_resolve_relo(struct bpf_program *prog,
5643 const struct bpf_core_relo *relo,
5645 const struct btf *local_btf,
5646 struct hashmap *cand_cache,
5647 struct bpf_core_relo_res *targ_res)
5649 struct bpf_core_spec specs_scratch[3] = {};
5650 struct bpf_core_cand_list *cands = NULL;
5651 const char *prog_name = prog->name;
5652 const struct btf_type *local_type;
5653 const char *local_name;
5654 __u32 local_id = relo->type_id;
5657 local_type = btf__type_by_id(local_btf, local_id);
5661 local_name = btf__name_by_offset(local_btf, local_type->name_off);
5665 if (relo->kind != BPF_CORE_TYPE_ID_LOCAL &&
5666 !hashmap__find(cand_cache, local_id, &cands)) {
5667 cands = bpf_core_find_cands(prog->obj, local_btf, local_id);
5668 if (IS_ERR(cands)) {
5669 pr_warn("prog '%s': relo #%d: target candidate search failed for [%d] %s %s: %ld\n",
5670 prog_name, relo_idx, local_id, btf_kind_str(local_type),
5671 local_name, PTR_ERR(cands));
5672 return PTR_ERR(cands);
5674 err = hashmap__set(cand_cache, local_id, cands, NULL, NULL);
5676 bpf_core_free_cands(cands);
5681 return bpf_core_calc_relo_insn(prog_name, relo, relo_idx, local_btf, cands, specs_scratch,
5686 bpf_object__relocate_core(struct bpf_object *obj, const char *targ_btf_path)
5688 const struct btf_ext_info_sec *sec;
5689 struct bpf_core_relo_res targ_res;
5690 const struct bpf_core_relo *rec;
5691 const struct btf_ext_info *seg;
5692 struct hashmap_entry *entry;
5693 struct hashmap *cand_cache = NULL;
5694 struct bpf_program *prog;
5695 struct bpf_insn *insn;
5696 const char *sec_name;
5697 int i, err = 0, insn_idx, sec_idx, sec_num;
5699 if (obj->btf_ext->core_relo_info.len == 0)
5702 if (targ_btf_path) {
5703 obj->btf_vmlinux_override = btf__parse(targ_btf_path, NULL);
5704 err = libbpf_get_error(obj->btf_vmlinux_override);
5706 pr_warn("failed to parse target BTF: %d\n", err);
5711 cand_cache = hashmap__new(bpf_core_hash_fn, bpf_core_equal_fn, NULL);
5712 if (IS_ERR(cand_cache)) {
5713 err = PTR_ERR(cand_cache);
5717 seg = &obj->btf_ext->core_relo_info;
5719 for_each_btf_ext_sec(seg, sec) {
5720 sec_idx = seg->sec_idxs[sec_num];
5723 sec_name = btf__name_by_offset(obj->btf, sec->sec_name_off);
5724 if (str_is_empty(sec_name)) {
5729 pr_debug("sec '%s': found %d CO-RE relocations\n", sec_name, sec->num_info);
5731 for_each_btf_ext_rec(seg, sec, i, rec) {
5732 if (rec->insn_off % BPF_INSN_SZ)
5734 insn_idx = rec->insn_off / BPF_INSN_SZ;
5735 prog = find_prog_by_sec_insn(obj, sec_idx, insn_idx);
5737 /* When a __weak subprog is "overridden" by another instance
5738 * of the subprog from a different object file, the linker still
5739 * appends all the .BTF.ext info that used to belong to that
5740 * eliminated subprogram.
5741 * This is similar to what the x86-64 linker does for relocations.
5742 * So ignore such relocations, just like we ignore
5743 * subprog instructions when discovering subprograms.
5745 pr_debug("sec '%s': skipping CO-RE relocation #%d for insn #%d belonging to eliminated weak subprogram\n",
5746 sec_name, i, insn_idx);
5749 /* no need to apply CO-RE relocation if the program is
5750 * not going to be loaded
5752 if (!prog->autoload)
5755 /* adjust insn_idx from section frame of reference to the local
5756 * program's frame of reference; (sub-)program code is not yet
5757 * relocated, so it's enough to just subtract in-section offset
5759 insn_idx = insn_idx - prog->sec_insn_off;
5760 if (insn_idx >= prog->insns_cnt)
5762 insn = &prog->insns[insn_idx];
5764 err = record_relo_core(prog, rec, insn_idx);
5766 pr_warn("prog '%s': relo #%d: failed to record relocation: %d\n",
5767 prog->name, i, err);
5771 if (prog->obj->gen_loader)
5774 err = bpf_core_resolve_relo(prog, rec, i, obj->btf, cand_cache, &targ_res);
5776 pr_warn("prog '%s': relo #%d: failed to relocate: %d\n",
5777 prog->name, i, err);
5781 err = bpf_core_patch_insn(prog->name, insn, insn_idx, rec, i, &targ_res);
5783 pr_warn("prog '%s': relo #%d: failed to patch insn #%u: %d\n",
5784 prog->name, i, insn_idx, err);
5791 /* obj->btf_vmlinux and module BTFs are freed after object load */
5792 btf__free(obj->btf_vmlinux_override);
5793 obj->btf_vmlinux_override = NULL;
5795 if (!IS_ERR_OR_NULL(cand_cache)) {
5796 hashmap__for_each_entry(cand_cache, entry, i) {
5797 bpf_core_free_cands(entry->pvalue);
5799 hashmap__free(cand_cache);
5804 /* base map load ldimm64 special constant, used also for log fixup logic */
5805 #define POISON_LDIMM64_MAP_BASE 2001000000
5806 #define POISON_LDIMM64_MAP_PFX "200100"
5808 static void poison_map_ldimm64(struct bpf_program *prog, int relo_idx,
5809 int insn_idx, struct bpf_insn *insn,
5810 int map_idx, const struct bpf_map *map)
5814 pr_debug("prog '%s': relo #%d: poisoning insn #%d that loads map #%d '%s'\n",
5815 prog->name, relo_idx, insn_idx, map_idx, map->name);
5817 /* we turn single ldimm64 into two identical invalid calls */
5818 for (i = 0; i < 2; i++) {
5819 insn->code = BPF_JMP | BPF_CALL;
5823 /* if this instruction is reachable (not dead code),
5824 * the verifier will complain with something like:
5825 * invalid func unknown#2001000123
5826 * where the lower 123 is the map index into the obj->maps[] array
5828 insn->imm = POISON_LDIMM64_MAP_BASE + map_idx;
5834 /* unresolved kfunc call special constant, used also for log fixup logic */
5835 #define POISON_CALL_KFUNC_BASE 2002000000
5836 #define POISON_CALL_KFUNC_PFX "2002"
5838 static void poison_kfunc_call(struct bpf_program *prog, int relo_idx,
5839 int insn_idx, struct bpf_insn *insn,
5840 int ext_idx, const struct extern_desc *ext)
5842 pr_debug("prog '%s': relo #%d: poisoning insn #%d that calls kfunc '%s'\n",
5843 prog->name, relo_idx, insn_idx, ext->name);
5845 /* we turn kfunc call into invalid helper call with identifiable constant */
5846 insn->code = BPF_JMP | BPF_CALL;
5850 /* if this instruction is reachable (not dead code),
5851 * the verifier will complain with something like:
5852 * invalid func unknown#2002000123
5853 * where the lower 123 is the extern index into the obj->externs[] array
5855 insn->imm = POISON_CALL_KFUNC_BASE + ext_idx;
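/* Example: with ext_idx 5 the poisoned call's imm becomes 2002000005, so
 * a verifier failure on a reachable path prints
 * "invalid func unknown#2002000005", and libbpf's log fixup logic can map
 * the trailing index back to obj->externs[5] to name the missing kfunc.
 * The POISON_LDIMM64_MAP_* constants above implement the same scheme for
 * poisoned map loads.
 */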
5858 /* Relocate data references within program code:
5860 * - global variable references;
5861 * - extern references.
5864 bpf_object__relocate_data(struct bpf_object *obj, struct bpf_program *prog)
5868 for (i = 0; i < prog->nr_reloc; i++) {
5869 struct reloc_desc *relo = &prog->reloc_desc[i];
5870 struct bpf_insn *insn = &prog->insns[relo->insn_idx];
5871 const struct bpf_map *map;
5872 struct extern_desc *ext;
5874 switch (relo->type) {
5876 map = &obj->maps[relo->map_idx];
5877 if (obj->gen_loader) {
5878 insn[0].src_reg = BPF_PSEUDO_MAP_IDX;
5879 insn[0].imm = relo->map_idx;
5880 } else if (map->autocreate) {
5881 insn[0].src_reg = BPF_PSEUDO_MAP_FD;
5882 insn[0].imm = map->fd;
5884 poison_map_ldimm64(prog, i, relo->insn_idx, insn,
5885 relo->map_idx, map);
5889 map = &obj->maps[relo->map_idx];
5890 insn[1].imm = insn[0].imm + relo->sym_off;
5891 if (obj->gen_loader) {
5892 insn[0].src_reg = BPF_PSEUDO_MAP_IDX_VALUE;
5893 insn[0].imm = relo->map_idx;
5894 } else if (map->autocreate) {
5895 insn[0].src_reg = BPF_PSEUDO_MAP_VALUE;
5896 insn[0].imm = map->fd;
5898 poison_map_ldimm64(prog, i, relo->insn_idx, insn,
5899 relo->map_idx, map);
5902 case RELO_EXTERN_LD64:
5903 ext = &obj->externs[relo->ext_idx];
5904 if (ext->type == EXT_KCFG) {
5905 if (obj->gen_loader) {
5906 insn[0].src_reg = BPF_PSEUDO_MAP_IDX_VALUE;
5907 insn[0].imm = obj->kconfig_map_idx;
5909 insn[0].src_reg = BPF_PSEUDO_MAP_VALUE;
5910 insn[0].imm = obj->maps[obj->kconfig_map_idx].fd;
5912 insn[1].imm = ext->kcfg.data_off;
5913 } else /* EXT_KSYM */ {
5914 if (ext->ksym.type_id && ext->is_set) { /* typed ksyms */
5915 insn[0].src_reg = BPF_PSEUDO_BTF_ID;
5916 insn[0].imm = ext->ksym.kernel_btf_id;
5917 insn[1].imm = ext->ksym.kernel_btf_obj_fd;
5918 } else { /* typeless ksyms or unresolved typed ksyms */
5919 insn[0].imm = (__u32)ext->ksym.addr;
5920 insn[1].imm = ext->ksym.addr >> 32;
5924 case RELO_EXTERN_CALL:
5925 ext = &obj->externs[relo->ext_idx];
5926 insn[0].src_reg = BPF_PSEUDO_KFUNC_CALL;
5928 insn[0].imm = ext->ksym.kernel_btf_id;
5929 insn[0].off = ext->ksym.btf_fd_idx;
5930 } else { /* unresolved weak kfunc call */
5931 poison_kfunc_call(prog, i, relo->insn_idx, insn,
5932 relo->ext_idx, ext);
5935 case RELO_SUBPROG_ADDR:
5936 if (insn[0].src_reg != BPF_PSEUDO_FUNC) {
5937 pr_warn("prog '%s': relo #%d: bad insn\n",
5941 /* handled already */
5944 /* handled already */
5947 /* will be handled by bpf_program_record_relos() */
5950 pr_warn("prog '%s': relo #%d: bad relo type %d\n",
5951 prog->name, i, relo->type);
5959 static int adjust_prog_btf_ext_info(const struct bpf_object *obj,
5960 const struct bpf_program *prog,
5961 const struct btf_ext_info *ext_info,
5962 void **prog_info, __u32 *prog_rec_cnt,
5965 void *copy_start = NULL, *copy_end = NULL;
5966 void *rec, *rec_end, *new_prog_info;
5967 const struct btf_ext_info_sec *sec;
5968 size_t old_sz, new_sz;
5969 int i, sec_num, sec_idx, off_adj;
5972 for_each_btf_ext_sec(ext_info, sec) {
5973 sec_idx = ext_info->sec_idxs[sec_num];
5975 if (prog->sec_idx != sec_idx)
5978 for_each_btf_ext_rec(ext_info, sec, i, rec) {
5979 __u32 insn_off = *(__u32 *)rec / BPF_INSN_SZ;
5981 if (insn_off < prog->sec_insn_off)
5983 if (insn_off >= prog->sec_insn_off + prog->sec_insn_cnt)
5988 copy_end = rec + ext_info->rec_size;
5994 /* append func/line info of a given (sub-)program to the main
5995 * program func/line info
5997 old_sz = (size_t)(*prog_rec_cnt) * ext_info->rec_size;
5998 new_sz = old_sz + (copy_end - copy_start);
5999 new_prog_info = realloc(*prog_info, new_sz);
6002 *prog_info = new_prog_info;
6003 *prog_rec_cnt = new_sz / ext_info->rec_size;
6004 memcpy(new_prog_info + old_sz, copy_start, copy_end - copy_start);
6006 /* Kernel instruction offsets are in units of 8-byte
6007 * instructions, while .BTF.ext instruction offsets generated
6008 * by Clang are in units of bytes. So convert Clang offsets
6009 * into kernel offsets and adjust offset according to program
6010 * relocated position.
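 * E.g., a Clang-emitted byte offset of 24 becomes insn #3, which is then
 * shifted by off_adj to its final position within the main program.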
6012 off_adj = prog->sub_insn_off - prog->sec_insn_off;
6013 rec = new_prog_info + old_sz;
6014 rec_end = new_prog_info + new_sz;
6015 for (; rec < rec_end; rec += ext_info->rec_size) {
6016 __u32 *insn_off = rec;
6018 *insn_off = *insn_off / BPF_INSN_SZ + off_adj;
6020 *prog_rec_sz = ext_info->rec_size;
6028 reloc_prog_func_and_line_info(const struct bpf_object *obj,
6029 struct bpf_program *main_prog,
6030 const struct bpf_program *prog)
6034 /* no .BTF.ext relocation if .BTF.ext is missing or kernel doesn't
6035 * support func/line info
6037 if (!obj->btf_ext || !kernel_supports(obj, FEAT_BTF_FUNC))
6040 /* only attempt func info relocation if main program's func_info
6041 * relocation was successful
6043 if (main_prog != prog && !main_prog->func_info)
6046 err = adjust_prog_btf_ext_info(obj, prog, &obj->btf_ext->func_info,
6047 &main_prog->func_info,
6048 &main_prog->func_info_cnt,
6049 &main_prog->func_info_rec_size);
6051 if (err != -ENOENT) {
6052 pr_warn("prog '%s': error relocating .BTF.ext function info: %d\n",
6056 if (main_prog->func_info) {
6058 * Some info has already been found, but there was a problem
6059 * with the last btf_ext reloc. We must error out.
6061 pr_warn("prog '%s': missing .BTF.ext function info.\n", prog->name);
6064 /* There was a problem loading the very first info. Ignore the rest. */
6065 pr_warn("prog '%s': missing .BTF.ext function info for the main program, skipping all of .BTF.ext func info.\n",
6070 /* don't relocate line info if main program's relocation failed */
6071 if (main_prog != prog && !main_prog->line_info)
6074 err = adjust_prog_btf_ext_info(obj, prog, &obj->btf_ext->line_info,
6075 &main_prog->line_info,
6076 &main_prog->line_info_cnt,
6077 &main_prog->line_info_rec_size);
6079 if (err != -ENOENT) {
6080 pr_warn("prog '%s': error relocating .BTF.ext line info: %d\n",
6084 if (main_prog->line_info) {
6086 * Some info has already been found, but there was a problem
6087 * with the last btf_ext reloc. We must error out.
6089 pr_warn("prog '%s': missing .BTF.ext line info.\n", prog->name);
6092 /* There was a problem loading the very first info. Ignore the rest. */
6093 pr_warn("prog '%s': missing .BTF.ext line info for the main program, skipping all of .BTF.ext line info.\n",
6099 static int cmp_relo_by_insn_idx(const void *key, const void *elem)
6101 size_t insn_idx = *(const size_t *)key;
6102 const struct reloc_desc *relo = elem;
6104 if (insn_idx == relo->insn_idx)
6106 return insn_idx < relo->insn_idx ? -1 : 1;
6109 static struct reloc_desc *find_prog_insn_relo(const struct bpf_program *prog, size_t insn_idx)
6111 if (!prog->nr_reloc)
6113 return bsearch(&insn_idx, prog->reloc_desc, prog->nr_reloc,
6114 sizeof(*prog->reloc_desc), cmp_relo_by_insn_idx);
6117 static int append_subprog_relos(struct bpf_program *main_prog, struct bpf_program *subprog)
6119 int new_cnt = main_prog->nr_reloc + subprog->nr_reloc;
6120 struct reloc_desc *relos;
6123 if (main_prog == subprog)
6125 relos = libbpf_reallocarray(main_prog->reloc_desc, new_cnt, sizeof(*relos));
6126 /* if new count is zero, reallocarray can return a valid NULL result;
6127 * in this case the previous pointer will be freed, so we *have to*
6128 * reassign old pointer to the new value (even if it's NULL)
6130 if (!relos && new_cnt)
6132 if (subprog->nr_reloc)
6133 memcpy(relos + main_prog->nr_reloc, subprog->reloc_desc,
6134 sizeof(*relos) * subprog->nr_reloc);
6136 for (i = main_prog->nr_reloc; i < new_cnt; i++)
6137 relos[i].insn_idx += subprog->sub_insn_off;
6138 /* After insn_idx adjustment the 'relos' array is still sorted
6139 * by insn_idx and doesn't break bsearch.
6141 main_prog->reloc_desc = relos;
6142 main_prog->nr_reloc = new_cnt;
6147 bpf_object__append_subprog_code(struct bpf_object *obj, struct bpf_program *main_prog,
6148 struct bpf_program *subprog)
6150 struct bpf_insn *insns;
6154 subprog->sub_insn_off = main_prog->insns_cnt;
6156 new_cnt = main_prog->insns_cnt + subprog->insns_cnt;
6157 insns = libbpf_reallocarray(main_prog->insns, new_cnt, sizeof(*insns));
6159 pr_warn("prog '%s': failed to realloc prog code\n", main_prog->name);
6162 main_prog->insns = insns;
6163 main_prog->insns_cnt = new_cnt;
6165 memcpy(main_prog->insns + subprog->sub_insn_off, subprog->insns,
6166 subprog->insns_cnt * sizeof(*insns));
6168 pr_debug("prog '%s': added %zu insns from sub-prog '%s'\n",
6169 main_prog->name, subprog->insns_cnt, subprog->name);
6171 /* The subprog insns are now appended. Append its relos too. */
6172 err = append_subprog_relos(main_prog, subprog);
6179 bpf_object__reloc_code(struct bpf_object *obj, struct bpf_program *main_prog,
6180 struct bpf_program *prog)
6182 size_t sub_insn_idx, insn_idx;
6183 struct bpf_program *subprog;
6184 struct reloc_desc *relo;
6185 struct bpf_insn *insn;
6188 err = reloc_prog_func_and_line_info(obj, main_prog, prog);
6192 for (insn_idx = 0; insn_idx < prog->sec_insn_cnt; insn_idx++) {
6193 insn = &main_prog->insns[prog->sub_insn_off + insn_idx];
6194 if (!insn_is_subprog_call(insn) && !insn_is_pseudo_func(insn))
6197 relo = find_prog_insn_relo(prog, insn_idx);
6198 if (relo && relo->type == RELO_EXTERN_CALL)
6199 /* kfunc relocations will be handled later
6200 * in bpf_object__relocate_data()
6203 if (relo && relo->type != RELO_CALL && relo->type != RELO_SUBPROG_ADDR) {
6204 pr_warn("prog '%s': unexpected relo for insn #%zu, type %d\n",
6205 prog->name, insn_idx, relo->type);
6206 return -LIBBPF_ERRNO__RELOC;
6209 /* sub-program instruction index is a combination of
6210 * an offset of a symbol pointed to by relocation and
6211 * call instruction's imm field; for global functions,
6212 * call always has imm = -1, but for static functions
6213 * relocation is against STT_SECTION and insn->imm
6214 * points to a start of a static function
6216 * for subprog addr relocation, the relo->sym_off + insn->imm is
6217 * the byte offset in the corresponding section.
6219 if (relo->type == RELO_CALL)
6220 sub_insn_idx = relo->sym_off / BPF_INSN_SZ + insn->imm + 1;
6222 sub_insn_idx = (relo->sym_off + insn->imm) / BPF_INSN_SZ;
6223 } else if (insn_is_pseudo_func(insn)) {
6225 * RELO_SUBPROG_ADDR relo is always emitted even if both
6226 * functions are in the same section, so it shouldn't reach here.
6228 pr_warn("prog '%s': missing subprog addr relo for insn #%zu\n",
6229 prog->name, insn_idx);
6230 return -LIBBPF_ERRNO__RELOC;
6232 /* if subprogram call is to a static function within
6233 * the same ELF section, there won't be any relocation
6234 * emitted, but it also means there is no additional
6235 * offset necessary, insns->imm is relative to
6236 * instruction's original position within the section
6238 sub_insn_idx = prog->sec_insn_off + insn_idx + insn->imm + 1;
6241 /* we enforce that sub-programs should be in .text section */
6242 subprog = find_prog_by_sec_insn(obj, obj->efile.text_shndx, sub_insn_idx);
6244 pr_warn("prog '%s': no .text section found yet sub-program call exists\n",
6246 return -LIBBPF_ERRNO__RELOC;
6249 /* if it's the first call instruction calling into this
6250 * subprogram (meaning this subprog hasn't been processed
6251 * yet) within the context of the current main program:
6252 * - append it at the end of the main program's instruction block;
6253 * - process it recursively, while the current program is put on hold;
6254 * - if that subprogram calls some other not-yet-processed
6255 * subprogram, the same thing will happen recursively until
6256 * there are no more unprocessed subprograms left to append
6259 if (subprog->sub_insn_off == 0) {
6260 err = bpf_object__append_subprog_code(obj, main_prog, subprog);
6263 err = bpf_object__reloc_code(obj, main_prog, subprog);
6268 /* main_prog->insns memory could have been re-allocated, so
6269 * calculate pointer again
6271 insn = &main_prog->insns[prog->sub_insn_off + insn_idx];
6272 /* calculate correct instruction position within current main
6273 * prog; each main prog can have a different set of
6274 * subprograms appended (potentially in different order as
6275 * well), so position of any subprog can be different for
6276 * different main programs
6278 insn->imm = subprog->sub_insn_off - (prog->sub_insn_off + insn_idx) - 1;
6280 pr_debug("prog '%s': insn #%zu relocated, imm %d points to subprog '%s' (now at %zu offset)\n",
6281 prog->name, insn_idx, insn->imm, subprog->name, subprog->sub_insn_off);
6288 * Relocate sub-program calls.
6290 * The algorithm operates as follows. Each entry-point BPF program (referred to
6291 * as a main prog) is processed separately. Each subprog (a non-entry function
6292 * that can be called from either entry progs or other subprogs) gets its
6293 * sub_insn_off reset to zero. This serves as an indicator that this subprogram
6294 * hasn't yet been appended and relocated within the current main prog. Once it's
6295 * relocated, sub_insn_off will point at the position within the current main prog
6296 * where the given subprog was appended. This will further be used to relocate all
6297 * the call instructions jumping into this subprog.
6299 * We start with main program and process all call instructions. If the call
6300 * is into a subprog that hasn't been processed (i.e., subprog->sub_insn_off
6301 * is zero), subprog instructions are appended at the end of main program's
6302 * instruction array. Then main program is "put on hold" while we recursively
6303 * process newly appended subprogram. If that subprogram calls into another
6304 * subprogram that hasn't been appended, new subprogram is appended again to
6305 * the *main* prog's instructions (subprog's instructions are always left
6306 * untouched, as they need to be in unmodified state for subsequent main progs
6307 * and subprog instructions are always sent only as part of a main prog) and
6308 * the process continues recursively. Once all the subprogs called from a main
6309 * prog or any of its subprogs are appended (and relocated), all their
6310 * positions within finalized instructions array are known, so it's easy to
6311 * rewrite call instructions with correct relative offsets, corresponding to
6312 * desired target subprog.
6314 * It's important to realize that some subprogs might not be called from a given
6315 * main prog or any of its called/used subprogs. Those will keep their
6316 * subprog->sub_insn_off as zero at all times, won't be appended to the current
6317 * main prog, and won't be relocated within the context of the current main prog.
6318 * They might still be used from other main progs later.
6320 * Visually this process can be shown as below. Suppose we have two main
6321 * programs mainA and mainB and BPF object contains three subprogs: subA,
6322 * subB, and subC. mainA calls only subA, mainB calls only subC, but subA and
6323 * subC both call subB:
6325 * +--------+ +-------+
6327 * +--+---+ +--+-+-+ +---+--+
6328 * | subA | | subB | | subC |
6329 * +--+---+ +------+ +---+--+
6332 * +---+-------+ +------+----+
6333 * | mainA | | mainB |
6334 * +-----------+ +-----------+
6336 * We'll start relocating mainA, will find subA, append it and start
6337 * processing sub A recursively:
6339 * +-----------+------+
6341 * +-----------+------+
6343 * At this point we notice that subB is used from subA, so we append it and
6344 * relocate (there are no further subcalls from subB):
6346 * +-----------+------+------+
6347 * | mainA | subA | subB |
6348 * +-----------+------+------+
6350 * At this point, we relocate subA calls, then go one level up and finish with
6351 * relocating mainA's calls. mainA is done.
6353 * For mainB the process is similar but results in a different order. We start with
6354 * mainB and skip subA and subB, as mainB never calls them (at least
6355 * directly), but we see subC is needed, so we append and start processing it:
6357 * +-----------+------+
6359 * +-----------+------+
6360 * Now we see subC needs subB, so we go back to it, append and relocate it:
6362 * +-----------+------+------+
6363 * | mainB | subC | subB |
6364 * +-----------+------+------+
6366 * At this point we unwind recursion, relocate calls in subC, then in mainB.
6369 bpf_object__relocate_calls(struct bpf_object *obj, struct bpf_program *prog)
6371 struct bpf_program *subprog;
6374 /* mark all subprogs as not relocated (yet) within the context of
6375 * current main program
6377 for (i = 0; i < obj->nr_programs; i++) {
6378 subprog = &obj->programs[i];
6379 if (!prog_is_subprog(obj, subprog))
6382 subprog->sub_insn_off = 0;
6385 err = bpf_object__reloc_code(obj, prog, prog);
6393 bpf_object__free_relocs(struct bpf_object *obj)
6395 struct bpf_program *prog;
6398 /* free up relocation descriptors */
6399 for (i = 0; i < obj->nr_programs; i++) {
6400 prog = &obj->programs[i];
6401 zfree(&prog->reloc_desc);
6406 static int cmp_relocs(const void *_a, const void *_b)
6408 const struct reloc_desc *a = _a;
6409 const struct reloc_desc *b = _b;
6411 if (a->insn_idx != b->insn_idx)
6412 return a->insn_idx < b->insn_idx ? -1 : 1;
6414 /* no two relocations should have the same insn_idx, but ... */
6415 if (a->type != b->type)
6416 return a->type < b->type ? -1 : 1;
6421 static void bpf_object__sort_relos(struct bpf_object *obj)
6425 for (i = 0; i < obj->nr_programs; i++) {
6426 struct bpf_program *p = &obj->programs[i];
6431 qsort(p->reloc_desc, p->nr_reloc, sizeof(*p->reloc_desc), cmp_relocs);
6435 static int bpf_prog_assign_exc_cb(struct bpf_object *obj, struct bpf_program *prog)
6437 const char *str = "exception_callback:";
6438 size_t pfx_len = strlen(str);
6441 if (!obj->btf || !kernel_supports(obj, FEAT_BTF_DECL_TAG))
6444 n = btf__type_cnt(obj->btf);
6445 for (i = 1; i < n; i++) {
6449 t = btf_type_by_id(obj->btf, i);
6450 if (!btf_is_decl_tag(t) || btf_decl_tag(t)->component_idx != -1)
6453 name = btf__str_by_offset(obj->btf, t->name_off);
6454 if (strncmp(name, str, pfx_len) != 0)
6457 t = btf_type_by_id(obj->btf, t->type);
6458 if (!btf_is_func(t) || btf_func_linkage(t) != BTF_FUNC_GLOBAL) {
6459 pr_warn("prog '%s': exception_callback:<value> decl tag not applied to the main program\n",
6463 if (strcmp(prog->name, btf__str_by_offset(obj->btf, t->name_off)) != 0)
6465 /* If multiple callbacks are specified for the same prog,
6466 * the verifier will eventually return an error for this
6467 * case, so simply skip appending a subprog.
6469 if (prog->exception_cb_idx >= 0) {
6470 prog->exception_cb_idx = -1;
6475 if (str_is_empty(name)) {
6476 pr_warn("prog '%s': exception_callback:<value> decl tag contains empty value\n",
6481 for (j = 0; j < obj->nr_programs; j++) {
6482 struct bpf_program *subprog = &obj->programs[j];
6484 if (!prog_is_subprog(obj, subprog))
6486 if (strcmp(name, subprog->name) != 0)
6488 /* Enforce non-hidden, as from the verifier's point of
6489 * view it expects global functions, whereas
6490 * mark_btf_static fixes up the linkage as static.
6492 if (!subprog->sym_global || subprog->mark_btf_static) {
6493 pr_warn("prog '%s': exception callback %s must be a global non-hidden function\n",
6494 prog->name, subprog->name);
6497 /* Let's see if we already saw a static exception callback with the same name */
6498 if (prog->exception_cb_idx >= 0) {
6499 pr_warn("prog '%s': multiple subprogs with same name as exception callback '%s'\n",
6500 prog->name, subprog->name);
6503 prog->exception_cb_idx = j;
6507 if (prog->exception_cb_idx >= 0)
6510 pr_warn("prog '%s': cannot find exception callback '%s'\n", prog->name, name);
6518 enum bpf_prog_type prog_type;
6519 const char *ctx_name;
6520 } global_ctx_map[] = {
6521 { BPF_PROG_TYPE_CGROUP_DEVICE, "bpf_cgroup_dev_ctx" },
6522 { BPF_PROG_TYPE_CGROUP_SKB, "__sk_buff" },
6523 { BPF_PROG_TYPE_CGROUP_SOCK, "bpf_sock" },
6524 { BPF_PROG_TYPE_CGROUP_SOCK_ADDR, "bpf_sock_addr" },
6525 { BPF_PROG_TYPE_CGROUP_SOCKOPT, "bpf_sockopt" },
6526 { BPF_PROG_TYPE_CGROUP_SYSCTL, "bpf_sysctl" },
6527 { BPF_PROG_TYPE_FLOW_DISSECTOR, "__sk_buff" },
6528 { BPF_PROG_TYPE_KPROBE, "bpf_user_pt_regs_t" },
6529 { BPF_PROG_TYPE_LWT_IN, "__sk_buff" },
6530 { BPF_PROG_TYPE_LWT_OUT, "__sk_buff" },
6531 { BPF_PROG_TYPE_LWT_SEG6LOCAL, "__sk_buff" },
6532 { BPF_PROG_TYPE_LWT_XMIT, "__sk_buff" },
6533 { BPF_PROG_TYPE_NETFILTER, "bpf_nf_ctx" },
6534 { BPF_PROG_TYPE_PERF_EVENT, "bpf_perf_event_data" },
6535 { BPF_PROG_TYPE_RAW_TRACEPOINT, "bpf_raw_tracepoint_args" },
6536 { BPF_PROG_TYPE_RAW_TRACEPOINT_WRITABLE, "bpf_raw_tracepoint_args" },
6537 { BPF_PROG_TYPE_SCHED_ACT, "__sk_buff" },
6538 { BPF_PROG_TYPE_SCHED_CLS, "__sk_buff" },
6539 { BPF_PROG_TYPE_SK_LOOKUP, "bpf_sk_lookup" },
6540 { BPF_PROG_TYPE_SK_MSG, "sk_msg_md" },
6541 { BPF_PROG_TYPE_SK_REUSEPORT, "sk_reuseport_md" },
6542 { BPF_PROG_TYPE_SK_SKB, "__sk_buff" },
6543 { BPF_PROG_TYPE_SOCK_OPS, "bpf_sock_ops" },
6544 { BPF_PROG_TYPE_SOCKET_FILTER, "__sk_buff" },
6545 { BPF_PROG_TYPE_XDP, "xdp_md" },
6546 /* all other program types don't have "named" context structs */
6549 /* forward declarations for arch-specific underlying types of bpf_user_pt_regs_t typedef,
6550 * for below __builtin_types_compatible_p() checks;
6551 * with this approach we don't need any extra arch-specific #ifdef guards
6554 struct user_pt_regs;
6555 struct user_regs_struct;
6557 static bool need_func_arg_type_fixup(const struct btf *btf, const struct bpf_program *prog,
6558 const char *subprog_name, int arg_idx,
6559 int arg_type_id, const char *ctx_name)
6561 const struct btf_type *t;
6564 /* check if existing parameter already matches verifier expectations */
6565 t = skip_mods_and_typedefs(btf, arg_type_id, NULL);
6569 /* typedef bpf_user_pt_regs_t is a special PITA case, valid for kprobe
6570 * and perf_event programs, so check this case early on and forget
6571 * about it for subsequent checks
6573 while (btf_is_mod(t))
6574 t = btf__type_by_id(btf, t->type);
6575 if (btf_is_typedef(t) &&
6576 (prog->type == BPF_PROG_TYPE_KPROBE || prog->type == BPF_PROG_TYPE_PERF_EVENT)) {
6577 tname = btf__str_by_offset(btf, t->name_off) ?: "<anon>";
6578 if (strcmp(tname, "bpf_user_pt_regs_t") == 0)
6579 return false; /* canonical type for kprobe/perf_event */
6582 /* now we can ignore typedefs moving forward */
6583 t = skip_mods_and_typedefs(btf, t->type, NULL);
6585 /* if it's `void *`, definitely fix up BTF info */
6589 /* if it's already proper canonical type, no need to fix up */
6590 tname = btf__str_by_offset(btf, t->name_off) ?: "<anon>";
6591 if (btf_is_struct(t) && strcmp(tname, ctx_name) == 0)
6595 switch (prog->type) {
6596 case BPF_PROG_TYPE_KPROBE:
6597 /* `struct pt_regs *` is expected, but we need to fix up */
6598 if (btf_is_struct(t) && strcmp(tname, "pt_regs") == 0)
6601 case BPF_PROG_TYPE_PERF_EVENT:
6602 if (__builtin_types_compatible_p(bpf_user_pt_regs_t, struct pt_regs) &&
6603 btf_is_struct(t) && strcmp(tname, "pt_regs") == 0)
6605 if (__builtin_types_compatible_p(bpf_user_pt_regs_t, struct user_pt_regs) &&
6606 btf_is_struct(t) && strcmp(tname, "user_pt_regs") == 0)
6608 if (__builtin_types_compatible_p(bpf_user_pt_regs_t, struct user_regs_struct) &&
6609 btf_is_struct(t) && strcmp(tname, "user_regs_struct") == 0)
6612 case BPF_PROG_TYPE_RAW_TRACEPOINT:
6613 case BPF_PROG_TYPE_RAW_TRACEPOINT_WRITABLE:
6614 /* allow u64* as ctx */
6615 if (btf_is_int(t) && t->size == 8)
6623 pr_warn("prog '%s': subprog '%s' arg#%d is expected to be of `struct %s *` type\n",
6624 prog->name, subprog_name, arg_idx, ctx_name);
6628 static int clone_func_btf_info(struct btf *btf, int orig_fn_id, struct bpf_program *prog)
6630 int fn_id, fn_proto_id, ret_type_id, orig_proto_id;
6631 int i, err, arg_cnt, fn_name_off, linkage;
6632 struct btf_type *fn_t, *fn_proto_t, *t;
6633 struct btf_param *p;
6635 /* caller already validated FUNC -> FUNC_PROTO validity */
6636 fn_t = btf_type_by_id(btf, orig_fn_id);
6637 fn_proto_t = btf_type_by_id(btf, fn_t->type);
6639 /* Note that each btf__add_xxx() operation invalidates
6640 * all btf_type and string pointers, so we need to be
6641 * very careful when cloning BTF types. BTF type
6642 * pointers have to be always refetched. And to avoid
6643 * problems with invalidated string pointers, we
6644 * add empty strings initially, then just fix up
6645 * name_off offsets in place. Offsets are stable for
6646 * existing strings, so that works out.
6648 fn_name_off = fn_t->name_off; /* we are about to invalidate fn_t */
6649 linkage = btf_func_linkage(fn_t);
6650 orig_proto_id = fn_t->type; /* original FUNC_PROTO ID */
6651 ret_type_id = fn_proto_t->type; /* fn_proto_t will be invalidated */
6652 arg_cnt = btf_vlen(fn_proto_t);
6654 /* clone FUNC_PROTO and its params */
6655 fn_proto_id = btf__add_func_proto(btf, ret_type_id);
6656 if (fn_proto_id < 0)
6659 for (i = 0; i < arg_cnt; i++) {
6662 /* copy original parameter data */
6663 t = btf_type_by_id(btf, orig_proto_id);
6664 p = &btf_params(t)[i];
6665 name_off = p->name_off;
6667 err = btf__add_func_param(btf, "", p->type);
6671 fn_proto_t = btf_type_by_id(btf, fn_proto_id);
6672 p = &btf_params(fn_proto_t)[i];
6673 p->name_off = name_off; /* use remembered str offset */
6676 /* clone FUNC now, btf__add_func() enforces non-empty name, so use
6677 * entry program's name as a placeholder, which we replace immediately
6678 * with original name_off
6680 fn_id = btf__add_func(btf, prog->name, linkage, fn_proto_id);
6684 fn_t = btf_type_by_id(btf, fn_id);
6685 fn_t->name_off = fn_name_off; /* reuse original string */
6690 /* Check if main program or global subprog's function prototype has `arg:ctx`
6691 * argument tags and, if necessary, substitute the correct type to match what
6692 * the BPF verifier would expect, taking the specific program type into account.
6693 * This allows supporting the __arg_ctx tag transparently on old kernels that
6694 * don't yet have native support for it in the verifier, making the user's life much easier.
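 * For instance, with the __arg_ctx macro from bpf_helpers.h (which expands
 * to btf_decl_tag("arg:ctx")), a global XDP subprog declared as
 *
 *	__noinline int handle_pkt(void *ctx __arg_ctx)
 *
 * (an illustrative declaration) gets its FUNC -> FUNC_PROTO cloned and the
 * argument's type rewritten to the PTR -> STRUCT "xdp_md" chain that the
 * verifier recognizes as PTR_TO_CTX for BPF_PROG_TYPE_XDP.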
6697 static int bpf_program_fixup_func_info(struct bpf_object *obj, struct bpf_program *prog)
6699 const char *ctx_name = NULL, *ctx_tag = "arg:ctx", *fn_name;
6700 struct bpf_func_info_min *func_rec;
6701 struct btf_type *fn_t, *fn_proto_t;
6702 struct btf *btf = obj->btf;
6703 const struct btf_type *t;
6704 struct btf_param *p;
6705 int ptr_id = 0, struct_id, tag_id, orig_fn_id;
6706 int i, n, arg_idx, arg_cnt, err, rec_idx;
6709 /* no .BTF.ext, no problem */
6710 if (!obj->btf_ext || !prog->func_info)
6713 /* don't do any fix ups if kernel natively supports __arg_ctx */
6714 if (kernel_supports(obj, FEAT_ARG_CTX_TAG))
6717 /* some BPF program types just don't have named context structs, so
6718 * this fallback mechanism doesn't work for them
6720 for (i = 0; i < ARRAY_SIZE(global_ctx_map); i++) {
6721 if (global_ctx_map[i].prog_type != prog->type)
6723 ctx_name = global_ctx_map[i].ctx_name;
6729 /* remember original func BTF IDs to detect if we already cloned them */
6730 orig_ids = calloc(prog->func_info_cnt, sizeof(*orig_ids));
6733 for (i = 0; i < prog->func_info_cnt; i++) {
6734 func_rec = prog->func_info + prog->func_info_rec_size * i;
6735 orig_ids[i] = func_rec->type_id;
6738 /* go through each DECL_TAG with "arg:ctx" and see if it points to one
6739 * of our subprogs; if yes and subprog is global and needs adjustment,
6740 * clone and adjust FUNC -> FUNC_PROTO combo
6742 for (i = 1, n = btf__type_cnt(btf); i < n; i++) {
6743 /* only DECL_TAGs with "arg:ctx" value are interesting */
6744 t = btf__type_by_id(btf, i);
6745 if (!btf_is_decl_tag(t))
6747 if (strcmp(btf__str_by_offset(btf, t->name_off), ctx_tag) != 0)
6750 /* only global funcs need adjustment, if at all */
6751 orig_fn_id = t->type;
6752 fn_t = btf_type_by_id(btf, orig_fn_id);
6753 if (!btf_is_func(fn_t) || btf_func_linkage(fn_t) != BTF_FUNC_GLOBAL)
6756 /* sanity check FUNC -> FUNC_PROTO chain, just in case */
6757 fn_proto_t = btf_type_by_id(btf, fn_t->type);
6758 if (!fn_proto_t || !btf_is_func_proto(fn_proto_t))
6761 /* find corresponding func_info record */
6763 for (rec_idx = 0; rec_idx < prog->func_info_cnt; rec_idx++) {
6764 if (orig_ids[rec_idx] == t->type) {
6765 func_rec = prog->func_info + prog->func_info_rec_size * rec_idx;
6769 /* current main program doesn't call into this subprog */
6773 /* some more sanity checking of DECL_TAG */
6774 arg_cnt = btf_vlen(fn_proto_t);
6775 arg_idx = btf_decl_tag(t)->component_idx;
6776 if (arg_idx < 0 || arg_idx >= arg_cnt)
6779 /* check if we should fix up argument type */
6780 p = &btf_params(fn_proto_t)[arg_idx];
6781 fn_name = btf__str_by_offset(btf, fn_t->name_off) ?: "<anon>";
6782 if (!need_func_arg_type_fixup(btf, prog, fn_name, arg_idx, p->type, ctx_name))
6785 /* clone fn/fn_proto, unless we already did it for another arg */
6786 if (func_rec->type_id == orig_fn_id) {
6789 fn_id = clone_func_btf_info(btf, orig_fn_id, prog);
6795 /* point func_info record to a cloned FUNC type */
6796 func_rec->type_id = fn_id;
6799 /* create PTR -> STRUCT type chain to mark PTR_TO_CTX argument;
6800 * we do it just once per main BPF program, as all global
6801 * funcs share the same program type, so we need only one PTR -> STRUCT type chain
6805 struct_id = btf__add_struct(btf, ctx_name, 0);
6806 ptr_id = btf__add_ptr(btf, struct_id);
6807 if (ptr_id < 0 || struct_id < 0) {
6813 /* for completeness, clone DECL_TAG and point it to cloned param */
6814 tag_id = btf__add_decl_tag(btf, ctx_tag, func_rec->type_id, arg_idx);
6820 /* all the BTF manipulations invalidated pointers, refetch them */
6821 fn_t = btf_type_by_id(btf, func_rec->type_id);
6822 fn_proto_t = btf_type_by_id(btf, fn_t->type);
6824 /* fix up type ID pointed to by param */
6825 p = &btf_params(fn_proto_t)[arg_idx];
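/* Usage sketch (hypothetical BPF-side code; __arg_ctx is the
 * btf_decl_tag("arg:ctx") attribute from bpf_helpers.h):
 *
 *   __noinline int subhandler(void *ctx __arg_ctx)
 *   {
 *       return 0;
 *   }
 *
 * On kernels without native arg:ctx support, the fixup above rewrites
 * this argument's BTF type into a pointer to the program-type-specific
 * context struct (e.g. struct xdp_md), so the verifier sees PTR_TO_CTX.
 */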
6836 static int bpf_object__relocate(struct bpf_object *obj, const char *targ_btf_path)
6838 struct bpf_program *prog;
6843 err = bpf_object__relocate_core(obj, targ_btf_path);
6845 pr_warn("failed to perform CO-RE relocations: %d\n", err);
6849 bpf_object__sort_relos(obj);
6852 /* Before relocating calls, pre-process relocations and mark
6853 * the few ld_imm64 instructions that point to subprogs.
6854 * Otherwise bpf_object__reloc_code() later would have to consider
6855 * all ld_imm64 insns as relocation candidates. That would
6856 * slow down relocation, since the number of find_prog_insn_relo()
6857 * calls would increase and most of them would fail to find a relo.
6859 for (i = 0; i < obj->nr_programs; i++) {
6860 prog = &obj->programs[i];
6861 for (j = 0; j < prog->nr_reloc; j++) {
6862 struct reloc_desc *relo = &prog->reloc_desc[j];
6863 struct bpf_insn *insn = &prog->insns[relo->insn_idx];
6865 /* mark the insn, so it's recognized by insn_is_pseudo_func() */
6866 if (relo->type == RELO_SUBPROG_ADDR)
6867 insn[0].src_reg = BPF_PSEUDO_FUNC;
6871 /* relocate subprogram calls and append used subprograms to main
6872 * programs; each copy of subprogram code needs to be relocated
6873 * differently for each main program, because its code location might have changed.
6875 * Append subprog relos to main programs to allow data relos to be
6876 * processed after text is completely relocated.
6878 for (i = 0; i < obj->nr_programs; i++) {
6879 prog = &obj->programs[i];
6880 /* sub-program's sub-calls are relocated within the context of
6881 * its main program only
6883 if (prog_is_subprog(obj, prog))
6885 if (!prog->autoload)
6888 err = bpf_object__relocate_calls(obj, prog);
6890 pr_warn("prog '%s': failed to relocate calls: %d\n", prog->name, err);
6895 err = bpf_prog_assign_exc_cb(obj, prog);
6898 /* Now, also append exception callback if it has not been done already. */
6899 if (prog->exception_cb_idx >= 0) {
6900 struct bpf_program *subprog = &obj->programs[prog->exception_cb_idx];
6902 /* Calling the exception callback directly is disallowed; the
6903 * verifier will reject it later. If it was already processed,
6904 * we can skip this step; otherwise, for all other valid cases, we
6905 * have to append the exception callback now.
6907 if (subprog->sub_insn_off == 0) {
6908 err = bpf_object__append_subprog_code(obj, prog, subprog);
6911 err = bpf_object__reloc_code(obj, prog, subprog);
6917 for (i = 0; i < obj->nr_programs; i++) {
6918 prog = &obj->programs[i];
6919 if (prog_is_subprog(obj, prog))
6921 if (!prog->autoload)
6924 /* Process data relos for main programs */
6925 err = bpf_object__relocate_data(obj, prog);
6927 pr_warn("prog '%s': failed to relocate data references: %d\n", prog->name, err);
6932 /* Fix up .BTF.ext information, if necessary */
6933 err = bpf_program_fixup_func_info(obj, prog);
6935 pr_warn("prog '%s': failed to perform .BTF.ext fix ups: %d\n", prog->name, err);
6944 static int bpf_object__collect_st_ops_relos(struct bpf_object *obj,
6945 Elf64_Shdr *shdr, Elf_Data *data);
6947 static int bpf_object__collect_map_relos(struct bpf_object *obj,
6948 Elf64_Shdr *shdr, Elf_Data *data)
6950 const int bpf_ptr_sz = 8, host_ptr_sz = sizeof(void *);
6951 int i, j, nrels, new_sz;
6952 const struct btf_var_secinfo *vi = NULL;
6953 const struct btf_type *sec, *var, *def;
6954 struct bpf_map *map = NULL, *targ_map = NULL;
6955 struct bpf_program *targ_prog = NULL;
6956 bool is_prog_array, is_map_in_map;
6957 const struct btf_member *member;
6958 const char *name, *mname, *type;
6964 if (!obj->efile.btf_maps_sec_btf_id || !obj->btf)
6966 sec = btf__type_by_id(obj->btf, obj->efile.btf_maps_sec_btf_id);
6970 nrels = shdr->sh_size / shdr->sh_entsize;
6971 for (i = 0; i < nrels; i++) {
6972 rel = elf_rel_by_idx(data, i);
6974 pr_warn(".maps relo #%d: failed to get ELF relo\n", i);
6975 return -LIBBPF_ERRNO__FORMAT;
6978 sym = elf_sym_by_idx(obj, ELF64_R_SYM(rel->r_info));
6980 pr_warn(".maps relo #%d: symbol %zx not found\n",
6981 i, (size_t)ELF64_R_SYM(rel->r_info));
6982 return -LIBBPF_ERRNO__FORMAT;
6984 name = elf_sym_str(obj, sym->st_name) ?: "<?>";
6986 pr_debug(".maps relo #%d: for %zd value %zd rel->r_offset %zu name %d ('%s')\n",
6987 i, (ssize_t)(rel->r_info >> 32), (size_t)sym->st_value,
6988 (size_t)rel->r_offset, sym->st_name, name);
6990 for (j = 0; j < obj->nr_maps; j++) {
6991 map = &obj->maps[j];
6992 if (map->sec_idx != obj->efile.btf_maps_shndx)
6995 vi = btf_var_secinfos(sec) + map->btf_var_idx;
6996 if (vi->offset <= rel->r_offset &&
6997 rel->r_offset + bpf_ptr_sz <= vi->offset + vi->size)
7000 if (j == obj->nr_maps) {
7001 pr_warn(".maps relo #%d: cannot find map '%s' at rel->r_offset %zu\n",
7002 i, name, (size_t)rel->r_offset);
7006 is_map_in_map = bpf_map_type__is_map_in_map(map->def.type);
7007 is_prog_array = map->def.type == BPF_MAP_TYPE_PROG_ARRAY;
7008 type = is_map_in_map ? "map" : "prog";
7009 if (is_map_in_map) {
7010 if (sym->st_shndx != obj->efile.btf_maps_shndx) {
7011 pr_warn(".maps relo #%d: '%s' isn't a BTF-defined map\n", i, name);
7013 return -LIBBPF_ERRNO__RELOC;
7015 if (map->def.type == BPF_MAP_TYPE_HASH_OF_MAPS &&
7016 map->def.key_size != sizeof(int)) {
7017 pr_warn(".maps relo #%d: hash-of-maps '%s' should have key size %zu.\n",
7018 i, map->name, sizeof(int));
7021 targ_map = bpf_object__find_map_by_name(obj, name);
7023 pr_warn(".maps relo #%d: '%s' isn't a valid map reference\n", i, name);
7027 } else if (is_prog_array) {
7028 targ_prog = bpf_object__find_program_by_name(obj, name);
7030 pr_warn(".maps relo #%d: '%s' isn't a valid program reference\n", i, name);
7034 if (targ_prog->sec_idx != sym->st_shndx ||
7035 targ_prog->sec_insn_off * 8 != sym->st_value ||
7036 prog_is_subprog(obj, targ_prog)) {
7037 pr_warn(".maps relo #%d: '%s' isn't an entry-point program\n", i, name);
7039 return -LIBBPF_ERRNO__RELOC;
7045 var = btf__type_by_id(obj->btf, vi->type);
7046 def = skip_mods_and_typedefs(obj->btf, var->type, NULL);
7047 if (btf_vlen(def) == 0)
7049 member = btf_members(def) + btf_vlen(def) - 1;
7050 mname = btf__name_by_offset(obj->btf, member->name_off);
7051 if (strcmp(mname, "values"))
7054 moff = btf_member_bit_offset(def, btf_vlen(def) - 1) / 8;
7055 if (rel->r_offset - vi->offset < moff)
7058 moff = rel->r_offset - vi->offset - moff;
7059 /* here we use the BPF pointer size, which is always 64-bit, as we
7060 * are parsing an ELF file that was built for the BPF target
7062 if (moff % bpf_ptr_sz)
7065 if (moff >= map->init_slots_sz) {
7067 tmp = libbpf_reallocarray(map->init_slots, new_sz, host_ptr_sz);
7070 map->init_slots = tmp;
7071 memset(map->init_slots + map->init_slots_sz, 0,
7072 (new_sz - map->init_slots_sz) * host_ptr_sz);
7073 map->init_slots_sz = new_sz;
7075 map->init_slots[moff] = is_map_in_map ? (void *)targ_map : (void *)targ_prog;
7077 pr_debug(".maps relo #%d: map '%s' slot [%d] points to %s '%s'\n",
7078 i, map->name, moff, type, name);
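/* Sketch of the BPF-side declarations that produce such .maps
 * relocations (map-in-map initialization; names are hypothetical):
 *
 *   struct inner_map {
 *       __uint(type, BPF_MAP_TYPE_ARRAY);
 *       __uint(max_entries, 1);
 *       __type(key, int);
 *       __type(value, int);
 *   } inner_map1 SEC(".maps");
 *
 *   struct {
 *       __uint(type, BPF_MAP_TYPE_ARRAY_OF_MAPS);
 *       __uint(max_entries, 4);
 *       __type(key, int);
 *       __array(values, struct inner_map);
 *   } outer_map SEC(".maps") = {
 *       .values = { [0] = &inner_map1 },  // ELF relo into 'values'
 *   };
 */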
7084 static int bpf_object__collect_relos(struct bpf_object *obj)
7088 for (i = 0; i < obj->efile.sec_cnt; i++) {
7089 struct elf_sec_desc *sec_desc = &obj->efile.secs[i];
7094 if (sec_desc->sec_type != SEC_RELO)
7097 shdr = sec_desc->shdr;
7098 data = sec_desc->data;
7099 idx = shdr->sh_info;
7101 if (shdr->sh_type != SHT_REL || idx < 0 || idx >= obj->efile.sec_cnt) {
7102 pr_warn("internal error at %d\n", __LINE__);
7103 return -LIBBPF_ERRNO__INTERNAL;
7106 if (obj->efile.secs[idx].sec_type == SEC_ST_OPS)
7107 err = bpf_object__collect_st_ops_relos(obj, shdr, data);
7108 else if (idx == obj->efile.btf_maps_shndx)
7109 err = bpf_object__collect_map_relos(obj, shdr, data);
7111 err = bpf_object__collect_prog_relos(obj, shdr, data);
7116 bpf_object__sort_relos(obj);
7120 static bool insn_is_helper_call(struct bpf_insn *insn, enum bpf_func_id *func_id)
7122 if (BPF_CLASS(insn->code) == BPF_JMP &&
7123 BPF_OP(insn->code) == BPF_CALL &&
7124 BPF_SRC(insn->code) == BPF_K &&
7125 insn->src_reg == 0 &&
7126 insn->dst_reg == 0) {
7127 *func_id = insn->imm;
7133 static int bpf_object__sanitize_prog(struct bpf_object *obj, struct bpf_program *prog)
7135 struct bpf_insn *insn = prog->insns;
7136 enum bpf_func_id func_id;
7139 if (obj->gen_loader)
7142 for (i = 0; i < prog->insns_cnt; i++, insn++) {
7143 if (!insn_is_helper_call(insn, &func_id))
7146 /* on kernels that don't yet support
7147 * bpf_probe_read_{kernel,user}[_str] helpers, fall back
7148 * to bpf_probe_read(), which works well on old kernels
7151 case BPF_FUNC_probe_read_kernel:
7152 case BPF_FUNC_probe_read_user:
7153 if (!kernel_supports(obj, FEAT_PROBE_READ_KERN))
7154 insn->imm = BPF_FUNC_probe_read;
7156 case BPF_FUNC_probe_read_kernel_str:
7157 case BPF_FUNC_probe_read_user_str:
7158 if (!kernel_supports(obj, FEAT_PROBE_READ_KERN))
7159 insn->imm = BPF_FUNC_probe_read_str;
7168 static int libbpf_find_attach_btf_id(struct bpf_program *prog, const char *attach_name,
7169 int *btf_obj_fd, int *btf_type_id);
7171 /* this is called as prog->sec_def->prog_prepare_load_fn for libbpf-supported sec_defs */
7172 static int libbpf_prepare_prog_load(struct bpf_program *prog,
7173 struct bpf_prog_load_opts *opts, long cookie)
7175 enum sec_def_flags def = cookie;
7177 /* old kernels might not support specifying expected_attach_type */
7178 if ((def & SEC_EXP_ATTACH_OPT) && !kernel_supports(prog->obj, FEAT_EXP_ATTACH_TYPE))
7179 opts->expected_attach_type = 0;
7181 if (def & SEC_SLEEPABLE)
7182 opts->prog_flags |= BPF_F_SLEEPABLE;
7184 if (prog->type == BPF_PROG_TYPE_XDP && (def & SEC_XDP_FRAGS))
7185 opts->prog_flags |= BPF_F_XDP_HAS_FRAGS;
7187 /* special check for usdt to use uprobe_multi link */
7188 if ((def & SEC_USDT) && kernel_supports(prog->obj, FEAT_UPROBE_MULTI_LINK))
7189 prog->expected_attach_type = BPF_TRACE_UPROBE_MULTI;
7191 if ((def & SEC_ATTACH_BTF) && !prog->attach_btf_id) {
7192 int btf_obj_fd = 0, btf_type_id = 0, err;
7193 const char *attach_name;
7195 attach_name = strchr(prog->sec_name, '/');
7197 /* if BPF program is annotated with just SEC("fentry")
7198 * (or similar) without declaratively specifying
7199 * target, then it is expected that target will be
7200 * specified with bpf_program__set_attach_target() at
7201 * runtime before BPF object load step. If not, then
7202 * there is nothing to load into the kernel as BPF
7203 * verifier won't be able to validate BPF program
7204 * correctness anyway.
7206 pr_warn("prog '%s': no BTF-based attach target is specified, use bpf_program__set_attach_target()\n",
7210 attach_name++; /* skip over / */
7212 err = libbpf_find_attach_btf_id(prog, attach_name, &btf_obj_fd, &btf_type_id);
7216 /* cache resolved BTF FD and BTF type ID in the prog */
7217 prog->attach_btf_obj_fd = btf_obj_fd;
7218 prog->attach_btf_id = btf_type_id;
7220 /* but by now libbpf's common logic no longer uses
7221 * prog->attach_btf_obj_fd/prog->attach_btf_id, because
7222 * this callback is called after opts were populated by
7223 * libbpf, so this callback has to update opts explicitly here
7225 opts->attach_btf_obj_fd = btf_obj_fd;
7226 opts->attach_btf_id = btf_type_id;
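/* Usage sketch: with SEC("fentry/do_unlinkat") the attach target is
 * derived from the section name; with bare SEC("fentry") it must be set
 * programmatically before load (target name is a hypothetical example):
 *
 *   bpf_program__set_attach_target(prog, 0, "do_unlinkat");
 */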
7231 static void fixup_verifier_log(struct bpf_program *prog, char *buf, size_t buf_sz);
7233 static int bpf_object_load_prog(struct bpf_object *obj, struct bpf_program *prog,
7234 struct bpf_insn *insns, int insns_cnt,
7235 const char *license, __u32 kern_version, int *prog_fd)
7237 LIBBPF_OPTS(bpf_prog_load_opts, load_attr);
7238 const char *prog_name = NULL;
7239 char *cp, errmsg[STRERR_BUFSIZE];
7240 size_t log_buf_size = 0;
7241 char *log_buf = NULL, *tmp;
7242 int btf_fd, ret, err;
7243 bool own_log_buf = true;
7244 __u32 log_level = prog->log_level;
7246 if (prog->type == BPF_PROG_TYPE_UNSPEC) {
7248 * The program type must be set. Most likely we couldn't find a proper
7249 * section definition at load time, and thus we didn't infer the type.
7251 pr_warn("prog '%s': missing BPF prog type, check ELF section name '%s'\n",
7252 prog->name, prog->sec_name);
7256 if (!insns || !insns_cnt)
7259 if (kernel_supports(obj, FEAT_PROG_NAME))
7260 prog_name = prog->name;
7261 load_attr.attach_prog_fd = prog->attach_prog_fd;
7262 load_attr.attach_btf_obj_fd = prog->attach_btf_obj_fd;
7263 load_attr.attach_btf_id = prog->attach_btf_id;
7264 load_attr.kern_version = kern_version;
7265 load_attr.prog_ifindex = prog->prog_ifindex;
7267 /* specify func_info/line_info only if kernel supports them */
7268 btf_fd = btf__fd(obj->btf);
7269 if (btf_fd >= 0 && kernel_supports(obj, FEAT_BTF_FUNC)) {
7270 load_attr.prog_btf_fd = btf_fd;
7271 load_attr.func_info = prog->func_info;
7272 load_attr.func_info_rec_size = prog->func_info_rec_size;
7273 load_attr.func_info_cnt = prog->func_info_cnt;
7274 load_attr.line_info = prog->line_info;
7275 load_attr.line_info_rec_size = prog->line_info_rec_size;
7276 load_attr.line_info_cnt = prog->line_info_cnt;
7278 load_attr.log_level = log_level;
7279 load_attr.prog_flags = prog->prog_flags;
7280 load_attr.fd_array = obj->fd_array;
7282 load_attr.token_fd = obj->token_fd;
7284 load_attr.prog_flags |= BPF_F_TOKEN_FD;
7286 /* adjust load_attr if sec_def provides custom preload callback */
7287 if (prog->sec_def && prog->sec_def->prog_prepare_load_fn) {
7288 err = prog->sec_def->prog_prepare_load_fn(prog, &load_attr, prog->sec_def->cookie);
7290 pr_warn("prog '%s': failed to prepare load attributes: %d\n", prog->name, err);
7294 insns = prog->insns;
7295 insns_cnt = prog->insns_cnt;
7298 /* allow prog_prepare_load_fn to change expected_attach_type */
7299 load_attr.expected_attach_type = prog->expected_attach_type;
7301 if (obj->gen_loader) {
7302 bpf_gen__prog_load(obj->gen_loader, prog->type, prog->name,
7303 license, insns, insns_cnt, &load_attr,
7304 prog - obj->programs);
7310 /* if log_level is zero, we don't request logs initially even if
7311 * custom log_buf is specified; if the program load fails, then we'll
7312 * bump log_level to 1 and use either custom log_buf or we'll allocate
7313 * our own and retry the load to get details on what failed
7316 if (prog->log_buf) {
7317 log_buf = prog->log_buf;
7318 log_buf_size = prog->log_size;
7319 own_log_buf = false;
7320 } else if (obj->log_buf) {
7321 log_buf = obj->log_buf;
7322 log_buf_size = obj->log_size;
7323 own_log_buf = false;
7325 log_buf_size = max((size_t)BPF_LOG_BUF_SIZE, log_buf_size * 2);
7326 tmp = realloc(log_buf, log_buf_size);
7337 load_attr.log_buf = log_buf;
7338 load_attr.log_size = log_buf_size;
7339 load_attr.log_level = log_level;
7341 ret = bpf_prog_load(prog->type, prog_name, license, insns, insns_cnt, &load_attr);
7343 if (log_level && own_log_buf) {
7344 pr_debug("prog '%s': -- BEGIN PROG LOAD LOG --\n%s-- END PROG LOAD LOG --\n",
7345 prog->name, log_buf);
7348 if (obj->has_rodata && kernel_supports(obj, FEAT_PROG_BIND_MAP)) {
7349 struct bpf_map *map;
7352 for (i = 0; i < obj->nr_maps; i++) {
7353 map = &prog->obj->maps[i];
7354 if (map->libbpf_type != LIBBPF_MAP_RODATA)
7357 if (bpf_prog_bind_map(ret, map->fd, NULL)) {
7358 cp = libbpf_strerror_r(errno, errmsg, sizeof(errmsg));
7359 pr_warn("prog '%s': failed to bind map '%s': %s\n",
7360 prog->name, map->real_name, cp);
7361 /* Don't fail hard if we can't bind rodata. */
7371 if (log_level == 0) {
7375 /* On ENOSPC, increase log buffer size and retry, unless custom
7376 * log_buf is specified.
7377 * Be careful to not overflow u32, though. Kernel's log buf size limit
7378 * isn't part of UAPI so it can always be bumped to full 4GB. So don't
7379 * multiply by 2 unless we are sure we'll fit within 32 bits.
7380 * Currently, we'll get -EINVAL when we reach (UINT_MAX >> 2).
7382 if (own_log_buf && errno == ENOSPC && log_buf_size <= UINT_MAX / 2)
7387 /* post-process verifier log to improve error descriptions */
7388 fixup_verifier_log(prog, log_buf, log_buf_size);
7390 cp = libbpf_strerror_r(errno, errmsg, sizeof(errmsg));
7391 pr_warn("prog '%s': BPF program load failed: %s\n", prog->name, cp);
7394 if (own_log_buf && log_buf && log_buf[0] != '\0') {
7395 pr_warn("prog '%s': -- BEGIN PROG LOAD LOG --\n%s-- END PROG LOAD LOG --\n",
7396 prog->name, log_buf);
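/* Usage sketch: capturing the verifier log into a caller-owned buffer
 * instead of the one allocated above (buffer size is arbitrary):
 *
 *   static char vlog[1024 * 1024];
 *   bpf_program__set_log_buf(prog, vlog, sizeof(vlog));
 *   bpf_program__set_log_level(prog, 1);  // emit log even on success
 */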
7405 static char *find_prev_line(char *buf, char *cur)
7409 if (cur == buf) /* beginning of the log buf, no previous line */
7413 while (p - 1 >= buf && *(p - 1) != '\n')
7419 static void patch_log(char *buf, size_t buf_sz, size_t log_sz,
7420 char *orig, size_t orig_sz, const char *patch)
7422 /* size of the remaining log content to the right of the to-be-replaced part */
7423 size_t rem_sz = (buf + log_sz) - (orig + orig_sz);
7424 size_t patch_sz = strlen(patch);
7426 if (patch_sz != orig_sz) {
7427 /* If patch line(s) are longer than the original piece of verifier log,
7428 * shift log contents by (patch_sz - orig_sz) bytes to the right,
7429 * starting right after the to-be-replaced part of the log.
7431 * If patch line(s) are shorter than the original piece of verifier log,
7432 * shift log contents by (orig_sz - patch_sz) bytes to the left,
7433 * starting right after the to-be-replaced part of the log.
7435 * We need to be careful not to overflow the available
7436 * buf_sz capacity. If we would, we truncate the end
7437 * of the original log as necessary.
7439 if (patch_sz > orig_sz) {
7440 if (orig + patch_sz >= buf + buf_sz) {
7441 /* patch is big enough to cover remaining space completely */
7442 patch_sz -= (orig + patch_sz) - (buf + buf_sz) + 1;
7444 } else if (patch_sz - orig_sz > buf_sz - log_sz) {
7445 /* patch causes part of remaining log to be truncated */
7446 rem_sz -= (patch_sz - orig_sz) - (buf_sz - log_sz);
7449 /* shift remaining log to the right by calculated amount */
7450 memmove(orig + patch_sz, orig + orig_sz, rem_sz);
7453 memcpy(orig, patch, patch_sz);
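/* Worked example (assuming log_sz counts the terminating NUL, as in
 * fixup_verifier_log() below): buf = "0123456789", buf_sz = 16,
 * log_sz = 11, orig points at '2', orig_sz = 3, patch = "XXXXX".
 * Then rem_sz = 6 ("56789" plus NUL) and patch_sz - orig_sz = 2 fits
 * into spare capacity, so "56789" shifts right by 2 bytes and the
 * result is "01XXXXX56789".
 */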
7456 static void fixup_log_failed_core_relo(struct bpf_program *prog,
7457 char *buf, size_t buf_sz, size_t log_sz,
7458 char *line1, char *line2, char *line3)
7460 /* Expected log for failed and not properly guarded CO-RE relocation:
7461 * line1 -> 123: (85) call unknown#195896080
7462 * line2 -> invalid func unknown#195896080
7463 * line3 -> <anything else or end of buffer>
7465 * "123" is the index of the instruction that was poisoned. We extract
7466 * instruction index to find corresponding CO-RE relocation and
7467 * replace this part of the log with more relevant information about
7468 * failed CO-RE relocation.
7470 const struct bpf_core_relo *relo;
7471 struct bpf_core_spec spec;
7472 char patch[512], spec_buf[256];
7473 int insn_idx, err, spec_len;
7475 if (sscanf(line1, "%d: (%*d) call unknown#195896080\n", &insn_idx) != 1)
7478 relo = find_relo_core(prog, insn_idx);
7482 err = bpf_core_parse_spec(prog->name, prog->obj->btf, relo, &spec);
7486 spec_len = bpf_core_format_spec(spec_buf, sizeof(spec_buf), &spec);
7487 snprintf(patch, sizeof(patch),
7488 "%d: <invalid CO-RE relocation>\n"
7489 "failed to resolve CO-RE relocation %s%s\n",
7490 insn_idx, spec_buf, spec_len >= sizeof(spec_buf) ? "..." : "");
7492 patch_log(buf, buf_sz, log_sz, line1, line3 - line1, patch);
7495 static void fixup_log_missing_map_load(struct bpf_program *prog,
7496 char *buf, size_t buf_sz, size_t log_sz,
7497 char *line1, char *line2, char *line3)
7499 /* Expected log for failed and not properly guarded map reference:
7500 * line1 -> 123: (85) call unknown#2001000345
7501 * line2 -> invalid func unknown#2001000345
7502 * line3 -> <anything else or end of buffer>
7504 * "123" is the index of the instruction that was poisoned.
7505 * "345" in "2001000345" is a map index in obj->maps to fetch map name.
7507 struct bpf_object *obj = prog->obj;
7508 const struct bpf_map *map;
7509 int insn_idx, map_idx;
7512 if (sscanf(line1, "%d: (%*d) call unknown#%d\n", &insn_idx, &map_idx) != 2)
7515 map_idx -= POISON_LDIMM64_MAP_BASE;
7516 if (map_idx < 0 || map_idx >= obj->nr_maps)
7518 map = &obj->maps[map_idx];
7520 snprintf(patch, sizeof(patch),
7521 "%d: <invalid BPF map reference>\n"
7522 "BPF map '%s' is referenced but wasn't created\n",
7523 insn_idx, map->name);
7525 patch_log(buf, buf_sz, log_sz, line1, line3 - line1, patch);
7528 static void fixup_log_missing_kfunc_call(struct bpf_program *prog,
7529 char *buf, size_t buf_sz, size_t log_sz,
7530 char *line1, char *line2, char *line3)
7532 /* Expected log for failed and not properly guarded kfunc call:
7533 * line1 -> 123: (85) call unknown#2002000345
7534 * line2 -> invalid func unknown#2002000345
7535 * line3 -> <anything else or end of buffer>
7537 * "123" is the index of the instruction that was poisoned.
7538 * "345" in "2002000345" is an extern index in obj->externs to fetch kfunc name.
7540 struct bpf_object *obj = prog->obj;
7541 const struct extern_desc *ext;
7542 int insn_idx, ext_idx;
7545 if (sscanf(line1, "%d: (%*d) call unknown#%d\n", &insn_idx, &ext_idx) != 2)
7548 ext_idx -= POISON_CALL_KFUNC_BASE;
7549 if (ext_idx < 0 || ext_idx >= obj->nr_extern)
7551 ext = &obj->externs[ext_idx];
7553 snprintf(patch, sizeof(patch),
7554 "%d: <invalid kfunc call>\n"
7555 "kfunc '%s' is referenced but wasn't resolved\n",
7556 insn_idx, ext->name);
7558 patch_log(buf, buf_sz, log_sz, line1, line3 - line1, patch);
7561 static void fixup_verifier_log(struct bpf_program *prog, char *buf, size_t buf_sz)
7563 /* look for familiar error patterns in last N lines of the log */
7564 const size_t max_last_line_cnt = 10;
7565 char *prev_line, *cur_line, *next_line;
7572 log_sz = strlen(buf) + 1;
7573 next_line = buf + log_sz - 1;
7575 for (i = 0; i < max_last_line_cnt; i++, next_line = cur_line) {
7576 cur_line = find_prev_line(buf, next_line);
7580 if (str_has_pfx(cur_line, "invalid func unknown#195896080\n")) {
7581 prev_line = find_prev_line(buf, cur_line);
7585 /* failed CO-RE relocation case */
7586 fixup_log_failed_core_relo(prog, buf, buf_sz, log_sz,
7587 prev_line, cur_line, next_line);
7589 } else if (str_has_pfx(cur_line, "invalid func unknown#"POISON_LDIMM64_MAP_PFX)) {
7590 prev_line = find_prev_line(buf, cur_line);
7594 /* reference to uncreated BPF map */
7595 fixup_log_missing_map_load(prog, buf, buf_sz, log_sz,
7596 prev_line, cur_line, next_line);
7598 } else if (str_has_pfx(cur_line, "invalid func unknown#"POISON_CALL_KFUNC_PFX)) {
7599 prev_line = find_prev_line(buf, cur_line);
7603 /* reference to unresolved kfunc */
7604 fixup_log_missing_kfunc_call(prog, buf, buf_sz, log_sz,
7605 prev_line, cur_line, next_line);
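/* Sketch of the rewrite performed above for the CO-RE case:
 *
 *   before: 123: (85) call unknown#195896080
 *           invalid func unknown#195896080
 *   after:  123: <invalid CO-RE relocation>
 *           failed to resolve CO-RE relocation <spec...>
 */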
7611 static int bpf_program_record_relos(struct bpf_program *prog)
7613 struct bpf_object *obj = prog->obj;
7616 for (i = 0; i < prog->nr_reloc; i++) {
7617 struct reloc_desc *relo = &prog->reloc_desc[i];
7618 struct extern_desc *ext = &obj->externs[relo->ext_idx];
7621 switch (relo->type) {
7622 case RELO_EXTERN_LD64:
7623 if (ext->type != EXT_KSYM)
7625 kind = btf_is_var(btf__type_by_id(obj->btf, ext->btf_id)) ?
7626 BTF_KIND_VAR : BTF_KIND_FUNC;
7627 bpf_gen__record_extern(obj->gen_loader, ext->name,
7628 ext->is_weak, !ext->ksym.type_id,
7629 true, kind, relo->insn_idx);
7631 case RELO_EXTERN_CALL:
7632 bpf_gen__record_extern(obj->gen_loader, ext->name,
7633 ext->is_weak, false, false, BTF_KIND_FUNC, relo->insn_idx);
7637 struct bpf_core_relo cr = {
7638 .insn_off = relo->insn_idx * 8,
7639 .type_id = relo->core_relo->type_id,
7640 .access_str_off = relo->core_relo->access_str_off,
7641 .kind = relo->core_relo->kind,
7644 bpf_gen__record_relo_core(obj->gen_loader, &cr);
7655 bpf_object__load_progs(struct bpf_object *obj, int log_level)
7657 struct bpf_program *prog;
7661 for (i = 0; i < obj->nr_programs; i++) {
7662 prog = &obj->programs[i];
7663 err = bpf_object__sanitize_prog(obj, prog);
7668 for (i = 0; i < obj->nr_programs; i++) {
7669 prog = &obj->programs[i];
7670 if (prog_is_subprog(obj, prog))
7672 if (!prog->autoload) {
7673 pr_debug("prog '%s': skipped loading\n", prog->name);
7676 prog->log_level |= log_level;
7678 if (obj->gen_loader)
7679 bpf_program_record_relos(prog);
7681 err = bpf_object_load_prog(obj, prog, prog->insns, prog->insns_cnt,
7682 obj->license, obj->kern_version, &prog->fd);
7684 pr_warn("prog '%s': failed to load: %d\n", prog->name, err);
7689 bpf_object__free_relocs(obj);
7693 static const struct bpf_sec_def *find_sec_def(const char *sec_name);
7695 static int bpf_object_init_progs(struct bpf_object *obj, const struct bpf_object_open_opts *opts)
7697 struct bpf_program *prog;
7700 bpf_object__for_each_program(prog, obj) {
7701 prog->sec_def = find_sec_def(prog->sec_name);
7702 if (!prog->sec_def) {
7703 /* couldn't guess, but user might manually specify */
7704 pr_debug("prog '%s': unrecognized ELF section name '%s'\n",
7705 prog->name, prog->sec_name);
7709 prog->type = prog->sec_def->prog_type;
7710 prog->expected_attach_type = prog->sec_def->expected_attach_type;
7712 /* sec_def can have a custom callback which should be called
7713 * after bpf_program is initialized to adjust its properties
7715 if (prog->sec_def->prog_setup_fn) {
7716 err = prog->sec_def->prog_setup_fn(prog, prog->sec_def->cookie);
7718 pr_warn("prog '%s': failed to initialize: %d\n", prog->name, err);
7728 static struct bpf_object *bpf_object_open(const char *path, const void *obj_buf, size_t obj_buf_sz,
7729 const struct bpf_object_open_opts *opts)
7731 const char *obj_name, *kconfig, *btf_tmp_path, *token_path;
7732 struct bpf_object *obj;
7739 if (elf_version(EV_CURRENT) == EV_NONE) {
7740 pr_warn("failed to init libelf for %s\n",
7741 path ? : "(mem buf)");
7742 return ERR_PTR(-LIBBPF_ERRNO__LIBELF);
7745 if (!OPTS_VALID(opts, bpf_object_open_opts))
7746 return ERR_PTR(-EINVAL);
7748 obj_name = OPTS_GET(opts, object_name, NULL);
7751 snprintf(tmp_name, sizeof(tmp_name), "%lx-%lx",
7752 (unsigned long)obj_buf,
7753 (unsigned long)obj_buf_sz);
7754 obj_name = tmp_name;
7757 pr_debug("loading object '%s' from buffer\n", obj_name);
7760 log_buf = OPTS_GET(opts, kernel_log_buf, NULL);
7761 log_size = OPTS_GET(opts, kernel_log_size, 0);
7762 log_level = OPTS_GET(opts, kernel_log_level, 0);
7763 if (log_size > UINT_MAX)
7764 return ERR_PTR(-EINVAL);
7765 if (log_size && !log_buf)
7766 return ERR_PTR(-EINVAL);
7768 token_path = OPTS_GET(opts, bpf_token_path, NULL);
7769 /* if user didn't specify bpf_token_path explicitly, check if
7770 * LIBBPF_BPF_TOKEN_PATH envvar was set and treat it as bpf_token_path
7774 token_path = getenv("LIBBPF_BPF_TOKEN_PATH");
7775 if (token_path && strlen(token_path) >= PATH_MAX)
7776 return ERR_PTR(-ENAMETOOLONG);
7778 obj = bpf_object__new(path, obj_buf, obj_buf_sz, obj_name);
7782 obj->log_buf = log_buf;
7783 obj->log_size = log_size;
7784 obj->log_level = log_level;
7787 obj->token_path = strdup(token_path);
7788 if (!obj->token_path) {
7794 btf_tmp_path = OPTS_GET(opts, btf_custom_path, NULL);
7796 if (strlen(btf_tmp_path) >= PATH_MAX) {
7797 err = -ENAMETOOLONG;
7800 obj->btf_custom_path = strdup(btf_tmp_path);
7801 if (!obj->btf_custom_path) {
7807 kconfig = OPTS_GET(opts, kconfig, NULL);
7809 obj->kconfig = strdup(kconfig);
7810 if (!obj->kconfig) {
7816 err = bpf_object__elf_init(obj);
7817 err = err ? : bpf_object__check_endianness(obj);
7818 err = err ? : bpf_object__elf_collect(obj);
7819 err = err ? : bpf_object__collect_externs(obj);
7820 err = err ? : bpf_object_fixup_btf(obj);
7821 err = err ? : bpf_object__init_maps(obj, opts);
7822 err = err ? : bpf_object_init_progs(obj, opts);
7823 err = err ? : bpf_object__collect_relos(obj);
7827 bpf_object__elf_finish(obj);
7831 bpf_object__close(obj);
7832 return ERR_PTR(err);
7836 bpf_object__open_file(const char *path, const struct bpf_object_open_opts *opts)
7839 return libbpf_err_ptr(-EINVAL);
7841 pr_debug("loading %s\n", path);
7843 return libbpf_ptr(bpf_object_open(path, NULL, 0, opts));
7846 struct bpf_object *bpf_object__open(const char *path)
7848 return bpf_object__open_file(path, NULL);
7852 bpf_object__open_mem(const void *obj_buf, size_t obj_buf_sz,
7853 const struct bpf_object_open_opts *opts)
7855 if (!obj_buf || obj_buf_sz == 0)
7856 return libbpf_err_ptr(-EINVAL);
7858 return libbpf_ptr(bpf_object_open(NULL, obj_buf, obj_buf_sz, opts));
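/* Usage sketch (buf/buf_sz are a hypothetical in-memory .bpf.o image):
 *
 *   LIBBPF_OPTS(bpf_object_open_opts, opts, .object_name = "my_obj");
 *   struct bpf_object *obj = bpf_object__open_mem(buf, buf_sz, &opts);
 *   if (!obj)
 *       ...  // NULL on error, errno is set
 */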
7861 static int bpf_object_unload(struct bpf_object *obj)
7866 return libbpf_err(-EINVAL);
7868 for (i = 0; i < obj->nr_maps; i++) {
7869 zclose(obj->maps[i].fd);
7870 if (obj->maps[i].st_ops)
7871 zfree(&obj->maps[i].st_ops->kern_vdata);
7874 for (i = 0; i < obj->nr_programs; i++)
7875 bpf_program__unload(&obj->programs[i]);
7880 static int bpf_object__sanitize_maps(struct bpf_object *obj)
7884 bpf_object__for_each_map(m, obj) {
7885 if (!bpf_map__is_internal(m))
7887 if (!kernel_supports(obj, FEAT_ARRAY_MMAP))
7888 m->def.map_flags &= ~BPF_F_MMAPABLE;
7894 int libbpf_kallsyms_parse(kallsyms_cb_t cb, void *ctx)
7896 char sym_type, sym_name[500];
7897 unsigned long long sym_addr;
7901 f = fopen("/proc/kallsyms", "re");
7904 pr_warn("failed to open /proc/kallsyms: %d\n", err);
7909 ret = fscanf(f, "%llx %c %499s%*[^\n]\n",
7910 &sym_addr, &sym_type, sym_name);
7911 if (ret == EOF && feof(f))
7914 pr_warn("failed to read kallsyms entry: %d\n", ret);
7919 err = cb(sym_addr, sym_type, sym_name, ctx);
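/* Callback sketch (hypothetical): dump every /proc/kallsyms entry.
 *
 *   static int dump_sym(unsigned long long addr, char type,
 *                       const char *name, void *ctx)
 *   {
 *       printf("%llx %c %s\n", addr, type, name);
 *       return 0;  // non-zero stops iteration and is returned
 *   }
 *   ...
 *   libbpf_kallsyms_parse(dump_sym, NULL);
 */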
7928 static int kallsyms_cb(unsigned long long sym_addr, char sym_type,
7929 const char *sym_name, void *ctx)
7931 struct bpf_object *obj = ctx;
7932 const struct btf_type *t;
7933 struct extern_desc *ext;
7935 ext = find_extern_by_name(obj, sym_name);
7936 if (!ext || ext->type != EXT_KSYM)
7939 t = btf__type_by_id(obj->btf, ext->btf_id);
7943 if (ext->is_set && ext->ksym.addr != sym_addr) {
7944 pr_warn("extern (ksym) '%s': resolution is ambiguous: 0x%llx or 0x%llx\n",
7945 sym_name, ext->ksym.addr, sym_addr);
7950 ext->ksym.addr = sym_addr;
7951 pr_debug("extern (ksym) '%s': set to 0x%llx\n", sym_name, sym_addr);
7956 static int bpf_object__read_kallsyms_file(struct bpf_object *obj)
7958 return libbpf_kallsyms_parse(kallsyms_cb, obj);
7961 static int find_ksym_btf_id(struct bpf_object *obj, const char *ksym_name,
7962 __u16 kind, struct btf **res_btf,
7963 struct module_btf **res_mod_btf)
7965 struct module_btf *mod_btf;
7969 btf = obj->btf_vmlinux;
7971 id = btf__find_by_name_kind(btf, ksym_name, kind);
7973 if (id == -ENOENT) {
7974 err = load_module_btfs(obj);
7978 for (i = 0; i < obj->btf_module_cnt; i++) {
7979 /* we assume module_btf's BTF FD is always >0 */
7980 mod_btf = &obj->btf_modules[i];
7982 id = btf__find_by_name_kind_own(btf, ksym_name, kind);
7991 *res_mod_btf = mod_btf;
7995 static int bpf_object__resolve_ksym_var_btf_id(struct bpf_object *obj,
7996 struct extern_desc *ext)
7998 const struct btf_type *targ_var, *targ_type;
7999 __u32 targ_type_id, local_type_id;
8000 struct module_btf *mod_btf = NULL;
8001 const char *targ_var_name;
8002 struct btf *btf = NULL;
8005 id = find_ksym_btf_id(obj, ext->name, BTF_KIND_VAR, &btf, &mod_btf);
8007 if (id == -ESRCH && ext->is_weak)
8009 pr_warn("extern (var ksym) '%s': not found in kernel BTF\n", ext->name);
8014 /* find local type_id */
8015 local_type_id = ext->ksym.type_id;
8017 /* find target type_id */
8018 targ_var = btf__type_by_id(btf, id);
8019 targ_var_name = btf__name_by_offset(btf, targ_var->name_off);
8020 targ_type = skip_mods_and_typedefs(btf, targ_var->type, &targ_type_id);
8022 err = bpf_core_types_are_compat(obj->btf, local_type_id, btf, targ_type_id);
8025 const struct btf_type *local_type;
8026 const char *targ_name, *local_name;
8028 local_type = btf__type_by_id(obj->btf, local_type_id);
8029 local_name = btf__name_by_offset(obj->btf, local_type->name_off);
8030 targ_name = btf__name_by_offset(btf, targ_type->name_off);
8032 pr_warn("extern (var ksym) '%s': incompatible types, expected [%d] %s %s, but kernel has [%d] %s %s\n",
8033 ext->name, local_type_id,
8034 btf_kind_str(local_type), local_name, targ_type_id,
8035 btf_kind_str(targ_type), targ_name);
8040 ext->ksym.kernel_btf_obj_fd = mod_btf ? mod_btf->fd : 0;
8041 ext->ksym.kernel_btf_id = id;
8042 pr_debug("extern (var ksym) '%s': resolved to [%d] %s %s\n",
8043 ext->name, id, btf_kind_str(targ_var), targ_var_name);
8048 static int bpf_object__resolve_ksym_func_btf_id(struct bpf_object *obj,
8049 struct extern_desc *ext)
8051 int local_func_proto_id, kfunc_proto_id, kfunc_id;
8052 struct module_btf *mod_btf = NULL;
8053 const struct btf_type *kern_func;
8054 struct btf *kern_btf = NULL;
8057 local_func_proto_id = ext->ksym.type_id;
8059 kfunc_id = find_ksym_btf_id(obj, ext->essent_name ?: ext->name, BTF_KIND_FUNC, &kern_btf, &mod_btf);
8062 if (kfunc_id == -ESRCH && ext->is_weak)
8064 pr_warn("extern (func ksym) '%s': not found in kernel or module BTFs\n", ext->name);
8069 kern_func = btf__type_by_id(kern_btf, kfunc_id);
8070 kfunc_proto_id = kern_func->type;
8072 ret = bpf_core_types_are_compat(obj->btf, local_func_proto_id,
8073 kern_btf, kfunc_proto_id);
8078 pr_warn("extern (func ksym) '%s': func_proto [%d] incompatible with %s [%d]\n",
8079 ext->name, local_func_proto_id,
8080 mod_btf ? mod_btf->name : "vmlinux", kfunc_proto_id);
8084 /* set index for module BTF fd in fd_array, if unset */
8085 if (mod_btf && !mod_btf->fd_array_idx) {
8086 /* insn->off is s16 */
8087 if (obj->fd_array_cnt == INT16_MAX) {
8088 pr_warn("extern (func ksym) '%s': module BTF fd index %d too big to fit in bpf_insn offset\n",
8089 ext->name, mod_btf->fd_array_idx);
8092 /* Cannot use index 0 for module BTF fd */
8093 if (!obj->fd_array_cnt)
8094 obj->fd_array_cnt = 1;
8096 ret = libbpf_ensure_mem((void **)&obj->fd_array, &obj->fd_array_cap, sizeof(int),
8097 obj->fd_array_cnt + 1);
8100 mod_btf->fd_array_idx = obj->fd_array_cnt;
8101 /* we assume module BTF FD is always >0 */
8102 obj->fd_array[obj->fd_array_cnt++] = mod_btf->fd;
8106 ext->ksym.kernel_btf_id = kfunc_id;
8107 ext->ksym.btf_fd_idx = mod_btf ? mod_btf->fd_array_idx : 0;
8108 /* Also set kernel_btf_obj_fd to make sure that bpf_object__relocate_data()
8109 * populates FD into ld_imm64 insn when it's used to point to kfunc.
8110 * {kernel_btf_id, btf_fd_idx} -> fixup bpf_call.
8111 * {kernel_btf_id, kernel_btf_obj_fd} -> fixup ld_imm64.
8113 ext->ksym.kernel_btf_obj_fd = mod_btf ? mod_btf->fd : 0;
8114 pr_debug("extern (func ksym) '%s': resolved to %s [%d]\n",
8115 ext->name, mod_btf ? mod_btf->name : "vmlinux", kfunc_id);
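/* BPF-side externs resolved here (sketch; these kfuncs exist in recent
 * vmlinux BTF, but treat the names as examples):
 *
 *   extern void bpf_rcu_read_lock(void) __ksym;
 *   extern struct task_struct *bpf_task_acquire(struct task_struct *p)
 *       __ksym __weak;  // weak: may legitimately remain unresolved
 */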
8120 static int bpf_object__resolve_ksyms_btf_id(struct bpf_object *obj)
8122 const struct btf_type *t;
8123 struct extern_desc *ext;
8126 for (i = 0; i < obj->nr_extern; i++) {
8127 ext = &obj->externs[i];
8128 if (ext->type != EXT_KSYM || !ext->ksym.type_id)
8131 if (obj->gen_loader) {
8133 ext->ksym.kernel_btf_obj_fd = 0;
8134 ext->ksym.kernel_btf_id = 0;
8137 t = btf__type_by_id(obj->btf, ext->btf_id);
8139 err = bpf_object__resolve_ksym_var_btf_id(obj, ext);
8141 err = bpf_object__resolve_ksym_func_btf_id(obj, ext);
8148 static int bpf_object__resolve_externs(struct bpf_object *obj,
8149 const char *extra_kconfig)
8151 bool need_config = false, need_kallsyms = false;
8152 bool need_vmlinux_btf = false;
8153 struct extern_desc *ext;
8154 void *kcfg_data = NULL;
8157 if (obj->nr_extern == 0)
8160 if (obj->kconfig_map_idx >= 0)
8161 kcfg_data = obj->maps[obj->kconfig_map_idx].mmaped;
8163 for (i = 0; i < obj->nr_extern; i++) {
8164 ext = &obj->externs[i];
8166 if (ext->type == EXT_KSYM) {
8167 if (ext->ksym.type_id)
8168 need_vmlinux_btf = true;
8170 need_kallsyms = true;
8172 } else if (ext->type == EXT_KCFG) {
8173 void *ext_ptr = kcfg_data + ext->kcfg.data_off;
8176 /* Kconfig externs need actual /proc/config.gz */
8177 if (str_has_pfx(ext->name, "CONFIG_")) {
8182 /* Virtual kcfg externs are handled by libbpf itself */
8183 if (strcmp(ext->name, "LINUX_KERNEL_VERSION") == 0) {
8184 value = get_kernel_version();
8186 pr_warn("extern (kcfg) '%s': failed to get kernel version\n", ext->name);
8189 } else if (strcmp(ext->name, "LINUX_HAS_BPF_COOKIE") == 0) {
8190 value = kernel_supports(obj, FEAT_BPF_COOKIE);
8191 } else if (strcmp(ext->name, "LINUX_HAS_SYSCALL_WRAPPER") == 0) {
8192 value = kernel_supports(obj, FEAT_SYSCALL_WRAPPER);
8193 } else if (!str_has_pfx(ext->name, "LINUX_") || !ext->is_weak) {
8194 /* Currently libbpf supports only CONFIG_- and LINUX_-prefixed
8195 * __kconfig externs, where the LINUX_ ones are virtual and filled out
8196 * by libbpf itself (their values don't come from Kconfig).
8197 * If a LINUX_xxx variable is not recognized by libbpf but is marked
8198 * __weak, it defaults to a zero value, just like CONFIG_xxx externs.
8201 pr_warn("extern (kcfg) '%s': unrecognized virtual extern\n", ext->name);
8205 err = set_kcfg_value_num(ext, ext_ptr, value);
8208 pr_debug("extern (kcfg) '%s': set to 0x%llx\n",
8209 ext->name, (long long)value);
8211 pr_warn("extern '%s': unrecognized extern kind\n", ext->name);
8215 if (need_config && extra_kconfig) {
8216 err = bpf_object__read_kconfig_mem(obj, extra_kconfig, kcfg_data);
8219 need_config = false;
8220 for (i = 0; i < obj->nr_extern; i++) {
8221 ext = &obj->externs[i];
8222 if (ext->type == EXT_KCFG && !ext->is_set) {
8229 err = bpf_object__read_kconfig_file(obj, kcfg_data);
8233 if (need_kallsyms) {
8234 err = bpf_object__read_kallsyms_file(obj);
8238 if (need_vmlinux_btf) {
8239 err = bpf_object__resolve_ksyms_btf_id(obj);
8243 for (i = 0; i < obj->nr_extern; i++) {
8244 ext = &obj->externs[i];
8246 if (!ext->is_set && !ext->is_weak) {
8247 pr_warn("extern '%s' (strong): not resolved\n", ext->name);
8249 } else if (!ext->is_set) {
8250 pr_debug("extern '%s' (weak): not resolved, defaulting to zero\n", ext->name);
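/* Sketch of the BPF-side extern kinds this function resolves:
 *
 *   extern int LINUX_KERNEL_VERSION __kconfig;               // virtual kcfg
 *   extern _Bool CONFIG_BPF_JIT_ALWAYS_ON __kconfig __weak;  // Kconfig kcfg
 *   extern void bpf_rcu_read_unlock(void) __ksym;            // ksym
 */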
8258 static void bpf_map_prepare_vdata(const struct bpf_map *map)
8260 struct bpf_struct_ops *st_ops;
8263 st_ops = map->st_ops;
8264 for (i = 0; i < btf_vlen(st_ops->type); i++) {
8265 struct bpf_program *prog = st_ops->progs[i];
8272 prog_fd = bpf_program__fd(prog);
8273 kern_data = st_ops->kern_vdata + st_ops->kern_func_off[i];
8274 *(unsigned long *)kern_data = prog_fd;
8278 static int bpf_object_prepare_struct_ops(struct bpf_object *obj)
8280 struct bpf_map *map;
8283 for (i = 0; i < obj->nr_maps; i++) {
8284 map = &obj->maps[i];
8286 if (!bpf_map__is_struct_ops(map))
8289 if (!map->autocreate)
8292 bpf_map_prepare_vdata(map);
8298 static int bpf_object_load(struct bpf_object *obj, int extra_log_level, const char *target_btf_path)
8303 return libbpf_err(-EINVAL);
8306 pr_warn("object '%s': load can't be attempted twice\n", obj->name);
8307 return libbpf_err(-EINVAL);
8310 if (obj->gen_loader)
8311 bpf_gen__init(obj->gen_loader, extra_log_level, obj->nr_programs, obj->nr_maps);
8313 err = bpf_object_prepare_token(obj);
8314 err = err ? : bpf_object__probe_loading(obj);
8315 err = err ? : bpf_object__load_vmlinux_btf(obj, false);
8316 err = err ? : bpf_object__resolve_externs(obj, obj->kconfig);
8317 err = err ? : bpf_object__sanitize_maps(obj);
8318 err = err ? : bpf_object__init_kern_struct_ops_maps(obj);
8319 err = err ? : bpf_object_adjust_struct_ops_autoload(obj);
8320 err = err ? : bpf_object__relocate(obj, obj->btf_custom_path ? : target_btf_path);
8321 err = err ? : bpf_object__sanitize_and_load_btf(obj);
8322 err = err ? : bpf_object__create_maps(obj);
8323 err = err ? : bpf_object__load_progs(obj, extra_log_level);
8324 err = err ? : bpf_object_init_prog_arrays(obj);
8325 err = err ? : bpf_object_prepare_struct_ops(obj);
8327 if (obj->gen_loader) {
8330 btf__set_fd(obj->btf, -1);
8332 err = bpf_gen__finish(obj->gen_loader, obj->nr_programs, obj->nr_maps);
8335 /* clean up fd_array */
8336 zfree(&obj->fd_array);
8338 /* clean up module BTFs */
8339 for (i = 0; i < obj->btf_module_cnt; i++) {
8340 close(obj->btf_modules[i].fd);
8341 btf__free(obj->btf_modules[i].btf);
8342 free(obj->btf_modules[i].name);
8344 free(obj->btf_modules);
8346 /* clean up vmlinux BTF */
8347 btf__free(obj->btf_vmlinux);
8348 obj->btf_vmlinux = NULL;
8350 obj->loaded = true; /* whether the load succeeded or not */
8357 /* unpin any maps that were auto-pinned during load */
8358 for (i = 0; i < obj->nr_maps; i++)
8359 if (obj->maps[i].pinned && !obj->maps[i].reused)
8360 bpf_map__unpin(&obj->maps[i], NULL);
8362 bpf_object_unload(obj);
8363 pr_warn("failed to load object '%s'\n", obj->path);
8364 return libbpf_err(err);
8367 int bpf_object__load(struct bpf_object *obj)
8369 return bpf_object_load(obj, 0, NULL);
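/* Typical lifecycle sketch (error handling elided; path hypothetical):
 *
 *   struct bpf_object *obj = bpf_object__open_file("prog.bpf.o", NULL);
 *   int err = bpf_object__load(obj);
 *   ...  // attach programs, use maps
 *   bpf_object__close(obj);
 */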
8372 static int make_parent_dir(const char *path)
8374 char *cp, errmsg[STRERR_BUFSIZE];
8378 dname = strdup(path);
8382 dir = dirname(dname);
8383 if (mkdir(dir, 0700) && errno != EEXIST)
8388 cp = libbpf_strerror_r(-err, errmsg, sizeof(errmsg));
8389 pr_warn("failed to mkdir %s: %s\n", path, cp);
8394 static int check_path(const char *path)
8396 char *cp, errmsg[STRERR_BUFSIZE];
8397 struct statfs st_fs;
8404 dname = strdup(path);
8408 dir = dirname(dname);
8409 if (statfs(dir, &st_fs)) {
8410 cp = libbpf_strerror_r(errno, errmsg, sizeof(errmsg));
8411 pr_warn("failed to statfs %s: %s\n", dir, cp);
8416 if (!err && st_fs.f_type != BPF_FS_MAGIC) {
8417 pr_warn("specified path %s is not on BPF FS\n", path);
8424 int bpf_program__pin(struct bpf_program *prog, const char *path)
8426 char *cp, errmsg[STRERR_BUFSIZE];
8430 pr_warn("prog '%s': can't pin program that wasn't loaded\n", prog->name);
8431 return libbpf_err(-EINVAL);
8434 err = make_parent_dir(path);
8436 return libbpf_err(err);
8438 err = check_path(path);
8440 return libbpf_err(err);
8442 if (bpf_obj_pin(prog->fd, path)) {
8444 cp = libbpf_strerror_r(err, errmsg, sizeof(errmsg));
8445 pr_warn("prog '%s': failed to pin at '%s': %s\n", prog->name, path, cp);
8446 return libbpf_err(err);
8449 pr_debug("prog '%s': pinned at '%s'\n", prog->name, path);
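/* Usage sketch (hypothetical bpffs path; the parent directory is
 * created by make_parent_dir() above):
 *
 *   bpf_program__pin(prog, "/sys/fs/bpf/myapp/my_prog");
 */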
8453 int bpf_program__unpin(struct bpf_program *prog, const char *path)
8458 pr_warn("prog '%s': can't unpin program that wasn't loaded\n", prog->name);
8459 return libbpf_err(-EINVAL);
8462 err = check_path(path);
8464 return libbpf_err(err);
8468 return libbpf_err(-errno);
8470 pr_debug("prog '%s': unpinned from '%s'\n", prog->name, path);
8474 int bpf_map__pin(struct bpf_map *map, const char *path)
8476 char *cp, errmsg[STRERR_BUFSIZE];
8480 pr_warn("invalid map pointer\n");
8481 return libbpf_err(-EINVAL);
8484 if (map->pin_path) {
8485 if (path && strcmp(path, map->pin_path)) {
8486 pr_warn("map '%s' already has pin path '%s' different from '%s'\n",
8487 bpf_map__name(map), map->pin_path, path);
8488 return libbpf_err(-EINVAL);
8489 } else if (map->pinned) {
8490 pr_debug("map '%s' already pinned at '%s'; not re-pinning\n",
8491 bpf_map__name(map), map->pin_path);
8496 pr_warn("missing a path to pin map '%s' at\n",
8497 bpf_map__name(map));
8498 return libbpf_err(-EINVAL);
8499 } else if (map->pinned) {
8500 pr_warn("map '%s' already pinned\n", bpf_map__name(map));
8501 return libbpf_err(-EEXIST);
8504 map->pin_path = strdup(path);
8505 if (!map->pin_path) {
8511 err = make_parent_dir(map->pin_path);
8513 return libbpf_err(err);
8515 err = check_path(map->pin_path);
8517 return libbpf_err(err);
8519 if (bpf_obj_pin(map->fd, map->pin_path)) {
8525 pr_debug("pinned map '%s'\n", map->pin_path);
8530 cp = libbpf_strerror_r(-err, errmsg, sizeof(errmsg));
8531 pr_warn("failed to pin map: %s\n", cp);
8532 return libbpf_err(err);
8535 int bpf_map__unpin(struct bpf_map *map, const char *path)
8540 pr_warn("invalid map pointer\n");
8541 return libbpf_err(-EINVAL);
8544 if (map->pin_path) {
8545 if (path && strcmp(path, map->pin_path)) {
8546 pr_warn("map '%s' already has pin path '%s' different from '%s'\n",
8547 bpf_map__name(map), map->pin_path, path);
8548 return libbpf_err(-EINVAL);
8550 path = map->pin_path;
8552 pr_warn("no path to unpin map '%s' from\n",
8553 bpf_map__name(map));
8554 return libbpf_err(-EINVAL);
8557 err = check_path(path);
8559 return libbpf_err(err);
8563 return libbpf_err(-errno);
8565 map->pinned = false;
8566 pr_debug("unpinned map '%s' from '%s'\n", bpf_map__name(map), path);
8571 int bpf_map__set_pin_path(struct bpf_map *map, const char *path)
8578 return libbpf_err(-errno);
8581 free(map->pin_path);
8582 map->pin_path = new;
8586 __alias(bpf_map__pin_path)
8587 const char *bpf_map__get_pin_path(const struct bpf_map *map);
8589 const char *bpf_map__pin_path(const struct bpf_map *map)
8591 return map->pin_path;
8594 bool bpf_map__is_pinned(const struct bpf_map *map)
8599 static void sanitize_pin_path(char *s)
8601 /* bpffs disallows periods in path names */
8609 int bpf_object__pin_maps(struct bpf_object *obj, const char *path)
8611 struct bpf_map *map;
8615 return libbpf_err(-ENOENT);
8618 pr_warn("object not yet loaded; load it first\n");
8619 return libbpf_err(-ENOENT);
8622 bpf_object__for_each_map(map, obj) {
8623 char *pin_path = NULL;
8626 if (!map->autocreate)
8630 err = pathname_concat(buf, sizeof(buf), path, bpf_map__name(map));
8632 goto err_unpin_maps;
8633 sanitize_pin_path(buf);
8635 } else if (!map->pin_path) {
8639 err = bpf_map__pin(map, pin_path);
8641 goto err_unpin_maps;
8647 while ((map = bpf_object__prev_map(obj, map))) {
8651 bpf_map__unpin(map, NULL);
8654 return libbpf_err(err);
8657 int bpf_object__unpin_maps(struct bpf_object *obj, const char *path)
8659 struct bpf_map *map;
8663 return libbpf_err(-ENOENT);
8665 bpf_object__for_each_map(map, obj) {
8666 char *pin_path = NULL;
8670 err = pathname_concat(buf, sizeof(buf), path, bpf_map__name(map));
8672 return libbpf_err(err);
8673 sanitize_pin_path(buf);
8675 } else if (!map->pin_path) {
8679 err = bpf_map__unpin(map, pin_path);
8681 return libbpf_err(err);
8687 int bpf_object__pin_programs(struct bpf_object *obj, const char *path)
8689 struct bpf_program *prog;
8694 return libbpf_err(-ENOENT);
8697 pr_warn("object not yet loaded; load it first\n");
8698 return libbpf_err(-ENOENT);
8701 bpf_object__for_each_program(prog, obj) {
8702 err = pathname_concat(buf, sizeof(buf), path, prog->name);
8704 goto err_unpin_programs;
8706 err = bpf_program__pin(prog, buf);
8708 goto err_unpin_programs;
8714 while ((prog = bpf_object__prev_program(obj, prog))) {
8715 if (pathname_concat(buf, sizeof(buf), path, prog->name))
8718 bpf_program__unpin(prog, buf);
8721 return libbpf_err(err);
8724 int bpf_object__unpin_programs(struct bpf_object *obj, const char *path)
8726 struct bpf_program *prog;
8730 return libbpf_err(-ENOENT);
8732 bpf_object__for_each_program(prog, obj) {
8735 err = pathname_concat(buf, sizeof(buf), path, prog->name);
8737 return libbpf_err(err);
8739 err = bpf_program__unpin(prog, buf);
8741 return libbpf_err(err);
8747 int bpf_object__pin(struct bpf_object *obj, const char *path)
8751 err = bpf_object__pin_maps(obj, path);
8753 return libbpf_err(err);
8755 err = bpf_object__pin_programs(obj, path);
8757 bpf_object__unpin_maps(obj, path);
8758 return libbpf_err(err);
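/* Usage sketch: pin all maps and programs under one bpffs directory,
 * and later remove them (path hypothetical):
 *
 *   bpf_object__pin(obj, "/sys/fs/bpf/myapp");
 *   ...
 *   bpf_object__unpin(obj, "/sys/fs/bpf/myapp");
 */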
8764 int bpf_object__unpin(struct bpf_object *obj, const char *path)
8768 err = bpf_object__unpin_programs(obj, path);
8770 return libbpf_err(err);
8772 err = bpf_object__unpin_maps(obj, path);
8774 return libbpf_err(err);
8779 static void bpf_map__destroy(struct bpf_map *map)
8781 if (map->inner_map) {
8782 bpf_map__destroy(map->inner_map);
8783 zfree(&map->inner_map);
8786 zfree(&map->init_slots);
8787 map->init_slots_sz = 0;
8792 mmap_sz = bpf_map_mmap_sz(map);
8793 munmap(map->mmaped, mmap_sz);
8798 zfree(&map->st_ops->data);
8799 zfree(&map->st_ops->progs);
8800 zfree(&map->st_ops->kern_func_off);
8801 zfree(&map->st_ops);
8805 zfree(&map->real_name);
8806 zfree(&map->pin_path);
8812 void bpf_object__close(struct bpf_object *obj)
8816 if (IS_ERR_OR_NULL(obj))
8819 usdt_manager_free(obj->usdt_man);
8820 obj->usdt_man = NULL;
8822 bpf_gen__free(obj->gen_loader);
8823 bpf_object__elf_finish(obj);
8824 bpf_object_unload(obj);
8825 btf__free(obj->btf);
8826 btf__free(obj->btf_vmlinux);
8827 btf_ext__free(obj->btf_ext);
8829 for (i = 0; i < obj->nr_maps; i++)
8830 bpf_map__destroy(&obj->maps[i]);
8832 zfree(&obj->btf_custom_path);
8833 zfree(&obj->kconfig);
8835 for (i = 0; i < obj->nr_extern; i++)
8836 zfree(&obj->externs[i].essent_name);
8838 zfree(&obj->externs);
8844 if (obj->programs && obj->nr_programs) {
8845 for (i = 0; i < obj->nr_programs; i++)
8846 bpf_program__exit(&obj->programs[i]);
8848 zfree(&obj->programs);
8850 zfree(&obj->feat_cache);
8851 zfree(&obj->token_path);
8852 if (obj->token_fd > 0)
8853 close(obj->token_fd);
8858 const char *bpf_object__name(const struct bpf_object *obj)
8860 return obj ? obj->name : libbpf_err_ptr(-EINVAL);
8863 unsigned int bpf_object__kversion(const struct bpf_object *obj)
8865 return obj ? obj->kern_version : 0;
8868 struct btf *bpf_object__btf(const struct bpf_object *obj)
8870 return obj ? obj->btf : NULL;
8873 int bpf_object__btf_fd(const struct bpf_object *obj)
8875 return obj->btf ? btf__fd(obj->btf) : -1;
8878 int bpf_object__set_kversion(struct bpf_object *obj, __u32 kern_version)
8881 return libbpf_err(-EINVAL);
8883 obj->kern_version = kern_version;
8888 int bpf_object__gen_loader(struct bpf_object *obj, struct gen_loader_opts *opts)
8890 struct bpf_gen *gen;
8894 if (!OPTS_VALID(opts, gen_loader_opts))
8896 gen = calloc(sizeof(*gen), 1);
8900 obj->gen_loader = gen;
8904 static struct bpf_program *
8905 __bpf_program__iter(const struct bpf_program *p, const struct bpf_object *obj, bool forward)
8908 size_t nr_programs = obj->nr_programs;
8915 /* Iterate from the beginning */
8916 return forward ? &obj->programs[0] :
8917 &obj->programs[nr_programs - 1];
8919 if (p->obj != obj) {
8920 pr_warn("error: program handle doesn't match object\n");
8921 return errno = EINVAL, NULL;
8924 idx = (p - obj->programs) + (forward ? 1 : -1);
8925 if (idx >= obj->nr_programs || idx < 0)
8927 return &obj->programs[idx];
8930 struct bpf_program *
8931 bpf_object__next_program(const struct bpf_object *obj, struct bpf_program *prev)
8933 struct bpf_program *prog = prev;
8936 prog = __bpf_program__iter(prog, obj, true);
8937 } while (prog && prog_is_subprog(obj, prog));
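/* Iteration sketch: bpf_object__for_each_program() in libbpf.h wraps
 * exactly this walk (subprogs are skipped):
 *
 *   struct bpf_program *p = NULL;
 *   while ((p = bpf_object__next_program(obj, p)))
 *       printf("prog: %s\n", bpf_program__name(p));
 */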
8942 struct bpf_program *
8943 bpf_object__prev_program(const struct bpf_object *obj, struct bpf_program *next)
8945 struct bpf_program *prog = next;
8948 prog = __bpf_program__iter(prog, obj, false);
8949 } while (prog && prog_is_subprog(obj, prog));
8954 void bpf_program__set_ifindex(struct bpf_program *prog, __u32 ifindex)
8956 prog->prog_ifindex = ifindex;
8959 const char *bpf_program__name(const struct bpf_program *prog)
8964 const char *bpf_program__section_name(const struct bpf_program *prog)
8966 return prog->sec_name;
8969 bool bpf_program__autoload(const struct bpf_program *prog)
8971 return prog->autoload;
8974 int bpf_program__set_autoload(struct bpf_program *prog, bool autoload)
8976 if (prog->obj->loaded)
8977 return libbpf_err(-EINVAL);
8979 prog->autoload = autoload;
8983 bool bpf_program__autoattach(const struct bpf_program *prog)
8985 return prog->autoattach;
8988 void bpf_program__set_autoattach(struct bpf_program *prog, bool autoattach)
8990 prog->autoattach = autoattach;
8993 const struct bpf_insn *bpf_program__insns(const struct bpf_program *prog)
8998 size_t bpf_program__insn_cnt(const struct bpf_program *prog)
9000 return prog->insns_cnt;
9003 int bpf_program__set_insns(struct bpf_program *prog,
9004 struct bpf_insn *new_insns, size_t new_insn_cnt)
9006 struct bpf_insn *insns;
9008 if (prog->obj->loaded)
9011 insns = libbpf_reallocarray(prog->insns, new_insn_cnt, sizeof(*insns));
9012 /* NULL is a valid return from reallocarray if the new count is zero */
9013 if (!insns && new_insn_cnt) {
9014 pr_warn("prog '%s': failed to realloc prog code\n", prog->name);
9017 memcpy(insns, new_insns, new_insn_cnt * sizeof(*insns));
9019 prog->insns = insns;
9020 prog->insns_cnt = new_insn_cnt;
9024 int bpf_program__fd(const struct bpf_program *prog)
9027 return libbpf_err(-EINVAL);
9030 return libbpf_err(-ENOENT);
9035 __alias(bpf_program__type)
9036 enum bpf_prog_type bpf_program__get_type(const struct bpf_program *prog);
9038 enum bpf_prog_type bpf_program__type(const struct bpf_program *prog)
9043 static size_t custom_sec_def_cnt;
9044 static struct bpf_sec_def *custom_sec_defs;
9045 static struct bpf_sec_def custom_fallback_def;
9046 static bool has_custom_fallback_def;
9047 static int last_custom_sec_def_handler_id;
9049 int bpf_program__set_type(struct bpf_program *prog, enum bpf_prog_type type)
9051 if (prog->obj->loaded)
9052 return libbpf_err(-EBUSY);
9054 /* if type is not changed, do nothing */
9055 if (prog->type == type)
9060 /* If a program type was changed, we need to reset associated SEC()
9061 * handler, as it will be invalid now. The only exception is a generic
9062 * fallback handler, which by definition is program type-agnostic and
9063 * is a catch-all custom handler, optionally set by the application,
9064 * so should be able to handle any type of BPF program.
9066 if (prog->sec_def != &custom_fallback_def)
9067 prog->sec_def = NULL;
9071 __alias(bpf_program__expected_attach_type)
9072 enum bpf_attach_type bpf_program__get_expected_attach_type(const struct bpf_program *prog);
9074 enum bpf_attach_type bpf_program__expected_attach_type(const struct bpf_program *prog)
9076 return prog->expected_attach_type;
9079 int bpf_program__set_expected_attach_type(struct bpf_program *prog,
9080 enum bpf_attach_type type)
9082 if (prog->obj->loaded)
9083 return libbpf_err(-EBUSY);
9085 prog->expected_attach_type = type;
9089 __u32 bpf_program__flags(const struct bpf_program *prog)
9091 return prog->prog_flags;
9094 int bpf_program__set_flags(struct bpf_program *prog, __u32 flags)
9096 if (prog->obj->loaded)
9097 return libbpf_err(-EBUSY);
9099 prog->prog_flags = flags;
9103 __u32 bpf_program__log_level(const struct bpf_program *prog)
9105 return prog->log_level;
9108 int bpf_program__set_log_level(struct bpf_program *prog, __u32 log_level)
9110 if (prog->obj->loaded)
9111 return libbpf_err(-EBUSY);
9113 prog->log_level = log_level;
9117 const char *bpf_program__log_buf(const struct bpf_program *prog, size_t *log_size)
9119 *log_size = prog->log_size;
9120 return prog->log_buf;
9123 int bpf_program__set_log_buf(struct bpf_program *prog, char *log_buf, size_t log_size)
9125 if (log_size && !log_buf)
9127 if (prog->log_size > UINT_MAX)
9129 if (prog->obj->loaded)
9132 prog->log_buf = log_buf;
9133 prog->log_size = log_size;
9137 #define SEC_DEF(sec_pfx, ptype, atype, flags, ...) { \
9138 .sec = (char *)sec_pfx, \
9139 .prog_type = BPF_PROG_TYPE_##ptype, \
9140 .expected_attach_type = atype, \
9141 .cookie = (long)(flags), \
9142 .prog_prepare_load_fn = libbpf_prepare_prog_load, \
9146 static int attach_kprobe(const struct bpf_program *prog, long cookie, struct bpf_link **link);
9147 static int attach_uprobe(const struct bpf_program *prog, long cookie, struct bpf_link **link);
9148 static int attach_ksyscall(const struct bpf_program *prog, long cookie, struct bpf_link **link);
9149 static int attach_usdt(const struct bpf_program *prog, long cookie, struct bpf_link **link);
9150 static int attach_tp(const struct bpf_program *prog, long cookie, struct bpf_link **link);
9151 static int attach_raw_tp(const struct bpf_program *prog, long cookie, struct bpf_link **link);
9152 static int attach_trace(const struct bpf_program *prog, long cookie, struct bpf_link **link);
9153 static int attach_kprobe_multi(const struct bpf_program *prog, long cookie, struct bpf_link **link);
9154 static int attach_uprobe_multi(const struct bpf_program *prog, long cookie, struct bpf_link **link);
9155 static int attach_lsm(const struct bpf_program *prog, long cookie, struct bpf_link **link);
9156 static int attach_iter(const struct bpf_program *prog, long cookie, struct bpf_link **link);
9158 static const struct bpf_sec_def section_defs[] = {
9159 SEC_DEF("socket", SOCKET_FILTER, 0, SEC_NONE),
9160 SEC_DEF("sk_reuseport/migrate", SK_REUSEPORT, BPF_SK_REUSEPORT_SELECT_OR_MIGRATE, SEC_ATTACHABLE),
9161 SEC_DEF("sk_reuseport", SK_REUSEPORT, BPF_SK_REUSEPORT_SELECT, SEC_ATTACHABLE),
9162 SEC_DEF("kprobe+", KPROBE, 0, SEC_NONE, attach_kprobe),
9163 SEC_DEF("uprobe+", KPROBE, 0, SEC_NONE, attach_uprobe),
9164 SEC_DEF("uprobe.s+", KPROBE, 0, SEC_SLEEPABLE, attach_uprobe),
9165 SEC_DEF("kretprobe+", KPROBE, 0, SEC_NONE, attach_kprobe),
9166 SEC_DEF("uretprobe+", KPROBE, 0, SEC_NONE, attach_uprobe),
9167 SEC_DEF("uretprobe.s+", KPROBE, 0, SEC_SLEEPABLE, attach_uprobe),
9168 SEC_DEF("kprobe.multi+", KPROBE, BPF_TRACE_KPROBE_MULTI, SEC_NONE, attach_kprobe_multi),
9169 SEC_DEF("kretprobe.multi+", KPROBE, BPF_TRACE_KPROBE_MULTI, SEC_NONE, attach_kprobe_multi),
9170 SEC_DEF("uprobe.multi+", KPROBE, BPF_TRACE_UPROBE_MULTI, SEC_NONE, attach_uprobe_multi),
9171 SEC_DEF("uretprobe.multi+", KPROBE, BPF_TRACE_UPROBE_MULTI, SEC_NONE, attach_uprobe_multi),
9172 SEC_DEF("uprobe.multi.s+", KPROBE, BPF_TRACE_UPROBE_MULTI, SEC_SLEEPABLE, attach_uprobe_multi),
9173 SEC_DEF("uretprobe.multi.s+", KPROBE, BPF_TRACE_UPROBE_MULTI, SEC_SLEEPABLE, attach_uprobe_multi),
9174 SEC_DEF("ksyscall+", KPROBE, 0, SEC_NONE, attach_ksyscall),
9175 SEC_DEF("kretsyscall+", KPROBE, 0, SEC_NONE, attach_ksyscall),
9176 SEC_DEF("usdt+", KPROBE, 0, SEC_USDT, attach_usdt),
9177 SEC_DEF("usdt.s+", KPROBE, 0, SEC_USDT | SEC_SLEEPABLE, attach_usdt),
9178 SEC_DEF("tc/ingress", SCHED_CLS, BPF_TCX_INGRESS, SEC_NONE), /* alias for tcx */
9179 SEC_DEF("tc/egress", SCHED_CLS, BPF_TCX_EGRESS, SEC_NONE), /* alias for tcx */
9180 SEC_DEF("tcx/ingress", SCHED_CLS, BPF_TCX_INGRESS, SEC_NONE),
9181 SEC_DEF("tcx/egress", SCHED_CLS, BPF_TCX_EGRESS, SEC_NONE),
9182 SEC_DEF("tc", SCHED_CLS, 0, SEC_NONE), /* deprecated / legacy, use tcx */
9183 SEC_DEF("classifier", SCHED_CLS, 0, SEC_NONE), /* deprecated / legacy, use tcx */
9184 SEC_DEF("action", SCHED_ACT, 0, SEC_NONE), /* deprecated / legacy, use tcx */
9185 SEC_DEF("netkit/primary", SCHED_CLS, BPF_NETKIT_PRIMARY, SEC_NONE),
9186 SEC_DEF("netkit/peer", SCHED_CLS, BPF_NETKIT_PEER, SEC_NONE),
9187 SEC_DEF("tracepoint+", TRACEPOINT, 0, SEC_NONE, attach_tp),
9188 SEC_DEF("tp+", TRACEPOINT, 0, SEC_NONE, attach_tp),
9189 SEC_DEF("raw_tracepoint+", RAW_TRACEPOINT, 0, SEC_NONE, attach_raw_tp),
9190 SEC_DEF("raw_tp+", RAW_TRACEPOINT, 0, SEC_NONE, attach_raw_tp),
9191 SEC_DEF("raw_tracepoint.w+", RAW_TRACEPOINT_WRITABLE, 0, SEC_NONE, attach_raw_tp),
9192 SEC_DEF("raw_tp.w+", RAW_TRACEPOINT_WRITABLE, 0, SEC_NONE, attach_raw_tp),
9193 SEC_DEF("tp_btf+", TRACING, BPF_TRACE_RAW_TP, SEC_ATTACH_BTF, attach_trace),
9194 SEC_DEF("fentry+", TRACING, BPF_TRACE_FENTRY, SEC_ATTACH_BTF, attach_trace),
9195 SEC_DEF("fmod_ret+", TRACING, BPF_MODIFY_RETURN, SEC_ATTACH_BTF, attach_trace),
9196 SEC_DEF("fexit+", TRACING, BPF_TRACE_FEXIT, SEC_ATTACH_BTF, attach_trace),
9197 SEC_DEF("fentry.s+", TRACING, BPF_TRACE_FENTRY, SEC_ATTACH_BTF | SEC_SLEEPABLE, attach_trace),
9198 SEC_DEF("fmod_ret.s+", TRACING, BPF_MODIFY_RETURN, SEC_ATTACH_BTF | SEC_SLEEPABLE, attach_trace),
9199 SEC_DEF("fexit.s+", TRACING, BPF_TRACE_FEXIT, SEC_ATTACH_BTF | SEC_SLEEPABLE, attach_trace),
9200 SEC_DEF("freplace+", EXT, 0, SEC_ATTACH_BTF, attach_trace),
9201 SEC_DEF("lsm+", LSM, BPF_LSM_MAC, SEC_ATTACH_BTF, attach_lsm),
9202 SEC_DEF("lsm.s+", LSM, BPF_LSM_MAC, SEC_ATTACH_BTF | SEC_SLEEPABLE, attach_lsm),
9203 SEC_DEF("lsm_cgroup+", LSM, BPF_LSM_CGROUP, SEC_ATTACH_BTF),
9204 SEC_DEF("iter+", TRACING, BPF_TRACE_ITER, SEC_ATTACH_BTF, attach_iter),
9205 SEC_DEF("iter.s+", TRACING, BPF_TRACE_ITER, SEC_ATTACH_BTF | SEC_SLEEPABLE, attach_iter),
9206 SEC_DEF("syscall", SYSCALL, 0, SEC_SLEEPABLE),
9207 SEC_DEF("xdp.frags/devmap", XDP, BPF_XDP_DEVMAP, SEC_XDP_FRAGS),
9208 SEC_DEF("xdp/devmap", XDP, BPF_XDP_DEVMAP, SEC_ATTACHABLE),
9209 SEC_DEF("xdp.frags/cpumap", XDP, BPF_XDP_CPUMAP, SEC_XDP_FRAGS),
9210 SEC_DEF("xdp/cpumap", XDP, BPF_XDP_CPUMAP, SEC_ATTACHABLE),
9211 SEC_DEF("xdp.frags", XDP, BPF_XDP, SEC_XDP_FRAGS),
9212 SEC_DEF("xdp", XDP, BPF_XDP, SEC_ATTACHABLE_OPT),
9213 SEC_DEF("perf_event", PERF_EVENT, 0, SEC_NONE),
9214 SEC_DEF("lwt_in", LWT_IN, 0, SEC_NONE),
9215 SEC_DEF("lwt_out", LWT_OUT, 0, SEC_NONE),
9216 SEC_DEF("lwt_xmit", LWT_XMIT, 0, SEC_NONE),
9217 SEC_DEF("lwt_seg6local", LWT_SEG6LOCAL, 0, SEC_NONE),
9218 SEC_DEF("sockops", SOCK_OPS, BPF_CGROUP_SOCK_OPS, SEC_ATTACHABLE_OPT),
9219 SEC_DEF("sk_skb/stream_parser", SK_SKB, BPF_SK_SKB_STREAM_PARSER, SEC_ATTACHABLE_OPT),
9220 SEC_DEF("sk_skb/stream_verdict",SK_SKB, BPF_SK_SKB_STREAM_VERDICT, SEC_ATTACHABLE_OPT),
9221 SEC_DEF("sk_skb", SK_SKB, 0, SEC_NONE),
9222 SEC_DEF("sk_msg", SK_MSG, BPF_SK_MSG_VERDICT, SEC_ATTACHABLE_OPT),
9223 SEC_DEF("lirc_mode2", LIRC_MODE2, BPF_LIRC_MODE2, SEC_ATTACHABLE_OPT),
9224 SEC_DEF("flow_dissector", FLOW_DISSECTOR, BPF_FLOW_DISSECTOR, SEC_ATTACHABLE_OPT),
9225 SEC_DEF("cgroup_skb/ingress", CGROUP_SKB, BPF_CGROUP_INET_INGRESS, SEC_ATTACHABLE_OPT),
9226 SEC_DEF("cgroup_skb/egress", CGROUP_SKB, BPF_CGROUP_INET_EGRESS, SEC_ATTACHABLE_OPT),
9227 SEC_DEF("cgroup/skb", CGROUP_SKB, 0, SEC_NONE),
9228 SEC_DEF("cgroup/sock_create", CGROUP_SOCK, BPF_CGROUP_INET_SOCK_CREATE, SEC_ATTACHABLE),
9229 SEC_DEF("cgroup/sock_release", CGROUP_SOCK, BPF_CGROUP_INET_SOCK_RELEASE, SEC_ATTACHABLE),
9230 SEC_DEF("cgroup/sock", CGROUP_SOCK, BPF_CGROUP_INET_SOCK_CREATE, SEC_ATTACHABLE_OPT),
9231 SEC_DEF("cgroup/post_bind4", CGROUP_SOCK, BPF_CGROUP_INET4_POST_BIND, SEC_ATTACHABLE),
9232 SEC_DEF("cgroup/post_bind6", CGROUP_SOCK, BPF_CGROUP_INET6_POST_BIND, SEC_ATTACHABLE),
9233 SEC_DEF("cgroup/bind4", CGROUP_SOCK_ADDR, BPF_CGROUP_INET4_BIND, SEC_ATTACHABLE),
9234 SEC_DEF("cgroup/bind6", CGROUP_SOCK_ADDR, BPF_CGROUP_INET6_BIND, SEC_ATTACHABLE),
9235 SEC_DEF("cgroup/connect4", CGROUP_SOCK_ADDR, BPF_CGROUP_INET4_CONNECT, SEC_ATTACHABLE),
9236 SEC_DEF("cgroup/connect6", CGROUP_SOCK_ADDR, BPF_CGROUP_INET6_CONNECT, SEC_ATTACHABLE),
9237 SEC_DEF("cgroup/connect_unix", CGROUP_SOCK_ADDR, BPF_CGROUP_UNIX_CONNECT, SEC_ATTACHABLE),
9238 SEC_DEF("cgroup/sendmsg4", CGROUP_SOCK_ADDR, BPF_CGROUP_UDP4_SENDMSG, SEC_ATTACHABLE),
9239 SEC_DEF("cgroup/sendmsg6", CGROUP_SOCK_ADDR, BPF_CGROUP_UDP6_SENDMSG, SEC_ATTACHABLE),
9240 SEC_DEF("cgroup/sendmsg_unix", CGROUP_SOCK_ADDR, BPF_CGROUP_UNIX_SENDMSG, SEC_ATTACHABLE),
9241 SEC_DEF("cgroup/recvmsg4", CGROUP_SOCK_ADDR, BPF_CGROUP_UDP4_RECVMSG, SEC_ATTACHABLE),
9242 SEC_DEF("cgroup/recvmsg6", CGROUP_SOCK_ADDR, BPF_CGROUP_UDP6_RECVMSG, SEC_ATTACHABLE),
9243 SEC_DEF("cgroup/recvmsg_unix", CGROUP_SOCK_ADDR, BPF_CGROUP_UNIX_RECVMSG, SEC_ATTACHABLE),
9244 SEC_DEF("cgroup/getpeername4", CGROUP_SOCK_ADDR, BPF_CGROUP_INET4_GETPEERNAME, SEC_ATTACHABLE),
9245 SEC_DEF("cgroup/getpeername6", CGROUP_SOCK_ADDR, BPF_CGROUP_INET6_GETPEERNAME, SEC_ATTACHABLE),
9246 SEC_DEF("cgroup/getpeername_unix", CGROUP_SOCK_ADDR, BPF_CGROUP_UNIX_GETPEERNAME, SEC_ATTACHABLE),
9247 SEC_DEF("cgroup/getsockname4", CGROUP_SOCK_ADDR, BPF_CGROUP_INET4_GETSOCKNAME, SEC_ATTACHABLE),
9248 SEC_DEF("cgroup/getsockname6", CGROUP_SOCK_ADDR, BPF_CGROUP_INET6_GETSOCKNAME, SEC_ATTACHABLE),
9249 SEC_DEF("cgroup/getsockname_unix", CGROUP_SOCK_ADDR, BPF_CGROUP_UNIX_GETSOCKNAME, SEC_ATTACHABLE),
9250 SEC_DEF("cgroup/sysctl", CGROUP_SYSCTL, BPF_CGROUP_SYSCTL, SEC_ATTACHABLE),
9251 SEC_DEF("cgroup/getsockopt", CGROUP_SOCKOPT, BPF_CGROUP_GETSOCKOPT, SEC_ATTACHABLE),
9252 SEC_DEF("cgroup/setsockopt", CGROUP_SOCKOPT, BPF_CGROUP_SETSOCKOPT, SEC_ATTACHABLE),
9253 SEC_DEF("cgroup/dev", CGROUP_DEVICE, BPF_CGROUP_DEVICE, SEC_ATTACHABLE_OPT),
9254 SEC_DEF("struct_ops+", STRUCT_OPS, 0, SEC_NONE),
9255 SEC_DEF("struct_ops.s+", STRUCT_OPS, 0, SEC_SLEEPABLE),
9256 SEC_DEF("sk_lookup", SK_LOOKUP, BPF_SK_LOOKUP, SEC_ATTACHABLE),
9257 SEC_DEF("netfilter", NETFILTER, BPF_NETFILTER, SEC_NONE),
9260 int libbpf_register_prog_handler(const char *sec,
9261 enum bpf_prog_type prog_type,
9262 enum bpf_attach_type exp_attach_type,
9263 const struct libbpf_prog_handler_opts *opts)
9265 struct bpf_sec_def *sec_def;
9267 if (!OPTS_VALID(opts, libbpf_prog_handler_opts))
9268 return libbpf_err(-EINVAL);
9270 if (last_custom_sec_def_handler_id == INT_MAX) /* prevent overflow */
9271 return libbpf_err(-E2BIG);
9274 sec_def = libbpf_reallocarray(custom_sec_defs, custom_sec_def_cnt + 1,
9277 return libbpf_err(-ENOMEM);
9279 custom_sec_defs = sec_def;
9280 sec_def = &custom_sec_defs[custom_sec_def_cnt];
9282 if (has_custom_fallback_def)
9283 return libbpf_err(-EBUSY);
9285 sec_def = &custom_fallback_def;
9288 sec_def->sec = sec ? strdup(sec) : NULL;
9289 if (sec && !sec_def->sec)
9290 return libbpf_err(-ENOMEM);
9292 sec_def->prog_type = prog_type;
9293 sec_def->expected_attach_type = exp_attach_type;
9294 sec_def->cookie = OPTS_GET(opts, cookie, 0);
9296 sec_def->prog_setup_fn = OPTS_GET(opts, prog_setup_fn, NULL);
9297 sec_def->prog_prepare_load_fn = OPTS_GET(opts, prog_prepare_load_fn, NULL);
9298 sec_def->prog_attach_fn = OPTS_GET(opts, prog_attach_fn, NULL);
9300 sec_def->handler_id = ++last_custom_sec_def_handler_id;
9303 custom_sec_def_cnt++;
9305 has_custom_fallback_def = true;
9307 return sec_def->handler_id;
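/* Illustrative sketch (not part of libbpf itself): register a handler for
 * a custom "mytool" section prefix, later tear it down; error handling is
 * elided and the names are hypothetical.
 *
 *	LIBBPF_OPTS(libbpf_prog_handler_opts, hopts);
 *	int id;
 *
 *	id = libbpf_register_prog_handler("mytool+", BPF_PROG_TYPE_KPROBE,
 *					  0, &hopts);
 *	// ... open/load/attach objects using SEC("mytool/...") programs ...
 *	libbpf_unregister_prog_handler(id);
 */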
9310 int libbpf_unregister_prog_handler(int handler_id)
9312 struct bpf_sec_def *sec_defs;
9315 if (handler_id <= 0)
9316 return libbpf_err(-EINVAL);
9318 if (has_custom_fallback_def && custom_fallback_def.handler_id == handler_id) {
9319 memset(&custom_fallback_def, 0, sizeof(custom_fallback_def));
9320 has_custom_fallback_def = false;
9324 for (i = 0; i < custom_sec_def_cnt; i++) {
9325 if (custom_sec_defs[i].handler_id == handler_id)
9329 if (i == custom_sec_def_cnt)
9330 return libbpf_err(-ENOENT);
9332 free(custom_sec_defs[i].sec);
9333 for (i = i + 1; i < custom_sec_def_cnt; i++)
9334 custom_sec_defs[i - 1] = custom_sec_defs[i];
9335 custom_sec_def_cnt--;
9337 /* try to shrink the array, but it's ok if we couldn't */
9338 sec_defs = libbpf_reallocarray(custom_sec_defs, custom_sec_def_cnt, sizeof(*sec_defs));
9339 /* if new count is zero, reallocarray can return a valid NULL result;
9340 * in this case the previous pointer will be freed, so we *have to*
9341 * reassign old pointer to the new value (even if it's NULL)
9343 if (sec_defs || custom_sec_def_cnt == 0)
9344 custom_sec_defs = sec_defs;
9349 static bool sec_def_matches(const struct bpf_sec_def *sec_def, const char *sec_name)
9351 size_t len = strlen(sec_def->sec);
9353 /* "type/" always has to have proper SEC("type/extras") form */
9354 if (sec_def->sec[len - 1] == '/') {
9355 if (str_has_pfx(sec_name, sec_def->sec))
9360 /* "type+" means it can be either exact SEC("type") or
9361 * well-formed SEC("type/extras") with proper '/' separator
9363 if (sec_def->sec[len - 1] == '+') {
9365 /* not even a prefix */
9366 if (strncmp(sec_name, sec_def->sec, len) != 0)
9368 /* exact match or has '/' separator */
9369 if (sec_name[len] == '\0' || sec_name[len] == '/')
9374 return strcmp(sec_name, sec_def->sec) == 0;
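/* Worked examples of the matching rules above, using the "kprobe+"
 * definition from section_defs (illustrative):
 *
 *	struct bpf_sec_def def = { .sec = (char *)"kprobe+" };
 *
 *	sec_def_matches(&def, "kprobe");             // true: exact match
 *	sec_def_matches(&def, "kprobe/do_unlinkat"); // true: '/' + extras
 *	sec_def_matches(&def, "kprobexyz");          // false: no separator
 */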
9377 static const struct bpf_sec_def *find_sec_def(const char *sec_name)
9379 const struct bpf_sec_def *sec_def;
9382 n = custom_sec_def_cnt;
9383 for (i = 0; i < n; i++) {
9384 sec_def = &custom_sec_defs[i];
9385 if (sec_def_matches(sec_def, sec_name))
9389 n = ARRAY_SIZE(section_defs);
9390 for (i = 0; i < n; i++) {
9391 sec_def = &section_defs[i];
9392 if (sec_def_matches(sec_def, sec_name))
9396 if (has_custom_fallback_def)
9397 return &custom_fallback_def;
9402 #define MAX_TYPE_NAME_SIZE 32
9404 static char *libbpf_get_type_names(bool attach_type)
9406 int i, len = ARRAY_SIZE(section_defs) * MAX_TYPE_NAME_SIZE;
9414 /* Compose a string buffer with all available names */
9415 for (i = 0; i < ARRAY_SIZE(section_defs); i++) {
9416 const struct bpf_sec_def *sec_def = &section_defs[i];
9419 if (sec_def->prog_prepare_load_fn != libbpf_prepare_prog_load)
9422 if (!(sec_def->cookie & SEC_ATTACHABLE))
9426 if (strlen(buf) + strlen(section_defs[i].sec) + 2 > len) {
9431 strcat(buf, section_defs[i].sec);
9437 int libbpf_prog_type_by_name(const char *name, enum bpf_prog_type *prog_type,
9438 enum bpf_attach_type *expected_attach_type)
9440 const struct bpf_sec_def *sec_def;
9444 return libbpf_err(-EINVAL);
9446 sec_def = find_sec_def(name);
9448 *prog_type = sec_def->prog_type;
9449 *expected_attach_type = sec_def->expected_attach_type;
9453 pr_debug("failed to guess program type from ELF section '%s'\n", name);
9454 type_names = libbpf_get_type_names(false);
9455 if (type_names != NULL) {
9456 pr_debug("supported section(type) names are:%s\n", type_names);
9460 return libbpf_err(-ESRCH);
9463 const char *libbpf_bpf_attach_type_str(enum bpf_attach_type t)
9465 if (t < 0 || t >= ARRAY_SIZE(attach_type_name))
9468 return attach_type_name[t];
9471 const char *libbpf_bpf_link_type_str(enum bpf_link_type t)
9473 if (t < 0 || t >= ARRAY_SIZE(link_type_name))
9476 return link_type_name[t];
9479 const char *libbpf_bpf_map_type_str(enum bpf_map_type t)
9481 if (t < 0 || t >= ARRAY_SIZE(map_type_name))
9484 return map_type_name[t];
9487 const char *libbpf_bpf_prog_type_str(enum bpf_prog_type t)
9489 if (t < 0 || t >= ARRAY_SIZE(prog_type_name))
9492 return prog_type_name[t];
9495 static struct bpf_map *find_struct_ops_map_by_offset(struct bpf_object *obj,
9499 struct bpf_map *map;
9502 for (i = 0; i < obj->nr_maps; i++) {
9503 map = &obj->maps[i];
9504 if (!bpf_map__is_struct_ops(map))
9506 if (map->sec_idx == sec_idx &&
9507 map->sec_offset <= offset &&
9508 offset - map->sec_offset < map->def.value_size)
9515 /* Collect the reloc from ELF, populate the st_ops->progs[], and update
9516 * st_ops->data for shadow type.
9518 static int bpf_object__collect_st_ops_relos(struct bpf_object *obj,
9519 Elf64_Shdr *shdr, Elf_Data *data)
9521 const struct btf_member *member;
9522 struct bpf_struct_ops *st_ops;
9523 struct bpf_program *prog;
9524 unsigned int shdr_idx;
9525 const struct btf *btf;
9526 struct bpf_map *map;
9527 unsigned int moff, insn_idx;
9535 nrels = shdr->sh_size / shdr->sh_entsize;
9536 for (i = 0; i < nrels; i++) {
9537 rel = elf_rel_by_idx(data, i);
9539 pr_warn("struct_ops reloc: failed to get %d reloc\n", i);
9540 return -LIBBPF_ERRNO__FORMAT;
9543 sym = elf_sym_by_idx(obj, ELF64_R_SYM(rel->r_info));
9545 pr_warn("struct_ops reloc: symbol %zx not found\n",
9546 (size_t)ELF64_R_SYM(rel->r_info));
9547 return -LIBBPF_ERRNO__FORMAT;
9550 name = elf_sym_str(obj, sym->st_name) ?: "<?>";
9551 map = find_struct_ops_map_by_offset(obj, shdr->sh_info, rel->r_offset);
9553 pr_warn("struct_ops reloc: cannot find map at rel->r_offset %zu\n",
9554 (size_t)rel->r_offset);
9558 moff = rel->r_offset - map->sec_offset;
9559 shdr_idx = sym->st_shndx;
9560 st_ops = map->st_ops;
9561 pr_debug("struct_ops reloc %s: for %lld value %lld shdr_idx %u rel->r_offset %zu map->sec_offset %zu name %d (\'%s\')\n",
9563 (long long)(rel->r_info >> 32),
9564 (long long)sym->st_value,
9565 shdr_idx, (size_t)rel->r_offset,
9566 map->sec_offset, sym->st_name, name);
9568 if (shdr_idx >= SHN_LORESERVE) {
9569 pr_warn("struct_ops reloc %s: rel->r_offset %zu shdr_idx %u unsupported non-static function\n",
9570 map->name, (size_t)rel->r_offset, shdr_idx);
9571 return -LIBBPF_ERRNO__RELOC;
9573 if (sym->st_value % BPF_INSN_SZ) {
9574 pr_warn("struct_ops reloc %s: invalid target program offset %llu\n",
9575 map->name, (unsigned long long)sym->st_value);
9576 return -LIBBPF_ERRNO__FORMAT;
9578 insn_idx = sym->st_value / BPF_INSN_SZ;
9580 member = find_member_by_offset(st_ops->type, moff * 8);
9582 pr_warn("struct_ops reloc %s: cannot find member at moff %u\n",
9586 member_idx = member - btf_members(st_ops->type);
9587 name = btf__name_by_offset(btf, member->name_off);
9589 if (!resolve_func_ptr(btf, member->type, NULL)) {
9590 pr_warn("struct_ops reloc %s: cannot relocate non func ptr %s\n",
9595 prog = find_prog_by_sec_insn(obj, shdr_idx, insn_idx);
9597 pr_warn("struct_ops reloc %s: cannot find prog at shdr_idx %u to relocate func ptr %s\n",
9598 map->name, shdr_idx, name);
9602 /* prevent the use of BPF prog with invalid type */
9603 if (prog->type != BPF_PROG_TYPE_STRUCT_OPS) {
9604 pr_warn("struct_ops reloc %s: prog %s is not struct_ops BPF program\n",
9605 map->name, prog->name);
9609 st_ops->progs[member_idx] = prog;
9611 /* st_ops->data will be exposed to users, being returned by
9612 * bpf_map__initial_value() as a pointer to the shadow
9613 * type. All function pointers in the original struct type
9614 * should be converted to a pointer to struct bpf_program
9615 * in the shadow type.
9617 *((struct bpf_program **)(st_ops->data + moff)) = prog;
9623 #define BTF_TRACE_PREFIX "btf_trace_"
9624 #define BTF_LSM_PREFIX "bpf_lsm_"
9625 #define BTF_ITER_PREFIX "bpf_iter_"
9626 #define BTF_MAX_NAME_SIZE 128
9628 void btf_get_kernel_prefix_kind(enum bpf_attach_type attach_type,
9629 const char **prefix, int *kind)
9631 switch (attach_type) {
9632 case BPF_TRACE_RAW_TP:
9633 *prefix = BTF_TRACE_PREFIX;
9634 *kind = BTF_KIND_TYPEDEF;
9637 case BPF_LSM_CGROUP:
9638 *prefix = BTF_LSM_PREFIX;
9639 *kind = BTF_KIND_FUNC;
9641 case BPF_TRACE_ITER:
9642 *prefix = BTF_ITER_PREFIX;
9643 *kind = BTF_KIND_FUNC;
9647 *kind = BTF_KIND_FUNC;
9651 static int find_btf_by_prefix_kind(const struct btf *btf, const char *prefix,
9652 const char *name, __u32 kind)
9654 char btf_type_name[BTF_MAX_NAME_SIZE];
9657 ret = snprintf(btf_type_name, sizeof(btf_type_name),
9658 "%s%s", prefix, name);
9659 /* snprintf returns the number of characters written excluding the
9660 * terminating null, so a return value >= BTF_MAX_NAME_SIZE
9661 * indicates truncation.
9663 if (ret < 0 || ret >= sizeof(btf_type_name))
9664 return -ENAMETOOLONG;
9665 return btf__find_by_name_kind(btf, btf_type_name, kind);
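/* Worked example of the prefix/kind mapping above (illustrative): for a
 * SEC("tp_btf/sched_switch") program, attach_type is BPF_TRACE_RAW_TP, so
 * the lookup amounts to:
 *
 *	find_btf_by_prefix_kind(btf, BTF_TRACE_PREFIX, "sched_switch",
 *				BTF_KIND_TYPEDEF);
 *
 * i.e. searching vmlinux BTF for the "btf_trace_sched_switch" typedef.
 */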
9668 static inline int find_attach_btf_id(struct btf *btf, const char *name,
9669 enum bpf_attach_type attach_type)
9674 btf_get_kernel_prefix_kind(attach_type, &prefix, &kind);
9675 return find_btf_by_prefix_kind(btf, prefix, name, kind);
9678 int libbpf_find_vmlinux_btf_id(const char *name,
9679 enum bpf_attach_type attach_type)
9684 btf = btf__load_vmlinux_btf();
9685 err = libbpf_get_error(btf);
9687 pr_warn("vmlinux BTF is not found\n");
9688 return libbpf_err(err);
9691 err = find_attach_btf_id(btf, name, attach_type);
9693 pr_warn("%s is not found in vmlinux BTF\n", name);
9696 return libbpf_err(err);
9699 static int libbpf_find_prog_btf_id(const char *name, __u32 attach_prog_fd)
9701 struct bpf_prog_info info;
9702 __u32 info_len = sizeof(info);
9706 memset(&info, 0, info_len);
9707 err = bpf_prog_get_info_by_fd(attach_prog_fd, &info, &info_len);
9709 pr_warn("failed bpf_prog_get_info_by_fd for FD %d: %d\n",
9710 attach_prog_fd, err);
9716 pr_warn("The target program doesn't have BTF\n");
9719 btf = btf__load_from_kernel_by_id(info.btf_id);
9720 err = libbpf_get_error(btf);
9722 pr_warn("Failed to get BTF %d of the program: %d\n", info.btf_id, err);
9725 err = btf__find_by_name_kind(btf, name, BTF_KIND_FUNC);
9728 pr_warn("%s is not found in prog's BTF\n", name);
9735 static int find_kernel_btf_id(struct bpf_object *obj, const char *attach_name,
9736 enum bpf_attach_type attach_type,
9737 int *btf_obj_fd, int *btf_type_id)
9741 ret = find_attach_btf_id(obj->btf_vmlinux, attach_name, attach_type);
9743 *btf_obj_fd = 0; /* vmlinux BTF */
9750 ret = load_module_btfs(obj);
9754 for (i = 0; i < obj->btf_module_cnt; i++) {
9755 const struct module_btf *mod = &obj->btf_modules[i];
9757 ret = find_attach_btf_id(mod->btf, attach_name, attach_type);
9759 *btf_obj_fd = mod->fd;
9772 static int libbpf_find_attach_btf_id(struct bpf_program *prog, const char *attach_name,
9773 int *btf_obj_fd, int *btf_type_id)
9775 enum bpf_attach_type attach_type = prog->expected_attach_type;
9776 __u32 attach_prog_fd = prog->attach_prog_fd;
9779 /* BPF program's BTF ID */
9780 if (prog->type == BPF_PROG_TYPE_EXT || attach_prog_fd) {
9781 if (!attach_prog_fd) {
9782 pr_warn("prog '%s': attach program FD is not set\n", prog->name);
9785 err = libbpf_find_prog_btf_id(attach_name, attach_prog_fd);
9787 pr_warn("prog '%s': failed to find BPF program (FD %d) BTF ID for '%s': %d\n",
9788 prog->name, attach_prog_fd, attach_name, err);
9796 /* kernel/module BTF ID */
9797 if (prog->obj->gen_loader) {
9798 bpf_gen__record_attach_target(prog->obj->gen_loader, attach_name, attach_type);
9802 err = find_kernel_btf_id(prog->obj, attach_name,
9803 attach_type, btf_obj_fd,
9807 pr_warn("prog '%s': failed to find kernel BTF type ID of '%s': %d\n",
9808 prog->name, attach_name, err);
9814 int libbpf_attach_type_by_name(const char *name,
9815 enum bpf_attach_type *attach_type)
9818 const struct bpf_sec_def *sec_def;
9821 return libbpf_err(-EINVAL);
9823 sec_def = find_sec_def(name);
9825 pr_debug("failed to guess attach type based on ELF section name '%s'\n", name);
9826 type_names = libbpf_get_type_names(true);
9827 if (type_names != NULL) {
9828 pr_debug("attachable section(type) names are:%s\n", type_names);
9832 return libbpf_err(-EINVAL);
9835 if (sec_def->prog_prepare_load_fn != libbpf_prepare_prog_load)
9836 return libbpf_err(-EINVAL);
9837 if (!(sec_def->cookie & SEC_ATTACHABLE))
9838 return libbpf_err(-EINVAL);
9840 *attach_type = sec_def->expected_attach_type;
9844 int bpf_map__fd(const struct bpf_map *map)
9847 return libbpf_err(-EINVAL);
9848 if (!map_is_created(map))
9853 static bool map_uses_real_name(const struct bpf_map *map)
9855 /* Since libbpf started to support custom .data.* and .rodata.* maps,
9856 * their user-visible name differs from kernel-visible name. Users see
9857 * such map's corresponding ELF section name as a map name.
9858 * This check distinguishes .data/.rodata from .data.* and .rodata.*
9859 * maps to know which name has to be returned to the user.
9861 if (map->libbpf_type == LIBBPF_MAP_DATA && strcmp(map->real_name, DATA_SEC) != 0)
9863 if (map->libbpf_type == LIBBPF_MAP_RODATA && strcmp(map->real_name, RODATA_SEC) != 0)
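/* Illustrative example of the rule above: for an object with both a
 * default `.data` and a custom `.data.cfg` section, bpf_map__name()
 * returns the kernel-visible "<obj>.data" for the former, but the full
 * ELF section name for the latter:
 *
 *	const char *n = bpf_map__name(cfg_map); // ".data.cfg"
 */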
9868 const char *bpf_map__name(const struct bpf_map *map)
9873 if (map_uses_real_name(map))
9874 return map->real_name;
9879 enum bpf_map_type bpf_map__type(const struct bpf_map *map)
9881 return map->def.type;
9884 int bpf_map__set_type(struct bpf_map *map, enum bpf_map_type type)
9886 if (map_is_created(map))
9887 return libbpf_err(-EBUSY);
9888 map->def.type = type;
9892 __u32 bpf_map__map_flags(const struct bpf_map *map)
9894 return map->def.map_flags;
9897 int bpf_map__set_map_flags(struct bpf_map *map, __u32 flags)
9899 if (map_is_created(map))
9900 return libbpf_err(-EBUSY);
9901 map->def.map_flags = flags;
9905 __u64 bpf_map__map_extra(const struct bpf_map *map)
9907 return map->map_extra;
9910 int bpf_map__set_map_extra(struct bpf_map *map, __u64 map_extra)
9912 if (map_is_created(map))
9913 return libbpf_err(-EBUSY);
9914 map->map_extra = map_extra;
9918 __u32 bpf_map__numa_node(const struct bpf_map *map)
9920 return map->numa_node;
9923 int bpf_map__set_numa_node(struct bpf_map *map, __u32 numa_node)
9925 if (map_is_created(map))
9926 return libbpf_err(-EBUSY);
9927 map->numa_node = numa_node;
9931 __u32 bpf_map__key_size(const struct bpf_map *map)
9933 return map->def.key_size;
9936 int bpf_map__set_key_size(struct bpf_map *map, __u32 size)
9938 if (map_is_created(map))
9939 return libbpf_err(-EBUSY);
9940 map->def.key_size = size;
9944 __u32 bpf_map__value_size(const struct bpf_map *map)
9946 return map->def.value_size;
9949 static int map_btf_datasec_resize(struct bpf_map *map, __u32 size)
9952 struct btf_type *datasec_type, *var_type;
9953 struct btf_var_secinfo *var;
9954 const struct btf_type *array_type;
9955 const struct btf_array *array;
9956 int vlen, element_sz, new_array_id;
9959 /* check btf existence */
9960 btf = bpf_object__btf(map->obj);
9964 /* verify map is datasec */
9965 datasec_type = btf_type_by_id(btf, bpf_map__btf_value_type_id(map));
9966 if (!btf_is_datasec(datasec_type)) {
9967 pr_warn("map '%s': cannot be resized, map value type is not a datasec\n",
9968 bpf_map__name(map));
9972 /* verify datasec has at least one var */
9973 vlen = btf_vlen(datasec_type);
9975 pr_warn("map '%s': cannot be resized, map value datasec is empty\n",
9976 bpf_map__name(map));
9980 /* verify last var in the datasec is an array */
9981 var = &btf_var_secinfos(datasec_type)[vlen - 1];
9982 var_type = btf_type_by_id(btf, var->type);
9983 array_type = skip_mods_and_typedefs(btf, var_type->type, NULL);
9984 if (!btf_is_array(array_type)) {
9985 pr_warn("map '%s': cannot be resized, last var must be an array\n",
9986 bpf_map__name(map));
9990 /* verify request size aligns with array */
9991 array = btf_array(array_type);
9992 element_sz = btf__resolve_size(btf, array->type);
9993 if (element_sz <= 0 || (size - var->offset) % element_sz != 0) {
9994 pr_warn("map '%s': cannot be resized, element size (%d) doesn't align with new total size (%u)\n",
9995 bpf_map__name(map), element_sz, size);
9999 /* create a new array based on the existing array, but with new length */
10000 nr_elements = (size - var->offset) / element_sz;
10001 new_array_id = btf__add_array(btf, array->index_type, array->type, nr_elements);
10002 if (new_array_id < 0)
10003 return new_array_id;
10005 /* adding a new btf type invalidates existing pointers to btf objects,
10006 * so refresh pointers before proceeding
10008 datasec_type = btf_type_by_id(btf, map->btf_value_type_id);
10009 var = &btf_var_secinfos(datasec_type)[vlen - 1];
10010 var_type = btf_type_by_id(btf, var->type);
10012 /* finally update btf info */
10013 datasec_type->size = size;
10014 var->size = size - var->offset;
10015 var_type->type = new_array_id;
10020 int bpf_map__set_value_size(struct bpf_map *map, __u32 size)
10022 if (map->obj->loaded || map->reused)
10023 return libbpf_err(-EBUSY);
10026 size_t mmap_old_sz, mmap_new_sz;
10029 if (map->def.type != BPF_MAP_TYPE_ARRAY)
10030 return -EOPNOTSUPP;
10032 mmap_old_sz = bpf_map_mmap_sz(map);
10033 mmap_new_sz = array_map_mmap_sz(size, map->def.max_entries);
10034 err = bpf_map_mmap_resize(map, mmap_old_sz, mmap_new_sz);
10036 pr_warn("map '%s': failed to resize memory-mapped region: %d\n",
10037 bpf_map__name(map), err);
10040 err = map_btf_datasec_resize(map, size);
10041 if (err && err != -ENOENT) {
10042 pr_warn("map '%s': failed to adjust resized BTF, clearing BTF key/value info: %d\n",
10043 bpf_map__name(map), err);
10044 map->btf_value_type_id = 0;
10045 map->btf_key_type_id = 0;
10049 map->def.value_size = size;
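/* Illustrative sketch (not part of libbpf itself): grow a global-data
 * array map before load, assuming its last variable is an array and the
 * new size is a multiple of the element size; names are hypothetical.
 *
 *	struct bpf_map *m = bpf_object__find_map_by_name(obj, ".data.blob");
 *	int err;
 *
 *	err = bpf_map__set_value_size(m, 2 * bpf_map__value_size(m));
 */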
10053 __u32 bpf_map__btf_key_type_id(const struct bpf_map *map)
10055 return map ? map->btf_key_type_id : 0;
10058 __u32 bpf_map__btf_value_type_id(const struct bpf_map *map)
10060 return map ? map->btf_value_type_id : 0;
10063 int bpf_map__set_initial_value(struct bpf_map *map,
10064 const void *data, size_t size)
10066 if (map->obj->loaded || map->reused)
10067 return libbpf_err(-EBUSY);
10069 if (!map->mmaped || map->libbpf_type == LIBBPF_MAP_KCONFIG ||
10070 size != map->def.value_size)
10071 return libbpf_err(-EINVAL);
10073 memcpy(map->mmaped, data, size);
10077 void *bpf_map__initial_value(struct bpf_map *map, size_t *psize)
10079 if (bpf_map__is_struct_ops(map)) {
10081 *psize = map->def.value_size;
10082 return map->st_ops->data;
10087 *psize = map->def.value_size;
10088 return map->mmaped;
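/* Illustrative sketch (not part of libbpf itself): seed read-only data
 * before load; "struct rodata" mirroring the object's .rodata layout is
 * a hypothetical caller-side type.
 *
 *	struct rodata vals = { .debug_enabled = true };
 *	int err;
 *
 *	err = bpf_map__set_initial_value(rodata_map, &vals, sizeof(vals));
 *	// -EINVAL if size != value_size, map isn't mmap-ed, or it's .kconfig
 */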
10091 bool bpf_map__is_internal(const struct bpf_map *map)
10093 return map->libbpf_type != LIBBPF_MAP_UNSPEC;
10096 __u32 bpf_map__ifindex(const struct bpf_map *map)
10098 return map->map_ifindex;
10101 int bpf_map__set_ifindex(struct bpf_map *map, __u32 ifindex)
10103 if (map_is_created(map))
10104 return libbpf_err(-EBUSY);
10105 map->map_ifindex = ifindex;
10109 int bpf_map__set_inner_map_fd(struct bpf_map *map, int fd)
10111 if (!bpf_map_type__is_map_in_map(map->def.type)) {
10112 pr_warn("error: unsupported map type\n");
10113 return libbpf_err(-EINVAL);
10115 if (map->inner_map_fd != -1) {
10116 pr_warn("error: inner_map_fd already specified\n");
10117 return libbpf_err(-EINVAL);
10119 if (map->inner_map) {
10120 bpf_map__destroy(map->inner_map);
10121 zfree(&map->inner_map);
10123 map->inner_map_fd = fd;
10127 static struct bpf_map *
10128 __bpf_map__iter(const struct bpf_map *m, const struct bpf_object *obj, int i)
10131 struct bpf_map *s, *e;
10133 if (!obj || !obj->maps)
10134 return errno = EINVAL, NULL;
10137 e = obj->maps + obj->nr_maps;
10139 if ((m < s) || (m >= e)) {
10140 pr_warn("error in %s: map handler doesn't belong to object\n",
10142 return errno = EINVAL, NULL;
10145 idx = (m - obj->maps) + i;
10146 if (idx >= obj->nr_maps || idx < 0)
10148 return &obj->maps[idx];
10152 bpf_object__next_map(const struct bpf_object *obj, const struct bpf_map *prev)
10157 return __bpf_map__iter(prev, obj, 1);
10161 bpf_object__prev_map(const struct bpf_object *obj, const struct bpf_map *next)
10163 if (next == NULL) {
10166 return obj->maps + obj->nr_maps - 1;
10169 return __bpf_map__iter(next, obj, -1);
10173 bpf_object__find_map_by_name(const struct bpf_object *obj, const char *name)
10175 struct bpf_map *pos;
10177 bpf_object__for_each_map(pos, obj) {
10178 /* if it's a special internal map name (which always starts
10179 * with dot) then check if that special name matches the
10180 * real map name (ELF section name)
10182 if (name[0] == '.') {
10183 if (pos->real_name && strcmp(pos->real_name, name) == 0)
10187 /* otherwise map name has to be an exact match */
10188 if (map_uses_real_name(pos)) {
10189 if (strcmp(pos->real_name, name) == 0)
10193 if (strcmp(pos->name, name) == 0)
10196 return errno = ENOENT, NULL;
10200 bpf_object__find_map_fd_by_name(const struct bpf_object *obj, const char *name)
10202 return bpf_map__fd(bpf_object__find_map_by_name(obj, name));
10205 static int validate_map_op(const struct bpf_map *map, size_t key_sz,
10206 size_t value_sz, bool check_value_sz)
10208 if (!map_is_created(map)) /* map is not yet created */
10211 if (map->def.key_size != key_sz) {
10212 pr_warn("map '%s': unexpected key size %zu provided, expected %u\n",
10213 map->name, key_sz, map->def.key_size);
10217 if (!check_value_sz)
10220 switch (map->def.type) {
10221 case BPF_MAP_TYPE_PERCPU_ARRAY:
10222 case BPF_MAP_TYPE_PERCPU_HASH:
10223 case BPF_MAP_TYPE_LRU_PERCPU_HASH:
10224 case BPF_MAP_TYPE_PERCPU_CGROUP_STORAGE: {
10225 int num_cpu = libbpf_num_possible_cpus();
10226 size_t elem_sz = roundup(map->def.value_size, 8);
10228 if (value_sz != num_cpu * elem_sz) {
10229 pr_warn("map '%s': unexpected value size %zu provided for per-CPU map, expected %d * %zu = %zd\n",
10230 map->name, value_sz, num_cpu, elem_sz, num_cpu * elem_sz);
10236 if (map->def.value_size != value_sz) {
10237 pr_warn("map '%s': unexpected value size %zu provided, expected %u\n",
10238 map->name, value_sz, map->def.value_size);
10246 int bpf_map__lookup_elem(const struct bpf_map *map,
10247 const void *key, size_t key_sz,
10248 void *value, size_t value_sz, __u64 flags)
10252 err = validate_map_op(map, key_sz, value_sz, true);
10254 return libbpf_err(err);
10256 return bpf_map_lookup_elem_flags(map->fd, key, value, flags);
10259 int bpf_map__update_elem(const struct bpf_map *map,
10260 const void *key, size_t key_sz,
10261 const void *value, size_t value_sz, __u64 flags)
10265 err = validate_map_op(map, key_sz, value_sz, true);
10267 return libbpf_err(err);
10269 return bpf_map_update_elem(map->fd, key, value, flags);
10272 int bpf_map__delete_elem(const struct bpf_map *map,
10273 const void *key, size_t key_sz, __u64 flags)
10277 err = validate_map_op(map, key_sz, 0, false /* check_value_sz */);
10279 return libbpf_err(err);
10281 return bpf_map_delete_elem_flags(map->fd, key, flags);
10284 int bpf_map__lookup_and_delete_elem(const struct bpf_map *map,
10285 const void *key, size_t key_sz,
10286 void *value, size_t value_sz, __u64 flags)
10290 err = validate_map_op(map, key_sz, value_sz, true);
10292 return libbpf_err(err);
10294 return bpf_map_lookup_and_delete_elem_flags(map->fd, key, value, flags);
10297 int bpf_map__get_next_key(const struct bpf_map *map,
10298 const void *cur_key, void *next_key, size_t key_sz)
10302 err = validate_map_op(map, key_sz, 0, false /* check_value_sz */);
10304 return libbpf_err(err);
10306 return bpf_map_get_next_key(map->fd, cur_key, next_key);
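/* Illustrative usage of the size-checked wrappers above, for a
 * hypothetical map "m" with __u32 keys and __u64 values:
 *
 *	__u32 key = 42;
 *	__u64 val = 1;
 *	int err;
 *
 *	err = bpf_map__update_elem(m, &key, sizeof(key), &val, sizeof(val), BPF_ANY);
 *	if (!err)
 *		err = bpf_map__lookup_elem(m, &key, sizeof(key), &val, sizeof(val), 0);
 *	// mismatched sizes fail fast with -EINVAL instead of corrupting memory
 */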
10309 long libbpf_get_error(const void *ptr)
10311 if (!IS_ERR_OR_NULL(ptr))
10315 errno = -PTR_ERR(ptr);
10317 /* If ptr == NULL, then errno should be already set by the failing
10318 * API, because libbpf never returns NULL on success and it now always
10319 * sets errno on error. So no extra errno handling for ptr == NULL
10325 /* Replace link's underlying BPF program with the new one */
10326 int bpf_link__update_program(struct bpf_link *link, struct bpf_program *prog)
10330 ret = bpf_link_update(bpf_link__fd(link), bpf_program__fd(prog), NULL);
10331 return libbpf_err_errno(ret);
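/* Illustrative sketch (not part of libbpf itself): atomically swap the
 * program behind an existing link, e.g. for a live upgrade; "new_prog"
 * must already be loaded and of a compatible type.
 *
 *	int err = bpf_link__update_program(link, new_prog);
 *
 *	if (err)
 *		fprintf(stderr, "live update failed: %d\n", err);
 */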
10334 /* Release "ownership" of underlying BPF resource (typically, BPF program
10335 * attached to some BPF hook, e.g., tracepoint, kprobe, etc). Disconnected
10336 * link, when destroyed through a bpf_link__destroy() call, won't attempt to
10337 * detach/unregister that BPF resource. This is useful in situations where,
10338 * say, attached BPF program has to outlive userspace program that attached it
10339 * in the system. Depending on type of BPF program, though, there might be
10340 * additional steps (like pinning BPF program in BPF FS) necessary to ensure
10341 * exit of userspace program doesn't trigger automatic detachment and clean up
10342 * inside the kernel.
10344 void bpf_link__disconnect(struct bpf_link *link)
10346 link->disconnected = true;
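/* Illustrative sketch: keep an attachment alive past process exit by
 * pinning the link, then disconnecting before destroy; the pin path is
 * hypothetical.
 *
 *	if (!bpf_link__pin(link, "/sys/fs/bpf/mylink")) {
 *		bpf_link__disconnect(link);
 *		bpf_link__destroy(link); // frees memory, leaves link attached
 *	}
 */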
10349 int bpf_link__destroy(struct bpf_link *link)
10353 if (IS_ERR_OR_NULL(link))
10356 if (!link->disconnected && link->detach)
10357 err = link->detach(link);
10358 if (link->pin_path)
10359 free(link->pin_path);
10361 link->dealloc(link);
10365 return libbpf_err(err);
10368 int bpf_link__fd(const struct bpf_link *link)
10373 const char *bpf_link__pin_path(const struct bpf_link *link)
10375 return link->pin_path;
10378 static int bpf_link__detach_fd(struct bpf_link *link)
10380 return libbpf_err_errno(close(link->fd));
10383 struct bpf_link *bpf_link__open(const char *path)
10385 struct bpf_link *link;
10388 fd = bpf_obj_get(path);
10391 pr_warn("failed to open link at %s: %d\n", path, fd);
10392 return libbpf_err_ptr(fd);
10395 link = calloc(1, sizeof(*link));
10398 return libbpf_err_ptr(-ENOMEM);
10400 link->detach = &bpf_link__detach_fd;
10403 link->pin_path = strdup(path);
10404 if (!link->pin_path) {
10405 bpf_link__destroy(link);
10406 return libbpf_err_ptr(-ENOMEM);
10412 int bpf_link__detach(struct bpf_link *link)
10414 return bpf_link_detach(link->fd) ? -errno : 0;
10417 int bpf_link__pin(struct bpf_link *link, const char *path)
10421 if (link->pin_path)
10422 return libbpf_err(-EBUSY);
10423 err = make_parent_dir(path);
10425 return libbpf_err(err);
10426 err = check_path(path);
10428 return libbpf_err(err);
10430 link->pin_path = strdup(path);
10431 if (!link->pin_path)
10432 return libbpf_err(-ENOMEM);
10434 if (bpf_obj_pin(link->fd, link->pin_path)) {
10436 zfree(&link->pin_path);
10437 return libbpf_err(err);
10440 pr_debug("link fd=%d: pinned at %s\n", link->fd, link->pin_path);
10444 int bpf_link__unpin(struct bpf_link *link)
10448 if (!link->pin_path)
10449 return libbpf_err(-EINVAL);
10451 err = unlink(link->pin_path);
10455 pr_debug("link fd=%d: unpinned from %s\n", link->fd, link->pin_path);
10456 zfree(&link->pin_path);
10460 struct bpf_link_perf {
10461 struct bpf_link link;
10463 /* legacy kprobe support: keep track of probe identifier and type */
10464 char *legacy_probe_name;
10465 bool legacy_is_kprobe;
10466 bool legacy_is_retprobe;
10469 static int remove_kprobe_event_legacy(const char *probe_name, bool retprobe);
10470 static int remove_uprobe_event_legacy(const char *probe_name, bool retprobe);
10472 static int bpf_link_perf_detach(struct bpf_link *link)
10474 struct bpf_link_perf *perf_link = container_of(link, struct bpf_link_perf, link);
10477 if (ioctl(perf_link->perf_event_fd, PERF_EVENT_IOC_DISABLE, 0) < 0)
10480 if (perf_link->perf_event_fd != link->fd)
10481 close(perf_link->perf_event_fd);
10484 /* legacy uprobe/kprobe needs to be removed after perf event fd closure */
10485 if (perf_link->legacy_probe_name) {
10486 if (perf_link->legacy_is_kprobe) {
10487 err = remove_kprobe_event_legacy(perf_link->legacy_probe_name,
10488 perf_link->legacy_is_retprobe);
10490 err = remove_uprobe_event_legacy(perf_link->legacy_probe_name,
10491 perf_link->legacy_is_retprobe);
10498 static void bpf_link_perf_dealloc(struct bpf_link *link)
10500 struct bpf_link_perf *perf_link = container_of(link, struct bpf_link_perf, link);
10502 free(perf_link->legacy_probe_name);
10506 struct bpf_link *bpf_program__attach_perf_event_opts(const struct bpf_program *prog, int pfd,
10507 const struct bpf_perf_event_opts *opts)
10509 char errmsg[STRERR_BUFSIZE];
10510 struct bpf_link_perf *link;
10511 int prog_fd, link_fd = -1, err;
10512 bool force_ioctl_attach;
10514 if (!OPTS_VALID(opts, bpf_perf_event_opts))
10515 return libbpf_err_ptr(-EINVAL);
10518 pr_warn("prog '%s': invalid perf event FD %d\n",
10520 return libbpf_err_ptr(-EINVAL);
10522 prog_fd = bpf_program__fd(prog);
10524 pr_warn("prog '%s': can't attach BPF program w/o FD (did you load it?)\n",
10526 return libbpf_err_ptr(-EINVAL);
10529 link = calloc(1, sizeof(*link));
10531 return libbpf_err_ptr(-ENOMEM);
10532 link->link.detach = &bpf_link_perf_detach;
10533 link->link.dealloc = &bpf_link_perf_dealloc;
10534 link->perf_event_fd = pfd;
10536 force_ioctl_attach = OPTS_GET(opts, force_ioctl_attach, false);
10537 if (kernel_supports(prog->obj, FEAT_PERF_LINK) && !force_ioctl_attach) {
10538 DECLARE_LIBBPF_OPTS(bpf_link_create_opts, link_opts,
10539 .perf_event.bpf_cookie = OPTS_GET(opts, bpf_cookie, 0));
10541 link_fd = bpf_link_create(prog_fd, pfd, BPF_PERF_EVENT, &link_opts);
10544 pr_warn("prog '%s': failed to create BPF link for perf_event FD %d: %d (%s)\n",
10546 err, libbpf_strerror_r(err, errmsg, sizeof(errmsg)));
10549 link->link.fd = link_fd;
10551 if (OPTS_GET(opts, bpf_cookie, 0)) {
10552 pr_warn("prog '%s': user context value is not supported\n", prog->name);
10557 if (ioctl(pfd, PERF_EVENT_IOC_SET_BPF, prog_fd) < 0) {
10559 pr_warn("prog '%s': failed to attach to perf_event FD %d: %s\n",
10560 prog->name, pfd, libbpf_strerror_r(err, errmsg, sizeof(errmsg)));
10561 if (err == -EPROTO)
10562 pr_warn("prog '%s': try add PERF_SAMPLE_CALLCHAIN to or remove exclude_callchain_[kernel|user] from pfd %d\n",
10566 link->link.fd = pfd;
10568 if (ioctl(pfd, PERF_EVENT_IOC_ENABLE, 0) < 0) {
10570 pr_warn("prog '%s': failed to enable perf_event FD %d: %s\n",
10571 prog->name, pfd, libbpf_strerror_r(err, errmsg, sizeof(errmsg)));
10575 return &link->link;
10580 return libbpf_err_ptr(err);
10583 struct bpf_link *bpf_program__attach_perf_event(const struct bpf_program *prog, int pfd)
10585 return bpf_program__attach_perf_event_opts(prog, pfd, NULL);
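/* Illustrative sketch (not part of libbpf itself): attach a perf_event
 * program to a 99Hz CPU-clock software event on CPU 0; error handling is
 * elided.
 *
 *	struct perf_event_attr attr = {
 *		.type = PERF_TYPE_SOFTWARE,
 *		.config = PERF_COUNT_SW_CPU_CLOCK,
 *		.size = sizeof(attr),
 *		.freq = 1,
 *		.sample_freq = 99,
 *	};
 *	struct bpf_link *link;
 *	int pfd;
 *
 *	pfd = syscall(__NR_perf_event_open, &attr, -1, 0, -1,
 *		      PERF_FLAG_FD_CLOEXEC); // pid=-1 (any), cpu=0, no group
 *	link = bpf_program__attach_perf_event(prog, pfd);
 */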
10589 * this function is expected to parse an integer in the range of [0, 2^31-1] from
10590 * the given file using scanf format string fmt. If the actual parsed value is
10591 * negative, the result might be indistinguishable from an error
10593 static int parse_uint_from_file(const char *file, const char *fmt)
10595 char buf[STRERR_BUFSIZE];
10599 f = fopen(file, "re");
10602 pr_debug("failed to open '%s': %s\n", file,
10603 libbpf_strerror_r(err, buf, sizeof(buf)));
10606 err = fscanf(f, fmt, &ret);
10608 err = err == EOF ? -EIO : -errno;
10609 pr_debug("failed to parse '%s': %s\n", file,
10610 libbpf_strerror_r(err, buf, sizeof(buf)));
10618 static int determine_kprobe_perf_type(void)
10620 const char *file = "/sys/bus/event_source/devices/kprobe/type";
10622 return parse_uint_from_file(file, "%d\n");
10625 static int determine_uprobe_perf_type(void)
10627 const char *file = "/sys/bus/event_source/devices/uprobe/type";
10629 return parse_uint_from_file(file, "%d\n");
10632 static int determine_kprobe_retprobe_bit(void)
10634 const char *file = "/sys/bus/event_source/devices/kprobe/format/retprobe";
10636 return parse_uint_from_file(file, "config:%d\n");
10639 static int determine_uprobe_retprobe_bit(void)
10641 const char *file = "/sys/bus/event_source/devices/uprobe/format/retprobe";
10643 return parse_uint_from_file(file, "config:%d\n");
10646 #define PERF_UPROBE_REF_CTR_OFFSET_BITS 32
10647 #define PERF_UPROBE_REF_CTR_OFFSET_SHIFT 32
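/* For PMU-based uprobes, the USDT semaphore ("ref counter") offset is
 * carried in the upper 32 bits of attr.config. E.g. ref_ctr_off = 0x1234
 * ends up encoded as:
 *
 *	attr.config |= 0x1234ULL << PERF_UPROBE_REF_CTR_OFFSET_SHIFT;
 *
 * which is exactly what perf_event_open_probe() below does.
 */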
10649 static int perf_event_open_probe(bool uprobe, bool retprobe, const char *name,
10650 uint64_t offset, int pid, size_t ref_ctr_off)
10652 const size_t attr_sz = sizeof(struct perf_event_attr);
10653 struct perf_event_attr attr;
10654 char errmsg[STRERR_BUFSIZE];
10657 if ((__u64)ref_ctr_off >= (1ULL << PERF_UPROBE_REF_CTR_OFFSET_BITS))
10660 memset(&attr, 0, attr_sz);
10662 type = uprobe ? determine_uprobe_perf_type()
10663 : determine_kprobe_perf_type();
10665 pr_warn("failed to determine %s perf type: %s\n",
10666 uprobe ? "uprobe" : "kprobe",
10667 libbpf_strerror_r(type, errmsg, sizeof(errmsg)));
10671 int bit = uprobe ? determine_uprobe_retprobe_bit()
10672 : determine_kprobe_retprobe_bit();
10675 pr_warn("failed to determine %s retprobe bit: %s\n",
10676 uprobe ? "uprobe" : "kprobe",
10677 libbpf_strerror_r(bit, errmsg, sizeof(errmsg)));
10680 attr.config |= 1 << bit;
10682 attr.size = attr_sz;
10684 attr.config |= (__u64)ref_ctr_off << PERF_UPROBE_REF_CTR_OFFSET_SHIFT;
10685 attr.config1 = ptr_to_u64(name); /* kprobe_func or uprobe_path */
10686 attr.config2 = offset; /* kprobe_addr or probe_offset */
10688 /* pid filter is meaningful only for uprobes */
10689 pfd = syscall(__NR_perf_event_open, &attr,
10690 pid < 0 ? -1 : pid /* pid */,
10691 pid == -1 ? 0 : -1 /* cpu */,
10692 -1 /* group_fd */, PERF_FLAG_FD_CLOEXEC);
10693 return pfd >= 0 ? pfd : -errno;
10696 static int append_to_file(const char *file, const char *fmt, ...)
10698 int fd, n, err = 0;
10703 n = vsnprintf(buf, sizeof(buf), fmt, ap);
10706 if (n < 0 || n >= sizeof(buf))
10709 fd = open(file, O_WRONLY | O_APPEND | O_CLOEXEC, 0);
10713 if (write(fd, buf, n) < 0)
10720 #define DEBUGFS "/sys/kernel/debug/tracing"
10721 #define TRACEFS "/sys/kernel/tracing"
10723 static bool use_debugfs(void)
10725 static int has_debugfs = -1;
10727 if (has_debugfs < 0)
10728 has_debugfs = faccessat(AT_FDCWD, DEBUGFS, F_OK, AT_EACCESS) == 0;
10730 return has_debugfs == 1;
10733 static const char *tracefs_path(void)
10735 return use_debugfs() ? DEBUGFS : TRACEFS;
10738 static const char *tracefs_kprobe_events(void)
10740 return use_debugfs() ? DEBUGFS"/kprobe_events" : TRACEFS"/kprobe_events";
10743 static const char *tracefs_uprobe_events(void)
10745 return use_debugfs() ? DEBUGFS"/uprobe_events" : TRACEFS"/uprobe_events";
10748 static const char *tracefs_available_filter_functions(void)
10750 return use_debugfs() ? DEBUGFS"/available_filter_functions"
10751 : TRACEFS"/available_filter_functions";
10754 static const char *tracefs_available_filter_functions_addrs(void)
10756 return use_debugfs() ? DEBUGFS"/available_filter_functions_addrs"
10757 : TRACEFS"/available_filter_functions_addrs";
10760 static void gen_kprobe_legacy_event_name(char *buf, size_t buf_sz,
10761 const char *kfunc_name, size_t offset)
10763 static int index = 0;
10766 snprintf(buf, buf_sz, "libbpf_%u_%s_0x%zx_%d", getpid(), kfunc_name, offset,
10767 __sync_fetch_and_add(&index, 1));
10769 /* sanitize the kernel function name in the probe name */
10770 for (i = 0; buf[i]; i++) {
10771 if (!isalnum(buf[i]))
10776 static int add_kprobe_event_legacy(const char *probe_name, bool retprobe,
10777 const char *kfunc_name, size_t offset)
10779 return append_to_file(tracefs_kprobe_events(), "%c:%s/%s %s+0x%zx",
10780 retprobe ? 'r' : 'p',
10781 retprobe ? "kretprobes" : "kprobes",
10782 probe_name, kfunc_name, offset);
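/* Given the format string above, probing do_unlinkat at offset 0 with a
 * generated probe name appends a line like (illustrative values):
 *
 *	p:kprobes/libbpf_1234_do_unlinkat_0x0_0 do_unlinkat+0x0
 *
 * to the tracefs kprobe_events file; return probes use an 'r' prefix and
 * the "kretprobes" group instead.
 */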
10785 static int remove_kprobe_event_legacy(const char *probe_name, bool retprobe)
10787 return append_to_file(tracefs_kprobe_events(), "-:%s/%s",
10788 retprobe ? "kretprobes" : "kprobes", probe_name);
10791 static int determine_kprobe_perf_type_legacy(const char *probe_name, bool retprobe)
10795 snprintf(file, sizeof(file), "%s/events/%s/%s/id",
10796 tracefs_path(), retprobe ? "kretprobes" : "kprobes", probe_name);
10798 return parse_uint_from_file(file, "%d\n");
10801 static int perf_event_kprobe_open_legacy(const char *probe_name, bool retprobe,
10802 const char *kfunc_name, size_t offset, int pid)
10804 const size_t attr_sz = sizeof(struct perf_event_attr);
10805 struct perf_event_attr attr;
10806 char errmsg[STRERR_BUFSIZE];
10807 int type, pfd, err;
10809 err = add_kprobe_event_legacy(probe_name, retprobe, kfunc_name, offset);
10811 pr_warn("failed to add legacy kprobe event for '%s+0x%zx': %s\n",
10812 kfunc_name, offset,
10813 libbpf_strerror_r(err, errmsg, sizeof(errmsg)));
10816 type = determine_kprobe_perf_type_legacy(probe_name, retprobe);
10819 pr_warn("failed to determine legacy kprobe event id for '%s+0x%zx': %s\n",
10820 kfunc_name, offset,
10821 libbpf_strerror_r(err, errmsg, sizeof(errmsg)));
10822 goto err_clean_legacy;
10825 memset(&attr, 0, attr_sz);
10826 attr.size = attr_sz;
10827 attr.config = type;
10828 attr.type = PERF_TYPE_TRACEPOINT;
10830 pfd = syscall(__NR_perf_event_open, &attr,
10831 pid < 0 ? -1 : pid, /* pid */
10832 pid == -1 ? 0 : -1, /* cpu */
10833 -1 /* group_fd */, PERF_FLAG_FD_CLOEXEC);
10836 pr_warn("legacy kprobe perf_event_open() failed: %s\n",
10837 libbpf_strerror_r(err, errmsg, sizeof(errmsg)));
10838 goto err_clean_legacy;
10843 /* Clear the newly added legacy kprobe_event */
10844 remove_kprobe_event_legacy(probe_name, retprobe);
10848 static const char *arch_specific_syscall_pfx(void)
10850 #if defined(__x86_64__)
10852 #elif defined(__i386__)
10854 #elif defined(__s390x__)
10856 #elif defined(__s390__)
10858 #elif defined(__arm__)
10860 #elif defined(__aarch64__)
10862 #elif defined(__mips__)
10864 #elif defined(__riscv)
10866 #elif defined(__powerpc__)
10868 #elif defined(__powerpc64__)
10869 return "powerpc64";
10875 int probe_kern_syscall_wrapper(int token_fd)
10877 char syscall_name[64];
10878 const char *ksys_pfx;
10880 ksys_pfx = arch_specific_syscall_pfx();
10884 snprintf(syscall_name, sizeof(syscall_name), "__%s_sys_bpf", ksys_pfx);
10886 if (determine_kprobe_perf_type() >= 0) {
10889 pfd = perf_event_open_probe(false, false, syscall_name, 0, getpid(), 0);
10893 return pfd >= 0 ? 1 : 0;
10894 } else { /* legacy mode */
10895 char probe_name[128];
10897 gen_kprobe_legacy_event_name(probe_name, sizeof(probe_name), syscall_name, 0);
10898 if (add_kprobe_event_legacy(probe_name, false, syscall_name, 0) < 0)
10901 (void)remove_kprobe_event_legacy(probe_name, false);
10907 bpf_program__attach_kprobe_opts(const struct bpf_program *prog,
10908 const char *func_name,
10909 const struct bpf_kprobe_opts *opts)
10911 DECLARE_LIBBPF_OPTS(bpf_perf_event_opts, pe_opts);
10912 enum probe_attach_mode attach_mode;
10913 char errmsg[STRERR_BUFSIZE];
10914 char *legacy_probe = NULL;
10915 struct bpf_link *link;
10917 bool retprobe, legacy;
10920 if (!OPTS_VALID(opts, bpf_kprobe_opts))
10921 return libbpf_err_ptr(-EINVAL);
10923 attach_mode = OPTS_GET(opts, attach_mode, PROBE_ATTACH_MODE_DEFAULT);
10924 retprobe = OPTS_GET(opts, retprobe, false);
10925 offset = OPTS_GET(opts, offset, 0);
10926 pe_opts.bpf_cookie = OPTS_GET(opts, bpf_cookie, 0);
10928 legacy = determine_kprobe_perf_type() < 0;
10929 switch (attach_mode) {
10930 case PROBE_ATTACH_MODE_LEGACY:
10932 pe_opts.force_ioctl_attach = true;
10934 case PROBE_ATTACH_MODE_PERF:
10936 return libbpf_err_ptr(-ENOTSUP);
10937 pe_opts.force_ioctl_attach = true;
10939 case PROBE_ATTACH_MODE_LINK:
10940 if (legacy || !kernel_supports(prog->obj, FEAT_PERF_LINK))
10941 return libbpf_err_ptr(-ENOTSUP);
10943 case PROBE_ATTACH_MODE_DEFAULT:
10946 return libbpf_err_ptr(-EINVAL);
10950 pfd = perf_event_open_probe(false /* uprobe */, retprobe,
10952 -1 /* pid */, 0 /* ref_ctr_off */);
10954 char probe_name[256];
10956 gen_kprobe_legacy_event_name(probe_name, sizeof(probe_name),
10957 func_name, offset);
10959 legacy_probe = strdup(probe_name);
10961 return libbpf_err_ptr(-ENOMEM);
10963 pfd = perf_event_kprobe_open_legacy(legacy_probe, retprobe, func_name,
10964 offset, -1 /* pid */);
10968 pr_warn("prog '%s': failed to create %s '%s+0x%zx' perf event: %s\n",
10969 prog->name, retprobe ? "kretprobe" : "kprobe",
10971 libbpf_strerror_r(err, errmsg, sizeof(errmsg)));
10974 link = bpf_program__attach_perf_event_opts(prog, pfd, &pe_opts);
10975 err = libbpf_get_error(link);
10978 pr_warn("prog '%s': failed to attach to %s '%s+0x%zx': %s\n",
10979 prog->name, retprobe ? "kretprobe" : "kprobe",
10981 libbpf_strerror_r(err, errmsg, sizeof(errmsg)));
10982 goto err_clean_legacy;
10985 struct bpf_link_perf *perf_link = container_of(link, struct bpf_link_perf, link);
10987 perf_link->legacy_probe_name = legacy_probe;
10988 perf_link->legacy_is_kprobe = true;
10989 perf_link->legacy_is_retprobe = retprobe;
10996 remove_kprobe_event_legacy(legacy_probe, retprobe);
10998 free(legacy_probe);
10999 return libbpf_err_ptr(err);
11002 struct bpf_link *bpf_program__attach_kprobe(const struct bpf_program *prog,
11004 const char *func_name)
11006 DECLARE_LIBBPF_OPTS(bpf_kprobe_opts, opts,
11007 .retprobe = retprobe,
11010 return bpf_program__attach_kprobe_opts(prog, func_name, &opts);
11013 struct bpf_link *bpf_program__attach_ksyscall(const struct bpf_program *prog,
11014 const char *syscall_name,
11015 const struct bpf_ksyscall_opts *opts)
11017 LIBBPF_OPTS(bpf_kprobe_opts, kprobe_opts);
11018 char func_name[128];
11020 if (!OPTS_VALID(opts, bpf_ksyscall_opts))
11021 return libbpf_err_ptr(-EINVAL);
11023 if (kernel_supports(prog->obj, FEAT_SYSCALL_WRAPPER)) {
11024 /* arch_specific_syscall_pfx() should never return NULL here
11025 * because it is guarded by kernel_supports(). However, since
11026 * the compiler does not know that, keep an explicit "?:" fallback below. */
11029 snprintf(func_name, sizeof(func_name), "__%s_sys_%s",
11030 arch_specific_syscall_pfx() ? : "", syscall_name);
11032 snprintf(func_name, sizeof(func_name), "__se_sys_%s", syscall_name);
11035 kprobe_opts.retprobe = OPTS_GET(opts, retprobe, false);
11036 kprobe_opts.bpf_cookie = OPTS_GET(opts, bpf_cookie, 0);
11038 return bpf_program__attach_kprobe_opts(prog, func_name, &kprobe_opts);
11041 /* Adapted from perf/util/string.c */
11042 bool glob_match(const char *str, const char *pat)
11044 while (*str && *pat && *pat != '*') {
11045 if (*pat == '?') { /* Matches any single character */
11055 /* Check wild card */
11057 while (*pat == '*')
11059 if (!*pat) /* Tail wild card matches all */
11062 if (glob_match(str++, pat))
11065 return !*str && !*pat;
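/* Illustrative matches under the globbing rules above:
 *
 *	glob_match("sys_open", "sys_*");    // true: tail wildcard
 *	glob_match("sys_open", "sys_?pen"); // true: '?' matches one char
 *	glob_match("do_sys_open", "sys_*"); // false: no leading '*'
 */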
11068 struct kprobe_multi_resolve {
11069 const char *pattern;
11070 unsigned long *addrs;
11075 struct avail_kallsyms_data {
11078 struct kprobe_multi_resolve *res;
11081 static int avail_func_cmp(const void *a, const void *b)
11083 return strcmp(*(const char **)a, *(const char **)b);
11086 static int avail_kallsyms_cb(unsigned long long sym_addr, char sym_type,
11087 const char *sym_name, void *ctx)
11089 struct avail_kallsyms_data *data = ctx;
11090 struct kprobe_multi_resolve *res = data->res;
11093 if (!bsearch(&sym_name, data->syms, data->cnt, sizeof(*data->syms), avail_func_cmp))
11096 err = libbpf_ensure_mem((void **)&res->addrs, &res->cap, sizeof(*res->addrs), res->cnt + 1);
11100 res->addrs[res->cnt++] = (unsigned long)sym_addr;
11104 static int libbpf_available_kallsyms_parse(struct kprobe_multi_resolve *res)
11106 const char *available_functions_file = tracefs_available_filter_functions();
11107 struct avail_kallsyms_data data;
11108 char sym_name[500];
11110 int err = 0, ret, i;
11111 char **syms = NULL;
11112 size_t cap = 0, cnt = 0;
11114 f = fopen(available_functions_file, "re");
11117 pr_warn("failed to open %s: %d\n", available_functions_file, err);
11124 ret = fscanf(f, "%499s%*[^\n]\n", sym_name);
11125 if (ret == EOF && feof(f))
11129 pr_warn("failed to parse available_filter_functions entry: %d\n", ret);
11134 if (!glob_match(sym_name, res->pattern))
11137 err = libbpf_ensure_mem((void **)&syms, &cap, sizeof(*syms), cnt + 1);
11141 name = strdup(sym_name);
11147 syms[cnt++] = name;
11150 /* no entries found, bail out */
11156 /* sort available functions */
11157 qsort(syms, cnt, sizeof(*syms), avail_func_cmp);
11162 libbpf_kallsyms_parse(avail_kallsyms_cb, &data);
11168 for (i = 0; i < cnt; i++)
11169 free((char *)syms[i]);
11176 static bool has_available_filter_functions_addrs(void)
11178 return access(tracefs_available_filter_functions_addrs(), R_OK) != -1;
11181 static int libbpf_available_kprobes_parse(struct kprobe_multi_resolve *res)
11183 const char *available_path = tracefs_available_filter_functions_addrs();
11184 char sym_name[500];
11187 unsigned long long sym_addr;
11189 f = fopen(available_path, "re");
11192 pr_warn("failed to open %s: %d\n", available_path, err);
11197 ret = fscanf(f, "%llx %499s%*[^\n]\n", &sym_addr, sym_name);
11198 if (ret == EOF && feof(f))
11202 pr_warn("failed to parse available_filter_functions_addrs entry: %d\n",
11208 if (!glob_match(sym_name, res->pattern))
11211 err = libbpf_ensure_mem((void **)&res->addrs, &res->cap,
11212 sizeof(*res->addrs), res->cnt + 1);
11216 res->addrs[res->cnt++] = (unsigned long)sym_addr;
11228 bpf_program__attach_kprobe_multi_opts(const struct bpf_program *prog,
11229 const char *pattern,
11230 const struct bpf_kprobe_multi_opts *opts)
11232 LIBBPF_OPTS(bpf_link_create_opts, lopts);
11233 struct kprobe_multi_resolve res = {
11234 .pattern = pattern,
11236 struct bpf_link *link = NULL;
11237 char errmsg[STRERR_BUFSIZE];
11238 const unsigned long *addrs;
11239 int err, link_fd, prog_fd;
11240 const __u64 *cookies;
11245 if (!OPTS_VALID(opts, bpf_kprobe_multi_opts))
11246 return libbpf_err_ptr(-EINVAL);
11248 syms = OPTS_GET(opts, syms, false);
11249 addrs = OPTS_GET(opts, addrs, false);
11250 cnt = OPTS_GET(opts, cnt, false);
11251 cookies = OPTS_GET(opts, cookies, false);
11253 if (!pattern && !addrs && !syms)
11254 return libbpf_err_ptr(-EINVAL);
11255 if (pattern && (addrs || syms || cookies || cnt))
11256 return libbpf_err_ptr(-EINVAL);
11257 if (!pattern && !cnt)
11258 return libbpf_err_ptr(-EINVAL);
11260 return libbpf_err_ptr(-EINVAL);
11263 if (has_available_filter_functions_addrs())
11264 err = libbpf_available_kprobes_parse(&res);
11266 err = libbpf_available_kallsyms_parse(&res);
11273 retprobe = OPTS_GET(opts, retprobe, false);
11275 lopts.kprobe_multi.syms = syms;
11276 lopts.kprobe_multi.addrs = addrs;
11277 lopts.kprobe_multi.cookies = cookies;
11278 lopts.kprobe_multi.cnt = cnt;
11279 lopts.kprobe_multi.flags = retprobe ? BPF_F_KPROBE_MULTI_RETURN : 0;
11281 link = calloc(1, sizeof(*link));
11286 link->detach = &bpf_link__detach_fd;
11288 prog_fd = bpf_program__fd(prog);
11289 link_fd = bpf_link_create(prog_fd, 0, BPF_TRACE_KPROBE_MULTI, &lopts);
11292 pr_warn("prog '%s': failed to attach: %s\n",
11293 prog->name, libbpf_strerror_r(err, errmsg, sizeof(errmsg)));
11296 link->fd = link_fd;
11303 return libbpf_err_ptr(err);
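/* Illustrative sketch (not part of libbpf itself): attach one program to
 * every kernel function matching a glob pattern; error handling elided.
 *
 *	LIBBPF_OPTS(bpf_kprobe_multi_opts, mopts, .retprobe = false);
 *	struct bpf_link *link;
 *
 *	link = bpf_program__attach_kprobe_multi_opts(prog, "tcp_*", &mopts);
 */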
11306 static int attach_kprobe(const struct bpf_program *prog, long cookie, struct bpf_link **link)
11308 DECLARE_LIBBPF_OPTS(bpf_kprobe_opts, opts);
11309 unsigned long offset = 0;
11310 const char *func_name;
11316 /* no auto-attach for SEC("kprobe") and SEC("kretprobe") */
11317 if (strcmp(prog->sec_name, "kprobe") == 0 || strcmp(prog->sec_name, "kretprobe") == 0)
11320 opts.retprobe = str_has_pfx(prog->sec_name, "kretprobe/");
11322 func_name = prog->sec_name + sizeof("kretprobe/") - 1;
11324 func_name = prog->sec_name + sizeof("kprobe/") - 1;
11326 n = sscanf(func_name, "%m[a-zA-Z0-9_.]+%li", &func, &offset);
11328 pr_warn("kprobe name is invalid: %s\n", func_name);
11331 if (opts.retprobe && offset != 0) {
11333 pr_warn("kretprobes do not support offset specification\n");
11337 opts.offset = offset;
11338 *link = bpf_program__attach_kprobe_opts(prog, func, &opts);
11340 return libbpf_get_error(*link);
11343 static int attach_ksyscall(const struct bpf_program *prog, long cookie, struct bpf_link **link)
11345 LIBBPF_OPTS(bpf_ksyscall_opts, opts);
11346 const char *syscall_name;
11350 /* no auto-attach for SEC("ksyscall") and SEC("kretsyscall") */
11351 if (strcmp(prog->sec_name, "ksyscall") == 0 || strcmp(prog->sec_name, "kretsyscall") == 0)
11354 opts.retprobe = str_has_pfx(prog->sec_name, "kretsyscall/");
11356 syscall_name = prog->sec_name + sizeof("kretsyscall/") - 1;
11358 syscall_name = prog->sec_name + sizeof("ksyscall/") - 1;
11360 *link = bpf_program__attach_ksyscall(prog, syscall_name, &opts);
11361 return *link ? 0 : -errno;
11364 static int attach_kprobe_multi(const struct bpf_program *prog, long cookie, struct bpf_link **link)
11366 LIBBPF_OPTS(bpf_kprobe_multi_opts, opts);
11373 /* no auto-attach for SEC("kprobe.multi") and SEC("kretprobe.multi") */
11374 if (strcmp(prog->sec_name, "kprobe.multi") == 0 ||
11375 strcmp(prog->sec_name, "kretprobe.multi") == 0)
11378 opts.retprobe = str_has_pfx(prog->sec_name, "kretprobe.multi/");
11380 spec = prog->sec_name + sizeof("kretprobe.multi/") - 1;
11382 spec = prog->sec_name + sizeof("kprobe.multi/") - 1;
11384 n = sscanf(spec, "%m[a-zA-Z0-9_.*?]", &pattern);
11386 pr_warn("kprobe multi pattern is invalid: %s\n", spec);
11390 *link = bpf_program__attach_kprobe_multi_opts(prog, pattern, &opts);
11392 return libbpf_get_error(*link);
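/* For reference, BPF-side section names accepted by the auto-attach
 * handlers above; a sketch assuming vmlinux.h/bpf_tracing.h are available
 * (program bodies elided):
 *
 *	SEC("kprobe/tcp_connect")      // single kprobe; "+0x10" offset allowed
 *	int BPF_KPROBE(on_connect) { return 0; }
 *
 *	SEC("ksyscall/openat")         // arch-independent syscall wrapper
 *	int BPF_KSYSCALL(on_openat) { return 0; }
 *
 *	SEC("kprobe.multi/tcp_*")      // glob, attached as one multi-link
 *	int BPF_KPROBE(on_tcp_any) { return 0; }
 */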
11395 static int attach_uprobe_multi(const struct bpf_program *prog, long cookie, struct bpf_link **link)
11397 char *probe_type = NULL, *binary_path = NULL, *func_name = NULL;
11398 LIBBPF_OPTS(bpf_uprobe_multi_opts, opts);
11399 int n, ret = -EINVAL;
11403 n = sscanf(prog->sec_name, "%m[^/]/%m[^:]:%m[^\n]",
11404 &probe_type, &binary_path, &func_name);
11407 /* handle SEC("u[ret]probe") - format is valid, but auto-attach is impossible. */
11411 opts.retprobe = strcmp(probe_type, "uretprobe.multi") == 0;
11412 *link = bpf_program__attach_uprobe_multi(prog, -1, binary_path, func_name, &opts);
11413 ret = libbpf_get_error(*link);
11416 pr_warn("prog '%s': invalid format of section definition '%s'\n", prog->name,
11426 static void gen_uprobe_legacy_event_name(char *buf, size_t buf_sz,
11427 const char *binary_path, uint64_t offset)
11431 snprintf(buf, buf_sz, "libbpf_%u_%s_0x%zx", getpid(), binary_path, (size_t)offset);
11433 /* sanitize binary_path in the probe name */
11434 for (i = 0; buf[i]; i++) {
11435 if (!isalnum(buf[i]))
11440 static inline int add_uprobe_event_legacy(const char *probe_name, bool retprobe,
11441 const char *binary_path, size_t offset)
11443 return append_to_file(tracefs_uprobe_events(), "%c:%s/%s %s:0x%zx",
11444 retprobe ? 'r' : 'p',
11445 retprobe ? "uretprobes" : "uprobes",
11446 probe_name, binary_path, offset);
11449 static inline int remove_uprobe_event_legacy(const char *probe_name, bool retprobe)
11451 return append_to_file(tracefs_uprobe_events(), "-:%s/%s",
11452 retprobe ? "uretprobes" : "uprobes", probe_name);
11455 static int determine_uprobe_perf_type_legacy(const char *probe_name, bool retprobe)
11459 snprintf(file, sizeof(file), "%s/events/%s/%s/id",
11460 tracefs_path(), retprobe ? "uretprobes" : "uprobes", probe_name);
11462 return parse_uint_from_file(file, "%d\n");
11465 static int perf_event_uprobe_open_legacy(const char *probe_name, bool retprobe,
11466 const char *binary_path, size_t offset, int pid)
11468 const size_t attr_sz = sizeof(struct perf_event_attr);
11469 struct perf_event_attr attr;
11470 int type, pfd, err;
11472 err = add_uprobe_event_legacy(probe_name, retprobe, binary_path, offset);
11474 pr_warn("failed to add legacy uprobe event for %s:0x%zx: %d\n",
11475 binary_path, (size_t)offset, err);
11478 type = determine_uprobe_perf_type_legacy(probe_name, retprobe);
11481 pr_warn("failed to determine legacy uprobe event id for %s:0x%zx: %d\n",
11482 binary_path, offset, err);
11483 goto err_clean_legacy;
11486 memset(&attr, 0, attr_sz);
11487 attr.size = attr_sz;
11488 attr.config = type;
11489 attr.type = PERF_TYPE_TRACEPOINT;
11491 pfd = syscall(__NR_perf_event_open, &attr,
11492 pid < 0 ? -1 : pid, /* pid */
11493 pid == -1 ? 0 : -1, /* cpu */
11494 -1 /* group_fd */, PERF_FLAG_FD_CLOEXEC);
11497 pr_warn("legacy uprobe perf_event_open() failed: %d\n", err);
11498 goto err_clean_legacy;
11503 /* Clear the newly added legacy uprobe_event */
11504 remove_uprobe_event_legacy(probe_name, retprobe);
11508 /* Find offset of function name in archive specified by path. Currently
11509 * supported are .zip files that do not compress their contents, as used on
11510 * Android in the form of APKs, for example. "file_name" is the name of the ELF
11511 * file inside the archive. "func_name" matches symbol name or name@@LIB for
11512 * library functions.
11514 * An overview of the APK format is provided here:
11515 * https://en.wikipedia.org/w/index.php?title=Apk_(file_format)&oldid=1139099120#Package_contents
11517 static long elf_find_func_offset_from_archive(const char *archive_path, const char *file_name,
11518 const char *func_name)
11520 struct zip_archive *archive;
11521 struct zip_entry entry;
11525 archive = zip_archive_open(archive_path);
11526 if (IS_ERR(archive)) {
11527 ret = PTR_ERR(archive);
11528 pr_warn("zip: failed to open %s: %ld\n", archive_path, ret);
11532 ret = zip_archive_find_entry(archive, file_name, &entry);
11534 pr_warn("zip: could not find archive member %s in %s: %ld\n", file_name,
11535 archive_path, ret);
11538 pr_debug("zip: found entry for %s in %s at 0x%lx\n", file_name, archive_path,
11539 (unsigned long)entry.data_offset);
11541 if (entry.compression) {
11542 pr_warn("zip: entry %s of %s is compressed and cannot be handled\n", file_name,
11544 ret = -LIBBPF_ERRNO__FORMAT;
11548 elf = elf_memory((void *)entry.data, entry.data_length);
11550 pr_warn("elf: could not read elf file %s from %s: %s\n", file_name, archive_path,
11552 ret = -LIBBPF_ERRNO__LIBELF;
11556 ret = elf_find_func_offset(elf, file_name, func_name);
11558 pr_debug("elf: symbol address match for %s of %s in %s: 0x%x + 0x%lx = 0x%lx\n",
11559 func_name, file_name, archive_path, entry.data_offset, ret,
11560 ret + entry.data_offset);
11561 ret += entry.data_offset;
11566 zip_archive_close(archive);
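/* Usage sketch (illustrative paths): probing a shared library stored
 * uncompressed inside an APK, using the "archive!/member" convention
 * handled by bpf_program__attach_uprobe_opts() below.
 */
static struct bpf_link *example_attach_uprobe_apk(const struct bpf_program *prog)
{
	LIBBPF_OPTS(bpf_uprobe_opts, opts, .func_name = "Java_com_example_run");

	return bpf_program__attach_uprobe_opts(prog, -1,
			"/data/app/com.example/base.apk!/lib/arm64-v8a/libapp.so",
			0, &opts);
}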
11570 static const char *arch_specific_lib_paths(void)
11573 * Based on https://packages.debian.org/sid/libc6.
11575 * Assume that the traced program is built for the same architecture
11576 * as libbpf, which should cover the vast majority of cases.
11578 #if defined(__x86_64__)
11579 return "/lib/x86_64-linux-gnu";
11580 #elif defined(__i386__)
11581 return "/lib/i386-linux-gnu";
11582 #elif defined(__s390x__)
11583 return "/lib/s390x-linux-gnu";
11584 #elif defined(__s390__)
11585 return "/lib/s390-linux-gnu";
11586 #elif defined(__arm__) && defined(__SOFTFP__)
11587 return "/lib/arm-linux-gnueabi";
11588 #elif defined(__arm__) && !defined(__SOFTFP__)
11589 return "/lib/arm-linux-gnueabihf";
11590 #elif defined(__aarch64__)
11591 return "/lib/aarch64-linux-gnu";
11592 #elif defined(__mips__) && defined(__MIPSEL__) && _MIPS_SZLONG == 64
11593 return "/lib/mips64el-linux-gnuabi64";
11594 #elif defined(__mips__) && defined(__MIPSEL__) && _MIPS_SZLONG == 32
11595 return "/lib/mipsel-linux-gnu";
11596 #elif defined(__powerpc64__) && __BYTE_ORDER__ == __ORDER_LITTLE_ENDIAN__
11597 return "/lib/powerpc64le-linux-gnu";
11598 #elif defined(__sparc__) && defined(__arch64__)
11599 return "/lib/sparc64-linux-gnu";
11600 #elif defined(__riscv) && __riscv_xlen == 64
11601 return "/lib/riscv64-linux-gnu";
11607 /* Get full path to program/shared library. */
11608 static int resolve_full_path(const char *file, char *result, size_t result_sz)
11610 const char *search_paths[3] = {};
11613 if (str_has_sfx(file, ".so") || strstr(file, ".so.")) {
11614 search_paths[0] = getenv("LD_LIBRARY_PATH");
11615 search_paths[1] = "/usr/lib64:/usr/lib";
11616 search_paths[2] = arch_specific_lib_paths();
11619 search_paths[0] = getenv("PATH");
11620 search_paths[1] = "/usr/bin:/usr/sbin";
11621 perm = R_OK | X_OK;
11624 for (i = 0; i < ARRAY_SIZE(search_paths); i++) {
11627 if (!search_paths[i])
11629 for (s = search_paths[i]; s != NULL; s = strchr(s, ':')) {
11635 next_path = strchr(s, ':');
11636 seg_len = next_path ? next_path - s : strlen(s);
11639 snprintf(result, result_sz, "%.*s/%s", seg_len, s, file);
11640 /* ensure it has required permissions */
11641 if (faccessat(AT_FDCWD, result, perm, AT_EACCESS) < 0)
11643 pr_debug("resolved '%s' to '%s'\n", file, result);
11651 bpf_program__attach_uprobe_multi(const struct bpf_program *prog,
11654 const char *func_pattern,
11655 const struct bpf_uprobe_multi_opts *opts)
11657 const unsigned long *ref_ctr_offsets = NULL, *offsets = NULL;
11658 LIBBPF_OPTS(bpf_link_create_opts, lopts);
11659 unsigned long *resolved_offsets = NULL;
11660 int err = 0, link_fd, prog_fd;
11661 struct bpf_link *link = NULL;
11662 char errmsg[STRERR_BUFSIZE];
11663 char full_path[PATH_MAX];
11664 const __u64 *cookies;
11668 if (!OPTS_VALID(opts, bpf_uprobe_multi_opts))
11669 return libbpf_err_ptr(-EINVAL);
11671 syms = OPTS_GET(opts, syms, NULL);
11672 offsets = OPTS_GET(opts, offsets, NULL);
11673 ref_ctr_offsets = OPTS_GET(opts, ref_ctr_offsets, NULL);
11674 cookies = OPTS_GET(opts, cookies, NULL);
11675 cnt = OPTS_GET(opts, cnt, 0);
11678 * User can specify 2 mutually exclusive sets of inputs:
11680 * 1) use only path/func_pattern/pid arguments
11682 * 2) use path/pid with allowed combinations of:
11683 * syms/offsets/ref_ctr_offsets/cookies/cnt
11685 * - syms and offsets are mutually exclusive
11686 * - ref_ctr_offsets and cookies are optional
11688 * Any other usage results in error.
11692 return libbpf_err_ptr(-EINVAL);
11693 if (!func_pattern && cnt == 0)
11694 return libbpf_err_ptr(-EINVAL);
11696 if (func_pattern) {
11697 if (syms || offsets || ref_ctr_offsets || cookies || cnt)
11698 return libbpf_err_ptr(-EINVAL);
11700 if (!!syms == !!offsets)
11701 return libbpf_err_ptr(-EINVAL);
11704 if (func_pattern) {
11705 if (!strchr(path, '/')) {
11706 err = resolve_full_path(path, full_path, sizeof(full_path));
11708 pr_warn("prog '%s': failed to resolve full path for '%s': %d\n",
11709 prog->name, path, err);
11710 return libbpf_err_ptr(err);
11715 err = elf_resolve_pattern_offsets(path, func_pattern,
11716 &resolved_offsets, &cnt);
11718 return libbpf_err_ptr(err);
11719 offsets = resolved_offsets;
11721 err = elf_resolve_syms_offsets(path, cnt, syms, &resolved_offsets, STT_FUNC);
11723 return libbpf_err_ptr(err);
11724 offsets = resolved_offsets;
11727 lopts.uprobe_multi.path = path;
11728 lopts.uprobe_multi.offsets = offsets;
11729 lopts.uprobe_multi.ref_ctr_offsets = ref_ctr_offsets;
11730 lopts.uprobe_multi.cookies = cookies;
11731 lopts.uprobe_multi.cnt = cnt;
11732 lopts.uprobe_multi.flags = OPTS_GET(opts, retprobe, false) ? BPF_F_UPROBE_MULTI_RETURN : 0;
11737 lopts.uprobe_multi.pid = pid;
11739 link = calloc(1, sizeof(*link));
11744 link->detach = &bpf_link__detach_fd;
11746 prog_fd = bpf_program__fd(prog);
11747 link_fd = bpf_link_create(prog_fd, 0, BPF_TRACE_UPROBE_MULTI, &lopts);
11750 pr_warn("prog '%s': failed to attach multi-uprobe: %s\n",
11751 prog->name, libbpf_strerror_r(err, errmsg, sizeof(errmsg)));
11754 link->fd = link_fd;
11755 free(resolved_offsets);
11759 free(resolved_offsets);
11761 return libbpf_err_ptr(err);
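/* Usage sketch (illustrative): one multi-uprobe link covering every libc
 * function matching a glob; offsets are resolved from the ELF symbol table
 * by elf_resolve_pattern_offsets() above.
 */
static struct bpf_link *example_attach_uprobe_multi(const struct bpf_program *prog)
{
	LIBBPF_OPTS(bpf_uprobe_multi_opts, opts); /* defaults: entry probes */

	return bpf_program__attach_uprobe_multi(prog, -1 /* any pid */,
						"libc.so.6", "pthread_mutex_*",
						&opts);
}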
11764 LIBBPF_API struct bpf_link *
11765 bpf_program__attach_uprobe_opts(const struct bpf_program *prog, pid_t pid,
11766 const char *binary_path, size_t func_offset,
11767 const struct bpf_uprobe_opts *opts)
11769 const char *archive_path = NULL, *archive_sep = NULL;
11770 char errmsg[STRERR_BUFSIZE], *legacy_probe = NULL;
11771 DECLARE_LIBBPF_OPTS(bpf_perf_event_opts, pe_opts);
11772 enum probe_attach_mode attach_mode;
11773 char full_path[PATH_MAX];
11774 struct bpf_link *link;
11775 size_t ref_ctr_off;
11777 bool retprobe, legacy;
11778 const char *func_name;
11780 if (!OPTS_VALID(opts, bpf_uprobe_opts))
11781 return libbpf_err_ptr(-EINVAL);
11783 attach_mode = OPTS_GET(opts, attach_mode, PROBE_ATTACH_MODE_DEFAULT);
11784 retprobe = OPTS_GET(opts, retprobe, false);
11785 ref_ctr_off = OPTS_GET(opts, ref_ctr_offset, 0);
11786 pe_opts.bpf_cookie = OPTS_GET(opts, bpf_cookie, 0);
11789 return libbpf_err_ptr(-EINVAL);
11791 /* Check if "binary_path" refers to an archive. */
11792 archive_sep = strstr(binary_path, "!/");
11794 full_path[0] = '\0';
11795 libbpf_strlcpy(full_path, binary_path,
11796 min(sizeof(full_path), (size_t)(archive_sep - binary_path + 1)));
11797 archive_path = full_path;
11798 binary_path = archive_sep + 2;
11799 } else if (!strchr(binary_path, '/')) {
11800 err = resolve_full_path(binary_path, full_path, sizeof(full_path));
11802 pr_warn("prog '%s': failed to resolve full path for '%s': %d\n",
11803 prog->name, binary_path, err);
11804 return libbpf_err_ptr(err);
11806 binary_path = full_path;
11808 func_name = OPTS_GET(opts, func_name, NULL);
11812 if (archive_path) {
11813 sym_off = elf_find_func_offset_from_archive(archive_path, binary_path,
11815 binary_path = archive_path;
11817 sym_off = elf_find_func_offset_from_file(binary_path, func_name);
11820 return libbpf_err_ptr(sym_off);
11821 func_offset += sym_off;
11824 legacy = determine_uprobe_perf_type() < 0;
11825 switch (attach_mode) {
11826 case PROBE_ATTACH_MODE_LEGACY:
11828 pe_opts.force_ioctl_attach = true;
11830 case PROBE_ATTACH_MODE_PERF:
11832 return libbpf_err_ptr(-ENOTSUP);
11833 pe_opts.force_ioctl_attach = true;
11835 case PROBE_ATTACH_MODE_LINK:
11836 if (legacy || !kernel_supports(prog->obj, FEAT_PERF_LINK))
11837 return libbpf_err_ptr(-ENOTSUP);
11839 case PROBE_ATTACH_MODE_DEFAULT:
11842 return libbpf_err_ptr(-EINVAL);
11846 pfd = perf_event_open_probe(true /* uprobe */, retprobe, binary_path,
11847 func_offset, pid, ref_ctr_off);
11849 char probe_name[PATH_MAX + 64];
11852 return libbpf_err_ptr(-EINVAL);
11854 gen_uprobe_legacy_event_name(probe_name, sizeof(probe_name),
11855 binary_path, func_offset);
11857 legacy_probe = strdup(probe_name);
11859 return libbpf_err_ptr(-ENOMEM);
11861 pfd = perf_event_uprobe_open_legacy(legacy_probe, retprobe,
11862 binary_path, func_offset, pid);
11866 pr_warn("prog '%s': failed to create %s '%s:0x%zx' perf event: %s\n",
11867 prog->name, retprobe ? "uretprobe" : "uprobe",
11868 binary_path, func_offset,
11869 libbpf_strerror_r(err, errmsg, sizeof(errmsg)));
11873 link = bpf_program__attach_perf_event_opts(prog, pfd, &pe_opts);
11874 err = libbpf_get_error(link);
11877 pr_warn("prog '%s': failed to attach to %s '%s:0x%zx': %s\n",
11878 prog->name, retprobe ? "uretprobe" : "uprobe",
11879 binary_path, func_offset,
11880 libbpf_strerror_r(err, errmsg, sizeof(errmsg)));
11881 goto err_clean_legacy;
11884 struct bpf_link_perf *perf_link = container_of(link, struct bpf_link_perf, link);
11886 perf_link->legacy_probe_name = legacy_probe;
11887 perf_link->legacy_is_kprobe = false;
11888 perf_link->legacy_is_retprobe = retprobe;
11894 remove_uprobe_event_legacy(legacy_probe, retprobe);
11896 free(legacy_probe);
11897 return libbpf_err_ptr(err);
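/* Usage sketch (illustrative): symbol-based uretprobe attachment through
 * the opts API just defined; func_offset is added on top of the resolved
 * symbol address, and the cookie is readable via bpf_get_attach_cookie().
 */
static struct bpf_link *example_attach_uretprobe(const struct bpf_program *prog)
{
	LIBBPF_OPTS(bpf_uprobe_opts, opts,
		.func_name = "malloc",
		.retprobe = true,
		.bpf_cookie = 0xdead,
	);

	return bpf_program__attach_uprobe_opts(prog, -1, "libc.so.6", 0, &opts);
}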
11900 /* Format of u[ret]probe section definition supporting auto-attach:
11901 * u[ret]probe/binary:function[+offset]
11903 * binary can be an absolute/relative path or a filename; the latter is resolved to a
11904 * full binary path via bpf_program__attach_uprobe_opts.
11906 * Specifying uprobe+ ensures we carry out strict matching; either "uprobe" must be
11907 * specified (and auto-attach is not possible) or the above format is specified for auto-attach.
11910 static int attach_uprobe(const struct bpf_program *prog, long cookie, struct bpf_link **link)
11912 DECLARE_LIBBPF_OPTS(bpf_uprobe_opts, opts);
11913 char *probe_type = NULL, *binary_path = NULL, *func_name = NULL, *func_off;
11914 int n, c, ret = -EINVAL;
11919 n = sscanf(prog->sec_name, "%m[^/]/%m[^:]:%m[^\n]",
11920 &probe_type, &binary_path, &func_name);
11923 /* handle SEC("u[ret]probe") - format is valid, but auto-attach is impossible. */
11927 pr_warn("prog '%s': section '%s' missing ':function[+offset]' specification\n",
11928 prog->name, prog->sec_name);
11931 /* check if user specified `+offset`; if so, it must be the
11932 * last part of the string, so make sure sscanf read to EOL
11934 func_off = strrchr(func_name, '+');
11936 n = sscanf(func_off, "+%li%n", &offset, &c);
11937 if (n == 1 && *(func_off + c) == '\0')
11938 func_off[0] = '\0';
11942 opts.retprobe = strcmp(probe_type, "uretprobe") == 0 ||
11943 strcmp(probe_type, "uretprobe.s") == 0;
11944 if (opts.retprobe && offset != 0) {
11945 pr_warn("prog '%s': uretprobes do not support offset specification\n",
11949 opts.func_name = func_name;
11950 *link = bpf_program__attach_uprobe_opts(prog, -1, binary_path, offset, &opts);
11951 ret = libbpf_get_error(*link);
11954 pr_warn("prog '%s': invalid format of section definition '%s'\n", prog->name,
11965 struct bpf_link *bpf_program__attach_uprobe(const struct bpf_program *prog,
11966 bool retprobe, pid_t pid,
11967 const char *binary_path,
11968 size_t func_offset)
11970 DECLARE_LIBBPF_OPTS(bpf_uprobe_opts, opts, .retprobe = retprobe);
11972 return bpf_program__attach_uprobe_opts(prog, pid, binary_path, func_offset, &opts);
11975 struct bpf_link *bpf_program__attach_usdt(const struct bpf_program *prog,
11976 pid_t pid, const char *binary_path,
11977 const char *usdt_provider, const char *usdt_name,
11978 const struct bpf_usdt_opts *opts)
11980 char resolved_path[512];
11981 struct bpf_object *obj = prog->obj;
11982 struct bpf_link *link;
11986 if (!OPTS_VALID(opts, bpf_uprobe_opts))
11987 return libbpf_err_ptr(-EINVAL);
11989 if (bpf_program__fd(prog) < 0) {
11990 pr_warn("prog '%s': can't attach BPF program w/o FD (did you load it?)\n",
11992 return libbpf_err_ptr(-EINVAL);
11996 return libbpf_err_ptr(-EINVAL);
11998 if (!strchr(binary_path, '/')) {
11999 err = resolve_full_path(binary_path, resolved_path, sizeof(resolved_path));
12001 pr_warn("prog '%s': failed to resolve full path for '%s': %d\n",
12002 prog->name, binary_path, err);
12003 return libbpf_err_ptr(err);
12005 binary_path = resolved_path;
12008 /* USDT manager is instantiated lazily on first USDT attach. It will
12009 * be destroyed together with BPF object in bpf_object__close().
12011 if (IS_ERR(obj->usdt_man))
12012 return libbpf_ptr(obj->usdt_man);
12013 if (!obj->usdt_man) {
12014 obj->usdt_man = usdt_manager_new(obj);
12015 if (IS_ERR(obj->usdt_man))
12016 return libbpf_ptr(obj->usdt_man);
12019 usdt_cookie = OPTS_GET(opts, usdt_cookie, 0);
12020 link = usdt_manager_attach_usdt(obj->usdt_man, prog, pid, binary_path,
12021 usdt_provider, usdt_name, usdt_cookie);
12022 err = libbpf_get_error(link);
12024 return libbpf_err_ptr(err);
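/* Usage sketch (illustrative provider/name): attaching to a USDT note;
 * provider/name pairs are the ones visible via `readelf -n` on the target
 * binary.
 */
static struct bpf_link *example_attach_usdt(const struct bpf_program *prog)
{
	LIBBPF_OPTS(bpf_usdt_opts, opts, .usdt_cookie = 1);

	return bpf_program__attach_usdt(prog, -1 /* any process */,
					"/usr/sbin/mysqld", "mysql",
					"query__start", &opts);
}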
12028 static int attach_usdt(const struct bpf_program *prog, long cookie, struct bpf_link **link)
12030 char *path = NULL, *provider = NULL, *name = NULL;
12031 const char *sec_name;
12034 sec_name = bpf_program__section_name(prog);
12035 if (strcmp(sec_name, "usdt") == 0) {
12036 /* no auto-attach for just SEC("usdt") */
12041 n = sscanf(sec_name, "usdt/%m[^:]:%m[^:]:%m[^:]", &path, &provider, &name);
12043 pr_warn("invalid section '%s', expected SEC(\"usdt/<path>:<provider>:<name>\")\n",
12047 *link = bpf_program__attach_usdt(prog, -1 /* any process */, path,
12048 provider, name, NULL);
12049 err = libbpf_get_error(*link);
12057 static int determine_tracepoint_id(const char *tp_category,
12058 const char *tp_name)
12060 char file[PATH_MAX];
12063 ret = snprintf(file, sizeof(file), "%s/events/%s/%s/id",
12064 tracefs_path(), tp_category, tp_name);
12067 if (ret >= sizeof(file)) {
12068 pr_debug("tracepoint %s/%s path is too long\n",
12069 tp_category, tp_name);
12072 return parse_uint_from_file(file, "%d\n");
12075 static int perf_event_open_tracepoint(const char *tp_category,
12076 const char *tp_name)
12078 const size_t attr_sz = sizeof(struct perf_event_attr);
12079 struct perf_event_attr attr;
12080 char errmsg[STRERR_BUFSIZE];
12081 int tp_id, pfd, err;
12083 tp_id = determine_tracepoint_id(tp_category, tp_name);
12085 pr_warn("failed to determine tracepoint '%s/%s' perf event ID: %s\n",
12086 tp_category, tp_name,
12087 libbpf_strerror_r(tp_id, errmsg, sizeof(errmsg)));
12091 memset(&attr, 0, attr_sz);
12092 attr.type = PERF_TYPE_TRACEPOINT;
12093 attr.size = attr_sz;
12094 attr.config = tp_id;
12096 pfd = syscall(__NR_perf_event_open, &attr, -1 /* pid */, 0 /* cpu */,
12097 -1 /* group_fd */, PERF_FLAG_FD_CLOEXEC);
12100 pr_warn("tracepoint '%s/%s' perf_event_open() failed: %s\n",
12101 tp_category, tp_name,
12102 libbpf_strerror_r(err, errmsg, sizeof(errmsg)));
12108 struct bpf_link *bpf_program__attach_tracepoint_opts(const struct bpf_program *prog,
12109 const char *tp_category,
12110 const char *tp_name,
12111 const struct bpf_tracepoint_opts *opts)
12113 DECLARE_LIBBPF_OPTS(bpf_perf_event_opts, pe_opts);
12114 char errmsg[STRERR_BUFSIZE];
12115 struct bpf_link *link;
12118 if (!OPTS_VALID(opts, bpf_tracepoint_opts))
12119 return libbpf_err_ptr(-EINVAL);
12121 pe_opts.bpf_cookie = OPTS_GET(opts, bpf_cookie, 0);
12123 pfd = perf_event_open_tracepoint(tp_category, tp_name);
12125 pr_warn("prog '%s': failed to create tracepoint '%s/%s' perf event: %s\n",
12126 prog->name, tp_category, tp_name,
12127 libbpf_strerror_r(pfd, errmsg, sizeof(errmsg)));
12128 return libbpf_err_ptr(pfd);
12130 link = bpf_program__attach_perf_event_opts(prog, pfd, &pe_opts);
12131 err = libbpf_get_error(link);
12134 pr_warn("prog '%s': failed to attach to tracepoint '%s/%s': %s\n",
12135 prog->name, tp_category, tp_name,
12136 libbpf_strerror_r(err, errmsg, sizeof(errmsg)));
12137 return libbpf_err_ptr(err);
12142 struct bpf_link *bpf_program__attach_tracepoint(const struct bpf_program *prog,
12143 const char *tp_category,
12144 const char *tp_name)
12146 return bpf_program__attach_tracepoint_opts(prog, tp_category, tp_name, NULL);
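/* Usage sketch: attaching to a stable tracepoint by category/name, matching
 * the tracefs layout consulted by determine_tracepoint_id() above.
 */
static struct bpf_link *example_attach_tp(const struct bpf_program *prog)
{
	return bpf_program__attach_tracepoint(prog, "syscalls",
					      "sys_enter_openat");
}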
12149 static int attach_tp(const struct bpf_program *prog, long cookie, struct bpf_link **link)
12151 char *sec_name, *tp_cat, *tp_name;
12155 /* no auto-attach for SEC("tp") or SEC("tracepoint") */
12156 if (strcmp(prog->sec_name, "tp") == 0 || strcmp(prog->sec_name, "tracepoint") == 0)
12159 sec_name = strdup(prog->sec_name);
12163 /* extract "tp/<category>/<name>" or "tracepoint/<category>/<name>" */
12164 if (str_has_pfx(prog->sec_name, "tp/"))
12165 tp_cat = sec_name + sizeof("tp/") - 1;
12167 tp_cat = sec_name + sizeof("tracepoint/") - 1;
12168 tp_name = strchr(tp_cat, '/');
12176 *link = bpf_program__attach_tracepoint(prog, tp_cat, tp_name);
12178 return libbpf_get_error(*link);
12181 struct bpf_link *bpf_program__attach_raw_tracepoint(const struct bpf_program *prog,
12182 const char *tp_name)
12184 char errmsg[STRERR_BUFSIZE];
12185 struct bpf_link *link;
12188 prog_fd = bpf_program__fd(prog);
12190 pr_warn("prog '%s': can't attach before loaded\n", prog->name);
12191 return libbpf_err_ptr(-EINVAL);
12194 link = calloc(1, sizeof(*link));
12196 return libbpf_err_ptr(-ENOMEM);
12197 link->detach = &bpf_link__detach_fd;
12199 pfd = bpf_raw_tracepoint_open(tp_name, prog_fd);
12203 pr_warn("prog '%s': failed to attach to raw tracepoint '%s': %s\n",
12204 prog->name, tp_name, libbpf_strerror_r(pfd, errmsg, sizeof(errmsg)));
12205 return libbpf_err_ptr(pfd);
12211 static int attach_raw_tp(const struct bpf_program *prog, long cookie, struct bpf_link **link)
12213 static const char *const prefixes[] = {
12217 "raw_tracepoint.w",
12220 const char *tp_name = NULL;
12224 for (i = 0; i < ARRAY_SIZE(prefixes); i++) {
12227 if (!str_has_pfx(prog->sec_name, prefixes[i]))
12230 pfx_len = strlen(prefixes[i]);
12231 /* no auto-attach for the bare case, e.g., SEC("raw_tp") */
12232 if (prog->sec_name[pfx_len] == '\0')
12235 if (prog->sec_name[pfx_len] != '/')
12238 tp_name = prog->sec_name + pfx_len + 1;
12243 pr_warn("prog '%s': invalid section name '%s'\n",
12244 prog->name, prog->sec_name);
12248 *link = bpf_program__attach_raw_tracepoint(prog, tp_name);
12249 return libbpf_get_error(*link);
12252 /* Common logic for all BPF program types that attach to a btf_id */
12253 static struct bpf_link *bpf_program__attach_btf_id(const struct bpf_program *prog,
12254 const struct bpf_trace_opts *opts)
12256 LIBBPF_OPTS(bpf_link_create_opts, link_opts);
12257 char errmsg[STRERR_BUFSIZE];
12258 struct bpf_link *link;
12261 if (!OPTS_VALID(opts, bpf_trace_opts))
12262 return libbpf_err_ptr(-EINVAL);
12264 prog_fd = bpf_program__fd(prog);
12266 pr_warn("prog '%s': can't attach before loaded\n", prog->name);
12267 return libbpf_err_ptr(-EINVAL);
12270 link = calloc(1, sizeof(*link));
12272 return libbpf_err_ptr(-ENOMEM);
12273 link->detach = &bpf_link__detach_fd;
12275 /* libbpf is smart enough to redirect to BPF_RAW_TRACEPOINT_OPEN on old kernels */
12276 link_opts.tracing.cookie = OPTS_GET(opts, cookie, 0);
12277 pfd = bpf_link_create(prog_fd, 0, bpf_program__expected_attach_type(prog), &link_opts);
12281 pr_warn("prog '%s': failed to attach: %s\n",
12282 prog->name, libbpf_strerror_r(pfd, errmsg, sizeof(errmsg)));
12283 return libbpf_err_ptr(pfd);
12289 struct bpf_link *bpf_program__attach_trace(const struct bpf_program *prog)
12291 return bpf_program__attach_btf_id(prog, NULL);
12294 struct bpf_link *bpf_program__attach_trace_opts(const struct bpf_program *prog,
12295 const struct bpf_trace_opts *opts)
12297 return bpf_program__attach_btf_id(prog, opts);
12300 struct bpf_link *bpf_program__attach_lsm(const struct bpf_program *prog)
12302 return bpf_program__attach_btf_id(prog, NULL);
12305 static int attach_trace(const struct bpf_program *prog, long cookie, struct bpf_link **link)
12307 *link = bpf_program__attach_trace(prog);
12308 return libbpf_get_error(*link);
12311 static int attach_lsm(const struct bpf_program *prog, long cookie, struct bpf_link **link)
12313 *link = bpf_program__attach_lsm(prog);
12314 return libbpf_get_error(*link);
12317 static struct bpf_link *
12318 bpf_program_attach_fd(const struct bpf_program *prog,
12319 int target_fd, const char *target_name,
12320 const struct bpf_link_create_opts *opts)
12322 enum bpf_attach_type attach_type;
12323 char errmsg[STRERR_BUFSIZE];
12324 struct bpf_link *link;
12325 int prog_fd, link_fd;
12327 prog_fd = bpf_program__fd(prog);
12329 pr_warn("prog '%s': can't attach before loaded\n", prog->name);
12330 return libbpf_err_ptr(-EINVAL);
12333 link = calloc(1, sizeof(*link));
12335 return libbpf_err_ptr(-ENOMEM);
12336 link->detach = &bpf_link__detach_fd;
12338 attach_type = bpf_program__expected_attach_type(prog);
12339 link_fd = bpf_link_create(prog_fd, target_fd, attach_type, opts);
12343 pr_warn("prog '%s': failed to attach to %s: %s\n",
12344 prog->name, target_name,
12345 libbpf_strerror_r(link_fd, errmsg, sizeof(errmsg)));
12346 return libbpf_err_ptr(link_fd);
12348 link->fd = link_fd;
12353 bpf_program__attach_cgroup(const struct bpf_program *prog, int cgroup_fd)
12355 return bpf_program_attach_fd(prog, cgroup_fd, "cgroup", NULL);
12359 bpf_program__attach_netns(const struct bpf_program *prog, int netns_fd)
12361 return bpf_program_attach_fd(prog, netns_fd, "netns", NULL);
12364 struct bpf_link *bpf_program__attach_xdp(const struct bpf_program *prog, int ifindex)
12366 /* target_fd/target_ifindex use the same field in LINK_CREATE */
12367 return bpf_program_attach_fd(prog, ifindex, "xdp", NULL);
12371 bpf_program__attach_tcx(const struct bpf_program *prog, int ifindex,
12372 const struct bpf_tcx_opts *opts)
12374 LIBBPF_OPTS(bpf_link_create_opts, link_create_opts);
12378 if (!OPTS_VALID(opts, bpf_tcx_opts))
12379 return libbpf_err_ptr(-EINVAL);
12381 relative_id = OPTS_GET(opts, relative_id, 0);
12382 relative_fd = OPTS_GET(opts, relative_fd, 0);
12384 /* validate we don't have unexpected combinations of non-zero fields */
12386 pr_warn("prog '%s': target netdevice ifindex cannot be zero\n",
12388 return libbpf_err_ptr(-EINVAL);
12390 if (relative_fd && relative_id) {
12391 pr_warn("prog '%s': relative_fd and relative_id cannot be set at the same time\n",
12393 return libbpf_err_ptr(-EINVAL);
12396 link_create_opts.tcx.expected_revision = OPTS_GET(opts, expected_revision, 0);
12397 link_create_opts.tcx.relative_fd = relative_fd;
12398 link_create_opts.tcx.relative_id = relative_id;
12399 link_create_opts.flags = OPTS_GET(opts, flags, 0);
12401 /* target_fd/target_ifindex use the same field in LINK_CREATE */
12402 return bpf_program_attach_fd(prog, ifindex, "tcx", &link_create_opts);
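/* Usage sketch (illustrative): attaching a tcx program on a given ifindex;
 * relative_fd/relative_id stay zero unless ordering relative to another
 * program is required.
 */
static struct bpf_link *example_attach_tcx(const struct bpf_program *prog, int ifindex)
{
	LIBBPF_OPTS(bpf_tcx_opts, opts);

	return bpf_program__attach_tcx(prog, ifindex, &opts);
}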
12406 bpf_program__attach_netkit(const struct bpf_program *prog, int ifindex,
12407 const struct bpf_netkit_opts *opts)
12409 LIBBPF_OPTS(bpf_link_create_opts, link_create_opts);
12413 if (!OPTS_VALID(opts, bpf_netkit_opts))
12414 return libbpf_err_ptr(-EINVAL);
12416 relative_id = OPTS_GET(opts, relative_id, 0);
12417 relative_fd = OPTS_GET(opts, relative_fd, 0);
12419 /* validate we don't have unexpected combinations of non-zero fields */
12421 pr_warn("prog '%s': target netdevice ifindex cannot be zero\n",
12423 return libbpf_err_ptr(-EINVAL);
12425 if (relative_fd && relative_id) {
12426 pr_warn("prog '%s': relative_fd and relative_id cannot be set at the same time\n",
12428 return libbpf_err_ptr(-EINVAL);
12431 link_create_opts.netkit.expected_revision = OPTS_GET(opts, expected_revision, 0);
12432 link_create_opts.netkit.relative_fd = relative_fd;
12433 link_create_opts.netkit.relative_id = relative_id;
12434 link_create_opts.flags = OPTS_GET(opts, flags, 0);
12436 return bpf_program_attach_fd(prog, ifindex, "netkit", &link_create_opts);
12439 struct bpf_link *bpf_program__attach_freplace(const struct bpf_program *prog,
12441 const char *attach_func_name)
12445 if (!!target_fd != !!attach_func_name) {
12446 pr_warn("prog '%s': supply none or both of target_fd and attach_func_name\n",
12448 return libbpf_err_ptr(-EINVAL);
12451 if (prog->type != BPF_PROG_TYPE_EXT) {
12452 pr_warn("prog '%s': only BPF_PROG_TYPE_EXT can attach as freplace\n",
12454 return libbpf_err_ptr(-EINVAL);
12458 LIBBPF_OPTS(bpf_link_create_opts, target_opts);
12460 btf_id = libbpf_find_prog_btf_id(attach_func_name, target_fd);
12462 return libbpf_err_ptr(btf_id);
12464 target_opts.target_btf_id = btf_id;
12466 return bpf_program_attach_fd(prog, target_fd, "freplace",
12469 /* no target, so use raw_tracepoint_open for compatibility with old kernels */
12472 return bpf_program__attach_trace(prog);
12477 bpf_program__attach_iter(const struct bpf_program *prog,
12478 const struct bpf_iter_attach_opts *opts)
12480 DECLARE_LIBBPF_OPTS(bpf_link_create_opts, link_create_opts);
12481 char errmsg[STRERR_BUFSIZE];
12482 struct bpf_link *link;
12483 int prog_fd, link_fd;
12484 __u32 target_fd = 0;
12486 if (!OPTS_VALID(opts, bpf_iter_attach_opts))
12487 return libbpf_err_ptr(-EINVAL);
12489 link_create_opts.iter_info = OPTS_GET(opts, link_info, (void *)0);
12490 link_create_opts.iter_info_len = OPTS_GET(opts, link_info_len, 0);
12492 prog_fd = bpf_program__fd(prog);
12494 pr_warn("prog '%s': can't attach before loaded\n", prog->name);
12495 return libbpf_err_ptr(-EINVAL);
12498 link = calloc(1, sizeof(*link));
12500 return libbpf_err_ptr(-ENOMEM);
12501 link->detach = &bpf_link__detach_fd;
12503 link_fd = bpf_link_create(prog_fd, target_fd, BPF_TRACE_ITER,
12504 &link_create_opts);
12508 pr_warn("prog '%s': failed to attach to iterator: %s\n",
12509 prog->name, libbpf_strerror_r(link_fd, errmsg, sizeof(errmsg)));
12510 return libbpf_err_ptr(link_fd);
12512 link->fd = link_fd;
12516 static int attach_iter(const struct bpf_program *prog, long cookie, struct bpf_link **link)
12518 *link = bpf_program__attach_iter(prog, NULL);
12519 return libbpf_get_error(*link);
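/* Usage sketch (illustrative): creating a BPF iterator link and draining
 * its text output through bpf_iter_create() from bpf.h.
 */
static int example_read_iter(const struct bpf_program *prog)
{
	struct bpf_link *link;
	char buf[256];
	int iter_fd, n;

	link = bpf_program__attach_iter(prog, NULL);
	if (!link)
		return -errno;

	iter_fd = bpf_iter_create(bpf_link__fd(link));
	if (iter_fd < 0) {
		bpf_link__destroy(link);
		return iter_fd;
	}
	while ((n = read(iter_fd, buf, sizeof(buf))) > 0)
		; /* consume text emitted by the iterator program */
	close(iter_fd);
	bpf_link__destroy(link);
	return n < 0 ? -errno : 0;
}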
12522 struct bpf_link *bpf_program__attach_netfilter(const struct bpf_program *prog,
12523 const struct bpf_netfilter_opts *opts)
12525 LIBBPF_OPTS(bpf_link_create_opts, lopts);
12526 struct bpf_link *link;
12527 int prog_fd, link_fd;
12529 if (!OPTS_VALID(opts, bpf_netfilter_opts))
12530 return libbpf_err_ptr(-EINVAL);
12532 prog_fd = bpf_program__fd(prog);
12534 pr_warn("prog '%s': can't attach before loaded\n", prog->name);
12535 return libbpf_err_ptr(-EINVAL);
12538 link = calloc(1, sizeof(*link));
12540 return libbpf_err_ptr(-ENOMEM);
12542 link->detach = &bpf_link__detach_fd;
12544 lopts.netfilter.pf = OPTS_GET(opts, pf, 0);
12545 lopts.netfilter.hooknum = OPTS_GET(opts, hooknum, 0);
12546 lopts.netfilter.priority = OPTS_GET(opts, priority, 0);
12547 lopts.netfilter.flags = OPTS_GET(opts, flags, 0);
12549 link_fd = bpf_link_create(prog_fd, 0, BPF_NETFILTER, &lopts);
12551 char errmsg[STRERR_BUFSIZE];
12555 pr_warn("prog '%s': failed to attach to netfilter: %s\n",
12556 prog->name, libbpf_strerror_r(link_fd, errmsg, sizeof(errmsg)));
12557 return libbpf_err_ptr(link_fd);
12559 link->fd = link_fd;
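/* Usage sketch (illustrative): netfilter attachment; the numeric constants
 * are the uapi values for NFPROTO_IPV4/NF_INET_LOCAL_IN, spelled out to
 * avoid extra includes.
 */
static struct bpf_link *example_attach_netfilter(const struct bpf_program *prog)
{
	LIBBPF_OPTS(bpf_netfilter_opts, opts,
		.pf = 2,	/* NFPROTO_IPV4 */
		.hooknum = 1,	/* NF_INET_LOCAL_IN */
		.priority = -128,
	);

	return bpf_program__attach_netfilter(prog, &opts);
}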
12564 struct bpf_link *bpf_program__attach(const struct bpf_program *prog)
12566 struct bpf_link *link = NULL;
12569 if (!prog->sec_def || !prog->sec_def->prog_attach_fn)
12570 return libbpf_err_ptr(-EOPNOTSUPP);
12572 err = prog->sec_def->prog_attach_fn(prog, prog->sec_def->cookie, &link);
12574 return libbpf_err_ptr(err);
12576 /* When calling bpf_program__attach() explicitly, auto-attach support
12577 * is expected to work, so NULL returned link is considered an error.
12578 * This is different for skeleton's attach, see comment in
12579 * bpf_object__attach_skeleton().
12582 return libbpf_err_ptr(-EOPNOTSUPP);
12587 struct bpf_link_struct_ops {
12588 struct bpf_link link;
12592 static int bpf_link__detach_struct_ops(struct bpf_link *link)
12594 struct bpf_link_struct_ops *st_link;
12597 st_link = container_of(link, struct bpf_link_struct_ops, link);
12599 if (st_link->map_fd < 0)
12600 /* w/o a real link */
12601 return bpf_map_delete_elem(link->fd, &zero);
12603 return close(link->fd);
12606 struct bpf_link *bpf_map__attach_struct_ops(const struct bpf_map *map)
12608 struct bpf_link_struct_ops *link;
12612 if (!bpf_map__is_struct_ops(map) || map->fd == -1)
12613 return libbpf_err_ptr(-EINVAL);
12615 link = calloc(1, sizeof(*link));
12617 return libbpf_err_ptr(-EINVAL);
12619 /* kern_vdata should be prepared during the loading phase. */
12620 err = bpf_map_update_elem(map->fd, &zero, map->st_ops->kern_vdata, 0);
12621 /* It can be EBUSY if the map has been used to create or
12622 * update a link before. We don't allow updating the value of
12623 * a struct_ops once it is set, which ensures that the value
12624 * never changes. So, it is safe to skip EBUSY.
12626 if (err && (!(map->def.map_flags & BPF_F_LINK) || err != -EBUSY)) {
12628 return libbpf_err_ptr(err);
12631 link->link.detach = bpf_link__detach_struct_ops;
12633 if (!(map->def.map_flags & BPF_F_LINK)) {
12634 /* w/o a real link */
12635 link->link.fd = map->fd;
12637 return &link->link;
12640 fd = bpf_link_create(map->fd, 0, BPF_STRUCT_OPS, NULL);
12643 return libbpf_err_ptr(fd);
12646 link->link.fd = fd;
12647 link->map_fd = map->fd;
12649 return &link->link;
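/* Usage sketch ("my_ops" is a hypothetical map name): registering a
 * struct_ops map after bpf_object__load(); with BPF_F_LINK maps this yields
 * a real kernel link that can later be swapped via bpf_link__update_map().
 */
static struct bpf_link *example_register_struct_ops(struct bpf_object *obj)
{
	struct bpf_map *map = bpf_object__find_map_by_name(obj, "my_ops");

	return map ? bpf_map__attach_struct_ops(map) : NULL;
}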
12653 * Swap the backing struct_ops of a link with a new struct_ops map.
12655 int bpf_link__update_map(struct bpf_link *link, const struct bpf_map *map)
12657 struct bpf_link_struct_ops *st_ops_link;
12661 if (!bpf_map__is_struct_ops(map) || !map_is_created(map))
12664 st_ops_link = container_of(link, struct bpf_link_struct_ops, link);
12665 /* Ensure the type of a link is correct */
12666 if (st_ops_link->map_fd < 0)
12669 err = bpf_map_update_elem(map->fd, &zero, map->st_ops->kern_vdata, 0);
12670 /* It can be EBUSY if the map has been used to create or
12671 * update a link before. We don't allow updating the value of
12672 * a struct_ops once it is set, which ensures that the value
12673 * never changes. So, it is safe to skip EBUSY.
12675 if (err && err != -EBUSY)
12678 err = bpf_link_update(link->fd, map->fd, NULL);
12682 st_ops_link->map_fd = map->fd;
12687 typedef enum bpf_perf_event_ret (*bpf_perf_event_print_t)(struct perf_event_header *hdr,
12688 void *private_data);
12690 static enum bpf_perf_event_ret
12691 perf_event_read_simple(void *mmap_mem, size_t mmap_size, size_t page_size,
12692 void **copy_mem, size_t *copy_size,
12693 bpf_perf_event_print_t fn, void *private_data)
12695 struct perf_event_mmap_page *header = mmap_mem;
12696 __u64 data_head = ring_buffer_read_head(header);
12697 __u64 data_tail = header->data_tail;
12698 void *base = ((__u8 *)header) + page_size;
12699 int ret = LIBBPF_PERF_EVENT_CONT;
12700 struct perf_event_header *ehdr;
12703 while (data_head != data_tail) {
12704 ehdr = base + (data_tail & (mmap_size - 1));
12705 ehdr_size = ehdr->size;
12707 if (((void *)ehdr) + ehdr_size > base + mmap_size) {
12708 void *copy_start = ehdr;
12709 size_t len_first = base + mmap_size - copy_start;
12710 size_t len_secnd = ehdr_size - len_first;
12712 if (*copy_size < ehdr_size) {
12714 *copy_mem = malloc(ehdr_size);
12717 ret = LIBBPF_PERF_EVENT_ERROR;
12720 *copy_size = ehdr_size;
12723 memcpy(*copy_mem, copy_start, len_first);
12724 memcpy(*copy_mem + len_first, base, len_secnd);
12728 ret = fn(ehdr, private_data);
12729 data_tail += ehdr_size;
12730 if (ret != LIBBPF_PERF_EVENT_CONT)
12734 ring_buffer_write_tail(header, data_tail);
12735 return libbpf_err(ret);
12738 struct perf_buffer;
12740 struct perf_buffer_params {
12741 struct perf_event_attr *attr;
12742 /* if event_cb is specified, it takes precedence */
12743 perf_buffer_event_fn event_cb;
12744 /* sample_cb and lost_cb are higher-level common-case callbacks */
12745 perf_buffer_sample_fn sample_cb;
12746 perf_buffer_lost_fn lost_cb;
12753 struct perf_cpu_buf {
12754 struct perf_buffer *pb;
12755 void *base; /* mmap()'ed memory */
12756 void *buf; /* for reconstructing segmented data */
12763 struct perf_buffer {
12764 perf_buffer_event_fn event_cb;
12765 perf_buffer_sample_fn sample_cb;
12766 perf_buffer_lost_fn lost_cb;
12767 void *ctx; /* passed into callbacks */
12771 struct perf_cpu_buf **cpu_bufs;
12772 struct epoll_event *events;
12773 int cpu_cnt; /* number of allocated CPU buffers */
12774 int epoll_fd; /* epoll FD used to poll all per-CPU bufs */
12775 int map_fd; /* BPF_MAP_TYPE_PERF_EVENT_ARRAY BPF map FD */
12778 static void perf_buffer__free_cpu_buf(struct perf_buffer *pb,
12779 struct perf_cpu_buf *cpu_buf)
12783 if (cpu_buf->base &&
12784 munmap(cpu_buf->base, pb->mmap_size + pb->page_size))
12785 pr_warn("failed to munmap cpu_buf #%d\n", cpu_buf->cpu);
12786 if (cpu_buf->fd >= 0) {
12787 ioctl(cpu_buf->fd, PERF_EVENT_IOC_DISABLE, 0);
12788 close(cpu_buf->fd);
12790 free(cpu_buf->buf);
12794 void perf_buffer__free(struct perf_buffer *pb)
12798 if (IS_ERR_OR_NULL(pb))
12800 if (pb->cpu_bufs) {
12801 for (i = 0; i < pb->cpu_cnt; i++) {
12802 struct perf_cpu_buf *cpu_buf = pb->cpu_bufs[i];
12807 bpf_map_delete_elem(pb->map_fd, &cpu_buf->map_key);
12808 perf_buffer__free_cpu_buf(pb, cpu_buf);
12810 free(pb->cpu_bufs);
12812 if (pb->epoll_fd >= 0)
12813 close(pb->epoll_fd);
12818 static struct perf_cpu_buf *
12819 perf_buffer__open_cpu_buf(struct perf_buffer *pb, struct perf_event_attr *attr,
12820 int cpu, int map_key)
12822 struct perf_cpu_buf *cpu_buf;
12823 char msg[STRERR_BUFSIZE];
12826 cpu_buf = calloc(1, sizeof(*cpu_buf));
12828 return ERR_PTR(-ENOMEM);
12831 cpu_buf->cpu = cpu;
12832 cpu_buf->map_key = map_key;
12834 cpu_buf->fd = syscall(__NR_perf_event_open, attr, -1 /* pid */, cpu,
12835 -1, PERF_FLAG_FD_CLOEXEC);
12836 if (cpu_buf->fd < 0) {
12838 pr_warn("failed to open perf buffer event on cpu #%d: %s\n",
12839 cpu, libbpf_strerror_r(err, msg, sizeof(msg)));
12843 cpu_buf->base = mmap(NULL, pb->mmap_size + pb->page_size,
12844 PROT_READ | PROT_WRITE, MAP_SHARED,
12846 if (cpu_buf->base == MAP_FAILED) {
12847 cpu_buf->base = NULL;
12849 pr_warn("failed to mmap perf buffer on cpu #%d: %s\n",
12850 cpu, libbpf_strerror_r(err, msg, sizeof(msg)));
12854 if (ioctl(cpu_buf->fd, PERF_EVENT_IOC_ENABLE, 0) < 0) {
12856 pr_warn("failed to enable perf buffer event on cpu #%d: %s\n",
12857 cpu, libbpf_strerror_r(err, msg, sizeof(msg)));
12864 perf_buffer__free_cpu_buf(pb, cpu_buf);
12865 return (struct perf_cpu_buf *)ERR_PTR(err);
12868 static struct perf_buffer *__perf_buffer__new(int map_fd, size_t page_cnt,
12869 struct perf_buffer_params *p);
12871 struct perf_buffer *perf_buffer__new(int map_fd, size_t page_cnt,
12872 perf_buffer_sample_fn sample_cb,
12873 perf_buffer_lost_fn lost_cb,
12875 const struct perf_buffer_opts *opts)
12877 const size_t attr_sz = sizeof(struct perf_event_attr);
12878 struct perf_buffer_params p = {};
12879 struct perf_event_attr attr;
12880 __u32 sample_period;
12882 if (!OPTS_VALID(opts, perf_buffer_opts))
12883 return libbpf_err_ptr(-EINVAL);
12885 sample_period = OPTS_GET(opts, sample_period, 1);
12886 if (!sample_period)
12889 memset(&attr, 0, attr_sz);
12890 attr.size = attr_sz;
12891 attr.config = PERF_COUNT_SW_BPF_OUTPUT;
12892 attr.type = PERF_TYPE_SOFTWARE;
12893 attr.sample_type = PERF_SAMPLE_RAW;
12894 attr.sample_period = sample_period;
12895 attr.wakeup_events = sample_period;
12898 p.sample_cb = sample_cb;
12899 p.lost_cb = lost_cb;
12902 return libbpf_ptr(__perf_buffer__new(map_fd, page_cnt, &p));
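/* Usage sketch: a minimal consumer loop over a PERF_EVENT_ARRAY map FD
 * using the high-level sample callback path set up above.
 */
static void example_on_sample(void *ctx, int cpu, void *data, __u32 size)
{
	/* handle one raw sample emitted via bpf_perf_event_output() */
}

static int example_poll_perf_buf(int map_fd)
{
	struct perf_buffer *pb;
	int err;

	pb = perf_buffer__new(map_fd, 8 /* pages per CPU */, example_on_sample,
			      NULL /* lost_cb */, NULL /* ctx */, NULL /* opts */);
	if (!pb)
		return -errno;

	while ((err = perf_buffer__poll(pb, 100 /* ms */)) >= 0)
		; /* keep polling; an error such as -EINTR breaks the loop */
	perf_buffer__free(pb);
	return err;
}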
12905 struct perf_buffer *perf_buffer__new_raw(int map_fd, size_t page_cnt,
12906 struct perf_event_attr *attr,
12907 perf_buffer_event_fn event_cb, void *ctx,
12908 const struct perf_buffer_raw_opts *opts)
12910 struct perf_buffer_params p = {};
12913 return libbpf_err_ptr(-EINVAL);
12915 if (!OPTS_VALID(opts, perf_buffer_raw_opts))
12916 return libbpf_err_ptr(-EINVAL);
12919 p.event_cb = event_cb;
12921 p.cpu_cnt = OPTS_GET(opts, cpu_cnt, 0);
12922 p.cpus = OPTS_GET(opts, cpus, NULL);
12923 p.map_keys = OPTS_GET(opts, map_keys, NULL);
12925 return libbpf_ptr(__perf_buffer__new(map_fd, page_cnt, &p));
12928 static struct perf_buffer *__perf_buffer__new(int map_fd, size_t page_cnt,
12929 struct perf_buffer_params *p)
12931 const char *online_cpus_file = "/sys/devices/system/cpu/online";
12932 struct bpf_map_info map;
12933 char msg[STRERR_BUFSIZE];
12934 struct perf_buffer *pb;
12935 bool *online = NULL;
12936 __u32 map_info_len;
12939 if (page_cnt == 0 || (page_cnt & (page_cnt - 1))) {
12940 pr_warn("page count should be a power of two, but is %zu\n",
12942 return ERR_PTR(-EINVAL);
12945 /* best-effort sanity checks */
12946 memset(&map, 0, sizeof(map));
12947 map_info_len = sizeof(map);
12948 err = bpf_map_get_info_by_fd(map_fd, &map, &map_info_len);
12951 /* if BPF_OBJ_GET_INFO_BY_FD is supported, it will return
12952 * -EBADFD, -EFAULT, or -E2BIG on real error
12954 if (err != -EINVAL) {
12955 pr_warn("failed to get map info for map FD %d: %s\n",
12956 map_fd, libbpf_strerror_r(err, msg, sizeof(msg)));
12957 return ERR_PTR(err);
12959 pr_debug("failed to get map info for FD %d; API not supported? Ignoring...\n",
12962 if (map.type != BPF_MAP_TYPE_PERF_EVENT_ARRAY) {
12963 pr_warn("map '%s' should be BPF_MAP_TYPE_PERF_EVENT_ARRAY\n",
12965 return ERR_PTR(-EINVAL);
12969 pb = calloc(1, sizeof(*pb));
12971 return ERR_PTR(-ENOMEM);
12973 pb->event_cb = p->event_cb;
12974 pb->sample_cb = p->sample_cb;
12975 pb->lost_cb = p->lost_cb;
12978 pb->page_size = getpagesize();
12979 pb->mmap_size = pb->page_size * page_cnt;
12980 pb->map_fd = map_fd;
12982 pb->epoll_fd = epoll_create1(EPOLL_CLOEXEC);
12983 if (pb->epoll_fd < 0) {
12985 pr_warn("failed to create epoll instance: %s\n",
12986 libbpf_strerror_r(err, msg, sizeof(msg)));
12990 if (p->cpu_cnt > 0) {
12991 pb->cpu_cnt = p->cpu_cnt;
12993 pb->cpu_cnt = libbpf_num_possible_cpus();
12994 if (pb->cpu_cnt < 0) {
12998 if (map.max_entries && map.max_entries < pb->cpu_cnt)
12999 pb->cpu_cnt = map.max_entries;
13002 pb->events = calloc(pb->cpu_cnt, sizeof(*pb->events));
13005 pr_warn("failed to allocate events: out of memory\n");
13008 pb->cpu_bufs = calloc(pb->cpu_cnt, sizeof(*pb->cpu_bufs));
13009 if (!pb->cpu_bufs) {
13011 pr_warn("failed to allocate buffers: out of memory\n");
13015 err = parse_cpu_mask_file(online_cpus_file, &online, &n);
13017 pr_warn("failed to get online CPU mask: %d\n", err);
13021 for (i = 0, j = 0; i < pb->cpu_cnt; i++) {
13022 struct perf_cpu_buf *cpu_buf;
13025 cpu = p->cpu_cnt > 0 ? p->cpus[i] : i;
13026 map_key = p->cpu_cnt > 0 ? p->map_keys[i] : i;
13028 /* if the user didn't explicitly request particular CPUs to
13029 * attach to, skip offline/not-present CPUs
13031 if (p->cpu_cnt <= 0 && (cpu >= n || !online[cpu]))
13034 cpu_buf = perf_buffer__open_cpu_buf(pb, p->attr, cpu, map_key);
13035 if (IS_ERR(cpu_buf)) {
13036 err = PTR_ERR(cpu_buf);
13040 pb->cpu_bufs[j] = cpu_buf;
13042 err = bpf_map_update_elem(pb->map_fd, &map_key,
13046 pr_warn("failed to set cpu #%d, key %d -> perf FD %d: %s\n",
13047 cpu, map_key, cpu_buf->fd,
13048 libbpf_strerror_r(err, msg, sizeof(msg)));
13052 pb->events[j].events = EPOLLIN;
13053 pb->events[j].data.ptr = cpu_buf;
13054 if (epoll_ctl(pb->epoll_fd, EPOLL_CTL_ADD, cpu_buf->fd,
13055 &pb->events[j]) < 0) {
13057 pr_warn("failed to epoll_ctl cpu #%d perf FD %d: %s\n",
13059 libbpf_strerror_r(err, msg, sizeof(msg)));
13072 perf_buffer__free(pb);
13073 return ERR_PTR(err);
13076 struct perf_sample_raw {
13077 struct perf_event_header header;
13082 struct perf_sample_lost {
13083 struct perf_event_header header;
13086 uint64_t sample_id;
13089 static enum bpf_perf_event_ret
13090 perf_buffer__process_record(struct perf_event_header *e, void *ctx)
13092 struct perf_cpu_buf *cpu_buf = ctx;
13093 struct perf_buffer *pb = cpu_buf->pb;
13096 /* user wants full control over parsing perf event */
13098 return pb->event_cb(pb->ctx, cpu_buf->cpu, e);
13101 case PERF_RECORD_SAMPLE: {
13102 struct perf_sample_raw *s = data;
13105 pb->sample_cb(pb->ctx, cpu_buf->cpu, s->data, s->size);
13108 case PERF_RECORD_LOST: {
13109 struct perf_sample_lost *s = data;
13112 pb->lost_cb(pb->ctx, cpu_buf->cpu, s->lost);
13116 pr_warn("unknown perf sample type %d\n", e->type);
13117 return LIBBPF_PERF_EVENT_ERROR;
13119 return LIBBPF_PERF_EVENT_CONT;
13122 static int perf_buffer__process_records(struct perf_buffer *pb,
13123 struct perf_cpu_buf *cpu_buf)
13125 enum bpf_perf_event_ret ret;
13127 ret = perf_event_read_simple(cpu_buf->base, pb->mmap_size,
13128 pb->page_size, &cpu_buf->buf,
13129 &cpu_buf->buf_size,
13130 perf_buffer__process_record, cpu_buf);
13131 if (ret != LIBBPF_PERF_EVENT_CONT)
13136 int perf_buffer__epoll_fd(const struct perf_buffer *pb)
13138 return pb->epoll_fd;
13141 int perf_buffer__poll(struct perf_buffer *pb, int timeout_ms)
13145 cnt = epoll_wait(pb->epoll_fd, pb->events, pb->cpu_cnt, timeout_ms);
13149 for (i = 0; i < cnt; i++) {
13150 struct perf_cpu_buf *cpu_buf = pb->events[i].data.ptr;
13152 err = perf_buffer__process_records(pb, cpu_buf);
13154 pr_warn("error while processing records: %d\n", err);
13155 return libbpf_err(err);
13161 /* Return number of PERF_EVENT_ARRAY map slots set up by this perf_buffer
13164 size_t perf_buffer__buffer_cnt(const struct perf_buffer *pb)
13166 return pb->cpu_cnt;
13170 * Return perf_event FD of a ring buffer in *buf_idx* slot of
13171 * PERF_EVENT_ARRAY BPF map. This FD can be polled for new data using
13172 * select()/poll()/epoll() Linux syscalls.
13174 int perf_buffer__buffer_fd(const struct perf_buffer *pb, size_t buf_idx)
13176 struct perf_cpu_buf *cpu_buf;
13178 if (buf_idx >= pb->cpu_cnt)
13179 return libbpf_err(-EINVAL);
13181 cpu_buf = pb->cpu_bufs[buf_idx];
13183 return libbpf_err(-ENOENT);
13185 return cpu_buf->fd;
13188 int perf_buffer__buffer(struct perf_buffer *pb, int buf_idx, void **buf, size_t *buf_size)
13190 struct perf_cpu_buf *cpu_buf;
13192 if (buf_idx >= pb->cpu_cnt)
13193 return libbpf_err(-EINVAL);
13195 cpu_buf = pb->cpu_bufs[buf_idx];
13197 return libbpf_err(-ENOENT);
13199 *buf = cpu_buf->base;
13200 *buf_size = pb->mmap_size;
13205 * Consume data from perf ring buffer corresponding to slot *buf_idx* in
13206 * PERF_EVENT_ARRAY BPF map without waiting/polling. If there is no data to
13207 * consume, do nothing and return success.
13212 int perf_buffer__consume_buffer(struct perf_buffer *pb, size_t buf_idx)
13214 struct perf_cpu_buf *cpu_buf;
13216 if (buf_idx >= pb->cpu_cnt)
13217 return libbpf_err(-EINVAL);
13219 cpu_buf = pb->cpu_bufs[buf_idx];
13221 return libbpf_err(-ENOENT);
13223 return perf_buffer__process_records(pb, cpu_buf);
13226 int perf_buffer__consume(struct perf_buffer *pb)
13230 for (i = 0; i < pb->cpu_cnt; i++) {
13231 struct perf_cpu_buf *cpu_buf = pb->cpu_bufs[i];
13236 err = perf_buffer__process_records(pb, cpu_buf);
13238 pr_warn("perf_buffer: failed to process records in buffer #%d: %d\n", i, err);
13239 return libbpf_err(err);
13245 int bpf_program__set_attach_target(struct bpf_program *prog,
13246 int attach_prog_fd,
13247 const char *attach_func_name)
13249 int btf_obj_fd = 0, btf_id = 0, err;
13251 if (!prog || attach_prog_fd < 0)
13252 return libbpf_err(-EINVAL);
13254 if (prog->obj->loaded)
13255 return libbpf_err(-EINVAL);
13257 if (attach_prog_fd && !attach_func_name) {
13258 /* remember attach_prog_fd and let bpf_program__load() find
13259 * BTF ID during the program load
13261 prog->attach_prog_fd = attach_prog_fd;
13265 if (attach_prog_fd) {
13266 btf_id = libbpf_find_prog_btf_id(attach_func_name,
13269 return libbpf_err(btf_id);
13271 if (!attach_func_name)
13272 return libbpf_err(-EINVAL);
13274 /* load btf_vmlinux, if not yet */
13275 err = bpf_object__load_vmlinux_btf(prog->obj, true);
13277 return libbpf_err(err);
13278 err = find_kernel_btf_id(prog->obj, attach_func_name,
13279 prog->expected_attach_type,
13280 &btf_obj_fd, &btf_id);
13282 return libbpf_err(err);
13285 prog->attach_btf_id = btf_id;
13286 prog->attach_btf_obj_fd = btf_obj_fd;
13287 prog->attach_prog_fd = attach_prog_fd;
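/* Usage sketch: pointing an fentry/fexit program at a kernel function
 * before load; attach_prog_fd of 0 means "resolve attach_func_name against
 * vmlinux BTF" per the logic above.
 */
static int example_set_target(struct bpf_program *prog)
{
	return bpf_program__set_attach_target(prog, 0, "tcp_v4_connect");
}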
13291 int parse_cpu_mask_str(const char *s, bool **mask, int *mask_sz)
13293 int err = 0, n, len, start, end = -1;
13299 /* Each sub string separated by ',' has format \d+-\d+ or \d+ */
13301 if (*s == ',' || *s == '\n') {
13305 n = sscanf(s, "%d%n-%d%n", &start, &len, &end, &len);
13306 if (n <= 0 || n > 2) {
13307 pr_warn("Failed to get CPU range %s: %d\n", s, n);
13310 } else if (n == 1) {
13313 if (start < 0 || start > end) {
13314 pr_warn("Invalid CPU range [%d,%d] in %s\n",
13319 tmp = realloc(*mask, end + 1);
13325 memset(tmp + *mask_sz, 0, start - *mask_sz);
13326 memset(tmp + start, 1, end - start + 1);
13327 *mask_sz = end + 1;
13331 pr_warn("Empty CPU range\n");
13341 int parse_cpu_mask_file(const char *fcpu, bool **mask, int *mask_sz)
13343 int fd, err = 0, len;
13346 fd = open(fcpu, O_RDONLY | O_CLOEXEC);
13349 pr_warn("Failed to open cpu mask file %s: %d\n", fcpu, err);
13352 len = read(fd, buf, sizeof(buf));
13355 err = len ? -errno : -EINVAL;
13356 pr_warn("Failed to read cpu mask from %s: %d\n", fcpu, err);
13359 if (len >= sizeof(buf)) {
13360 pr_warn("CPU mask is too big in file %s\n", fcpu);
13365 return parse_cpu_mask_str(buf, mask, mask_sz);
13368 int libbpf_num_possible_cpus(void)
13370 static const char *fcpu = "/sys/devices/system/cpu/possible";
13372 int err, n, i, tmp_cpus;
13375 tmp_cpus = READ_ONCE(cpus);
13379 err = parse_cpu_mask_file(fcpu, &mask, &n);
13381 return libbpf_err(err);
13384 for (i = 0; i < n; i++) {
13390 WRITE_ONCE(cpus, tmp_cpus);
13394 static int populate_skeleton_maps(const struct bpf_object *obj,
13395 struct bpf_map_skeleton *maps,
13400 for (i = 0; i < map_cnt; i++) {
13401 struct bpf_map **map = maps[i].map;
13402 const char *name = maps[i].name;
13403 void **mmaped = maps[i].mmaped;
13405 *map = bpf_object__find_map_by_name(obj, name);
13407 pr_warn("failed to find skeleton map '%s'\n", name);
13411 /* externs shouldn't be pre-setup from user code */
13412 if (mmaped && (*map)->libbpf_type != LIBBPF_MAP_KCONFIG)
13413 *mmaped = (*map)->mmaped;
13418 static int populate_skeleton_progs(const struct bpf_object *obj,
13419 struct bpf_prog_skeleton *progs,
13424 for (i = 0; i < prog_cnt; i++) {
13425 struct bpf_program **prog = progs[i].prog;
13426 const char *name = progs[i].name;
13428 *prog = bpf_object__find_program_by_name(obj, name);
13430 pr_warn("failed to find skeleton program '%s'\n", name);
13437 int bpf_object__open_skeleton(struct bpf_object_skeleton *s,
13438 const struct bpf_object_open_opts *opts)
13440 DECLARE_LIBBPF_OPTS(bpf_object_open_opts, skel_opts,
13441 .object_name = s->name,
13443 struct bpf_object *obj;
13446 /* Attempt to preserve opts->object_name, unless overridden by user
13447 * explicitly. Overwriting object name for skeletons is discouraged,
13448 * as it breaks global data maps, because they contain object name
13449 * prefix as their own map name prefix. When skeleton is generated,
13450 * bpftool is making an assumption that this name will stay the same.
13453 memcpy(&skel_opts, opts, sizeof(*opts));
13454 if (!opts->object_name)
13455 skel_opts.object_name = s->name;
13458 obj = bpf_object__open_mem(s->data, s->data_sz, &skel_opts);
13459 err = libbpf_get_error(obj);
13461 pr_warn("failed to initialize skeleton BPF object '%s': %d\n",
13463 return libbpf_err(err);
13467 err = populate_skeleton_maps(obj, s->maps, s->map_cnt);
13469 pr_warn("failed to populate skeleton maps for '%s': %d\n", s->name, err);
13470 return libbpf_err(err);
13473 err = populate_skeleton_progs(obj, s->progs, s->prog_cnt);
13475 pr_warn("failed to populate skeleton progs for '%s': %d\n", s->name, err);
13476 return libbpf_err(err);
13482 int bpf_object__open_subskeleton(struct bpf_object_subskeleton *s)
13484 int err, len, var_idx, i;
13485 const char *var_name;
13486 const struct bpf_map *map;
13489 const struct btf_type *map_type, *var_type;
13490 const struct bpf_var_skeleton *var_skel;
13491 struct btf_var_secinfo *var;
13494 return libbpf_err(-EINVAL);
13496 btf = bpf_object__btf(s->obj);
13498 pr_warn("subskeletons require BTF at runtime (object %s)\n",
13499 bpf_object__name(s->obj));
13500 return libbpf_err(-EINVAL); /* bpf_object__btf() doesn't set errno */
13503 err = populate_skeleton_maps(s->obj, s->maps, s->map_cnt);
13505 pr_warn("failed to populate subskeleton maps: %d\n", err);
13506 return libbpf_err(err);
13509 err = populate_skeleton_progs(s->obj, s->progs, s->prog_cnt);
13511 pr_warn("failed to populate subskeleton progs: %d\n", err);
13512 return libbpf_err(err);
13515 for (var_idx = 0; var_idx < s->var_cnt; var_idx++) {
13516 var_skel = &s->vars[var_idx];
13517 map = *var_skel->map;
13518 map_type_id = bpf_map__btf_value_type_id(map);
13519 map_type = btf__type_by_id(btf, map_type_id);
13521 if (!btf_is_datasec(map_type)) {
13522 pr_warn("type for map '%1$s' is not a datasec: %2$s",
13523 bpf_map__name(map),
13524 __btf_kind_str(btf_kind(map_type)));
13525 return libbpf_err(-EINVAL);
13528 len = btf_vlen(map_type);
13529 var = btf_var_secinfos(map_type);
13530 for (i = 0; i < len; i++, var++) {
13531 var_type = btf__type_by_id(btf, var->type);
13532 var_name = btf__name_by_offset(btf, var_type->name_off);
13533 if (strcmp(var_name, var_skel->name) == 0) {
13534 *var_skel->addr = map->mmaped + var->offset;
13542 void bpf_object__destroy_subskeleton(struct bpf_object_subskeleton *s)
13552 int bpf_object__load_skeleton(struct bpf_object_skeleton *s)
13556 err = bpf_object__load(*s->obj);
13558 pr_warn("failed to load BPF skeleton '%s': %d\n", s->name, err);
13559 return libbpf_err(err);
13562 for (i = 0; i < s->map_cnt; i++) {
13563 struct bpf_map *map = *s->maps[i].map;
13564 size_t mmap_sz = bpf_map_mmap_sz(map);
13565 int prot, map_fd = map->fd;
13566 void **mmaped = s->maps[i].mmaped;
13571 if (!(map->def.map_flags & BPF_F_MMAPABLE)) {
13576 if (map->def.map_flags & BPF_F_RDONLY_PROG)
13579 prot = PROT_READ | PROT_WRITE;
13581 /* Remap anonymous mmap()-ed "map initialization image" as
13582 * a BPF map-backed mmap()-ed memory, but preserving the same
13583 * memory address. This will cause kernel to change process'
13584 * page table to point to a different piece of kernel memory,
13585 * but from userspace point of view memory address (and its
13586 * contents, being identical at this point) will stay the
13587 * same. This mapping will be released by bpf_object__close()
13588 * as per normal clean up procedure, so we don't need to worry
13589 * about it from skeleton's clean up perspective.
13591 *mmaped = mmap(map->mmaped, mmap_sz, prot, MAP_SHARED | MAP_FIXED, map_fd, 0);
13592 if (*mmaped == MAP_FAILED) {
13595 pr_warn("failed to re-mmap() map '%s': %d\n",
13596 bpf_map__name(map), err);
13597 return libbpf_err(err);
13604 int bpf_object__attach_skeleton(struct bpf_object_skeleton *s)
13608 for (i = 0; i < s->prog_cnt; i++) {
13609 struct bpf_program *prog = *s->progs[i].prog;
13610 struct bpf_link **link = s->progs[i].link;
13612 if (!prog->autoload || !prog->autoattach)
13615 /* auto-attaching not supported for this program */
13616 if (!prog->sec_def || !prog->sec_def->prog_attach_fn)
13619 /* if user already set the link manually, don't attempt auto-attach */
13623 err = prog->sec_def->prog_attach_fn(prog, prog->sec_def->cookie, link);
13625 pr_warn("prog '%s': failed to auto-attach: %d\n",
13626 bpf_program__name(prog), err);
13627 return libbpf_err(err);
13630 /* It's possible that for some SEC() definitions auto-attach
13631 * is supported in some cases (e.g., if definition completely
13632 * specifies target information), but is not in other cases.
13633 * SEC("uprobe") is one such case. If user specified target
13634 * binary and function name, such BPF program can be
13635 * auto-attached. But if not, it shouldn't trigger skeleton's
13636 * attach to fail. It should just be skipped.
13637 * attach_fn signals such case with returning 0 (no error) and
13638 * setting link to NULL.
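/* Usage sketch of the skeleton-driven flow; the "myprog_bpf__*" names are
 * hypothetical and would be generated by `bpftool gen skeleton`:
 *
 *	struct myprog_bpf *skel = myprog_bpf__open_and_load();
 *
 *	if (!skel)
 *		return -errno;
 *	err = myprog_bpf__attach(skel);	// calls bpf_object__attach_skeleton()
 *	...
 *	myprog_bpf__destroy(skel);	// detaches and closes everything
 */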
13645 void bpf_object__detach_skeleton(struct bpf_object_skeleton *s)
13649 for (i = 0; i < s->prog_cnt; i++) {
13650 struct bpf_link **link = s->progs[i].link;
13652 bpf_link__destroy(*link);
13657 void bpf_object__destroy_skeleton(struct bpf_object_skeleton *s)
13663 bpf_object__detach_skeleton(s);
13665 bpf_object__close(*s->obj);