Merge branch 'libbpf-support-module-function-syntax-for-tracing-programs'
[linux-2.6-block.git] / tools / lib / bpf / libbpf.c
CommitLineData
1bc38b8f 1// SPDX-License-Identifier: (LGPL-2.1 OR BSD-2-Clause)
6061a3d6 2
1b76c13e
WN
3/*
4 * Common eBPF ELF object loading operations.
5 *
6 * Copyright (C) 2013-2015 Alexei Starovoitov <ast@kernel.org>
7 * Copyright (C) 2015 Wang Nan <wangnan0@huawei.com>
8 * Copyright (C) 2015 Huawei Inc.
f367540c 9 * Copyright (C) 2017 Nicira, Inc.
d859900c 10 * Copyright (C) 2019 Isovalent, Inc.
1b76c13e
WN
11 */
12
b4269954 13#ifndef _GNU_SOURCE
531b014e 14#define _GNU_SOURCE
b4269954 15#endif
1b76c13e 16#include <stdlib.h>
b3f59d66
WN
17#include <stdio.h>
18#include <stdarg.h>
f367540c 19#include <libgen.h>
34090915 20#include <inttypes.h>
8ab9da57 21#include <limits.h>
b3f59d66 22#include <string.h>
1b76c13e 23#include <unistd.h>
cdb2f920 24#include <endian.h>
1a5e3fb1
WN
25#include <fcntl.h>
26#include <errno.h>
113e6b7e 27#include <ctype.h>
1b76c13e 28#include <asm/unistd.h>
e28ff1a8 29#include <linux/err.h>
cb1e5e96 30#include <linux/kernel.h>
1b76c13e 31#include <linux/bpf.h>
38d5d3b3 32#include <linux/btf.h>
47eff617 33#include <linux/filter.h>
f367540c 34#include <linux/limits.h>
438363c0 35#include <linux/perf_event.h>
9eea8faf 36#include <linux/bpf_perf_event.h>
a64af0ef 37#include <linux/ring_buffer.h>
fb84b822 38#include <sys/epoll.h>
63f2f5ee 39#include <sys/ioctl.h>
fb84b822 40#include <sys/mman.h>
f367540c
JS
41#include <sys/stat.h>
42#include <sys/types.h>
43#include <sys/vfs.h>
ddc7c304 44#include <sys/utsname.h>
dc3a2d25 45#include <sys/resource.h>
1a5e3fb1
WN
46#include <libelf.h>
47#include <gelf.h>
166750bc 48#include <zlib.h>
1b76c13e
WN
49
50#include "libbpf.h"
52d3352e 51#include "bpf.h"
8a138aed 52#include "btf.h"
6d41907c 53#include "str_error.h"
d7c4b398 54#include "libbpf_internal.h"
ddc7c304 55#include "hashmap.h"
67234743 56#include "bpf_gen_internal.h"
c44fd845 57#include "zip.h"
b3f59d66 58
f367540c
JS
59#ifndef BPF_FS_MAGIC
60#define BPF_FS_MAGIC 0xcafe4a11
61#endif
62
6b434b61
AN
63#define BPF_FS_DEFAULT_PATH "/sys/fs/bpf"
64
9c0f8cbd
AN
65#define BPF_INSN_SZ (sizeof(struct bpf_insn))
66
ff466b58
AI
67/* vsprintf() in __base_pr() uses nonliteral format string. It may break
68 * compilation if user enables corresponding warning. Disable it explicitly.
69 */
70#pragma GCC diagnostic ignored "-Wformat-nonliteral"
71
b3f59d66
WN
72#define __printf(a, b) __attribute__((format(printf, a, b)))
73
590a0088 74static struct bpf_map *bpf_object__add_map(struct bpf_object *obj);
aea28a60 75static bool prog_is_subprog(const struct bpf_object *obj, const struct bpf_program *prog);
f04deb90 76static int map_set_def_max_entries(struct bpf_map *map);
590a0088 77
ccde5760
DM
78static const char * const attach_type_name[] = {
79 [BPF_CGROUP_INET_INGRESS] = "cgroup_inet_ingress",
80 [BPF_CGROUP_INET_EGRESS] = "cgroup_inet_egress",
81 [BPF_CGROUP_INET_SOCK_CREATE] = "cgroup_inet_sock_create",
82 [BPF_CGROUP_INET_SOCK_RELEASE] = "cgroup_inet_sock_release",
83 [BPF_CGROUP_SOCK_OPS] = "cgroup_sock_ops",
84 [BPF_CGROUP_DEVICE] = "cgroup_device",
85 [BPF_CGROUP_INET4_BIND] = "cgroup_inet4_bind",
86 [BPF_CGROUP_INET6_BIND] = "cgroup_inet6_bind",
87 [BPF_CGROUP_INET4_CONNECT] = "cgroup_inet4_connect",
88 [BPF_CGROUP_INET6_CONNECT] = "cgroup_inet6_connect",
bf90438c 89 [BPF_CGROUP_UNIX_CONNECT] = "cgroup_unix_connect",
ccde5760
DM
90 [BPF_CGROUP_INET4_POST_BIND] = "cgroup_inet4_post_bind",
91 [BPF_CGROUP_INET6_POST_BIND] = "cgroup_inet6_post_bind",
92 [BPF_CGROUP_INET4_GETPEERNAME] = "cgroup_inet4_getpeername",
93 [BPF_CGROUP_INET6_GETPEERNAME] = "cgroup_inet6_getpeername",
bf90438c 94 [BPF_CGROUP_UNIX_GETPEERNAME] = "cgroup_unix_getpeername",
ccde5760
DM
95 [BPF_CGROUP_INET4_GETSOCKNAME] = "cgroup_inet4_getsockname",
96 [BPF_CGROUP_INET6_GETSOCKNAME] = "cgroup_inet6_getsockname",
bf90438c 97 [BPF_CGROUP_UNIX_GETSOCKNAME] = "cgroup_unix_getsockname",
ccde5760
DM
98 [BPF_CGROUP_UDP4_SENDMSG] = "cgroup_udp4_sendmsg",
99 [BPF_CGROUP_UDP6_SENDMSG] = "cgroup_udp6_sendmsg",
bf90438c 100 [BPF_CGROUP_UNIX_SENDMSG] = "cgroup_unix_sendmsg",
ccde5760
DM
101 [BPF_CGROUP_SYSCTL] = "cgroup_sysctl",
102 [BPF_CGROUP_UDP4_RECVMSG] = "cgroup_udp4_recvmsg",
103 [BPF_CGROUP_UDP6_RECVMSG] = "cgroup_udp6_recvmsg",
bf90438c 104 [BPF_CGROUP_UNIX_RECVMSG] = "cgroup_unix_recvmsg",
ccde5760
DM
105 [BPF_CGROUP_GETSOCKOPT] = "cgroup_getsockopt",
106 [BPF_CGROUP_SETSOCKOPT] = "cgroup_setsockopt",
107 [BPF_SK_SKB_STREAM_PARSER] = "sk_skb_stream_parser",
108 [BPF_SK_SKB_STREAM_VERDICT] = "sk_skb_stream_verdict",
109 [BPF_SK_SKB_VERDICT] = "sk_skb_verdict",
110 [BPF_SK_MSG_VERDICT] = "sk_msg_verdict",
111 [BPF_LIRC_MODE2] = "lirc_mode2",
112 [BPF_FLOW_DISSECTOR] = "flow_dissector",
113 [BPF_TRACE_RAW_TP] = "trace_raw_tp",
114 [BPF_TRACE_FENTRY] = "trace_fentry",
115 [BPF_TRACE_FEXIT] = "trace_fexit",
116 [BPF_MODIFY_RETURN] = "modify_return",
117 [BPF_LSM_MAC] = "lsm_mac",
bffcf348 118 [BPF_LSM_CGROUP] = "lsm_cgroup",
ccde5760
DM
119 [BPF_SK_LOOKUP] = "sk_lookup",
120 [BPF_TRACE_ITER] = "trace_iter",
121 [BPF_XDP_DEVMAP] = "xdp_devmap",
122 [BPF_XDP_CPUMAP] = "xdp_cpumap",
123 [BPF_XDP] = "xdp",
124 [BPF_SK_REUSEPORT_SELECT] = "sk_reuseport_select",
125 [BPF_SK_REUSEPORT_SELECT_OR_MIGRATE] = "sk_reuseport_select_or_migrate",
126 [BPF_PERF_EVENT] = "perf_event",
127 [BPF_TRACE_KPROBE_MULTI] = "trace_kprobe_multi",
8d1608d7 128 [BPF_STRUCT_OPS] = "struct_ops",
132328e8 129 [BPF_NETFILTER] = "netfilter",
fe20ce3a
DB
130 [BPF_TCX_INGRESS] = "tcx_ingress",
131 [BPF_TCX_EGRESS] = "tcx_egress",
8097e460 132 [BPF_TRACE_UPROBE_MULTI] = "trace_uprobe_multi",
05c31b4a
DB
133 [BPF_NETKIT_PRIMARY] = "netkit_primary",
134 [BPF_NETKIT_PEER] = "netkit_peer",
7b949654 135 [BPF_TRACE_KPROBE_SESSION] = "trace_kprobe_session",
ccde5760
DM
136};
137
ba5d1b58
DM
138static const char * const link_type_name[] = {
139 [BPF_LINK_TYPE_UNSPEC] = "unspec",
140 [BPF_LINK_TYPE_RAW_TRACEPOINT] = "raw_tracepoint",
141 [BPF_LINK_TYPE_TRACING] = "tracing",
142 [BPF_LINK_TYPE_CGROUP] = "cgroup",
143 [BPF_LINK_TYPE_ITER] = "iter",
144 [BPF_LINK_TYPE_NETNS] = "netns",
145 [BPF_LINK_TYPE_XDP] = "xdp",
146 [BPF_LINK_TYPE_PERF_EVENT] = "perf_event",
147 [BPF_LINK_TYPE_KPROBE_MULTI] = "kprobe_multi",
148 [BPF_LINK_TYPE_STRUCT_OPS] = "struct_ops",
d0fe92fb 149 [BPF_LINK_TYPE_NETFILTER] = "netfilter",
55cc3768 150 [BPF_LINK_TYPE_TCX] = "tcx",
8097e460 151 [BPF_LINK_TYPE_UPROBE_MULTI] = "uprobe_multi",
05c31b4a 152 [BPF_LINK_TYPE_NETKIT] = "netkit",
849989af 153 [BPF_LINK_TYPE_SOCKMAP] = "sockmap",
ba5d1b58
DM
154};
155
3e6dc020
DM
156static const char * const map_type_name[] = {
157 [BPF_MAP_TYPE_UNSPEC] = "unspec",
158 [BPF_MAP_TYPE_HASH] = "hash",
159 [BPF_MAP_TYPE_ARRAY] = "array",
160 [BPF_MAP_TYPE_PROG_ARRAY] = "prog_array",
161 [BPF_MAP_TYPE_PERF_EVENT_ARRAY] = "perf_event_array",
162 [BPF_MAP_TYPE_PERCPU_HASH] = "percpu_hash",
163 [BPF_MAP_TYPE_PERCPU_ARRAY] = "percpu_array",
164 [BPF_MAP_TYPE_STACK_TRACE] = "stack_trace",
165 [BPF_MAP_TYPE_CGROUP_ARRAY] = "cgroup_array",
166 [BPF_MAP_TYPE_LRU_HASH] = "lru_hash",
167 [BPF_MAP_TYPE_LRU_PERCPU_HASH] = "lru_percpu_hash",
168 [BPF_MAP_TYPE_LPM_TRIE] = "lpm_trie",
169 [BPF_MAP_TYPE_ARRAY_OF_MAPS] = "array_of_maps",
170 [BPF_MAP_TYPE_HASH_OF_MAPS] = "hash_of_maps",
171 [BPF_MAP_TYPE_DEVMAP] = "devmap",
172 [BPF_MAP_TYPE_DEVMAP_HASH] = "devmap_hash",
173 [BPF_MAP_TYPE_SOCKMAP] = "sockmap",
174 [BPF_MAP_TYPE_CPUMAP] = "cpumap",
175 [BPF_MAP_TYPE_XSKMAP] = "xskmap",
176 [BPF_MAP_TYPE_SOCKHASH] = "sockhash",
177 [BPF_MAP_TYPE_CGROUP_STORAGE] = "cgroup_storage",
178 [BPF_MAP_TYPE_REUSEPORT_SOCKARRAY] = "reuseport_sockarray",
179 [BPF_MAP_TYPE_PERCPU_CGROUP_STORAGE] = "percpu_cgroup_storage",
180 [BPF_MAP_TYPE_QUEUE] = "queue",
181 [BPF_MAP_TYPE_STACK] = "stack",
182 [BPF_MAP_TYPE_SK_STORAGE] = "sk_storage",
183 [BPF_MAP_TYPE_STRUCT_OPS] = "struct_ops",
184 [BPF_MAP_TYPE_RINGBUF] = "ringbuf",
185 [BPF_MAP_TYPE_INODE_STORAGE] = "inode_storage",
186 [BPF_MAP_TYPE_TASK_STORAGE] = "task_storage",
187 [BPF_MAP_TYPE_BLOOM_FILTER] = "bloom_filter",
583c1f42 188 [BPF_MAP_TYPE_USER_RINGBUF] = "user_ringbuf",
4fe64af2 189 [BPF_MAP_TYPE_CGRP_STORAGE] = "cgrp_storage",
79ff13e9 190 [BPF_MAP_TYPE_ARENA] = "arena",
3e6dc020
DM
191};
192
d18616e7
DM
193static const char * const prog_type_name[] = {
194 [BPF_PROG_TYPE_UNSPEC] = "unspec",
195 [BPF_PROG_TYPE_SOCKET_FILTER] = "socket_filter",
196 [BPF_PROG_TYPE_KPROBE] = "kprobe",
197 [BPF_PROG_TYPE_SCHED_CLS] = "sched_cls",
198 [BPF_PROG_TYPE_SCHED_ACT] = "sched_act",
199 [BPF_PROG_TYPE_TRACEPOINT] = "tracepoint",
200 [BPF_PROG_TYPE_XDP] = "xdp",
201 [BPF_PROG_TYPE_PERF_EVENT] = "perf_event",
202 [BPF_PROG_TYPE_CGROUP_SKB] = "cgroup_skb",
203 [BPF_PROG_TYPE_CGROUP_SOCK] = "cgroup_sock",
204 [BPF_PROG_TYPE_LWT_IN] = "lwt_in",
205 [BPF_PROG_TYPE_LWT_OUT] = "lwt_out",
206 [BPF_PROG_TYPE_LWT_XMIT] = "lwt_xmit",
207 [BPF_PROG_TYPE_SOCK_OPS] = "sock_ops",
208 [BPF_PROG_TYPE_SK_SKB] = "sk_skb",
209 [BPF_PROG_TYPE_CGROUP_DEVICE] = "cgroup_device",
210 [BPF_PROG_TYPE_SK_MSG] = "sk_msg",
211 [BPF_PROG_TYPE_RAW_TRACEPOINT] = "raw_tracepoint",
212 [BPF_PROG_TYPE_CGROUP_SOCK_ADDR] = "cgroup_sock_addr",
213 [BPF_PROG_TYPE_LWT_SEG6LOCAL] = "lwt_seg6local",
214 [BPF_PROG_TYPE_LIRC_MODE2] = "lirc_mode2",
215 [BPF_PROG_TYPE_SK_REUSEPORT] = "sk_reuseport",
216 [BPF_PROG_TYPE_FLOW_DISSECTOR] = "flow_dissector",
217 [BPF_PROG_TYPE_CGROUP_SYSCTL] = "cgroup_sysctl",
218 [BPF_PROG_TYPE_RAW_TRACEPOINT_WRITABLE] = "raw_tracepoint_writable",
219 [BPF_PROG_TYPE_CGROUP_SOCKOPT] = "cgroup_sockopt",
220 [BPF_PROG_TYPE_TRACING] = "tracing",
221 [BPF_PROG_TYPE_STRUCT_OPS] = "struct_ops",
222 [BPF_PROG_TYPE_EXT] = "ext",
223 [BPF_PROG_TYPE_LSM] = "lsm",
224 [BPF_PROG_TYPE_SK_LOOKUP] = "sk_lookup",
225 [BPF_PROG_TYPE_SYSCALL] = "syscall",
d0fe92fb 226 [BPF_PROG_TYPE_NETFILTER] = "netfilter",
d18616e7
DM
227};
228
a8a1f7d0
SF
229static int __base_pr(enum libbpf_print_level level, const char *format,
230 va_list args)
b3f59d66 231{
6f1ae8b6
YS
232 if (level == LIBBPF_DEBUG)
233 return 0;
234
a8a1f7d0 235 return vfprintf(stderr, format, args);
b3f59d66
WN
236}
237
a8a1f7d0 238static libbpf_print_fn_t __libbpf_pr = __base_pr;
b3f59d66 239
e87fd8ba 240libbpf_print_fn_t libbpf_set_print(libbpf_print_fn_t fn)
b3f59d66 241{
f1cb927c
JK
242 libbpf_print_fn_t old_print_fn;
243
244 old_print_fn = __atomic_exchange_n(&__libbpf_pr, fn, __ATOMIC_RELAXED);
e87fd8ba 245
e87fd8ba 246 return old_print_fn;
b3f59d66 247}
1a5e3fb1 248
8461ef8b
YS
249__printf(2, 3)
250void libbpf_print(enum libbpf_print_level level, const char *format, ...)
251{
252 va_list args;
d7c5802f 253 int old_errno;
f1cb927c 254 libbpf_print_fn_t print_fn;
8461ef8b 255
f1cb927c
JK
256 print_fn = __atomic_load_n(&__libbpf_pr, __ATOMIC_RELAXED);
257 if (!print_fn)
6f1ae8b6
YS
258 return;
259
d7c5802f
AN
260 old_errno = errno;
261
8461ef8b 262 va_start(args, format);
6f1ae8b6 263 __libbpf_pr(level, format, args);
8461ef8b 264 va_end(args);
d7c5802f
AN
265
266 errno = old_errno;
8461ef8b
YS
267}
268
dc3a2d25
THJ
269static void pr_perm_msg(int err)
270{
271 struct rlimit limit;
272 char buf[100];
273
274 if (err != -EPERM || geteuid() != 0)
275 return;
276
277 err = getrlimit(RLIMIT_MEMLOCK, &limit);
278 if (err)
279 return;
280
281 if (limit.rlim_cur == RLIM_INFINITY)
282 return;
283
284 if (limit.rlim_cur < 1024)
b5c7d0d0 285 snprintf(buf, sizeof(buf), "%zu bytes", (size_t)limit.rlim_cur);
dc3a2d25
THJ
286 else if (limit.rlim_cur < 1024*1024)
287 snprintf(buf, sizeof(buf), "%.1f KiB", (double)limit.rlim_cur / 1024);
288 else
289 snprintf(buf, sizeof(buf), "%.1f MiB", (double)limit.rlim_cur / (1024*1024));
290
291 pr_warn("permission error while running as root; try raising 'ulimit -l'? current value: %s\n",
292 buf);
293}
294
6371ca3b
WN
295#define STRERR_BUFSIZE 128
296
1a5e3fb1
WN
297/* Copied from tools/perf/util/util.h */
298#ifndef zfree
299# define zfree(ptr) ({ free(*ptr); *ptr = NULL; })
300#endif
301
302#ifndef zclose
303# define zclose(fd) ({ \
304 int ___err = 0; \
305 if ((fd) >= 0) \
306 ___err = close((fd)); \
307 fd = -1; \
308 ___err; })
309#endif
310
34be1646
SL
311static inline __u64 ptr_to_u64(const void *ptr)
312{
313 return (__u64) (unsigned long) ptr;
314}
315
5981881d
AN
316int libbpf_set_strict_mode(enum libbpf_strict_mode mode)
317{
bd054102 318 /* as of v1.0 libbpf_set_strict_mode() is a no-op */
5981881d
AN
319 return 0;
320}
321
7615209f
AN
322__u32 libbpf_major_version(void)
323{
324 return LIBBPF_MAJOR_VERSION;
325}
326
327__u32 libbpf_minor_version(void)
328{
329 return LIBBPF_MINOR_VERSION;
330}
331
332const char *libbpf_version_string(void)
333{
334#define __S(X) #X
335#define _S(X) __S(X)
336 return "v" _S(LIBBPF_MAJOR_VERSION) "." _S(LIBBPF_MINOR_VERSION);
337#undef _S
338#undef __S
339}
340
166750bc
AN
341enum reloc_type {
342 RELO_LD64,
343 RELO_CALL,
344 RELO_DATA,
a18f7214
AS
345 RELO_EXTERN_LD64,
346 RELO_EXTERN_CALL,
53eddb5e 347 RELO_SUBPROG_ADDR,
d0e92887 348 RELO_CORE,
166750bc
AN
349};
350
351struct reloc_desc {
352 enum reloc_type type;
353 int insn_idx;
d0e92887
AS
354 union {
355 const struct bpf_core_relo *core_relo; /* used when type == RELO_CORE */
356 struct {
357 int map_idx;
358 int sym_off;
3055ddd6 359 int ext_idx;
d0e92887
AS
360 };
361 };
166750bc
AN
362};
363
15ea31fa
AN
364/* stored as sec_def->cookie for all libbpf-supported SEC()s */
365enum sec_def_flags {
366 SEC_NONE = 0,
367 /* expected_attach_type is optional, if kernel doesn't support that */
368 SEC_EXP_ATTACH_OPT = 1,
369 /* legacy, only used by libbpf_get_type_names() and
370 * libbpf_attach_type_by_name(), not used by libbpf itself at all.
371 * This used to be associated with cgroup (and few other) BPF programs
372 * that were attachable through BPF_PROG_ATTACH command. Pretty
373 * meaningless nowadays, though.
374 */
375 SEC_ATTACHABLE = 2,
376 SEC_ATTACHABLE_OPT = SEC_ATTACHABLE | SEC_EXP_ATTACH_OPT,
377 /* attachment target is specified through BTF ID in either kernel or
e3ba8e4e
KM
378 * other BPF program's BTF object
379 */
15ea31fa
AN
380 SEC_ATTACH_BTF = 4,
381 /* BPF program type allows sleeping/blocking in kernel */
382 SEC_SLEEPABLE = 8,
082c4bfb 383 /* BPF program support non-linear XDP buffer */
450b167f 384 SEC_XDP_FRAGS = 16,
5902da6d
JO
385 /* Setup proper attach type for usdt probes. */
386 SEC_USDT = 32,
15ea31fa
AN
387};
388
25498a19 389struct bpf_sec_def {
697f104d 390 char *sec;
25498a19
AN
391 enum bpf_prog_type prog_type;
392 enum bpf_attach_type expected_attach_type;
15ea31fa 393 long cookie;
697f104d 394 int handler_id;
12d9466d 395
4fa5bcfe
AN
396 libbpf_prog_setup_fn_t prog_setup_fn;
397 libbpf_prog_prepare_load_fn_t prog_prepare_load_fn;
398 libbpf_prog_attach_fn_t prog_attach_fn;
25498a19
AN
399};
400
a5b8bd47
WN
401/*
402 * bpf_prog should be a better name but it has been used in
403 * linux/filter.h.
404 */
405struct bpf_program {
bd054102 406 char *name;
52109584 407 char *sec_name;
c1122392 408 size_t sec_idx;
bd054102 409 const struct bpf_sec_def *sec_def;
c1122392
AN
410 /* this program's instruction offset (in number of instructions)
411 * within its containing ELF section
412 */
413 size_t sec_insn_off;
414 /* number of original instructions in ELF section belonging to this
415 * program, not taking into account subprogram instructions possible
416 * appended later during relocation
417 */
418 size_t sec_insn_cnt;
419 /* Offset (in number of instructions) of the start of instruction
420 * belonging to this BPF program within its containing main BPF
421 * program. For the entry-point (main) BPF program, this is always
422 * zero. For a sub-program, this gets reset before each of main BPF
423 * programs are processed and relocated and is used to determined
424 * whether sub-program was already appended to the main program, and
425 * if yes, at which instruction offset.
426 */
427 size_t sub_insn_off;
428
c1122392
AN
429 /* instructions that belong to BPF program; insns[0] is located at
430 * sec_insn_off instruction within its ELF section in ELF file, so
431 * when mapping ELF file instruction index to the local instruction,
432 * one needs to subtract sec_insn_off; and vice versa.
433 */
a5b8bd47 434 struct bpf_insn *insns;
c1122392
AN
435 /* actual number of instruction in this BPF program's image; for
436 * entry-point BPF programs this includes the size of main program
437 * itself plus all the used sub-programs, appended at the end
438 */
c3c55696 439 size_t insns_cnt;
34090915 440
166750bc 441 struct reloc_desc *reloc_desc;
34090915 442 int nr_reloc;
b3ce9079
AN
443
444 /* BPF verifier log settings */
445 char *log_buf;
446 size_t log_size;
447 __u32 log_level;
55cffde2 448
aa9b1ac3 449 struct bpf_object *obj;
d7be143b 450
cf90a20d 451 int fd;
a3820c48 452 bool autoload;
43cb8cba 453 bool autoattach;
7e2925f6 454 bool sym_global;
aea28a60 455 bool mark_btf_static;
c1122392 456 enum bpf_prog_type type;
d7be143b 457 enum bpf_attach_type expected_attach_type;
7e2925f6 458 int exception_cb_idx;
cf90a20d 459
c1122392 460 int prog_ifindex;
91abb4a6 461 __u32 attach_btf_obj_fd;
12a8654b 462 __u32 attach_btf_id;
e7bf94db 463 __u32 attach_prog_fd;
cf90a20d 464
2993e051
YS
465 void *func_info;
466 __u32 func_info_rec_size;
f0187f0b 467 __u32 func_info_cnt;
47eff617 468
3d650141
MKL
469 void *line_info;
470 __u32 line_info_rec_size;
471 __u32 line_info_cnt;
04656198 472 __u32 prog_flags;
a5b8bd47
WN
473};
474
590a0088
MKL
475struct bpf_struct_ops {
476 const char *tname;
477 const struct btf_type *type;
478 struct bpf_program **progs;
479 __u32 *kern_func_off;
480 /* e.g. struct tcp_congestion_ops in bpf_prog's btf format */
481 void *data;
482 /* e.g. struct bpf_struct_ops_tcp_congestion_ops in
483 * btf_vmlinux's format.
484 * struct bpf_struct_ops_tcp_congestion_ops {
485 * [... some other kernel fields ...]
486 * struct tcp_congestion_ops data;
487 * }
488 * kern_vdata-size == sizeof(struct bpf_struct_ops_tcp_congestion_ops)
489 * bpf_map__init_kern_struct_ops() will populate the "kern_vdata"
490 * from "data".
491 */
492 void *kern_vdata;
493 __u32 type_id;
494};
495
ac9d1389
AN
496#define DATA_SEC ".data"
497#define BSS_SEC ".bss"
498#define RODATA_SEC ".rodata"
81bfdd08 499#define KCONFIG_SEC ".kconfig"
1c0c7074 500#define KSYMS_SEC ".ksyms"
590a0088 501#define STRUCT_OPS_SEC ".struct_ops"
809a69d6 502#define STRUCT_OPS_LINK_SEC ".struct_ops.link"
10ebe835 503#define ARENA_SEC ".addr_space.1"
ac9d1389 504
d859900c
DB
505enum libbpf_map_type {
506 LIBBPF_MAP_UNSPEC,
507 LIBBPF_MAP_DATA,
508 LIBBPF_MAP_BSS,
509 LIBBPF_MAP_RODATA,
81bfdd08 510 LIBBPF_MAP_KCONFIG,
d859900c
DB
511};
512
146bf811
AN
513struct bpf_map_def {
514 unsigned int type;
515 unsigned int key_size;
516 unsigned int value_size;
517 unsigned int max_entries;
518 unsigned int map_flags;
519};
520
9d759a9b 521struct bpf_map {
ec41817b 522 struct bpf_object *obj;
561bbcca 523 char *name;
aed65917
AN
524 /* real_name is defined for special internal maps (.rodata*,
525 * .data*, .bss, .kconfig) and preserves their original ELF section
e3ba8e4e 526 * name. This is important to be able to find corresponding BTF
aed65917
AN
527 * DATASEC information.
528 */
529 char *real_name;
01af3bf0 530 int fd;
db48814b
AN
531 int sec_idx;
532 size_t sec_offset;
f0307a7e 533 int map_ifindex;
addb9fc9 534 int inner_map_fd;
9d759a9b 535 struct bpf_map_def def;
1bdb6c9a 536 __u32 numa_node;
646f02ff 537 __u32 btf_var_idx;
9e926acd 538 int mod_btf_fd;
5b891af7
MKL
539 __u32 btf_key_type_id;
540 __u32 btf_value_type_id;
590a0088 541 __u32 btf_vmlinux_value_type_id;
d859900c 542 enum libbpf_map_type libbpf_type;
eba9c5f4 543 void *mmaped;
590a0088 544 struct bpf_struct_ops *st_ops;
646f02ff
AN
545 struct bpf_map *inner_map;
546 void **init_slots;
547 int init_slots_sz;
4580b25f
THJ
548 char *pin_path;
549 bool pinned;
ec6d5f47 550 bool reused;
ec41817b 551 bool autocreate;
47512102 552 __u64 map_extra;
d859900c
DB
553};
554
166750bc
AN
555enum extern_type {
556 EXT_UNKNOWN,
2e33efe3 557 EXT_KCFG,
1c0c7074 558 EXT_KSYM,
2e33efe3
AN
559};
560
561enum kcfg_type {
562 KCFG_UNKNOWN,
563 KCFG_CHAR,
564 KCFG_BOOL,
565 KCFG_INT,
566 KCFG_TRISTATE,
567 KCFG_CHAR_ARR,
166750bc
AN
568};
569
570struct extern_desc {
2e33efe3 571 enum extern_type type;
166750bc
AN
572 int sym_idx;
573 int btf_id;
2e33efe3
AN
574 int sec_btf_id;
575 const char *name;
5964a223 576 char *essent_name;
166750bc 577 bool is_set;
2e33efe3
AN
578 bool is_weak;
579 union {
580 struct {
581 enum kcfg_type type;
582 int sz;
583 int align;
584 int data_off;
585 bool is_signed;
586 } kcfg;
1c0c7074
AN
587 struct {
588 unsigned long long addr;
d370bbe1
HL
589
590 /* target btf_id of the corresponding kernel var. */
284d2587
AN
591 int kernel_btf_obj_fd;
592 int kernel_btf_id;
d370bbe1
HL
593
594 /* local btf_id of the ksym extern's type. */
595 __u32 type_id;
9dbe6015
KKD
596 /* BTF fd index to be patched in for insn->off, this is
597 * 0 for vmlinux BTF, index in obj->fd_array for module
598 * BTF
599 */
600 __s16 btf_fd_idx;
1c0c7074 601 } ksym;
2e33efe3 602 };
166750bc
AN
603};
604
4f33a53d
AN
605struct module_btf {
606 struct btf *btf;
607 char *name;
608 __u32 id;
91abb4a6 609 int fd;
9dbe6015 610 int fd_array_idx;
4f33a53d
AN
611};
612
25bbbd7a
AN
613enum sec_type {
614 SEC_UNUSED = 0,
615 SEC_RELO,
616 SEC_BSS,
617 SEC_DATA,
618 SEC_RODATA,
240bf8a5 619 SEC_ST_OPS,
25bbbd7a
AN
620};
621
622struct elf_sec_desc {
623 enum sec_type sec_type;
624 Elf64_Shdr *shdr;
625 Elf_Data *data;
626};
627
29a30ff5
AN
628struct elf_state {
629 int fd;
630 const void *obj_buf;
631 size_t obj_buf_sz;
632 Elf *elf;
ad23b723 633 Elf64_Ehdr *ehdr;
29a30ff5 634 Elf_Data *symbols;
2e7ba4f8 635 Elf_Data *arena_data;
29a30ff5
AN
636 size_t shstrndx; /* section index for section name strings */
637 size_t strtabidx;
25bbbd7a 638 struct elf_sec_desc *secs;
51deedc9 639 size_t sec_cnt;
29a30ff5
AN
640 int btf_maps_shndx;
641 __u32 btf_maps_sec_btf_id;
642 int text_shndx;
643 int symbols_shndx;
240bf8a5 644 bool has_st_ops;
2e7ba4f8 645 int arena_data_shndx;
4f33a53d
AN
646};
647
2e4913e0
AN
648struct usdt_manager;
649
1a5e3fb1 650struct bpf_object {
d859900c 651 char name[BPF_OBJ_NAME_LEN];
cb1e5e96 652 char license[64];
438363c0 653 __u32 kern_version;
0b3d1efa 654
a5b8bd47
WN
655 struct bpf_program *programs;
656 size_t nr_programs;
9d759a9b
WN
657 struct bpf_map *maps;
658 size_t nr_maps;
bf829271 659 size_t maps_cap;
9d759a9b 660
8601fd42 661 char *kconfig;
166750bc
AN
662 struct extern_desc *externs;
663 int nr_extern;
81bfdd08 664 int kconfig_map_idx;
166750bc 665
52d3352e 666 bool loaded;
c3c55696 667 bool has_subcalls;
25bbbd7a 668 bool has_rodata;
a5b8bd47 669
e2fa0156
AS
670 struct bpf_gen *gen_loader;
671
29a30ff5
AN
672 /* Information when doing ELF related work. Only valid if efile.elf is not NULL */
673 struct elf_state efile;
10931d24 674
8a138aed 675 struct btf *btf;
0f7515ca
AN
676 struct btf_ext *btf_ext;
677
a6ed02ca
KS
678 /* Parse and load BTF vmlinux if any of the programs in the object need
679 * it at load time.
680 */
681 struct btf *btf_vmlinux;
1373ff59
SC
682 /* Path to the custom BTF to be used for BPF CO-RE relocations as an
683 * override for vmlinux BTF.
684 */
685 char *btf_custom_path;
0f7515ca
AN
686 /* vmlinux BTF override for CO-RE relocations */
687 struct btf *btf_vmlinux_override;
4f33a53d
AN
688 /* Lazily initialized kernel module BTFs */
689 struct module_btf *btf_modules;
690 bool btf_modules_loaded;
691 size_t btf_module_cnt;
692 size_t btf_module_cap;
8a138aed 693
e0e3ea88
AN
694 /* optional log settings passed to BPF_BTF_LOAD and BPF_PROG_LOAD commands */
695 char *log_buf;
696 size_t log_size;
697 __u32 log_level;
698
9dbe6015
KKD
699 int *fd_array;
700 size_t fd_array_cap;
701 size_t fd_array_cnt;
702
2e4913e0
AN
703 struct usdt_manager *usdt_man;
704
2e7ba4f8
AN
705 struct bpf_map *arena_map;
706 void *arena_data;
707 size_t arena_data_sz;
708
6b434b61
AN
709 struct kern_feature_cache *feat_cache;
710 char *token_path;
711 int token_fd;
712
1a5e3fb1
WN
713 char path[];
714};
1a5e3fb1 715
88a82120
AN
716static const char *elf_sym_str(const struct bpf_object *obj, size_t off);
717static const char *elf_sec_str(const struct bpf_object *obj, size_t off);
718static Elf_Scn *elf_sec_by_idx(const struct bpf_object *obj, size_t idx);
719static Elf_Scn *elf_sec_by_name(const struct bpf_object *obj, const char *name);
ad23b723 720static Elf64_Shdr *elf_sec_hdr(const struct bpf_object *obj, Elf_Scn *scn);
88a82120
AN
721static const char *elf_sec_name(const struct bpf_object *obj, Elf_Scn *scn);
722static Elf_Data *elf_sec_data(const struct bpf_object *obj, Elf_Scn *scn);
ad23b723
AN
723static Elf64_Sym *elf_sym_by_idx(const struct bpf_object *obj, size_t idx);
724static Elf64_Rel *elf_rel_by_idx(Elf_Data *data, size_t idx);
88a82120 725
29cd77f4 726void bpf_program__unload(struct bpf_program *prog)
55cffde2
WN
727{
728 if (!prog)
729 return;
730
cf90a20d 731 zclose(prog->fd);
2993e051 732
2993e051 733 zfree(&prog->func_info);
07a09d1b 734 zfree(&prog->line_info);
55cffde2
WN
735}
736
a5b8bd47
WN
737static void bpf_program__exit(struct bpf_program *prog)
738{
739 if (!prog)
740 return;
741
55cffde2 742 bpf_program__unload(prog);
88cda1c9 743 zfree(&prog->name);
52109584 744 zfree(&prog->sec_name);
a5b8bd47 745 zfree(&prog->insns);
34090915
WN
746 zfree(&prog->reloc_desc);
747
748 prog->nr_reloc = 0;
a5b8bd47 749 prog->insns_cnt = 0;
c1122392 750 prog->sec_idx = -1;
a5b8bd47
WN
751}
752
c3c55696
AN
753static bool insn_is_subprog_call(const struct bpf_insn *insn)
754{
755 return BPF_CLASS(insn->code) == BPF_JMP &&
756 BPF_OP(insn->code) == BPF_CALL &&
757 BPF_SRC(insn->code) == BPF_K &&
758 insn->src_reg == BPF_PSEUDO_CALL &&
759 insn->dst_reg == 0 &&
760 insn->off == 0;
761}
762
aa0b8d43
MKL
763static bool is_call_insn(const struct bpf_insn *insn)
764{
765 return insn->code == (BPF_JMP | BPF_CALL);
766}
767
53eddb5e
YS
768static bool insn_is_pseudo_func(struct bpf_insn *insn)
769{
aa0b8d43 770 return is_ldimm64_insn(insn) && insn->src_reg == BPF_PSEUDO_FUNC;
53eddb5e
YS
771}
772
a5b8bd47 773static int
c3c55696
AN
774bpf_object__init_prog(struct bpf_object *obj, struct bpf_program *prog,
775 const char *name, size_t sec_idx, const char *sec_name,
776 size_t sec_off, void *insn_data, size_t insn_data_sz)
a5b8bd47 777{
c1122392
AN
778 if (insn_data_sz == 0 || insn_data_sz % BPF_INSN_SZ || sec_off % BPF_INSN_SZ) {
779 pr_warn("sec '%s': corrupted program '%s', offset %zu, size %zu\n",
780 sec_name, name, sec_off, insn_data_sz);
a5b8bd47
WN
781 return -EINVAL;
782 }
783
1ad9cbb8 784 memset(prog, 0, sizeof(*prog));
c3c55696
AN
785 prog->obj = obj;
786
c1122392
AN
787 prog->sec_idx = sec_idx;
788 prog->sec_insn_off = sec_off / BPF_INSN_SZ;
789 prog->sec_insn_cnt = insn_data_sz / BPF_INSN_SZ;
790 /* insns_cnt can later be increased by appending used subprograms */
791 prog->insns_cnt = prog->sec_insn_cnt;
a5b8bd47 792
c1122392 793 prog->type = BPF_PROG_TYPE_UNSPEC;
cf90a20d 794 prog->fd = -1;
7e2925f6 795 prog->exception_cb_idx = -1;
a3820c48
AN
796
797 /* libbpf's convention for SEC("?abc...") is that it's just like
798 * SEC("abc...") but the corresponding bpf_program starts out with
799 * autoload set to false.
800 */
801 if (sec_name[0] == '?') {
802 prog->autoload = false;
803 /* from now on forget there was ? in section name */
804 sec_name++;
805 } else {
806 prog->autoload = true;
807 }
a5b8bd47 808
43cb8cba
HL
809 prog->autoattach = true;
810
b3ce9079
AN
811 /* inherit object's log_level */
812 prog->log_level = obj->log_level;
813
52109584
AN
814 prog->sec_name = strdup(sec_name);
815 if (!prog->sec_name)
c1122392
AN
816 goto errout;
817
818 prog->name = strdup(name);
819 if (!prog->name)
a5b8bd47 820 goto errout;
a5b8bd47 821
c1122392
AN
822 prog->insns = malloc(insn_data_sz);
823 if (!prog->insns)
a5b8bd47 824 goto errout;
c1122392 825 memcpy(prog->insns, insn_data, insn_data_sz);
a5b8bd47
WN
826
827 return 0;
828errout:
c1122392 829 pr_warn("sec '%s': failed to allocate memory for prog '%s'\n", sec_name, name);
a5b8bd47
WN
830 bpf_program__exit(prog);
831 return -ENOMEM;
832}
833
834static int
c1122392
AN
835bpf_object__add_programs(struct bpf_object *obj, Elf_Data *sec_data,
836 const char *sec_name, int sec_idx)
a5b8bd47 837{
6245947c 838 Elf_Data *symbols = obj->efile.symbols;
c1122392
AN
839 struct bpf_program *prog, *progs;
840 void *data = sec_data->d_buf;
6245947c
AN
841 size_t sec_sz = sec_data->d_size, sec_off, prog_sz, nr_syms;
842 int nr_progs, err, i;
c1122392 843 const char *name;
ad23b723 844 Elf64_Sym *sym;
a5b8bd47
WN
845
846 progs = obj->programs;
847 nr_progs = obj->nr_programs;
ad23b723 848 nr_syms = symbols->d_size / sizeof(Elf64_Sym);
a5b8bd47 849
6245947c 850 for (i = 0; i < nr_syms; i++) {
ad23b723
AN
851 sym = elf_sym_by_idx(obj, i);
852
853 if (sym->st_shndx != sec_idx)
6245947c 854 continue;
ad23b723 855 if (ELF64_ST_TYPE(sym->st_info) != STT_FUNC)
6245947c 856 continue;
88cda1c9 857
ad23b723
AN
858 prog_sz = sym->st_size;
859 sec_off = sym->st_value;
88cda1c9 860
ad23b723 861 name = elf_sym_str(obj, sym->st_name);
c1122392
AN
862 if (!name) {
863 pr_warn("sec '%s': failed to get symbol name for offset %zu\n",
864 sec_name, sec_off);
865 return -LIBBPF_ERRNO__FORMAT;
866 }
88cda1c9 867
c1122392
AN
868 if (sec_off + prog_sz > sec_sz) {
869 pr_warn("sec '%s': program at offset %zu crosses section boundary\n",
870 sec_name, sec_off);
871 return -LIBBPF_ERRNO__FORMAT;
872 }
88cda1c9 873
ad23b723 874 if (sec_idx != obj->efile.text_shndx && ELF64_ST_BIND(sym->st_info) == STB_LOCAL) {
513f485c
AN
875 pr_warn("sec '%s': program '%s' is static and not supported\n", sec_name, name);
876 return -ENOTSUP;
877 }
878
c3c55696
AN
879 pr_debug("sec '%s': found program '%s' at insn offset %zu (%zu bytes), code size %zu insns (%zu bytes)\n",
880 sec_name, name, sec_off / BPF_INSN_SZ, sec_off, prog_sz / BPF_INSN_SZ, prog_sz);
88cda1c9 881
c3c55696 882 progs = libbpf_reallocarray(progs, nr_progs + 1, sizeof(*progs));
c1122392
AN
883 if (!progs) {
884 /*
885 * In this case the original obj->programs
886 * is still valid, so don't need special treat for
887 * bpf_close_object().
888 */
889 pr_warn("sec '%s': failed to alloc memory for new program '%s'\n",
890 sec_name, name);
891 return -ENOMEM;
88cda1c9 892 }
c1122392 893 obj->programs = progs;
88cda1c9 894
c1122392 895 prog = &progs[nr_progs];
9a94f277 896
c3c55696
AN
897 err = bpf_object__init_prog(obj, prog, name, sec_idx, sec_name,
898 sec_off, data + sec_off, prog_sz);
c1122392
AN
899 if (err)
900 return err;
9a94f277 901
7e2925f6
KKD
902 if (ELF64_ST_BIND(sym->st_info) != STB_LOCAL)
903 prog->sym_global = true;
904
e5670fa0
AN
905 /* if function is a global/weak symbol, but has restricted
906 * (STV_HIDDEN or STV_INTERNAL) visibility, mark its BTF FUNC
907 * as static to enable more permissive BPF verification mode
908 * with more outside context available to BPF verifier
aea28a60 909 */
7e2925f6
KKD
910 if (prog->sym_global && (ELF64_ST_VISIBILITY(sym->st_other) == STV_HIDDEN
911 || ELF64_ST_VISIBILITY(sym->st_other) == STV_INTERNAL))
aea28a60
AN
912 prog->mark_btf_static = true;
913
c1122392
AN
914 nr_progs++;
915 obj->nr_programs = nr_progs;
88cda1c9
MKL
916 }
917
918 return 0;
919}
920
590a0088
MKL
921static const struct btf_member *
922find_member_by_offset(const struct btf_type *t, __u32 bit_offset)
923{
924 struct btf_member *m;
925 int i;
926
927 for (i = 0, m = btf_members(t); i < btf_vlen(t); i++, m++) {
928 if (btf_member_bit_offset(t, i) == bit_offset)
929 return m;
930 }
931
932 return NULL;
933}
934
935static const struct btf_member *
936find_member_by_name(const struct btf *btf, const struct btf_type *t,
937 const char *name)
938{
939 struct btf_member *m;
940 int i;
941
942 for (i = 0, m = btf_members(t); i < btf_vlen(t); i++, m++) {
943 if (!strcmp(btf__name_by_offset(btf, m->name_off), name))
944 return m;
945 }
946
947 return NULL;
948}
949
9e926acd
KFL
950static int find_ksym_btf_id(struct bpf_object *obj, const char *ksym_name,
951 __u16 kind, struct btf **res_btf,
952 struct module_btf **res_mod_btf);
953
590a0088 954#define STRUCT_OPS_VALUE_PREFIX "bpf_struct_ops_"
a6ed02ca
KS
955static int find_btf_by_prefix_kind(const struct btf *btf, const char *prefix,
956 const char *name, __u32 kind);
590a0088
MKL
957
958static int
a2a5172c 959find_struct_ops_kern_types(struct bpf_object *obj, const char *tname_raw,
9e926acd 960 struct module_btf **mod_btf,
590a0088
MKL
961 const struct btf_type **type, __u32 *type_id,
962 const struct btf_type **vtype, __u32 *vtype_id,
963 const struct btf_member **data_member)
964{
965 const struct btf_type *kern_type, *kern_vtype;
966 const struct btf_member *kern_data_member;
9e926acd 967 struct btf *btf;
590a0088 968 __s32 kern_vtype_id, kern_type_id;
a2a5172c 969 char tname[256];
590a0088
MKL
970 __u32 i;
971
a2a5172c
EZ
972 snprintf(tname, sizeof(tname), "%.*s",
973 (int)bpf_core_essential_name_len(tname_raw), tname_raw);
974
9e926acd
KFL
975 kern_type_id = find_ksym_btf_id(obj, tname, BTF_KIND_STRUCT,
976 &btf, mod_btf);
590a0088
MKL
977 if (kern_type_id < 0) {
978 pr_warn("struct_ops init_kern: struct %s is not found in kernel BTF\n",
979 tname);
980 return kern_type_id;
981 }
982 kern_type = btf__type_by_id(btf, kern_type_id);
983
984 /* Find the corresponding "map_value" type that will be used
985 * in map_update(BPF_MAP_TYPE_STRUCT_OPS). For example,
986 * find "struct bpf_struct_ops_tcp_congestion_ops" from the
987 * btf_vmlinux.
988 */
a6ed02ca
KS
989 kern_vtype_id = find_btf_by_prefix_kind(btf, STRUCT_OPS_VALUE_PREFIX,
990 tname, BTF_KIND_STRUCT);
590a0088 991 if (kern_vtype_id < 0) {
a6ed02ca
KS
992 pr_warn("struct_ops init_kern: struct %s%s is not found in kernel BTF\n",
993 STRUCT_OPS_VALUE_PREFIX, tname);
590a0088
MKL
994 return kern_vtype_id;
995 }
996 kern_vtype = btf__type_by_id(btf, kern_vtype_id);
997
998 /* Find "struct tcp_congestion_ops" from
999 * struct bpf_struct_ops_tcp_congestion_ops {
1000 * [ ... ]
1001 * struct tcp_congestion_ops data;
1002 * }
1003 */
1004 kern_data_member = btf_members(kern_vtype);
1005 for (i = 0; i < btf_vlen(kern_vtype); i++, kern_data_member++) {
1006 if (kern_data_member->type == kern_type_id)
1007 break;
1008 }
1009 if (i == btf_vlen(kern_vtype)) {
a6ed02ca
KS
1010 pr_warn("struct_ops init_kern: struct %s data is not found in struct %s%s\n",
1011 tname, STRUCT_OPS_VALUE_PREFIX, tname);
590a0088
MKL
1012 return -EINVAL;
1013 }
1014
1015 *type = kern_type;
1016 *type_id = kern_type_id;
1017 *vtype = kern_vtype;
1018 *vtype_id = kern_vtype_id;
1019 *data_member = kern_data_member;
1020
1021 return 0;
1022}
1023
1024static bool bpf_map__is_struct_ops(const struct bpf_map *map)
1025{
1026 return map->def.type == BPF_MAP_TYPE_STRUCT_OPS;
1027}
1028
69e4a9d2
KFL
1029static bool is_valid_st_ops_program(struct bpf_object *obj,
1030 const struct bpf_program *prog)
1031{
1032 int i;
1033
1034 for (i = 0; i < obj->nr_programs; i++) {
1035 if (&obj->programs[i] == prog)
1036 return prog->type == BPF_PROG_TYPE_STRUCT_OPS;
1037 }
1038
1039 return false;
1040}
1041
fe9d049c
EZ
1042/* For each struct_ops program P, referenced from some struct_ops map M,
1043 * enable P.autoload if there are Ms for which M.autocreate is true,
1044 * disable P.autoload if for all Ms M.autocreate is false.
1045 * Don't change P.autoload for programs that are not referenced from any maps.
1046 */
1047static int bpf_object_adjust_struct_ops_autoload(struct bpf_object *obj)
1048{
1049 struct bpf_program *prog, *slot_prog;
1050 struct bpf_map *map;
1051 int i, j, k, vlen;
1052
1053 for (i = 0; i < obj->nr_programs; ++i) {
1054 int should_load = false;
1055 int use_cnt = 0;
1056
1057 prog = &obj->programs[i];
1058 if (prog->type != BPF_PROG_TYPE_STRUCT_OPS)
1059 continue;
1060
1061 for (j = 0; j < obj->nr_maps; ++j) {
1062 map = &obj->maps[j];
1063 if (!bpf_map__is_struct_ops(map))
1064 continue;
1065
1066 vlen = btf_vlen(map->st_ops->type);
1067 for (k = 0; k < vlen; ++k) {
1068 slot_prog = map->st_ops->progs[k];
1069 if (prog != slot_prog)
1070 continue;
1071
1072 use_cnt++;
1073 if (map->autocreate)
1074 should_load = true;
1075 }
1076 }
1077 if (use_cnt)
1078 prog->autoload = should_load;
1079 }
1080
1081 return 0;
1082}
1083
590a0088 1084/* Init the map's fields that depend on kern_btf */
9e926acd 1085static int bpf_map__init_kern_struct_ops(struct bpf_map *map)
590a0088
MKL
1086{
1087 const struct btf_member *member, *kern_member, *kern_data_member;
1088 const struct btf_type *type, *kern_type, *kern_vtype;
1089 __u32 i, kern_type_id, kern_vtype_id, kern_data_off;
9e926acd
KFL
1090 struct bpf_object *obj = map->obj;
1091 const struct btf *btf = obj->btf;
590a0088 1092 struct bpf_struct_ops *st_ops;
9e926acd
KFL
1093 const struct btf *kern_btf;
1094 struct module_btf *mod_btf;
590a0088
MKL
1095 void *data, *kern_data;
1096 const char *tname;
1097 int err;
1098
1099 st_ops = map->st_ops;
1100 type = st_ops->type;
1101 tname = st_ops->tname;
9e926acd 1102 err = find_struct_ops_kern_types(obj, tname, &mod_btf,
590a0088
MKL
1103 &kern_type, &kern_type_id,
1104 &kern_vtype, &kern_vtype_id,
1105 &kern_data_member);
1106 if (err)
1107 return err;
1108
9e926acd
KFL
1109 kern_btf = mod_btf ? mod_btf->btf : obj->btf_vmlinux;
1110
590a0088
MKL
1111 pr_debug("struct_ops init_kern %s: type_id:%u kern_type_id:%u kern_vtype_id:%u\n",
1112 map->name, st_ops->type_id, kern_type_id, kern_vtype_id);
1113
9e926acd 1114 map->mod_btf_fd = mod_btf ? mod_btf->fd : -1;
590a0088
MKL
1115 map->def.value_size = kern_vtype->size;
1116 map->btf_vmlinux_value_type_id = kern_vtype_id;
1117
1118 st_ops->kern_vdata = calloc(1, kern_vtype->size);
1119 if (!st_ops->kern_vdata)
1120 return -ENOMEM;
1121
1122 data = st_ops->data;
1123 kern_data_off = kern_data_member->offset / 8;
1124 kern_data = st_ops->kern_vdata + kern_data_off;
1125
1126 member = btf_members(type);
1127 for (i = 0; i < btf_vlen(type); i++, member++) {
1128 const struct btf_type *mtype, *kern_mtype;
1129 __u32 mtype_id, kern_mtype_id;
1130 void *mdata, *kern_mdata;
1131 __s64 msize, kern_msize;
1132 __u32 moff, kern_moff;
1133 __u32 kern_member_idx;
1134 const char *mname;
1135
1136 mname = btf__name_by_offset(btf, member->name_off);
c911fc61
KFL
1137 moff = member->offset / 8;
1138 mdata = data + moff;
1139 msize = btf__resolve_size(btf, member->type);
1140 if (msize < 0) {
1141 pr_warn("struct_ops init_kern %s: failed to resolve the size of member %s\n",
1142 map->name, mname);
1143 return msize;
1144 }
1145
590a0088
MKL
1146 kern_member = find_member_by_name(kern_btf, kern_type, mname);
1147 if (!kern_member) {
c911fc61
KFL
1148 /* Skip all zeros or null fields if they are not
1149 * presented in the kernel BTF.
1150 */
1151 if (libbpf_is_mem_zeroed(mdata, msize)) {
f973fccd 1152 st_ops->progs[i] = NULL;
c911fc61
KFL
1153 pr_info("struct_ops %s: member %s not found in kernel, skipping it as it's set to zero\n",
1154 map->name, mname);
1155 continue;
1156 }
1157
590a0088
MKL
1158 pr_warn("struct_ops init_kern %s: Cannot find member %s in kernel BTF\n",
1159 map->name, mname);
1160 return -ENOTSUP;
1161 }
1162
1163 kern_member_idx = kern_member - btf_members(kern_type);
1164 if (btf_member_bitfield_size(type, i) ||
1165 btf_member_bitfield_size(kern_type, kern_member_idx)) {
1166 pr_warn("struct_ops init_kern %s: bitfield %s is not supported\n",
1167 map->name, mname);
1168 return -ENOTSUP;
1169 }
1170
590a0088 1171 kern_moff = kern_member->offset / 8;
590a0088
MKL
1172 kern_mdata = kern_data + kern_moff;
1173
1174 mtype = skip_mods_and_typedefs(btf, member->type, &mtype_id);
1175 kern_mtype = skip_mods_and_typedefs(kern_btf, kern_member->type,
1176 &kern_mtype_id);
1177 if (BTF_INFO_KIND(mtype->info) !=
1178 BTF_INFO_KIND(kern_mtype->info)) {
1179 pr_warn("struct_ops init_kern %s: Unmatched member type %s %u != %u(kernel)\n",
1180 map->name, mname, BTF_INFO_KIND(mtype->info),
1181 BTF_INFO_KIND(kern_mtype->info));
1182 return -ENOTSUP;
1183 }
1184
1185 if (btf_is_ptr(mtype)) {
1186 struct bpf_program *prog;
1187
69e4a9d2
KFL
1188 /* Update the value from the shadow type */
1189 prog = *(void **)mdata;
1190 st_ops->progs[i] = prog;
d2836ddd
MKL
1191 if (!prog)
1192 continue;
69e4a9d2
KFL
1193 if (!is_valid_st_ops_program(obj, prog)) {
1194 pr_warn("struct_ops init_kern %s: member %s is not a struct_ops program\n",
1195 map->name, mname);
1196 return -ENOTSUP;
1197 }
d2836ddd 1198
590a0088
MKL
1199 kern_mtype = skip_mods_and_typedefs(kern_btf,
1200 kern_mtype->type,
1201 &kern_mtype_id);
d2836ddd
MKL
1202
1203 /* mtype->type must be a func_proto which was
1204 * guaranteed in bpf_object__collect_st_ops_relos(),
1205 * so only check kern_mtype for func_proto here.
1206 */
1207 if (!btf_is_func_proto(kern_mtype)) {
1208 pr_warn("struct_ops init_kern %s: kernel member %s is not a func ptr\n",
590a0088
MKL
1209 map->name, mname);
1210 return -ENOTSUP;
1211 }
1212
9e926acd
KFL
1213 if (mod_btf)
1214 prog->attach_btf_obj_fd = mod_btf->fd;
d9ab2f76
EZ
1215
1216 /* if we haven't yet processed this BPF program, record proper
1217 * attach_btf_id and member_idx
1218 */
1219 if (!prog->attach_btf_id) {
1220 prog->attach_btf_id = kern_type_id;
1221 prog->expected_attach_type = kern_member_idx;
1222 }
1223
1224 /* struct_ops BPF prog can be re-used between multiple
1225 * .struct_ops & .struct_ops.link as long as it's the
1226 * same struct_ops struct definition and the same
1227 * function pointer field
1228 */
1229 if (prog->attach_btf_id != kern_type_id) {
1230 pr_warn("struct_ops init_kern %s func ptr %s: invalid reuse of prog %s in sec %s with type %u: attach_btf_id %u != kern_type_id %u\n",
1231 map->name, mname, prog->name, prog->sec_name, prog->type,
1232 prog->attach_btf_id, kern_type_id);
1233 return -EINVAL;
1234 }
1235 if (prog->expected_attach_type != kern_member_idx) {
1236 pr_warn("struct_ops init_kern %s func ptr %s: invalid reuse of prog %s in sec %s with type %u: expected_attach_type %u != kern_member_idx %u\n",
1237 map->name, mname, prog->name, prog->sec_name, prog->type,
1238 prog->expected_attach_type, kern_member_idx);
1239 return -EINVAL;
1240 }
590a0088
MKL
1241
1242 st_ops->kern_func_off[i] = kern_data_off + kern_moff;
1243
1244 pr_debug("struct_ops init_kern %s: func ptr %s is set to prog %s from data(+%u) to kern_data(+%u)\n",
1245 map->name, mname, prog->name, moff,
1246 kern_moff);
1247
1248 continue;
1249 }
1250
590a0088 1251 kern_msize = btf__resolve_size(kern_btf, kern_mtype_id);
c911fc61 1252 if (kern_msize < 0 || msize != kern_msize) {
590a0088
MKL
1253 pr_warn("struct_ops init_kern %s: Error in size of member %s: %zd != %zd(kernel)\n",
1254 map->name, mname, (ssize_t)msize,
1255 (ssize_t)kern_msize);
1256 return -ENOTSUP;
1257 }
1258
1259 pr_debug("struct_ops init_kern %s: copy %s %u bytes from data(+%u) to kern_data(+%u)\n",
1260 map->name, mname, (unsigned int)msize,
1261 moff, kern_moff);
1262 memcpy(kern_mdata, mdata, msize);
1263 }
1264
1265 return 0;
1266}
1267
1268static int bpf_object__init_kern_struct_ops_maps(struct bpf_object *obj)
1269{
590a0088
MKL
1270 struct bpf_map *map;
1271 size_t i;
1272 int err;
1273
1274 for (i = 0; i < obj->nr_maps; i++) {
1275 map = &obj->maps[i];
1276
1277 if (!bpf_map__is_struct_ops(map))
1278 continue;
1279
8db05261
EZ
1280 if (!map->autocreate)
1281 continue;
1282
9e926acd 1283 err = bpf_map__init_kern_struct_ops(map);
a6ed02ca 1284 if (err)
590a0088 1285 return err;
590a0088
MKL
1286 }
1287
590a0088
MKL
1288 return 0;
1289}
1290
809a69d6 1291static int init_struct_ops_maps(struct bpf_object *obj, const char *sec_name,
240bf8a5 1292 int shndx, Elf_Data *data)
590a0088
MKL
1293{
1294 const struct btf_type *type, *datasec;
1295 const struct btf_var_secinfo *vsi;
1296 struct bpf_struct_ops *st_ops;
1297 const char *tname, *var_name;
1298 __s32 type_id, datasec_id;
1299 const struct btf *btf;
1300 struct bpf_map *map;
1301 __u32 i;
1302
809a69d6 1303 if (shndx == -1)
590a0088
MKL
1304 return 0;
1305
1306 btf = obj->btf;
809a69d6 1307 datasec_id = btf__find_by_name_kind(btf, sec_name,
590a0088
MKL
1308 BTF_KIND_DATASEC);
1309 if (datasec_id < 0) {
1310 pr_warn("struct_ops init: DATASEC %s not found\n",
809a69d6 1311 sec_name);
590a0088
MKL
1312 return -EINVAL;
1313 }
1314
1315 datasec = btf__type_by_id(btf, datasec_id);
1316 vsi = btf_var_secinfos(datasec);
1317 for (i = 0; i < btf_vlen(datasec); i++, vsi++) {
1318 type = btf__type_by_id(obj->btf, vsi->type);
1319 var_name = btf__name_by_offset(obj->btf, type->name_off);
1320
1321 type_id = btf__resolve_type(obj->btf, vsi->type);
1322 if (type_id < 0) {
1323 pr_warn("struct_ops init: Cannot resolve var type_id %u in DATASEC %s\n",
809a69d6 1324 vsi->type, sec_name);
590a0088
MKL
1325 return -EINVAL;
1326 }
1327
1328 type = btf__type_by_id(obj->btf, type_id);
1329 tname = btf__name_by_offset(obj->btf, type->name_off);
1330 if (!tname[0]) {
1331 pr_warn("struct_ops init: anonymous type is not supported\n");
1332 return -ENOTSUP;
1333 }
1334 if (!btf_is_struct(type)) {
1335 pr_warn("struct_ops init: %s is not a struct\n", tname);
1336 return -EINVAL;
1337 }
1338
1339 map = bpf_object__add_map(obj);
1340 if (IS_ERR(map))
1341 return PTR_ERR(map);
1342
809a69d6 1343 map->sec_idx = shndx;
590a0088
MKL
1344 map->sec_offset = vsi->offset;
1345 map->name = strdup(var_name);
1346 if (!map->name)
1347 return -ENOMEM;
3644d285 1348 map->btf_value_type_id = type_id;
590a0088 1349
5ad0ecbe
EZ
1350 /* Follow same convention as for programs autoload:
1351 * SEC("?.struct_ops") means map is not created by default.
1352 */
1353 if (sec_name[0] == '?') {
1354 map->autocreate = false;
1355 /* from now on forget there was ? in section name */
1356 sec_name++;
1357 }
1358
590a0088
MKL
1359 map->def.type = BPF_MAP_TYPE_STRUCT_OPS;
1360 map->def.key_size = sizeof(int);
1361 map->def.value_size = type->size;
1362 map->def.max_entries = 1;
240bf8a5 1363 map->def.map_flags = strcmp(sec_name, STRUCT_OPS_LINK_SEC) == 0 ? BPF_F_LINK : 0;
590a0088
MKL
1364
1365 map->st_ops = calloc(1, sizeof(*map->st_ops));
1366 if (!map->st_ops)
1367 return -ENOMEM;
1368 st_ops = map->st_ops;
1369 st_ops->data = malloc(type->size);
1370 st_ops->progs = calloc(btf_vlen(type), sizeof(*st_ops->progs));
1371 st_ops->kern_func_off = malloc(btf_vlen(type) *
1372 sizeof(*st_ops->kern_func_off));
1373 if (!st_ops->data || !st_ops->progs || !st_ops->kern_func_off)
1374 return -ENOMEM;
1375
809a69d6 1376 if (vsi->offset + type->size > data->d_size) {
590a0088 1377 pr_warn("struct_ops init: var %s is beyond the end of DATASEC %s\n",
809a69d6 1378 var_name, sec_name);
590a0088
MKL
1379 return -EINVAL;
1380 }
1381
1382 memcpy(st_ops->data,
809a69d6 1383 data->d_buf + vsi->offset,
590a0088
MKL
1384 type->size);
1385 st_ops->tname = tname;
1386 st_ops->type = type;
1387 st_ops->type_id = type_id;
1388
1389 pr_debug("struct_ops init: struct %s(type_id=%u) %s found at offset %u\n",
1390 tname, type_id, var_name, vsi->offset);
1391 }
1392
1393 return 0;
1394}
1395
809a69d6
KFL
1396static int bpf_object_init_struct_ops(struct bpf_object *obj)
1397{
240bf8a5
EZ
1398 const char *sec_name;
1399 int sec_idx, err;
809a69d6 1400
240bf8a5
EZ
1401 for (sec_idx = 0; sec_idx < obj->efile.sec_cnt; ++sec_idx) {
1402 struct elf_sec_desc *desc = &obj->efile.secs[sec_idx];
1403
1404 if (desc->sec_type != SEC_ST_OPS)
1405 continue;
1406
1407 sec_name = elf_sec_name(obj, elf_sec_by_idx(obj, sec_idx));
1408 if (!sec_name)
1409 return -LIBBPF_ERRNO__FORMAT;
1410
1411 err = init_struct_ops_maps(obj, sec_name, sec_idx, desc->data);
1412 if (err)
1413 return err;
1414 }
1415
1416 return 0;
809a69d6
KFL
1417}
1418
6c956392 1419static struct bpf_object *bpf_object__new(const char *path,
5e61f270 1420 const void *obj_buf,
2ce8450e
AN
1421 size_t obj_buf_sz,
1422 const char *obj_name)
1a5e3fb1
WN
1423{
1424 struct bpf_object *obj;
d859900c 1425 char *end;
1a5e3fb1
WN
1426
1427 obj = calloc(1, sizeof(struct bpf_object) + strlen(path) + 1);
1428 if (!obj) {
be18010e 1429 pr_warn("alloc memory failed for %s\n", path);
6371ca3b 1430 return ERR_PTR(-ENOMEM);
1a5e3fb1
WN
1431 }
1432
1433 strcpy(obj->path, path);
2ce8450e 1434 if (obj_name) {
9fc205b4 1435 libbpf_strlcpy(obj->name, obj_name, sizeof(obj->name));
2ce8450e
AN
1436 } else {
1437 /* Using basename() GNU version which doesn't modify arg. */
9fc205b4 1438 libbpf_strlcpy(obj->name, basename((void *)path), sizeof(obj->name));
2ce8450e
AN
1439 end = strchr(obj->name, '.');
1440 if (end)
1441 *end = 0;
1442 }
6c956392 1443
d859900c 1444 obj->efile.fd = -1;
6c956392 1445 /*
76e1022b 1446 * Caller of this function should also call
6c956392
WN
1447 * bpf_object__elf_finish() after data collection to return
1448 * obj_buf to user. If not, we should duplicate the buffer to
1449 * avoid user freeing them before elf finish.
1450 */
1451 obj->efile.obj_buf = obj_buf;
1452 obj->efile.obj_buf_sz = obj_buf_sz;
abd29c93 1453 obj->efile.btf_maps_shndx = -1;
81bfdd08 1454 obj->kconfig_map_idx = -1;
6c956392 1455
5e61f270 1456 obj->kern_version = get_kernel_version();
52d3352e 1457 obj->loaded = false;
9a208eff 1458
1a5e3fb1
WN
1459 return obj;
1460}
1461
1462static void bpf_object__elf_finish(struct bpf_object *obj)
1463{
29a30ff5 1464 if (!obj->efile.elf)
1a5e3fb1
WN
1465 return;
1466
b71a2ebf
GC
1467 elf_end(obj->efile.elf);
1468 obj->efile.elf = NULL;
bec7d68c 1469 obj->efile.symbols = NULL;
2e7ba4f8 1470 obj->efile.arena_data = NULL;
b62f06e8 1471
25bbbd7a
AN
1472 zfree(&obj->efile.secs);
1473 obj->efile.sec_cnt = 0;
1a5e3fb1 1474 zclose(obj->efile.fd);
6c956392
WN
1475 obj->efile.obj_buf = NULL;
1476 obj->efile.obj_buf_sz = 0;
1a5e3fb1
WN
1477}
1478
1479static int bpf_object__elf_init(struct bpf_object *obj)
1480{
ad23b723 1481 Elf64_Ehdr *ehdr;
1a5e3fb1 1482 int err = 0;
ad23b723 1483 Elf *elf;
1a5e3fb1 1484
29a30ff5 1485 if (obj->efile.elf) {
88a82120 1486 pr_warn("elf: init internal error\n");
6371ca3b 1487 return -LIBBPF_ERRNO__LIBELF;
1a5e3fb1
WN
1488 }
1489
6c956392 1490 if (obj->efile.obj_buf_sz > 0) {
146bf811 1491 /* obj_buf should have been validated by bpf_object__open_mem(). */
ad23b723 1492 elf = elf_memory((char *)obj->efile.obj_buf, obj->efile.obj_buf_sz);
6c956392 1493 } else {
92274e24 1494 obj->efile.fd = open(obj->path, O_RDONLY | O_CLOEXEC);
6c956392 1495 if (obj->efile.fd < 0) {
be5c5d4e 1496 char errmsg[STRERR_BUFSIZE], *cp;
1ce6a9fc 1497
be5c5d4e
AN
1498 err = -errno;
1499 cp = libbpf_strerror_r(err, errmsg, sizeof(errmsg));
88a82120 1500 pr_warn("elf: failed to open %s: %s\n", obj->path, cp);
be5c5d4e 1501 return err;
6c956392
WN
1502 }
1503
ad23b723 1504 elf = elf_begin(obj->efile.fd, ELF_C_READ_MMAP, NULL);
1a5e3fb1
WN
1505 }
1506
ad23b723 1507 if (!elf) {
88a82120 1508 pr_warn("elf: failed to open %s as ELF file: %s\n", obj->path, elf_errmsg(-1));
6371ca3b 1509 err = -LIBBPF_ERRNO__LIBELF;
1a5e3fb1
WN
1510 goto errout;
1511 }
1512
ad23b723
AN
1513 obj->efile.elf = elf;
1514
1515 if (elf_kind(elf) != ELF_K_ELF) {
1516 err = -LIBBPF_ERRNO__FORMAT;
1517 pr_warn("elf: '%s' is not a proper ELF object\n", obj->path);
1518 goto errout;
1519 }
1520
1521 if (gelf_getclass(elf) != ELFCLASS64) {
1522 err = -LIBBPF_ERRNO__FORMAT;
1523 pr_warn("elf: '%s' is not a 64-bit ELF object\n", obj->path);
1524 goto errout;
1525 }
1526
1527 obj->efile.ehdr = ehdr = elf64_getehdr(elf);
1528 if (!obj->efile.ehdr) {
88a82120 1529 pr_warn("elf: failed to get ELF header from %s: %s\n", obj->path, elf_errmsg(-1));
6371ca3b 1530 err = -LIBBPF_ERRNO__FORMAT;
1a5e3fb1
WN
1531 goto errout;
1532 }
1a5e3fb1 1533
ad23b723 1534 if (elf_getshdrstrndx(elf, &obj->efile.shstrndx)) {
88a82120
AN
1535 pr_warn("elf: failed to get section names section index for %s: %s\n",
1536 obj->path, elf_errmsg(-1));
1537 err = -LIBBPF_ERRNO__FORMAT;
1538 goto errout;
1539 }
1540
70e79866 1541 /* ELF is corrupted/truncated, avoid calling elf_strptr. */
ad23b723 1542 if (!elf_rawdata(elf_getscn(elf, obj->efile.shstrndx), NULL)) {
88a82120
AN
1543 pr_warn("elf: failed to get section names strings from %s: %s\n",
1544 obj->path, elf_errmsg(-1));
8f3f5792
NK
1545 err = -LIBBPF_ERRNO__FORMAT;
1546 goto errout;
88a82120
AN
1547 }
1548
9b16137a 1549 /* Old LLVM set e_machine to EM_NONE */
ad23b723 1550 if (ehdr->e_type != ET_REL || (ehdr->e_machine && ehdr->e_machine != EM_BPF)) {
88a82120 1551 pr_warn("elf: %s is not a valid eBPF object file\n", obj->path);
6371ca3b 1552 err = -LIBBPF_ERRNO__FORMAT;
1a5e3fb1
WN
1553 goto errout;
1554 }
1555
1556 return 0;
1557errout:
1558 bpf_object__elf_finish(obj);
1559 return err;
1560}
1561
12ef5634 1562static int bpf_object__check_endianness(struct bpf_object *obj)
cc4228d5 1563{
3930198d 1564#if __BYTE_ORDER__ == __ORDER_LITTLE_ENDIAN__
ad23b723 1565 if (obj->efile.ehdr->e_ident[EI_DATA] == ELFDATA2LSB)
12ef5634 1566 return 0;
3930198d 1567#elif __BYTE_ORDER__ == __ORDER_BIG_ENDIAN__
ad23b723 1568 if (obj->efile.ehdr->e_ident[EI_DATA] == ELFDATA2MSB)
12ef5634
AN
1569 return 0;
1570#else
1571# error "Unrecognized __BYTE_ORDER__"
1572#endif
88a82120 1573 pr_warn("elf: endianness mismatch in %s.\n", obj->path);
6371ca3b 1574 return -LIBBPF_ERRNO__ENDIAN;
cc4228d5
WN
1575}
1576
cb1e5e96 1577static int
399dc65e 1578bpf_object__init_license(struct bpf_object *obj, void *data, size_t size)
cb1e5e96 1579{
35a85550
SHY
1580 if (!data) {
1581 pr_warn("invalid license section in %s\n", obj->path);
1582 return -LIBBPF_ERRNO__FORMAT;
1583 }
f9798239
AN
1584 /* libbpf_strlcpy() only copies first N - 1 bytes, so size + 1 won't
1585 * go over allowed ELF data section buffer
1586 */
1587 libbpf_strlcpy(obj->license, data, min(size + 1, sizeof(obj->license)));
cb1e5e96
WN
1588 pr_debug("license of %s is %s\n", obj->path, obj->license);
1589 return 0;
1590}
1591
54b8625c
JF
1592static int
1593bpf_object__init_kversion(struct bpf_object *obj, void *data, size_t size)
1594{
1595 __u32 kver;
1596
35a85550 1597 if (!data || size != sizeof(kver)) {
be18010e 1598 pr_warn("invalid kver section in %s\n", obj->path);
54b8625c
JF
1599 return -LIBBPF_ERRNO__FORMAT;
1600 }
1601 memcpy(&kver, data, sizeof(kver));
1602 obj->kern_version = kver;
1603 pr_debug("kernel version of %s is %x\n", obj->path, obj->kern_version);
1604 return 0;
1605}
1606
addb9fc9
NS
1607static bool bpf_map_type__is_map_in_map(enum bpf_map_type type)
1608{
1609 if (type == BPF_MAP_TYPE_ARRAY_OF_MAPS ||
1610 type == BPF_MAP_TYPE_HASH_OF_MAPS)
1611 return true;
1612 return false;
1613}
1614
b96c07f3 1615static int find_elf_sec_sz(const struct bpf_object *obj, const char *name, __u32 *size)
1713d68b 1616{
25bbbd7a
AN
1617 Elf_Data *data;
1618 Elf_Scn *scn;
1713d68b 1619
25bbbd7a 1620 if (!name)
1713d68b 1621 return -EINVAL;
88a82120 1622
25bbbd7a
AN
1623 scn = elf_sec_by_name(obj, name);
1624 data = elf_sec_data(obj, scn);
1625 if (data) {
25bbbd7a 1626 *size = data->d_size;
08894d9c 1627 return 0; /* found it */
1713d68b
DB
1628 }
1629
08894d9c 1630 return -ENOENT;
1713d68b
DB
1631}
1632
f33f742d 1633static Elf64_Sym *find_elf_var_sym(const struct bpf_object *obj, const char *name)
1713d68b
DB
1634{
1635 Elf_Data *symbols = obj->efile.symbols;
1636 const char *sname;
1637 size_t si;
1638
ad23b723
AN
1639 for (si = 0; si < symbols->d_size / sizeof(Elf64_Sym); si++) {
1640 Elf64_Sym *sym = elf_sym_by_idx(obj, si);
1713d68b 1641
3a06ec0a
AN
1642 if (ELF64_ST_TYPE(sym->st_info) != STT_OBJECT)
1643 continue;
1644
1645 if (ELF64_ST_BIND(sym->st_info) != STB_GLOBAL &&
1646 ELF64_ST_BIND(sym->st_info) != STB_WEAK)
1713d68b
DB
1647 continue;
1648
ad23b723 1649 sname = elf_sym_str(obj, sym->st_name);
1713d68b 1650 if (!sname) {
ad23b723 1651 pr_warn("failed to get sym name string for var %s\n", name);
f33f742d 1652 return ERR_PTR(-EIO);
1713d68b 1653 }
f33f742d
AN
1654 if (strcmp(name, sname) == 0)
1655 return sym;
1713d68b
DB
1656 }
1657
f33f742d 1658 return ERR_PTR(-ENOENT);
1713d68b
DB
1659}
1660
9fa5e1a1
AN
1661/* Some versions of Android don't provide memfd_create() in their libc
1662 * implementation, so avoid complications and just go straight to Linux
1663 * syscall.
1664 */
1665static int sys_memfd_create(const char *name, unsigned flags)
1666{
1667 return syscall(__NR_memfd_create, name, flags);
1668}
1669
ddb2ffdc
ACM
1670#ifndef MFD_CLOEXEC
1671#define MFD_CLOEXEC 0x0001U
1672#endif
1673
dac645b9
AN
1674static int create_placeholder_fd(void)
1675{
1676 int fd;
1677
9fa5e1a1 1678 fd = ensure_good_fd(sys_memfd_create("libbpf-placeholder-fd", MFD_CLOEXEC));
dac645b9
AN
1679 if (fd < 0)
1680 return -errno;
1681 return fd;
1682}
1683
bf829271 1684static struct bpf_map *bpf_object__add_map(struct bpf_object *obj)
d859900c 1685{
69721203
AN
1686 struct bpf_map *map;
1687 int err;
bf829271 1688
69721203
AN
1689 err = libbpf_ensure_mem((void **)&obj->maps, &obj->maps_cap,
1690 sizeof(*obj->maps), obj->nr_maps + 1);
1691 if (err)
1692 return ERR_PTR(err);
bf829271 1693
69721203 1694 map = &obj->maps[obj->nr_maps++];
ec41817b 1695 map->obj = obj;
dac645b9
AN
1696 /* Preallocate map FD without actually creating BPF map just yet.
1697 * These map FD "placeholders" will be reused later without changing
1698 * FD value when map is actually created in the kernel.
1699 *
1700 * This is useful to be able to perform BPF program relocations
1701 * without having to create BPF maps before that step. This allows us
1702 * to finalize and load BTF very late in BPF object's loading phase,
1703 * right before BPF maps have to be created and BPF programs have to
1704 * be loaded. By having these map FD placeholders we can perform all
1705 * the sanitizations, relocations, and any other adjustments before we
1706 * start creating actual BPF kernel objects (BTF, maps, progs).
1707 */
1708 map->fd = create_placeholder_fd();
1709 if (map->fd < 0)
1710 return ERR_PTR(map->fd);
69721203 1711 map->inner_map_fd = -1;
ec41817b 1712 map->autocreate = true;
bf829271 1713
69721203 1714 return map;
d859900c
DB
1715}
1716
79ff13e9 1717static size_t array_map_mmap_sz(unsigned int value_sz, unsigned int max_entries)
eba9c5f4 1718{
9d0a2331 1719 const long page_sz = sysconf(_SC_PAGE_SIZE);
eba9c5f4
AN
1720 size_t map_sz;
1721
9d0a2331 1722 map_sz = (size_t)roundup(value_sz, 8) * max_entries;
eba9c5f4
AN
1723 map_sz = roundup(map_sz, page_sz);
1724 return map_sz;
1725}
1726
79ff13e9
AS
1727static size_t bpf_map_mmap_sz(const struct bpf_map *map)
1728{
1729 const long page_sz = sysconf(_SC_PAGE_SIZE);
1730
1731 switch (map->def.type) {
1732 case BPF_MAP_TYPE_ARRAY:
1733 return array_map_mmap_sz(map->def.value_size, map->def.max_entries);
1734 case BPF_MAP_TYPE_ARENA:
1735 return page_sz * map->def.max_entries;
1736 default:
1737 return 0; /* not supported */
1738 }
1739}
1740
9d0a2331
JK
1741static int bpf_map_mmap_resize(struct bpf_map *map, size_t old_sz, size_t new_sz)
1742{
1743 void *mmaped;
1744
1745 if (!map->mmaped)
1746 return -EINVAL;
1747
1748 if (old_sz == new_sz)
1749 return 0;
1750
1751 mmaped = mmap(NULL, new_sz, PROT_READ | PROT_WRITE, MAP_SHARED | MAP_ANONYMOUS, -1, 0);
1752 if (mmaped == MAP_FAILED)
1753 return -errno;
1754
1755 memcpy(mmaped, map->mmaped, min(old_sz, new_sz));
1756 munmap(map->mmaped, old_sz);
1757 map->mmaped = mmaped;
1758 return 0;
1759}
1760
aed65917 1761static char *internal_map_name(struct bpf_object *obj, const char *real_name)
81bfdd08 1762{
113e6b7e 1763 char map_name[BPF_OBJ_NAME_LEN], *p;
aed65917
AN
1764 int pfx_len, sfx_len = max((size_t)7, strlen(real_name));
1765
1766 /* This is one of the more confusing parts of libbpf for various
1767 * reasons, some of which are historical. The original idea for naming
1768 * internal names was to include as much of BPF object name prefix as
1769 * possible, so that it can be distinguished from similar internal
1770 * maps of a different BPF object.
1771 * As an example, let's say we have bpf_object named 'my_object_name'
1772 * and internal map corresponding to '.rodata' ELF section. The final
1773 * map name advertised to user and to the kernel will be
1774 * 'my_objec.rodata', taking first 8 characters of object name and
1775 * entire 7 characters of '.rodata'.
1776 * Somewhat confusingly, if internal map ELF section name is shorter
1777 * than 7 characters, e.g., '.bss', we still reserve 7 characters
1778 * for the suffix, even though we only have 4 actual characters, and
1779 * resulting map will be called 'my_objec.bss', not even using all 15
1780 * characters allowed by the kernel. Oh well, at least the truncated
1781 * object name is somewhat consistent in this case. But if the map
1782 * name is '.kconfig', we'll still have entirety of '.kconfig' added
1783 * (8 chars) and thus will be left with only first 7 characters of the
1784 * object name ('my_obje'). Happy guessing, user, that the final map
1785 * name will be "my_obje.kconfig".
1786 * Now, with libbpf starting to support arbitrarily named .rodata.*
1787 * and .data.* data sections, it's possible that ELF section name is
1788 * longer than allowed 15 chars, so we now need to be careful to take
1789 * only up to 15 first characters of ELF name, taking no BPF object
1790 * name characters at all. So '.rodata.abracadabra' will result in
1791 * '.rodata.abracad' kernel and user-visible name.
1792 * We need to keep this convoluted logic intact for .data, .bss and
1793 * .rodata maps, but for new custom .data.custom and .rodata.custom
1794 * maps we use their ELF names as is, not prepending bpf_object name
1795 * in front. We still need to truncate them to 15 characters for the
1796 * kernel. Full name can be recovered for such maps by using DATASEC
1797 * BTF type associated with such map's value type, though.
1798 */
1799 if (sfx_len >= BPF_OBJ_NAME_LEN)
1800 sfx_len = BPF_OBJ_NAME_LEN - 1;
1801
1802 /* if there are two or more dots in map name, it's a custom dot map */
1803 if (strchr(real_name + 1, '.') != NULL)
1804 pfx_len = 0;
1805 else
1806 pfx_len = min((size_t)BPF_OBJ_NAME_LEN - sfx_len - 1, strlen(obj->name));
81bfdd08
AN
1807
1808 snprintf(map_name, sizeof(map_name), "%.*s%.*s", pfx_len, obj->name,
aed65917 1809 sfx_len, real_name);
81bfdd08 1810
113e6b7e
THJ
1811 /* sanitise map name to characters allowed by kernel */
1812 for (p = map_name; *p && p < map_name + sizeof(map_name); p++)
1813 if (!isalnum(*p) && *p != '_' && *p != '.')
1814 *p = '_';
1815
81bfdd08
AN
1816 return strdup(map_name);
1817}
1818
262cfb74 1819static int
4fcac46c
AN
1820map_fill_btf_type_info(struct bpf_object *obj, struct bpf_map *map);
1821
1822/* Internal BPF map is mmap()'able only if at least one of corresponding
1823 * DATASEC's VARs are to be exposed through BPF skeleton. I.e., it's a GLOBAL
1824 * variable and it's not marked as __hidden (which turns it into, effectively,
1825 * a STATIC variable).
1826 */
1827static bool map_is_mmapable(struct bpf_object *obj, struct bpf_map *map)
1828{
1829 const struct btf_type *t, *vt;
1830 struct btf_var_secinfo *vsi;
1831 int i, n;
1832
1833 if (!map->btf_value_type_id)
1834 return false;
1835
1836 t = btf__type_by_id(obj->btf, map->btf_value_type_id);
1837 if (!btf_is_datasec(t))
1838 return false;
1839
1840 vsi = btf_var_secinfos(t);
1841 for (i = 0, n = btf_vlen(t); i < n; i++, vsi++) {
1842 vt = btf__type_by_id(obj->btf, vsi->type);
1843 if (!btf_is_var(vt))
1844 continue;
1845
1846 if (btf_var(vt)->linkage != BTF_VAR_STATIC)
1847 return true;
1848 }
1849
1850 return false;
1851}
262cfb74 1852
d859900c 1853static int
bf829271 1854bpf_object__init_internal_map(struct bpf_object *obj, enum libbpf_map_type type,
aed65917 1855 const char *real_name, int sec_idx, void *data, size_t data_sz)
d859900c 1856{
bf829271
AN
1857 struct bpf_map_def *def;
1858 struct bpf_map *map;
9d0a2331 1859 size_t mmap_sz;
eba9c5f4 1860 int err;
bf829271
AN
1861
1862 map = bpf_object__add_map(obj);
1863 if (IS_ERR(map))
1864 return PTR_ERR(map);
d859900c
DB
1865
1866 map->libbpf_type = type;
db48814b
AN
1867 map->sec_idx = sec_idx;
1868 map->sec_offset = 0;
aed65917
AN
1869 map->real_name = strdup(real_name);
1870 map->name = internal_map_name(obj, real_name);
1871 if (!map->real_name || !map->name) {
1872 zfree(&map->real_name);
1873 zfree(&map->name);
d859900c
DB
1874 return -ENOMEM;
1875 }
1876
bf829271 1877 def = &map->def;
d859900c
DB
1878 def->type = BPF_MAP_TYPE_ARRAY;
1879 def->key_size = sizeof(int);
eba9c5f4 1880 def->value_size = data_sz;
d859900c 1881 def->max_entries = 1;
81bfdd08 1882 def->map_flags = type == LIBBPF_MAP_RODATA || type == LIBBPF_MAP_KCONFIG
2e7ba4f8 1883 ? BPF_F_RDONLY_PROG : 0;
4fcac46c
AN
1884
1885 /* failures are fine because of maps like .rodata.str1.1 */
1886 (void) map_fill_btf_type_info(obj, map);
1887
1888 if (map_is_mmapable(obj, map))
1889 def->map_flags |= BPF_F_MMAPABLE;
7fe74b43
AN
1890
1891 pr_debug("map '%s' (global data): at sec_idx %d, offset %zu, flags %x.\n",
81bfdd08 1892 map->name, map->sec_idx, map->sec_offset, def->map_flags);
7fe74b43 1893
79ff13e9 1894 mmap_sz = bpf_map_mmap_sz(map);
9d0a2331 1895 map->mmaped = mmap(NULL, mmap_sz, PROT_READ | PROT_WRITE,
eba9c5f4
AN
1896 MAP_SHARED | MAP_ANONYMOUS, -1, 0);
1897 if (map->mmaped == MAP_FAILED) {
1898 err = -errno;
1899 map->mmaped = NULL;
1900 pr_warn("failed to alloc map '%s' content buffer: %d\n",
1901 map->name, err);
aed65917 1902 zfree(&map->real_name);
eba9c5f4
AN
1903 zfree(&map->name);
1904 return err;
d859900c
DB
1905 }
1906
166750bc 1907 if (data)
eba9c5f4
AN
1908 memcpy(map->mmaped, data, data_sz);
1909
e1d1dc46 1910 pr_debug("map %td is \"%s\"\n", map - obj->maps, map->name);
d859900c
DB
1911 return 0;
1912}
1913
bf829271
AN
1914static int bpf_object__init_global_data_maps(struct bpf_object *obj)
1915{
25bbbd7a 1916 struct elf_sec_desc *sec_desc;
aed65917 1917 const char *sec_name;
25bbbd7a 1918 int err = 0, sec_idx;
bf829271 1919
bf829271
AN
1920 /*
1921 * Populate obj->maps with libbpf internal maps.
1922 */
25bbbd7a
AN
1923 for (sec_idx = 1; sec_idx < obj->efile.sec_cnt; sec_idx++) {
1924 sec_desc = &obj->efile.secs[sec_idx];
1925
47ea7417 1926 /* Skip recognized sections with size 0. */
d4e6d684 1927 if (!sec_desc->data || sec_desc->data->d_size == 0)
47ea7417
JH
1928 continue;
1929
25bbbd7a
AN
1930 switch (sec_desc->sec_type) {
1931 case SEC_DATA:
aed65917 1932 sec_name = elf_sec_name(obj, elf_sec_by_idx(obj, sec_idx));
25bbbd7a 1933 err = bpf_object__init_internal_map(obj, LIBBPF_MAP_DATA,
aed65917 1934 sec_name, sec_idx,
25bbbd7a
AN
1935 sec_desc->data->d_buf,
1936 sec_desc->data->d_size);
1937 break;
1938 case SEC_RODATA:
1939 obj->has_rodata = true;
aed65917 1940 sec_name = elf_sec_name(obj, elf_sec_by_idx(obj, sec_idx));
25bbbd7a 1941 err = bpf_object__init_internal_map(obj, LIBBPF_MAP_RODATA,
aed65917 1942 sec_name, sec_idx,
25bbbd7a
AN
1943 sec_desc->data->d_buf,
1944 sec_desc->data->d_size);
1945 break;
1946 case SEC_BSS:
aed65917 1947 sec_name = elf_sec_name(obj, elf_sec_by_idx(obj, sec_idx));
25bbbd7a 1948 err = bpf_object__init_internal_map(obj, LIBBPF_MAP_BSS,
aed65917 1949 sec_name, sec_idx,
25bbbd7a
AN
1950 NULL,
1951 sec_desc->data->d_size);
1952 break;
1953 default:
1954 /* skip */
1955 break;
1956 }
bf829271
AN
1957 if (err)
1958 return err;
1959 }
1960 return 0;
1961}
1962
166750bc
AN
1963
1964static struct extern_desc *find_extern_by_name(const struct bpf_object *obj,
1965 const void *name)
1966{
1967 int i;
1968
1969 for (i = 0; i < obj->nr_extern; i++) {
1970 if (strcmp(obj->externs[i].name, name) == 0)
1971 return &obj->externs[i];
1972 }
1973 return NULL;
1974}
1975
c56e5977
YS
1976static struct extern_desc *find_extern_by_name_with_len(const struct bpf_object *obj,
1977 const void *name, int len)
1978{
1979 const char *ext_name;
1980 int i;
1981
1982 for (i = 0; i < obj->nr_extern; i++) {
1983 ext_name = obj->externs[i].name;
1984 if (strlen(ext_name) == len && strncmp(ext_name, name, len) == 0)
1985 return &obj->externs[i];
1986 }
1987 return NULL;
1988}
1989
2e33efe3
AN
1990static int set_kcfg_value_tri(struct extern_desc *ext, void *ext_val,
1991 char value)
166750bc 1992{
2e33efe3
AN
1993 switch (ext->kcfg.type) {
1994 case KCFG_BOOL:
166750bc 1995 if (value == 'm') {
55d00c37 1996 pr_warn("extern (kcfg) '%s': value '%c' implies tristate or char type\n",
166750bc
AN
1997 ext->name, value);
1998 return -EINVAL;
1999 }
2000 *(bool *)ext_val = value == 'y' ? true : false;
2001 break;
2e33efe3 2002 case KCFG_TRISTATE:
166750bc
AN
2003 if (value == 'y')
2004 *(enum libbpf_tristate *)ext_val = TRI_YES;
2005 else if (value == 'm')
2006 *(enum libbpf_tristate *)ext_val = TRI_MODULE;
2007 else /* value == 'n' */
2008 *(enum libbpf_tristate *)ext_val = TRI_NO;
2009 break;
2e33efe3 2010 case KCFG_CHAR:
166750bc
AN
2011 *(char *)ext_val = value;
2012 break;
2e33efe3
AN
2013 case KCFG_UNKNOWN:
2014 case KCFG_INT:
2015 case KCFG_CHAR_ARR:
166750bc 2016 default:
55d00c37 2017 pr_warn("extern (kcfg) '%s': value '%c' implies bool, tristate, or char type\n",
166750bc
AN
2018 ext->name, value);
2019 return -EINVAL;
2020 }
2021 ext->is_set = true;
2022 return 0;
2023}
2024
2e33efe3
AN
2025static int set_kcfg_value_str(struct extern_desc *ext, char *ext_val,
2026 const char *value)
166750bc
AN
2027{
2028 size_t len;
2029
2e33efe3 2030 if (ext->kcfg.type != KCFG_CHAR_ARR) {
55d00c37
AN
2031 pr_warn("extern (kcfg) '%s': value '%s' implies char array type\n",
2032 ext->name, value);
166750bc
AN
2033 return -EINVAL;
2034 }
2035
2036 len = strlen(value);
2037 if (value[len - 1] != '"') {
2e33efe3 2038 pr_warn("extern (kcfg) '%s': invalid string config '%s'\n",
166750bc
AN
2039 ext->name, value);
2040 return -EINVAL;
2041 }
2042
2043 /* strip quotes */
2044 len -= 2;
2e33efe3 2045 if (len >= ext->kcfg.sz) {
55d00c37 2046 pr_warn("extern (kcfg) '%s': long string '%s' of (%zu bytes) truncated to %d bytes\n",
2e33efe3
AN
2047 ext->name, value, len, ext->kcfg.sz - 1);
2048 len = ext->kcfg.sz - 1;
166750bc
AN
2049 }
2050 memcpy(ext_val, value + 1, len);
2051 ext_val[len] = '\0';
2052 ext->is_set = true;
2053 return 0;
2054}
2055
2056static int parse_u64(const char *value, __u64 *res)
2057{
2058 char *value_end;
2059 int err;
2060
2061 errno = 0;
2062 *res = strtoull(value, &value_end, 0);
2063 if (errno) {
2064 err = -errno;
2065 pr_warn("failed to parse '%s' as integer: %d\n", value, err);
2066 return err;
2067 }
2068 if (*value_end) {
2069 pr_warn("failed to parse '%s' as integer completely\n", value);
2070 return -EINVAL;
2071 }
2072 return 0;
2073}
2074
2e33efe3 2075static bool is_kcfg_value_in_range(const struct extern_desc *ext, __u64 v)
166750bc 2076{
2e33efe3 2077 int bit_sz = ext->kcfg.sz * 8;
166750bc 2078
2e33efe3 2079 if (ext->kcfg.sz == 8)
166750bc
AN
2080 return true;
2081
2082 /* Validate that value stored in u64 fits in integer of `ext->sz`
2083 * bytes size without any loss of information. If the target integer
2084 * is signed, we rely on the following limits of integer type of
2085 * Y bits and subsequent transformation:
2086 *
2087 * -2^(Y-1) <= X <= 2^(Y-1) - 1
2088 * 0 <= X + 2^(Y-1) <= 2^Y - 1
2089 * 0 <= X + 2^(Y-1) < 2^Y
2090 *
2091 * For unsigned target integer, check that all the (64 - Y) bits are
2092 * zero.
2093 */
2e33efe3 2094 if (ext->kcfg.is_signed)
166750bc
AN
2095 return v + (1ULL << (bit_sz - 1)) < (1ULL << bit_sz);
2096 else
2097 return (v >> bit_sz) == 0;
2098}
2099
2e33efe3
AN
2100static int set_kcfg_value_num(struct extern_desc *ext, void *ext_val,
2101 __u64 value)
166750bc 2102{
55d00c37
AN
2103 if (ext->kcfg.type != KCFG_INT && ext->kcfg.type != KCFG_CHAR &&
2104 ext->kcfg.type != KCFG_BOOL) {
2105 pr_warn("extern (kcfg) '%s': value '%llu' implies integer, char, or boolean type\n",
7745ff98 2106 ext->name, (unsigned long long)value);
166750bc
AN
2107 return -EINVAL;
2108 }
55d00c37
AN
2109 if (ext->kcfg.type == KCFG_BOOL && value > 1) {
2110 pr_warn("extern (kcfg) '%s': value '%llu' isn't boolean compatible\n",
2111 ext->name, (unsigned long long)value);
2112 return -EINVAL;
2113
2114 }
2e33efe3 2115 if (!is_kcfg_value_in_range(ext, value)) {
55d00c37 2116 pr_warn("extern (kcfg) '%s': value '%llu' doesn't fit in %d bytes\n",
2e33efe3 2117 ext->name, (unsigned long long)value, ext->kcfg.sz);
166750bc
AN
2118 return -ERANGE;
2119 }
2e33efe3 2120 switch (ext->kcfg.sz) {
e3ba8e4e
KM
2121 case 1:
2122 *(__u8 *)ext_val = value;
2123 break;
2124 case 2:
2125 *(__u16 *)ext_val = value;
2126 break;
2127 case 4:
2128 *(__u32 *)ext_val = value;
2129 break;
2130 case 8:
2131 *(__u64 *)ext_val = value;
2132 break;
2133 default:
2134 return -EINVAL;
166750bc
AN
2135 }
2136 ext->is_set = true;
2137 return 0;
2138}
2139
8601fd42
AN
2140static int bpf_object__process_kconfig_line(struct bpf_object *obj,
2141 char *buf, void *data)
166750bc 2142{
166750bc 2143 struct extern_desc *ext;
8601fd42 2144 char *sep, *value;
166750bc
AN
2145 int len, err = 0;
2146 void *ext_val;
2147 __u64 num;
166750bc 2148
13d35a0c 2149 if (!str_has_pfx(buf, "CONFIG_"))
8601fd42 2150 return 0;
166750bc 2151
8601fd42
AN
2152 sep = strchr(buf, '=');
2153 if (!sep) {
2154 pr_warn("failed to parse '%s': no separator\n", buf);
2155 return -EINVAL;
2156 }
2157
2158 /* Trim ending '\n' */
2159 len = strlen(buf);
2160 if (buf[len - 1] == '\n')
2161 buf[len - 1] = '\0';
2162 /* Split on '=' and ensure that a value is present. */
2163 *sep = '\0';
2164 if (!sep[1]) {
2165 *sep = '=';
2166 pr_warn("failed to parse '%s': no value\n", buf);
2167 return -EINVAL;
2168 }
2169
2170 ext = find_extern_by_name(obj, buf);
2171 if (!ext || ext->is_set)
2172 return 0;
2173
2e33efe3 2174 ext_val = data + ext->kcfg.data_off;
8601fd42
AN
2175 value = sep + 1;
2176
2177 switch (*value) {
2178 case 'y': case 'n': case 'm':
2e33efe3 2179 err = set_kcfg_value_tri(ext, ext_val, *value);
8601fd42
AN
2180 break;
2181 case '"':
2e33efe3 2182 err = set_kcfg_value_str(ext, ext_val, value);
8601fd42
AN
2183 break;
2184 default:
2185 /* assume integer */
2186 err = parse_u64(value, &num);
2187 if (err) {
55d00c37 2188 pr_warn("extern (kcfg) '%s': value '%s' isn't a valid integer\n", ext->name, value);
8601fd42
AN
2189 return err;
2190 }
55d00c37
AN
2191 if (ext->kcfg.type != KCFG_INT && ext->kcfg.type != KCFG_CHAR) {
2192 pr_warn("extern (kcfg) '%s': value '%s' implies integer type\n", ext->name, value);
2193 return -EINVAL;
2194 }
2e33efe3 2195 err = set_kcfg_value_num(ext, ext_val, num);
8601fd42 2196 break;
166750bc 2197 }
8601fd42
AN
2198 if (err)
2199 return err;
55d00c37 2200 pr_debug("extern (kcfg) '%s': set to %s\n", ext->name, value);
8601fd42
AN
2201 return 0;
2202}
2203
2204static int bpf_object__read_kconfig_file(struct bpf_object *obj, void *data)
2205{
2206 char buf[PATH_MAX];
2207 struct utsname uts;
2208 int len, err = 0;
2209 gzFile file;
2210
2211 uname(&uts);
2212 len = snprintf(buf, PATH_MAX, "/boot/config-%s", uts.release);
2213 if (len < 0)
2214 return -EINVAL;
2215 else if (len >= PATH_MAX)
2216 return -ENAMETOOLONG;
2217
2218 /* gzopen also accepts uncompressed files. */
8e50750f 2219 file = gzopen(buf, "re");
8601fd42 2220 if (!file)
8e50750f 2221 file = gzopen("/proc/config.gz", "re");
8601fd42 2222
166750bc 2223 if (!file) {
8601fd42 2224 pr_warn("failed to open system Kconfig\n");
166750bc
AN
2225 return -ENOENT;
2226 }
2227
2228 while (gzgets(file, buf, sizeof(buf))) {
8601fd42
AN
2229 err = bpf_object__process_kconfig_line(obj, buf, data);
2230 if (err) {
2231 pr_warn("error parsing system Kconfig line '%s': %d\n",
2232 buf, err);
166750bc
AN
2233 goto out;
2234 }
8601fd42 2235 }
166750bc 2236
8601fd42
AN
2237out:
2238 gzclose(file);
2239 return err;
2240}
166750bc 2241
8601fd42
AN
2242static int bpf_object__read_kconfig_mem(struct bpf_object *obj,
2243 const char *config, void *data)
2244{
2245 char buf[PATH_MAX];
2246 int err = 0;
2247 FILE *file;
166750bc 2248
8601fd42
AN
2249 file = fmemopen((void *)config, strlen(config), "r");
2250 if (!file) {
2251 err = -errno;
2252 pr_warn("failed to open in-memory Kconfig: %d\n", err);
2253 return err;
2254 }
2255
2256 while (fgets(buf, sizeof(buf), file)) {
2257 err = bpf_object__process_kconfig_line(obj, buf, data);
2258 if (err) {
2259 pr_warn("error parsing in-memory Kconfig line '%s': %d\n",
2260 buf, err);
166750bc
AN
2261 break;
2262 }
166750bc
AN
2263 }
2264
8601fd42 2265 fclose(file);
166750bc
AN
2266 return err;
2267}
2268
81bfdd08 2269static int bpf_object__init_kconfig_map(struct bpf_object *obj)
166750bc 2270{
2e33efe3 2271 struct extern_desc *last_ext = NULL, *ext;
166750bc 2272 size_t map_sz;
2e33efe3 2273 int i, err;
166750bc 2274
2e33efe3
AN
2275 for (i = 0; i < obj->nr_extern; i++) {
2276 ext = &obj->externs[i];
2277 if (ext->type == EXT_KCFG)
2278 last_ext = ext;
2279 }
166750bc 2280
2e33efe3
AN
2281 if (!last_ext)
2282 return 0;
166750bc 2283
2e33efe3 2284 map_sz = last_ext->kcfg.data_off + last_ext->kcfg.sz;
81bfdd08 2285 err = bpf_object__init_internal_map(obj, LIBBPF_MAP_KCONFIG,
aed65917 2286 ".kconfig", obj->efile.symbols_shndx,
166750bc
AN
2287 NULL, map_sz);
2288 if (err)
2289 return err;
2290
81bfdd08 2291 obj->kconfig_map_idx = obj->nr_maps - 1;
166750bc
AN
2292
2293 return 0;
2294}
2295
42869d28 2296const struct btf_type *
ddc7c304 2297skip_mods_and_typedefs(const struct btf *btf, __u32 id, __u32 *res_id)
abd29c93
AN
2298{
2299 const struct btf_type *t = btf__type_by_id(btf, id);
8837fe5d 2300
ddc7c304
AN
2301 if (res_id)
2302 *res_id = id;
2303
2304 while (btf_is_mod(t) || btf_is_typedef(t)) {
2305 if (res_id)
2306 *res_id = t->type;
2307 t = btf__type_by_id(btf, t->type);
abd29c93 2308 }
ddc7c304
AN
2309
2310 return t;
abd29c93
AN
2311}
2312
590a0088
MKL
2313static const struct btf_type *
2314resolve_func_ptr(const struct btf *btf, __u32 id, __u32 *res_id)
2315{
2316 const struct btf_type *t;
2317
2318 t = skip_mods_and_typedefs(btf, id, NULL);
2319 if (!btf_is_ptr(t))
2320 return NULL;
2321
2322 t = skip_mods_and_typedefs(btf, t->type, res_id);
2323
2324 return btf_is_func_proto(t) ? t : NULL;
2325}
2326
774e132e 2327static const char *__btf_kind_str(__u16 kind)
81ba0889 2328{
774e132e 2329 switch (kind) {
81ba0889
AN
2330 case BTF_KIND_UNKN: return "void";
2331 case BTF_KIND_INT: return "int";
2332 case BTF_KIND_PTR: return "ptr";
2333 case BTF_KIND_ARRAY: return "array";
2334 case BTF_KIND_STRUCT: return "struct";
2335 case BTF_KIND_UNION: return "union";
2336 case BTF_KIND_ENUM: return "enum";
2337 case BTF_KIND_FWD: return "fwd";
2338 case BTF_KIND_TYPEDEF: return "typedef";
2339 case BTF_KIND_VOLATILE: return "volatile";
2340 case BTF_KIND_CONST: return "const";
2341 case BTF_KIND_RESTRICT: return "restrict";
2342 case BTF_KIND_FUNC: return "func";
2343 case BTF_KIND_FUNC_PROTO: return "func_proto";
2344 case BTF_KIND_VAR: return "var";
2345 case BTF_KIND_DATASEC: return "datasec";
22541a9e 2346 case BTF_KIND_FLOAT: return "float";
223f903e 2347 case BTF_KIND_DECL_TAG: return "decl_tag";
2dc1e488 2348 case BTF_KIND_TYPE_TAG: return "type_tag";
f2a62588 2349 case BTF_KIND_ENUM64: return "enum64";
81ba0889
AN
2350 default: return "unknown";
2351 }
2352}
2353
42869d28 2354const char *btf_kind_str(const struct btf_type *t)
774e132e
MKL
2355{
2356 return __btf_kind_str(btf_kind(t));
2357}
2358
ef99b02b
AN
2359/*
2360 * Fetch integer attribute of BTF map definition. Such attributes are
2361 * represented using a pointer to an array, in which dimensionality of array
2362 * encodes specified integer value. E.g., int (*type)[BPF_MAP_TYPE_ARRAY];
2363 * encodes `type => BPF_MAP_TYPE_ARRAY` key/value pair completely using BTF
2364 * type definition, while using only sizeof(void *) space in ELF data section.
2365 */
2366static bool get_map_field_int(const char *map_name, const struct btf *btf,
8983b731
AN
2367 const struct btf_member *m, __u32 *res)
2368{
ddc7c304 2369 const struct btf_type *t = skip_mods_and_typedefs(btf, m->type, NULL);
abd29c93 2370 const char *name = btf__name_by_offset(btf, m->name_off);
ef99b02b
AN
2371 const struct btf_array *arr_info;
2372 const struct btf_type *arr_t;
abd29c93 2373
b03bc685 2374 if (!btf_is_ptr(t)) {
81ba0889
AN
2375 pr_warn("map '%s': attr '%s': expected PTR, got %s.\n",
2376 map_name, name, btf_kind_str(t));
abd29c93
AN
2377 return false;
2378 }
ef99b02b
AN
2379
2380 arr_t = btf__type_by_id(btf, t->type);
2381 if (!arr_t) {
be18010e
KW
2382 pr_warn("map '%s': attr '%s': type [%u] not found.\n",
2383 map_name, name, t->type);
abd29c93
AN
2384 return false;
2385 }
b03bc685 2386 if (!btf_is_array(arr_t)) {
81ba0889
AN
2387 pr_warn("map '%s': attr '%s': expected ARRAY, got %s.\n",
2388 map_name, name, btf_kind_str(arr_t));
abd29c93
AN
2389 return false;
2390 }
b03bc685 2391 arr_info = btf_array(arr_t);
ef99b02b 2392 *res = arr_info->nelems;
abd29c93
AN
2393 return true;
2394}
2395
d147357e
AS
2396static bool get_map_field_long(const char *map_name, const struct btf *btf,
2397 const struct btf_member *m, __u64 *res)
2398{
2399 const struct btf_type *t = skip_mods_and_typedefs(btf, m->type, NULL);
2400 const char *name = btf__name_by_offset(btf, m->name_off);
2401
2402 if (btf_is_ptr(t)) {
2403 __u32 res32;
2404 bool ret;
2405
2406 ret = get_map_field_int(map_name, btf, m, &res32);
2407 if (ret)
2408 *res = (__u64)res32;
2409 return ret;
2410 }
2411
2412 if (!btf_is_enum(t) && !btf_is_enum64(t)) {
2413 pr_warn("map '%s': attr '%s': expected ENUM or ENUM64, got %s.\n",
2414 map_name, name, btf_kind_str(t));
2415 return false;
2416 }
2417
2418 if (btf_vlen(t) != 1) {
2419 pr_warn("map '%s': attr '%s': invalid __ulong\n",
2420 map_name, name);
2421 return false;
2422 }
2423
2424 if (btf_is_enum(t)) {
2425 const struct btf_enum *e = btf_enum(t);
2426
2427 *res = e->val;
2428 } else {
2429 const struct btf_enum64 *e = btf_enum64(t);
2430
2431 *res = btf_enum64_value(e);
2432 }
2433 return true;
2434}
2435
e588c116
WY
2436static int pathname_concat(char *buf, size_t buf_sz, const char *path, const char *name)
2437{
2438 int len;
2439
2440 len = snprintf(buf, buf_sz, "%s/%s", path, name);
2441 if (len < 0)
2442 return -EINVAL;
2443 if (len >= buf_sz)
2444 return -ENAMETOOLONG;
2445
2446 return 0;
2447}
2448
57a00f41
THJ
2449static int build_map_pin_path(struct bpf_map *map, const char *path)
2450{
2451 char buf[PATH_MAX];
e588c116 2452 int err;
57a00f41
THJ
2453
2454 if (!path)
6b434b61 2455 path = BPF_FS_DEFAULT_PATH;
57a00f41 2456
e588c116
WY
2457 err = pathname_concat(buf, sizeof(buf), path, bpf_map__name(map));
2458 if (err)
2459 return err;
57a00f41 2460
6e9cab2e 2461 return bpf_map__set_pin_path(map, buf);
57a00f41
THJ
2462}
2463
146bf811
AN
2464/* should match definition in bpf_helpers.h */
2465enum libbpf_pin_type {
2466 LIBBPF_PIN_NONE,
2467 /* PIN_BY_NAME: pin maps by name (in /sys/fs/bpf by default) */
2468 LIBBPF_PIN_BY_NAME,
2469};
2470
c7ef5ec9
AN
2471int parse_btf_map_def(const char *map_name, struct btf *btf,
2472 const struct btf_type *def_t, bool strict,
2473 struct btf_map_def *map_def, struct btf_map_def *inner_def)
abd29c93 2474{
41017e56 2475 const struct btf_type *t;
abd29c93 2476 const struct btf_member *m;
c7ef5ec9 2477 bool is_inner = inner_def == NULL;
abd29c93
AN
2478 int vlen, i;
2479
c7ef5ec9
AN
2480 vlen = btf_vlen(def_t);
2481 m = btf_members(def_t);
abd29c93 2482 for (i = 0; i < vlen; i++, m++) {
c7ef5ec9 2483 const char *name = btf__name_by_offset(btf, m->name_off);
abd29c93
AN
2484
2485 if (!name) {
c7ef5ec9 2486 pr_warn("map '%s': invalid field #%d.\n", map_name, i);
abd29c93
AN
2487 return -EINVAL;
2488 }
2489 if (strcmp(name, "type") == 0) {
c7ef5ec9 2490 if (!get_map_field_int(map_name, btf, m, &map_def->map_type))
abd29c93 2491 return -EINVAL;
c7ef5ec9 2492 map_def->parts |= MAP_DEF_MAP_TYPE;
abd29c93 2493 } else if (strcmp(name, "max_entries") == 0) {
c7ef5ec9 2494 if (!get_map_field_int(map_name, btf, m, &map_def->max_entries))
abd29c93 2495 return -EINVAL;
c7ef5ec9 2496 map_def->parts |= MAP_DEF_MAX_ENTRIES;
abd29c93 2497 } else if (strcmp(name, "map_flags") == 0) {
c7ef5ec9 2498 if (!get_map_field_int(map_name, btf, m, &map_def->map_flags))
abd29c93 2499 return -EINVAL;
c7ef5ec9 2500 map_def->parts |= MAP_DEF_MAP_FLAGS;
1bdb6c9a 2501 } else if (strcmp(name, "numa_node") == 0) {
c7ef5ec9 2502 if (!get_map_field_int(map_name, btf, m, &map_def->numa_node))
1bdb6c9a 2503 return -EINVAL;
c7ef5ec9 2504 map_def->parts |= MAP_DEF_NUMA_NODE;
abd29c93
AN
2505 } else if (strcmp(name, "key_size") == 0) {
2506 __u32 sz;
2507
c7ef5ec9 2508 if (!get_map_field_int(map_name, btf, m, &sz))
abd29c93 2509 return -EINVAL;
c7ef5ec9 2510 if (map_def->key_size && map_def->key_size != sz) {
be18010e 2511 pr_warn("map '%s': conflicting key size %u != %u.\n",
c7ef5ec9 2512 map_name, map_def->key_size, sz);
abd29c93
AN
2513 return -EINVAL;
2514 }
c7ef5ec9
AN
2515 map_def->key_size = sz;
2516 map_def->parts |= MAP_DEF_KEY_SIZE;
abd29c93
AN
2517 } else if (strcmp(name, "key") == 0) {
2518 __s64 sz;
2519
c7ef5ec9 2520 t = btf__type_by_id(btf, m->type);
abd29c93 2521 if (!t) {
be18010e 2522 pr_warn("map '%s': key type [%d] not found.\n",
c7ef5ec9 2523 map_name, m->type);
abd29c93
AN
2524 return -EINVAL;
2525 }
b03bc685 2526 if (!btf_is_ptr(t)) {
81ba0889 2527 pr_warn("map '%s': key spec is not PTR: %s.\n",
c7ef5ec9 2528 map_name, btf_kind_str(t));
abd29c93
AN
2529 return -EINVAL;
2530 }
c7ef5ec9 2531 sz = btf__resolve_size(btf, t->type);
abd29c93 2532 if (sz < 0) {
679152d3 2533 pr_warn("map '%s': can't determine key size for type [%u]: %zd.\n",
c7ef5ec9 2534 map_name, t->type, (ssize_t)sz);
abd29c93
AN
2535 return sz;
2536 }
c7ef5ec9 2537 if (map_def->key_size && map_def->key_size != sz) {
679152d3 2538 pr_warn("map '%s': conflicting key size %u != %zd.\n",
c7ef5ec9 2539 map_name, map_def->key_size, (ssize_t)sz);
abd29c93
AN
2540 return -EINVAL;
2541 }
c7ef5ec9
AN
2542 map_def->key_size = sz;
2543 map_def->key_type_id = t->type;
2544 map_def->parts |= MAP_DEF_KEY_SIZE | MAP_DEF_KEY_TYPE;
abd29c93
AN
2545 } else if (strcmp(name, "value_size") == 0) {
2546 __u32 sz;
2547
c7ef5ec9 2548 if (!get_map_field_int(map_name, btf, m, &sz))
abd29c93 2549 return -EINVAL;
c7ef5ec9 2550 if (map_def->value_size && map_def->value_size != sz) {
be18010e 2551 pr_warn("map '%s': conflicting value size %u != %u.\n",
c7ef5ec9 2552 map_name, map_def->value_size, sz);
abd29c93
AN
2553 return -EINVAL;
2554 }
c7ef5ec9
AN
2555 map_def->value_size = sz;
2556 map_def->parts |= MAP_DEF_VALUE_SIZE;
abd29c93
AN
2557 } else if (strcmp(name, "value") == 0) {
2558 __s64 sz;
2559
c7ef5ec9 2560 t = btf__type_by_id(btf, m->type);
abd29c93 2561 if (!t) {
be18010e 2562 pr_warn("map '%s': value type [%d] not found.\n",
c7ef5ec9 2563 map_name, m->type);
abd29c93
AN
2564 return -EINVAL;
2565 }
b03bc685 2566 if (!btf_is_ptr(t)) {
81ba0889 2567 pr_warn("map '%s': value spec is not PTR: %s.\n",
c7ef5ec9 2568 map_name, btf_kind_str(t));
abd29c93
AN
2569 return -EINVAL;
2570 }
c7ef5ec9 2571 sz = btf__resolve_size(btf, t->type);
abd29c93 2572 if (sz < 0) {
679152d3 2573 pr_warn("map '%s': can't determine value size for type [%u]: %zd.\n",
c7ef5ec9 2574 map_name, t->type, (ssize_t)sz);
abd29c93
AN
2575 return sz;
2576 }
c7ef5ec9 2577 if (map_def->value_size && map_def->value_size != sz) {
679152d3 2578 pr_warn("map '%s': conflicting value size %u != %zd.\n",
c7ef5ec9 2579 map_name, map_def->value_size, (ssize_t)sz);
abd29c93
AN
2580 return -EINVAL;
2581 }
c7ef5ec9
AN
2582 map_def->value_size = sz;
2583 map_def->value_type_id = t->type;
2584 map_def->parts |= MAP_DEF_VALUE_SIZE | MAP_DEF_VALUE_TYPE;
646f02ff
AN
2585 }
2586 else if (strcmp(name, "values") == 0) {
341ac5ff
HC
2587 bool is_map_in_map = bpf_map_type__is_map_in_map(map_def->map_type);
2588 bool is_prog_array = map_def->map_type == BPF_MAP_TYPE_PROG_ARRAY;
2589 const char *desc = is_map_in_map ? "map-in-map inner" : "prog-array value";
c7ef5ec9 2590 char inner_map_name[128];
646f02ff
AN
2591 int err;
2592
2593 if (is_inner) {
2594 pr_warn("map '%s': multi-level inner maps not supported.\n",
c7ef5ec9 2595 map_name);
646f02ff
AN
2596 return -ENOTSUP;
2597 }
2598 if (i != vlen - 1) {
2599 pr_warn("map '%s': '%s' member should be last.\n",
c7ef5ec9 2600 map_name, name);
646f02ff
AN
2601 return -EINVAL;
2602 }
341ac5ff
HC
2603 if (!is_map_in_map && !is_prog_array) {
2604 pr_warn("map '%s': should be map-in-map or prog-array.\n",
c7ef5ec9 2605 map_name);
646f02ff
AN
2606 return -ENOTSUP;
2607 }
c7ef5ec9 2608 if (map_def->value_size && map_def->value_size != 4) {
646f02ff 2609 pr_warn("map '%s': conflicting value size %u != 4.\n",
c7ef5ec9 2610 map_name, map_def->value_size);
646f02ff
AN
2611 return -EINVAL;
2612 }
c7ef5ec9
AN
2613 map_def->value_size = 4;
2614 t = btf__type_by_id(btf, m->type);
646f02ff 2615 if (!t) {
341ac5ff
HC
2616 pr_warn("map '%s': %s type [%d] not found.\n",
2617 map_name, desc, m->type);
646f02ff
AN
2618 return -EINVAL;
2619 }
2620 if (!btf_is_array(t) || btf_array(t)->nelems) {
341ac5ff
HC
2621 pr_warn("map '%s': %s spec is not a zero-sized array.\n",
2622 map_name, desc);
646f02ff
AN
2623 return -EINVAL;
2624 }
c7ef5ec9 2625 t = skip_mods_and_typedefs(btf, btf_array(t)->type, NULL);
646f02ff 2626 if (!btf_is_ptr(t)) {
341ac5ff
HC
2627 pr_warn("map '%s': %s def is of unexpected kind %s.\n",
2628 map_name, desc, btf_kind_str(t));
646f02ff
AN
2629 return -EINVAL;
2630 }
c7ef5ec9 2631 t = skip_mods_and_typedefs(btf, t->type, NULL);
341ac5ff
HC
2632 if (is_prog_array) {
2633 if (!btf_is_func_proto(t)) {
2634 pr_warn("map '%s': prog-array value def is of unexpected kind %s.\n",
2635 map_name, btf_kind_str(t));
2636 return -EINVAL;
2637 }
2638 continue;
2639 }
646f02ff 2640 if (!btf_is_struct(t)) {
81ba0889 2641 pr_warn("map '%s': map-in-map inner def is of unexpected kind %s.\n",
c7ef5ec9 2642 map_name, btf_kind_str(t));
646f02ff
AN
2643 return -EINVAL;
2644 }
2645
c7ef5ec9
AN
2646 snprintf(inner_map_name, sizeof(inner_map_name), "%s.inner", map_name);
2647 err = parse_btf_map_def(inner_map_name, btf, t, strict, inner_def, NULL);
646f02ff
AN
2648 if (err)
2649 return err;
c7ef5ec9
AN
2650
2651 map_def->parts |= MAP_DEF_INNER_MAP;
57a00f41
THJ
2652 } else if (strcmp(name, "pinning") == 0) {
2653 __u32 val;
57a00f41 2654
646f02ff 2655 if (is_inner) {
c7ef5ec9 2656 pr_warn("map '%s': inner def can't be pinned.\n", map_name);
646f02ff
AN
2657 return -EINVAL;
2658 }
c7ef5ec9 2659 if (!get_map_field_int(map_name, btf, m, &val))
57a00f41 2660 return -EINVAL;
c7ef5ec9 2661 if (val != LIBBPF_PIN_NONE && val != LIBBPF_PIN_BY_NAME) {
57a00f41 2662 pr_warn("map '%s': invalid pinning value %u.\n",
c7ef5ec9 2663 map_name, val);
57a00f41
THJ
2664 return -EINVAL;
2665 }
c7ef5ec9
AN
2666 map_def->pinning = val;
2667 map_def->parts |= MAP_DEF_PINNING;
47512102 2668 } else if (strcmp(name, "map_extra") == 0) {
d147357e 2669 __u64 map_extra;
47512102 2670
d147357e 2671 if (!get_map_field_long(map_name, btf, m, &map_extra))
47512102
JK
2672 return -EINVAL;
2673 map_def->map_extra = map_extra;
2674 map_def->parts |= MAP_DEF_MAP_EXTRA;
abd29c93
AN
2675 } else {
2676 if (strict) {
c7ef5ec9 2677 pr_warn("map '%s': unknown field '%s'.\n", map_name, name);
abd29c93
AN
2678 return -ENOTSUP;
2679 }
c7ef5ec9 2680 pr_debug("map '%s': ignoring unknown field '%s'.\n", map_name, name);
abd29c93
AN
2681 }
2682 }
2683
c7ef5ec9
AN
2684 if (map_def->map_type == BPF_MAP_TYPE_UNSPEC) {
2685 pr_warn("map '%s': map type isn't specified.\n", map_name);
abd29c93
AN
2686 return -EINVAL;
2687 }
2688
2689 return 0;
2690}
2691
597fbc46
AN
2692static size_t adjust_ringbuf_sz(size_t sz)
2693{
2694 __u32 page_sz = sysconf(_SC_PAGE_SIZE);
2695 __u32 mul;
2696
2697 /* if user forgot to set any size, make sure they see error */
2698 if (sz == 0)
2699 return 0;
2700 /* Kernel expects BPF_MAP_TYPE_RINGBUF's max_entries to be
2701 * a power-of-2 multiple of kernel's page size. If user diligently
2702 * satisified these conditions, pass the size through.
2703 */
2704 if ((sz % page_sz) == 0 && is_pow_of_2(sz / page_sz))
2705 return sz;
2706
2707 /* Otherwise find closest (page_sz * power_of_2) product bigger than
2708 * user-set size to satisfy both user size request and kernel
2709 * requirements and substitute correct max_entries for map creation.
2710 */
2711 for (mul = 1; mul <= UINT_MAX / page_sz; mul <<= 1) {
2712 if (mul * page_sz > sz)
2713 return mul * page_sz;
2714 }
2715
2716 /* if it's impossible to satisfy the conditions (i.e., user size is
2717 * very close to UINT_MAX but is not a power-of-2 multiple of
2718 * page_size) then just return original size and let kernel reject it
2719 */
2720 return sz;
2721}
2722
b66ccae0
DV
2723static bool map_is_ringbuf(const struct bpf_map *map)
2724{
2725 return map->def.type == BPF_MAP_TYPE_RINGBUF ||
2726 map->def.type == BPF_MAP_TYPE_USER_RINGBUF;
2727}
2728
c7ef5ec9
AN
2729static void fill_map_from_def(struct bpf_map *map, const struct btf_map_def *def)
2730{
2731 map->def.type = def->map_type;
2732 map->def.key_size = def->key_size;
2733 map->def.value_size = def->value_size;
2734 map->def.max_entries = def->max_entries;
2735 map->def.map_flags = def->map_flags;
47512102 2736 map->map_extra = def->map_extra;
c7ef5ec9
AN
2737
2738 map->numa_node = def->numa_node;
2739 map->btf_key_type_id = def->key_type_id;
2740 map->btf_value_type_id = def->value_type_id;
2741
597fbc46 2742 /* auto-adjust BPF ringbuf map max_entries to be a multiple of page size */
b66ccae0 2743 if (map_is_ringbuf(map))
597fbc46
AN
2744 map->def.max_entries = adjust_ringbuf_sz(map->def.max_entries);
2745
c7ef5ec9
AN
2746 if (def->parts & MAP_DEF_MAP_TYPE)
2747 pr_debug("map '%s': found type = %u.\n", map->name, def->map_type);
2748
2749 if (def->parts & MAP_DEF_KEY_TYPE)
2750 pr_debug("map '%s': found key [%u], sz = %u.\n",
2751 map->name, def->key_type_id, def->key_size);
2752 else if (def->parts & MAP_DEF_KEY_SIZE)
2753 pr_debug("map '%s': found key_size = %u.\n", map->name, def->key_size);
2754
2755 if (def->parts & MAP_DEF_VALUE_TYPE)
2756 pr_debug("map '%s': found value [%u], sz = %u.\n",
2757 map->name, def->value_type_id, def->value_size);
2758 else if (def->parts & MAP_DEF_VALUE_SIZE)
2759 pr_debug("map '%s': found value_size = %u.\n", map->name, def->value_size);
2760
2761 if (def->parts & MAP_DEF_MAX_ENTRIES)
2762 pr_debug("map '%s': found max_entries = %u.\n", map->name, def->max_entries);
2763 if (def->parts & MAP_DEF_MAP_FLAGS)
47512102
JK
2764 pr_debug("map '%s': found map_flags = 0x%x.\n", map->name, def->map_flags);
2765 if (def->parts & MAP_DEF_MAP_EXTRA)
2766 pr_debug("map '%s': found map_extra = 0x%llx.\n", map->name,
2767 (unsigned long long)def->map_extra);
c7ef5ec9
AN
2768 if (def->parts & MAP_DEF_PINNING)
2769 pr_debug("map '%s': found pinning = %u.\n", map->name, def->pinning);
2770 if (def->parts & MAP_DEF_NUMA_NODE)
2771 pr_debug("map '%s': found numa_node = %u.\n", map->name, def->numa_node);
2772
2773 if (def->parts & MAP_DEF_INNER_MAP)
2774 pr_debug("map '%s': found inner map definition.\n", map->name);
2775}
2776
c1cccec9
AN
2777static const char *btf_var_linkage_str(__u32 linkage)
2778{
2779 switch (linkage) {
2780 case BTF_VAR_STATIC: return "static";
2781 case BTF_VAR_GLOBAL_ALLOCATED: return "global";
2782 case BTF_VAR_GLOBAL_EXTERN: return "extern";
2783 default: return "unknown";
2784 }
2785}
2786
41017e56
AN
2787static int bpf_object__init_user_btf_map(struct bpf_object *obj,
2788 const struct btf_type *sec,
2789 int var_idx, int sec_idx,
2790 const Elf_Data *data, bool strict,
2791 const char *pin_root_path)
2792{
c7ef5ec9 2793 struct btf_map_def map_def = {}, inner_def = {};
41017e56
AN
2794 const struct btf_type *var, *def;
2795 const struct btf_var_secinfo *vi;
2796 const struct btf_var *var_extra;
2797 const char *map_name;
2798 struct bpf_map *map;
c7ef5ec9 2799 int err;
41017e56
AN
2800
2801 vi = btf_var_secinfos(sec) + var_idx;
2802 var = btf__type_by_id(obj->btf, vi->type);
2803 var_extra = btf_var(var);
2804 map_name = btf__name_by_offset(obj->btf, var->name_off);
2805
2806 if (map_name == NULL || map_name[0] == '\0') {
2807 pr_warn("map #%d: empty name.\n", var_idx);
2808 return -EINVAL;
2809 }
2810 if ((__u64)vi->offset + vi->size > data->d_size) {
2811 pr_warn("map '%s' BTF data is corrupted.\n", map_name);
2812 return -EINVAL;
2813 }
2814 if (!btf_is_var(var)) {
81ba0889
AN
2815 pr_warn("map '%s': unexpected var kind %s.\n",
2816 map_name, btf_kind_str(var));
41017e56
AN
2817 return -EINVAL;
2818 }
c1cccec9
AN
2819 if (var_extra->linkage != BTF_VAR_GLOBAL_ALLOCATED) {
2820 pr_warn("map '%s': unsupported map linkage %s.\n",
2821 map_name, btf_var_linkage_str(var_extra->linkage));
41017e56
AN
2822 return -EOPNOTSUPP;
2823 }
2824
2825 def = skip_mods_and_typedefs(obj->btf, var->type, NULL);
2826 if (!btf_is_struct(def)) {
81ba0889
AN
2827 pr_warn("map '%s': unexpected def kind %s.\n",
2828 map_name, btf_kind_str(var));
41017e56
AN
2829 return -EINVAL;
2830 }
2831 if (def->size > vi->size) {
2832 pr_warn("map '%s': invalid def size.\n", map_name);
2833 return -EINVAL;
2834 }
2835
2836 map = bpf_object__add_map(obj);
2837 if (IS_ERR(map))
2838 return PTR_ERR(map);
2839 map->name = strdup(map_name);
2840 if (!map->name) {
2841 pr_warn("map '%s': failed to alloc map name.\n", map_name);
2842 return -ENOMEM;
2843 }
2844 map->libbpf_type = LIBBPF_MAP_UNSPEC;
2845 map->def.type = BPF_MAP_TYPE_UNSPEC;
2846 map->sec_idx = sec_idx;
2847 map->sec_offset = vi->offset;
646f02ff 2848 map->btf_var_idx = var_idx;
41017e56
AN
2849 pr_debug("map '%s': at sec_idx %d, offset %zu.\n",
2850 map_name, map->sec_idx, map->sec_offset);
2851
c7ef5ec9
AN
2852 err = parse_btf_map_def(map->name, obj->btf, def, strict, &map_def, &inner_def);
2853 if (err)
2854 return err;
2855
2856 fill_map_from_def(map, &map_def);
2857
2858 if (map_def.pinning == LIBBPF_PIN_BY_NAME) {
2859 err = build_map_pin_path(map, pin_root_path);
2860 if (err) {
2861 pr_warn("map '%s': couldn't build pin path.\n", map->name);
2862 return err;
2863 }
2864 }
2865
2866 if (map_def.parts & MAP_DEF_INNER_MAP) {
2867 map->inner_map = calloc(1, sizeof(*map->inner_map));
2868 if (!map->inner_map)
2869 return -ENOMEM;
dac645b9
AN
2870 map->inner_map->fd = create_placeholder_fd();
2871 if (map->inner_map->fd < 0)
2872 return map->inner_map->fd;
c7ef5ec9
AN
2873 map->inner_map->sec_idx = sec_idx;
2874 map->inner_map->name = malloc(strlen(map_name) + sizeof(".inner") + 1);
2875 if (!map->inner_map->name)
2876 return -ENOMEM;
2877 sprintf(map->inner_map->name, "%s.inner", map_name);
2878
2879 fill_map_from_def(map->inner_map, &inner_def);
2880 }
2881
4fcac46c 2882 err = map_fill_btf_type_info(obj, map);
262cfb74
DK
2883 if (err)
2884 return err;
2885
c7ef5ec9 2886 return 0;
41017e56
AN
2887}
2888
2e7ba4f8
AN
2889static int init_arena_map_data(struct bpf_object *obj, struct bpf_map *map,
2890 const char *sec_name, int sec_idx,
2891 void *data, size_t data_sz)
2892{
2893 const long page_sz = sysconf(_SC_PAGE_SIZE);
2894 size_t mmap_sz;
2895
2896 mmap_sz = bpf_map_mmap_sz(obj->arena_map);
2897 if (roundup(data_sz, page_sz) > mmap_sz) {
2898 pr_warn("elf: sec '%s': declared ARENA map size (%zu) is too small to hold global __arena variables of size %zu\n",
2899 sec_name, mmap_sz, data_sz);
2900 return -E2BIG;
2901 }
2902
2903 obj->arena_data = malloc(data_sz);
2904 if (!obj->arena_data)
2905 return -ENOMEM;
2906 memcpy(obj->arena_data, data, data_sz);
2907 obj->arena_data_sz = data_sz;
2908
2909 /* make bpf_map__init_value() work for ARENA maps */
2910 map->mmaped = obj->arena_data;
2911
2912 return 0;
2913}
2914
57a00f41
THJ
2915static int bpf_object__init_user_btf_maps(struct bpf_object *obj, bool strict,
2916 const char *pin_root_path)
abd29c93
AN
2917{
2918 const struct btf_type *sec = NULL;
2919 int nr_types, i, vlen, err;
2920 const struct btf_type *t;
2921 const char *name;
2922 Elf_Data *data;
2923 Elf_Scn *scn;
2924
2925 if (obj->efile.btf_maps_shndx < 0)
2926 return 0;
2927
88a82120
AN
2928 scn = elf_sec_by_idx(obj, obj->efile.btf_maps_shndx);
2929 data = elf_sec_data(obj, scn);
abd29c93 2930 if (!scn || !data) {
88a82120
AN
2931 pr_warn("elf: failed to get %s map definitions for %s\n",
2932 MAPS_ELF_SEC, obj->path);
abd29c93
AN
2933 return -EINVAL;
2934 }
2935
6a886de0
HC
2936 nr_types = btf__type_cnt(obj->btf);
2937 for (i = 1; i < nr_types; i++) {
abd29c93 2938 t = btf__type_by_id(obj->btf, i);
b03bc685 2939 if (!btf_is_datasec(t))
abd29c93
AN
2940 continue;
2941 name = btf__name_by_offset(obj->btf, t->name_off);
2942 if (strcmp(name, MAPS_ELF_SEC) == 0) {
2943 sec = t;
646f02ff 2944 obj->efile.btf_maps_sec_btf_id = i;
abd29c93
AN
2945 break;
2946 }
2947 }
2948
2949 if (!sec) {
be18010e 2950 pr_warn("DATASEC '%s' not found.\n", MAPS_ELF_SEC);
abd29c93
AN
2951 return -ENOENT;
2952 }
2953
b03bc685 2954 vlen = btf_vlen(sec);
abd29c93
AN
2955 for (i = 0; i < vlen; i++) {
2956 err = bpf_object__init_user_btf_map(obj, sec, i,
2957 obj->efile.btf_maps_shndx,
8983b731
AN
2958 data, strict,
2959 pin_root_path);
abd29c93
AN
2960 if (err)
2961 return err;
2962 }
2963
2e7ba4f8
AN
2964 for (i = 0; i < obj->nr_maps; i++) {
2965 struct bpf_map *map = &obj->maps[i];
2966
2967 if (map->def.type != BPF_MAP_TYPE_ARENA)
2968 continue;
2969
2970 if (obj->arena_map) {
2971 pr_warn("map '%s': only single ARENA map is supported (map '%s' is also ARENA)\n",
2972 map->name, obj->arena_map->name);
2973 return -EINVAL;
2974 }
2975 obj->arena_map = map;
2976
2977 if (obj->efile.arena_data) {
2978 err = init_arena_map_data(obj, map, ARENA_SEC, obj->efile.arena_data_shndx,
2979 obj->efile.arena_data->d_buf,
2980 obj->efile.arena_data->d_size);
2981 if (err)
2982 return err;
2983 }
2984 }
2985 if (obj->efile.arena_data && !obj->arena_map) {
2986 pr_warn("elf: sec '%s': to use global __arena variables the ARENA map should be explicitly declared in SEC(\".maps\")\n",
2987 ARENA_SEC);
2988 return -ENOENT;
2989 }
2990
abd29c93
AN
2991 return 0;
2992}
2993
0d13bfce 2994static int bpf_object__init_maps(struct bpf_object *obj,
01af3bf0 2995 const struct bpf_object_open_opts *opts)
bf829271 2996{
166750bc
AN
2997 const char *pin_root_path;
2998 bool strict;
bd054102 2999 int err = 0;
8837fe5d 3000
166750bc
AN
3001 strict = !OPTS_GET(opts, relaxed_maps, false);
3002 pin_root_path = OPTS_GET(opts, pin_root_path, NULL);
bf829271 3003
40e1bcab 3004 err = bpf_object__init_user_btf_maps(obj, strict, pin_root_path);
166750bc 3005 err = err ?: bpf_object__init_global_data_maps(obj);
81bfdd08 3006 err = err ?: bpf_object__init_kconfig_map(obj);
809a69d6 3007 err = err ?: bpf_object_init_struct_ops(obj);
bf829271 3008
3b3af91c 3009 return err;
561bbcca
WN
3010}
3011
e3d91b0c
JDB
3012static bool section_have_execinstr(struct bpf_object *obj, int idx)
3013{
ad23b723 3014 Elf64_Shdr *sh;
e3d91b0c 3015
ad23b723
AN
3016 sh = elf_sec_hdr(obj, elf_sec_by_idx(obj, idx));
3017 if (!sh)
e3d91b0c
JDB
3018 return false;
3019
ad23b723 3020 return sh->sh_flags & SHF_EXECINSTR;
e3d91b0c
JDB
3021}
3022
6ebaa3fb
EZ
3023static bool starts_with_qmark(const char *s)
3024{
3025 return s && s[0] == '?';
3026}
3027
0f0e55d8
AN
3028static bool btf_needs_sanitization(struct bpf_object *obj)
3029{
9ca1f56a
AS
3030 bool has_func_global = kernel_supports(obj, FEAT_BTF_GLOBAL_FUNC);
3031 bool has_datasec = kernel_supports(obj, FEAT_BTF_DATASEC);
3032 bool has_float = kernel_supports(obj, FEAT_BTF_FLOAT);
3033 bool has_func = kernel_supports(obj, FEAT_BTF_FUNC);
223f903e 3034 bool has_decl_tag = kernel_supports(obj, FEAT_BTF_DECL_TAG);
2dc1e488 3035 bool has_type_tag = kernel_supports(obj, FEAT_BTF_TYPE_TAG);
f2a62588 3036 bool has_enum64 = kernel_supports(obj, FEAT_BTF_ENUM64);
6ebaa3fb 3037 bool has_qmark_datasec = kernel_supports(obj, FEAT_BTF_QMARK_DATASEC);
0f0e55d8 3038
2dc1e488 3039 return !has_func || !has_datasec || !has_func_global || !has_float ||
6ebaa3fb 3040 !has_decl_tag || !has_type_tag || !has_enum64 || !has_qmark_datasec;
0f0e55d8
AN
3041}
3042
f2a62588 3043static int bpf_object__sanitize_btf(struct bpf_object *obj, struct btf *btf)
d7c4b398 3044{
9ca1f56a
AS
3045 bool has_func_global = kernel_supports(obj, FEAT_BTF_GLOBAL_FUNC);
3046 bool has_datasec = kernel_supports(obj, FEAT_BTF_DATASEC);
3047 bool has_float = kernel_supports(obj, FEAT_BTF_FLOAT);
3048 bool has_func = kernel_supports(obj, FEAT_BTF_FUNC);
223f903e 3049 bool has_decl_tag = kernel_supports(obj, FEAT_BTF_DECL_TAG);
2dc1e488 3050 bool has_type_tag = kernel_supports(obj, FEAT_BTF_TYPE_TAG);
f2a62588 3051 bool has_enum64 = kernel_supports(obj, FEAT_BTF_ENUM64);
6ebaa3fb 3052 bool has_qmark_datasec = kernel_supports(obj, FEAT_BTF_QMARK_DATASEC);
c49a44b3 3053 int enum64_placeholder_id = 0;
d7c4b398
AN
3054 struct btf_type *t;
3055 int i, j, vlen;
d7c4b398 3056
6a886de0 3057 for (i = 1; i < btf__type_cnt(btf); i++) {
d7c4b398 3058 t = (struct btf_type *)btf__type_by_id(btf, i);
d7c4b398 3059
223f903e
YS
3060 if ((!has_datasec && btf_is_var(t)) || (!has_decl_tag && btf_is_decl_tag(t))) {
3061 /* replace VAR/DECL_TAG with INT */
d7c4b398 3062 t->info = BTF_INFO_ENC(BTF_KIND_INT, 0, 0);
1d4126c4
AN
3063 /*
3064 * using size = 1 is the safest choice, 4 will be too
3065 * big and cause kernel BTF validation failure if
3066 * original variable took less than 4 bytes
3067 */
3068 t->size = 1;
708852dc 3069 *(int *)(t + 1) = BTF_INT_ENC(0, 0, 8);
b03bc685 3070 } else if (!has_datasec && btf_is_datasec(t)) {
d7c4b398 3071 /* replace DATASEC with STRUCT */
b03bc685
AN
3072 const struct btf_var_secinfo *v = btf_var_secinfos(t);
3073 struct btf_member *m = btf_members(t);
d7c4b398
AN
3074 struct btf_type *vt;
3075 char *name;
3076
3077 name = (char *)btf__name_by_offset(btf, t->name_off);
3078 while (*name) {
6ebaa3fb 3079 if (*name == '.' || *name == '?')
d7c4b398
AN
3080 *name = '_';
3081 name++;
3082 }
3083
b03bc685 3084 vlen = btf_vlen(t);
d7c4b398
AN
3085 t->info = BTF_INFO_ENC(BTF_KIND_STRUCT, 0, vlen);
3086 for (j = 0; j < vlen; j++, v++, m++) {
3087 /* order of field assignments is important */
3088 m->offset = v->offset * 8;
3089 m->type = v->type;
3090 /* preserve variable name as member name */
3091 vt = (void *)btf__type_by_id(btf, v->type);
3092 m->name_off = vt->name_off;
3093 }
6ebaa3fb
EZ
3094 } else if (!has_qmark_datasec && btf_is_datasec(t) &&
3095 starts_with_qmark(btf__name_by_offset(btf, t->name_off))) {
3096 /* replace '?' prefix with '_' for DATASEC names */
3097 char *name;
3098
3099 name = (char *)btf__name_by_offset(btf, t->name_off);
3100 if (name[0] == '?')
3101 name[0] = '_';
b03bc685 3102 } else if (!has_func && btf_is_func_proto(t)) {
d7c4b398 3103 /* replace FUNC_PROTO with ENUM */
b03bc685 3104 vlen = btf_vlen(t);
d7c4b398
AN
3105 t->info = BTF_INFO_ENC(BTF_KIND_ENUM, 0, vlen);
3106 t->size = sizeof(__u32); /* kernel enforced */
b03bc685 3107 } else if (!has_func && btf_is_func(t)) {
d7c4b398
AN
3108 /* replace FUNC with TYPEDEF */
3109 t->info = BTF_INFO_ENC(BTF_KIND_TYPEDEF, 0, 0);
2d3eb67f
AS
3110 } else if (!has_func_global && btf_is_func(t)) {
3111 /* replace BTF_FUNC_GLOBAL with BTF_FUNC_STATIC */
3112 t->info = BTF_INFO_ENC(BTF_KIND_FUNC, 0, 0);
22541a9e
IL
3113 } else if (!has_float && btf_is_float(t)) {
3114 /* replace FLOAT with an equally-sized empty STRUCT;
3115 * since C compilers do not accept e.g. "float" as a
3116 * valid struct name, make it anonymous
3117 */
3118 t->name_off = 0;
3119 t->info = BTF_INFO_ENC(BTF_KIND_STRUCT, 0, 0);
2dc1e488
YS
3120 } else if (!has_type_tag && btf_is_type_tag(t)) {
3121 /* replace TYPE_TAG with a CONST */
3122 t->name_off = 0;
3123 t->info = BTF_INFO_ENC(BTF_KIND_CONST, 0, 0);
f2a62588
YS
3124 } else if (!has_enum64 && btf_is_enum(t)) {
3125 /* clear the kflag */
3126 t->info = btf_type_info(btf_kind(t), btf_vlen(t), false);
3127 } else if (!has_enum64 && btf_is_enum64(t)) {
3128 /* replace ENUM64 with a union */
3129 struct btf_member *m;
3130
3131 if (enum64_placeholder_id == 0) {
3132 enum64_placeholder_id = btf__add_int(btf, "enum64_placeholder", 1, 0);
3133 if (enum64_placeholder_id < 0)
3134 return enum64_placeholder_id;
3135
3136 t = (struct btf_type *)btf__type_by_id(btf, i);
3137 }
3138
3139 m = btf_members(t);
3140 vlen = btf_vlen(t);
3141 t->info = BTF_INFO_ENC(BTF_KIND_UNION, 0, vlen);
3142 for (j = 0; j < vlen; j++, m++) {
3143 m->type = enum64_placeholder_id;
3144 m->offset = 0;
3145 }
e3ba8e4e 3146 }
d7c4b398 3147 }
f2a62588
YS
3148
3149 return 0;
d7c4b398
AN
3150}
3151
b35f14f4 3152static bool libbpf_needs_btf(const struct bpf_object *obj)
abd29c93 3153{
b35f14f4 3154 return obj->efile.btf_maps_shndx >= 0 ||
240bf8a5 3155 obj->efile.has_st_ops ||
b35f14f4
AN
3156 obj->nr_extern > 0;
3157}
3158
3159static bool kernel_needs_btf(const struct bpf_object *obj)
3160{
240bf8a5 3161 return obj->efile.has_st_ops;
abd29c93
AN
3162}
3163
063183bf 3164static int bpf_object__init_btf(struct bpf_object *obj,
9c6660d0
AN
3165 Elf_Data *btf_data,
3166 Elf_Data *btf_ext_data)
3167{
b7d7f3e1 3168 int err = -ENOENT;
9c6660d0
AN
3169
3170 if (btf_data) {
3171 obj->btf = btf__new(btf_data->d_buf, btf_data->d_size);
e9fc3ce9
AN
3172 err = libbpf_get_error(obj->btf);
3173 if (err) {
b7d7f3e1 3174 obj->btf = NULL;
e9fc3ce9 3175 pr_warn("Error loading ELF section %s: %d.\n", BTF_ELF_SEC, err);
9c6660d0
AN
3176 goto out;
3177 }
4c01925f
AN
3178 /* enforce 8-byte pointers for BPF-targeted BTFs */
3179 btf__set_pointer_size(obj->btf, 8);
9c6660d0
AN
3180 }
3181 if (btf_ext_data) {
11d5daa8
AN
3182 struct btf_ext_info *ext_segs[3];
3183 int seg_num, sec_num;
3184
9c6660d0
AN
3185 if (!obj->btf) {
3186 pr_debug("Ignore ELF section %s because its depending ELF section %s is not found.\n",
3187 BTF_EXT_ELF_SEC, BTF_ELF_SEC);
3188 goto out;
3189 }
e9fc3ce9
AN
3190 obj->btf_ext = btf_ext__new(btf_ext_data->d_buf, btf_ext_data->d_size);
3191 err = libbpf_get_error(obj->btf_ext);
3192 if (err) {
3193 pr_warn("Error loading ELF section %s: %d. Ignored and continue.\n",
3194 BTF_EXT_ELF_SEC, err);
9c6660d0
AN
3195 obj->btf_ext = NULL;
3196 goto out;
3197 }
11d5daa8
AN
3198
3199 /* setup .BTF.ext to ELF section mapping */
3200 ext_segs[0] = &obj->btf_ext->func_info;
3201 ext_segs[1] = &obj->btf_ext->line_info;
3202 ext_segs[2] = &obj->btf_ext->core_relo_info;
3203 for (seg_num = 0; seg_num < ARRAY_SIZE(ext_segs); seg_num++) {
3204 struct btf_ext_info *seg = ext_segs[seg_num];
3205 const struct btf_ext_info_sec *sec;
3206 const char *sec_name;
3207 Elf_Scn *scn;
3208
3209 if (seg->sec_cnt == 0)
3210 continue;
3211
3212 seg->sec_idxs = calloc(seg->sec_cnt, sizeof(*seg->sec_idxs));
3213 if (!seg->sec_idxs) {
3214 err = -ENOMEM;
3215 goto out;
3216 }
3217
3218 sec_num = 0;
3219 for_each_btf_ext_sec(seg, sec) {
3220 /* preventively increment index to avoid doing
3221 * this before every continue below
3222 */
3223 sec_num++;
3224
3225 sec_name = btf__name_by_offset(obj->btf, sec->sec_name_off);
3226 if (str_is_empty(sec_name))
3227 continue;
3228 scn = elf_sec_by_name(obj, sec_name);
3229 if (!scn)
3230 continue;
3231
3232 seg->sec_idxs[sec_num - 1] = elf_ndxscn(scn);
3233 }
3234 }
9c6660d0
AN
3235 }
3236out:
b35f14f4 3237 if (err && libbpf_needs_btf(obj)) {
be18010e 3238 pr_warn("BTF is required, but is missing or corrupted.\n");
b7d7f3e1 3239 return err;
abd29c93 3240 }
9c6660d0
AN
3241 return 0;
3242}
3243
b96c07f3
AN
3244static int compare_vsi_off(const void *_a, const void *_b)
3245{
3246 const struct btf_var_secinfo *a = _a;
3247 const struct btf_var_secinfo *b = _b;
3248
3249 return a->offset - b->offset;
3250}
3251
3252static int btf_fixup_datasec(struct bpf_object *obj, struct btf *btf,
3253 struct btf_type *t)
3254{
f33f742d
AN
3255 __u32 size = 0, i, vars = btf_vlen(t);
3256 const char *sec_name = btf__name_by_offset(btf, t->name_off);
b96c07f3 3257 struct btf_var_secinfo *vsi;
4fcac46c 3258 bool fixup_offsets = false;
f33f742d 3259 int err;
b96c07f3 3260
f33f742d 3261 if (!sec_name) {
b96c07f3
AN
3262 pr_debug("No name found in string section for DATASEC kind.\n");
3263 return -ENOENT;
3264 }
3265
4fcac46c
AN
3266 /* Extern-backing datasecs (.ksyms, .kconfig) have their size and
3267 * variable offsets set at the previous step. Further, not every
3268 * extern BTF VAR has corresponding ELF symbol preserved, so we skip
3269 * all fixups altogether for such sections and go straight to sorting
3270 * VARs within their DATASEC.
b96c07f3 3271 */
4fcac46c 3272 if (strcmp(sec_name, KCONFIG_SEC) == 0 || strcmp(sec_name, KSYMS_SEC) == 0)
b96c07f3
AN
3273 goto sort_vars;
3274
4fcac46c
AN
3275 /* Clang leaves DATASEC size and VAR offsets as zeroes, so we need to
3276 * fix this up. But BPF static linker already fixes this up and fills
3277 * all the sizes and offsets during static linking. So this step has
3278 * to be optional. But the STV_HIDDEN handling is non-optional for any
3279 * non-extern DATASEC, so the variable fixup loop below handles both
3280 * functions at the same time, paying the cost of BTF VAR <-> ELF
3281 * symbol matching just once.
3282 */
3283 if (t->size == 0) {
3284 err = find_elf_sec_sz(obj, sec_name, &size);
3285 if (err || !size) {
3286 pr_debug("sec '%s': failed to determine size from ELF: size %u, err %d\n",
3287 sec_name, size, err);
3288 return -ENOENT;
3289 }
b96c07f3 3290
4fcac46c
AN
3291 t->size = size;
3292 fixup_offsets = true;
3293 }
b96c07f3
AN
3294
3295 for (i = 0, vsi = btf_var_secinfos(t); i < vars; i++, vsi++) {
f33f742d
AN
3296 const struct btf_type *t_var;
3297 struct btf_var *var;
3298 const char *var_name;
3299 Elf64_Sym *sym;
3300
b96c07f3 3301 t_var = btf__type_by_id(btf, vsi->type);
88918dc1 3302 if (!t_var || !btf_is_var(t_var)) {
f33f742d 3303 pr_debug("sec '%s': unexpected non-VAR type found\n", sec_name);
b96c07f3
AN
3304 return -EINVAL;
3305 }
3306
88918dc1 3307 var = btf_var(t_var);
f33f742d 3308 if (var->linkage == BTF_VAR_STATIC || var->linkage == BTF_VAR_GLOBAL_EXTERN)
b96c07f3
AN
3309 continue;
3310
f33f742d
AN
3311 var_name = btf__name_by_offset(btf, t_var->name_off);
3312 if (!var_name) {
3313 pr_debug("sec '%s': failed to find name of DATASEC's member #%d\n",
3314 sec_name, i);
b96c07f3
AN
3315 return -ENOENT;
3316 }
3317
f33f742d
AN
3318 sym = find_elf_var_sym(obj, var_name);
3319 if (IS_ERR(sym)) {
3320 pr_debug("sec '%s': failed to find ELF symbol for VAR '%s'\n",
3321 sec_name, var_name);
b96c07f3
AN
3322 return -ENOENT;
3323 }
3324
4fcac46c
AN
3325 if (fixup_offsets)
3326 vsi->offset = sym->st_value;
3327
3328 /* if variable is a global/weak symbol, but has restricted
3329 * (STV_HIDDEN or STV_INTERNAL) visibility, mark its BTF VAR
3330 * as static. This follows similar logic for functions (BPF
3331 * subprogs) and influences libbpf's further decisions about
3332 * whether to make global data BPF array maps as
3333 * BPF_F_MMAPABLE.
3334 */
3335 if (ELF64_ST_VISIBILITY(sym->st_other) == STV_HIDDEN
3336 || ELF64_ST_VISIBILITY(sym->st_other) == STV_INTERNAL)
3337 var->linkage = BTF_VAR_STATIC;
b96c07f3
AN
3338 }
3339
3340sort_vars:
3341 qsort(btf_var_secinfos(t), vars, sizeof(*vsi), compare_vsi_off);
3342 return 0;
3343}
3344
f33f742d 3345static int bpf_object_fixup_btf(struct bpf_object *obj)
b96c07f3 3346{
f33f742d 3347 int i, n, err = 0;
b96c07f3 3348
f33f742d
AN
3349 if (!obj->btf)
3350 return 0;
3351
3352 n = btf__type_cnt(obj->btf);
6a886de0 3353 for (i = 1; i < n; i++) {
f33f742d 3354 struct btf_type *t = btf_type_by_id(obj->btf, i);
b96c07f3
AN
3355
3356 /* Loader needs to fix up some of the things compiler
3357 * couldn't get its hands on while emitting BTF. This
3358 * is section size and global variable offset. We use
3359 * the info from the ELF itself for this purpose.
3360 */
3361 if (btf_is_datasec(t)) {
f33f742d 3362 err = btf_fixup_datasec(obj, obj->btf, t);
b96c07f3 3363 if (err)
f33f742d 3364 return err;
b96c07f3
AN
3365 }
3366 }
3367
166750bc
AN
3368 return 0;
3369}
3370
fe62de31 3371static bool prog_needs_vmlinux_btf(struct bpf_program *prog)
a6ed02ca 3372{
1e092a03
KS
3373 if (prog->type == BPF_PROG_TYPE_STRUCT_OPS ||
3374 prog->type == BPF_PROG_TYPE_LSM)
a6ed02ca
KS
3375 return true;
3376
3377 /* BPF_PROG_TYPE_TRACING programs which do not attach to other programs
3378 * also need vmlinux BTF
3379 */
3380 if (prog->type == BPF_PROG_TYPE_TRACING && !prog->attach_prog_fd)
3381 return true;
3382
3383 return false;
3384}
3385
8b7b0e5f
DV
3386static bool map_needs_vmlinux_btf(struct bpf_map *map)
3387{
3388 return bpf_map__is_struct_ops(map);
3389}
3390
fe62de31 3391static bool obj_needs_vmlinux_btf(const struct bpf_object *obj)
a6ed02ca
KS
3392{
3393 struct bpf_program *prog;
8b7b0e5f 3394 struct bpf_map *map;
fe62de31 3395 int i;
a6ed02ca 3396
1373ff59
SC
3397 /* CO-RE relocations need kernel BTF, only when btf_custom_path
3398 * is not specified
3399 */
3400 if (obj->btf_ext && obj->btf_ext->core_relo_info.len && !obj->btf_custom_path)
fe62de31 3401 return true;
192b6638 3402
d370bbe1
HL
3403 /* Support for typed ksyms needs kernel BTF */
3404 for (i = 0; i < obj->nr_extern; i++) {
3405 const struct extern_desc *ext;
3406
3407 ext = &obj->externs[i];
fe62de31
AN
3408 if (ext->type == EXT_KSYM && ext->ksym.type_id)
3409 return true;
d370bbe1
HL
3410 }
3411
a6ed02ca 3412 bpf_object__for_each_program(prog, obj) {
a3820c48 3413 if (!prog->autoload)
d9297581 3414 continue;
fe62de31
AN
3415 if (prog_needs_vmlinux_btf(prog))
3416 return true;
a6ed02ca
KS
3417 }
3418
8b7b0e5f
DV
3419 bpf_object__for_each_map(map, obj) {
3420 if (map_needs_vmlinux_btf(map))
3421 return true;
3422 }
3423
fe62de31
AN
3424 return false;
3425}
3426
3427static int bpf_object__load_vmlinux_btf(struct bpf_object *obj, bool force)
3428{
3429 int err;
3430
3431 /* btf_vmlinux could be loaded earlier */
67234743 3432 if (obj->btf_vmlinux || obj->gen_loader)
fe62de31
AN
3433 return 0;
3434
3435 if (!force && !obj_needs_vmlinux_btf(obj))
192b6638
AN
3436 return 0;
3437
a710eed3 3438 obj->btf_vmlinux = btf__load_vmlinux_btf();
e9fc3ce9
AN
3439 err = libbpf_get_error(obj->btf_vmlinux);
3440 if (err) {
192b6638
AN
3441 pr_warn("Error loading vmlinux BTF: %d\n", err);
3442 obj->btf_vmlinux = NULL;
3443 return err;
3444 }
a6ed02ca
KS
3445 return 0;
3446}
3447
063183bf
AN
3448static int bpf_object__sanitize_and_load_btf(struct bpf_object *obj)
3449{
0f0e55d8
AN
3450 struct btf *kern_btf = obj->btf;
3451 bool btf_mandatory, sanitize;
aea28a60 3452 int i, err = 0;
063183bf
AN
3453
3454 if (!obj->btf)
3455 return 0;
3456
9ca1f56a 3457 if (!kernel_supports(obj, FEAT_BTF)) {
68b08647
AN
3458 if (kernel_needs_btf(obj)) {
3459 err = -EOPNOTSUPP;
3460 goto report;
3461 }
3462 pr_debug("Kernel doesn't support BTF, skipping uploading it.\n");
3463 return 0;
3464 }
3465
aea28a60
AN
3466 /* Even though some subprogs are global/weak, user might prefer more
3467 * permissive BPF verification process that BPF verifier performs for
3468 * static functions, taking into account more context from the caller
3469 * functions. In such case, they need to mark such subprogs with
3470 * __attribute__((visibility("hidden"))) and libbpf will adjust
3471 * corresponding FUNC BTF type to be marked as static and trigger more
3472 * involved BPF verification process.
3473 */
3474 for (i = 0; i < obj->nr_programs; i++) {
3475 struct bpf_program *prog = &obj->programs[i];
3476 struct btf_type *t;
3477 const char *name;
3478 int j, n;
3479
3480 if (!prog->mark_btf_static || !prog_is_subprog(obj, prog))
3481 continue;
3482
6a886de0
HC
3483 n = btf__type_cnt(obj->btf);
3484 for (j = 1; j < n; j++) {
aea28a60
AN
3485 t = btf_type_by_id(obj->btf, j);
3486 if (!btf_is_func(t) || btf_func_linkage(t) != BTF_FUNC_GLOBAL)
3487 continue;
3488
3489 name = btf__str_by_offset(obj->btf, t->name_off);
3490 if (strcmp(name, prog->name) != 0)
3491 continue;
3492
3493 t->info = btf_type_info(BTF_KIND_FUNC, BTF_FUNC_STATIC, 0);
3494 break;
3495 }
3496 }
3497
0f0e55d8
AN
3498 sanitize = btf_needs_sanitization(obj);
3499 if (sanitize) {
5c3320d7 3500 const void *raw_data;
0f0e55d8 3501 __u32 sz;
063183bf 3502
0f0e55d8 3503 /* clone BTF to sanitize a copy and leave the original intact */
6a886de0 3504 raw_data = btf__raw_data(obj->btf, &sz);
5c3320d7 3505 kern_btf = btf__new(raw_data, sz);
e9fc3ce9
AN
3506 err = libbpf_get_error(kern_btf);
3507 if (err)
3508 return err;
04efe591 3509
4c01925f
AN
3510 /* enforce 8-byte pointers for BPF-targeted BTFs */
3511 btf__set_pointer_size(obj->btf, 8);
f2a62588
YS
3512 err = bpf_object__sanitize_btf(obj, kern_btf);
3513 if (err)
3514 return err;
063183bf 3515 }
0f0e55d8 3516
67234743
AS
3517 if (obj->gen_loader) {
3518 __u32 raw_size = 0;
6a886de0 3519 const void *raw_data = btf__raw_data(kern_btf, &raw_size);
67234743
AS
3520
3521 if (!raw_data)
3522 return -ENOMEM;
3523 bpf_gen__load_btf(obj->gen_loader, raw_data, raw_size);
3524 /* Pretend to have valid FD to pass various fd >= 0 checks.
3525 * This fd == 0 will not be used with any syscall and will be reset to -1 eventually.
3526 */
3527 btf__set_fd(kern_btf, 0);
3528 } else {
e0e3ea88
AN
3529 /* currently BPF_BTF_LOAD only supports log_level 1 */
3530 err = btf_load_into_kernel(kern_btf, obj->log_buf, obj->log_size,
6b434b61 3531 obj->log_level ? 1 : 0, obj->token_fd);
67234743 3532 }
0f0e55d8
AN
3533 if (sanitize) {
3534 if (!err) {
3535 /* move fd to libbpf's BTF */
3536 btf__set_fd(obj->btf, btf__fd(kern_btf));
3537 btf__set_fd(kern_btf, -1);
3538 }
3539 btf__free(kern_btf);
3540 }
68b08647 3541report:
0f0e55d8
AN
3542 if (err) {
3543 btf_mandatory = kernel_needs_btf(obj);
3544 pr_warn("Error loading .BTF into kernel: %d. %s\n", err,
3545 btf_mandatory ? "BTF is mandatory, can't proceed."
3546 : "BTF is optional, ignoring.");
3547 if (!btf_mandatory)
3548 err = 0;
3549 }
3550 return err;
063183bf
AN
3551}
3552
88a82120
AN
3553static const char *elf_sym_str(const struct bpf_object *obj, size_t off)
3554{
3555 const char *name;
3556
3557 name = elf_strptr(obj->efile.elf, obj->efile.strtabidx, off);
3558 if (!name) {
3559 pr_warn("elf: failed to get section name string at offset %zu from %s: %s\n",
3560 off, obj->path, elf_errmsg(-1));
3561 return NULL;
3562 }
3563
3564 return name;
3565}
3566
3567static const char *elf_sec_str(const struct bpf_object *obj, size_t off)
3568{
3569 const char *name;
3570
3571 name = elf_strptr(obj->efile.elf, obj->efile.shstrndx, off);
3572 if (!name) {
3573 pr_warn("elf: failed to get section name string at offset %zu from %s: %s\n",
3574 off, obj->path, elf_errmsg(-1));
3575 return NULL;
3576 }
3577
3578 return name;
3579}
3580
3581static Elf_Scn *elf_sec_by_idx(const struct bpf_object *obj, size_t idx)
3582{
3583 Elf_Scn *scn;
3584
3585 scn = elf_getscn(obj->efile.elf, idx);
3586 if (!scn) {
3587 pr_warn("elf: failed to get section(%zu) from %s: %s\n",
3588 idx, obj->path, elf_errmsg(-1));
3589 return NULL;
3590 }
3591 return scn;
3592}
3593
3594static Elf_Scn *elf_sec_by_name(const struct bpf_object *obj, const char *name)
3595{
3596 Elf_Scn *scn = NULL;
3597 Elf *elf = obj->efile.elf;
3598 const char *sec_name;
3599
3600 while ((scn = elf_nextscn(elf, scn)) != NULL) {
3601 sec_name = elf_sec_name(obj, scn);
3602 if (!sec_name)
3603 return NULL;
3604
3605 if (strcmp(sec_name, name) != 0)
3606 continue;
3607
3608 return scn;
3609 }
3610 return NULL;
3611}
3612
ad23b723 3613static Elf64_Shdr *elf_sec_hdr(const struct bpf_object *obj, Elf_Scn *scn)
88a82120 3614{
ad23b723
AN
3615 Elf64_Shdr *shdr;
3616
88a82120 3617 if (!scn)
ad23b723 3618 return NULL;
88a82120 3619
ad23b723
AN
3620 shdr = elf64_getshdr(scn);
3621 if (!shdr) {
88a82120
AN
3622 pr_warn("elf: failed to get section(%zu) header from %s: %s\n",
3623 elf_ndxscn(scn), obj->path, elf_errmsg(-1));
ad23b723 3624 return NULL;
88a82120
AN
3625 }
3626
ad23b723 3627 return shdr;
88a82120
AN
3628}
3629
3630static const char *elf_sec_name(const struct bpf_object *obj, Elf_Scn *scn)
3631{
3632 const char *name;
ad23b723 3633 Elf64_Shdr *sh;
88a82120
AN
3634
3635 if (!scn)
3636 return NULL;
3637
ad23b723
AN
3638 sh = elf_sec_hdr(obj, scn);
3639 if (!sh)
88a82120
AN
3640 return NULL;
3641
ad23b723 3642 name = elf_sec_str(obj, sh->sh_name);
88a82120
AN
3643 if (!name) {
3644 pr_warn("elf: failed to get section(%zu) name from %s: %s\n",
3645 elf_ndxscn(scn), obj->path, elf_errmsg(-1));
3646 return NULL;
3647 }
3648
3649 return name;
3650}
3651
3652static Elf_Data *elf_sec_data(const struct bpf_object *obj, Elf_Scn *scn)
3653{
3654 Elf_Data *data;
3655
3656 if (!scn)
3657 return NULL;
3658
3659 data = elf_getdata(scn, 0);
3660 if (!data) {
3661 pr_warn("elf: failed to get section(%zu) %s data from %s: %s\n",
3662 elf_ndxscn(scn), elf_sec_name(obj, scn) ?: "<?>",
3663 obj->path, elf_errmsg(-1));
3664 return NULL;
3665 }
3666
3667 return data;
3668}
3669
ad23b723
AN
3670static Elf64_Sym *elf_sym_by_idx(const struct bpf_object *obj, size_t idx)
3671{
3672 if (idx >= obj->efile.symbols->d_size / sizeof(Elf64_Sym))
3673 return NULL;
3674
3675 return (Elf64_Sym *)obj->efile.symbols->d_buf + idx;
3676}
3677
3678static Elf64_Rel *elf_rel_by_idx(Elf_Data *data, size_t idx)
3679{
3680 if (idx >= data->d_size / sizeof(Elf64_Rel))
3681 return NULL;
3682
3683 return (Elf64_Rel *)data->d_buf + idx;
3684}
3685
50e09460
AN
3686static bool is_sec_name_dwarf(const char *name)
3687{
3688 /* approximation, but the actual list is too long */
13d35a0c 3689 return str_has_pfx(name, ".debug_");
50e09460
AN
3690}
3691
ad23b723 3692static bool ignore_elf_section(Elf64_Shdr *hdr, const char *name)
50e09460
AN
3693{
3694 /* no special handling of .strtab */
3695 if (hdr->sh_type == SHT_STRTAB)
3696 return true;
3697
3698 /* ignore .llvm_addrsig section as well */
faf6ed32 3699 if (hdr->sh_type == SHT_LLVM_ADDRSIG)
50e09460
AN
3700 return true;
3701
3702 /* no subprograms will lead to an empty .text section, ignore it */
3703 if (hdr->sh_type == SHT_PROGBITS && hdr->sh_size == 0 &&
3704 strcmp(name, ".text") == 0)
3705 return true;
3706
3707 /* DWARF sections */
3708 if (is_sec_name_dwarf(name))
3709 return true;
3710
13d35a0c 3711 if (str_has_pfx(name, ".rel")) {
50e09460
AN
3712 name += sizeof(".rel") - 1;
3713 /* DWARF section relocations */
3714 if (is_sec_name_dwarf(name))
3715 return true;
3716
3717 /* .BTF and .BTF.ext don't need relocations */
3718 if (strcmp(name, BTF_ELF_SEC) == 0 ||
3719 strcmp(name, BTF_EXT_ELF_SEC) == 0)
3720 return true;
3721 }
3722
3723 return false;
3724}
3725
db2b8b06
AN
3726static int cmp_progs(const void *_a, const void *_b)
3727{
3728 const struct bpf_program *a = _a;
3729 const struct bpf_program *b = _b;
3730
3731 if (a->sec_idx != b->sec_idx)
3732 return a->sec_idx < b->sec_idx ? -1 : 1;
3733
3734 /* sec_insn_off can't be the same within the section */
3735 return a->sec_insn_off < b->sec_insn_off ? -1 : 1;
3736}
3737
0d13bfce 3738static int bpf_object__elf_collect(struct bpf_object *obj)
29603665 3739{
25bbbd7a 3740 struct elf_sec_desc *sec_desc;
29603665 3741 Elf *elf = obj->efile.elf;
f0187f0b 3742 Elf_Data *btf_ext_data = NULL;
1713d68b 3743 Elf_Data *btf_data = NULL;
666810e8 3744 int idx = 0, err = 0;
0201c575
AN
3745 const char *name;
3746 Elf_Data *data;
3747 Elf_Scn *scn;
ad23b723 3748 Elf64_Shdr *sh;
29603665 3749
0d6988e1 3750 /* ELF section indices are 0-based, but sec #0 is special "invalid"
51deedc9
SHY
3751 * section. Since section count retrieved by elf_getshdrnum() does
3752 * include sec #0, it is already the necessary size of an array to keep
3753 * all the sections.
25bbbd7a 3754 */
51deedc9
SHY
3755 if (elf_getshdrnum(obj->efile.elf, &obj->efile.sec_cnt)) {
3756 pr_warn("elf: failed to get the number of sections for %s: %s\n",
3757 obj->path, elf_errmsg(-1));
3758 return -LIBBPF_ERRNO__FORMAT;
3759 }
25bbbd7a
AN
3760 obj->efile.secs = calloc(obj->efile.sec_cnt, sizeof(*obj->efile.secs));
3761 if (!obj->efile.secs)
3762 return -ENOMEM;
29603665 3763
0201c575
AN
3764 /* a bunch of ELF parsing functionality depends on processing symbols,
3765 * so do the first pass and find the symbol table
3766 */
3767 scn = NULL;
29603665 3768 while ((scn = elf_nextscn(elf, scn)) != NULL) {
ad23b723
AN
3769 sh = elf_sec_hdr(obj, scn);
3770 if (!sh)
0201c575
AN
3771 return -LIBBPF_ERRNO__FORMAT;
3772
ad23b723 3773 if (sh->sh_type == SHT_SYMTAB) {
0201c575
AN
3774 if (obj->efile.symbols) {
3775 pr_warn("elf: multiple symbol tables in %s\n", obj->path);
3776 return -LIBBPF_ERRNO__FORMAT;
3777 }
29603665 3778
0201c575
AN
3779 data = elf_sec_data(obj, scn);
3780 if (!data)
3781 return -LIBBPF_ERRNO__FORMAT;
3782
25bbbd7a
AN
3783 idx = elf_ndxscn(scn);
3784
0201c575 3785 obj->efile.symbols = data;
25bbbd7a 3786 obj->efile.symbols_shndx = idx;
ad23b723 3787 obj->efile.strtabidx = sh->sh_link;
0201c575
AN
3788 }
3789 }
3790
03e601f4
THJ
3791 if (!obj->efile.symbols) {
3792 pr_warn("elf: couldn't find symbol table in %s, stripped object file?\n",
3793 obj->path);
3794 return -ENOENT;
3795 }
3796
0201c575
AN
3797 scn = NULL;
3798 while ((scn = elf_nextscn(elf, scn)) != NULL) {
25bbbd7a
AN
3799 idx = elf_ndxscn(scn);
3800 sec_desc = &obj->efile.secs[idx];
88a82120 3801
ad23b723
AN
3802 sh = elf_sec_hdr(obj, scn);
3803 if (!sh)
01b29d1d 3804 return -LIBBPF_ERRNO__FORMAT;
29603665 3805
ad23b723 3806 name = elf_sec_str(obj, sh->sh_name);
88a82120 3807 if (!name)
01b29d1d 3808 return -LIBBPF_ERRNO__FORMAT;
29603665 3809
ad23b723 3810 if (ignore_elf_section(sh, name))
50e09460
AN
3811 continue;
3812
88a82120
AN
3813 data = elf_sec_data(obj, scn);
3814 if (!data)
01b29d1d 3815 return -LIBBPF_ERRNO__FORMAT;
88a82120
AN
3816
3817 pr_debug("elf: section(%d) %s, size %ld, link %d, flags %lx, type=%d\n",
077c066a 3818 idx, name, (unsigned long)data->d_size,
ad23b723
AN
3819 (int)sh->sh_link, (unsigned long)sh->sh_flags,
3820 (int)sh->sh_type);
cb1e5e96 3821
1713d68b 3822 if (strcmp(name, "license") == 0) {
88a82120 3823 err = bpf_object__init_license(obj, data->d_buf, data->d_size);
01b29d1d
AN
3824 if (err)
3825 return err;
1713d68b 3826 } else if (strcmp(name, "version") == 0) {
88a82120 3827 err = bpf_object__init_kversion(obj, data->d_buf, data->d_size);
54b8625c
JF
3828 if (err)
3829 return err;
1713d68b 3830 } else if (strcmp(name, "maps") == 0) {
e19db676
AN
3831 pr_warn("elf: legacy map definitions in 'maps' section are not supported by libbpf v1.0+\n");
3832 return -ENOTSUP;
abd29c93
AN
3833 } else if (strcmp(name, MAPS_ELF_SEC) == 0) {
3834 obj->efile.btf_maps_shndx = idx;
1713d68b 3835 } else if (strcmp(name, BTF_ELF_SEC) == 0) {
62554d52
AN
3836 if (sh->sh_type != SHT_PROGBITS)
3837 return -LIBBPF_ERRNO__FORMAT;
1713d68b 3838 btf_data = data;
2993e051 3839 } else if (strcmp(name, BTF_EXT_ELF_SEC) == 0) {
62554d52
AN
3840 if (sh->sh_type != SHT_PROGBITS)
3841 return -LIBBPF_ERRNO__FORMAT;
f0187f0b 3842 btf_ext_data = data;
ad23b723 3843 } else if (sh->sh_type == SHT_SYMTAB) {
0201c575 3844 /* already processed during the first pass above */
ad23b723
AN
3845 } else if (sh->sh_type == SHT_PROGBITS && data->d_size > 0) {
3846 if (sh->sh_flags & SHF_EXECINSTR) {
f8c7a4d4
JS
3847 if (strcmp(name, ".text") == 0)
3848 obj->efile.text_shndx = idx;
c1122392 3849 err = bpf_object__add_programs(obj, data, name, idx);
88a82120 3850 if (err)
01b29d1d 3851 return err;
aed65917
AN
3852 } else if (strcmp(name, DATA_SEC) == 0 ||
3853 str_has_pfx(name, DATA_SEC ".")) {
25bbbd7a
AN
3854 sec_desc->sec_type = SEC_DATA;
3855 sec_desc->shdr = sh;
3856 sec_desc->data = data;
aed65917
AN
3857 } else if (strcmp(name, RODATA_SEC) == 0 ||
3858 str_has_pfx(name, RODATA_SEC ".")) {
25bbbd7a
AN
3859 sec_desc->sec_type = SEC_RODATA;
3860 sec_desc->shdr = sh;
3861 sec_desc->data = data;
240bf8a5 3862 } else if (strcmp(name, STRUCT_OPS_SEC) == 0 ||
5ad0ecbe
EZ
3863 strcmp(name, STRUCT_OPS_LINK_SEC) == 0 ||
3864 strcmp(name, "?" STRUCT_OPS_SEC) == 0 ||
3865 strcmp(name, "?" STRUCT_OPS_LINK_SEC) == 0) {
240bf8a5
EZ
3866 sec_desc->sec_type = SEC_ST_OPS;
3867 sec_desc->shdr = sh;
3868 sec_desc->data = data;
3869 obj->efile.has_st_ops = true;
2e7ba4f8
AN
3870 } else if (strcmp(name, ARENA_SEC) == 0) {
3871 obj->efile.arena_data = data;
3872 obj->efile.arena_data_shndx = idx;
d859900c 3873 } else {
50e09460
AN
3874 pr_info("elf: skipping unrecognized data section(%d) %s\n",
3875 idx, name);
a5b8bd47 3876 }
ad23b723 3877 } else if (sh->sh_type == SHT_REL) {
25bbbd7a 3878 int targ_sec_idx = sh->sh_info; /* points to other section */
e3d91b0c 3879
b7332d28
AN
3880 if (sh->sh_entsize != sizeof(Elf64_Rel) ||
3881 targ_sec_idx >= obj->efile.sec_cnt)
3882 return -LIBBPF_ERRNO__FORMAT;
3883
e3d91b0c 3884 /* Only do relo for section with exec instructions */
25bbbd7a 3885 if (!section_have_execinstr(obj, targ_sec_idx) &&
646f02ff 3886 strcmp(name, ".rel" STRUCT_OPS_SEC) &&
809a69d6 3887 strcmp(name, ".rel" STRUCT_OPS_LINK_SEC) &&
5ad0ecbe
EZ
3888 strcmp(name, ".rel?" STRUCT_OPS_SEC) &&
3889 strcmp(name, ".rel?" STRUCT_OPS_LINK_SEC) &&
646f02ff 3890 strcmp(name, ".rel" MAPS_ELF_SEC)) {
50e09460 3891 pr_info("elf: skipping relo section(%d) %s for section(%d) %s\n",
25bbbd7a
AN
3892 idx, name, targ_sec_idx,
3893 elf_sec_name(obj, elf_sec_by_idx(obj, targ_sec_idx)) ?: "<?>");
e3d91b0c
JDB
3894 continue;
3895 }
b62f06e8 3896
25bbbd7a
AN
3897 sec_desc->sec_type = SEC_RELO;
3898 sec_desc->shdr = sh;
3899 sec_desc->data = data;
dc79f035
AS
3900 } else if (sh->sh_type == SHT_NOBITS && (strcmp(name, BSS_SEC) == 0 ||
3901 str_has_pfx(name, BSS_SEC "."))) {
25bbbd7a
AN
3902 sec_desc->sec_type = SEC_BSS;
3903 sec_desc->shdr = sh;
3904 sec_desc->data = data;
077c066a 3905 } else {
2e80be60 3906 pr_info("elf: skipping section(%d) %s (size %zu)\n", idx, name,
ad23b723 3907 (size_t)sh->sh_size);
bec7d68c 3908 }
29603665 3909 }
561bbcca 3910
d3a3aa0c 3911 if (!obj->efile.strtabidx || obj->efile.strtabidx > idx) {
88a82120 3912 pr_warn("elf: symbol strings section missing or invalid in %s\n", obj->path);
f102154d 3913 return -LIBBPF_ERRNO__FORMAT;
77ba9a5b 3914 }
db2b8b06
AN
3915
3916 /* sort BPF programs by section name and in-section instruction offset
e3ba8e4e
KM
3917 * for faster search
3918 */
2a6a9bf2
AN
3919 if (obj->nr_programs)
3920 qsort(obj->programs, obj->nr_programs, sizeof(*obj->programs), cmp_progs);
db2b8b06 3921
0d13bfce 3922 return bpf_object__init_btf(obj, btf_data, btf_ext_data);
29603665
WN
3923}
3924
ad23b723 3925static bool sym_is_extern(const Elf64_Sym *sym)
166750bc 3926{
ad23b723 3927 int bind = ELF64_ST_BIND(sym->st_info);
166750bc
AN
3928 /* externs are symbols w/ type=NOTYPE, bind=GLOBAL|WEAK, section=UND */
3929 return sym->st_shndx == SHN_UNDEF &&
3930 (bind == STB_GLOBAL || bind == STB_WEAK) &&
ad23b723 3931 ELF64_ST_TYPE(sym->st_info) == STT_NOTYPE;
166750bc
AN
3932}
3933
ad23b723 3934static bool sym_is_subprog(const Elf64_Sym *sym, int text_shndx)
53eddb5e 3935{
ad23b723
AN
3936 int bind = ELF64_ST_BIND(sym->st_info);
3937 int type = ELF64_ST_TYPE(sym->st_info);
53eddb5e
YS
3938
3939 /* in .text section */
3940 if (sym->st_shndx != text_shndx)
3941 return false;
3942
3943 /* local function */
3944 if (bind == STB_LOCAL && type == STT_SECTION)
3945 return true;
3946
3947 /* global function */
3948 return bind == STB_GLOBAL && type == STT_FUNC;
3949}
3950
166750bc
AN
3951static int find_extern_btf_id(const struct btf *btf, const char *ext_name)
3952{
3953 const struct btf_type *t;
5bd022ec 3954 const char *tname;
166750bc
AN
3955 int i, n;
3956
3957 if (!btf)
3958 return -ESRCH;
3959
6a886de0
HC
3960 n = btf__type_cnt(btf);
3961 for (i = 1; i < n; i++) {
166750bc
AN
3962 t = btf__type_by_id(btf, i);
3963
5bd022ec 3964 if (!btf_is_var(t) && !btf_is_func(t))
166750bc
AN
3965 continue;
3966
5bd022ec
MKL
3967 tname = btf__name_by_offset(btf, t->name_off);
3968 if (strcmp(tname, ext_name))
166750bc
AN
3969 continue;
3970
5bd022ec
MKL
3971 if (btf_is_var(t) &&
3972 btf_var(t)->linkage != BTF_VAR_GLOBAL_EXTERN)
3973 return -EINVAL;
3974
3975 if (btf_is_func(t) && btf_func_linkage(t) != BTF_FUNC_EXTERN)
166750bc
AN
3976 return -EINVAL;
3977
3978 return i;
3979 }
3980
3981 return -ENOENT;
3982}
3983
2e33efe3
AN
3984static int find_extern_sec_btf_id(struct btf *btf, int ext_btf_id) {
3985 const struct btf_var_secinfo *vs;
3986 const struct btf_type *t;
3987 int i, j, n;
3988
3989 if (!btf)
3990 return -ESRCH;
3991
6a886de0
HC
3992 n = btf__type_cnt(btf);
3993 for (i = 1; i < n; i++) {
2e33efe3
AN
3994 t = btf__type_by_id(btf, i);
3995
3996 if (!btf_is_datasec(t))
3997 continue;
3998
3999 vs = btf_var_secinfos(t);
4000 for (j = 0; j < btf_vlen(t); j++, vs++) {
4001 if (vs->type == ext_btf_id)
4002 return i;
4003 }
4004 }
4005
4006 return -ENOENT;
4007}
4008
4009static enum kcfg_type find_kcfg_type(const struct btf *btf, int id,
4010 bool *is_signed)
166750bc
AN
4011{
4012 const struct btf_type *t;
4013 const char *name;
4014
4015 t = skip_mods_and_typedefs(btf, id, NULL);
4016 name = btf__name_by_offset(btf, t->name_off);
4017
4018 if (is_signed)
4019 *is_signed = false;
4020 switch (btf_kind(t)) {
4021 case BTF_KIND_INT: {
4022 int enc = btf_int_encoding(t);
4023
4024 if (enc & BTF_INT_BOOL)
2e33efe3 4025 return t->size == 1 ? KCFG_BOOL : KCFG_UNKNOWN;
166750bc
AN
4026 if (is_signed)
4027 *is_signed = enc & BTF_INT_SIGNED;
4028 if (t->size == 1)
2e33efe3 4029 return KCFG_CHAR;
166750bc 4030 if (t->size < 1 || t->size > 8 || (t->size & (t->size - 1)))
2e33efe3
AN
4031 return KCFG_UNKNOWN;
4032 return KCFG_INT;
166750bc
AN
4033 }
4034 case BTF_KIND_ENUM:
4035 if (t->size != 4)
2e33efe3 4036 return KCFG_UNKNOWN;
166750bc 4037 if (strcmp(name, "libbpf_tristate"))
2e33efe3
AN
4038 return KCFG_UNKNOWN;
4039 return KCFG_TRISTATE;
f2a62588
YS
4040 case BTF_KIND_ENUM64:
4041 if (strcmp(name, "libbpf_tristate"))
4042 return KCFG_UNKNOWN;
4043 return KCFG_TRISTATE;
166750bc
AN
4044 case BTF_KIND_ARRAY:
4045 if (btf_array(t)->nelems == 0)
2e33efe3
AN
4046 return KCFG_UNKNOWN;
4047 if (find_kcfg_type(btf, btf_array(t)->type, NULL) != KCFG_CHAR)
4048 return KCFG_UNKNOWN;
4049 return KCFG_CHAR_ARR;
166750bc 4050 default:
2e33efe3 4051 return KCFG_UNKNOWN;
166750bc
AN
4052 }
4053}
4054
4055static int cmp_externs(const void *_a, const void *_b)
4056{
4057 const struct extern_desc *a = _a;
4058 const struct extern_desc *b = _b;
4059
2e33efe3
AN
4060 if (a->type != b->type)
4061 return a->type < b->type ? -1 : 1;
4062
4063 if (a->type == EXT_KCFG) {
4064 /* descending order by alignment requirements */
4065 if (a->kcfg.align != b->kcfg.align)
4066 return a->kcfg.align > b->kcfg.align ? -1 : 1;
4067 /* ascending order by size, within same alignment class */
4068 if (a->kcfg.sz != b->kcfg.sz)
4069 return a->kcfg.sz < b->kcfg.sz ? -1 : 1;
4070 }
4071
166750bc
AN
4072 /* resolve ties by name */
4073 return strcmp(a->name, b->name);
4074}
4075
1c0c7074
AN
4076static int find_int_btf_id(const struct btf *btf)
4077{
4078 const struct btf_type *t;
4079 int i, n;
4080
6a886de0
HC
4081 n = btf__type_cnt(btf);
4082 for (i = 1; i < n; i++) {
1c0c7074
AN
4083 t = btf__type_by_id(btf, i);
4084
4085 if (btf_is_int(t) && btf_int_bits(t) == 32)
4086 return i;
4087 }
4088
4089 return 0;
4090}
4091
5bd022ec
MKL
4092static int add_dummy_ksym_var(struct btf *btf)
4093{
4094 int i, int_btf_id, sec_btf_id, dummy_var_btf_id;
4095 const struct btf_var_secinfo *vs;
4096 const struct btf_type *sec;
4097
9683e577
IR
4098 if (!btf)
4099 return 0;
4100
5bd022ec
MKL
4101 sec_btf_id = btf__find_by_name_kind(btf, KSYMS_SEC,
4102 BTF_KIND_DATASEC);
4103 if (sec_btf_id < 0)
4104 return 0;
4105
4106 sec = btf__type_by_id(btf, sec_btf_id);
4107 vs = btf_var_secinfos(sec);
4108 for (i = 0; i < btf_vlen(sec); i++, vs++) {
4109 const struct btf_type *vt;
4110
4111 vt = btf__type_by_id(btf, vs->type);
4112 if (btf_is_func(vt))
4113 break;
4114 }
4115
4116 /* No func in ksyms sec. No need to add dummy var. */
4117 if (i == btf_vlen(sec))
4118 return 0;
4119
4120 int_btf_id = find_int_btf_id(btf);
4121 dummy_var_btf_id = btf__add_var(btf,
4122 "dummy_ksym",
4123 BTF_VAR_GLOBAL_ALLOCATED,
4124 int_btf_id);
4125 if (dummy_var_btf_id < 0)
4126 pr_warn("cannot create a dummy_ksym var\n");
4127
4128 return dummy_var_btf_id;
4129}
4130
166750bc
AN
4131static int bpf_object__collect_externs(struct bpf_object *obj)
4132{
1c0c7074 4133 struct btf_type *sec, *kcfg_sec = NULL, *ksym_sec = NULL;
166750bc
AN
4134 const struct btf_type *t;
4135 struct extern_desc *ext;
5bd022ec 4136 int i, n, off, dummy_var_btf_id;
2e33efe3 4137 const char *ext_name, *sec_name;
5964a223 4138 size_t ext_essent_len;
166750bc 4139 Elf_Scn *scn;
ad23b723 4140 Elf64_Shdr *sh;
166750bc
AN
4141
4142 if (!obj->efile.symbols)
4143 return 0;
4144
88a82120 4145 scn = elf_sec_by_idx(obj, obj->efile.symbols_shndx);
ad23b723 4146 sh = elf_sec_hdr(obj, scn);
83390787 4147 if (!sh || sh->sh_entsize != sizeof(Elf64_Sym))
166750bc 4148 return -LIBBPF_ERRNO__FORMAT;
166750bc 4149
5bd022ec
MKL
4150 dummy_var_btf_id = add_dummy_ksym_var(obj->btf);
4151 if (dummy_var_btf_id < 0)
4152 return dummy_var_btf_id;
4153
ad23b723 4154 n = sh->sh_size / sh->sh_entsize;
166750bc 4155 pr_debug("looking for externs among %d symbols...\n", n);
88a82120 4156
166750bc 4157 for (i = 0; i < n; i++) {
ad23b723 4158 Elf64_Sym *sym = elf_sym_by_idx(obj, i);
166750bc 4159
ad23b723 4160 if (!sym)
166750bc 4161 return -LIBBPF_ERRNO__FORMAT;
ad23b723 4162 if (!sym_is_extern(sym))
166750bc 4163 continue;
ad23b723 4164 ext_name = elf_sym_str(obj, sym->st_name);
166750bc
AN
4165 if (!ext_name || !ext_name[0])
4166 continue;
4167
4168 ext = obj->externs;
029258d7 4169 ext = libbpf_reallocarray(ext, obj->nr_extern + 1, sizeof(*ext));
166750bc
AN
4170 if (!ext)
4171 return -ENOMEM;
4172 obj->externs = ext;
4173 ext = &ext[obj->nr_extern];
4174 memset(ext, 0, sizeof(*ext));
4175 obj->nr_extern++;
4176
4177 ext->btf_id = find_extern_btf_id(obj->btf, ext_name);
4178 if (ext->btf_id <= 0) {
4179 pr_warn("failed to find BTF for extern '%s': %d\n",
4180 ext_name, ext->btf_id);
4181 return ext->btf_id;
4182 }
4183 t = btf__type_by_id(obj->btf, ext->btf_id);
4184 ext->name = btf__name_by_offset(obj->btf, t->name_off);
4185 ext->sym_idx = i;
ad23b723 4186 ext->is_weak = ELF64_ST_BIND(sym->st_info) == STB_WEAK;
2e33efe3 4187
5964a223
DM
4188 ext_essent_len = bpf_core_essential_name_len(ext->name);
4189 ext->essent_name = NULL;
4190 if (ext_essent_len != strlen(ext->name)) {
4191 ext->essent_name = strndup(ext->name, ext_essent_len);
4192 if (!ext->essent_name)
4193 return -ENOMEM;
4194 }
4195
2e33efe3
AN
4196 ext->sec_btf_id = find_extern_sec_btf_id(obj->btf, ext->btf_id);
4197 if (ext->sec_btf_id <= 0) {
4198 pr_warn("failed to find BTF for extern '%s' [%d] section: %d\n",
4199 ext_name, ext->btf_id, ext->sec_btf_id);
4200 return ext->sec_btf_id;
166750bc 4201 }
2e33efe3
AN
4202 sec = (void *)btf__type_by_id(obj->btf, ext->sec_btf_id);
4203 sec_name = btf__name_by_offset(obj->btf, sec->name_off);
4204
4205 if (strcmp(sec_name, KCONFIG_SEC) == 0) {
5bd022ec
MKL
4206 if (btf_is_func(t)) {
4207 pr_warn("extern function %s is unsupported under %s section\n",
4208 ext->name, KCONFIG_SEC);
4209 return -ENOTSUP;
4210 }
2e33efe3
AN
4211 kcfg_sec = sec;
4212 ext->type = EXT_KCFG;
4213 ext->kcfg.sz = btf__resolve_size(obj->btf, t->type);
4214 if (ext->kcfg.sz <= 0) {
4215 pr_warn("failed to resolve size of extern (kcfg) '%s': %d\n",
4216 ext_name, ext->kcfg.sz);
4217 return ext->kcfg.sz;
4218 }
4219 ext->kcfg.align = btf__align_of(obj->btf, t->type);
4220 if (ext->kcfg.align <= 0) {
4221 pr_warn("failed to determine alignment of extern (kcfg) '%s': %d\n",
4222 ext_name, ext->kcfg.align);
4223 return -EINVAL;
4224 }
4225 ext->kcfg.type = find_kcfg_type(obj->btf, t->type,
e3ba8e4e 4226 &ext->kcfg.is_signed);
2e33efe3 4227 if (ext->kcfg.type == KCFG_UNKNOWN) {
55d00c37 4228 pr_warn("extern (kcfg) '%s': type is unsupported\n", ext_name);
2e33efe3
AN
4229 return -ENOTSUP;
4230 }
1c0c7074 4231 } else if (strcmp(sec_name, KSYMS_SEC) == 0) {
1c0c7074
AN
4232 ksym_sec = sec;
4233 ext->type = EXT_KSYM;
d370bbe1
HL
4234 skip_mods_and_typedefs(obj->btf, t->type,
4235 &ext->ksym.type_id);
2e33efe3
AN
4236 } else {
4237 pr_warn("unrecognized extern section '%s'\n", sec_name);
166750bc
AN
4238 return -ENOTSUP;
4239 }
4240 }
4241 pr_debug("collected %d externs total\n", obj->nr_extern);
4242
4243 if (!obj->nr_extern)
4244 return 0;
4245
2e33efe3 4246 /* sort externs by type, for kcfg ones also by (align, size, name) */
166750bc 4247 qsort(obj->externs, obj->nr_extern, sizeof(*ext), cmp_externs);
166750bc 4248
1c0c7074
AN
4249 /* for .ksyms section, we need to turn all externs into allocated
4250 * variables in BTF to pass kernel verification; we do this by
4251 * pretending that each extern is a 8-byte variable
4252 */
4253 if (ksym_sec) {
4254 /* find existing 4-byte integer type in BTF to use for fake
4255 * extern variables in DATASEC
4256 */
4257 int int_btf_id = find_int_btf_id(obj->btf);
5bd022ec
MKL
4258 /* For extern function, a dummy_var added earlier
4259 * will be used to replace the vs->type and
4260 * its name string will be used to refill
4261 * the missing param's name.
4262 */
4263 const struct btf_type *dummy_var;
1c0c7074 4264
5bd022ec 4265 dummy_var = btf__type_by_id(obj->btf, dummy_var_btf_id);
1c0c7074
AN
4266 for (i = 0; i < obj->nr_extern; i++) {
4267 ext = &obj->externs[i];
4268 if (ext->type != EXT_KSYM)
4269 continue;
4270 pr_debug("extern (ksym) #%d: symbol %d, name %s\n",
4271 i, ext->sym_idx, ext->name);
4272 }
4273
4274 sec = ksym_sec;
4275 n = btf_vlen(sec);
4276 for (i = 0, off = 0; i < n; i++, off += sizeof(int)) {
4277 struct btf_var_secinfo *vs = btf_var_secinfos(sec) + i;
4278 struct btf_type *vt;
4279
4280 vt = (void *)btf__type_by_id(obj->btf, vs->type);
4281 ext_name = btf__name_by_offset(obj->btf, vt->name_off);
4282 ext = find_extern_by_name(obj, ext_name);
4283 if (!ext) {
5bd022ec
MKL
4284 pr_warn("failed to find extern definition for BTF %s '%s'\n",
4285 btf_kind_str(vt), ext_name);
1c0c7074
AN
4286 return -ESRCH;
4287 }
5bd022ec
MKL
4288 if (btf_is_func(vt)) {
4289 const struct btf_type *func_proto;
4290 struct btf_param *param;
4291 int j;
4292
4293 func_proto = btf__type_by_id(obj->btf,
4294 vt->type);
4295 param = btf_params(func_proto);
4296 /* Reuse the dummy_var string if the
4297 * func proto does not have param name.
4298 */
4299 for (j = 0; j < btf_vlen(func_proto); j++)
4300 if (param[j].type && !param[j].name_off)
4301 param[j].name_off =
4302 dummy_var->name_off;
4303 vs->type = dummy_var_btf_id;
4304 vt->info &= ~0xffff;
4305 vt->info |= BTF_FUNC_GLOBAL;
4306 } else {
4307 btf_var(vt)->linkage = BTF_VAR_GLOBAL_ALLOCATED;
4308 vt->type = int_btf_id;
4309 }
1c0c7074
AN
4310 vs->offset = off;
4311 vs->size = sizeof(int);
4312 }
4313 sec->size = off;
4314 }
4315
2e33efe3
AN
4316 if (kcfg_sec) {
4317 sec = kcfg_sec;
4318 /* for kcfg externs calculate their offsets within a .kconfig map */
4319 off = 0;
4320 for (i = 0; i < obj->nr_extern; i++) {
4321 ext = &obj->externs[i];
4322 if (ext->type != EXT_KCFG)
4323 continue;
166750bc 4324
2e33efe3
AN
4325 ext->kcfg.data_off = roundup(off, ext->kcfg.align);
4326 off = ext->kcfg.data_off + ext->kcfg.sz;
1c0c7074 4327 pr_debug("extern (kcfg) #%d: symbol %d, off %u, name %s\n",
2e33efe3
AN
4328 i, ext->sym_idx, ext->kcfg.data_off, ext->name);
4329 }
4330 sec->size = off;
4331 n = btf_vlen(sec);
4332 for (i = 0; i < n; i++) {
4333 struct btf_var_secinfo *vs = btf_var_secinfos(sec) + i;
4334
4335 t = btf__type_by_id(obj->btf, vs->type);
4336 ext_name = btf__name_by_offset(obj->btf, t->name_off);
4337 ext = find_extern_by_name(obj, ext_name);
4338 if (!ext) {
4339 pr_warn("failed to find extern definition for BTF var '%s'\n",
4340 ext_name);
4341 return -ESRCH;
4342 }
4343 btf_var(t)->linkage = BTF_VAR_GLOBAL_ALLOCATED;
4344 vs->offset = ext->kcfg.data_off;
166750bc 4345 }
166750bc 4346 }
166750bc
AN
4347 return 0;
4348}
4349
bd054102
AN
4350static bool prog_is_subprog(const struct bpf_object *obj, const struct bpf_program *prog)
4351{
197afc63 4352 return prog->sec_idx == obj->efile.text_shndx && obj->nr_programs > 1;
c3c55696
AN
4353}
4354
01af3bf0
AN
4355struct bpf_program *
4356bpf_object__find_program_by_name(const struct bpf_object *obj,
4357 const char *name)
4358{
4359 struct bpf_program *prog;
4360
4361 bpf_object__for_each_program(prog, obj) {
c3c55696
AN
4362 if (prog_is_subprog(obj, prog))
4363 continue;
01af3bf0
AN
4364 if (!strcmp(prog->name, name))
4365 return prog;
4366 }
e9fc3ce9 4367 return errno = ENOENT, NULL;
01af3bf0
AN
4368}
4369
d859900c
DB
4370static bool bpf_object__shndx_is_data(const struct bpf_object *obj,
4371 int shndx)
4372{
25bbbd7a
AN
4373 switch (obj->efile.secs[shndx].sec_type) {
4374 case SEC_BSS:
4375 case SEC_DATA:
4376 case SEC_RODATA:
4377 return true;
4378 default:
4379 return false;
4380 }
d859900c
DB
4381}
4382
4383static bool bpf_object__shndx_is_maps(const struct bpf_object *obj,
4384 int shndx)
4385{
e19db676 4386 return shndx == obj->efile.btf_maps_shndx;
d859900c
DB
4387}
4388
d859900c
DB
4389static enum libbpf_map_type
4390bpf_object__section_to_libbpf_map_type(const struct bpf_object *obj, int shndx)
4391{
25bbbd7a
AN
4392 if (shndx == obj->efile.symbols_shndx)
4393 return LIBBPF_MAP_KCONFIG;
4394
4395 switch (obj->efile.secs[shndx].sec_type) {
4396 case SEC_BSS:
d859900c 4397 return LIBBPF_MAP_BSS;
25bbbd7a
AN
4398 case SEC_DATA:
4399 return LIBBPF_MAP_DATA;
4400 case SEC_RODATA:
d859900c 4401 return LIBBPF_MAP_RODATA;
25bbbd7a 4402 default:
d859900c 4403 return LIBBPF_MAP_UNSPEC;
25bbbd7a 4404 }
d859900c
DB
4405}
4406
1f8e2bcb
AN
4407static int bpf_program__record_reloc(struct bpf_program *prog,
4408 struct reloc_desc *reloc_desc,
9c0f8cbd 4409 __u32 insn_idx, const char *sym_name,
ad23b723 4410 const Elf64_Sym *sym, const Elf64_Rel *rel)
1f8e2bcb
AN
4411{
4412 struct bpf_insn *insn = &prog->insns[insn_idx];
4413 size_t map_idx, nr_maps = prog->obj->nr_maps;
4414 struct bpf_object *obj = prog->obj;
4415 __u32 shdr_idx = sym->st_shndx;
4416 enum libbpf_map_type type;
9c0f8cbd 4417 const char *sym_sec_name;
1f8e2bcb
AN
4418 struct bpf_map *map;
4419
aa0b8d43 4420 if (!is_call_insn(insn) && !is_ldimm64_insn(insn)) {
9c0f8cbd
AN
4421 pr_warn("prog '%s': invalid relo against '%s' for insns[%d].code 0x%x\n",
4422 prog->name, sym_name, insn_idx, insn->code);
1f8e2bcb
AN
4423 return -LIBBPF_ERRNO__RELOC;
4424 }
166750bc
AN
4425
4426 if (sym_is_extern(sym)) {
ad23b723 4427 int sym_idx = ELF64_R_SYM(rel->r_info);
166750bc
AN
4428 int i, n = obj->nr_extern;
4429 struct extern_desc *ext;
4430
4431 for (i = 0; i < n; i++) {
4432 ext = &obj->externs[i];
4433 if (ext->sym_idx == sym_idx)
4434 break;
4435 }
4436 if (i >= n) {
9c0f8cbd
AN
4437 pr_warn("prog '%s': extern relo failed to find extern for '%s' (%d)\n",
4438 prog->name, sym_name, sym_idx);
166750bc
AN
4439 return -LIBBPF_ERRNO__RELOC;
4440 }
9c0f8cbd
AN
4441 pr_debug("prog '%s': found extern #%d '%s' (sym %d) for insn #%u\n",
4442 prog->name, i, ext->name, ext->sym_idx, insn_idx);
5bd022ec 4443 if (insn->code == (BPF_JMP | BPF_CALL))
a18f7214 4444 reloc_desc->type = RELO_EXTERN_CALL;
5bd022ec 4445 else
a18f7214 4446 reloc_desc->type = RELO_EXTERN_LD64;
166750bc 4447 reloc_desc->insn_idx = insn_idx;
3055ddd6 4448 reloc_desc->ext_idx = i;
166750bc
AN
4449 return 0;
4450 }
4451
aa0b8d43
MKL
4452 /* sub-program call relocation */
4453 if (is_call_insn(insn)) {
4454 if (insn->src_reg != BPF_PSEUDO_CALL) {
4455 pr_warn("prog '%s': incorrect bpf_call opcode\n", prog->name);
4456 return -LIBBPF_ERRNO__RELOC;
4457 }
4458 /* text_shndx can be 0, if no default "main" program exists */
4459 if (!shdr_idx || shdr_idx != obj->efile.text_shndx) {
4460 sym_sec_name = elf_sec_name(obj, elf_sec_by_idx(obj, shdr_idx));
4461 pr_warn("prog '%s': bad call relo against '%s' in section '%s'\n",
4462 prog->name, sym_name, sym_sec_name);
4463 return -LIBBPF_ERRNO__RELOC;
4464 }
4465 if (sym->st_value % BPF_INSN_SZ) {
4466 pr_warn("prog '%s': bad call relo against '%s' at offset %zu\n",
4467 prog->name, sym_name, (size_t)sym->st_value);
4468 return -LIBBPF_ERRNO__RELOC;
4469 }
4470 reloc_desc->type = RELO_CALL;
4471 reloc_desc->insn_idx = insn_idx;
4472 reloc_desc->sym_off = sym->st_value;
4473 return 0;
4474 }
4475
1f8e2bcb 4476 if (!shdr_idx || shdr_idx >= SHN_LORESERVE) {
9c0f8cbd
AN
4477 pr_warn("prog '%s': invalid relo against '%s' in special section 0x%x; forgot to initialize global var?..\n",
4478 prog->name, sym_name, shdr_idx);
1f8e2bcb
AN
4479 return -LIBBPF_ERRNO__RELOC;
4480 }
4481
53eddb5e
YS
4482 /* loading subprog addresses */
4483 if (sym_is_subprog(sym, obj->efile.text_shndx)) {
4484 /* global_func: sym->st_value = offset in the section, insn->imm = 0.
4485 * local_func: sym->st_value = 0, insn->imm = offset in the section.
4486 */
4487 if ((sym->st_value % BPF_INSN_SZ) || (insn->imm % BPF_INSN_SZ)) {
4488 pr_warn("prog '%s': bad subprog addr relo against '%s' at offset %zu+%d\n",
4489 prog->name, sym_name, (size_t)sym->st_value, insn->imm);
4490 return -LIBBPF_ERRNO__RELOC;
4491 }
4492
4493 reloc_desc->type = RELO_SUBPROG_ADDR;
4494 reloc_desc->insn_idx = insn_idx;
4495 reloc_desc->sym_off = sym->st_value;
4496 return 0;
4497 }
4498
1f8e2bcb 4499 type = bpf_object__section_to_libbpf_map_type(obj, shdr_idx);
9c0f8cbd 4500 sym_sec_name = elf_sec_name(obj, elf_sec_by_idx(obj, shdr_idx));
1f8e2bcb 4501
2e7ba4f8
AN
4502 /* arena data relocation */
4503 if (shdr_idx == obj->efile.arena_data_shndx) {
4504 reloc_desc->type = RELO_DATA;
4505 reloc_desc->insn_idx = insn_idx;
4506 reloc_desc->map_idx = obj->arena_map - obj->maps;
4507 reloc_desc->sym_off = sym->st_value;
4508 return 0;
4509 }
4510
1f8e2bcb
AN
4511 /* generic map reference relocation */
4512 if (type == LIBBPF_MAP_UNSPEC) {
4513 if (!bpf_object__shndx_is_maps(obj, shdr_idx)) {
9c0f8cbd
AN
4514 pr_warn("prog '%s': bad map relo against '%s' in section '%s'\n",
4515 prog->name, sym_name, sym_sec_name);
1f8e2bcb
AN
4516 return -LIBBPF_ERRNO__RELOC;
4517 }
4518 for (map_idx = 0; map_idx < nr_maps; map_idx++) {
4519 map = &obj->maps[map_idx];
4520 if (map->libbpf_type != type ||
4521 map->sec_idx != sym->st_shndx ||
4522 map->sec_offset != sym->st_value)
4523 continue;
9c0f8cbd
AN
4524 pr_debug("prog '%s': found map %zd (%s, sec %d, off %zu) for insn #%u\n",
4525 prog->name, map_idx, map->name, map->sec_idx,
1f8e2bcb
AN
4526 map->sec_offset, insn_idx);
4527 break;
4528 }
4529 if (map_idx >= nr_maps) {
9c0f8cbd
AN
4530 pr_warn("prog '%s': map relo failed to find map for section '%s', off %zu\n",
4531 prog->name, sym_sec_name, (size_t)sym->st_value);
1f8e2bcb
AN
4532 return -LIBBPF_ERRNO__RELOC;
4533 }
4534 reloc_desc->type = RELO_LD64;
4535 reloc_desc->insn_idx = insn_idx;
4536 reloc_desc->map_idx = map_idx;
53f8dd43 4537 reloc_desc->sym_off = 0; /* sym->st_value determines map_idx */
1f8e2bcb
AN
4538 return 0;
4539 }
4540
4541 /* global data map relocation */
4542 if (!bpf_object__shndx_is_data(obj, shdr_idx)) {
9c0f8cbd
AN
4543 pr_warn("prog '%s': bad data relo against section '%s'\n",
4544 prog->name, sym_sec_name);
1f8e2bcb 4545 return -LIBBPF_ERRNO__RELOC;
1f8e2bcb 4546 }
1f8e2bcb
AN
4547 for (map_idx = 0; map_idx < nr_maps; map_idx++) {
4548 map = &obj->maps[map_idx];
25bbbd7a 4549 if (map->libbpf_type != type || map->sec_idx != sym->st_shndx)
1f8e2bcb 4550 continue;
9c0f8cbd
AN
4551 pr_debug("prog '%s': found data map %zd (%s, sec %d, off %zu) for insn %u\n",
4552 prog->name, map_idx, map->name, map->sec_idx,
4553 map->sec_offset, insn_idx);
1f8e2bcb
AN
4554 break;
4555 }
4556 if (map_idx >= nr_maps) {
9c0f8cbd
AN
4557 pr_warn("prog '%s': data relo failed to find map for section '%s'\n",
4558 prog->name, sym_sec_name);
1f8e2bcb
AN
4559 return -LIBBPF_ERRNO__RELOC;
4560 }
4561
4562 reloc_desc->type = RELO_DATA;
4563 reloc_desc->insn_idx = insn_idx;
4564 reloc_desc->map_idx = map_idx;
53f8dd43 4565 reloc_desc->sym_off = sym->st_value;
1f8e2bcb
AN
4566 return 0;
4567}
4568
db2b8b06
AN
4569static bool prog_contains_insn(const struct bpf_program *prog, size_t insn_idx)
4570{
4571 return insn_idx >= prog->sec_insn_off &&
4572 insn_idx < prog->sec_insn_off + prog->sec_insn_cnt;
4573}
4574
4575static struct bpf_program *find_prog_by_sec_insn(const struct bpf_object *obj,
4576 size_t sec_idx, size_t insn_idx)
4577{
4578 int l = 0, r = obj->nr_programs - 1, m;
4579 struct bpf_program *prog;
4580
d0d382f9
SHY
4581 if (!obj->nr_programs)
4582 return NULL;
4583
db2b8b06
AN
4584 while (l < r) {
4585 m = l + (r - l + 1) / 2;
4586 prog = &obj->programs[m];
4587
4588 if (prog->sec_idx < sec_idx ||
4589 (prog->sec_idx == sec_idx && prog->sec_insn_off <= insn_idx))
4590 l = m;
4591 else
4592 r = m - 1;
4593 }
4594 /* matching program could be at index l, but it still might be the
4595 * wrong one, so we need to double check conditions for the last time
4596 */
4597 prog = &obj->programs[l];
4598 if (prog->sec_idx == sec_idx && prog_contains_insn(prog, insn_idx))
4599 return prog;
4600 return NULL;
4601}
4602
34090915 4603static int
ad23b723 4604bpf_object__collect_prog_relos(struct bpf_object *obj, Elf64_Shdr *shdr, Elf_Data *data)
34090915 4605{
9c0f8cbd 4606 const char *relo_sec_name, *sec_name;
b7332d28 4607 size_t sec_idx = shdr->sh_info, sym_idx;
c3c55696
AN
4608 struct bpf_program *prog;
4609 struct reloc_desc *relos;
1f8e2bcb 4610 int err, i, nrels;
c3c55696
AN
4611 const char *sym_name;
4612 __u32 insn_idx;
6245947c
AN
4613 Elf_Scn *scn;
4614 Elf_Data *scn_data;
ad23b723
AN
4615 Elf64_Sym *sym;
4616 Elf64_Rel *rel;
34090915 4617
b7332d28
AN
4618 if (sec_idx >= obj->efile.sec_cnt)
4619 return -EINVAL;
4620
6245947c
AN
4621 scn = elf_sec_by_idx(obj, sec_idx);
4622 scn_data = elf_sec_data(obj, scn);
fc3a5534
MZ
4623 if (!scn_data)
4624 return -LIBBPF_ERRNO__FORMAT;
6245947c 4625
9c0f8cbd 4626 relo_sec_name = elf_sec_str(obj, shdr->sh_name);
6245947c 4627 sec_name = elf_sec_name(obj, scn);
9c0f8cbd
AN
4628 if (!relo_sec_name || !sec_name)
4629 return -EINVAL;
4630
4631 pr_debug("sec '%s': collecting relocation for section(%zu) '%s'\n",
4632 relo_sec_name, sec_idx, sec_name);
34090915
WN
4633 nrels = shdr->sh_size / shdr->sh_entsize;
4634
34090915 4635 for (i = 0; i < nrels; i++) {
ad23b723
AN
4636 rel = elf_rel_by_idx(data, i);
4637 if (!rel) {
9c0f8cbd 4638 pr_warn("sec '%s': failed to get relo #%d\n", relo_sec_name, i);
6371ca3b 4639 return -LIBBPF_ERRNO__FORMAT;
34090915 4640 }
ad23b723 4641
b7332d28
AN
4642 sym_idx = ELF64_R_SYM(rel->r_info);
4643 sym = elf_sym_by_idx(obj, sym_idx);
ad23b723 4644 if (!sym) {
b7332d28
AN
4645 pr_warn("sec '%s': symbol #%zu not found for relo #%d\n",
4646 relo_sec_name, sym_idx, i);
4647 return -LIBBPF_ERRNO__FORMAT;
4648 }
4649
4650 if (sym->st_shndx >= obj->efile.sec_cnt) {
4651 pr_warn("sec '%s': corrupted symbol #%zu pointing to invalid section #%zu for relo #%d\n",
4652 relo_sec_name, sym_idx, (size_t)sym->st_shndx, i);
6371ca3b 4653 return -LIBBPF_ERRNO__FORMAT;
34090915 4654 }
6245947c 4655
ad23b723 4656 if (rel->r_offset % BPF_INSN_SZ || rel->r_offset >= scn_data->d_size) {
9c0f8cbd 4657 pr_warn("sec '%s': invalid offset 0x%zx for relo #%d\n",
b7332d28 4658 relo_sec_name, (size_t)rel->r_offset, i);
1f8e2bcb 4659 return -LIBBPF_ERRNO__FORMAT;
9c0f8cbd 4660 }
d859900c 4661
ad23b723 4662 insn_idx = rel->r_offset / BPF_INSN_SZ;
c3c55696
AN
4663 /* relocations against static functions are recorded as
4664 * relocations against the section that contains a function;
4665 * in such case, symbol will be STT_SECTION and sym.st_name
4666 * will point to empty string (0), so fetch section name
4667 * instead
4668 */
ad23b723
AN
4669 if (ELF64_ST_TYPE(sym->st_info) == STT_SECTION && sym->st_name == 0)
4670 sym_name = elf_sec_name(obj, elf_sec_by_idx(obj, sym->st_shndx));
c3c55696 4671 else
ad23b723 4672 sym_name = elf_sym_str(obj, sym->st_name);
c3c55696 4673 sym_name = sym_name ?: "<?";
d859900c 4674
9c0f8cbd
AN
4675 pr_debug("sec '%s': relo #%d: insn #%u against '%s'\n",
4676 relo_sec_name, i, insn_idx, sym_name);
666810e8 4677
c3c55696
AN
4678 prog = find_prog_by_sec_insn(obj, sec_idx, insn_idx);
4679 if (!prog) {
6245947c 4680 pr_debug("sec '%s': relo #%d: couldn't find program in section '%s' for insn #%u, probably overridden weak function, skipping...\n",
c3c55696 4681 relo_sec_name, i, sec_name, insn_idx);
6245947c 4682 continue;
c3c55696
AN
4683 }
4684
4685 relos = libbpf_reallocarray(prog->reloc_desc,
4686 prog->nr_reloc + 1, sizeof(*relos));
4687 if (!relos)
4688 return -ENOMEM;
4689 prog->reloc_desc = relos;
4690
4691 /* adjust insn_idx to local BPF program frame of reference */
4692 insn_idx -= prog->sec_insn_off;
4693 err = bpf_program__record_reloc(prog, &relos[prog->nr_reloc],
ad23b723 4694 insn_idx, sym_name, sym, rel);
1f8e2bcb
AN
4695 if (err)
4696 return err;
c3c55696
AN
4697
4698 prog->nr_reloc++;
34090915
WN
4699 }
4700 return 0;
4701}
4702
4fcac46c 4703static int map_fill_btf_type_info(struct bpf_object *obj, struct bpf_map *map)
8a138aed 4704{
aaf6886d 4705 int id;
8a138aed 4706
a8fee962
AN
4707 if (!obj->btf)
4708 return -ENOENT;
4709
590a0088
MKL
4710 /* if it's BTF-defined map, we don't need to search for type IDs.
4711 * For struct_ops map, it does not need btf_key_type_id and
4712 * btf_value_type_id.
4713 */
aaf6886d 4714 if (map->sec_idx == obj->efile.btf_maps_shndx || bpf_map__is_struct_ops(map))
abd29c93
AN
4715 return 0;
4716
aaf6886d
AN
4717 /*
4718 * LLVM annotates global data differently in BTF, that is,
4719 * only as '.data', '.bss' or '.rodata'.
4720 */
4721 if (!bpf_map__is_internal(map))
4722 return -ENOENT;
4723
4724 id = btf__find_by_name(obj->btf, map->real_name);
4725 if (id < 0)
4726 return id;
8a138aed 4727
aaf6886d
AN
4728 map->btf_key_type_id = 0;
4729 map->btf_value_type_id = id;
8a138aed
MKL
4730 return 0;
4731}
4732
97eb3138
MP
4733static int bpf_get_map_info_from_fdinfo(int fd, struct bpf_map_info *info)
4734{
4735 char file[PATH_MAX], buff[4096];
4736 FILE *fp;
4737 __u32 val;
4738 int err;
4739
4740 snprintf(file, sizeof(file), "/proc/%d/fdinfo/%d", getpid(), fd);
4741 memset(info, 0, sizeof(*info));
4742
59842c54 4743 fp = fopen(file, "re");
97eb3138
MP
4744 if (!fp) {
4745 err = -errno;
4746 pr_warn("failed to open %s: %d. No procfs support?\n", file,
4747 err);
4748 return err;
4749 }
4750
4751 while (fgets(buff, sizeof(buff), fp)) {
4752 if (sscanf(buff, "map_type:\t%u", &val) == 1)
4753 info->type = val;
4754 else if (sscanf(buff, "key_size:\t%u", &val) == 1)
4755 info->key_size = val;
4756 else if (sscanf(buff, "value_size:\t%u", &val) == 1)
4757 info->value_size = val;
4758 else if (sscanf(buff, "max_entries:\t%u", &val) == 1)
4759 info->max_entries = val;
4760 else if (sscanf(buff, "map_flags:\t%i", &val) == 1)
4761 info->map_flags = val;
4762 }
4763
4764 fclose(fp);
4765
4766 return 0;
4767}
4768
ec41817b
AN
4769bool bpf_map__autocreate(const struct bpf_map *map)
4770{
4771 return map->autocreate;
4772}
4773
4774int bpf_map__set_autocreate(struct bpf_map *map, bool autocreate)
4775{
4776 if (map->obj->loaded)
4777 return libbpf_err(-EBUSY);
4778
4779 map->autocreate = autocreate;
4780 return 0;
4781}
4782
26736eb9
JK
4783int bpf_map__reuse_fd(struct bpf_map *map, int fd)
4784{
813847a3 4785 struct bpf_map_info info;
bf3f0037 4786 __u32 len = sizeof(info), name_len;
26736eb9
JK
4787 int new_fd, err;
4788 char *new_name;
4789
813847a3 4790 memset(&info, 0, len);
629dfc66 4791 err = bpf_map_get_info_by_fd(fd, &info, &len);
97eb3138
MP
4792 if (err && errno == EINVAL)
4793 err = bpf_get_map_info_from_fdinfo(fd, &info);
26736eb9 4794 if (err)
e9fc3ce9 4795 return libbpf_err(err);
26736eb9 4796
bf3f0037
AW
4797 name_len = strlen(info.name);
4798 if (name_len == BPF_OBJ_NAME_LEN - 1 && strncmp(map->name, info.name, name_len) == 0)
4799 new_name = strdup(map->name);
4800 else
4801 new_name = strdup(info.name);
4802
26736eb9 4803 if (!new_name)
e9fc3ce9 4804 return libbpf_err(-errno);
26736eb9 4805
4aadd292
AN
4806 /*
4807 * Like dup(), but make sure new FD is >= 3 and has O_CLOEXEC set.
4808 * This is similar to what we do in ensure_good_fd(), but without
4809 * closing original FD.
4810 */
4811 new_fd = fcntl(fd, F_DUPFD_CLOEXEC, 3);
d1b4574a
THJ
4812 if (new_fd < 0) {
4813 err = -errno;
26736eb9 4814 goto err_free_new_name;
d1b4574a 4815 }
26736eb9 4816
dac645b9
AN
4817 err = reuse_fd(map->fd, new_fd);
4818 if (err)
4819 goto err_free_new_name;
4820
26736eb9
JK
4821 free(map->name);
4822
26736eb9
JK
4823 map->name = new_name;
4824 map->def.type = info.type;
4825 map->def.key_size = info.key_size;
4826 map->def.value_size = info.value_size;
4827 map->def.max_entries = info.max_entries;
4828 map->def.map_flags = info.map_flags;
4829 map->btf_key_type_id = info.btf_key_type_id;
4830 map->btf_value_type_id = info.btf_value_type_id;
ec6d5f47 4831 map->reused = true;
47512102 4832 map->map_extra = info.map_extra;
26736eb9
JK
4833
4834 return 0;
4835
26736eb9
JK
4836err_free_new_name:
4837 free(new_name);
e9fc3ce9 4838 return libbpf_err(err);
26736eb9
JK
4839}
4840
1bdb6c9a 4841__u32 bpf_map__max_entries(const struct bpf_map *map)
1a11a4c7 4842{
1bdb6c9a
AN
4843 return map->def.max_entries;
4844}
1a11a4c7 4845
b3278099
AN
4846struct bpf_map *bpf_map__inner_map(struct bpf_map *map)
4847{
4848 if (!bpf_map_type__is_map_in_map(map->def.type))
e9fc3ce9 4849 return errno = EINVAL, NULL;
b3278099
AN
4850
4851 return map->inner_map;
4852}
4853
1bdb6c9a
AN
4854int bpf_map__set_max_entries(struct bpf_map *map, __u32 max_entries)
4855{
597fbc46 4856 if (map->obj->loaded)
e9fc3ce9 4857 return libbpf_err(-EBUSY);
597fbc46 4858
1a11a4c7 4859 map->def.max_entries = max_entries;
597fbc46
AN
4860
4861 /* auto-adjust BPF ringbuf map max_entries to be a multiple of page size */
b66ccae0 4862 if (map_is_ringbuf(map))
597fbc46
AN
4863 map->def.max_entries = adjust_ringbuf_sz(map->def.max_entries);
4864
1a11a4c7
AI
4865 return 0;
4866}
4867
6b434b61
AN
4868static int bpf_object_prepare_token(struct bpf_object *obj)
4869{
4870 const char *bpffs_path;
4871 int bpffs_fd = -1, token_fd, err;
4872 bool mandatory;
4873 enum libbpf_print_level level;
4874
4875 /* token is explicitly prevented */
4876 if (obj->token_path && obj->token_path[0] == '\0') {
4877 pr_debug("object '%s': token is prevented, skipping...\n", obj->name);
4878 return 0;
4879 }
4880
4881 mandatory = obj->token_path != NULL;
4882 level = mandatory ? LIBBPF_WARN : LIBBPF_DEBUG;
4883
4884 bpffs_path = obj->token_path ?: BPF_FS_DEFAULT_PATH;
4885 bpffs_fd = open(bpffs_path, O_DIRECTORY, O_RDWR);
4886 if (bpffs_fd < 0) {
4887 err = -errno;
4888 __pr(level, "object '%s': failed (%d) to open BPF FS mount at '%s'%s\n",
4889 obj->name, err, bpffs_path,
4890 mandatory ? "" : ", skipping optional step...");
4891 return mandatory ? err : 0;
4892 }
4893
4894 token_fd = bpf_token_create(bpffs_fd, 0);
4895 close(bpffs_fd);
4896 if (token_fd < 0) {
4897 if (!mandatory && token_fd == -ENOENT) {
4898 pr_debug("object '%s': BPF FS at '%s' doesn't have BPF token delegation set up, skipping...\n",
4899 obj->name, bpffs_path);
4900 return 0;
4901 }
4902 __pr(level, "object '%s': failed (%d) to create BPF token from '%s'%s\n",
4903 obj->name, token_fd, bpffs_path,
4904 mandatory ? "" : ", skipping optional step...");
4905 return mandatory ? token_fd : 0;
4906 }
4907
4908 obj->feat_cache = calloc(1, sizeof(*obj->feat_cache));
4909 if (!obj->feat_cache) {
4910 close(token_fd);
4911 return -ENOMEM;
4912 }
4913
4914 obj->token_fd = token_fd;
4915 obj->feat_cache->token_fd = token_fd;
4916
4917 return 0;
4918}
4919
47eff617 4920static int
fd9eef1a 4921bpf_object__probe_loading(struct bpf_object *obj)
47eff617 4922{
47eff617
SF
4923 char *cp, errmsg[STRERR_BUFSIZE];
4924 struct bpf_insn insns[] = {
4925 BPF_MOV64_IMM(BPF_REG_0, 0),
4926 BPF_EXIT_INSN(),
4927 };
e32660ac 4928 int ret, insn_cnt = ARRAY_SIZE(insns);
6b434b61
AN
4929 LIBBPF_OPTS(bpf_prog_load_opts, opts,
4930 .token_fd = obj->token_fd,
4931 .prog_flags = obj->token_fd ? BPF_F_TOKEN_FD : 0,
4932 );
47eff617 4933
f9bceaa5
SF
4934 if (obj->gen_loader)
4935 return 0;
4936
e542f2c4
AN
4937 ret = bump_rlimit_memlock();
4938 if (ret)
4939 pr_warn("Failed to bump RLIMIT_MEMLOCK (err = %d), you might need to do it explicitly!\n", ret);
4940
47eff617 4941 /* make sure basic loading works */
6b434b61 4942 ret = bpf_prog_load(BPF_PROG_TYPE_SOCKET_FILTER, NULL, "GPL", insns, insn_cnt, &opts);
e32660ac 4943 if (ret < 0)
6b434b61 4944 ret = bpf_prog_load(BPF_PROG_TYPE_TRACEPOINT, NULL, "GPL", insns, insn_cnt, &opts);
47eff617 4945 if (ret < 0) {
fd9eef1a
EC
4946 ret = errno;
4947 cp = libbpf_strerror_r(ret, errmsg, sizeof(errmsg));
4948 pr_warn("Error in %s():%s(%d). Couldn't load trivial BPF "
4949 "program. Make sure your kernel supports BPF "
4950 "(CONFIG_BPF_SYSCALL=y) and/or that RLIMIT_MEMLOCK is "
4951 "set to big enough value.\n", __func__, cp, ret);
4952 return -ret;
47eff617
SF
4953 }
4954 close(ret);
4955
fd9eef1a
EC
4956 return 0;
4957}
4958
d6dd1d49
AN
4959bool kernel_supports(const struct bpf_object *obj, enum kern_feature_id feat_id)
4960{
8263b338 4961 if (obj->gen_loader)
d6dd1d49
AN
4962 /* To generate loader program assume the latest kernel
4963 * to avoid doing extra prog_load, map_create syscalls.
4964 */
4965 return true;
4966
6b434b61
AN
4967 if (obj->token_fd)
4968 return feat_supported(obj->feat_cache, feat_id);
4969
d6dd1d49
AN
4970 return feat_supported(NULL, feat_id);
4971}
4972
57a00f41
THJ
4973static bool map_is_reuse_compat(const struct bpf_map *map, int map_fd)
4974{
813847a3 4975 struct bpf_map_info map_info;
57a00f41 4976 char msg[STRERR_BUFSIZE];
813847a3 4977 __u32 map_info_len = sizeof(map_info);
97eb3138 4978 int err;
57a00f41 4979
813847a3 4980 memset(&map_info, 0, map_info_len);
629dfc66 4981 err = bpf_map_get_info_by_fd(map_fd, &map_info, &map_info_len);
97eb3138
MP
4982 if (err && errno == EINVAL)
4983 err = bpf_get_map_info_from_fdinfo(map_fd, &map_info);
4984 if (err) {
4985 pr_warn("failed to get map info for map FD %d: %s\n", map_fd,
4986 libbpf_strerror_r(errno, msg, sizeof(msg)));
57a00f41
THJ
4987 return false;
4988 }
4989
4990 return (map_info.type == map->def.type &&
4991 map_info.key_size == map->def.key_size &&
4992 map_info.value_size == map->def.value_size &&
4993 map_info.max_entries == map->def.max_entries &&
47512102
JK
4994 map_info.map_flags == map->def.map_flags &&
4995 map_info.map_extra == map->map_extra);
57a00f41
THJ
4996}
4997
4998static int
4999bpf_object__reuse_map(struct bpf_map *map)
5000{
5001 char *cp, errmsg[STRERR_BUFSIZE];
5002 int err, pin_fd;
5003
5004 pin_fd = bpf_obj_get(map->pin_path);
5005 if (pin_fd < 0) {
5006 err = -errno;
5007 if (err == -ENOENT) {
5008 pr_debug("found no pinned map to reuse at '%s'\n",
5009 map->pin_path);
5010 return 0;
5011 }
5012
5013 cp = libbpf_strerror_r(-err, errmsg, sizeof(errmsg));
5014 pr_warn("couldn't retrieve pinned map '%s': %s\n",
5015 map->pin_path, cp);
5016 return err;
5017 }
5018
5019 if (!map_is_reuse_compat(map, pin_fd)) {
5020 pr_warn("couldn't reuse pinned map at '%s': parameter mismatch\n",
5021 map->pin_path);
5022 close(pin_fd);
5023 return -EINVAL;
5024 }
5025
5026 err = bpf_map__reuse_fd(map, pin_fd);
d0f325c3 5027 close(pin_fd);
e3ba8e4e 5028 if (err)
57a00f41 5029 return err;
e3ba8e4e 5030
57a00f41
THJ
5031 map->pinned = true;
5032 pr_debug("reused pinned map at '%s'\n", map->pin_path);
5033
5034 return 0;
5035}
5036
d859900c
DB
5037static int
5038bpf_object__populate_internal_map(struct bpf_object *obj, struct bpf_map *map)
5039{
166750bc 5040 enum libbpf_map_type map_type = map->libbpf_type;
d859900c
DB
5041 char *cp, errmsg[STRERR_BUFSIZE];
5042 int err, zero = 0;
d859900c 5043
67234743
AS
5044 if (obj->gen_loader) {
5045 bpf_gen__map_update_elem(obj->gen_loader, map - obj->maps,
5046 map->mmaped, map->def.value_size);
5047 if (map_type == LIBBPF_MAP_RODATA || map_type == LIBBPF_MAP_KCONFIG)
5048 bpf_gen__map_freeze(obj->gen_loader, map - obj->maps);
5049 return 0;
5050 }
2e7ba4f8 5051
eba9c5f4
AN
5052 err = bpf_map_update_elem(map->fd, &zero, map->mmaped, 0);
5053 if (err) {
5054 err = -errno;
5055 cp = libbpf_strerror_r(err, errmsg, sizeof(errmsg));
5056 pr_warn("Error setting initial map(%s) contents: %s\n",
5057 map->name, cp);
5058 return err;
5059 }
d859900c 5060
81bfdd08
AN
5061 /* Freeze .rodata and .kconfig map as read-only from syscall side. */
5062 if (map_type == LIBBPF_MAP_RODATA || map_type == LIBBPF_MAP_KCONFIG) {
d859900c
DB
5063 err = bpf_map_freeze(map->fd);
5064 if (err) {
eba9c5f4
AN
5065 err = -errno;
5066 cp = libbpf_strerror_r(err, errmsg, sizeof(errmsg));
be18010e
KW
5067 pr_warn("Error freezing map(%s) as read-only: %s\n",
5068 map->name, cp);
eba9c5f4 5069 return err;
d859900c
DB
5070 }
5071 }
eba9c5f4 5072 return 0;
d859900c
DB
5073}
5074
2d39d7c5
AN
5075static void bpf_map__destroy(struct bpf_map *map);
5076
f08c18e0
AN
5077static bool map_is_created(const struct bpf_map *map)
5078{
5079 return map->obj->loaded || map->reused;
5080}
5081
67234743 5082static int bpf_object__create_map(struct bpf_object *obj, struct bpf_map *map, bool is_inner)
2d39d7c5 5083{
992c4225 5084 LIBBPF_OPTS(bpf_map_create_opts, create_attr);
2d39d7c5 5085 struct bpf_map_def *def = &map->def;
992c4225 5086 const char *map_name = NULL;
dac645b9 5087 int err = 0, map_fd;
2d39d7c5 5088
9ca1f56a 5089 if (kernel_supports(obj, FEAT_PROG_NAME))
992c4225 5090 map_name = map->name;
2d39d7c5 5091 create_attr.map_ifindex = map->map_ifindex;
2d39d7c5 5092 create_attr.map_flags = def->map_flags;
1bdb6c9a 5093 create_attr.numa_node = map->numa_node;
47512102 5094 create_attr.map_extra = map->map_extra;
6b434b61
AN
5095 create_attr.token_fd = obj->token_fd;
5096 if (obj->token_fd)
5097 create_attr.map_flags |= BPF_F_TOKEN_FD;
2d39d7c5 5098
9e926acd 5099 if (bpf_map__is_struct_ops(map)) {
992c4225 5100 create_attr.btf_vmlinux_value_type_id = map->btf_vmlinux_value_type_id;
9e926acd
KFL
5101 if (map->mod_btf_fd >= 0) {
5102 create_attr.value_type_btf_obj_fd = map->mod_btf_fd;
5103 create_attr.map_flags |= BPF_F_VTYPE_BTF_OBJ_FD;
5104 }
5105 }
2d39d7c5 5106
262cfb74 5107 if (obj->btf && btf__fd(obj->btf) >= 0) {
2d39d7c5
AN
5108 create_attr.btf_fd = btf__fd(obj->btf);
5109 create_attr.btf_key_type_id = map->btf_key_type_id;
5110 create_attr.btf_value_type_id = map->btf_value_type_id;
5111 }
5112
646f02ff
AN
5113 if (bpf_map_type__is_map_in_map(def->type)) {
5114 if (map->inner_map) {
f04deb90
AG
5115 err = map_set_def_max_entries(map->inner_map);
5116 if (err)
5117 return err;
67234743 5118 err = bpf_object__create_map(obj, map->inner_map, true);
646f02ff
AN
5119 if (err) {
5120 pr_warn("map '%s': failed to create inner map: %d\n",
5121 map->name, err);
5122 return err;
5123 }
f08c18e0 5124 map->inner_map_fd = map->inner_map->fd;
646f02ff
AN
5125 }
5126 if (map->inner_map_fd >= 0)
5127 create_attr.inner_map_fd = map->inner_map_fd;
5128 }
5129
f7310523
HC
5130 switch (def->type) {
5131 case BPF_MAP_TYPE_PERF_EVENT_ARRAY:
5132 case BPF_MAP_TYPE_CGROUP_ARRAY:
5133 case BPF_MAP_TYPE_STACK_TRACE:
5134 case BPF_MAP_TYPE_ARRAY_OF_MAPS:
5135 case BPF_MAP_TYPE_HASH_OF_MAPS:
5136 case BPF_MAP_TYPE_DEVMAP:
5137 case BPF_MAP_TYPE_DEVMAP_HASH:
5138 case BPF_MAP_TYPE_CPUMAP:
5139 case BPF_MAP_TYPE_XSKMAP:
5140 case BPF_MAP_TYPE_SOCKMAP:
5141 case BPF_MAP_TYPE_SOCKHASH:
5142 case BPF_MAP_TYPE_QUEUE:
5143 case BPF_MAP_TYPE_STACK:
79ff13e9 5144 case BPF_MAP_TYPE_ARENA:
f7310523
HC
5145 create_attr.btf_fd = 0;
5146 create_attr.btf_key_type_id = 0;
5147 create_attr.btf_value_type_id = 0;
5148 map->btf_key_type_id = 0;
5149 map->btf_value_type_id = 0;
3644d285
KFL
5150 break;
5151 case BPF_MAP_TYPE_STRUCT_OPS:
5152 create_attr.btf_value_type_id = 0;
5153 break;
f7310523
HC
5154 default:
5155 break;
5156 }
5157
67234743 5158 if (obj->gen_loader) {
992c4225 5159 bpf_gen__map_create(obj->gen_loader, def->type, map_name,
a4fbfdd7 5160 def->key_size, def->value_size, def->max_entries,
992c4225 5161 &create_attr, is_inner ? -1 : map - obj->maps);
dac645b9
AN
5162 /* We keep pretenting we have valid FD to pass various fd >= 0
5163 * checks by just keeping original placeholder FDs in place.
5164 * See bpf_object__add_map() comment.
5165 * This placeholder fd will not be used with any syscall and
5166 * will be reset to -1 eventually.
67234743 5167 */
dac645b9 5168 map_fd = map->fd;
67234743 5169 } else {
dac645b9
AN
5170 map_fd = bpf_map_create(def->type, map_name,
5171 def->key_size, def->value_size,
5172 def->max_entries, &create_attr);
67234743 5173 }
dac645b9 5174 if (map_fd < 0 && (create_attr.btf_key_type_id || create_attr.btf_value_type_id)) {
2d39d7c5 5175 char *cp, errmsg[STRERR_BUFSIZE];
2d39d7c5 5176
a21ab4c5 5177 err = -errno;
2d39d7c5
AN
5178 cp = libbpf_strerror_r(err, errmsg, sizeof(errmsg));
5179 pr_warn("Error in bpf_create_map_xattr(%s):%s(%d). Retrying without BTF.\n",
5180 map->name, cp, err);
5181 create_attr.btf_fd = 0;
5182 create_attr.btf_key_type_id = 0;
5183 create_attr.btf_value_type_id = 0;
5184 map->btf_key_type_id = 0;
5185 map->btf_value_type_id = 0;
dac645b9
AN
5186 map_fd = bpf_map_create(def->type, map_name,
5187 def->key_size, def->value_size,
5188 def->max_entries, &create_attr);
2d39d7c5
AN
5189 }
5190
646f02ff 5191 if (bpf_map_type__is_map_in_map(def->type) && map->inner_map) {
67234743
AS
5192 if (obj->gen_loader)
5193 map->inner_map->fd = -1;
646f02ff
AN
5194 bpf_map__destroy(map->inner_map);
5195 zfree(&map->inner_map);
5196 }
5197
dac645b9
AN
5198 if (map_fd < 0)
5199 return map_fd;
5200
5201 /* obj->gen_loader case, prevent reuse_fd() from closing map_fd */
5202 if (map->fd == map_fd)
5203 return 0;
5204
5205 /* Keep placeholder FD value but now point it to the BPF map object.
5206 * This way everything that relied on this map's FD (e.g., relocated
5207 * ldimm64 instructions) will stay valid and won't need adjustments.
5208 * map->fd stays valid but now point to what map_fd points to.
5209 */
5210 return reuse_fd(map->fd, map_fd);
2d39d7c5
AN
5211}
5212
341ac5ff 5213static int init_map_in_map_slots(struct bpf_object *obj, struct bpf_map *map)
a0f2b7ac
HL
5214{
5215 const struct bpf_map *targ_map;
5216 unsigned int i;
67234743 5217 int fd, err = 0;
a0f2b7ac
HL
5218
5219 for (i = 0; i < map->init_slots_sz; i++) {
5220 if (!map->init_slots[i])
5221 continue;
5222
5223 targ_map = map->init_slots[i];
f08c18e0 5224 fd = targ_map->fd;
341ac5ff 5225
67234743 5226 if (obj->gen_loader) {
be05c944
AS
5227 bpf_gen__populate_outer_map(obj->gen_loader,
5228 map - obj->maps, i,
5229 targ_map - obj->maps);
67234743
AS
5230 } else {
5231 err = bpf_map_update_elem(map->fd, &i, &fd, 0);
5232 }
a0f2b7ac
HL
5233 if (err) {
5234 err = -errno;
5235 pr_warn("map '%s': failed to initialize slot [%d] to map '%s' fd=%d: %d\n",
341ac5ff 5236 map->name, i, targ_map->name, fd, err);
a0f2b7ac
HL
5237 return err;
5238 }
5239 pr_debug("map '%s': slot [%d] set to map '%s' fd=%d\n",
5240 map->name, i, targ_map->name, fd);
5241 }
5242
5243 zfree(&map->init_slots);
5244 map->init_slots_sz = 0;
5245
5246 return 0;
5247}
5248
341ac5ff
HC
5249static int init_prog_array_slots(struct bpf_object *obj, struct bpf_map *map)
5250{
5251 const struct bpf_program *targ_prog;
5252 unsigned int i;
5253 int fd, err;
5254
5255 if (obj->gen_loader)
5256 return -ENOTSUP;
5257
5258 for (i = 0; i < map->init_slots_sz; i++) {
5259 if (!map->init_slots[i])
5260 continue;
5261
5262 targ_prog = map->init_slots[i];
5263 fd = bpf_program__fd(targ_prog);
5264
5265 err = bpf_map_update_elem(map->fd, &i, &fd, 0);
5266 if (err) {
5267 err = -errno;
5268 pr_warn("map '%s': failed to initialize slot [%d] to prog '%s' fd=%d: %d\n",
5269 map->name, i, targ_prog->name, fd, err);
5270 return err;
5271 }
5272 pr_debug("map '%s': slot [%d] set to prog '%s' fd=%d\n",
5273 map->name, i, targ_prog->name, fd);
5274 }
5275
5276 zfree(&map->init_slots);
5277 map->init_slots_sz = 0;
5278
5279 return 0;
5280}
5281
5282static int bpf_object_init_prog_arrays(struct bpf_object *obj)
5283{
5284 struct bpf_map *map;
5285 int i, err;
5286
5287 for (i = 0; i < obj->nr_maps; i++) {
5288 map = &obj->maps[i];
5289
5290 if (!map->init_slots_sz || map->def.type != BPF_MAP_TYPE_PROG_ARRAY)
5291 continue;
5292
5293 err = init_prog_array_slots(obj, map);
dac645b9 5294 if (err < 0)
341ac5ff 5295 return err;
341ac5ff
HC
5296 }
5297 return 0;
5298}
5299
a4fbfdd7
ST
5300static int map_set_def_max_entries(struct bpf_map *map)
5301{
5302 if (map->def.type == BPF_MAP_TYPE_PERF_EVENT_ARRAY && !map->def.max_entries) {
5303 int nr_cpus;
5304
5305 nr_cpus = libbpf_num_possible_cpus();
5306 if (nr_cpus < 0) {
5307 pr_warn("map '%s': failed to determine number of system CPUs: %d\n",
5308 map->name, nr_cpus);
5309 return nr_cpus;
5310 }
5311 pr_debug("map '%s': setting size to %d\n", map->name, nr_cpus);
5312 map->def.max_entries = nr_cpus;
5313 }
5314
5315 return 0;
5316}
5317
52d3352e
WN
5318static int
5319bpf_object__create_maps(struct bpf_object *obj)
5320{
2d39d7c5
AN
5321 struct bpf_map *map;
5322 char *cp, errmsg[STRERR_BUFSIZE];
5323 unsigned int i, j;
8a138aed 5324 int err;
043c5bb3 5325 bool retried;
52d3352e 5326
9d759a9b 5327 for (i = 0; i < obj->nr_maps; i++) {
2d39d7c5 5328 map = &obj->maps[i];
8a138aed 5329
16e0c35c
AN
5330 /* To support old kernels, we skip creating global data maps
5331 * (.rodata, .data, .kconfig, etc); later on, during program
5332 * loading, if we detect that at least one of the to-be-loaded
5333 * programs is referencing any global data map, we'll error
5334 * out with program name and relocation index logged.
5335 * This approach allows to accommodate Clang emitting
5336 * unnecessary .rodata.str1.1 sections for string literals,
5337 * but also it allows to have CO-RE applications that use
5338 * global variables in some of BPF programs, but not others.
5339 * If those global variable-using programs are not loaded at
5340 * runtime due to bpf_program__set_autoload(prog, false),
5341 * bpf_object loading will succeed just fine even on old
5342 * kernels.
5343 */
ec41817b
AN
5344 if (bpf_map__is_internal(map) && !kernel_supports(obj, FEAT_GLOBAL_DATA))
5345 map->autocreate = false;
5346
5347 if (!map->autocreate) {
5348 pr_debug("map '%s': skipped auto-creating...\n", map->name);
16e0c35c 5349 continue;
229fae38 5350 }
16e0c35c 5351
a4fbfdd7
ST
5352 err = map_set_def_max_entries(map);
5353 if (err)
5354 goto err_out;
5355
043c5bb3
MP
5356 retried = false;
5357retry:
57a00f41
THJ
5358 if (map->pin_path) {
5359 err = bpf_object__reuse_map(map);
5360 if (err) {
2d39d7c5 5361 pr_warn("map '%s': error reusing pinned map\n",
57a00f41 5362 map->name);
2d39d7c5 5363 goto err_out;
57a00f41 5364 }
043c5bb3
MP
5365 if (retried && map->fd < 0) {
5366 pr_warn("map '%s': cannot find pinned map\n",
5367 map->name);
5368 err = -ENOENT;
5369 goto err_out;
5370 }
57a00f41
THJ
5371 }
5372
fa98b54b 5373 if (map->reused) {
2d39d7c5 5374 pr_debug("map '%s': skipping creation (preset fd=%d)\n",
26736eb9 5375 map->name, map->fd);
2c193d32 5376 } else {
67234743 5377 err = bpf_object__create_map(obj, map, false);
2c193d32 5378 if (err)
d859900c 5379 goto err_out;
d859900c 5380
2c193d32
HL
5381 pr_debug("map '%s': created successfully, fd=%d\n",
5382 map->name, map->fd);
646f02ff 5383
2c193d32
HL
5384 if (bpf_map__is_internal(map)) {
5385 err = bpf_object__populate_internal_map(obj, map);
dac645b9 5386 if (err < 0)
2c193d32 5387 goto err_out;
d859900c 5388 }
79ff13e9 5389 if (map->def.type == BPF_MAP_TYPE_ARENA) {
5ab8cb89
AN
5390 map->mmaped = mmap((void *)(long)map->map_extra,
5391 bpf_map_mmap_sz(map), PROT_READ | PROT_WRITE,
79ff13e9
AS
5392 map->map_extra ? MAP_SHARED | MAP_FIXED : MAP_SHARED,
5393 map->fd, 0);
5394 if (map->mmaped == MAP_FAILED) {
5395 err = -errno;
5396 map->mmaped = NULL;
5397 pr_warn("map '%s': failed to mmap arena: %d\n",
5398 map->name, err);
5399 return err;
5400 }
2e7ba4f8
AN
5401 if (obj->arena_data) {
5402 memcpy(map->mmaped, obj->arena_data, obj->arena_data_sz);
5403 zfree(&obj->arena_data);
5404 }
79ff13e9 5405 }
341ac5ff
HC
5406 if (map->init_slots_sz && map->def.type != BPF_MAP_TYPE_PROG_ARRAY) {
5407 err = init_map_in_map_slots(obj, map);
dac645b9 5408 if (err < 0)
646f02ff 5409 goto err_out;
646f02ff 5410 }
646f02ff
AN
5411 }
5412
57a00f41
THJ
5413 if (map->pin_path && !map->pinned) {
5414 err = bpf_map__pin(map, NULL);
5415 if (err) {
043c5bb3
MP
5416 if (!retried && err == -EEXIST) {
5417 retried = true;
5418 goto retry;
5419 }
2d39d7c5
AN
5420 pr_warn("map '%s': failed to auto-pin at '%s': %d\n",
5421 map->name, map->pin_path, err);
2d39d7c5 5422 goto err_out;
57a00f41
THJ
5423 }
5424 }
52d3352e
WN
5425 }
5426
52d3352e 5427 return 0;
2d39d7c5
AN
5428
5429err_out:
5430 cp = libbpf_strerror_r(err, errmsg, sizeof(errmsg));
5431 pr_warn("map '%s': failed to create: %s(%d)\n", map->name, cp, err);
5432 pr_perm_msg(err);
5433 for (j = 0; j < i; j++)
5434 zclose(obj->maps[j].fd);
5435 return err;
52d3352e
WN
5436}
5437
ddc7c304
AN
5438static bool bpf_core_is_flavor_sep(const char *s)
5439{
5440 /* check X___Y name pattern, where X and Y are not underscores */
5441 return s[0] != '_' && /* X */
5442 s[1] == '_' && s[2] == '_' && s[3] == '_' && /* ___ */
5443 s[4] != '_'; /* Y */
5444}
5445
5446/* Given 'some_struct_name___with_flavor' return the length of a name prefix
5447 * before last triple underscore. Struct name part after last triple
5448 * underscore is ignored by BPF CO-RE relocation during relocation matching.
5449 */
b0588390 5450size_t bpf_core_essential_name_len(const char *name)
ddc7c304
AN
5451{
5452 size_t n = strlen(name);
5453 int i;
5454
5455 for (i = n - 5; i >= 0; i--) {
5456 if (bpf_core_is_flavor_sep(name + i))
5457 return i + 1;
5458 }
5459 return n;
5460}
5461
8de6cae4 5462void bpf_core_free_cands(struct bpf_core_cand_list *cands)
ddc7c304 5463{
8de6cae4
MV
5464 if (!cands)
5465 return;
5466
0f7515ca
AN
5467 free(cands->cands);
5468 free(cands);
ddc7c304
AN
5469}
5470
8de6cae4
MV
5471int bpf_core_add_cands(struct bpf_core_cand *local_cand,
5472 size_t local_essent_len,
5473 const struct btf *targ_btf,
5474 const char *targ_btf_name,
5475 int targ_start_id,
5476 struct bpf_core_cand_list *cands)
ddc7c304 5477{
301ba4d7 5478 struct bpf_core_cand *new_cands, *cand;
03d5b991
AN
5479 const struct btf_type *t, *local_t;
5480 const char *targ_name, *local_name;
0f7515ca
AN
5481 size_t targ_essent_len;
5482 int n, i;
ddc7c304 5483
03d5b991
AN
5484 local_t = btf__type_by_id(local_cand->btf, local_cand->id);
5485 local_name = btf__str_by_offset(local_cand->btf, local_t->name_off);
5486
6a886de0
HC
5487 n = btf__type_cnt(targ_btf);
5488 for (i = targ_start_id; i < n; i++) {
ddc7c304 5489 t = btf__type_by_id(targ_btf, i);
23b2a3a8 5490 if (!btf_kind_core_compat(t, local_t))
ddc7c304
AN
5491 continue;
5492
3fc32f40
AN
5493 targ_name = btf__name_by_offset(targ_btf, t->name_off);
5494 if (str_is_empty(targ_name))
d121e1d3
AN
5495 continue;
5496
ddc7c304
AN
5497 targ_essent_len = bpf_core_essential_name_len(targ_name);
5498 if (targ_essent_len != local_essent_len)
5499 continue;
5500
03d5b991 5501 if (strncmp(local_name, targ_name, local_essent_len) != 0)
0f7515ca
AN
5502 continue;
5503
5504 pr_debug("CO-RE relocating [%d] %s %s: found target candidate [%d] %s %s in [%s]\n",
03d5b991
AN
5505 local_cand->id, btf_kind_str(local_t),
5506 local_name, i, btf_kind_str(t), targ_name,
0f7515ca
AN
5507 targ_btf_name);
5508 new_cands = libbpf_reallocarray(cands->cands, cands->len + 1,
5509 sizeof(*cands->cands));
5510 if (!new_cands)
5511 return -ENOMEM;
5512
5513 cand = &new_cands[cands->len];
5514 cand->btf = targ_btf;
0f7515ca
AN
5515 cand->id = i;
5516
5517 cands->cands = new_cands;
5518 cands->len++;
ddc7c304 5519 }
0f7515ca
AN
5520 return 0;
5521}
5522
4f33a53d
AN
5523static int load_module_btfs(struct bpf_object *obj)
5524{
5525 struct bpf_btf_info info;
5526 struct module_btf *mod_btf;
5527 struct btf *btf;
5528 char name[64];
5529 __u32 id = 0, len;
5530 int err, fd;
5531
5532 if (obj->btf_modules_loaded)
5533 return 0;
5534
67234743
AS
5535 if (obj->gen_loader)
5536 return 0;
5537
4f33a53d
AN
5538 /* don't do this again, even if we find no module BTFs */
5539 obj->btf_modules_loaded = true;
5540
5541 /* kernel too old to support module BTFs */
9ca1f56a 5542 if (!kernel_supports(obj, FEAT_MODULE_BTF))
4f33a53d
AN
5543 return 0;
5544
5545 while (true) {
5546 err = bpf_btf_get_next_id(id, &id);
5547 if (err && errno == ENOENT)
5548 return 0;
2d2c9516
AT
5549 if (err && errno == EPERM) {
5550 pr_debug("skipping module BTFs loading, missing privileges\n");
5551 return 0;
5552 }
4f33a53d
AN
5553 if (err) {
5554 err = -errno;
5555 pr_warn("failed to iterate BTF objects: %d\n", err);
5556 return err;
ddc7c304 5557 }
4f33a53d
AN
5558
5559 fd = bpf_btf_get_fd_by_id(id);
5560 if (fd < 0) {
5561 if (errno == ENOENT)
5562 continue; /* expected race: BTF was unloaded */
5563 err = -errno;
5564 pr_warn("failed to get BTF object #%d FD: %d\n", id, err);
5565 return err;
5566 }
5567
5568 len = sizeof(info);
5569 memset(&info, 0, sizeof(info));
5570 info.name = ptr_to_u64(name);
5571 info.name_len = sizeof(name);
5572
629dfc66 5573 err = bpf_btf_get_info_by_fd(fd, &info, &len);
4f33a53d
AN
5574 if (err) {
5575 err = -errno;
5576 pr_warn("failed to get BTF object #%d info: %d\n", id, err);
91abb4a6 5577 goto err_out;
4f33a53d
AN
5578 }
5579
5580 /* ignore non-module BTFs */
5581 if (!info.kernel_btf || strcmp(name, "vmlinux") == 0) {
5582 close(fd);
5583 continue;
5584 }
5585
5586 btf = btf_get_from_fd(fd, obj->btf_vmlinux);
e9fc3ce9
AN
5587 err = libbpf_get_error(btf);
5588 if (err) {
5589 pr_warn("failed to load module [%s]'s BTF object #%d: %d\n",
5590 name, id, err);
91abb4a6 5591 goto err_out;
4f33a53d
AN
5592 }
5593
3b029e06 5594 err = libbpf_ensure_mem((void **)&obj->btf_modules, &obj->btf_module_cap,
e3ba8e4e 5595 sizeof(*obj->btf_modules), obj->btf_module_cnt + 1);
4f33a53d 5596 if (err)
91abb4a6 5597 goto err_out;
4f33a53d
AN
5598
5599 mod_btf = &obj->btf_modules[obj->btf_module_cnt++];
5600
5601 mod_btf->btf = btf;
5602 mod_btf->id = id;
91abb4a6 5603 mod_btf->fd = fd;
4f33a53d 5604 mod_btf->name = strdup(name);
91abb4a6
AN
5605 if (!mod_btf->name) {
5606 err = -ENOMEM;
5607 goto err_out;
5608 }
5609 continue;
5610
5611err_out:
5612 close(fd);
5613 return err;
ddc7c304 5614 }
4f33a53d
AN
5615
5616 return 0;
5617}
5618
301ba4d7 5619static struct bpf_core_cand_list *
0f7515ca
AN
5620bpf_core_find_cands(struct bpf_object *obj, const struct btf *local_btf, __u32 local_type_id)
5621{
301ba4d7
AS
5622 struct bpf_core_cand local_cand = {};
5623 struct bpf_core_cand_list *cands;
4f33a53d 5624 const struct btf *main_btf;
03d5b991
AN
5625 const struct btf_type *local_t;
5626 const char *local_name;
0f7515ca 5627 size_t local_essent_len;
4f33a53d 5628 int err, i;
0f7515ca
AN
5629
5630 local_cand.btf = local_btf;
03d5b991
AN
5631 local_cand.id = local_type_id;
5632 local_t = btf__type_by_id(local_btf, local_type_id);
5633 if (!local_t)
0f7515ca
AN
5634 return ERR_PTR(-EINVAL);
5635
03d5b991
AN
5636 local_name = btf__name_by_offset(local_btf, local_t->name_off);
5637 if (str_is_empty(local_name))
0f7515ca 5638 return ERR_PTR(-EINVAL);
03d5b991 5639 local_essent_len = bpf_core_essential_name_len(local_name);
0f7515ca
AN
5640
5641 cands = calloc(1, sizeof(*cands));
5642 if (!cands)
5643 return ERR_PTR(-ENOMEM);
5644
5645 /* Attempt to find target candidates in vmlinux BTF first */
4f33a53d
AN
5646 main_btf = obj->btf_vmlinux_override ?: obj->btf_vmlinux;
5647 err = bpf_core_add_cands(&local_cand, local_essent_len, main_btf, "vmlinux", 1, cands);
5648 if (err)
5649 goto err_out;
5650
5651 /* if vmlinux BTF has any candidate, don't got for module BTFs */
5652 if (cands->len)
5653 return cands;
5654
5655 /* if vmlinux BTF was overridden, don't attempt to load module BTFs */
5656 if (obj->btf_vmlinux_override)
5657 return cands;
5658
5659 /* now look through module BTFs, trying to still find candidates */
5660 err = load_module_btfs(obj);
5661 if (err)
5662 goto err_out;
5663
5664 for (i = 0; i < obj->btf_module_cnt; i++) {
5665 err = bpf_core_add_cands(&local_cand, local_essent_len,
5666 obj->btf_modules[i].btf,
5667 obj->btf_modules[i].name,
6a886de0 5668 btf__type_cnt(obj->btf_vmlinux),
4f33a53d
AN
5669 cands);
5670 if (err)
5671 goto err_out;
0f7515ca
AN
5672 }
5673
5674 return cands;
ddc7c304 5675err_out:
4f33a53d 5676 bpf_core_free_cands(cands);
ddc7c304
AN
5677 return ERR_PTR(err);
5678}
5679
3fc32f40
AN
5680/* Check local and target types for compatibility. This check is used for
5681 * type-based CO-RE relocations and follow slightly different rules than
5682 * field-based relocations. This function assumes that root types were already
5683 * checked for name match. Beyond that initial root-level name check, names
5684 * are completely ignored. Compatibility rules are as follows:
5685 * - any two STRUCTs/UNIONs/FWDs/ENUMs/INTs are considered compatible, but
5686 * kind should match for local and target types (i.e., STRUCT is not
5687 * compatible with UNION);
5688 * - for ENUMs, the size is ignored;
5689 * - for INT, size and signedness are ignored;
5690 * - for ARRAY, dimensionality is ignored, element types are checked for
5691 * compatibility recursively;
5692 * - CONST/VOLATILE/RESTRICT modifiers are ignored;
5693 * - TYPEDEFs/PTRs are compatible if types they pointing to are compatible;
5694 * - FUNC_PROTOs are compatible if they have compatible signature: same
5695 * number of input args and compatible return and argument types.
5696 * These rules are not set in stone and probably will be adjusted as we get
5697 * more experience with using BPF CO-RE relocations.
5698 */
b0588390
AS
5699int bpf_core_types_are_compat(const struct btf *local_btf, __u32 local_id,
5700 const struct btf *targ_btf, __u32 targ_id)
3fc32f40 5701{
fd75733d 5702 return __bpf_core_types_are_compat(local_btf, local_id, targ_btf, targ_id, 32);
3fc32f40
AN
5703}
5704
ec6209c8
DM
5705int bpf_core_types_match(const struct btf *local_btf, __u32 local_id,
5706 const struct btf *targ_btf, __u32 targ_id)
5707{
5708 return __bpf_core_types_match(local_btf, local_id, targ_btf, targ_id, false, 32);
5709}
5710
c302378b 5711static size_t bpf_core_hash_fn(const long key, void *ctx)
ddc7c304 5712{
c302378b 5713 return key;
ddc7c304
AN
5714}
5715
c302378b 5716static bool bpf_core_equal_fn(const long k1, const long k2, void *ctx)
ddc7c304
AN
5717{
5718 return k1 == k2;
5719}
5720
d0e92887
AS
5721static int record_relo_core(struct bpf_program *prog,
5722 const struct bpf_core_relo *core_relo, int insn_idx)
5723{
5724 struct reloc_desc *relos, *relo;
5725
5726 relos = libbpf_reallocarray(prog->reloc_desc,
5727 prog->nr_reloc + 1, sizeof(*relos));
5728 if (!relos)
5729 return -ENOMEM;
5730 relo = &relos[prog->nr_reloc];
5731 relo->type = RELO_CORE;
5732 relo->insn_idx = insn_idx;
5733 relo->core_relo = core_relo;
5734 prog->reloc_desc = relos;
5735 prog->nr_reloc++;
5736 return 0;
5737}
5738
9fdc4273
AN
5739static const struct bpf_core_relo *find_relo_core(struct bpf_program *prog, int insn_idx)
5740{
5741 struct reloc_desc *relo;
5742 int i;
5743
5744 for (i = 0; i < prog->nr_reloc; i++) {
5745 relo = &prog->reloc_desc[i];
5746 if (relo->type != RELO_CORE || relo->insn_idx != insn_idx)
5747 continue;
5748
5749 return relo->core_relo;
5750 }
5751
5752 return NULL;
5753}
5754
adb8fa19
MV
5755static int bpf_core_resolve_relo(struct bpf_program *prog,
5756 const struct bpf_core_relo *relo,
5757 int relo_idx,
5758 const struct btf *local_btf,
5759 struct hashmap *cand_cache,
5760 struct bpf_core_relo_res *targ_res)
3ee4f533 5761{
78c1f8d0 5762 struct bpf_core_spec specs_scratch[3] = {};
301ba4d7 5763 struct bpf_core_cand_list *cands = NULL;
3ee4f533
AS
5764 const char *prog_name = prog->name;
5765 const struct btf_type *local_type;
5766 const char *local_name;
5767 __u32 local_id = relo->type_id;
adb8fa19 5768 int err;
3ee4f533
AS
5769
5770 local_type = btf__type_by_id(local_btf, local_id);
5771 if (!local_type)
5772 return -EINVAL;
5773
5774 local_name = btf__name_by_offset(local_btf, local_type->name_off);
5775 if (!local_name)
5776 return -EINVAL;
5777
46334a0c 5778 if (relo->kind != BPF_CORE_TYPE_ID_LOCAL &&
c302378b 5779 !hashmap__find(cand_cache, local_id, &cands)) {
3ee4f533
AS
5780 cands = bpf_core_find_cands(prog->obj, local_btf, local_id);
5781 if (IS_ERR(cands)) {
5782 pr_warn("prog '%s': relo #%d: target candidate search failed for [%d] %s %s: %ld\n",
5783 prog_name, relo_idx, local_id, btf_kind_str(local_type),
5784 local_name, PTR_ERR(cands));
5785 return PTR_ERR(cands);
5786 }
c302378b 5787 err = hashmap__set(cand_cache, local_id, cands, NULL, NULL);
3ee4f533
AS
5788 if (err) {
5789 bpf_core_free_cands(cands);
5790 return err;
5791 }
5792 }
5793
adb8fa19
MV
5794 return bpf_core_calc_relo_insn(prog_name, relo, relo_idx, local_btf, cands, specs_scratch,
5795 targ_res);
3ee4f533
AS
5796}
5797
ddc7c304 5798static int
28b93c64 5799bpf_object__relocate_core(struct bpf_object *obj, const char *targ_btf_path)
ddc7c304
AN
5800{
5801 const struct btf_ext_info_sec *sec;
adb8fa19 5802 struct bpf_core_relo_res targ_res;
28b93c64 5803 const struct bpf_core_relo *rec;
ddc7c304
AN
5804 const struct btf_ext_info *seg;
5805 struct hashmap_entry *entry;
5806 struct hashmap *cand_cache = NULL;
5807 struct bpf_program *prog;
adb8fa19 5808 struct bpf_insn *insn;
ddc7c304 5809 const char *sec_name;
11d5daa8 5810 int i, err = 0, insn_idx, sec_idx, sec_num;
ddc7c304 5811
28b93c64
AN
5812 if (obj->btf_ext->core_relo_info.len == 0)
5813 return 0;
5814
0f7515ca
AN
5815 if (targ_btf_path) {
5816 obj->btf_vmlinux_override = btf__parse(targ_btf_path, NULL);
e9fc3ce9
AN
5817 err = libbpf_get_error(obj->btf_vmlinux_override);
5818 if (err) {
0f7515ca
AN
5819 pr_warn("failed to parse target BTF: %d\n", err);
5820 return err;
5821 }
ddc7c304
AN
5822 }
5823
5824 cand_cache = hashmap__new(bpf_core_hash_fn, bpf_core_equal_fn, NULL);
5825 if (IS_ERR(cand_cache)) {
5826 err = PTR_ERR(cand_cache);
5827 goto out;
5828 }
5829
28b93c64 5830 seg = &obj->btf_ext->core_relo_info;
11d5daa8 5831 sec_num = 0;
ddc7c304 5832 for_each_btf_ext_sec(seg, sec) {
11d5daa8
AN
5833 sec_idx = seg->sec_idxs[sec_num];
5834 sec_num++;
5835
ddc7c304
AN
5836 sec_name = btf__name_by_offset(obj->btf, sec->sec_name_off);
5837 if (str_is_empty(sec_name)) {
5838 err = -EINVAL;
5839 goto out;
5840 }
ddc7c304 5841
11d5daa8 5842 pr_debug("sec '%s': found %d CO-RE relocations\n", sec_name, sec->num_info);
ddc7c304
AN
5843
5844 for_each_btf_ext_rec(seg, sec, i, rec) {
adb8fa19
MV
5845 if (rec->insn_off % BPF_INSN_SZ)
5846 return -EINVAL;
db2b8b06
AN
5847 insn_idx = rec->insn_off / BPF_INSN_SZ;
5848 prog = find_prog_by_sec_insn(obj, sec_idx, insn_idx);
5849 if (!prog) {
e89d57d9
AN
5850 /* When __weak subprog is "overridden" by another instance
5851 * of the subprog from a different object file, linker still
5852 * appends all the .BTF.ext info that used to belong to that
5853 * eliminated subprogram.
5854 * This is similar to what x86-64 linker does for relocations.
5855 * So just ignore such relocations just like we ignore
5856 * subprog instructions when discovering subprograms.
5857 */
5858 pr_debug("sec '%s': skipping CO-RE relocation #%d for insn #%d belonging to eliminated weak subprogram\n",
5859 sec_name, i, insn_idx);
5860 continue;
db2b8b06 5861 }
47f7cf63
AN
5862 /* no need to apply CO-RE relocation if the program is
5863 * not going to be loaded
5864 */
a3820c48 5865 if (!prog->autoload)
47f7cf63 5866 continue;
db2b8b06 5867
adb8fa19
MV
5868 /* adjust insn_idx from section frame of reference to the local
5869 * program's frame of reference; (sub-)program code is not yet
5870 * relocated, so it's enough to just subtract in-section offset
5871 */
5872 insn_idx = insn_idx - prog->sec_insn_off;
5873 if (insn_idx >= prog->insns_cnt)
5874 return -EINVAL;
5875 insn = &prog->insns[insn_idx];
5876
185cfe83
AN
5877 err = record_relo_core(prog, rec, insn_idx);
5878 if (err) {
5879 pr_warn("prog '%s': relo #%d: failed to record relocation: %d\n",
5880 prog->name, i, err);
5881 goto out;
adb8fa19
MV
5882 }
5883
185cfe83
AN
5884 if (prog->obj->gen_loader)
5885 continue;
5886
adb8fa19 5887 err = bpf_core_resolve_relo(prog, rec, i, obj->btf, cand_cache, &targ_res);
ddc7c304 5888 if (err) {
be18010e 5889 pr_warn("prog '%s': relo #%d: failed to relocate: %d\n",
9c0f8cbd 5890 prog->name, i, err);
ddc7c304
AN
5891 goto out;
5892 }
adb8fa19
MV
5893
5894 err = bpf_core_patch_insn(prog->name, insn, insn_idx, rec, i, &targ_res);
5895 if (err) {
5896 pr_warn("prog '%s': relo #%d: failed to patch insn #%u: %d\n",
5897 prog->name, i, insn_idx, err);
5898 goto out;
5899 }
ddc7c304
AN
5900 }
5901 }
5902
5903out:
4f33a53d 5904 /* obj->btf_vmlinux and module BTFs are freed after object load */
0f7515ca
AN
5905 btf__free(obj->btf_vmlinux_override);
5906 obj->btf_vmlinux_override = NULL;
5907
ddc7c304
AN
5908 if (!IS_ERR_OR_NULL(cand_cache)) {
5909 hashmap__for_each_entry(cand_cache, entry, i) {
c302378b 5910 bpf_core_free_cands(entry->pvalue);
ddc7c304
AN
5911 }
5912 hashmap__free(cand_cache);
5913 }
5914 return err;
5915}
5916
ec41817b 5917/* base map load ldimm64 special constant, used also for log fixup logic */
3055ddd6
AN
5918#define POISON_LDIMM64_MAP_BASE 2001000000
5919#define POISON_LDIMM64_MAP_PFX "200100"
ec41817b
AN
5920
5921static void poison_map_ldimm64(struct bpf_program *prog, int relo_idx,
5922 int insn_idx, struct bpf_insn *insn,
5923 int map_idx, const struct bpf_map *map)
5924{
5925 int i;
5926
5927 pr_debug("prog '%s': relo #%d: poisoning insn #%d that loads map #%d '%s'\n",
5928 prog->name, relo_idx, insn_idx, map_idx, map->name);
5929
5930 /* we turn single ldimm64 into two identical invalid calls */
5931 for (i = 0; i < 2; i++) {
5932 insn->code = BPF_JMP | BPF_CALL;
5933 insn->dst_reg = 0;
5934 insn->src_reg = 0;
5935 insn->off = 0;
5936 /* if this instruction is reachable (not a dead code),
5937 * verifier will complain with something like:
5938 * invalid func unknown#2001000123
5939 * where lower 123 is map index into obj->maps[] array
5940 */
3055ddd6 5941 insn->imm = POISON_LDIMM64_MAP_BASE + map_idx;
ec41817b
AN
5942
5943 insn++;
5944 }
5945}
5946
05b6f766
AN
5947/* unresolved kfunc call special constant, used also for log fixup logic */
5948#define POISON_CALL_KFUNC_BASE 2002000000
5949#define POISON_CALL_KFUNC_PFX "2002"
5950
5951static void poison_kfunc_call(struct bpf_program *prog, int relo_idx,
5952 int insn_idx, struct bpf_insn *insn,
5953 int ext_idx, const struct extern_desc *ext)
5954{
5955 pr_debug("prog '%s': relo #%d: poisoning insn #%d that calls kfunc '%s'\n",
5956 prog->name, relo_idx, insn_idx, ext->name);
5957
5958 /* we turn kfunc call into invalid helper call with identifiable constant */
5959 insn->code = BPF_JMP | BPF_CALL;
5960 insn->dst_reg = 0;
5961 insn->src_reg = 0;
5962 insn->off = 0;
5963 /* if this instruction is reachable (not a dead code),
5964 * verifier will complain with something like:
5965 * invalid func unknown#2001000123
5966 * where lower 123 is extern index into obj->externs[] array
5967 */
5968 insn->imm = POISON_CALL_KFUNC_BASE + ext_idx;
5969}
5970
c3c55696
AN
5971/* Relocate data references within program code:
5972 * - map references;
5973 * - global variable references;
5974 * - extern references.
5975 */
48cca7e4 5976static int
c3c55696 5977bpf_object__relocate_data(struct bpf_object *obj, struct bpf_program *prog)
8a47a6c5 5978{
c3c55696 5979 int i;
8a47a6c5
WN
5980
5981 for (i = 0; i < prog->nr_reloc; i++) {
53f8dd43 5982 struct reloc_desc *relo = &prog->reloc_desc[i];
166750bc 5983 struct bpf_insn *insn = &prog->insns[relo->insn_idx];
ec41817b 5984 const struct bpf_map *map;
2e33efe3 5985 struct extern_desc *ext;
8a47a6c5 5986
166750bc
AN
5987 switch (relo->type) {
5988 case RELO_LD64:
ec41817b 5989 map = &obj->maps[relo->map_idx];
e2fa0156
AS
5990 if (obj->gen_loader) {
5991 insn[0].src_reg = BPF_PSEUDO_MAP_IDX;
5992 insn[0].imm = relo->map_idx;
ec41817b 5993 } else if (map->autocreate) {
e2fa0156 5994 insn[0].src_reg = BPF_PSEUDO_MAP_FD;
ec41817b
AN
5995 insn[0].imm = map->fd;
5996 } else {
5997 poison_map_ldimm64(prog, i, relo->insn_idx, insn,
5998 relo->map_idx, map);
e2fa0156 5999 }
166750bc
AN
6000 break;
6001 case RELO_DATA:
ec41817b 6002 map = &obj->maps[relo->map_idx];
166750bc 6003 insn[1].imm = insn[0].imm + relo->sym_off;
e2fa0156
AS
6004 if (obj->gen_loader) {
6005 insn[0].src_reg = BPF_PSEUDO_MAP_IDX_VALUE;
6006 insn[0].imm = relo->map_idx;
ec41817b 6007 } else if (map->autocreate) {
e2fa0156 6008 insn[0].src_reg = BPF_PSEUDO_MAP_VALUE;
ec41817b
AN
6009 insn[0].imm = map->fd;
6010 } else {
6011 poison_map_ldimm64(prog, i, relo->insn_idx, insn,
6012 relo->map_idx, map);
e2fa0156 6013 }
166750bc 6014 break;
a18f7214 6015 case RELO_EXTERN_LD64:
3055ddd6 6016 ext = &obj->externs[relo->ext_idx];
1c0c7074 6017 if (ext->type == EXT_KCFG) {
e2fa0156
AS
6018 if (obj->gen_loader) {
6019 insn[0].src_reg = BPF_PSEUDO_MAP_IDX_VALUE;
6020 insn[0].imm = obj->kconfig_map_idx;
6021 } else {
6022 insn[0].src_reg = BPF_PSEUDO_MAP_VALUE;
6023 insn[0].imm = obj->maps[obj->kconfig_map_idx].fd;
6024 }
1c0c7074
AN
6025 insn[1].imm = ext->kcfg.data_off;
6026 } else /* EXT_KSYM */ {
2211c825 6027 if (ext->ksym.type_id && ext->is_set) { /* typed ksyms */
d370bbe1 6028 insn[0].src_reg = BPF_PSEUDO_BTF_ID;
284d2587
AN
6029 insn[0].imm = ext->ksym.kernel_btf_id;
6030 insn[1].imm = ext->ksym.kernel_btf_obj_fd;
2211c825 6031 } else { /* typeless ksyms or unresolved typed ksyms */
d370bbe1
HL
6032 insn[0].imm = (__u32)ext->ksym.addr;
6033 insn[1].imm = ext->ksym.addr >> 32;
6034 }
1c0c7074 6035 }
166750bc 6036 break;
a18f7214 6037 case RELO_EXTERN_CALL:
3055ddd6 6038 ext = &obj->externs[relo->ext_idx];
5bd022ec 6039 insn[0].src_reg = BPF_PSEUDO_KFUNC_CALL;
466b2e13
KKD
6040 if (ext->is_set) {
6041 insn[0].imm = ext->ksym.kernel_btf_id;
6042 insn[0].off = ext->ksym.btf_fd_idx;
05b6f766
AN
6043 } else { /* unresolved weak kfunc call */
6044 poison_kfunc_call(prog, i, relo->insn_idx, insn,
6045 relo->ext_idx, ext);
466b2e13 6046 }
5bd022ec 6047 break;
53eddb5e 6048 case RELO_SUBPROG_ADDR:
b1268826
AS
6049 if (insn[0].src_reg != BPF_PSEUDO_FUNC) {
6050 pr_warn("prog '%s': relo #%d: bad insn\n",
6051 prog->name, i);
6052 return -EINVAL;
6053 }
6054 /* handled already */
53eddb5e 6055 break;
166750bc 6056 case RELO_CALL:
b1268826 6057 /* handled already */
166750bc 6058 break;
d0e92887
AS
6059 case RELO_CORE:
6060 /* will be handled by bpf_program_record_relos() */
6061 break;
166750bc 6062 default:
9c0f8cbd
AN
6063 pr_warn("prog '%s': relo #%d: bad relo type %d\n",
6064 prog->name, i, relo->type);
166750bc 6065 return -EINVAL;
8a47a6c5 6066 }
8a47a6c5
WN
6067 }
6068
c3c55696
AN
6069 return 0;
6070}
6071
8505e870
AN
6072static int adjust_prog_btf_ext_info(const struct bpf_object *obj,
6073 const struct bpf_program *prog,
6074 const struct btf_ext_info *ext_info,
6075 void **prog_info, __u32 *prog_rec_cnt,
6076 __u32 *prog_rec_sz)
6077{
6078 void *copy_start = NULL, *copy_end = NULL;
6079 void *rec, *rec_end, *new_prog_info;
6080 const struct btf_ext_info_sec *sec;
6081 size_t old_sz, new_sz;
11d5daa8 6082 int i, sec_num, sec_idx, off_adj;
8505e870 6083
11d5daa8 6084 sec_num = 0;
8505e870 6085 for_each_btf_ext_sec(ext_info, sec) {
11d5daa8
AN
6086 sec_idx = ext_info->sec_idxs[sec_num];
6087 sec_num++;
6088 if (prog->sec_idx != sec_idx)
8505e870
AN
6089 continue;
6090
6091 for_each_btf_ext_rec(ext_info, sec, i, rec) {
6092 __u32 insn_off = *(__u32 *)rec / BPF_INSN_SZ;
6093
6094 if (insn_off < prog->sec_insn_off)
6095 continue;
6096 if (insn_off >= prog->sec_insn_off + prog->sec_insn_cnt)
6097 break;
6098
6099 if (!copy_start)
6100 copy_start = rec;
6101 copy_end = rec + ext_info->rec_size;
6102 }
6103
6104 if (!copy_start)
6105 return -ENOENT;
6106
6107 /* append func/line info of a given (sub-)program to the main
6108 * program func/line info
6109 */
8eb62958 6110 old_sz = (size_t)(*prog_rec_cnt) * ext_info->rec_size;
8505e870
AN
6111 new_sz = old_sz + (copy_end - copy_start);
6112 new_prog_info = realloc(*prog_info, new_sz);
6113 if (!new_prog_info)
6114 return -ENOMEM;
6115 *prog_info = new_prog_info;
6116 *prog_rec_cnt = new_sz / ext_info->rec_size;
6117 memcpy(new_prog_info + old_sz, copy_start, copy_end - copy_start);
6118
6119 /* Kernel instruction offsets are in units of 8-byte
6120 * instructions, while .BTF.ext instruction offsets generated
6121 * by Clang are in units of bytes. So convert Clang offsets
6122 * into kernel offsets and adjust offset according to program
6123 * relocated position.
6124 */
6125 off_adj = prog->sub_insn_off - prog->sec_insn_off;
6126 rec = new_prog_info + old_sz;
6127 rec_end = new_prog_info + new_sz;
6128 for (; rec < rec_end; rec += ext_info->rec_size) {
6129 __u32 *insn_off = rec;
6130
6131 *insn_off = *insn_off / BPF_INSN_SZ + off_adj;
6132 }
6133 *prog_rec_sz = ext_info->rec_size;
6134 return 0;
6135 }
6136
6137 return -ENOENT;
6138}
6139
6140static int
6141reloc_prog_func_and_line_info(const struct bpf_object *obj,
6142 struct bpf_program *main_prog,
6143 const struct bpf_program *prog)
6144{
6145 int err;
6146
6147 /* no .BTF.ext relocation if .BTF.ext is missing or kernel doesn't
2f38fe68 6148 * support func/line info
8505e870 6149 */
9ca1f56a 6150 if (!obj->btf_ext || !kernel_supports(obj, FEAT_BTF_FUNC))
8505e870
AN
6151 return 0;
6152
6153 /* only attempt func info relocation if main program's func_info
6154 * relocation was successful
6155 */
6156 if (main_prog != prog && !main_prog->func_info)
6157 goto line_info;
6158
6159 err = adjust_prog_btf_ext_info(obj, prog, &obj->btf_ext->func_info,
6160 &main_prog->func_info,
6161 &main_prog->func_info_cnt,
6162 &main_prog->func_info_rec_size);
6163 if (err) {
6164 if (err != -ENOENT) {
6165 pr_warn("prog '%s': error relocating .BTF.ext function info: %d\n",
6166 prog->name, err);
6167 return err;
6168 }
6169 if (main_prog->func_info) {
6170 /*
6171 * Some info has already been found but has problem
6172 * in the last btf_ext reloc. Must have to error out.
6173 */
6174 pr_warn("prog '%s': missing .BTF.ext function info.\n", prog->name);
6175 return err;
6176 }
6177 /* Have problem loading the very first info. Ignore the rest. */
6178 pr_warn("prog '%s': missing .BTF.ext function info for the main program, skipping all of .BTF.ext func info.\n",
6179 prog->name);
6180 }
6181
6182line_info:
6183 /* don't relocate line info if main program's relocation failed */
6184 if (main_prog != prog && !main_prog->line_info)
6185 return 0;
6186
6187 err = adjust_prog_btf_ext_info(obj, prog, &obj->btf_ext->line_info,
6188 &main_prog->line_info,
6189 &main_prog->line_info_cnt,
6190 &main_prog->line_info_rec_size);
6191 if (err) {
6192 if (err != -ENOENT) {
6193 pr_warn("prog '%s': error relocating .BTF.ext line info: %d\n",
6194 prog->name, err);
6195 return err;
6196 }
6197 if (main_prog->line_info) {
6198 /*
6199 * Some info has already been found but has problem
6200 * in the last btf_ext reloc. Must have to error out.
6201 */
6202 pr_warn("prog '%s': missing .BTF.ext line info.\n", prog->name);
6203 return err;
6204 }
6205 /* Have problem loading the very first info. Ignore the rest. */
6206 pr_warn("prog '%s': missing .BTF.ext line info for the main program, skipping all of .BTF.ext line info.\n",
6207 prog->name);
6208 }
6209 return 0;
6210}
6211
c3c55696
AN
6212static int cmp_relo_by_insn_idx(const void *key, const void *elem)
6213{
6214 size_t insn_idx = *(const size_t *)key;
6215 const struct reloc_desc *relo = elem;
6216
6217 if (insn_idx == relo->insn_idx)
6218 return 0;
6219 return insn_idx < relo->insn_idx ? -1 : 1;
6220}
6221
6222static struct reloc_desc *find_prog_insn_relo(const struct bpf_program *prog, size_t insn_idx)
6223{
2a6a9bf2
AN
6224 if (!prog->nr_reloc)
6225 return NULL;
c3c55696
AN
6226 return bsearch(&insn_idx, prog->reloc_desc, prog->nr_reloc,
6227 sizeof(*prog->reloc_desc), cmp_relo_by_insn_idx);
6228}
6229
b1268826
AS
6230static int append_subprog_relos(struct bpf_program *main_prog, struct bpf_program *subprog)
6231{
6232 int new_cnt = main_prog->nr_reloc + subprog->nr_reloc;
6233 struct reloc_desc *relos;
6234 int i;
6235
6236 if (main_prog == subprog)
6237 return 0;
6238 relos = libbpf_reallocarray(main_prog->reloc_desc, new_cnt, sizeof(*relos));
8a0260db
AN
6239 /* if new count is zero, reallocarray can return a valid NULL result;
6240 * in this case the previous pointer will be freed, so we *have to*
6241 * reassign old pointer to the new value (even if it's NULL)
6242 */
6243 if (!relos && new_cnt)
b1268826 6244 return -ENOMEM;
2a6a9bf2
AN
6245 if (subprog->nr_reloc)
6246 memcpy(relos + main_prog->nr_reloc, subprog->reloc_desc,
6247 sizeof(*relos) * subprog->nr_reloc);
b1268826
AS
6248
6249 for (i = main_prog->nr_reloc; i < new_cnt; i++)
6250 relos[i].insn_idx += subprog->sub_insn_off;
6251 /* After insn_idx adjustment the 'relos' array is still sorted
6252 * by insn_idx and doesn't break bsearch.
6253 */
6254 main_prog->reloc_desc = relos;
6255 main_prog->nr_reloc = new_cnt;
6256 return 0;
6257}
6258
6c918709
KKD
6259static int
6260bpf_object__append_subprog_code(struct bpf_object *obj, struct bpf_program *main_prog,
6261 struct bpf_program *subprog)
6262{
6263 struct bpf_insn *insns;
6264 size_t new_cnt;
6265 int err;
6266
6267 subprog->sub_insn_off = main_prog->insns_cnt;
6268
6269 new_cnt = main_prog->insns_cnt + subprog->insns_cnt;
6270 insns = libbpf_reallocarray(main_prog->insns, new_cnt, sizeof(*insns));
6271 if (!insns) {
6272 pr_warn("prog '%s': failed to realloc prog code\n", main_prog->name);
6273 return -ENOMEM;
6274 }
6275 main_prog->insns = insns;
6276 main_prog->insns_cnt = new_cnt;
6277
6278 memcpy(main_prog->insns + subprog->sub_insn_off, subprog->insns,
6279 subprog->insns_cnt * sizeof(*insns));
6280
6281 pr_debug("prog '%s': added %zu insns from sub-prog '%s'\n",
6282 main_prog->name, subprog->insns_cnt, subprog->name);
6283
6284 /* The subprog insns are now appended. Append its relos too. */
6285 err = append_subprog_relos(main_prog, subprog);
6286 if (err)
6287 return err;
6288 return 0;
6289}
6290
c3c55696
AN
6291static int
6292bpf_object__reloc_code(struct bpf_object *obj, struct bpf_program *main_prog,
6293 struct bpf_program *prog)
6294{
7e2925f6 6295 size_t sub_insn_idx, insn_idx;
c3c55696 6296 struct bpf_program *subprog;
c3c55696 6297 struct reloc_desc *relo;
7e2925f6 6298 struct bpf_insn *insn;
c3c55696
AN
6299 int err;
6300
6301 err = reloc_prog_func_and_line_info(obj, main_prog, prog);
6302 if (err)
6303 return err;
6304
6305 for (insn_idx = 0; insn_idx < prog->sec_insn_cnt; insn_idx++) {
6306 insn = &main_prog->insns[prog->sub_insn_off + insn_idx];
53eddb5e 6307 if (!insn_is_subprog_call(insn) && !insn_is_pseudo_func(insn))
c3c55696
AN
6308 continue;
6309
6310 relo = find_prog_insn_relo(prog, insn_idx);
a18f7214 6311 if (relo && relo->type == RELO_EXTERN_CALL)
b1268826
AS
6312 /* kfunc relocations will be handled later
6313 * in bpf_object__relocate_data()
6314 */
6315 continue;
53eddb5e 6316 if (relo && relo->type != RELO_CALL && relo->type != RELO_SUBPROG_ADDR) {
c3c55696
AN
6317 pr_warn("prog '%s': unexpected relo for insn #%zu, type %d\n",
6318 prog->name, insn_idx, relo->type);
6319 return -LIBBPF_ERRNO__RELOC;
6320 }
6321 if (relo) {
6322 /* sub-program instruction index is a combination of
6323 * an offset of a symbol pointed to by relocation and
6324 * call instruction's imm field; for global functions,
6325 * call always has imm = -1, but for static functions
6326 * relocation is against STT_SECTION and insn->imm
6327 * points to a start of a static function
53eddb5e
YS
6328 *
6329 * for subprog addr relocation, the relo->sym_off + insn->imm is
6330 * the byte offset in the corresponding section.
c3c55696 6331 */
53eddb5e
YS
6332 if (relo->type == RELO_CALL)
6333 sub_insn_idx = relo->sym_off / BPF_INSN_SZ + insn->imm + 1;
6334 else
6335 sub_insn_idx = (relo->sym_off + insn->imm) / BPF_INSN_SZ;
6336 } else if (insn_is_pseudo_func(insn)) {
6337 /*
6338 * RELO_SUBPROG_ADDR relo is always emitted even if both
6339 * functions are in the same section, so it shouldn't reach here.
6340 */
6341 pr_warn("prog '%s': missing subprog addr relo for insn #%zu\n",
6342 prog->name, insn_idx);
6343 return -LIBBPF_ERRNO__RELOC;
c3c55696
AN
6344 } else {
6345 /* if subprogram call is to a static function within
6346 * the same ELF section, there won't be any relocation
6347 * emitted, but it also means there is no additional
6348 * offset necessary, insns->imm is relative to
6349 * instruction's original position within the section
6350 */
6351 sub_insn_idx = prog->sec_insn_off + insn_idx + insn->imm + 1;
6352 }
6353
6354 /* we enforce that sub-programs should be in .text section */
6355 subprog = find_prog_by_sec_insn(obj, obj->efile.text_shndx, sub_insn_idx);
6356 if (!subprog) {
6357 pr_warn("prog '%s': no .text section found yet sub-program call exists\n",
6358 prog->name);
6359 return -LIBBPF_ERRNO__RELOC;
6360 }
6361
6362 /* if it's the first call instruction calling into this
6363 * subprogram (meaning this subprog hasn't been processed
6364 * yet) within the context of current main program:
6365 * - append it at the end of main program's instructions blog;
6366 * - process is recursively, while current program is put on hold;
6367 * - if that subprogram calls some other not yet processes
6368 * subprogram, same thing will happen recursively until
6369 * there are no more unprocesses subprograms left to append
6370 * and relocate.
6371 */
6372 if (subprog->sub_insn_off == 0) {
6c918709 6373 err = bpf_object__append_subprog_code(obj, main_prog, subprog);
b1268826
AS
6374 if (err)
6375 return err;
c3c55696
AN
6376 err = bpf_object__reloc_code(obj, main_prog, subprog);
6377 if (err)
6378 return err;
6379 }
6380
6381 /* main_prog->insns memory could have been re-allocated, so
6382 * calculate pointer again
6383 */
6384 insn = &main_prog->insns[prog->sub_insn_off + insn_idx];
6385 /* calculate correct instruction position within current main
6386 * prog; each main prog can have a different set of
6387 * subprograms appended (potentially in different order as
6388 * well), so position of any subprog can be different for
e3ba8e4e
KM
6389 * different main programs
6390 */
c3c55696
AN
6391 insn->imm = subprog->sub_insn_off - (prog->sub_insn_off + insn_idx) - 1;
6392
c3c55696
AN
6393 pr_debug("prog '%s': insn #%zu relocated, imm %d points to subprog '%s' (now at %zu offset)\n",
6394 prog->name, insn_idx, insn->imm, subprog->name, subprog->sub_insn_off);
6395 }
6396
6397 return 0;
6398}
6399
6400/*
6401 * Relocate sub-program calls.
6402 *
6403 * Algorithm operates as follows. Each entry-point BPF program (referred to as
6404 * main prog) is processed separately. For each subprog (non-entry functions,
6405 * that can be called from either entry progs or other subprogs) gets their
6406 * sub_insn_off reset to zero. This serves as indicator that this subprogram
6407 * hasn't been yet appended and relocated within current main prog. Once its
6408 * relocated, sub_insn_off will point at the position within current main prog
6409 * where given subprog was appended. This will further be used to relocate all
6410 * the call instructions jumping into this subprog.
6411 *
6412 * We start with main program and process all call instructions. If the call
6413 * is into a subprog that hasn't been processed (i.e., subprog->sub_insn_off
6414 * is zero), subprog instructions are appended at the end of main program's
6415 * instruction array. Then main program is "put on hold" while we recursively
6416 * process newly appended subprogram. If that subprogram calls into another
6417 * subprogram that hasn't been appended, new subprogram is appended again to
6418 * the *main* prog's instructions (subprog's instructions are always left
6419 * untouched, as they need to be in unmodified state for subsequent main progs
6420 * and subprog instructions are always sent only as part of a main prog) and
6421 * the process continues recursively. Once all the subprogs called from a main
6422 * prog or any of its subprogs are appended (and relocated), all their
6423 * positions within finalized instructions array are known, so it's easy to
6424 * rewrite call instructions with correct relative offsets, corresponding to
6425 * desired target subprog.
6426 *
6427 * Its important to realize that some subprogs might not be called from some
6428 * main prog and any of its called/used subprogs. Those will keep their
6429 * subprog->sub_insn_off as zero at all times and won't be appended to current
6430 * main prog and won't be relocated within the context of current main prog.
6431 * They might still be used from other main progs later.
6432 *
6433 * Visually this process can be shown as below. Suppose we have two main
6434 * programs mainA and mainB and BPF object contains three subprogs: subA,
6435 * subB, and subC. mainA calls only subA, mainB calls only subC, but subA and
6436 * subC both call subB:
6437 *
6438 * +--------+ +-------+
6439 * | v v |
6440 * +--+---+ +--+-+-+ +---+--+
6441 * | subA | | subB | | subC |
6442 * +--+---+ +------+ +---+--+
6443 * ^ ^
6444 * | |
6445 * +---+-------+ +------+----+
6446 * | mainA | | mainB |
6447 * +-----------+ +-----------+
6448 *
6449 * We'll start relocating mainA, will find subA, append it and start
6450 * processing sub A recursively:
6451 *
6452 * +-----------+------+
6453 * | mainA | subA |
6454 * +-----------+------+
6455 *
6456 * At this point we notice that subB is used from subA, so we append it and
6457 * relocate (there are no further subcalls from subB):
6458 *
6459 * +-----------+------+------+
6460 * | mainA | subA | subB |
6461 * +-----------+------+------+
6462 *
6463 * At this point, we relocate subA calls, then go one level up and finish with
6464 * relocatin mainA calls. mainA is done.
6465 *
6466 * For mainB process is similar but results in different order. We start with
6467 * mainB and skip subA and subB, as mainB never calls them (at least
6468 * directly), but we see subC is needed, so we append and start processing it:
6469 *
6470 * +-----------+------+
6471 * | mainB | subC |
6472 * +-----------+------+
6473 * Now we see subC needs subB, so we go back to it, append and relocate it:
6474 *
6475 * +-----------+------+------+
6476 * | mainB | subC | subB |
6477 * +-----------+------+------+
6478 *
6479 * At this point we unwind recursion, relocate calls in subC, then in mainB.
6480 */
6481static int
6482bpf_object__relocate_calls(struct bpf_object *obj, struct bpf_program *prog)
6483{
6484 struct bpf_program *subprog;
d3d93e34 6485 int i, err;
c3c55696 6486
c3c55696
AN
6487 /* mark all subprogs as not relocated (yet) within the context of
6488 * current main program
6489 */
6490 for (i = 0; i < obj->nr_programs; i++) {
6491 subprog = &obj->programs[i];
6492 if (!prog_is_subprog(obj, subprog))
6493 continue;
6494
6495 subprog->sub_insn_off = 0;
c3c55696
AN
6496 }
6497
6498 err = bpf_object__reloc_code(obj, prog, prog);
6499 if (err)
6500 return err;
6501
8a47a6c5
WN
6502 return 0;
6503}
6504
67234743
AS
6505static void
6506bpf_object__free_relocs(struct bpf_object *obj)
6507{
6508 struct bpf_program *prog;
6509 int i;
6510
6511 /* free up relocation descriptors */
6512 for (i = 0; i < obj->nr_programs; i++) {
6513 prog = &obj->programs[i];
6514 zfree(&prog->reloc_desc);
6515 prog->nr_reloc = 0;
6516 }
6517}
6518
d0e92887
AS
6519static int cmp_relocs(const void *_a, const void *_b)
6520{
6521 const struct reloc_desc *a = _a;
6522 const struct reloc_desc *b = _b;
6523
6524 if (a->insn_idx != b->insn_idx)
6525 return a->insn_idx < b->insn_idx ? -1 : 1;
6526
6527 /* no two relocations should have the same insn_idx, but ... */
6528 if (a->type != b->type)
6529 return a->type < b->type ? -1 : 1;
6530
6531 return 0;
6532}
6533
6534static void bpf_object__sort_relos(struct bpf_object *obj)
6535{
6536 int i;
6537
6538 for (i = 0; i < obj->nr_programs; i++) {
6539 struct bpf_program *p = &obj->programs[i];
6540
6541 if (!p->nr_reloc)
6542 continue;
6543
6544 qsort(p->reloc_desc, p->nr_reloc, sizeof(*p->reloc_desc), cmp_relocs);
6545 }
6546}
6547
fb03be7c
AN
6548static int bpf_prog_assign_exc_cb(struct bpf_object *obj, struct bpf_program *prog)
6549{
6550 const char *str = "exception_callback:";
6551 size_t pfx_len = strlen(str);
6552 int i, j, n;
6553
6554 if (!obj->btf || !kernel_supports(obj, FEAT_BTF_DECL_TAG))
6555 return 0;
6556
6557 n = btf__type_cnt(obj->btf);
6558 for (i = 1; i < n; i++) {
6559 const char *name;
6560 struct btf_type *t;
6561
6562 t = btf_type_by_id(obj->btf, i);
6563 if (!btf_is_decl_tag(t) || btf_decl_tag(t)->component_idx != -1)
6564 continue;
6565
6566 name = btf__str_by_offset(obj->btf, t->name_off);
6567 if (strncmp(name, str, pfx_len) != 0)
6568 continue;
6569
6570 t = btf_type_by_id(obj->btf, t->type);
6571 if (!btf_is_func(t) || btf_func_linkage(t) != BTF_FUNC_GLOBAL) {
6572 pr_warn("prog '%s': exception_callback:<value> decl tag not applied to the main program\n",
6573 prog->name);
6574 return -EINVAL;
6575 }
6576 if (strcmp(prog->name, btf__str_by_offset(obj->btf, t->name_off)) != 0)
6577 continue;
6578 /* Multiple callbacks are specified for the same prog,
6579 * the verifier will eventually return an error for this
6580 * case, hence simply skip appending a subprog.
6581 */
6582 if (prog->exception_cb_idx >= 0) {
6583 prog->exception_cb_idx = -1;
6584 break;
6585 }
6586
6587 name += pfx_len;
6588 if (str_is_empty(name)) {
6589 pr_warn("prog '%s': exception_callback:<value> decl tag contains empty value\n",
6590 prog->name);
6591 return -EINVAL;
6592 }
6593
6594 for (j = 0; j < obj->nr_programs; j++) {
6595 struct bpf_program *subprog = &obj->programs[j];
6596
6597 if (!prog_is_subprog(obj, subprog))
6598 continue;
6599 if (strcmp(name, subprog->name) != 0)
6600 continue;
6601 /* Enforce non-hidden, as from verifier point of
6602 * view it expects global functions, whereas the
6603 * mark_btf_static fixes up linkage as static.
6604 */
6605 if (!subprog->sym_global || subprog->mark_btf_static) {
6606 pr_warn("prog '%s': exception callback %s must be a global non-hidden function\n",
6607 prog->name, subprog->name);
6608 return -EINVAL;
6609 }
6610 /* Let's see if we already saw a static exception callback with the same name */
6611 if (prog->exception_cb_idx >= 0) {
6612 pr_warn("prog '%s': multiple subprogs with same name as exception callback '%s'\n",
6613 prog->name, subprog->name);
6614 return -EINVAL;
6615 }
6616 prog->exception_cb_idx = j;
6617 break;
6618 }
6619
6620 if (prog->exception_cb_idx >= 0)
6621 continue;
6622
6623 pr_warn("prog '%s': cannot find exception callback '%s'\n", prog->name, name);
6624 return -ENOENT;
6625 }
6626
6627 return 0;
6628}
6629
2f38fe68
AN
6630static struct {
6631 enum bpf_prog_type prog_type;
6632 const char *ctx_name;
6633} global_ctx_map[] = {
6634 { BPF_PROG_TYPE_CGROUP_DEVICE, "bpf_cgroup_dev_ctx" },
6635 { BPF_PROG_TYPE_CGROUP_SKB, "__sk_buff" },
6636 { BPF_PROG_TYPE_CGROUP_SOCK, "bpf_sock" },
6637 { BPF_PROG_TYPE_CGROUP_SOCK_ADDR, "bpf_sock_addr" },
6638 { BPF_PROG_TYPE_CGROUP_SOCKOPT, "bpf_sockopt" },
6639 { BPF_PROG_TYPE_CGROUP_SYSCTL, "bpf_sysctl" },
6640 { BPF_PROG_TYPE_FLOW_DISSECTOR, "__sk_buff" },
6641 { BPF_PROG_TYPE_KPROBE, "bpf_user_pt_regs_t" },
6642 { BPF_PROG_TYPE_LWT_IN, "__sk_buff" },
6643 { BPF_PROG_TYPE_LWT_OUT, "__sk_buff" },
6644 { BPF_PROG_TYPE_LWT_SEG6LOCAL, "__sk_buff" },
6645 { BPF_PROG_TYPE_LWT_XMIT, "__sk_buff" },
6646 { BPF_PROG_TYPE_NETFILTER, "bpf_nf_ctx" },
6647 { BPF_PROG_TYPE_PERF_EVENT, "bpf_perf_event_data" },
6648 { BPF_PROG_TYPE_RAW_TRACEPOINT, "bpf_raw_tracepoint_args" },
6649 { BPF_PROG_TYPE_RAW_TRACEPOINT_WRITABLE, "bpf_raw_tracepoint_args" },
6650 { BPF_PROG_TYPE_SCHED_ACT, "__sk_buff" },
6651 { BPF_PROG_TYPE_SCHED_CLS, "__sk_buff" },
6652 { BPF_PROG_TYPE_SK_LOOKUP, "bpf_sk_lookup" },
6653 { BPF_PROG_TYPE_SK_MSG, "sk_msg_md" },
6654 { BPF_PROG_TYPE_SK_REUSEPORT, "sk_reuseport_md" },
6655 { BPF_PROG_TYPE_SK_SKB, "__sk_buff" },
6656 { BPF_PROG_TYPE_SOCK_OPS, "bpf_sock_ops" },
6657 { BPF_PROG_TYPE_SOCKET_FILTER, "__sk_buff" },
6658 { BPF_PROG_TYPE_XDP, "xdp_md" },
6659 /* all other program types don't have "named" context structs */
6660};
6661
9eea8faf
AN
6662/* forward declarations for arch-specific underlying types of bpf_user_pt_regs_t typedef,
6663 * for below __builtin_types_compatible_p() checks;
6664 * with this approach we don't need any extra arch-specific #ifdef guards
6665 */
6666struct pt_regs;
6667struct user_pt_regs;
6668struct user_regs_struct;
6669
76ec90a9
AN
6670static bool need_func_arg_type_fixup(const struct btf *btf, const struct bpf_program *prog,
6671 const char *subprog_name, int arg_idx,
6672 int arg_type_id, const char *ctx_name)
6673{
6674 const struct btf_type *t;
6675 const char *tname;
6676
6677 /* check if existing parameter already matches verifier expectations */
6678 t = skip_mods_and_typedefs(btf, arg_type_id, NULL);
6679 if (!btf_is_ptr(t))
6680 goto out_warn;
6681
6682 /* typedef bpf_user_pt_regs_t is a special PITA case, valid for kprobe
6683 * and perf_event programs, so check this case early on and forget
6684 * about it for subsequent checks
6685 */
6686 while (btf_is_mod(t))
6687 t = btf__type_by_id(btf, t->type);
6688 if (btf_is_typedef(t) &&
6689 (prog->type == BPF_PROG_TYPE_KPROBE || prog->type == BPF_PROG_TYPE_PERF_EVENT)) {
6690 tname = btf__str_by_offset(btf, t->name_off) ?: "<anon>";
6691 if (strcmp(tname, "bpf_user_pt_regs_t") == 0)
6692 return false; /* canonical type for kprobe/perf_event */
6693 }
6694
6695 /* now we can ignore typedefs moving forward */
6696 t = skip_mods_and_typedefs(btf, t->type, NULL);
6697
6698 /* if it's `void *`, definitely fix up BTF info */
6699 if (btf_is_void(t))
6700 return true;
6701
6702 /* if it's already proper canonical type, no need to fix up */
6703 tname = btf__str_by_offset(btf, t->name_off) ?: "<anon>";
6704 if (btf_is_struct(t) && strcmp(tname, ctx_name) == 0)
6705 return false;
6706
6707 /* special cases */
6708 switch (prog->type) {
6709 case BPF_PROG_TYPE_KPROBE:
76ec90a9
AN
6710 /* `struct pt_regs *` is expected, but we need to fix up */
6711 if (btf_is_struct(t) && strcmp(tname, "pt_regs") == 0)
6712 return true;
6713 break;
9eea8faf
AN
6714 case BPF_PROG_TYPE_PERF_EVENT:
6715 if (__builtin_types_compatible_p(bpf_user_pt_regs_t, struct pt_regs) &&
6716 btf_is_struct(t) && strcmp(tname, "pt_regs") == 0)
d7bc416a 6717 return true;
9eea8faf
AN
6718 if (__builtin_types_compatible_p(bpf_user_pt_regs_t, struct user_pt_regs) &&
6719 btf_is_struct(t) && strcmp(tname, "user_pt_regs") == 0)
d7bc416a 6720 return true;
9eea8faf
AN
6721 if (__builtin_types_compatible_p(bpf_user_pt_regs_t, struct user_regs_struct) &&
6722 btf_is_struct(t) && strcmp(tname, "user_regs_struct") == 0)
d7bc416a 6723 return true;
9eea8faf 6724 break;
76ec90a9
AN
6725 case BPF_PROG_TYPE_RAW_TRACEPOINT:
6726 case BPF_PROG_TYPE_RAW_TRACEPOINT_WRITABLE:
6727 /* allow u64* as ctx */
6728 if (btf_is_int(t) && t->size == 8)
6729 return true;
6730 break;
6731 default:
6732 break;
6733 }
6734
6735out_warn:
6736 pr_warn("prog '%s': subprog '%s' arg#%d is expected to be of `struct %s *` type\n",
6737 prog->name, subprog_name, arg_idx, ctx_name);
6738 return false;
6739}
6740
2f38fe68
AN
6741static int clone_func_btf_info(struct btf *btf, int orig_fn_id, struct bpf_program *prog)
6742{
6743 int fn_id, fn_proto_id, ret_type_id, orig_proto_id;
6744 int i, err, arg_cnt, fn_name_off, linkage;
6745 struct btf_type *fn_t, *fn_proto_t, *t;
6746 struct btf_param *p;
6747
6748 /* caller already validated FUNC -> FUNC_PROTO validity */
6749 fn_t = btf_type_by_id(btf, orig_fn_id);
6750 fn_proto_t = btf_type_by_id(btf, fn_t->type);
6751
6752 /* Note that each btf__add_xxx() operation invalidates
6753 * all btf_type and string pointers, so we need to be
6754 * very careful when cloning BTF types. BTF type
6755 * pointers have to be always refetched. And to avoid
6756 * problems with invalidated string pointers, we
6757 * add empty strings initially, then just fix up
6758 * name_off offsets in place. Offsets are stable for
6759 * existing strings, so that works out.
6760 */
6761 fn_name_off = fn_t->name_off; /* we are about to invalidate fn_t */
6762 linkage = btf_func_linkage(fn_t);
6763 orig_proto_id = fn_t->type; /* original FUNC_PROTO ID */
6764 ret_type_id = fn_proto_t->type; /* fn_proto_t will be invalidated */
6765 arg_cnt = btf_vlen(fn_proto_t);
6766
6767 /* clone FUNC_PROTO and its params */
6768 fn_proto_id = btf__add_func_proto(btf, ret_type_id);
6769 if (fn_proto_id < 0)
6770 return -EINVAL;
6771
6772 for (i = 0; i < arg_cnt; i++) {
6773 int name_off;
6774
6775 /* copy original parameter data */
6776 t = btf_type_by_id(btf, orig_proto_id);
6777 p = &btf_params(t)[i];
6778 name_off = p->name_off;
6779
6780 err = btf__add_func_param(btf, "", p->type);
6781 if (err)
6782 return err;
6783
6784 fn_proto_t = btf_type_by_id(btf, fn_proto_id);
6785 p = &btf_params(fn_proto_t)[i];
6786 p->name_off = name_off; /* use remembered str offset */
6787 }
6788
6789 /* clone FUNC now, btf__add_func() enforces non-empty name, so use
6790 * entry program's name as a placeholder, which we replace immediately
6791 * with original name_off
6792 */
6793 fn_id = btf__add_func(btf, prog->name, linkage, fn_proto_id);
6794 if (fn_id < 0)
6795 return -EINVAL;
6796
6797 fn_t = btf_type_by_id(btf, fn_id);
6798 fn_t->name_off = fn_name_off; /* reuse original string */
6799
6800 return fn_id;
6801}
6802
6803/* Check if main program or global subprog's function prototype has `arg:ctx`
6804 * argument tags, and, if necessary, substitute correct type to match what BPF
6805 * verifier would expect, taking into account specific program type. This
6806 * allows to support __arg_ctx tag transparently on old kernels that don't yet
6807 * have a native support for it in the verifier, making user's life much
6808 * easier.
6809 */
6810static int bpf_program_fixup_func_info(struct bpf_object *obj, struct bpf_program *prog)
6811{
76ec90a9 6812 const char *ctx_name = NULL, *ctx_tag = "arg:ctx", *fn_name;
2f38fe68
AN
6813 struct bpf_func_info_min *func_rec;
6814 struct btf_type *fn_t, *fn_proto_t;
6815 struct btf *btf = obj->btf;
6816 const struct btf_type *t;
6817 struct btf_param *p;
6818 int ptr_id = 0, struct_id, tag_id, orig_fn_id;
6819 int i, n, arg_idx, arg_cnt, err, rec_idx;
6820 int *orig_ids;
6821
6822 /* no .BTF.ext, no problem */
6823 if (!obj->btf_ext || !prog->func_info)
6824 return 0;
6825
01b55f4f 6826 /* don't do any fix ups if kernel natively supports __arg_ctx */
0e6d0a9d 6827 if (kernel_supports(obj, FEAT_ARG_CTX_TAG))
01b55f4f
AN
6828 return 0;
6829
2f38fe68
AN
6830 /* some BPF program types just don't have named context structs, so
6831 * this fallback mechanism doesn't work for them
6832 */
6833 for (i = 0; i < ARRAY_SIZE(global_ctx_map); i++) {
6834 if (global_ctx_map[i].prog_type != prog->type)
6835 continue;
6836 ctx_name = global_ctx_map[i].ctx_name;
6837 break;
6838 }
6839 if (!ctx_name)
6840 return 0;
6841
6842 /* remember original func BTF IDs to detect if we already cloned them */
6843 orig_ids = calloc(prog->func_info_cnt, sizeof(*orig_ids));
6844 if (!orig_ids)
6845 return -ENOMEM;
6846 for (i = 0; i < prog->func_info_cnt; i++) {
6847 func_rec = prog->func_info + prog->func_info_rec_size * i;
6848 orig_ids[i] = func_rec->type_id;
6849 }
6850
6851 /* go through each DECL_TAG with "arg:ctx" and see if it points to one
6852 * of our subprogs; if yes and subprog is global and needs adjustment,
6853 * clone and adjust FUNC -> FUNC_PROTO combo
6854 */
6855 for (i = 1, n = btf__type_cnt(btf); i < n; i++) {
6856 /* only DECL_TAG with "arg:ctx" value are interesting */
6857 t = btf__type_by_id(btf, i);
6858 if (!btf_is_decl_tag(t))
6859 continue;
6860 if (strcmp(btf__str_by_offset(btf, t->name_off), ctx_tag) != 0)
6861 continue;
6862
6863 /* only global funcs need adjustment, if at all */
6864 orig_fn_id = t->type;
6865 fn_t = btf_type_by_id(btf, orig_fn_id);
6866 if (!btf_is_func(fn_t) || btf_func_linkage(fn_t) != BTF_FUNC_GLOBAL)
6867 continue;
6868
6869 /* sanity check FUNC -> FUNC_PROTO chain, just in case */
6870 fn_proto_t = btf_type_by_id(btf, fn_t->type);
6871 if (!fn_proto_t || !btf_is_func_proto(fn_proto_t))
6872 continue;
6873
6874 /* find corresponding func_info record */
6875 func_rec = NULL;
6876 for (rec_idx = 0; rec_idx < prog->func_info_cnt; rec_idx++) {
6877 if (orig_ids[rec_idx] == t->type) {
6878 func_rec = prog->func_info + prog->func_info_rec_size * rec_idx;
6879 break;
6880 }
6881 }
6882 /* current main program doesn't call into this subprog */
6883 if (!func_rec)
6884 continue;
6885
6886 /* some more sanity checking of DECL_TAG */
6887 arg_cnt = btf_vlen(fn_proto_t);
6888 arg_idx = btf_decl_tag(t)->component_idx;
6889 if (arg_idx < 0 || arg_idx >= arg_cnt)
6890 continue;
6891
76ec90a9 6892 /* check if we should fix up argument type */
2f38fe68 6893 p = &btf_params(fn_proto_t)[arg_idx];
76ec90a9
AN
6894 fn_name = btf__str_by_offset(btf, fn_t->name_off) ?: "<anon>";
6895 if (!need_func_arg_type_fixup(btf, prog, fn_name, arg_idx, p->type, ctx_name))
6896 continue;
2f38fe68
AN
6897
6898 /* clone fn/fn_proto, unless we already did it for another arg */
6899 if (func_rec->type_id == orig_fn_id) {
6900 int fn_id;
6901
6902 fn_id = clone_func_btf_info(btf, orig_fn_id, prog);
6903 if (fn_id < 0) {
6904 err = fn_id;
6905 goto err_out;
6906 }
6907
6908 /* point func_info record to a cloned FUNC type */
6909 func_rec->type_id = fn_id;
6910 }
6911
6912 /* create PTR -> STRUCT type chain to mark PTR_TO_CTX argument;
6913 * we do it just once per main BPF program, as all global
6914 * funcs share the same program type, so need only PTR ->
6915 * STRUCT type chain
6916 */
6917 if (ptr_id == 0) {
6918 struct_id = btf__add_struct(btf, ctx_name, 0);
6919 ptr_id = btf__add_ptr(btf, struct_id);
6920 if (ptr_id < 0 || struct_id < 0) {
6921 err = -EINVAL;
6922 goto err_out;
6923 }
6924 }
6925
6926 /* for completeness, clone DECL_TAG and point it to cloned param */
6927 tag_id = btf__add_decl_tag(btf, ctx_tag, func_rec->type_id, arg_idx);
6928 if (tag_id < 0) {
6929 err = -EINVAL;
6930 goto err_out;
6931 }
6932
6933 /* all the BTF manipulations invalidated pointers, refetch them */
6934 fn_t = btf_type_by_id(btf, func_rec->type_id);
6935 fn_proto_t = btf_type_by_id(btf, fn_t->type);
6936
6937 /* fix up type ID pointed to by param */
6938 p = &btf_params(fn_proto_t)[arg_idx];
6939 p->type = ptr_id;
6940 }
6941
6942 free(orig_ids);
6943 return 0;
6944err_out:
6945 free(orig_ids);
6946 return err;
6947}
6948
6949static int bpf_object__relocate(struct bpf_object *obj, const char *targ_btf_path)
8a47a6c5
WN
6950{
6951 struct bpf_program *prog;
b1268826 6952 size_t i, j;
8a47a6c5
WN
6953 int err;
6954
ddc7c304
AN
6955 if (obj->btf_ext) {
6956 err = bpf_object__relocate_core(obj, targ_btf_path);
6957 if (err) {
be18010e
KW
6958 pr_warn("failed to perform CO-RE relocations: %d\n",
6959 err);
ddc7c304
AN
6960 return err;
6961 }
185cfe83 6962 bpf_object__sort_relos(obj);
ddc7c304 6963 }
b1268826
AS
6964
6965 /* Before relocating calls pre-process relocations and mark
6966 * few ld_imm64 instructions that points to subprogs.
6967 * Otherwise bpf_object__reloc_code() later would have to consider
6968 * all ld_imm64 insns as relocation candidates. That would
6969 * reduce relocation speed, since amount of find_prog_insn_relo()
6970 * would increase and most of them will fail to find a relo.
9173cac3
AN
6971 */
6972 for (i = 0; i < obj->nr_programs; i++) {
6973 prog = &obj->programs[i];
b1268826
AS
6974 for (j = 0; j < prog->nr_reloc; j++) {
6975 struct reloc_desc *relo = &prog->reloc_desc[j];
6976 struct bpf_insn *insn = &prog->insns[relo->insn_idx];
6977
6978 /* mark the insn, so it's recognized by insn_is_pseudo_func() */
6979 if (relo->type == RELO_SUBPROG_ADDR)
6980 insn[0].src_reg = BPF_PSEUDO_FUNC;
9173cac3 6981 }
9173cac3 6982 }
b1268826
AS
6983
6984 /* relocate subprogram calls and append used subprograms to main
c3c55696
AN
6985 * programs; each copy of subprogram code needs to be relocated
6986 * differently for each main program, because its code location might
b1268826
AS
6987 * have changed.
6988 * Append subprog relos to main programs to allow data relos to be
6989 * processed after text is completely relocated.
9173cac3 6990 */
8a47a6c5
WN
6991 for (i = 0; i < obj->nr_programs; i++) {
6992 prog = &obj->programs[i];
c3c55696
AN
6993 /* sub-program's sub-calls are relocated within the context of
6994 * its main program only
6995 */
6996 if (prog_is_subprog(obj, prog))
9173cac3 6997 continue;
a3820c48 6998 if (!prog->autoload)
16e0c35c 6999 continue;
8a47a6c5 7000
c3c55696 7001 err = bpf_object__relocate_calls(obj, prog);
8a47a6c5 7002 if (err) {
9c0f8cbd
AN
7003 pr_warn("prog '%s': failed to relocate calls: %d\n",
7004 prog->name, err);
8a47a6c5
WN
7005 return err;
7006 }
7e2925f6 7007
fb03be7c
AN
7008 err = bpf_prog_assign_exc_cb(obj, prog);
7009 if (err)
7010 return err;
7e2925f6
KKD
7011 /* Now, also append exception callback if it has not been done already. */
7012 if (prog->exception_cb_idx >= 0) {
7013 struct bpf_program *subprog = &obj->programs[prog->exception_cb_idx];
7014
7015 /* Calling exception callback directly is disallowed, which the
7016 * verifier will reject later. In case it was processed already,
7017 * we can skip this step, otherwise for all other valid cases we
7018 * have to append exception callback now.
7019 */
7020 if (subprog->sub_insn_off == 0) {
7021 err = bpf_object__append_subprog_code(obj, prog, subprog);
7022 if (err)
7023 return err;
7024 err = bpf_object__reloc_code(obj, prog, subprog);
7025 if (err)
7026 return err;
7027 }
7028 }
8a47a6c5 7029 }
b1268826
AS
7030 for (i = 0; i < obj->nr_programs; i++) {
7031 prog = &obj->programs[i];
7032 if (prog_is_subprog(obj, prog))
7033 continue;
a3820c48 7034 if (!prog->autoload)
16e0c35c 7035 continue;
2f38fe68
AN
7036
7037 /* Process data relos for main programs */
b1268826
AS
7038 err = bpf_object__relocate_data(obj, prog);
7039 if (err) {
7040 pr_warn("prog '%s': failed to relocate data references: %d\n",
7041 prog->name, err);
7042 return err;
7043 }
2f38fe68
AN
7044
7045 /* Fix up .BTF.ext information, if necessary */
7046 err = bpf_program_fixup_func_info(obj, prog);
7047 if (err) {
7048 pr_warn("prog '%s': failed to perform .BTF.ext fix ups: %d\n",
7049 prog->name, err);
7050 return err;
7051 }
b1268826 7052 }
185cfe83 7053
8a47a6c5
WN
7054 return 0;
7055}
7056
646f02ff 7057static int bpf_object__collect_st_ops_relos(struct bpf_object *obj,
ad23b723 7058 Elf64_Shdr *shdr, Elf_Data *data);
646f02ff
AN
7059
7060static int bpf_object__collect_map_relos(struct bpf_object *obj,
ad23b723 7061 Elf64_Shdr *shdr, Elf_Data *data)
646f02ff 7062{
15728ad3
AN
7063 const int bpf_ptr_sz = 8, host_ptr_sz = sizeof(void *);
7064 int i, j, nrels, new_sz;
063e6881 7065 const struct btf_var_secinfo *vi = NULL;
646f02ff 7066 const struct btf_type *sec, *var, *def;
341ac5ff
HC
7067 struct bpf_map *map = NULL, *targ_map = NULL;
7068 struct bpf_program *targ_prog = NULL;
7069 bool is_prog_array, is_map_in_map;
646f02ff 7070 const struct btf_member *member;
341ac5ff 7071 const char *name, *mname, *type;
646f02ff 7072 unsigned int moff;
ad23b723
AN
7073 Elf64_Sym *sym;
7074 Elf64_Rel *rel;
646f02ff
AN
7075 void *tmp;
7076
7077 if (!obj->efile.btf_maps_sec_btf_id || !obj->btf)
7078 return -EINVAL;
7079 sec = btf__type_by_id(obj->btf, obj->efile.btf_maps_sec_btf_id);
7080 if (!sec)
7081 return -EINVAL;
7082
646f02ff
AN
7083 nrels = shdr->sh_size / shdr->sh_entsize;
7084 for (i = 0; i < nrels; i++) {
ad23b723
AN
7085 rel = elf_rel_by_idx(data, i);
7086 if (!rel) {
646f02ff
AN
7087 pr_warn(".maps relo #%d: failed to get ELF relo\n", i);
7088 return -LIBBPF_ERRNO__FORMAT;
7089 }
ad23b723
AN
7090
7091 sym = elf_sym_by_idx(obj, ELF64_R_SYM(rel->r_info));
7092 if (!sym) {
646f02ff 7093 pr_warn(".maps relo #%d: symbol %zx not found\n",
ad23b723 7094 i, (size_t)ELF64_R_SYM(rel->r_info));
646f02ff
AN
7095 return -LIBBPF_ERRNO__FORMAT;
7096 }
ad23b723 7097 name = elf_sym_str(obj, sym->st_name) ?: "<?>";
646f02ff 7098
ad23b723
AN
7099 pr_debug(".maps relo #%d: for %zd value %zd rel->r_offset %zu name %d ('%s')\n",
7100 i, (ssize_t)(rel->r_info >> 32), (size_t)sym->st_value,
7101 (size_t)rel->r_offset, sym->st_name, name);
646f02ff
AN
7102
7103 for (j = 0; j < obj->nr_maps; j++) {
7104 map = &obj->maps[j];
7105 if (map->sec_idx != obj->efile.btf_maps_shndx)
7106 continue;
7107
7108 vi = btf_var_secinfos(sec) + map->btf_var_idx;
ad23b723
AN
7109 if (vi->offset <= rel->r_offset &&
7110 rel->r_offset + bpf_ptr_sz <= vi->offset + vi->size)
646f02ff
AN
7111 break;
7112 }
7113 if (j == obj->nr_maps) {
ad23b723
AN
7114 pr_warn(".maps relo #%d: cannot find map '%s' at rel->r_offset %zu\n",
7115 i, name, (size_t)rel->r_offset);
646f02ff
AN
7116 return -EINVAL;
7117 }
7118
341ac5ff
HC
7119 is_map_in_map = bpf_map_type__is_map_in_map(map->def.type);
7120 is_prog_array = map->def.type == BPF_MAP_TYPE_PROG_ARRAY;
7121 type = is_map_in_map ? "map" : "prog";
7122 if (is_map_in_map) {
7123 if (sym->st_shndx != obj->efile.btf_maps_shndx) {
7124 pr_warn(".maps relo #%d: '%s' isn't a BTF-defined map\n",
7125 i, name);
7126 return -LIBBPF_ERRNO__RELOC;
7127 }
7128 if (map->def.type == BPF_MAP_TYPE_HASH_OF_MAPS &&
7129 map->def.key_size != sizeof(int)) {
7130 pr_warn(".maps relo #%d: hash-of-maps '%s' should have key size %zu.\n",
7131 i, map->name, sizeof(int));
7132 return -EINVAL;
7133 }
7134 targ_map = bpf_object__find_map_by_name(obj, name);
7135 if (!targ_map) {
7136 pr_warn(".maps relo #%d: '%s' isn't a valid map reference\n",
7137 i, name);
7138 return -ESRCH;
7139 }
7140 } else if (is_prog_array) {
7141 targ_prog = bpf_object__find_program_by_name(obj, name);
7142 if (!targ_prog) {
7143 pr_warn(".maps relo #%d: '%s' isn't a valid program reference\n",
7144 i, name);
7145 return -ESRCH;
7146 }
7147 if (targ_prog->sec_idx != sym->st_shndx ||
7148 targ_prog->sec_insn_off * 8 != sym->st_value ||
7149 prog_is_subprog(obj, targ_prog)) {
7150 pr_warn(".maps relo #%d: '%s' isn't an entry-point program\n",
7151 i, name);
7152 return -LIBBPF_ERRNO__RELOC;
7153 }
7154 } else {
646f02ff
AN
7155 return -EINVAL;
7156 }
7157
646f02ff
AN
7158 var = btf__type_by_id(obj->btf, vi->type);
7159 def = skip_mods_and_typedefs(obj->btf, var->type, NULL);
7160 if (btf_vlen(def) == 0)
7161 return -EINVAL;
7162 member = btf_members(def) + btf_vlen(def) - 1;
7163 mname = btf__name_by_offset(obj->btf, member->name_off);
7164 if (strcmp(mname, "values"))
7165 return -EINVAL;
7166
7167 moff = btf_member_bit_offset(def, btf_vlen(def) - 1) / 8;
ad23b723 7168 if (rel->r_offset - vi->offset < moff)
646f02ff
AN
7169 return -EINVAL;
7170
ad23b723 7171 moff = rel->r_offset - vi->offset - moff;
15728ad3
AN
7172 /* here we use BPF pointer size, which is always 64 bit, as we
7173 * are parsing ELF that was built for BPF target
7174 */
7175 if (moff % bpf_ptr_sz)
646f02ff 7176 return -EINVAL;
15728ad3 7177 moff /= bpf_ptr_sz;
646f02ff
AN
7178 if (moff >= map->init_slots_sz) {
7179 new_sz = moff + 1;
029258d7 7180 tmp = libbpf_reallocarray(map->init_slots, new_sz, host_ptr_sz);
646f02ff
AN
7181 if (!tmp)
7182 return -ENOMEM;
7183 map->init_slots = tmp;
7184 memset(map->init_slots + map->init_slots_sz, 0,
15728ad3 7185 (new_sz - map->init_slots_sz) * host_ptr_sz);
646f02ff
AN
7186 map->init_slots_sz = new_sz;
7187 }
341ac5ff 7188 map->init_slots[moff] = is_map_in_map ? (void *)targ_map : (void *)targ_prog;
646f02ff 7189
341ac5ff
HC
7190 pr_debug(".maps relo #%d: map '%s' slot [%d] points to %s '%s'\n",
7191 i, map->name, moff, type, name);
646f02ff
AN
7192 }
7193
7194 return 0;
7195}
590a0088 7196
c3c55696
AN
7197static int bpf_object__collect_relos(struct bpf_object *obj)
7198{
7199 int i, err;
34090915 7200
25bbbd7a
AN
7201 for (i = 0; i < obj->efile.sec_cnt; i++) {
7202 struct elf_sec_desc *sec_desc = &obj->efile.secs[i];
7203 Elf64_Shdr *shdr;
7204 Elf_Data *data;
7205 int idx;
7206
7207 if (sec_desc->sec_type != SEC_RELO)
7208 continue;
7209
7210 shdr = sec_desc->shdr;
7211 data = sec_desc->data;
7212 idx = shdr->sh_info;
34090915 7213
240bf8a5 7214 if (shdr->sh_type != SHT_REL || idx < 0 || idx >= obj->efile.sec_cnt) {
be18010e 7215 pr_warn("internal error at %d\n", __LINE__);
6371ca3b 7216 return -LIBBPF_ERRNO__INTERNAL;
34090915
WN
7217 }
7218
240bf8a5 7219 if (obj->efile.secs[idx].sec_type == SEC_ST_OPS)
646f02ff 7220 err = bpf_object__collect_st_ops_relos(obj, shdr, data);
c3c55696 7221 else if (idx == obj->efile.btf_maps_shndx)
646f02ff 7222 err = bpf_object__collect_map_relos(obj, shdr, data);
c3c55696
AN
7223 else
7224 err = bpf_object__collect_prog_relos(obj, shdr, data);
34090915 7225 if (err)
6371ca3b 7226 return err;
34090915 7227 }
c3c55696 7228
d0e92887 7229 bpf_object__sort_relos(obj);
34090915
WN
7230 return 0;
7231}
7232
109cea5a
AN
7233static bool insn_is_helper_call(struct bpf_insn *insn, enum bpf_func_id *func_id)
7234{
9b2f6fec 7235 if (BPF_CLASS(insn->code) == BPF_JMP &&
109cea5a
AN
7236 BPF_OP(insn->code) == BPF_CALL &&
7237 BPF_SRC(insn->code) == BPF_K &&
9b2f6fec
AN
7238 insn->src_reg == 0 &&
7239 insn->dst_reg == 0) {
7240 *func_id = insn->imm;
109cea5a
AN
7241 return true;
7242 }
7243 return false;
7244}
7245
42869d28 7246static int bpf_object__sanitize_prog(struct bpf_object *obj, struct bpf_program *prog)
109cea5a
AN
7247{
7248 struct bpf_insn *insn = prog->insns;
7249 enum bpf_func_id func_id;
7250 int i;
7251
67234743
AS
7252 if (obj->gen_loader)
7253 return 0;
7254
109cea5a
AN
7255 for (i = 0; i < prog->insns_cnt; i++, insn++) {
7256 if (!insn_is_helper_call(insn, &func_id))
7257 continue;
7258
7259 /* on kernels that don't yet support
7260 * bpf_probe_read_{kernel,user}[_str] helpers, fall back
7261 * to bpf_probe_read() which works well for old kernels
7262 */
7263 switch (func_id) {
7264 case BPF_FUNC_probe_read_kernel:
7265 case BPF_FUNC_probe_read_user:
9ca1f56a 7266 if (!kernel_supports(obj, FEAT_PROBE_READ_KERN))
109cea5a
AN
7267 insn->imm = BPF_FUNC_probe_read;
7268 break;
7269 case BPF_FUNC_probe_read_kernel_str:
7270 case BPF_FUNC_probe_read_user_str:
9ca1f56a 7271 if (!kernel_supports(obj, FEAT_PROBE_READ_KERN))
109cea5a
AN
7272 insn->imm = BPF_FUNC_probe_read_str;
7273 break;
7274 default:
7275 break;
7276 }
7277 }
7278 return 0;
7279}
7280
15ea31fa
AN
7281static int libbpf_find_attach_btf_id(struct bpf_program *prog, const char *attach_name,
7282 int *btf_obj_fd, int *btf_type_id);
12d9466d 7283
4fa5bcfe
AN
7284/* this is called as prog->sec_def->prog_prepare_load_fn for libbpf-supported sec_defs */
7285static int libbpf_prepare_prog_load(struct bpf_program *prog,
7286 struct bpf_prog_load_opts *opts, long cookie)
12d9466d 7287{
15ea31fa
AN
7288 enum sec_def_flags def = cookie;
7289
12d9466d 7290 /* old kernels might not support specifying expected_attach_type */
15ea31fa 7291 if ((def & SEC_EXP_ATTACH_OPT) && !kernel_supports(prog->obj, FEAT_EXP_ATTACH_TYPE))
d10ef2b8 7292 opts->expected_attach_type = 0;
12d9466d 7293
15ea31fa 7294 if (def & SEC_SLEEPABLE)
d10ef2b8 7295 opts->prog_flags |= BPF_F_SLEEPABLE;
12d9466d 7296
082c4bfb
LB
7297 if (prog->type == BPF_PROG_TYPE_XDP && (def & SEC_XDP_FRAGS))
7298 opts->prog_flags |= BPF_F_XDP_HAS_FRAGS;
7299
5902da6d
JO
7300 /* special check for usdt to use uprobe_multi link */
7301 if ((def & SEC_USDT) && kernel_supports(prog->obj, FEAT_UPROBE_MULTI_LINK))
7302 prog->expected_attach_type = BPF_TRACE_UPROBE_MULTI;
7303
cc7d8f2c 7304 if ((def & SEC_ATTACH_BTF) && !prog->attach_btf_id) {
12d9466d 7305 int btf_obj_fd = 0, btf_type_id = 0, err;
15ea31fa 7306 const char *attach_name;
12d9466d 7307
cc7d8f2c
AN
7308 attach_name = strchr(prog->sec_name, '/');
7309 if (!attach_name) {
7310 /* if BPF program is annotated with just SEC("fentry")
7311 * (or similar) without declaratively specifying
7312 * target, then it is expected that target will be
7313 * specified with bpf_program__set_attach_target() at
7314 * runtime before BPF object load step. If not, then
7315 * there is nothing to load into the kernel as BPF
7316 * verifier won't be able to validate BPF program
7317 * correctness anyways.
7318 */
7319 pr_warn("prog '%s': no BTF-based attach target is specified, use bpf_program__set_attach_target()\n",
7320 prog->name);
7321 return -EINVAL;
7322 }
7323 attach_name++; /* skip over / */
7324
15ea31fa 7325 err = libbpf_find_attach_btf_id(prog, attach_name, &btf_obj_fd, &btf_type_id);
12d9466d
AN
7326 if (err)
7327 return err;
7328
7329 /* cache resolved BTF FD and BTF type ID in the prog */
7330 prog->attach_btf_obj_fd = btf_obj_fd;
7331 prog->attach_btf_id = btf_type_id;
7332
7333 /* but by now libbpf common logic is not utilizing
7334 * prog->atach_btf_obj_fd/prog->attach_btf_id anymore because
d10ef2b8
AN
7335 * this callback is called after opts were populated by
7336 * libbpf, so this callback has to update opts explicitly here
12d9466d 7337 */
d10ef2b8
AN
7338 opts->attach_btf_obj_fd = btf_obj_fd;
7339 opts->attach_btf_id = btf_type_id;
12d9466d
AN
7340 }
7341 return 0;
7342}
7343
9fdc4273
AN
7344static void fixup_verifier_log(struct bpf_program *prog, char *buf, size_t buf_sz);
7345
cf90a20d
AN
7346static int bpf_object_load_prog(struct bpf_object *obj, struct bpf_program *prog,
7347 struct bpf_insn *insns, int insns_cnt,
7348 const char *license, __u32 kern_version, int *prog_fd)
55cffde2 7349{
d10ef2b8
AN
7350 LIBBPF_OPTS(bpf_prog_load_opts, load_attr);
7351 const char *prog_name = NULL;
1ce6a9fc 7352 char *cp, errmsg[STRERR_BUFSIZE];
8395f320 7353 size_t log_buf_size = 0;
b3ce9079 7354 char *log_buf = NULL, *tmp;
b3ce9079
AN
7355 bool own_log_buf = true;
7356 __u32 log_level = prog->log_level;
9bf48fa1 7357 int ret, err;
55cffde2 7358
80b2b5c3
AM
7359 if (prog->type == BPF_PROG_TYPE_UNSPEC) {
7360 /*
7361 * The program type must be set. Most likely we couldn't find a proper
7362 * section definition at load time, and thus we didn't infer the type.
7363 */
7364 pr_warn("prog '%s': missing BPF prog type, check ELF section name '%s'\n",
7365 prog->name, prog->sec_name);
7366 return -EINVAL;
7367 }
7368
fba01a06
AN
7369 if (!insns || !insns_cnt)
7370 return -EINVAL;
7371
25bbbd7a 7372 if (kernel_supports(obj, FEAT_PROG_NAME))
d10ef2b8 7373 prog_name = prog->name;
12d9466d
AN
7374 load_attr.attach_prog_fd = prog->attach_prog_fd;
7375 load_attr.attach_btf_obj_fd = prog->attach_btf_obj_fd;
6aef10a4
AN
7376 load_attr.attach_btf_id = prog->attach_btf_id;
7377 load_attr.kern_version = kern_version;
7378 load_attr.prog_ifindex = prog->prog_ifindex;
7379
0f0e55d8 7380 /* specify func_info/line_info only if kernel supports them */
9bf48fa1
QM
7381 if (obj->btf && btf__fd(obj->btf) >= 0 && kernel_supports(obj, FEAT_BTF_FUNC)) {
7382 load_attr.prog_btf_fd = btf__fd(obj->btf);
0f0e55d8
AN
7383 load_attr.func_info = prog->func_info;
7384 load_attr.func_info_rec_size = prog->func_info_rec_size;
7385 load_attr.func_info_cnt = prog->func_info_cnt;
7386 load_attr.line_info = prog->line_info;
7387 load_attr.line_info_rec_size = prog->line_info_rec_size;
7388 load_attr.line_info_cnt = prog->line_info_cnt;
7389 }
b3ce9079 7390 load_attr.log_level = log_level;
04656198 7391 load_attr.prog_flags = prog->prog_flags;
25bbbd7a 7392 load_attr.fd_array = obj->fd_array;
55cffde2 7393
6b434b61
AN
7394 load_attr.token_fd = obj->token_fd;
7395 if (obj->token_fd)
7396 load_attr.prog_flags |= BPF_F_TOKEN_FD;
7397
12d9466d 7398 /* adjust load_attr if sec_def provides custom preload callback */
4fa5bcfe
AN
7399 if (prog->sec_def && prog->sec_def->prog_prepare_load_fn) {
7400 err = prog->sec_def->prog_prepare_load_fn(prog, &load_attr, prog->sec_def->cookie);
12d9466d
AN
7401 if (err < 0) {
7402 pr_warn("prog '%s': failed to prepare load attributes: %d\n",
7403 prog->name, err);
7404 return err;
7405 }
b63b3c49
JO
7406 insns = prog->insns;
7407 insns_cnt = prog->insns_cnt;
12d9466d
AN
7408 }
7409
5902da6d
JO
7410 /* allow prog_prepare_load_fn to change expected_attach_type */
7411 load_attr.expected_attach_type = prog->expected_attach_type;
7412
25bbbd7a 7413 if (obj->gen_loader) {
d10ef2b8
AN
7414 bpf_gen__prog_load(obj->gen_loader, prog->type, prog->name,
7415 license, insns, insns_cnt, &load_attr,
25bbbd7a 7416 prog - obj->programs);
be2f2d16 7417 *prog_fd = -1;
67234743
AS
7418 return 0;
7419 }
8395f320 7420
b3ce9079 7421retry_load:
bb412cf1 7422 /* if log_level is zero, we don't request logs initially even if
b3ce9079
AN
7423 * custom log_buf is specified; if the program load fails, then we'll
7424 * bump log_level to 1 and use either custom log_buf or we'll allocate
7425 * our own and retry the load to get details on what failed
7426 */
7427 if (log_level) {
7428 if (prog->log_buf) {
7429 log_buf = prog->log_buf;
7430 log_buf_size = prog->log_size;
7431 own_log_buf = false;
7432 } else if (obj->log_buf) {
7433 log_buf = obj->log_buf;
7434 log_buf_size = obj->log_size;
7435 own_log_buf = false;
7436 } else {
7437 log_buf_size = max((size_t)BPF_LOG_BUF_SIZE, log_buf_size * 2);
7438 tmp = realloc(log_buf, log_buf_size);
7439 if (!tmp) {
7440 ret = -ENOMEM;
7441 goto out;
7442 }
7443 log_buf = tmp;
7444 log_buf[0] = '\0';
7445 own_log_buf = true;
7446 }
8395f320 7447 }
55cffde2 7448
6aef10a4 7449 load_attr.log_buf = log_buf;
d10ef2b8 7450 load_attr.log_size = log_buf_size;
b3ce9079 7451 load_attr.log_level = log_level;
55cffde2 7452
b3ce9079 7453 ret = bpf_prog_load(prog->type, prog_name, license, insns, insns_cnt, &load_attr);
55cffde2 7454 if (ret >= 0) {
b3ce9079 7455 if (log_level && own_log_buf) {
ad9a7f96
AN
7456 pr_debug("prog '%s': -- BEGIN PROG LOAD LOG --\n%s-- END PROG LOAD LOG --\n",
7457 prog->name, log_buf);
7458 }
5d23328d 7459
25bbbd7a
AN
7460 if (obj->has_rodata && kernel_supports(obj, FEAT_PROG_BIND_MAP)) {
7461 struct bpf_map *map;
7462 int i;
7463
7464 for (i = 0; i < obj->nr_maps; i++) {
7465 map = &prog->obj->maps[i];
7466 if (map->libbpf_type != LIBBPF_MAP_RODATA)
7467 continue;
5d23328d 7468
f08c18e0 7469 if (bpf_prog_bind_map(ret, map->fd, NULL)) {
25bbbd7a 7470 cp = libbpf_strerror_r(errno, errmsg, sizeof(errmsg));
ad9a7f96
AN
7471 pr_warn("prog '%s': failed to bind map '%s': %s\n",
7472 prog->name, map->real_name, cp);
25bbbd7a
AN
7473 /* Don't fail hard if can't bind rodata. */
7474 }
5d23328d
YZ
7475 }
7476 }
7477
be2f2d16 7478 *prog_fd = ret;
55cffde2
WN
7479 ret = 0;
7480 goto out;
7481 }
7482
b3ce9079
AN
7483 if (log_level == 0) {
7484 log_level = 1;
da11b417
AS
7485 goto retry_load;
7486 }
b3ce9079
AN
7487 /* On ENOSPC, increase log buffer size and retry, unless custom
7488 * log_buf is specified.
7489 * Be careful to not overflow u32, though. Kernel's log buf size limit
7490 * isn't part of UAPI so it can always be bumped to full 4GB. So don't
7491 * multiply by 2 unless we are sure we'll fit within 32 bits.
7492 * Currently, we'll get -EINVAL when we reach (UINT_MAX >> 2).
7493 */
7494 if (own_log_buf && errno == ENOSPC && log_buf_size <= UINT_MAX / 2)
7495 goto retry_load;
2eda2145
AN
7496
7497 ret = -errno;
9fdc4273
AN
7498
7499 /* post-process verifier log to improve error descriptions */
7500 fixup_verifier_log(prog, log_buf, log_buf_size);
7501
24d6a808 7502 cp = libbpf_strerror_r(errno, errmsg, sizeof(errmsg));
ad9a7f96 7503 pr_warn("prog '%s': BPF program load failed: %s\n", prog->name, cp);
dc3a2d25 7504 pr_perm_msg(ret);
55cffde2 7505
b3ce9079 7506 if (own_log_buf && log_buf && log_buf[0] != '\0') {
ad9a7f96
AN
7507 pr_warn("prog '%s': -- BEGIN PROG LOAD LOG --\n%s-- END PROG LOAD LOG --\n",
7508 prog->name, log_buf);
7509 }
55cffde2
WN
7510
7511out:
b3ce9079
AN
7512 if (own_log_buf)
7513 free(log_buf);
55cffde2
WN
7514 return ret;
7515}
7516
9fdc4273
AN
7517static char *find_prev_line(char *buf, char *cur)
7518{
7519 char *p;
7520
7521 if (cur == buf) /* end of a log buf */
7522 return NULL;
7523
7524 p = cur - 1;
7525 while (p - 1 >= buf && *(p - 1) != '\n')
7526 p--;
7527
7528 return p;
7529}
7530
7531static void patch_log(char *buf, size_t buf_sz, size_t log_sz,
7532 char *orig, size_t orig_sz, const char *patch)
7533{
7534 /* size of the remaining log content to the right from the to-be-replaced part */
7535 size_t rem_sz = (buf + log_sz) - (orig + orig_sz);
7536 size_t patch_sz = strlen(patch);
7537
7538 if (patch_sz != orig_sz) {
7539 /* If patch line(s) are longer than original piece of verifier log,
7540 * shift log contents by (patch_sz - orig_sz) bytes to the right
7541 * starting from after to-be-replaced part of the log.
7542 *
7543 * If patch line(s) are shorter than original piece of verifier log,
7544 * shift log contents by (orig_sz - patch_sz) bytes to the left
7545 * starting from after to-be-replaced part of the log
7546 *
7547 * We need to be careful about not overflowing available
7548 * buf_sz capacity. If that's the case, we'll truncate the end
7549 * of the original log, as necessary.
7550 */
7551 if (patch_sz > orig_sz) {
7552 if (orig + patch_sz >= buf + buf_sz) {
7553 /* patch is big enough to cover remaining space completely */
7554 patch_sz -= (orig + patch_sz) - (buf + buf_sz) + 1;
7555 rem_sz = 0;
7556 } else if (patch_sz - orig_sz > buf_sz - log_sz) {
7557 /* patch causes part of remaining log to be truncated */
7558 rem_sz -= (patch_sz - orig_sz) - (buf_sz - log_sz);
7559 }
7560 }
7561 /* shift remaining log to the right by calculated amount */
7562 memmove(orig + patch_sz, orig + orig_sz, rem_sz);
7563 }
7564
7565 memcpy(orig, patch, patch_sz);
7566}
7567
7568static void fixup_log_failed_core_relo(struct bpf_program *prog,
7569 char *buf, size_t buf_sz, size_t log_sz,
7570 char *line1, char *line2, char *line3)
7571{
7572 /* Expected log for failed and not properly guarded CO-RE relocation:
7573 * line1 -> 123: (85) call unknown#195896080
7574 * line2 -> invalid func unknown#195896080
7575 * line3 -> <anything else or end of buffer>
7576 *
7577 * "123" is the index of the instruction that was poisoned. We extract
7578 * instruction index to find corresponding CO-RE relocation and
7579 * replace this part of the log with more relevant information about
7580 * failed CO-RE relocation.
7581 */
7582 const struct bpf_core_relo *relo;
7583 struct bpf_core_spec spec;
7584 char patch[512], spec_buf[256];
b198881d 7585 int insn_idx, err, spec_len;
9fdc4273
AN
7586
7587 if (sscanf(line1, "%d: (%*d) call unknown#195896080\n", &insn_idx) != 1)
7588 return;
7589
7590 relo = find_relo_core(prog, insn_idx);
7591 if (!relo)
7592 return;
7593
7594 err = bpf_core_parse_spec(prog->name, prog->obj->btf, relo, &spec);
7595 if (err)
7596 return;
7597
b198881d 7598 spec_len = bpf_core_format_spec(spec_buf, sizeof(spec_buf), &spec);
9fdc4273
AN
7599 snprintf(patch, sizeof(patch),
7600 "%d: <invalid CO-RE relocation>\n"
b198881d
AN
7601 "failed to resolve CO-RE relocation %s%s\n",
7602 insn_idx, spec_buf, spec_len >= sizeof(spec_buf) ? "..." : "");
9fdc4273
AN
7603
7604 patch_log(buf, buf_sz, log_sz, line1, line3 - line1, patch);
7605}
7606
ec41817b
AN
7607static void fixup_log_missing_map_load(struct bpf_program *prog,
7608 char *buf, size_t buf_sz, size_t log_sz,
7609 char *line1, char *line2, char *line3)
7610{
3055ddd6 7611 /* Expected log for failed and not properly guarded map reference:
ec41817b
AN
7612 * line1 -> 123: (85) call unknown#2001000345
7613 * line2 -> invalid func unknown#2001000345
7614 * line3 -> <anything else or end of buffer>
7615 *
7616 * "123" is the index of the instruction that was poisoned.
3055ddd6 7617 * "345" in "2001000345" is a map index in obj->maps to fetch map name.
ec41817b
AN
7618 */
7619 struct bpf_object *obj = prog->obj;
7620 const struct bpf_map *map;
7621 int insn_idx, map_idx;
7622 char patch[128];
7623
7624 if (sscanf(line1, "%d: (%*d) call unknown#%d\n", &insn_idx, &map_idx) != 2)
7625 return;
7626
3055ddd6 7627 map_idx -= POISON_LDIMM64_MAP_BASE;
ec41817b
AN
7628 if (map_idx < 0 || map_idx >= obj->nr_maps)
7629 return;
7630 map = &obj->maps[map_idx];
7631
7632 snprintf(patch, sizeof(patch),
7633 "%d: <invalid BPF map reference>\n"
7634 "BPF map '%s' is referenced but wasn't created\n",
7635 insn_idx, map->name);
7636
7637 patch_log(buf, buf_sz, log_sz, line1, line3 - line1, patch);
7638}
7639
05b6f766
AN
7640static void fixup_log_missing_kfunc_call(struct bpf_program *prog,
7641 char *buf, size_t buf_sz, size_t log_sz,
7642 char *line1, char *line2, char *line3)
7643{
7644 /* Expected log for failed and not properly guarded kfunc call:
7645 * line1 -> 123: (85) call unknown#2002000345
7646 * line2 -> invalid func unknown#2002000345
7647 * line3 -> <anything else or end of buffer>
7648 *
7649 * "123" is the index of the instruction that was poisoned.
7650 * "345" in "2002000345" is an extern index in obj->externs to fetch kfunc name.
7651 */
7652 struct bpf_object *obj = prog->obj;
7653 const struct extern_desc *ext;
7654 int insn_idx, ext_idx;
7655 char patch[128];
7656
7657 if (sscanf(line1, "%d: (%*d) call unknown#%d\n", &insn_idx, &ext_idx) != 2)
7658 return;
7659
7660 ext_idx -= POISON_CALL_KFUNC_BASE;
7661 if (ext_idx < 0 || ext_idx >= obj->nr_extern)
7662 return;
7663 ext = &obj->externs[ext_idx];
7664
7665 snprintf(patch, sizeof(patch),
7666 "%d: <invalid kfunc call>\n"
7667 "kfunc '%s' is referenced but wasn't resolved\n",
7668 insn_idx, ext->name);
7669
7670 patch_log(buf, buf_sz, log_sz, line1, line3 - line1, patch);
7671}
7672
9fdc4273
AN
7673static void fixup_verifier_log(struct bpf_program *prog, char *buf, size_t buf_sz)
7674{
7675 /* look for familiar error patterns in last N lines of the log */
7676 const size_t max_last_line_cnt = 10;
7677 char *prev_line, *cur_line, *next_line;
7678 size_t log_sz;
7679 int i;
7680
7681 if (!buf)
7682 return;
7683
7684 log_sz = strlen(buf) + 1;
7685 next_line = buf + log_sz - 1;
7686
7687 for (i = 0; i < max_last_line_cnt; i++, next_line = cur_line) {
7688 cur_line = find_prev_line(buf, next_line);
7689 if (!cur_line)
7690 return;
7691
9fdc4273
AN
7692 if (str_has_pfx(cur_line, "invalid func unknown#195896080\n")) {
7693 prev_line = find_prev_line(buf, cur_line);
7694 if (!prev_line)
7695 continue;
7696
3055ddd6 7697 /* failed CO-RE relocation case */
9fdc4273
AN
7698 fixup_log_failed_core_relo(prog, buf, buf_sz, log_sz,
7699 prev_line, cur_line, next_line);
7700 return;
3055ddd6 7701 } else if (str_has_pfx(cur_line, "invalid func unknown#"POISON_LDIMM64_MAP_PFX)) {
ec41817b
AN
7702 prev_line = find_prev_line(buf, cur_line);
7703 if (!prev_line)
7704 continue;
7705
3055ddd6 7706 /* reference to uncreated BPF map */
ec41817b
AN
7707 fixup_log_missing_map_load(prog, buf, buf_sz, log_sz,
7708 prev_line, cur_line, next_line);
7709 return;
05b6f766
AN
7710 } else if (str_has_pfx(cur_line, "invalid func unknown#"POISON_CALL_KFUNC_PFX)) {
7711 prev_line = find_prev_line(buf, cur_line);
7712 if (!prev_line)
7713 continue;
7714
7715 /* reference to unresolved kfunc */
7716 fixup_log_missing_kfunc_call(prog, buf, buf_sz, log_sz,
7717 prev_line, cur_line, next_line);
7718 return;
9fdc4273
AN
7719 }
7720 }
7721}
7722
d0e92887 7723static int bpf_program_record_relos(struct bpf_program *prog)
67234743
AS
7724{
7725 struct bpf_object *obj = prog->obj;
7726 int i;
7727
7728 for (i = 0; i < prog->nr_reloc; i++) {
7729 struct reloc_desc *relo = &prog->reloc_desc[i];
3055ddd6 7730 struct extern_desc *ext = &obj->externs[relo->ext_idx];
708cdc57 7731 int kind;
67234743
AS
7732
7733 switch (relo->type) {
a18f7214 7734 case RELO_EXTERN_LD64:
67234743
AS
7735 if (ext->type != EXT_KSYM)
7736 continue;
708cdc57
AS
7737 kind = btf_is_var(btf__type_by_id(obj->btf, ext->btf_id)) ?
7738 BTF_KIND_VAR : BTF_KIND_FUNC;
c24941cd
KKD
7739 bpf_gen__record_extern(obj->gen_loader, ext->name,
7740 ext->is_weak, !ext->ksym.type_id,
708cdc57 7741 true, kind, relo->insn_idx);
67234743 7742 break;
a18f7214 7743 case RELO_EXTERN_CALL:
c24941cd 7744 bpf_gen__record_extern(obj->gen_loader, ext->name,
708cdc57 7745 ext->is_weak, false, false, BTF_KIND_FUNC,
67234743
AS
7746 relo->insn_idx);
7747 break;
d0e92887
AS
7748 case RELO_CORE: {
7749 struct bpf_core_relo cr = {
7750 .insn_off = relo->insn_idx * 8,
7751 .type_id = relo->core_relo->type_id,
7752 .access_str_off = relo->core_relo->access_str_off,
7753 .kind = relo->core_relo->kind,
7754 };
7755
7756 bpf_gen__record_relo_core(obj->gen_loader, &cr);
7757 break;
7758 }
67234743
AS
7759 default:
7760 continue;
7761 }
7762 }
7763 return 0;
7764}
7765
55cffde2 7766static int
60276f98 7767bpf_object__load_progs(struct bpf_object *obj, int log_level)
55cffde2 7768{
d9297581 7769 struct bpf_program *prog;
55cffde2
WN
7770 size_t i;
7771 int err;
7772
109cea5a
AN
7773 for (i = 0; i < obj->nr_programs; i++) {
7774 prog = &obj->programs[i];
7775 err = bpf_object__sanitize_prog(obj, prog);
7776 if (err)
7777 return err;
7778 }
7779
55cffde2 7780 for (i = 0; i < obj->nr_programs; i++) {
d9297581 7781 prog = &obj->programs[i];
c3c55696 7782 if (prog_is_subprog(obj, prog))
48cca7e4 7783 continue;
a3820c48 7784 if (!prog->autoload) {
9c0f8cbd 7785 pr_debug("prog '%s': skipped loading\n", prog->name);
d9297581
AN
7786 continue;
7787 }
7788 prog->log_level |= log_level;
cf90a20d
AN
7789
7790 if (obj->gen_loader)
7791 bpf_program_record_relos(prog);
7792
7793 err = bpf_object_load_prog(obj, prog, prog->insns, prog->insns_cnt,
7794 obj->license, obj->kern_version, &prog->fd);
7795 if (err) {
7796 pr_warn("prog '%s': failed to load: %d\n", prog->name, err);
55cffde2 7797 return err;
cf90a20d 7798 }
55cffde2 7799 }
185cfe83
AN
7800
7801 bpf_object__free_relocs(obj);
55cffde2
WN
7802 return 0;
7803}
7804
25498a19
AN
7805static const struct bpf_sec_def *find_sec_def(const char *sec_name);
7806
91b4d1d1
AN
7807static int bpf_object_init_progs(struct bpf_object *obj, const struct bpf_object_open_opts *opts)
7808{
7809 struct bpf_program *prog;
12d9466d 7810 int err;
91b4d1d1
AN
7811
7812 bpf_object__for_each_program(prog, obj) {
7813 prog->sec_def = find_sec_def(prog->sec_name);
7814 if (!prog->sec_def) {
7815 /* couldn't guess, but user might manually specify */
7816 pr_debug("prog '%s': unrecognized ELF section name '%s'\n",
7817 prog->name, prog->sec_name);
7818 continue;
7819 }
7820
df286716
GS
7821 prog->type = prog->sec_def->prog_type;
7822 prog->expected_attach_type = prog->sec_def->expected_attach_type;
91b4d1d1 7823
12d9466d
AN
7824 /* sec_def can have custom callback which should be called
7825 * after bpf_program is initialized to adjust its properties
7826 */
4fa5bcfe
AN
7827 if (prog->sec_def->prog_setup_fn) {
7828 err = prog->sec_def->prog_setup_fn(prog, prog->sec_def->cookie);
12d9466d
AN
7829 if (err < 0) {
7830 pr_warn("prog '%s': failed to initialize: %d\n",
7831 prog->name, err);
7832 return err;
7833 }
7834 }
91b4d1d1
AN
7835 }
7836
7837 return 0;
7838}
7839
ad9a7f96
AN
7840static struct bpf_object *bpf_object_open(const char *path, const void *obj_buf, size_t obj_buf_sz,
7841 const struct bpf_object_open_opts *opts)
1a5e3fb1 7842{
6b434b61 7843 const char *obj_name, *kconfig, *btf_tmp_path, *token_path;
1a5e3fb1 7844 struct bpf_object *obj;
291ee02b 7845 char tmp_name[64];
d17aff80 7846 int err;
e0e3ea88
AN
7847 char *log_buf;
7848 size_t log_size;
7849 __u32 log_level;
1a5e3fb1
WN
7850
7851 if (elf_version(EV_CURRENT) == EV_NONE) {
be18010e
KW
7852 pr_warn("failed to init libelf for %s\n",
7853 path ? : "(mem buf)");
6371ca3b 7854 return ERR_PTR(-LIBBPF_ERRNO__LIBELF);
1a5e3fb1
WN
7855 }
7856
291ee02b
AN
7857 if (!OPTS_VALID(opts, bpf_object_open_opts))
7858 return ERR_PTR(-EINVAL);
7859
1aace10f 7860 obj_name = OPTS_GET(opts, object_name, NULL);
291ee02b
AN
7861 if (obj_buf) {
7862 if (!obj_name) {
7863 snprintf(tmp_name, sizeof(tmp_name), "%lx-%lx",
7864 (unsigned long)obj_buf,
7865 (unsigned long)obj_buf_sz);
7866 obj_name = tmp_name;
7867 }
7868 path = obj_name;
7869 pr_debug("loading object '%s' from buffer\n", obj_name);
7870 }
7871
e0e3ea88
AN
7872 log_buf = OPTS_GET(opts, kernel_log_buf, NULL);
7873 log_size = OPTS_GET(opts, kernel_log_size, 0);
7874 log_level = OPTS_GET(opts, kernel_log_level, 0);
7875 if (log_size > UINT_MAX)
7876 return ERR_PTR(-EINVAL);
7877 if (log_size && !log_buf)
7878 return ERR_PTR(-EINVAL);
7879
6b434b61 7880 token_path = OPTS_GET(opts, bpf_token_path, NULL);
cac270ad
AN
7881 /* if user didn't specify bpf_token_path explicitly, check if
7882 * LIBBPF_BPF_TOKEN_PATH envvar was set and treat it as bpf_token_path
7883 * option
7884 */
7885 if (!token_path)
7886 token_path = getenv("LIBBPF_BPF_TOKEN_PATH");
6b434b61
AN
7887 if (token_path && strlen(token_path) >= PATH_MAX)
7888 return ERR_PTR(-ENAMETOOLONG);
7889
2ce8450e 7890 obj = bpf_object__new(path, obj_buf, obj_buf_sz, obj_name);
6371ca3b
WN
7891 if (IS_ERR(obj))
7892 return obj;
1a5e3fb1 7893
e0e3ea88
AN
7894 obj->log_buf = log_buf;
7895 obj->log_size = log_size;
7896 obj->log_level = log_level;
7897
6b434b61
AN
7898 if (token_path) {
7899 obj->token_path = strdup(token_path);
7900 if (!obj->token_path) {
7901 err = -ENOMEM;
7902 goto out;
7903 }
7904 }
7905
1373ff59
SC
7906 btf_tmp_path = OPTS_GET(opts, btf_custom_path, NULL);
7907 if (btf_tmp_path) {
7908 if (strlen(btf_tmp_path) >= PATH_MAX) {
7909 err = -ENAMETOOLONG;
7910 goto out;
7911 }
7912 obj->btf_custom_path = strdup(btf_tmp_path);
7913 if (!obj->btf_custom_path) {
7914 err = -ENOMEM;
7915 goto out;
7916 }
7917 }
7918
8601fd42
AN
7919 kconfig = OPTS_GET(opts, kconfig, NULL);
7920 if (kconfig) {
7921 obj->kconfig = strdup(kconfig);
18353c87
SC
7922 if (!obj->kconfig) {
7923 err = -ENOMEM;
7924 goto out;
7925 }
166750bc 7926 }
291ee02b 7927
0d13bfce
AN
7928 err = bpf_object__elf_init(obj);
7929 err = err ? : bpf_object__check_endianness(obj);
7930 err = err ? : bpf_object__elf_collect(obj);
166750bc 7931 err = err ? : bpf_object__collect_externs(obj);
f33f742d 7932 err = err ? : bpf_object_fixup_btf(obj);
0d13bfce 7933 err = err ? : bpf_object__init_maps(obj, opts);
91b4d1d1 7934 err = err ? : bpf_object_init_progs(obj, opts);
c3c55696 7935 err = err ? : bpf_object__collect_relos(obj);
0d13bfce
AN
7936 if (err)
7937 goto out;
dd4436bb 7938
91b4d1d1 7939 bpf_object__elf_finish(obj);
dd4436bb 7940
1a5e3fb1
WN
7941 return obj;
7942out:
7943 bpf_object__close(obj);
6371ca3b 7944 return ERR_PTR(err);
1a5e3fb1
WN
7945}
7946
2ce8450e 7947struct bpf_object *
01af3bf0 7948bpf_object__open_file(const char *path, const struct bpf_object_open_opts *opts)
2ce8450e 7949{
2ce8450e 7950 if (!path)
e9fc3ce9 7951 return libbpf_err_ptr(-EINVAL);
2ce8450e
AN
7952
7953 pr_debug("loading %s\n", path);
7954
ad9a7f96 7955 return libbpf_ptr(bpf_object_open(path, NULL, 0, opts));
2ce8450e
AN
7956}
7957
146bf811
AN
7958struct bpf_object *bpf_object__open(const char *path)
7959{
7960 return bpf_object__open_file(path, NULL);
7961}
7962
2ce8450e
AN
7963struct bpf_object *
7964bpf_object__open_mem(const void *obj_buf, size_t obj_buf_sz,
01af3bf0 7965 const struct bpf_object_open_opts *opts)
6c956392 7966{
2ce8450e 7967 if (!obj_buf || obj_buf_sz == 0)
e9fc3ce9 7968 return libbpf_err_ptr(-EINVAL);
6c956392 7969
ad9a7f96 7970 return libbpf_ptr(bpf_object_open(NULL, obj_buf, obj_buf_sz, opts));
2ce8450e
AN
7971}
7972
4a404a7e 7973static int bpf_object_unload(struct bpf_object *obj)
52d3352e
WN
7974{
7975 size_t i;
7976
7977 if (!obj)
e9fc3ce9 7978 return libbpf_err(-EINVAL);
52d3352e 7979
590a0088 7980 for (i = 0; i < obj->nr_maps; i++) {
9d759a9b 7981 zclose(obj->maps[i].fd);
590a0088
MKL
7982 if (obj->maps[i].st_ops)
7983 zfree(&obj->maps[i].st_ops->kern_vdata);
7984 }
52d3352e 7985
55cffde2
WN
7986 for (i = 0; i < obj->nr_programs; i++)
7987 bpf_program__unload(&obj->programs[i]);
7988
52d3352e
WN
7989 return 0;
7990}
7991
0d13bfce
AN
7992static int bpf_object__sanitize_maps(struct bpf_object *obj)
7993{
7994 struct bpf_map *m;
7995
7996 bpf_object__for_each_map(m, obj) {
7997 if (!bpf_map__is_internal(m))
7998 continue;
9ca1f56a 7999 if (!kernel_supports(obj, FEAT_ARRAY_MMAP))
6920b086 8000 m->def.map_flags &= ~BPF_F_MMAPABLE;
0d13bfce
AN
8001 }
8002
8003 return 0;
8004}
8005
ad2b0528
YS
8006typedef int (*kallsyms_cb_t)(unsigned long long sym_addr, char sym_type,
8007 const char *sym_name, void *ctx);
8008
8009static int libbpf_kallsyms_parse(kallsyms_cb_t cb, void *ctx)
1c0c7074
AN
8010{
8011 char sym_type, sym_name[500];
8012 unsigned long long sym_addr;
1c0c7074
AN
8013 int ret, err = 0;
8014 FILE *f;
8015
59842c54 8016 f = fopen("/proc/kallsyms", "re");
1c0c7074
AN
8017 if (!f) {
8018 err = -errno;
8019 pr_warn("failed to open /proc/kallsyms: %d\n", err);
8020 return err;
8021 }
8022
8023 while (true) {
8024 ret = fscanf(f, "%llx %c %499s%*[^\n]\n",
8025 &sym_addr, &sym_type, sym_name);
8026 if (ret == EOF && feof(f))
8027 break;
8028 if (ret != 3) {
135c783f 8029 pr_warn("failed to read kallsyms entry: %d\n", ret);
1c0c7074 8030 err = -EINVAL;
85153ac0 8031 break;
1c0c7074
AN
8032 }
8033
85153ac0
JO
8034 err = cb(sym_addr, sym_type, sym_name, ctx);
8035 if (err)
8036 break;
1c0c7074
AN
8037 }
8038
1c0c7074
AN
8039 fclose(f);
8040 return err;
8041}
8042
85153ac0
JO
8043static int kallsyms_cb(unsigned long long sym_addr, char sym_type,
8044 const char *sym_name, void *ctx)
8045{
8046 struct bpf_object *obj = ctx;
8047 const struct btf_type *t;
8048 struct extern_desc *ext;
c56e5977 8049 char *res;
85153ac0 8050
c56e5977
YS
8051 res = strstr(sym_name, ".llvm.");
8052 if (sym_type == 'd' && res)
8053 ext = find_extern_by_name_with_len(obj, sym_name, res - sym_name);
8054 else
8055 ext = find_extern_by_name(obj, sym_name);
85153ac0
JO
8056 if (!ext || ext->type != EXT_KSYM)
8057 return 0;
8058
8059 t = btf__type_by_id(obj->btf, ext->btf_id);
8060 if (!btf_is_var(t))
8061 return 0;
8062
8063 if (ext->is_set && ext->ksym.addr != sym_addr) {
55d00c37 8064 pr_warn("extern (ksym) '%s': resolution is ambiguous: 0x%llx or 0x%llx\n",
85153ac0
JO
8065 sym_name, ext->ksym.addr, sym_addr);
8066 return -EINVAL;
8067 }
8068 if (!ext->is_set) {
8069 ext->is_set = true;
8070 ext->ksym.addr = sym_addr;
55d00c37 8071 pr_debug("extern (ksym) '%s': set to 0x%llx\n", sym_name, sym_addr);
85153ac0
JO
8072 }
8073 return 0;
8074}
8075
8076static int bpf_object__read_kallsyms_file(struct bpf_object *obj)
8077{
8078 return libbpf_kallsyms_parse(kallsyms_cb, obj);
8079}
8080
774e132e
MKL
8081static int find_ksym_btf_id(struct bpf_object *obj, const char *ksym_name,
8082 __u16 kind, struct btf **res_btf,
9dbe6015 8083 struct module_btf **res_mod_btf)
d370bbe1 8084{
9dbe6015 8085 struct module_btf *mod_btf;
284d2587 8086 struct btf *btf;
9dbe6015 8087 int i, id, err;
d370bbe1 8088
933d1aa3 8089 btf = obj->btf_vmlinux;
9dbe6015 8090 mod_btf = NULL;
774e132e
MKL
8091 id = btf__find_by_name_kind(btf, ksym_name, kind);
8092
933d1aa3
MKL
8093 if (id == -ENOENT) {
8094 err = load_module_btfs(obj);
8095 if (err)
8096 return err;
d370bbe1 8097
933d1aa3 8098 for (i = 0; i < obj->btf_module_cnt; i++) {
9dbe6015
KKD
8099 /* we assume module_btf's BTF FD is always >0 */
8100 mod_btf = &obj->btf_modules[i];
8101 btf = mod_btf->btf;
8102 id = btf__find_by_name_kind_own(btf, ksym_name, kind);
933d1aa3
MKL
8103 if (id != -ENOENT)
8104 break;
8105 }
8106 }
2211c825 8107 if (id <= 0)
933d1aa3 8108 return -ESRCH;
d370bbe1 8109
774e132e 8110 *res_btf = btf;
9dbe6015 8111 *res_mod_btf = mod_btf;
774e132e
MKL
8112 return id;
8113}
8114
8115static int bpf_object__resolve_ksym_var_btf_id(struct bpf_object *obj,
8116 struct extern_desc *ext)
8117{
8118 const struct btf_type *targ_var, *targ_type;
8119 __u32 targ_type_id, local_type_id;
9dbe6015 8120 struct module_btf *mod_btf = NULL;
774e132e 8121 const char *targ_var_name;
774e132e 8122 struct btf *btf = NULL;
9dbe6015 8123 int id, err;
774e132e 8124
9dbe6015 8125 id = find_ksym_btf_id(obj, ext->name, BTF_KIND_VAR, &btf, &mod_btf);
466b2e13
KKD
8126 if (id < 0) {
8127 if (id == -ESRCH && ext->is_weak)
8128 return 0;
2211c825
HL
8129 pr_warn("extern (var ksym) '%s': not found in kernel BTF\n",
8130 ext->name);
774e132e 8131 return id;
2211c825 8132 }
774e132e 8133
933d1aa3
MKL
8134 /* find local type_id */
8135 local_type_id = ext->ksym.type_id;
284d2587 8136
933d1aa3
MKL
8137 /* find target type_id */
8138 targ_var = btf__type_by_id(btf, id);
8139 targ_var_name = btf__name_by_offset(btf, targ_var->name_off);
8140 targ_type = skip_mods_and_typedefs(btf, targ_var->type, &targ_type_id);
d370bbe1 8141
933d1aa3
MKL
8142 err = bpf_core_types_are_compat(obj->btf, local_type_id,
8143 btf, targ_type_id);
8144 if (err <= 0) {
8145 const struct btf_type *local_type;
8146 const char *targ_name, *local_name;
d370bbe1 8147
933d1aa3
MKL
8148 local_type = btf__type_by_id(obj->btf, local_type_id);
8149 local_name = btf__name_by_offset(obj->btf, local_type->name_off);
8150 targ_name = btf__name_by_offset(btf, targ_type->name_off);
d370bbe1 8151
933d1aa3
MKL
8152 pr_warn("extern (var ksym) '%s': incompatible types, expected [%d] %s %s, but kernel has [%d] %s %s\n",
8153 ext->name, local_type_id,
8154 btf_kind_str(local_type), local_name, targ_type_id,
8155 btf_kind_str(targ_type), targ_name);
8156 return -EINVAL;
8157 }
d370bbe1 8158
933d1aa3 8159 ext->is_set = true;
9dbe6015 8160 ext->ksym.kernel_btf_obj_fd = mod_btf ? mod_btf->fd : 0;
933d1aa3
MKL
8161 ext->ksym.kernel_btf_id = id;
8162 pr_debug("extern (var ksym) '%s': resolved to [%d] %s %s\n",
8163 ext->name, id, btf_kind_str(targ_var), targ_var_name);
d370bbe1 8164
933d1aa3
MKL
8165 return 0;
8166}
d370bbe1 8167
5bd022ec
MKL
8168static int bpf_object__resolve_ksym_func_btf_id(struct bpf_object *obj,
8169 struct extern_desc *ext)
8170{
8171 int local_func_proto_id, kfunc_proto_id, kfunc_id;
9dbe6015 8172 struct module_btf *mod_btf = NULL;
5bd022ec
MKL
8173 const struct btf_type *kern_func;
8174 struct btf *kern_btf = NULL;
9dbe6015 8175 int ret;
5bd022ec
MKL
8176
8177 local_func_proto_id = ext->ksym.type_id;
8178
5964a223
DM
8179 kfunc_id = find_ksym_btf_id(obj, ext->essent_name ?: ext->name, BTF_KIND_FUNC, &kern_btf,
8180 &mod_btf);
5bd022ec 8181 if (kfunc_id < 0) {
466b2e13
KKD
8182 if (kfunc_id == -ESRCH && ext->is_weak)
8183 return 0;
8184 pr_warn("extern (func ksym) '%s': not found in kernel or module BTFs\n",
5bd022ec
MKL
8185 ext->name);
8186 return kfunc_id;
8187 }
8188
5bd022ec
MKL
8189 kern_func = btf__type_by_id(kern_btf, kfunc_id);
8190 kfunc_proto_id = kern_func->type;
8191
8192 ret = bpf_core_types_are_compat(obj->btf, local_func_proto_id,
8193 kern_btf, kfunc_proto_id);
8194 if (ret <= 0) {
5964a223
DM
8195 if (ext->is_weak)
8196 return 0;
8197
f709160d
AN
8198 pr_warn("extern (func ksym) '%s': func_proto [%d] incompatible with %s [%d]\n",
8199 ext->name, local_func_proto_id,
8200 mod_btf ? mod_btf->name : "vmlinux", kfunc_proto_id);
5bd022ec
MKL
8201 return -EINVAL;
8202 }
8203
9dbe6015
KKD
8204 /* set index for module BTF fd in fd_array, if unset */
8205 if (mod_btf && !mod_btf->fd_array_idx) {
8206 /* insn->off is s16 */
8207 if (obj->fd_array_cnt == INT16_MAX) {
8208 pr_warn("extern (func ksym) '%s': module BTF fd index %d too big to fit in bpf_insn offset\n",
8209 ext->name, mod_btf->fd_array_idx);
8210 return -E2BIG;
8211 }
8212 /* Cannot use index 0 for module BTF fd */
8213 if (!obj->fd_array_cnt)
8214 obj->fd_array_cnt = 1;
8215
8216 ret = libbpf_ensure_mem((void **)&obj->fd_array, &obj->fd_array_cap, sizeof(int),
8217 obj->fd_array_cnt + 1);
8218 if (ret)
8219 return ret;
8220 mod_btf->fd_array_idx = obj->fd_array_cnt;
8221 /* we assume module BTF FD is always >0 */
8222 obj->fd_array[obj->fd_array_cnt++] = mod_btf->fd;
8223 }
8224
5bd022ec 8225 ext->is_set = true;
5bd022ec 8226 ext->ksym.kernel_btf_id = kfunc_id;
9dbe6015 8227 ext->ksym.btf_fd_idx = mod_btf ? mod_btf->fd_array_idx : 0;
5fc13ad5
AS
8228 /* Also set kernel_btf_obj_fd to make sure that bpf_object__relocate_data()
8229 * populates FD into ld_imm64 insn when it's used to point to kfunc.
8230 * {kernel_btf_id, btf_fd_idx} -> fixup bpf_call.
8231 * {kernel_btf_id, kernel_btf_obj_fd} -> fixup ld_imm64.
8232 */
8233 ext->ksym.kernel_btf_obj_fd = mod_btf ? mod_btf->fd : 0;
f709160d
AN
8234 pr_debug("extern (func ksym) '%s': resolved to %s [%d]\n",
8235 ext->name, mod_btf ? mod_btf->name : "vmlinux", kfunc_id);
5bd022ec
MKL
8236
8237 return 0;
8238}
8239
933d1aa3
MKL
8240static int bpf_object__resolve_ksyms_btf_id(struct bpf_object *obj)
8241{
5bd022ec 8242 const struct btf_type *t;
933d1aa3
MKL
8243 struct extern_desc *ext;
8244 int i, err;
8245
8246 for (i = 0; i < obj->nr_extern; i++) {
8247 ext = &obj->externs[i];
8248 if (ext->type != EXT_KSYM || !ext->ksym.type_id)
8249 continue;
8250
67234743
AS
8251 if (obj->gen_loader) {
8252 ext->is_set = true;
8253 ext->ksym.kernel_btf_obj_fd = 0;
8254 ext->ksym.kernel_btf_id = 0;
8255 continue;
8256 }
5bd022ec
MKL
8257 t = btf__type_by_id(obj->btf, ext->btf_id);
8258 if (btf_is_var(t))
8259 err = bpf_object__resolve_ksym_var_btf_id(obj, ext);
8260 else
8261 err = bpf_object__resolve_ksym_func_btf_id(obj, ext);
933d1aa3
MKL
8262 if (err)
8263 return err;
d370bbe1
HL
8264 }
8265 return 0;
8266}
8267
166750bc 8268static int bpf_object__resolve_externs(struct bpf_object *obj,
8601fd42 8269 const char *extra_kconfig)
166750bc 8270{
1c0c7074 8271 bool need_config = false, need_kallsyms = false;
d370bbe1 8272 bool need_vmlinux_btf = false;
166750bc 8273 struct extern_desc *ext;
2e33efe3 8274 void *kcfg_data = NULL;
166750bc 8275 int err, i;
166750bc
AN
8276
8277 if (obj->nr_extern == 0)
8278 return 0;
8279
2e33efe3
AN
8280 if (obj->kconfig_map_idx >= 0)
8281 kcfg_data = obj->maps[obj->kconfig_map_idx].mmaped;
166750bc
AN
8282
8283 for (i = 0; i < obj->nr_extern; i++) {
8284 ext = &obj->externs[i];
8285
55d00c37
AN
8286 if (ext->type == EXT_KSYM) {
8287 if (ext->ksym.type_id)
8288 need_vmlinux_btf = true;
8289 else
8290 need_kallsyms = true;
8291 continue;
8292 } else if (ext->type == EXT_KCFG) {
8293 void *ext_ptr = kcfg_data + ext->kcfg.data_off;
8294 __u64 value = 0;
166750bc 8295
55d00c37
AN
8296 /* Kconfig externs need actual /proc/config.gz */
8297 if (str_has_pfx(ext->name, "CONFIG_")) {
8298 need_config = true;
8299 continue;
8300 }
8301
8302 /* Virtual kcfg externs are customly handled by libbpf */
8303 if (strcmp(ext->name, "LINUX_KERNEL_VERSION") == 0) {
8304 value = get_kernel_version();
8305 if (!value) {
8306 pr_warn("extern (kcfg) '%s': failed to get kernel version\n", ext->name);
8307 return -EINVAL;
8308 }
8309 } else if (strcmp(ext->name, "LINUX_HAS_BPF_COOKIE") == 0) {
8310 value = kernel_supports(obj, FEAT_BPF_COOKIE);
6f5d467d
AN
8311 } else if (strcmp(ext->name, "LINUX_HAS_SYSCALL_WRAPPER") == 0) {
8312 value = kernel_supports(obj, FEAT_SYSCALL_WRAPPER);
55d00c37
AN
8313 } else if (!str_has_pfx(ext->name, "LINUX_") || !ext->is_weak) {
8314 /* Currently libbpf supports only CONFIG_ and LINUX_ prefixed
8315 * __kconfig externs, where LINUX_ ones are virtual and filled out
8316 * customly by libbpf (their values don't come from Kconfig).
8317 * If LINUX_xxx variable is not recognized by libbpf, but is marked
8318 * __weak, it defaults to zero value, just like for CONFIG_xxx
8319 * externs.
8320 */
8321 pr_warn("extern (kcfg) '%s': unrecognized virtual extern\n", ext->name);
166750bc
AN
8322 return -EINVAL;
8323 }
55d00c37
AN
8324
8325 err = set_kcfg_value_num(ext, ext_ptr, value);
166750bc
AN
8326 if (err)
8327 return err;
55d00c37
AN
8328 pr_debug("extern (kcfg) '%s': set to 0x%llx\n",
8329 ext->name, (long long)value);
166750bc 8330 } else {
55d00c37 8331 pr_warn("extern '%s': unrecognized extern kind\n", ext->name);
166750bc
AN
8332 return -EINVAL;
8333 }
8334 }
8601fd42 8335 if (need_config && extra_kconfig) {
2e33efe3 8336 err = bpf_object__read_kconfig_mem(obj, extra_kconfig, kcfg_data);
8601fd42
AN
8337 if (err)
8338 return -EINVAL;
8339 need_config = false;
8340 for (i = 0; i < obj->nr_extern; i++) {
8341 ext = &obj->externs[i];
2e33efe3 8342 if (ext->type == EXT_KCFG && !ext->is_set) {
8601fd42
AN
8343 need_config = true;
8344 break;
8345 }
8346 }
8347 }
166750bc 8348 if (need_config) {
2e33efe3 8349 err = bpf_object__read_kconfig_file(obj, kcfg_data);
166750bc
AN
8350 if (err)
8351 return -EINVAL;
8352 }
1c0c7074
AN
8353 if (need_kallsyms) {
8354 err = bpf_object__read_kallsyms_file(obj);
8355 if (err)
8356 return -EINVAL;
8357 }
d370bbe1
HL
8358 if (need_vmlinux_btf) {
8359 err = bpf_object__resolve_ksyms_btf_id(obj);
8360 if (err)
8361 return -EINVAL;
8362 }
166750bc
AN
8363 for (i = 0; i < obj->nr_extern; i++) {
8364 ext = &obj->externs[i];
8365
8366 if (!ext->is_set && !ext->is_weak) {
55d00c37 8367 pr_warn("extern '%s' (strong): not resolved\n", ext->name);
166750bc
AN
8368 return -ESRCH;
8369 } else if (!ext->is_set) {
55d00c37 8370 pr_debug("extern '%s' (weak): not resolved, defaulting to zero\n",
166750bc
AN
8371 ext->name);
8372 }
8373 }
8374
8375 return 0;
8376}
8377
8d1608d7
KFL
8378static void bpf_map_prepare_vdata(const struct bpf_map *map)
8379{
8380 struct bpf_struct_ops *st_ops;
8381 __u32 i;
8382
8383 st_ops = map->st_ops;
8384 for (i = 0; i < btf_vlen(st_ops->type); i++) {
8385 struct bpf_program *prog = st_ops->progs[i];
8386 void *kern_data;
8387 int prog_fd;
8388
8389 if (!prog)
8390 continue;
8391
8392 prog_fd = bpf_program__fd(prog);
8393 kern_data = st_ops->kern_vdata + st_ops->kern_func_off[i];
8394 *(unsigned long *)kern_data = prog_fd;
8395 }
8396}
8397
8398static int bpf_object_prepare_struct_ops(struct bpf_object *obj)
8399{
8db05261 8400 struct bpf_map *map;
8d1608d7
KFL
8401 int i;
8402
8db05261
EZ
8403 for (i = 0; i < obj->nr_maps; i++) {
8404 map = &obj->maps[i];
8405
8406 if (!bpf_map__is_struct_ops(map))
8407 continue;
8408
8409 if (!map->autocreate)
8410 continue;
8411
8412 bpf_map_prepare_vdata(map);
8413 }
8d1608d7
KFL
8414
8415 return 0;
8416}
8417
e7b924ca 8418static int bpf_object_load(struct bpf_object *obj, int extra_log_level, const char *target_btf_path)
52d3352e 8419{
ec6d5f47 8420 int err, i;
6371ca3b 8421
52d3352e 8422 if (!obj)
e9fc3ce9 8423 return libbpf_err(-EINVAL);
52d3352e
WN
8424
8425 if (obj->loaded) {
d9297581 8426 pr_warn("object '%s': load can't be attempted twice\n", obj->name);
e9fc3ce9 8427 return libbpf_err(-EINVAL);
52d3352e
WN
8428 }
8429
67234743 8430 if (obj->gen_loader)
be315829 8431 bpf_gen__init(obj->gen_loader, extra_log_level, obj->nr_programs, obj->nr_maps);
67234743 8432
6b434b61
AN
8433 err = bpf_object_prepare_token(obj);
8434 err = err ? : bpf_object__probe_loading(obj);
fe62de31 8435 err = err ? : bpf_object__load_vmlinux_btf(obj, false);
8601fd42 8436 err = err ? : bpf_object__resolve_externs(obj, obj->kconfig);
0d13bfce 8437 err = err ? : bpf_object__sanitize_maps(obj);
590a0088 8438 err = err ? : bpf_object__init_kern_struct_ops_maps(obj);
fe9d049c 8439 err = err ? : bpf_object_adjust_struct_ops_autoload(obj);
e7b924ca 8440 err = err ? : bpf_object__relocate(obj, obj->btf_custom_path ? : target_btf_path);
1004742d 8441 err = err ? : bpf_object__sanitize_and_load_btf(obj);
dac645b9 8442 err = err ? : bpf_object__create_maps(obj);
e7b924ca 8443 err = err ? : bpf_object__load_progs(obj, extra_log_level);
341ac5ff 8444 err = err ? : bpf_object_init_prog_arrays(obj);
8d1608d7 8445 err = err ? : bpf_object_prepare_struct_ops(obj);
a6ed02ca 8446
67234743
AS
8447 if (obj->gen_loader) {
8448 /* reset FDs */
4729445b
KKD
8449 if (obj->btf)
8450 btf__set_fd(obj->btf, -1);
67234743 8451 if (!err)
ba05fd36 8452 err = bpf_gen__finish(obj->gen_loader, obj->nr_programs, obj->nr_maps);
67234743
AS
8453 }
8454
9dbe6015
KKD
8455 /* clean up fd_array */
8456 zfree(&obj->fd_array);
8457
4f33a53d
AN
8458 /* clean up module BTFs */
8459 for (i = 0; i < obj->btf_module_cnt; i++) {
91abb4a6 8460 close(obj->btf_modules[i].fd);
4f33a53d
AN
8461 btf__free(obj->btf_modules[i].btf);
8462 free(obj->btf_modules[i].name);
8463 }
8464 free(obj->btf_modules);
8465
8466 /* clean up vmlinux BTF */
a6ed02ca
KS
8467 btf__free(obj->btf_vmlinux);
8468 obj->btf_vmlinux = NULL;
8469
d9297581
AN
8470 obj->loaded = true; /* doesn't matter if successfully or not */
8471
0d13bfce
AN
8472 if (err)
8473 goto out;
52d3352e
WN
8474
8475 return 0;
8476out:
ec6d5f47
THJ
8477 /* unpin any maps that were auto-pinned during load */
8478 for (i = 0; i < obj->nr_maps; i++)
8479 if (obj->maps[i].pinned && !obj->maps[i].reused)
8480 bpf_map__unpin(&obj->maps[i], NULL);
8481
4a404a7e 8482 bpf_object_unload(obj);
be18010e 8483 pr_warn("failed to load object '%s'\n", obj->path);
e9fc3ce9 8484 return libbpf_err(err);
52d3352e
WN
8485}
8486
e7b924ca
AN
8487int bpf_object__load(struct bpf_object *obj)
8488{
8489 return bpf_object_load(obj, 0, NULL);
60276f98
QM
8490}
8491
196f8487
THJ
8492static int make_parent_dir(const char *path)
8493{
8494 char *cp, errmsg[STRERR_BUFSIZE];
8495 char *dname, *dir;
8496 int err = 0;
8497
8498 dname = strdup(path);
8499 if (dname == NULL)
8500 return -ENOMEM;
8501
8502 dir = dirname(dname);
8503 if (mkdir(dir, 0700) && errno != EEXIST)
8504 err = -errno;
8505
8506 free(dname);
8507 if (err) {
8508 cp = libbpf_strerror_r(-err, errmsg, sizeof(errmsg));
8509 pr_warn("failed to mkdir %s: %s\n", path, cp);
8510 }
8511 return err;
8512}
8513
f367540c
JS
8514static int check_path(const char *path)
8515{
1ce6a9fc 8516 char *cp, errmsg[STRERR_BUFSIZE];
f367540c
JS
8517 struct statfs st_fs;
8518 char *dname, *dir;
8519 int err = 0;
8520
8521 if (path == NULL)
8522 return -EINVAL;
8523
8524 dname = strdup(path);
8525 if (dname == NULL)
8526 return -ENOMEM;
8527
8528 dir = dirname(dname);
8529 if (statfs(dir, &st_fs)) {
24d6a808 8530 cp = libbpf_strerror_r(errno, errmsg, sizeof(errmsg));
be18010e 8531 pr_warn("failed to statfs %s: %s\n", dir, cp);
f367540c
JS
8532 err = -errno;
8533 }
8534 free(dname);
8535
8536 if (!err && st_fs.f_type != BPF_FS_MAGIC) {
be18010e 8537 pr_warn("specified path %s is not on BPF FS\n", path);
f367540c
JS
8538 err = -EINVAL;
8539 }
8540
8541 return err;
8542}
8543
cf90a20d 8544int bpf_program__pin(struct bpf_program *prog, const char *path)
f367540c 8545{
1ce6a9fc 8546 char *cp, errmsg[STRERR_BUFSIZE];
f367540c
JS
8547 int err;
8548
cf90a20d
AN
8549 if (prog->fd < 0) {
8550 pr_warn("prog '%s': can't pin program that wasn't loaded\n", prog->name);
8551 return libbpf_err(-EINVAL);
8552 }
8553
196f8487
THJ
8554 err = make_parent_dir(path);
8555 if (err)
e9fc3ce9 8556 return libbpf_err(err);
196f8487 8557
f367540c
JS
8558 err = check_path(path);
8559 if (err)
e9fc3ce9 8560 return libbpf_err(err);
f367540c 8561
cf90a20d 8562 if (bpf_obj_pin(prog->fd, path)) {
23ab656b
THJ
8563 err = -errno;
8564 cp = libbpf_strerror_r(err, errmsg, sizeof(errmsg));
cf90a20d 8565 pr_warn("prog '%s': failed to pin at '%s': %s\n", prog->name, path, cp);
e9fc3ce9 8566 return libbpf_err(err);
f367540c 8567 }
f367540c 8568
cf90a20d 8569 pr_debug("prog '%s': pinned at '%s'\n", prog->name, path);
f367540c
JS
8570 return 0;
8571}
8572
cf90a20d 8573int bpf_program__unpin(struct bpf_program *prog, const char *path)
0c19a9fb
SF
8574{
8575 int err;
8576
cf90a20d
AN
8577 if (prog->fd < 0) {
8578 pr_warn("prog '%s': can't unpin program that wasn't loaded\n", prog->name);
e9fc3ce9 8579 return libbpf_err(-EINVAL);
0c19a9fb
SF
8580 }
8581
f367540c
JS
8582 err = check_path(path);
8583 if (err)
e9fc3ce9 8584 return libbpf_err(err);
f367540c 8585
cf90a20d 8586 err = unlink(path);
0c19a9fb 8587 if (err)
e9fc3ce9 8588 return libbpf_err(-errno);
0c19a9fb 8589
cf90a20d 8590 pr_debug("prog '%s': unpinned from '%s'\n", prog->name, path);
f367540c
JS
8591 return 0;
8592}
8593
b6989f35
JS
8594int bpf_map__pin(struct bpf_map *map, const char *path)
8595{
1ce6a9fc 8596 char *cp, errmsg[STRERR_BUFSIZE];
b6989f35
JS
8597 int err;
8598
b6989f35 8599 if (map == NULL) {
be18010e 8600 pr_warn("invalid map pointer\n");
e9fc3ce9 8601 return libbpf_err(-EINVAL);
b6989f35
JS
8602 }
8603
7b30c296
MY
8604 if (map->fd < 0) {
8605 pr_warn("map '%s': can't pin BPF map without FD (was it created?)\n", map->name);
8606 return libbpf_err(-EINVAL);
8607 }
8608
4580b25f
THJ
8609 if (map->pin_path) {
8610 if (path && strcmp(path, map->pin_path)) {
8611 pr_warn("map '%s' already has pin path '%s' different from '%s'\n",
8612 bpf_map__name(map), map->pin_path, path);
e9fc3ce9 8613 return libbpf_err(-EINVAL);
4580b25f
THJ
8614 } else if (map->pinned) {
8615 pr_debug("map '%s' already pinned at '%s'; not re-pinning\n",
8616 bpf_map__name(map), map->pin_path);
8617 return 0;
8618 }
8619 } else {
8620 if (!path) {
8621 pr_warn("missing a path to pin map '%s' at\n",
8622 bpf_map__name(map));
e9fc3ce9 8623 return libbpf_err(-EINVAL);
4580b25f
THJ
8624 } else if (map->pinned) {
8625 pr_warn("map '%s' already pinned\n", bpf_map__name(map));
e9fc3ce9 8626 return libbpf_err(-EEXIST);
4580b25f
THJ
8627 }
8628
8629 map->pin_path = strdup(path);
8630 if (!map->pin_path) {
8631 err = -errno;
8632 goto out_err;
8633 }
b6989f35
JS
8634 }
8635
196f8487
THJ
8636 err = make_parent_dir(map->pin_path);
8637 if (err)
e9fc3ce9 8638 return libbpf_err(err);
196f8487 8639
4580b25f
THJ
8640 err = check_path(map->pin_path);
8641 if (err)
e9fc3ce9 8642 return libbpf_err(err);
4580b25f
THJ
8643
8644 if (bpf_obj_pin(map->fd, map->pin_path)) {
8645 err = -errno;
8646 goto out_err;
8647 }
8648
8649 map->pinned = true;
8650 pr_debug("pinned map '%s'\n", map->pin_path);
0c19a9fb 8651
b6989f35 8652 return 0;
4580b25f
THJ
8653
8654out_err:
8655 cp = libbpf_strerror_r(-err, errmsg, sizeof(errmsg));
8656 pr_warn("failed to pin map: %s\n", cp);
e9fc3ce9 8657 return libbpf_err(err);
b6989f35
JS
8658}
8659
0c19a9fb
SF
8660int bpf_map__unpin(struct bpf_map *map, const char *path)
8661{
8662 int err;
8663
0c19a9fb 8664 if (map == NULL) {
be18010e 8665 pr_warn("invalid map pointer\n");
e9fc3ce9 8666 return libbpf_err(-EINVAL);
0c19a9fb
SF
8667 }
8668
4580b25f
THJ
8669 if (map->pin_path) {
8670 if (path && strcmp(path, map->pin_path)) {
8671 pr_warn("map '%s' already has pin path '%s' different from '%s'\n",
8672 bpf_map__name(map), map->pin_path, path);
e9fc3ce9 8673 return libbpf_err(-EINVAL);
4580b25f
THJ
8674 }
8675 path = map->pin_path;
8676 } else if (!path) {
8677 pr_warn("no path to unpin map '%s' from\n",
8678 bpf_map__name(map));
e9fc3ce9 8679 return libbpf_err(-EINVAL);
4580b25f
THJ
8680 }
8681
8682 err = check_path(path);
8683 if (err)
e9fc3ce9 8684 return libbpf_err(err);
4580b25f 8685
0c19a9fb
SF
8686 err = unlink(path);
8687 if (err != 0)
e9fc3ce9 8688 return libbpf_err(-errno);
4580b25f
THJ
8689
8690 map->pinned = false;
8691 pr_debug("unpinned map '%s' from '%s'\n", bpf_map__name(map), path);
0c19a9fb
SF
8692
8693 return 0;
8694}
8695
4580b25f
THJ
8696int bpf_map__set_pin_path(struct bpf_map *map, const char *path)
8697{
8698 char *new = NULL;
8699
8700 if (path) {
8701 new = strdup(path);
8702 if (!new)
e9fc3ce9 8703 return libbpf_err(-errno);
4580b25f
THJ
8704 }
8705
8706 free(map->pin_path);
8707 map->pin_path = new;
8708 return 0;
8709}
8710
20eccf29
AN
8711__alias(bpf_map__pin_path)
8712const char *bpf_map__get_pin_path(const struct bpf_map *map);
4580b25f 8713
e244d34d
EL
8714const char *bpf_map__pin_path(const struct bpf_map *map)
8715{
8716 return map->pin_path;
8717}
8718
4580b25f
THJ
8719bool bpf_map__is_pinned(const struct bpf_map *map)
8720{
8721 return map->pinned;
8722}
8723
9cf309c5
THJ
8724static void sanitize_pin_path(char *s)
8725{
8726 /* bpffs disallows periods in path names */
8727 while (*s) {
8728 if (*s == '.')
8729 *s = '_';
8730 s++;
8731 }
8732}
8733
0c19a9fb 8734int bpf_object__pin_maps(struct bpf_object *obj, const char *path)
d5148d85 8735{
d5148d85
JS
8736 struct bpf_map *map;
8737 int err;
8738
8739 if (!obj)
e9fc3ce9 8740 return libbpf_err(-ENOENT);
d5148d85
JS
8741
8742 if (!obj->loaded) {
be18010e 8743 pr_warn("object not yet loaded; load it first\n");
e9fc3ce9 8744 return libbpf_err(-ENOENT);
d5148d85
JS
8745 }
8746
f74a53d9 8747 bpf_object__for_each_map(map, obj) {
4580b25f 8748 char *pin_path = NULL;
0c19a9fb 8749 char buf[PATH_MAX];
0c19a9fb 8750
ec41817b 8751 if (!map->autocreate)
229fae38
SC
8752 continue;
8753
4580b25f 8754 if (path) {
e588c116
WY
8755 err = pathname_concat(buf, sizeof(buf), path, bpf_map__name(map));
8756 if (err)
4580b25f 8757 goto err_unpin_maps;
9cf309c5 8758 sanitize_pin_path(buf);
4580b25f
THJ
8759 pin_path = buf;
8760 } else if (!map->pin_path) {
8761 continue;
0c19a9fb
SF
8762 }
8763
4580b25f 8764 err = bpf_map__pin(map, pin_path);
0c19a9fb
SF
8765 if (err)
8766 goto err_unpin_maps;
8767 }
8768
8769 return 0;
8770
8771err_unpin_maps:
bcc40fc0 8772 while ((map = bpf_object__prev_map(obj, map))) {
4580b25f 8773 if (!map->pin_path)
0c19a9fb
SF
8774 continue;
8775
4580b25f 8776 bpf_map__unpin(map, NULL);
0c19a9fb
SF
8777 }
8778
e9fc3ce9 8779 return libbpf_err(err);
0c19a9fb
SF
8780}
8781
8782int bpf_object__unpin_maps(struct bpf_object *obj, const char *path)
8783{
8784 struct bpf_map *map;
8785 int err;
8786
8787 if (!obj)
e9fc3ce9 8788 return libbpf_err(-ENOENT);
0c19a9fb 8789
f74a53d9 8790 bpf_object__for_each_map(map, obj) {
4580b25f 8791 char *pin_path = NULL;
d5148d85 8792 char buf[PATH_MAX];
d5148d85 8793
4580b25f 8794 if (path) {
e588c116
WY
8795 err = pathname_concat(buf, sizeof(buf), path, bpf_map__name(map));
8796 if (err)
8797 return libbpf_err(err);
9cf309c5 8798 sanitize_pin_path(buf);
4580b25f
THJ
8799 pin_path = buf;
8800 } else if (!map->pin_path) {
8801 continue;
8802 }
d5148d85 8803
4580b25f 8804 err = bpf_map__unpin(map, pin_path);
d5148d85 8805 if (err)
e9fc3ce9 8806 return libbpf_err(err);
d5148d85
JS
8807 }
8808
0c19a9fb
SF
8809 return 0;
8810}
8811
8812int bpf_object__pin_programs(struct bpf_object *obj, const char *path)
8813{
8814 struct bpf_program *prog;
e588c116 8815 char buf[PATH_MAX];
0c19a9fb
SF
8816 int err;
8817
8818 if (!obj)
e9fc3ce9 8819 return libbpf_err(-ENOENT);
0c19a9fb
SF
8820
8821 if (!obj->loaded) {
be18010e 8822 pr_warn("object not yet loaded; load it first\n");
e9fc3ce9 8823 return libbpf_err(-ENOENT);
0c19a9fb
SF
8824 }
8825
0c19a9fb 8826 bpf_object__for_each_program(prog, obj) {
e588c116
WY
8827 err = pathname_concat(buf, sizeof(buf), path, prog->name);
8828 if (err)
0c19a9fb 8829 goto err_unpin_programs;
0c19a9fb
SF
8830
8831 err = bpf_program__pin(prog, buf);
8832 if (err)
8833 goto err_unpin_programs;
8834 }
8835
8836 return 0;
8837
8838err_unpin_programs:
bcc40fc0 8839 while ((prog = bpf_object__prev_program(obj, prog))) {
e588c116 8840 if (pathname_concat(buf, sizeof(buf), path, prog->name))
0c19a9fb
SF
8841 continue;
8842
8843 bpf_program__unpin(prog, buf);
8844 }
8845
e9fc3ce9 8846 return libbpf_err(err);
0c19a9fb
SF
8847}
8848
8849int bpf_object__unpin_programs(struct bpf_object *obj, const char *path)
8850{
8851 struct bpf_program *prog;
8852 int err;
8853
8854 if (!obj)
e9fc3ce9 8855 return libbpf_err(-ENOENT);
0c19a9fb 8856
d5148d85
JS
8857 bpf_object__for_each_program(prog, obj) {
8858 char buf[PATH_MAX];
d5148d85 8859
e588c116
WY
8860 err = pathname_concat(buf, sizeof(buf), path, prog->name);
8861 if (err)
8862 return libbpf_err(err);
d5148d85 8863
0c19a9fb 8864 err = bpf_program__unpin(prog, buf);
d5148d85 8865 if (err)
e9fc3ce9 8866 return libbpf_err(err);
d5148d85
JS
8867 }
8868
8869 return 0;
8870}
8871
0c19a9fb
SF
8872int bpf_object__pin(struct bpf_object *obj, const char *path)
8873{
8874 int err;
8875
8876 err = bpf_object__pin_maps(obj, path);
8877 if (err)
e9fc3ce9 8878 return libbpf_err(err);
0c19a9fb
SF
8879
8880 err = bpf_object__pin_programs(obj, path);
8881 if (err) {
8882 bpf_object__unpin_maps(obj, path);
e9fc3ce9 8883 return libbpf_err(err);
0c19a9fb
SF
8884 }
8885
8886 return 0;
8887}
8888
068ca522
DX
8889int bpf_object__unpin(struct bpf_object *obj, const char *path)
8890{
8891 int err;
8892
8893 err = bpf_object__unpin_programs(obj, path);
8894 if (err)
8895 return libbpf_err(err);
8896
8897 err = bpf_object__unpin_maps(obj, path);
8898 if (err)
8899 return libbpf_err(err);
8900
8901 return 0;
8902}
8903
2d39d7c5
AN
8904static void bpf_map__destroy(struct bpf_map *map)
8905{
646f02ff
AN
8906 if (map->inner_map) {
8907 bpf_map__destroy(map->inner_map);
8908 zfree(&map->inner_map);
8909 }
8910
8911 zfree(&map->init_slots);
8912 map->init_slots_sz = 0;
8913
2e7ba4f8
AN
8914 if (map->mmaped && map->mmaped != map->obj->arena_data)
8915 munmap(map->mmaped, bpf_map_mmap_sz(map));
8916 map->mmaped = NULL;
2d39d7c5
AN
8917
8918 if (map->st_ops) {
8919 zfree(&map->st_ops->data);
8920 zfree(&map->st_ops->progs);
8921 zfree(&map->st_ops->kern_func_off);
8922 zfree(&map->st_ops);
8923 }
8924
8925 zfree(&map->name);
aed65917 8926 zfree(&map->real_name);
2d39d7c5
AN
8927 zfree(&map->pin_path);
8928
8929 if (map->fd >= 0)
8930 zclose(map->fd);
8931}
8932
1a5e3fb1
WN
8933void bpf_object__close(struct bpf_object *obj)
8934{
a5b8bd47
WN
8935 size_t i;
8936
50450fc7 8937 if (IS_ERR_OR_NULL(obj))
1a5e3fb1
WN
8938 return;
8939
2e4913e0
AN
8940 usdt_manager_free(obj->usdt_man);
8941 obj->usdt_man = NULL;
8942
67234743 8943 bpf_gen__free(obj->gen_loader);
1a5e3fb1 8944 bpf_object__elf_finish(obj);
4a404a7e 8945 bpf_object_unload(obj);
8a138aed 8946 btf__free(obj->btf);
29d67fde 8947 btf__free(obj->btf_vmlinux);
2993e051 8948 btf_ext__free(obj->btf_ext);
1a5e3fb1 8949
2d39d7c5
AN
8950 for (i = 0; i < obj->nr_maps; i++)
8951 bpf_map__destroy(&obj->maps[i]);
d859900c 8952
1373ff59 8953 zfree(&obj->btf_custom_path);
8601fd42 8954 zfree(&obj->kconfig);
5964a223
DM
8955
8956 for (i = 0; i < obj->nr_extern; i++)
8957 zfree(&obj->externs[i].essent_name);
8958
166750bc
AN
8959 zfree(&obj->externs);
8960 obj->nr_extern = 0;
8961
9d759a9b
WN
8962 zfree(&obj->maps);
8963 obj->nr_maps = 0;
a5b8bd47
WN
8964
8965 if (obj->programs && obj->nr_programs) {
8966 for (i = 0; i < obj->nr_programs; i++)
8967 bpf_program__exit(&obj->programs[i]);
8968 }
8969 zfree(&obj->programs);
8970
6b434b61
AN
8971 zfree(&obj->feat_cache);
8972 zfree(&obj->token_path);
8973 if (obj->token_fd > 0)
8974 close(obj->token_fd);
8975
2e7ba4f8
AN
8976 zfree(&obj->arena_data);
8977
1a5e3fb1
WN
8978 free(obj);
8979}
aa9b1ac3 8980
a324aae3 8981const char *bpf_object__name(const struct bpf_object *obj)
acf860ae 8982{
e9fc3ce9 8983 return obj ? obj->name : libbpf_err_ptr(-EINVAL);
acf860ae
WN
8984}
8985
a324aae3 8986unsigned int bpf_object__kversion(const struct bpf_object *obj)
45825d8a 8987{
a7fe0450 8988 return obj ? obj->kern_version : 0;
45825d8a
WN
8989}
8990
a324aae3 8991struct btf *bpf_object__btf(const struct bpf_object *obj)
789f6bab
AI
8992{
8993 return obj ? obj->btf : NULL;
8994}
8995
8a138aed
MKL
8996int bpf_object__btf_fd(const struct bpf_object *obj)
8997{
8998 return obj->btf ? btf__fd(obj->btf) : -1;
8999}
9000
155f556d
RDT
9001int bpf_object__set_kversion(struct bpf_object *obj, __u32 kern_version)
9002{
9003 if (obj->loaded)
e9fc3ce9 9004 return libbpf_err(-EINVAL);
155f556d
RDT
9005
9006 obj->kern_version = kern_version;
9007
9008 return 0;
9009}
9010
67234743
AS
9011int bpf_object__gen_loader(struct bpf_object *obj, struct gen_loader_opts *opts)
9012{
9013 struct bpf_gen *gen;
9014
9015 if (!opts)
9016 return -EFAULT;
9017 if (!OPTS_VALID(opts, gen_loader_opts))
9018 return -EINVAL;
9019 gen = calloc(sizeof(*gen), 1);
9020 if (!gen)
9021 return -ENOMEM;
9022 gen->opts = opts;
9023 obj->gen_loader = gen;
9024 return 0;
9025}
9026
eac7d845 9027static struct bpf_program *
a324aae3
AN
9028__bpf_program__iter(const struct bpf_program *p, const struct bpf_object *obj,
9029 bool forward)
aa9b1ac3 9030{
a83d6e76 9031 size_t nr_programs = obj->nr_programs;
0c19a9fb 9032 ssize_t idx;
aa9b1ac3 9033
a83d6e76 9034 if (!nr_programs)
aa9b1ac3 9035 return NULL;
aa9b1ac3 9036
a83d6e76
MKL
9037 if (!p)
9038 /* Iter from the beginning */
9039 return forward ? &obj->programs[0] :
9040 &obj->programs[nr_programs - 1];
9041
0c19a9fb 9042 if (p->obj != obj) {
be18010e 9043 pr_warn("error: program handler doesn't match object\n");
e9fc3ce9 9044 return errno = EINVAL, NULL;
aa9b1ac3
WN
9045 }
9046
a83d6e76 9047 idx = (p - obj->programs) + (forward ? 1 : -1);
0c19a9fb 9048 if (idx >= obj->nr_programs || idx < 0)
aa9b1ac3
WN
9049 return NULL;
9050 return &obj->programs[idx];
9051}
9052
2088a3a7
HC
9053struct bpf_program *
9054bpf_object__next_program(const struct bpf_object *obj, struct bpf_program *prev)
eac7d845
JK
9055{
9056 struct bpf_program *prog = prev;
9057
9058 do {
a83d6e76 9059 prog = __bpf_program__iter(prog, obj, true);
c3c55696 9060 } while (prog && prog_is_subprog(obj, prog));
0c19a9fb
SF
9061
9062 return prog;
9063}
9064
2088a3a7
HC
9065struct bpf_program *
9066bpf_object__prev_program(const struct bpf_object *obj, struct bpf_program *next)
0c19a9fb
SF
9067{
9068 struct bpf_program *prog = next;
9069
0c19a9fb 9070 do {
a83d6e76 9071 prog = __bpf_program__iter(prog, obj, false);
c3c55696 9072 } while (prog && prog_is_subprog(obj, prog));
eac7d845
JK
9073
9074 return prog;
9075}
9076
9aba3613
JK
9077void bpf_program__set_ifindex(struct bpf_program *prog, __u32 ifindex)
9078{
9079 prog->prog_ifindex = ifindex;
9080}
9081
01af3bf0
AN
9082const char *bpf_program__name(const struct bpf_program *prog)
9083{
9084 return prog->name;
9085}
9086
52109584
AN
9087const char *bpf_program__section_name(const struct bpf_program *prog)
9088{
9089 return prog->sec_name;
9090}
9091
d9297581
AN
9092bool bpf_program__autoload(const struct bpf_program *prog)
9093{
a3820c48 9094 return prog->autoload;
d9297581
AN
9095}
9096
9097int bpf_program__set_autoload(struct bpf_program *prog, bool autoload)
9098{
9099 if (prog->obj->loaded)
e9fc3ce9 9100 return libbpf_err(-EINVAL);
d9297581 9101
a3820c48 9102 prog->autoload = autoload;
d9297581
AN
9103 return 0;
9104}
9105
43cb8cba
HL
9106bool bpf_program__autoattach(const struct bpf_program *prog)
9107{
9108 return prog->autoattach;
9109}
9110
9111void bpf_program__set_autoattach(struct bpf_program *prog, bool autoattach)
9112{
9113 prog->autoattach = autoattach;
9114}
9115
65a7fa2e
AN
9116const struct bpf_insn *bpf_program__insns(const struct bpf_program *prog)
9117{
9118 return prog->insns;
9119}
9120
9121size_t bpf_program__insn_cnt(const struct bpf_program *prog)
9122{
9123 return prog->insns_cnt;
9124}
9125
b63b3c49
JO
9126int bpf_program__set_insns(struct bpf_program *prog,
9127 struct bpf_insn *new_insns, size_t new_insn_cnt)
9128{
9129 struct bpf_insn *insns;
9130
9131 if (prog->obj->loaded)
9132 return -EBUSY;
9133
9134 insns = libbpf_reallocarray(prog->insns, new_insn_cnt, sizeof(*insns));
8a0260db
AN
9135 /* NULL is a valid return from reallocarray if the new count is zero */
9136 if (!insns && new_insn_cnt) {
b63b3c49
JO
9137 pr_warn("prog '%s': failed to realloc prog code\n", prog->name);
9138 return -ENOMEM;
9139 }
9140 memcpy(insns, new_insns, new_insn_cnt * sizeof(*insns));
9141
9142 prog->insns = insns;
9143 prog->insns_cnt = new_insn_cnt;
9144 return 0;
9145}
9146
cf90a20d 9147int bpf_program__fd(const struct bpf_program *prog)
b580563e 9148{
1e960043 9149 if (!prog)
e9fc3ce9 9150 return libbpf_err(-EINVAL);
1e960043 9151
cf90a20d 9152 if (prog->fd < 0)
e9fc3ce9 9153 return libbpf_err(-ENOENT);
b580563e 9154
cf90a20d 9155 return prog->fd;
aa9b1ac3 9156}
9d759a9b 9157
20eccf29
AN
9158__alias(bpf_program__type)
9159enum bpf_prog_type bpf_program__get_type(const struct bpf_program *prog);
9160
9161enum bpf_prog_type bpf_program__type(const struct bpf_program *prog)
f1eead9e
AN
9162{
9163 return prog->type;
9164}
9165
c628747c
AN
9166static size_t custom_sec_def_cnt;
9167static struct bpf_sec_def *custom_sec_defs;
9168static struct bpf_sec_def custom_fallback_def;
9169static bool has_custom_fallback_def;
9170static int last_custom_sec_def_handler_id;
9171
93442f13 9172int bpf_program__set_type(struct bpf_program *prog, enum bpf_prog_type type)
5f44e4c8 9173{
93442f13
GS
9174 if (prog->obj->loaded)
9175 return libbpf_err(-EBUSY);
9176
c628747c
AN
9177 /* if type is not changed, do nothing */
9178 if (prog->type == type)
9179 return 0;
9180
5f44e4c8 9181 prog->type = type;
c628747c
AN
9182
9183 /* If a program type was changed, we need to reset associated SEC()
9184 * handler, as it will be invalid now. The only exception is a generic
9185 * fallback handler, which by definition is program type-agnostic and
9186 * is a catch-all custom handler, optionally set by the application,
9187 * so should be able to handle any type of BPF program.
9188 */
9189 if (prog->sec_def != &custom_fallback_def)
9190 prog->sec_def = NULL;
93442f13 9191 return 0;
5f44e4c8
WN
9192}
9193
20eccf29
AN
9194__alias(bpf_program__expected_attach_type)
9195enum bpf_attach_type bpf_program__get_expected_attach_type(const struct bpf_program *prog);
9196
9197enum bpf_attach_type bpf_program__expected_attach_type(const struct bpf_program *prog)
f1eead9e
AN
9198{
9199 return prog->expected_attach_type;
9200}
9201
93442f13 9202int bpf_program__set_expected_attach_type(struct bpf_program *prog,
16962b24 9203 enum bpf_attach_type type)
d7be143b 9204{
93442f13
GS
9205 if (prog->obj->loaded)
9206 return libbpf_err(-EBUSY);
9207
d7be143b 9208 prog->expected_attach_type = type;
93442f13 9209 return 0;
d7be143b
AI
9210}
9211
a6ca7158
AN
9212__u32 bpf_program__flags(const struct bpf_program *prog)
9213{
9214 return prog->prog_flags;
9215}
9216
8cccee9e 9217int bpf_program__set_flags(struct bpf_program *prog, __u32 flags)
a6ca7158
AN
9218{
9219 if (prog->obj->loaded)
9220 return libbpf_err(-EBUSY);
9221
8cccee9e 9222 prog->prog_flags = flags;
a6ca7158
AN
9223 return 0;
9224}
9225
dbdd2c7f
AN
9226__u32 bpf_program__log_level(const struct bpf_program *prog)
9227{
9228 return prog->log_level;
9229}
9230
9231int bpf_program__set_log_level(struct bpf_program *prog, __u32 log_level)
9232{
9233 if (prog->obj->loaded)
9234 return libbpf_err(-EBUSY);
9235
9236 prog->log_level = log_level;
9237 return 0;
9238}
9239
b3ce9079
AN
9240const char *bpf_program__log_buf(const struct bpf_program *prog, size_t *log_size)
9241{
9242 *log_size = prog->log_size;
9243 return prog->log_buf;
9244}
9245
9246int bpf_program__set_log_buf(struct bpf_program *prog, char *log_buf, size_t log_size)
9247{
9248 if (log_size && !log_buf)
9249 return -EINVAL;
9250 if (prog->log_size > UINT_MAX)
9251 return -EINVAL;
9252 if (prog->obj->loaded)
9253 return -EBUSY;
9254
9255 prog->log_buf = log_buf;
9256 prog->log_size = log_size;
a6ca7158
AN
9257 return 0;
9258}
9259
15ea31fa 9260#define SEC_DEF(sec_pfx, ptype, atype, flags, ...) { \
697f104d 9261 .sec = (char *)sec_pfx, \
d7a18ea7 9262 .prog_type = BPF_PROG_TYPE_##ptype, \
15ea31fa
AN
9263 .expected_attach_type = atype, \
9264 .cookie = (long)(flags), \
4fa5bcfe 9265 .prog_prepare_load_fn = libbpf_prepare_prog_load, \
d7a18ea7
AN
9266 __VA_ARGS__ \
9267}
9268
4fa5bcfe 9269static int attach_kprobe(const struct bpf_program *prog, long cookie, struct bpf_link **link);
39f8dc43 9270static int attach_uprobe(const struct bpf_program *prog, long cookie, struct bpf_link **link);
708ac5be 9271static int attach_ksyscall(const struct bpf_program *prog, long cookie, struct bpf_link **link);
2e4913e0 9272static int attach_usdt(const struct bpf_program *prog, long cookie, struct bpf_link **link);
4fa5bcfe
AN
9273static int attach_tp(const struct bpf_program *prog, long cookie, struct bpf_link **link);
9274static int attach_raw_tp(const struct bpf_program *prog, long cookie, struct bpf_link **link);
9275static int attach_trace(const struct bpf_program *prog, long cookie, struct bpf_link **link);
ddc6b049 9276static int attach_kprobe_multi(const struct bpf_program *prog, long cookie, struct bpf_link **link);
2ca178f0 9277static int attach_kprobe_session(const struct bpf_program *prog, long cookie, struct bpf_link **link);
5bfdd32d 9278static int attach_uprobe_multi(const struct bpf_program *prog, long cookie, struct bpf_link **link);
4fa5bcfe
AN
9279static int attach_lsm(const struct bpf_program *prog, long cookie, struct bpf_link **link);
9280static int attach_iter(const struct bpf_program *prog, long cookie, struct bpf_link **link);
d7a18ea7 9281
d7a18ea7 9282static const struct bpf_sec_def section_defs[] = {
450b167f
AN
9283 SEC_DEF("socket", SOCKET_FILTER, 0, SEC_NONE),
9284 SEC_DEF("sk_reuseport/migrate", SK_REUSEPORT, BPF_SK_REUSEPORT_SELECT_OR_MIGRATE, SEC_ATTACHABLE),
9285 SEC_DEF("sk_reuseport", SK_REUSEPORT, BPF_SK_REUSEPORT_SELECT, SEC_ATTACHABLE),
9af8efc4 9286 SEC_DEF("kprobe+", KPROBE, 0, SEC_NONE, attach_kprobe),
39f8dc43 9287 SEC_DEF("uprobe+", KPROBE, 0, SEC_NONE, attach_uprobe),
c4cac71f 9288 SEC_DEF("uprobe.s+", KPROBE, 0, SEC_SLEEPABLE, attach_uprobe),
9af8efc4 9289 SEC_DEF("kretprobe+", KPROBE, 0, SEC_NONE, attach_kprobe),
39f8dc43 9290 SEC_DEF("uretprobe+", KPROBE, 0, SEC_NONE, attach_uprobe),
c4cac71f 9291 SEC_DEF("uretprobe.s+", KPROBE, 0, SEC_SLEEPABLE, attach_uprobe),
9af8efc4
AN
9292 SEC_DEF("kprobe.multi+", KPROBE, BPF_TRACE_KPROBE_MULTI, SEC_NONE, attach_kprobe_multi),
9293 SEC_DEF("kretprobe.multi+", KPROBE, BPF_TRACE_KPROBE_MULTI, SEC_NONE, attach_kprobe_multi),
2ca178f0 9294 SEC_DEF("kprobe.session+", KPROBE, BPF_TRACE_KPROBE_SESSION, SEC_NONE, attach_kprobe_session),
5bfdd32d
JO
9295 SEC_DEF("uprobe.multi+", KPROBE, BPF_TRACE_UPROBE_MULTI, SEC_NONE, attach_uprobe_multi),
9296 SEC_DEF("uretprobe.multi+", KPROBE, BPF_TRACE_UPROBE_MULTI, SEC_NONE, attach_uprobe_multi),
9297 SEC_DEF("uprobe.multi.s+", KPROBE, BPF_TRACE_UPROBE_MULTI, SEC_SLEEPABLE, attach_uprobe_multi),
9298 SEC_DEF("uretprobe.multi.s+", KPROBE, BPF_TRACE_UPROBE_MULTI, SEC_SLEEPABLE, attach_uprobe_multi),
708ac5be
AN
9299 SEC_DEF("ksyscall+", KPROBE, 0, SEC_NONE, attach_ksyscall),
9300 SEC_DEF("kretsyscall+", KPROBE, 0, SEC_NONE, attach_ksyscall),
5902da6d
JO
9301 SEC_DEF("usdt+", KPROBE, 0, SEC_USDT, attach_usdt),
9302 SEC_DEF("usdt.s+", KPROBE, 0, SEC_USDT | SEC_SLEEPABLE, attach_usdt),
fe20ce3a
DB
9303 SEC_DEF("tc/ingress", SCHED_CLS, BPF_TCX_INGRESS, SEC_NONE), /* alias for tcx */
9304 SEC_DEF("tc/egress", SCHED_CLS, BPF_TCX_EGRESS, SEC_NONE), /* alias for tcx */
9305 SEC_DEF("tcx/ingress", SCHED_CLS, BPF_TCX_INGRESS, SEC_NONE),
9306 SEC_DEF("tcx/egress", SCHED_CLS, BPF_TCX_EGRESS, SEC_NONE),
9307 SEC_DEF("tc", SCHED_CLS, 0, SEC_NONE), /* deprecated / legacy, use tcx */
9308 SEC_DEF("classifier", SCHED_CLS, 0, SEC_NONE), /* deprecated / legacy, use tcx */
9309 SEC_DEF("action", SCHED_ACT, 0, SEC_NONE), /* deprecated / legacy, use tcx */
05c31b4a
DB
9310 SEC_DEF("netkit/primary", SCHED_CLS, BPF_NETKIT_PRIMARY, SEC_NONE),
9311 SEC_DEF("netkit/peer", SCHED_CLS, BPF_NETKIT_PEER, SEC_NONE),
9af8efc4
AN
9312 SEC_DEF("tracepoint+", TRACEPOINT, 0, SEC_NONE, attach_tp),
9313 SEC_DEF("tp+", TRACEPOINT, 0, SEC_NONE, attach_tp),
9314 SEC_DEF("raw_tracepoint+", RAW_TRACEPOINT, 0, SEC_NONE, attach_raw_tp),
9315 SEC_DEF("raw_tp+", RAW_TRACEPOINT, 0, SEC_NONE, attach_raw_tp),
9316 SEC_DEF("raw_tracepoint.w+", RAW_TRACEPOINT_WRITABLE, 0, SEC_NONE, attach_raw_tp),
9317 SEC_DEF("raw_tp.w+", RAW_TRACEPOINT_WRITABLE, 0, SEC_NONE, attach_raw_tp),
cc7d8f2c
AN
9318 SEC_DEF("tp_btf+", TRACING, BPF_TRACE_RAW_TP, SEC_ATTACH_BTF, attach_trace),
9319 SEC_DEF("fentry+", TRACING, BPF_TRACE_FENTRY, SEC_ATTACH_BTF, attach_trace),
9320 SEC_DEF("fmod_ret+", TRACING, BPF_MODIFY_RETURN, SEC_ATTACH_BTF, attach_trace),
9321 SEC_DEF("fexit+", TRACING, BPF_TRACE_FEXIT, SEC_ATTACH_BTF, attach_trace),
9322 SEC_DEF("fentry.s+", TRACING, BPF_TRACE_FENTRY, SEC_ATTACH_BTF | SEC_SLEEPABLE, attach_trace),
9323 SEC_DEF("fmod_ret.s+", TRACING, BPF_MODIFY_RETURN, SEC_ATTACH_BTF | SEC_SLEEPABLE, attach_trace),
9324 SEC_DEF("fexit.s+", TRACING, BPF_TRACE_FEXIT, SEC_ATTACH_BTF | SEC_SLEEPABLE, attach_trace),
9325 SEC_DEF("freplace+", EXT, 0, SEC_ATTACH_BTF, attach_trace),
9326 SEC_DEF("lsm+", LSM, BPF_LSM_MAC, SEC_ATTACH_BTF, attach_lsm),
9327 SEC_DEF("lsm.s+", LSM, BPF_LSM_MAC, SEC_ATTACH_BTF | SEC_SLEEPABLE, attach_lsm),
bffcf348 9328 SEC_DEF("lsm_cgroup+", LSM, BPF_LSM_CGROUP, SEC_ATTACH_BTF),
cc7d8f2c
AN
9329 SEC_DEF("iter+", TRACING, BPF_TRACE_ITER, SEC_ATTACH_BTF, attach_iter),
9330 SEC_DEF("iter.s+", TRACING, BPF_TRACE_ITER, SEC_ATTACH_BTF | SEC_SLEEPABLE, attach_iter),
15ea31fa 9331 SEC_DEF("syscall", SYSCALL, 0, SEC_SLEEPABLE),
082c4bfb 9332 SEC_DEF("xdp.frags/devmap", XDP, BPF_XDP_DEVMAP, SEC_XDP_FRAGS),
4a4d4cee 9333 SEC_DEF("xdp/devmap", XDP, BPF_XDP_DEVMAP, SEC_ATTACHABLE),
082c4bfb 9334 SEC_DEF("xdp.frags/cpumap", XDP, BPF_XDP_CPUMAP, SEC_XDP_FRAGS),
4a4d4cee 9335 SEC_DEF("xdp/cpumap", XDP, BPF_XDP_CPUMAP, SEC_ATTACHABLE),
082c4bfb 9336 SEC_DEF("xdp.frags", XDP, BPF_XDP, SEC_XDP_FRAGS),
450b167f
AN
9337 SEC_DEF("xdp", XDP, BPF_XDP, SEC_ATTACHABLE_OPT),
9338 SEC_DEF("perf_event", PERF_EVENT, 0, SEC_NONE),
9339 SEC_DEF("lwt_in", LWT_IN, 0, SEC_NONE),
9340 SEC_DEF("lwt_out", LWT_OUT, 0, SEC_NONE),
9341 SEC_DEF("lwt_xmit", LWT_XMIT, 0, SEC_NONE),
9342 SEC_DEF("lwt_seg6local", LWT_SEG6LOCAL, 0, SEC_NONE),
9343 SEC_DEF("sockops", SOCK_OPS, BPF_CGROUP_SOCK_OPS, SEC_ATTACHABLE_OPT),
9344 SEC_DEF("sk_skb/stream_parser", SK_SKB, BPF_SK_SKB_STREAM_PARSER, SEC_ATTACHABLE_OPT),
9345 SEC_DEF("sk_skb/stream_verdict",SK_SKB, BPF_SK_SKB_STREAM_VERDICT, SEC_ATTACHABLE_OPT),
61df5756 9346 SEC_DEF("sk_skb/verdict", SK_SKB, BPF_SK_SKB_VERDICT, SEC_ATTACHABLE_OPT),
450b167f
AN
9347 SEC_DEF("sk_skb", SK_SKB, 0, SEC_NONE),
9348 SEC_DEF("sk_msg", SK_MSG, BPF_SK_MSG_VERDICT, SEC_ATTACHABLE_OPT),
9349 SEC_DEF("lirc_mode2", LIRC_MODE2, BPF_LIRC_MODE2, SEC_ATTACHABLE_OPT),
9350 SEC_DEF("flow_dissector", FLOW_DISSECTOR, BPF_FLOW_DISSECTOR, SEC_ATTACHABLE_OPT),
9351 SEC_DEF("cgroup_skb/ingress", CGROUP_SKB, BPF_CGROUP_INET_INGRESS, SEC_ATTACHABLE_OPT),
9352 SEC_DEF("cgroup_skb/egress", CGROUP_SKB, BPF_CGROUP_INET_EGRESS, SEC_ATTACHABLE_OPT),
9353 SEC_DEF("cgroup/skb", CGROUP_SKB, 0, SEC_NONE),
9354 SEC_DEF("cgroup/sock_create", CGROUP_SOCK, BPF_CGROUP_INET_SOCK_CREATE, SEC_ATTACHABLE),
9355 SEC_DEF("cgroup/sock_release", CGROUP_SOCK, BPF_CGROUP_INET_SOCK_RELEASE, SEC_ATTACHABLE),
9356 SEC_DEF("cgroup/sock", CGROUP_SOCK, BPF_CGROUP_INET_SOCK_CREATE, SEC_ATTACHABLE_OPT),
9357 SEC_DEF("cgroup/post_bind4", CGROUP_SOCK, BPF_CGROUP_INET4_POST_BIND, SEC_ATTACHABLE),
9358 SEC_DEF("cgroup/post_bind6", CGROUP_SOCK, BPF_CGROUP_INET6_POST_BIND, SEC_ATTACHABLE),
9359 SEC_DEF("cgroup/bind4", CGROUP_SOCK_ADDR, BPF_CGROUP_INET4_BIND, SEC_ATTACHABLE),
9360 SEC_DEF("cgroup/bind6", CGROUP_SOCK_ADDR, BPF_CGROUP_INET6_BIND, SEC_ATTACHABLE),
9361 SEC_DEF("cgroup/connect4", CGROUP_SOCK_ADDR, BPF_CGROUP_INET4_CONNECT, SEC_ATTACHABLE),
9362 SEC_DEF("cgroup/connect6", CGROUP_SOCK_ADDR, BPF_CGROUP_INET6_CONNECT, SEC_ATTACHABLE),
bf90438c 9363 SEC_DEF("cgroup/connect_unix", CGROUP_SOCK_ADDR, BPF_CGROUP_UNIX_CONNECT, SEC_ATTACHABLE),
450b167f
AN
9364 SEC_DEF("cgroup/sendmsg4", CGROUP_SOCK_ADDR, BPF_CGROUP_UDP4_SENDMSG, SEC_ATTACHABLE),
9365 SEC_DEF("cgroup/sendmsg6", CGROUP_SOCK_ADDR, BPF_CGROUP_UDP6_SENDMSG, SEC_ATTACHABLE),
bf90438c 9366 SEC_DEF("cgroup/sendmsg_unix", CGROUP_SOCK_ADDR, BPF_CGROUP_UNIX_SENDMSG, SEC_ATTACHABLE),
450b167f
AN
9367 SEC_DEF("cgroup/recvmsg4", CGROUP_SOCK_ADDR, BPF_CGROUP_UDP4_RECVMSG, SEC_ATTACHABLE),
9368 SEC_DEF("cgroup/recvmsg6", CGROUP_SOCK_ADDR, BPF_CGROUP_UDP6_RECVMSG, SEC_ATTACHABLE),
bf90438c 9369 SEC_DEF("cgroup/recvmsg_unix", CGROUP_SOCK_ADDR, BPF_CGROUP_UNIX_RECVMSG, SEC_ATTACHABLE),
450b167f
AN
9370 SEC_DEF("cgroup/getpeername4", CGROUP_SOCK_ADDR, BPF_CGROUP_INET4_GETPEERNAME, SEC_ATTACHABLE),
9371 SEC_DEF("cgroup/getpeername6", CGROUP_SOCK_ADDR, BPF_CGROUP_INET6_GETPEERNAME, SEC_ATTACHABLE),
bf90438c 9372 SEC_DEF("cgroup/getpeername_unix", CGROUP_SOCK_ADDR, BPF_CGROUP_UNIX_GETPEERNAME, SEC_ATTACHABLE),
450b167f
AN
9373 SEC_DEF("cgroup/getsockname4", CGROUP_SOCK_ADDR, BPF_CGROUP_INET4_GETSOCKNAME, SEC_ATTACHABLE),
9374 SEC_DEF("cgroup/getsockname6", CGROUP_SOCK_ADDR, BPF_CGROUP_INET6_GETSOCKNAME, SEC_ATTACHABLE),
bf90438c 9375 SEC_DEF("cgroup/getsockname_unix", CGROUP_SOCK_ADDR, BPF_CGROUP_UNIX_GETSOCKNAME, SEC_ATTACHABLE),
450b167f
AN
9376 SEC_DEF("cgroup/sysctl", CGROUP_SYSCTL, BPF_CGROUP_SYSCTL, SEC_ATTACHABLE),
9377 SEC_DEF("cgroup/getsockopt", CGROUP_SOCKOPT, BPF_CGROUP_GETSOCKOPT, SEC_ATTACHABLE),
9378 SEC_DEF("cgroup/setsockopt", CGROUP_SOCKOPT, BPF_CGROUP_SETSOCKOPT, SEC_ATTACHABLE),
9379 SEC_DEF("cgroup/dev", CGROUP_DEVICE, BPF_CGROUP_DEVICE, SEC_ATTACHABLE_OPT),
dd94d45c 9380 SEC_DEF("struct_ops+", STRUCT_OPS, 0, SEC_NONE),
913b2255 9381 SEC_DEF("struct_ops.s+", STRUCT_OPS, 0, SEC_SLEEPABLE),
450b167f 9382 SEC_DEF("sk_lookup", SK_LOOKUP, BPF_SK_LOOKUP, SEC_ATTACHABLE),
132328e8 9383 SEC_DEF("netfilter", NETFILTER, BPF_NETFILTER, SEC_NONE),
583c9009 9384};
d7be143b 9385
697f104d
AN
9386int libbpf_register_prog_handler(const char *sec,
9387 enum bpf_prog_type prog_type,
9388 enum bpf_attach_type exp_attach_type,
9389 const struct libbpf_prog_handler_opts *opts)
d7a18ea7 9390{
697f104d 9391 struct bpf_sec_def *sec_def;
d7a18ea7 9392
697f104d
AN
9393 if (!OPTS_VALID(opts, libbpf_prog_handler_opts))
9394 return libbpf_err(-EINVAL);
dd94d45c 9395
697f104d
AN
9396 if (last_custom_sec_def_handler_id == INT_MAX) /* prevent overflow */
9397 return libbpf_err(-E2BIG);
dd94d45c 9398
697f104d
AN
9399 if (sec) {
9400 sec_def = libbpf_reallocarray(custom_sec_defs, custom_sec_def_cnt + 1,
9401 sizeof(*sec_def));
9402 if (!sec_def)
9403 return libbpf_err(-ENOMEM);
dd94d45c 9404
697f104d
AN
9405 custom_sec_defs = sec_def;
9406 sec_def = &custom_sec_defs[custom_sec_def_cnt];
9407 } else {
9408 if (has_custom_fallback_def)
9409 return libbpf_err(-EBUSY);
dd94d45c 9410
697f104d
AN
9411 sec_def = &custom_fallback_def;
9412 }
9413
9414 sec_def->sec = sec ? strdup(sec) : NULL;
9415 if (sec && !sec_def->sec)
9416 return libbpf_err(-ENOMEM);
9417
9418 sec_def->prog_type = prog_type;
9419 sec_def->expected_attach_type = exp_attach_type;
9420 sec_def->cookie = OPTS_GET(opts, cookie, 0);
9421
9422 sec_def->prog_setup_fn = OPTS_GET(opts, prog_setup_fn, NULL);
9423 sec_def->prog_prepare_load_fn = OPTS_GET(opts, prog_prepare_load_fn, NULL);
9424 sec_def->prog_attach_fn = OPTS_GET(opts, prog_attach_fn, NULL);
9425
9426 sec_def->handler_id = ++last_custom_sec_def_handler_id;
9427
9428 if (sec)
9429 custom_sec_def_cnt++;
9430 else
9431 has_custom_fallback_def = true;
9432
9433 return sec_def->handler_id;
9434}
9435
9436int libbpf_unregister_prog_handler(int handler_id)
9437{
9438 struct bpf_sec_def *sec_defs;
9439 int i;
9440
9441 if (handler_id <= 0)
9442 return libbpf_err(-EINVAL);
9443
9444 if (has_custom_fallback_def && custom_fallback_def.handler_id == handler_id) {
9445 memset(&custom_fallback_def, 0, sizeof(custom_fallback_def));
9446 has_custom_fallback_def = false;
9447 return 0;
9448 }
9449
9450 for (i = 0; i < custom_sec_def_cnt; i++) {
9451 if (custom_sec_defs[i].handler_id == handler_id)
9452 break;
9453 }
9454
9455 if (i == custom_sec_def_cnt)
9456 return libbpf_err(-ENOENT);
9457
9458 free(custom_sec_defs[i].sec);
9459 for (i = i + 1; i < custom_sec_def_cnt; i++)
9460 custom_sec_defs[i - 1] = custom_sec_defs[i];
9461 custom_sec_def_cnt--;
9462
9463 /* try to shrink the array, but it's ok if we couldn't */
9464 sec_defs = libbpf_reallocarray(custom_sec_defs, custom_sec_def_cnt, sizeof(*sec_defs));
8a0260db
AN
9465 /* if new count is zero, reallocarray can return a valid NULL result;
9466 * in this case the previous pointer will be freed, so we *have to*
9467 * reassign old pointer to the new value (even if it's NULL)
9468 */
9469 if (sec_defs || custom_sec_def_cnt == 0)
697f104d
AN
9470 custom_sec_defs = sec_defs;
9471
9472 return 0;
9473}
9474
450b167f 9475static bool sec_def_matches(const struct bpf_sec_def *sec_def, const char *sec_name)
697f104d
AN
9476{
9477 size_t len = strlen(sec_def->sec);
9478
9479 /* "type/" always has to have proper SEC("type/extras") form */
9480 if (sec_def->sec[len - 1] == '/') {
9481 if (str_has_pfx(sec_name, sec_def->sec))
9482 return true;
9483 return false;
9484 }
9485
9486 /* "type+" means it can be either exact SEC("type") or
9487 * well-formed SEC("type/extras") with proper '/' separator
9488 */
9489 if (sec_def->sec[len - 1] == '+') {
9490 len--;
9491 /* not even a prefix */
9492 if (strncmp(sec_name, sec_def->sec, len) != 0)
9493 return false;
9494 /* exact match or has '/' separator */
9495 if (sec_name[len] == '\0' || sec_name[len] == '/')
9496 return true;
9497 return false;
9498 }
9499
697f104d
AN
9500 return strcmp(sec_name, sec_def->sec) == 0;
9501}
9502
9503static const struct bpf_sec_def *find_sec_def(const char *sec_name)
9504{
9505 const struct bpf_sec_def *sec_def;
9506 int i, n;
697f104d
AN
9507
9508 n = custom_sec_def_cnt;
9509 for (i = 0; i < n; i++) {
9510 sec_def = &custom_sec_defs[i];
450b167f 9511 if (sec_def_matches(sec_def, sec_name))
697f104d
AN
9512 return sec_def;
9513 }
9514
9515 n = ARRAY_SIZE(section_defs);
9516 for (i = 0; i < n; i++) {
9517 sec_def = &section_defs[i];
450b167f 9518 if (sec_def_matches(sec_def, sec_name))
dd94d45c 9519 return sec_def;
d7a18ea7 9520 }
697f104d
AN
9521
9522 if (has_custom_fallback_def)
9523 return &custom_fallback_def;
9524
d7a18ea7
AN
9525 return NULL;
9526}
9527
697f104d
AN
9528#define MAX_TYPE_NAME_SIZE 32
9529
c76e4c22
TS
9530static char *libbpf_get_type_names(bool attach_type)
9531{
d7a18ea7 9532 int i, len = ARRAY_SIZE(section_defs) * MAX_TYPE_NAME_SIZE;
c76e4c22
TS
9533 char *buf;
9534
9535 buf = malloc(len);
9536 if (!buf)
9537 return NULL;
9538
9539 buf[0] = '\0';
9540 /* Forge string buf with all available names */
d7a18ea7 9541 for (i = 0; i < ARRAY_SIZE(section_defs); i++) {
15ea31fa
AN
9542 const struct bpf_sec_def *sec_def = &section_defs[i];
9543
9544 if (attach_type) {
4fa5bcfe 9545 if (sec_def->prog_prepare_load_fn != libbpf_prepare_prog_load)
15ea31fa
AN
9546 continue;
9547
9548 if (!(sec_def->cookie & SEC_ATTACHABLE))
9549 continue;
9550 }
c76e4c22 9551
d7a18ea7 9552 if (strlen(buf) + strlen(section_defs[i].sec) + 2 > len) {
c76e4c22
TS
9553 free(buf);
9554 return NULL;
9555 }
9556 strcat(buf, " ");
d7a18ea7 9557 strcat(buf, section_defs[i].sec);
c76e4c22
TS
9558 }
9559
9560 return buf;
9561}
9562
b60df2a0
JK
9563int libbpf_prog_type_by_name(const char *name, enum bpf_prog_type *prog_type,
9564 enum bpf_attach_type *expected_attach_type)
583c9009 9565{
d7a18ea7 9566 const struct bpf_sec_def *sec_def;
c76e4c22 9567 char *type_names;
583c9009 9568
b60df2a0 9569 if (!name)
e9fc3ce9 9570 return libbpf_err(-EINVAL);
583c9009 9571
d7a18ea7
AN
9572 sec_def = find_sec_def(name);
9573 if (sec_def) {
9574 *prog_type = sec_def->prog_type;
9575 *expected_attach_type = sec_def->expected_attach_type;
b60df2a0
JK
9576 return 0;
9577 }
d7a18ea7 9578
4a3d6c6a 9579 pr_debug("failed to guess program type from ELF section '%s'\n", name);
c76e4c22
TS
9580 type_names = libbpf_get_type_names(false);
9581 if (type_names != NULL) {
3f519353 9582 pr_debug("supported section(type) names are:%s\n", type_names);
c76e4c22
TS
9583 free(type_names);
9584 }
9585
e9fc3ce9 9586 return libbpf_err(-ESRCH);
b60df2a0 9587}
583c9009 9588
ccde5760
DM
9589const char *libbpf_bpf_attach_type_str(enum bpf_attach_type t)
9590{
9591 if (t < 0 || t >= ARRAY_SIZE(attach_type_name))
9592 return NULL;
9593
9594 return attach_type_name[t];
9595}
9596
ba5d1b58
DM
9597const char *libbpf_bpf_link_type_str(enum bpf_link_type t)
9598{
9599 if (t < 0 || t >= ARRAY_SIZE(link_type_name))
9600 return NULL;
9601
9602 return link_type_name[t];
9603}
9604
3e6dc020
DM
9605const char *libbpf_bpf_map_type_str(enum bpf_map_type t)
9606{
9607 if (t < 0 || t >= ARRAY_SIZE(map_type_name))
9608 return NULL;
9609
9610 return map_type_name[t];
9611}
9612
d18616e7
DM
9613const char *libbpf_bpf_prog_type_str(enum bpf_prog_type t)
9614{
9615 if (t < 0 || t >= ARRAY_SIZE(prog_type_name))
9616 return NULL;
9617
9618 return prog_type_name[t];
9619}
9620
590a0088 9621static struct bpf_map *find_struct_ops_map_by_offset(struct bpf_object *obj,
809a69d6 9622 int sec_idx,
590a0088
MKL
9623 size_t offset)
9624{
9625 struct bpf_map *map;
9626 size_t i;
9627
9628 for (i = 0; i < obj->nr_maps; i++) {
9629 map = &obj->maps[i];
9630 if (!bpf_map__is_struct_ops(map))
9631 continue;
809a69d6
KFL
9632 if (map->sec_idx == sec_idx &&
9633 map->sec_offset <= offset &&
590a0088
MKL
9634 offset - map->sec_offset < map->def.value_size)
9635 return map;
9636 }
9637
9638 return NULL;
9639}
9640
69e4a9d2
KFL
9641/* Collect the reloc from ELF, populate the st_ops->progs[], and update
9642 * st_ops->data for shadow type.
9643 */
646f02ff 9644static int bpf_object__collect_st_ops_relos(struct bpf_object *obj,
ad23b723 9645 Elf64_Shdr *shdr, Elf_Data *data)
590a0088
MKL
9646{
9647 const struct btf_member *member;
9648 struct bpf_struct_ops *st_ops;
9649 struct bpf_program *prog;
9650 unsigned int shdr_idx;
9651 const struct btf *btf;
9652 struct bpf_map *map;
7e06aad5 9653 unsigned int moff, insn_idx;
590a0088 9654 const char *name;
1d1a3bcf 9655 __u32 member_idx;
ad23b723
AN
9656 Elf64_Sym *sym;
9657 Elf64_Rel *rel;
590a0088
MKL
9658 int i, nrels;
9659
590a0088
MKL
9660 btf = obj->btf;
9661 nrels = shdr->sh_size / shdr->sh_entsize;
9662 for (i = 0; i < nrels; i++) {
ad23b723
AN
9663 rel = elf_rel_by_idx(data, i);
9664 if (!rel) {
590a0088
MKL
9665 pr_warn("struct_ops reloc: failed to get %d reloc\n", i);
9666 return -LIBBPF_ERRNO__FORMAT;
9667 }
9668
ad23b723
AN
9669 sym = elf_sym_by_idx(obj, ELF64_R_SYM(rel->r_info));
9670 if (!sym) {
590a0088 9671 pr_warn("struct_ops reloc: symbol %zx not found\n",
ad23b723 9672 (size_t)ELF64_R_SYM(rel->r_info));
590a0088
MKL
9673 return -LIBBPF_ERRNO__FORMAT;
9674 }
9675
ad23b723 9676 name = elf_sym_str(obj, sym->st_name) ?: "<?>";
809a69d6 9677 map = find_struct_ops_map_by_offset(obj, shdr->sh_info, rel->r_offset);
590a0088 9678 if (!map) {
ad23b723
AN
9679 pr_warn("struct_ops reloc: cannot find map at rel->r_offset %zu\n",
9680 (size_t)rel->r_offset);
590a0088
MKL
9681 return -EINVAL;
9682 }
9683
ad23b723
AN
9684 moff = rel->r_offset - map->sec_offset;
9685 shdr_idx = sym->st_shndx;
590a0088 9686 st_ops = map->st_ops;
ad23b723 9687 pr_debug("struct_ops reloc %s: for %lld value %lld shdr_idx %u rel->r_offset %zu map->sec_offset %zu name %d (\'%s\')\n",
590a0088 9688 map->name,
ad23b723
AN
9689 (long long)(rel->r_info >> 32),
9690 (long long)sym->st_value,
9691 shdr_idx, (size_t)rel->r_offset,
9692 map->sec_offset, sym->st_name, name);
590a0088
MKL
9693
9694 if (shdr_idx >= SHN_LORESERVE) {
ad23b723
AN
9695 pr_warn("struct_ops reloc %s: rel->r_offset %zu shdr_idx %u unsupported non-static function\n",
9696 map->name, (size_t)rel->r_offset, shdr_idx);
590a0088
MKL
9697 return -LIBBPF_ERRNO__RELOC;
9698 }
ad23b723 9699 if (sym->st_value % BPF_INSN_SZ) {
7e06aad5 9700 pr_warn("struct_ops reloc %s: invalid target program offset %llu\n",
ad23b723 9701 map->name, (unsigned long long)sym->st_value);
7e06aad5
AN
9702 return -LIBBPF_ERRNO__FORMAT;
9703 }
ad23b723 9704 insn_idx = sym->st_value / BPF_INSN_SZ;
590a0088
MKL
9705
9706 member = find_member_by_offset(st_ops->type, moff * 8);
9707 if (!member) {
9708 pr_warn("struct_ops reloc %s: cannot find member at moff %u\n",
9709 map->name, moff);
9710 return -EINVAL;
9711 }
9712 member_idx = member - btf_members(st_ops->type);
9713 name = btf__name_by_offset(btf, member->name_off);
9714
9715 if (!resolve_func_ptr(btf, member->type, NULL)) {
9716 pr_warn("struct_ops reloc %s: cannot relocate non func ptr %s\n",
9717 map->name, name);
9718 return -EINVAL;
9719 }
9720
7e06aad5 9721 prog = find_prog_by_sec_insn(obj, shdr_idx, insn_idx);
590a0088
MKL
9722 if (!prog) {
9723 pr_warn("struct_ops reloc %s: cannot find prog at shdr_idx %u to relocate func ptr %s\n",
9724 map->name, shdr_idx, name);
9725 return -EINVAL;
9726 }
9727
91b4d1d1
AN
9728 /* prevent the use of BPF prog with invalid type */
9729 if (prog->type != BPF_PROG_TYPE_STRUCT_OPS) {
9730 pr_warn("struct_ops reloc %s: prog %s is not struct_ops BPF program\n",
9731 map->name, prog->name);
9732 return -EINVAL;
9733 }
590a0088 9734
590a0088 9735 st_ops->progs[member_idx] = prog;
69e4a9d2
KFL
9736
9737 /* st_ops->data will be exposed to users, being returned by
9738 * bpf_map__initial_value() as a pointer to the shadow
9739 * type. All function pointers in the original struct type
9740 * should be converted to a pointer to struct bpf_program
9741 * in the shadow type.
9742 */
9743 *((struct bpf_program **)(st_ops->data + moff)) = prog;
590a0088
MKL
9744 }
9745
9746 return 0;
590a0088
MKL
9747}
9748
a6ed02ca 9749#define BTF_TRACE_PREFIX "btf_trace_"
1e092a03 9750#define BTF_LSM_PREFIX "bpf_lsm_"
21aef70e 9751#define BTF_ITER_PREFIX "bpf_iter_"
a6ed02ca
KS
9752#define BTF_MAX_NAME_SIZE 128
9753
67234743
AS
9754void btf_get_kernel_prefix_kind(enum bpf_attach_type attach_type,
9755 const char **prefix, int *kind)
9756{
9757 switch (attach_type) {
9758 case BPF_TRACE_RAW_TP:
9759 *prefix = BTF_TRACE_PREFIX;
9760 *kind = BTF_KIND_TYPEDEF;
9761 break;
9762 case BPF_LSM_MAC:
bffcf348 9763 case BPF_LSM_CGROUP:
67234743
AS
9764 *prefix = BTF_LSM_PREFIX;
9765 *kind = BTF_KIND_FUNC;
9766 break;
9767 case BPF_TRACE_ITER:
9768 *prefix = BTF_ITER_PREFIX;
9769 *kind = BTF_KIND_FUNC;
9770 break;
9771 default:
9772 *prefix = "";
9773 *kind = BTF_KIND_FUNC;
9774 }
9775}
9776
a6ed02ca
KS
9777static int find_btf_by_prefix_kind(const struct btf *btf, const char *prefix,
9778 const char *name, __u32 kind)
9779{
9780 char btf_type_name[BTF_MAX_NAME_SIZE];
9781 int ret;
9782
9783 ret = snprintf(btf_type_name, sizeof(btf_type_name),
9784 "%s%s", prefix, name);
9785 /* snprintf returns the number of characters written excluding the
c139e40a 9786 * terminating null. So, if >= BTF_MAX_NAME_SIZE are written, it
a6ed02ca
KS
9787 * indicates truncation.
9788 */
9789 if (ret < 0 || ret >= sizeof(btf_type_name))
9790 return -ENAMETOOLONG;
9791 return btf__find_by_name_kind(btf, btf_type_name, kind);
9792}
9793
91abb4a6
AN
9794static inline int find_attach_btf_id(struct btf *btf, const char *name,
9795 enum bpf_attach_type attach_type)
a6ed02ca 9796{
67234743
AS
9797 const char *prefix;
9798 int kind;
a6ed02ca 9799
67234743
AS
9800 btf_get_kernel_prefix_kind(attach_type, &prefix, &kind);
9801 return find_btf_by_prefix_kind(btf, prefix, name, kind);
a6ed02ca
KS
9802}
9803
b8c54ea4
AS
9804int libbpf_find_vmlinux_btf_id(const char *name,
9805 enum bpf_attach_type attach_type)
12a8654b 9806{
a6ed02ca 9807 struct btf *btf;
3521ffa2 9808 int err;
12a8654b 9809
a710eed3 9810 btf = btf__load_vmlinux_btf();
e9fc3ce9
AN
9811 err = libbpf_get_error(btf);
9812 if (err) {
12a8654b 9813 pr_warn("vmlinux BTF is not found\n");
e9fc3ce9 9814 return libbpf_err(err);
12a8654b
AS
9815 }
9816
91abb4a6
AN
9817 err = find_attach_btf_id(btf, name, attach_type);
9818 if (err <= 0)
9819 pr_warn("%s is not found in vmlinux BTF\n", name);
9820
3521ffa2 9821 btf__free(btf);
e9fc3ce9 9822 return libbpf_err(err);
b8c54ea4
AS
9823}
9824
e7bf94db
AS
9825static int libbpf_find_prog_btf_id(const char *name, __u32 attach_prog_fd)
9826{
813847a3 9827 struct bpf_prog_info info;
ebc7b50a 9828 __u32 info_len = sizeof(info);
6cc93e2f 9829 struct btf *btf;
6d2d73cd 9830 int err;
e7bf94db 9831
813847a3 9832 memset(&info, 0, info_len);
629dfc66 9833 err = bpf_prog_get_info_by_fd(attach_prog_fd, &info, &info_len);
e9fc3ce9 9834 if (err) {
629dfc66 9835 pr_warn("failed bpf_prog_get_info_by_fd for FD %d: %d\n",
ebc7b50a 9836 attach_prog_fd, err);
e9fc3ce9 9837 return err;
e7bf94db 9838 }
6d2d73cd
QM
9839
9840 err = -EINVAL;
ebc7b50a 9841 if (!info.btf_id) {
e7bf94db
AS
9842 pr_warn("The target program doesn't have BTF\n");
9843 goto out;
9844 }
ebc7b50a
DM
9845 btf = btf__load_from_kernel_by_id(info.btf_id);
9846 err = libbpf_get_error(btf);
9847 if (err) {
9848 pr_warn("Failed to get BTF %d of the program: %d\n", info.btf_id, err);
e7bf94db
AS
9849 goto out;
9850 }
9851 err = btf__find_by_name_kind(btf, name, BTF_KIND_FUNC);
9852 btf__free(btf);
9853 if (err <= 0) {
9854 pr_warn("%s is not found in prog's BTF\n", name);
9855 goto out;
9856 }
9857out:
e7bf94db
AS
9858 return err;
9859}
9860
91abb4a6
AN
9861static int find_kernel_btf_id(struct bpf_object *obj, const char *attach_name,
9862 enum bpf_attach_type attach_type,
9863 int *btf_obj_fd, int *btf_type_id)
9864{
8f8a0242
VM
9865 int ret, i, mod_len;
9866 const char *fn_name, *mod_name = NULL;
91abb4a6 9867
8f8a0242
VM
9868 fn_name = strchr(attach_name, ':');
9869 if (fn_name) {
9870 mod_name = attach_name;
9871 mod_len = fn_name - mod_name;
9872 fn_name++;
9873 }
9874
9875 if (!mod_name || strncmp(mod_name, "vmlinux", mod_len) == 0) {
9876 ret = find_attach_btf_id(obj->btf_vmlinux,
9877 mod_name ? fn_name : attach_name,
9878 attach_type);
9879 if (ret > 0) {
9880 *btf_obj_fd = 0; /* vmlinux BTF */
9881 *btf_type_id = ret;
9882 return 0;
9883 }
9884 if (ret != -ENOENT)
9885 return ret;
91abb4a6 9886 }
91abb4a6
AN
9887
9888 ret = load_module_btfs(obj);
9889 if (ret)
9890 return ret;
9891
9892 for (i = 0; i < obj->btf_module_cnt; i++) {
9893 const struct module_btf *mod = &obj->btf_modules[i];
9894
8f8a0242
VM
9895 if (mod_name && strncmp(mod->name, mod_name, mod_len) != 0)
9896 continue;
9897
9898 ret = find_attach_btf_id(mod->btf,
9899 mod_name ? fn_name : attach_name,
9900 attach_type);
91abb4a6
AN
9901 if (ret > 0) {
9902 *btf_obj_fd = mod->fd;
9903 *btf_type_id = ret;
9904 return 0;
9905 }
9906 if (ret == -ENOENT)
9907 continue;
9908
9909 return ret;
9910 }
9911
9912 return -ESRCH;
9913}
9914
15ea31fa
AN
9915static int libbpf_find_attach_btf_id(struct bpf_program *prog, const char *attach_name,
9916 int *btf_obj_fd, int *btf_type_id)
b8c54ea4 9917{
a6ed02ca
KS
9918 enum bpf_attach_type attach_type = prog->expected_attach_type;
9919 __u32 attach_prog_fd = prog->attach_prog_fd;
b6291a6f 9920 int err = 0;
b8c54ea4 9921
91abb4a6 9922 /* BPF program's BTF ID */
749c202c
AN
9923 if (prog->type == BPF_PROG_TYPE_EXT || attach_prog_fd) {
9924 if (!attach_prog_fd) {
9925 pr_warn("prog '%s': attach program FD is not set\n", prog->name);
9926 return -EINVAL;
9927 }
91abb4a6
AN
9928 err = libbpf_find_prog_btf_id(attach_name, attach_prog_fd);
9929 if (err < 0) {
749c202c
AN
9930 pr_warn("prog '%s': failed to find BPF program (FD %d) BTF ID for '%s': %d\n",
9931 prog->name, attach_prog_fd, attach_name, err);
91abb4a6
AN
9932 return err;
9933 }
9934 *btf_obj_fd = 0;
9935 *btf_type_id = err;
9936 return 0;
9937 }
9938
9939 /* kernel/module BTF ID */
67234743
AS
9940 if (prog->obj->gen_loader) {
9941 bpf_gen__record_attach_target(prog->obj->gen_loader, attach_name, attach_type);
9942 *btf_obj_fd = 0;
9943 *btf_type_id = 1;
9944 } else {
9e926acd
KFL
9945 err = find_kernel_btf_id(prog->obj, attach_name,
9946 attach_type, btf_obj_fd,
9947 btf_type_id);
67234743 9948 }
91abb4a6 9949 if (err) {
749c202c
AN
9950 pr_warn("prog '%s': failed to find kernel BTF type ID of '%s': %d\n",
9951 prog->name, attach_name, err);
b8c54ea4 9952 return err;
12a8654b 9953 }
91abb4a6 9954 return 0;
12a8654b
AS
9955}
9956
956b620f
AI
9957int libbpf_attach_type_by_name(const char *name,
9958 enum bpf_attach_type *attach_type)
9959{
c76e4c22 9960 char *type_names;
b6291a6f 9961 const struct bpf_sec_def *sec_def;
956b620f
AI
9962
9963 if (!name)
e9fc3ce9 9964 return libbpf_err(-EINVAL);
956b620f 9965
b6291a6f
AN
9966 sec_def = find_sec_def(name);
9967 if (!sec_def) {
9968 pr_debug("failed to guess attach type based on ELF section name '%s'\n", name);
9969 type_names = libbpf_get_type_names(true);
9970 if (type_names != NULL) {
9971 pr_debug("attachable section(type) names are:%s\n", type_names);
9972 free(type_names);
9973 }
9974
9975 return libbpf_err(-EINVAL);
c76e4c22
TS
9976 }
9977
4fa5bcfe 9978 if (sec_def->prog_prepare_load_fn != libbpf_prepare_prog_load)
15ea31fa
AN
9979 return libbpf_err(-EINVAL);
9980 if (!(sec_def->cookie & SEC_ATTACHABLE))
b6291a6f
AN
9981 return libbpf_err(-EINVAL);
9982
9983 *attach_type = sec_def->expected_attach_type;
9984 return 0;
956b620f
AI
9985}
9986
a324aae3 9987int bpf_map__fd(const struct bpf_map *map)
9d759a9b 9988{
f08c18e0
AN
9989 if (!map)
9990 return libbpf_err(-EINVAL);
9991 if (!map_is_created(map))
9992 return -1;
9993 return map->fd;
9d759a9b
WN
9994}
9995
aed65917
AN
9996static bool map_uses_real_name(const struct bpf_map *map)
9997{
9998 /* Since libbpf started to support custom .data.* and .rodata.* maps,
9999 * their user-visible name differs from kernel-visible name. Users see
10000 * such map's corresponding ELF section name as a map name.
10001 * This check distinguishes .data/.rodata from .data.* and .rodata.*
10002 * maps to know which name has to be returned to the user.
10003 */
10004 if (map->libbpf_type == LIBBPF_MAP_DATA && strcmp(map->real_name, DATA_SEC) != 0)
10005 return true;
10006 if (map->libbpf_type == LIBBPF_MAP_RODATA && strcmp(map->real_name, RODATA_SEC) != 0)
10007 return true;
10008 return false;
10009}
10010
a324aae3 10011const char *bpf_map__name(const struct bpf_map *map)
561bbcca 10012{
aed65917
AN
10013 if (!map)
10014 return NULL;
10015
10016 if (map_uses_real_name(map))
10017 return map->real_name;
10018
10019 return map->name;
561bbcca
WN
10020}
10021
1bdb6c9a
AN
10022enum bpf_map_type bpf_map__type(const struct bpf_map *map)
10023{
10024 return map->def.type;
10025}
10026
10027int bpf_map__set_type(struct bpf_map *map, enum bpf_map_type type)
10028{
f08c18e0 10029 if (map_is_created(map))
e9fc3ce9 10030 return libbpf_err(-EBUSY);
1bdb6c9a
AN
10031 map->def.type = type;
10032 return 0;
10033}
10034
10035__u32 bpf_map__map_flags(const struct bpf_map *map)
10036{
10037 return map->def.map_flags;
10038}
10039
10040int bpf_map__set_map_flags(struct bpf_map *map, __u32 flags)
10041{
f08c18e0 10042 if (map_is_created(map))
e9fc3ce9 10043 return libbpf_err(-EBUSY);
1bdb6c9a
AN
10044 map->def.map_flags = flags;
10045 return 0;
10046}
10047
47512102
JK
10048__u64 bpf_map__map_extra(const struct bpf_map *map)
10049{
10050 return map->map_extra;
10051}
10052
10053int bpf_map__set_map_extra(struct bpf_map *map, __u64 map_extra)
10054{
f08c18e0 10055 if (map_is_created(map))
47512102
JK
10056 return libbpf_err(-EBUSY);
10057 map->map_extra = map_extra;
10058 return 0;
10059}
10060
1bdb6c9a
AN
10061__u32 bpf_map__numa_node(const struct bpf_map *map)
10062{
10063 return map->numa_node;
10064}
10065
10066int bpf_map__set_numa_node(struct bpf_map *map, __u32 numa_node)
10067{
f08c18e0 10068 if (map_is_created(map))
e9fc3ce9 10069 return libbpf_err(-EBUSY);
1bdb6c9a
AN
10070 map->numa_node = numa_node;
10071 return 0;
10072}
10073
10074__u32 bpf_map__key_size(const struct bpf_map *map)
10075{
10076 return map->def.key_size;
10077}
10078
10079int bpf_map__set_key_size(struct bpf_map *map, __u32 size)
10080{
f08c18e0 10081 if (map_is_created(map))
e9fc3ce9 10082 return libbpf_err(-EBUSY);
1bdb6c9a
AN
10083 map->def.key_size = size;
10084 return 0;
10085}
10086
10087__u32 bpf_map__value_size(const struct bpf_map *map)
10088{
10089 return map->def.value_size;
10090}
10091
9d0a2331
JK
10092static int map_btf_datasec_resize(struct bpf_map *map, __u32 size)
10093{
10094 struct btf *btf;
10095 struct btf_type *datasec_type, *var_type;
10096 struct btf_var_secinfo *var;
10097 const struct btf_type *array_type;
10098 const struct btf_array *array;
4c857a71
JK
10099 int vlen, element_sz, new_array_id;
10100 __u32 nr_elements;
9d0a2331
JK
10101
10102 /* check btf existence */
10103 btf = bpf_object__btf(map->obj);
10104 if (!btf)
10105 return -ENOENT;
10106
10107 /* verify map is datasec */
10108 datasec_type = btf_type_by_id(btf, bpf_map__btf_value_type_id(map));
10109 if (!btf_is_datasec(datasec_type)) {
10110 pr_warn("map '%s': cannot be resized, map value type is not a datasec\n",
10111 bpf_map__name(map));
10112 return -EINVAL;
10113 }
10114
10115 /* verify datasec has at least one var */
10116 vlen = btf_vlen(datasec_type);
10117 if (vlen == 0) {
10118 pr_warn("map '%s': cannot be resized, map value datasec is empty\n",
10119 bpf_map__name(map));
10120 return -EINVAL;
10121 }
10122
10123 /* verify last var in the datasec is an array */
10124 var = &btf_var_secinfos(datasec_type)[vlen - 1];
10125 var_type = btf_type_by_id(btf, var->type);
10126 array_type = skip_mods_and_typedefs(btf, var_type->type, NULL);
10127 if (!btf_is_array(array_type)) {
10128 pr_warn("map '%s': cannot be resized, last var must be an array\n",
10129 bpf_map__name(map));
10130 return -EINVAL;
10131 }
10132
10133 /* verify request size aligns with array */
10134 array = btf_array(array_type);
10135 element_sz = btf__resolve_size(btf, array->type);
10136 if (element_sz <= 0 || (size - var->offset) % element_sz != 0) {
10137 pr_warn("map '%s': cannot be resized, element size (%d) doesn't align with new total size (%u)\n",
10138 bpf_map__name(map), element_sz, size);
10139 return -EINVAL;
10140 }
10141
10142 /* create a new array based on the existing array, but with new length */
10143 nr_elements = (size - var->offset) / element_sz;
10144 new_array_id = btf__add_array(btf, array->index_type, array->type, nr_elements);
10145 if (new_array_id < 0)
10146 return new_array_id;
10147
10148 /* adding a new btf type invalidates existing pointers to btf objects,
10149 * so refresh pointers before proceeding
10150 */
10151 datasec_type = btf_type_by_id(btf, map->btf_value_type_id);
10152 var = &btf_var_secinfos(datasec_type)[vlen - 1];
10153 var_type = btf_type_by_id(btf, var->type);
10154
10155 /* finally update btf info */
10156 datasec_type->size = size;
10157 var->size = size - var->offset;
10158 var_type->type = new_array_id;
10159
10160 return 0;
10161}
10162
1bdb6c9a
AN
10163int bpf_map__set_value_size(struct bpf_map *map, __u32 size)
10164{
f08c18e0 10165 if (map->obj->loaded || map->reused)
e9fc3ce9 10166 return libbpf_err(-EBUSY);
9d0a2331
JK
10167
10168 if (map->mmaped) {
9d0a2331 10169 size_t mmap_old_sz, mmap_new_sz;
79ff13e9
AS
10170 int err;
10171
10172 if (map->def.type != BPF_MAP_TYPE_ARRAY)
10173 return -EOPNOTSUPP;
9d0a2331 10174
79ff13e9
AS
10175 mmap_old_sz = bpf_map_mmap_sz(map);
10176 mmap_new_sz = array_map_mmap_sz(size, map->def.max_entries);
9d0a2331
JK
10177 err = bpf_map_mmap_resize(map, mmap_old_sz, mmap_new_sz);
10178 if (err) {
10179 pr_warn("map '%s': failed to resize memory-mapped region: %d\n",
10180 bpf_map__name(map), err);
10181 return err;
10182 }
10183 err = map_btf_datasec_resize(map, size);
10184 if (err && err != -ENOENT) {
10185 pr_warn("map '%s': failed to adjust resized BTF, clearing BTF key/value info: %d\n",
10186 bpf_map__name(map), err);
10187 map->btf_value_type_id = 0;
10188 map->btf_key_type_id = 0;
10189 }
10190 }
10191
1bdb6c9a
AN
10192 map->def.value_size = size;
10193 return 0;
10194}
10195
5b891af7 10196__u32 bpf_map__btf_key_type_id(const struct bpf_map *map)
8a138aed 10197{
61746dbe 10198 return map ? map->btf_key_type_id : 0;
8a138aed
MKL
10199}
10200
5b891af7 10201__u32 bpf_map__btf_value_type_id(const struct bpf_map *map)
8a138aed 10202{
61746dbe 10203 return map ? map->btf_value_type_id : 0;
8a138aed
MKL
10204}
10205
e2842be5
THJ
10206int bpf_map__set_initial_value(struct bpf_map *map,
10207 const void *data, size_t size)
10208{
2e7ba4f8
AN
10209 size_t actual_sz;
10210
f08c18e0
AN
10211 if (map->obj->loaded || map->reused)
10212 return libbpf_err(-EBUSY);
10213
2e7ba4f8
AN
10214 if (!map->mmaped || map->libbpf_type == LIBBPF_MAP_KCONFIG)
10215 return libbpf_err(-EINVAL);
10216
10217 if (map->def.type == BPF_MAP_TYPE_ARENA)
10218 actual_sz = map->obj->arena_data_sz;
10219 else
10220 actual_sz = map->def.value_size;
10221 if (size != actual_sz)
e9fc3ce9 10222 return libbpf_err(-EINVAL);
e2842be5
THJ
10223
10224 memcpy(map->mmaped, data, size);
10225 return 0;
10226}
10227
2e7ba4f8 10228void *bpf_map__initial_value(const struct bpf_map *map, size_t *psize)
7723256b 10229{
69e4a9d2
KFL
10230 if (bpf_map__is_struct_ops(map)) {
10231 if (psize)
10232 *psize = map->def.value_size;
10233 return map->st_ops->data;
10234 }
10235
7723256b
AS
10236 if (!map->mmaped)
10237 return NULL;
2e7ba4f8
AN
10238
10239 if (map->def.type == BPF_MAP_TYPE_ARENA)
10240 *psize = map->obj->arena_data_sz;
10241 else
10242 *psize = map->def.value_size;
10243
7723256b
AS
10244 return map->mmaped;
10245}
10246
a324aae3 10247bool bpf_map__is_internal(const struct bpf_map *map)
d859900c
DB
10248{
10249 return map->libbpf_type != LIBBPF_MAP_UNSPEC;
10250}
10251
1bdb6c9a
AN
10252__u32 bpf_map__ifindex(const struct bpf_map *map)
10253{
10254 return map->map_ifindex;
10255}
10256
10257int bpf_map__set_ifindex(struct bpf_map *map, __u32 ifindex)
9aba3613 10258{
f08c18e0 10259 if (map_is_created(map))
e9fc3ce9 10260 return libbpf_err(-EBUSY);
9aba3613 10261 map->map_ifindex = ifindex;
1bdb6c9a 10262 return 0;
9aba3613
JK
10263}
10264
addb9fc9
NS
10265int bpf_map__set_inner_map_fd(struct bpf_map *map, int fd)
10266{
10267 if (!bpf_map_type__is_map_in_map(map->def.type)) {
be18010e 10268 pr_warn("error: unsupported map type\n");
e9fc3ce9 10269 return libbpf_err(-EINVAL);
addb9fc9
NS
10270 }
10271 if (map->inner_map_fd != -1) {
be18010e 10272 pr_warn("error: inner_map_fd already specified\n");
e9fc3ce9 10273 return libbpf_err(-EINVAL);
addb9fc9 10274 }
8f7b239e
AN
10275 if (map->inner_map) {
10276 bpf_map__destroy(map->inner_map);
10277 zfree(&map->inner_map);
10278 }
addb9fc9
NS
10279 map->inner_map_fd = fd;
10280 return 0;
10281}
10282
0c19a9fb 10283static struct bpf_map *
a324aae3 10284__bpf_map__iter(const struct bpf_map *m, const struct bpf_object *obj, int i)
9d759a9b 10285{
0c19a9fb 10286 ssize_t idx;
9d759a9b
WN
10287 struct bpf_map *s, *e;
10288
10289 if (!obj || !obj->maps)
e9fc3ce9 10290 return errno = EINVAL, NULL;
9d759a9b
WN
10291
10292 s = obj->maps;
10293 e = obj->maps + obj->nr_maps;
10294
0c19a9fb 10295 if ((m < s) || (m >= e)) {
be18010e
KW
10296 pr_warn("error in %s: map handler doesn't belong to object\n",
10297 __func__);
e9fc3ce9 10298 return errno = EINVAL, NULL;
9d759a9b
WN
10299 }
10300
0c19a9fb
SF
10301 idx = (m - obj->maps) + i;
10302 if (idx >= obj->nr_maps || idx < 0)
9d759a9b
WN
10303 return NULL;
10304 return &obj->maps[idx];
10305}
561bbcca 10306
2088a3a7
HC
10307struct bpf_map *
10308bpf_object__next_map(const struct bpf_object *obj, const struct bpf_map *prev)
0c19a9fb
SF
10309{
10310 if (prev == NULL)
10311 return obj->maps;
10312
10313 return __bpf_map__iter(prev, obj, 1);
10314}
10315
2088a3a7
HC
10316struct bpf_map *
10317bpf_object__prev_map(const struct bpf_object *obj, const struct bpf_map *next)
0c19a9fb
SF
10318{
10319 if (next == NULL) {
10320 if (!obj->nr_maps)
10321 return NULL;
10322 return obj->maps + obj->nr_maps - 1;
10323 }
10324
10325 return __bpf_map__iter(next, obj, -1);
10326}
10327
561bbcca 10328struct bpf_map *
a324aae3 10329bpf_object__find_map_by_name(const struct bpf_object *obj, const char *name)
561bbcca
WN
10330{
10331 struct bpf_map *pos;
10332
f74a53d9 10333 bpf_object__for_each_map(pos, obj) {
26071635
AN
10334 /* if it's a special internal map name (which always starts
10335 * with dot) then check if that special name matches the
10336 * real map name (ELF section name)
10337 */
10338 if (name[0] == '.') {
10339 if (pos->real_name && strcmp(pos->real_name, name) == 0)
10340 return pos;
10341 continue;
10342 }
10343 /* otherwise map name has to be an exact match */
aed65917
AN
10344 if (map_uses_real_name(pos)) {
10345 if (strcmp(pos->real_name, name) == 0)
10346 return pos;
10347 continue;
10348 }
10349 if (strcmp(pos->name, name) == 0)
561bbcca
WN
10350 return pos;
10351 }
e9fc3ce9 10352 return errno = ENOENT, NULL;
561bbcca 10353}
5a6acad1 10354
f3cea32d 10355int
a324aae3 10356bpf_object__find_map_fd_by_name(const struct bpf_object *obj, const char *name)
f3cea32d
MF
10357{
10358 return bpf_map__fd(bpf_object__find_map_by_name(obj, name));
10359}
10360
737d0646
AN
10361static int validate_map_op(const struct bpf_map *map, size_t key_sz,
10362 size_t value_sz, bool check_value_sz)
10363{
f08c18e0 10364 if (!map_is_created(map)) /* map is not yet created */
737d0646
AN
10365 return -ENOENT;
10366
10367 if (map->def.key_size != key_sz) {
10368 pr_warn("map '%s': unexpected key size %zu provided, expected %u\n",
10369 map->name, key_sz, map->def.key_size);
10370 return -EINVAL;
10371 }
10372
7b30c296
MY
10373 if (map->fd < 0) {
10374 pr_warn("map '%s': can't use BPF map without FD (was it created?)\n", map->name);
10375 return -EINVAL;
10376 }
10377
737d0646
AN
10378 if (!check_value_sz)
10379 return 0;
10380
10381 switch (map->def.type) {
10382 case BPF_MAP_TYPE_PERCPU_ARRAY:
10383 case BPF_MAP_TYPE_PERCPU_HASH:
10384 case BPF_MAP_TYPE_LRU_PERCPU_HASH:
10385 case BPF_MAP_TYPE_PERCPU_CGROUP_STORAGE: {
10386 int num_cpu = libbpf_num_possible_cpus();
10387 size_t elem_sz = roundup(map->def.value_size, 8);
10388
10389 if (value_sz != num_cpu * elem_sz) {
10390 pr_warn("map '%s': unexpected value size %zu provided for per-CPU map, expected %d * %zu = %zd\n",
10391 map->name, value_sz, num_cpu, elem_sz, num_cpu * elem_sz);
10392 return -EINVAL;
10393 }
10394 break;
10395 }
10396 default:
10397 if (map->def.value_size != value_sz) {
10398 pr_warn("map '%s': unexpected value size %zu provided, expected %u\n",
10399 map->name, value_sz, map->def.value_size);
10400 return -EINVAL;
10401 }
10402 break;
10403 }
10404 return 0;
10405}
10406
10407int bpf_map__lookup_elem(const struct bpf_map *map,
10408 const void *key, size_t key_sz,
10409 void *value, size_t value_sz, __u64 flags)
10410{
10411 int err;
10412
10413 err = validate_map_op(map, key_sz, value_sz, true);
10414 if (err)
10415 return libbpf_err(err);
10416
10417 return bpf_map_lookup_elem_flags(map->fd, key, value, flags);
10418}
10419
10420int bpf_map__update_elem(const struct bpf_map *map,
10421 const void *key, size_t key_sz,
10422 const void *value, size_t value_sz, __u64 flags)
10423{
10424 int err;
10425
10426 err = validate_map_op(map, key_sz, value_sz, true);
10427 if (err)
10428 return libbpf_err(err);
10429
10430 return bpf_map_update_elem(map->fd, key, value, flags);
10431}
10432
10433int bpf_map__delete_elem(const struct bpf_map *map,
10434 const void *key, size_t key_sz, __u64 flags)
10435{
10436 int err;
10437
10438 err = validate_map_op(map, key_sz, 0, false /* check_value_sz */);
10439 if (err)
10440 return libbpf_err(err);
10441
10442 return bpf_map_delete_elem_flags(map->fd, key, flags);
10443}
10444
10445int bpf_map__lookup_and_delete_elem(const struct bpf_map *map,
10446 const void *key, size_t key_sz,
10447 void *value, size_t value_sz, __u64 flags)
10448{
10449 int err;
10450
10451 err = validate_map_op(map, key_sz, value_sz, true);
10452 if (err)
10453 return libbpf_err(err);
10454
10455 return bpf_map_lookup_and_delete_elem_flags(map->fd, key, value, flags);
10456}
10457
10458int bpf_map__get_next_key(const struct bpf_map *map,
10459 const void *cur_key, void *next_key, size_t key_sz)
10460{
10461 int err;
10462
10463 err = validate_map_op(map, key_sz, 0, false /* check_value_sz */);
10464 if (err)
10465 return libbpf_err(err);
10466
10467 return bpf_map_get_next_key(map->fd, cur_key, next_key);
10468}
10469
e28ff1a8
JS
10470long libbpf_get_error(const void *ptr)
10471{
e9fc3ce9
AN
10472 if (!IS_ERR_OR_NULL(ptr))
10473 return 0;
10474
10475 if (IS_ERR(ptr))
10476 errno = -PTR_ERR(ptr);
10477
10478 /* If ptr == NULL, then errno should be already set by the failing
10479 * API, because libbpf never returns NULL on success and it now always
10480 * sets errno on error. So no extra errno handling for ptr == NULL
10481 * case.
10482 */
10483 return -errno;
e28ff1a8 10484}
6f6d33f3 10485
cc4f864b
AN
10486/* Replace link's underlying BPF program with the new one */
10487int bpf_link__update_program(struct bpf_link *link, struct bpf_program *prog)
10488{
e9fc3ce9 10489 int ret;
7b30c296
MY
10490 int prog_fd = bpf_program__fd(prog);
10491
10492 if (prog_fd < 0) {
10493 pr_warn("prog '%s': can't use BPF program without FD (was it loaded?)\n",
10494 prog->name);
10495 return libbpf_err(-EINVAL);
10496 }
c139e40a 10497
7b30c296 10498 ret = bpf_link_update(bpf_link__fd(link), prog_fd, NULL);
e9fc3ce9 10499 return libbpf_err_errno(ret);
cc4f864b
AN
10500}
10501
d6958706
AN
10502/* Release "ownership" of underlying BPF resource (typically, BPF program
10503 * attached to some BPF hook, e.g., tracepoint, kprobe, etc). Disconnected
10504 * link, when destructed through bpf_link__destroy() call won't attempt to
10505 * detach/unregisted that BPF resource. This is useful in situations where,
10506 * say, attached BPF program has to outlive userspace program that attached it
10507 * in the system. Depending on type of BPF program, though, there might be
10508 * additional steps (like pinning BPF program in BPF FS) necessary to ensure
10509 * exit of userspace program doesn't trigger automatic detachment and clean up
10510 * inside the kernel.
10511 */
10512void bpf_link__disconnect(struct bpf_link *link)
10513{
10514 link->disconnected = true;
10515}
10516
1c2e9efc
AN
10517int bpf_link__destroy(struct bpf_link *link)
10518{
d6958706 10519 int err = 0;
1c2e9efc 10520
50450fc7 10521 if (IS_ERR_OR_NULL(link))
1c2e9efc
AN
10522 return 0;
10523
d6958706
AN
10524 if (!link->disconnected && link->detach)
10525 err = link->detach(link);
c016b68e
AN
10526 if (link->pin_path)
10527 free(link->pin_path);
d88b71d4
AN
10528 if (link->dealloc)
10529 link->dealloc(link);
10530 else
10531 free(link);
1c2e9efc 10532
e9fc3ce9 10533 return libbpf_err(err);
1c2e9efc
AN
10534}
10535
c016b68e
AN
10536int bpf_link__fd(const struct bpf_link *link)
10537{
10538 return link->fd;
10539}
10540
10541const char *bpf_link__pin_path(const struct bpf_link *link)
10542{
10543 return link->pin_path;
10544}
10545
10546static int bpf_link__detach_fd(struct bpf_link *link)
10547{
e9fc3ce9 10548 return libbpf_err_errno(close(link->fd));
c016b68e
AN
10549}
10550
10551struct bpf_link *bpf_link__open(const char *path)
10552{
10553 struct bpf_link *link;
10554 int fd;
10555
10556 fd = bpf_obj_get(path);
10557 if (fd < 0) {
10558 fd = -errno;
10559 pr_warn("failed to open link at %s: %d\n", path, fd);
e9fc3ce9 10560 return libbpf_err_ptr(fd);
c016b68e
AN
10561 }
10562
10563 link = calloc(1, sizeof(*link));
10564 if (!link) {
10565 close(fd);
e9fc3ce9 10566 return libbpf_err_ptr(-ENOMEM);
c016b68e
AN
10567 }
10568 link->detach = &bpf_link__detach_fd;
10569 link->fd = fd;
10570
10571 link->pin_path = strdup(path);
10572 if (!link->pin_path) {
10573 bpf_link__destroy(link);
e9fc3ce9 10574 return libbpf_err_ptr(-ENOMEM);
c016b68e
AN
10575 }
10576
10577 return link;
10578}
10579
2e49527e
AN
10580int bpf_link__detach(struct bpf_link *link)
10581{
10582 return bpf_link_detach(link->fd) ? -errno : 0;
10583}
10584
c016b68e
AN
10585int bpf_link__pin(struct bpf_link *link, const char *path)
10586{
10587 int err;
10588
10589 if (link->pin_path)
e9fc3ce9 10590 return libbpf_err(-EBUSY);
c016b68e
AN
10591 err = make_parent_dir(path);
10592 if (err)
e9fc3ce9 10593 return libbpf_err(err);
c016b68e
AN
10594 err = check_path(path);
10595 if (err)
e9fc3ce9 10596 return libbpf_err(err);
c016b68e
AN
10597
10598 link->pin_path = strdup(path);
10599 if (!link->pin_path)
e9fc3ce9 10600 return libbpf_err(-ENOMEM);
c016b68e
AN
10601
10602 if (bpf_obj_pin(link->fd, link->pin_path)) {
10603 err = -errno;
10604 zfree(&link->pin_path);
e9fc3ce9 10605 return libbpf_err(err);
c016b68e
AN
10606 }
10607
10608 pr_debug("link fd=%d: pinned at %s\n", link->fd, link->pin_path);
10609 return 0;
10610}
10611
10612int bpf_link__unpin(struct bpf_link *link)
10613{
10614 int err;
10615
10616 if (!link->pin_path)
e9fc3ce9 10617 return libbpf_err(-EINVAL);
c016b68e
AN
10618
10619 err = unlink(link->pin_path);
10620 if (err != 0)
af0efa05 10621 return -errno;
c016b68e
AN
10622
10623 pr_debug("link fd=%d: unpinned from %s\n", link->fd, link->pin_path);
10624 zfree(&link->pin_path);
10625 return 0;
10626}
63f2f5ee 10627
668ace0e
AN
10628struct bpf_link_perf {
10629 struct bpf_link link;
10630 int perf_event_fd;
ca304b40
RDT
10631 /* legacy kprobe support: keep track of probe identifier and type */
10632 char *legacy_probe_name;
46ed5fc3 10633 bool legacy_is_kprobe;
ca304b40 10634 bool legacy_is_retprobe;
668ace0e
AN
10635};
10636
46ed5fc3 10637static int remove_kprobe_event_legacy(const char *probe_name, bool retprobe);
cc10623c 10638static int remove_uprobe_event_legacy(const char *probe_name, bool retprobe);
46ed5fc3 10639
668ace0e 10640static int bpf_link_perf_detach(struct bpf_link *link)
63f2f5ee 10641{
668ace0e
AN
10642 struct bpf_link_perf *perf_link = container_of(link, struct bpf_link_perf, link);
10643 int err = 0;
63f2f5ee 10644
668ace0e 10645 if (ioctl(perf_link->perf_event_fd, PERF_EVENT_IOC_DISABLE, 0) < 0)
63f2f5ee
AN
10646 err = -errno;
10647
668ace0e
AN
10648 if (perf_link->perf_event_fd != link->fd)
10649 close(perf_link->perf_event_fd);
c016b68e 10650 close(link->fd);
668ace0e 10651
cc10623c 10652 /* legacy uprobe/kprobe needs to be removed after perf event fd closure */
46ed5fc3
AN
10653 if (perf_link->legacy_probe_name) {
10654 if (perf_link->legacy_is_kprobe) {
10655 err = remove_kprobe_event_legacy(perf_link->legacy_probe_name,
10656 perf_link->legacy_is_retprobe);
cc10623c
AN
10657 } else {
10658 err = remove_uprobe_event_legacy(perf_link->legacy_probe_name,
10659 perf_link->legacy_is_retprobe);
46ed5fc3
AN
10660 }
10661 }
ca304b40
RDT
10662
10663 return err;
63f2f5ee
AN
10664}
10665
668ace0e
AN
10666static void bpf_link_perf_dealloc(struct bpf_link *link)
10667{
10668 struct bpf_link_perf *perf_link = container_of(link, struct bpf_link_perf, link);
10669
ca304b40 10670 free(perf_link->legacy_probe_name);
668ace0e
AN
10671 free(perf_link);
10672}
10673
942025c9 10674struct bpf_link *bpf_program__attach_perf_event_opts(const struct bpf_program *prog, int pfd,
47faff37 10675 const struct bpf_perf_event_opts *opts)
63f2f5ee
AN
10676{
10677 char errmsg[STRERR_BUFSIZE];
668ace0e
AN
10678 struct bpf_link_perf *link;
10679 int prog_fd, link_fd = -1, err;
f8b299bc 10680 bool force_ioctl_attach;
63f2f5ee 10681
47faff37
AN
10682 if (!OPTS_VALID(opts, bpf_perf_event_opts))
10683 return libbpf_err_ptr(-EINVAL);
10684
63f2f5ee 10685 if (pfd < 0) {
52109584
AN
10686 pr_warn("prog '%s': invalid perf event FD %d\n",
10687 prog->name, pfd);
e9fc3ce9 10688 return libbpf_err_ptr(-EINVAL);
63f2f5ee
AN
10689 }
10690 prog_fd = bpf_program__fd(prog);
10691 if (prog_fd < 0) {
7b30c296 10692 pr_warn("prog '%s': can't attach BPF program without FD (was it loaded?)\n",
52109584 10693 prog->name);
e9fc3ce9 10694 return libbpf_err_ptr(-EINVAL);
63f2f5ee
AN
10695 }
10696
d6958706 10697 link = calloc(1, sizeof(*link));
63f2f5ee 10698 if (!link)
e9fc3ce9 10699 return libbpf_err_ptr(-ENOMEM);
668ace0e
AN
10700 link->link.detach = &bpf_link_perf_detach;
10701 link->link.dealloc = &bpf_link_perf_dealloc;
10702 link->perf_event_fd = pfd;
63f2f5ee 10703
f8b299bc
MD
10704 force_ioctl_attach = OPTS_GET(opts, force_ioctl_attach, false);
10705 if (kernel_supports(prog->obj, FEAT_PERF_LINK) && !force_ioctl_attach) {
47faff37
AN
10706 DECLARE_LIBBPF_OPTS(bpf_link_create_opts, link_opts,
10707 .perf_event.bpf_cookie = OPTS_GET(opts, bpf_cookie, 0));
10708
10709 link_fd = bpf_link_create(prog_fd, pfd, BPF_PERF_EVENT, &link_opts);
668ace0e
AN
10710 if (link_fd < 0) {
10711 err = -errno;
10712 pr_warn("prog '%s': failed to create BPF link for perf_event FD %d: %d (%s)\n",
10713 prog->name, pfd,
10714 err, libbpf_strerror_r(err, errmsg, sizeof(errmsg)));
10715 goto err_out;
10716 }
10717 link->link.fd = link_fd;
10718 } else {
47faff37
AN
10719 if (OPTS_GET(opts, bpf_cookie, 0)) {
10720 pr_warn("prog '%s': user context value is not supported\n", prog->name);
10721 err = -EOPNOTSUPP;
10722 goto err_out;
10723 }
10724
668ace0e
AN
10725 if (ioctl(pfd, PERF_EVENT_IOC_SET_BPF, prog_fd) < 0) {
10726 err = -errno;
10727 pr_warn("prog '%s': failed to attach to perf_event FD %d: %s\n",
10728 prog->name, pfd, libbpf_strerror_r(err, errmsg, sizeof(errmsg)));
10729 if (err == -EPROTO)
10730 pr_warn("prog '%s': try add PERF_SAMPLE_CALLCHAIN to or remove exclude_callchain_[kernel|user] from pfd %d\n",
10731 prog->name, pfd);
10732 goto err_out;
10733 }
10734 link->link.fd = pfd;
63f2f5ee
AN
10735 }
10736 if (ioctl(pfd, PERF_EVENT_IOC_ENABLE, 0) < 0) {
10737 err = -errno;
668ace0e 10738 pr_warn("prog '%s': failed to enable perf_event FD %d: %s\n",
52109584 10739 prog->name, pfd, libbpf_strerror_r(err, errmsg, sizeof(errmsg)));
668ace0e 10740 goto err_out;
63f2f5ee 10741 }
668ace0e
AN
10742
10743 return &link->link;
10744err_out:
10745 if (link_fd >= 0)
10746 close(link_fd);
10747 free(link);
10748 return libbpf_err_ptr(err);
63f2f5ee
AN
10749}
10750
942025c9 10751struct bpf_link *bpf_program__attach_perf_event(const struct bpf_program *prog, int pfd)
47faff37
AN
10752{
10753 return bpf_program__attach_perf_event_opts(prog, pfd, NULL);
10754}
10755
b2650027
AN
10756/*
10757 * this function is expected to parse integer in the range of [0, 2^31-1] from
10758 * given file using scanf format string fmt. If actual parsed value is
10759 * negative, the result might be indistinguishable from error
10760 */
10761static int parse_uint_from_file(const char *file, const char *fmt)
10762{
10763 char buf[STRERR_BUFSIZE];
10764 int err, ret;
10765 FILE *f;
10766
59842c54 10767 f = fopen(file, "re");
b2650027
AN
10768 if (!f) {
10769 err = -errno;
10770 pr_debug("failed to open '%s': %s\n", file,
10771 libbpf_strerror_r(err, buf, sizeof(buf)));
10772 return err;
10773 }
10774 err = fscanf(f, fmt, &ret);
10775 if (err != 1) {
10776 err = err == EOF ? -EIO : -errno;
10777 pr_debug("failed to parse '%s': %s\n", file,
10778 libbpf_strerror_r(err, buf, sizeof(buf)));
10779 fclose(f);
10780 return err;
10781 }
10782 fclose(f);
10783 return ret;
10784}
10785
10786static int determine_kprobe_perf_type(void)
10787{
10788 const char *file = "/sys/bus/event_source/devices/kprobe/type";
10789
10790 return parse_uint_from_file(file, "%d\n");
10791}
10792
10793static int determine_uprobe_perf_type(void)
10794{
10795 const char *file = "/sys/bus/event_source/devices/uprobe/type";
10796
10797 return parse_uint_from_file(file, "%d\n");
10798}
10799
10800static int determine_kprobe_retprobe_bit(void)
10801{
10802 const char *file = "/sys/bus/event_source/devices/kprobe/format/retprobe";
10803
10804 return parse_uint_from_file(file, "config:%d\n");
10805}
10806
10807static int determine_uprobe_retprobe_bit(void)
10808{
10809 const char *file = "/sys/bus/event_source/devices/uprobe/format/retprobe";
10810
10811 return parse_uint_from_file(file, "config:%d\n");
10812}
10813
5e3b8356
AN
10814#define PERF_UPROBE_REF_CTR_OFFSET_BITS 32
10815#define PERF_UPROBE_REF_CTR_OFFSET_SHIFT 32
10816
b2650027 10817static int perf_event_open_probe(bool uprobe, bool retprobe, const char *name,
5e3b8356 10818 uint64_t offset, int pid, size_t ref_ctr_off)
b2650027 10819{
813847a3
AN
10820 const size_t attr_sz = sizeof(struct perf_event_attr);
10821 struct perf_event_attr attr;
b2650027 10822 char errmsg[STRERR_BUFSIZE];
708ac5be 10823 int type, pfd;
b2650027 10824
1520e846 10825 if ((__u64)ref_ctr_off >= (1ULL << PERF_UPROBE_REF_CTR_OFFSET_BITS))
5e3b8356
AN
10826 return -EINVAL;
10827
813847a3
AN
10828 memset(&attr, 0, attr_sz);
10829
b2650027
AN
10830 type = uprobe ? determine_uprobe_perf_type()
10831 : determine_kprobe_perf_type();
10832 if (type < 0) {
be18010e
KW
10833 pr_warn("failed to determine %s perf type: %s\n",
10834 uprobe ? "uprobe" : "kprobe",
10835 libbpf_strerror_r(type, errmsg, sizeof(errmsg)));
b2650027
AN
10836 return type;
10837 }
10838 if (retprobe) {
10839 int bit = uprobe ? determine_uprobe_retprobe_bit()
10840 : determine_kprobe_retprobe_bit();
10841
10842 if (bit < 0) {
be18010e
KW
10843 pr_warn("failed to determine %s retprobe bit: %s\n",
10844 uprobe ? "uprobe" : "kprobe",
10845 libbpf_strerror_r(bit, errmsg, sizeof(errmsg)));
b2650027
AN
10846 return bit;
10847 }
10848 attr.config |= 1 << bit;
10849 }
813847a3 10850 attr.size = attr_sz;
b2650027 10851 attr.type = type;
5e3b8356 10852 attr.config |= (__u64)ref_ctr_off << PERF_UPROBE_REF_CTR_OFFSET_SHIFT;
36db2a94
AN
10853 attr.config1 = ptr_to_u64(name); /* kprobe_func or uprobe_path */
10854 attr.config2 = offset; /* kprobe_addr or probe_offset */
b2650027
AN
10855
10856 /* pid filter is meaningful only for uprobes */
10857 pfd = syscall(__NR_perf_event_open, &attr,
10858 pid < 0 ? -1 : pid /* pid */,
10859 pid == -1 ? 0 : -1 /* cpu */,
10860 -1 /* group_fd */, PERF_FLAG_FD_CLOEXEC);
708ac5be 10861 return pfd >= 0 ? pfd : -errno;
b2650027
AN
10862}
10863
46ed5fc3
AN
10864static int append_to_file(const char *file, const char *fmt, ...)
10865{
10866 int fd, n, err = 0;
10867 va_list ap;
01dc26c9
LP
10868 char buf[1024];
10869
10870 va_start(ap, fmt);
10871 n = vsnprintf(buf, sizeof(buf), fmt, ap);
10872 va_end(ap);
10873
10874 if (n < 0 || n >= sizeof(buf))
10875 return -EINVAL;
46ed5fc3 10876
92274e24 10877 fd = open(file, O_WRONLY | O_APPEND | O_CLOEXEC, 0);
46ed5fc3
AN
10878 if (fd < 0)
10879 return -errno;
10880
01dc26c9 10881 if (write(fd, buf, n) < 0)
46ed5fc3
AN
10882 err = -errno;
10883
10884 close(fd);
10885 return err;
10886}
10887
a1ac9fd6
AN
10888#define DEBUGFS "/sys/kernel/debug/tracing"
10889#define TRACEFS "/sys/kernel/tracing"
10890
10891static bool use_debugfs(void)
10892{
10893 static int has_debugfs = -1;
10894
10895 if (has_debugfs < 0)
6a4ab886 10896 has_debugfs = faccessat(AT_FDCWD, DEBUGFS, F_OK, AT_EACCESS) == 0;
a1ac9fd6
AN
10897
10898 return has_debugfs == 1;
10899}
10900
10901static const char *tracefs_path(void)
10902{
10903 return use_debugfs() ? DEBUGFS : TRACEFS;
10904}
10905
10906static const char *tracefs_kprobe_events(void)
10907{
10908 return use_debugfs() ? DEBUGFS"/kprobe_events" : TRACEFS"/kprobe_events";
10909}
10910
10911static const char *tracefs_uprobe_events(void)
10912{
10913 return use_debugfs() ? DEBUGFS"/uprobe_events" : TRACEFS"/uprobe_events";
10914}
10915
8a3fe76f
JL
10916static const char *tracefs_available_filter_functions(void)
10917{
10918 return use_debugfs() ? DEBUGFS"/available_filter_functions"
10919 : TRACEFS"/available_filter_functions";
10920}
10921
56baeeba
JL
10922static const char *tracefs_available_filter_functions_addrs(void)
10923{
10924 return use_debugfs() ? DEBUGFS"/available_filter_functions_addrs"
10925 : TRACEFS"/available_filter_functions_addrs";
10926}
10927
46ed5fc3
AN
10928static void gen_kprobe_legacy_event_name(char *buf, size_t buf_sz,
10929 const char *kfunc_name, size_t offset)
10930{
51a33c60 10931 static int index = 0;
2fa07453 10932 int i;
51a33c60
QW
10933
10934 snprintf(buf, buf_sz, "libbpf_%u_%s_0x%zx_%d", getpid(), kfunc_name, offset,
10935 __sync_fetch_and_add(&index, 1));
2fa07453
MD
10936
10937 /* sanitize binary_path in the probe name */
10938 for (i = 0; buf[i]; i++) {
10939 if (!isalnum(buf[i]))
10940 buf[i] = '_';
10941 }
46ed5fc3
AN
10942}
10943
10944static int add_kprobe_event_legacy(const char *probe_name, bool retprobe,
10945 const char *kfunc_name, size_t offset)
10946{
a1ac9fd6 10947 return append_to_file(tracefs_kprobe_events(), "%c:%s/%s %s+0x%zx",
46ed5fc3
AN
10948 retprobe ? 'r' : 'p',
10949 retprobe ? "kretprobes" : "kprobes",
10950 probe_name, kfunc_name, offset);
10951}
10952
10953static int remove_kprobe_event_legacy(const char *probe_name, bool retprobe)
10954{
a1ac9fd6
AN
10955 return append_to_file(tracefs_kprobe_events(), "-:%s/%s",
10956 retprobe ? "kretprobes" : "kprobes", probe_name);
46ed5fc3
AN
10957}
10958
10959static int determine_kprobe_perf_type_legacy(const char *probe_name, bool retprobe)
10960{
10961 char file[256];
10962
a1ac9fd6
AN
10963 snprintf(file, sizeof(file), "%s/events/%s/%s/id",
10964 tracefs_path(), retprobe ? "kretprobes" : "kprobes", probe_name);
46ed5fc3
AN
10965
10966 return parse_uint_from_file(file, "%d\n");
10967}
10968
10969static int perf_event_kprobe_open_legacy(const char *probe_name, bool retprobe,
10970 const char *kfunc_name, size_t offset, int pid)
ca304b40 10971{
813847a3
AN
10972 const size_t attr_sz = sizeof(struct perf_event_attr);
10973 struct perf_event_attr attr;
ca304b40
RDT
10974 char errmsg[STRERR_BUFSIZE];
10975 int type, pfd, err;
10976
46ed5fc3 10977 err = add_kprobe_event_legacy(probe_name, retprobe, kfunc_name, offset);
ca304b40 10978 if (err < 0) {
46ed5fc3
AN
10979 pr_warn("failed to add legacy kprobe event for '%s+0x%zx': %s\n",
10980 kfunc_name, offset,
ca304b40
RDT
10981 libbpf_strerror_r(err, errmsg, sizeof(errmsg)));
10982 return err;
10983 }
46ed5fc3 10984 type = determine_kprobe_perf_type_legacy(probe_name, retprobe);
ca304b40 10985 if (type < 0) {
80940293 10986 err = type;
46ed5fc3
AN
10987 pr_warn("failed to determine legacy kprobe event id for '%s+0x%zx': %s\n",
10988 kfunc_name, offset,
80940293
CW
10989 libbpf_strerror_r(err, errmsg, sizeof(errmsg)));
10990 goto err_clean_legacy;
ca304b40 10991 }
813847a3
AN
10992
10993 memset(&attr, 0, attr_sz);
10994 attr.size = attr_sz;
ca304b40
RDT
10995 attr.config = type;
10996 attr.type = PERF_TYPE_TRACEPOINT;
10997
10998 pfd = syscall(__NR_perf_event_open, &attr,
10999 pid < 0 ? -1 : pid, /* pid */
11000 pid == -1 ? 0 : -1, /* cpu */
11001 -1 /* group_fd */, PERF_FLAG_FD_CLOEXEC);
11002 if (pfd < 0) {
11003 err = -errno;
11004 pr_warn("legacy kprobe perf_event_open() failed: %s\n",
11005 libbpf_strerror_r(err, errmsg, sizeof(errmsg)));
80940293 11006 goto err_clean_legacy;
ca304b40
RDT
11007 }
11008 return pfd;
80940293
CW
11009
11010err_clean_legacy:
11011 /* Clear the newly added legacy kprobe_event */
11012 remove_kprobe_event_legacy(probe_name, retprobe);
11013 return err;
ca304b40
RDT
11014}
11015
708ac5be
AN
11016static const char *arch_specific_syscall_pfx(void)
11017{
11018#if defined(__x86_64__)
11019 return "x64";
11020#elif defined(__i386__)
11021 return "ia32";
11022#elif defined(__s390x__)
11023 return "s390x";
11024#elif defined(__s390__)
11025 return "s390";
11026#elif defined(__arm__)
11027 return "arm";
11028#elif defined(__aarch64__)
11029 return "arm64";
11030#elif defined(__mips__)
11031 return "mips";
11032#elif defined(__riscv)
11033 return "riscv";
64893e83
DM
11034#elif defined(__powerpc__)
11035 return "powerpc";
11036#elif defined(__powerpc64__)
11037 return "powerpc64";
708ac5be
AN
11038#else
11039 return NULL;
11040#endif
11041}
11042
f3dcee93 11043int probe_kern_syscall_wrapper(int token_fd)
708ac5be
AN
11044{
11045 char syscall_name[64];
11046 const char *ksys_pfx;
11047
11048 ksys_pfx = arch_specific_syscall_pfx();
11049 if (!ksys_pfx)
11050 return 0;
11051
11052 snprintf(syscall_name, sizeof(syscall_name), "__%s_sys_bpf", ksys_pfx);
11053
11054 if (determine_kprobe_perf_type() >= 0) {
11055 int pfd;
11056
11057 pfd = perf_event_open_probe(false, false, syscall_name, 0, getpid(), 0);
11058 if (pfd >= 0)
11059 close(pfd);
11060
11061 return pfd >= 0 ? 1 : 0;
11062 } else { /* legacy mode */
11063 char probe_name[128];
11064
11065 gen_kprobe_legacy_event_name(probe_name, sizeof(probe_name), syscall_name, 0);
11066 if (add_kprobe_event_legacy(probe_name, false, syscall_name, 0) < 0)
11067 return 0;
11068
11069 (void)remove_kprobe_event_legacy(probe_name, false);
11070 return 1;
11071 }
11072}
11073
da97553e 11074struct bpf_link *
942025c9 11075bpf_program__attach_kprobe_opts(const struct bpf_program *prog,
ac0ed488 11076 const char *func_name,
47faff37 11077 const struct bpf_kprobe_opts *opts)
b2650027 11078{
47faff37 11079 DECLARE_LIBBPF_OPTS(bpf_perf_event_opts, pe_opts);
f8b299bc 11080 enum probe_attach_mode attach_mode;
b2650027 11081 char errmsg[STRERR_BUFSIZE];
ca304b40 11082 char *legacy_probe = NULL;
b2650027 11083 struct bpf_link *link;
46ed5fc3 11084 size_t offset;
ca304b40 11085 bool retprobe, legacy;
b2650027
AN
11086 int pfd, err;
11087
da97553e
JO
11088 if (!OPTS_VALID(opts, bpf_kprobe_opts))
11089 return libbpf_err_ptr(-EINVAL);
11090
f8b299bc 11091 attach_mode = OPTS_GET(opts, attach_mode, PROBE_ATTACH_MODE_DEFAULT);
da97553e
JO
11092 retprobe = OPTS_GET(opts, retprobe, false);
11093 offset = OPTS_GET(opts, offset, 0);
47faff37 11094 pe_opts.bpf_cookie = OPTS_GET(opts, bpf_cookie, 0);
da97553e 11095
ca304b40 11096 legacy = determine_kprobe_perf_type() < 0;
f8b299bc
MD
11097 switch (attach_mode) {
11098 case PROBE_ATTACH_MODE_LEGACY:
11099 legacy = true;
11100 pe_opts.force_ioctl_attach = true;
11101 break;
11102 case PROBE_ATTACH_MODE_PERF:
11103 if (legacy)
11104 return libbpf_err_ptr(-ENOTSUP);
11105 pe_opts.force_ioctl_attach = true;
11106 break;
11107 case PROBE_ATTACH_MODE_LINK:
11108 if (legacy || !kernel_supports(prog->obj, FEAT_PERF_LINK))
11109 return libbpf_err_ptr(-ENOTSUP);
11110 break;
11111 case PROBE_ATTACH_MODE_DEFAULT:
11112 break;
11113 default:
11114 return libbpf_err_ptr(-EINVAL);
11115 }
11116
ca304b40
RDT
11117 if (!legacy) {
11118 pfd = perf_event_open_probe(false /* uprobe */, retprobe,
11119 func_name, offset,
11120 -1 /* pid */, 0 /* ref_ctr_off */);
11121 } else {
46ed5fc3
AN
11122 char probe_name[256];
11123
11124 gen_kprobe_legacy_event_name(probe_name, sizeof(probe_name),
11125 func_name, offset);
11126
71cff670 11127 legacy_probe = strdup(probe_name);
ca304b40
RDT
11128 if (!legacy_probe)
11129 return libbpf_err_ptr(-ENOMEM);
11130
46ed5fc3 11131 pfd = perf_event_kprobe_open_legacy(legacy_probe, retprobe, func_name,
ca304b40
RDT
11132 offset, -1 /* pid */);
11133 }
b2650027 11134 if (pfd < 0) {
46ed5fc3
AN
11135 err = -errno;
11136 pr_warn("prog '%s': failed to create %s '%s+0x%zx' perf event: %s\n",
11137 prog->name, retprobe ? "kretprobe" : "kprobe",
11138 func_name, offset,
303a2572
AN
11139 libbpf_strerror_r(err, errmsg, sizeof(errmsg)));
11140 goto err_out;
b2650027 11141 }
47faff37 11142 link = bpf_program__attach_perf_event_opts(prog, pfd, &pe_opts);
e9fc3ce9
AN
11143 err = libbpf_get_error(link);
11144 if (err) {
b2650027 11145 close(pfd);
46ed5fc3
AN
11146 pr_warn("prog '%s': failed to attach to %s '%s+0x%zx': %s\n",
11147 prog->name, retprobe ? "kretprobe" : "kprobe",
11148 func_name, offset,
be18010e 11149 libbpf_strerror_r(err, errmsg, sizeof(errmsg)));
80940293 11150 goto err_clean_legacy;
b2650027 11151 }
ca304b40
RDT
11152 if (legacy) {
11153 struct bpf_link_perf *perf_link = container_of(link, struct bpf_link_perf, link);
11154
11155 perf_link->legacy_probe_name = legacy_probe;
46ed5fc3 11156 perf_link->legacy_is_kprobe = true;
ca304b40
RDT
11157 perf_link->legacy_is_retprobe = retprobe;
11158 }
11159
b2650027 11160 return link;
80940293
CW
11161
11162err_clean_legacy:
11163 if (legacy)
11164 remove_kprobe_event_legacy(legacy_probe, retprobe);
303a2572
AN
11165err_out:
11166 free(legacy_probe);
11167 return libbpf_err_ptr(err);
b2650027
AN
11168}
11169
942025c9 11170struct bpf_link *bpf_program__attach_kprobe(const struct bpf_program *prog,
ac0ed488
JO
11171 bool retprobe,
11172 const char *func_name)
11173{
da97553e 11174 DECLARE_LIBBPF_OPTS(bpf_kprobe_opts, opts,
ac0ed488 11175 .retprobe = retprobe,
da97553e 11176 );
ac0ed488
JO
11177
11178 return bpf_program__attach_kprobe_opts(prog, func_name, &opts);
11179}
11180
708ac5be
AN
11181struct bpf_link *bpf_program__attach_ksyscall(const struct bpf_program *prog,
11182 const char *syscall_name,
11183 const struct bpf_ksyscall_opts *opts)
11184{
11185 LIBBPF_OPTS(bpf_kprobe_opts, kprobe_opts);
11186 char func_name[128];
11187
11188 if (!OPTS_VALID(opts, bpf_ksyscall_opts))
11189 return libbpf_err_ptr(-EINVAL);
11190
11191 if (kernel_supports(prog->obj, FEAT_SYSCALL_WRAPPER)) {
64893e83
DM
11192 /* arch_specific_syscall_pfx() should never return NULL here
11193 * because it is guarded by kernel_supports(). However, since
11194 * compiler does not know that we have an explicit conditional
11195 * as well.
11196 */
708ac5be 11197 snprintf(func_name, sizeof(func_name), "__%s_sys_%s",
64893e83 11198 arch_specific_syscall_pfx() ? : "", syscall_name);
708ac5be
AN
11199 } else {
11200 snprintf(func_name, sizeof(func_name), "__se_sys_%s", syscall_name);
11201 }
11202
11203 kprobe_opts.retprobe = OPTS_GET(opts, retprobe, false);
11204 kprobe_opts.bpf_cookie = OPTS_GET(opts, bpf_cookie, 0);
11205
11206 return bpf_program__attach_kprobe_opts(prog, func_name, &kprobe_opts);
11207}
11208
ddc6b049 11209/* Adapted from perf/util/string.c */
e613d1d0 11210bool glob_match(const char *str, const char *pat)
ddc6b049
JO
11211{
11212 while (*str && *pat && *pat != '*') {
11213 if (*pat == '?') { /* Matches any single character */
11214 str++;
11215 pat++;
11216 continue;
11217 }
11218 if (*str != *pat)
11219 return false;
11220 str++;
11221 pat++;
11222 }
11223 /* Check wild card */
11224 if (*pat == '*') {
11225 while (*pat == '*')
11226 pat++;
11227 if (!*pat) /* Tail wild card matches all */
11228 return true;
11229 while (*str)
11230 if (glob_match(str++, pat))
11231 return true;
11232 }
11233 return !*str && !*pat;
11234}
11235
11236struct kprobe_multi_resolve {
11237 const char *pattern;
11238 unsigned long *addrs;
11239 size_t cap;
11240 size_t cnt;
11241};
11242
8a3fe76f
JL
11243struct avail_kallsyms_data {
11244 char **syms;
11245 size_t cnt;
11246 struct kprobe_multi_resolve *res;
11247};
11248
11249static int avail_func_cmp(const void *a, const void *b)
11250{
11251 return strcmp(*(const char **)a, *(const char **)b);
11252}
11253
11254static int avail_kallsyms_cb(unsigned long long sym_addr, char sym_type,
11255 const char *sym_name, void *ctx)
ddc6b049 11256{
8a3fe76f
JL
11257 struct avail_kallsyms_data *data = ctx;
11258 struct kprobe_multi_resolve *res = data->res;
ddc6b049
JO
11259 int err;
11260
8a3fe76f 11261 if (!bsearch(&sym_name, data->syms, data->cnt, sizeof(*data->syms), avail_func_cmp))
ddc6b049
JO
11262 return 0;
11263
8a3fe76f 11264 err = libbpf_ensure_mem((void **)&res->addrs, &res->cap, sizeof(*res->addrs), res->cnt + 1);
ddc6b049
JO
11265 if (err)
11266 return err;
11267
8a3fe76f 11268 res->addrs[res->cnt++] = (unsigned long)sym_addr;
ddc6b049
JO
11269 return 0;
11270}
11271
8a3fe76f
JL
11272static int libbpf_available_kallsyms_parse(struct kprobe_multi_resolve *res)
11273{
11274 const char *available_functions_file = tracefs_available_filter_functions();
11275 struct avail_kallsyms_data data;
11276 char sym_name[500];
11277 FILE *f;
11278 int err = 0, ret, i;
11279 char **syms = NULL;
11280 size_t cap = 0, cnt = 0;
11281
11282 f = fopen(available_functions_file, "re");
11283 if (!f) {
11284 err = -errno;
11285 pr_warn("failed to open %s: %d\n", available_functions_file, err);
11286 return err;
11287 }
11288
11289 while (true) {
11290 char *name;
11291
11292 ret = fscanf(f, "%499s%*[^\n]\n", sym_name);
11293 if (ret == EOF && feof(f))
11294 break;
11295
11296 if (ret != 1) {
11297 pr_warn("failed to parse available_filter_functions entry: %d\n", ret);
11298 err = -EINVAL;
11299 goto cleanup;
11300 }
11301
11302 if (!glob_match(sym_name, res->pattern))
11303 continue;
11304
11305 err = libbpf_ensure_mem((void **)&syms, &cap, sizeof(*syms), cnt + 1);
11306 if (err)
11307 goto cleanup;
11308
11309 name = strdup(sym_name);
11310 if (!name) {
11311 err = -errno;
11312 goto cleanup;
11313 }
11314
11315 syms[cnt++] = name;
11316 }
11317
11318 /* no entries found, bail out */
11319 if (cnt == 0) {
11320 err = -ENOENT;
11321 goto cleanup;
11322 }
11323
11324 /* sort available functions */
11325 qsort(syms, cnt, sizeof(*syms), avail_func_cmp);
11326
11327 data.syms = syms;
11328 data.res = res;
11329 data.cnt = cnt;
11330 libbpf_kallsyms_parse(avail_kallsyms_cb, &data);
11331
11332 if (res->cnt == 0)
11333 err = -ENOENT;
11334
11335cleanup:
11336 for (i = 0; i < cnt; i++)
11337 free((char *)syms[i]);
11338 free(syms);
11339
11340 fclose(f);
11341 return err;
11342}
11343
56baeeba
JL
11344static bool has_available_filter_functions_addrs(void)
11345{
11346 return access(tracefs_available_filter_functions_addrs(), R_OK) != -1;
11347}
11348
11349static int libbpf_available_kprobes_parse(struct kprobe_multi_resolve *res)
11350{
11351 const char *available_path = tracefs_available_filter_functions_addrs();
11352 char sym_name[500];
11353 FILE *f;
11354 int ret, err = 0;
11355 unsigned long long sym_addr;
11356
11357 f = fopen(available_path, "re");
11358 if (!f) {
11359 err = -errno;
11360 pr_warn("failed to open %s: %d\n", available_path, err);
11361 return err;
11362 }
11363
11364 while (true) {
11365 ret = fscanf(f, "%llx %499s%*[^\n]\n", &sym_addr, sym_name);
11366 if (ret == EOF && feof(f))
11367 break;
11368
11369 if (ret != 2) {
11370 pr_warn("failed to parse available_filter_functions_addrs entry: %d\n",
11371 ret);
11372 err = -EINVAL;
11373 goto cleanup;
11374 }
11375
11376 if (!glob_match(sym_name, res->pattern))
11377 continue;
11378
11379 err = libbpf_ensure_mem((void **)&res->addrs, &res->cap,
11380 sizeof(*res->addrs), res->cnt + 1);
11381 if (err)
11382 goto cleanup;
11383
11384 res->addrs[res->cnt++] = (unsigned long)sym_addr;
11385 }
11386
11387 if (res->cnt == 0)
11388 err = -ENOENT;
11389
11390cleanup:
11391 fclose(f);
11392 return err;
11393}
11394
ddc6b049
JO
11395struct bpf_link *
11396bpf_program__attach_kprobe_multi_opts(const struct bpf_program *prog,
11397 const char *pattern,
11398 const struct bpf_kprobe_multi_opts *opts)
11399{
11400 LIBBPF_OPTS(bpf_link_create_opts, lopts);
11401 struct kprobe_multi_resolve res = {
11402 .pattern = pattern,
11403 };
2ca178f0 11404 enum bpf_attach_type attach_type;
ddc6b049
JO
11405 struct bpf_link *link = NULL;
11406 char errmsg[STRERR_BUFSIZE];
11407 const unsigned long *addrs;
11408 int err, link_fd, prog_fd;
2ca178f0 11409 bool retprobe, session;
ddc6b049
JO
11410 const __u64 *cookies;
11411 const char **syms;
ddc6b049
JO
11412 size_t cnt;
11413
11414 if (!OPTS_VALID(opts, bpf_kprobe_multi_opts))
11415 return libbpf_err_ptr(-EINVAL);
11416
7b30c296
MY
11417 prog_fd = bpf_program__fd(prog);
11418 if (prog_fd < 0) {
11419 pr_warn("prog '%s': can't attach BPF program without FD (was it loaded?)\n",
11420 prog->name);
11421 return libbpf_err_ptr(-EINVAL);
11422 }
11423
ddc6b049
JO
11424 syms = OPTS_GET(opts, syms, false);
11425 addrs = OPTS_GET(opts, addrs, false);
11426 cnt = OPTS_GET(opts, cnt, false);
11427 cookies = OPTS_GET(opts, cookies, false);
11428
11429 if (!pattern && !addrs && !syms)
11430 return libbpf_err_ptr(-EINVAL);
11431 if (pattern && (addrs || syms || cookies || cnt))
11432 return libbpf_err_ptr(-EINVAL);
11433 if (!pattern && !cnt)
11434 return libbpf_err_ptr(-EINVAL);
11435 if (addrs && syms)
11436 return libbpf_err_ptr(-EINVAL);
11437
11438 if (pattern) {
56baeeba
JL
11439 if (has_available_filter_functions_addrs())
11440 err = libbpf_available_kprobes_parse(&res);
11441 else
11442 err = libbpf_available_kallsyms_parse(&res);
ddc6b049
JO
11443 if (err)
11444 goto error;
ddc6b049
JO
11445 addrs = res.addrs;
11446 cnt = res.cnt;
11447 }
11448
11449 retprobe = OPTS_GET(opts, retprobe, false);
2ca178f0
JO
11450 session = OPTS_GET(opts, session, false);
11451
11452 if (retprobe && session)
11453 return libbpf_err_ptr(-EINVAL);
11454
11455 attach_type = session ? BPF_TRACE_KPROBE_SESSION : BPF_TRACE_KPROBE_MULTI;
ddc6b049
JO
11456
11457 lopts.kprobe_multi.syms = syms;
11458 lopts.kprobe_multi.addrs = addrs;
11459 lopts.kprobe_multi.cookies = cookies;
11460 lopts.kprobe_multi.cnt = cnt;
11461 lopts.kprobe_multi.flags = retprobe ? BPF_F_KPROBE_MULTI_RETURN : 0;
11462
11463 link = calloc(1, sizeof(*link));
11464 if (!link) {
11465 err = -ENOMEM;
11466 goto error;
11467 }
11468 link->detach = &bpf_link__detach_fd;
11469
2ca178f0 11470 link_fd = bpf_link_create(prog_fd, 0, attach_type, &lopts);
ddc6b049
JO
11471 if (link_fd < 0) {
11472 err = -errno;
11473 pr_warn("prog '%s': failed to attach: %s\n",
11474 prog->name, libbpf_strerror_r(err, errmsg, sizeof(errmsg)));
11475 goto error;
11476 }
11477 link->fd = link_fd;
11478 free(res.addrs);
11479 return link;
11480
11481error:
11482 free(link);
11483 free(res.addrs);
11484 return libbpf_err_ptr(err);
11485}
11486
4fa5bcfe 11487static int attach_kprobe(const struct bpf_program *prog, long cookie, struct bpf_link **link)
d7a18ea7 11488{
da97553e 11489 DECLARE_LIBBPF_OPTS(bpf_kprobe_opts, opts);
a2488b5f 11490 unsigned long offset = 0;
d7a18ea7 11491 const char *func_name;
a2488b5f 11492 char *func;
4fa5bcfe 11493 int n;
d7a18ea7 11494
9af8efc4
AN
11495 *link = NULL;
11496
11497 /* no auto-attach for SEC("kprobe") and SEC("kretprobe") */
11498 if (strcmp(prog->sec_name, "kprobe") == 0 || strcmp(prog->sec_name, "kretprobe") == 0)
11499 return 0;
11500
13d35a0c
AN
11501 opts.retprobe = str_has_pfx(prog->sec_name, "kretprobe/");
11502 if (opts.retprobe)
11503 func_name = prog->sec_name + sizeof("kretprobe/") - 1;
11504 else
11505 func_name = prog->sec_name + sizeof("kprobe/") - 1;
d7a18ea7 11506
e3f9bc35 11507 n = sscanf(func_name, "%m[a-zA-Z0-9_.]+%li", &func, &offset);
a2488b5f 11508 if (n < 1) {
a2488b5f 11509 pr_warn("kprobe name is invalid: %s\n", func_name);
4fa5bcfe 11510 return -EINVAL;
a2488b5f
AM
11511 }
11512 if (opts.retprobe && offset != 0) {
1f71a468 11513 free(func);
a2488b5f 11514 pr_warn("kretprobes do not support offset specification\n");
4fa5bcfe 11515 return -EINVAL;
a2488b5f 11516 }
d7a18ea7 11517
a2488b5f 11518 opts.offset = offset;
4fa5bcfe 11519 *link = bpf_program__attach_kprobe_opts(prog, func, &opts);
a2488b5f 11520 free(func);
4fa5bcfe 11521 return libbpf_get_error(*link);
d7a18ea7
AN
11522}
11523
708ac5be
AN
11524static int attach_ksyscall(const struct bpf_program *prog, long cookie, struct bpf_link **link)
11525{
11526 LIBBPF_OPTS(bpf_ksyscall_opts, opts);
11527 const char *syscall_name;
11528
11529 *link = NULL;
11530
11531 /* no auto-attach for SEC("ksyscall") and SEC("kretsyscall") */
11532 if (strcmp(prog->sec_name, "ksyscall") == 0 || strcmp(prog->sec_name, "kretsyscall") == 0)
11533 return 0;
11534
11535 opts.retprobe = str_has_pfx(prog->sec_name, "kretsyscall/");
11536 if (opts.retprobe)
11537 syscall_name = prog->sec_name + sizeof("kretsyscall/") - 1;
11538 else
11539 syscall_name = prog->sec_name + sizeof("ksyscall/") - 1;
11540
11541 *link = bpf_program__attach_ksyscall(prog, syscall_name, &opts);
11542 return *link ? 0 : -errno;
11543}
11544
ddc6b049
JO
11545static int attach_kprobe_multi(const struct bpf_program *prog, long cookie, struct bpf_link **link)
11546{
11547 LIBBPF_OPTS(bpf_kprobe_multi_opts, opts);
11548 const char *spec;
11549 char *pattern;
11550 int n;
11551
9af8efc4
AN
11552 *link = NULL;
11553
11554 /* no auto-attach for SEC("kprobe.multi") and SEC("kretprobe.multi") */
11555 if (strcmp(prog->sec_name, "kprobe.multi") == 0 ||
11556 strcmp(prog->sec_name, "kretprobe.multi") == 0)
11557 return 0;
11558
ddc6b049
JO
11559 opts.retprobe = str_has_pfx(prog->sec_name, "kretprobe.multi/");
11560 if (opts.retprobe)
11561 spec = prog->sec_name + sizeof("kretprobe.multi/") - 1;
11562 else
11563 spec = prog->sec_name + sizeof("kprobe.multi/") - 1;
11564
11565 n = sscanf(spec, "%m[a-zA-Z0-9_.*?]", &pattern);
11566 if (n < 1) {
11567 pr_warn("kprobe multi pattern is invalid: %s\n", pattern);
11568 return -EINVAL;
11569 }
11570
11571 *link = bpf_program__attach_kprobe_multi_opts(prog, pattern, &opts);
11572 free(pattern);
11573 return libbpf_get_error(*link);
11574}
11575
2ca178f0
JO
11576static int attach_kprobe_session(const struct bpf_program *prog, long cookie,
11577 struct bpf_link **link)
11578{
11579 LIBBPF_OPTS(bpf_kprobe_multi_opts, opts, .session = true);
11580 const char *spec;
11581 char *pattern;
11582 int n;
11583
11584 *link = NULL;
11585
11586 /* no auto-attach for SEC("kprobe.session") */
11587 if (strcmp(prog->sec_name, "kprobe.session") == 0)
11588 return 0;
11589
11590 spec = prog->sec_name + sizeof("kprobe.session/") - 1;
11591 n = sscanf(spec, "%m[a-zA-Z0-9_.*?]", &pattern);
11592 if (n < 1) {
11593 pr_warn("kprobe session pattern is invalid: %s\n", pattern);
11594 return -EINVAL;
11595 }
11596
11597 *link = bpf_program__attach_kprobe_multi_opts(prog, pattern, &opts);
11598 free(pattern);
11599 return *link ? 0 : -errno;
11600}
11601
5bfdd32d
JO
11602static int attach_uprobe_multi(const struct bpf_program *prog, long cookie, struct bpf_link **link)
11603{
11604 char *probe_type = NULL, *binary_path = NULL, *func_name = NULL;
11605 LIBBPF_OPTS(bpf_uprobe_multi_opts, opts);
11606 int n, ret = -EINVAL;
11607
11608 *link = NULL;
11609
2147c8d0 11610 n = sscanf(prog->sec_name, "%m[^/]/%m[^:]:%m[^\n]",
5bfdd32d
JO
11611 &probe_type, &binary_path, &func_name);
11612 switch (n) {
11613 case 1:
11614 /* handle SEC("u[ret]probe") - format is valid, but auto-attach is impossible. */
11615 ret = 0;
11616 break;
11617 case 3:
11618 opts.retprobe = strcmp(probe_type, "uretprobe.multi") == 0;
11619 *link = bpf_program__attach_uprobe_multi(prog, -1, binary_path, func_name, &opts);
11620 ret = libbpf_get_error(*link);
11621 break;
11622 default:
11623 pr_warn("prog '%s': invalid format of section definition '%s'\n", prog->name,
11624 prog->sec_name);
11625 break;
11626 }
11627 free(probe_type);
11628 free(binary_path);
11629 free(func_name);
11630 return ret;
11631}
11632
cc10623c
AN
11633static void gen_uprobe_legacy_event_name(char *buf, size_t buf_sz,
11634 const char *binary_path, uint64_t offset)
11635{
11636 int i;
11637
11638 snprintf(buf, buf_sz, "libbpf_%u_%s_0x%zx", getpid(), binary_path, (size_t)offset);
11639
11640 /* sanitize binary_path in the probe name */
11641 for (i = 0; buf[i]; i++) {
11642 if (!isalnum(buf[i]))
11643 buf[i] = '_';
11644 }
11645}
11646
11647static inline int add_uprobe_event_legacy(const char *probe_name, bool retprobe,
11648 const char *binary_path, size_t offset)
11649{
a1ac9fd6 11650 return append_to_file(tracefs_uprobe_events(), "%c:%s/%s %s:0x%zx",
cc10623c
AN
11651 retprobe ? 'r' : 'p',
11652 retprobe ? "uretprobes" : "uprobes",
11653 probe_name, binary_path, offset);
11654}
11655
11656static inline int remove_uprobe_event_legacy(const char *probe_name, bool retprobe)
11657{
a1ac9fd6
AN
11658 return append_to_file(tracefs_uprobe_events(), "-:%s/%s",
11659 retprobe ? "uretprobes" : "uprobes", probe_name);
cc10623c
AN
11660}
11661
11662static int determine_uprobe_perf_type_legacy(const char *probe_name, bool retprobe)
11663{
11664 char file[512];
11665
a1ac9fd6
AN
11666 snprintf(file, sizeof(file), "%s/events/%s/%s/id",
11667 tracefs_path(), retprobe ? "uretprobes" : "uprobes", probe_name);
cc10623c
AN
11668
11669 return parse_uint_from_file(file, "%d\n");
11670}
11671
11672static int perf_event_uprobe_open_legacy(const char *probe_name, bool retprobe,
11673 const char *binary_path, size_t offset, int pid)
11674{
813847a3 11675 const size_t attr_sz = sizeof(struct perf_event_attr);
cc10623c
AN
11676 struct perf_event_attr attr;
11677 int type, pfd, err;
11678
11679 err = add_uprobe_event_legacy(probe_name, retprobe, binary_path, offset);
11680 if (err < 0) {
11681 pr_warn("failed to add legacy uprobe event for %s:0x%zx: %d\n",
11682 binary_path, (size_t)offset, err);
11683 return err;
11684 }
11685 type = determine_uprobe_perf_type_legacy(probe_name, retprobe);
11686 if (type < 0) {
2655144f 11687 err = type;
cc10623c 11688 pr_warn("failed to determine legacy uprobe event id for %s:0x%zx: %d\n",
2655144f
CW
11689 binary_path, offset, err);
11690 goto err_clean_legacy;
cc10623c
AN
11691 }
11692
813847a3
AN
11693 memset(&attr, 0, attr_sz);
11694 attr.size = attr_sz;
cc10623c
AN
11695 attr.config = type;
11696 attr.type = PERF_TYPE_TRACEPOINT;
11697
11698 pfd = syscall(__NR_perf_event_open, &attr,
11699 pid < 0 ? -1 : pid, /* pid */
11700 pid == -1 ? 0 : -1, /* cpu */
11701 -1 /* group_fd */, PERF_FLAG_FD_CLOEXEC);
11702 if (pfd < 0) {
11703 err = -errno;
11704 pr_warn("legacy uprobe perf_event_open() failed: %d\n", err);
2655144f 11705 goto err_clean_legacy;
cc10623c
AN
11706 }
11707 return pfd;
2655144f
CW
11708
11709err_clean_legacy:
11710 /* Clear the newly added legacy uprobe_event */
11711 remove_uprobe_event_legacy(probe_name, retprobe);
11712 return err;
cc10623c
AN
11713}
11714
c44fd845
DM
11715/* Find offset of function name in archive specified by path. Currently
11716 * supported are .zip files that do not compress their contents, as used on
11717 * Android in the form of APKs, for example. "file_name" is the name of the ELF
11718 * file inside the archive. "func_name" matches symbol name or name@@LIB for
11719 * library functions.
11720 *
11721 * An overview of the APK format specifically provided here:
11722 * https://en.wikipedia.org/w/index.php?title=Apk_(file_format)&oldid=1139099120#Package_contents
11723 */
11724static long elf_find_func_offset_from_archive(const char *archive_path, const char *file_name,
11725 const char *func_name)
11726{
11727 struct zip_archive *archive;
11728 struct zip_entry entry;
11729 long ret;
11730 Elf *elf;
11731
11732 archive = zip_archive_open(archive_path);
11733 if (IS_ERR(archive)) {
11734 ret = PTR_ERR(archive);
11735 pr_warn("zip: failed to open %s: %ld\n", archive_path, ret);
11736 return ret;
11737 }
11738
11739 ret = zip_archive_find_entry(archive, file_name, &entry);
11740 if (ret) {
11741 pr_warn("zip: could not find archive member %s in %s: %ld\n", file_name,
11742 archive_path, ret);
11743 goto out;
11744 }
11745 pr_debug("zip: found entry for %s in %s at 0x%lx\n", file_name, archive_path,
11746 (unsigned long)entry.data_offset);
11747
11748 if (entry.compression) {
11749 pr_warn("zip: entry %s of %s is compressed and cannot be handled\n", file_name,
11750 archive_path);
11751 ret = -LIBBPF_ERRNO__FORMAT;
11752 goto out;
11753 }
11754
11755 elf = elf_memory((void *)entry.data, entry.data_length);
11756 if (!elf) {
11757 pr_warn("elf: could not read elf file %s from %s: %s\n", file_name, archive_path,
11758 elf_errmsg(-1));
11759 ret = -LIBBPF_ERRNO__LIBELF;
11760 goto out;
11761 }
11762
11763 ret = elf_find_func_offset(elf, file_name, func_name);
11764 if (ret > 0) {
11765 pr_debug("elf: symbol address match for %s of %s in %s: 0x%x + 0x%lx = 0x%lx\n",
11766 func_name, file_name, archive_path, entry.data_offset, ret,
11767 ret + entry.data_offset);
11768 ret += entry.data_offset;
11769 }
11770 elf_end(elf);
11771
11772out:
11773 zip_archive_close(archive);
11774 return ret;
11775}
11776
56818931
IL
11777static const char *arch_specific_lib_paths(void)
11778{
11779 /*
11780 * Based on https://packages.debian.org/sid/libc6.
11781 *
11782 * Assume that the traced program is built for the same architecture
11783 * as libbpf, which should cover the vast majority of cases.
11784 */
11785#if defined(__x86_64__)
11786 return "/lib/x86_64-linux-gnu";
11787#elif defined(__i386__)
11788 return "/lib/i386-linux-gnu";
11789#elif defined(__s390x__)
11790 return "/lib/s390x-linux-gnu";
11791#elif defined(__s390__)
11792 return "/lib/s390-linux-gnu";
11793#elif defined(__arm__) && defined(__SOFTFP__)
11794 return "/lib/arm-linux-gnueabi";
11795#elif defined(__arm__) && !defined(__SOFTFP__)
11796 return "/lib/arm-linux-gnueabihf";
11797#elif defined(__aarch64__)
11798 return "/lib/aarch64-linux-gnu";
11799#elif defined(__mips__) && defined(__MIPSEL__) && _MIPS_SZLONG == 64
11800 return "/lib/mips64el-linux-gnuabi64";
11801#elif defined(__mips__) && defined(__MIPSEL__) && _MIPS_SZLONG == 32
11802 return "/lib/mipsel-linux-gnu";
11803#elif defined(__powerpc64__) && __BYTE_ORDER__ == __ORDER_LITTLE_ENDIAN__
11804 return "/lib/powerpc64le-linux-gnu";
11805#elif defined(__sparc__) && defined(__arch64__)
11806 return "/lib/sparc64-linux-gnu";
11807#elif defined(__riscv) && __riscv_xlen == 64
11808 return "/lib/riscv64-linux-gnu";
11809#else
11810 return NULL;
11811#endif
11812}
11813
1ce3a60e
AM
11814/* Get full path to program/shared library. */
11815static int resolve_full_path(const char *file, char *result, size_t result_sz)
11816{
56818931 11817 const char *search_paths[3] = {};
9e32084e 11818 int i, perm;
1ce3a60e 11819
a1c9d61b 11820 if (str_has_sfx(file, ".so") || strstr(file, ".so.")) {
1ce3a60e
AM
11821 search_paths[0] = getenv("LD_LIBRARY_PATH");
11822 search_paths[1] = "/usr/lib64:/usr/lib";
56818931 11823 search_paths[2] = arch_specific_lib_paths();
9e32084e 11824 perm = R_OK;
1ce3a60e
AM
11825 } else {
11826 search_paths[0] = getenv("PATH");
11827 search_paths[1] = "/usr/bin:/usr/sbin";
9e32084e 11828 perm = R_OK | X_OK;
1ce3a60e
AM
11829 }
11830
11831 for (i = 0; i < ARRAY_SIZE(search_paths); i++) {
11832 const char *s;
11833
11834 if (!search_paths[i])
11835 continue;
11836 for (s = search_paths[i]; s != NULL; s = strchr(s, ':')) {
11837 char *next_path;
11838 int seg_len;
11839
11840 if (s[0] == ':')
11841 s++;
11842 next_path = strchr(s, ':');
11843 seg_len = next_path ? next_path - s : strlen(s);
11844 if (!seg_len)
11845 continue;
11846 snprintf(result, result_sz, "%.*s/%s", seg_len, s, file);
9e32084e 11847 /* ensure it has required permissions */
6a4ab886 11848 if (faccessat(AT_FDCWD, result, perm, AT_EACCESS) < 0)
1ce3a60e
AM
11849 continue;
11850 pr_debug("resolved '%s' to '%s'\n", file, result);
11851 return 0;
11852 }
11853 }
11854 return -ENOENT;
11855}
11856
3140cf12
JO
11857struct bpf_link *
11858bpf_program__attach_uprobe_multi(const struct bpf_program *prog,
11859 pid_t pid,
11860 const char *path,
11861 const char *func_pattern,
11862 const struct bpf_uprobe_multi_opts *opts)
11863{
11864 const unsigned long *ref_ctr_offsets = NULL, *offsets = NULL;
11865 LIBBPF_OPTS(bpf_link_create_opts, lopts);
11866 unsigned long *resolved_offsets = NULL;
11867 int err = 0, link_fd, prog_fd;
11868 struct bpf_link *link = NULL;
11869 char errmsg[STRERR_BUFSIZE];
11870 char full_path[PATH_MAX];
11871 const __u64 *cookies;
11872 const char **syms;
11873 size_t cnt;
11874
11875 if (!OPTS_VALID(opts, bpf_uprobe_multi_opts))
11876 return libbpf_err_ptr(-EINVAL);
11877
7b30c296
MY
11878 prog_fd = bpf_program__fd(prog);
11879 if (prog_fd < 0) {
11880 pr_warn("prog '%s': can't attach BPF program without FD (was it loaded?)\n",
11881 prog->name);
11882 return libbpf_err_ptr(-EINVAL);
11883 }
11884
3140cf12
JO
11885 syms = OPTS_GET(opts, syms, NULL);
11886 offsets = OPTS_GET(opts, offsets, NULL);
11887 ref_ctr_offsets = OPTS_GET(opts, ref_ctr_offsets, NULL);
11888 cookies = OPTS_GET(opts, cookies, NULL);
11889 cnt = OPTS_GET(opts, cnt, 0);
11890
11891 /*
11892 * User can specify 2 mutually exclusive set of inputs:
11893 *
11894 * 1) use only path/func_pattern/pid arguments
11895 *
11896 * 2) use path/pid with allowed combinations of:
11897 * syms/offsets/ref_ctr_offsets/cookies/cnt
11898 *
11899 * - syms and offsets are mutually exclusive
11900 * - ref_ctr_offsets and cookies are optional
11901 *
11902 * Any other usage results in error.
11903 */
11904
11905 if (!path)
11906 return libbpf_err_ptr(-EINVAL);
11907 if (!func_pattern && cnt == 0)
11908 return libbpf_err_ptr(-EINVAL);
11909
11910 if (func_pattern) {
11911 if (syms || offsets || ref_ctr_offsets || cookies || cnt)
11912 return libbpf_err_ptr(-EINVAL);
11913 } else {
11914 if (!!syms == !!offsets)
11915 return libbpf_err_ptr(-EINVAL);
11916 }
11917
11918 if (func_pattern) {
11919 if (!strchr(path, '/')) {
11920 err = resolve_full_path(path, full_path, sizeof(full_path));
11921 if (err) {
11922 pr_warn("prog '%s': failed to resolve full path for '%s': %d\n",
11923 prog->name, path, err);
11924 return libbpf_err_ptr(err);
11925 }
11926 path = full_path;
11927 }
11928
11929 err = elf_resolve_pattern_offsets(path, func_pattern,
11930 &resolved_offsets, &cnt);
11931 if (err < 0)
11932 return libbpf_err_ptr(err);
11933 offsets = resolved_offsets;
11934 } else if (syms) {
48f0dfd8 11935 err = elf_resolve_syms_offsets(path, cnt, syms, &resolved_offsets, STT_FUNC);
3140cf12
JO
11936 if (err < 0)
11937 return libbpf_err_ptr(err);
11938 offsets = resolved_offsets;
11939 }
11940
11941 lopts.uprobe_multi.path = path;
11942 lopts.uprobe_multi.offsets = offsets;
11943 lopts.uprobe_multi.ref_ctr_offsets = ref_ctr_offsets;
11944 lopts.uprobe_multi.cookies = cookies;
11945 lopts.uprobe_multi.cnt = cnt;
11946 lopts.uprobe_multi.flags = OPTS_GET(opts, retprobe, false) ? BPF_F_UPROBE_MULTI_RETURN : 0;
11947
11948 if (pid == 0)
11949 pid = getpid();
11950 if (pid > 0)
11951 lopts.uprobe_multi.pid = pid;
11952
11953 link = calloc(1, sizeof(*link));
11954 if (!link) {
11955 err = -ENOMEM;
11956 goto error;
11957 }
11958 link->detach = &bpf_link__detach_fd;
11959
3140cf12
JO
11960 link_fd = bpf_link_create(prog_fd, 0, BPF_TRACE_UPROBE_MULTI, &lopts);
11961 if (link_fd < 0) {
11962 err = -errno;
11963 pr_warn("prog '%s': failed to attach multi-uprobe: %s\n",
11964 prog->name, libbpf_strerror_r(err, errmsg, sizeof(errmsg)));
11965 goto error;
11966 }
11967 link->fd = link_fd;
11968 free(resolved_offsets);
11969 return link;
11970
11971error:
11972 free(resolved_offsets);
11973 free(link);
11974 return libbpf_err_ptr(err);
11975}
11976
47faff37 11977LIBBPF_API struct bpf_link *
942025c9 11978bpf_program__attach_uprobe_opts(const struct bpf_program *prog, pid_t pid,
47faff37
AN
11979 const char *binary_path, size_t func_offset,
11980 const struct bpf_uprobe_opts *opts)
b2650027 11981{
c44fd845 11982 const char *archive_path = NULL, *archive_sep = NULL;
cc10623c 11983 char errmsg[STRERR_BUFSIZE], *legacy_probe = NULL;
c44fd845 11984 DECLARE_LIBBPF_OPTS(bpf_perf_event_opts, pe_opts);
f8b299bc 11985 enum probe_attach_mode attach_mode;
c44fd845 11986 char full_path[PATH_MAX];
b2650027 11987 struct bpf_link *link;
5e3b8356 11988 size_t ref_ctr_off;
b2650027 11989 int pfd, err;
cc10623c 11990 bool retprobe, legacy;
433966e3 11991 const char *func_name;
47faff37
AN
11992
11993 if (!OPTS_VALID(opts, bpf_uprobe_opts))
11994 return libbpf_err_ptr(-EINVAL);
11995
f8b299bc 11996 attach_mode = OPTS_GET(opts, attach_mode, PROBE_ATTACH_MODE_DEFAULT);
47faff37 11997 retprobe = OPTS_GET(opts, retprobe, false);
5e3b8356 11998 ref_ctr_off = OPTS_GET(opts, ref_ctr_offset, 0);
47faff37 11999 pe_opts.bpf_cookie = OPTS_GET(opts, bpf_cookie, 0);
b2650027 12000
8ed2f5a6
HC
12001 if (!binary_path)
12002 return libbpf_err_ptr(-EINVAL);
12003
c44fd845
DM
12004 /* Check if "binary_path" refers to an archive. */
12005 archive_sep = strstr(binary_path, "!/");
12006 if (archive_sep) {
12007 full_path[0] = '\0';
12008 libbpf_strlcpy(full_path, binary_path,
12009 min(sizeof(full_path), (size_t)(archive_sep - binary_path + 1)));
12010 archive_path = full_path;
12011 binary_path = archive_sep + 2;
12012 } else if (!strchr(binary_path, '/')) {
12013 err = resolve_full_path(binary_path, full_path, sizeof(full_path));
1ce3a60e 12014 if (err) {
2e4913e0
AN
12015 pr_warn("prog '%s': failed to resolve full path for '%s': %d\n",
12016 prog->name, binary_path, err);
1ce3a60e
AM
12017 return libbpf_err_ptr(err);
12018 }
c44fd845 12019 binary_path = full_path;
1ce3a60e 12020 }
433966e3
AM
12021 func_name = OPTS_GET(opts, func_name, NULL);
12022 if (func_name) {
12023 long sym_off;
12024
c44fd845
DM
12025 if (archive_path) {
12026 sym_off = elf_find_func_offset_from_archive(archive_path, binary_path,
12027 func_name);
12028 binary_path = archive_path;
12029 } else {
12030 sym_off = elf_find_func_offset_from_file(binary_path, func_name);
12031 }
433966e3
AM
12032 if (sym_off < 0)
12033 return libbpf_err_ptr(sym_off);
12034 func_offset += sym_off;
12035 }
1ce3a60e 12036
cc10623c 12037 legacy = determine_uprobe_perf_type() < 0;
f8b299bc
MD
12038 switch (attach_mode) {
12039 case PROBE_ATTACH_MODE_LEGACY:
12040 legacy = true;
12041 pe_opts.force_ioctl_attach = true;
12042 break;
12043 case PROBE_ATTACH_MODE_PERF:
12044 if (legacy)
12045 return libbpf_err_ptr(-ENOTSUP);
12046 pe_opts.force_ioctl_attach = true;
12047 break;
12048 case PROBE_ATTACH_MODE_LINK:
12049 if (legacy || !kernel_supports(prog->obj, FEAT_PERF_LINK))
12050 return libbpf_err_ptr(-ENOTSUP);
12051 break;
12052 case PROBE_ATTACH_MODE_DEFAULT:
12053 break;
12054 default:
12055 return libbpf_err_ptr(-EINVAL);
12056 }
12057
cc10623c
AN
12058 if (!legacy) {
12059 pfd = perf_event_open_probe(true /* uprobe */, retprobe, binary_path,
12060 func_offset, pid, ref_ctr_off);
12061 } else {
1ce3a60e 12062 char probe_name[PATH_MAX + 64];
cc10623c
AN
12063
12064 if (ref_ctr_off)
12065 return libbpf_err_ptr(-EINVAL);
12066
12067 gen_uprobe_legacy_event_name(probe_name, sizeof(probe_name),
12068 binary_path, func_offset);
12069
12070 legacy_probe = strdup(probe_name);
12071 if (!legacy_probe)
12072 return libbpf_err_ptr(-ENOMEM);
12073
12074 pfd = perf_event_uprobe_open_legacy(legacy_probe, retprobe,
12075 binary_path, func_offset, pid);
12076 }
b2650027 12077 if (pfd < 0) {
cc10623c 12078 err = -errno;
52109584
AN
12079 pr_warn("prog '%s': failed to create %s '%s:0x%zx' perf event: %s\n",
12080 prog->name, retprobe ? "uretprobe" : "uprobe",
be18010e 12081 binary_path, func_offset,
cc10623c
AN
12082 libbpf_strerror_r(err, errmsg, sizeof(errmsg)));
12083 goto err_out;
b2650027 12084 }
cc10623c 12085
47faff37 12086 link = bpf_program__attach_perf_event_opts(prog, pfd, &pe_opts);
e9fc3ce9
AN
12087 err = libbpf_get_error(link);
12088 if (err) {
b2650027 12089 close(pfd);
52109584
AN
12090 pr_warn("prog '%s': failed to attach to %s '%s:0x%zx': %s\n",
12091 prog->name, retprobe ? "uretprobe" : "uprobe",
be18010e
KW
12092 binary_path, func_offset,
12093 libbpf_strerror_r(err, errmsg, sizeof(errmsg)));
2655144f 12094 goto err_clean_legacy;
cc10623c
AN
12095 }
12096 if (legacy) {
12097 struct bpf_link_perf *perf_link = container_of(link, struct bpf_link_perf, link);
12098
12099 perf_link->legacy_probe_name = legacy_probe;
12100 perf_link->legacy_is_kprobe = false;
12101 perf_link->legacy_is_retprobe = retprobe;
b2650027
AN
12102 }
12103 return link;
2655144f
CW
12104
12105err_clean_legacy:
12106 if (legacy)
12107 remove_uprobe_event_legacy(legacy_probe, retprobe);
cc10623c
AN
12108err_out:
12109 free(legacy_probe);
12110 return libbpf_err_ptr(err);
b2650027
AN
12111}
12112
39f8dc43
AM
12113/* Format of u[ret]probe section definition supporting auto-attach:
12114 * u[ret]probe/binary:function[+offset]
12115 *
12116 * binary can be an absolute/relative path or a filename; the latter is resolved to a
12117 * full binary path via bpf_program__attach_uprobe_opts.
12118 *
12119 * Specifying uprobe+ ensures we carry out strict matching; either "uprobe" must be
12120 * specified (and auto-attach is not possible) or the above format is specified for
12121 * auto-attach.
12122 */
12123static int attach_uprobe(const struct bpf_program *prog, long cookie, struct bpf_link **link)
12124{
12125 DECLARE_LIBBPF_OPTS(bpf_uprobe_opts, opts);
2147c8d0
HC
12126 char *probe_type = NULL, *binary_path = NULL, *func_name = NULL, *func_off;
12127 int n, c, ret = -EINVAL;
90db26e6 12128 long offset = 0;
39f8dc43
AM
12129
12130 *link = NULL;
12131
2147c8d0
HC
12132 n = sscanf(prog->sec_name, "%m[^/]/%m[^:]:%m[^\n]",
12133 &probe_type, &binary_path, &func_name);
90db26e6
AM
12134 switch (n) {
12135 case 1:
12136 /* handle SEC("u[ret]probe") - format is valid, but auto-attach is impossible. */
12137 ret = 0;
12138 break;
12139 case 2:
12140 pr_warn("prog '%s': section '%s' missing ':function[+offset]' specification\n",
12141 prog->name, prog->sec_name);
12142 break;
12143 case 3:
2147c8d0
HC
12144 /* check if user specifies `+offset`, if yes, this should be
12145 * the last part of the string, make sure sscanf read to EOL
12146 */
12147 func_off = strrchr(func_name, '+');
12148 if (func_off) {
12149 n = sscanf(func_off, "+%li%n", &offset, &c);
12150 if (n == 1 && *(func_off + c) == '\0')
12151 func_off[0] = '\0';
12152 else
12153 offset = 0;
12154 }
c4cac71f
DK
12155 opts.retprobe = strcmp(probe_type, "uretprobe") == 0 ||
12156 strcmp(probe_type, "uretprobe.s") == 0;
90db26e6
AM
12157 if (opts.retprobe && offset != 0) {
12158 pr_warn("prog '%s': uretprobes do not support offset specification\n",
12159 prog->name);
12160 break;
12161 }
12162 opts.func_name = func_name;
12163 *link = bpf_program__attach_uprobe_opts(prog, -1, binary_path, offset, &opts);
12164 ret = libbpf_get_error(*link);
12165 break;
12166 default:
12167 pr_warn("prog '%s': invalid format of section definition '%s'\n", prog->name,
39f8dc43 12168 prog->sec_name);
90db26e6 12169 break;
39f8dc43 12170 }
90db26e6
AM
12171 free(probe_type);
12172 free(binary_path);
12173 free(func_name);
39f8dc43 12174
90db26e6 12175 return ret;
39f8dc43
AM
12176}
12177
942025c9 12178struct bpf_link *bpf_program__attach_uprobe(const struct bpf_program *prog,
47faff37
AN
12179 bool retprobe, pid_t pid,
12180 const char *binary_path,
12181 size_t func_offset)
12182{
12183 DECLARE_LIBBPF_OPTS(bpf_uprobe_opts, opts, .retprobe = retprobe);
12184
12185 return bpf_program__attach_uprobe_opts(prog, pid, binary_path, func_offset, &opts);
12186}
12187
2e4913e0
AN
12188struct bpf_link *bpf_program__attach_usdt(const struct bpf_program *prog,
12189 pid_t pid, const char *binary_path,
12190 const char *usdt_provider, const char *usdt_name,
12191 const struct bpf_usdt_opts *opts)
12192{
12193 char resolved_path[512];
12194 struct bpf_object *obj = prog->obj;
12195 struct bpf_link *link;
5af25a41 12196 __u64 usdt_cookie;
2e4913e0
AN
12197 int err;
12198
12199 if (!OPTS_VALID(opts, bpf_uprobe_opts))
12200 return libbpf_err_ptr(-EINVAL);
12201
12202 if (bpf_program__fd(prog) < 0) {
7b30c296 12203 pr_warn("prog '%s': can't attach BPF program without FD (was it loaded?)\n",
2e4913e0
AN
12204 prog->name);
12205 return libbpf_err_ptr(-EINVAL);
12206 }
12207
8ed2f5a6
HC
12208 if (!binary_path)
12209 return libbpf_err_ptr(-EINVAL);
12210
2e4913e0
AN
12211 if (!strchr(binary_path, '/')) {
12212 err = resolve_full_path(binary_path, resolved_path, sizeof(resolved_path));
12213 if (err) {
12214 pr_warn("prog '%s': failed to resolve full path for '%s': %d\n",
12215 prog->name, binary_path, err);
12216 return libbpf_err_ptr(err);
12217 }
12218 binary_path = resolved_path;
12219 }
12220
12221 /* USDT manager is instantiated lazily on first USDT attach. It will
12222 * be destroyed together with BPF object in bpf_object__close().
12223 */
12224 if (IS_ERR(obj->usdt_man))
12225 return libbpf_ptr(obj->usdt_man);
12226 if (!obj->usdt_man) {
12227 obj->usdt_man = usdt_manager_new(obj);
12228 if (IS_ERR(obj->usdt_man))
12229 return libbpf_ptr(obj->usdt_man);
12230 }
12231
12232 usdt_cookie = OPTS_GET(opts, usdt_cookie, 0);
12233 link = usdt_manager_attach_usdt(obj->usdt_man, prog, pid, binary_path,
e3ba8e4e 12234 usdt_provider, usdt_name, usdt_cookie);
2e4913e0
AN
12235 err = libbpf_get_error(link);
12236 if (err)
12237 return libbpf_err_ptr(err);
12238 return link;
12239}
12240
12241static int attach_usdt(const struct bpf_program *prog, long cookie, struct bpf_link **link)
12242{
12243 char *path = NULL, *provider = NULL, *name = NULL;
12244 const char *sec_name;
12245 int n, err;
12246
12247 sec_name = bpf_program__section_name(prog);
12248 if (strcmp(sec_name, "usdt") == 0) {
12249 /* no auto-attach for just SEC("usdt") */
12250 *link = NULL;
12251 return 0;
12252 }
12253
12254 n = sscanf(sec_name, "usdt/%m[^:]:%m[^:]:%m[^:]", &path, &provider, &name);
12255 if (n != 3) {
12256 pr_warn("invalid section '%s', expected SEC(\"usdt/<path>:<provider>:<name>\")\n",
12257 sec_name);
12258 err = -EINVAL;
12259 } else {
12260 *link = bpf_program__attach_usdt(prog, -1 /* any process */, path,
12261 provider, name, NULL);
12262 err = libbpf_get_error(*link);
12263 }
12264 free(path);
12265 free(provider);
12266 free(name);
12267 return err;
12268}
12269
f6de59c1
AN
12270static int determine_tracepoint_id(const char *tp_category,
12271 const char *tp_name)
12272{
12273 char file[PATH_MAX];
12274 int ret;
12275
a1ac9fd6
AN
12276 ret = snprintf(file, sizeof(file), "%s/events/%s/%s/id",
12277 tracefs_path(), tp_category, tp_name);
f6de59c1
AN
12278 if (ret < 0)
12279 return -errno;
12280 if (ret >= sizeof(file)) {
12281 pr_debug("tracepoint %s/%s path is too long\n",
12282 tp_category, tp_name);
12283 return -E2BIG;
12284 }
12285 return parse_uint_from_file(file, "%d\n");
12286}
12287
12288static int perf_event_open_tracepoint(const char *tp_category,
12289 const char *tp_name)
12290{
813847a3
AN
12291 const size_t attr_sz = sizeof(struct perf_event_attr);
12292 struct perf_event_attr attr;
f6de59c1
AN
12293 char errmsg[STRERR_BUFSIZE];
12294 int tp_id, pfd, err;
12295
12296 tp_id = determine_tracepoint_id(tp_category, tp_name);
12297 if (tp_id < 0) {
be18010e
KW
12298 pr_warn("failed to determine tracepoint '%s/%s' perf event ID: %s\n",
12299 tp_category, tp_name,
12300 libbpf_strerror_r(tp_id, errmsg, sizeof(errmsg)));
f6de59c1
AN
12301 return tp_id;
12302 }
12303
813847a3 12304 memset(&attr, 0, attr_sz);
f6de59c1 12305 attr.type = PERF_TYPE_TRACEPOINT;
813847a3 12306 attr.size = attr_sz;
f6de59c1
AN
12307 attr.config = tp_id;
12308
12309 pfd = syscall(__NR_perf_event_open, &attr, -1 /* pid */, 0 /* cpu */,
12310 -1 /* group_fd */, PERF_FLAG_FD_CLOEXEC);
12311 if (pfd < 0) {
12312 err = -errno;
be18010e
KW
12313 pr_warn("tracepoint '%s/%s' perf_event_open() failed: %s\n",
12314 tp_category, tp_name,
12315 libbpf_strerror_r(err, errmsg, sizeof(errmsg)));
f6de59c1
AN
12316 return err;
12317 }
12318 return pfd;
12319}
12320
942025c9 12321struct bpf_link *bpf_program__attach_tracepoint_opts(const struct bpf_program *prog,
47faff37
AN
12322 const char *tp_category,
12323 const char *tp_name,
12324 const struct bpf_tracepoint_opts *opts)
f6de59c1 12325{
47faff37 12326 DECLARE_LIBBPF_OPTS(bpf_perf_event_opts, pe_opts);
f6de59c1
AN
12327 char errmsg[STRERR_BUFSIZE];
12328 struct bpf_link *link;
12329 int pfd, err;
12330
47faff37
AN
12331 if (!OPTS_VALID(opts, bpf_tracepoint_opts))
12332 return libbpf_err_ptr(-EINVAL);
12333
12334 pe_opts.bpf_cookie = OPTS_GET(opts, bpf_cookie, 0);
12335
f6de59c1
AN
12336 pfd = perf_event_open_tracepoint(tp_category, tp_name);
12337 if (pfd < 0) {
52109584
AN
12338 pr_warn("prog '%s': failed to create tracepoint '%s/%s' perf event: %s\n",
12339 prog->name, tp_category, tp_name,
be18010e 12340 libbpf_strerror_r(pfd, errmsg, sizeof(errmsg)));
e9fc3ce9 12341 return libbpf_err_ptr(pfd);
f6de59c1 12342 }
47faff37 12343 link = bpf_program__attach_perf_event_opts(prog, pfd, &pe_opts);
e9fc3ce9
AN
12344 err = libbpf_get_error(link);
12345 if (err) {
f6de59c1 12346 close(pfd);
52109584
AN
12347 pr_warn("prog '%s': failed to attach to tracepoint '%s/%s': %s\n",
12348 prog->name, tp_category, tp_name,
be18010e 12349 libbpf_strerror_r(err, errmsg, sizeof(errmsg)));
e9fc3ce9 12350 return libbpf_err_ptr(err);
f6de59c1
AN
12351 }
12352 return link;
12353}
12354
942025c9 12355struct bpf_link *bpf_program__attach_tracepoint(const struct bpf_program *prog,
47faff37
AN
12356 const char *tp_category,
12357 const char *tp_name)
12358{
12359 return bpf_program__attach_tracepoint_opts(prog, tp_category, tp_name, NULL);
12360}
12361
4fa5bcfe 12362static int attach_tp(const struct bpf_program *prog, long cookie, struct bpf_link **link)
d7a18ea7
AN
12363{
12364 char *sec_name, *tp_cat, *tp_name;
d7a18ea7 12365
9af8efc4
AN
12366 *link = NULL;
12367
12368 /* no auto-attach for SEC("tp") or SEC("tracepoint") */
12369 if (strcmp(prog->sec_name, "tp") == 0 || strcmp(prog->sec_name, "tracepoint") == 0)
12370 return 0;
12371
ac6a6586
AN
12372 sec_name = strdup(prog->sec_name);
12373 if (!sec_name)
12374 return -ENOMEM;
12375
13d35a0c
AN
12376 /* extract "tp/<category>/<name>" or "tracepoint/<category>/<name>" */
12377 if (str_has_pfx(prog->sec_name, "tp/"))
12378 tp_cat = sec_name + sizeof("tp/") - 1;
12379 else
12380 tp_cat = sec_name + sizeof("tracepoint/") - 1;
d7a18ea7
AN
12381 tp_name = strchr(tp_cat, '/');
12382 if (!tp_name) {
e9fc3ce9 12383 free(sec_name);
4fa5bcfe 12384 return -EINVAL;
d7a18ea7
AN
12385 }
12386 *tp_name = '\0';
12387 tp_name++;
12388
4fa5bcfe 12389 *link = bpf_program__attach_tracepoint(prog, tp_cat, tp_name);
d7a18ea7 12390 free(sec_name);
4fa5bcfe 12391 return libbpf_get_error(*link);
d7a18ea7
AN
12392}
12393
36ffb202
AN
12394struct bpf_link *
12395bpf_program__attach_raw_tracepoint_opts(const struct bpf_program *prog,
12396 const char *tp_name,
12397 struct bpf_raw_tracepoint_opts *opts)
84bf5e1f 12398{
36ffb202 12399 LIBBPF_OPTS(bpf_raw_tp_opts, raw_opts);
84bf5e1f 12400 char errmsg[STRERR_BUFSIZE];
c016b68e 12401 struct bpf_link *link;
84bf5e1f
AN
12402 int prog_fd, pfd;
12403
36ffb202
AN
12404 if (!OPTS_VALID(opts, bpf_raw_tracepoint_opts))
12405 return libbpf_err_ptr(-EINVAL);
12406
84bf5e1f
AN
12407 prog_fd = bpf_program__fd(prog);
12408 if (prog_fd < 0) {
52109584 12409 pr_warn("prog '%s': can't attach before loaded\n", prog->name);
e9fc3ce9 12410 return libbpf_err_ptr(-EINVAL);
84bf5e1f
AN
12411 }
12412
d6958706 12413 link = calloc(1, sizeof(*link));
84bf5e1f 12414 if (!link)
e9fc3ce9 12415 return libbpf_err_ptr(-ENOMEM);
c016b68e 12416 link->detach = &bpf_link__detach_fd;
84bf5e1f 12417
36ffb202
AN
12418 raw_opts.tp_name = tp_name;
12419 raw_opts.cookie = OPTS_GET(opts, cookie, 0);
12420 pfd = bpf_raw_tracepoint_open_opts(prog_fd, &raw_opts);
84bf5e1f
AN
12421 if (pfd < 0) {
12422 pfd = -errno;
12423 free(link);
52109584
AN
12424 pr_warn("prog '%s': failed to attach to raw tracepoint '%s': %s\n",
12425 prog->name, tp_name, libbpf_strerror_r(pfd, errmsg, sizeof(errmsg)));
e9fc3ce9 12426 return libbpf_err_ptr(pfd);
84bf5e1f
AN
12427 }
12428 link->fd = pfd;
c016b68e 12429 return link;
84bf5e1f
AN
12430}
12431
36ffb202
AN
12432struct bpf_link *bpf_program__attach_raw_tracepoint(const struct bpf_program *prog,
12433 const char *tp_name)
12434{
12435 return bpf_program__attach_raw_tracepoint_opts(prog, tp_name, NULL);
12436}
12437
4fa5bcfe 12438static int attach_raw_tp(const struct bpf_program *prog, long cookie, struct bpf_link **link)
d7a18ea7 12439{
ccaf12d6 12440 static const char *const prefixes[] = {
9af8efc4
AN
12441 "raw_tp",
12442 "raw_tracepoint",
12443 "raw_tp.w",
12444 "raw_tracepoint.w",
ccaf12d6
HT
12445 };
12446 size_t i;
12447 const char *tp_name = NULL;
13d35a0c 12448
9af8efc4
AN
12449 *link = NULL;
12450
ccaf12d6 12451 for (i = 0; i < ARRAY_SIZE(prefixes); i++) {
9af8efc4
AN
12452 size_t pfx_len;
12453
12454 if (!str_has_pfx(prog->sec_name, prefixes[i]))
12455 continue;
12456
12457 pfx_len = strlen(prefixes[i]);
12458 /* no auto-attach case of, e.g., SEC("raw_tp") */
12459 if (prog->sec_name[pfx_len] == '\0')
12460 return 0;
12461
12462 if (prog->sec_name[pfx_len] != '/')
12463 continue;
12464
12465 tp_name = prog->sec_name + pfx_len + 1;
12466 break;
ccaf12d6 12467 }
9af8efc4 12468
ccaf12d6
HT
12469 if (!tp_name) {
12470 pr_warn("prog '%s': invalid section name '%s'\n",
12471 prog->name, prog->sec_name);
4fa5bcfe 12472 return -EINVAL;
ccaf12d6 12473 }
d7a18ea7 12474
4fa5bcfe 12475 *link = bpf_program__attach_raw_tracepoint(prog, tp_name);
5fd2a60a 12476 return libbpf_get_error(*link);
d7a18ea7
AN
12477}
12478
1e092a03 12479/* Common logic for all BPF program types that attach to a btf_id */
129b9c5e
KFL
12480static struct bpf_link *bpf_program__attach_btf_id(const struct bpf_program *prog,
12481 const struct bpf_trace_opts *opts)
b8c54ea4 12482{
129b9c5e 12483 LIBBPF_OPTS(bpf_link_create_opts, link_opts);
b8c54ea4 12484 char errmsg[STRERR_BUFSIZE];
c016b68e 12485 struct bpf_link *link;
b8c54ea4
AS
12486 int prog_fd, pfd;
12487
129b9c5e
KFL
12488 if (!OPTS_VALID(opts, bpf_trace_opts))
12489 return libbpf_err_ptr(-EINVAL);
12490
b8c54ea4
AS
12491 prog_fd = bpf_program__fd(prog);
12492 if (prog_fd < 0) {
52109584 12493 pr_warn("prog '%s': can't attach before loaded\n", prog->name);
e9fc3ce9 12494 return libbpf_err_ptr(-EINVAL);
b8c54ea4
AS
12495 }
12496
d6958706 12497 link = calloc(1, sizeof(*link));
b8c54ea4 12498 if (!link)
e9fc3ce9 12499 return libbpf_err_ptr(-ENOMEM);
c016b68e 12500 link->detach = &bpf_link__detach_fd;
b8c54ea4 12501
8462e0b4 12502 /* libbpf is smart enough to redirect to BPF_RAW_TRACEPOINT_OPEN on old kernels */
129b9c5e
KFL
12503 link_opts.tracing.cookie = OPTS_GET(opts, cookie, 0);
12504 pfd = bpf_link_create(prog_fd, 0, bpf_program__expected_attach_type(prog), &link_opts);
b8c54ea4
AS
12505 if (pfd < 0) {
12506 pfd = -errno;
12507 free(link);
52109584
AN
12508 pr_warn("prog '%s': failed to attach: %s\n",
12509 prog->name, libbpf_strerror_r(pfd, errmsg, sizeof(errmsg)));
e9fc3ce9 12510 return libbpf_err_ptr(pfd);
b8c54ea4
AS
12511 }
12512 link->fd = pfd;
003fed59 12513 return link;
b8c54ea4
AS
12514}
12515
942025c9 12516struct bpf_link *bpf_program__attach_trace(const struct bpf_program *prog)
1e092a03 12517{
129b9c5e
KFL
12518 return bpf_program__attach_btf_id(prog, NULL);
12519}
12520
12521struct bpf_link *bpf_program__attach_trace_opts(const struct bpf_program *prog,
12522 const struct bpf_trace_opts *opts)
12523{
12524 return bpf_program__attach_btf_id(prog, opts);
1e092a03
KS
12525}
12526
942025c9 12527struct bpf_link *bpf_program__attach_lsm(const struct bpf_program *prog)
1e092a03 12528{
129b9c5e 12529 return bpf_program__attach_btf_id(prog, NULL);
1e092a03
KS
12530}
12531
4fa5bcfe 12532static int attach_trace(const struct bpf_program *prog, long cookie, struct bpf_link **link)
d7a18ea7 12533{
4fa5bcfe
AN
12534 *link = bpf_program__attach_trace(prog);
12535 return libbpf_get_error(*link);
d7a18ea7
AN
12536}
12537
4fa5bcfe 12538static int attach_lsm(const struct bpf_program *prog, long cookie, struct bpf_link **link)
1e092a03 12539{
4fa5bcfe
AN
12540 *link = bpf_program__attach_lsm(prog);
12541 return libbpf_get_error(*link);
1e092a03
KS
12542}
12543
d60d81ac 12544static struct bpf_link *
55cc3768
DB
12545bpf_program_attach_fd(const struct bpf_program *prog,
12546 int target_fd, const char *target_name,
12547 const struct bpf_link_create_opts *opts)
cc4f864b 12548{
cc4f864b
AN
12549 enum bpf_attach_type attach_type;
12550 char errmsg[STRERR_BUFSIZE];
12551 struct bpf_link *link;
12552 int prog_fd, link_fd;
12553
12554 prog_fd = bpf_program__fd(prog);
12555 if (prog_fd < 0) {
52109584 12556 pr_warn("prog '%s': can't attach before loaded\n", prog->name);
e9fc3ce9 12557 return libbpf_err_ptr(-EINVAL);
cc4f864b
AN
12558 }
12559
12560 link = calloc(1, sizeof(*link));
12561 if (!link)
e9fc3ce9 12562 return libbpf_err_ptr(-ENOMEM);
cc4f864b
AN
12563 link->detach = &bpf_link__detach_fd;
12564
20eccf29 12565 attach_type = bpf_program__expected_attach_type(prog);
55cc3768 12566 link_fd = bpf_link_create(prog_fd, target_fd, attach_type, opts);
cc4f864b
AN
12567 if (link_fd < 0) {
12568 link_fd = -errno;
12569 free(link);
52109584
AN
12570 pr_warn("prog '%s': failed to attach to %s: %s\n",
12571 prog->name, target_name,
cc4f864b 12572 libbpf_strerror_r(link_fd, errmsg, sizeof(errmsg)));
e9fc3ce9 12573 return libbpf_err_ptr(link_fd);
cc4f864b
AN
12574 }
12575 link->fd = link_fd;
12576 return link;
12577}
12578
d60d81ac 12579struct bpf_link *
942025c9 12580bpf_program__attach_cgroup(const struct bpf_program *prog, int cgroup_fd)
d60d81ac 12581{
55cc3768 12582 return bpf_program_attach_fd(prog, cgroup_fd, "cgroup", NULL);
d60d81ac
JS
12583}
12584
12585struct bpf_link *
942025c9 12586bpf_program__attach_netns(const struct bpf_program *prog, int netns_fd)
d60d81ac 12587{
55cc3768 12588 return bpf_program_attach_fd(prog, netns_fd, "netns", NULL);
d60d81ac
JS
12589}
12590
849989af
YS
12591struct bpf_link *
12592bpf_program__attach_sockmap(const struct bpf_program *prog, int map_fd)
12593{
12594 return bpf_program_attach_fd(prog, map_fd, "sockmap", NULL);
12595}
12596
942025c9 12597struct bpf_link *bpf_program__attach_xdp(const struct bpf_program *prog, int ifindex)
dc8698ca
AN
12598{
12599 /* target_fd/target_ifindex use the same field in LINK_CREATE */
55cc3768
DB
12600 return bpf_program_attach_fd(prog, ifindex, "xdp", NULL);
12601}
12602
12603struct bpf_link *
12604bpf_program__attach_tcx(const struct bpf_program *prog, int ifindex,
12605 const struct bpf_tcx_opts *opts)
12606{
12607 LIBBPF_OPTS(bpf_link_create_opts, link_create_opts);
12608 __u32 relative_id;
12609 int relative_fd;
12610
12611 if (!OPTS_VALID(opts, bpf_tcx_opts))
12612 return libbpf_err_ptr(-EINVAL);
12613
12614 relative_id = OPTS_GET(opts, relative_id, 0);
12615 relative_fd = OPTS_GET(opts, relative_fd, 0);
12616
12617 /* validate we don't have unexpected combinations of non-zero fields */
12618 if (!ifindex) {
12619 pr_warn("prog '%s': target netdevice ifindex cannot be zero\n",
12620 prog->name);
12621 return libbpf_err_ptr(-EINVAL);
12622 }
12623 if (relative_fd && relative_id) {
12624 pr_warn("prog '%s': relative_fd and relative_id cannot be set at the same time\n",
12625 prog->name);
12626 return libbpf_err_ptr(-EINVAL);
12627 }
12628
12629 link_create_opts.tcx.expected_revision = OPTS_GET(opts, expected_revision, 0);
12630 link_create_opts.tcx.relative_fd = relative_fd;
12631 link_create_opts.tcx.relative_id = relative_id;
12632 link_create_opts.flags = OPTS_GET(opts, flags, 0);
12633
12634 /* target_fd/target_ifindex use the same field in LINK_CREATE */
12635 return bpf_program_attach_fd(prog, ifindex, "tcx", &link_create_opts);
a5359091
THJ
12636}
12637
05c31b4a
DB
12638struct bpf_link *
12639bpf_program__attach_netkit(const struct bpf_program *prog, int ifindex,
12640 const struct bpf_netkit_opts *opts)
12641{
12642 LIBBPF_OPTS(bpf_link_create_opts, link_create_opts);
12643 __u32 relative_id;
12644 int relative_fd;
12645
12646 if (!OPTS_VALID(opts, bpf_netkit_opts))
12647 return libbpf_err_ptr(-EINVAL);
12648
12649 relative_id = OPTS_GET(opts, relative_id, 0);
12650 relative_fd = OPTS_GET(opts, relative_fd, 0);
12651
12652 /* validate we don't have unexpected combinations of non-zero fields */
12653 if (!ifindex) {
12654 pr_warn("prog '%s': target netdevice ifindex cannot be zero\n",
12655 prog->name);
12656 return libbpf_err_ptr(-EINVAL);
12657 }
12658 if (relative_fd && relative_id) {
12659 pr_warn("prog '%s': relative_fd and relative_id cannot be set at the same time\n",
12660 prog->name);
12661 return libbpf_err_ptr(-EINVAL);
12662 }
12663
12664 link_create_opts.netkit.expected_revision = OPTS_GET(opts, expected_revision, 0);
12665 link_create_opts.netkit.relative_fd = relative_fd;
12666 link_create_opts.netkit.relative_id = relative_id;
12667 link_create_opts.flags = OPTS_GET(opts, flags, 0);
12668
12669 return bpf_program_attach_fd(prog, ifindex, "netkit", &link_create_opts);
12670}
12671
942025c9 12672struct bpf_link *bpf_program__attach_freplace(const struct bpf_program *prog,
a5359091
THJ
12673 int target_fd,
12674 const char *attach_func_name)
12675{
12676 int btf_id;
12677
12678 if (!!target_fd != !!attach_func_name) {
12679 pr_warn("prog '%s': supply none or both of target_fd and attach_func_name\n",
12680 prog->name);
e9fc3ce9 12681 return libbpf_err_ptr(-EINVAL);
a5359091
THJ
12682 }
12683
12684 if (prog->type != BPF_PROG_TYPE_EXT) {
12685 pr_warn("prog '%s': only BPF_PROG_TYPE_EXT can attach as freplace",
12686 prog->name);
e9fc3ce9 12687 return libbpf_err_ptr(-EINVAL);
a5359091
THJ
12688 }
12689
12690 if (target_fd) {
55cc3768
DB
12691 LIBBPF_OPTS(bpf_link_create_opts, target_opts);
12692
a5359091
THJ
12693 btf_id = libbpf_find_prog_btf_id(attach_func_name, target_fd);
12694 if (btf_id < 0)
e9fc3ce9 12695 return libbpf_err_ptr(btf_id);
a5359091 12696
55cc3768
DB
12697 target_opts.target_btf_id = btf_id;
12698
12699 return bpf_program_attach_fd(prog, target_fd, "freplace",
12700 &target_opts);
a5359091
THJ
12701 } else {
12702 /* no target, so use raw_tracepoint_open for compatibility
12703 * with old kernels
12704 */
12705 return bpf_program__attach_trace(prog);
12706 }
dc8698ca
AN
12707}
12708
c09add2f 12709struct bpf_link *
942025c9 12710bpf_program__attach_iter(const struct bpf_program *prog,
c09add2f
YS
12711 const struct bpf_iter_attach_opts *opts)
12712{
cd31039a 12713 DECLARE_LIBBPF_OPTS(bpf_link_create_opts, link_create_opts);
c09add2f
YS
12714 char errmsg[STRERR_BUFSIZE];
12715 struct bpf_link *link;
12716 int prog_fd, link_fd;
cd31039a 12717 __u32 target_fd = 0;
c09add2f
YS
12718
12719 if (!OPTS_VALID(opts, bpf_iter_attach_opts))
e9fc3ce9 12720 return libbpf_err_ptr(-EINVAL);
c09add2f 12721
74fc097d
YS
12722 link_create_opts.iter_info = OPTS_GET(opts, link_info, (void *)0);
12723 link_create_opts.iter_info_len = OPTS_GET(opts, link_info_len, 0);
cd31039a 12724
c09add2f
YS
12725 prog_fd = bpf_program__fd(prog);
12726 if (prog_fd < 0) {
52109584 12727 pr_warn("prog '%s': can't attach before loaded\n", prog->name);
e9fc3ce9 12728 return libbpf_err_ptr(-EINVAL);
c09add2f
YS
12729 }
12730
12731 link = calloc(1, sizeof(*link));
12732 if (!link)
e9fc3ce9 12733 return libbpf_err_ptr(-ENOMEM);
c09add2f
YS
12734 link->detach = &bpf_link__detach_fd;
12735
cd31039a
YS
12736 link_fd = bpf_link_create(prog_fd, target_fd, BPF_TRACE_ITER,
12737 &link_create_opts);
c09add2f
YS
12738 if (link_fd < 0) {
12739 link_fd = -errno;
12740 free(link);
52109584
AN
12741 pr_warn("prog '%s': failed to attach to iterator: %s\n",
12742 prog->name, libbpf_strerror_r(link_fd, errmsg, sizeof(errmsg)));
e9fc3ce9 12743 return libbpf_err_ptr(link_fd);
c09add2f
YS
12744 }
12745 link->fd = link_fd;
12746 return link;
12747}
12748
4fa5bcfe 12749static int attach_iter(const struct bpf_program *prog, long cookie, struct bpf_link **link)
e9fc3ce9 12750{
4fa5bcfe
AN
12751 *link = bpf_program__attach_iter(prog, NULL);
12752 return libbpf_get_error(*link);
e9fc3ce9
AN
12753}
12754
52364abb
FW
12755struct bpf_link *bpf_program__attach_netfilter(const struct bpf_program *prog,
12756 const struct bpf_netfilter_opts *opts)
12757{
12758 LIBBPF_OPTS(bpf_link_create_opts, lopts);
12759 struct bpf_link *link;
12760 int prog_fd, link_fd;
12761
12762 if (!OPTS_VALID(opts, bpf_netfilter_opts))
12763 return libbpf_err_ptr(-EINVAL);
12764
12765 prog_fd = bpf_program__fd(prog);
12766 if (prog_fd < 0) {
12767 pr_warn("prog '%s': can't attach before loaded\n", prog->name);
12768 return libbpf_err_ptr(-EINVAL);
12769 }
12770
12771 link = calloc(1, sizeof(*link));
12772 if (!link)
12773 return libbpf_err_ptr(-ENOMEM);
12774
12775 link->detach = &bpf_link__detach_fd;
12776
12777 lopts.netfilter.pf = OPTS_GET(opts, pf, 0);
12778 lopts.netfilter.hooknum = OPTS_GET(opts, hooknum, 0);
12779 lopts.netfilter.priority = OPTS_GET(opts, priority, 0);
12780 lopts.netfilter.flags = OPTS_GET(opts, flags, 0);
12781
12782 link_fd = bpf_link_create(prog_fd, 0, BPF_NETFILTER, &lopts);
12783 if (link_fd < 0) {
12784 char errmsg[STRERR_BUFSIZE];
12785
12786 link_fd = -errno;
12787 free(link);
12788 pr_warn("prog '%s': failed to attach to netfilter: %s\n",
12789 prog->name, libbpf_strerror_r(link_fd, errmsg, sizeof(errmsg)));
12790 return libbpf_err_ptr(link_fd);
12791 }
12792 link->fd = link_fd;
12793
12794 return link;
12795}
12796
942025c9 12797struct bpf_link *bpf_program__attach(const struct bpf_program *prog)
d7a18ea7 12798{
4fa5bcfe
AN
12799 struct bpf_link *link = NULL;
12800 int err;
12801
12802 if (!prog->sec_def || !prog->sec_def->prog_attach_fn)
12803 return libbpf_err_ptr(-EOPNOTSUPP);
d7a18ea7 12804
7b30c296
MY
12805 if (bpf_program__fd(prog) < 0) {
12806 pr_warn("prog '%s': can't attach BPF program without FD (was it loaded?)\n",
12807 prog->name);
12808 return libbpf_err_ptr(-EINVAL);
12809 }
12810
4fa5bcfe
AN
12811 err = prog->sec_def->prog_attach_fn(prog, prog->sec_def->cookie, &link);
12812 if (err)
12813 return libbpf_err_ptr(err);
12814
12815 /* When calling bpf_program__attach() explicitly, auto-attach support
12816 * is expected to work, so NULL returned link is considered an error.
12817 * This is different for skeleton's attach, see comment in
12818 * bpf_object__attach_skeleton().
12819 */
12820 if (!link)
12821 return libbpf_err_ptr(-EOPNOTSUPP);
12822
12823 return link;
d7a18ea7
AN
12824}
12825
8d1608d7
KFL
12826struct bpf_link_struct_ops {
12827 struct bpf_link link;
12828 int map_fd;
12829};
12830
590a0088
MKL
12831static int bpf_link__detach_struct_ops(struct bpf_link *link)
12832{
8d1608d7 12833 struct bpf_link_struct_ops *st_link;
590a0088
MKL
12834 __u32 zero = 0;
12835
8d1608d7 12836 st_link = container_of(link, struct bpf_link_struct_ops, link);
590a0088 12837
8d1608d7
KFL
12838 if (st_link->map_fd < 0)
12839 /* w/o a real link */
12840 return bpf_map_delete_elem(link->fd, &zero);
12841
12842 return close(link->fd);
590a0088
MKL
12843}
12844
942025c9 12845struct bpf_link *bpf_map__attach_struct_ops(const struct bpf_map *map)
590a0088 12846{
8d1608d7
KFL
12847 struct bpf_link_struct_ops *link;
12848 __u32 zero = 0;
12849 int err, fd;
590a0088 12850
7b30c296 12851 if (!bpf_map__is_struct_ops(map))
e9fc3ce9 12852 return libbpf_err_ptr(-EINVAL);
590a0088 12853
7b30c296
MY
12854 if (map->fd < 0) {
12855 pr_warn("map '%s': can't attach BPF map without FD (was it created?)\n", map->name);
12856 return libbpf_err_ptr(-EINVAL);
12857 }
12858
590a0088
MKL
12859 link = calloc(1, sizeof(*link));
12860 if (!link)
e9fc3ce9 12861 return libbpf_err_ptr(-EINVAL);
590a0088 12862
8d1608d7
KFL
12863 /* kern_vdata should be prepared during the loading phase. */
12864 err = bpf_map_update_elem(map->fd, &zero, map->st_ops->kern_vdata, 0);
12865 /* It can be EBUSY if the map has been used to create or
12866 * update a link before. We don't allow updating the value of
12867 * a struct_ops once it is set. That ensures that the value
12868 * never changed. So, it is safe to skip EBUSY.
12869 */
12870 if (err && (!(map->def.map_flags & BPF_F_LINK) || err != -EBUSY)) {
12871 free(link);
12872 return libbpf_err_ptr(err);
12873 }
590a0088 12874
8d1608d7 12875 link->link.detach = bpf_link__detach_struct_ops;
590a0088 12876
8d1608d7
KFL
12877 if (!(map->def.map_flags & BPF_F_LINK)) {
12878 /* w/o a real link */
12879 link->link.fd = map->fd;
12880 link->map_fd = -1;
12881 return &link->link;
590a0088
MKL
12882 }
12883
8d1608d7
KFL
12884 fd = bpf_link_create(map->fd, 0, BPF_STRUCT_OPS, NULL);
12885 if (fd < 0) {
590a0088 12886 free(link);
8d1608d7 12887 return libbpf_err_ptr(fd);
590a0088
MKL
12888 }
12889
8d1608d7
KFL
12890 link->link.fd = fd;
12891 link->map_fd = map->fd;
590a0088 12892
8d1608d7 12893 return &link->link;
590a0088
MKL
12894}
12895
912dd4b0
KFL
12896/*
12897 * Swap the back struct_ops of a link with a new struct_ops map.
12898 */
12899int bpf_link__update_map(struct bpf_link *link, const struct bpf_map *map)
12900{
12901 struct bpf_link_struct_ops *st_ops_link;
12902 __u32 zero = 0;
12903 int err;
12904
7b30c296
MY
12905 if (!bpf_map__is_struct_ops(map))
12906 return -EINVAL;
12907
12908 if (map->fd < 0) {
12909 pr_warn("map '%s': can't use BPF map without FD (was it created?)\n", map->name);
912dd4b0 12910 return -EINVAL;
7b30c296 12911 }
912dd4b0
KFL
12912
12913 st_ops_link = container_of(link, struct bpf_link_struct_ops, link);
12914 /* Ensure the type of a link is correct */
12915 if (st_ops_link->map_fd < 0)
12916 return -EINVAL;
12917
12918 err = bpf_map_update_elem(map->fd, &zero, map->st_ops->kern_vdata, 0);
12919 /* It can be EBUSY if the map has been used to create or
12920 * update a link before. We don't allow updating the value of
12921 * a struct_ops once it is set. That ensures that the value
12922 * never changed. So, it is safe to skip EBUSY.
12923 */
12924 if (err && err != -EBUSY)
12925 return err;
12926
12927 err = bpf_link_update(link->fd, map->fd, NULL);
12928 if (err < 0)
12929 return err;
12930
12931 st_ops_link->map_fd = map->fd;
12932
12933 return 0;
12934}
12935
22dd7a58
AN
12936typedef enum bpf_perf_event_ret (*bpf_perf_event_print_t)(struct perf_event_header *hdr,
12937 void *private_data);
12938
7218c28c
CL
12939static enum bpf_perf_event_ret
12940perf_event_read_simple(void *mmap_mem, size_t mmap_size, size_t page_size,
12941 void **copy_mem, size_t *copy_size,
12942 bpf_perf_event_print_t fn, void *private_data)
d0cabbb0 12943{
3dca2115 12944 struct perf_event_mmap_page *header = mmap_mem;
a64af0ef 12945 __u64 data_head = ring_buffer_read_head(header);
d0cabbb0 12946 __u64 data_tail = header->data_tail;
3dca2115
DB
12947 void *base = ((__u8 *)header) + page_size;
12948 int ret = LIBBPF_PERF_EVENT_CONT;
12949 struct perf_event_header *ehdr;
12950 size_t ehdr_size;
12951
12952 while (data_head != data_tail) {
12953 ehdr = base + (data_tail & (mmap_size - 1));
12954 ehdr_size = ehdr->size;
12955
12956 if (((void *)ehdr) + ehdr_size > base + mmap_size) {
12957 void *copy_start = ehdr;
12958 size_t len_first = base + mmap_size - copy_start;
12959 size_t len_secnd = ehdr_size - len_first;
12960
12961 if (*copy_size < ehdr_size) {
12962 free(*copy_mem);
12963 *copy_mem = malloc(ehdr_size);
12964 if (!*copy_mem) {
12965 *copy_size = 0;
d0cabbb0
JK
12966 ret = LIBBPF_PERF_EVENT_ERROR;
12967 break;
12968 }
3dca2115 12969 *copy_size = ehdr_size;
d0cabbb0
JK
12970 }
12971
3dca2115
DB
12972 memcpy(*copy_mem, copy_start, len_first);
12973 memcpy(*copy_mem + len_first, base, len_secnd);
12974 ehdr = *copy_mem;
d0cabbb0
JK
12975 }
12976
3dca2115
DB
12977 ret = fn(ehdr, private_data);
12978 data_tail += ehdr_size;
d0cabbb0
JK
12979 if (ret != LIBBPF_PERF_EVENT_CONT)
12980 break;
d0cabbb0
JK
12981 }
12982
a64af0ef 12983 ring_buffer_write_tail(header, data_tail);
e9fc3ce9 12984 return libbpf_err(ret);
d0cabbb0 12985}
34be1646 12986
fb84b822
AN
12987struct perf_buffer;
12988
12989struct perf_buffer_params {
12990 struct perf_event_attr *attr;
12991 /* if event_cb is specified, it takes precendence */
12992 perf_buffer_event_fn event_cb;
12993 /* sample_cb and lost_cb are higher-level common-case callbacks */
12994 perf_buffer_sample_fn sample_cb;
12995 perf_buffer_lost_fn lost_cb;
12996 void *ctx;
12997 int cpu_cnt;
12998 int *cpus;
12999 int *map_keys;
13000};
13001
13002struct perf_cpu_buf {
13003 struct perf_buffer *pb;
13004 void *base; /* mmap()'ed memory */
13005 void *buf; /* for reconstructing segmented data */
13006 size_t buf_size;
13007 int fd;
13008 int cpu;
13009 int map_key;
13010};
13011
13012struct perf_buffer {
13013 perf_buffer_event_fn event_cb;
13014 perf_buffer_sample_fn sample_cb;
13015 perf_buffer_lost_fn lost_cb;
13016 void *ctx; /* passed into callbacks */
13017
13018 size_t page_size;
13019 size_t mmap_size;
13020 struct perf_cpu_buf **cpu_bufs;
13021 struct epoll_event *events;
783b8f01 13022 int cpu_cnt; /* number of allocated CPU buffers */
fb84b822
AN
13023 int epoll_fd; /* perf event FD */
13024 int map_fd; /* BPF_MAP_TYPE_PERF_EVENT_ARRAY BPF map FD */
13025};
13026
13027static void perf_buffer__free_cpu_buf(struct perf_buffer *pb,
13028 struct perf_cpu_buf *cpu_buf)
13029{
13030 if (!cpu_buf)
13031 return;
13032 if (cpu_buf->base &&
13033 munmap(cpu_buf->base, pb->mmap_size + pb->page_size))
be18010e 13034 pr_warn("failed to munmap cpu_buf #%d\n", cpu_buf->cpu);
fb84b822
AN
13035 if (cpu_buf->fd >= 0) {
13036 ioctl(cpu_buf->fd, PERF_EVENT_IOC_DISABLE, 0);
13037 close(cpu_buf->fd);
13038 }
13039 free(cpu_buf->buf);
13040 free(cpu_buf);
13041}
13042
13043void perf_buffer__free(struct perf_buffer *pb)
13044{
13045 int i;
13046
50450fc7 13047 if (IS_ERR_OR_NULL(pb))
fb84b822
AN
13048 return;
13049 if (pb->cpu_bufs) {
601b05ca 13050 for (i = 0; i < pb->cpu_cnt; i++) {
fb84b822
AN
13051 struct perf_cpu_buf *cpu_buf = pb->cpu_bufs[i];
13052
601b05ca
EC
13053 if (!cpu_buf)
13054 continue;
13055
fb84b822
AN
13056 bpf_map_delete_elem(pb->map_fd, &cpu_buf->map_key);
13057 perf_buffer__free_cpu_buf(pb, cpu_buf);
13058 }
13059 free(pb->cpu_bufs);
13060 }
13061 if (pb->epoll_fd >= 0)
13062 close(pb->epoll_fd);
13063 free(pb->events);
13064 free(pb);
13065}
13066
13067static struct perf_cpu_buf *
13068perf_buffer__open_cpu_buf(struct perf_buffer *pb, struct perf_event_attr *attr,
13069 int cpu, int map_key)
13070{
13071 struct perf_cpu_buf *cpu_buf;
13072 char msg[STRERR_BUFSIZE];
13073 int err;
13074
13075 cpu_buf = calloc(1, sizeof(*cpu_buf));
13076 if (!cpu_buf)
13077 return ERR_PTR(-ENOMEM);
13078
13079 cpu_buf->pb = pb;
13080 cpu_buf->cpu = cpu;
13081 cpu_buf->map_key = map_key;
13082
13083 cpu_buf->fd = syscall(__NR_perf_event_open, attr, -1 /* pid */, cpu,
13084 -1, PERF_FLAG_FD_CLOEXEC);
13085 if (cpu_buf->fd < 0) {
13086 err = -errno;
be18010e
KW
13087 pr_warn("failed to open perf buffer event on cpu #%d: %s\n",
13088 cpu, libbpf_strerror_r(err, msg, sizeof(msg)));
fb84b822
AN
13089 goto error;
13090 }
13091
13092 cpu_buf->base = mmap(NULL, pb->mmap_size + pb->page_size,
13093 PROT_READ | PROT_WRITE, MAP_SHARED,
13094 cpu_buf->fd, 0);
13095 if (cpu_buf->base == MAP_FAILED) {
13096 cpu_buf->base = NULL;
13097 err = -errno;
be18010e
KW
13098 pr_warn("failed to mmap perf buffer on cpu #%d: %s\n",
13099 cpu, libbpf_strerror_r(err, msg, sizeof(msg)));
fb84b822
AN
13100 goto error;
13101 }
13102
13103 if (ioctl(cpu_buf->fd, PERF_EVENT_IOC_ENABLE, 0) < 0) {
13104 err = -errno;
be18010e
KW
13105 pr_warn("failed to enable perf buffer event on cpu #%d: %s\n",
13106 cpu, libbpf_strerror_r(err, msg, sizeof(msg)));
fb84b822
AN
13107 goto error;
13108 }
13109
13110 return cpu_buf;
13111
13112error:
13113 perf_buffer__free_cpu_buf(pb, cpu_buf);
13114 return (struct perf_cpu_buf *)ERR_PTR(err);
13115}
13116
13117static struct perf_buffer *__perf_buffer__new(int map_fd, size_t page_cnt,
13118 struct perf_buffer_params *p);
13119
22dd7a58
AN
13120struct perf_buffer *perf_buffer__new(int map_fd, size_t page_cnt,
13121 perf_buffer_sample_fn sample_cb,
13122 perf_buffer_lost_fn lost_cb,
13123 void *ctx,
13124 const struct perf_buffer_opts *opts)
fb84b822 13125{
813847a3 13126 const size_t attr_sz = sizeof(struct perf_event_attr);
fb84b822 13127 struct perf_buffer_params p = {};
813847a3 13128 struct perf_event_attr attr;
ab8684b8 13129 __u32 sample_period;
41788934
AN
13130
13131 if (!OPTS_VALID(opts, perf_buffer_opts))
13132 return libbpf_err_ptr(-EINVAL);
4be6e05c 13133
ab8684b8
JD
13134 sample_period = OPTS_GET(opts, sample_period, 1);
13135 if (!sample_period)
13136 sample_period = 1;
13137
813847a3
AN
13138 memset(&attr, 0, attr_sz);
13139 attr.size = attr_sz;
65bb2e0f 13140 attr.config = PERF_COUNT_SW_BPF_OUTPUT;
4be6e05c
ACM
13141 attr.type = PERF_TYPE_SOFTWARE;
13142 attr.sample_type = PERF_SAMPLE_RAW;
ab8684b8
JD
13143 attr.sample_period = sample_period;
13144 attr.wakeup_events = sample_period;
fb84b822
AN
13145
13146 p.attr = &attr;
41788934
AN
13147 p.sample_cb = sample_cb;
13148 p.lost_cb = lost_cb;
13149 p.ctx = ctx;
fb84b822 13150
e9fc3ce9 13151 return libbpf_ptr(__perf_buffer__new(map_fd, page_cnt, &p));
fb84b822
AN
13152}
13153
22dd7a58
AN
13154struct perf_buffer *perf_buffer__new_raw(int map_fd, size_t page_cnt,
13155 struct perf_event_attr *attr,
13156 perf_buffer_event_fn event_cb, void *ctx,
13157 const struct perf_buffer_raw_opts *opts)
fb84b822
AN
13158{
13159 struct perf_buffer_params p = {};
13160
41332d6e 13161 if (!attr)
41788934
AN
13162 return libbpf_err_ptr(-EINVAL);
13163
13164 if (!OPTS_VALID(opts, perf_buffer_raw_opts))
13165 return libbpf_err_ptr(-EINVAL);
13166
13167 p.attr = attr;
13168 p.event_cb = event_cb;
13169 p.ctx = ctx;
13170 p.cpu_cnt = OPTS_GET(opts, cpu_cnt, 0);
13171 p.cpus = OPTS_GET(opts, cpus, NULL);
13172 p.map_keys = OPTS_GET(opts, map_keys, NULL);
fb84b822 13173
e9fc3ce9 13174 return libbpf_ptr(__perf_buffer__new(map_fd, page_cnt, &p));
fb84b822
AN
13175}
13176
13177static struct perf_buffer *__perf_buffer__new(int map_fd, size_t page_cnt,
13178 struct perf_buffer_params *p)
13179{
783b8f01 13180 const char *online_cpus_file = "/sys/devices/system/cpu/online";
0e289487 13181 struct bpf_map_info map;
fb84b822
AN
13182 char msg[STRERR_BUFSIZE];
13183 struct perf_buffer *pb;
783b8f01 13184 bool *online = NULL;
fb84b822 13185 __u32 map_info_len;
783b8f01 13186 int err, i, j, n;
fb84b822 13187
41332d6e 13188 if (page_cnt == 0 || (page_cnt & (page_cnt - 1))) {
be18010e
KW
13189 pr_warn("page count should be power of two, but is %zu\n",
13190 page_cnt);
fb84b822
AN
13191 return ERR_PTR(-EINVAL);
13192 }
13193
0e289487
AN
13194 /* best-effort sanity checks */
13195 memset(&map, 0, sizeof(map));
fb84b822 13196 map_info_len = sizeof(map);
629dfc66 13197 err = bpf_map_get_info_by_fd(map_fd, &map, &map_info_len);
fb84b822
AN
13198 if (err) {
13199 err = -errno;
0e289487
AN
13200 /* if BPF_OBJ_GET_INFO_BY_FD is supported, will return
13201 * -EBADFD, -EFAULT, or -E2BIG on real error
13202 */
13203 if (err != -EINVAL) {
13204 pr_warn("failed to get map info for map FD %d: %s\n",
13205 map_fd, libbpf_strerror_r(err, msg, sizeof(msg)));
13206 return ERR_PTR(err);
13207 }
13208 pr_debug("failed to get map info for FD %d; API not supported? Ignoring...\n",
13209 map_fd);
13210 } else {
13211 if (map.type != BPF_MAP_TYPE_PERF_EVENT_ARRAY) {
13212 pr_warn("map '%s' should be BPF_MAP_TYPE_PERF_EVENT_ARRAY\n",
13213 map.name);
13214 return ERR_PTR(-EINVAL);
13215 }
fb84b822
AN
13216 }
13217
13218 pb = calloc(1, sizeof(*pb));
13219 if (!pb)
13220 return ERR_PTR(-ENOMEM);
13221
13222 pb->event_cb = p->event_cb;
13223 pb->sample_cb = p->sample_cb;
13224 pb->lost_cb = p->lost_cb;
13225 pb->ctx = p->ctx;
13226
13227 pb->page_size = getpagesize();
13228 pb->mmap_size = pb->page_size * page_cnt;
13229 pb->map_fd = map_fd;
13230
13231 pb->epoll_fd = epoll_create1(EPOLL_CLOEXEC);
13232 if (pb->epoll_fd < 0) {
13233 err = -errno;
be18010e
KW
13234 pr_warn("failed to create epoll instance: %s\n",
13235 libbpf_strerror_r(err, msg, sizeof(msg)));
fb84b822
AN
13236 goto error;
13237 }
13238
13239 if (p->cpu_cnt > 0) {
13240 pb->cpu_cnt = p->cpu_cnt;
13241 } else {
13242 pb->cpu_cnt = libbpf_num_possible_cpus();
13243 if (pb->cpu_cnt < 0) {
13244 err = pb->cpu_cnt;
13245 goto error;
13246 }
0e289487 13247 if (map.max_entries && map.max_entries < pb->cpu_cnt)
fb84b822
AN
13248 pb->cpu_cnt = map.max_entries;
13249 }
13250
13251 pb->events = calloc(pb->cpu_cnt, sizeof(*pb->events));
13252 if (!pb->events) {
13253 err = -ENOMEM;
be18010e 13254 pr_warn("failed to allocate events: out of memory\n");
fb84b822
AN
13255 goto error;
13256 }
13257 pb->cpu_bufs = calloc(pb->cpu_cnt, sizeof(*pb->cpu_bufs));
13258 if (!pb->cpu_bufs) {
13259 err = -ENOMEM;
be18010e 13260 pr_warn("failed to allocate buffers: out of memory\n");
fb84b822
AN
13261 goto error;
13262 }
13263
783b8f01
AN
13264 err = parse_cpu_mask_file(online_cpus_file, &online, &n);
13265 if (err) {
13266 pr_warn("failed to get online CPU mask: %d\n", err);
13267 goto error;
13268 }
13269
13270 for (i = 0, j = 0; i < pb->cpu_cnt; i++) {
fb84b822
AN
13271 struct perf_cpu_buf *cpu_buf;
13272 int cpu, map_key;
13273
13274 cpu = p->cpu_cnt > 0 ? p->cpus[i] : i;
13275 map_key = p->cpu_cnt > 0 ? p->map_keys[i] : i;
13276
783b8f01
AN
13277 /* in case user didn't explicitly requested particular CPUs to
13278 * be attached to, skip offline/not present CPUs
13279 */
13280 if (p->cpu_cnt <= 0 && (cpu >= n || !online[cpu]))
13281 continue;
13282
fb84b822
AN
13283 cpu_buf = perf_buffer__open_cpu_buf(pb, p->attr, cpu, map_key);
13284 if (IS_ERR(cpu_buf)) {
13285 err = PTR_ERR(cpu_buf);
13286 goto error;
13287 }
13288
783b8f01 13289 pb->cpu_bufs[j] = cpu_buf;
fb84b822
AN
13290
13291 err = bpf_map_update_elem(pb->map_fd, &map_key,
13292 &cpu_buf->fd, 0);
13293 if (err) {
13294 err = -errno;
be18010e
KW
13295 pr_warn("failed to set cpu #%d, key %d -> perf FD %d: %s\n",
13296 cpu, map_key, cpu_buf->fd,
13297 libbpf_strerror_r(err, msg, sizeof(msg)));
fb84b822
AN
13298 goto error;
13299 }
13300
783b8f01
AN
13301 pb->events[j].events = EPOLLIN;
13302 pb->events[j].data.ptr = cpu_buf;
fb84b822 13303 if (epoll_ctl(pb->epoll_fd, EPOLL_CTL_ADD, cpu_buf->fd,
783b8f01 13304 &pb->events[j]) < 0) {
fb84b822 13305 err = -errno;
be18010e
KW
13306 pr_warn("failed to epoll_ctl cpu #%d perf FD %d: %s\n",
13307 cpu, cpu_buf->fd,
13308 libbpf_strerror_r(err, msg, sizeof(msg)));
fb84b822
AN
13309 goto error;
13310 }
783b8f01 13311 j++;
fb84b822 13312 }
783b8f01
AN
13313 pb->cpu_cnt = j;
13314 free(online);
fb84b822
AN
13315
13316 return pb;
13317
13318error:
783b8f01 13319 free(online);
fb84b822
AN
13320 if (pb)
13321 perf_buffer__free(pb);
13322 return ERR_PTR(err);
13323}
13324
13325struct perf_sample_raw {
13326 struct perf_event_header header;
13327 uint32_t size;
385bbf7b 13328 char data[];
fb84b822
AN
13329};
13330
13331struct perf_sample_lost {
13332 struct perf_event_header header;
13333 uint64_t id;
13334 uint64_t lost;
13335 uint64_t sample_id;
13336};
13337
13338static enum bpf_perf_event_ret
13339perf_buffer__process_record(struct perf_event_header *e, void *ctx)
13340{
13341 struct perf_cpu_buf *cpu_buf = ctx;
13342 struct perf_buffer *pb = cpu_buf->pb;
13343 void *data = e;
13344
13345 /* user wants full control over parsing perf event */
13346 if (pb->event_cb)
13347 return pb->event_cb(pb->ctx, cpu_buf->cpu, e);
13348
13349 switch (e->type) {
13350 case PERF_RECORD_SAMPLE: {
13351 struct perf_sample_raw *s = data;
13352
13353 if (pb->sample_cb)
13354 pb->sample_cb(pb->ctx, cpu_buf->cpu, s->data, s->size);
13355 break;
13356 }
13357 case PERF_RECORD_LOST: {
13358 struct perf_sample_lost *s = data;
13359
13360 if (pb->lost_cb)
13361 pb->lost_cb(pb->ctx, cpu_buf->cpu, s->lost);
13362 break;
13363 }
13364 default:
be18010e 13365 pr_warn("unknown perf sample type %d\n", e->type);
fb84b822
AN
13366 return LIBBPF_PERF_EVENT_ERROR;
13367 }
13368 return LIBBPF_PERF_EVENT_CONT;
13369}
13370
13371static int perf_buffer__process_records(struct perf_buffer *pb,
13372 struct perf_cpu_buf *cpu_buf)
13373{
13374 enum bpf_perf_event_ret ret;
13375
7218c28c
CL
13376 ret = perf_event_read_simple(cpu_buf->base, pb->mmap_size,
13377 pb->page_size, &cpu_buf->buf,
13378 &cpu_buf->buf_size,
13379 perf_buffer__process_record, cpu_buf);
fb84b822
AN
13380 if (ret != LIBBPF_PERF_EVENT_CONT)
13381 return ret;
13382 return 0;
13383}
13384
dca5612f
AN
13385int perf_buffer__epoll_fd(const struct perf_buffer *pb)
13386{
13387 return pb->epoll_fd;
13388}
13389
fb84b822
AN
13390int perf_buffer__poll(struct perf_buffer *pb, int timeout_ms)
13391{
13392 int i, cnt, err;
13393
13394 cnt = epoll_wait(pb->epoll_fd, pb->events, pb->cpu_cnt, timeout_ms);
e9fc3ce9 13395 if (cnt < 0)
af0efa05 13396 return -errno;
e9fc3ce9 13397
fb84b822
AN
13398 for (i = 0; i < cnt; i++) {
13399 struct perf_cpu_buf *cpu_buf = pb->events[i].data.ptr;
13400
13401 err = perf_buffer__process_records(pb, cpu_buf);
13402 if (err) {
be18010e 13403 pr_warn("error while processing records: %d\n", err);
e9fc3ce9 13404 return libbpf_err(err);
fb84b822
AN
13405 }
13406 }
e9fc3ce9 13407 return cnt;
fb84b822
AN
13408}
13409
dca5612f
AN
13410/* Return number of PERF_EVENT_ARRAY map slots set up by this perf_buffer
13411 * manager.
13412 */
13413size_t perf_buffer__buffer_cnt(const struct perf_buffer *pb)
13414{
13415 return pb->cpu_cnt;
13416}
13417
13418/*
13419 * Return perf_event FD of a ring buffer in *buf_idx* slot of
13420 * PERF_EVENT_ARRAY BPF map. This FD can be polled for new data using
13421 * select()/poll()/epoll() Linux syscalls.
13422 */
13423int perf_buffer__buffer_fd(const struct perf_buffer *pb, size_t buf_idx)
13424{
13425 struct perf_cpu_buf *cpu_buf;
13426
13427 if (buf_idx >= pb->cpu_cnt)
e9fc3ce9 13428 return libbpf_err(-EINVAL);
dca5612f
AN
13429
13430 cpu_buf = pb->cpu_bufs[buf_idx];
13431 if (!cpu_buf)
e9fc3ce9 13432 return libbpf_err(-ENOENT);
dca5612f
AN
13433
13434 return cpu_buf->fd;
13435}
13436
9ff5efde
JD
13437int perf_buffer__buffer(struct perf_buffer *pb, int buf_idx, void **buf, size_t *buf_size)
13438{
13439 struct perf_cpu_buf *cpu_buf;
13440
13441 if (buf_idx >= pb->cpu_cnt)
13442 return libbpf_err(-EINVAL);
13443
13444 cpu_buf = pb->cpu_bufs[buf_idx];
13445 if (!cpu_buf)
13446 return libbpf_err(-ENOENT);
13447
13448 *buf = cpu_buf->base;
13449 *buf_size = pb->mmap_size;
13450 return 0;
13451}
13452
dca5612f
AN
13453/*
13454 * Consume data from perf ring buffer corresponding to slot *buf_idx* in
13455 * PERF_EVENT_ARRAY BPF map without waiting/polling. If there is no data to
13456 * consume, do nothing and return success.
13457 * Returns:
13458 * - 0 on success;
13459 * - <0 on failure.
13460 */
13461int perf_buffer__consume_buffer(struct perf_buffer *pb, size_t buf_idx)
13462{
13463 struct perf_cpu_buf *cpu_buf;
13464
13465 if (buf_idx >= pb->cpu_cnt)
e9fc3ce9 13466 return libbpf_err(-EINVAL);
dca5612f
AN
13467
13468 cpu_buf = pb->cpu_bufs[buf_idx];
13469 if (!cpu_buf)
e9fc3ce9 13470 return libbpf_err(-ENOENT);
dca5612f
AN
13471
13472 return perf_buffer__process_records(pb, cpu_buf);
13473}
13474
272d51af
EC
13475int perf_buffer__consume(struct perf_buffer *pb)
13476{
13477 int i, err;
13478
13479 for (i = 0; i < pb->cpu_cnt; i++) {
13480 struct perf_cpu_buf *cpu_buf = pb->cpu_bufs[i];
13481
13482 if (!cpu_buf)
13483 continue;
13484
13485 err = perf_buffer__process_records(pb, cpu_buf);
13486 if (err) {
dca5612f 13487 pr_warn("perf_buffer: failed to process records in buffer #%d: %d\n", i, err);
e9fc3ce9 13488 return libbpf_err(err);
272d51af
EC
13489 }
13490 }
13491 return 0;
13492}
13493
ff26ce5c
EC
13494int bpf_program__set_attach_target(struct bpf_program *prog,
13495 int attach_prog_fd,
13496 const char *attach_func_name)
13497{
fe62de31 13498 int btf_obj_fd = 0, btf_id = 0, err;
ff26ce5c 13499
2d5ec1c6 13500 if (!prog || attach_prog_fd < 0)
e9fc3ce9 13501 return libbpf_err(-EINVAL);
ff26ce5c 13502
fe62de31 13503 if (prog->obj->loaded)
e9fc3ce9 13504 return libbpf_err(-EINVAL);
fe62de31 13505
2d5ec1c6
AN
13506 if (attach_prog_fd && !attach_func_name) {
13507 /* remember attach_prog_fd and let bpf_program__load() find
13508 * BTF ID during the program load
13509 */
13510 prog->attach_prog_fd = attach_prog_fd;
13511 return 0;
13512 }
13513
fe62de31 13514 if (attach_prog_fd) {
ff26ce5c
EC
13515 btf_id = libbpf_find_prog_btf_id(attach_func_name,
13516 attach_prog_fd);
fe62de31 13517 if (btf_id < 0)
e9fc3ce9 13518 return libbpf_err(btf_id);
fe62de31 13519 } else {
2d5ec1c6
AN
13520 if (!attach_func_name)
13521 return libbpf_err(-EINVAL);
13522
fe62de31
AN
13523 /* load btf_vmlinux, if not yet */
13524 err = bpf_object__load_vmlinux_btf(prog->obj, true);
13525 if (err)
e9fc3ce9 13526 return libbpf_err(err);
fe62de31
AN
13527 err = find_kernel_btf_id(prog->obj, attach_func_name,
13528 prog->expected_attach_type,
13529 &btf_obj_fd, &btf_id);
13530 if (err)
e9fc3ce9 13531 return libbpf_err(err);
fe62de31 13532 }
ff26ce5c
EC
13533
13534 prog->attach_btf_id = btf_id;
fe62de31 13535 prog->attach_btf_obj_fd = btf_obj_fd;
ff26ce5c
EC
13536 prog->attach_prog_fd = attach_prog_fd;
13537 return 0;
13538}
13539
6803ee25 13540int parse_cpu_mask_str(const char *s, bool **mask, int *mask_sz)
6446b315 13541{
6803ee25
AN
13542 int err = 0, n, len, start, end = -1;
13543 bool *tmp;
6446b315 13544
6803ee25
AN
13545 *mask = NULL;
13546 *mask_sz = 0;
13547
13548 /* Each sub string separated by ',' has format \d+-\d+ or \d+ */
13549 while (*s) {
13550 if (*s == ',' || *s == '\n') {
13551 s++;
13552 continue;
13553 }
13554 n = sscanf(s, "%d%n-%d%n", &start, &len, &end, &len);
13555 if (n <= 0 || n > 2) {
13556 pr_warn("Failed to get CPU range %s: %d\n", s, n);
13557 err = -EINVAL;
13558 goto cleanup;
13559 } else if (n == 1) {
13560 end = start;
13561 }
13562 if (start < 0 || start > end) {
13563 pr_warn("Invalid CPU range [%d,%d] in %s\n",
13564 start, end, s);
13565 err = -EINVAL;
13566 goto cleanup;
13567 }
13568 tmp = realloc(*mask, end + 1);
13569 if (!tmp) {
13570 err = -ENOMEM;
13571 goto cleanup;
13572 }
13573 *mask = tmp;
13574 memset(tmp + *mask_sz, 0, start - *mask_sz);
13575 memset(tmp + start, 1, end - start + 1);
13576 *mask_sz = end + 1;
13577 s += len;
13578 }
13579 if (!*mask_sz) {
13580 pr_warn("Empty CPU range\n");
13581 return -EINVAL;
13582 }
13583 return 0;
13584cleanup:
13585 free(*mask);
13586 *mask = NULL;
13587 return err;
13588}
13589
13590int parse_cpu_mask_file(const char *fcpu, bool **mask, int *mask_sz)
13591{
13592 int fd, err = 0, len;
13593 char buf[128];
6446b315 13594
92274e24 13595 fd = open(fcpu, O_RDONLY | O_CLOEXEC);
6446b315 13596 if (fd < 0) {
6803ee25
AN
13597 err = -errno;
13598 pr_warn("Failed to open cpu mask file %s: %d\n", fcpu, err);
13599 return err;
6446b315
HL
13600 }
13601 len = read(fd, buf, sizeof(buf));
13602 close(fd);
13603 if (len <= 0) {
6803ee25
AN
13604 err = len ? -errno : -EINVAL;
13605 pr_warn("Failed to read cpu mask from %s: %d\n", fcpu, err);
13606 return err;
6446b315 13607 }
6803ee25
AN
13608 if (len >= sizeof(buf)) {
13609 pr_warn("CPU mask is too big in file %s\n", fcpu);
13610 return -E2BIG;
6446b315
HL
13611 }
13612 buf[len] = '\0';
13613
6803ee25
AN
13614 return parse_cpu_mask_str(buf, mask, mask_sz);
13615}
13616
13617int libbpf_num_possible_cpus(void)
13618{
13619 static const char *fcpu = "/sys/devices/system/cpu/possible";
13620 static int cpus;
13621 int err, n, i, tmp_cpus;
13622 bool *mask;
13623
13624 tmp_cpus = READ_ONCE(cpus);
13625 if (tmp_cpus > 0)
13626 return tmp_cpus;
13627
13628 err = parse_cpu_mask_file(fcpu, &mask, &n);
13629 if (err)
e9fc3ce9 13630 return libbpf_err(err);
6803ee25
AN
13631
13632 tmp_cpus = 0;
13633 for (i = 0; i < n; i++) {
13634 if (mask[i])
13635 tmp_cpus++;
6446b315 13636 }
6803ee25 13637 free(mask);
56fbc241
TC
13638
13639 WRITE_ONCE(cpus, tmp_cpus);
13640 return tmp_cpus;
6446b315 13641}
d66562fb 13642
430025e5
DK
13643static int populate_skeleton_maps(const struct bpf_object *obj,
13644 struct bpf_map_skeleton *maps,
13645 size_t map_cnt)
13646{
13647 int i;
13648
13649 for (i = 0; i < map_cnt; i++) {
13650 struct bpf_map **map = maps[i].map;
13651 const char *name = maps[i].name;
13652 void **mmaped = maps[i].mmaped;
13653
13654 *map = bpf_object__find_map_by_name(obj, name);
13655 if (!*map) {
13656 pr_warn("failed to find skeleton map '%s'\n", name);
13657 return -ESRCH;
13658 }
13659
13660 /* externs shouldn't be pre-setup from user code */
13661 if (mmaped && (*map)->libbpf_type != LIBBPF_MAP_KCONFIG)
13662 *mmaped = (*map)->mmaped;
13663 }
13664 return 0;
13665}
13666
13667static int populate_skeleton_progs(const struct bpf_object *obj,
13668 struct bpf_prog_skeleton *progs,
13669 size_t prog_cnt)
13670{
13671 int i;
13672
13673 for (i = 0; i < prog_cnt; i++) {
13674 struct bpf_program **prog = progs[i].prog;
13675 const char *name = progs[i].name;
13676
13677 *prog = bpf_object__find_program_by_name(obj, name);
13678 if (!*prog) {
13679 pr_warn("failed to find skeleton program '%s'\n", name);
13680 return -ESRCH;
13681 }
13682 }
13683 return 0;
13684}
13685
d66562fb
AN
13686int bpf_object__open_skeleton(struct bpf_object_skeleton *s,
13687 const struct bpf_object_open_opts *opts)
13688{
13689 DECLARE_LIBBPF_OPTS(bpf_object_open_opts, skel_opts,
13690 .object_name = s->name,
13691 );
13692 struct bpf_object *obj;
430025e5 13693 int err;
d66562fb
AN
13694
13695 /* Attempt to preserve opts->object_name, unless overriden by user
13696 * explicitly. Overwriting object name for skeletons is discouraged,
13697 * as it breaks global data maps, because they contain object name
13698 * prefix as their own map name prefix. When skeleton is generated,
13699 * bpftool is making an assumption that this name will stay the same.
13700 */
13701 if (opts) {
13702 memcpy(&skel_opts, opts, sizeof(*opts));
13703 if (!opts->object_name)
13704 skel_opts.object_name = s->name;
13705 }
13706
13707 obj = bpf_object__open_mem(s->data, s->data_sz, &skel_opts);
e9fc3ce9
AN
13708 err = libbpf_get_error(obj);
13709 if (err) {
13710 pr_warn("failed to initialize skeleton BPF object '%s': %d\n",
13711 s->name, err);
13712 return libbpf_err(err);
d66562fb
AN
13713 }
13714
13715 *s->obj = obj;
430025e5
DK
13716 err = populate_skeleton_maps(obj, s->maps, s->map_cnt);
13717 if (err) {
13718 pr_warn("failed to populate skeleton maps for '%s': %d\n", s->name, err);
13719 return libbpf_err(err);
13720 }
13721
13722 err = populate_skeleton_progs(obj, s->progs, s->prog_cnt);
13723 if (err) {
13724 pr_warn("failed to populate skeleton progs for '%s': %d\n", s->name, err);
13725 return libbpf_err(err);
13726 }
d66562fb 13727
430025e5
DK
13728 return 0;
13729}
d66562fb 13730
430025e5
DK
13731int bpf_object__open_subskeleton(struct bpf_object_subskeleton *s)
13732{
13733 int err, len, var_idx, i;
13734 const char *var_name;
13735 const struct bpf_map *map;
13736 struct btf *btf;
13737 __u32 map_type_id;
13738 const struct btf_type *map_type, *var_type;
13739 const struct bpf_var_skeleton *var_skel;
13740 struct btf_var_secinfo *var;
d66562fb 13741
430025e5
DK
13742 if (!s->obj)
13743 return libbpf_err(-EINVAL);
13744
13745 btf = bpf_object__btf(s->obj);
13746 if (!btf) {
13747 pr_warn("subskeletons require BTF at runtime (object %s)\n",
e3ba8e4e 13748 bpf_object__name(s->obj));
430025e5 13749 return libbpf_err(-errno);
d66562fb
AN
13750 }
13751
430025e5
DK
13752 err = populate_skeleton_maps(s->obj, s->maps, s->map_cnt);
13753 if (err) {
13754 pr_warn("failed to populate subskeleton maps: %d\n", err);
13755 return libbpf_err(err);
13756 }
d66562fb 13757
430025e5
DK
13758 err = populate_skeleton_progs(s->obj, s->progs, s->prog_cnt);
13759 if (err) {
13760 pr_warn("failed to populate subskeleton maps: %d\n", err);
13761 return libbpf_err(err);
d66562fb
AN
13762 }
13763
430025e5
DK
13764 for (var_idx = 0; var_idx < s->var_cnt; var_idx++) {
13765 var_skel = &s->vars[var_idx];
13766 map = *var_skel->map;
13767 map_type_id = bpf_map__btf_value_type_id(map);
13768 map_type = btf__type_by_id(btf, map_type_id);
13769
13770 if (!btf_is_datasec(map_type)) {
13771 pr_warn("type for map '%1$s' is not a datasec: %2$s",
13772 bpf_map__name(map),
13773 __btf_kind_str(btf_kind(map_type)));
13774 return libbpf_err(-EINVAL);
13775 }
13776
13777 len = btf_vlen(map_type);
13778 var = btf_var_secinfos(map_type);
13779 for (i = 0; i < len; i++, var++) {
13780 var_type = btf__type_by_id(btf, var->type);
13781 var_name = btf__name_by_offset(btf, var_type->name_off);
13782 if (strcmp(var_name, var_skel->name) == 0) {
13783 *var_skel->addr = map->mmaped + var->offset;
13784 break;
13785 }
13786 }
13787 }
d66562fb
AN
13788 return 0;
13789}
13790
430025e5
DK
13791void bpf_object__destroy_subskeleton(struct bpf_object_subskeleton *s)
13792{
13793 if (!s)
13794 return;
13795 free(s->maps);
13796 free(s->progs);
13797 free(s->vars);
13798 free(s);
13799}
13800
d66562fb
AN
13801int bpf_object__load_skeleton(struct bpf_object_skeleton *s)
13802{
13803 int i, err;
13804
13805 err = bpf_object__load(*s->obj);
13806 if (err) {
13807 pr_warn("failed to load BPF skeleton '%s': %d\n", s->name, err);
e9fc3ce9 13808 return libbpf_err(err);
d66562fb
AN
13809 }
13810
13811 for (i = 0; i < s->map_cnt; i++) {
13812 struct bpf_map *map = *s->maps[i].map;
79ff13e9 13813 size_t mmap_sz = bpf_map_mmap_sz(map);
f08c18e0 13814 int prot, map_fd = map->fd;
d66562fb 13815 void **mmaped = s->maps[i].mmaped;
d66562fb
AN
13816
13817 if (!mmaped)
13818 continue;
13819
13820 if (!(map->def.map_flags & BPF_F_MMAPABLE)) {
13821 *mmaped = NULL;
13822 continue;
13823 }
13824
2e7ba4f8
AN
13825 if (map->def.type == BPF_MAP_TYPE_ARENA) {
13826 *mmaped = map->mmaped;
13827 continue;
13828 }
13829
d66562fb
AN
13830 if (map->def.map_flags & BPF_F_RDONLY_PROG)
13831 prot = PROT_READ;
13832 else
13833 prot = PROT_READ | PROT_WRITE;
13834
13835 /* Remap anonymous mmap()-ed "map initialization image" as
13836 * a BPF map-backed mmap()-ed memory, but preserving the same
13837 * memory address. This will cause kernel to change process'
13838 * page table to point to a different piece of kernel memory,
13839 * but from userspace point of view memory address (and its
13840 * contents, being identical at this point) will stay the
13841 * same. This mapping will be released by bpf_object__close()
13842 * as per normal clean up procedure, so we don't need to worry
13843 * about it from skeleton's clean up perspective.
13844 */
9d0a2331 13845 *mmaped = mmap(map->mmaped, mmap_sz, prot, MAP_SHARED | MAP_FIXED, map_fd, 0);
2ad97d47 13846 if (*mmaped == MAP_FAILED) {
d66562fb
AN
13847 err = -errno;
13848 *mmaped = NULL;
13849 pr_warn("failed to re-mmap() map '%s': %d\n",
13850 bpf_map__name(map), err);
e9fc3ce9 13851 return libbpf_err(err);
d66562fb
AN
13852 }
13853 }
13854
13855 return 0;
13856}
13857
13858int bpf_object__attach_skeleton(struct bpf_object_skeleton *s)
13859{
e9fc3ce9 13860 int i, err;
d66562fb
AN
13861
13862 for (i = 0; i < s->prog_cnt; i++) {
13863 struct bpf_program *prog = *s->progs[i].prog;
13864 struct bpf_link **link = s->progs[i].link;
d66562fb 13865
43cb8cba 13866 if (!prog->autoload || !prog->autoattach)
d9297581
AN
13867 continue;
13868
5532dfd4 13869 /* auto-attaching not supported for this program */
4fa5bcfe 13870 if (!prog->sec_def || !prog->sec_def->prog_attach_fn)
d66562fb
AN
13871 continue;
13872
4fa5bcfe
AN
13873 /* if user already set the link manually, don't attempt auto-attach */
13874 if (*link)
13875 continue;
13876
13877 err = prog->sec_def->prog_attach_fn(prog, prog->sec_def->cookie, link);
e9fc3ce9 13878 if (err) {
4fa5bcfe 13879 pr_warn("prog '%s': failed to auto-attach: %d\n",
e9fc3ce9
AN
13880 bpf_program__name(prog), err);
13881 return libbpf_err(err);
d66562fb 13882 }
4fa5bcfe
AN
13883
13884 /* It's possible that for some SEC() definitions auto-attach
13885 * is supported in some cases (e.g., if definition completely
13886 * specifies target information), but is not in other cases.
13887 * SEC("uprobe") is one such case. If user specified target
13888 * binary and function name, such BPF program can be
13889 * auto-attached. But if not, it shouldn't trigger skeleton's
13890 * attach to fail. It should just be skipped.
13891 * attach_fn signals such case with returning 0 (no error) and
13892 * setting link to NULL.
13893 */
d66562fb
AN
13894 }
13895
13896 return 0;
13897}
13898
13899void bpf_object__detach_skeleton(struct bpf_object_skeleton *s)
13900{
13901 int i;
13902
13903 for (i = 0; i < s->prog_cnt; i++) {
13904 struct bpf_link **link = s->progs[i].link;
13905
50450fc7 13906 bpf_link__destroy(*link);
d66562fb
AN
13907 *link = NULL;
13908 }
13909}
13910
13911void bpf_object__destroy_skeleton(struct bpf_object_skeleton *s)
13912{
a32ea51a
YS
13913 if (!s)
13914 return;
13915
d66562fb
AN
13916 if (s->progs)
13917 bpf_object__detach_skeleton(s);
13918 if (s->obj)
13919 bpf_object__close(*s->obj);
13920 free(s->maps);
13921 free(s->progs);
13922 free(s);
13923}